1 /* 2 * Copyright (c) 2015, Mellanox Technologies. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 31 */ 32 33 #include <linux/etherdevice.h> 34 #include <linux/mlx5/driver.h> 35 #include <linux/mlx5/mlx5_ifc.h> 36 #include <linux/mlx5/vport.h> 37 #include <linux/mlx5/fs.h> 38 #include <linux/mlx5/mpfs.h> 39 #include <linux/debugfs.h> 40 #include "esw/acl/lgcy.h" 41 #include "esw/legacy.h" 42 #include "esw/qos.h" 43 #include "mlx5_core.h" 44 #include "lib/eq.h" 45 #include "eswitch.h" 46 #include "fs_core.h" 47 #include "devlink.h" 48 #include "ecpf.h" 49 #include "en/mod_hdr.h" 50 51 enum { 52 MLX5_ACTION_NONE = 0, 53 MLX5_ACTION_ADD = 1, 54 MLX5_ACTION_DEL = 2, 55 }; 56 57 /* Vport UC/MC hash node */ 58 struct vport_addr { 59 struct l2addr_node node; 60 u8 action; 61 u16 vport; 62 struct mlx5_flow_handle *flow_rule; 63 bool mpfs; /* UC MAC was added to MPFs */ 64 /* A flag indicating that mac was added due to mc promiscuous vport */ 65 bool mc_promisc; 66 }; 67 68 static int mlx5_eswitch_check(const struct mlx5_core_dev *dev) 69 { 70 if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) 71 return -EOPNOTSUPP; 72 73 if (!MLX5_ESWITCH_MANAGER(dev)) 74 return -EOPNOTSUPP; 75 76 return 0; 77 } 78 79 struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink) 80 { 81 struct mlx5_core_dev *dev = devlink_priv(devlink); 82 int err; 83 84 err = mlx5_eswitch_check(dev); 85 if (err) 86 return ERR_PTR(err); 87 88 return dev->priv.eswitch; 89 } 90 91 struct mlx5_vport *__must_check 92 mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num) 93 { 94 struct mlx5_vport *vport; 95 96 if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager)) 97 return ERR_PTR(-EPERM); 98 99 vport = xa_load(&esw->vports, vport_num); 100 if (!vport) { 101 esw_debug(esw->dev, "vport out of range: num(0x%x)\n", vport_num); 102 return ERR_PTR(-EINVAL); 103 } 104 return vport; 105 } 106 107 static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport, 108 u32 events_mask) 109 { 110 u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {}; 111 void *nic_vport_ctx; 112 113 MLX5_SET(modify_nic_vport_context_in, in, 114 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT); 115 MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1); 116 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport); 117 MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1); 118 nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, 119 in, nic_vport_context); 120 121 MLX5_SET(nic_vport_context, nic_vport_ctx, arm_change_event, 1); 122 123 if (events_mask & MLX5_VPORT_UC_ADDR_CHANGE) 124 MLX5_SET(nic_vport_context, nic_vport_ctx, 125 event_on_uc_address_change, 1); 126 if (events_mask & MLX5_VPORT_MC_ADDR_CHANGE) 127 MLX5_SET(nic_vport_context, nic_vport_ctx, 128 event_on_mc_address_change, 1); 129 if (events_mask & MLX5_VPORT_PROMISC_CHANGE) 130 MLX5_SET(nic_vport_context, nic_vport_ctx, 131 event_on_promisc_change, 1); 132 133 return mlx5_cmd_exec_in(dev, modify_nic_vport_context, in); 134 } 135 136 /* E-Switch vport context HW commands */ 137 int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport, 138 bool other_vport, void *in) 139 { 140 MLX5_SET(modify_esw_vport_context_in, in, opcode, 141 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT); 142 MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport); 143 MLX5_SET(modify_esw_vport_context_in, in, other_vport, other_vport); 144 return mlx5_cmd_exec_in(dev, modify_esw_vport_context, in); 145 } 146 147 static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u16 vport, 148 u16 vlan, u8 qos, u8 set_flags) 149 { 150 u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {}; 151 152 if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) || 153 !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist)) 154 return -EOPNOTSUPP; 155 156 esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%x\n", 157 vport, vlan, qos, set_flags); 158 159 if (set_flags & SET_VLAN_STRIP) 160 MLX5_SET(modify_esw_vport_context_in, in, 161 esw_vport_context.vport_cvlan_strip, 1); 162 163 if (set_flags & SET_VLAN_INSERT) { 164 if (MLX5_CAP_ESW(dev, vport_cvlan_insert_always)) { 165 /* insert either if vlan exist in packet or not */ 166 MLX5_SET(modify_esw_vport_context_in, in, 167 esw_vport_context.vport_cvlan_insert, 168 MLX5_VPORT_CVLAN_INSERT_ALWAYS); 169 } else { 170 /* insert only if no vlan in packet */ 171 MLX5_SET(modify_esw_vport_context_in, in, 172 esw_vport_context.vport_cvlan_insert, 173 MLX5_VPORT_CVLAN_INSERT_WHEN_NO_CVLAN); 174 } 175 MLX5_SET(modify_esw_vport_context_in, in, 176 esw_vport_context.cvlan_pcp, qos); 177 MLX5_SET(modify_esw_vport_context_in, in, 178 esw_vport_context.cvlan_id, vlan); 179 } 180 181 MLX5_SET(modify_esw_vport_context_in, in, 182 field_select.vport_cvlan_strip, 1); 183 MLX5_SET(modify_esw_vport_context_in, in, 184 field_select.vport_cvlan_insert, 1); 185 186 return mlx5_eswitch_modify_esw_vport_context(dev, vport, true, in); 187 } 188 189 /* E-Switch FDB */ 190 static struct mlx5_flow_handle * 191 __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u16 vport, bool rx_rule, 192 u8 mac_c[ETH_ALEN], u8 mac_v[ETH_ALEN]) 193 { 194 int match_header = (is_zero_ether_addr(mac_c) ? 0 : 195 MLX5_MATCH_OUTER_HEADERS); 196 struct mlx5_flow_handle *flow_rule = NULL; 197 struct mlx5_flow_act flow_act = {0}; 198 struct mlx5_flow_destination dest = {}; 199 struct mlx5_flow_spec *spec; 200 void *mv_misc = NULL; 201 void *mc_misc = NULL; 202 u8 *dmac_v = NULL; 203 u8 *dmac_c = NULL; 204 205 if (rx_rule) 206 match_header |= MLX5_MATCH_MISC_PARAMETERS; 207 208 spec = kvzalloc(sizeof(*spec), GFP_KERNEL); 209 if (!spec) 210 return NULL; 211 212 dmac_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, 213 outer_headers.dmac_47_16); 214 dmac_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 215 outer_headers.dmac_47_16); 216 217 if (match_header & MLX5_MATCH_OUTER_HEADERS) { 218 ether_addr_copy(dmac_v, mac_v); 219 ether_addr_copy(dmac_c, mac_c); 220 } 221 222 if (match_header & MLX5_MATCH_MISC_PARAMETERS) { 223 mv_misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, 224 misc_parameters); 225 mc_misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 226 misc_parameters); 227 MLX5_SET(fte_match_set_misc, mv_misc, source_port, MLX5_VPORT_UPLINK); 228 MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port); 229 } 230 231 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; 232 dest.vport.num = vport; 233 234 esw_debug(esw->dev, 235 "\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n", 236 dmac_v, dmac_c, vport); 237 spec->match_criteria_enable = match_header; 238 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 239 flow_rule = 240 mlx5_add_flow_rules(esw->fdb_table.legacy.fdb, spec, 241 &flow_act, &dest, 1); 242 if (IS_ERR(flow_rule)) { 243 esw_warn(esw->dev, 244 "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n", 245 dmac_v, dmac_c, vport, PTR_ERR(flow_rule)); 246 flow_rule = NULL; 247 } 248 249 kvfree(spec); 250 return flow_rule; 251 } 252 253 static struct mlx5_flow_handle * 254 esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u16 vport) 255 { 256 u8 mac_c[ETH_ALEN]; 257 258 eth_broadcast_addr(mac_c); 259 return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac); 260 } 261 262 static struct mlx5_flow_handle * 263 esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u16 vport) 264 { 265 u8 mac_c[ETH_ALEN]; 266 u8 mac_v[ETH_ALEN]; 267 268 eth_zero_addr(mac_c); 269 eth_zero_addr(mac_v); 270 mac_c[0] = 0x01; 271 mac_v[0] = 0x01; 272 return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac_v); 273 } 274 275 static struct mlx5_flow_handle * 276 esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u16 vport) 277 { 278 u8 mac_c[ETH_ALEN]; 279 u8 mac_v[ETH_ALEN]; 280 281 eth_zero_addr(mac_c); 282 eth_zero_addr(mac_v); 283 return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v); 284 } 285 286 /* E-Switch vport UC/MC lists management */ 287 typedef int (*vport_addr_action)(struct mlx5_eswitch *esw, 288 struct vport_addr *vaddr); 289 290 static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) 291 { 292 u8 *mac = vaddr->node.addr; 293 u16 vport = vaddr->vport; 294 int err; 295 296 /* Skip mlx5_mpfs_add_mac for eswitch_managers, 297 * it is already done by its netdev in mlx5e_execute_l2_action 298 */ 299 if (mlx5_esw_is_manager_vport(esw, vport)) 300 goto fdb_add; 301 302 err = mlx5_mpfs_add_mac(esw->dev, mac); 303 if (err) { 304 esw_warn(esw->dev, 305 "Failed to add L2 table mac(%pM) for vport(0x%x), err(%d)\n", 306 mac, vport, err); 307 return err; 308 } 309 vaddr->mpfs = true; 310 311 fdb_add: 312 /* SRIOV is enabled: Forward UC MAC to vport */ 313 if (esw->fdb_table.legacy.fdb && esw->mode == MLX5_ESWITCH_LEGACY) 314 vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport); 315 316 esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM fr(%p)\n", 317 vport, mac, vaddr->flow_rule); 318 319 return 0; 320 } 321 322 static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) 323 { 324 u8 *mac = vaddr->node.addr; 325 u16 vport = vaddr->vport; 326 int err = 0; 327 328 /* Skip mlx5_mpfs_del_mac for eswitch managers, 329 * it is already done by its netdev in mlx5e_execute_l2_action 330 */ 331 if (!vaddr->mpfs || mlx5_esw_is_manager_vport(esw, vport)) 332 goto fdb_del; 333 334 err = mlx5_mpfs_del_mac(esw->dev, mac); 335 if (err) 336 esw_warn(esw->dev, 337 "Failed to del L2 table mac(%pM) for vport(%d), err(%d)\n", 338 mac, vport, err); 339 vaddr->mpfs = false; 340 341 fdb_del: 342 if (vaddr->flow_rule) 343 mlx5_del_flow_rules(vaddr->flow_rule); 344 vaddr->flow_rule = NULL; 345 346 return 0; 347 } 348 349 static void update_allmulti_vports(struct mlx5_eswitch *esw, 350 struct vport_addr *vaddr, 351 struct esw_mc_addr *esw_mc) 352 { 353 u8 *mac = vaddr->node.addr; 354 struct mlx5_vport *vport; 355 unsigned long i; 356 u16 vport_num; 357 358 mlx5_esw_for_each_vport(esw, i, vport) { 359 struct hlist_head *vport_hash = vport->mc_list; 360 struct vport_addr *iter_vaddr = 361 l2addr_hash_find(vport_hash, 362 mac, 363 struct vport_addr); 364 vport_num = vport->vport; 365 if (IS_ERR_OR_NULL(vport->allmulti_rule) || 366 vaddr->vport == vport_num) 367 continue; 368 switch (vaddr->action) { 369 case MLX5_ACTION_ADD: 370 if (iter_vaddr) 371 continue; 372 iter_vaddr = l2addr_hash_add(vport_hash, mac, 373 struct vport_addr, 374 GFP_KERNEL); 375 if (!iter_vaddr) { 376 esw_warn(esw->dev, 377 "ALL-MULTI: Failed to add MAC(%pM) to vport[%d] DB\n", 378 mac, vport_num); 379 continue; 380 } 381 iter_vaddr->vport = vport_num; 382 iter_vaddr->flow_rule = 383 esw_fdb_set_vport_rule(esw, 384 mac, 385 vport_num); 386 iter_vaddr->mc_promisc = true; 387 break; 388 case MLX5_ACTION_DEL: 389 if (!iter_vaddr) 390 continue; 391 mlx5_del_flow_rules(iter_vaddr->flow_rule); 392 l2addr_hash_del(iter_vaddr); 393 break; 394 } 395 } 396 } 397 398 static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) 399 { 400 struct hlist_head *hash = esw->mc_table; 401 struct esw_mc_addr *esw_mc; 402 u8 *mac = vaddr->node.addr; 403 u16 vport = vaddr->vport; 404 405 if (!esw->fdb_table.legacy.fdb) 406 return 0; 407 408 esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr); 409 if (esw_mc) 410 goto add; 411 412 esw_mc = l2addr_hash_add(hash, mac, struct esw_mc_addr, GFP_KERNEL); 413 if (!esw_mc) 414 return -ENOMEM; 415 416 esw_mc->uplink_rule = /* Forward MC MAC to Uplink */ 417 esw_fdb_set_vport_rule(esw, mac, MLX5_VPORT_UPLINK); 418 419 /* Add this multicast mac to all the mc promiscuous vports */ 420 update_allmulti_vports(esw, vaddr, esw_mc); 421 422 add: 423 /* If the multicast mac is added as a result of mc promiscuous vport, 424 * don't increment the multicast ref count 425 */ 426 if (!vaddr->mc_promisc) 427 esw_mc->refcnt++; 428 429 /* Forward MC MAC to vport */ 430 vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport); 431 esw_debug(esw->dev, 432 "\tADDED MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n", 433 vport, mac, vaddr->flow_rule, 434 esw_mc->refcnt, esw_mc->uplink_rule); 435 return 0; 436 } 437 438 static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) 439 { 440 struct hlist_head *hash = esw->mc_table; 441 struct esw_mc_addr *esw_mc; 442 u8 *mac = vaddr->node.addr; 443 u16 vport = vaddr->vport; 444 445 if (!esw->fdb_table.legacy.fdb) 446 return 0; 447 448 esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr); 449 if (!esw_mc) { 450 esw_warn(esw->dev, 451 "Failed to find eswitch MC addr for MAC(%pM) vport(%d)", 452 mac, vport); 453 return -EINVAL; 454 } 455 esw_debug(esw->dev, 456 "\tDELETE MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n", 457 vport, mac, vaddr->flow_rule, esw_mc->refcnt, 458 esw_mc->uplink_rule); 459 460 if (vaddr->flow_rule) 461 mlx5_del_flow_rules(vaddr->flow_rule); 462 vaddr->flow_rule = NULL; 463 464 /* If the multicast mac is added as a result of mc promiscuous vport, 465 * don't decrement the multicast ref count. 466 */ 467 if (vaddr->mc_promisc || (--esw_mc->refcnt > 0)) 468 return 0; 469 470 /* Remove this multicast mac from all the mc promiscuous vports */ 471 update_allmulti_vports(esw, vaddr, esw_mc); 472 473 if (esw_mc->uplink_rule) 474 mlx5_del_flow_rules(esw_mc->uplink_rule); 475 476 l2addr_hash_del(esw_mc); 477 return 0; 478 } 479 480 /* Apply vport UC/MC list to HW l2 table and FDB table */ 481 static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw, 482 struct mlx5_vport *vport, int list_type) 483 { 484 bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC; 485 vport_addr_action vport_addr_add; 486 vport_addr_action vport_addr_del; 487 struct vport_addr *addr; 488 struct l2addr_node *node; 489 struct hlist_head *hash; 490 struct hlist_node *tmp; 491 int hi; 492 493 vport_addr_add = is_uc ? esw_add_uc_addr : 494 esw_add_mc_addr; 495 vport_addr_del = is_uc ? esw_del_uc_addr : 496 esw_del_mc_addr; 497 498 hash = is_uc ? vport->uc_list : vport->mc_list; 499 for_each_l2hash_node(node, tmp, hash, hi) { 500 addr = container_of(node, struct vport_addr, node); 501 switch (addr->action) { 502 case MLX5_ACTION_ADD: 503 vport_addr_add(esw, addr); 504 addr->action = MLX5_ACTION_NONE; 505 break; 506 case MLX5_ACTION_DEL: 507 vport_addr_del(esw, addr); 508 l2addr_hash_del(addr); 509 break; 510 } 511 } 512 } 513 514 /* Sync vport UC/MC list from vport context */ 515 static void esw_update_vport_addr_list(struct mlx5_eswitch *esw, 516 struct mlx5_vport *vport, int list_type) 517 { 518 bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC; 519 u8 (*mac_list)[ETH_ALEN]; 520 struct l2addr_node *node; 521 struct vport_addr *addr; 522 struct hlist_head *hash; 523 struct hlist_node *tmp; 524 int size; 525 int err; 526 int hi; 527 int i; 528 529 size = is_uc ? MLX5_MAX_UC_PER_VPORT(esw->dev) : 530 MLX5_MAX_MC_PER_VPORT(esw->dev); 531 532 mac_list = kcalloc(size, ETH_ALEN, GFP_KERNEL); 533 if (!mac_list) 534 return; 535 536 hash = is_uc ? vport->uc_list : vport->mc_list; 537 538 for_each_l2hash_node(node, tmp, hash, hi) { 539 addr = container_of(node, struct vport_addr, node); 540 addr->action = MLX5_ACTION_DEL; 541 } 542 543 if (!vport->enabled) 544 goto out; 545 546 err = mlx5_query_nic_vport_mac_list(esw->dev, vport->vport, list_type, 547 mac_list, &size); 548 if (err) 549 goto out; 550 esw_debug(esw->dev, "vport[%d] context update %s list size (%d)\n", 551 vport->vport, is_uc ? "UC" : "MC", size); 552 553 for (i = 0; i < size; i++) { 554 if (is_uc && !is_valid_ether_addr(mac_list[i])) 555 continue; 556 557 if (!is_uc && !is_multicast_ether_addr(mac_list[i])) 558 continue; 559 560 addr = l2addr_hash_find(hash, mac_list[i], struct vport_addr); 561 if (addr) { 562 addr->action = MLX5_ACTION_NONE; 563 /* If this mac was previously added because of allmulti 564 * promiscuous rx mode, its now converted to be original 565 * vport mac. 566 */ 567 if (addr->mc_promisc) { 568 struct esw_mc_addr *esw_mc = 569 l2addr_hash_find(esw->mc_table, 570 mac_list[i], 571 struct esw_mc_addr); 572 if (!esw_mc) { 573 esw_warn(esw->dev, 574 "Failed to MAC(%pM) in mcast DB\n", 575 mac_list[i]); 576 continue; 577 } 578 esw_mc->refcnt++; 579 addr->mc_promisc = false; 580 } 581 continue; 582 } 583 584 addr = l2addr_hash_add(hash, mac_list[i], struct vport_addr, 585 GFP_KERNEL); 586 if (!addr) { 587 esw_warn(esw->dev, 588 "Failed to add MAC(%pM) to vport[%d] DB\n", 589 mac_list[i], vport->vport); 590 continue; 591 } 592 addr->vport = vport->vport; 593 addr->action = MLX5_ACTION_ADD; 594 } 595 out: 596 kfree(mac_list); 597 } 598 599 /* Sync vport UC/MC list from vport context 600 * Must be called after esw_update_vport_addr_list 601 */ 602 static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw, 603 struct mlx5_vport *vport) 604 { 605 struct l2addr_node *node; 606 struct vport_addr *addr; 607 struct hlist_head *hash; 608 struct hlist_node *tmp; 609 int hi; 610 611 hash = vport->mc_list; 612 613 for_each_l2hash_node(node, tmp, esw->mc_table, hi) { 614 u8 *mac = node->addr; 615 616 addr = l2addr_hash_find(hash, mac, struct vport_addr); 617 if (addr) { 618 if (addr->action == MLX5_ACTION_DEL) 619 addr->action = MLX5_ACTION_NONE; 620 continue; 621 } 622 addr = l2addr_hash_add(hash, mac, struct vport_addr, 623 GFP_KERNEL); 624 if (!addr) { 625 esw_warn(esw->dev, 626 "Failed to add allmulti MAC(%pM) to vport[%d] DB\n", 627 mac, vport->vport); 628 continue; 629 } 630 addr->vport = vport->vport; 631 addr->action = MLX5_ACTION_ADD; 632 addr->mc_promisc = true; 633 } 634 } 635 636 /* Apply vport rx mode to HW FDB table */ 637 static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, 638 struct mlx5_vport *vport, 639 bool promisc, bool mc_promisc) 640 { 641 struct esw_mc_addr *allmulti_addr = &esw->mc_promisc; 642 643 if (IS_ERR_OR_NULL(vport->allmulti_rule) != mc_promisc) 644 goto promisc; 645 646 if (mc_promisc) { 647 vport->allmulti_rule = 648 esw_fdb_set_vport_allmulti_rule(esw, vport->vport); 649 if (!allmulti_addr->uplink_rule) 650 allmulti_addr->uplink_rule = 651 esw_fdb_set_vport_allmulti_rule(esw, 652 MLX5_VPORT_UPLINK); 653 allmulti_addr->refcnt++; 654 } else if (vport->allmulti_rule) { 655 mlx5_del_flow_rules(vport->allmulti_rule); 656 vport->allmulti_rule = NULL; 657 658 if (--allmulti_addr->refcnt > 0) 659 goto promisc; 660 661 if (allmulti_addr->uplink_rule) 662 mlx5_del_flow_rules(allmulti_addr->uplink_rule); 663 allmulti_addr->uplink_rule = NULL; 664 } 665 666 promisc: 667 if (IS_ERR_OR_NULL(vport->promisc_rule) != promisc) 668 return; 669 670 if (promisc) { 671 vport->promisc_rule = 672 esw_fdb_set_vport_promisc_rule(esw, vport->vport); 673 } else if (vport->promisc_rule) { 674 mlx5_del_flow_rules(vport->promisc_rule); 675 vport->promisc_rule = NULL; 676 } 677 } 678 679 /* Sync vport rx mode from vport context */ 680 static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw, 681 struct mlx5_vport *vport) 682 { 683 int promisc_all = 0; 684 int promisc_uc = 0; 685 int promisc_mc = 0; 686 int err; 687 688 err = mlx5_query_nic_vport_promisc(esw->dev, 689 vport->vport, 690 &promisc_uc, 691 &promisc_mc, 692 &promisc_all); 693 if (err) 694 return; 695 esw_debug(esw->dev, "vport[%d] context update rx mode promisc_all=%d, all_multi=%d\n", 696 vport->vport, promisc_all, promisc_mc); 697 698 if (!vport->info.trusted || !vport->enabled) { 699 promisc_uc = 0; 700 promisc_mc = 0; 701 promisc_all = 0; 702 } 703 704 esw_apply_vport_rx_mode(esw, vport, promisc_all, 705 (promisc_all || promisc_mc)); 706 } 707 708 void esw_vport_change_handle_locked(struct mlx5_vport *vport) 709 { 710 struct mlx5_core_dev *dev = vport->dev; 711 struct mlx5_eswitch *esw = dev->priv.eswitch; 712 u8 mac[ETH_ALEN]; 713 714 mlx5_query_nic_vport_mac_address(dev, vport->vport, true, mac); 715 esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n", 716 vport->vport, mac); 717 718 if (vport->enabled_events & MLX5_VPORT_UC_ADDR_CHANGE) { 719 esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC); 720 esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC); 721 } 722 723 if (vport->enabled_events & MLX5_VPORT_MC_ADDR_CHANGE) 724 esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC); 725 726 if (vport->enabled_events & MLX5_VPORT_PROMISC_CHANGE) { 727 esw_update_vport_rx_mode(esw, vport); 728 if (!IS_ERR_OR_NULL(vport->allmulti_rule)) 729 esw_update_vport_mc_promisc(esw, vport); 730 } 731 732 if (vport->enabled_events & (MLX5_VPORT_PROMISC_CHANGE | MLX5_VPORT_MC_ADDR_CHANGE)) 733 esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC); 734 735 esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport); 736 if (vport->enabled) 737 arm_vport_context_events_cmd(dev, vport->vport, 738 vport->enabled_events); 739 } 740 741 static void esw_vport_change_handler(struct work_struct *work) 742 { 743 struct mlx5_vport *vport = 744 container_of(work, struct mlx5_vport, vport_change_handler); 745 struct mlx5_eswitch *esw = vport->dev->priv.eswitch; 746 747 mutex_lock(&esw->state_lock); 748 esw_vport_change_handle_locked(vport); 749 mutex_unlock(&esw->state_lock); 750 } 751 752 static void node_guid_gen_from_mac(u64 *node_guid, const u8 *mac) 753 { 754 ((u8 *)node_guid)[7] = mac[0]; 755 ((u8 *)node_guid)[6] = mac[1]; 756 ((u8 *)node_guid)[5] = mac[2]; 757 ((u8 *)node_guid)[4] = 0xff; 758 ((u8 *)node_guid)[3] = 0xfe; 759 ((u8 *)node_guid)[2] = mac[3]; 760 ((u8 *)node_guid)[1] = mac[4]; 761 ((u8 *)node_guid)[0] = mac[5]; 762 } 763 764 static int esw_vport_setup_acl(struct mlx5_eswitch *esw, 765 struct mlx5_vport *vport) 766 { 767 if (esw->mode == MLX5_ESWITCH_LEGACY) 768 return esw_legacy_vport_acl_setup(esw, vport); 769 else 770 return esw_vport_create_offloads_acl_tables(esw, vport); 771 } 772 773 static void esw_vport_cleanup_acl(struct mlx5_eswitch *esw, 774 struct mlx5_vport *vport) 775 { 776 if (esw->mode == MLX5_ESWITCH_LEGACY) 777 esw_legacy_vport_acl_cleanup(esw, vport); 778 else 779 esw_vport_destroy_offloads_acl_tables(esw, vport); 780 } 781 782 static int mlx5_esw_vport_caps_get(struct mlx5_eswitch *esw, struct mlx5_vport *vport) 783 { 784 int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out); 785 void *query_ctx; 786 void *hca_caps; 787 int err; 788 789 if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) 790 return 0; 791 792 query_ctx = kzalloc(query_out_sz, GFP_KERNEL); 793 if (!query_ctx) 794 return -ENOMEM; 795 796 err = mlx5_vport_get_other_func_cap(esw->dev, vport->vport, query_ctx, 797 MLX5_CAP_GENERAL); 798 if (err) 799 goto out_free; 800 801 hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability); 802 vport->info.roce_enabled = MLX5_GET(cmd_hca_cap, hca_caps, roce); 803 804 memset(query_ctx, 0, query_out_sz); 805 err = mlx5_vport_get_other_func_cap(esw->dev, vport->vport, query_ctx, 806 MLX5_CAP_GENERAL_2); 807 if (err) 808 goto out_free; 809 810 hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability); 811 vport->info.mig_enabled = MLX5_GET(cmd_hca_cap_2, hca_caps, migratable); 812 out_free: 813 kfree(query_ctx); 814 return err; 815 } 816 817 static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport) 818 { 819 bool vst_mode_steering = esw_vst_mode_is_steering(esw); 820 u16 vport_num = vport->vport; 821 int flags; 822 int err; 823 824 err = esw_vport_setup_acl(esw, vport); 825 if (err) 826 return err; 827 828 if (mlx5_esw_is_manager_vport(esw, vport_num)) 829 return 0; 830 831 err = mlx5_esw_vport_caps_get(esw, vport); 832 if (err) 833 goto err_caps; 834 835 mlx5_modify_vport_admin_state(esw->dev, 836 MLX5_VPORT_STATE_OP_MOD_ESW_VPORT, 837 vport_num, 1, 838 vport->info.link_state); 839 840 /* Host PF has its own mac/guid. */ 841 if (vport_num) { 842 mlx5_modify_nic_vport_mac_address(esw->dev, vport_num, 843 vport->info.mac); 844 mlx5_modify_nic_vport_node_guid(esw->dev, vport_num, 845 vport->info.node_guid); 846 } 847 848 flags = (vport->info.vlan || vport->info.qos) ? 849 SET_VLAN_STRIP | SET_VLAN_INSERT : 0; 850 if (esw->mode == MLX5_ESWITCH_OFFLOADS || !vst_mode_steering) 851 modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan, 852 vport->info.qos, flags); 853 854 return 0; 855 856 err_caps: 857 esw_vport_cleanup_acl(esw, vport); 858 return err; 859 } 860 861 /* Don't cleanup vport->info, it's needed to restore vport configuration */ 862 static void esw_vport_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport) 863 { 864 u16 vport_num = vport->vport; 865 866 if (!mlx5_esw_is_manager_vport(esw, vport_num)) 867 mlx5_modify_vport_admin_state(esw->dev, 868 MLX5_VPORT_STATE_OP_MOD_ESW_VPORT, 869 vport_num, 1, 870 MLX5_VPORT_ADMIN_STATE_DOWN); 871 872 mlx5_esw_qos_vport_disable(esw, vport); 873 esw_vport_cleanup_acl(esw, vport); 874 } 875 876 int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num, 877 enum mlx5_eswitch_vport_event enabled_events) 878 { 879 struct mlx5_vport *vport; 880 int ret; 881 882 vport = mlx5_eswitch_get_vport(esw, vport_num); 883 if (IS_ERR(vport)) 884 return PTR_ERR(vport); 885 886 mutex_lock(&esw->state_lock); 887 WARN_ON(vport->enabled); 888 889 esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num); 890 891 ret = esw_vport_setup(esw, vport); 892 if (ret) 893 goto done; 894 895 /* Sync with current vport context */ 896 vport->enabled_events = enabled_events; 897 vport->enabled = true; 898 899 /* Esw manager is trusted by default. Host PF (vport 0) is trusted as well 900 * in smartNIC as it's a vport group manager. 901 */ 902 if (mlx5_esw_is_manager_vport(esw, vport_num) || 903 (!vport_num && mlx5_core_is_ecpf(esw->dev))) 904 vport->info.trusted = true; 905 906 if (!mlx5_esw_is_manager_vport(esw, vport->vport) && 907 MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) { 908 ret = mlx5_esw_vport_vhca_id_set(esw, vport_num); 909 if (ret) 910 goto err_vhca_mapping; 911 } 912 913 /* External controller host PF has factory programmed MAC. 914 * Read it from the device. 915 */ 916 if (mlx5_core_is_ecpf(esw->dev) && vport_num == MLX5_VPORT_PF) 917 mlx5_query_nic_vport_mac_address(esw->dev, vport_num, true, vport->info.mac); 918 919 esw_vport_change_handle_locked(vport); 920 921 esw->enabled_vports++; 922 esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num); 923 done: 924 mutex_unlock(&esw->state_lock); 925 return ret; 926 927 err_vhca_mapping: 928 esw_vport_cleanup(esw, vport); 929 mutex_unlock(&esw->state_lock); 930 return ret; 931 } 932 933 void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num) 934 { 935 struct mlx5_vport *vport; 936 937 vport = mlx5_eswitch_get_vport(esw, vport_num); 938 if (IS_ERR(vport)) 939 return; 940 941 mutex_lock(&esw->state_lock); 942 if (!vport->enabled) 943 goto done; 944 945 esw_debug(esw->dev, "Disabling vport(%d)\n", vport_num); 946 /* Mark this vport as disabled to discard new events */ 947 vport->enabled = false; 948 949 /* Disable events from this vport */ 950 arm_vport_context_events_cmd(esw->dev, vport->vport, 0); 951 952 if (!mlx5_esw_is_manager_vport(esw, vport->vport) && 953 MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) 954 mlx5_esw_vport_vhca_id_clear(esw, vport_num); 955 956 /* We don't assume VFs will cleanup after themselves. 957 * Calling vport change handler while vport is disabled will cleanup 958 * the vport resources. 959 */ 960 esw_vport_change_handle_locked(vport); 961 vport->enabled_events = 0; 962 esw_apply_vport_rx_mode(esw, vport, false, false); 963 esw_vport_cleanup(esw, vport); 964 esw->enabled_vports--; 965 966 done: 967 mutex_unlock(&esw->state_lock); 968 } 969 970 static int eswitch_vport_event(struct notifier_block *nb, 971 unsigned long type, void *data) 972 { 973 struct mlx5_eswitch *esw = mlx5_nb_cof(nb, struct mlx5_eswitch, nb); 974 struct mlx5_eqe *eqe = data; 975 struct mlx5_vport *vport; 976 u16 vport_num; 977 978 vport_num = be16_to_cpu(eqe->data.vport_change.vport_num); 979 vport = mlx5_eswitch_get_vport(esw, vport_num); 980 if (!IS_ERR(vport)) 981 queue_work(esw->work_queue, &vport->vport_change_handler); 982 return NOTIFY_OK; 983 } 984 985 /** 986 * mlx5_esw_query_functions - Returns raw output about functions state 987 * @dev: Pointer to device to query 988 * 989 * mlx5_esw_query_functions() allocates and returns functions changed 990 * raw output memory pointer from device on success. Otherwise returns ERR_PTR. 991 * Caller must free the memory using kvfree() when valid pointer is returned. 992 */ 993 const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev) 994 { 995 int outlen = MLX5_ST_SZ_BYTES(query_esw_functions_out); 996 u32 in[MLX5_ST_SZ_DW(query_esw_functions_in)] = {}; 997 u32 *out; 998 int err; 999 1000 out = kvzalloc(outlen, GFP_KERNEL); 1001 if (!out) 1002 return ERR_PTR(-ENOMEM); 1003 1004 MLX5_SET(query_esw_functions_in, in, opcode, 1005 MLX5_CMD_OP_QUERY_ESW_FUNCTIONS); 1006 1007 err = mlx5_cmd_exec(dev, in, sizeof(in), out, outlen); 1008 if (!err) 1009 return out; 1010 1011 kvfree(out); 1012 return ERR_PTR(err); 1013 } 1014 1015 static void mlx5_eswitch_event_handlers_register(struct mlx5_eswitch *esw) 1016 { 1017 MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE); 1018 mlx5_eq_notifier_register(esw->dev, &esw->nb); 1019 1020 if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev)) { 1021 MLX5_NB_INIT(&esw->esw_funcs.nb, mlx5_esw_funcs_changed_handler, 1022 ESW_FUNCTIONS_CHANGED); 1023 mlx5_eq_notifier_register(esw->dev, &esw->esw_funcs.nb); 1024 } 1025 } 1026 1027 static void mlx5_eswitch_event_handlers_unregister(struct mlx5_eswitch *esw) 1028 { 1029 if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev)) 1030 mlx5_eq_notifier_unregister(esw->dev, &esw->esw_funcs.nb); 1031 1032 mlx5_eq_notifier_unregister(esw->dev, &esw->nb); 1033 1034 flush_workqueue(esw->work_queue); 1035 } 1036 1037 static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw) 1038 { 1039 struct mlx5_vport *vport; 1040 unsigned long i; 1041 1042 mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) { 1043 memset(&vport->qos, 0, sizeof(vport->qos)); 1044 memset(&vport->info, 0, sizeof(vport->info)); 1045 vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO; 1046 } 1047 } 1048 1049 /* Public E-Switch API */ 1050 int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num, 1051 enum mlx5_eswitch_vport_event enabled_events) 1052 { 1053 int err; 1054 1055 err = mlx5_esw_vport_enable(esw, vport_num, enabled_events); 1056 if (err) 1057 return err; 1058 1059 mlx5_esw_vport_debugfs_create(esw, vport_num, false, 0); 1060 err = esw_offloads_load_rep(esw, vport_num); 1061 if (err) 1062 goto err_rep; 1063 1064 return err; 1065 1066 err_rep: 1067 mlx5_esw_vport_debugfs_destroy(esw, vport_num); 1068 mlx5_esw_vport_disable(esw, vport_num); 1069 return err; 1070 } 1071 1072 void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num) 1073 { 1074 esw_offloads_unload_rep(esw, vport_num); 1075 mlx5_esw_vport_debugfs_destroy(esw, vport_num); 1076 mlx5_esw_vport_disable(esw, vport_num); 1077 } 1078 1079 void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs) 1080 { 1081 struct mlx5_vport *vport; 1082 unsigned long i; 1083 1084 mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) { 1085 if (!vport->enabled) 1086 continue; 1087 mlx5_eswitch_unload_vport(esw, vport->vport); 1088 } 1089 } 1090 1091 int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs, 1092 enum mlx5_eswitch_vport_event enabled_events) 1093 { 1094 struct mlx5_vport *vport; 1095 unsigned long i; 1096 int err; 1097 1098 mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) { 1099 err = mlx5_eswitch_load_vport(esw, vport->vport, enabled_events); 1100 if (err) 1101 goto vf_err; 1102 } 1103 1104 return 0; 1105 1106 vf_err: 1107 mlx5_eswitch_unload_vf_vports(esw, num_vfs); 1108 return err; 1109 } 1110 1111 static int host_pf_enable_hca(struct mlx5_core_dev *dev) 1112 { 1113 if (!mlx5_core_is_ecpf(dev)) 1114 return 0; 1115 1116 /* Once vport and representor are ready, take out the external host PF 1117 * out of initializing state. Enabling HCA clears the iser->initializing 1118 * bit and host PF driver loading can progress. 1119 */ 1120 return mlx5_cmd_host_pf_enable_hca(dev); 1121 } 1122 1123 static void host_pf_disable_hca(struct mlx5_core_dev *dev) 1124 { 1125 if (!mlx5_core_is_ecpf(dev)) 1126 return; 1127 1128 mlx5_cmd_host_pf_disable_hca(dev); 1129 } 1130 1131 /* mlx5_eswitch_enable_pf_vf_vports() enables vports of PF, ECPF and VFs 1132 * whichever are present on the eswitch. 1133 */ 1134 int 1135 mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw, 1136 enum mlx5_eswitch_vport_event enabled_events) 1137 { 1138 int ret; 1139 1140 /* Enable PF vport */ 1141 ret = mlx5_eswitch_load_vport(esw, MLX5_VPORT_PF, enabled_events); 1142 if (ret) 1143 return ret; 1144 1145 /* Enable external host PF HCA */ 1146 ret = host_pf_enable_hca(esw->dev); 1147 if (ret) 1148 goto pf_hca_err; 1149 1150 /* Enable ECPF vport */ 1151 if (mlx5_ecpf_vport_exists(esw->dev)) { 1152 ret = mlx5_eswitch_load_vport(esw, MLX5_VPORT_ECPF, enabled_events); 1153 if (ret) 1154 goto ecpf_err; 1155 } 1156 1157 /* Enable VF vports */ 1158 ret = mlx5_eswitch_load_vf_vports(esw, esw->esw_funcs.num_vfs, 1159 enabled_events); 1160 if (ret) 1161 goto vf_err; 1162 return 0; 1163 1164 vf_err: 1165 if (mlx5_ecpf_vport_exists(esw->dev)) 1166 mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF); 1167 ecpf_err: 1168 host_pf_disable_hca(esw->dev); 1169 pf_hca_err: 1170 mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF); 1171 return ret; 1172 } 1173 1174 /* mlx5_eswitch_disable_pf_vf_vports() disables vports of PF, ECPF and VFs 1175 * whichever are previously enabled on the eswitch. 1176 */ 1177 void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw) 1178 { 1179 mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs); 1180 1181 if (mlx5_ecpf_vport_exists(esw->dev)) 1182 mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF); 1183 1184 host_pf_disable_hca(esw->dev); 1185 mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF); 1186 } 1187 1188 static void mlx5_eswitch_get_devlink_param(struct mlx5_eswitch *esw) 1189 { 1190 struct devlink *devlink = priv_to_devlink(esw->dev); 1191 union devlink_param_value val; 1192 int err; 1193 1194 err = devl_param_driverinit_value_get(devlink, 1195 MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM, 1196 &val); 1197 if (!err) { 1198 esw->params.large_group_num = val.vu32; 1199 } else { 1200 esw_warn(esw->dev, 1201 "Devlink can't get param fdb_large_groups, uses default (%d).\n", 1202 ESW_OFFLOADS_DEFAULT_NUM_GROUPS); 1203 esw->params.large_group_num = ESW_OFFLOADS_DEFAULT_NUM_GROUPS; 1204 } 1205 } 1206 1207 static void 1208 mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, int num_vfs) 1209 { 1210 const u32 *out; 1211 1212 if (num_vfs < 0) 1213 return; 1214 1215 if (!mlx5_core_is_ecpf_esw_manager(esw->dev)) { 1216 esw->esw_funcs.num_vfs = num_vfs; 1217 return; 1218 } 1219 1220 out = mlx5_esw_query_functions(esw->dev); 1221 if (IS_ERR(out)) 1222 return; 1223 1224 esw->esw_funcs.num_vfs = MLX5_GET(query_esw_functions_out, out, 1225 host_params_context.host_num_of_vfs); 1226 kvfree(out); 1227 } 1228 1229 static void mlx5_esw_mode_change_notify(struct mlx5_eswitch *esw, u16 mode) 1230 { 1231 struct mlx5_esw_event_info info = {}; 1232 1233 info.new_mode = mode; 1234 1235 blocking_notifier_call_chain(&esw->n_head, 0, &info); 1236 } 1237 1238 static int mlx5_esw_acls_ns_init(struct mlx5_eswitch *esw) 1239 { 1240 struct mlx5_core_dev *dev = esw->dev; 1241 int total_vports; 1242 int err; 1243 1244 if (esw->flags & MLX5_ESWITCH_VPORT_ACL_NS_CREATED) 1245 return 0; 1246 1247 total_vports = mlx5_eswitch_get_total_vports(dev); 1248 1249 if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) { 1250 err = mlx5_fs_egress_acls_init(dev, total_vports); 1251 if (err) 1252 return err; 1253 } else { 1254 esw_warn(dev, "egress ACL is not supported by FW\n"); 1255 } 1256 1257 if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) { 1258 err = mlx5_fs_ingress_acls_init(dev, total_vports); 1259 if (err) 1260 goto err; 1261 } else { 1262 esw_warn(dev, "ingress ACL is not supported by FW\n"); 1263 } 1264 esw->flags |= MLX5_ESWITCH_VPORT_ACL_NS_CREATED; 1265 return 0; 1266 1267 err: 1268 if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) 1269 mlx5_fs_egress_acls_cleanup(dev); 1270 return err; 1271 } 1272 1273 static void mlx5_esw_acls_ns_cleanup(struct mlx5_eswitch *esw) 1274 { 1275 struct mlx5_core_dev *dev = esw->dev; 1276 1277 esw->flags &= ~MLX5_ESWITCH_VPORT_ACL_NS_CREATED; 1278 if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) 1279 mlx5_fs_ingress_acls_cleanup(dev); 1280 if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) 1281 mlx5_fs_egress_acls_cleanup(dev); 1282 } 1283 1284 /** 1285 * mlx5_eswitch_enable_locked - Enable eswitch 1286 * @esw: Pointer to eswitch 1287 * @num_vfs: Enable eswitch for given number of VFs. This is optional. 1288 * Valid value are 0, > 0 and MLX5_ESWITCH_IGNORE_NUM_VFS. 1289 * Caller should pass num_vfs > 0 when enabling eswitch for 1290 * vf vports. Caller should pass num_vfs = 0, when eswitch 1291 * is enabled without sriov VFs or when caller 1292 * is unaware of the sriov state of the host PF on ECPF based 1293 * eswitch. Caller should pass < 0 when num_vfs should be 1294 * completely ignored. This is typically the case when eswitch 1295 * is enabled without sriov regardless of PF/ECPF system. 1296 * mlx5_eswitch_enable_locked() Enables eswitch in either legacy or offloads 1297 * mode. If num_vfs >=0 is provided, it setup VF related eswitch vports. 1298 * It returns 0 on success or error code on failure. 1299 */ 1300 int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs) 1301 { 1302 int err; 1303 1304 lockdep_assert_held(&esw->mode_lock); 1305 1306 if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) { 1307 esw_warn(esw->dev, "FDB is not supported, aborting ...\n"); 1308 return -EOPNOTSUPP; 1309 } 1310 1311 mlx5_eswitch_get_devlink_param(esw); 1312 1313 err = mlx5_esw_acls_ns_init(esw); 1314 if (err) 1315 return err; 1316 1317 mlx5_eswitch_update_num_of_vfs(esw, num_vfs); 1318 1319 if (esw->mode == MLX5_ESWITCH_LEGACY) { 1320 err = esw_legacy_enable(esw); 1321 } else { 1322 mlx5_rescan_drivers(esw->dev); 1323 err = esw_offloads_enable(esw); 1324 } 1325 1326 if (err) 1327 goto abort; 1328 1329 esw->fdb_table.flags |= MLX5_ESW_FDB_CREATED; 1330 1331 mlx5_eswitch_event_handlers_register(esw); 1332 1333 esw_info(esw->dev, "Enable: mode(%s), nvfs(%d), active vports(%d)\n", 1334 esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS", 1335 esw->esw_funcs.num_vfs, esw->enabled_vports); 1336 1337 mlx5_esw_mode_change_notify(esw, esw->mode); 1338 1339 return 0; 1340 1341 abort: 1342 mlx5_esw_acls_ns_cleanup(esw); 1343 return err; 1344 } 1345 1346 /** 1347 * mlx5_eswitch_enable - Enable eswitch 1348 * @esw: Pointer to eswitch 1349 * @num_vfs: Enable eswitch switch for given number of VFs. 1350 * Caller must pass num_vfs > 0 when enabling eswitch for 1351 * vf vports. 1352 * mlx5_eswitch_enable() returns 0 on success or error code on failure. 1353 */ 1354 int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) 1355 { 1356 bool toggle_lag; 1357 int ret; 1358 1359 if (!mlx5_esw_allowed(esw)) 1360 return 0; 1361 1362 devl_assert_locked(priv_to_devlink(esw->dev)); 1363 1364 toggle_lag = !mlx5_esw_is_fdb_created(esw); 1365 1366 if (toggle_lag) 1367 mlx5_lag_disable_change(esw->dev); 1368 1369 down_write(&esw->mode_lock); 1370 if (!mlx5_esw_is_fdb_created(esw)) { 1371 ret = mlx5_eswitch_enable_locked(esw, num_vfs); 1372 } else { 1373 enum mlx5_eswitch_vport_event vport_events; 1374 1375 vport_events = (esw->mode == MLX5_ESWITCH_LEGACY) ? 1376 MLX5_LEGACY_SRIOV_VPORT_EVENTS : MLX5_VPORT_UC_ADDR_CHANGE; 1377 ret = mlx5_eswitch_load_vf_vports(esw, num_vfs, vport_events); 1378 if (!ret) 1379 esw->esw_funcs.num_vfs = num_vfs; 1380 } 1381 up_write(&esw->mode_lock); 1382 1383 if (toggle_lag) 1384 mlx5_lag_enable_change(esw->dev); 1385 1386 return ret; 1387 } 1388 1389 /* When disabling sriov, free driver level resources. */ 1390 void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf) 1391 { 1392 if (!mlx5_esw_allowed(esw)) 1393 return; 1394 1395 devl_assert_locked(priv_to_devlink(esw->dev)); 1396 down_write(&esw->mode_lock); 1397 /* If driver is unloaded, this function is called twice by remove_one() 1398 * and mlx5_unload(). Prevent the second call. 1399 */ 1400 if (!esw->esw_funcs.num_vfs && !clear_vf) 1401 goto unlock; 1402 1403 esw_info(esw->dev, "Unload vfs: mode(%s), nvfs(%d), active vports(%d)\n", 1404 esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS", 1405 esw->esw_funcs.num_vfs, esw->enabled_vports); 1406 1407 mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs); 1408 if (clear_vf) 1409 mlx5_eswitch_clear_vf_vports_info(esw); 1410 1411 if (esw->mode == MLX5_ESWITCH_OFFLOADS) { 1412 struct devlink *devlink = priv_to_devlink(esw->dev); 1413 1414 devl_rate_nodes_destroy(devlink); 1415 } 1416 /* Destroy legacy fdb when disabling sriov in legacy mode. */ 1417 if (esw->mode == MLX5_ESWITCH_LEGACY) 1418 mlx5_eswitch_disable_locked(esw); 1419 1420 esw->esw_funcs.num_vfs = 0; 1421 1422 unlock: 1423 up_write(&esw->mode_lock); 1424 } 1425 1426 /* Free resources for corresponding eswitch mode. It is called by devlink 1427 * when changing eswitch mode or modprobe when unloading driver. 1428 */ 1429 void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw) 1430 { 1431 struct devlink *devlink = priv_to_devlink(esw->dev); 1432 1433 /* Notify eswitch users that it is exiting from current mode. 1434 * So that it can do necessary cleanup before the eswitch is disabled. 1435 */ 1436 mlx5_esw_mode_change_notify(esw, MLX5_ESWITCH_LEGACY); 1437 1438 mlx5_eswitch_event_handlers_unregister(esw); 1439 1440 esw_info(esw->dev, "Disable: mode(%s), nvfs(%d), active vports(%d)\n", 1441 esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS", 1442 esw->esw_funcs.num_vfs, esw->enabled_vports); 1443 1444 if (esw->fdb_table.flags & MLX5_ESW_FDB_CREATED) { 1445 esw->fdb_table.flags &= ~MLX5_ESW_FDB_CREATED; 1446 if (esw->mode == MLX5_ESWITCH_OFFLOADS) 1447 esw_offloads_disable(esw); 1448 else if (esw->mode == MLX5_ESWITCH_LEGACY) 1449 esw_legacy_disable(esw); 1450 mlx5_esw_acls_ns_cleanup(esw); 1451 } 1452 1453 if (esw->mode == MLX5_ESWITCH_OFFLOADS) 1454 devl_rate_nodes_destroy(devlink); 1455 } 1456 1457 void mlx5_eswitch_disable(struct mlx5_eswitch *esw) 1458 { 1459 if (!mlx5_esw_allowed(esw)) 1460 return; 1461 1462 devl_assert_locked(priv_to_devlink(esw->dev)); 1463 mlx5_lag_disable_change(esw->dev); 1464 down_write(&esw->mode_lock); 1465 mlx5_eswitch_disable_locked(esw); 1466 esw->mode = MLX5_ESWITCH_LEGACY; 1467 up_write(&esw->mode_lock); 1468 mlx5_lag_enable_change(esw->dev); 1469 } 1470 1471 static int mlx5_query_hca_cap_host_pf(struct mlx5_core_dev *dev, void *out) 1472 { 1473 u16 opmod = (MLX5_CAP_GENERAL << 1) | (HCA_CAP_OPMOD_GET_MAX & 0x01); 1474 u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)] = {}; 1475 1476 MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP); 1477 MLX5_SET(query_hca_cap_in, in, op_mod, opmod); 1478 MLX5_SET(query_hca_cap_in, in, function_id, MLX5_VPORT_PF); 1479 MLX5_SET(query_hca_cap_in, in, other_function, true); 1480 return mlx5_cmd_exec_inout(dev, query_hca_cap, in, out); 1481 } 1482 1483 int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *sf_base_id) 1484 1485 { 1486 int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out); 1487 void *query_ctx; 1488 void *hca_caps; 1489 int err; 1490 1491 if (!mlx5_core_is_ecpf(dev) || mlx5_core_is_management_pf(dev)) { 1492 *max_sfs = 0; 1493 return 0; 1494 } 1495 1496 query_ctx = kzalloc(query_out_sz, GFP_KERNEL); 1497 if (!query_ctx) 1498 return -ENOMEM; 1499 1500 err = mlx5_query_hca_cap_host_pf(dev, query_ctx); 1501 if (err) 1502 goto out_free; 1503 1504 hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability); 1505 *max_sfs = MLX5_GET(cmd_hca_cap, hca_caps, max_num_sf); 1506 *sf_base_id = MLX5_GET(cmd_hca_cap, hca_caps, sf_base_id); 1507 1508 out_free: 1509 kfree(query_ctx); 1510 return err; 1511 } 1512 1513 static int mlx5_esw_vport_alloc(struct mlx5_eswitch *esw, struct mlx5_core_dev *dev, 1514 int index, u16 vport_num) 1515 { 1516 struct mlx5_vport *vport; 1517 int err; 1518 1519 vport = kzalloc(sizeof(*vport), GFP_KERNEL); 1520 if (!vport) 1521 return -ENOMEM; 1522 1523 vport->dev = esw->dev; 1524 vport->vport = vport_num; 1525 vport->index = index; 1526 vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO; 1527 INIT_WORK(&vport->vport_change_handler, esw_vport_change_handler); 1528 err = xa_insert(&esw->vports, vport_num, vport, GFP_KERNEL); 1529 if (err) 1530 goto insert_err; 1531 1532 esw->total_vports++; 1533 return 0; 1534 1535 insert_err: 1536 kfree(vport); 1537 return err; 1538 } 1539 1540 static void mlx5_esw_vport_free(struct mlx5_eswitch *esw, struct mlx5_vport *vport) 1541 { 1542 xa_erase(&esw->vports, vport->vport); 1543 kfree(vport); 1544 } 1545 1546 static void mlx5_esw_vports_cleanup(struct mlx5_eswitch *esw) 1547 { 1548 struct mlx5_vport *vport; 1549 unsigned long i; 1550 1551 mlx5_esw_for_each_vport(esw, i, vport) 1552 mlx5_esw_vport_free(esw, vport); 1553 xa_destroy(&esw->vports); 1554 } 1555 1556 static int mlx5_esw_vports_init(struct mlx5_eswitch *esw) 1557 { 1558 struct mlx5_core_dev *dev = esw->dev; 1559 u16 max_host_pf_sfs; 1560 u16 base_sf_num; 1561 int idx = 0; 1562 int err; 1563 int i; 1564 1565 xa_init(&esw->vports); 1566 1567 err = mlx5_esw_vport_alloc(esw, dev, idx, MLX5_VPORT_PF); 1568 if (err) 1569 goto err; 1570 if (esw->first_host_vport == MLX5_VPORT_PF) 1571 xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN); 1572 idx++; 1573 1574 for (i = 0; i < mlx5_core_max_vfs(dev); i++) { 1575 err = mlx5_esw_vport_alloc(esw, dev, idx, idx); 1576 if (err) 1577 goto err; 1578 xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_VF); 1579 xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN); 1580 idx++; 1581 } 1582 base_sf_num = mlx5_sf_start_function_id(dev); 1583 for (i = 0; i < mlx5_sf_max_functions(dev); i++) { 1584 err = mlx5_esw_vport_alloc(esw, dev, idx, base_sf_num + i); 1585 if (err) 1586 goto err; 1587 xa_set_mark(&esw->vports, base_sf_num + i, MLX5_ESW_VPT_SF); 1588 idx++; 1589 } 1590 1591 err = mlx5_esw_sf_max_hpf_functions(dev, &max_host_pf_sfs, &base_sf_num); 1592 if (err) 1593 goto err; 1594 for (i = 0; i < max_host_pf_sfs; i++) { 1595 err = mlx5_esw_vport_alloc(esw, dev, idx, base_sf_num + i); 1596 if (err) 1597 goto err; 1598 xa_set_mark(&esw->vports, base_sf_num + i, MLX5_ESW_VPT_SF); 1599 idx++; 1600 } 1601 1602 if (mlx5_ecpf_vport_exists(dev)) { 1603 err = mlx5_esw_vport_alloc(esw, dev, idx, MLX5_VPORT_ECPF); 1604 if (err) 1605 goto err; 1606 idx++; 1607 } 1608 err = mlx5_esw_vport_alloc(esw, dev, idx, MLX5_VPORT_UPLINK); 1609 if (err) 1610 goto err; 1611 return 0; 1612 1613 err: 1614 mlx5_esw_vports_cleanup(esw); 1615 return err; 1616 } 1617 1618 int mlx5_eswitch_init(struct mlx5_core_dev *dev) 1619 { 1620 struct mlx5_eswitch *esw; 1621 int err; 1622 1623 if (!MLX5_VPORT_MANAGER(dev)) 1624 return 0; 1625 1626 esw = kzalloc(sizeof(*esw), GFP_KERNEL); 1627 if (!esw) 1628 return -ENOMEM; 1629 1630 esw->dev = dev; 1631 esw->manager_vport = mlx5_eswitch_manager_vport(dev); 1632 esw->first_host_vport = mlx5_eswitch_first_host_vport_num(dev); 1633 1634 esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq"); 1635 if (!esw->work_queue) { 1636 err = -ENOMEM; 1637 goto abort; 1638 } 1639 1640 err = mlx5_esw_vports_init(esw); 1641 if (err) 1642 goto abort; 1643 1644 err = esw_offloads_init(esw); 1645 if (err) 1646 goto reps_err; 1647 1648 mutex_init(&esw->offloads.encap_tbl_lock); 1649 hash_init(esw->offloads.encap_tbl); 1650 mutex_init(&esw->offloads.decap_tbl_lock); 1651 hash_init(esw->offloads.decap_tbl); 1652 mlx5e_mod_hdr_tbl_init(&esw->offloads.mod_hdr); 1653 atomic64_set(&esw->offloads.num_flows, 0); 1654 ida_init(&esw->offloads.vport_metadata_ida); 1655 xa_init_flags(&esw->offloads.vhca_map, XA_FLAGS_ALLOC); 1656 mutex_init(&esw->state_lock); 1657 init_rwsem(&esw->mode_lock); 1658 refcount_set(&esw->qos.refcnt, 0); 1659 1660 esw->enabled_vports = 0; 1661 esw->mode = MLX5_ESWITCH_LEGACY; 1662 esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE; 1663 if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) && 1664 MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)) 1665 esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC; 1666 else 1667 esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE; 1668 if (MLX5_ESWITCH_MANAGER(dev) && 1669 mlx5_esw_vport_match_metadata_supported(esw)) 1670 esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA; 1671 1672 dev->priv.eswitch = esw; 1673 BLOCKING_INIT_NOTIFIER_HEAD(&esw->n_head); 1674 1675 esw->dbgfs = debugfs_create_dir("esw", mlx5_debugfs_get_dev_root(esw->dev)); 1676 esw_info(dev, 1677 "Total vports %d, per vport: max uc(%d) max mc(%d)\n", 1678 esw->total_vports, 1679 MLX5_MAX_UC_PER_VPORT(dev), 1680 MLX5_MAX_MC_PER_VPORT(dev)); 1681 return 0; 1682 1683 reps_err: 1684 mlx5_esw_vports_cleanup(esw); 1685 abort: 1686 if (esw->work_queue) 1687 destroy_workqueue(esw->work_queue); 1688 kfree(esw); 1689 return err; 1690 } 1691 1692 void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) 1693 { 1694 if (!esw || !MLX5_VPORT_MANAGER(esw->dev)) 1695 return; 1696 1697 esw_info(esw->dev, "cleanup\n"); 1698 1699 debugfs_remove_recursive(esw->dbgfs); 1700 esw->dev->priv.eswitch = NULL; 1701 destroy_workqueue(esw->work_queue); 1702 WARN_ON(refcount_read(&esw->qos.refcnt)); 1703 mutex_destroy(&esw->state_lock); 1704 WARN_ON(!xa_empty(&esw->offloads.vhca_map)); 1705 xa_destroy(&esw->offloads.vhca_map); 1706 ida_destroy(&esw->offloads.vport_metadata_ida); 1707 mlx5e_mod_hdr_tbl_destroy(&esw->offloads.mod_hdr); 1708 mutex_destroy(&esw->offloads.encap_tbl_lock); 1709 mutex_destroy(&esw->offloads.decap_tbl_lock); 1710 esw_offloads_cleanup(esw); 1711 mlx5_esw_vports_cleanup(esw); 1712 kfree(esw); 1713 } 1714 1715 /* Vport Administration */ 1716 static int 1717 mlx5_esw_set_vport_mac_locked(struct mlx5_eswitch *esw, 1718 struct mlx5_vport *evport, const u8 *mac) 1719 { 1720 u16 vport_num = evport->vport; 1721 u64 node_guid; 1722 int err = 0; 1723 1724 if (is_multicast_ether_addr(mac)) 1725 return -EINVAL; 1726 1727 if (evport->info.spoofchk && !is_valid_ether_addr(mac)) 1728 mlx5_core_warn(esw->dev, 1729 "Set invalid MAC while spoofchk is on, vport(%d)\n", 1730 vport_num); 1731 1732 err = mlx5_modify_nic_vport_mac_address(esw->dev, vport_num, mac); 1733 if (err) { 1734 mlx5_core_warn(esw->dev, 1735 "Failed to mlx5_modify_nic_vport_mac vport(%d) err=(%d)\n", 1736 vport_num, err); 1737 return err; 1738 } 1739 1740 node_guid_gen_from_mac(&node_guid, mac); 1741 err = mlx5_modify_nic_vport_node_guid(esw->dev, vport_num, node_guid); 1742 if (err) 1743 mlx5_core_warn(esw->dev, 1744 "Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n", 1745 vport_num, err); 1746 1747 ether_addr_copy(evport->info.mac, mac); 1748 evport->info.node_guid = node_guid; 1749 if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY) 1750 err = esw_acl_ingress_lgcy_setup(esw, evport); 1751 1752 return err; 1753 } 1754 1755 int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, 1756 u16 vport, const u8 *mac) 1757 { 1758 struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); 1759 int err = 0; 1760 1761 if (IS_ERR(evport)) 1762 return PTR_ERR(evport); 1763 1764 mutex_lock(&esw->state_lock); 1765 err = mlx5_esw_set_vport_mac_locked(esw, evport, mac); 1766 mutex_unlock(&esw->state_lock); 1767 return err; 1768 } 1769 1770 static bool mlx5_esw_check_port_type(struct mlx5_eswitch *esw, u16 vport_num, xa_mark_t mark) 1771 { 1772 struct mlx5_vport *vport; 1773 1774 vport = mlx5_eswitch_get_vport(esw, vport_num); 1775 if (IS_ERR(vport)) 1776 return false; 1777 1778 return xa_get_mark(&esw->vports, vport_num, mark); 1779 } 1780 1781 bool mlx5_eswitch_is_vf_vport(struct mlx5_eswitch *esw, u16 vport_num) 1782 { 1783 return mlx5_esw_check_port_type(esw, vport_num, MLX5_ESW_VPT_VF); 1784 } 1785 1786 bool mlx5_esw_is_sf_vport(struct mlx5_eswitch *esw, u16 vport_num) 1787 { 1788 return mlx5_esw_check_port_type(esw, vport_num, MLX5_ESW_VPT_SF); 1789 } 1790 1791 int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, 1792 u16 vport, int link_state) 1793 { 1794 struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); 1795 int opmod = MLX5_VPORT_STATE_OP_MOD_ESW_VPORT; 1796 int other_vport = 1; 1797 int err = 0; 1798 1799 if (!mlx5_esw_allowed(esw)) 1800 return -EPERM; 1801 if (IS_ERR(evport)) 1802 return PTR_ERR(evport); 1803 1804 if (vport == MLX5_VPORT_UPLINK) { 1805 opmod = MLX5_VPORT_STATE_OP_MOD_UPLINK; 1806 other_vport = 0; 1807 vport = 0; 1808 } 1809 mutex_lock(&esw->state_lock); 1810 if (esw->mode != MLX5_ESWITCH_LEGACY) { 1811 err = -EOPNOTSUPP; 1812 goto unlock; 1813 } 1814 1815 err = mlx5_modify_vport_admin_state(esw->dev, opmod, vport, other_vport, link_state); 1816 if (err) { 1817 mlx5_core_warn(esw->dev, "Failed to set vport %d link state, opmod = %d, err = %d", 1818 vport, opmod, err); 1819 goto unlock; 1820 } 1821 1822 evport->info.link_state = link_state; 1823 1824 unlock: 1825 mutex_unlock(&esw->state_lock); 1826 return err; 1827 } 1828 1829 int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, 1830 u16 vport, struct ifla_vf_info *ivi) 1831 { 1832 struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); 1833 1834 if (IS_ERR(evport)) 1835 return PTR_ERR(evport); 1836 1837 memset(ivi, 0, sizeof(*ivi)); 1838 ivi->vf = vport - 1; 1839 1840 mutex_lock(&esw->state_lock); 1841 ether_addr_copy(ivi->mac, evport->info.mac); 1842 ivi->linkstate = evport->info.link_state; 1843 ivi->vlan = evport->info.vlan; 1844 ivi->qos = evport->info.qos; 1845 ivi->spoofchk = evport->info.spoofchk; 1846 ivi->trusted = evport->info.trusted; 1847 if (evport->qos.enabled) { 1848 ivi->min_tx_rate = evport->qos.min_rate; 1849 ivi->max_tx_rate = evport->qos.max_rate; 1850 } 1851 mutex_unlock(&esw->state_lock); 1852 1853 return 0; 1854 } 1855 1856 int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, 1857 u16 vport, u16 vlan, u8 qos, u8 set_flags) 1858 { 1859 struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); 1860 bool vst_mode_steering = esw_vst_mode_is_steering(esw); 1861 int err = 0; 1862 1863 if (IS_ERR(evport)) 1864 return PTR_ERR(evport); 1865 if (vlan > 4095 || qos > 7) 1866 return -EINVAL; 1867 1868 if (esw->mode == MLX5_ESWITCH_OFFLOADS || !vst_mode_steering) { 1869 err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags); 1870 if (err) 1871 return err; 1872 } 1873 1874 evport->info.vlan = vlan; 1875 evport->info.qos = qos; 1876 if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY) { 1877 err = esw_acl_ingress_lgcy_setup(esw, evport); 1878 if (err) 1879 return err; 1880 err = esw_acl_egress_lgcy_setup(esw, evport); 1881 } 1882 1883 return err; 1884 } 1885 1886 int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, 1887 u16 vport_num, 1888 struct ifla_vf_stats *vf_stats) 1889 { 1890 struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num); 1891 int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out); 1892 u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {}; 1893 struct mlx5_vport_drop_stats stats = {}; 1894 int err = 0; 1895 u32 *out; 1896 1897 if (IS_ERR(vport)) 1898 return PTR_ERR(vport); 1899 1900 out = kvzalloc(outlen, GFP_KERNEL); 1901 if (!out) 1902 return -ENOMEM; 1903 1904 MLX5_SET(query_vport_counter_in, in, opcode, 1905 MLX5_CMD_OP_QUERY_VPORT_COUNTER); 1906 MLX5_SET(query_vport_counter_in, in, op_mod, 0); 1907 MLX5_SET(query_vport_counter_in, in, vport_number, vport->vport); 1908 MLX5_SET(query_vport_counter_in, in, other_vport, 1); 1909 1910 err = mlx5_cmd_exec_inout(esw->dev, query_vport_counter, in, out); 1911 if (err) 1912 goto free_out; 1913 1914 #define MLX5_GET_CTR(p, x) \ 1915 MLX5_GET64(query_vport_counter_out, p, x) 1916 1917 memset(vf_stats, 0, sizeof(*vf_stats)); 1918 vf_stats->rx_packets = 1919 MLX5_GET_CTR(out, received_eth_unicast.packets) + 1920 MLX5_GET_CTR(out, received_ib_unicast.packets) + 1921 MLX5_GET_CTR(out, received_eth_multicast.packets) + 1922 MLX5_GET_CTR(out, received_ib_multicast.packets) + 1923 MLX5_GET_CTR(out, received_eth_broadcast.packets); 1924 1925 vf_stats->rx_bytes = 1926 MLX5_GET_CTR(out, received_eth_unicast.octets) + 1927 MLX5_GET_CTR(out, received_ib_unicast.octets) + 1928 MLX5_GET_CTR(out, received_eth_multicast.octets) + 1929 MLX5_GET_CTR(out, received_ib_multicast.octets) + 1930 MLX5_GET_CTR(out, received_eth_broadcast.octets); 1931 1932 vf_stats->tx_packets = 1933 MLX5_GET_CTR(out, transmitted_eth_unicast.packets) + 1934 MLX5_GET_CTR(out, transmitted_ib_unicast.packets) + 1935 MLX5_GET_CTR(out, transmitted_eth_multicast.packets) + 1936 MLX5_GET_CTR(out, transmitted_ib_multicast.packets) + 1937 MLX5_GET_CTR(out, transmitted_eth_broadcast.packets); 1938 1939 vf_stats->tx_bytes = 1940 MLX5_GET_CTR(out, transmitted_eth_unicast.octets) + 1941 MLX5_GET_CTR(out, transmitted_ib_unicast.octets) + 1942 MLX5_GET_CTR(out, transmitted_eth_multicast.octets) + 1943 MLX5_GET_CTR(out, transmitted_ib_multicast.octets) + 1944 MLX5_GET_CTR(out, transmitted_eth_broadcast.octets); 1945 1946 vf_stats->multicast = 1947 MLX5_GET_CTR(out, received_eth_multicast.packets) + 1948 MLX5_GET_CTR(out, received_ib_multicast.packets); 1949 1950 vf_stats->broadcast = 1951 MLX5_GET_CTR(out, received_eth_broadcast.packets); 1952 1953 err = mlx5_esw_query_vport_drop_stats(esw->dev, vport, &stats); 1954 if (err) 1955 goto free_out; 1956 vf_stats->rx_dropped = stats.rx_dropped; 1957 vf_stats->tx_dropped = stats.tx_dropped; 1958 1959 free_out: 1960 kvfree(out); 1961 return err; 1962 } 1963 1964 u8 mlx5_eswitch_mode(const struct mlx5_core_dev *dev) 1965 { 1966 struct mlx5_eswitch *esw = dev->priv.eswitch; 1967 1968 return mlx5_esw_allowed(esw) ? esw->mode : MLX5_ESWITCH_LEGACY; 1969 } 1970 EXPORT_SYMBOL_GPL(mlx5_eswitch_mode); 1971 1972 enum devlink_eswitch_encap_mode 1973 mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev) 1974 { 1975 struct mlx5_eswitch *esw; 1976 1977 esw = dev->priv.eswitch; 1978 return (mlx5_eswitch_mode(dev) == MLX5_ESWITCH_OFFLOADS) ? esw->offloads.encap : 1979 DEVLINK_ESWITCH_ENCAP_MODE_NONE; 1980 } 1981 EXPORT_SYMBOL(mlx5_eswitch_get_encap_mode); 1982 1983 bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0, 1984 struct mlx5_core_dev *dev1) 1985 { 1986 return (dev0->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS && 1987 dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS); 1988 } 1989 1990 int mlx5_esw_event_notifier_register(struct mlx5_eswitch *esw, struct notifier_block *nb) 1991 { 1992 return blocking_notifier_chain_register(&esw->n_head, nb); 1993 } 1994 1995 void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch *esw, struct notifier_block *nb) 1996 { 1997 blocking_notifier_chain_unregister(&esw->n_head, nb); 1998 } 1999 2000 /** 2001 * mlx5_esw_hold() - Try to take a read lock on esw mode lock. 2002 * @mdev: mlx5 core device. 2003 * 2004 * Should be called by esw resources callers. 2005 * 2006 * Return: true on success or false. 2007 */ 2008 bool mlx5_esw_hold(struct mlx5_core_dev *mdev) 2009 { 2010 struct mlx5_eswitch *esw = mdev->priv.eswitch; 2011 2012 /* e.g. VF doesn't have eswitch so nothing to do */ 2013 if (!mlx5_esw_allowed(esw)) 2014 return true; 2015 2016 if (down_read_trylock(&esw->mode_lock) != 0) 2017 return true; 2018 2019 return false; 2020 } 2021 2022 /** 2023 * mlx5_esw_release() - Release a read lock on esw mode lock. 2024 * @mdev: mlx5 core device. 2025 */ 2026 void mlx5_esw_release(struct mlx5_core_dev *mdev) 2027 { 2028 struct mlx5_eswitch *esw = mdev->priv.eswitch; 2029 2030 if (mlx5_esw_allowed(esw)) 2031 up_read(&esw->mode_lock); 2032 } 2033 2034 /** 2035 * mlx5_esw_get() - Increase esw user count. 2036 * @mdev: mlx5 core device. 2037 */ 2038 void mlx5_esw_get(struct mlx5_core_dev *mdev) 2039 { 2040 struct mlx5_eswitch *esw = mdev->priv.eswitch; 2041 2042 if (mlx5_esw_allowed(esw)) 2043 atomic64_inc(&esw->user_count); 2044 } 2045 2046 /** 2047 * mlx5_esw_put() - Decrease esw user count. 2048 * @mdev: mlx5 core device. 2049 */ 2050 void mlx5_esw_put(struct mlx5_core_dev *mdev) 2051 { 2052 struct mlx5_eswitch *esw = mdev->priv.eswitch; 2053 2054 if (mlx5_esw_allowed(esw)) 2055 atomic64_dec_if_positive(&esw->user_count); 2056 } 2057 2058 /** 2059 * mlx5_esw_try_lock() - Take a write lock on esw mode lock. 2060 * @esw: eswitch device. 2061 * 2062 * Should be called by esw mode change routine. 2063 * 2064 * Return: 2065 * * 0 - esw mode if successfully locked and refcount is 0. 2066 * * -EBUSY - refcount is not 0. 2067 * * -EINVAL - In the middle of switching mode or lock is already held. 2068 */ 2069 int mlx5_esw_try_lock(struct mlx5_eswitch *esw) 2070 { 2071 if (down_write_trylock(&esw->mode_lock) == 0) 2072 return -EINVAL; 2073 2074 if (atomic64_read(&esw->user_count) > 0) { 2075 up_write(&esw->mode_lock); 2076 return -EBUSY; 2077 } 2078 2079 return esw->mode; 2080 } 2081 2082 /** 2083 * mlx5_esw_unlock() - Release write lock on esw mode lock 2084 * @esw: eswitch device. 2085 */ 2086 void mlx5_esw_unlock(struct mlx5_eswitch *esw) 2087 { 2088 up_write(&esw->mode_lock); 2089 } 2090 2091 /** 2092 * mlx5_eswitch_get_total_vports - Get total vports of the eswitch 2093 * 2094 * @dev: Pointer to core device 2095 * 2096 * mlx5_eswitch_get_total_vports returns total number of eswitch vports. 2097 */ 2098 u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev) 2099 { 2100 struct mlx5_eswitch *esw; 2101 2102 esw = dev->priv.eswitch; 2103 return mlx5_esw_allowed(esw) ? esw->total_vports : 0; 2104 } 2105 EXPORT_SYMBOL_GPL(mlx5_eswitch_get_total_vports); 2106 2107 /** 2108 * mlx5_eswitch_get_core_dev - Get the mdev device 2109 * @esw : eswitch device. 2110 * 2111 * Return the mellanox core device which manages the eswitch. 2112 */ 2113 struct mlx5_core_dev *mlx5_eswitch_get_core_dev(struct mlx5_eswitch *esw) 2114 { 2115 return mlx5_esw_allowed(esw) ? esw->dev : NULL; 2116 } 2117 EXPORT_SYMBOL(mlx5_eswitch_get_core_dev); 2118