/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/mpfs.h>
#include <linux/debugfs.h>
#include "esw/acl/lgcy.h"
#include "esw/legacy.h"
#include "esw/qos.h"
#include "mlx5_core.h"
#include "lib/eq.h"
#include "eswitch.h"
#include "fs_core.h"
#include "devlink.h"
#include "ecpf.h"
#include "en/mod_hdr.h"

enum {
	MLX5_ACTION_NONE = 0,
	MLX5_ACTION_ADD  = 1,
	MLX5_ACTION_DEL  = 2,
};

/* Vport UC/MC hash node */
struct vport_addr {
	struct l2addr_node node;
	u8 action;
	u16 vport;
	struct mlx5_flow_handle *flow_rule;
	bool mpfs; /* UC MAC was added to MPFS */
	/* A flag indicating that mac was added due to mc promiscuous vport */
	bool mc_promisc;
};

static int mlx5_eswitch_check(const struct mlx5_core_dev *dev)
{
	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -EOPNOTSUPP;

	if (!MLX5_ESWITCH_MANAGER(dev))
		return -EOPNOTSUPP;

	return 0;
}

struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	int err;

	err = mlx5_eswitch_check(dev);
	if (err)
		return ERR_PTR(err);

	return dev->priv.eswitch;
}

struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_vport *vport;

	if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager))
		return ERR_PTR(-EPERM);

	vport = xa_load(&esw->vports, vport_num);
	if (!vport) {
		esw_debug(esw->dev, "vport out of range: num(0x%x)\n", vport_num);
		return ERR_PTR(-EINVAL);
	}
	return vport;
}
static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
					u32 events_mask)
{
	u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {};
	void *nic_vport_ctx;

	MLX5_SET(modify_nic_vport_context_in, in,
		 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
				     in, nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx, arm_change_event, 1);

	if (events_mask & MLX5_VPORT_UC_ADDR_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_uc_address_change, 1);
	if (events_mask & MLX5_VPORT_MC_ADDR_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_mc_address_change, 1);
	if (events_mask & MLX5_VPORT_PROMISC_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_promisc_change, 1);

	return mlx5_cmd_exec_in(dev, modify_nic_vport_context, in);
}

/* E-Switch vport context HW commands */
int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
					  bool other_vport, void *in)
{
	MLX5_SET(modify_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
	MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_esw_vport_context_in, in, other_vport, other_vport);
	return mlx5_cmd_exec_in(dev, modify_esw_vport_context, in);
}

static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u16 vport,
				  u16 vlan, u8 qos, u8 set_flags)
{
	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};

	if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
	    !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
		return -EOPNOTSUPP;

	esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%x\n",
		  vport, vlan, qos, set_flags);

	if (set_flags & SET_VLAN_STRIP)
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.vport_cvlan_strip, 1);

	if (set_flags & SET_VLAN_INSERT) {
		/* insert only if no vlan in packet */
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.vport_cvlan_insert, 1);

		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.cvlan_pcp, qos);
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.cvlan_id, vlan);
	}

	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.vport_cvlan_strip, 1);
	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.vport_cvlan_insert, 1);

	return mlx5_eswitch_modify_esw_vport_context(dev, vport, true, in);
}
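/* Usage sketch for modify_esw_vport_cvlan() (hypothetical values; the real
 * caller is __mlx5_eswitch_set_vport_vlan() further down, which enables both
 * strip and insert whenever a VLAN or QoS value is configured):
 *
 *	u8 flags = SET_VLAN_STRIP | SET_VLAN_INSERT;
 *
 *	err = modify_esw_vport_cvlan(esw->dev, vport_num, 10, 3, flags);
 *
 * This would strip the C-VLAN from traffic leaving the vport and insert
 * VLAN 10 / PCP 3 into untagged packets, assuming the FW reports both
 * vport_cvlan capabilities checked at the top of the function.
 */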
/* E-Switch FDB */
static struct mlx5_flow_handle *
__esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u16 vport, bool rx_rule,
			 u8 mac_c[ETH_ALEN], u8 mac_v[ETH_ALEN])
{
	int match_header = (is_zero_ether_addr(mac_c) ? 0 :
			    MLX5_MATCH_OUTER_HEADERS);
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_spec *spec;
	void *mv_misc = NULL;
	void *mc_misc = NULL;
	u8 *dmac_v = NULL;
	u8 *dmac_c = NULL;

	if (rx_rule)
		match_header |= MLX5_MATCH_MISC_PARAMETERS;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return NULL;

	dmac_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			      outer_headers.dmac_47_16);
	dmac_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			      outer_headers.dmac_47_16);

	if (match_header & MLX5_MATCH_OUTER_HEADERS) {
		ether_addr_copy(dmac_v, mac_v);
		ether_addr_copy(dmac_c, mac_c);
	}

	if (match_header & MLX5_MATCH_MISC_PARAMETERS) {
		mv_misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       misc_parameters);
		mc_misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       misc_parameters);
		MLX5_SET(fte_match_set_misc, mv_misc, source_port, MLX5_VPORT_UPLINK);
		MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port);
	}

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = vport;

	esw_debug(esw->dev,
		  "\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n",
		  dmac_v, dmac_c, vport);
	spec->match_criteria_enable = match_header;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule =
		mlx5_add_flow_rules(esw->fdb_table.legacy.fdb, spec,
				    &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev,
			 "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
			 dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
		flow_rule = NULL;
	}

	kvfree(spec);
	return flow_rule;
}

static struct mlx5_flow_handle *
esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u16 vport)
{
	u8 mac_c[ETH_ALEN];

	eth_broadcast_addr(mac_c);
	return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac);
}

static struct mlx5_flow_handle *
esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u16 vport)
{
	u8 mac_c[ETH_ALEN];
	u8 mac_v[ETH_ALEN];

	eth_zero_addr(mac_c);
	eth_zero_addr(mac_v);
	mac_c[0] = 0x01;
	mac_v[0] = 0x01;
	return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac_v);
}

static struct mlx5_flow_handle *
esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u16 vport)
{
	u8 mac_c[ETH_ALEN];
	u8 mac_v[ETH_ALEN];

	eth_zero_addr(mac_c);
	eth_zero_addr(mac_v);
	return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v);
}
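/* How the three legacy FDB rule flavors above map to matches (summary of the
 * code, added for clarity):
 *
 *	esw_fdb_set_vport_rule()          mac_c = ff:ff:ff:ff:ff:ff
 *					  -> exact DMAC match
 *	esw_fdb_set_vport_allmulti_rule() mac_c = mac_v = 01:00:00:00:00:00
 *					  -> matches only the multicast bit,
 *					     i.e. any multicast DMAC
 *	esw_fdb_set_vport_promisc_rule()  mac_c zeroed, rx_rule = true
 *					  -> no DMAC match; matches traffic
 *					     arriving from MLX5_VPORT_UPLINK
 *					     via the misc source_port field
 */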
/* E-Switch vport UC/MC lists management */
typedef int (*vport_addr_action)(struct mlx5_eswitch *esw,
				 struct vport_addr *vaddr);

static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	u8 *mac = vaddr->node.addr;
	u16 vport = vaddr->vport;
	int err;

	/* Skip mlx5_mpfs_add_mac for eswitch managers,
	 * it is already done by its netdev in mlx5e_execute_l2_action
	 */
	if (mlx5_esw_is_manager_vport(esw, vport))
		goto fdb_add;

	err = mlx5_mpfs_add_mac(esw->dev, mac);
	if (err) {
		esw_warn(esw->dev,
			 "Failed to add L2 table mac(%pM) for vport(0x%x), err(%d)\n",
			 mac, vport, err);
		return err;
	}
	vaddr->mpfs = true;

fdb_add:
	/* SRIOV is enabled: Forward UC MAC to vport */
	if (esw->fdb_table.legacy.fdb && esw->mode == MLX5_ESWITCH_LEGACY)
		vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);

	esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM fr(%p)\n",
		  vport, mac, vaddr->flow_rule);

	return 0;
}

static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	u8 *mac = vaddr->node.addr;
	u16 vport = vaddr->vport;
	int err = 0;

	/* Skip mlx5_mpfs_del_mac for eswitch managers,
	 * it is already done by its netdev in mlx5e_execute_l2_action
	 */
	if (!vaddr->mpfs || mlx5_esw_is_manager_vport(esw, vport))
		goto fdb_del;

	err = mlx5_mpfs_del_mac(esw->dev, mac);
	if (err)
		esw_warn(esw->dev,
			 "Failed to del L2 table mac(%pM) for vport(%d), err(%d)\n",
			 mac, vport, err);
	vaddr->mpfs = false;

fdb_del:
	if (vaddr->flow_rule)
		mlx5_del_flow_rules(vaddr->flow_rule);
	vaddr->flow_rule = NULL;

	return 0;
}

static void update_allmulti_vports(struct mlx5_eswitch *esw,
				   struct vport_addr *vaddr,
				   struct esw_mc_addr *esw_mc)
{
	u8 *mac = vaddr->node.addr;
	struct mlx5_vport *vport;
	unsigned long i;
	u16 vport_num;

	mlx5_esw_for_each_vport(esw, i, vport) {
		struct hlist_head *vport_hash = vport->mc_list;
		struct vport_addr *iter_vaddr =
					l2addr_hash_find(vport_hash,
							 mac,
							 struct vport_addr);
		vport_num = vport->vport;
		if (IS_ERR_OR_NULL(vport->allmulti_rule) ||
		    vaddr->vport == vport_num)
			continue;
		switch (vaddr->action) {
		case MLX5_ACTION_ADD:
			if (iter_vaddr)
				continue;
			iter_vaddr = l2addr_hash_add(vport_hash, mac,
						     struct vport_addr,
						     GFP_KERNEL);
			if (!iter_vaddr) {
				esw_warn(esw->dev,
					 "ALL-MULTI: Failed to add MAC(%pM) to vport[%d] DB\n",
					 mac, vport_num);
				continue;
			}
			iter_vaddr->vport = vport_num;
			iter_vaddr->flow_rule =
					esw_fdb_set_vport_rule(esw,
							       mac,
							       vport_num);
			iter_vaddr->mc_promisc = true;
			break;
		case MLX5_ACTION_DEL:
			if (!iter_vaddr)
				continue;
			mlx5_del_flow_rules(iter_vaddr->flow_rule);
			l2addr_hash_del(iter_vaddr);
			break;
		}
	}
}

static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->mc_table;
	struct esw_mc_addr *esw_mc;
	u8 *mac = vaddr->node.addr;
	u16 vport = vaddr->vport;

	if (!esw->fdb_table.legacy.fdb)
		return 0;

	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
	if (esw_mc)
		goto add;

	esw_mc = l2addr_hash_add(hash, mac, struct esw_mc_addr, GFP_KERNEL);
	if (!esw_mc)
		return -ENOMEM;

	esw_mc->uplink_rule = /* Forward MC MAC to Uplink */
		esw_fdb_set_vport_rule(esw, mac, MLX5_VPORT_UPLINK);

	/* Add this multicast mac to all the mc promiscuous vports */
	update_allmulti_vports(esw, vaddr, esw_mc);

add:
	/* If the multicast mac is added as a result of mc promiscuous vport,
	 * don't increment the multicast ref count
	 */
	if (!vaddr->mc_promisc)
		esw_mc->refcnt++;

	/* Forward MC MAC to vport */
	vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
	esw_debug(esw->dev,
		  "\tADDED MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
		  vport, mac, vaddr->flow_rule,
		  esw_mc->refcnt, esw_mc->uplink_rule);
	return 0;
}
static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->mc_table;
	struct esw_mc_addr *esw_mc;
	u8 *mac = vaddr->node.addr;
	u16 vport = vaddr->vport;

	if (!esw->fdb_table.legacy.fdb)
		return 0;

	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
	if (!esw_mc) {
		esw_warn(esw->dev,
			 "Failed to find eswitch MC addr for MAC(%pM) vport(%d)",
			 mac, vport);
		return -EINVAL;
	}
	esw_debug(esw->dev,
		  "\tDELETE MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
		  vport, mac, vaddr->flow_rule, esw_mc->refcnt,
		  esw_mc->uplink_rule);

	if (vaddr->flow_rule)
		mlx5_del_flow_rules(vaddr->flow_rule);
	vaddr->flow_rule = NULL;

	/* If the multicast mac is added as a result of mc promiscuous vport,
	 * don't decrement the multicast ref count.
	 */
	if (vaddr->mc_promisc || (--esw_mc->refcnt > 0))
		return 0;

	/* Remove this multicast mac from all the mc promiscuous vports */
	update_allmulti_vports(esw, vaddr, esw_mc);

	if (esw_mc->uplink_rule)
		mlx5_del_flow_rules(esw_mc->uplink_rule);

	l2addr_hash_del(esw_mc);
	return 0;
}

/* Apply vport UC/MC list to HW l2 table and FDB table */
static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw,
				      struct mlx5_vport *vport, int list_type)
{
	bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
	vport_addr_action vport_addr_add;
	vport_addr_action vport_addr_del;
	struct vport_addr *addr;
	struct l2addr_node *node;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int hi;

	vport_addr_add = is_uc ? esw_add_uc_addr :
				 esw_add_mc_addr;
	vport_addr_del = is_uc ? esw_del_uc_addr :
				 esw_del_mc_addr;

	hash = is_uc ? vport->uc_list : vport->mc_list;
	for_each_l2hash_node(node, tmp, hash, hi) {
		addr = container_of(node, struct vport_addr, node);
		switch (addr->action) {
		case MLX5_ACTION_ADD:
			vport_addr_add(esw, addr);
			addr->action = MLX5_ACTION_NONE;
			break;
		case MLX5_ACTION_DEL:
			vport_addr_del(esw, addr);
			l2addr_hash_del(addr);
			break;
		}
	}
}
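/* Added note: esw_apply_vport_addr_list() above and
 * esw_update_vport_addr_list() below implement a small mark-and-sweep state
 * machine per address. The update step first marks every cached entry
 * MLX5_ACTION_DEL, then re-marks entries still reported by the vport
 * context as MLX5_ACTION_NONE (kept) and new ones as MLX5_ACTION_ADD. The
 * apply step then pushes ADD entries to HW and frees DEL entries, so
 * addresses that disappeared from the vport are swept away.
 */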
"UC" : "MC", size); 545 546 for (i = 0; i < size; i++) { 547 if (is_uc && !is_valid_ether_addr(mac_list[i])) 548 continue; 549 550 if (!is_uc && !is_multicast_ether_addr(mac_list[i])) 551 continue; 552 553 addr = l2addr_hash_find(hash, mac_list[i], struct vport_addr); 554 if (addr) { 555 addr->action = MLX5_ACTION_NONE; 556 /* If this mac was previously added because of allmulti 557 * promiscuous rx mode, its now converted to be original 558 * vport mac. 559 */ 560 if (addr->mc_promisc) { 561 struct esw_mc_addr *esw_mc = 562 l2addr_hash_find(esw->mc_table, 563 mac_list[i], 564 struct esw_mc_addr); 565 if (!esw_mc) { 566 esw_warn(esw->dev, 567 "Failed to MAC(%pM) in mcast DB\n", 568 mac_list[i]); 569 continue; 570 } 571 esw_mc->refcnt++; 572 addr->mc_promisc = false; 573 } 574 continue; 575 } 576 577 addr = l2addr_hash_add(hash, mac_list[i], struct vport_addr, 578 GFP_KERNEL); 579 if (!addr) { 580 esw_warn(esw->dev, 581 "Failed to add MAC(%pM) to vport[%d] DB\n", 582 mac_list[i], vport->vport); 583 continue; 584 } 585 addr->vport = vport->vport; 586 addr->action = MLX5_ACTION_ADD; 587 } 588 out: 589 kfree(mac_list); 590 } 591 592 /* Sync vport UC/MC list from vport context 593 * Must be called after esw_update_vport_addr_list 594 */ 595 static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw, 596 struct mlx5_vport *vport) 597 { 598 struct l2addr_node *node; 599 struct vport_addr *addr; 600 struct hlist_head *hash; 601 struct hlist_node *tmp; 602 int hi; 603 604 hash = vport->mc_list; 605 606 for_each_l2hash_node(node, tmp, esw->mc_table, hi) { 607 u8 *mac = node->addr; 608 609 addr = l2addr_hash_find(hash, mac, struct vport_addr); 610 if (addr) { 611 if (addr->action == MLX5_ACTION_DEL) 612 addr->action = MLX5_ACTION_NONE; 613 continue; 614 } 615 addr = l2addr_hash_add(hash, mac, struct vport_addr, 616 GFP_KERNEL); 617 if (!addr) { 618 esw_warn(esw->dev, 619 "Failed to add allmulti MAC(%pM) to vport[%d] DB\n", 620 mac, vport->vport); 621 continue; 622 } 623 addr->vport = vport->vport; 624 addr->action = MLX5_ACTION_ADD; 625 addr->mc_promisc = true; 626 } 627 } 628 629 /* Apply vport rx mode to HW FDB table */ 630 static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, 631 struct mlx5_vport *vport, 632 bool promisc, bool mc_promisc) 633 { 634 struct esw_mc_addr *allmulti_addr = &esw->mc_promisc; 635 636 if (IS_ERR_OR_NULL(vport->allmulti_rule) != mc_promisc) 637 goto promisc; 638 639 if (mc_promisc) { 640 vport->allmulti_rule = 641 esw_fdb_set_vport_allmulti_rule(esw, vport->vport); 642 if (!allmulti_addr->uplink_rule) 643 allmulti_addr->uplink_rule = 644 esw_fdb_set_vport_allmulti_rule(esw, 645 MLX5_VPORT_UPLINK); 646 allmulti_addr->refcnt++; 647 } else if (vport->allmulti_rule) { 648 mlx5_del_flow_rules(vport->allmulti_rule); 649 vport->allmulti_rule = NULL; 650 651 if (--allmulti_addr->refcnt > 0) 652 goto promisc; 653 654 if (allmulti_addr->uplink_rule) 655 mlx5_del_flow_rules(allmulti_addr->uplink_rule); 656 allmulti_addr->uplink_rule = NULL; 657 } 658 659 promisc: 660 if (IS_ERR_OR_NULL(vport->promisc_rule) != promisc) 661 return; 662 663 if (promisc) { 664 vport->promisc_rule = 665 esw_fdb_set_vport_promisc_rule(esw, vport->vport); 666 } else if (vport->promisc_rule) { 667 mlx5_del_flow_rules(vport->promisc_rule); 668 vport->promisc_rule = NULL; 669 } 670 } 671 672 /* Sync vport rx mode from vport context */ 673 static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw, 674 struct mlx5_vport *vport) 675 { 676 int promisc_all = 0; 677 int promisc_uc = 0; 678 int 
/* Sync vport rx mode from vport context */
static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw,
				     struct mlx5_vport *vport)
{
	int promisc_all = 0;
	int promisc_uc = 0;
	int promisc_mc = 0;
	int err;

	err = mlx5_query_nic_vport_promisc(esw->dev,
					   vport->vport,
					   &promisc_uc,
					   &promisc_mc,
					   &promisc_all);
	if (err)
		return;
	esw_debug(esw->dev, "vport[%d] context update rx mode promisc_all=%d, all_multi=%d\n",
		  vport->vport, promisc_all, promisc_mc);

	if (!vport->info.trusted || !vport->enabled) {
		promisc_uc = 0;
		promisc_mc = 0;
		promisc_all = 0;
	}

	esw_apply_vport_rx_mode(esw, vport, promisc_all,
				(promisc_all || promisc_mc));
}

void esw_vport_change_handle_locked(struct mlx5_vport *vport)
{
	struct mlx5_core_dev *dev = vport->dev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	u8 mac[ETH_ALEN];

	mlx5_query_nic_vport_mac_address(dev, vport->vport, true, mac);
	esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n",
		  vport->vport, mac);

	if (vport->enabled_events & MLX5_VPORT_UC_ADDR_CHANGE) {
		esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC);
		esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC);
	}

	if (vport->enabled_events & MLX5_VPORT_MC_ADDR_CHANGE)
		esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC);

	if (vport->enabled_events & MLX5_VPORT_PROMISC_CHANGE) {
		esw_update_vport_rx_mode(esw, vport);
		if (!IS_ERR_OR_NULL(vport->allmulti_rule))
			esw_update_vport_mc_promisc(esw, vport);
	}

	if (vport->enabled_events & (MLX5_VPORT_PROMISC_CHANGE | MLX5_VPORT_MC_ADDR_CHANGE))
		esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC);

	esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport);
	if (vport->enabled)
		arm_vport_context_events_cmd(dev, vport->vport,
					     vport->enabled_events);
}

static void esw_vport_change_handler(struct work_struct *work)
{
	struct mlx5_vport *vport =
		container_of(work, struct mlx5_vport, vport_change_handler);
	struct mlx5_eswitch *esw = vport->dev->priv.eswitch;

	mutex_lock(&esw->state_lock);
	esw_vport_change_handle_locked(vport);
	mutex_unlock(&esw->state_lock);
}

static void node_guid_gen_from_mac(u64 *node_guid, const u8 *mac)
{
	((u8 *)node_guid)[7] = mac[0];
	((u8 *)node_guid)[6] = mac[1];
	((u8 *)node_guid)[5] = mac[2];
	((u8 *)node_guid)[4] = 0xff;
	((u8 *)node_guid)[3] = 0xfe;
	((u8 *)node_guid)[2] = mac[3];
	((u8 *)node_guid)[1] = mac[4];
	((u8 *)node_guid)[0] = mac[5];
}
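/* Worked example for node_guid_gen_from_mac() above (added for clarity):
 * the bytes form a modified EUI-64 identifier, OUI | ff:fe | NIC-specific.
 * For mac = 00:11:22:33:44:55 the GUID, read most significant byte first,
 * is 00:11:22:ff:fe:33:44:55; since the function stores the bytes through a
 * u64 pointer by index, that reading corresponds to the u64 value
 * 0x001122fffe334455 on a little-endian host.
 */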
static int esw_vport_setup_acl(struct mlx5_eswitch *esw,
			       struct mlx5_vport *vport)
{
	if (esw->mode == MLX5_ESWITCH_LEGACY)
		return esw_legacy_vport_acl_setup(esw, vport);
	else
		return esw_vport_create_offloads_acl_tables(esw, vport);
}

static void esw_vport_cleanup_acl(struct mlx5_eswitch *esw,
				  struct mlx5_vport *vport)
{
	if (esw->mode == MLX5_ESWITCH_LEGACY)
		esw_legacy_vport_acl_cleanup(esw, vport);
	else
		esw_vport_destroy_offloads_acl_tables(esw, vport);
}

static int mlx5_esw_vport_caps_get(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	void *query_ctx;
	void *hca_caps;
	int err;

	if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
		return 0;

	query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
	if (!query_ctx)
		return -ENOMEM;

	err = mlx5_vport_get_other_func_cap(esw->dev, vport->vport, query_ctx,
					    MLX5_CAP_GENERAL);
	if (err)
		goto out_free;

	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
	vport->info.roce_enabled = MLX5_GET(cmd_hca_cap, hca_caps, roce);

	memset(query_ctx, 0, query_out_sz);
	err = mlx5_vport_get_other_func_cap(esw->dev, vport->vport, query_ctx,
					    MLX5_CAP_GENERAL_2);
	if (err)
		goto out_free;

	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
	vport->info.mig_enabled = MLX5_GET(cmd_hca_cap_2, hca_caps, migratable);
out_free:
	kfree(query_ctx);
	return err;
}

static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	u16 vport_num = vport->vport;
	int flags;
	int err;

	err = esw_vport_setup_acl(esw, vport);
	if (err)
		return err;

	if (mlx5_esw_is_manager_vport(esw, vport_num))
		return 0;

	err = mlx5_esw_vport_caps_get(esw, vport);
	if (err)
		goto err_caps;

	mlx5_modify_vport_admin_state(esw->dev,
				      MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
				      vport_num, 1,
				      vport->info.link_state);

	/* Host PF has its own mac/guid. */
	if (vport_num) {
		mlx5_modify_nic_vport_mac_address(esw->dev, vport_num,
						  vport->info.mac);
		mlx5_modify_nic_vport_node_guid(esw->dev, vport_num,
						vport->info.node_guid);
	}

	flags = (vport->info.vlan || vport->info.qos) ?
		SET_VLAN_STRIP | SET_VLAN_INSERT : 0;
	modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan,
			       vport->info.qos, flags);

	return 0;

err_caps:
	esw_vport_cleanup_acl(esw, vport);
	return err;
}

/* Don't cleanup vport->info, it's needed to restore vport configuration */
static void esw_vport_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	u16 vport_num = vport->vport;

	if (!mlx5_esw_is_manager_vport(esw, vport_num))
		mlx5_modify_vport_admin_state(esw->dev,
					      MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
					      vport_num, 1,
					      MLX5_VPORT_ADMIN_STATE_DOWN);

	mlx5_esw_qos_vport_disable(esw, vport);
	esw_vport_cleanup_acl(esw, vport);
}
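/* Added note on the vport lifecycle implemented below:
 * mlx5_esw_vport_enable() and mlx5_esw_vport_disable() are paired per
 * vport_num. Enable sets up ACLs, admin state and the vhca-id mapping, then
 * arms the vport-change events via the change handler; disable disarms the
 * events first, so a late NIC_VPORT_CHANGE EQE no longer queues work for
 * this vport, and then runs the change handler once more to flush any
 * remaining FDB/MPFS state.
 */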
int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num,
			  enum mlx5_eswitch_vport_event enabled_events)
{
	struct mlx5_vport *vport;
	int ret;

	vport = mlx5_eswitch_get_vport(esw, vport_num);
	if (IS_ERR(vport))
		return PTR_ERR(vport);

	mutex_lock(&esw->state_lock);
	WARN_ON(vport->enabled);

	esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);

	ret = esw_vport_setup(esw, vport);
	if (ret)
		goto done;

	/* Sync with current vport context */
	vport->enabled_events = enabled_events;
	vport->enabled = true;

	/* Esw manager is trusted by default. Host PF (vport 0) is trusted as
	 * well on SmartNICs, as it's a vport group manager.
	 */
	if (mlx5_esw_is_manager_vport(esw, vport_num) ||
	    (!vport_num && mlx5_core_is_ecpf(esw->dev)))
		vport->info.trusted = true;

	if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
	    MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
		ret = mlx5_esw_vport_vhca_id_set(esw, vport_num);
		if (ret)
			goto err_vhca_mapping;
	}

	/* External controller host PF has factory programmed MAC.
	 * Read it from the device.
	 */
	if (mlx5_core_is_ecpf(esw->dev) && vport_num == MLX5_VPORT_PF)
		mlx5_query_nic_vport_mac_address(esw->dev, vport_num, true, vport->info.mac);

	esw_vport_change_handle_locked(vport);

	esw->enabled_vports++;
	esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num);
done:
	mutex_unlock(&esw->state_lock);
	return ret;

err_vhca_mapping:
	esw_vport_cleanup(esw, vport);
	mutex_unlock(&esw->state_lock);
	return ret;
}

void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(esw, vport_num);
	if (IS_ERR(vport))
		return;

	mutex_lock(&esw->state_lock);
	if (!vport->enabled)
		goto done;

	esw_debug(esw->dev, "Disabling vport(%d)\n", vport_num);
	/* Mark this vport as disabled to discard new events */
	vport->enabled = false;

	/* Disable events from this vport */
	arm_vport_context_events_cmd(esw->dev, vport->vport, 0);

	if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
	    MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
		mlx5_esw_vport_vhca_id_clear(esw, vport_num);

	/* We don't assume VFs will cleanup after themselves.
	 * Calling vport change handler while vport is disabled will cleanup
	 * the vport resources.
	 */
	esw_vport_change_handle_locked(vport);
	vport->enabled_events = 0;
	esw_vport_cleanup(esw, vport);
	esw->enabled_vports--;

done:
	mutex_unlock(&esw->state_lock);
}

static int eswitch_vport_event(struct notifier_block *nb,
			       unsigned long type, void *data)
{
	struct mlx5_eswitch *esw = mlx5_nb_cof(nb, struct mlx5_eswitch, nb);
	struct mlx5_eqe *eqe = data;
	struct mlx5_vport *vport;
	u16 vport_num;

	vport_num = be16_to_cpu(eqe->data.vport_change.vport_num);
	vport = mlx5_eswitch_get_vport(esw, vport_num);
	if (!IS_ERR(vport))
		queue_work(esw->work_queue, &vport->vport_change_handler);
	return NOTIFY_OK;
}
/**
 * mlx5_esw_query_functions - Returns raw output about functions state
 * @dev:	Pointer to device to query
 *
 * mlx5_esw_query_functions() allocates the command output buffer and returns
 * the raw QUERY_ESW_FUNCTIONS output from the device on success. Otherwise
 * it returns ERR_PTR. The caller must free the memory using kvfree() when a
 * valid pointer is returned.
 */
const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
{
	int outlen = MLX5_ST_SZ_BYTES(query_esw_functions_out);
	u32 in[MLX5_ST_SZ_DW(query_esw_functions_in)] = {};
	u32 *out;
	int err;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(query_esw_functions_in, in, opcode,
		 MLX5_CMD_OP_QUERY_ESW_FUNCTIONS);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
	if (!err)
		return out;

	kvfree(out);
	return ERR_PTR(err);
}
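/* Usage sketch for mlx5_esw_query_functions() (this mirrors
 * mlx5_eswitch_update_num_of_vfs() further down; it is not an additional
 * API):
 *
 *	const u32 *out = mlx5_esw_query_functions(dev);
 *
 *	if (IS_ERR(out))
 *		return;
 *	num_vfs = MLX5_GET(query_esw_functions_out, out,
 *			   host_params_context.host_num_of_vfs);
 *	kvfree(out);
 */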
static void mlx5_eswitch_event_handlers_register(struct mlx5_eswitch *esw)
{
	MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE);
	mlx5_eq_notifier_register(esw->dev, &esw->nb);

	if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev)) {
		MLX5_NB_INIT(&esw->esw_funcs.nb, mlx5_esw_funcs_changed_handler,
			     ESW_FUNCTIONS_CHANGED);
		mlx5_eq_notifier_register(esw->dev, &esw->esw_funcs.nb);
	}
}

static void mlx5_eswitch_event_handlers_unregister(struct mlx5_eswitch *esw)
{
	if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev))
		mlx5_eq_notifier_unregister(esw->dev, &esw->esw_funcs.nb);

	mlx5_eq_notifier_unregister(esw->dev, &esw->nb);

	flush_workqueue(esw->work_queue);
}

static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	unsigned long i;

	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
		memset(&vport->qos, 0, sizeof(vport->qos));
		memset(&vport->info, 0, sizeof(vport->info));
		vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
	}
}

/* Public E-Switch API */
int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
			    enum mlx5_eswitch_vport_event enabled_events)
{
	int err;

	err = mlx5_esw_vport_enable(esw, vport_num, enabled_events);
	if (err)
		return err;

	mlx5_esw_vport_debugfs_create(esw, vport_num, false, 0);
	err = esw_offloads_load_rep(esw, vport_num);
	if (err)
		goto err_rep;

	return err;

err_rep:
	mlx5_esw_vport_debugfs_destroy(esw, vport_num);
	mlx5_esw_vport_disable(esw, vport_num);
	return err;
}

void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
	esw_offloads_unload_rep(esw, vport_num);
	mlx5_esw_vport_debugfs_destroy(esw, vport_num);
	mlx5_esw_vport_disable(esw, vport_num);
}

void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs)
{
	struct mlx5_vport *vport;
	unsigned long i;

	mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) {
		if (!vport->enabled)
			continue;
		mlx5_eswitch_unload_vport(esw, vport->vport);
	}
}

int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
				enum mlx5_eswitch_vport_event enabled_events)
{
	struct mlx5_vport *vport;
	unsigned long i;
	int err;

	mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) {
		err = mlx5_eswitch_load_vport(esw, vport->vport, enabled_events);
		if (err)
			goto vf_err;
	}

	return 0;

vf_err:
	mlx5_eswitch_unload_vf_vports(esw, num_vfs);
	return err;
}

static int host_pf_enable_hca(struct mlx5_core_dev *dev)
{
	if (!mlx5_core_is_ecpf(dev))
		return 0;

	/* Once vport and representor are ready, take the external host PF
	 * out of the initializing state. Enabling HCA clears the
	 * iser->initializing bit and host PF driver loading can progress.
	 */
	return mlx5_cmd_host_pf_enable_hca(dev);
}

static void host_pf_disable_hca(struct mlx5_core_dev *dev)
{
	if (!mlx5_core_is_ecpf(dev))
		return;

	mlx5_cmd_host_pf_disable_hca(dev);
}

/* mlx5_eswitch_enable_pf_vf_vports() enables vports of PF, ECPF and VFs,
 * whichever are present on the eswitch.
 */
int
mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
				 enum mlx5_eswitch_vport_event enabled_events)
{
	int ret;

	/* Enable PF vport */
	ret = mlx5_eswitch_load_vport(esw, MLX5_VPORT_PF, enabled_events);
	if (ret)
		return ret;

	/* Enable external host PF HCA */
	ret = host_pf_enable_hca(esw->dev);
	if (ret)
		goto pf_hca_err;

	/* Enable ECPF vport */
	if (mlx5_ecpf_vport_exists(esw->dev)) {
		ret = mlx5_eswitch_load_vport(esw, MLX5_VPORT_ECPF, enabled_events);
		if (ret)
			goto ecpf_err;
	}

	/* Enable VF vports */
	ret = mlx5_eswitch_load_vf_vports(esw, esw->esw_funcs.num_vfs,
					  enabled_events);
	if (ret)
		goto vf_err;
	return 0;

vf_err:
	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF);
ecpf_err:
	host_pf_disable_hca(esw->dev);
pf_hca_err:
	mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF);
	return ret;
}

/* mlx5_eswitch_disable_pf_vf_vports() disables vports of PF, ECPF and VFs,
 * whichever were previously enabled on the eswitch.
 */
void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
{
	mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF);

	host_pf_disable_hca(esw->dev);
	mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF);
}

static void mlx5_eswitch_get_devlink_param(struct mlx5_eswitch *esw)
{
	struct devlink *devlink = priv_to_devlink(esw->dev);
	union devlink_param_value val;
	int err;

	err = devlink_param_driverinit_value_get(devlink,
						 MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
						 &val);
	if (!err) {
		esw->params.large_group_num = val.vu32;
	} else {
		esw_warn(esw->dev,
			 "Devlink can't get param fdb_large_groups, uses default (%d).\n",
			 ESW_OFFLOADS_DEFAULT_NUM_GROUPS);
		esw->params.large_group_num = ESW_OFFLOADS_DEFAULT_NUM_GROUPS;
	}
}

static void
mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, int num_vfs)
{
	const u32 *out;

	if (num_vfs < 0)
		return;

	if (!mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		esw->esw_funcs.num_vfs = num_vfs;
		return;
	}

	out = mlx5_esw_query_functions(esw->dev);
	if (IS_ERR(out))
		return;

	esw->esw_funcs.num_vfs = MLX5_GET(query_esw_functions_out, out,
					  host_params_context.host_num_of_vfs);
	kvfree(out);
}

static void mlx5_esw_mode_change_notify(struct mlx5_eswitch *esw, u16 mode)
{
	struct mlx5_esw_event_info info = {};

	info.new_mode = mode;

	blocking_notifier_call_chain(&esw->n_head, 0, &info);
}
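/* Hypothetical listener sketch for the mode-change notification above (the
 * callback name is illustrative; the registration helpers are the real
 * mlx5_esw_event_notifier_register()/unregister() defined near the end of
 * this file):
 *
 *	static int my_esw_mode_cb(struct notifier_block *nb,
 *				  unsigned long event, void *data)
 *	{
 *		struct mlx5_esw_event_info *info = data;
 *
 *		if (info->new_mode == MLX5_ESWITCH_LEGACY)
 *			;	// tear down any offloads-only state here
 *		return NOTIFY_OK;
 *	}
 */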
static int mlx5_esw_acls_ns_init(struct mlx5_eswitch *esw)
{
	struct mlx5_core_dev *dev = esw->dev;
	int total_vports;
	int err;

	if (esw->flags & MLX5_ESWITCH_VPORT_ACL_NS_CREATED)
		return 0;

	total_vports = mlx5_eswitch_get_total_vports(dev);

	if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
		err = mlx5_fs_egress_acls_init(dev, total_vports);
		if (err)
			return err;
	} else {
		esw_warn(dev, "egress ACL is not supported by FW\n");
	}

	if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
		err = mlx5_fs_ingress_acls_init(dev, total_vports);
		if (err)
			goto err;
	} else {
		esw_warn(dev, "ingress ACL is not supported by FW\n");
	}
	esw->flags |= MLX5_ESWITCH_VPORT_ACL_NS_CREATED;
	return 0;

err:
	if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
		mlx5_fs_egress_acls_cleanup(dev);
	return err;
}

static void mlx5_esw_acls_ns_cleanup(struct mlx5_eswitch *esw)
{
	struct mlx5_core_dev *dev = esw->dev;

	esw->flags &= ~MLX5_ESWITCH_VPORT_ACL_NS_CREATED;
	if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
		mlx5_fs_ingress_acls_cleanup(dev);
	if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
		mlx5_fs_egress_acls_cleanup(dev);
}

/**
 * mlx5_eswitch_enable_locked - Enable eswitch
 * @esw:	Pointer to eswitch
 * @num_vfs:	Enable eswitch for given number of VFs. This is optional.
 *		Valid values are 0, > 0 and MLX5_ESWITCH_IGNORE_NUM_VFS.
 *		Caller should pass num_vfs > 0 when enabling eswitch for
 *		vf vports. Caller should pass num_vfs = 0, when eswitch
 *		is enabled without sriov VFs or when caller
 *		is unaware of the sriov state of the host PF on ECPF based
 *		eswitch. Caller should pass < 0 when num_vfs should be
 *		completely ignored. This is typically the case when eswitch
 *		is enabled without sriov regardless of PF/ECPF system.
 *
 * mlx5_eswitch_enable_locked() enables the eswitch in either legacy or
 * offloads mode. If num_vfs >= 0 is provided, it sets up VF-related eswitch
 * vports. It returns 0 on success or an error code on failure.
 */
int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs)
{
	int err;

	lockdep_assert_held(&esw->mode_lock);

	if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
		esw_warn(esw->dev, "FDB is not supported, aborting ...\n");
		return -EOPNOTSUPP;
	}

	mlx5_eswitch_get_devlink_param(esw);

	err = mlx5_esw_acls_ns_init(esw);
	if (err)
		return err;

	mlx5_eswitch_update_num_of_vfs(esw, num_vfs);

	if (esw->mode == MLX5_ESWITCH_LEGACY) {
		err = esw_legacy_enable(esw);
	} else {
		mlx5_rescan_drivers(esw->dev);
		err = esw_offloads_enable(esw);
	}

	if (err)
		goto abort;

	esw->fdb_table.flags |= MLX5_ESW_FDB_CREATED;

	mlx5_eswitch_event_handlers_register(esw);

	esw_info(esw->dev, "Enable: mode(%s), nvfs(%d), active vports(%d)\n",
		 esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
		 esw->esw_funcs.num_vfs, esw->enabled_vports);

	mlx5_esw_mode_change_notify(esw, esw->mode);

	return 0;

abort:
	mlx5_esw_acls_ns_cleanup(esw);
	return err;
}
/**
 * mlx5_eswitch_enable - Enable eswitch
 * @esw:	Pointer to eswitch
 * @num_vfs:	Enable eswitch for the given number of VFs.
 *		Caller must pass num_vfs > 0 when enabling eswitch for
 *		vf vports.
 *
 * mlx5_eswitch_enable() returns 0 on success or an error code on failure.
 */
int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
{
	bool toggle_lag;
	int ret;

	if (!mlx5_esw_allowed(esw))
		return 0;

	devl_assert_locked(priv_to_devlink(esw->dev));

	toggle_lag = !mlx5_esw_is_fdb_created(esw);

	if (toggle_lag)
		mlx5_lag_disable_change(esw->dev);

	down_write(&esw->mode_lock);
	if (!mlx5_esw_is_fdb_created(esw)) {
		ret = mlx5_eswitch_enable_locked(esw, num_vfs);
	} else {
		enum mlx5_eswitch_vport_event vport_events;

		vport_events = (esw->mode == MLX5_ESWITCH_LEGACY) ?
					MLX5_LEGACY_SRIOV_VPORT_EVENTS : MLX5_VPORT_UC_ADDR_CHANGE;
		ret = mlx5_eswitch_load_vf_vports(esw, num_vfs, vport_events);
		if (!ret)
			esw->esw_funcs.num_vfs = num_vfs;
	}
	up_write(&esw->mode_lock);

	if (toggle_lag)
		mlx5_lag_enable_change(esw->dev);

	return ret;
}

/* When disabling sriov, free driver level resources. */
void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf)
{
	if (!mlx5_esw_allowed(esw))
		return;

	devl_assert_locked(priv_to_devlink(esw->dev));
	down_write(&esw->mode_lock);
	/* If driver is unloaded, this function is called twice by remove_one()
	 * and mlx5_unload(). Prevent the second call.
	 */
	if (!esw->esw_funcs.num_vfs && !clear_vf)
		goto unlock;

	esw_info(esw->dev, "Unload vfs: mode(%s), nvfs(%d), active vports(%d)\n",
		 esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
		 esw->esw_funcs.num_vfs, esw->enabled_vports);

	mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
	if (clear_vf)
		mlx5_eswitch_clear_vf_vports_info(esw);
	/* If disabling sriov in switchdev mode, free meta rules here
	 * because it depends on num_vfs.
	 */
	if (esw->mode == MLX5_ESWITCH_OFFLOADS) {
		struct devlink *devlink = priv_to_devlink(esw->dev);

		devl_rate_nodes_destroy(devlink);
	}
	/* Destroy legacy fdb when disabling sriov in legacy mode. */
	if (esw->mode == MLX5_ESWITCH_LEGACY)
		mlx5_eswitch_disable_locked(esw);

	esw->esw_funcs.num_vfs = 0;

unlock:
	up_write(&esw->mode_lock);
}
"LEGACY" : "OFFLOADS", 1434 esw->esw_funcs.num_vfs, esw->enabled_vports); 1435 1436 if (esw->fdb_table.flags & MLX5_ESW_FDB_CREATED) { 1437 esw->fdb_table.flags &= ~MLX5_ESW_FDB_CREATED; 1438 if (esw->mode == MLX5_ESWITCH_OFFLOADS) 1439 esw_offloads_disable(esw); 1440 else if (esw->mode == MLX5_ESWITCH_LEGACY) 1441 esw_legacy_disable(esw); 1442 mlx5_esw_acls_ns_cleanup(esw); 1443 } 1444 1445 if (esw->mode == MLX5_ESWITCH_OFFLOADS) 1446 devl_rate_nodes_destroy(devlink); 1447 } 1448 1449 void mlx5_eswitch_disable(struct mlx5_eswitch *esw) 1450 { 1451 if (!mlx5_esw_allowed(esw)) 1452 return; 1453 1454 devl_assert_locked(priv_to_devlink(esw->dev)); 1455 mlx5_lag_disable_change(esw->dev); 1456 down_write(&esw->mode_lock); 1457 mlx5_eswitch_disable_locked(esw); 1458 up_write(&esw->mode_lock); 1459 mlx5_lag_enable_change(esw->dev); 1460 } 1461 1462 static int mlx5_query_hca_cap_host_pf(struct mlx5_core_dev *dev, void *out) 1463 { 1464 u16 opmod = (MLX5_CAP_GENERAL << 1) | (HCA_CAP_OPMOD_GET_MAX & 0x01); 1465 u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)] = {}; 1466 1467 MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP); 1468 MLX5_SET(query_hca_cap_in, in, op_mod, opmod); 1469 MLX5_SET(query_hca_cap_in, in, function_id, MLX5_VPORT_PF); 1470 MLX5_SET(query_hca_cap_in, in, other_function, true); 1471 return mlx5_cmd_exec_inout(dev, query_hca_cap, in, out); 1472 } 1473 1474 int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *sf_base_id) 1475 1476 { 1477 int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out); 1478 void *query_ctx; 1479 void *hca_caps; 1480 int err; 1481 1482 if (!mlx5_core_is_ecpf(dev)) { 1483 *max_sfs = 0; 1484 return 0; 1485 } 1486 1487 query_ctx = kzalloc(query_out_sz, GFP_KERNEL); 1488 if (!query_ctx) 1489 return -ENOMEM; 1490 1491 err = mlx5_query_hca_cap_host_pf(dev, query_ctx); 1492 if (err) 1493 goto out_free; 1494 1495 hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability); 1496 *max_sfs = MLX5_GET(cmd_hca_cap, hca_caps, max_num_sf); 1497 *sf_base_id = MLX5_GET(cmd_hca_cap, hca_caps, sf_base_id); 1498 1499 out_free: 1500 kfree(query_ctx); 1501 return err; 1502 } 1503 1504 static int mlx5_esw_vport_alloc(struct mlx5_eswitch *esw, struct mlx5_core_dev *dev, 1505 int index, u16 vport_num) 1506 { 1507 struct mlx5_vport *vport; 1508 int err; 1509 1510 vport = kzalloc(sizeof(*vport), GFP_KERNEL); 1511 if (!vport) 1512 return -ENOMEM; 1513 1514 vport->dev = esw->dev; 1515 vport->vport = vport_num; 1516 vport->index = index; 1517 vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO; 1518 INIT_WORK(&vport->vport_change_handler, esw_vport_change_handler); 1519 err = xa_insert(&esw->vports, vport_num, vport, GFP_KERNEL); 1520 if (err) 1521 goto insert_err; 1522 1523 esw->total_vports++; 1524 return 0; 1525 1526 insert_err: 1527 kfree(vport); 1528 return err; 1529 } 1530 1531 static void mlx5_esw_vport_free(struct mlx5_eswitch *esw, struct mlx5_vport *vport) 1532 { 1533 xa_erase(&esw->vports, vport->vport); 1534 kfree(vport); 1535 } 1536 1537 static void mlx5_esw_vports_cleanup(struct mlx5_eswitch *esw) 1538 { 1539 struct mlx5_vport *vport; 1540 unsigned long i; 1541 1542 mlx5_esw_for_each_vport(esw, i, vport) 1543 mlx5_esw_vport_free(esw, vport); 1544 xa_destroy(&esw->vports); 1545 } 1546 1547 static int mlx5_esw_vports_init(struct mlx5_eswitch *esw) 1548 { 1549 struct mlx5_core_dev *dev = esw->dev; 1550 u16 max_host_pf_sfs; 1551 u16 base_sf_num; 1552 int idx = 0; 1553 int err; 1554 int i; 1555 1556 xa_init(&esw->vports); 1557 1558 err = 
static int mlx5_esw_vports_init(struct mlx5_eswitch *esw)
{
	struct mlx5_core_dev *dev = esw->dev;
	u16 max_host_pf_sfs;
	u16 base_sf_num;
	int idx = 0;
	int err;
	int i;

	xa_init(&esw->vports);

	err = mlx5_esw_vport_alloc(esw, dev, idx, MLX5_VPORT_PF);
	if (err)
		goto err;
	if (esw->first_host_vport == MLX5_VPORT_PF)
		xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN);
	idx++;

	for (i = 0; i < mlx5_core_max_vfs(dev); i++) {
		err = mlx5_esw_vport_alloc(esw, dev, idx, idx);
		if (err)
			goto err;
		xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_VF);
		xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN);
		idx++;
	}
	base_sf_num = mlx5_sf_start_function_id(dev);
	for (i = 0; i < mlx5_sf_max_functions(dev); i++) {
		err = mlx5_esw_vport_alloc(esw, dev, idx, base_sf_num + i);
		if (err)
			goto err;
		xa_set_mark(&esw->vports, base_sf_num + i, MLX5_ESW_VPT_SF);
		idx++;
	}

	err = mlx5_esw_sf_max_hpf_functions(dev, &max_host_pf_sfs, &base_sf_num);
	if (err)
		goto err;
	for (i = 0; i < max_host_pf_sfs; i++) {
		err = mlx5_esw_vport_alloc(esw, dev, idx, base_sf_num + i);
		if (err)
			goto err;
		xa_set_mark(&esw->vports, base_sf_num + i, MLX5_ESW_VPT_SF);
		idx++;
	}

	if (mlx5_ecpf_vport_exists(dev)) {
		err = mlx5_esw_vport_alloc(esw, dev, idx, MLX5_VPORT_ECPF);
		if (err)
			goto err;
		idx++;
	}
	err = mlx5_esw_vport_alloc(esw, dev, idx, MLX5_VPORT_UPLINK);
	if (err)
		goto err;
	return 0;

err:
	mlx5_esw_vports_cleanup(esw);
	return err;
}
int mlx5_eswitch_init(struct mlx5_core_dev *dev)
{
	struct mlx5_eswitch *esw;
	int err;

	if (!MLX5_VPORT_MANAGER(dev))
		return 0;

	esw = kzalloc(sizeof(*esw), GFP_KERNEL);
	if (!esw)
		return -ENOMEM;

	esw->dev = dev;
	esw->manager_vport = mlx5_eswitch_manager_vport(dev);
	esw->first_host_vport = mlx5_eswitch_first_host_vport_num(dev);

	esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq");
	if (!esw->work_queue) {
		err = -ENOMEM;
		goto abort;
	}

	err = mlx5_esw_vports_init(esw);
	if (err)
		goto abort;

	err = esw_offloads_init_reps(esw);
	if (err)
		goto reps_err;

	mutex_init(&esw->offloads.encap_tbl_lock);
	hash_init(esw->offloads.encap_tbl);
	mutex_init(&esw->offloads.decap_tbl_lock);
	hash_init(esw->offloads.decap_tbl);
	mlx5e_mod_hdr_tbl_init(&esw->offloads.mod_hdr);
	atomic64_set(&esw->offloads.num_flows, 0);
	ida_init(&esw->offloads.vport_metadata_ida);
	xa_init_flags(&esw->offloads.vhca_map, XA_FLAGS_ALLOC);
	mutex_init(&esw->state_lock);
	init_rwsem(&esw->mode_lock);
	refcount_set(&esw->qos.refcnt, 0);

	esw->enabled_vports = 0;
	esw->mode = MLX5_ESWITCH_LEGACY;
	esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;
	if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) &&
	    MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
	else
		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
	if (MLX5_ESWITCH_MANAGER(dev) &&
	    mlx5_esw_vport_match_metadata_supported(esw))
		esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;

	dev->priv.eswitch = esw;
	BLOCKING_INIT_NOTIFIER_HEAD(&esw->n_head);

	esw->dbgfs = debugfs_create_dir("esw", mlx5_debugfs_get_dev_root(esw->dev));
	esw_info(dev,
		 "Total vports %d, per vport: max uc(%d) max mc(%d)\n",
		 esw->total_vports,
		 MLX5_MAX_UC_PER_VPORT(dev),
		 MLX5_MAX_MC_PER_VPORT(dev));
	return 0;

reps_err:
	mlx5_esw_vports_cleanup(esw);
abort:
	if (esw->work_queue)
		destroy_workqueue(esw->work_queue);
	kfree(esw);
	return err;
}

void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
{
	if (!esw || !MLX5_VPORT_MANAGER(esw->dev))
		return;

	esw_info(esw->dev, "cleanup\n");

	debugfs_remove_recursive(esw->dbgfs);
	esw->dev->priv.eswitch = NULL;
	destroy_workqueue(esw->work_queue);
	WARN_ON(refcount_read(&esw->qos.refcnt));
	mutex_destroy(&esw->state_lock);
	WARN_ON(!xa_empty(&esw->offloads.vhca_map));
	xa_destroy(&esw->offloads.vhca_map);
	ida_destroy(&esw->offloads.vport_metadata_ida);
	mlx5e_mod_hdr_tbl_destroy(&esw->offloads.mod_hdr);
	mutex_destroy(&esw->offloads.encap_tbl_lock);
	mutex_destroy(&esw->offloads.decap_tbl_lock);
	esw_offloads_cleanup_reps(esw);
	mlx5_esw_vports_cleanup(esw);
	kfree(esw);
}
/* Vport Administration */
static int
mlx5_esw_set_vport_mac_locked(struct mlx5_eswitch *esw,
			      struct mlx5_vport *evport, const u8 *mac)
{
	u16 vport_num = evport->vport;
	u64 node_guid;
	int err = 0;

	if (is_multicast_ether_addr(mac))
		return -EINVAL;

	if (evport->info.spoofchk && !is_valid_ether_addr(mac))
		mlx5_core_warn(esw->dev,
			       "Set invalid MAC while spoofchk is on, vport(%d)\n",
			       vport_num);

	err = mlx5_modify_nic_vport_mac_address(esw->dev, vport_num, mac);
	if (err) {
		mlx5_core_warn(esw->dev,
			       "Failed to mlx5_modify_nic_vport_mac vport(%d) err=(%d)\n",
			       vport_num, err);
		return err;
	}

	node_guid_gen_from_mac(&node_guid, mac);
	err = mlx5_modify_nic_vport_node_guid(esw->dev, vport_num, node_guid);
	if (err)
		mlx5_core_warn(esw->dev,
			       "Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n",
			       vport_num, err);

	ether_addr_copy(evport->info.mac, mac);
	evport->info.node_guid = node_guid;
	if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY)
		err = esw_acl_ingress_lgcy_setup(esw, evport);

	return err;
}

int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
			       u16 vport, const u8 *mac)
{
	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
	int err = 0;

	if (IS_ERR(evport))
		return PTR_ERR(evport);

	mutex_lock(&esw->state_lock);
	err = mlx5_esw_set_vport_mac_locked(esw, evport, mac);
	mutex_unlock(&esw->state_lock);
	return err;
}

static bool mlx5_esw_check_port_type(struct mlx5_eswitch *esw, u16 vport_num, xa_mark_t mark)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(esw, vport_num);
	if (IS_ERR(vport))
		return false;

	return xa_get_mark(&esw->vports, vport_num, mark);
}

bool mlx5_eswitch_is_vf_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
	return mlx5_esw_check_port_type(esw, vport_num, MLX5_ESW_VPT_VF);
}

bool mlx5_esw_is_sf_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
	return mlx5_esw_check_port_type(esw, vport_num, MLX5_ESW_VPT_SF);
}

int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
				 u16 vport, int link_state)
{
	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
	int opmod = MLX5_VPORT_STATE_OP_MOD_ESW_VPORT;
	int other_vport = 1;
	int err = 0;

	if (!mlx5_esw_allowed(esw))
		return -EPERM;
	if (IS_ERR(evport))
		return PTR_ERR(evport);

	if (vport == MLX5_VPORT_UPLINK) {
		opmod = MLX5_VPORT_STATE_OP_MOD_UPLINK;
		other_vport = 0;
		vport = 0;
	}
	mutex_lock(&esw->state_lock);
	if (esw->mode != MLX5_ESWITCH_LEGACY) {
		err = -EOPNOTSUPP;
		goto unlock;
	}

	err = mlx5_modify_vport_admin_state(esw->dev, opmod, vport, other_vport, link_state);
	if (err) {
		mlx5_core_warn(esw->dev, "Failed to set vport %d link state, opmod = %d, err = %d",
			       vport, opmod, err);
		goto unlock;
	}

	evport->info.link_state = link_state;

unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}

int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
				  u16 vport, struct ifla_vf_info *ivi)
{
	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);

	if (IS_ERR(evport))
		return PTR_ERR(evport);

	memset(ivi, 0, sizeof(*ivi));
	ivi->vf = vport - 1;

	mutex_lock(&esw->state_lock);
	ether_addr_copy(ivi->mac, evport->info.mac);
	ivi->linkstate = evport->info.link_state;
	ivi->vlan = evport->info.vlan;
	ivi->qos = evport->info.qos;
	ivi->spoofchk = evport->info.spoofchk;
	ivi->trusted = evport->info.trusted;
	if (evport->qos.enabled) {
		ivi->min_tx_rate = evport->qos.min_rate;
		ivi->max_tx_rate = evport->qos.max_rate;
	}
	mutex_unlock(&esw->state_lock);

	return 0;
}
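/* Usage sketch (hypothetical ndo-style caller): VF N is administered through
 * vport N + 1, as the ivi->vf = vport - 1 mapping above implies:
 *
 *	err = mlx5_eswitch_set_vport_mac(esw, vf + 1, mac);
 *	if (!err)
 *		err = mlx5_eswitch_set_vport_state(esw, vf + 1,
 *						   MLX5_VPORT_ADMIN_STATE_AUTO);
 *
 * Note that mlx5_eswitch_set_vport_state() is legacy-mode only and returns
 * -EOPNOTSUPP in offloads mode.
 */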
int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				  u16 vport, u16 vlan, u8 qos, u8 set_flags)
{
	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
	int err = 0;

	if (IS_ERR(evport))
		return PTR_ERR(evport);
	if (vlan > 4095 || qos > 7)
		return -EINVAL;

	err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags);
	if (err)
		return err;

	evport->info.vlan = vlan;
	evport->info.qos = qos;
	if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY) {
		err = esw_acl_ingress_lgcy_setup(esw, evport);
		if (err)
			return err;
		err = esw_acl_egress_lgcy_setup(esw, evport);
	}

	return err;
}

int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
				 u16 vport_num,
				 struct ifla_vf_stats *vf_stats)
{
	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {};
	struct mlx5_vport_drop_stats stats = {};
	int err = 0;
	u32 *out;

	if (IS_ERR(vport))
		return PTR_ERR(vport);

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_vport_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
	MLX5_SET(query_vport_counter_in, in, vport_number, vport->vport);
	MLX5_SET(query_vport_counter_in, in, other_vport, 1);

	err = mlx5_cmd_exec_inout(esw->dev, query_vport_counter, in, out);
	if (err)
		goto free_out;

#define MLX5_GET_CTR(p, x) \
	MLX5_GET64(query_vport_counter_out, p, x)

	memset(vf_stats, 0, sizeof(*vf_stats));
	vf_stats->rx_packets =
		MLX5_GET_CTR(out, received_eth_unicast.packets) +
		MLX5_GET_CTR(out, received_ib_unicast.packets) +
		MLX5_GET_CTR(out, received_eth_multicast.packets) +
		MLX5_GET_CTR(out, received_ib_multicast.packets) +
		MLX5_GET_CTR(out, received_eth_broadcast.packets);

	vf_stats->rx_bytes =
		MLX5_GET_CTR(out, received_eth_unicast.octets) +
		MLX5_GET_CTR(out, received_ib_unicast.octets) +
		MLX5_GET_CTR(out, received_eth_multicast.octets) +
		MLX5_GET_CTR(out, received_ib_multicast.octets) +
		MLX5_GET_CTR(out, received_eth_broadcast.octets);

	vf_stats->tx_packets =
		MLX5_GET_CTR(out, transmitted_eth_unicast.packets) +
		MLX5_GET_CTR(out, transmitted_ib_unicast.packets) +
		MLX5_GET_CTR(out, transmitted_eth_multicast.packets) +
		MLX5_GET_CTR(out, transmitted_ib_multicast.packets) +
		MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);

	vf_stats->tx_bytes =
		MLX5_GET_CTR(out, transmitted_eth_unicast.octets) +
		MLX5_GET_CTR(out, transmitted_ib_unicast.octets) +
		MLX5_GET_CTR(out, transmitted_eth_multicast.octets) +
		MLX5_GET_CTR(out, transmitted_ib_multicast.octets) +
		MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);

	vf_stats->multicast =
		MLX5_GET_CTR(out, received_eth_multicast.packets) +
		MLX5_GET_CTR(out, received_ib_multicast.packets);

	vf_stats->broadcast =
		MLX5_GET_CTR(out, received_eth_broadcast.packets);

	err = mlx5_esw_query_vport_drop_stats(esw->dev, vport, &stats);
	if (err)
		goto free_out;
	vf_stats->rx_dropped = stats.rx_dropped;
	vf_stats->tx_dropped = stats.tx_dropped;

free_out:
	kvfree(out);
	return err;
}
u8 mlx5_eswitch_mode(const struct mlx5_core_dev *dev)
{
	struct mlx5_eswitch *esw = dev->priv.eswitch;

	return mlx5_esw_allowed(esw) ? esw->mode : MLX5_ESWITCH_LEGACY;
}
EXPORT_SYMBOL_GPL(mlx5_eswitch_mode);

enum devlink_eswitch_encap_mode
mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev)
{
	struct mlx5_eswitch *esw;

	esw = dev->priv.eswitch;
	return (mlx5_eswitch_mode(dev) == MLX5_ESWITCH_OFFLOADS) ? esw->offloads.encap :
		DEVLINK_ESWITCH_ENCAP_MODE_NONE;
}
EXPORT_SYMBOL(mlx5_eswitch_get_encap_mode);

bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
			       struct mlx5_core_dev *dev1)
{
	return (dev0->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS &&
		dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS);
}

int mlx5_esw_event_notifier_register(struct mlx5_eswitch *esw, struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&esw->n_head, nb);
}

void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch *esw, struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&esw->n_head, nb);
}

/**
 * mlx5_esw_hold() - Try to take a read lock on esw mode lock.
 * @mdev: mlx5 core device.
 *
 * Should be called by esw resources callers.
 *
 * Return: true on success or false.
 */
bool mlx5_esw_hold(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;

	/* e.g. VF doesn't have eswitch so nothing to do */
	if (!mlx5_esw_allowed(esw))
		return true;

	if (down_read_trylock(&esw->mode_lock) != 0)
		return true;

	return false;
}

/**
 * mlx5_esw_release() - Release a read lock on esw mode lock.
 * @mdev: mlx5 core device.
 */
void mlx5_esw_release(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;

	if (mlx5_esw_allowed(esw))
		up_read(&esw->mode_lock);
}

/**
 * mlx5_esw_get() - Increase esw user count.
 * @mdev: mlx5 core device.
 */
void mlx5_esw_get(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;

	if (mlx5_esw_allowed(esw))
		atomic64_inc(&esw->user_count);
}

/**
 * mlx5_esw_put() - Decrease esw user count.
 * @mdev: mlx5 core device.
 */
void mlx5_esw_put(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;

	if (mlx5_esw_allowed(esw))
		atomic64_dec_if_positive(&esw->user_count);
}

/**
 * mlx5_esw_try_lock() - Take a write lock on esw mode lock.
 * @esw: eswitch device.
 *
 * Should be called by esw mode change routine.
 *
 * Return:
 * * 0       - esw mode if successfully locked and refcount is 0.
 * * -EBUSY  - refcount is not 0.
 * * -EINVAL - In the middle of switching mode or lock is already held.
 */
int mlx5_esw_try_lock(struct mlx5_eswitch *esw)
{
	if (down_write_trylock(&esw->mode_lock) == 0)
		return -EINVAL;

	if (atomic64_read(&esw->user_count) > 0) {
		up_write(&esw->mode_lock);
		return -EBUSY;
	}

	return esw->mode;
}
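/* Illustrative mode-change flow built on the helpers above (hypothetical
 * caller; the real users live in the devlink eswitch mode-change path):
 *
 *	err = mlx5_esw_try_lock(esw);
 *	if (err < 0)
 *		return err;		// -EINVAL or -EBUSY
 *	cur_mode = err;			// >= 0: the current esw mode
 *	... perform the mode change ...
 *	mlx5_esw_unlock(esw);
 */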
/**
 * mlx5_esw_unlock() - Release write lock on esw mode lock
 * @esw: eswitch device.
 */
void mlx5_esw_unlock(struct mlx5_eswitch *esw)
{
	up_write(&esw->mode_lock);
}

/**
 * mlx5_eswitch_get_total_vports - Get total vports of the eswitch
 *
 * @dev: Pointer to core device
 *
 * mlx5_eswitch_get_total_vports returns total number of eswitch vports.
 */
u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev)
{
	struct mlx5_eswitch *esw;

	esw = dev->priv.eswitch;
	return mlx5_esw_allowed(esw) ? esw->total_vports : 0;
}
EXPORT_SYMBOL_GPL(mlx5_eswitch_get_total_vports);

/**
 * mlx5_eswitch_get_core_dev - Get the mdev device
 * @esw : eswitch device.
 *
 * Return the mellanox core device which manages the eswitch.
 */
struct mlx5_core_dev *mlx5_eswitch_get_core_dev(struct mlx5_eswitch *esw)
{
	return mlx5_esw_allowed(esw) ? esw->dev : NULL;
}
EXPORT_SYMBOL(mlx5_eswitch_get_core_dev);