1 /* 2 * Copyright (c) 2015, Mellanox Technologies. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 31 */ 32 33 #include <linux/etherdevice.h> 34 #include <linux/mlx5/driver.h> 35 #include <linux/mlx5/mlx5_ifc.h> 36 #include <linux/mlx5/vport.h> 37 #include <linux/mlx5/fs.h> 38 #include <linux/mlx5/mpfs.h> 39 #include <linux/debugfs.h> 40 #include "esw/acl/lgcy.h" 41 #include "esw/legacy.h" 42 #include "esw/qos.h" 43 #include "mlx5_core.h" 44 #include "lib/eq.h" 45 #include "eswitch.h" 46 #include "fs_core.h" 47 #include "devlink.h" 48 #include "ecpf.h" 49 #include "en/mod_hdr.h" 50 51 enum { 52 MLX5_ACTION_NONE = 0, 53 MLX5_ACTION_ADD = 1, 54 MLX5_ACTION_DEL = 2, 55 }; 56 57 /* Vport UC/MC hash node */ 58 struct vport_addr { 59 struct l2addr_node node; 60 u8 action; 61 u16 vport; 62 struct mlx5_flow_handle *flow_rule; 63 bool mpfs; /* UC MAC was added to MPFs */ 64 /* A flag indicating that mac was added due to mc promiscuous vport */ 65 bool mc_promisc; 66 }; 67 68 static int mlx5_eswitch_check(const struct mlx5_core_dev *dev) 69 { 70 if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) 71 return -EOPNOTSUPP; 72 73 if (!MLX5_ESWITCH_MANAGER(dev)) 74 return -EOPNOTSUPP; 75 76 return 0; 77 } 78 79 struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink) 80 { 81 struct mlx5_core_dev *dev = devlink_priv(devlink); 82 int err; 83 84 err = mlx5_eswitch_check(dev); 85 if (err) 86 return ERR_PTR(err); 87 88 return dev->priv.eswitch; 89 } 90 91 struct mlx5_vport *__must_check 92 mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num) 93 { 94 struct mlx5_vport *vport; 95 96 if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager)) 97 return ERR_PTR(-EPERM); 98 99 vport = xa_load(&esw->vports, vport_num); 100 if (!vport) { 101 esw_debug(esw->dev, "vport out of range: num(0x%x)\n", vport_num); 102 return ERR_PTR(-EINVAL); 103 } 104 return vport; 105 } 106 107 static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport, 108 u32 events_mask) 109 { 110 u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {}; 111 void *nic_vport_ctx; 112 113 MLX5_SET(modify_nic_vport_context_in, in, 114 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT); 115 MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1); 116 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport); 117 MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1); 118 nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, 119 in, nic_vport_context); 120 121 MLX5_SET(nic_vport_context, nic_vport_ctx, arm_change_event, 1); 122 123 if (events_mask & MLX5_VPORT_UC_ADDR_CHANGE) 124 MLX5_SET(nic_vport_context, nic_vport_ctx, 125 event_on_uc_address_change, 1); 126 if (events_mask & MLX5_VPORT_MC_ADDR_CHANGE) 127 MLX5_SET(nic_vport_context, nic_vport_ctx, 128 event_on_mc_address_change, 1); 129 if (events_mask & MLX5_VPORT_PROMISC_CHANGE) 130 MLX5_SET(nic_vport_context, nic_vport_ctx, 131 event_on_promisc_change, 1); 132 133 return mlx5_cmd_exec_in(dev, modify_nic_vport_context, in); 134 } 135 136 /* E-Switch vport context HW commands */ 137 int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport, 138 bool other_vport, void *in) 139 { 140 MLX5_SET(modify_esw_vport_context_in, in, opcode, 141 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT); 142 MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport); 143 MLX5_SET(modify_esw_vport_context_in, in, other_vport, other_vport); 144 return mlx5_cmd_exec_in(dev, modify_esw_vport_context, in); 145 } 146 147 static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u16 vport, 148 u16 vlan, u8 qos, u8 set_flags) 149 { 150 u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {}; 151 152 if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) || 153 !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist)) 154 return -EOPNOTSUPP; 155 156 esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%x\n", 157 vport, vlan, qos, set_flags); 158 159 if (set_flags & SET_VLAN_STRIP) 160 MLX5_SET(modify_esw_vport_context_in, in, 161 esw_vport_context.vport_cvlan_strip, 1); 162 163 if (set_flags & SET_VLAN_INSERT) { 164 /* insert only if no vlan in packet */ 165 MLX5_SET(modify_esw_vport_context_in, in, 166 esw_vport_context.vport_cvlan_insert, 1); 167 168 MLX5_SET(modify_esw_vport_context_in, in, 169 esw_vport_context.cvlan_pcp, qos); 170 MLX5_SET(modify_esw_vport_context_in, in, 171 esw_vport_context.cvlan_id, vlan); 172 } 173 174 MLX5_SET(modify_esw_vport_context_in, in, 175 field_select.vport_cvlan_strip, 1); 176 MLX5_SET(modify_esw_vport_context_in, in, 177 field_select.vport_cvlan_insert, 1); 178 179 return mlx5_eswitch_modify_esw_vport_context(dev, vport, true, in); 180 } 181 182 /* E-Switch FDB */ 183 static struct mlx5_flow_handle * 184 __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u16 vport, bool rx_rule, 185 u8 mac_c[ETH_ALEN], u8 mac_v[ETH_ALEN]) 186 { 187 int match_header = (is_zero_ether_addr(mac_c) ? 0 : 188 MLX5_MATCH_OUTER_HEADERS); 189 struct mlx5_flow_handle *flow_rule = NULL; 190 struct mlx5_flow_act flow_act = {0}; 191 struct mlx5_flow_destination dest = {}; 192 struct mlx5_flow_spec *spec; 193 void *mv_misc = NULL; 194 void *mc_misc = NULL; 195 u8 *dmac_v = NULL; 196 u8 *dmac_c = NULL; 197 198 if (rx_rule) 199 match_header |= MLX5_MATCH_MISC_PARAMETERS; 200 201 spec = kvzalloc(sizeof(*spec), GFP_KERNEL); 202 if (!spec) 203 return NULL; 204 205 dmac_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, 206 outer_headers.dmac_47_16); 207 dmac_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 208 outer_headers.dmac_47_16); 209 210 if (match_header & MLX5_MATCH_OUTER_HEADERS) { 211 ether_addr_copy(dmac_v, mac_v); 212 ether_addr_copy(dmac_c, mac_c); 213 } 214 215 if (match_header & MLX5_MATCH_MISC_PARAMETERS) { 216 mv_misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, 217 misc_parameters); 218 mc_misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 219 misc_parameters); 220 MLX5_SET(fte_match_set_misc, mv_misc, source_port, MLX5_VPORT_UPLINK); 221 MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port); 222 } 223 224 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; 225 dest.vport.num = vport; 226 227 esw_debug(esw->dev, 228 "\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n", 229 dmac_v, dmac_c, vport); 230 spec->match_criteria_enable = match_header; 231 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 232 flow_rule = 233 mlx5_add_flow_rules(esw->fdb_table.legacy.fdb, spec, 234 &flow_act, &dest, 1); 235 if (IS_ERR(flow_rule)) { 236 esw_warn(esw->dev, 237 "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n", 238 dmac_v, dmac_c, vport, PTR_ERR(flow_rule)); 239 flow_rule = NULL; 240 } 241 242 kvfree(spec); 243 return flow_rule; 244 } 245 246 static struct mlx5_flow_handle * 247 esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u16 vport) 248 { 249 u8 mac_c[ETH_ALEN]; 250 251 eth_broadcast_addr(mac_c); 252 return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac); 253 } 254 255 static struct mlx5_flow_handle * 256 esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u16 vport) 257 { 258 u8 mac_c[ETH_ALEN]; 259 u8 mac_v[ETH_ALEN]; 260 261 eth_zero_addr(mac_c); 262 eth_zero_addr(mac_v); 263 mac_c[0] = 0x01; 264 mac_v[0] = 0x01; 265 return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac_v); 266 } 267 268 static struct mlx5_flow_handle * 269 esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u16 vport) 270 { 271 u8 mac_c[ETH_ALEN]; 272 u8 mac_v[ETH_ALEN]; 273 274 eth_zero_addr(mac_c); 275 eth_zero_addr(mac_v); 276 return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v); 277 } 278 279 /* E-Switch vport UC/MC lists management */ 280 typedef int (*vport_addr_action)(struct mlx5_eswitch *esw, 281 struct vport_addr *vaddr); 282 283 static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) 284 { 285 u8 *mac = vaddr->node.addr; 286 u16 vport = vaddr->vport; 287 int err; 288 289 /* Skip mlx5_mpfs_add_mac for eswitch_managers, 290 * it is already done by its netdev in mlx5e_execute_l2_action 291 */ 292 if (mlx5_esw_is_manager_vport(esw, vport)) 293 goto fdb_add; 294 295 err = mlx5_mpfs_add_mac(esw->dev, mac); 296 if (err) { 297 esw_warn(esw->dev, 298 "Failed to add L2 table mac(%pM) for vport(0x%x), err(%d)\n", 299 mac, vport, err); 300 return err; 301 } 302 vaddr->mpfs = true; 303 304 fdb_add: 305 /* SRIOV is enabled: Forward UC MAC to vport */ 306 if (esw->fdb_table.legacy.fdb && esw->mode == MLX5_ESWITCH_LEGACY) 307 vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport); 308 309 esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM fr(%p)\n", 310 vport, mac, vaddr->flow_rule); 311 312 return 0; 313 } 314 315 static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) 316 { 317 u8 *mac = vaddr->node.addr; 318 u16 vport = vaddr->vport; 319 int err = 0; 320 321 /* Skip mlx5_mpfs_del_mac for eswitch managers, 322 * it is already done by its netdev in mlx5e_execute_l2_action 323 */ 324 if (!vaddr->mpfs || mlx5_esw_is_manager_vport(esw, vport)) 325 goto fdb_del; 326 327 err = mlx5_mpfs_del_mac(esw->dev, mac); 328 if (err) 329 esw_warn(esw->dev, 330 "Failed to del L2 table mac(%pM) for vport(%d), err(%d)\n", 331 mac, vport, err); 332 vaddr->mpfs = false; 333 334 fdb_del: 335 if (vaddr->flow_rule) 336 mlx5_del_flow_rules(vaddr->flow_rule); 337 vaddr->flow_rule = NULL; 338 339 return 0; 340 } 341 342 static void update_allmulti_vports(struct mlx5_eswitch *esw, 343 struct vport_addr *vaddr, 344 struct esw_mc_addr *esw_mc) 345 { 346 u8 *mac = vaddr->node.addr; 347 struct mlx5_vport *vport; 348 unsigned long i; 349 u16 vport_num; 350 351 mlx5_esw_for_each_vport(esw, i, vport) { 352 struct hlist_head *vport_hash = vport->mc_list; 353 struct vport_addr *iter_vaddr = 354 l2addr_hash_find(vport_hash, 355 mac, 356 struct vport_addr); 357 vport_num = vport->vport; 358 if (IS_ERR_OR_NULL(vport->allmulti_rule) || 359 vaddr->vport == vport_num) 360 continue; 361 switch (vaddr->action) { 362 case MLX5_ACTION_ADD: 363 if (iter_vaddr) 364 continue; 365 iter_vaddr = l2addr_hash_add(vport_hash, mac, 366 struct vport_addr, 367 GFP_KERNEL); 368 if (!iter_vaddr) { 369 esw_warn(esw->dev, 370 "ALL-MULTI: Failed to add MAC(%pM) to vport[%d] DB\n", 371 mac, vport_num); 372 continue; 373 } 374 iter_vaddr->vport = vport_num; 375 iter_vaddr->flow_rule = 376 esw_fdb_set_vport_rule(esw, 377 mac, 378 vport_num); 379 iter_vaddr->mc_promisc = true; 380 break; 381 case MLX5_ACTION_DEL: 382 if (!iter_vaddr) 383 continue; 384 mlx5_del_flow_rules(iter_vaddr->flow_rule); 385 l2addr_hash_del(iter_vaddr); 386 break; 387 } 388 } 389 } 390 391 static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) 392 { 393 struct hlist_head *hash = esw->mc_table; 394 struct esw_mc_addr *esw_mc; 395 u8 *mac = vaddr->node.addr; 396 u16 vport = vaddr->vport; 397 398 if (!esw->fdb_table.legacy.fdb) 399 return 0; 400 401 esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr); 402 if (esw_mc) 403 goto add; 404 405 esw_mc = l2addr_hash_add(hash, mac, struct esw_mc_addr, GFP_KERNEL); 406 if (!esw_mc) 407 return -ENOMEM; 408 409 esw_mc->uplink_rule = /* Forward MC MAC to Uplink */ 410 esw_fdb_set_vport_rule(esw, mac, MLX5_VPORT_UPLINK); 411 412 /* Add this multicast mac to all the mc promiscuous vports */ 413 update_allmulti_vports(esw, vaddr, esw_mc); 414 415 add: 416 /* If the multicast mac is added as a result of mc promiscuous vport, 417 * don't increment the multicast ref count 418 */ 419 if (!vaddr->mc_promisc) 420 esw_mc->refcnt++; 421 422 /* Forward MC MAC to vport */ 423 vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport); 424 esw_debug(esw->dev, 425 "\tADDED MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n", 426 vport, mac, vaddr->flow_rule, 427 esw_mc->refcnt, esw_mc->uplink_rule); 428 return 0; 429 } 430 431 static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) 432 { 433 struct hlist_head *hash = esw->mc_table; 434 struct esw_mc_addr *esw_mc; 435 u8 *mac = vaddr->node.addr; 436 u16 vport = vaddr->vport; 437 438 if (!esw->fdb_table.legacy.fdb) 439 return 0; 440 441 esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr); 442 if (!esw_mc) { 443 esw_warn(esw->dev, 444 "Failed to find eswitch MC addr for MAC(%pM) vport(%d)", 445 mac, vport); 446 return -EINVAL; 447 } 448 esw_debug(esw->dev, 449 "\tDELETE MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n", 450 vport, mac, vaddr->flow_rule, esw_mc->refcnt, 451 esw_mc->uplink_rule); 452 453 if (vaddr->flow_rule) 454 mlx5_del_flow_rules(vaddr->flow_rule); 455 vaddr->flow_rule = NULL; 456 457 /* If the multicast mac is added as a result of mc promiscuous vport, 458 * don't decrement the multicast ref count. 459 */ 460 if (vaddr->mc_promisc || (--esw_mc->refcnt > 0)) 461 return 0; 462 463 /* Remove this multicast mac from all the mc promiscuous vports */ 464 update_allmulti_vports(esw, vaddr, esw_mc); 465 466 if (esw_mc->uplink_rule) 467 mlx5_del_flow_rules(esw_mc->uplink_rule); 468 469 l2addr_hash_del(esw_mc); 470 return 0; 471 } 472 473 /* Apply vport UC/MC list to HW l2 table and FDB table */ 474 static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw, 475 struct mlx5_vport *vport, int list_type) 476 { 477 bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC; 478 vport_addr_action vport_addr_add; 479 vport_addr_action vport_addr_del; 480 struct vport_addr *addr; 481 struct l2addr_node *node; 482 struct hlist_head *hash; 483 struct hlist_node *tmp; 484 int hi; 485 486 vport_addr_add = is_uc ? esw_add_uc_addr : 487 esw_add_mc_addr; 488 vport_addr_del = is_uc ? esw_del_uc_addr : 489 esw_del_mc_addr; 490 491 hash = is_uc ? vport->uc_list : vport->mc_list; 492 for_each_l2hash_node(node, tmp, hash, hi) { 493 addr = container_of(node, struct vport_addr, node); 494 switch (addr->action) { 495 case MLX5_ACTION_ADD: 496 vport_addr_add(esw, addr); 497 addr->action = MLX5_ACTION_NONE; 498 break; 499 case MLX5_ACTION_DEL: 500 vport_addr_del(esw, addr); 501 l2addr_hash_del(addr); 502 break; 503 } 504 } 505 } 506 507 /* Sync vport UC/MC list from vport context */ 508 static void esw_update_vport_addr_list(struct mlx5_eswitch *esw, 509 struct mlx5_vport *vport, int list_type) 510 { 511 bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC; 512 u8 (*mac_list)[ETH_ALEN]; 513 struct l2addr_node *node; 514 struct vport_addr *addr; 515 struct hlist_head *hash; 516 struct hlist_node *tmp; 517 int size; 518 int err; 519 int hi; 520 int i; 521 522 size = is_uc ? MLX5_MAX_UC_PER_VPORT(esw->dev) : 523 MLX5_MAX_MC_PER_VPORT(esw->dev); 524 525 mac_list = kcalloc(size, ETH_ALEN, GFP_KERNEL); 526 if (!mac_list) 527 return; 528 529 hash = is_uc ? vport->uc_list : vport->mc_list; 530 531 for_each_l2hash_node(node, tmp, hash, hi) { 532 addr = container_of(node, struct vport_addr, node); 533 addr->action = MLX5_ACTION_DEL; 534 } 535 536 if (!vport->enabled) 537 goto out; 538 539 err = mlx5_query_nic_vport_mac_list(esw->dev, vport->vport, list_type, 540 mac_list, &size); 541 if (err) 542 goto out; 543 esw_debug(esw->dev, "vport[%d] context update %s list size (%d)\n", 544 vport->vport, is_uc ? "UC" : "MC", size); 545 546 for (i = 0; i < size; i++) { 547 if (is_uc && !is_valid_ether_addr(mac_list[i])) 548 continue; 549 550 if (!is_uc && !is_multicast_ether_addr(mac_list[i])) 551 continue; 552 553 addr = l2addr_hash_find(hash, mac_list[i], struct vport_addr); 554 if (addr) { 555 addr->action = MLX5_ACTION_NONE; 556 /* If this mac was previously added because of allmulti 557 * promiscuous rx mode, its now converted to be original 558 * vport mac. 559 */ 560 if (addr->mc_promisc) { 561 struct esw_mc_addr *esw_mc = 562 l2addr_hash_find(esw->mc_table, 563 mac_list[i], 564 struct esw_mc_addr); 565 if (!esw_mc) { 566 esw_warn(esw->dev, 567 "Failed to MAC(%pM) in mcast DB\n", 568 mac_list[i]); 569 continue; 570 } 571 esw_mc->refcnt++; 572 addr->mc_promisc = false; 573 } 574 continue; 575 } 576 577 addr = l2addr_hash_add(hash, mac_list[i], struct vport_addr, 578 GFP_KERNEL); 579 if (!addr) { 580 esw_warn(esw->dev, 581 "Failed to add MAC(%pM) to vport[%d] DB\n", 582 mac_list[i], vport->vport); 583 continue; 584 } 585 addr->vport = vport->vport; 586 addr->action = MLX5_ACTION_ADD; 587 } 588 out: 589 kfree(mac_list); 590 } 591 592 /* Sync vport UC/MC list from vport context 593 * Must be called after esw_update_vport_addr_list 594 */ 595 static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw, 596 struct mlx5_vport *vport) 597 { 598 struct l2addr_node *node; 599 struct vport_addr *addr; 600 struct hlist_head *hash; 601 struct hlist_node *tmp; 602 int hi; 603 604 hash = vport->mc_list; 605 606 for_each_l2hash_node(node, tmp, esw->mc_table, hi) { 607 u8 *mac = node->addr; 608 609 addr = l2addr_hash_find(hash, mac, struct vport_addr); 610 if (addr) { 611 if (addr->action == MLX5_ACTION_DEL) 612 addr->action = MLX5_ACTION_NONE; 613 continue; 614 } 615 addr = l2addr_hash_add(hash, mac, struct vport_addr, 616 GFP_KERNEL); 617 if (!addr) { 618 esw_warn(esw->dev, 619 "Failed to add allmulti MAC(%pM) to vport[%d] DB\n", 620 mac, vport->vport); 621 continue; 622 } 623 addr->vport = vport->vport; 624 addr->action = MLX5_ACTION_ADD; 625 addr->mc_promisc = true; 626 } 627 } 628 629 /* Apply vport rx mode to HW FDB table */ 630 static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, 631 struct mlx5_vport *vport, 632 bool promisc, bool mc_promisc) 633 { 634 struct esw_mc_addr *allmulti_addr = &esw->mc_promisc; 635 636 if (IS_ERR_OR_NULL(vport->allmulti_rule) != mc_promisc) 637 goto promisc; 638 639 if (mc_promisc) { 640 vport->allmulti_rule = 641 esw_fdb_set_vport_allmulti_rule(esw, vport->vport); 642 if (!allmulti_addr->uplink_rule) 643 allmulti_addr->uplink_rule = 644 esw_fdb_set_vport_allmulti_rule(esw, 645 MLX5_VPORT_UPLINK); 646 allmulti_addr->refcnt++; 647 } else if (vport->allmulti_rule) { 648 mlx5_del_flow_rules(vport->allmulti_rule); 649 vport->allmulti_rule = NULL; 650 651 if (--allmulti_addr->refcnt > 0) 652 goto promisc; 653 654 if (allmulti_addr->uplink_rule) 655 mlx5_del_flow_rules(allmulti_addr->uplink_rule); 656 allmulti_addr->uplink_rule = NULL; 657 } 658 659 promisc: 660 if (IS_ERR_OR_NULL(vport->promisc_rule) != promisc) 661 return; 662 663 if (promisc) { 664 vport->promisc_rule = 665 esw_fdb_set_vport_promisc_rule(esw, vport->vport); 666 } else if (vport->promisc_rule) { 667 mlx5_del_flow_rules(vport->promisc_rule); 668 vport->promisc_rule = NULL; 669 } 670 } 671 672 /* Sync vport rx mode from vport context */ 673 static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw, 674 struct mlx5_vport *vport) 675 { 676 int promisc_all = 0; 677 int promisc_uc = 0; 678 int promisc_mc = 0; 679 int err; 680 681 err = mlx5_query_nic_vport_promisc(esw->dev, 682 vport->vport, 683 &promisc_uc, 684 &promisc_mc, 685 &promisc_all); 686 if (err) 687 return; 688 esw_debug(esw->dev, "vport[%d] context update rx mode promisc_all=%d, all_multi=%d\n", 689 vport->vport, promisc_all, promisc_mc); 690 691 if (!vport->info.trusted || !vport->enabled) { 692 promisc_uc = 0; 693 promisc_mc = 0; 694 promisc_all = 0; 695 } 696 697 esw_apply_vport_rx_mode(esw, vport, promisc_all, 698 (promisc_all || promisc_mc)); 699 } 700 701 void esw_vport_change_handle_locked(struct mlx5_vport *vport) 702 { 703 struct mlx5_core_dev *dev = vport->dev; 704 struct mlx5_eswitch *esw = dev->priv.eswitch; 705 u8 mac[ETH_ALEN]; 706 707 mlx5_query_nic_vport_mac_address(dev, vport->vport, true, mac); 708 esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n", 709 vport->vport, mac); 710 711 if (vport->enabled_events & MLX5_VPORT_UC_ADDR_CHANGE) { 712 esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC); 713 esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC); 714 } 715 716 if (vport->enabled_events & MLX5_VPORT_MC_ADDR_CHANGE) 717 esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC); 718 719 if (vport->enabled_events & MLX5_VPORT_PROMISC_CHANGE) { 720 esw_update_vport_rx_mode(esw, vport); 721 if (!IS_ERR_OR_NULL(vport->allmulti_rule)) 722 esw_update_vport_mc_promisc(esw, vport); 723 } 724 725 if (vport->enabled_events & (MLX5_VPORT_PROMISC_CHANGE | MLX5_VPORT_MC_ADDR_CHANGE)) 726 esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC); 727 728 esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport); 729 if (vport->enabled) 730 arm_vport_context_events_cmd(dev, vport->vport, 731 vport->enabled_events); 732 } 733 734 static void esw_vport_change_handler(struct work_struct *work) 735 { 736 struct mlx5_vport *vport = 737 container_of(work, struct mlx5_vport, vport_change_handler); 738 struct mlx5_eswitch *esw = vport->dev->priv.eswitch; 739 740 mutex_lock(&esw->state_lock); 741 esw_vport_change_handle_locked(vport); 742 mutex_unlock(&esw->state_lock); 743 } 744 745 static void node_guid_gen_from_mac(u64 *node_guid, const u8 *mac) 746 { 747 ((u8 *)node_guid)[7] = mac[0]; 748 ((u8 *)node_guid)[6] = mac[1]; 749 ((u8 *)node_guid)[5] = mac[2]; 750 ((u8 *)node_guid)[4] = 0xff; 751 ((u8 *)node_guid)[3] = 0xfe; 752 ((u8 *)node_guid)[2] = mac[3]; 753 ((u8 *)node_guid)[1] = mac[4]; 754 ((u8 *)node_guid)[0] = mac[5]; 755 } 756 757 static int esw_vport_setup_acl(struct mlx5_eswitch *esw, 758 struct mlx5_vport *vport) 759 { 760 if (esw->mode == MLX5_ESWITCH_LEGACY) 761 return esw_legacy_vport_acl_setup(esw, vport); 762 else 763 return esw_vport_create_offloads_acl_tables(esw, vport); 764 } 765 766 static void esw_vport_cleanup_acl(struct mlx5_eswitch *esw, 767 struct mlx5_vport *vport) 768 { 769 if (esw->mode == MLX5_ESWITCH_LEGACY) 770 esw_legacy_vport_acl_cleanup(esw, vport); 771 else 772 esw_vport_destroy_offloads_acl_tables(esw, vport); 773 } 774 775 static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport) 776 { 777 u16 vport_num = vport->vport; 778 int flags; 779 int err; 780 781 err = esw_vport_setup_acl(esw, vport); 782 if (err) 783 return err; 784 785 if (mlx5_esw_is_manager_vport(esw, vport_num)) 786 return 0; 787 788 mlx5_modify_vport_admin_state(esw->dev, 789 MLX5_VPORT_STATE_OP_MOD_ESW_VPORT, 790 vport_num, 1, 791 vport->info.link_state); 792 793 /* Host PF has its own mac/guid. */ 794 if (vport_num) { 795 mlx5_modify_nic_vport_mac_address(esw->dev, vport_num, 796 vport->info.mac); 797 mlx5_modify_nic_vport_node_guid(esw->dev, vport_num, 798 vport->info.node_guid); 799 } 800 801 flags = (vport->info.vlan || vport->info.qos) ? 802 SET_VLAN_STRIP | SET_VLAN_INSERT : 0; 803 modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan, 804 vport->info.qos, flags); 805 806 return 0; 807 } 808 809 /* Don't cleanup vport->info, it's needed to restore vport configuration */ 810 static void esw_vport_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport) 811 { 812 u16 vport_num = vport->vport; 813 814 if (!mlx5_esw_is_manager_vport(esw, vport_num)) 815 mlx5_modify_vport_admin_state(esw->dev, 816 MLX5_VPORT_STATE_OP_MOD_ESW_VPORT, 817 vport_num, 1, 818 MLX5_VPORT_ADMIN_STATE_DOWN); 819 820 mlx5_esw_qos_vport_disable(esw, vport); 821 esw_vport_cleanup_acl(esw, vport); 822 } 823 824 int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num, 825 enum mlx5_eswitch_vport_event enabled_events) 826 { 827 struct mlx5_vport *vport; 828 int ret; 829 830 vport = mlx5_eswitch_get_vport(esw, vport_num); 831 if (IS_ERR(vport)) 832 return PTR_ERR(vport); 833 834 mutex_lock(&esw->state_lock); 835 WARN_ON(vport->enabled); 836 837 esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num); 838 839 ret = esw_vport_setup(esw, vport); 840 if (ret) 841 goto done; 842 843 /* Sync with current vport context */ 844 vport->enabled_events = enabled_events; 845 vport->enabled = true; 846 847 /* Esw manager is trusted by default. Host PF (vport 0) is trusted as well 848 * in smartNIC as it's a vport group manager. 849 */ 850 if (mlx5_esw_is_manager_vport(esw, vport_num) || 851 (!vport_num && mlx5_core_is_ecpf(esw->dev))) 852 vport->info.trusted = true; 853 854 if (!mlx5_esw_is_manager_vport(esw, vport->vport) && 855 MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) { 856 ret = mlx5_esw_vport_vhca_id_set(esw, vport_num); 857 if (ret) 858 goto err_vhca_mapping; 859 } 860 861 /* External controller host PF has factory programmed MAC. 862 * Read it from the device. 863 */ 864 if (mlx5_core_is_ecpf(esw->dev) && vport_num == MLX5_VPORT_PF) 865 mlx5_query_nic_vport_mac_address(esw->dev, vport_num, true, vport->info.mac); 866 867 esw_vport_change_handle_locked(vport); 868 869 esw->enabled_vports++; 870 esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num); 871 done: 872 mutex_unlock(&esw->state_lock); 873 return ret; 874 875 err_vhca_mapping: 876 esw_vport_cleanup(esw, vport); 877 mutex_unlock(&esw->state_lock); 878 return ret; 879 } 880 881 void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num) 882 { 883 struct mlx5_vport *vport; 884 885 vport = mlx5_eswitch_get_vport(esw, vport_num); 886 if (IS_ERR(vport)) 887 return; 888 889 mutex_lock(&esw->state_lock); 890 if (!vport->enabled) 891 goto done; 892 893 esw_debug(esw->dev, "Disabling vport(%d)\n", vport_num); 894 /* Mark this vport as disabled to discard new events */ 895 vport->enabled = false; 896 897 /* Disable events from this vport */ 898 arm_vport_context_events_cmd(esw->dev, vport->vport, 0); 899 900 if (!mlx5_esw_is_manager_vport(esw, vport->vport) && 901 MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) 902 mlx5_esw_vport_vhca_id_clear(esw, vport_num); 903 904 /* We don't assume VFs will cleanup after themselves. 905 * Calling vport change handler while vport is disabled will cleanup 906 * the vport resources. 907 */ 908 esw_vport_change_handle_locked(vport); 909 vport->enabled_events = 0; 910 esw_vport_cleanup(esw, vport); 911 esw->enabled_vports--; 912 913 done: 914 mutex_unlock(&esw->state_lock); 915 } 916 917 static int eswitch_vport_event(struct notifier_block *nb, 918 unsigned long type, void *data) 919 { 920 struct mlx5_eswitch *esw = mlx5_nb_cof(nb, struct mlx5_eswitch, nb); 921 struct mlx5_eqe *eqe = data; 922 struct mlx5_vport *vport; 923 u16 vport_num; 924 925 vport_num = be16_to_cpu(eqe->data.vport_change.vport_num); 926 vport = mlx5_eswitch_get_vport(esw, vport_num); 927 if (!IS_ERR(vport)) 928 queue_work(esw->work_queue, &vport->vport_change_handler); 929 return NOTIFY_OK; 930 } 931 932 /** 933 * mlx5_esw_query_functions - Returns raw output about functions state 934 * @dev: Pointer to device to query 935 * 936 * mlx5_esw_query_functions() allocates and returns functions changed 937 * raw output memory pointer from device on success. Otherwise returns ERR_PTR. 938 * Caller must free the memory using kvfree() when valid pointer is returned. 939 */ 940 const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev) 941 { 942 int outlen = MLX5_ST_SZ_BYTES(query_esw_functions_out); 943 u32 in[MLX5_ST_SZ_DW(query_esw_functions_in)] = {}; 944 u32 *out; 945 int err; 946 947 out = kvzalloc(outlen, GFP_KERNEL); 948 if (!out) 949 return ERR_PTR(-ENOMEM); 950 951 MLX5_SET(query_esw_functions_in, in, opcode, 952 MLX5_CMD_OP_QUERY_ESW_FUNCTIONS); 953 954 err = mlx5_cmd_exec(dev, in, sizeof(in), out, outlen); 955 if (!err) 956 return out; 957 958 kvfree(out); 959 return ERR_PTR(err); 960 } 961 962 static void mlx5_eswitch_event_handlers_register(struct mlx5_eswitch *esw) 963 { 964 MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE); 965 mlx5_eq_notifier_register(esw->dev, &esw->nb); 966 967 if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev)) { 968 MLX5_NB_INIT(&esw->esw_funcs.nb, mlx5_esw_funcs_changed_handler, 969 ESW_FUNCTIONS_CHANGED); 970 mlx5_eq_notifier_register(esw->dev, &esw->esw_funcs.nb); 971 } 972 } 973 974 static void mlx5_eswitch_event_handlers_unregister(struct mlx5_eswitch *esw) 975 { 976 if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev)) 977 mlx5_eq_notifier_unregister(esw->dev, &esw->esw_funcs.nb); 978 979 mlx5_eq_notifier_unregister(esw->dev, &esw->nb); 980 981 flush_workqueue(esw->work_queue); 982 } 983 984 static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw) 985 { 986 struct mlx5_vport *vport; 987 unsigned long i; 988 989 mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) { 990 memset(&vport->qos, 0, sizeof(vport->qos)); 991 memset(&vport->info, 0, sizeof(vport->info)); 992 vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO; 993 } 994 } 995 996 /* Public E-Switch API */ 997 int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num, 998 enum mlx5_eswitch_vport_event enabled_events) 999 { 1000 int err; 1001 1002 err = mlx5_esw_vport_enable(esw, vport_num, enabled_events); 1003 if (err) 1004 return err; 1005 1006 mlx5_esw_vport_debugfs_create(esw, vport_num, false, 0); 1007 err = esw_offloads_load_rep(esw, vport_num); 1008 if (err) 1009 goto err_rep; 1010 1011 return err; 1012 1013 err_rep: 1014 mlx5_esw_vport_debugfs_destroy(esw, vport_num); 1015 mlx5_esw_vport_disable(esw, vport_num); 1016 return err; 1017 } 1018 1019 void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num) 1020 { 1021 esw_offloads_unload_rep(esw, vport_num); 1022 mlx5_esw_vport_debugfs_destroy(esw, vport_num); 1023 mlx5_esw_vport_disable(esw, vport_num); 1024 } 1025 1026 void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs) 1027 { 1028 struct mlx5_vport *vport; 1029 unsigned long i; 1030 1031 mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) { 1032 if (!vport->enabled) 1033 continue; 1034 mlx5_eswitch_unload_vport(esw, vport->vport); 1035 } 1036 } 1037 1038 int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs, 1039 enum mlx5_eswitch_vport_event enabled_events) 1040 { 1041 struct mlx5_vport *vport; 1042 unsigned long i; 1043 int err; 1044 1045 mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) { 1046 err = mlx5_eswitch_load_vport(esw, vport->vport, enabled_events); 1047 if (err) 1048 goto vf_err; 1049 } 1050 1051 return 0; 1052 1053 vf_err: 1054 mlx5_eswitch_unload_vf_vports(esw, num_vfs); 1055 return err; 1056 } 1057 1058 static int host_pf_enable_hca(struct mlx5_core_dev *dev) 1059 { 1060 if (!mlx5_core_is_ecpf(dev)) 1061 return 0; 1062 1063 /* Once vport and representor are ready, take out the external host PF 1064 * out of initializing state. Enabling HCA clears the iser->initializing 1065 * bit and host PF driver loading can progress. 1066 */ 1067 return mlx5_cmd_host_pf_enable_hca(dev); 1068 } 1069 1070 static void host_pf_disable_hca(struct mlx5_core_dev *dev) 1071 { 1072 if (!mlx5_core_is_ecpf(dev)) 1073 return; 1074 1075 mlx5_cmd_host_pf_disable_hca(dev); 1076 } 1077 1078 /* mlx5_eswitch_enable_pf_vf_vports() enables vports of PF, ECPF and VFs 1079 * whichever are present on the eswitch. 1080 */ 1081 int 1082 mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw, 1083 enum mlx5_eswitch_vport_event enabled_events) 1084 { 1085 int ret; 1086 1087 /* Enable PF vport */ 1088 ret = mlx5_eswitch_load_vport(esw, MLX5_VPORT_PF, enabled_events); 1089 if (ret) 1090 return ret; 1091 1092 /* Enable external host PF HCA */ 1093 ret = host_pf_enable_hca(esw->dev); 1094 if (ret) 1095 goto pf_hca_err; 1096 1097 /* Enable ECPF vport */ 1098 if (mlx5_ecpf_vport_exists(esw->dev)) { 1099 ret = mlx5_eswitch_load_vport(esw, MLX5_VPORT_ECPF, enabled_events); 1100 if (ret) 1101 goto ecpf_err; 1102 } 1103 1104 /* Enable VF vports */ 1105 ret = mlx5_eswitch_load_vf_vports(esw, esw->esw_funcs.num_vfs, 1106 enabled_events); 1107 if (ret) 1108 goto vf_err; 1109 return 0; 1110 1111 vf_err: 1112 if (mlx5_ecpf_vport_exists(esw->dev)) 1113 mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF); 1114 ecpf_err: 1115 host_pf_disable_hca(esw->dev); 1116 pf_hca_err: 1117 mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF); 1118 return ret; 1119 } 1120 1121 /* mlx5_eswitch_disable_pf_vf_vports() disables vports of PF, ECPF and VFs 1122 * whichever are previously enabled on the eswitch. 1123 */ 1124 void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw) 1125 { 1126 mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs); 1127 1128 if (mlx5_ecpf_vport_exists(esw->dev)) 1129 mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF); 1130 1131 host_pf_disable_hca(esw->dev); 1132 mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF); 1133 } 1134 1135 static void mlx5_eswitch_get_devlink_param(struct mlx5_eswitch *esw) 1136 { 1137 struct devlink *devlink = priv_to_devlink(esw->dev); 1138 union devlink_param_value val; 1139 int err; 1140 1141 err = devlink_param_driverinit_value_get(devlink, 1142 MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM, 1143 &val); 1144 if (!err) { 1145 esw->params.large_group_num = val.vu32; 1146 } else { 1147 esw_warn(esw->dev, 1148 "Devlink can't get param fdb_large_groups, uses default (%d).\n", 1149 ESW_OFFLOADS_DEFAULT_NUM_GROUPS); 1150 esw->params.large_group_num = ESW_OFFLOADS_DEFAULT_NUM_GROUPS; 1151 } 1152 } 1153 1154 static void 1155 mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, int num_vfs) 1156 { 1157 const u32 *out; 1158 1159 if (num_vfs < 0) 1160 return; 1161 1162 if (!mlx5_core_is_ecpf_esw_manager(esw->dev)) { 1163 esw->esw_funcs.num_vfs = num_vfs; 1164 return; 1165 } 1166 1167 out = mlx5_esw_query_functions(esw->dev); 1168 if (IS_ERR(out)) 1169 return; 1170 1171 esw->esw_funcs.num_vfs = MLX5_GET(query_esw_functions_out, out, 1172 host_params_context.host_num_of_vfs); 1173 kvfree(out); 1174 } 1175 1176 static void mlx5_esw_mode_change_notify(struct mlx5_eswitch *esw, u16 mode) 1177 { 1178 struct mlx5_esw_event_info info = {}; 1179 1180 info.new_mode = mode; 1181 1182 blocking_notifier_call_chain(&esw->n_head, 0, &info); 1183 } 1184 1185 static int mlx5_esw_acls_ns_init(struct mlx5_eswitch *esw) 1186 { 1187 struct mlx5_core_dev *dev = esw->dev; 1188 int total_vports; 1189 int err; 1190 1191 if (esw->flags & MLX5_ESWITCH_VPORT_ACL_NS_CREATED) 1192 return 0; 1193 1194 total_vports = mlx5_eswitch_get_total_vports(dev); 1195 1196 if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) { 1197 err = mlx5_fs_egress_acls_init(dev, total_vports); 1198 if (err) 1199 return err; 1200 } else { 1201 esw_warn(dev, "engress ACL is not supported by FW\n"); 1202 } 1203 1204 if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) { 1205 err = mlx5_fs_ingress_acls_init(dev, total_vports); 1206 if (err) 1207 goto err; 1208 } else { 1209 esw_warn(dev, "ingress ACL is not supported by FW\n"); 1210 } 1211 esw->flags |= MLX5_ESWITCH_VPORT_ACL_NS_CREATED; 1212 return 0; 1213 1214 err: 1215 if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) 1216 mlx5_fs_egress_acls_cleanup(dev); 1217 return err; 1218 } 1219 1220 static void mlx5_esw_acls_ns_cleanup(struct mlx5_eswitch *esw) 1221 { 1222 struct mlx5_core_dev *dev = esw->dev; 1223 1224 esw->flags &= ~MLX5_ESWITCH_VPORT_ACL_NS_CREATED; 1225 if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) 1226 mlx5_fs_ingress_acls_cleanup(dev); 1227 if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) 1228 mlx5_fs_egress_acls_cleanup(dev); 1229 } 1230 1231 /** 1232 * mlx5_eswitch_enable_locked - Enable eswitch 1233 * @esw: Pointer to eswitch 1234 * @num_vfs: Enable eswitch for given number of VFs. This is optional. 1235 * Valid value are 0, > 0 and MLX5_ESWITCH_IGNORE_NUM_VFS. 1236 * Caller should pass num_vfs > 0 when enabling eswitch for 1237 * vf vports. Caller should pass num_vfs = 0, when eswitch 1238 * is enabled without sriov VFs or when caller 1239 * is unaware of the sriov state of the host PF on ECPF based 1240 * eswitch. Caller should pass < 0 when num_vfs should be 1241 * completely ignored. This is typically the case when eswitch 1242 * is enabled without sriov regardless of PF/ECPF system. 1243 * mlx5_eswitch_enable_locked() Enables eswitch in either legacy or offloads 1244 * mode. If num_vfs >=0 is provided, it setup VF related eswitch vports. 1245 * It returns 0 on success or error code on failure. 1246 */ 1247 int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs) 1248 { 1249 int err; 1250 1251 lockdep_assert_held(&esw->mode_lock); 1252 1253 if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) { 1254 esw_warn(esw->dev, "FDB is not supported, aborting ...\n"); 1255 return -EOPNOTSUPP; 1256 } 1257 1258 mlx5_eswitch_get_devlink_param(esw); 1259 1260 err = mlx5_esw_acls_ns_init(esw); 1261 if (err) 1262 return err; 1263 1264 mlx5_eswitch_update_num_of_vfs(esw, num_vfs); 1265 1266 if (esw->mode == MLX5_ESWITCH_LEGACY) { 1267 err = esw_legacy_enable(esw); 1268 } else { 1269 mlx5_rescan_drivers(esw->dev); 1270 err = esw_offloads_enable(esw); 1271 } 1272 1273 if (err) 1274 goto abort; 1275 1276 esw->fdb_table.flags |= MLX5_ESW_FDB_CREATED; 1277 1278 mlx5_eswitch_event_handlers_register(esw); 1279 1280 esw_info(esw->dev, "Enable: mode(%s), nvfs(%d), active vports(%d)\n", 1281 esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS", 1282 esw->esw_funcs.num_vfs, esw->enabled_vports); 1283 1284 mlx5_esw_mode_change_notify(esw, esw->mode); 1285 1286 return 0; 1287 1288 abort: 1289 mlx5_esw_acls_ns_cleanup(esw); 1290 return err; 1291 } 1292 1293 /** 1294 * mlx5_eswitch_enable - Enable eswitch 1295 * @esw: Pointer to eswitch 1296 * @num_vfs: Enable eswitch switch for given number of VFs. 1297 * Caller must pass num_vfs > 0 when enabling eswitch for 1298 * vf vports. 1299 * mlx5_eswitch_enable() returns 0 on success or error code on failure. 1300 */ 1301 int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) 1302 { 1303 bool toggle_lag; 1304 int ret; 1305 1306 if (!mlx5_esw_allowed(esw)) 1307 return 0; 1308 1309 devl_assert_locked(priv_to_devlink(esw->dev)); 1310 1311 toggle_lag = !mlx5_esw_is_fdb_created(esw); 1312 1313 if (toggle_lag) 1314 mlx5_lag_disable_change(esw->dev); 1315 1316 down_write(&esw->mode_lock); 1317 if (!mlx5_esw_is_fdb_created(esw)) { 1318 ret = mlx5_eswitch_enable_locked(esw, num_vfs); 1319 } else { 1320 enum mlx5_eswitch_vport_event vport_events; 1321 1322 vport_events = (esw->mode == MLX5_ESWITCH_LEGACY) ? 1323 MLX5_LEGACY_SRIOV_VPORT_EVENTS : MLX5_VPORT_UC_ADDR_CHANGE; 1324 ret = mlx5_eswitch_load_vf_vports(esw, num_vfs, vport_events); 1325 if (!ret) 1326 esw->esw_funcs.num_vfs = num_vfs; 1327 } 1328 up_write(&esw->mode_lock); 1329 1330 if (toggle_lag) 1331 mlx5_lag_enable_change(esw->dev); 1332 1333 return ret; 1334 } 1335 1336 /* When disabling sriov, free driver level resources. */ 1337 void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf) 1338 { 1339 if (!mlx5_esw_allowed(esw)) 1340 return; 1341 1342 devl_assert_locked(priv_to_devlink(esw->dev)); 1343 down_write(&esw->mode_lock); 1344 /* If driver is unloaded, this function is called twice by remove_one() 1345 * and mlx5_unload(). Prevent the second call. 1346 */ 1347 if (!esw->esw_funcs.num_vfs && !clear_vf) 1348 goto unlock; 1349 1350 esw_info(esw->dev, "Unload vfs: mode(%s), nvfs(%d), active vports(%d)\n", 1351 esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS", 1352 esw->esw_funcs.num_vfs, esw->enabled_vports); 1353 1354 mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs); 1355 if (clear_vf) 1356 mlx5_eswitch_clear_vf_vports_info(esw); 1357 /* If disabling sriov in switchdev mode, free meta rules here 1358 * because it depends on num_vfs. 1359 */ 1360 if (esw->mode == MLX5_ESWITCH_OFFLOADS) { 1361 struct devlink *devlink = priv_to_devlink(esw->dev); 1362 1363 devl_rate_nodes_destroy(devlink); 1364 } 1365 1366 esw->esw_funcs.num_vfs = 0; 1367 1368 unlock: 1369 up_write(&esw->mode_lock); 1370 } 1371 1372 /* Free resources for corresponding eswitch mode. It is called by devlink 1373 * when changing eswitch mode or modprobe when unloading driver. 1374 */ 1375 void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw) 1376 { 1377 struct devlink *devlink = priv_to_devlink(esw->dev); 1378 1379 /* Notify eswitch users that it is exiting from current mode. 1380 * So that it can do necessary cleanup before the eswitch is disabled. 1381 */ 1382 mlx5_esw_mode_change_notify(esw, MLX5_ESWITCH_LEGACY); 1383 1384 mlx5_eswitch_event_handlers_unregister(esw); 1385 1386 esw_info(esw->dev, "Disable: mode(%s), nvfs(%d), active vports(%d)\n", 1387 esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS", 1388 esw->esw_funcs.num_vfs, esw->enabled_vports); 1389 1390 if (esw->fdb_table.flags & MLX5_ESW_FDB_CREATED) { 1391 esw->fdb_table.flags &= ~MLX5_ESW_FDB_CREATED; 1392 if (esw->mode == MLX5_ESWITCH_OFFLOADS) 1393 esw_offloads_disable(esw); 1394 else if (esw->mode == MLX5_ESWITCH_LEGACY) 1395 esw_legacy_disable(esw); 1396 mlx5_esw_acls_ns_cleanup(esw); 1397 } 1398 1399 if (esw->mode == MLX5_ESWITCH_OFFLOADS) 1400 devl_rate_nodes_destroy(devlink); 1401 } 1402 1403 void mlx5_eswitch_disable(struct mlx5_eswitch *esw) 1404 { 1405 if (!mlx5_esw_allowed(esw)) 1406 return; 1407 1408 devl_assert_locked(priv_to_devlink(esw->dev)); 1409 mlx5_lag_disable_change(esw->dev); 1410 down_write(&esw->mode_lock); 1411 mlx5_eswitch_disable_locked(esw); 1412 up_write(&esw->mode_lock); 1413 mlx5_lag_enable_change(esw->dev); 1414 } 1415 1416 static int mlx5_query_hca_cap_host_pf(struct mlx5_core_dev *dev, void *out) 1417 { 1418 u16 opmod = (MLX5_CAP_GENERAL << 1) | (HCA_CAP_OPMOD_GET_MAX & 0x01); 1419 u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)] = {}; 1420 1421 MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP); 1422 MLX5_SET(query_hca_cap_in, in, op_mod, opmod); 1423 MLX5_SET(query_hca_cap_in, in, function_id, MLX5_VPORT_PF); 1424 MLX5_SET(query_hca_cap_in, in, other_function, true); 1425 return mlx5_cmd_exec_inout(dev, query_hca_cap, in, out); 1426 } 1427 1428 int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *sf_base_id) 1429 1430 { 1431 int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out); 1432 void *query_ctx; 1433 void *hca_caps; 1434 int err; 1435 1436 if (!mlx5_core_is_ecpf(dev)) { 1437 *max_sfs = 0; 1438 return 0; 1439 } 1440 1441 query_ctx = kzalloc(query_out_sz, GFP_KERNEL); 1442 if (!query_ctx) 1443 return -ENOMEM; 1444 1445 err = mlx5_query_hca_cap_host_pf(dev, query_ctx); 1446 if (err) 1447 goto out_free; 1448 1449 hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability); 1450 *max_sfs = MLX5_GET(cmd_hca_cap, hca_caps, max_num_sf); 1451 *sf_base_id = MLX5_GET(cmd_hca_cap, hca_caps, sf_base_id); 1452 1453 out_free: 1454 kfree(query_ctx); 1455 return err; 1456 } 1457 1458 static int mlx5_esw_vport_alloc(struct mlx5_eswitch *esw, struct mlx5_core_dev *dev, 1459 int index, u16 vport_num) 1460 { 1461 struct mlx5_vport *vport; 1462 int err; 1463 1464 vport = kzalloc(sizeof(*vport), GFP_KERNEL); 1465 if (!vport) 1466 return -ENOMEM; 1467 1468 vport->dev = esw->dev; 1469 vport->vport = vport_num; 1470 vport->index = index; 1471 vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO; 1472 INIT_WORK(&vport->vport_change_handler, esw_vport_change_handler); 1473 err = xa_insert(&esw->vports, vport_num, vport, GFP_KERNEL); 1474 if (err) 1475 goto insert_err; 1476 1477 esw->total_vports++; 1478 return 0; 1479 1480 insert_err: 1481 kfree(vport); 1482 return err; 1483 } 1484 1485 static void mlx5_esw_vport_free(struct mlx5_eswitch *esw, struct mlx5_vport *vport) 1486 { 1487 xa_erase(&esw->vports, vport->vport); 1488 kfree(vport); 1489 } 1490 1491 static void mlx5_esw_vports_cleanup(struct mlx5_eswitch *esw) 1492 { 1493 struct mlx5_vport *vport; 1494 unsigned long i; 1495 1496 mlx5_esw_for_each_vport(esw, i, vport) 1497 mlx5_esw_vport_free(esw, vport); 1498 xa_destroy(&esw->vports); 1499 } 1500 1501 static int mlx5_esw_vports_init(struct mlx5_eswitch *esw) 1502 { 1503 struct mlx5_core_dev *dev = esw->dev; 1504 u16 max_host_pf_sfs; 1505 u16 base_sf_num; 1506 int idx = 0; 1507 int err; 1508 int i; 1509 1510 xa_init(&esw->vports); 1511 1512 err = mlx5_esw_vport_alloc(esw, dev, idx, MLX5_VPORT_PF); 1513 if (err) 1514 goto err; 1515 if (esw->first_host_vport == MLX5_VPORT_PF) 1516 xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN); 1517 idx++; 1518 1519 for (i = 0; i < mlx5_core_max_vfs(dev); i++) { 1520 err = mlx5_esw_vport_alloc(esw, dev, idx, idx); 1521 if (err) 1522 goto err; 1523 xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_VF); 1524 xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN); 1525 idx++; 1526 } 1527 base_sf_num = mlx5_sf_start_function_id(dev); 1528 for (i = 0; i < mlx5_sf_max_functions(dev); i++) { 1529 err = mlx5_esw_vport_alloc(esw, dev, idx, base_sf_num + i); 1530 if (err) 1531 goto err; 1532 xa_set_mark(&esw->vports, base_sf_num + i, MLX5_ESW_VPT_SF); 1533 idx++; 1534 } 1535 1536 err = mlx5_esw_sf_max_hpf_functions(dev, &max_host_pf_sfs, &base_sf_num); 1537 if (err) 1538 goto err; 1539 for (i = 0; i < max_host_pf_sfs; i++) { 1540 err = mlx5_esw_vport_alloc(esw, dev, idx, base_sf_num + i); 1541 if (err) 1542 goto err; 1543 xa_set_mark(&esw->vports, base_sf_num + i, MLX5_ESW_VPT_SF); 1544 idx++; 1545 } 1546 1547 if (mlx5_ecpf_vport_exists(dev)) { 1548 err = mlx5_esw_vport_alloc(esw, dev, idx, MLX5_VPORT_ECPF); 1549 if (err) 1550 goto err; 1551 idx++; 1552 } 1553 err = mlx5_esw_vport_alloc(esw, dev, idx, MLX5_VPORT_UPLINK); 1554 if (err) 1555 goto err; 1556 return 0; 1557 1558 err: 1559 mlx5_esw_vports_cleanup(esw); 1560 return err; 1561 } 1562 1563 int mlx5_eswitch_init(struct mlx5_core_dev *dev) 1564 { 1565 struct mlx5_eswitch *esw; 1566 int err; 1567 1568 if (!MLX5_VPORT_MANAGER(dev)) 1569 return 0; 1570 1571 esw = kzalloc(sizeof(*esw), GFP_KERNEL); 1572 if (!esw) 1573 return -ENOMEM; 1574 1575 esw->dev = dev; 1576 esw->manager_vport = mlx5_eswitch_manager_vport(dev); 1577 esw->first_host_vport = mlx5_eswitch_first_host_vport_num(dev); 1578 1579 esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq"); 1580 if (!esw->work_queue) { 1581 err = -ENOMEM; 1582 goto abort; 1583 } 1584 1585 err = mlx5_esw_vports_init(esw); 1586 if (err) 1587 goto abort; 1588 1589 err = esw_offloads_init_reps(esw); 1590 if (err) 1591 goto reps_err; 1592 1593 mutex_init(&esw->offloads.encap_tbl_lock); 1594 hash_init(esw->offloads.encap_tbl); 1595 mutex_init(&esw->offloads.decap_tbl_lock); 1596 hash_init(esw->offloads.decap_tbl); 1597 mlx5e_mod_hdr_tbl_init(&esw->offloads.mod_hdr); 1598 atomic64_set(&esw->offloads.num_flows, 0); 1599 ida_init(&esw->offloads.vport_metadata_ida); 1600 xa_init_flags(&esw->offloads.vhca_map, XA_FLAGS_ALLOC); 1601 mutex_init(&esw->state_lock); 1602 init_rwsem(&esw->mode_lock); 1603 refcount_set(&esw->qos.refcnt, 0); 1604 1605 esw->enabled_vports = 0; 1606 esw->mode = MLX5_ESWITCH_LEGACY; 1607 esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE; 1608 if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) && 1609 MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)) 1610 esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC; 1611 else 1612 esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE; 1613 if (MLX5_ESWITCH_MANAGER(dev) && 1614 mlx5_esw_vport_match_metadata_supported(esw)) 1615 esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA; 1616 1617 dev->priv.eswitch = esw; 1618 BLOCKING_INIT_NOTIFIER_HEAD(&esw->n_head); 1619 1620 esw->dbgfs = debugfs_create_dir("esw", mlx5_debugfs_get_dev_root(esw->dev)); 1621 esw_info(dev, 1622 "Total vports %d, per vport: max uc(%d) max mc(%d)\n", 1623 esw->total_vports, 1624 MLX5_MAX_UC_PER_VPORT(dev), 1625 MLX5_MAX_MC_PER_VPORT(dev)); 1626 return 0; 1627 1628 reps_err: 1629 mlx5_esw_vports_cleanup(esw); 1630 abort: 1631 if (esw->work_queue) 1632 destroy_workqueue(esw->work_queue); 1633 kfree(esw); 1634 return err; 1635 } 1636 1637 void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) 1638 { 1639 if (!esw || !MLX5_VPORT_MANAGER(esw->dev)) 1640 return; 1641 1642 esw_info(esw->dev, "cleanup\n"); 1643 1644 debugfs_remove_recursive(esw->dbgfs); 1645 esw->dev->priv.eswitch = NULL; 1646 destroy_workqueue(esw->work_queue); 1647 WARN_ON(refcount_read(&esw->qos.refcnt)); 1648 mutex_destroy(&esw->state_lock); 1649 WARN_ON(!xa_empty(&esw->offloads.vhca_map)); 1650 xa_destroy(&esw->offloads.vhca_map); 1651 ida_destroy(&esw->offloads.vport_metadata_ida); 1652 mlx5e_mod_hdr_tbl_destroy(&esw->offloads.mod_hdr); 1653 mutex_destroy(&esw->offloads.encap_tbl_lock); 1654 mutex_destroy(&esw->offloads.decap_tbl_lock); 1655 esw_offloads_cleanup_reps(esw); 1656 mlx5_esw_vports_cleanup(esw); 1657 kfree(esw); 1658 } 1659 1660 /* Vport Administration */ 1661 static int 1662 mlx5_esw_set_vport_mac_locked(struct mlx5_eswitch *esw, 1663 struct mlx5_vport *evport, const u8 *mac) 1664 { 1665 u16 vport_num = evport->vport; 1666 u64 node_guid; 1667 int err = 0; 1668 1669 if (is_multicast_ether_addr(mac)) 1670 return -EINVAL; 1671 1672 if (evport->info.spoofchk && !is_valid_ether_addr(mac)) 1673 mlx5_core_warn(esw->dev, 1674 "Set invalid MAC while spoofchk is on, vport(%d)\n", 1675 vport_num); 1676 1677 err = mlx5_modify_nic_vport_mac_address(esw->dev, vport_num, mac); 1678 if (err) { 1679 mlx5_core_warn(esw->dev, 1680 "Failed to mlx5_modify_nic_vport_mac vport(%d) err=(%d)\n", 1681 vport_num, err); 1682 return err; 1683 } 1684 1685 node_guid_gen_from_mac(&node_guid, mac); 1686 err = mlx5_modify_nic_vport_node_guid(esw->dev, vport_num, node_guid); 1687 if (err) 1688 mlx5_core_warn(esw->dev, 1689 "Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n", 1690 vport_num, err); 1691 1692 ether_addr_copy(evport->info.mac, mac); 1693 evport->info.node_guid = node_guid; 1694 if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY) 1695 err = esw_acl_ingress_lgcy_setup(esw, evport); 1696 1697 return err; 1698 } 1699 1700 int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, 1701 u16 vport, const u8 *mac) 1702 { 1703 struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); 1704 int err = 0; 1705 1706 if (IS_ERR(evport)) 1707 return PTR_ERR(evport); 1708 1709 mutex_lock(&esw->state_lock); 1710 err = mlx5_esw_set_vport_mac_locked(esw, evport, mac); 1711 mutex_unlock(&esw->state_lock); 1712 return err; 1713 } 1714 1715 static bool mlx5_esw_check_port_type(struct mlx5_eswitch *esw, u16 vport_num, xa_mark_t mark) 1716 { 1717 struct mlx5_vport *vport; 1718 1719 vport = mlx5_eswitch_get_vport(esw, vport_num); 1720 if (IS_ERR(vport)) 1721 return false; 1722 1723 return xa_get_mark(&esw->vports, vport_num, mark); 1724 } 1725 1726 bool mlx5_eswitch_is_vf_vport(struct mlx5_eswitch *esw, u16 vport_num) 1727 { 1728 return mlx5_esw_check_port_type(esw, vport_num, MLX5_ESW_VPT_VF); 1729 } 1730 1731 bool mlx5_esw_is_sf_vport(struct mlx5_eswitch *esw, u16 vport_num) 1732 { 1733 return mlx5_esw_check_port_type(esw, vport_num, MLX5_ESW_VPT_SF); 1734 } 1735 1736 int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, 1737 u16 vport, int link_state) 1738 { 1739 struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); 1740 int opmod = MLX5_VPORT_STATE_OP_MOD_ESW_VPORT; 1741 int other_vport = 1; 1742 int err = 0; 1743 1744 if (!mlx5_esw_allowed(esw)) 1745 return -EPERM; 1746 if (IS_ERR(evport)) 1747 return PTR_ERR(evport); 1748 1749 if (vport == MLX5_VPORT_UPLINK) { 1750 opmod = MLX5_VPORT_STATE_OP_MOD_UPLINK; 1751 other_vport = 0; 1752 vport = 0; 1753 } 1754 mutex_lock(&esw->state_lock); 1755 if (esw->mode != MLX5_ESWITCH_LEGACY) { 1756 err = -EOPNOTSUPP; 1757 goto unlock; 1758 } 1759 1760 err = mlx5_modify_vport_admin_state(esw->dev, opmod, vport, other_vport, link_state); 1761 if (err) { 1762 mlx5_core_warn(esw->dev, "Failed to set vport %d link state, opmod = %d, err = %d", 1763 vport, opmod, err); 1764 goto unlock; 1765 } 1766 1767 evport->info.link_state = link_state; 1768 1769 unlock: 1770 mutex_unlock(&esw->state_lock); 1771 return err; 1772 } 1773 1774 int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, 1775 u16 vport, struct ifla_vf_info *ivi) 1776 { 1777 struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); 1778 1779 if (IS_ERR(evport)) 1780 return PTR_ERR(evport); 1781 1782 memset(ivi, 0, sizeof(*ivi)); 1783 ivi->vf = vport - 1; 1784 1785 mutex_lock(&esw->state_lock); 1786 ether_addr_copy(ivi->mac, evport->info.mac); 1787 ivi->linkstate = evport->info.link_state; 1788 ivi->vlan = evport->info.vlan; 1789 ivi->qos = evport->info.qos; 1790 ivi->spoofchk = evport->info.spoofchk; 1791 ivi->trusted = evport->info.trusted; 1792 if (evport->qos.enabled) { 1793 ivi->min_tx_rate = evport->qos.min_rate; 1794 ivi->max_tx_rate = evport->qos.max_rate; 1795 } 1796 mutex_unlock(&esw->state_lock); 1797 1798 return 0; 1799 } 1800 1801 int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, 1802 u16 vport, u16 vlan, u8 qos, u8 set_flags) 1803 { 1804 struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); 1805 int err = 0; 1806 1807 if (IS_ERR(evport)) 1808 return PTR_ERR(evport); 1809 if (vlan > 4095 || qos > 7) 1810 return -EINVAL; 1811 1812 err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags); 1813 if (err) 1814 return err; 1815 1816 evport->info.vlan = vlan; 1817 evport->info.qos = qos; 1818 if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY) { 1819 err = esw_acl_ingress_lgcy_setup(esw, evport); 1820 if (err) 1821 return err; 1822 err = esw_acl_egress_lgcy_setup(esw, evport); 1823 } 1824 1825 return err; 1826 } 1827 1828 int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, 1829 u16 vport_num, 1830 struct ifla_vf_stats *vf_stats) 1831 { 1832 struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num); 1833 int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out); 1834 u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {}; 1835 struct mlx5_vport_drop_stats stats = {}; 1836 int err = 0; 1837 u32 *out; 1838 1839 if (IS_ERR(vport)) 1840 return PTR_ERR(vport); 1841 1842 out = kvzalloc(outlen, GFP_KERNEL); 1843 if (!out) 1844 return -ENOMEM; 1845 1846 MLX5_SET(query_vport_counter_in, in, opcode, 1847 MLX5_CMD_OP_QUERY_VPORT_COUNTER); 1848 MLX5_SET(query_vport_counter_in, in, op_mod, 0); 1849 MLX5_SET(query_vport_counter_in, in, vport_number, vport->vport); 1850 MLX5_SET(query_vport_counter_in, in, other_vport, 1); 1851 1852 err = mlx5_cmd_exec_inout(esw->dev, query_vport_counter, in, out); 1853 if (err) 1854 goto free_out; 1855 1856 #define MLX5_GET_CTR(p, x) \ 1857 MLX5_GET64(query_vport_counter_out, p, x) 1858 1859 memset(vf_stats, 0, sizeof(*vf_stats)); 1860 vf_stats->rx_packets = 1861 MLX5_GET_CTR(out, received_eth_unicast.packets) + 1862 MLX5_GET_CTR(out, received_ib_unicast.packets) + 1863 MLX5_GET_CTR(out, received_eth_multicast.packets) + 1864 MLX5_GET_CTR(out, received_ib_multicast.packets) + 1865 MLX5_GET_CTR(out, received_eth_broadcast.packets); 1866 1867 vf_stats->rx_bytes = 1868 MLX5_GET_CTR(out, received_eth_unicast.octets) + 1869 MLX5_GET_CTR(out, received_ib_unicast.octets) + 1870 MLX5_GET_CTR(out, received_eth_multicast.octets) + 1871 MLX5_GET_CTR(out, received_ib_multicast.octets) + 1872 MLX5_GET_CTR(out, received_eth_broadcast.octets); 1873 1874 vf_stats->tx_packets = 1875 MLX5_GET_CTR(out, transmitted_eth_unicast.packets) + 1876 MLX5_GET_CTR(out, transmitted_ib_unicast.packets) + 1877 MLX5_GET_CTR(out, transmitted_eth_multicast.packets) + 1878 MLX5_GET_CTR(out, transmitted_ib_multicast.packets) + 1879 MLX5_GET_CTR(out, transmitted_eth_broadcast.packets); 1880 1881 vf_stats->tx_bytes = 1882 MLX5_GET_CTR(out, transmitted_eth_unicast.octets) + 1883 MLX5_GET_CTR(out, transmitted_ib_unicast.octets) + 1884 MLX5_GET_CTR(out, transmitted_eth_multicast.octets) + 1885 MLX5_GET_CTR(out, transmitted_ib_multicast.octets) + 1886 MLX5_GET_CTR(out, transmitted_eth_broadcast.octets); 1887 1888 vf_stats->multicast = 1889 MLX5_GET_CTR(out, received_eth_multicast.packets) + 1890 MLX5_GET_CTR(out, received_ib_multicast.packets); 1891 1892 vf_stats->broadcast = 1893 MLX5_GET_CTR(out, received_eth_broadcast.packets); 1894 1895 err = mlx5_esw_query_vport_drop_stats(esw->dev, vport, &stats); 1896 if (err) 1897 goto free_out; 1898 vf_stats->rx_dropped = stats.rx_dropped; 1899 vf_stats->tx_dropped = stats.tx_dropped; 1900 1901 free_out: 1902 kvfree(out); 1903 return err; 1904 } 1905 1906 u8 mlx5_eswitch_mode(const struct mlx5_core_dev *dev) 1907 { 1908 struct mlx5_eswitch *esw = dev->priv.eswitch; 1909 1910 return mlx5_esw_allowed(esw) ? esw->mode : MLX5_ESWITCH_LEGACY; 1911 } 1912 EXPORT_SYMBOL_GPL(mlx5_eswitch_mode); 1913 1914 enum devlink_eswitch_encap_mode 1915 mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev) 1916 { 1917 struct mlx5_eswitch *esw; 1918 1919 esw = dev->priv.eswitch; 1920 return (mlx5_eswitch_mode(dev) == MLX5_ESWITCH_OFFLOADS) ? esw->offloads.encap : 1921 DEVLINK_ESWITCH_ENCAP_MODE_NONE; 1922 } 1923 EXPORT_SYMBOL(mlx5_eswitch_get_encap_mode); 1924 1925 bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0, 1926 struct mlx5_core_dev *dev1) 1927 { 1928 return (dev0->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS && 1929 dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS); 1930 } 1931 1932 int mlx5_esw_event_notifier_register(struct mlx5_eswitch *esw, struct notifier_block *nb) 1933 { 1934 return blocking_notifier_chain_register(&esw->n_head, nb); 1935 } 1936 1937 void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch *esw, struct notifier_block *nb) 1938 { 1939 blocking_notifier_chain_unregister(&esw->n_head, nb); 1940 } 1941 1942 /** 1943 * mlx5_esw_hold() - Try to take a read lock on esw mode lock. 1944 * @mdev: mlx5 core device. 1945 * 1946 * Should be called by esw resources callers. 1947 * 1948 * Return: true on success or false. 1949 */ 1950 bool mlx5_esw_hold(struct mlx5_core_dev *mdev) 1951 { 1952 struct mlx5_eswitch *esw = mdev->priv.eswitch; 1953 1954 /* e.g. VF doesn't have eswitch so nothing to do */ 1955 if (!mlx5_esw_allowed(esw)) 1956 return true; 1957 1958 if (down_read_trylock(&esw->mode_lock) != 0) 1959 return true; 1960 1961 return false; 1962 } 1963 1964 /** 1965 * mlx5_esw_release() - Release a read lock on esw mode lock. 1966 * @mdev: mlx5 core device. 1967 */ 1968 void mlx5_esw_release(struct mlx5_core_dev *mdev) 1969 { 1970 struct mlx5_eswitch *esw = mdev->priv.eswitch; 1971 1972 if (mlx5_esw_allowed(esw)) 1973 up_read(&esw->mode_lock); 1974 } 1975 1976 /** 1977 * mlx5_esw_get() - Increase esw user count. 1978 * @mdev: mlx5 core device. 1979 */ 1980 void mlx5_esw_get(struct mlx5_core_dev *mdev) 1981 { 1982 struct mlx5_eswitch *esw = mdev->priv.eswitch; 1983 1984 if (mlx5_esw_allowed(esw)) 1985 atomic64_inc(&esw->user_count); 1986 } 1987 1988 /** 1989 * mlx5_esw_put() - Decrease esw user count. 1990 * @mdev: mlx5 core device. 1991 */ 1992 void mlx5_esw_put(struct mlx5_core_dev *mdev) 1993 { 1994 struct mlx5_eswitch *esw = mdev->priv.eswitch; 1995 1996 if (mlx5_esw_allowed(esw)) 1997 atomic64_dec_if_positive(&esw->user_count); 1998 } 1999 2000 /** 2001 * mlx5_esw_try_lock() - Take a write lock on esw mode lock. 2002 * @esw: eswitch device. 2003 * 2004 * Should be called by esw mode change routine. 2005 * 2006 * Return: 2007 * * 0 - esw mode if successfully locked and refcount is 0. 2008 * * -EBUSY - refcount is not 0. 2009 * * -EINVAL - In the middle of switching mode or lock is already held. 2010 */ 2011 int mlx5_esw_try_lock(struct mlx5_eswitch *esw) 2012 { 2013 if (down_write_trylock(&esw->mode_lock) == 0) 2014 return -EINVAL; 2015 2016 if (atomic64_read(&esw->user_count) > 0) { 2017 up_write(&esw->mode_lock); 2018 return -EBUSY; 2019 } 2020 2021 return esw->mode; 2022 } 2023 2024 /** 2025 * mlx5_esw_unlock() - Release write lock on esw mode lock 2026 * @esw: eswitch device. 2027 */ 2028 void mlx5_esw_unlock(struct mlx5_eswitch *esw) 2029 { 2030 up_write(&esw->mode_lock); 2031 } 2032 2033 /** 2034 * mlx5_eswitch_get_total_vports - Get total vports of the eswitch 2035 * 2036 * @dev: Pointer to core device 2037 * 2038 * mlx5_eswitch_get_total_vports returns total number of eswitch vports. 2039 */ 2040 u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev) 2041 { 2042 struct mlx5_eswitch *esw; 2043 2044 esw = dev->priv.eswitch; 2045 return mlx5_esw_allowed(esw) ? esw->total_vports : 0; 2046 } 2047 EXPORT_SYMBOL_GPL(mlx5_eswitch_get_total_vports); 2048 2049 /** 2050 * mlx5_eswitch_get_core_dev - Get the mdev device 2051 * @esw : eswitch device. 2052 * 2053 * Return the mellanox core device which manages the eswitch. 2054 */ 2055 struct mlx5_core_dev *mlx5_eswitch_get_core_dev(struct mlx5_eswitch *esw) 2056 { 2057 return mlx5_esw_allowed(esw) ? esw->dev : NULL; 2058 } 2059 EXPORT_SYMBOL(mlx5_eswitch_get_core_dev); 2060