1 /* 2 * Copyright (c) 2015, Mellanox Technologies. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 31 */ 32 33 #include <linux/etherdevice.h> 34 #include <linux/mlx5/driver.h> 35 #include <linux/mlx5/mlx5_ifc.h> 36 #include <linux/mlx5/vport.h> 37 #include <linux/mlx5/fs.h> 38 #include "esw/acl/lgcy.h" 39 #include "mlx5_core.h" 40 #include "lib/eq.h" 41 #include "eswitch.h" 42 #include "fs_core.h" 43 #include "devlink.h" 44 #include "ecpf.h" 45 #include "en/mod_hdr.h" 46 47 enum { 48 MLX5_ACTION_NONE = 0, 49 MLX5_ACTION_ADD = 1, 50 MLX5_ACTION_DEL = 2, 51 }; 52 53 /* Vport UC/MC hash node */ 54 struct vport_addr { 55 struct l2addr_node node; 56 u8 action; 57 u16 vport; 58 struct mlx5_flow_handle *flow_rule; 59 bool mpfs; /* UC MAC was added to MPFs */ 60 /* A flag indicating that mac was added due to mc promiscuous vport */ 61 bool mc_promisc; 62 }; 63 64 static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw); 65 static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw); 66 67 static int mlx5_eswitch_check(const struct mlx5_core_dev *dev) 68 { 69 if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) 70 return -EOPNOTSUPP; 71 72 if (!MLX5_ESWITCH_MANAGER(dev)) 73 return -EOPNOTSUPP; 74 75 return 0; 76 } 77 78 struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink) 79 { 80 struct mlx5_core_dev *dev = devlink_priv(devlink); 81 int err; 82 83 err = mlx5_eswitch_check(dev); 84 if (err) 85 return ERR_PTR(err); 86 87 return dev->priv.eswitch; 88 } 89 90 struct mlx5_vport *__must_check 91 mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num) 92 { 93 u16 idx; 94 95 if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager)) 96 return ERR_PTR(-EPERM); 97 98 idx = mlx5_eswitch_vport_num_to_index(esw, vport_num); 99 100 if (idx > esw->total_vports - 1) { 101 esw_debug(esw->dev, "vport out of range: num(0x%x), idx(0x%x)\n", 102 vport_num, idx); 103 return ERR_PTR(-EINVAL); 104 } 105 106 return &esw->vports[idx]; 107 } 108 109 static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport, 110 u32 events_mask) 111 { 112 u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {}; 113 void *nic_vport_ctx; 114 115 MLX5_SET(modify_nic_vport_context_in, in, 116 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT); 117 MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1); 118 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport); 119 MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1); 120 nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, 121 in, nic_vport_context); 122 123 MLX5_SET(nic_vport_context, nic_vport_ctx, arm_change_event, 1); 124 125 if (events_mask & MLX5_VPORT_UC_ADDR_CHANGE) 126 MLX5_SET(nic_vport_context, nic_vport_ctx, 127 event_on_uc_address_change, 1); 128 if (events_mask & MLX5_VPORT_MC_ADDR_CHANGE) 129 MLX5_SET(nic_vport_context, nic_vport_ctx, 130 event_on_mc_address_change, 1); 131 if (events_mask & MLX5_VPORT_PROMISC_CHANGE) 132 MLX5_SET(nic_vport_context, nic_vport_ctx, 133 event_on_promisc_change, 1); 134 135 return mlx5_cmd_exec_in(dev, modify_nic_vport_context, in); 136 } 137 138 /* E-Switch vport context HW commands */ 139 int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport, 140 bool other_vport, void *in) 141 { 142 MLX5_SET(modify_esw_vport_context_in, in, opcode, 143 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT); 144 MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport); 145 MLX5_SET(modify_esw_vport_context_in, in, other_vport, other_vport); 146 return mlx5_cmd_exec_in(dev, modify_esw_vport_context, in); 147 } 148 149 static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u16 vport, 150 u16 vlan, u8 qos, u8 set_flags) 151 { 152 u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {}; 153 154 if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) || 155 !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist)) 156 return -EOPNOTSUPP; 157 158 esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%x\n", 159 vport, vlan, qos, set_flags); 160 161 if (set_flags & SET_VLAN_STRIP) 162 MLX5_SET(modify_esw_vport_context_in, in, 163 esw_vport_context.vport_cvlan_strip, 1); 164 165 if (set_flags & SET_VLAN_INSERT) { 166 /* insert only if no vlan in packet */ 167 MLX5_SET(modify_esw_vport_context_in, in, 168 esw_vport_context.vport_cvlan_insert, 1); 169 170 MLX5_SET(modify_esw_vport_context_in, in, 171 esw_vport_context.cvlan_pcp, qos); 172 MLX5_SET(modify_esw_vport_context_in, in, 173 esw_vport_context.cvlan_id, vlan); 174 } 175 176 MLX5_SET(modify_esw_vport_context_in, in, 177 field_select.vport_cvlan_strip, 1); 178 MLX5_SET(modify_esw_vport_context_in, in, 179 field_select.vport_cvlan_insert, 1); 180 181 return mlx5_eswitch_modify_esw_vport_context(dev, vport, true, in); 182 } 183 184 /* E-Switch FDB */ 185 static struct mlx5_flow_handle * 186 __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u16 vport, bool rx_rule, 187 u8 mac_c[ETH_ALEN], u8 mac_v[ETH_ALEN]) 188 { 189 int match_header = (is_zero_ether_addr(mac_c) ? 0 : 190 MLX5_MATCH_OUTER_HEADERS); 191 struct mlx5_flow_handle *flow_rule = NULL; 192 struct mlx5_flow_act flow_act = {0}; 193 struct mlx5_flow_destination dest = {}; 194 struct mlx5_flow_spec *spec; 195 void *mv_misc = NULL; 196 void *mc_misc = NULL; 197 u8 *dmac_v = NULL; 198 u8 *dmac_c = NULL; 199 200 if (rx_rule) 201 match_header |= MLX5_MATCH_MISC_PARAMETERS; 202 203 spec = kvzalloc(sizeof(*spec), GFP_KERNEL); 204 if (!spec) 205 return NULL; 206 207 dmac_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, 208 outer_headers.dmac_47_16); 209 dmac_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 210 outer_headers.dmac_47_16); 211 212 if (match_header & MLX5_MATCH_OUTER_HEADERS) { 213 ether_addr_copy(dmac_v, mac_v); 214 ether_addr_copy(dmac_c, mac_c); 215 } 216 217 if (match_header & MLX5_MATCH_MISC_PARAMETERS) { 218 mv_misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, 219 misc_parameters); 220 mc_misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 221 misc_parameters); 222 MLX5_SET(fte_match_set_misc, mv_misc, source_port, MLX5_VPORT_UPLINK); 223 MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port); 224 } 225 226 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; 227 dest.vport.num = vport; 228 229 esw_debug(esw->dev, 230 "\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n", 231 dmac_v, dmac_c, vport); 232 spec->match_criteria_enable = match_header; 233 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 234 flow_rule = 235 mlx5_add_flow_rules(esw->fdb_table.legacy.fdb, spec, 236 &flow_act, &dest, 1); 237 if (IS_ERR(flow_rule)) { 238 esw_warn(esw->dev, 239 "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n", 240 dmac_v, dmac_c, vport, PTR_ERR(flow_rule)); 241 flow_rule = NULL; 242 } 243 244 kvfree(spec); 245 return flow_rule; 246 } 247 248 static struct mlx5_flow_handle * 249 esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u16 vport) 250 { 251 u8 mac_c[ETH_ALEN]; 252 253 eth_broadcast_addr(mac_c); 254 return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac); 255 } 256 257 static struct mlx5_flow_handle * 258 esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u16 vport) 259 { 260 u8 mac_c[ETH_ALEN]; 261 u8 mac_v[ETH_ALEN]; 262 263 eth_zero_addr(mac_c); 264 eth_zero_addr(mac_v); 265 mac_c[0] = 0x01; 266 mac_v[0] = 0x01; 267 return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac_v); 268 } 269 270 static struct mlx5_flow_handle * 271 esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u16 vport) 272 { 273 u8 mac_c[ETH_ALEN]; 274 u8 mac_v[ETH_ALEN]; 275 276 eth_zero_addr(mac_c); 277 eth_zero_addr(mac_v); 278 return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v); 279 } 280 281 enum { 282 LEGACY_VEPA_PRIO = 0, 283 LEGACY_FDB_PRIO, 284 }; 285 286 static int esw_create_legacy_vepa_table(struct mlx5_eswitch *esw) 287 { 288 struct mlx5_flow_table_attr ft_attr = {}; 289 struct mlx5_core_dev *dev = esw->dev; 290 struct mlx5_flow_namespace *root_ns; 291 struct mlx5_flow_table *fdb; 292 int err; 293 294 root_ns = mlx5_get_fdb_sub_ns(dev, 0); 295 if (!root_ns) { 296 esw_warn(dev, "Failed to get FDB flow namespace\n"); 297 return -EOPNOTSUPP; 298 } 299 300 /* num FTE 2, num FG 2 */ 301 ft_attr.prio = LEGACY_VEPA_PRIO; 302 ft_attr.max_fte = 2; 303 ft_attr.autogroup.max_num_groups = 2; 304 fdb = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr); 305 if (IS_ERR(fdb)) { 306 err = PTR_ERR(fdb); 307 esw_warn(dev, "Failed to create VEPA FDB err %d\n", err); 308 return err; 309 } 310 esw->fdb_table.legacy.vepa_fdb = fdb; 311 312 return 0; 313 } 314 315 static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw) 316 { 317 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); 318 struct mlx5_flow_table_attr ft_attr = {}; 319 struct mlx5_core_dev *dev = esw->dev; 320 struct mlx5_flow_namespace *root_ns; 321 struct mlx5_flow_table *fdb; 322 struct mlx5_flow_group *g; 323 void *match_criteria; 324 int table_size; 325 u32 *flow_group_in; 326 u8 *dmac; 327 int err = 0; 328 329 esw_debug(dev, "Create FDB log_max_size(%d)\n", 330 MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); 331 332 root_ns = mlx5_get_fdb_sub_ns(dev, 0); 333 if (!root_ns) { 334 esw_warn(dev, "Failed to get FDB flow namespace\n"); 335 return -EOPNOTSUPP; 336 } 337 338 flow_group_in = kvzalloc(inlen, GFP_KERNEL); 339 if (!flow_group_in) 340 return -ENOMEM; 341 342 table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); 343 ft_attr.max_fte = table_size; 344 ft_attr.prio = LEGACY_FDB_PRIO; 345 fdb = mlx5_create_flow_table(root_ns, &ft_attr); 346 if (IS_ERR(fdb)) { 347 err = PTR_ERR(fdb); 348 esw_warn(dev, "Failed to create FDB Table err %d\n", err); 349 goto out; 350 } 351 esw->fdb_table.legacy.fdb = fdb; 352 353 /* Addresses group : Full match unicast/multicast addresses */ 354 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, 355 MLX5_MATCH_OUTER_HEADERS); 356 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria); 357 dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16); 358 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); 359 /* Preserve 2 entries for allmulti and promisc rules*/ 360 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3); 361 eth_broadcast_addr(dmac); 362 g = mlx5_create_flow_group(fdb, flow_group_in); 363 if (IS_ERR(g)) { 364 err = PTR_ERR(g); 365 esw_warn(dev, "Failed to create flow group err(%d)\n", err); 366 goto out; 367 } 368 esw->fdb_table.legacy.addr_grp = g; 369 370 /* Allmulti group : One rule that forwards any mcast traffic */ 371 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, 372 MLX5_MATCH_OUTER_HEADERS); 373 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 2); 374 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 2); 375 eth_zero_addr(dmac); 376 dmac[0] = 0x01; 377 g = mlx5_create_flow_group(fdb, flow_group_in); 378 if (IS_ERR(g)) { 379 err = PTR_ERR(g); 380 esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err); 381 goto out; 382 } 383 esw->fdb_table.legacy.allmulti_grp = g; 384 385 /* Promiscuous group : 386 * One rule that forward all unmatched traffic from previous groups 387 */ 388 eth_zero_addr(dmac); 389 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, 390 MLX5_MATCH_MISC_PARAMETERS); 391 MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port); 392 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1); 393 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1); 394 g = mlx5_create_flow_group(fdb, flow_group_in); 395 if (IS_ERR(g)) { 396 err = PTR_ERR(g); 397 esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err); 398 goto out; 399 } 400 esw->fdb_table.legacy.promisc_grp = g; 401 402 out: 403 if (err) 404 esw_destroy_legacy_fdb_table(esw); 405 406 kvfree(flow_group_in); 407 return err; 408 } 409 410 static void esw_destroy_legacy_vepa_table(struct mlx5_eswitch *esw) 411 { 412 esw_debug(esw->dev, "Destroy VEPA Table\n"); 413 if (!esw->fdb_table.legacy.vepa_fdb) 414 return; 415 416 mlx5_destroy_flow_table(esw->fdb_table.legacy.vepa_fdb); 417 esw->fdb_table.legacy.vepa_fdb = NULL; 418 } 419 420 static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw) 421 { 422 esw_debug(esw->dev, "Destroy FDB Table\n"); 423 if (!esw->fdb_table.legacy.fdb) 424 return; 425 426 if (esw->fdb_table.legacy.promisc_grp) 427 mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp); 428 if (esw->fdb_table.legacy.allmulti_grp) 429 mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp); 430 if (esw->fdb_table.legacy.addr_grp) 431 mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp); 432 mlx5_destroy_flow_table(esw->fdb_table.legacy.fdb); 433 434 esw->fdb_table.legacy.fdb = NULL; 435 esw->fdb_table.legacy.addr_grp = NULL; 436 esw->fdb_table.legacy.allmulti_grp = NULL; 437 esw->fdb_table.legacy.promisc_grp = NULL; 438 } 439 440 static int esw_create_legacy_table(struct mlx5_eswitch *esw) 441 { 442 int err; 443 444 memset(&esw->fdb_table.legacy, 0, sizeof(struct legacy_fdb)); 445 446 err = esw_create_legacy_vepa_table(esw); 447 if (err) 448 return err; 449 450 err = esw_create_legacy_fdb_table(esw); 451 if (err) 452 esw_destroy_legacy_vepa_table(esw); 453 454 return err; 455 } 456 457 static void esw_destroy_legacy_table(struct mlx5_eswitch *esw) 458 { 459 esw_cleanup_vepa_rules(esw); 460 esw_destroy_legacy_fdb_table(esw); 461 esw_destroy_legacy_vepa_table(esw); 462 } 463 464 #define MLX5_LEGACY_SRIOV_VPORT_EVENTS (MLX5_VPORT_UC_ADDR_CHANGE | \ 465 MLX5_VPORT_MC_ADDR_CHANGE | \ 466 MLX5_VPORT_PROMISC_CHANGE) 467 468 static int esw_legacy_enable(struct mlx5_eswitch *esw) 469 { 470 struct mlx5_vport *vport; 471 int ret, i; 472 473 ret = esw_create_legacy_table(esw); 474 if (ret) 475 return ret; 476 477 mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) 478 vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO; 479 480 ret = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS); 481 if (ret) 482 esw_destroy_legacy_table(esw); 483 return ret; 484 } 485 486 static void esw_legacy_disable(struct mlx5_eswitch *esw) 487 { 488 struct esw_mc_addr *mc_promisc; 489 490 mlx5_eswitch_disable_pf_vf_vports(esw); 491 492 mc_promisc = &esw->mc_promisc; 493 if (mc_promisc->uplink_rule) 494 mlx5_del_flow_rules(mc_promisc->uplink_rule); 495 496 esw_destroy_legacy_table(esw); 497 } 498 499 /* E-Switch vport UC/MC lists management */ 500 typedef int (*vport_addr_action)(struct mlx5_eswitch *esw, 501 struct vport_addr *vaddr); 502 503 static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) 504 { 505 u8 *mac = vaddr->node.addr; 506 u16 vport = vaddr->vport; 507 int err; 508 509 /* Skip mlx5_mpfs_add_mac for eswitch_managers, 510 * it is already done by its netdev in mlx5e_execute_l2_action 511 */ 512 if (mlx5_esw_is_manager_vport(esw, vport)) 513 goto fdb_add; 514 515 err = mlx5_mpfs_add_mac(esw->dev, mac); 516 if (err) { 517 esw_warn(esw->dev, 518 "Failed to add L2 table mac(%pM) for vport(0x%x), err(%d)\n", 519 mac, vport, err); 520 return err; 521 } 522 vaddr->mpfs = true; 523 524 fdb_add: 525 /* SRIOV is enabled: Forward UC MAC to vport */ 526 if (esw->fdb_table.legacy.fdb && esw->mode == MLX5_ESWITCH_LEGACY) 527 vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport); 528 529 esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM fr(%p)\n", 530 vport, mac, vaddr->flow_rule); 531 532 return 0; 533 } 534 535 static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) 536 { 537 u8 *mac = vaddr->node.addr; 538 u16 vport = vaddr->vport; 539 int err = 0; 540 541 /* Skip mlx5_mpfs_del_mac for eswitch managers, 542 * it is already done by its netdev in mlx5e_execute_l2_action 543 */ 544 if (!vaddr->mpfs || mlx5_esw_is_manager_vport(esw, vport)) 545 goto fdb_del; 546 547 err = mlx5_mpfs_del_mac(esw->dev, mac); 548 if (err) 549 esw_warn(esw->dev, 550 "Failed to del L2 table mac(%pM) for vport(%d), err(%d)\n", 551 mac, vport, err); 552 vaddr->mpfs = false; 553 554 fdb_del: 555 if (vaddr->flow_rule) 556 mlx5_del_flow_rules(vaddr->flow_rule); 557 vaddr->flow_rule = NULL; 558 559 return 0; 560 } 561 562 static void update_allmulti_vports(struct mlx5_eswitch *esw, 563 struct vport_addr *vaddr, 564 struct esw_mc_addr *esw_mc) 565 { 566 u8 *mac = vaddr->node.addr; 567 struct mlx5_vport *vport; 568 u16 i, vport_num; 569 570 mlx5_esw_for_all_vports(esw, i, vport) { 571 struct hlist_head *vport_hash = vport->mc_list; 572 struct vport_addr *iter_vaddr = 573 l2addr_hash_find(vport_hash, 574 mac, 575 struct vport_addr); 576 vport_num = vport->vport; 577 if (IS_ERR_OR_NULL(vport->allmulti_rule) || 578 vaddr->vport == vport_num) 579 continue; 580 switch (vaddr->action) { 581 case MLX5_ACTION_ADD: 582 if (iter_vaddr) 583 continue; 584 iter_vaddr = l2addr_hash_add(vport_hash, mac, 585 struct vport_addr, 586 GFP_KERNEL); 587 if (!iter_vaddr) { 588 esw_warn(esw->dev, 589 "ALL-MULTI: Failed to add MAC(%pM) to vport[%d] DB\n", 590 mac, vport_num); 591 continue; 592 } 593 iter_vaddr->vport = vport_num; 594 iter_vaddr->flow_rule = 595 esw_fdb_set_vport_rule(esw, 596 mac, 597 vport_num); 598 iter_vaddr->mc_promisc = true; 599 break; 600 case MLX5_ACTION_DEL: 601 if (!iter_vaddr) 602 continue; 603 mlx5_del_flow_rules(iter_vaddr->flow_rule); 604 l2addr_hash_del(iter_vaddr); 605 break; 606 } 607 } 608 } 609 610 static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) 611 { 612 struct hlist_head *hash = esw->mc_table; 613 struct esw_mc_addr *esw_mc; 614 u8 *mac = vaddr->node.addr; 615 u16 vport = vaddr->vport; 616 617 if (!esw->fdb_table.legacy.fdb) 618 return 0; 619 620 esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr); 621 if (esw_mc) 622 goto add; 623 624 esw_mc = l2addr_hash_add(hash, mac, struct esw_mc_addr, GFP_KERNEL); 625 if (!esw_mc) 626 return -ENOMEM; 627 628 esw_mc->uplink_rule = /* Forward MC MAC to Uplink */ 629 esw_fdb_set_vport_rule(esw, mac, MLX5_VPORT_UPLINK); 630 631 /* Add this multicast mac to all the mc promiscuous vports */ 632 update_allmulti_vports(esw, vaddr, esw_mc); 633 634 add: 635 /* If the multicast mac is added as a result of mc promiscuous vport, 636 * don't increment the multicast ref count 637 */ 638 if (!vaddr->mc_promisc) 639 esw_mc->refcnt++; 640 641 /* Forward MC MAC to vport */ 642 vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport); 643 esw_debug(esw->dev, 644 "\tADDED MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n", 645 vport, mac, vaddr->flow_rule, 646 esw_mc->refcnt, esw_mc->uplink_rule); 647 return 0; 648 } 649 650 static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) 651 { 652 struct hlist_head *hash = esw->mc_table; 653 struct esw_mc_addr *esw_mc; 654 u8 *mac = vaddr->node.addr; 655 u16 vport = vaddr->vport; 656 657 if (!esw->fdb_table.legacy.fdb) 658 return 0; 659 660 esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr); 661 if (!esw_mc) { 662 esw_warn(esw->dev, 663 "Failed to find eswitch MC addr for MAC(%pM) vport(%d)", 664 mac, vport); 665 return -EINVAL; 666 } 667 esw_debug(esw->dev, 668 "\tDELETE MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n", 669 vport, mac, vaddr->flow_rule, esw_mc->refcnt, 670 esw_mc->uplink_rule); 671 672 if (vaddr->flow_rule) 673 mlx5_del_flow_rules(vaddr->flow_rule); 674 vaddr->flow_rule = NULL; 675 676 /* If the multicast mac is added as a result of mc promiscuous vport, 677 * don't decrement the multicast ref count. 678 */ 679 if (vaddr->mc_promisc || (--esw_mc->refcnt > 0)) 680 return 0; 681 682 /* Remove this multicast mac from all the mc promiscuous vports */ 683 update_allmulti_vports(esw, vaddr, esw_mc); 684 685 if (esw_mc->uplink_rule) 686 mlx5_del_flow_rules(esw_mc->uplink_rule); 687 688 l2addr_hash_del(esw_mc); 689 return 0; 690 } 691 692 /* Apply vport UC/MC list to HW l2 table and FDB table */ 693 static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw, 694 struct mlx5_vport *vport, int list_type) 695 { 696 bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC; 697 vport_addr_action vport_addr_add; 698 vport_addr_action vport_addr_del; 699 struct vport_addr *addr; 700 struct l2addr_node *node; 701 struct hlist_head *hash; 702 struct hlist_node *tmp; 703 int hi; 704 705 vport_addr_add = is_uc ? esw_add_uc_addr : 706 esw_add_mc_addr; 707 vport_addr_del = is_uc ? esw_del_uc_addr : 708 esw_del_mc_addr; 709 710 hash = is_uc ? vport->uc_list : vport->mc_list; 711 for_each_l2hash_node(node, tmp, hash, hi) { 712 addr = container_of(node, struct vport_addr, node); 713 switch (addr->action) { 714 case MLX5_ACTION_ADD: 715 vport_addr_add(esw, addr); 716 addr->action = MLX5_ACTION_NONE; 717 break; 718 case MLX5_ACTION_DEL: 719 vport_addr_del(esw, addr); 720 l2addr_hash_del(addr); 721 break; 722 } 723 } 724 } 725 726 /* Sync vport UC/MC list from vport context */ 727 static void esw_update_vport_addr_list(struct mlx5_eswitch *esw, 728 struct mlx5_vport *vport, int list_type) 729 { 730 bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC; 731 u8 (*mac_list)[ETH_ALEN]; 732 struct l2addr_node *node; 733 struct vport_addr *addr; 734 struct hlist_head *hash; 735 struct hlist_node *tmp; 736 int size; 737 int err; 738 int hi; 739 int i; 740 741 size = is_uc ? MLX5_MAX_UC_PER_VPORT(esw->dev) : 742 MLX5_MAX_MC_PER_VPORT(esw->dev); 743 744 mac_list = kcalloc(size, ETH_ALEN, GFP_KERNEL); 745 if (!mac_list) 746 return; 747 748 hash = is_uc ? vport->uc_list : vport->mc_list; 749 750 for_each_l2hash_node(node, tmp, hash, hi) { 751 addr = container_of(node, struct vport_addr, node); 752 addr->action = MLX5_ACTION_DEL; 753 } 754 755 if (!vport->enabled) 756 goto out; 757 758 err = mlx5_query_nic_vport_mac_list(esw->dev, vport->vport, list_type, 759 mac_list, &size); 760 if (err) 761 goto out; 762 esw_debug(esw->dev, "vport[%d] context update %s list size (%d)\n", 763 vport->vport, is_uc ? "UC" : "MC", size); 764 765 for (i = 0; i < size; i++) { 766 if (is_uc && !is_valid_ether_addr(mac_list[i])) 767 continue; 768 769 if (!is_uc && !is_multicast_ether_addr(mac_list[i])) 770 continue; 771 772 addr = l2addr_hash_find(hash, mac_list[i], struct vport_addr); 773 if (addr) { 774 addr->action = MLX5_ACTION_NONE; 775 /* If this mac was previously added because of allmulti 776 * promiscuous rx mode, its now converted to be original 777 * vport mac. 778 */ 779 if (addr->mc_promisc) { 780 struct esw_mc_addr *esw_mc = 781 l2addr_hash_find(esw->mc_table, 782 mac_list[i], 783 struct esw_mc_addr); 784 if (!esw_mc) { 785 esw_warn(esw->dev, 786 "Failed to MAC(%pM) in mcast DB\n", 787 mac_list[i]); 788 continue; 789 } 790 esw_mc->refcnt++; 791 addr->mc_promisc = false; 792 } 793 continue; 794 } 795 796 addr = l2addr_hash_add(hash, mac_list[i], struct vport_addr, 797 GFP_KERNEL); 798 if (!addr) { 799 esw_warn(esw->dev, 800 "Failed to add MAC(%pM) to vport[%d] DB\n", 801 mac_list[i], vport->vport); 802 continue; 803 } 804 addr->vport = vport->vport; 805 addr->action = MLX5_ACTION_ADD; 806 } 807 out: 808 kfree(mac_list); 809 } 810 811 /* Sync vport UC/MC list from vport context 812 * Must be called after esw_update_vport_addr_list 813 */ 814 static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw, 815 struct mlx5_vport *vport) 816 { 817 struct l2addr_node *node; 818 struct vport_addr *addr; 819 struct hlist_head *hash; 820 struct hlist_node *tmp; 821 int hi; 822 823 hash = vport->mc_list; 824 825 for_each_l2hash_node(node, tmp, esw->mc_table, hi) { 826 u8 *mac = node->addr; 827 828 addr = l2addr_hash_find(hash, mac, struct vport_addr); 829 if (addr) { 830 if (addr->action == MLX5_ACTION_DEL) 831 addr->action = MLX5_ACTION_NONE; 832 continue; 833 } 834 addr = l2addr_hash_add(hash, mac, struct vport_addr, 835 GFP_KERNEL); 836 if (!addr) { 837 esw_warn(esw->dev, 838 "Failed to add allmulti MAC(%pM) to vport[%d] DB\n", 839 mac, vport->vport); 840 continue; 841 } 842 addr->vport = vport->vport; 843 addr->action = MLX5_ACTION_ADD; 844 addr->mc_promisc = true; 845 } 846 } 847 848 /* Apply vport rx mode to HW FDB table */ 849 static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, 850 struct mlx5_vport *vport, 851 bool promisc, bool mc_promisc) 852 { 853 struct esw_mc_addr *allmulti_addr = &esw->mc_promisc; 854 855 if (IS_ERR_OR_NULL(vport->allmulti_rule) != mc_promisc) 856 goto promisc; 857 858 if (mc_promisc) { 859 vport->allmulti_rule = 860 esw_fdb_set_vport_allmulti_rule(esw, vport->vport); 861 if (!allmulti_addr->uplink_rule) 862 allmulti_addr->uplink_rule = 863 esw_fdb_set_vport_allmulti_rule(esw, 864 MLX5_VPORT_UPLINK); 865 allmulti_addr->refcnt++; 866 } else if (vport->allmulti_rule) { 867 mlx5_del_flow_rules(vport->allmulti_rule); 868 vport->allmulti_rule = NULL; 869 870 if (--allmulti_addr->refcnt > 0) 871 goto promisc; 872 873 if (allmulti_addr->uplink_rule) 874 mlx5_del_flow_rules(allmulti_addr->uplink_rule); 875 allmulti_addr->uplink_rule = NULL; 876 } 877 878 promisc: 879 if (IS_ERR_OR_NULL(vport->promisc_rule) != promisc) 880 return; 881 882 if (promisc) { 883 vport->promisc_rule = 884 esw_fdb_set_vport_promisc_rule(esw, vport->vport); 885 } else if (vport->promisc_rule) { 886 mlx5_del_flow_rules(vport->promisc_rule); 887 vport->promisc_rule = NULL; 888 } 889 } 890 891 /* Sync vport rx mode from vport context */ 892 static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw, 893 struct mlx5_vport *vport) 894 { 895 int promisc_all = 0; 896 int promisc_uc = 0; 897 int promisc_mc = 0; 898 int err; 899 900 err = mlx5_query_nic_vport_promisc(esw->dev, 901 vport->vport, 902 &promisc_uc, 903 &promisc_mc, 904 &promisc_all); 905 if (err) 906 return; 907 esw_debug(esw->dev, "vport[%d] context update rx mode promisc_all=%d, all_multi=%d\n", 908 vport->vport, promisc_all, promisc_mc); 909 910 if (!vport->info.trusted || !vport->enabled) { 911 promisc_uc = 0; 912 promisc_mc = 0; 913 promisc_all = 0; 914 } 915 916 esw_apply_vport_rx_mode(esw, vport, promisc_all, 917 (promisc_all || promisc_mc)); 918 } 919 920 static void esw_vport_change_handle_locked(struct mlx5_vport *vport) 921 { 922 struct mlx5_core_dev *dev = vport->dev; 923 struct mlx5_eswitch *esw = dev->priv.eswitch; 924 u8 mac[ETH_ALEN]; 925 926 mlx5_query_nic_vport_mac_address(dev, vport->vport, true, mac); 927 esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n", 928 vport->vport, mac); 929 930 if (vport->enabled_events & MLX5_VPORT_UC_ADDR_CHANGE) { 931 esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC); 932 esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC); 933 } 934 935 if (vport->enabled_events & MLX5_VPORT_MC_ADDR_CHANGE) 936 esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC); 937 938 if (vport->enabled_events & MLX5_VPORT_PROMISC_CHANGE) { 939 esw_update_vport_rx_mode(esw, vport); 940 if (!IS_ERR_OR_NULL(vport->allmulti_rule)) 941 esw_update_vport_mc_promisc(esw, vport); 942 } 943 944 if (vport->enabled_events & (MLX5_VPORT_PROMISC_CHANGE | MLX5_VPORT_MC_ADDR_CHANGE)) 945 esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC); 946 947 esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport); 948 if (vport->enabled) 949 arm_vport_context_events_cmd(dev, vport->vport, 950 vport->enabled_events); 951 } 952 953 static void esw_vport_change_handler(struct work_struct *work) 954 { 955 struct mlx5_vport *vport = 956 container_of(work, struct mlx5_vport, vport_change_handler); 957 struct mlx5_eswitch *esw = vport->dev->priv.eswitch; 958 959 mutex_lock(&esw->state_lock); 960 esw_vport_change_handle_locked(vport); 961 mutex_unlock(&esw->state_lock); 962 } 963 964 static bool element_type_supported(struct mlx5_eswitch *esw, int type) 965 { 966 const struct mlx5_core_dev *dev = esw->dev; 967 968 switch (type) { 969 case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR: 970 return MLX5_CAP_QOS(dev, esw_element_type) & 971 ELEMENT_TYPE_CAP_MASK_TASR; 972 case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT: 973 return MLX5_CAP_QOS(dev, esw_element_type) & 974 ELEMENT_TYPE_CAP_MASK_VPORT; 975 case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC: 976 return MLX5_CAP_QOS(dev, esw_element_type) & 977 ELEMENT_TYPE_CAP_MASK_VPORT_TC; 978 case SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC: 979 return MLX5_CAP_QOS(dev, esw_element_type) & 980 ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC; 981 } 982 return false; 983 } 984 985 /* Vport QoS management */ 986 static void esw_create_tsar(struct mlx5_eswitch *esw) 987 { 988 u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0}; 989 struct mlx5_core_dev *dev = esw->dev; 990 __be32 *attr; 991 int err; 992 993 if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling)) 994 return; 995 996 if (!element_type_supported(esw, SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR)) 997 return; 998 999 if (esw->qos.enabled) 1000 return; 1001 1002 MLX5_SET(scheduling_context, tsar_ctx, element_type, 1003 SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR); 1004 1005 attr = MLX5_ADDR_OF(scheduling_context, tsar_ctx, element_attributes); 1006 *attr = cpu_to_be32(TSAR_ELEMENT_TSAR_TYPE_DWRR << 16); 1007 1008 err = mlx5_create_scheduling_element_cmd(dev, 1009 SCHEDULING_HIERARCHY_E_SWITCH, 1010 tsar_ctx, 1011 &esw->qos.root_tsar_id); 1012 if (err) { 1013 esw_warn(esw->dev, "E-Switch create TSAR failed (%d)\n", err); 1014 return; 1015 } 1016 1017 esw->qos.enabled = true; 1018 } 1019 1020 static void esw_destroy_tsar(struct mlx5_eswitch *esw) 1021 { 1022 int err; 1023 1024 if (!esw->qos.enabled) 1025 return; 1026 1027 err = mlx5_destroy_scheduling_element_cmd(esw->dev, 1028 SCHEDULING_HIERARCHY_E_SWITCH, 1029 esw->qos.root_tsar_id); 1030 if (err) 1031 esw_warn(esw->dev, "E-Switch destroy TSAR failed (%d)\n", err); 1032 1033 esw->qos.enabled = false; 1034 } 1035 1036 static int esw_vport_enable_qos(struct mlx5_eswitch *esw, 1037 struct mlx5_vport *vport, 1038 u32 initial_max_rate, u32 initial_bw_share) 1039 { 1040 u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0}; 1041 struct mlx5_core_dev *dev = esw->dev; 1042 void *vport_elem; 1043 int err = 0; 1044 1045 if (!esw->qos.enabled) 1046 return 0; 1047 1048 if (vport->qos.enabled) 1049 return -EEXIST; 1050 1051 MLX5_SET(scheduling_context, sched_ctx, element_type, 1052 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT); 1053 vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx, 1054 element_attributes); 1055 MLX5_SET(vport_element, vport_elem, vport_number, vport->vport); 1056 MLX5_SET(scheduling_context, sched_ctx, parent_element_id, 1057 esw->qos.root_tsar_id); 1058 MLX5_SET(scheduling_context, sched_ctx, max_average_bw, 1059 initial_max_rate); 1060 MLX5_SET(scheduling_context, sched_ctx, bw_share, initial_bw_share); 1061 1062 err = mlx5_create_scheduling_element_cmd(dev, 1063 SCHEDULING_HIERARCHY_E_SWITCH, 1064 sched_ctx, 1065 &vport->qos.esw_tsar_ix); 1066 if (err) { 1067 esw_warn(esw->dev, "E-Switch create TSAR vport element failed (vport=%d,err=%d)\n", 1068 vport->vport, err); 1069 return err; 1070 } 1071 1072 vport->qos.enabled = true; 1073 return 0; 1074 } 1075 1076 static void esw_vport_disable_qos(struct mlx5_eswitch *esw, 1077 struct mlx5_vport *vport) 1078 { 1079 int err; 1080 1081 if (!vport->qos.enabled) 1082 return; 1083 1084 err = mlx5_destroy_scheduling_element_cmd(esw->dev, 1085 SCHEDULING_HIERARCHY_E_SWITCH, 1086 vport->qos.esw_tsar_ix); 1087 if (err) 1088 esw_warn(esw->dev, "E-Switch destroy TSAR vport element failed (vport=%d,err=%d)\n", 1089 vport->vport, err); 1090 1091 vport->qos.enabled = false; 1092 } 1093 1094 static int esw_vport_qos_config(struct mlx5_eswitch *esw, 1095 struct mlx5_vport *vport, 1096 u32 max_rate, u32 bw_share) 1097 { 1098 u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0}; 1099 struct mlx5_core_dev *dev = esw->dev; 1100 void *vport_elem; 1101 u32 bitmask = 0; 1102 int err = 0; 1103 1104 if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling)) 1105 return -EOPNOTSUPP; 1106 1107 if (!vport->qos.enabled) 1108 return -EIO; 1109 1110 MLX5_SET(scheduling_context, sched_ctx, element_type, 1111 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT); 1112 vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx, 1113 element_attributes); 1114 MLX5_SET(vport_element, vport_elem, vport_number, vport->vport); 1115 MLX5_SET(scheduling_context, sched_ctx, parent_element_id, 1116 esw->qos.root_tsar_id); 1117 MLX5_SET(scheduling_context, sched_ctx, max_average_bw, 1118 max_rate); 1119 MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share); 1120 bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW; 1121 bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE; 1122 1123 err = mlx5_modify_scheduling_element_cmd(dev, 1124 SCHEDULING_HIERARCHY_E_SWITCH, 1125 sched_ctx, 1126 vport->qos.esw_tsar_ix, 1127 bitmask); 1128 if (err) { 1129 esw_warn(esw->dev, "E-Switch modify TSAR vport element failed (vport=%d,err=%d)\n", 1130 vport->vport, err); 1131 return err; 1132 } 1133 1134 return 0; 1135 } 1136 1137 int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, 1138 u32 rate_mbps) 1139 { 1140 u32 ctx[MLX5_ST_SZ_DW(scheduling_context)] = {}; 1141 struct mlx5_vport *vport; 1142 1143 vport = mlx5_eswitch_get_vport(esw, vport_num); 1144 1145 if (!vport->qos.enabled) 1146 return -EOPNOTSUPP; 1147 1148 MLX5_SET(scheduling_context, ctx, max_average_bw, rate_mbps); 1149 1150 return mlx5_modify_scheduling_element_cmd(esw->dev, 1151 SCHEDULING_HIERARCHY_E_SWITCH, 1152 ctx, 1153 vport->qos.esw_tsar_ix, 1154 MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW); 1155 } 1156 1157 static void node_guid_gen_from_mac(u64 *node_guid, const u8 *mac) 1158 { 1159 ((u8 *)node_guid)[7] = mac[0]; 1160 ((u8 *)node_guid)[6] = mac[1]; 1161 ((u8 *)node_guid)[5] = mac[2]; 1162 ((u8 *)node_guid)[4] = 0xff; 1163 ((u8 *)node_guid)[3] = 0xfe; 1164 ((u8 *)node_guid)[2] = mac[3]; 1165 ((u8 *)node_guid)[1] = mac[4]; 1166 ((u8 *)node_guid)[0] = mac[5]; 1167 } 1168 1169 static int esw_vport_create_legacy_acl_tables(struct mlx5_eswitch *esw, 1170 struct mlx5_vport *vport) 1171 { 1172 int ret; 1173 1174 /* Only non manager vports need ACL in legacy mode */ 1175 if (mlx5_esw_is_manager_vport(esw, vport->vport)) 1176 return 0; 1177 1178 ret = esw_acl_ingress_lgcy_setup(esw, vport); 1179 if (ret) 1180 goto ingress_err; 1181 1182 ret = esw_acl_egress_lgcy_setup(esw, vport); 1183 if (ret) 1184 goto egress_err; 1185 1186 return 0; 1187 1188 egress_err: 1189 esw_acl_ingress_lgcy_cleanup(esw, vport); 1190 ingress_err: 1191 return ret; 1192 } 1193 1194 static int esw_vport_setup_acl(struct mlx5_eswitch *esw, 1195 struct mlx5_vport *vport) 1196 { 1197 if (esw->mode == MLX5_ESWITCH_LEGACY) 1198 return esw_vport_create_legacy_acl_tables(esw, vport); 1199 else 1200 return esw_vport_create_offloads_acl_tables(esw, vport); 1201 } 1202 1203 static void esw_vport_destroy_legacy_acl_tables(struct mlx5_eswitch *esw, 1204 struct mlx5_vport *vport) 1205 1206 { 1207 if (mlx5_esw_is_manager_vport(esw, vport->vport)) 1208 return; 1209 1210 esw_acl_egress_lgcy_cleanup(esw, vport); 1211 esw_acl_ingress_lgcy_cleanup(esw, vport); 1212 } 1213 1214 static void esw_vport_cleanup_acl(struct mlx5_eswitch *esw, 1215 struct mlx5_vport *vport) 1216 { 1217 if (esw->mode == MLX5_ESWITCH_LEGACY) 1218 esw_vport_destroy_legacy_acl_tables(esw, vport); 1219 else 1220 esw_vport_destroy_offloads_acl_tables(esw, vport); 1221 } 1222 1223 static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport) 1224 { 1225 u16 vport_num = vport->vport; 1226 int flags; 1227 int err; 1228 1229 err = esw_vport_setup_acl(esw, vport); 1230 if (err) 1231 return err; 1232 1233 /* Attach vport to the eswitch rate limiter */ 1234 esw_vport_enable_qos(esw, vport, vport->info.max_rate, vport->qos.bw_share); 1235 1236 if (mlx5_esw_is_manager_vport(esw, vport_num)) 1237 return 0; 1238 1239 mlx5_modify_vport_admin_state(esw->dev, 1240 MLX5_VPORT_STATE_OP_MOD_ESW_VPORT, 1241 vport_num, 1, 1242 vport->info.link_state); 1243 1244 /* Host PF has its own mac/guid. */ 1245 if (vport_num) { 1246 mlx5_modify_nic_vport_mac_address(esw->dev, vport_num, 1247 vport->info.mac); 1248 mlx5_modify_nic_vport_node_guid(esw->dev, vport_num, 1249 vport->info.node_guid); 1250 } 1251 1252 flags = (vport->info.vlan || vport->info.qos) ? 1253 SET_VLAN_STRIP | SET_VLAN_INSERT : 0; 1254 modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan, 1255 vport->info.qos, flags); 1256 1257 return 0; 1258 } 1259 1260 /* Don't cleanup vport->info, it's needed to restore vport configuration */ 1261 static void esw_vport_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport) 1262 { 1263 u16 vport_num = vport->vport; 1264 1265 if (!mlx5_esw_is_manager_vport(esw, vport_num)) 1266 mlx5_modify_vport_admin_state(esw->dev, 1267 MLX5_VPORT_STATE_OP_MOD_ESW_VPORT, 1268 vport_num, 1, 1269 MLX5_VPORT_ADMIN_STATE_DOWN); 1270 1271 esw_vport_disable_qos(esw, vport); 1272 esw_vport_cleanup_acl(esw, vport); 1273 } 1274 1275 int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num, 1276 enum mlx5_eswitch_vport_event enabled_events) 1277 { 1278 struct mlx5_vport *vport; 1279 int ret; 1280 1281 vport = mlx5_eswitch_get_vport(esw, vport_num); 1282 1283 mutex_lock(&esw->state_lock); 1284 WARN_ON(vport->enabled); 1285 1286 esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num); 1287 1288 ret = esw_vport_setup(esw, vport); 1289 if (ret) 1290 goto done; 1291 1292 /* Sync with current vport context */ 1293 vport->enabled_events = enabled_events; 1294 vport->enabled = true; 1295 1296 /* Esw manager is trusted by default. Host PF (vport 0) is trusted as well 1297 * in smartNIC as it's a vport group manager. 1298 */ 1299 if (mlx5_esw_is_manager_vport(esw, vport_num) || 1300 (!vport_num && mlx5_core_is_ecpf(esw->dev))) 1301 vport->info.trusted = true; 1302 1303 if (!mlx5_esw_is_manager_vport(esw, vport->vport) && 1304 MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) { 1305 ret = mlx5_esw_vport_vhca_id_set(esw, vport_num); 1306 if (ret) 1307 goto err_vhca_mapping; 1308 } 1309 1310 esw_vport_change_handle_locked(vport); 1311 1312 esw->enabled_vports++; 1313 esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num); 1314 done: 1315 mutex_unlock(&esw->state_lock); 1316 return ret; 1317 1318 err_vhca_mapping: 1319 esw_vport_cleanup(esw, vport); 1320 mutex_unlock(&esw->state_lock); 1321 return ret; 1322 } 1323 1324 void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num) 1325 { 1326 struct mlx5_vport *vport; 1327 1328 vport = mlx5_eswitch_get_vport(esw, vport_num); 1329 1330 mutex_lock(&esw->state_lock); 1331 if (!vport->enabled) 1332 goto done; 1333 1334 esw_debug(esw->dev, "Disabling vport(%d)\n", vport_num); 1335 /* Mark this vport as disabled to discard new events */ 1336 vport->enabled = false; 1337 1338 /* Disable events from this vport */ 1339 arm_vport_context_events_cmd(esw->dev, vport->vport, 0); 1340 1341 if (!mlx5_esw_is_manager_vport(esw, vport->vport) && 1342 MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) 1343 mlx5_esw_vport_vhca_id_clear(esw, vport_num); 1344 1345 /* We don't assume VFs will cleanup after themselves. 1346 * Calling vport change handler while vport is disabled will cleanup 1347 * the vport resources. 1348 */ 1349 esw_vport_change_handle_locked(vport); 1350 vport->enabled_events = 0; 1351 esw_vport_cleanup(esw, vport); 1352 esw->enabled_vports--; 1353 1354 done: 1355 mutex_unlock(&esw->state_lock); 1356 } 1357 1358 static int eswitch_vport_event(struct notifier_block *nb, 1359 unsigned long type, void *data) 1360 { 1361 struct mlx5_eswitch *esw = mlx5_nb_cof(nb, struct mlx5_eswitch, nb); 1362 struct mlx5_eqe *eqe = data; 1363 struct mlx5_vport *vport; 1364 u16 vport_num; 1365 1366 vport_num = be16_to_cpu(eqe->data.vport_change.vport_num); 1367 vport = mlx5_eswitch_get_vport(esw, vport_num); 1368 if (!IS_ERR(vport)) 1369 queue_work(esw->work_queue, &vport->vport_change_handler); 1370 return NOTIFY_OK; 1371 } 1372 1373 /** 1374 * mlx5_esw_query_functions - Returns raw output about functions state 1375 * @dev: Pointer to device to query 1376 * 1377 * mlx5_esw_query_functions() allocates and returns functions changed 1378 * raw output memory pointer from device on success. Otherwise returns ERR_PTR. 1379 * Caller must free the memory using kvfree() when valid pointer is returned. 1380 */ 1381 const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev) 1382 { 1383 int outlen = MLX5_ST_SZ_BYTES(query_esw_functions_out); 1384 u32 in[MLX5_ST_SZ_DW(query_esw_functions_in)] = {}; 1385 u16 max_sf_vports; 1386 u32 *out; 1387 int err; 1388 1389 max_sf_vports = mlx5_sf_max_functions(dev); 1390 /* Device interface is array of 64-bits */ 1391 if (max_sf_vports) 1392 outlen += DIV_ROUND_UP(max_sf_vports, BITS_PER_TYPE(__be64)) * sizeof(__be64); 1393 1394 out = kvzalloc(outlen, GFP_KERNEL); 1395 if (!out) 1396 return ERR_PTR(-ENOMEM); 1397 1398 MLX5_SET(query_esw_functions_in, in, opcode, 1399 MLX5_CMD_OP_QUERY_ESW_FUNCTIONS); 1400 1401 err = mlx5_cmd_exec(dev, in, sizeof(in), out, outlen); 1402 if (!err) 1403 return out; 1404 1405 kvfree(out); 1406 return ERR_PTR(err); 1407 } 1408 1409 static void mlx5_eswitch_event_handlers_register(struct mlx5_eswitch *esw) 1410 { 1411 MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE); 1412 mlx5_eq_notifier_register(esw->dev, &esw->nb); 1413 1414 if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev)) { 1415 MLX5_NB_INIT(&esw->esw_funcs.nb, mlx5_esw_funcs_changed_handler, 1416 ESW_FUNCTIONS_CHANGED); 1417 mlx5_eq_notifier_register(esw->dev, &esw->esw_funcs.nb); 1418 } 1419 } 1420 1421 static void mlx5_eswitch_event_handlers_unregister(struct mlx5_eswitch *esw) 1422 { 1423 if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev)) 1424 mlx5_eq_notifier_unregister(esw->dev, &esw->esw_funcs.nb); 1425 1426 mlx5_eq_notifier_unregister(esw->dev, &esw->nb); 1427 1428 flush_workqueue(esw->work_queue); 1429 } 1430 1431 static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw) 1432 { 1433 struct mlx5_vport *vport; 1434 int i; 1435 1436 mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) { 1437 memset(&vport->qos, 0, sizeof(vport->qos)); 1438 memset(&vport->info, 0, sizeof(vport->info)); 1439 vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO; 1440 } 1441 } 1442 1443 /* Public E-Switch API */ 1444 #define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev)) 1445 1446 int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num, 1447 enum mlx5_eswitch_vport_event enabled_events) 1448 { 1449 int err; 1450 1451 err = mlx5_esw_vport_enable(esw, vport_num, enabled_events); 1452 if (err) 1453 return err; 1454 1455 err = esw_offloads_load_rep(esw, vport_num); 1456 if (err) 1457 goto err_rep; 1458 1459 return err; 1460 1461 err_rep: 1462 mlx5_esw_vport_disable(esw, vport_num); 1463 return err; 1464 } 1465 1466 void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num) 1467 { 1468 esw_offloads_unload_rep(esw, vport_num); 1469 mlx5_esw_vport_disable(esw, vport_num); 1470 } 1471 1472 void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs) 1473 { 1474 int i; 1475 1476 mlx5_esw_for_each_vf_vport_num_reverse(esw, i, num_vfs) 1477 mlx5_eswitch_unload_vport(esw, i); 1478 } 1479 1480 int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs, 1481 enum mlx5_eswitch_vport_event enabled_events) 1482 { 1483 int err; 1484 int i; 1485 1486 mlx5_esw_for_each_vf_vport_num(esw, i, num_vfs) { 1487 err = mlx5_eswitch_load_vport(esw, i, enabled_events); 1488 if (err) 1489 goto vf_err; 1490 } 1491 1492 return 0; 1493 1494 vf_err: 1495 mlx5_eswitch_unload_vf_vports(esw, i - 1); 1496 return err; 1497 } 1498 1499 static int host_pf_enable_hca(struct mlx5_core_dev *dev) 1500 { 1501 if (!mlx5_core_is_ecpf(dev)) 1502 return 0; 1503 1504 /* Once vport and representor are ready, take out the external host PF 1505 * out of initializing state. Enabling HCA clears the iser->initializing 1506 * bit and host PF driver loading can progress. 1507 */ 1508 return mlx5_cmd_host_pf_enable_hca(dev); 1509 } 1510 1511 static void host_pf_disable_hca(struct mlx5_core_dev *dev) 1512 { 1513 if (!mlx5_core_is_ecpf(dev)) 1514 return; 1515 1516 mlx5_cmd_host_pf_disable_hca(dev); 1517 } 1518 1519 /* mlx5_eswitch_enable_pf_vf_vports() enables vports of PF, ECPF and VFs 1520 * whichever are present on the eswitch. 1521 */ 1522 int 1523 mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw, 1524 enum mlx5_eswitch_vport_event enabled_events) 1525 { 1526 int ret; 1527 1528 /* Enable PF vport */ 1529 ret = mlx5_eswitch_load_vport(esw, MLX5_VPORT_PF, enabled_events); 1530 if (ret) 1531 return ret; 1532 1533 /* Enable external host PF HCA */ 1534 ret = host_pf_enable_hca(esw->dev); 1535 if (ret) 1536 goto pf_hca_err; 1537 1538 /* Enable ECPF vport */ 1539 if (mlx5_ecpf_vport_exists(esw->dev)) { 1540 ret = mlx5_eswitch_load_vport(esw, MLX5_VPORT_ECPF, enabled_events); 1541 if (ret) 1542 goto ecpf_err; 1543 } 1544 1545 /* Enable VF vports */ 1546 ret = mlx5_eswitch_load_vf_vports(esw, esw->esw_funcs.num_vfs, 1547 enabled_events); 1548 if (ret) 1549 goto vf_err; 1550 return 0; 1551 1552 vf_err: 1553 if (mlx5_ecpf_vport_exists(esw->dev)) 1554 mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF); 1555 ecpf_err: 1556 host_pf_disable_hca(esw->dev); 1557 pf_hca_err: 1558 mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF); 1559 return ret; 1560 } 1561 1562 /* mlx5_eswitch_disable_pf_vf_vports() disables vports of PF, ECPF and VFs 1563 * whichever are previously enabled on the eswitch. 1564 */ 1565 void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw) 1566 { 1567 mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs); 1568 1569 if (mlx5_ecpf_vport_exists(esw->dev)) 1570 mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF); 1571 1572 host_pf_disable_hca(esw->dev); 1573 mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF); 1574 } 1575 1576 static void mlx5_eswitch_get_devlink_param(struct mlx5_eswitch *esw) 1577 { 1578 struct devlink *devlink = priv_to_devlink(esw->dev); 1579 union devlink_param_value val; 1580 int err; 1581 1582 err = devlink_param_driverinit_value_get(devlink, 1583 MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM, 1584 &val); 1585 if (!err) { 1586 esw->params.large_group_num = val.vu32; 1587 } else { 1588 esw_warn(esw->dev, 1589 "Devlink can't get param fdb_large_groups, uses default (%d).\n", 1590 ESW_OFFLOADS_DEFAULT_NUM_GROUPS); 1591 esw->params.large_group_num = ESW_OFFLOADS_DEFAULT_NUM_GROUPS; 1592 } 1593 } 1594 1595 static void 1596 mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, int num_vfs) 1597 { 1598 const u32 *out; 1599 1600 WARN_ON_ONCE(esw->mode != MLX5_ESWITCH_NONE); 1601 1602 if (num_vfs < 0) 1603 return; 1604 1605 if (!mlx5_core_is_ecpf_esw_manager(esw->dev)) { 1606 esw->esw_funcs.num_vfs = num_vfs; 1607 return; 1608 } 1609 1610 out = mlx5_esw_query_functions(esw->dev); 1611 if (IS_ERR(out)) 1612 return; 1613 1614 esw->esw_funcs.num_vfs = MLX5_GET(query_esw_functions_out, out, 1615 host_params_context.host_num_of_vfs); 1616 kvfree(out); 1617 } 1618 1619 static void mlx5_esw_mode_change_notify(struct mlx5_eswitch *esw, u16 mode) 1620 { 1621 struct mlx5_esw_event_info info = {}; 1622 1623 info.new_mode = mode; 1624 1625 blocking_notifier_call_chain(&esw->n_head, 0, &info); 1626 } 1627 1628 /** 1629 * mlx5_eswitch_enable_locked - Enable eswitch 1630 * @esw: Pointer to eswitch 1631 * @mode: Eswitch mode to enable 1632 * @num_vfs: Enable eswitch for given number of VFs. This is optional. 1633 * Valid value are 0, > 0 and MLX5_ESWITCH_IGNORE_NUM_VFS. 1634 * Caller should pass num_vfs > 0 when enabling eswitch for 1635 * vf vports. Caller should pass num_vfs = 0, when eswitch 1636 * is enabled without sriov VFs or when caller 1637 * is unaware of the sriov state of the host PF on ECPF based 1638 * eswitch. Caller should pass < 0 when num_vfs should be 1639 * completely ignored. This is typically the case when eswitch 1640 * is enabled without sriov regardless of PF/ECPF system. 1641 * mlx5_eswitch_enable_locked() Enables eswitch in either legacy or offloads 1642 * mode. If num_vfs >=0 is provided, it setup VF related eswitch vports. 1643 * It returns 0 on success or error code on failure. 1644 */ 1645 int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs) 1646 { 1647 int err; 1648 1649 lockdep_assert_held(&esw->mode_lock); 1650 1651 if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) { 1652 esw_warn(esw->dev, "FDB is not supported, aborting ...\n"); 1653 return -EOPNOTSUPP; 1654 } 1655 1656 if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support)) 1657 esw_warn(esw->dev, "ingress ACL is not supported by FW\n"); 1658 1659 if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support)) 1660 esw_warn(esw->dev, "engress ACL is not supported by FW\n"); 1661 1662 mlx5_eswitch_get_devlink_param(esw); 1663 1664 mlx5_eswitch_update_num_of_vfs(esw, num_vfs); 1665 1666 esw_create_tsar(esw); 1667 1668 esw->mode = mode; 1669 1670 mlx5_lag_update(esw->dev); 1671 1672 if (mode == MLX5_ESWITCH_LEGACY) { 1673 err = esw_legacy_enable(esw); 1674 } else { 1675 mlx5_rescan_drivers(esw->dev); 1676 err = esw_offloads_enable(esw); 1677 } 1678 1679 if (err) 1680 goto abort; 1681 1682 mlx5_eswitch_event_handlers_register(esw); 1683 1684 esw_info(esw->dev, "Enable: mode(%s), nvfs(%d), active vports(%d)\n", 1685 mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS", 1686 esw->esw_funcs.num_vfs, esw->enabled_vports); 1687 1688 mlx5_esw_mode_change_notify(esw, mode); 1689 1690 return 0; 1691 1692 abort: 1693 esw->mode = MLX5_ESWITCH_NONE; 1694 1695 if (mode == MLX5_ESWITCH_OFFLOADS) 1696 mlx5_rescan_drivers(esw->dev); 1697 1698 esw_destroy_tsar(esw); 1699 return err; 1700 } 1701 1702 /** 1703 * mlx5_eswitch_enable - Enable eswitch 1704 * @esw: Pointer to eswitch 1705 * @num_vfs: Enable eswitch swich for given number of VFs. 1706 * Caller must pass num_vfs > 0 when enabling eswitch for 1707 * vf vports. 1708 * mlx5_eswitch_enable() returns 0 on success or error code on failure. 1709 */ 1710 int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) 1711 { 1712 int ret; 1713 1714 if (!ESW_ALLOWED(esw)) 1715 return 0; 1716 1717 mutex_lock(&esw->mode_lock); 1718 if (esw->mode == MLX5_ESWITCH_NONE) { 1719 ret = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY, num_vfs); 1720 } else { 1721 enum mlx5_eswitch_vport_event vport_events; 1722 1723 vport_events = (esw->mode == MLX5_ESWITCH_LEGACY) ? 1724 MLX5_LEGACY_SRIOV_VPORT_EVENTS : MLX5_VPORT_UC_ADDR_CHANGE; 1725 ret = mlx5_eswitch_load_vf_vports(esw, num_vfs, vport_events); 1726 if (!ret) 1727 esw->esw_funcs.num_vfs = num_vfs; 1728 } 1729 mutex_unlock(&esw->mode_lock); 1730 return ret; 1731 } 1732 1733 void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf) 1734 { 1735 int old_mode; 1736 1737 lockdep_assert_held_write(&esw->mode_lock); 1738 1739 if (esw->mode == MLX5_ESWITCH_NONE) 1740 return; 1741 1742 esw_info(esw->dev, "Disable: mode(%s), nvfs(%d), active vports(%d)\n", 1743 esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS", 1744 esw->esw_funcs.num_vfs, esw->enabled_vports); 1745 1746 /* Notify eswitch users that it is exiting from current mode. 1747 * So that it can do necessary cleanup before the eswitch is disabled. 1748 */ 1749 mlx5_esw_mode_change_notify(esw, MLX5_ESWITCH_NONE); 1750 1751 mlx5_eswitch_event_handlers_unregister(esw); 1752 1753 if (esw->mode == MLX5_ESWITCH_LEGACY) 1754 esw_legacy_disable(esw); 1755 else if (esw->mode == MLX5_ESWITCH_OFFLOADS) 1756 esw_offloads_disable(esw); 1757 1758 old_mode = esw->mode; 1759 esw->mode = MLX5_ESWITCH_NONE; 1760 1761 mlx5_lag_update(esw->dev); 1762 1763 if (old_mode == MLX5_ESWITCH_OFFLOADS) 1764 mlx5_rescan_drivers(esw->dev); 1765 1766 esw_destroy_tsar(esw); 1767 1768 if (clear_vf) 1769 mlx5_eswitch_clear_vf_vports_info(esw); 1770 } 1771 1772 void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) 1773 { 1774 if (!ESW_ALLOWED(esw)) 1775 return; 1776 1777 mutex_lock(&esw->mode_lock); 1778 mlx5_eswitch_disable_locked(esw, clear_vf); 1779 esw->esw_funcs.num_vfs = 0; 1780 mutex_unlock(&esw->mode_lock); 1781 } 1782 1783 int mlx5_eswitch_init(struct mlx5_core_dev *dev) 1784 { 1785 struct mlx5_eswitch *esw; 1786 struct mlx5_vport *vport; 1787 int total_vports; 1788 int err, i; 1789 1790 if (!MLX5_VPORT_MANAGER(dev)) 1791 return 0; 1792 1793 total_vports = mlx5_eswitch_get_total_vports(dev); 1794 1795 esw_info(dev, 1796 "Total vports %d, per vport: max uc(%d) max mc(%d)\n", 1797 total_vports, 1798 MLX5_MAX_UC_PER_VPORT(dev), 1799 MLX5_MAX_MC_PER_VPORT(dev)); 1800 1801 esw = kzalloc(sizeof(*esw), GFP_KERNEL); 1802 if (!esw) 1803 return -ENOMEM; 1804 1805 esw->dev = dev; 1806 esw->manager_vport = mlx5_eswitch_manager_vport(dev); 1807 esw->first_host_vport = mlx5_eswitch_first_host_vport_num(dev); 1808 1809 esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq"); 1810 if (!esw->work_queue) { 1811 err = -ENOMEM; 1812 goto abort; 1813 } 1814 1815 esw->vports = kcalloc(total_vports, sizeof(struct mlx5_vport), 1816 GFP_KERNEL); 1817 if (!esw->vports) { 1818 err = -ENOMEM; 1819 goto abort; 1820 } 1821 1822 esw->total_vports = total_vports; 1823 1824 err = esw_offloads_init_reps(esw); 1825 if (err) 1826 goto abort; 1827 1828 mutex_init(&esw->offloads.encap_tbl_lock); 1829 hash_init(esw->offloads.encap_tbl); 1830 mutex_init(&esw->offloads.decap_tbl_lock); 1831 hash_init(esw->offloads.decap_tbl); 1832 mlx5e_mod_hdr_tbl_init(&esw->offloads.mod_hdr); 1833 atomic64_set(&esw->offloads.num_flows, 0); 1834 ida_init(&esw->offloads.vport_metadata_ida); 1835 xa_init_flags(&esw->offloads.vhca_map, XA_FLAGS_ALLOC); 1836 mutex_init(&esw->state_lock); 1837 mutex_init(&esw->mode_lock); 1838 1839 mlx5_esw_for_all_vports(esw, i, vport) { 1840 vport->vport = mlx5_eswitch_index_to_vport_num(esw, i); 1841 vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO; 1842 vport->dev = dev; 1843 INIT_WORK(&vport->vport_change_handler, 1844 esw_vport_change_handler); 1845 } 1846 1847 esw->enabled_vports = 0; 1848 esw->mode = MLX5_ESWITCH_NONE; 1849 esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE; 1850 1851 dev->priv.eswitch = esw; 1852 BLOCKING_INIT_NOTIFIER_HEAD(&esw->n_head); 1853 return 0; 1854 abort: 1855 if (esw->work_queue) 1856 destroy_workqueue(esw->work_queue); 1857 esw_offloads_cleanup_reps(esw); 1858 kfree(esw->vports); 1859 kfree(esw); 1860 return err; 1861 } 1862 1863 void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) 1864 { 1865 if (!esw || !MLX5_VPORT_MANAGER(esw->dev)) 1866 return; 1867 1868 esw_info(esw->dev, "cleanup\n"); 1869 1870 esw->dev->priv.eswitch = NULL; 1871 destroy_workqueue(esw->work_queue); 1872 esw_offloads_cleanup_reps(esw); 1873 mutex_destroy(&esw->mode_lock); 1874 mutex_destroy(&esw->state_lock); 1875 WARN_ON(!xa_empty(&esw->offloads.vhca_map)); 1876 xa_destroy(&esw->offloads.vhca_map); 1877 ida_destroy(&esw->offloads.vport_metadata_ida); 1878 mlx5e_mod_hdr_tbl_destroy(&esw->offloads.mod_hdr); 1879 mutex_destroy(&esw->offloads.encap_tbl_lock); 1880 mutex_destroy(&esw->offloads.decap_tbl_lock); 1881 kfree(esw->vports); 1882 kfree(esw); 1883 } 1884 1885 /* Vport Administration */ 1886 static int 1887 mlx5_esw_set_vport_mac_locked(struct mlx5_eswitch *esw, 1888 struct mlx5_vport *evport, const u8 *mac) 1889 { 1890 u16 vport_num = evport->vport; 1891 u64 node_guid; 1892 int err = 0; 1893 1894 if (is_multicast_ether_addr(mac)) 1895 return -EINVAL; 1896 1897 if (evport->info.spoofchk && !is_valid_ether_addr(mac)) 1898 mlx5_core_warn(esw->dev, 1899 "Set invalid MAC while spoofchk is on, vport(%d)\n", 1900 vport_num); 1901 1902 err = mlx5_modify_nic_vport_mac_address(esw->dev, vport_num, mac); 1903 if (err) { 1904 mlx5_core_warn(esw->dev, 1905 "Failed to mlx5_modify_nic_vport_mac vport(%d) err=(%d)\n", 1906 vport_num, err); 1907 return err; 1908 } 1909 1910 node_guid_gen_from_mac(&node_guid, mac); 1911 err = mlx5_modify_nic_vport_node_guid(esw->dev, vport_num, node_guid); 1912 if (err) 1913 mlx5_core_warn(esw->dev, 1914 "Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n", 1915 vport_num, err); 1916 1917 ether_addr_copy(evport->info.mac, mac); 1918 evport->info.node_guid = node_guid; 1919 if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY) 1920 err = esw_acl_ingress_lgcy_setup(esw, evport); 1921 1922 return err; 1923 } 1924 1925 int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, 1926 u16 vport, const u8 *mac) 1927 { 1928 struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); 1929 int err = 0; 1930 1931 if (IS_ERR(evport)) 1932 return PTR_ERR(evport); 1933 1934 mutex_lock(&esw->state_lock); 1935 err = mlx5_esw_set_vport_mac_locked(esw, evport, mac); 1936 mutex_unlock(&esw->state_lock); 1937 return err; 1938 } 1939 1940 static bool 1941 is_port_function_supported(const struct mlx5_eswitch *esw, u16 vport_num) 1942 { 1943 return vport_num == MLX5_VPORT_PF || 1944 mlx5_eswitch_is_vf_vport(esw, vport_num) || 1945 mlx5_esw_is_sf_vport(esw, vport_num); 1946 } 1947 1948 int mlx5_devlink_port_function_hw_addr_get(struct devlink *devlink, 1949 struct devlink_port *port, 1950 u8 *hw_addr, int *hw_addr_len, 1951 struct netlink_ext_ack *extack) 1952 { 1953 struct mlx5_eswitch *esw; 1954 struct mlx5_vport *vport; 1955 int err = -EOPNOTSUPP; 1956 u16 vport_num; 1957 1958 esw = mlx5_devlink_eswitch_get(devlink); 1959 if (IS_ERR(esw)) 1960 return PTR_ERR(esw); 1961 1962 vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index); 1963 if (!is_port_function_supported(esw, vport_num)) 1964 return -EOPNOTSUPP; 1965 1966 vport = mlx5_eswitch_get_vport(esw, vport_num); 1967 if (IS_ERR(vport)) { 1968 NL_SET_ERR_MSG_MOD(extack, "Invalid port"); 1969 return PTR_ERR(vport); 1970 } 1971 1972 mutex_lock(&esw->state_lock); 1973 if (vport->enabled) { 1974 ether_addr_copy(hw_addr, vport->info.mac); 1975 *hw_addr_len = ETH_ALEN; 1976 err = 0; 1977 } 1978 mutex_unlock(&esw->state_lock); 1979 return err; 1980 } 1981 1982 int mlx5_devlink_port_function_hw_addr_set(struct devlink *devlink, 1983 struct devlink_port *port, 1984 const u8 *hw_addr, int hw_addr_len, 1985 struct netlink_ext_ack *extack) 1986 { 1987 struct mlx5_eswitch *esw; 1988 struct mlx5_vport *vport; 1989 int err = -EOPNOTSUPP; 1990 u16 vport_num; 1991 1992 esw = mlx5_devlink_eswitch_get(devlink); 1993 if (IS_ERR(esw)) { 1994 NL_SET_ERR_MSG_MOD(extack, "Eswitch doesn't support set hw_addr"); 1995 return PTR_ERR(esw); 1996 } 1997 1998 vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index); 1999 if (!is_port_function_supported(esw, vport_num)) { 2000 NL_SET_ERR_MSG_MOD(extack, "Port doesn't support set hw_addr"); 2001 return -EINVAL; 2002 } 2003 vport = mlx5_eswitch_get_vport(esw, vport_num); 2004 if (IS_ERR(vport)) { 2005 NL_SET_ERR_MSG_MOD(extack, "Invalid port"); 2006 return PTR_ERR(vport); 2007 } 2008 2009 mutex_lock(&esw->state_lock); 2010 if (vport->enabled) 2011 err = mlx5_esw_set_vport_mac_locked(esw, vport, hw_addr); 2012 else 2013 NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled"); 2014 mutex_unlock(&esw->state_lock); 2015 return err; 2016 } 2017 2018 int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, 2019 u16 vport, int link_state) 2020 { 2021 struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); 2022 int opmod = MLX5_VPORT_STATE_OP_MOD_ESW_VPORT; 2023 int other_vport = 1; 2024 int err = 0; 2025 2026 if (!ESW_ALLOWED(esw)) 2027 return -EPERM; 2028 if (IS_ERR(evport)) 2029 return PTR_ERR(evport); 2030 2031 if (vport == MLX5_VPORT_UPLINK) { 2032 opmod = MLX5_VPORT_STATE_OP_MOD_UPLINK; 2033 other_vport = 0; 2034 vport = 0; 2035 } 2036 mutex_lock(&esw->state_lock); 2037 2038 err = mlx5_modify_vport_admin_state(esw->dev, opmod, vport, other_vport, link_state); 2039 if (err) { 2040 mlx5_core_warn(esw->dev, "Failed to set vport %d link state, opmod = %d, err = %d", 2041 vport, opmod, err); 2042 goto unlock; 2043 } 2044 2045 evport->info.link_state = link_state; 2046 2047 unlock: 2048 mutex_unlock(&esw->state_lock); 2049 return err; 2050 } 2051 2052 int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, 2053 u16 vport, struct ifla_vf_info *ivi) 2054 { 2055 struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); 2056 2057 if (IS_ERR(evport)) 2058 return PTR_ERR(evport); 2059 2060 memset(ivi, 0, sizeof(*ivi)); 2061 ivi->vf = vport - 1; 2062 2063 mutex_lock(&esw->state_lock); 2064 ether_addr_copy(ivi->mac, evport->info.mac); 2065 ivi->linkstate = evport->info.link_state; 2066 ivi->vlan = evport->info.vlan; 2067 ivi->qos = evport->info.qos; 2068 ivi->spoofchk = evport->info.spoofchk; 2069 ivi->trusted = evport->info.trusted; 2070 ivi->min_tx_rate = evport->info.min_rate; 2071 ivi->max_tx_rate = evport->info.max_rate; 2072 mutex_unlock(&esw->state_lock); 2073 2074 return 0; 2075 } 2076 2077 int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, 2078 u16 vport, u16 vlan, u8 qos, u8 set_flags) 2079 { 2080 struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); 2081 int err = 0; 2082 2083 if (IS_ERR(evport)) 2084 return PTR_ERR(evport); 2085 if (vlan > 4095 || qos > 7) 2086 return -EINVAL; 2087 2088 err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags); 2089 if (err) 2090 return err; 2091 2092 evport->info.vlan = vlan; 2093 evport->info.qos = qos; 2094 if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY) { 2095 err = esw_acl_ingress_lgcy_setup(esw, evport); 2096 if (err) 2097 return err; 2098 err = esw_acl_egress_lgcy_setup(esw, evport); 2099 } 2100 2101 return err; 2102 } 2103 2104 int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, 2105 u16 vport, u16 vlan, u8 qos) 2106 { 2107 u8 set_flags = 0; 2108 int err; 2109 2110 if (!ESW_ALLOWED(esw)) 2111 return -EPERM; 2112 2113 if (vlan || qos) 2114 set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT; 2115 2116 mutex_lock(&esw->state_lock); 2117 err = __mlx5_eswitch_set_vport_vlan(esw, vport, vlan, qos, set_flags); 2118 mutex_unlock(&esw->state_lock); 2119 2120 return err; 2121 } 2122 2123 int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw, 2124 u16 vport, bool spoofchk) 2125 { 2126 struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); 2127 bool pschk; 2128 int err = 0; 2129 2130 if (!ESW_ALLOWED(esw)) 2131 return -EPERM; 2132 if (IS_ERR(evport)) 2133 return PTR_ERR(evport); 2134 2135 mutex_lock(&esw->state_lock); 2136 pschk = evport->info.spoofchk; 2137 evport->info.spoofchk = spoofchk; 2138 if (pschk && !is_valid_ether_addr(evport->info.mac)) 2139 mlx5_core_warn(esw->dev, 2140 "Spoofchk in set while MAC is invalid, vport(%d)\n", 2141 evport->vport); 2142 if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY) 2143 err = esw_acl_ingress_lgcy_setup(esw, evport); 2144 if (err) 2145 evport->info.spoofchk = pschk; 2146 mutex_unlock(&esw->state_lock); 2147 2148 return err; 2149 } 2150 2151 static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw) 2152 { 2153 if (esw->fdb_table.legacy.vepa_uplink_rule) 2154 mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_uplink_rule); 2155 2156 if (esw->fdb_table.legacy.vepa_star_rule) 2157 mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_star_rule); 2158 2159 esw->fdb_table.legacy.vepa_uplink_rule = NULL; 2160 esw->fdb_table.legacy.vepa_star_rule = NULL; 2161 } 2162 2163 static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw, 2164 u8 setting) 2165 { 2166 struct mlx5_flow_destination dest = {}; 2167 struct mlx5_flow_act flow_act = {}; 2168 struct mlx5_flow_handle *flow_rule; 2169 struct mlx5_flow_spec *spec; 2170 int err = 0; 2171 void *misc; 2172 2173 if (!setting) { 2174 esw_cleanup_vepa_rules(esw); 2175 return 0; 2176 } 2177 2178 if (esw->fdb_table.legacy.vepa_uplink_rule) 2179 return 0; 2180 2181 spec = kvzalloc(sizeof(*spec), GFP_KERNEL); 2182 if (!spec) 2183 return -ENOMEM; 2184 2185 /* Uplink rule forward uplink traffic to FDB */ 2186 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); 2187 MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_UPLINK); 2188 2189 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); 2190 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); 2191 2192 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; 2193 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; 2194 dest.ft = esw->fdb_table.legacy.fdb; 2195 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 2196 flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, spec, 2197 &flow_act, &dest, 1); 2198 if (IS_ERR(flow_rule)) { 2199 err = PTR_ERR(flow_rule); 2200 goto out; 2201 } else { 2202 esw->fdb_table.legacy.vepa_uplink_rule = flow_rule; 2203 } 2204 2205 /* Star rule to forward all traffic to uplink vport */ 2206 memset(&dest, 0, sizeof(dest)); 2207 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; 2208 dest.vport.num = MLX5_VPORT_UPLINK; 2209 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 2210 flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, NULL, 2211 &flow_act, &dest, 1); 2212 if (IS_ERR(flow_rule)) { 2213 err = PTR_ERR(flow_rule); 2214 goto out; 2215 } else { 2216 esw->fdb_table.legacy.vepa_star_rule = flow_rule; 2217 } 2218 2219 out: 2220 kvfree(spec); 2221 if (err) 2222 esw_cleanup_vepa_rules(esw); 2223 return err; 2224 } 2225 2226 int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting) 2227 { 2228 int err = 0; 2229 2230 if (!esw) 2231 return -EOPNOTSUPP; 2232 2233 if (!ESW_ALLOWED(esw)) 2234 return -EPERM; 2235 2236 mutex_lock(&esw->state_lock); 2237 if (esw->mode != MLX5_ESWITCH_LEGACY) { 2238 err = -EOPNOTSUPP; 2239 goto out; 2240 } 2241 2242 err = _mlx5_eswitch_set_vepa_locked(esw, setting); 2243 2244 out: 2245 mutex_unlock(&esw->state_lock); 2246 return err; 2247 } 2248 2249 int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting) 2250 { 2251 if (!esw) 2252 return -EOPNOTSUPP; 2253 2254 if (!ESW_ALLOWED(esw)) 2255 return -EPERM; 2256 2257 if (esw->mode != MLX5_ESWITCH_LEGACY) 2258 return -EOPNOTSUPP; 2259 2260 *setting = esw->fdb_table.legacy.vepa_uplink_rule ? 1 : 0; 2261 return 0; 2262 } 2263 2264 int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw, 2265 u16 vport, bool setting) 2266 { 2267 struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); 2268 2269 if (!ESW_ALLOWED(esw)) 2270 return -EPERM; 2271 if (IS_ERR(evport)) 2272 return PTR_ERR(evport); 2273 2274 mutex_lock(&esw->state_lock); 2275 evport->info.trusted = setting; 2276 if (evport->enabled) 2277 esw_vport_change_handle_locked(evport); 2278 mutex_unlock(&esw->state_lock); 2279 2280 return 0; 2281 } 2282 2283 static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw) 2284 { 2285 u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share); 2286 struct mlx5_vport *evport; 2287 u32 max_guarantee = 0; 2288 int i; 2289 2290 mlx5_esw_for_all_vports(esw, i, evport) { 2291 if (!evport->enabled || evport->info.min_rate < max_guarantee) 2292 continue; 2293 max_guarantee = evport->info.min_rate; 2294 } 2295 2296 if (max_guarantee) 2297 return max_t(u32, max_guarantee / fw_max_bw_share, 1); 2298 return 0; 2299 } 2300 2301 static int normalize_vports_min_rate(struct mlx5_eswitch *esw) 2302 { 2303 u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share); 2304 u32 divider = calculate_vports_min_rate_divider(esw); 2305 struct mlx5_vport *evport; 2306 u32 vport_max_rate; 2307 u32 vport_min_rate; 2308 u32 bw_share; 2309 int err; 2310 int i; 2311 2312 mlx5_esw_for_all_vports(esw, i, evport) { 2313 if (!evport->enabled) 2314 continue; 2315 vport_min_rate = evport->info.min_rate; 2316 vport_max_rate = evport->info.max_rate; 2317 bw_share = 0; 2318 2319 if (divider) 2320 bw_share = MLX5_RATE_TO_BW_SHARE(vport_min_rate, 2321 divider, 2322 fw_max_bw_share); 2323 2324 if (bw_share == evport->qos.bw_share) 2325 continue; 2326 2327 err = esw_vport_qos_config(esw, evport, vport_max_rate, 2328 bw_share); 2329 if (!err) 2330 evport->qos.bw_share = bw_share; 2331 else 2332 return err; 2333 } 2334 2335 return 0; 2336 } 2337 2338 int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport, 2339 u32 max_rate, u32 min_rate) 2340 { 2341 struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); 2342 u32 fw_max_bw_share; 2343 u32 previous_min_rate; 2344 bool min_rate_supported; 2345 bool max_rate_supported; 2346 int err = 0; 2347 2348 if (!ESW_ALLOWED(esw)) 2349 return -EPERM; 2350 if (IS_ERR(evport)) 2351 return PTR_ERR(evport); 2352 2353 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share); 2354 min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) && 2355 fw_max_bw_share >= MLX5_MIN_BW_SHARE; 2356 max_rate_supported = MLX5_CAP_QOS(esw->dev, esw_rate_limit); 2357 2358 if ((min_rate && !min_rate_supported) || (max_rate && !max_rate_supported)) 2359 return -EOPNOTSUPP; 2360 2361 mutex_lock(&esw->state_lock); 2362 2363 if (min_rate == evport->info.min_rate) 2364 goto set_max_rate; 2365 2366 previous_min_rate = evport->info.min_rate; 2367 evport->info.min_rate = min_rate; 2368 err = normalize_vports_min_rate(esw); 2369 if (err) { 2370 evport->info.min_rate = previous_min_rate; 2371 goto unlock; 2372 } 2373 2374 set_max_rate: 2375 if (max_rate == evport->info.max_rate) 2376 goto unlock; 2377 2378 err = esw_vport_qos_config(esw, evport, max_rate, evport->qos.bw_share); 2379 if (!err) 2380 evport->info.max_rate = max_rate; 2381 2382 unlock: 2383 mutex_unlock(&esw->state_lock); 2384 return err; 2385 } 2386 2387 static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev, 2388 struct mlx5_vport *vport, 2389 struct mlx5_vport_drop_stats *stats) 2390 { 2391 struct mlx5_eswitch *esw = dev->priv.eswitch; 2392 u64 rx_discard_vport_down, tx_discard_vport_down; 2393 u64 bytes = 0; 2394 int err = 0; 2395 2396 if (esw->mode != MLX5_ESWITCH_LEGACY) 2397 return 0; 2398 2399 mutex_lock(&esw->state_lock); 2400 if (!vport->enabled) 2401 goto unlock; 2402 2403 if (!IS_ERR_OR_NULL(vport->egress.legacy.drop_counter)) 2404 mlx5_fc_query(dev, vport->egress.legacy.drop_counter, 2405 &stats->rx_dropped, &bytes); 2406 2407 if (vport->ingress.legacy.drop_counter) 2408 mlx5_fc_query(dev, vport->ingress.legacy.drop_counter, 2409 &stats->tx_dropped, &bytes); 2410 2411 if (!MLX5_CAP_GEN(dev, receive_discard_vport_down) && 2412 !MLX5_CAP_GEN(dev, transmit_discard_vport_down)) 2413 goto unlock; 2414 2415 err = mlx5_query_vport_down_stats(dev, vport->vport, 1, 2416 &rx_discard_vport_down, 2417 &tx_discard_vport_down); 2418 if (err) 2419 goto unlock; 2420 2421 if (MLX5_CAP_GEN(dev, receive_discard_vport_down)) 2422 stats->rx_dropped += rx_discard_vport_down; 2423 if (MLX5_CAP_GEN(dev, transmit_discard_vport_down)) 2424 stats->tx_dropped += tx_discard_vport_down; 2425 2426 unlock: 2427 mutex_unlock(&esw->state_lock); 2428 return err; 2429 } 2430 2431 int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, 2432 u16 vport_num, 2433 struct ifla_vf_stats *vf_stats) 2434 { 2435 struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num); 2436 int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out); 2437 u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {}; 2438 struct mlx5_vport_drop_stats stats = {}; 2439 int err = 0; 2440 u32 *out; 2441 2442 if (IS_ERR(vport)) 2443 return PTR_ERR(vport); 2444 2445 out = kvzalloc(outlen, GFP_KERNEL); 2446 if (!out) 2447 return -ENOMEM; 2448 2449 MLX5_SET(query_vport_counter_in, in, opcode, 2450 MLX5_CMD_OP_QUERY_VPORT_COUNTER); 2451 MLX5_SET(query_vport_counter_in, in, op_mod, 0); 2452 MLX5_SET(query_vport_counter_in, in, vport_number, vport->vport); 2453 MLX5_SET(query_vport_counter_in, in, other_vport, 1); 2454 2455 err = mlx5_cmd_exec_inout(esw->dev, query_vport_counter, in, out); 2456 if (err) 2457 goto free_out; 2458 2459 #define MLX5_GET_CTR(p, x) \ 2460 MLX5_GET64(query_vport_counter_out, p, x) 2461 2462 memset(vf_stats, 0, sizeof(*vf_stats)); 2463 vf_stats->rx_packets = 2464 MLX5_GET_CTR(out, received_eth_unicast.packets) + 2465 MLX5_GET_CTR(out, received_ib_unicast.packets) + 2466 MLX5_GET_CTR(out, received_eth_multicast.packets) + 2467 MLX5_GET_CTR(out, received_ib_multicast.packets) + 2468 MLX5_GET_CTR(out, received_eth_broadcast.packets); 2469 2470 vf_stats->rx_bytes = 2471 MLX5_GET_CTR(out, received_eth_unicast.octets) + 2472 MLX5_GET_CTR(out, received_ib_unicast.octets) + 2473 MLX5_GET_CTR(out, received_eth_multicast.octets) + 2474 MLX5_GET_CTR(out, received_ib_multicast.octets) + 2475 MLX5_GET_CTR(out, received_eth_broadcast.octets); 2476 2477 vf_stats->tx_packets = 2478 MLX5_GET_CTR(out, transmitted_eth_unicast.packets) + 2479 MLX5_GET_CTR(out, transmitted_ib_unicast.packets) + 2480 MLX5_GET_CTR(out, transmitted_eth_multicast.packets) + 2481 MLX5_GET_CTR(out, transmitted_ib_multicast.packets) + 2482 MLX5_GET_CTR(out, transmitted_eth_broadcast.packets); 2483 2484 vf_stats->tx_bytes = 2485 MLX5_GET_CTR(out, transmitted_eth_unicast.octets) + 2486 MLX5_GET_CTR(out, transmitted_ib_unicast.octets) + 2487 MLX5_GET_CTR(out, transmitted_eth_multicast.octets) + 2488 MLX5_GET_CTR(out, transmitted_ib_multicast.octets) + 2489 MLX5_GET_CTR(out, transmitted_eth_broadcast.octets); 2490 2491 vf_stats->multicast = 2492 MLX5_GET_CTR(out, received_eth_multicast.packets) + 2493 MLX5_GET_CTR(out, received_ib_multicast.packets); 2494 2495 vf_stats->broadcast = 2496 MLX5_GET_CTR(out, received_eth_broadcast.packets); 2497 2498 err = mlx5_eswitch_query_vport_drop_stats(esw->dev, vport, &stats); 2499 if (err) 2500 goto free_out; 2501 vf_stats->rx_dropped = stats.rx_dropped; 2502 vf_stats->tx_dropped = stats.tx_dropped; 2503 2504 free_out: 2505 kvfree(out); 2506 return err; 2507 } 2508 2509 u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev) 2510 { 2511 struct mlx5_eswitch *esw = dev->priv.eswitch; 2512 2513 return ESW_ALLOWED(esw) ? esw->mode : MLX5_ESWITCH_NONE; 2514 } 2515 EXPORT_SYMBOL_GPL(mlx5_eswitch_mode); 2516 2517 enum devlink_eswitch_encap_mode 2518 mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev) 2519 { 2520 struct mlx5_eswitch *esw; 2521 2522 esw = dev->priv.eswitch; 2523 return ESW_ALLOWED(esw) ? esw->offloads.encap : 2524 DEVLINK_ESWITCH_ENCAP_MODE_NONE; 2525 } 2526 EXPORT_SYMBOL(mlx5_eswitch_get_encap_mode); 2527 2528 bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) 2529 { 2530 if ((dev0->priv.eswitch->mode == MLX5_ESWITCH_NONE && 2531 dev1->priv.eswitch->mode == MLX5_ESWITCH_NONE) || 2532 (dev0->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS && 2533 dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS)) 2534 return true; 2535 2536 return false; 2537 } 2538 2539 bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0, 2540 struct mlx5_core_dev *dev1) 2541 { 2542 return (dev0->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS && 2543 dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS); 2544 } 2545 2546 int mlx5_esw_event_notifier_register(struct mlx5_eswitch *esw, struct notifier_block *nb) 2547 { 2548 return blocking_notifier_chain_register(&esw->n_head, nb); 2549 } 2550 2551 void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch *esw, struct notifier_block *nb) 2552 { 2553 blocking_notifier_chain_unregister(&esw->n_head, nb); 2554 } 2555