1 /* 2 * Copyright (c) 2016, Mellanox Technologies. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 31 */ 32 33 #include <linux/etherdevice.h> 34 #include <linux/mlx5/driver.h> 35 #include <linux/mlx5/mlx5_ifc.h> 36 #include <linux/mlx5/vport.h> 37 #include <linux/mlx5/fs.h> 38 #include "mlx5_core.h" 39 #include "eswitch.h" 40 #include "en.h" 41 #include "fs_core.h" 42 #include "lib/devcom.h" 43 #include "ecpf.h" 44 #include "lib/eq.h" 45 46 enum { 47 FDB_FAST_PATH = 0, 48 FDB_SLOW_PATH 49 }; 50 51 /* There are two match-all miss flows, one for unicast dst mac and 52 * one for multicast. 53 */ 54 #define MLX5_ESW_MISS_FLOWS (2) 55 56 #define fdb_prio_table(esw, chain, prio, level) \ 57 (esw)->fdb_table.offloads.fdb_prio[(chain)][(prio)][(level)] 58 59 #define UPLINK_REP_INDEX 0 60 61 /* The rep getter/iterator are only valid after esw->total_vports 62 * and vport->vport are initialized in mlx5_eswitch_init. 63 */ 64 #define mlx5_esw_for_all_reps(esw, i, rep) \ 65 for ((i) = MLX5_VPORT_PF; \ 66 (rep) = &(esw)->offloads.vport_reps[i], \ 67 (i) < (esw)->total_vports; (i)++) 68 69 #define mlx5_esw_for_each_vf_rep(esw, i, rep, nvfs) \ 70 for ((i) = MLX5_VPORT_FIRST_VF; \ 71 (rep) = &(esw)->offloads.vport_reps[i], \ 72 (i) <= (nvfs); (i)++) 73 74 #define mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, nvfs) \ 75 for ((i) = (nvfs); \ 76 (rep) = &(esw)->offloads.vport_reps[i], \ 77 (i) >= MLX5_VPORT_FIRST_VF; (i)--) 78 79 #define mlx5_esw_for_each_vf_vport(esw, vport, nvfs) \ 80 for ((vport) = MLX5_VPORT_FIRST_VF; \ 81 (vport) <= (nvfs); (vport)++) 82 83 #define mlx5_esw_for_each_vf_vport_reverse(esw, vport, nvfs) \ 84 for ((vport) = (nvfs); \ 85 (vport) >= MLX5_VPORT_FIRST_VF; (vport)--) 86 87 static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw, 88 u16 vport_num) 89 { 90 u16 idx = mlx5_eswitch_vport_num_to_index(esw, vport_num); 91 92 WARN_ON(idx > esw->total_vports - 1); 93 return &esw->offloads.vport_reps[idx]; 94 } 95 96 static struct mlx5_flow_table * 97 esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level); 98 static void 99 esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level); 100 101 bool mlx5_eswitch_prios_supported(struct mlx5_eswitch *esw) 102 { 103 return (!!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)); 104 } 105 106 u32 mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw) 107 { 108 if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED) 109 return FDB_MAX_CHAIN; 110 111 return 0; 112 } 113 114 u16 mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw) 115 { 116 if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED) 117 return FDB_MAX_PRIO; 118 119 return 1; 120 } 121 122 struct mlx5_flow_handle * 123 mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, 124 struct mlx5_flow_spec *spec, 125 struct mlx5_esw_flow_attr *attr) 126 { 127 struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {}; 128 struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, }; 129 bool split = !!(attr->split_count); 130 struct mlx5_flow_handle *rule; 131 struct mlx5_flow_table *fdb; 132 int j, i = 0; 133 void *misc; 134 135 if (esw->mode != SRIOV_OFFLOADS) 136 return ERR_PTR(-EOPNOTSUPP); 137 138 flow_act.action = attr->action; 139 /* if per flow vlan pop/push is emulated, don't set that into the firmware */ 140 if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1)) 141 flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH | 142 MLX5_FLOW_CONTEXT_ACTION_VLAN_POP); 143 else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) { 144 flow_act.vlan[0].ethtype = ntohs(attr->vlan_proto[0]); 145 flow_act.vlan[0].vid = attr->vlan_vid[0]; 146 flow_act.vlan[0].prio = attr->vlan_prio[0]; 147 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) { 148 flow_act.vlan[1].ethtype = ntohs(attr->vlan_proto[1]); 149 flow_act.vlan[1].vid = attr->vlan_vid[1]; 150 flow_act.vlan[1].prio = attr->vlan_prio[1]; 151 } 152 } 153 154 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { 155 if (attr->dest_chain) { 156 struct mlx5_flow_table *ft; 157 158 ft = esw_get_prio_table(esw, attr->dest_chain, 1, 0); 159 if (IS_ERR(ft)) { 160 rule = ERR_CAST(ft); 161 goto err_create_goto_table; 162 } 163 164 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; 165 dest[i].ft = ft; 166 i++; 167 } else { 168 for (j = attr->split_count; j < attr->out_count; j++) { 169 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT; 170 dest[i].vport.num = attr->dests[j].rep->vport; 171 dest[i].vport.vhca_id = 172 MLX5_CAP_GEN(attr->dests[j].mdev, vhca_id); 173 if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) 174 dest[i].vport.flags |= 175 MLX5_FLOW_DEST_VPORT_VHCA_ID; 176 if (attr->dests[j].flags & MLX5_ESW_DEST_ENCAP) { 177 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT; 178 flow_act.reformat_id = attr->dests[j].encap_id; 179 dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID; 180 dest[i].vport.reformat_id = 181 attr->dests[j].encap_id; 182 } 183 i++; 184 } 185 } 186 } 187 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { 188 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; 189 dest[i].counter_id = mlx5_fc_id(attr->counter); 190 i++; 191 } 192 193 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); 194 MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport); 195 196 if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) 197 MLX5_SET(fte_match_set_misc, misc, 198 source_eswitch_owner_vhca_id, 199 MLX5_CAP_GEN(attr->in_mdev, vhca_id)); 200 201 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); 202 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); 203 if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) 204 MLX5_SET_TO_ONES(fte_match_set_misc, misc, 205 source_eswitch_owner_vhca_id); 206 207 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; 208 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) { 209 if (attr->tunnel_match_level != MLX5_MATCH_NONE) 210 spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS; 211 if (attr->match_level != MLX5_MATCH_NONE) 212 spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS; 213 } else if (attr->match_level != MLX5_MATCH_NONE) { 214 spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS; 215 } 216 217 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) 218 flow_act.modify_id = attr->mod_hdr_id; 219 220 fdb = esw_get_prio_table(esw, attr->chain, attr->prio, !!split); 221 if (IS_ERR(fdb)) { 222 rule = ERR_CAST(fdb); 223 goto err_esw_get; 224 } 225 226 rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i); 227 if (IS_ERR(rule)) 228 goto err_add_rule; 229 else 230 esw->offloads.num_flows++; 231 232 return rule; 233 234 err_add_rule: 235 esw_put_prio_table(esw, attr->chain, attr->prio, !!split); 236 err_esw_get: 237 if (attr->dest_chain) 238 esw_put_prio_table(esw, attr->dest_chain, 1, 0); 239 err_create_goto_table: 240 return rule; 241 } 242 243 struct mlx5_flow_handle * 244 mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw, 245 struct mlx5_flow_spec *spec, 246 struct mlx5_esw_flow_attr *attr) 247 { 248 struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {}; 249 struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, }; 250 struct mlx5_flow_table *fast_fdb; 251 struct mlx5_flow_table *fwd_fdb; 252 struct mlx5_flow_handle *rule; 253 void *misc; 254 int i; 255 256 fast_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 0); 257 if (IS_ERR(fast_fdb)) { 258 rule = ERR_CAST(fast_fdb); 259 goto err_get_fast; 260 } 261 262 fwd_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 1); 263 if (IS_ERR(fwd_fdb)) { 264 rule = ERR_CAST(fwd_fdb); 265 goto err_get_fwd; 266 } 267 268 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 269 for (i = 0; i < attr->split_count; i++) { 270 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT; 271 dest[i].vport.num = attr->dests[i].rep->vport; 272 dest[i].vport.vhca_id = 273 MLX5_CAP_GEN(attr->dests[i].mdev, vhca_id); 274 if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) 275 dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID; 276 if (attr->dests[i].flags & MLX5_ESW_DEST_ENCAP) { 277 dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID; 278 dest[i].vport.reformat_id = attr->dests[i].encap_id; 279 } 280 } 281 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; 282 dest[i].ft = fwd_fdb, 283 i++; 284 285 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); 286 MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport); 287 288 if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) 289 MLX5_SET(fte_match_set_misc, misc, 290 source_eswitch_owner_vhca_id, 291 MLX5_CAP_GEN(attr->in_mdev, vhca_id)); 292 293 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); 294 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); 295 if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) 296 MLX5_SET_TO_ONES(fte_match_set_misc, misc, 297 source_eswitch_owner_vhca_id); 298 299 if (attr->match_level == MLX5_MATCH_NONE) 300 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; 301 else 302 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | 303 MLX5_MATCH_MISC_PARAMETERS; 304 305 rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i); 306 307 if (IS_ERR(rule)) 308 goto add_err; 309 310 esw->offloads.num_flows++; 311 312 return rule; 313 add_err: 314 esw_put_prio_table(esw, attr->chain, attr->prio, 1); 315 err_get_fwd: 316 esw_put_prio_table(esw, attr->chain, attr->prio, 0); 317 err_get_fast: 318 return rule; 319 } 320 321 static void 322 __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw, 323 struct mlx5_flow_handle *rule, 324 struct mlx5_esw_flow_attr *attr, 325 bool fwd_rule) 326 { 327 bool split = (attr->split_count > 0); 328 329 mlx5_del_flow_rules(rule); 330 esw->offloads.num_flows--; 331 332 if (fwd_rule) { 333 esw_put_prio_table(esw, attr->chain, attr->prio, 1); 334 esw_put_prio_table(esw, attr->chain, attr->prio, 0); 335 } else { 336 esw_put_prio_table(esw, attr->chain, attr->prio, !!split); 337 if (attr->dest_chain) 338 esw_put_prio_table(esw, attr->dest_chain, 1, 0); 339 } 340 } 341 342 void 343 mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw, 344 struct mlx5_flow_handle *rule, 345 struct mlx5_esw_flow_attr *attr) 346 { 347 __mlx5_eswitch_del_rule(esw, rule, attr, false); 348 } 349 350 void 351 mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw, 352 struct mlx5_flow_handle *rule, 353 struct mlx5_esw_flow_attr *attr) 354 { 355 __mlx5_eswitch_del_rule(esw, rule, attr, true); 356 } 357 358 static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val) 359 { 360 struct mlx5_eswitch_rep *rep; 361 int vf_vport, err = 0; 362 363 esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none"); 364 for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) { 365 rep = &esw->offloads.vport_reps[vf_vport]; 366 if (rep->rep_if[REP_ETH].state != REP_LOADED) 367 continue; 368 369 err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val); 370 if (err) 371 goto out; 372 } 373 374 out: 375 return err; 376 } 377 378 static struct mlx5_eswitch_rep * 379 esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop) 380 { 381 struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL; 382 383 in_rep = attr->in_rep; 384 out_rep = attr->dests[0].rep; 385 386 if (push) 387 vport = in_rep; 388 else if (pop) 389 vport = out_rep; 390 else 391 vport = in_rep; 392 393 return vport; 394 } 395 396 static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr, 397 bool push, bool pop, bool fwd) 398 { 399 struct mlx5_eswitch_rep *in_rep, *out_rep; 400 401 if ((push || pop) && !fwd) 402 goto out_notsupp; 403 404 in_rep = attr->in_rep; 405 out_rep = attr->dests[0].rep; 406 407 if (push && in_rep->vport == MLX5_VPORT_UPLINK) 408 goto out_notsupp; 409 410 if (pop && out_rep->vport == MLX5_VPORT_UPLINK) 411 goto out_notsupp; 412 413 /* vport has vlan push configured, can't offload VF --> wire rules w.o it */ 414 if (!push && !pop && fwd) 415 if (in_rep->vlan && out_rep->vport == MLX5_VPORT_UPLINK) 416 goto out_notsupp; 417 418 /* protects against (1) setting rules with different vlans to push and 419 * (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0) 420 */ 421 if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0])) 422 goto out_notsupp; 423 424 return 0; 425 426 out_notsupp: 427 return -EOPNOTSUPP; 428 } 429 430 int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw, 431 struct mlx5_esw_flow_attr *attr) 432 { 433 struct offloads_fdb *offloads = &esw->fdb_table.offloads; 434 struct mlx5_eswitch_rep *vport = NULL; 435 bool push, pop, fwd; 436 int err = 0; 437 438 /* nop if we're on the vlan push/pop non emulation mode */ 439 if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1)) 440 return 0; 441 442 push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH); 443 pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP); 444 fwd = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) && 445 !attr->dest_chain); 446 447 err = esw_add_vlan_action_check(attr, push, pop, fwd); 448 if (err) 449 return err; 450 451 attr->vlan_handled = false; 452 453 vport = esw_vlan_action_get_vport(attr, push, pop); 454 455 if (!push && !pop && fwd) { 456 /* tracks VF --> wire rules without vlan push action */ 457 if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) { 458 vport->vlan_refcount++; 459 attr->vlan_handled = true; 460 } 461 462 return 0; 463 } 464 465 if (!push && !pop) 466 return 0; 467 468 if (!(offloads->vlan_push_pop_refcount)) { 469 /* it's the 1st vlan rule, apply global vlan pop policy */ 470 err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP); 471 if (err) 472 goto out; 473 } 474 offloads->vlan_push_pop_refcount++; 475 476 if (push) { 477 if (vport->vlan_refcount) 478 goto skip_set_push; 479 480 err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid[0], 0, 481 SET_VLAN_INSERT | SET_VLAN_STRIP); 482 if (err) 483 goto out; 484 vport->vlan = attr->vlan_vid[0]; 485 skip_set_push: 486 vport->vlan_refcount++; 487 } 488 out: 489 if (!err) 490 attr->vlan_handled = true; 491 return err; 492 } 493 494 int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw, 495 struct mlx5_esw_flow_attr *attr) 496 { 497 struct offloads_fdb *offloads = &esw->fdb_table.offloads; 498 struct mlx5_eswitch_rep *vport = NULL; 499 bool push, pop, fwd; 500 int err = 0; 501 502 /* nop if we're on the vlan push/pop non emulation mode */ 503 if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1)) 504 return 0; 505 506 if (!attr->vlan_handled) 507 return 0; 508 509 push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH); 510 pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP); 511 fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST); 512 513 vport = esw_vlan_action_get_vport(attr, push, pop); 514 515 if (!push && !pop && fwd) { 516 /* tracks VF --> wire rules without vlan push action */ 517 if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) 518 vport->vlan_refcount--; 519 520 return 0; 521 } 522 523 if (push) { 524 vport->vlan_refcount--; 525 if (vport->vlan_refcount) 526 goto skip_unset_push; 527 528 vport->vlan = 0; 529 err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, 530 0, 0, SET_VLAN_STRIP); 531 if (err) 532 goto out; 533 } 534 535 skip_unset_push: 536 offloads->vlan_push_pop_refcount--; 537 if (offloads->vlan_push_pop_refcount) 538 return 0; 539 540 /* no more vlan rules, stop global vlan pop policy */ 541 err = esw_set_global_vlan_pop(esw, 0); 542 543 out: 544 return err; 545 } 546 547 struct mlx5_flow_handle * 548 mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn) 549 { 550 struct mlx5_flow_act flow_act = {0}; 551 struct mlx5_flow_destination dest = {}; 552 struct mlx5_flow_handle *flow_rule; 553 struct mlx5_flow_spec *spec; 554 void *misc; 555 556 spec = kvzalloc(sizeof(*spec), GFP_KERNEL); 557 if (!spec) { 558 flow_rule = ERR_PTR(-ENOMEM); 559 goto out; 560 } 561 562 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); 563 MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn); 564 /* source vport is the esw manager */ 565 MLX5_SET(fte_match_set_misc, misc, source_port, esw->manager_vport); 566 567 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); 568 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn); 569 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); 570 571 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; 572 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; 573 dest.vport.num = vport; 574 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 575 576 flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec, 577 &flow_act, &dest, 1); 578 if (IS_ERR(flow_rule)) 579 esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule)); 580 out: 581 kvfree(spec); 582 return flow_rule; 583 } 584 EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule); 585 586 void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule) 587 { 588 mlx5_del_flow_rules(rule); 589 } 590 591 static void peer_miss_rules_setup(struct mlx5_core_dev *peer_dev, 592 struct mlx5_flow_spec *spec, 593 struct mlx5_flow_destination *dest) 594 { 595 void *misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, 596 misc_parameters); 597 598 MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id, 599 MLX5_CAP_GEN(peer_dev, vhca_id)); 600 601 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; 602 603 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 604 misc_parameters); 605 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); 606 MLX5_SET_TO_ONES(fte_match_set_misc, misc, 607 source_eswitch_owner_vhca_id); 608 609 dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT; 610 dest->vport.num = peer_dev->priv.eswitch->manager_vport; 611 dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id); 612 dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID; 613 } 614 615 static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw, 616 struct mlx5_core_dev *peer_dev) 617 { 618 struct mlx5_flow_destination dest = {}; 619 struct mlx5_flow_act flow_act = {0}; 620 struct mlx5_flow_handle **flows; 621 struct mlx5_flow_handle *flow; 622 struct mlx5_flow_spec *spec; 623 /* total vports is the same for both e-switches */ 624 int nvports = esw->total_vports; 625 void *misc; 626 int err, i; 627 628 spec = kvzalloc(sizeof(*spec), GFP_KERNEL); 629 if (!spec) 630 return -ENOMEM; 631 632 peer_miss_rules_setup(peer_dev, spec, &dest); 633 634 flows = kvzalloc(nvports * sizeof(*flows), GFP_KERNEL); 635 if (!flows) { 636 err = -ENOMEM; 637 goto alloc_flows_err; 638 } 639 640 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 641 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, 642 misc_parameters); 643 644 if (mlx5_core_is_ecpf_esw_manager(esw->dev)) { 645 MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_PF); 646 flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, 647 spec, &flow_act, &dest, 1); 648 if (IS_ERR(flow)) { 649 err = PTR_ERR(flow); 650 goto add_pf_flow_err; 651 } 652 flows[MLX5_VPORT_PF] = flow; 653 } 654 655 if (mlx5_ecpf_vport_exists(esw->dev)) { 656 MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF); 657 flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, 658 spec, &flow_act, &dest, 1); 659 if (IS_ERR(flow)) { 660 err = PTR_ERR(flow); 661 goto add_ecpf_flow_err; 662 } 663 flows[mlx5_eswitch_ecpf_idx(esw)] = flow; 664 } 665 666 mlx5_esw_for_each_vf_vport(esw, i, mlx5_core_max_vfs(esw->dev)) { 667 MLX5_SET(fte_match_set_misc, misc, source_port, i); 668 flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, 669 spec, &flow_act, &dest, 1); 670 if (IS_ERR(flow)) { 671 err = PTR_ERR(flow); 672 goto add_vf_flow_err; 673 } 674 flows[i] = flow; 675 } 676 677 esw->fdb_table.offloads.peer_miss_rules = flows; 678 679 kvfree(spec); 680 return 0; 681 682 add_vf_flow_err: 683 nvports = --i; 684 mlx5_esw_for_each_vf_vport_reverse(esw, i, nvports) 685 mlx5_del_flow_rules(flows[i]); 686 687 if (mlx5_ecpf_vport_exists(esw->dev)) 688 mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]); 689 add_ecpf_flow_err: 690 if (mlx5_core_is_ecpf_esw_manager(esw->dev)) 691 mlx5_del_flow_rules(flows[MLX5_VPORT_PF]); 692 add_pf_flow_err: 693 esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err); 694 kvfree(flows); 695 alloc_flows_err: 696 kvfree(spec); 697 return err; 698 } 699 700 static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw) 701 { 702 struct mlx5_flow_handle **flows; 703 int i; 704 705 flows = esw->fdb_table.offloads.peer_miss_rules; 706 707 mlx5_esw_for_each_vf_vport_reverse(esw, i, mlx5_core_max_vfs(esw->dev)) 708 mlx5_del_flow_rules(flows[i]); 709 710 if (mlx5_ecpf_vport_exists(esw->dev)) 711 mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]); 712 713 if (mlx5_core_is_ecpf_esw_manager(esw->dev)) 714 mlx5_del_flow_rules(flows[MLX5_VPORT_PF]); 715 716 kvfree(flows); 717 } 718 719 static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw) 720 { 721 struct mlx5_flow_act flow_act = {0}; 722 struct mlx5_flow_destination dest = {}; 723 struct mlx5_flow_handle *flow_rule = NULL; 724 struct mlx5_flow_spec *spec; 725 void *headers_c; 726 void *headers_v; 727 int err = 0; 728 u8 *dmac_c; 729 u8 *dmac_v; 730 731 spec = kvzalloc(sizeof(*spec), GFP_KERNEL); 732 if (!spec) { 733 err = -ENOMEM; 734 goto out; 735 } 736 737 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; 738 headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 739 outer_headers); 740 dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c, 741 outer_headers.dmac_47_16); 742 dmac_c[0] = 0x01; 743 744 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; 745 dest.vport.num = esw->manager_vport; 746 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 747 748 flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec, 749 &flow_act, &dest, 1); 750 if (IS_ERR(flow_rule)) { 751 err = PTR_ERR(flow_rule); 752 esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err); 753 goto out; 754 } 755 756 esw->fdb_table.offloads.miss_rule_uni = flow_rule; 757 758 headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, 759 outer_headers); 760 dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v, 761 outer_headers.dmac_47_16); 762 dmac_v[0] = 0x01; 763 flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec, 764 &flow_act, &dest, 1); 765 if (IS_ERR(flow_rule)) { 766 err = PTR_ERR(flow_rule); 767 esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err); 768 mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni); 769 goto out; 770 } 771 772 esw->fdb_table.offloads.miss_rule_multi = flow_rule; 773 774 out: 775 kvfree(spec); 776 return err; 777 } 778 779 #define ESW_OFFLOADS_NUM_GROUPS 4 780 781 /* Firmware currently has 4 pool of 4 sizes that it supports (ESW_POOLS), 782 * and a virtual memory region of 16M (ESW_SIZE), this region is duplicated 783 * for each flow table pool. We can allocate up to 16M of each pool, 784 * and we keep track of how much we used via put/get_sz_to_pool. 785 * Firmware doesn't report any of this for now. 786 * ESW_POOL is expected to be sorted from large to small 787 */ 788 #define ESW_SIZE (16 * 1024 * 1024) 789 const unsigned int ESW_POOLS[4] = { 4 * 1024 * 1024, 1 * 1024 * 1024, 790 64 * 1024, 4 * 1024 }; 791 792 static int 793 get_sz_from_pool(struct mlx5_eswitch *esw) 794 { 795 int sz = 0, i; 796 797 for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) { 798 if (esw->fdb_table.offloads.fdb_left[i]) { 799 --esw->fdb_table.offloads.fdb_left[i]; 800 sz = ESW_POOLS[i]; 801 break; 802 } 803 } 804 805 return sz; 806 } 807 808 static void 809 put_sz_to_pool(struct mlx5_eswitch *esw, int sz) 810 { 811 int i; 812 813 for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) { 814 if (sz >= ESW_POOLS[i]) { 815 ++esw->fdb_table.offloads.fdb_left[i]; 816 break; 817 } 818 } 819 } 820 821 static struct mlx5_flow_table * 822 create_next_size_table(struct mlx5_eswitch *esw, 823 struct mlx5_flow_namespace *ns, 824 u16 table_prio, 825 int level, 826 u32 flags) 827 { 828 struct mlx5_flow_table *fdb; 829 int sz; 830 831 sz = get_sz_from_pool(esw); 832 if (!sz) 833 return ERR_PTR(-ENOSPC); 834 835 fdb = mlx5_create_auto_grouped_flow_table(ns, 836 table_prio, 837 sz, 838 ESW_OFFLOADS_NUM_GROUPS, 839 level, 840 flags); 841 if (IS_ERR(fdb)) { 842 esw_warn(esw->dev, "Failed to create FDB Table err %d (table prio: %d, level: %d, size: %d)\n", 843 (int)PTR_ERR(fdb), table_prio, level, sz); 844 put_sz_to_pool(esw, sz); 845 } 846 847 return fdb; 848 } 849 850 static struct mlx5_flow_table * 851 esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level) 852 { 853 struct mlx5_core_dev *dev = esw->dev; 854 struct mlx5_flow_table *fdb = NULL; 855 struct mlx5_flow_namespace *ns; 856 int table_prio, l = 0; 857 u32 flags = 0; 858 859 if (chain == FDB_SLOW_PATH_CHAIN) 860 return esw->fdb_table.offloads.slow_fdb; 861 862 mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock); 863 864 fdb = fdb_prio_table(esw, chain, prio, level).fdb; 865 if (fdb) { 866 /* take ref on earlier levels as well */ 867 while (level >= 0) 868 fdb_prio_table(esw, chain, prio, level--).num_rules++; 869 mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock); 870 return fdb; 871 } 872 873 ns = mlx5_get_fdb_sub_ns(dev, chain); 874 if (!ns) { 875 esw_warn(dev, "Failed to get FDB sub namespace\n"); 876 mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock); 877 return ERR_PTR(-EOPNOTSUPP); 878 } 879 880 if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) 881 flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT | 882 MLX5_FLOW_TABLE_TUNNEL_EN_DECAP); 883 884 table_prio = (chain * FDB_MAX_PRIO) + prio - 1; 885 886 /* create earlier levels for correct fs_core lookup when 887 * connecting tables 888 */ 889 for (l = 0; l <= level; l++) { 890 if (fdb_prio_table(esw, chain, prio, l).fdb) { 891 fdb_prio_table(esw, chain, prio, l).num_rules++; 892 continue; 893 } 894 895 fdb = create_next_size_table(esw, ns, table_prio, l, flags); 896 if (IS_ERR(fdb)) { 897 l--; 898 goto err_create_fdb; 899 } 900 901 fdb_prio_table(esw, chain, prio, l).fdb = fdb; 902 fdb_prio_table(esw, chain, prio, l).num_rules = 1; 903 } 904 905 mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock); 906 return fdb; 907 908 err_create_fdb: 909 mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock); 910 if (l >= 0) 911 esw_put_prio_table(esw, chain, prio, l); 912 913 return fdb; 914 } 915 916 static void 917 esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level) 918 { 919 int l; 920 921 if (chain == FDB_SLOW_PATH_CHAIN) 922 return; 923 924 mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock); 925 926 for (l = level; l >= 0; l--) { 927 if (--(fdb_prio_table(esw, chain, prio, l).num_rules) > 0) 928 continue; 929 930 put_sz_to_pool(esw, fdb_prio_table(esw, chain, prio, l).fdb->max_fte); 931 mlx5_destroy_flow_table(fdb_prio_table(esw, chain, prio, l).fdb); 932 fdb_prio_table(esw, chain, prio, l).fdb = NULL; 933 } 934 935 mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock); 936 } 937 938 static void esw_destroy_offloads_fast_fdb_tables(struct mlx5_eswitch *esw) 939 { 940 /* If lazy creation isn't supported, deref the fast path tables */ 941 if (!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)) { 942 esw_put_prio_table(esw, 0, 1, 1); 943 esw_put_prio_table(esw, 0, 1, 0); 944 } 945 } 946 947 #define MAX_PF_SQ 256 948 #define MAX_SQ_NVPORTS 32 949 950 static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports) 951 { 952 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); 953 struct mlx5_flow_table_attr ft_attr = {}; 954 struct mlx5_core_dev *dev = esw->dev; 955 u32 *flow_group_in, max_flow_counter; 956 struct mlx5_flow_namespace *root_ns; 957 struct mlx5_flow_table *fdb = NULL; 958 int table_size, ix, err = 0, i; 959 struct mlx5_flow_group *g; 960 u32 flags = 0, fdb_max; 961 void *match_criteria; 962 u8 *dmac; 963 964 esw_debug(esw->dev, "Create offloads FDB Tables\n"); 965 flow_group_in = kvzalloc(inlen, GFP_KERNEL); 966 if (!flow_group_in) 967 return -ENOMEM; 968 969 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB); 970 if (!root_ns) { 971 esw_warn(dev, "Failed to get FDB flow namespace\n"); 972 err = -EOPNOTSUPP; 973 goto ns_err; 974 } 975 976 max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) | 977 MLX5_CAP_GEN(dev, max_flow_counter_15_0); 978 fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size); 979 980 esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d), groups(%d), max flow table size(2^%d))\n", 981 MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size), 982 max_flow_counter, ESW_OFFLOADS_NUM_GROUPS, 983 fdb_max); 984 985 for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) 986 esw->fdb_table.offloads.fdb_left[i] = 987 ESW_POOLS[i] <= fdb_max ? ESW_SIZE / ESW_POOLS[i] : 0; 988 989 table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ + 990 MLX5_ESW_MISS_FLOWS + esw->total_vports; 991 992 /* create the slow path fdb with encap set, so further table instances 993 * can be created at run time while VFs are probed if the FW allows that. 994 */ 995 if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) 996 flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT | 997 MLX5_FLOW_TABLE_TUNNEL_EN_DECAP); 998 999 ft_attr.flags = flags; 1000 ft_attr.max_fte = table_size; 1001 ft_attr.prio = FDB_SLOW_PATH; 1002 1003 fdb = mlx5_create_flow_table(root_ns, &ft_attr); 1004 if (IS_ERR(fdb)) { 1005 err = PTR_ERR(fdb); 1006 esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err); 1007 goto slow_fdb_err; 1008 } 1009 esw->fdb_table.offloads.slow_fdb = fdb; 1010 1011 /* If lazy creation isn't supported, open the fast path tables now */ 1012 if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, multi_fdb_encap) && 1013 esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) { 1014 esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED; 1015 esw_warn(dev, "Lazy creation of flow tables isn't supported, ignoring priorities\n"); 1016 esw_get_prio_table(esw, 0, 1, 0); 1017 esw_get_prio_table(esw, 0, 1, 1); 1018 } else { 1019 esw_debug(dev, "Lazy creation of flow tables supported, deferring table opening\n"); 1020 esw->fdb_table.flags |= ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED; 1021 } 1022 1023 /* create send-to-vport group */ 1024 memset(flow_group_in, 0, inlen); 1025 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, 1026 MLX5_MATCH_MISC_PARAMETERS); 1027 1028 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria); 1029 1030 MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn); 1031 MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port); 1032 1033 ix = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ; 1034 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); 1035 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1); 1036 1037 g = mlx5_create_flow_group(fdb, flow_group_in); 1038 if (IS_ERR(g)) { 1039 err = PTR_ERR(g); 1040 esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err); 1041 goto send_vport_err; 1042 } 1043 esw->fdb_table.offloads.send_to_vport_grp = g; 1044 1045 /* create peer esw miss group */ 1046 memset(flow_group_in, 0, inlen); 1047 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, 1048 MLX5_MATCH_MISC_PARAMETERS); 1049 1050 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, 1051 match_criteria); 1052 1053 MLX5_SET_TO_ONES(fte_match_param, match_criteria, 1054 misc_parameters.source_port); 1055 MLX5_SET_TO_ONES(fte_match_param, match_criteria, 1056 misc_parameters.source_eswitch_owner_vhca_id); 1057 1058 MLX5_SET(create_flow_group_in, flow_group_in, 1059 source_eswitch_owner_vhca_id_valid, 1); 1060 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix); 1061 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1062 ix + esw->total_vports - 1); 1063 ix += esw->total_vports; 1064 1065 g = mlx5_create_flow_group(fdb, flow_group_in); 1066 if (IS_ERR(g)) { 1067 err = PTR_ERR(g); 1068 esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err); 1069 goto peer_miss_err; 1070 } 1071 esw->fdb_table.offloads.peer_miss_grp = g; 1072 1073 /* create miss group */ 1074 memset(flow_group_in, 0, inlen); 1075 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, 1076 MLX5_MATCH_OUTER_HEADERS); 1077 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, 1078 match_criteria); 1079 dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, 1080 outer_headers.dmac_47_16); 1081 dmac[0] = 0x01; 1082 1083 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix); 1084 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1085 ix + MLX5_ESW_MISS_FLOWS); 1086 1087 g = mlx5_create_flow_group(fdb, flow_group_in); 1088 if (IS_ERR(g)) { 1089 err = PTR_ERR(g); 1090 esw_warn(dev, "Failed to create miss flow group err(%d)\n", err); 1091 goto miss_err; 1092 } 1093 esw->fdb_table.offloads.miss_grp = g; 1094 1095 err = esw_add_fdb_miss_rule(esw); 1096 if (err) 1097 goto miss_rule_err; 1098 1099 esw->nvports = nvports; 1100 kvfree(flow_group_in); 1101 return 0; 1102 1103 miss_rule_err: 1104 mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp); 1105 miss_err: 1106 mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp); 1107 peer_miss_err: 1108 mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp); 1109 send_vport_err: 1110 esw_destroy_offloads_fast_fdb_tables(esw); 1111 mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb); 1112 slow_fdb_err: 1113 ns_err: 1114 kvfree(flow_group_in); 1115 return err; 1116 } 1117 1118 static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw) 1119 { 1120 if (!esw->fdb_table.offloads.slow_fdb) 1121 return; 1122 1123 esw_debug(esw->dev, "Destroy offloads FDB Tables\n"); 1124 mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi); 1125 mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni); 1126 mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp); 1127 mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp); 1128 mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp); 1129 1130 mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb); 1131 esw_destroy_offloads_fast_fdb_tables(esw); 1132 } 1133 1134 static int esw_create_offloads_table(struct mlx5_eswitch *esw, int nvports) 1135 { 1136 struct mlx5_flow_table_attr ft_attr = {}; 1137 struct mlx5_core_dev *dev = esw->dev; 1138 struct mlx5_flow_table *ft_offloads; 1139 struct mlx5_flow_namespace *ns; 1140 int err = 0; 1141 1142 ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS); 1143 if (!ns) { 1144 esw_warn(esw->dev, "Failed to get offloads flow namespace\n"); 1145 return -EOPNOTSUPP; 1146 } 1147 1148 ft_attr.max_fte = nvports + MLX5_ESW_MISS_FLOWS; 1149 1150 ft_offloads = mlx5_create_flow_table(ns, &ft_attr); 1151 if (IS_ERR(ft_offloads)) { 1152 err = PTR_ERR(ft_offloads); 1153 esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err); 1154 return err; 1155 } 1156 1157 esw->offloads.ft_offloads = ft_offloads; 1158 return 0; 1159 } 1160 1161 static void esw_destroy_offloads_table(struct mlx5_eswitch *esw) 1162 { 1163 struct mlx5_esw_offload *offloads = &esw->offloads; 1164 1165 mlx5_destroy_flow_table(offloads->ft_offloads); 1166 } 1167 1168 static int esw_create_vport_rx_group(struct mlx5_eswitch *esw, int nvports) 1169 { 1170 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); 1171 struct mlx5_flow_group *g; 1172 u32 *flow_group_in; 1173 void *match_criteria, *misc; 1174 int err = 0; 1175 1176 nvports = nvports + MLX5_ESW_MISS_FLOWS; 1177 flow_group_in = kvzalloc(inlen, GFP_KERNEL); 1178 if (!flow_group_in) 1179 return -ENOMEM; 1180 1181 /* create vport rx group */ 1182 memset(flow_group_in, 0, inlen); 1183 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, 1184 MLX5_MATCH_MISC_PARAMETERS); 1185 1186 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria); 1187 misc = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters); 1188 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); 1189 1190 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); 1191 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1); 1192 1193 g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in); 1194 1195 if (IS_ERR(g)) { 1196 err = PTR_ERR(g); 1197 mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err); 1198 goto out; 1199 } 1200 1201 esw->offloads.vport_rx_group = g; 1202 out: 1203 kvfree(flow_group_in); 1204 return err; 1205 } 1206 1207 static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw) 1208 { 1209 mlx5_destroy_flow_group(esw->offloads.vport_rx_group); 1210 } 1211 1212 struct mlx5_flow_handle * 1213 mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, 1214 struct mlx5_flow_destination *dest) 1215 { 1216 struct mlx5_flow_act flow_act = {0}; 1217 struct mlx5_flow_handle *flow_rule; 1218 struct mlx5_flow_spec *spec; 1219 void *misc; 1220 1221 spec = kvzalloc(sizeof(*spec), GFP_KERNEL); 1222 if (!spec) { 1223 flow_rule = ERR_PTR(-ENOMEM); 1224 goto out; 1225 } 1226 1227 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); 1228 MLX5_SET(fte_match_set_misc, misc, source_port, vport); 1229 1230 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); 1231 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); 1232 1233 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; 1234 1235 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 1236 flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec, 1237 &flow_act, dest, 1); 1238 if (IS_ERR(flow_rule)) { 1239 esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule)); 1240 goto out; 1241 } 1242 1243 out: 1244 kvfree(spec); 1245 return flow_rule; 1246 } 1247 1248 static int esw_offloads_start(struct mlx5_eswitch *esw, 1249 struct netlink_ext_ack *extack) 1250 { 1251 int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs; 1252 1253 if (esw->mode != SRIOV_LEGACY && 1254 !mlx5_core_is_ecpf_esw_manager(esw->dev)) { 1255 NL_SET_ERR_MSG_MOD(extack, 1256 "Can't set offloads mode, SRIOV legacy not enabled"); 1257 return -EINVAL; 1258 } 1259 1260 mlx5_eswitch_disable_sriov(esw); 1261 err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS); 1262 if (err) { 1263 NL_SET_ERR_MSG_MOD(extack, 1264 "Failed setting eswitch to offloads"); 1265 err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY); 1266 if (err1) { 1267 NL_SET_ERR_MSG_MOD(extack, 1268 "Failed setting eswitch back to legacy"); 1269 } 1270 } 1271 if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) { 1272 if (mlx5_eswitch_inline_mode_get(esw, 1273 num_vfs, 1274 &esw->offloads.inline_mode)) { 1275 esw->offloads.inline_mode = MLX5_INLINE_MODE_L2; 1276 NL_SET_ERR_MSG_MOD(extack, 1277 "Inline mode is different between vports"); 1278 } 1279 } 1280 return err; 1281 } 1282 1283 void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw) 1284 { 1285 kfree(esw->offloads.vport_reps); 1286 } 1287 1288 int esw_offloads_init_reps(struct mlx5_eswitch *esw) 1289 { 1290 int total_vfs = MLX5_TOTAL_VPORTS(esw->dev); 1291 struct mlx5_core_dev *dev = esw->dev; 1292 struct mlx5_eswitch_rep *rep; 1293 u8 hw_id[ETH_ALEN], rep_type; 1294 int vport; 1295 1296 esw->offloads.vport_reps = kcalloc(total_vfs, 1297 sizeof(struct mlx5_eswitch_rep), 1298 GFP_KERNEL); 1299 if (!esw->offloads.vport_reps) 1300 return -ENOMEM; 1301 1302 mlx5_query_nic_vport_mac_address(dev, 0, hw_id); 1303 1304 mlx5_esw_for_all_reps(esw, vport, rep) { 1305 rep->vport = mlx5_eswitch_index_to_vport_num(esw, vport); 1306 ether_addr_copy(rep->hw_id, hw_id); 1307 1308 for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) 1309 rep->rep_if[rep_type].state = REP_UNREGISTERED; 1310 } 1311 1312 return 0; 1313 } 1314 1315 static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw, 1316 struct mlx5_eswitch_rep *rep, u8 rep_type) 1317 { 1318 if (rep->rep_if[rep_type].state != REP_LOADED) 1319 return; 1320 1321 rep->rep_if[rep_type].unload(rep); 1322 rep->rep_if[rep_type].state = REP_REGISTERED; 1323 } 1324 1325 static void __unload_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type) 1326 { 1327 struct mlx5_eswitch_rep *rep; 1328 1329 if (mlx5_ecpf_vport_exists(esw->dev)) { 1330 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF); 1331 __esw_offloads_unload_rep(esw, rep, rep_type); 1332 } 1333 1334 if (mlx5_core_is_ecpf_esw_manager(esw->dev)) { 1335 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF); 1336 __esw_offloads_unload_rep(esw, rep, rep_type); 1337 } 1338 1339 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK); 1340 __esw_offloads_unload_rep(esw, rep, rep_type); 1341 } 1342 1343 static void __unload_reps_vf_vport(struct mlx5_eswitch *esw, int nvports, 1344 u8 rep_type) 1345 { 1346 struct mlx5_eswitch_rep *rep; 1347 int i; 1348 1349 mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, nvports) 1350 __esw_offloads_unload_rep(esw, rep, rep_type); 1351 } 1352 1353 static void esw_offloads_unload_vf_reps(struct mlx5_eswitch *esw, int nvports) 1354 { 1355 u8 rep_type = NUM_REP_TYPES; 1356 1357 while (rep_type-- > 0) 1358 __unload_reps_vf_vport(esw, nvports, rep_type); 1359 } 1360 1361 static void __unload_reps_all_vport(struct mlx5_eswitch *esw, int nvports, 1362 u8 rep_type) 1363 { 1364 __unload_reps_vf_vport(esw, nvports, rep_type); 1365 1366 /* Special vports must be the last to unload. */ 1367 __unload_reps_special_vport(esw, rep_type); 1368 } 1369 1370 static void esw_offloads_unload_all_reps(struct mlx5_eswitch *esw, int nvports) 1371 { 1372 u8 rep_type = NUM_REP_TYPES; 1373 1374 while (rep_type-- > 0) 1375 __unload_reps_all_vport(esw, nvports, rep_type); 1376 } 1377 1378 static int __esw_offloads_load_rep(struct mlx5_eswitch *esw, 1379 struct mlx5_eswitch_rep *rep, u8 rep_type) 1380 { 1381 int err = 0; 1382 1383 if (rep->rep_if[rep_type].state != REP_REGISTERED) 1384 return 0; 1385 1386 err = rep->rep_if[rep_type].load(esw->dev, rep); 1387 if (err) 1388 return err; 1389 1390 rep->rep_if[rep_type].state = REP_LOADED; 1391 1392 return 0; 1393 } 1394 1395 static int __load_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type) 1396 { 1397 struct mlx5_eswitch_rep *rep; 1398 int err; 1399 1400 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK); 1401 err = __esw_offloads_load_rep(esw, rep, rep_type); 1402 if (err) 1403 return err; 1404 1405 if (mlx5_core_is_ecpf_esw_manager(esw->dev)) { 1406 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF); 1407 err = __esw_offloads_load_rep(esw, rep, rep_type); 1408 if (err) 1409 goto err_pf; 1410 } 1411 1412 if (mlx5_ecpf_vport_exists(esw->dev)) { 1413 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF); 1414 err = __esw_offloads_load_rep(esw, rep, rep_type); 1415 if (err) 1416 goto err_ecpf; 1417 } 1418 1419 return 0; 1420 1421 err_ecpf: 1422 if (mlx5_core_is_ecpf_esw_manager(esw->dev)) { 1423 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF); 1424 __esw_offloads_unload_rep(esw, rep, rep_type); 1425 } 1426 1427 err_pf: 1428 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK); 1429 __esw_offloads_unload_rep(esw, rep, rep_type); 1430 return err; 1431 } 1432 1433 static int __load_reps_vf_vport(struct mlx5_eswitch *esw, int nvports, 1434 u8 rep_type) 1435 { 1436 struct mlx5_eswitch_rep *rep; 1437 int err, i; 1438 1439 mlx5_esw_for_each_vf_rep(esw, i, rep, nvports) { 1440 err = __esw_offloads_load_rep(esw, rep, rep_type); 1441 if (err) 1442 goto err_vf; 1443 } 1444 1445 return 0; 1446 1447 err_vf: 1448 __unload_reps_vf_vport(esw, --i, rep_type); 1449 return err; 1450 } 1451 1452 static int esw_offloads_load_vf_reps(struct mlx5_eswitch *esw, int nvports) 1453 { 1454 u8 rep_type = 0; 1455 int err; 1456 1457 for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) { 1458 err = __load_reps_vf_vport(esw, nvports, rep_type); 1459 if (err) 1460 goto err_reps; 1461 } 1462 1463 return err; 1464 1465 err_reps: 1466 while (rep_type-- > 0) 1467 __unload_reps_vf_vport(esw, nvports, rep_type); 1468 return err; 1469 } 1470 1471 static int __load_reps_all_vport(struct mlx5_eswitch *esw, int nvports, 1472 u8 rep_type) 1473 { 1474 int err; 1475 1476 /* Special vports must be loaded first. */ 1477 err = __load_reps_special_vport(esw, rep_type); 1478 if (err) 1479 return err; 1480 1481 err = __load_reps_vf_vport(esw, nvports, rep_type); 1482 if (err) 1483 goto err_vfs; 1484 1485 return 0; 1486 1487 err_vfs: 1488 __unload_reps_special_vport(esw, rep_type); 1489 return err; 1490 } 1491 1492 static int esw_offloads_load_all_reps(struct mlx5_eswitch *esw, int nvports) 1493 { 1494 u8 rep_type = 0; 1495 int err; 1496 1497 for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) { 1498 err = __load_reps_all_vport(esw, nvports, rep_type); 1499 if (err) 1500 goto err_reps; 1501 } 1502 1503 return err; 1504 1505 err_reps: 1506 while (rep_type-- > 0) 1507 __unload_reps_all_vport(esw, nvports, rep_type); 1508 return err; 1509 } 1510 1511 #define ESW_OFFLOADS_DEVCOM_PAIR (0) 1512 #define ESW_OFFLOADS_DEVCOM_UNPAIR (1) 1513 1514 static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw, 1515 struct mlx5_eswitch *peer_esw) 1516 { 1517 int err; 1518 1519 err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev); 1520 if (err) 1521 return err; 1522 1523 return 0; 1524 } 1525 1526 void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw); 1527 1528 static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw) 1529 { 1530 mlx5e_tc_clean_fdb_peer_flows(esw); 1531 esw_del_fdb_peer_miss_rules(esw); 1532 } 1533 1534 static int mlx5_esw_offloads_devcom_event(int event, 1535 void *my_data, 1536 void *event_data) 1537 { 1538 struct mlx5_eswitch *esw = my_data; 1539 struct mlx5_eswitch *peer_esw = event_data; 1540 struct mlx5_devcom *devcom = esw->dev->priv.devcom; 1541 int err; 1542 1543 switch (event) { 1544 case ESW_OFFLOADS_DEVCOM_PAIR: 1545 err = mlx5_esw_offloads_pair(esw, peer_esw); 1546 if (err) 1547 goto err_out; 1548 1549 err = mlx5_esw_offloads_pair(peer_esw, esw); 1550 if (err) 1551 goto err_pair; 1552 1553 mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true); 1554 break; 1555 1556 case ESW_OFFLOADS_DEVCOM_UNPAIR: 1557 if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS)) 1558 break; 1559 1560 mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false); 1561 mlx5_esw_offloads_unpair(peer_esw); 1562 mlx5_esw_offloads_unpair(esw); 1563 break; 1564 } 1565 1566 return 0; 1567 1568 err_pair: 1569 mlx5_esw_offloads_unpair(esw); 1570 1571 err_out: 1572 mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d", 1573 event, err); 1574 return err; 1575 } 1576 1577 static void esw_offloads_devcom_init(struct mlx5_eswitch *esw) 1578 { 1579 struct mlx5_devcom *devcom = esw->dev->priv.devcom; 1580 1581 INIT_LIST_HEAD(&esw->offloads.peer_flows); 1582 mutex_init(&esw->offloads.peer_mutex); 1583 1584 if (!MLX5_CAP_ESW(esw->dev, merged_eswitch)) 1585 return; 1586 1587 mlx5_devcom_register_component(devcom, 1588 MLX5_DEVCOM_ESW_OFFLOADS, 1589 mlx5_esw_offloads_devcom_event, 1590 esw); 1591 1592 mlx5_devcom_send_event(devcom, 1593 MLX5_DEVCOM_ESW_OFFLOADS, 1594 ESW_OFFLOADS_DEVCOM_PAIR, esw); 1595 } 1596 1597 static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) 1598 { 1599 struct mlx5_devcom *devcom = esw->dev->priv.devcom; 1600 1601 if (!MLX5_CAP_ESW(esw->dev, merged_eswitch)) 1602 return; 1603 1604 mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS, 1605 ESW_OFFLOADS_DEVCOM_UNPAIR, esw); 1606 1607 mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS); 1608 } 1609 1610 static int esw_offloads_steering_init(struct mlx5_eswitch *esw, int nvports) 1611 { 1612 int err; 1613 1614 mutex_init(&esw->fdb_table.offloads.fdb_prio_lock); 1615 1616 err = esw_create_offloads_fdb_tables(esw, nvports); 1617 if (err) 1618 return err; 1619 1620 err = esw_create_offloads_table(esw, nvports); 1621 if (err) 1622 goto create_ft_err; 1623 1624 err = esw_create_vport_rx_group(esw, nvports); 1625 if (err) 1626 goto create_fg_err; 1627 1628 return 0; 1629 1630 create_fg_err: 1631 esw_destroy_offloads_table(esw); 1632 1633 create_ft_err: 1634 esw_destroy_offloads_fdb_tables(esw); 1635 1636 return err; 1637 } 1638 1639 static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw) 1640 { 1641 esw_destroy_vport_rx_group(esw); 1642 esw_destroy_offloads_table(esw); 1643 esw_destroy_offloads_fdb_tables(esw); 1644 } 1645 1646 static void esw_host_params_event_handler(struct work_struct *work) 1647 { 1648 struct mlx5_host_work *host_work; 1649 struct mlx5_eswitch *esw; 1650 int err, num_vf = 0; 1651 1652 host_work = container_of(work, struct mlx5_host_work, work); 1653 esw = host_work->esw; 1654 1655 err = mlx5_query_host_params_num_vfs(esw->dev, &num_vf); 1656 if (err || num_vf == esw->host_info.num_vfs) 1657 goto out; 1658 1659 /* Number of VFs can only change from "0 to x" or "x to 0". */ 1660 if (esw->host_info.num_vfs > 0) { 1661 esw_offloads_unload_vf_reps(esw, esw->host_info.num_vfs); 1662 } else { 1663 err = esw_offloads_load_vf_reps(esw, num_vf); 1664 1665 if (err) 1666 goto out; 1667 } 1668 1669 esw->host_info.num_vfs = num_vf; 1670 1671 out: 1672 kfree(host_work); 1673 } 1674 1675 static int esw_host_params_event(struct notifier_block *nb, 1676 unsigned long type, void *data) 1677 { 1678 struct mlx5_host_work *host_work; 1679 struct mlx5_host_info *host_info; 1680 struct mlx5_eswitch *esw; 1681 1682 host_work = kzalloc(sizeof(*host_work), GFP_ATOMIC); 1683 if (!host_work) 1684 return NOTIFY_DONE; 1685 1686 host_info = mlx5_nb_cof(nb, struct mlx5_host_info, nb); 1687 esw = container_of(host_info, struct mlx5_eswitch, host_info); 1688 1689 host_work->esw = esw; 1690 1691 INIT_WORK(&host_work->work, esw_host_params_event_handler); 1692 queue_work(esw->work_queue, &host_work->work); 1693 1694 return NOTIFY_OK; 1695 } 1696 1697 int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports, 1698 int total_nvports) 1699 { 1700 int err; 1701 1702 mutex_init(&esw->fdb_table.offloads.fdb_prio_lock); 1703 1704 err = esw_offloads_steering_init(esw, total_nvports); 1705 if (err) 1706 return err; 1707 1708 err = esw_offloads_load_all_reps(esw, vf_nvports); 1709 if (err) 1710 goto err_reps; 1711 1712 esw_offloads_devcom_init(esw); 1713 1714 if (mlx5_core_is_ecpf_esw_manager(esw->dev)) { 1715 MLX5_NB_INIT(&esw->host_info.nb, esw_host_params_event, 1716 HOST_PARAMS_CHANGE); 1717 mlx5_eq_notifier_register(esw->dev, &esw->host_info.nb); 1718 esw->host_info.num_vfs = vf_nvports; 1719 } 1720 1721 return 0; 1722 1723 err_reps: 1724 esw_offloads_steering_cleanup(esw); 1725 return err; 1726 } 1727 1728 static int esw_offloads_stop(struct mlx5_eswitch *esw, 1729 struct netlink_ext_ack *extack) 1730 { 1731 int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs; 1732 1733 mlx5_eswitch_disable_sriov(esw); 1734 err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY); 1735 if (err) { 1736 NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy"); 1737 err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS); 1738 if (err1) { 1739 NL_SET_ERR_MSG_MOD(extack, 1740 "Failed setting eswitch back to offloads"); 1741 } 1742 } 1743 1744 return err; 1745 } 1746 1747 void esw_offloads_cleanup(struct mlx5_eswitch *esw) 1748 { 1749 u16 num_vfs; 1750 1751 if (mlx5_core_is_ecpf_esw_manager(esw->dev)) { 1752 mlx5_eq_notifier_unregister(esw->dev, &esw->host_info.nb); 1753 flush_workqueue(esw->work_queue); 1754 num_vfs = esw->host_info.num_vfs; 1755 } else { 1756 num_vfs = esw->dev->priv.sriov.num_vfs; 1757 } 1758 1759 esw_offloads_devcom_cleanup(esw); 1760 esw_offloads_unload_all_reps(esw, num_vfs); 1761 esw_offloads_steering_cleanup(esw); 1762 } 1763 1764 static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode) 1765 { 1766 switch (mode) { 1767 case DEVLINK_ESWITCH_MODE_LEGACY: 1768 *mlx5_mode = SRIOV_LEGACY; 1769 break; 1770 case DEVLINK_ESWITCH_MODE_SWITCHDEV: 1771 *mlx5_mode = SRIOV_OFFLOADS; 1772 break; 1773 default: 1774 return -EINVAL; 1775 } 1776 1777 return 0; 1778 } 1779 1780 static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode) 1781 { 1782 switch (mlx5_mode) { 1783 case SRIOV_LEGACY: 1784 *mode = DEVLINK_ESWITCH_MODE_LEGACY; 1785 break; 1786 case SRIOV_OFFLOADS: 1787 *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV; 1788 break; 1789 default: 1790 return -EINVAL; 1791 } 1792 1793 return 0; 1794 } 1795 1796 static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode) 1797 { 1798 switch (mode) { 1799 case DEVLINK_ESWITCH_INLINE_MODE_NONE: 1800 *mlx5_mode = MLX5_INLINE_MODE_NONE; 1801 break; 1802 case DEVLINK_ESWITCH_INLINE_MODE_LINK: 1803 *mlx5_mode = MLX5_INLINE_MODE_L2; 1804 break; 1805 case DEVLINK_ESWITCH_INLINE_MODE_NETWORK: 1806 *mlx5_mode = MLX5_INLINE_MODE_IP; 1807 break; 1808 case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT: 1809 *mlx5_mode = MLX5_INLINE_MODE_TCP_UDP; 1810 break; 1811 default: 1812 return -EINVAL; 1813 } 1814 1815 return 0; 1816 } 1817 1818 static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode) 1819 { 1820 switch (mlx5_mode) { 1821 case MLX5_INLINE_MODE_NONE: 1822 *mode = DEVLINK_ESWITCH_INLINE_MODE_NONE; 1823 break; 1824 case MLX5_INLINE_MODE_L2: 1825 *mode = DEVLINK_ESWITCH_INLINE_MODE_LINK; 1826 break; 1827 case MLX5_INLINE_MODE_IP: 1828 *mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK; 1829 break; 1830 case MLX5_INLINE_MODE_TCP_UDP: 1831 *mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT; 1832 break; 1833 default: 1834 return -EINVAL; 1835 } 1836 1837 return 0; 1838 } 1839 1840 static int mlx5_devlink_eswitch_check(struct devlink *devlink) 1841 { 1842 struct mlx5_core_dev *dev = devlink_priv(devlink); 1843 1844 if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) 1845 return -EOPNOTSUPP; 1846 1847 if(!MLX5_ESWITCH_MANAGER(dev)) 1848 return -EPERM; 1849 1850 if (dev->priv.eswitch->mode == SRIOV_NONE && 1851 !mlx5_core_is_ecpf_esw_manager(dev)) 1852 return -EOPNOTSUPP; 1853 1854 return 0; 1855 } 1856 1857 int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, 1858 struct netlink_ext_ack *extack) 1859 { 1860 struct mlx5_core_dev *dev = devlink_priv(devlink); 1861 u16 cur_mlx5_mode, mlx5_mode = 0; 1862 int err; 1863 1864 err = mlx5_devlink_eswitch_check(devlink); 1865 if (err) 1866 return err; 1867 1868 cur_mlx5_mode = dev->priv.eswitch->mode; 1869 1870 if (esw_mode_from_devlink(mode, &mlx5_mode)) 1871 return -EINVAL; 1872 1873 if (cur_mlx5_mode == mlx5_mode) 1874 return 0; 1875 1876 if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) 1877 return esw_offloads_start(dev->priv.eswitch, extack); 1878 else if (mode == DEVLINK_ESWITCH_MODE_LEGACY) 1879 return esw_offloads_stop(dev->priv.eswitch, extack); 1880 else 1881 return -EINVAL; 1882 } 1883 1884 int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) 1885 { 1886 struct mlx5_core_dev *dev = devlink_priv(devlink); 1887 int err; 1888 1889 err = mlx5_devlink_eswitch_check(devlink); 1890 if (err) 1891 return err; 1892 1893 return esw_mode_to_devlink(dev->priv.eswitch->mode, mode); 1894 } 1895 1896 int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode, 1897 struct netlink_ext_ack *extack) 1898 { 1899 struct mlx5_core_dev *dev = devlink_priv(devlink); 1900 struct mlx5_eswitch *esw = dev->priv.eswitch; 1901 int err, vport; 1902 u8 mlx5_mode; 1903 1904 err = mlx5_devlink_eswitch_check(devlink); 1905 if (err) 1906 return err; 1907 1908 switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) { 1909 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED: 1910 if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE) 1911 return 0; 1912 /* fall through */ 1913 case MLX5_CAP_INLINE_MODE_L2: 1914 NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set"); 1915 return -EOPNOTSUPP; 1916 case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT: 1917 break; 1918 } 1919 1920 if (esw->offloads.num_flows > 0) { 1921 NL_SET_ERR_MSG_MOD(extack, 1922 "Can't set inline mode when flows are configured"); 1923 return -EOPNOTSUPP; 1924 } 1925 1926 err = esw_inline_mode_from_devlink(mode, &mlx5_mode); 1927 if (err) 1928 goto out; 1929 1930 for (vport = 1; vport < esw->enabled_vports; vport++) { 1931 err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode); 1932 if (err) { 1933 NL_SET_ERR_MSG_MOD(extack, 1934 "Failed to set min inline on vport"); 1935 goto revert_inline_mode; 1936 } 1937 } 1938 1939 esw->offloads.inline_mode = mlx5_mode; 1940 return 0; 1941 1942 revert_inline_mode: 1943 while (--vport > 0) 1944 mlx5_modify_nic_vport_min_inline(dev, 1945 vport, 1946 esw->offloads.inline_mode); 1947 out: 1948 return err; 1949 } 1950 1951 int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode) 1952 { 1953 struct mlx5_core_dev *dev = devlink_priv(devlink); 1954 struct mlx5_eswitch *esw = dev->priv.eswitch; 1955 int err; 1956 1957 err = mlx5_devlink_eswitch_check(devlink); 1958 if (err) 1959 return err; 1960 1961 return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode); 1962 } 1963 1964 int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode) 1965 { 1966 u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2; 1967 struct mlx5_core_dev *dev = esw->dev; 1968 int vport; 1969 1970 if (!MLX5_CAP_GEN(dev, vport_group_manager)) 1971 return -EOPNOTSUPP; 1972 1973 if (esw->mode == SRIOV_NONE) 1974 return -EOPNOTSUPP; 1975 1976 switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) { 1977 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED: 1978 mlx5_mode = MLX5_INLINE_MODE_NONE; 1979 goto out; 1980 case MLX5_CAP_INLINE_MODE_L2: 1981 mlx5_mode = MLX5_INLINE_MODE_L2; 1982 goto out; 1983 case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT: 1984 goto query_vports; 1985 } 1986 1987 query_vports: 1988 for (vport = 1; vport <= nvfs; vport++) { 1989 mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode); 1990 if (vport > 1 && prev_mlx5_mode != mlx5_mode) 1991 return -EINVAL; 1992 prev_mlx5_mode = mlx5_mode; 1993 } 1994 1995 out: 1996 *mode = mlx5_mode; 1997 return 0; 1998 } 1999 2000 int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap, 2001 struct netlink_ext_ack *extack) 2002 { 2003 struct mlx5_core_dev *dev = devlink_priv(devlink); 2004 struct mlx5_eswitch *esw = dev->priv.eswitch; 2005 int err; 2006 2007 err = mlx5_devlink_eswitch_check(devlink); 2008 if (err) 2009 return err; 2010 2011 if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE && 2012 (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) || 2013 !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))) 2014 return -EOPNOTSUPP; 2015 2016 if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC) 2017 return -EOPNOTSUPP; 2018 2019 if (esw->mode == SRIOV_LEGACY) { 2020 esw->offloads.encap = encap; 2021 return 0; 2022 } 2023 2024 if (esw->offloads.encap == encap) 2025 return 0; 2026 2027 if (esw->offloads.num_flows > 0) { 2028 NL_SET_ERR_MSG_MOD(extack, 2029 "Can't set encapsulation when flows are configured"); 2030 return -EOPNOTSUPP; 2031 } 2032 2033 esw_destroy_offloads_fdb_tables(esw); 2034 2035 esw->offloads.encap = encap; 2036 2037 err = esw_create_offloads_fdb_tables(esw, esw->nvports); 2038 2039 if (err) { 2040 NL_SET_ERR_MSG_MOD(extack, 2041 "Failed re-creating fast FDB table"); 2042 esw->offloads.encap = !encap; 2043 (void)esw_create_offloads_fdb_tables(esw, esw->nvports); 2044 } 2045 2046 return err; 2047 } 2048 2049 int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap) 2050 { 2051 struct mlx5_core_dev *dev = devlink_priv(devlink); 2052 struct mlx5_eswitch *esw = dev->priv.eswitch; 2053 int err; 2054 2055 err = mlx5_devlink_eswitch_check(devlink); 2056 if (err) 2057 return err; 2058 2059 *encap = esw->offloads.encap; 2060 return 0; 2061 } 2062 2063 void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw, 2064 struct mlx5_eswitch_rep_if *__rep_if, 2065 u8 rep_type) 2066 { 2067 struct mlx5_eswitch_rep_if *rep_if; 2068 struct mlx5_eswitch_rep *rep; 2069 int i; 2070 2071 mlx5_esw_for_all_reps(esw, i, rep) { 2072 rep_if = &rep->rep_if[rep_type]; 2073 rep_if->load = __rep_if->load; 2074 rep_if->unload = __rep_if->unload; 2075 rep_if->get_proto_dev = __rep_if->get_proto_dev; 2076 rep_if->priv = __rep_if->priv; 2077 2078 rep_if->state = REP_REGISTERED; 2079 } 2080 } 2081 EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps); 2082 2083 void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type) 2084 { 2085 u16 max_vf = mlx5_core_max_vfs(esw->dev); 2086 struct mlx5_eswitch_rep *rep; 2087 int i; 2088 2089 if (esw->mode == SRIOV_OFFLOADS) 2090 __unload_reps_all_vport(esw, max_vf, rep_type); 2091 2092 mlx5_esw_for_all_reps(esw, i, rep) 2093 rep->rep_if[rep_type].state = REP_UNREGISTERED; 2094 } 2095 EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps); 2096 2097 void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type) 2098 { 2099 struct mlx5_eswitch_rep *rep; 2100 2101 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK); 2102 return rep->rep_if[rep_type].priv; 2103 } 2104 2105 void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw, 2106 int vport, 2107 u8 rep_type) 2108 { 2109 struct mlx5_eswitch_rep *rep; 2110 2111 rep = mlx5_eswitch_get_rep(esw, vport); 2112 2113 if (rep->rep_if[rep_type].state == REP_LOADED && 2114 rep->rep_if[rep_type].get_proto_dev) 2115 return rep->rep_if[rep_type].get_proto_dev(rep); 2116 return NULL; 2117 } 2118 EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev); 2119 2120 void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type) 2121 { 2122 return mlx5_eswitch_get_proto_dev(esw, MLX5_VPORT_UPLINK, rep_type); 2123 } 2124 EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev); 2125 2126 struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw, 2127 int vport) 2128 { 2129 return mlx5_eswitch_get_rep(esw, vport); 2130 } 2131 EXPORT_SYMBOL(mlx5_eswitch_vport_rep); 2132