/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/idr.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
#include "esw/indir_table.h"
#include "esw/acl/ofld.h"
#include "rdma.h"
#include "en.h"
#include "fs_core.h"
#include "lib/devcom.h"
#include "lib/eq.h"
#include "lib/fs_chains.h"
#include "en_tc.h"
#include "en/mapping.h"
#include "devlink.h"

#define mlx5_esw_for_each_rep(esw, i, rep) \
	xa_for_each(&((esw)->offloads.vport_reps), i, rep)

#define mlx5_esw_for_each_sf_rep(esw, i, rep) \
	xa_for_each_marked(&((esw)->offloads.vport_reps), i, rep, MLX5_ESW_VPT_SF)

#define mlx5_esw_for_each_vf_rep(esw, index, rep) \
	mlx5_esw_for_each_entry_marked(&((esw)->offloads.vport_reps), index, \
				       rep, (esw)->esw_funcs.num_vfs, MLX5_ESW_VPT_VF)

/* There are two match-all miss flows, one for unicast dst mac and
 * one for multicast.
 */
#define MLX5_ESW_MISS_FLOWS (2)
#define UPLINK_REP_INDEX 0

#define MLX5_ESW_VPORT_TBL_SIZE 128
#define MLX5_ESW_VPORT_TBL_NUM_GROUPS 4

static const struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_mirror_ns = {
	.max_fte = MLX5_ESW_VPORT_TBL_SIZE,
	.max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS,
	.flags = 0,
};

static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
						     u16 vport_num)
{
	return xa_load(&esw->offloads.vport_reps, vport_num);
}

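/* Set the flow_source hint on a rule's flow context. Internal ports
 * provide their own flow source; otherwise it is derived from the
 * in_rep vport: uplink vs. local vport.
 */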
static void
mlx5_eswitch_set_rule_flow_source(struct mlx5_eswitch *esw,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_esw_flow_attr *attr)
{
	if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) || !attr || !attr->in_rep)
		return;

	if (attr->int_port) {
		spec->flow_context.flow_source = mlx5e_tc_int_port_get_flow_source(attr->int_port);
		return;
	}

	spec->flow_context.flow_source = (attr->in_rep->vport == MLX5_VPORT_UPLINK) ?
					 MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK :
					 MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
}

/* Only the upper 16 bits of reg c0 actually need to be cleared, but the
 * lower 16 bits are not used afterwards either, so clear them all for
 * simplicity.
 */
void
mlx5_eswitch_clear_rule_source_port(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec)
{
	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		void *misc2;

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, 0);

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, 0);

		if (!memchr_inv(misc2, 0, MLX5_ST_SZ_BYTES(fte_match_set_misc2)))
			spec->match_criteria_enable &= ~MLX5_MATCH_MISC_PARAMETERS_2;
	}
}

static void
mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_flow_attr *attr,
				  struct mlx5_eswitch *src_esw,
				  u16 vport)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	u32 metadata;
	void *misc2;
	void *misc;

	/* Use metadata matching because vport is not represented by single
	 * VHCA in dual-port RoCE mode, and matching on source vport may fail.
	 */
	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		if (mlx5_esw_indir_table_decap_vport(attr))
			vport = mlx5_esw_indir_table_decap_vport(attr);

		if (esw_attr->int_port)
			metadata =
				mlx5e_tc_int_port_get_metadata_for_match(esw_attr->int_port);
		else
			metadata =
				mlx5_eswitch_get_vport_metadata_for_match(src_esw, vport);

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, metadata);

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);

		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id,
				 MLX5_CAP_GEN(src_esw->dev, vhca_id));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET_TO_ONES(fte_match_set_misc, misc,
					 source_eswitch_owner_vhca_id);

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
	}
}

static int
esw_setup_decap_indir(struct mlx5_eswitch *esw,
		      struct mlx5_flow_attr *attr,
		      struct mlx5_flow_spec *spec)
{
	struct mlx5_flow_table *ft;

	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SRC_REWRITE))
		return -EOPNOTSUPP;

	ft = mlx5_esw_indir_table_get(esw, attr, spec,
				      mlx5_esw_indir_table_decap_vport(attr), true);
	return PTR_ERR_OR_ZERO(ft);
}

static void
esw_cleanup_decap_indir(struct mlx5_eswitch *esw,
			struct mlx5_flow_attr *attr)
{
	if (mlx5_esw_indir_table_decap_vport(attr))
		mlx5_esw_indir_table_put(esw, attr,
					 mlx5_esw_indir_table_decap_vport(attr),
					 true);
}

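/* The esw_setup_*_dest() helpers below each fill one entry of the
 * destination array passed to mlx5_add_flow_rules() and adjust the
 * flow_act flags (e.g. FLOW_ACT_IGNORE_FLOW_LEVEL) as the destination
 * type requires.
 */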
static int
esw_setup_sampler_dest(struct mlx5_flow_destination *dest,
		       struct mlx5_flow_act *flow_act,
		       struct mlx5_flow_attr *attr,
		       int i)
{
	flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
	dest[i].sampler_id = attr->sample_attr->sampler_id;

	return 0;
}

static int
esw_setup_ft_dest(struct mlx5_flow_destination *dest,
		  struct mlx5_flow_act *flow_act,
		  struct mlx5_eswitch *esw,
		  struct mlx5_flow_attr *attr,
		  struct mlx5_flow_spec *spec,
		  int i)
{
	flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = attr->dest_ft;

	if (mlx5_esw_indir_table_decap_vport(attr))
		return esw_setup_decap_indir(esw, attr, spec);
	return 0;
}

static void
esw_setup_slow_path_dest(struct mlx5_flow_destination *dest,
			 struct mlx5_flow_act *flow_act,
			 struct mlx5_fs_chains *chains,
			 int i)
{
	if (mlx5_chains_ignore_flow_level_supported(chains))
		flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = mlx5_chains_get_tc_end_ft(chains);
}

static int
esw_setup_chain_dest(struct mlx5_flow_destination *dest,
		     struct mlx5_flow_act *flow_act,
		     struct mlx5_fs_chains *chains,
		     u32 chain, u32 prio, u32 level,
		     int i)
{
	struct mlx5_flow_table *ft;

	flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	ft = mlx5_chains_get_table(chains, chain, prio, level);
	if (IS_ERR(ft))
		return PTR_ERR(ft);

	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = ft;
	return 0;
}

static void esw_put_dest_tables_loop(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr,
				     int from, int to)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	int i;

	for (i = from; i < to; i++)
		if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
			mlx5_chains_put_table(chains, 0, 1, 0);
		else if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
						     esw_attr->dests[i].mdev))
			mlx5_esw_indir_table_put(esw, attr, esw_attr->dests[i].rep->vport,
						 false);
}

static bool
esw_is_chain_src_port_rewrite(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr)
{
	int i;

	for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
		if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
			return true;
	return false;
}

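/* Forward to a chain as a destination while source port rewrite is in
 * effect, applying the destination's encap pkt_reformat on the way if
 * one is set.
 */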
static int
esw_setup_chain_src_port_rewrite(struct mlx5_flow_destination *dest,
				 struct mlx5_flow_act *flow_act,
				 struct mlx5_eswitch *esw,
				 struct mlx5_fs_chains *chains,
				 struct mlx5_flow_attr *attr,
				 int *i)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	int err;

	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SRC_REWRITE))
		return -EOPNOTSUPP;

	/* flow steering cannot handle more than one dest with the same ft
	 * in a single flow
	 */
	if (esw_attr->out_count - esw_attr->split_count > 1)
		return -EOPNOTSUPP;

	err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain, 1, 0, *i);
	if (err)
		return err;

	if (esw_attr->dests[esw_attr->split_count].pkt_reformat) {
		flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
		flow_act->pkt_reformat = esw_attr->dests[esw_attr->split_count].pkt_reformat;
	}
	(*i)++;

	return 0;
}

static void esw_cleanup_chain_src_port_rewrite(struct mlx5_eswitch *esw,
					       struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;

	esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, esw_attr->out_count);
}

static bool
esw_is_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	bool result = false;
	int i;

	/* An indirect table is supported only for flows whose in_port is the
	 * uplink and whose destination is a vport on the same eswitch as the
	 * uplink; return false if at least one destination doesn't meet this
	 * criterion.
	 */
	for (i = esw_attr->split_count; i < esw_attr->out_count; i++) {
		if (esw_attr->dests[i].rep &&
		    mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
						esw_attr->dests[i].mdev)) {
			result = true;
		} else {
			result = false;
			break;
		}
	}
	return result;
}

static int
esw_setup_indir_table(struct mlx5_flow_destination *dest,
		      struct mlx5_flow_act *flow_act,
		      struct mlx5_eswitch *esw,
		      struct mlx5_flow_attr *attr,
		      struct mlx5_flow_spec *spec,
		      bool ignore_flow_lvl,
		      int *i)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	int j, err;

	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SRC_REWRITE))
		return -EOPNOTSUPP;

	for (j = esw_attr->split_count; j < esw_attr->out_count; j++, (*i)++) {
		if (ignore_flow_lvl)
			flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
		dest[*i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;

		dest[*i].ft = mlx5_esw_indir_table_get(esw, attr, spec,
						       esw_attr->dests[j].rep->vport, false);
		if (IS_ERR(dest[*i].ft)) {
			err = PTR_ERR(dest[*i].ft);
			goto err_indir_tbl_get;
		}
	}

	if (mlx5_esw_indir_table_decap_vport(attr)) {
		err = esw_setup_decap_indir(esw, attr, spec);
		if (err)
			goto err_indir_tbl_get;
	}

	return 0;

err_indir_tbl_get:
	esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, j);
	return err;
}

static void esw_cleanup_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;

	esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, esw_attr->out_count);
	esw_cleanup_decap_indir(esw, attr);
}

static void
esw_cleanup_chain_dest(struct mlx5_fs_chains *chains, u32 chain, u32 prio, u32 level)
{
	mlx5_chains_put_table(chains, chain, prio, level);
}

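/* Fill a vport destination entry: on merged-eswitch setups the
 * destination also carries the owning function's vhca_id, and
 * MLX5_ESW_DEST_ENCAP destinations get their pkt_reformat attached.
 */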
static void
esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
		     struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
		     int attr_idx, int dest_idx, bool pkt_reformat)
{
	dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest[dest_idx].vport.num = esw_attr->dests[attr_idx].rep->vport;
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
		dest[dest_idx].vport.vhca_id =
			MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
		dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
	}
	if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP) {
		if (pkt_reformat) {
			flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
			flow_act->pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
		}
		dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
		dest[dest_idx].vport.pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
	}
}

static int
esw_setup_vport_dests(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
		      struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
		      int i)
{
	int j;

	for (j = esw_attr->split_count; j < esw_attr->out_count; j++, i++)
		esw_setup_vport_dest(dest, flow_act, esw, esw_attr, j, i, true);
	return i;
}

static bool
esw_src_port_rewrite_supported(struct mlx5_eswitch *esw)
{
	return MLX5_CAP_GEN(esw->dev, reg_c_preserve) &&
	       mlx5_eswitch_vport_match_metadata_enabled(esw) &&
	       MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level);
}

static int
esw_setup_dests(struct mlx5_flow_destination *dest,
		struct mlx5_flow_act *flow_act,
		struct mlx5_eswitch *esw,
		struct mlx5_flow_attr *attr,
		struct mlx5_flow_spec *spec,
		int *i)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	int err = 0;

	if (!mlx5_eswitch_termtbl_required(esw, attr, flow_act, spec) &&
	    esw_src_port_rewrite_supported(esw))
		attr->flags |= MLX5_ESW_ATTR_FLAG_SRC_REWRITE;

	if (attr->flags & MLX5_ESW_ATTR_FLAG_SAMPLE) {
		esw_setup_sampler_dest(dest, flow_act, attr, *i);
		(*i)++;
	} else if (attr->dest_ft) {
		esw_setup_ft_dest(dest, flow_act, esw, attr, spec, *i);
		(*i)++;
	} else if (mlx5_esw_attr_flags_skip(attr->flags)) {
		esw_setup_slow_path_dest(dest, flow_act, chains, *i);
		(*i)++;
	} else if (attr->dest_chain) {
		err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain,
					   1, 0, *i);
		(*i)++;
	} else if (esw_is_indir_table(esw, attr)) {
		err = esw_setup_indir_table(dest, flow_act, esw, attr, spec, true, i);
	} else if (esw_is_chain_src_port_rewrite(esw, esw_attr)) {
		err = esw_setup_chain_src_port_rewrite(dest, flow_act, esw, chains, attr, i);
	} else {
		*i = esw_setup_vport_dests(dest, flow_act, esw, esw_attr, *i);
	}

	return err;
}

static void
esw_cleanup_dests(struct mlx5_eswitch *esw,
		  struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);

	if (attr->dest_ft) {
		esw_cleanup_decap_indir(esw, attr);
	} else if (!mlx5_esw_attr_flags_skip(attr->flags)) {
		if (attr->dest_chain)
			esw_cleanup_chain_dest(chains, attr->dest_chain, 1, 0);
		else if (esw_is_indir_table(esw, attr))
			esw_cleanup_indir_table(esw, attr);
		else if (esw_is_chain_src_port_rewrite(esw, esw_attr))
			esw_cleanup_chain_src_port_rewrite(esw, attr);
	}
}

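/* Install an offloaded FDB rule: resolve the destination array via
 * esw_setup_dests(), append a flow counter when requested, and pick the
 * FDB table to add the rule to - the per-vport mirror table for split
 * rules, a chain/prio table (or attr->ft) otherwise.
 */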
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_flow_attr *attr)
{
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	bool split = !!(esw_attr->split_count);
	struct mlx5_vport_tbl_attr fwd_attr;
	struct mlx5_flow_destination *dest;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *fdb;
	int i = 0;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	dest = kcalloc(MLX5_MAX_FLOW_FWD_VPORTS + 1, sizeof(*dest), GFP_KERNEL);
	if (!dest)
		return ERR_PTR(-ENOMEM);

	flow_act.action = attr->action;
	/* if per flow vlan pop/push is emulated, don't set that into the firmware */
	if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
				     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
		flow_act.vlan[0].ethtype = ntohs(esw_attr->vlan_proto[0]);
		flow_act.vlan[0].vid = esw_attr->vlan_vid[0];
		flow_act.vlan[0].prio = esw_attr->vlan_prio[0];
		if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
			flow_act.vlan[1].ethtype = ntohs(esw_attr->vlan_proto[1]);
			flow_act.vlan[1].vid = esw_attr->vlan_vid[1];
			flow_act.vlan[1].prio = esw_attr->vlan_prio[1];
		}
	}

	mlx5_eswitch_set_rule_flow_source(esw, spec, esw_attr);

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		int err;

		err = esw_setup_dests(dest, &flow_act, esw, attr, spec, &i);
		if (err) {
			rule = ERR_PTR(err);
			goto err_create_goto_table;
		}
	}

	if (esw_attr->decap_pkt_reformat)
		flow_act.pkt_reformat = esw_attr->decap_pkt_reformat;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter_id = mlx5_fc_id(attr->counter);
		i++;
	}

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
	if (attr->inner_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_hdr = attr->modify_hdr;

	if (split) {
		fwd_attr.chain = attr->chain;
		fwd_attr.prio = attr->prio;
		fwd_attr.vport = esw_attr->in_rep->vport;
		fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;

		fdb = mlx5_esw_vporttbl_get(esw, &fwd_attr);
	} else {
		if (attr->chain || attr->prio)
			fdb = mlx5_chains_get_table(chains, attr->chain,
						    attr->prio, 0);
		else
			fdb = attr->ft;

		if (!(attr->flags & MLX5_ESW_ATTR_FLAG_NO_IN_PORT))
			mlx5_eswitch_set_rule_source_port(esw, spec, attr,
							  esw_attr->in_mdev->priv.eswitch,
							  esw_attr->in_rep->vport);
	}
	if (IS_ERR(fdb)) {
		rule = ERR_CAST(fdb);
		goto err_esw_get;
	}

	if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec))
		rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, esw_attr,
						     &flow_act, dest, i);
	else
		rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
	if (IS_ERR(rule))
		goto err_add_rule;
	else
		atomic64_inc(&esw->offloads.num_flows);

	kfree(dest);
	return rule;

err_add_rule:
	if (split)
		mlx5_esw_vporttbl_put(esw, &fwd_attr);
	else if (attr->chain || attr->prio)
		mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
err_esw_get:
	esw_cleanup_dests(esw, attr);
err_create_goto_table:
	kfree(dest);
	return rule;
}

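/* Split (mirror) rules are installed in two parts: the rule added here
 * in the fast FDB handles the pre-mirror destinations and forwards to
 * the per-vport table, where the remainder of the rule continues
 * processing.
 */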
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_flow_attr *attr)
{
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	struct mlx5_vport_tbl_attr fwd_attr;
	struct mlx5_flow_destination *dest;
	struct mlx5_flow_table *fast_fdb;
	struct mlx5_flow_table *fwd_fdb;
	struct mlx5_flow_handle *rule;
	int i, err = 0;

	dest = kcalloc(MLX5_MAX_FLOW_FWD_VPORTS + 1, sizeof(*dest), GFP_KERNEL);
	if (!dest)
		return ERR_PTR(-ENOMEM);

	fast_fdb = mlx5_chains_get_table(chains, attr->chain, attr->prio, 0);
	if (IS_ERR(fast_fdb)) {
		rule = ERR_CAST(fast_fdb);
		goto err_get_fast;
	}

	fwd_attr.chain = attr->chain;
	fwd_attr.prio = attr->prio;
	fwd_attr.vport = esw_attr->in_rep->vport;
	fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
	fwd_fdb = mlx5_esw_vporttbl_get(esw, &fwd_attr);
	if (IS_ERR(fwd_fdb)) {
		rule = ERR_CAST(fwd_fdb);
		goto err_get_fwd;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	for (i = 0; i < esw_attr->split_count; i++) {
		if (esw_is_indir_table(esw, attr))
			err = esw_setup_indir_table(dest, &flow_act, esw, attr, spec, false, &i);
		else if (esw_is_chain_src_port_rewrite(esw, esw_attr))
			err = esw_setup_chain_src_port_rewrite(dest, &flow_act, esw, chains, attr,
							       &i);
		else
			esw_setup_vport_dest(dest, &flow_act, esw, esw_attr, i, i, false);

		if (err) {
			rule = ERR_PTR(err);
			goto err_chain_src_rewrite;
		}
	}
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = fwd_fdb;
	i++;

	mlx5_eswitch_set_rule_source_port(esw, spec, attr,
					  esw_attr->in_mdev->priv.eswitch,
					  esw_attr->in_rep->vport);

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);

	if (IS_ERR(rule)) {
		i = esw_attr->split_count;
		goto err_chain_src_rewrite;
	}

	atomic64_inc(&esw->offloads.num_flows);

	kfree(dest);
	return rule;
err_chain_src_rewrite:
	esw_put_dest_tables_loop(esw, attr, 0, i);
	mlx5_esw_vporttbl_put(esw, &fwd_attr);
err_get_fwd:
	mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
err_get_fast:
	kfree(dest);
	return rule;
}

static void
__mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
			struct mlx5_flow_handle *rule,
			struct mlx5_flow_attr *attr,
			bool fwd_rule)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	bool split = (esw_attr->split_count > 0);
	struct mlx5_vport_tbl_attr fwd_attr;
	int i;

	mlx5_del_flow_rules(rule);

	if (!mlx5_esw_attr_flags_skip(attr->flags)) {
		/* unref the term table */
		for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
			if (esw_attr->dests[i].termtbl)
				mlx5_eswitch_termtbl_put(esw, esw_attr->dests[i].termtbl);
		}
	}

	atomic64_dec(&esw->offloads.num_flows);

	if (fwd_rule || split) {
		fwd_attr.chain = attr->chain;
		fwd_attr.prio = attr->prio;
		fwd_attr.vport = esw_attr->in_rep->vport;
		fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
	}

	if (fwd_rule) {
		mlx5_esw_vporttbl_put(esw, &fwd_attr);
		mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
		esw_put_dest_tables_loop(esw, attr, 0, esw_attr->split_count);
	} else {
		if (split)
			mlx5_esw_vporttbl_put(esw, &fwd_attr);
		else if (attr->chain || attr->prio)
			mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
		esw_cleanup_dests(esw, attr);
	}
}

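/* Deletion wrappers: a fwd rule releases the per-vport table and its
 * fast FDB chain reference, a regular rule releases whatever
 * esw_setup_dests() took on its behalf.
 */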
void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, false);
}

void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, true);
}

static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
	struct mlx5_eswitch_rep *rep;
	unsigned long i;
	int err = 0;

	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
	mlx5_esw_for_each_host_func_vport(esw, i, rep, esw->esw_funcs.num_vfs) {
		if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
			continue;

		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
		if (err)
			goto out;
	}

out:
	return err;
}

static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

	in_rep = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push)
		vport = in_rep;
	else if (pop)
		vport = out_rep;
	else
		vport = in_rep;

	return vport;
}

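/* On HW that lacks per-flow vlan actions the eswitch emulates them with
 * per-vport vlan stripping/insertion, so a new rule is only accepted
 * when it cannot conflict with a vlan already configured on the vport.
 */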
static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
				     bool push, bool pop, bool fwd)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep;

	if ((push || pop) && !fwd)
		goto out_notsupp;

	in_rep = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push && in_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	if (pop && out_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	/* vport has vlan push configured, can't offload VF --> wire rules without it */
	if (!push && !pop && fwd)
		if (in_rep->vlan && out_rep->vport == MLX5_VPORT_UPLINK)
			goto out_notsupp;

	/* protects against (1) setting rules with different vlans to push and
	 * (2) setting rules without vlans (attr->vlan = 0) && with vlans to push (!= 0)
	 */
	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0]))
		goto out_notsupp;

	return 0;

out_notsupp:
	return -EOPNOTSUPP;
}

int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* no-op if we're in the non-emulated vlan push/pop mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
		 !attr->dest_chain);

	mutex_lock(&esw->state_lock);

	err = esw_add_vlan_action_check(esw_attr, push, pop, fwd);
	if (err)
		goto unlock;

	attr->flags &= ~MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;

	vport = esw_vlan_action_get_vport(esw_attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (esw_attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
			vport->vlan_refcount++;
			attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
		}

		goto unlock;
	}

	if (!push && !pop)
		goto unlock;

	if (!(offloads->vlan_push_pop_refcount)) {
		/* it's the 1st vlan rule, apply global vlan pop policy */
		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
		if (err)
			goto out;
	}
	offloads->vlan_push_pop_refcount++;

	if (push) {
		if (vport->vlan_refcount)
			goto skip_set_push;

		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, esw_attr->vlan_vid[0],
						    0, SET_VLAN_INSERT | SET_VLAN_STRIP);
		if (err)
			goto out;
		vport->vlan = esw_attr->vlan_vid[0];
skip_set_push:
		vport->vlan_refcount++;
	}
out:
	if (!err)
		attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}

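/* Undo mlx5_eswitch_add_vlan_action(): drop the push/pop refcounts and
 * lift the global vlan pop policy once the last vlan rule is removed.
 */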
int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* no-op if we're in the non-emulated vlan push/pop mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_VLAN_HANDLED))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	mutex_lock(&esw->state_lock);

	vport = esw_vlan_action_get_vport(esw_attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (esw_attr->dests[0].rep->vport == MLX5_VPORT_UPLINK)
			vport->vlan_refcount--;

		goto out;
	}

	if (push) {
		vport->vlan_refcount--;
		if (vport->vlan_refcount)
			goto skip_unset_push;

		vport->vlan = 0;
		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
						    0, 0, SET_VLAN_STRIP);
		if (err)
			goto out;
	}

skip_unset_push:
	offloads->vlan_push_pop_refcount--;
	if (offloads->vlan_push_pop_refcount)
		goto out;

	/* no more vlan rules, stop global vlan pop policy */
	err = esw_set_global_vlan_pop(esw, 0);

out:
	mutex_unlock(&esw->state_lock);
	return err;
}

struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
				    struct mlx5_eswitch *from_esw,
				    struct mlx5_eswitch_rep *rep,
				    u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	/* source vport is the esw manager */
	MLX5_SET(fte_match_set_misc, misc, source_port, from_esw->manager_vport);
	if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch))
		MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(from_esw->dev, vhca_id));

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
	if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch))
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = rep->vport;
	dest.vport.vhca_id = MLX5_CAP_GEN(rep->esw->dev, vhca_id);
	dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	if (rep->vport == MLX5_VPORT_UPLINK)
		spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;

	flow_rule = mlx5_add_flow_rules(on_esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(on_esw->dev, "FDB: Failed to add send to vport rule err %ld\n",
			 PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}
EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);

void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
	mlx5_del_flow_rules(rule);
}

static void mlx5_eswitch_del_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_handle **flows = esw->fdb_table.offloads.send_to_vport_meta_rules;
	int i = 0, num_vfs = esw->esw_funcs.num_vfs;

	if (!num_vfs || !flows)
		return;

	for (i = 0; i < num_vfs; i++)
		mlx5_del_flow_rules(flows[i]);

	kvfree(flows);
}

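/* Per-VF "send to vport" meta rules match the source port metadata in
 * reg_c0 plus the slow-table goto mark in reg_c1 and forward the packet
 * to the corresponding VF vport.
 */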
static int
mlx5_eswitch_add_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {0};
	int num_vfs, rule_idx = 0, err = 0;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_handle **flows;
	struct mlx5_flow_spec *spec;
	struct mlx5_vport *vport;
	unsigned long i;
	u16 vport_num;

	num_vfs = esw->esw_funcs.num_vfs;
	flows = kvcalloc(num_vfs, sizeof(*flows), GFP_KERNEL);
	if (!flows)
		return -ENOMEM;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto alloc_err;
	}

	MLX5_SET(fte_match_param, spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
	MLX5_SET(fte_match_param, spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
	MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_1,
		 ESW_TUN_SLOW_TABLE_GOTO_VPORT_MARK);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) {
		vport_num = vport->vport;
		MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));
		dest.vport.num = vport_num;

		flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
						spec, &flow_act, &dest, 1);
		if (IS_ERR(flow_rule)) {
			err = PTR_ERR(flow_rule);
			esw_warn(esw->dev, "FDB: Failed to add send to vport meta rule idx %d, err %ld\n",
				 rule_idx, PTR_ERR(flow_rule));
			goto rule_err;
		}
		flows[rule_idx++] = flow_rule;
	}

	esw->fdb_table.offloads.send_to_vport_meta_rules = flows;
	kvfree(spec);
	return 0;

rule_err:
	while (--rule_idx >= 0)
		mlx5_del_flow_rules(flows[rule_idx]);
	kvfree(spec);
alloc_err:
	kvfree(flows);
	return err;
}

static bool mlx5_eswitch_reg_c1_loopback_supported(struct mlx5_eswitch *esw)
{
	return MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
	       MLX5_FDB_TO_VPORT_REG_C_1;
}

static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
{
	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
	u32 min[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
	u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
	u8 curr, wanted;
	int err;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw) &&
	    !mlx5_eswitch_vport_match_metadata_enabled(esw))
		return 0;

	MLX5_SET(query_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
	err = mlx5_cmd_exec_inout(esw->dev, query_esw_vport_context, in, out);
	if (err)
		return err;

	curr = MLX5_GET(query_esw_vport_context_out, out,
			esw_vport_context.fdb_to_vport_reg_c_id);
	wanted = MLX5_FDB_TO_VPORT_REG_C_0;
	if (mlx5_eswitch_reg_c1_loopback_supported(esw))
		wanted |= MLX5_FDB_TO_VPORT_REG_C_1;

	if (enable)
		curr |= wanted;
	else
		curr &= ~wanted;

	MLX5_SET(modify_esw_vport_context_in, min,
		 esw_vport_context.fdb_to_vport_reg_c_id, curr);
	MLX5_SET(modify_esw_vport_context_in, min,
		 field_select.fdb_to_vport_reg_c_id, 1);

	err = mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false, min);
	if (!err) {
		if (enable && (curr & MLX5_FDB_TO_VPORT_REG_C_1))
			esw->flags |= MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
		else
			esw->flags &= ~MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
	}

	return err;
}

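/* Peer miss rules (merged eswitch): packets coming from a peer device's
 * vport that miss in this FDB are forwarded to the peer's eswitch
 * manager vport, matched by source port metadata or by source port plus
 * the peer's vhca_id.
 */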
static void peer_miss_rules_setup(struct mlx5_eswitch *esw,
				  struct mlx5_core_dev *peer_dev,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_flow_destination *dest)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

		MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(peer_dev, vhca_id));

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);
	}

	dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest->vport.num = peer_dev->priv.eswitch->manager_vport;
	dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
	dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
}

static void esw_set_peer_miss_rule_source_port(struct mlx5_eswitch *esw,
					       struct mlx5_eswitch *peer_esw,
					       struct mlx5_flow_spec *spec,
					       u16 vport)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(peer_esw,
								   vport));
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);
	}
}

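/* One peer miss rule is installed per function vport (PF, ECPF, VFs).
 * The handles are kept in an array indexed by vport->index so that
 * teardown can walk the same order.
 */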
static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
				       struct mlx5_core_dev *peer_dev)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle **flows;
	/* total vports is the same for both e-switches */
	int nvports = esw->total_vports;
	struct mlx5_flow_handle *flow;
	struct mlx5_flow_spec *spec;
	struct mlx5_vport *vport;
	unsigned long i;
	void *misc;
	int err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	peer_miss_rules_setup(esw, peer_dev, spec, &dest);

	flows = kvcalloc(nvports, sizeof(*flows), GFP_KERNEL);
	if (!flows) {
		err = -ENOMEM;
		goto alloc_flows_err;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
		esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
						   spec, MLX5_VPORT_PF);

		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_pf_flow_err;
		}
		flows[vport->index] = flow;
	}

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
		MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_ecpf_flow_err;
		}
		flows[vport->index] = flow;
	}

	mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
		esw_set_peer_miss_rule_source_port(esw,
						   peer_dev->priv.eswitch,
						   spec, vport->vport);

		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_vf_flow_err;
		}
		flows[vport->index] = flow;
	}

	esw->fdb_table.offloads.peer_miss_rules = flows;

	kvfree(spec);
	return 0;

add_vf_flow_err:
	mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
		if (!flows[vport->index])
			continue;
		mlx5_del_flow_rules(flows[vport->index]);
	}
	if (mlx5_ecpf_vport_exists(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
		mlx5_del_flow_rules(flows[vport->index]);
	}
add_ecpf_flow_err:
	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
		mlx5_del_flow_rules(flows[vport->index]);
	}
add_pf_flow_err:
	esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
	kvfree(flows);
alloc_flows_err:
	kvfree(spec);
	return err;
}

static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_handle **flows;
	struct mlx5_vport *vport;
	unsigned long i;

	flows = esw->fdb_table.offloads.peer_miss_rules;

	mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev))
		mlx5_del_flow_rules(flows[vport->index]);

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
		mlx5_del_flow_rules(flows[vport->index]);
	}

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
		mlx5_del_flow_rules(flows[vport->index]);
	}
	kvfree(flows);
}

static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	void *headers_c;
	void *headers_v;
	int err = 0;
	u8 *dmac_c;
	u8 *dmac_v;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				 outer_headers);
	dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
			      outer_headers.dmac_47_16);
	dmac_c[0] = 0x01;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = esw->manager_vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_uni = flow_rule;

	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				 outer_headers);
	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
			      outer_headers.dmac_47_16);
	dmac_v[0] = 0x01;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
		mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_multi = flow_rule;

out:
	kvfree(spec);
	return err;
}

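/* Restore rule: match the tag saved in reg_c0, set it as the flow tag,
 * apply the restore copy modify header and forward to the offloads
 * table.
 */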
struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
{
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_flow_table *ft = esw->offloads.ft_offloads_restore;
	struct mlx5_flow_context *flow_context;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_destination dest;
	struct mlx5_flow_spec *spec;
	void *misc;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return ERR_PTR(-EOPNOTSUPP);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    misc_parameters_2);
	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
		 ESW_REG_C0_USER_DATA_METADATA_MASK);
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters_2);
	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, tag);
	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			  MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	flow_act.modify_hdr = esw->offloads.restore_copy_hdr_id;

	flow_context = &spec->flow_context;
	flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
	flow_context->flow_tag = tag;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = esw->offloads.ft_offloads;

	flow_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	kvfree(spec);

	if (IS_ERR(flow_rule))
		esw_warn(esw->dev,
			 "Failed to create restore rule for tag: %d, err(%d)\n",
			 tag, (int)PTR_ERR(flow_rule));

	return flow_rule;
}

#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32

static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
					   u32 *flow_group_in)
{
	void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					    flow_group_in,
					    match_criteria);

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS_2);

		MLX5_SET(fte_match_param, match_criteria,
			 misc_parameters_2.metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());
	} else {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_port);
	}
}

#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
static void esw_vport_tbl_put(struct mlx5_eswitch *esw)
{
	struct mlx5_vport_tbl_attr attr;
	struct mlx5_vport *vport;
	unsigned long i;

	attr.chain = 0;
	attr.prio = 1;
	mlx5_esw_for_each_vport(esw, i, vport) {
		attr.vport = vport->vport;
		attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
		mlx5_esw_vporttbl_put(esw, &attr);
	}
}

static int esw_vport_tbl_get(struct mlx5_eswitch *esw)
{
	struct mlx5_vport_tbl_attr attr;
	struct mlx5_flow_table *fdb;
	struct mlx5_vport *vport;
	unsigned long i;

	attr.chain = 0;
	attr.prio = 1;
	mlx5_esw_for_each_vport(esw, i, vport) {
		attr.vport = vport->vport;
		attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
		fdb = mlx5_esw_vporttbl_get(esw, &attr);
		if (IS_ERR(fdb))
			goto out;
	}
	return 0;

out:
	esw_vport_tbl_put(esw);
	return PTR_ERR(fdb);
}

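/* Chains offload is only enabled when the FW can forward from a modify
 * header context to another table and reg_c1 loopback is available;
 * otherwise the eswitch falls back to a single chain/prio.
 */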
#define fdb_modify_header_fwd_to_table_supported(esw) \
	(MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_modify_header_fwd_to_table))
static void esw_init_chains_offload_flags(struct mlx5_eswitch *esw, u32 *flags)
{
	struct mlx5_core_dev *dev = esw->dev;

	if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ignore_flow_level))
		*flags |= MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;

	if (!MLX5_CAP_ESW_FLOWTABLE(dev, multi_fdb_encap) &&
	    esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
		*flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
		esw_warn(dev, "Tc chains and priorities offload aren't supported, update firmware if needed\n");
	} else if (!mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
		*flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
		esw_warn(dev, "Tc chains and priorities offload aren't supported\n");
	} else if (!fdb_modify_header_fwd_to_table_supported(esw)) {
		/* Disabled when ttl workaround is needed, e.g. when
		 * ESWITCH_IPV4_TTL_MODIFY_ENABLE = true in mlxconfig
		 */
		esw_warn(dev,
			 "Tc chains and priorities offload aren't supported, check firmware version, or mlxconfig settings\n");
		*flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
	} else {
		*flags |= MLX5_CHAINS_AND_PRIOS_SUPPORTED;
		esw_info(dev, "Supported tc chains and prios offload\n");
	}

	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		*flags |= MLX5_CHAINS_FT_TUNNEL_SUPPORTED;
}

static int
esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *nf_ft, *ft;
	struct mlx5_chains_attr attr = {};
	struct mlx5_fs_chains *chains;
	u32 fdb_max;
	int err;

	fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);

	esw_init_chains_offload_flags(esw, &attr.flags);
	attr.ns = MLX5_FLOW_NAMESPACE_FDB;
	attr.max_ft_sz = fdb_max;
	attr.max_grp_num = esw->params.large_group_num;
	attr.default_ft = miss_fdb;
	attr.mapping = esw->offloads.reg_c0_obj_pool;

	chains = mlx5_chains_create(dev, &attr);
	if (IS_ERR(chains)) {
		err = PTR_ERR(chains);
		esw_warn(dev, "Failed to create fdb chains err(%d)\n", err);
		return err;
	}

	esw->fdb_table.offloads.esw_chains_priv = chains;

	/* Create tc_end_ft which is the always created ft chain */
	nf_ft = mlx5_chains_get_table(chains, mlx5_chains_get_nf_ft_chain(chains),
				      1, 0);
	if (IS_ERR(nf_ft)) {
		err = PTR_ERR(nf_ft);
		goto nf_ft_err;
	}

	/* Always open the root for fast path */
	ft = mlx5_chains_get_table(chains, 0, 1, 0);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto level_0_err;
	}

	/* Open level 1 for split fdb rules now if prios isn't supported */
	if (!mlx5_chains_prios_supported(chains)) {
		err = esw_vport_tbl_get(esw);
		if (err)
			goto level_1_err;
	}

	mlx5_chains_set_end_ft(chains, nf_ft);

	return 0;

level_1_err:
	mlx5_chains_put_table(chains, 0, 1, 0);
level_0_err:
	mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0);
nf_ft_err:
	mlx5_chains_destroy(chains);
	esw->fdb_table.offloads.esw_chains_priv = NULL;

	return err;
}

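/* Reverse of esw_chains_create(): release the per-vport tables if they
 * were opened, the level-0 root and the tc_end chain, then destroy the
 * chains object.
 */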
static void
esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
{
	if (!mlx5_chains_prios_supported(chains))
		esw_vport_tbl_put(esw);
	mlx5_chains_put_table(chains, 0, 1, 0);
	mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0);
	mlx5_chains_destroy(chains);
}

#else /* CONFIG_MLX5_CLS_ACT */

static int
esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
{ return 0; }

static void
esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
{}

#endif

static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	int num_vfs, table_size, ix, err = 0;
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	u32 flags = 0, *flow_group_in;
	struct mlx5_flow_group *g;
	void *match_criteria;
	u8 *dmac;

	esw_debug(esw->dev, "Create offloads FDB Tables\n");

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}
	esw->fdb_table.offloads.ns = root_ns;
	err = mlx5_flow_namespace_set_mode(root_ns,
					   esw->dev->priv.steering->mode);
	if (err) {
		esw_warn(dev, "Failed to set FDB namespace steering mode\n");
		goto ns_err;
	}

	/* To be strictly correct:
	 *	MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ)
	 * should be:
	 *	esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ +
	 *	peer_esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ
	 * but as the peer device might not be in switchdev mode it's not
	 * possible. We use the fact that by default FW sets max vfs and max sfs
	 * to the same value on both devices. If it needs to be changed in the
	 * future note the peer miss group should also be created based on the
	 * number of total vports of the peer (currently it also uses
	 * esw->total_vports).
	 */
	table_size = MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ) +
		     MLX5_ESW_MISS_FLOWS + esw->total_vports + esw->esw_funcs.num_vfs;

	/* create the slow path fdb with encap set, so further table instances
	 * can be created at run time while VFs are probed if the FW allows that.
	 */
	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	ft_attr.flags = flags;
	ft_attr.max_fte = table_size;
	ft_attr.prio = FDB_SLOW_PATH;

	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.slow_fdb = fdb;

	/* Create empty TC-miss managed table. This allows plugging in following
	 * priorities without directly exposing their level 0 table to
	 * eswitch_offloads and passing it as miss_fdb to following call to
	 * esw_chains_create().
	 */
	memset(&ft_attr, 0, sizeof(ft_attr));
	ft_attr.prio = FDB_TC_MISS;
	esw->fdb_table.offloads.tc_miss_table = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(esw->fdb_table.offloads.tc_miss_table)) {
		err = PTR_ERR(esw->fdb_table.offloads.tc_miss_table);
		esw_warn(dev, "Failed to create TC miss FDB Table err %d\n", err);
		goto tc_miss_table_err;
	}

	err = esw_chains_create(esw, esw->fdb_table.offloads.tc_miss_table);
	if (err) {
		esw_warn(dev, "Failed to open fdb chains err(%d)\n", err);
		goto fdb_chains_err;
	}

	/* create send-to-vport group */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_eswitch_owner_vhca_id);
		MLX5_SET(create_flow_group_in, flow_group_in,
			 source_eswitch_owner_vhca_id_valid, 1);
	}

	/* See comment above table_size calculation */
	ix = MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto send_vport_err;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

	if (esw_src_port_rewrite_supported(esw)) {
		/* meta send to vport */
		memset(flow_group_in, 0, inlen);
		MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS_2);

		match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

		MLX5_SET(fte_match_param, match_criteria,
			 misc_parameters_2.metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());
		MLX5_SET(fte_match_param, match_criteria,
			 misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);

		num_vfs = esw->esw_funcs.num_vfs;
		if (num_vfs) {
			MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
			MLX5_SET(create_flow_group_in, flow_group_in,
				 end_flow_index, ix + num_vfs - 1);
			ix += num_vfs;

			g = mlx5_create_flow_group(fdb, flow_group_in);
			if (IS_ERR(g)) {
				err = PTR_ERR(g);
				esw_warn(dev, "Failed to create send-to-vport meta flow group err(%d)\n",
					 err);
				goto send_vport_meta_err;
			}
			esw->fdb_table.offloads.send_to_vport_meta_grp = g;

			err = mlx5_eswitch_add_send_to_vport_meta_rules(esw);
			if (err)
				goto meta_rule_err;
		}
	}

flow_group_in, 1793 source_eswitch_owner_vhca_id_valid, 1); 1794 } 1795 1796 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix); 1797 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1798 ix + esw->total_vports - 1); 1799 ix += esw->total_vports; 1800 1801 g = mlx5_create_flow_group(fdb, flow_group_in); 1802 if (IS_ERR(g)) { 1803 err = PTR_ERR(g); 1804 esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err); 1805 goto peer_miss_err; 1806 } 1807 esw->fdb_table.offloads.peer_miss_grp = g; 1808 } 1809 1810 /* create miss group */ 1811 memset(flow_group_in, 0, inlen); 1812 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, 1813 MLX5_MATCH_OUTER_HEADERS); 1814 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, 1815 match_criteria); 1816 dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, 1817 outer_headers.dmac_47_16); 1818 dmac[0] = 0x01; 1819 1820 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix); 1821 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1822 ix + MLX5_ESW_MISS_FLOWS); 1823 1824 g = mlx5_create_flow_group(fdb, flow_group_in); 1825 if (IS_ERR(g)) { 1826 err = PTR_ERR(g); 1827 esw_warn(dev, "Failed to create miss flow group err(%d)\n", err); 1828 goto miss_err; 1829 } 1830 esw->fdb_table.offloads.miss_grp = g; 1831 1832 err = esw_add_fdb_miss_rule(esw); 1833 if (err) 1834 goto miss_rule_err; 1835 1836 kvfree(flow_group_in); 1837 return 0; 1838 1839 miss_rule_err: 1840 mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp); 1841 miss_err: 1842 if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) 1843 mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp); 1844 peer_miss_err: 1845 mlx5_eswitch_del_send_to_vport_meta_rules(esw); 1846 meta_rule_err: 1847 if (esw->fdb_table.offloads.send_to_vport_meta_grp) 1848 mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp); 1849 send_vport_meta_err: 1850 mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp); 1851 send_vport_err: 1852 esw_chains_destroy(esw, esw_chains(esw)); 1853 fdb_chains_err: 1854 mlx5_destroy_flow_table(esw->fdb_table.offloads.tc_miss_table); 1855 tc_miss_table_err: 1856 mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb); 1857 slow_fdb_err: 1858 /* Holds true only as long as DMFS is the default */ 1859 mlx5_flow_namespace_set_mode(root_ns, MLX5_FLOW_STEERING_MODE_DMFS); 1860 ns_err: 1861 kvfree(flow_group_in); 1862 return err; 1863 } 1864 1865 static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw) 1866 { 1867 if (!esw->fdb_table.offloads.slow_fdb) 1868 return; 1869 1870 esw_debug(esw->dev, "Destroy offloads FDB Tables\n"); 1871 mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi); 1872 mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni); 1873 mlx5_eswitch_del_send_to_vport_meta_rules(esw); 1874 mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp); 1875 if (esw->fdb_table.offloads.send_to_vport_meta_grp) 1876 mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp); 1877 if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) 1878 mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp); 1879 mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp); 1880 1881 esw_chains_destroy(esw, esw_chains(esw)); 1882 1883 mlx5_destroy_flow_table(esw->fdb_table.offloads.tc_miss_table); 1884 mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb); 1885 /* Holds true only as long as DMFS is the default */ 1886 
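/* This undoes the mlx5_flow_namespace_set_mode() call made at create time, which may have selected a non-default steering mode (e.g. SMFS). */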
mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns, 1887 MLX5_FLOW_STEERING_MODE_DMFS); 1888 atomic64_set(&esw->user_count, 0); 1889 } 1890 1891 static int esw_get_offloads_ft_size(struct mlx5_eswitch *esw) 1892 { 1893 int nvports; 1894 1895 nvports = esw->total_vports + MLX5_ESW_MISS_FLOWS; 1896 if (mlx5e_tc_int_port_supported(esw)) 1897 nvports += MLX5E_TC_MAX_INT_PORT_NUM; 1898 1899 return nvports; 1900 } 1901 1902 static int esw_create_offloads_table(struct mlx5_eswitch *esw) 1903 { 1904 struct mlx5_flow_table_attr ft_attr = {}; 1905 struct mlx5_core_dev *dev = esw->dev; 1906 struct mlx5_flow_table *ft_offloads; 1907 struct mlx5_flow_namespace *ns; 1908 int err = 0; 1909 1910 ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS); 1911 if (!ns) { 1912 esw_warn(esw->dev, "Failed to get offloads flow namespace\n"); 1913 return -EOPNOTSUPP; 1914 } 1915 1916 ft_attr.max_fte = esw_get_offloads_ft_size(esw); 1917 ft_attr.prio = 1; 1918 1919 ft_offloads = mlx5_create_flow_table(ns, &ft_attr); 1920 if (IS_ERR(ft_offloads)) { 1921 err = PTR_ERR(ft_offloads); 1922 esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err); 1923 return err; 1924 } 1925 1926 esw->offloads.ft_offloads = ft_offloads; 1927 return 0; 1928 } 1929 1930 static void esw_destroy_offloads_table(struct mlx5_eswitch *esw) 1931 { 1932 struct mlx5_esw_offload *offloads = &esw->offloads; 1933 1934 mlx5_destroy_flow_table(offloads->ft_offloads); 1935 } 1936 1937 static int esw_create_vport_rx_group(struct mlx5_eswitch *esw) 1938 { 1939 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); 1940 struct mlx5_flow_group *g; 1941 u32 *flow_group_in; 1942 int nvports; 1943 int err = 0; 1944 1945 nvports = esw_get_offloads_ft_size(esw); 1946 flow_group_in = kvzalloc(inlen, GFP_KERNEL); 1947 if (!flow_group_in) 1948 return -ENOMEM; 1949 1950 /* create vport rx group */ 1951 esw_set_flow_group_source_port(esw, flow_group_in); 1952 1953 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); 1954 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1); 1955 1956 g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in); 1957 1958 if (IS_ERR(g)) { 1959 err = PTR_ERR(g); 1960 mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err); 1961 goto out; 1962 } 1963 1964 esw->offloads.vport_rx_group = g; 1965 out: 1966 kvfree(flow_group_in); 1967 return err; 1968 } 1969 1970 static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw) 1971 { 1972 mlx5_destroy_flow_group(esw->offloads.vport_rx_group); 1973 } 1974 1975 struct mlx5_flow_handle * 1976 mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport, 1977 struct mlx5_flow_destination *dest) 1978 { 1979 struct mlx5_flow_act flow_act = {0}; 1980 struct mlx5_flow_handle *flow_rule; 1981 struct mlx5_flow_spec *spec; 1982 void *misc; 1983 1984 spec = kvzalloc(sizeof(*spec), GFP_KERNEL); 1985 if (!spec) { 1986 flow_rule = ERR_PTR(-ENOMEM); 1987 goto out; 1988 } 1989 1990 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) { 1991 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2); 1992 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, 1993 mlx5_eswitch_get_vport_metadata_for_match(esw, vport)); 1994 1995 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2); 1996 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, 1997 mlx5_eswitch_get_vport_metadata_mask()); 1998 1999 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2; 2000 } else { 2001 misc 
= MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); 2002 MLX5_SET(fte_match_set_misc, misc, source_port, vport); 2003 2004 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); 2005 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); 2006 2007 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; 2008 } 2009 2010 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 2011 flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec, 2012 &flow_act, dest, 1); 2013 if (IS_ERR(flow_rule)) { 2014 esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule)); 2015 goto out; 2016 } 2017 2018 out: 2019 kvfree(spec); 2020 return flow_rule; 2021 } 2022 2023 static int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode) 2024 { 2025 u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2; 2026 struct mlx5_core_dev *dev = esw->dev; 2027 struct mlx5_vport *vport; 2028 unsigned long i; 2029 2030 if (!MLX5_CAP_GEN(dev, vport_group_manager)) 2031 return -EOPNOTSUPP; 2032 2033 if (esw->mode == MLX5_ESWITCH_NONE) 2034 return -EOPNOTSUPP; 2035 2036 switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) { 2037 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED: 2038 mlx5_mode = MLX5_INLINE_MODE_NONE; 2039 goto out; 2040 case MLX5_CAP_INLINE_MODE_L2: 2041 mlx5_mode = MLX5_INLINE_MODE_L2; 2042 goto out; 2043 case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT: 2044 goto query_vports; 2045 } 2046 2047 query_vports: 2048 mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode); 2049 mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) { 2050 mlx5_query_nic_vport_min_inline(dev, vport->vport, &mlx5_mode); 2051 if (prev_mlx5_mode != mlx5_mode) 2052 return -EINVAL; 2053 prev_mlx5_mode = mlx5_mode; 2054 } 2055 2056 out: 2057 *mode = mlx5_mode; 2058 return 0; 2059 } 2060 2061 static void esw_destroy_restore_table(struct mlx5_eswitch *esw) 2062 { 2063 struct mlx5_esw_offload *offloads = &esw->offloads; 2064 2065 if (!mlx5_eswitch_reg_c1_loopback_supported(esw)) 2066 return; 2067 2068 mlx5_modify_header_dealloc(esw->dev, offloads->restore_copy_hdr_id); 2069 mlx5_destroy_flow_group(offloads->restore_group); 2070 mlx5_destroy_flow_table(offloads->ft_offloads_restore); 2071 } 2072 2073 static int esw_create_restore_table(struct mlx5_eswitch *esw) 2074 { 2075 u8 modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {}; 2076 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); 2077 struct mlx5_flow_table_attr ft_attr = {}; 2078 struct mlx5_core_dev *dev = esw->dev; 2079 struct mlx5_flow_namespace *ns; 2080 struct mlx5_modify_hdr *mod_hdr; 2081 void *match_criteria, *misc; 2082 struct mlx5_flow_table *ft; 2083 struct mlx5_flow_group *g; 2084 u32 *flow_group_in; 2085 int err = 0; 2086 2087 if (!mlx5_eswitch_reg_c1_loopback_supported(esw)) 2088 return 0; 2089 2090 ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS); 2091 if (!ns) { 2092 esw_warn(esw->dev, "Failed to get offloads flow namespace\n"); 2093 return -EOPNOTSUPP; 2094 } 2095 2096 flow_group_in = kvzalloc(inlen, GFP_KERNEL); 2097 if (!flow_group_in) { 2098 err = -ENOMEM; 2099 goto out_free; 2100 } 2101 2102 ft_attr.max_fte = 1 << ESW_REG_C0_USER_DATA_METADATA_BITS; 2103 ft = mlx5_create_flow_table(ns, &ft_attr); 2104 if (IS_ERR(ft)) { 2105 err = PTR_ERR(ft); 2106 esw_warn(esw->dev, "Failed to create restore table, err %d\n", 2107 err); 2108 goto out_free; 2109 } 2110 2111 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, 2112 match_criteria); 2113 
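/* The single group below spans the whole restore table and matches only the user-data bits of reg_c_0; the modify header allocated further down copies reg_c_1 into reg_b. */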
misc = MLX5_ADDR_OF(fte_match_param, match_criteria, 2114 misc_parameters_2); 2115 2116 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, 2117 ESW_REG_C0_USER_DATA_METADATA_MASK); 2118 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); 2119 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2120 ft_attr.max_fte - 1); 2121 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, 2122 MLX5_MATCH_MISC_PARAMETERS_2); 2123 g = mlx5_create_flow_group(ft, flow_group_in); 2124 if (IS_ERR(g)) { 2125 err = PTR_ERR(g); 2126 esw_warn(dev, "Failed to create restore flow group, err: %d\n", 2127 err); 2128 goto err_group; 2129 } 2130 2131 MLX5_SET(copy_action_in, modact, action_type, MLX5_ACTION_TYPE_COPY); 2132 MLX5_SET(copy_action_in, modact, src_field, 2133 MLX5_ACTION_IN_FIELD_METADATA_REG_C_1); 2134 MLX5_SET(copy_action_in, modact, dst_field, 2135 MLX5_ACTION_IN_FIELD_METADATA_REG_B); 2136 mod_hdr = mlx5_modify_header_alloc(esw->dev, 2137 MLX5_FLOW_NAMESPACE_KERNEL, 1, 2138 modact); 2139 if (IS_ERR(mod_hdr)) { 2140 err = PTR_ERR(mod_hdr); 2141 esw_warn(dev, "Failed to create restore mod header, err: %d\n", 2142 err); 2143 goto err_mod_hdr; 2144 } 2145 2146 esw->offloads.ft_offloads_restore = ft; 2147 esw->offloads.restore_group = g; 2148 esw->offloads.restore_copy_hdr_id = mod_hdr; 2149 2150 kvfree(flow_group_in); 2151 2152 return 0; 2153 2154 err_mod_hdr: 2155 mlx5_destroy_flow_group(g); 2156 err_group: 2157 mlx5_destroy_flow_table(ft); 2158 out_free: 2159 kvfree(flow_group_in); 2160 2161 return err; 2162 } 2163 2164 static int esw_offloads_start(struct mlx5_eswitch *esw, 2165 struct netlink_ext_ack *extack) 2166 { 2167 int err, err1; 2168 2169 mlx5_eswitch_disable_locked(esw, false); 2170 err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS, 2171 esw->dev->priv.sriov.num_vfs); 2172 if (err) { 2173 NL_SET_ERR_MSG_MOD(extack, 2174 "Failed setting eswitch to offloads"); 2175 err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY, 2176 MLX5_ESWITCH_IGNORE_NUM_VFS); 2177 if (err1) { 2178 NL_SET_ERR_MSG_MOD(extack, 2179 "Failed setting eswitch back to legacy"); 2180 } 2181 } 2182 if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) { 2183 if (mlx5_eswitch_inline_mode_get(esw, 2184 &esw->offloads.inline_mode)) { 2185 esw->offloads.inline_mode = MLX5_INLINE_MODE_L2; 2186 NL_SET_ERR_MSG_MOD(extack, 2187 "Inline mode is different between vports"); 2188 } 2189 } 2190 return err; 2191 } 2192 2193 static void mlx5_esw_offloads_rep_mark_set(struct mlx5_eswitch *esw, 2194 struct mlx5_eswitch_rep *rep, 2195 xa_mark_t mark) 2196 { 2197 bool mark_set; 2198 2199 /* Copy the mark from vport to its rep */ 2200 mark_set = xa_get_mark(&esw->vports, rep->vport, mark); 2201 if (mark_set) 2202 xa_set_mark(&esw->offloads.vport_reps, rep->vport, mark); 2203 } 2204 2205 static int mlx5_esw_offloads_rep_init(struct mlx5_eswitch *esw, const struct mlx5_vport *vport) 2206 { 2207 struct mlx5_eswitch_rep *rep; 2208 int rep_type; 2209 int err; 2210 2211 rep = kzalloc(sizeof(*rep), GFP_KERNEL); 2212 if (!rep) 2213 return -ENOMEM; 2214 2215 rep->vport = vport->vport; 2216 rep->vport_index = vport->index; 2217 for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) 2218 atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED); 2219 2220 err = xa_insert(&esw->offloads.vport_reps, rep->vport, rep, GFP_KERNEL); 2221 if (err) 2222 goto insert_err; 2223 2224 mlx5_esw_offloads_rep_mark_set(esw, rep, MLX5_ESW_VPT_HOST_FN); 2225 mlx5_esw_offloads_rep_mark_set(esw, rep, 
MLX5_ESW_VPT_VF); 2226 mlx5_esw_offloads_rep_mark_set(esw, rep, MLX5_ESW_VPT_SF); 2227 return 0; 2228 2229 insert_err: 2230 kfree(rep); 2231 return err; 2232 } 2233 2234 static void mlx5_esw_offloads_rep_cleanup(struct mlx5_eswitch *esw, 2235 struct mlx5_eswitch_rep *rep) 2236 { 2237 xa_erase(&esw->offloads.vport_reps, rep->vport); 2238 kfree(rep); 2239 } 2240 2241 void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw) 2242 { 2243 struct mlx5_eswitch_rep *rep; 2244 unsigned long i; 2245 2246 mlx5_esw_for_each_rep(esw, i, rep) 2247 mlx5_esw_offloads_rep_cleanup(esw, rep); 2248 xa_destroy(&esw->offloads.vport_reps); 2249 } 2250 2251 int esw_offloads_init_reps(struct mlx5_eswitch *esw) 2252 { 2253 struct mlx5_vport *vport; 2254 unsigned long i; 2255 int err; 2256 2257 xa_init(&esw->offloads.vport_reps); 2258 2259 mlx5_esw_for_each_vport(esw, i, vport) { 2260 err = mlx5_esw_offloads_rep_init(esw, vport); 2261 if (err) 2262 goto err; 2263 } 2264 return 0; 2265 2266 err: 2267 esw_offloads_cleanup_reps(esw); 2268 return err; 2269 } 2270 2271 static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw, 2272 struct mlx5_eswitch_rep *rep, u8 rep_type) 2273 { 2274 if (atomic_cmpxchg(&rep->rep_data[rep_type].state, 2275 REP_LOADED, REP_REGISTERED) == REP_LOADED) 2276 esw->offloads.rep_ops[rep_type]->unload(rep); 2277 } 2278 2279 static void __unload_reps_sf_vport(struct mlx5_eswitch *esw, u8 rep_type) 2280 { 2281 struct mlx5_eswitch_rep *rep; 2282 unsigned long i; 2283 2284 mlx5_esw_for_each_sf_rep(esw, i, rep) 2285 __esw_offloads_unload_rep(esw, rep, rep_type); 2286 } 2287 2288 static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type) 2289 { 2290 struct mlx5_eswitch_rep *rep; 2291 unsigned long i; 2292 2293 __unload_reps_sf_vport(esw, rep_type); 2294 2295 mlx5_esw_for_each_vf_rep(esw, i, rep) 2296 __esw_offloads_unload_rep(esw, rep, rep_type); 2297 2298 if (mlx5_ecpf_vport_exists(esw->dev)) { 2299 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF); 2300 __esw_offloads_unload_rep(esw, rep, rep_type); 2301 } 2302 2303 if (mlx5_core_is_ecpf_esw_manager(esw->dev)) { 2304 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF); 2305 __esw_offloads_unload_rep(esw, rep, rep_type); 2306 } 2307 2308 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK); 2309 __esw_offloads_unload_rep(esw, rep, rep_type); 2310 } 2311 2312 int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num) 2313 { 2314 struct mlx5_eswitch_rep *rep; 2315 int rep_type; 2316 int err; 2317 2318 rep = mlx5_eswitch_get_rep(esw, vport_num); 2319 for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) 2320 if (atomic_cmpxchg(&rep->rep_data[rep_type].state, 2321 REP_REGISTERED, REP_LOADED) == REP_REGISTERED) { 2322 err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep); 2323 if (err) 2324 goto err_reps; 2325 } 2326 2327 return 0; 2328 2329 err_reps: 2330 atomic_set(&rep->rep_data[rep_type].state, REP_REGISTERED); 2331 for (--rep_type; rep_type >= 0; rep_type--) 2332 __esw_offloads_unload_rep(esw, rep, rep_type); 2333 return err; 2334 } 2335 2336 void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num) 2337 { 2338 struct mlx5_eswitch_rep *rep; 2339 int rep_type; 2340 2341 rep = mlx5_eswitch_get_rep(esw, vport_num); 2342 for (rep_type = NUM_REP_TYPES - 1; rep_type >= 0; rep_type--) 2343 __esw_offloads_unload_rep(esw, rep, rep_type); 2344 } 2345 2346 int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num) 2347 { 2348 int err; 2349 2350 if (esw->mode != MLX5_ESWITCH_OFFLOADS) 2351 return 0; 2352 
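/* Every vport except the uplink registers a devlink port before its rep is loaded, and unregisters it again if loading fails. */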
2353 if (vport_num != MLX5_VPORT_UPLINK) { 2354 err = mlx5_esw_offloads_devlink_port_register(esw, vport_num); 2355 if (err) 2356 return err; 2357 } 2358 2359 err = mlx5_esw_offloads_rep_load(esw, vport_num); 2360 if (err) 2361 goto load_err; 2362 return err; 2363 2364 load_err: 2365 if (vport_num != MLX5_VPORT_UPLINK) 2366 mlx5_esw_offloads_devlink_port_unregister(esw, vport_num); 2367 return err; 2368 } 2369 2370 void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num) 2371 { 2372 if (esw->mode != MLX5_ESWITCH_OFFLOADS) 2373 return; 2374 2375 mlx5_esw_offloads_rep_unload(esw, vport_num); 2376 2377 if (vport_num != MLX5_VPORT_UPLINK) 2378 mlx5_esw_offloads_devlink_port_unregister(esw, vport_num); 2379 } 2380 2381 static int esw_set_uplink_slave_ingress_root(struct mlx5_core_dev *master, 2382 struct mlx5_core_dev *slave) 2383 { 2384 u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {}; 2385 u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {}; 2386 struct mlx5_eswitch *esw; 2387 struct mlx5_flow_root_namespace *root; 2388 struct mlx5_flow_namespace *ns; 2389 struct mlx5_vport *vport; 2390 int err; 2391 2392 MLX5_SET(set_flow_table_root_in, in, opcode, 2393 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT); 2394 MLX5_SET(set_flow_table_root_in, in, table_type, FS_FT_ESW_INGRESS_ACL); 2395 MLX5_SET(set_flow_table_root_in, in, other_vport, 1); 2396 MLX5_SET(set_flow_table_root_in, in, vport_number, MLX5_VPORT_UPLINK); 2397 2398 if (master) { 2399 esw = master->priv.eswitch; 2400 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK); 2401 MLX5_SET(set_flow_table_root_in, in, table_of_other_vport, 1); 2402 MLX5_SET(set_flow_table_root_in, in, table_vport_number, 2403 MLX5_VPORT_UPLINK); 2404 2405 ns = mlx5_get_flow_vport_acl_namespace(master, 2406 MLX5_FLOW_NAMESPACE_ESW_INGRESS, 2407 vport->index); 2408 root = find_root(&ns->node); 2409 mutex_lock(&root->chain_lock); 2410 2411 MLX5_SET(set_flow_table_root_in, in, 2412 table_eswitch_owner_vhca_id_valid, 1); 2413 MLX5_SET(set_flow_table_root_in, in, 2414 table_eswitch_owner_vhca_id, 2415 MLX5_CAP_GEN(master, vhca_id)); 2416 MLX5_SET(set_flow_table_root_in, in, table_id, 2417 root->root_ft->id); 2418 } else { 2419 esw = slave->priv.eswitch; 2420 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK); 2421 ns = mlx5_get_flow_vport_acl_namespace(slave, 2422 MLX5_FLOW_NAMESPACE_ESW_INGRESS, 2423 vport->index); 2424 root = find_root(&ns->node); 2425 mutex_lock(&root->chain_lock); 2426 MLX5_SET(set_flow_table_root_in, in, table_id, root->root_ft->id); 2427 } 2428 2429 err = mlx5_cmd_exec(slave, in, sizeof(in), out, sizeof(out)); 2430 mutex_unlock(&root->chain_lock); 2431 2432 return err; 2433 } 2434 2435 static int esw_set_slave_root_fdb(struct mlx5_core_dev *master, 2436 struct mlx5_core_dev *slave) 2437 { 2438 u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {}; 2439 u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {}; 2440 struct mlx5_flow_root_namespace *root; 2441 struct mlx5_flow_namespace *ns; 2442 int err; 2443 2444 MLX5_SET(set_flow_table_root_in, in, opcode, 2445 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT); 2446 MLX5_SET(set_flow_table_root_in, in, table_type, 2447 FS_FT_FDB); 2448 2449 if (master) { 2450 ns = mlx5_get_flow_namespace(master, 2451 MLX5_FLOW_NAMESPACE_FDB); 2452 root = find_root(&ns->node); 2453 mutex_lock(&root->chain_lock); 2454 MLX5_SET(set_flow_table_root_in, in, 2455 table_eswitch_owner_vhca_id_valid, 1); 2456 MLX5_SET(set_flow_table_root_in, in, 2457 table_eswitch_owner_vhca_id, 2458 MLX5_CAP_GEN(master, vhca_id)); 2459 
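/* Point the slave's FDB root at the master's root table; the vhca_id fields set above identify the table as owned by the master eswitch. */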
MLX5_SET(set_flow_table_root_in, in, table_id, 2460 root->root_ft->id); 2461 } else { 2462 ns = mlx5_get_flow_namespace(slave, 2463 MLX5_FLOW_NAMESPACE_FDB); 2464 root = find_root(&ns->node); 2465 mutex_lock(&root->chain_lock); 2466 MLX5_SET(set_flow_table_root_in, in, table_id, 2467 root->root_ft->id); 2468 } 2469 2470 err = mlx5_cmd_exec(slave, in, sizeof(in), out, sizeof(out)); 2471 mutex_unlock(&root->chain_lock); 2472 2473 return err; 2474 } 2475 2476 static int __esw_set_master_egress_rule(struct mlx5_core_dev *master, 2477 struct mlx5_core_dev *slave, 2478 struct mlx5_vport *vport, 2479 struct mlx5_flow_table *acl) 2480 { 2481 struct mlx5_flow_handle *flow_rule = NULL; 2482 struct mlx5_flow_destination dest = {}; 2483 struct mlx5_flow_act flow_act = {}; 2484 struct mlx5_flow_spec *spec; 2485 int err = 0; 2486 void *misc; 2487 2488 spec = kvzalloc(sizeof(*spec), GFP_KERNEL); 2489 if (!spec) 2490 return -ENOMEM; 2491 2492 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; 2493 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, 2494 misc_parameters); 2495 MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_UPLINK); 2496 MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id, 2497 MLX5_CAP_GEN(slave, vhca_id)); 2498 2499 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); 2500 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); 2501 MLX5_SET_TO_ONES(fte_match_set_misc, misc, 2502 source_eswitch_owner_vhca_id); 2503 2504 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 2505 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; 2506 dest.vport.num = slave->priv.eswitch->manager_vport; 2507 dest.vport.vhca_id = MLX5_CAP_GEN(slave, vhca_id); 2508 dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID; 2509 2510 flow_rule = mlx5_add_flow_rules(acl, spec, &flow_act, 2511 &dest, 1); 2512 if (IS_ERR(flow_rule)) 2513 err = PTR_ERR(flow_rule); 2514 else 2515 vport->egress.offloads.bounce_rule = flow_rule; 2516 2517 kvfree(spec); 2518 return err; 2519 } 2520 2521 static int esw_set_master_egress_rule(struct mlx5_core_dev *master, 2522 struct mlx5_core_dev *slave) 2523 { 2524 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); 2525 struct mlx5_eswitch *esw = master->priv.eswitch; 2526 struct mlx5_flow_table_attr ft_attr = { 2527 .max_fte = 1, .prio = 0, .level = 0, 2528 .flags = MLX5_FLOW_TABLE_OTHER_VPORT, 2529 }; 2530 struct mlx5_flow_namespace *egress_ns; 2531 struct mlx5_flow_table *acl; 2532 struct mlx5_flow_group *g; 2533 struct mlx5_vport *vport; 2534 void *match_criteria; 2535 u32 *flow_group_in; 2536 int err; 2537 2538 vport = mlx5_eswitch_get_vport(esw, esw->manager_vport); 2539 if (IS_ERR(vport)) 2540 return PTR_ERR(vport); 2541 2542 egress_ns = mlx5_get_flow_vport_acl_namespace(master, 2543 MLX5_FLOW_NAMESPACE_ESW_EGRESS, 2544 vport->index); 2545 if (!egress_ns) 2546 return -EINVAL; 2547 2548 if (vport->egress.acl) 2549 return -EINVAL; 2550 2551 flow_group_in = kvzalloc(inlen, GFP_KERNEL); 2552 if (!flow_group_in) 2553 return -ENOMEM; 2554 2555 acl = mlx5_create_vport_flow_table(egress_ns, &ft_attr, vport->vport); 2556 if (IS_ERR(acl)) { 2557 err = PTR_ERR(acl); 2558 goto out; 2559 } 2560 2561 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, 2562 match_criteria); 2563 MLX5_SET_TO_ONES(fte_match_param, match_criteria, 2564 misc_parameters.source_port); 2565 MLX5_SET_TO_ONES(fte_match_param, match_criteria, 2566 misc_parameters.source_eswitch_owner_vhca_id); 2567 MLX5_SET(create_flow_group_in, flow_group_in, 
match_criteria_enable, 2568 MLX5_MATCH_MISC_PARAMETERS); 2569 2570 MLX5_SET(create_flow_group_in, flow_group_in, 2571 source_eswitch_owner_vhca_id_valid, 1); 2572 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); 2573 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0); 2574 2575 g = mlx5_create_flow_group(acl, flow_group_in); 2576 if (IS_ERR(g)) { 2577 err = PTR_ERR(g); 2578 goto err_group; 2579 } 2580 2581 err = __esw_set_master_egress_rule(master, slave, vport, acl); 2582 if (err) 2583 goto err_rule; 2584 2585 vport->egress.acl = acl; 2586 vport->egress.offloads.bounce_grp = g; 2587 2588 kvfree(flow_group_in); 2589 2590 return 0; 2591 2592 err_rule: 2593 mlx5_destroy_flow_group(g); 2594 err_group: 2595 mlx5_destroy_flow_table(acl); 2596 out: 2597 kvfree(flow_group_in); 2598 return err; 2599 } 2600 2601 static void esw_unset_master_egress_rule(struct mlx5_core_dev *dev) 2602 { 2603 struct mlx5_vport *vport; 2604 2605 vport = mlx5_eswitch_get_vport(dev->priv.eswitch, 2606 dev->priv.eswitch->manager_vport); 2607 2608 esw_acl_egress_ofld_cleanup(vport); 2609 } 2610 2611 int mlx5_eswitch_offloads_config_single_fdb(struct mlx5_eswitch *master_esw, 2612 struct mlx5_eswitch *slave_esw) 2613 { 2614 int err; 2615 2616 err = esw_set_uplink_slave_ingress_root(master_esw->dev, 2617 slave_esw->dev); 2618 if (err) 2619 return -EINVAL; 2620 2621 err = esw_set_slave_root_fdb(master_esw->dev, 2622 slave_esw->dev); 2623 if (err) 2624 goto err_fdb; 2625 2626 err = esw_set_master_egress_rule(master_esw->dev, 2627 slave_esw->dev); 2628 if (err) 2629 goto err_acl; 2630 2631 return err; 2632 2633 err_acl: 2634 esw_set_slave_root_fdb(NULL, slave_esw->dev); 2635 2636 err_fdb: 2637 esw_set_uplink_slave_ingress_root(NULL, slave_esw->dev); 2638 2639 return err; 2640 } 2641 2642 void mlx5_eswitch_offloads_destroy_single_fdb(struct mlx5_eswitch *master_esw, 2643 struct mlx5_eswitch *slave_esw) 2644 { 2645 esw_unset_master_egress_rule(master_esw->dev); 2646 esw_set_slave_root_fdb(NULL, slave_esw->dev); 2647 esw_set_uplink_slave_ingress_root(NULL, slave_esw->dev); 2648 } 2649 2650 #define ESW_OFFLOADS_DEVCOM_PAIR (0) 2651 #define ESW_OFFLOADS_DEVCOM_UNPAIR (1) 2652 2653 static void mlx5_esw_offloads_rep_event_unpair(struct mlx5_eswitch *esw) 2654 { 2655 const struct mlx5_eswitch_rep_ops *ops; 2656 struct mlx5_eswitch_rep *rep; 2657 unsigned long i; 2658 u8 rep_type; 2659 2660 mlx5_esw_for_each_rep(esw, i, rep) { 2661 rep_type = NUM_REP_TYPES; 2662 while (rep_type--) { 2663 ops = esw->offloads.rep_ops[rep_type]; 2664 if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED && 2665 ops->event) 2666 ops->event(esw, rep, MLX5_SWITCHDEV_EVENT_UNPAIR, NULL); 2667 } 2668 } 2669 } 2670 2671 static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw) 2672 { 2673 #if IS_ENABLED(CONFIG_MLX5_CLS_ACT) 2674 mlx5e_tc_clean_fdb_peer_flows(esw); 2675 #endif 2676 mlx5_esw_offloads_rep_event_unpair(esw); 2677 esw_del_fdb_peer_miss_rules(esw); 2678 } 2679 2680 static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw, 2681 struct mlx5_eswitch *peer_esw) 2682 { 2683 const struct mlx5_eswitch_rep_ops *ops; 2684 struct mlx5_eswitch_rep *rep; 2685 unsigned long i; 2686 u8 rep_type; 2687 int err; 2688 2689 err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev); 2690 if (err) 2691 return err; 2692 2693 mlx5_esw_for_each_rep(esw, i, rep) { 2694 for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) { 2695 ops = esw->offloads.rep_ops[rep_type]; 2696 if (atomic_read(&rep->rep_data[rep_type].state) == 
REP_LOADED && 2697 ops->event) { 2698 err = ops->event(esw, rep, MLX5_SWITCHDEV_EVENT_PAIR, peer_esw); 2699 if (err) 2700 goto err_out; 2701 } 2702 } 2703 } 2704 2705 return 0; 2706 2707 err_out: 2708 mlx5_esw_offloads_unpair(esw); 2709 return err; 2710 } 2711 2712 static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw, 2713 struct mlx5_eswitch *peer_esw, 2714 bool pair) 2715 { 2716 struct mlx5_flow_root_namespace *peer_ns; 2717 struct mlx5_flow_root_namespace *ns; 2718 int err; 2719 2720 peer_ns = peer_esw->dev->priv.steering->fdb_root_ns; 2721 ns = esw->dev->priv.steering->fdb_root_ns; 2722 2723 if (pair) { 2724 err = mlx5_flow_namespace_set_peer(ns, peer_ns); 2725 if (err) 2726 return err; 2727 2728 err = mlx5_flow_namespace_set_peer(peer_ns, ns); 2729 if (err) { 2730 mlx5_flow_namespace_set_peer(ns, NULL); 2731 return err; 2732 } 2733 } else { 2734 mlx5_flow_namespace_set_peer(ns, NULL); 2735 mlx5_flow_namespace_set_peer(peer_ns, NULL); 2736 } 2737 2738 return 0; 2739 } 2740 2741 static int mlx5_esw_offloads_devcom_event(int event, 2742 void *my_data, 2743 void *event_data) 2744 { 2745 struct mlx5_eswitch *esw = my_data; 2746 struct mlx5_devcom *devcom = esw->dev->priv.devcom; 2747 struct mlx5_eswitch *peer_esw = event_data; 2748 int err; 2749 2750 switch (event) { 2751 case ESW_OFFLOADS_DEVCOM_PAIR: 2752 if (mlx5_get_next_phys_dev(esw->dev) != peer_esw->dev) 2753 break; 2754 2755 if (mlx5_eswitch_vport_match_metadata_enabled(esw) != 2756 mlx5_eswitch_vport_match_metadata_enabled(peer_esw)) 2757 break; 2758 2759 err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true); 2760 if (err) 2761 goto err_out; 2762 err = mlx5_esw_offloads_pair(esw, peer_esw); 2763 if (err) 2764 goto err_peer; 2765 2766 err = mlx5_esw_offloads_pair(peer_esw, esw); 2767 if (err) 2768 goto err_pair; 2769 2770 mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true); 2771 break; 2772 2773 case ESW_OFFLOADS_DEVCOM_UNPAIR: 2774 if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS)) 2775 break; 2776 2777 mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false); 2778 mlx5_esw_offloads_unpair(peer_esw); 2779 mlx5_esw_offloads_unpair(esw); 2780 mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false); 2781 break; 2782 } 2783 2784 return 0; 2785 2786 err_pair: 2787 mlx5_esw_offloads_unpair(esw); 2788 err_peer: 2789 mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false); 2790 err_out: 2791 mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d", 2792 event, err); 2793 return err; 2794 } 2795 2796 static void esw_offloads_devcom_init(struct mlx5_eswitch *esw) 2797 { 2798 struct mlx5_devcom *devcom = esw->dev->priv.devcom; 2799 2800 INIT_LIST_HEAD(&esw->offloads.peer_flows); 2801 mutex_init(&esw->offloads.peer_mutex); 2802 2803 if (!MLX5_CAP_ESW(esw->dev, merged_eswitch)) 2804 return; 2805 2806 mlx5_devcom_register_component(devcom, 2807 MLX5_DEVCOM_ESW_OFFLOADS, 2808 mlx5_esw_offloads_devcom_event, 2809 esw); 2810 2811 mlx5_devcom_send_event(devcom, 2812 MLX5_DEVCOM_ESW_OFFLOADS, 2813 ESW_OFFLOADS_DEVCOM_PAIR, esw); 2814 } 2815 2816 static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) 2817 { 2818 struct mlx5_devcom *devcom = esw->dev->priv.devcom; 2819 2820 if (!MLX5_CAP_ESW(esw->dev, merged_eswitch)) 2821 return; 2822 2823 mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS, 2824 ESW_OFFLOADS_DEVCOM_UNPAIR, esw); 2825 2826 mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS); 2827 } 2828 2829 bool mlx5_esw_vport_match_metadata_supported(const struct 
mlx5_eswitch *esw) 2830 { 2831 if (!MLX5_CAP_ESW(esw->dev, esw_uplink_ingress_acl)) 2832 return false; 2833 2834 if (!(MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) & 2835 MLX5_FDB_TO_VPORT_REG_C_0)) 2836 return false; 2837 2838 if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source)) 2839 return false; 2840 2841 if (mlx5_core_is_ecpf_esw_manager(esw->dev) || 2842 mlx5_ecpf_vport_exists(esw->dev)) 2843 return false; 2844 2845 return true; 2846 } 2847 2848 u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw) 2849 { 2850 u32 vport_end_ida = (1 << ESW_VPORT_BITS) - 1; 2851 /* Reserve 0xf for internal port offload */ 2852 u32 max_pf_num = (1 << ESW_PFNUM_BITS) - 2; 2853 u32 pf_num; 2854 int id; 2855 2856 /* Only 4 bits of pf_num */ 2857 pf_num = mlx5_get_dev_index(esw->dev); 2858 if (pf_num > max_pf_num) 2859 return 0; 2860 2861 /* Metadata is 4 bits of PFNUM and 12 bits of unique id */ 2862 /* Use only a non-zero vport_id (1-4095) for all PFs */ 2863 id = ida_alloc_range(&esw->offloads.vport_metadata_ida, 1, vport_end_ida, GFP_KERNEL); 2864 if (id < 0) 2865 return 0; 2866 id = (pf_num << ESW_VPORT_BITS) | id; 2867 return id; 2868 } 2869 2870 void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata) 2871 { 2872 u32 vport_bit_mask = (1 << ESW_VPORT_BITS) - 1; 2873 2874 /* Metadata contains only 12 bits of the actual ida id */ 2875 ida_free(&esw->offloads.vport_metadata_ida, metadata & vport_bit_mask); 2876 } 2877 2878 static int esw_offloads_vport_metadata_setup(struct mlx5_eswitch *esw, 2879 struct mlx5_vport *vport) 2880 { 2881 vport->default_metadata = mlx5_esw_match_metadata_alloc(esw); 2882 vport->metadata = vport->default_metadata; 2883 return vport->metadata ? 0 : -ENOSPC; 2884 } 2885 2886 static void esw_offloads_vport_metadata_cleanup(struct mlx5_eswitch *esw, 2887 struct mlx5_vport *vport) 2888 { 2889 if (!vport->default_metadata) 2890 return; 2891 2892 WARN_ON(vport->metadata != vport->default_metadata); 2893 mlx5_esw_match_metadata_free(esw, vport->default_metadata); 2894 } 2895 2896 static void esw_offloads_metadata_uninit(struct mlx5_eswitch *esw) 2897 { 2898 struct mlx5_vport *vport; 2899 unsigned long i; 2900 2901 if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) 2902 return; 2903 2904 mlx5_esw_for_each_vport(esw, i, vport) 2905 esw_offloads_vport_metadata_cleanup(esw, vport); 2906 } 2907 2908 static int esw_offloads_metadata_init(struct mlx5_eswitch *esw) 2909 { 2910 struct mlx5_vport *vport; 2911 unsigned long i; 2912 int err; 2913 2914 if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) 2915 return 0; 2916 2917 mlx5_esw_for_each_vport(esw, i, vport) { 2918 err = esw_offloads_vport_metadata_setup(esw, vport); 2919 if (err) 2920 goto metadata_err; 2921 } 2922 2923 return 0; 2924 2925 metadata_err: 2926 esw_offloads_metadata_uninit(esw); 2927 return err; 2928 } 2929 2930 int mlx5_esw_offloads_vport_metadata_set(struct mlx5_eswitch *esw, bool enable) 2931 { 2932 int err = 0; 2933 2934 down_write(&esw->mode_lock); 2935 if (esw->mode != MLX5_ESWITCH_NONE) { 2936 err = -EBUSY; 2937 goto done; 2938 } 2939 if (!mlx5_esw_vport_match_metadata_supported(esw)) { 2940 err = -EOPNOTSUPP; 2941 goto done; 2942 } 2943 if (enable) 2944 esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA; 2945 else 2946 esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA; 2947 done: 2948 up_write(&esw->mode_lock); 2949 return err; 2950 } 2951 2952 int 2953 esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw, 2954 struct mlx5_vport *vport) 2955 { 2956 int err; 2957 2958 err =
esw_acl_ingress_ofld_setup(esw, vport); 2959 if (err) 2960 return err; 2961 2962 err = esw_acl_egress_ofld_setup(esw, vport); 2963 if (err) 2964 goto egress_err; 2965 2966 return 0; 2967 2968 egress_err: 2969 esw_acl_ingress_ofld_cleanup(esw, vport); 2970 return err; 2971 } 2972 2973 void 2974 esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw, 2975 struct mlx5_vport *vport) 2976 { 2977 esw_acl_egress_ofld_cleanup(vport); 2978 esw_acl_ingress_ofld_cleanup(esw, vport); 2979 } 2980 2981 static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw) 2982 { 2983 struct mlx5_vport *vport; 2984 2985 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK); 2986 if (IS_ERR(vport)) 2987 return PTR_ERR(vport); 2988 2989 return esw_vport_create_offloads_acl_tables(esw, vport); 2990 } 2991 2992 static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw) 2993 { 2994 struct mlx5_vport *vport; 2995 2996 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK); 2997 if (IS_ERR(vport)) 2998 return; 2999 3000 esw_vport_destroy_offloads_acl_tables(esw, vport); 3001 } 3002 3003 int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw) 3004 { 3005 struct mlx5_eswitch_rep *rep; 3006 unsigned long i; 3007 int ret; 3008 3009 if (!esw || esw->mode != MLX5_ESWITCH_OFFLOADS) 3010 return 0; 3011 3012 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK); 3013 if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED) 3014 return 0; 3015 3016 ret = mlx5_esw_offloads_rep_load(esw, MLX5_VPORT_UPLINK); 3017 if (ret) 3018 return ret; 3019 3020 mlx5_esw_for_each_rep(esw, i, rep) { 3021 if (atomic_read(&rep->rep_data[REP_ETH].state) == REP_LOADED) 3022 mlx5_esw_offloads_rep_load(esw, rep->vport); 3023 } 3024 3025 return 0; 3026 } 3027 3028 static int esw_offloads_steering_init(struct mlx5_eswitch *esw) 3029 { 3030 struct mlx5_esw_indir_table *indir; 3031 int err; 3032 3033 memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb)); 3034 mutex_init(&esw->fdb_table.offloads.vports.lock); 3035 hash_init(esw->fdb_table.offloads.vports.table); 3036 atomic64_set(&esw->user_count, 0); 3037 3038 indir = mlx5_esw_indir_table_init(); 3039 if (IS_ERR(indir)) { 3040 err = PTR_ERR(indir); 3041 goto create_indir_err; 3042 } 3043 esw->fdb_table.offloads.indir = indir; 3044 3045 err = esw_create_uplink_offloads_acl_tables(esw); 3046 if (err) 3047 goto create_acl_err; 3048 3049 err = esw_create_offloads_table(esw); 3050 if (err) 3051 goto create_offloads_err; 3052 3053 err = esw_create_restore_table(esw); 3054 if (err) 3055 goto create_restore_err; 3056 3057 err = esw_create_offloads_fdb_tables(esw); 3058 if (err) 3059 goto create_fdb_err; 3060 3061 err = esw_create_vport_rx_group(esw); 3062 if (err) 3063 goto create_fg_err; 3064 3065 return 0; 3066 3067 create_fg_err: 3068 esw_destroy_offloads_fdb_tables(esw); 3069 create_fdb_err: 3070 esw_destroy_restore_table(esw); 3071 create_restore_err: 3072 esw_destroy_offloads_table(esw); 3073 create_offloads_err: 3074 esw_destroy_uplink_offloads_acl_tables(esw); 3075 create_acl_err: 3076 mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir); 3077 create_indir_err: 3078 mutex_destroy(&esw->fdb_table.offloads.vports.lock); 3079 return err; 3080 } 3081 3082 static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw) 3083 { 3084 esw_destroy_vport_rx_group(esw); 3085 esw_destroy_offloads_fdb_tables(esw); 3086 esw_destroy_restore_table(esw); 3087 esw_destroy_offloads_table(esw); 3088 esw_destroy_uplink_offloads_acl_tables(esw); 3089 
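/* The indirection table and the vports lock were the first things set up in esw_offloads_steering_init(), so they are released last. */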
mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir); 3090 mutex_destroy(&esw->fdb_table.offloads.vports.lock); 3091 } 3092 3093 static void 3094 esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out) 3095 { 3096 bool host_pf_disabled; 3097 u16 new_num_vfs; 3098 3099 new_num_vfs = MLX5_GET(query_esw_functions_out, out, 3100 host_params_context.host_num_of_vfs); 3101 host_pf_disabled = MLX5_GET(query_esw_functions_out, out, 3102 host_params_context.host_pf_disabled); 3103 3104 if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled) 3105 return; 3106 3107 /* Number of VFs can only change from "0 to x" or "x to 0". */ 3108 if (esw->esw_funcs.num_vfs > 0) { 3109 mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs); 3110 } else { 3111 int err; 3112 3113 err = mlx5_eswitch_load_vf_vports(esw, new_num_vfs, 3114 MLX5_VPORT_UC_ADDR_CHANGE); 3115 if (err) 3116 return; 3117 } 3118 esw->esw_funcs.num_vfs = new_num_vfs; 3119 } 3120 3121 static void esw_functions_changed_event_handler(struct work_struct *work) 3122 { 3123 struct mlx5_host_work *host_work; 3124 struct mlx5_eswitch *esw; 3125 const u32 *out; 3126 3127 host_work = container_of(work, struct mlx5_host_work, work); 3128 esw = host_work->esw; 3129 3130 out = mlx5_esw_query_functions(esw->dev); 3131 if (IS_ERR(out)) 3132 goto out; 3133 3134 esw_vfs_changed_event_handler(esw, out); 3135 kvfree(out); 3136 out: 3137 kfree(host_work); 3138 } 3139 3140 int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data) 3141 { 3142 struct mlx5_esw_functions *esw_funcs; 3143 struct mlx5_host_work *host_work; 3144 struct mlx5_eswitch *esw; 3145 3146 host_work = kzalloc(sizeof(*host_work), GFP_ATOMIC); 3147 if (!host_work) 3148 return NOTIFY_DONE; 3149 3150 esw_funcs = mlx5_nb_cof(nb, struct mlx5_esw_functions, nb); 3151 esw = container_of(esw_funcs, struct mlx5_eswitch, esw_funcs); 3152 3153 host_work->esw = esw; 3154 3155 INIT_WORK(&host_work->work, esw_functions_changed_event_handler); 3156 queue_work(esw->work_queue, &host_work->work); 3157 3158 return NOTIFY_OK; 3159 } 3160 3161 static int mlx5_esw_host_number_init(struct mlx5_eswitch *esw) 3162 { 3163 const u32 *query_host_out; 3164 3165 if (!mlx5_core_is_ecpf_esw_manager(esw->dev)) 3166 return 0; 3167 3168 query_host_out = mlx5_esw_query_functions(esw->dev); 3169 if (IS_ERR(query_host_out)) 3170 return PTR_ERR(query_host_out); 3171 3172 /* Mark a non-local controller with a non-zero controller number.
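* The device numbers external hosts starting from zero; mlx5_esw_offloads_controller_valid() accounts for this by comparing against host_number + 1.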
*/ 3173 esw->offloads.host_number = MLX5_GET(query_esw_functions_out, query_host_out, 3174 host_params_context.host_number); 3175 kvfree(query_host_out); 3176 return 0; 3177 } 3178 3179 bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 controller) 3180 { 3181 /* Local controller is always valid */ 3182 if (controller == 0) 3183 return true; 3184 3185 if (!mlx5_core_is_ecpf_esw_manager(esw->dev)) 3186 return false; 3187 3188 /* External host number starts with zero in device */ 3189 return (controller == esw->offloads.host_number + 1); 3190 } 3191 3192 int esw_offloads_enable(struct mlx5_eswitch *esw) 3193 { 3194 struct mapping_ctx *reg_c0_obj_pool; 3195 struct mlx5_vport *vport; 3196 unsigned long i; 3197 u64 mapping_id; 3198 int err; 3199 3200 mutex_init(&esw->offloads.termtbl_mutex); 3201 mlx5_rdma_enable_roce(esw->dev); 3202 3203 err = mlx5_esw_host_number_init(esw); 3204 if (err) 3205 goto err_metadata; 3206 3207 err = esw_offloads_metadata_init(esw); 3208 if (err) 3209 goto err_metadata; 3210 3211 err = esw_set_passing_vport_metadata(esw, true); 3212 if (err) 3213 goto err_vport_metadata; 3214 3215 mapping_id = mlx5_query_nic_system_image_guid(esw->dev); 3216 3217 reg_c0_obj_pool = mapping_create_for_id(mapping_id, MAPPING_TYPE_CHAIN, 3218 sizeof(struct mlx5_mapped_obj), 3219 ESW_REG_C0_USER_DATA_METADATA_MASK, 3220 true); 3221 3222 if (IS_ERR(reg_c0_obj_pool)) { 3223 err = PTR_ERR(reg_c0_obj_pool); 3224 goto err_pool; 3225 } 3226 esw->offloads.reg_c0_obj_pool = reg_c0_obj_pool; 3227 3228 err = esw_offloads_steering_init(esw); 3229 if (err) 3230 goto err_steering_init; 3231 3232 /* Representor will control the vport link state */ 3233 mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) 3234 vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN; 3235 3236 /* Uplink vport rep must load first. 
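* It is likewise unloaded last, both in esw_offloads_disable() and in the error unwind below.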
*/ 3237 err = esw_offloads_load_rep(esw, MLX5_VPORT_UPLINK); 3238 if (err) 3239 goto err_uplink; 3240 3241 err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE); 3242 if (err) 3243 goto err_vports; 3244 3245 esw_offloads_devcom_init(esw); 3246 3247 return 0; 3248 3249 err_vports: 3250 esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK); 3251 err_uplink: 3252 esw_offloads_steering_cleanup(esw); 3253 err_steering_init: 3254 mapping_destroy(reg_c0_obj_pool); 3255 err_pool: 3256 esw_set_passing_vport_metadata(esw, false); 3257 err_vport_metadata: 3258 esw_offloads_metadata_uninit(esw); 3259 err_metadata: 3260 mlx5_rdma_disable_roce(esw->dev); 3261 mutex_destroy(&esw->offloads.termtbl_mutex); 3262 return err; 3263 } 3264 3265 static int esw_offloads_stop(struct mlx5_eswitch *esw, 3266 struct netlink_ext_ack *extack) 3267 { 3268 int err, err1; 3269 3270 mlx5_eswitch_disable_locked(esw, false); 3271 err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY, 3272 MLX5_ESWITCH_IGNORE_NUM_VFS); 3273 if (err) { 3274 NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy"); 3275 err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS, 3276 MLX5_ESWITCH_IGNORE_NUM_VFS); 3277 if (err1) { 3278 NL_SET_ERR_MSG_MOD(extack, 3279 "Failed setting eswitch back to offloads"); 3280 } 3281 } 3282 3283 return err; 3284 } 3285 3286 void esw_offloads_disable(struct mlx5_eswitch *esw) 3287 { 3288 esw_offloads_devcom_cleanup(esw); 3289 mlx5_eswitch_disable_pf_vf_vports(esw); 3290 esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK); 3291 esw_set_passing_vport_metadata(esw, false); 3292 esw_offloads_steering_cleanup(esw); 3293 mapping_destroy(esw->offloads.reg_c0_obj_pool); 3294 esw_offloads_metadata_uninit(esw); 3295 mlx5_rdma_disable_roce(esw->dev); 3296 mutex_destroy(&esw->offloads.termtbl_mutex); 3297 } 3298 3299 static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode) 3300 { 3301 switch (mode) { 3302 case DEVLINK_ESWITCH_MODE_LEGACY: 3303 *mlx5_mode = MLX5_ESWITCH_LEGACY; 3304 break; 3305 case DEVLINK_ESWITCH_MODE_SWITCHDEV: 3306 *mlx5_mode = MLX5_ESWITCH_OFFLOADS; 3307 break; 3308 default: 3309 return -EINVAL; 3310 } 3311 3312 return 0; 3313 } 3314 3315 static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode) 3316 { 3317 switch (mlx5_mode) { 3318 case MLX5_ESWITCH_LEGACY: 3319 *mode = DEVLINK_ESWITCH_MODE_LEGACY; 3320 break; 3321 case MLX5_ESWITCH_OFFLOADS: 3322 *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV; 3323 break; 3324 default: 3325 return -EINVAL; 3326 } 3327 3328 return 0; 3329 } 3330 3331 static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode) 3332 { 3333 switch (mode) { 3334 case DEVLINK_ESWITCH_INLINE_MODE_NONE: 3335 *mlx5_mode = MLX5_INLINE_MODE_NONE; 3336 break; 3337 case DEVLINK_ESWITCH_INLINE_MODE_LINK: 3338 *mlx5_mode = MLX5_INLINE_MODE_L2; 3339 break; 3340 case DEVLINK_ESWITCH_INLINE_MODE_NETWORK: 3341 *mlx5_mode = MLX5_INLINE_MODE_IP; 3342 break; 3343 case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT: 3344 *mlx5_mode = MLX5_INLINE_MODE_TCP_UDP; 3345 break; 3346 default: 3347 return -EINVAL; 3348 } 3349 3350 return 0; 3351 } 3352 3353 static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode) 3354 { 3355 switch (mlx5_mode) { 3356 case MLX5_INLINE_MODE_NONE: 3357 *mode = DEVLINK_ESWITCH_INLINE_MODE_NONE; 3358 break; 3359 case MLX5_INLINE_MODE_L2: 3360 *mode = DEVLINK_ESWITCH_INLINE_MODE_LINK; 3361 break; 3362 case MLX5_INLINE_MODE_IP: 3363 *mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK; 3364 break; 3365 case MLX5_INLINE_MODE_TCP_UDP: 3366 *mode = 
DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT; 3367 break; 3368 default: 3369 return -EINVAL; 3370 } 3371 3372 return 0; 3373 } 3374 3375 static int eswitch_devlink_esw_mode_check(const struct mlx5_eswitch *esw) 3376 { 3377 /* devlink commands in NONE eswitch mode are currently supported only 3378 * on ECPF. 3379 */ 3380 return (esw->mode == MLX5_ESWITCH_NONE && 3381 !mlx5_core_is_ecpf_esw_manager(esw->dev)) ? -EOPNOTSUPP : 0; 3382 } 3383 3384 int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, 3385 struct netlink_ext_ack *extack) 3386 { 3387 u16 cur_mlx5_mode, mlx5_mode = 0; 3388 struct mlx5_eswitch *esw; 3389 int err = 0; 3390 3391 esw = mlx5_devlink_eswitch_get(devlink); 3392 if (IS_ERR(esw)) 3393 return PTR_ERR(esw); 3394 3395 if (esw_mode_from_devlink(mode, &mlx5_mode)) 3396 return -EINVAL; 3397 3398 mlx5_lag_disable_change(esw->dev); 3399 err = mlx5_esw_try_lock(esw); 3400 if (err < 0) { 3401 NL_SET_ERR_MSG_MOD(extack, "Can't change mode, E-Switch is busy"); 3402 goto enable_lag; 3403 } 3404 cur_mlx5_mode = err; 3405 err = 0; 3406 3407 if (cur_mlx5_mode == mlx5_mode) 3408 goto unlock; 3409 3410 if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) { 3411 if (mlx5_devlink_trap_get_num_active(esw->dev)) { 3412 NL_SET_ERR_MSG_MOD(extack, 3413 "Can't change mode while devlink traps are active"); 3414 err = -EOPNOTSUPP; 3415 goto unlock; 3416 } 3417 err = esw_offloads_start(esw, extack); 3418 } else if (mode == DEVLINK_ESWITCH_MODE_LEGACY) { 3419 err = esw_offloads_stop(esw, extack); 3420 } else { 3421 err = -EINVAL; 3422 } 3423 3424 unlock: 3425 mlx5_esw_unlock(esw); 3426 enable_lag: 3427 mlx5_lag_enable_change(esw->dev); 3428 return err; 3429 } 3430 3431 int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) 3432 { 3433 struct mlx5_eswitch *esw; 3434 int err; 3435 3436 esw = mlx5_devlink_eswitch_get(devlink); 3437 if (IS_ERR(esw)) 3438 return PTR_ERR(esw); 3439 3440 down_write(&esw->mode_lock); 3441 err = eswitch_devlink_esw_mode_check(esw); 3442 if (err) 3443 goto unlock; 3444 3445 err = esw_mode_to_devlink(esw->mode, mode); 3446 unlock: 3447 up_write(&esw->mode_lock); 3448 return err; 3449 } 3450 3451 static int mlx5_esw_vports_inline_set(struct mlx5_eswitch *esw, u8 mlx5_mode, 3452 struct netlink_ext_ack *extack) 3453 { 3454 struct mlx5_core_dev *dev = esw->dev; 3455 struct mlx5_vport *vport; 3456 u16 err_vport_num = 0; 3457 unsigned long i; 3458 int err = 0; 3459 3460 mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) { 3461 err = mlx5_modify_nic_vport_min_inline(dev, vport->vport, mlx5_mode); 3462 if (err) { 3463 err_vport_num = vport->vport; 3464 NL_SET_ERR_MSG_MOD(extack, 3465 "Failed to set min inline on vport"); 3466 goto revert_inline_mode; 3467 } 3468 } 3469 return 0; 3470 3471 revert_inline_mode: 3472 mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) { 3473 if (vport->vport == err_vport_num) 3474 break; 3475 mlx5_modify_nic_vport_min_inline(dev, 3476 vport->vport, 3477 esw->offloads.inline_mode); 3478 } 3479 return err; 3480 } 3481 3482 int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode, 3483 struct netlink_ext_ack *extack) 3484 { 3485 struct mlx5_core_dev *dev = devlink_priv(devlink); 3486 struct mlx5_eswitch *esw; 3487 u8 mlx5_mode; 3488 int err; 3489 3490 esw = mlx5_devlink_eswitch_get(devlink); 3491 if (IS_ERR(esw)) 3492 return PTR_ERR(esw); 3493 3494 down_write(&esw->mode_lock); 3495 err = eswitch_devlink_esw_mode_check(esw); 3496 if (err) 3497 goto out; 3498 3499 switch (MLX5_CAP_ETH(dev, 
wqe_inline_mode)) { 3500 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED: 3501 if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE) { 3502 err = 0; 3503 goto out; 3504 } 3505 3506 fallthrough; 3507 case MLX5_CAP_INLINE_MODE_L2: 3508 NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set"); 3509 err = -EOPNOTSUPP; 3510 goto out; 3511 case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT: 3512 break; 3513 } 3514 3515 if (atomic64_read(&esw->offloads.num_flows) > 0) { 3516 NL_SET_ERR_MSG_MOD(extack, 3517 "Can't set inline mode when flows are configured"); 3518 err = -EOPNOTSUPP; 3519 goto out; 3520 } 3521 3522 err = esw_inline_mode_from_devlink(mode, &mlx5_mode); 3523 if (err) 3524 goto out; 3525 3526 err = mlx5_esw_vports_inline_set(esw, mlx5_mode, extack); 3527 if (err) 3528 goto out; 3529 3530 esw->offloads.inline_mode = mlx5_mode; 3531 up_write(&esw->mode_lock); 3532 return 0; 3533 3534 out: 3535 up_write(&esw->mode_lock); 3536 return err; 3537 } 3538 3539 int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode) 3540 { 3541 struct mlx5_eswitch *esw; 3542 int err; 3543 3544 esw = mlx5_devlink_eswitch_get(devlink); 3545 if (IS_ERR(esw)) 3546 return PTR_ERR(esw); 3547 3548 down_write(&esw->mode_lock); 3549 err = eswitch_devlink_esw_mode_check(esw); 3550 if (err) 3551 goto unlock; 3552 3553 err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode); 3554 unlock: 3555 up_write(&esw->mode_lock); 3556 return err; 3557 } 3558 3559 int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, 3560 enum devlink_eswitch_encap_mode encap, 3561 struct netlink_ext_ack *extack) 3562 { 3563 struct mlx5_core_dev *dev = devlink_priv(devlink); 3564 struct mlx5_eswitch *esw; 3565 int err; 3566 3567 esw = mlx5_devlink_eswitch_get(devlink); 3568 if (IS_ERR(esw)) 3569 return PTR_ERR(esw); 3570 3571 down_write(&esw->mode_lock); 3572 err = eswitch_devlink_esw_mode_check(esw); 3573 if (err) 3574 goto unlock; 3575 3576 if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE && 3577 (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) || 3578 !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))) { 3579 err = -EOPNOTSUPP; 3580 goto unlock; 3581 } 3582 3583 if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC) { 3584 err = -EOPNOTSUPP; 3585 goto unlock; 3586 } 3587 3588 if (esw->mode == MLX5_ESWITCH_LEGACY) { 3589 esw->offloads.encap = encap; 3590 goto unlock; 3591 } 3592 3593 if (esw->offloads.encap == encap) 3594 goto unlock; 3595 3596 if (atomic64_read(&esw->offloads.num_flows) > 0) { 3597 NL_SET_ERR_MSG_MOD(extack, 3598 "Can't set encapsulation when flows are configured"); 3599 err = -EOPNOTSUPP; 3600 goto unlock; 3601 } 3602 3603 esw_destroy_offloads_fdb_tables(esw); 3604 3605 esw->offloads.encap = encap; 3606 3607 err = esw_create_offloads_fdb_tables(esw); 3608 3609 if (err) { 3610 NL_SET_ERR_MSG_MOD(extack, 3611 "Failed re-creating fast FDB table"); 3612 esw->offloads.encap = !encap; 3613 (void)esw_create_offloads_fdb_tables(esw); 3614 } 3615 3616 unlock: 3617 up_write(&esw->mode_lock); 3618 return err; 3619 } 3620 3621 int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, 3622 enum devlink_eswitch_encap_mode *encap) 3623 { 3624 struct mlx5_eswitch *esw; 3625 int err; 3626 3627 esw = mlx5_devlink_eswitch_get(devlink); 3628 if (IS_ERR(esw)) 3629 return PTR_ERR(esw); 3630 3631 3632 down_write(&esw->mode_lock); 3633 err = eswitch_devlink_esw_mode_check(esw); 3634 if (err) 3635 goto unlock; 3636 3637 *encap = esw->offloads.encap; 3638 unlock: 3639 up_write(&esw->mode_lock); 3640 return err; 3641 } 3642 3643 static bool 3644 
mlx5_eswitch_vport_has_rep(const struct mlx5_eswitch *esw, u16 vport_num) 3645 { 3646 /* Currently, only an ECPF-based device has a representor for the host PF. */ 3647 if (vport_num == MLX5_VPORT_PF && 3648 !mlx5_core_is_ecpf_esw_manager(esw->dev)) 3649 return false; 3650 3651 if (vport_num == MLX5_VPORT_ECPF && 3652 !mlx5_ecpf_vport_exists(esw->dev)) 3653 return false; 3654 3655 return true; 3656 } 3657 3658 void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw, 3659 const struct mlx5_eswitch_rep_ops *ops, 3660 u8 rep_type) 3661 { 3662 struct mlx5_eswitch_rep_data *rep_data; 3663 struct mlx5_eswitch_rep *rep; 3664 unsigned long i; 3665 3666 esw->offloads.rep_ops[rep_type] = ops; 3667 mlx5_esw_for_each_rep(esw, i, rep) { 3668 if (likely(mlx5_eswitch_vport_has_rep(esw, rep->vport))) { 3669 rep->esw = esw; 3670 rep_data = &rep->rep_data[rep_type]; 3671 atomic_set(&rep_data->state, REP_REGISTERED); 3672 } 3673 } 3674 } 3675 EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps); 3676 3677 void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type) 3678 { 3679 struct mlx5_eswitch_rep *rep; 3680 unsigned long i; 3681 3682 if (esw->mode == MLX5_ESWITCH_OFFLOADS) 3683 __unload_reps_all_vport(esw, rep_type); 3684 3685 mlx5_esw_for_each_rep(esw, i, rep) 3686 atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED); 3687 } 3688 EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps); 3689 3690 void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type) 3691 { 3692 struct mlx5_eswitch_rep *rep; 3693 3694 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK); 3695 return rep->rep_data[rep_type].priv; 3696 } 3697 3698 void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw, 3699 u16 vport, 3700 u8 rep_type) 3701 { 3702 struct mlx5_eswitch_rep *rep; 3703 3704 rep = mlx5_eswitch_get_rep(esw, vport); 3705 3706 if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED && 3707 esw->offloads.rep_ops[rep_type]->get_proto_dev) 3708 return esw->offloads.rep_ops[rep_type]->get_proto_dev(rep); 3709 return NULL; 3710 } 3711 EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev); 3712 3713 void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type) 3714 { 3715 return mlx5_eswitch_get_proto_dev(esw, MLX5_VPORT_UPLINK, rep_type); 3716 } 3717 EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev); 3718 3719 struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw, 3720 u16 vport) 3721 { 3722 return mlx5_eswitch_get_rep(esw, vport); 3723 } 3724 EXPORT_SYMBOL(mlx5_eswitch_vport_rep); 3725 3726 bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw) 3727 { 3728 return !!(esw->flags & MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED); 3729 } 3730 EXPORT_SYMBOL(mlx5_eswitch_reg_c1_loopback_enabled); 3731 3732 bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw) 3733 { 3734 return !!(esw->flags & MLX5_ESWITCH_VPORT_MATCH_METADATA); 3735 } 3736 EXPORT_SYMBOL(mlx5_eswitch_vport_match_metadata_enabled); 3737 3738 u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw, 3739 u16 vport_num) 3740 { 3741 struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num); 3742 3743 if (WARN_ON_ONCE(IS_ERR(vport))) 3744 return 0; 3745 3746 return vport->metadata << (32 - ESW_SOURCE_PORT_METADATA_BITS); 3747 } 3748 EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match); 3749 3750 int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port, 3751 u16 vport_num, u32 controller, u32 sfnum) 3752 { 3753 int err; 3754
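/* SF activation order: enable the vport, register its devlink port, then load the rep; the labels below unwind in reverse on failure. */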
int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
				      u16 vport_num, u32 controller, u32 sfnum)
{
	int err;

	err = mlx5_esw_vport_enable(esw, vport_num, MLX5_VPORT_UC_ADDR_CHANGE);
	if (err)
		return err;

	err = mlx5_esw_devlink_sf_port_register(esw, dl_port, vport_num, controller, sfnum);
	if (err)
		goto devlink_err;

	err = mlx5_esw_offloads_rep_load(esw, vport_num);
	if (err)
		goto rep_err;
	return 0;

rep_err:
	mlx5_esw_devlink_sf_port_unregister(esw, vport_num);
devlink_err:
	mlx5_esw_vport_disable(esw, vport_num);
	return err;
}

void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num)
{
	mlx5_esw_offloads_rep_unload(esw, vport_num);
	mlx5_esw_devlink_sf_port_unregister(esw, vport_num);
	mlx5_esw_vport_disable(esw, vport_num);
}

static int mlx5_esw_query_vport_vhca_id(struct mlx5_eswitch *esw, u16 vport_num, u16 *vhca_id)
{
	int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	void *query_ctx;
	void *hca_caps;
	int err;

	*vhca_id = 0;
	if (mlx5_esw_is_manager_vport(esw, vport_num) ||
	    !MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
		return -EPERM;

	query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
	if (!query_ctx)
		return -ENOMEM;

	err = mlx5_vport_get_other_func_cap(esw->dev, vport_num, query_ctx);
	if (err)
		goto out_free;

	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
	*vhca_id = MLX5_GET(cmd_hca_cap, hca_caps, vhca_id);

out_free:
	kfree(query_ctx);
	return err;
}

int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num)
{
	u16 *old_entry, *vhca_map_entry, vhca_id;
	int err;

	err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id);
	if (err) {
		esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%u,err=%d)\n",
			 vport_num, err);
		return err;
	}

	vhca_map_entry = kmalloc(sizeof(*vhca_map_entry), GFP_KERNEL);
	if (!vhca_map_entry)
		return -ENOMEM;

	*vhca_map_entry = vport_num;
	old_entry = xa_store(&esw->offloads.vhca_map, vhca_id, vhca_map_entry, GFP_KERNEL);
	if (xa_is_err(old_entry)) {
		kfree(vhca_map_entry);
		return xa_err(old_entry);
	}
	kfree(old_entry);
	return 0;
}

void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num)
{
	u16 *vhca_map_entry, vhca_id;
	int err;

	err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id);
	if (err)
		esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%u,err=%d)\n",
			 vport_num, err);

	vhca_map_entry = xa_erase(&esw->offloads.vhca_map, vhca_id);
	kfree(vhca_map_entry);
}

int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num)
{
	u16 *res = xa_load(&esw->offloads.vhca_map, vhca_id);

	if (!res)
		return -ENOENT;

	*vport_num = *res;
	return 0;
}
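
/* Usage sketch (illustrative; example_resolve() is hypothetical and not
 * part of this file): mlx5_esw_vport_vhca_id_set() populates the map when
 * a vport comes up, mlx5_esw_vport_vhca_id_clear() removes the entry on
 * teardown, and in between a consumer holding only a vhca_id can recover
 * the owning vport:
 *
 *	static void example_resolve(struct mlx5_eswitch *esw, u16 vhca_id)
 *	{
 *		u16 vport_num;
 *
 *		if (mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, &vport_num))
 *			return;	// -ENOENT: vhca_id was never registered
 *		// vport_num now identifies the function behind vhca_id
 *	}
 */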
u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
					    u16 vport_num)
{
	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);

	if (WARN_ON_ONCE(IS_ERR(vport)))
		return 0;

	return vport->metadata;
}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_set);

static bool
is_port_function_supported(struct mlx5_eswitch *esw, u16 vport_num)
{
	return vport_num == MLX5_VPORT_PF ||
	       mlx5_eswitch_is_vf_vport(esw, vport_num) ||
	       mlx5_esw_is_sf_vport(esw, vport_num);
}

int mlx5_devlink_port_function_hw_addr_get(struct devlink_port *port,
					   u8 *hw_addr, int *hw_addr_len,
					   struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw;
	struct mlx5_vport *vport;
	u16 vport_num;

	esw = mlx5_devlink_eswitch_get(port->devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
	if (!is_port_function_supported(esw, vport_num))
		return -EOPNOTSUPP;

	vport = mlx5_eswitch_get_vport(esw, vport_num);
	if (IS_ERR(vport)) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid port");
		return PTR_ERR(vport);
	}

	mutex_lock(&esw->state_lock);
	ether_addr_copy(hw_addr, vport->info.mac);
	*hw_addr_len = ETH_ALEN;
	mutex_unlock(&esw->state_lock);
	return 0;
}

int mlx5_devlink_port_function_hw_addr_set(struct devlink_port *port,
					   const u8 *hw_addr, int hw_addr_len,
					   struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw;
	u16 vport_num;

	esw = mlx5_devlink_eswitch_get(port->devlink);
	if (IS_ERR(esw)) {
		NL_SET_ERR_MSG_MOD(extack, "Eswitch doesn't support setting hw_addr");
		return PTR_ERR(esw);
	}

	vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
	if (!is_port_function_supported(esw, vport_num)) {
		NL_SET_ERR_MSG_MOD(extack, "Port doesn't support setting hw_addr");
		return -EINVAL;
	}

	return mlx5_eswitch_set_vport_mac(esw, vport_num, hw_addr);
}
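
/* Usage note (illustrative): the two callbacks above back the devlink
 * "port function" hw_addr attribute; from user space this is typically
 * exercised as (PCI address and port index are placeholders):
 *
 *	devlink port function show pci/0000:06:00.0/2
 *	devlink port function set pci/0000:06:00.0/2 hw_addr 00:11:22:33:44:55
 */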