/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/idr.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
#include "esw/indir_table.h"
#include "esw/acl/ofld.h"
#include "rdma.h"
#include "en.h"
#include "fs_core.h"
#include "lib/devcom.h"
#include "lib/eq.h"
#include "lib/fs_chains.h"
#include "en_tc.h"
#include "en/mapping.h"

#define mlx5_esw_for_each_rep(esw, i, rep) \
	xa_for_each(&((esw)->offloads.vport_reps), i, rep)

#define mlx5_esw_for_each_sf_rep(esw, i, rep) \
	xa_for_each_marked(&((esw)->offloads.vport_reps), i, rep, MLX5_ESW_VPT_SF)

#define mlx5_esw_for_each_vf_rep(esw, index, rep) \
	mlx5_esw_for_each_entry_marked(&((esw)->offloads.vport_reps), index, \
				       rep, (esw)->esw_funcs.num_vfs, MLX5_ESW_VPT_VF)

/* There are two match-all miss flows, one for unicast dst mac and
 * one for multicast.
 */
#define MLX5_ESW_MISS_FLOWS (2)
#define UPLINK_REP_INDEX 0

#define MLX5_ESW_VPORT_TBL_SIZE 128
#define MLX5_ESW_VPORT_TBL_NUM_GROUPS 4

static const struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_mirror_ns = {
	.max_fte = MLX5_ESW_VPORT_TBL_SIZE,
	.max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS,
	.flags = 0,
};

static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
						     u16 vport_num)
{
	return xa_load(&esw->offloads.vport_reps, vport_num);
}

static void
mlx5_eswitch_set_rule_flow_source(struct mlx5_eswitch *esw,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_esw_flow_attr *attr)
{
	if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) &&
	    attr && attr->in_rep)
		spec->flow_context.flow_source =
			attr->in_rep->vport == MLX5_VPORT_UPLINK ?
				MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK :
				MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
}

/* Only the upper 16 bits of reg c0 strictly need to be cleared, but the
 * lower 16 bits are not used later in the process either, so clear them
 * all for simplicity.
 */
void
mlx5_eswitch_clear_rule_source_port(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec)
{
	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		void *misc2;

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, 0);

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, 0);

		if (!memchr_inv(misc2, 0, MLX5_ST_SZ_BYTES(fte_match_set_misc2)))
			spec->match_criteria_enable &= ~MLX5_MATCH_MISC_PARAMETERS_2;
	}
}

static void
mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_flow_attr *attr,
				  struct mlx5_eswitch *src_esw,
				  u16 vport)
{
	void *misc2;
	void *misc;

	/* Use metadata matching because vport is not represented by a single
	 * VHCA in dual-port RoCE mode, and matching on source vport may fail.
	 */
	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		if (mlx5_esw_indir_table_decap_vport(attr))
			vport = mlx5_esw_indir_table_decap_vport(attr);
		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(src_esw,
								   vport));

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);

		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id,
				 MLX5_CAP_GEN(src_esw->dev, vhca_id));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET_TO_ONES(fte_match_set_misc, misc,
					 source_eswitch_owner_vhca_id);

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
	}
}

static int
esw_setup_decap_indir(struct mlx5_eswitch *esw,
		      struct mlx5_flow_attr *attr,
		      struct mlx5_flow_spec *spec)
{
	struct mlx5_flow_table *ft;

	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SRC_REWRITE))
		return -EOPNOTSUPP;

	ft = mlx5_esw_indir_table_get(esw, attr, spec,
				      mlx5_esw_indir_table_decap_vport(attr), true);
	return PTR_ERR_OR_ZERO(ft);
}

static void
esw_cleanup_decap_indir(struct mlx5_eswitch *esw,
			struct mlx5_flow_attr *attr)
{
	if (mlx5_esw_indir_table_decap_vport(attr))
		mlx5_esw_indir_table_put(esw, attr,
					 mlx5_esw_indir_table_decap_vport(attr),
					 true);
}

static int
esw_setup_sampler_dest(struct mlx5_flow_destination *dest,
		       struct mlx5_flow_act *flow_act,
		       struct mlx5_esw_flow_attr *esw_attr,
		       int i)
{
	flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
	dest[i].sampler_id = esw_attr->sample->sampler_id;

	return 0;
}
static int
esw_setup_ft_dest(struct mlx5_flow_destination *dest,
		  struct mlx5_flow_act *flow_act,
		  struct mlx5_eswitch *esw,
		  struct mlx5_flow_attr *attr,
		  struct mlx5_flow_spec *spec,
		  int i)
{
	flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = attr->dest_ft;

	if (mlx5_esw_indir_table_decap_vport(attr))
		return esw_setup_decap_indir(esw, attr, spec);
	return 0;
}

static void
esw_setup_slow_path_dest(struct mlx5_flow_destination *dest,
			 struct mlx5_flow_act *flow_act,
			 struct mlx5_fs_chains *chains,
			 int i)
{
	if (mlx5_chains_ignore_flow_level_supported(chains))
		flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = mlx5_chains_get_tc_end_ft(chains);
}

static int
esw_setup_chain_dest(struct mlx5_flow_destination *dest,
		     struct mlx5_flow_act *flow_act,
		     struct mlx5_fs_chains *chains,
		     u32 chain, u32 prio, u32 level,
		     int i)
{
	struct mlx5_flow_table *ft;

	flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	ft = mlx5_chains_get_table(chains, chain, prio, level);
	if (IS_ERR(ft))
		return PTR_ERR(ft);

	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = ft;
	return 0;
}

static void esw_put_dest_tables_loop(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr,
				     int from, int to)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	int i;

	for (i = from; i < to; i++)
		if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
			mlx5_chains_put_table(chains, 0, 1, 0);
		else if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
						     esw_attr->dests[i].mdev))
			mlx5_esw_indir_table_put(esw, attr, esw_attr->dests[i].rep->vport,
						 false);
}

static bool
esw_is_chain_src_port_rewrite(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr)
{
	int i;

	for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
		if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
			return true;
	return false;
}

static int
esw_setup_chain_src_port_rewrite(struct mlx5_flow_destination *dest,
				 struct mlx5_flow_act *flow_act,
				 struct mlx5_eswitch *esw,
				 struct mlx5_fs_chains *chains,
				 struct mlx5_flow_attr *attr,
				 int *i)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	int j, err;

	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SRC_REWRITE))
		return -EOPNOTSUPP;

	for (j = esw_attr->split_count; j < esw_attr->out_count; j++, (*i)++) {
		err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain, 1, 0, *i);
		if (err)
			goto err_setup_chain;
		flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
		flow_act->pkt_reformat = esw_attr->dests[j].pkt_reformat;
	}
	return 0;

err_setup_chain:
	esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, j);
	return err;
}

static void esw_cleanup_chain_src_port_rewrite(struct mlx5_eswitch *esw,
					       struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;

	esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, esw_attr->out_count);
}

static bool
esw_is_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	int i;

	for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
		if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
						esw_attr->dests[i].mdev))
			return true;
	return false;
}
static int
esw_setup_indir_table(struct mlx5_flow_destination *dest,
		      struct mlx5_flow_act *flow_act,
		      struct mlx5_eswitch *esw,
		      struct mlx5_flow_attr *attr,
		      struct mlx5_flow_spec *spec,
		      bool ignore_flow_lvl,
		      int *i)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	int j, err;

	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SRC_REWRITE))
		return -EOPNOTSUPP;

	for (j = esw_attr->split_count; j < esw_attr->out_count; j++, (*i)++) {
		if (ignore_flow_lvl)
			flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
		dest[*i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;

		dest[*i].ft = mlx5_esw_indir_table_get(esw, attr, spec,
						       esw_attr->dests[j].rep->vport, false);
		if (IS_ERR(dest[*i].ft)) {
			err = PTR_ERR(dest[*i].ft);
			goto err_indir_tbl_get;
		}
	}

	if (mlx5_esw_indir_table_decap_vport(attr)) {
		err = esw_setup_decap_indir(esw, attr, spec);
		if (err)
			goto err_indir_tbl_get;
	}

	return 0;

err_indir_tbl_get:
	esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, j);
	return err;
}

static void esw_cleanup_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;

	esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, esw_attr->out_count);
	esw_cleanup_decap_indir(esw, attr);
}

static void
esw_cleanup_chain_dest(struct mlx5_fs_chains *chains, u32 chain, u32 prio, u32 level)
{
	mlx5_chains_put_table(chains, chain, prio, level);
}

static void
esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
		     struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
		     int attr_idx, int dest_idx, bool pkt_reformat)
{
	dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest[dest_idx].vport.num = esw_attr->dests[attr_idx].rep->vport;
	dest[dest_idx].vport.vhca_id =
		MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
	if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP) {
		if (pkt_reformat) {
			flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
			flow_act->pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
		}
		dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
		dest[dest_idx].vport.pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
	}
}

static int
esw_setup_vport_dests(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
		      struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
		      int i)
{
	int j;

	for (j = esw_attr->split_count; j < esw_attr->out_count; j++, i++)
		esw_setup_vport_dest(dest, flow_act, esw, esw_attr, j, i, true);
	return i;
}

static bool
esw_src_port_rewrite_supported(struct mlx5_eswitch *esw)
{
	return MLX5_CAP_GEN(esw->dev, reg_c_preserve) &&
	       mlx5_eswitch_vport_match_metadata_enabled(esw) &&
	       MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level);
}
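
/* Destination setup is a dispatch over the attribute state. A rough sketch
 * of the precedence, derived from the if/else ladder in esw_setup_dests()
 * below:
 *   1. sampler destination (MLX5_ESW_ATTR_FLAG_SAMPLE)
 *   2. explicit destination flow table (attr->dest_ft)
 *   3. slow path table (MLX5_ESW_ATTR_FLAG_SLOW_PATH)
 *   4. goto chain (attr->dest_chain)
 *   5. indirect tables, then chain with source port rewrite
 *   6. plain vport destination(s) otherwise
 */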
static int
esw_setup_dests(struct mlx5_flow_destination *dest,
		struct mlx5_flow_act *flow_act,
		struct mlx5_eswitch *esw,
		struct mlx5_flow_attr *attr,
		struct mlx5_flow_spec *spec,
		int *i)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	int err = 0;

	if (!mlx5_eswitch_termtbl_required(esw, attr, flow_act, spec) &&
	    esw_src_port_rewrite_supported(esw))
		attr->flags |= MLX5_ESW_ATTR_FLAG_SRC_REWRITE;

	if (attr->flags & MLX5_ESW_ATTR_FLAG_SAMPLE) {
		esw_setup_sampler_dest(dest, flow_act, esw_attr, *i);
		(*i)++;
	} else if (attr->dest_ft) {
		esw_setup_ft_dest(dest, flow_act, esw, attr, spec, *i);
		(*i)++;
	} else if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) {
		esw_setup_slow_path_dest(dest, flow_act, chains, *i);
		(*i)++;
	} else if (attr->dest_chain) {
		err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain,
					   1, 0, *i);
		(*i)++;
	} else if (esw_is_indir_table(esw, attr)) {
		err = esw_setup_indir_table(dest, flow_act, esw, attr, spec, true, i);
	} else if (esw_is_chain_src_port_rewrite(esw, esw_attr)) {
		err = esw_setup_chain_src_port_rewrite(dest, flow_act, esw, chains, attr, i);
	} else {
		*i = esw_setup_vport_dests(dest, flow_act, esw, esw_attr, *i);
	}

	return err;
}

static void
esw_cleanup_dests(struct mlx5_eswitch *esw,
		  struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);

	if (attr->dest_ft) {
		esw_cleanup_decap_indir(esw, attr);
	} else if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)) {
		if (attr->dest_chain)
			esw_cleanup_chain_dest(chains, attr->dest_chain, 1, 0);
		else if (esw_is_indir_table(esw, attr))
			esw_cleanup_indir_table(esw, attr);
		else if (esw_is_chain_src_port_rewrite(esw, esw_attr))
			esw_cleanup_chain_src_port_rewrite(esw, attr);
	}
}

struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	bool split = !!(esw_attr->split_count);
	struct mlx5_vport_tbl_attr fwd_attr;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *fdb;
	int i = 0;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	flow_act.action = attr->action;
	/* if per flow vlan pop/push is emulated, don't set that into the firmware */
	if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
				     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
		flow_act.vlan[0].ethtype = ntohs(esw_attr->vlan_proto[0]);
		flow_act.vlan[0].vid = esw_attr->vlan_vid[0];
		flow_act.vlan[0].prio = esw_attr->vlan_prio[0];
		if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
			flow_act.vlan[1].ethtype = ntohs(esw_attr->vlan_proto[1]);
			flow_act.vlan[1].vid = esw_attr->vlan_vid[1];
			flow_act.vlan[1].prio = esw_attr->vlan_prio[1];
		}
	}

	mlx5_eswitch_set_rule_flow_source(esw, spec, esw_attr);

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		int err;

		err = esw_setup_dests(dest, &flow_act, esw, attr, spec, &i);
		if (err) {
			rule = ERR_PTR(err);
			goto err_create_goto_table;
		}
	}
	if (esw_attr->decap_pkt_reformat)
		flow_act.pkt_reformat = esw_attr->decap_pkt_reformat;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter_id = mlx5_fc_id(attr->counter);
		i++;
	}

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
	if (attr->inner_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_hdr = attr->modify_hdr;

	/* esw_attr->sample is allocated only when there is a sample action */
	if (esw_attr->sample && esw_attr->sample->sample_default_tbl) {
		fdb = esw_attr->sample->sample_default_tbl;
	} else if (split) {
		fwd_attr.chain = attr->chain;
		fwd_attr.prio = attr->prio;
		fwd_attr.vport = esw_attr->in_rep->vport;
		fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;

		fdb = mlx5_esw_vporttbl_get(esw, &fwd_attr);
	} else {
		if (attr->chain || attr->prio)
			fdb = mlx5_chains_get_table(chains, attr->chain,
						    attr->prio, 0);
		else
			fdb = attr->ft;

		if (!(attr->flags & MLX5_ESW_ATTR_FLAG_NO_IN_PORT))
			mlx5_eswitch_set_rule_source_port(esw, spec, attr,
							  esw_attr->in_mdev->priv.eswitch,
							  esw_attr->in_rep->vport);
	}
	if (IS_ERR(fdb)) {
		rule = ERR_CAST(fdb);
		goto err_esw_get;
	}

	if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec))
		rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, esw_attr,
						     &flow_act, dest, i);
	else
		rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
	if (IS_ERR(rule))
		goto err_add_rule;
	else
		atomic64_inc(&esw->offloads.num_flows);

	return rule;

err_add_rule:
	if (split)
		mlx5_esw_vporttbl_put(esw, &fwd_attr);
	else if (attr->chain || attr->prio)
		mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
err_esw_get:
	esw_cleanup_dests(esw, attr);
err_create_goto_table:
	return rule;
}

struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	struct mlx5_vport_tbl_attr fwd_attr;
	struct mlx5_flow_table *fast_fdb;
	struct mlx5_flow_table *fwd_fdb;
	struct mlx5_flow_handle *rule;
	int i, err = 0;

	fast_fdb = mlx5_chains_get_table(chains, attr->chain, attr->prio, 0);
	if (IS_ERR(fast_fdb)) {
		rule = ERR_CAST(fast_fdb);
		goto err_get_fast;
	}

	fwd_attr.chain = attr->chain;
	fwd_attr.prio = attr->prio;
	fwd_attr.vport = esw_attr->in_rep->vport;
	fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
	fwd_fdb = mlx5_esw_vporttbl_get(esw, &fwd_attr);
	if (IS_ERR(fwd_fdb)) {
		rule = ERR_CAST(fwd_fdb);
		goto err_get_fwd;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	for (i = 0; i < esw_attr->split_count; i++) {
		if (esw_is_indir_table(esw, attr))
			err = esw_setup_indir_table(dest, &flow_act, esw, attr, spec, false, &i);
		else if (esw_is_chain_src_port_rewrite(esw, esw_attr))
			err = esw_setup_chain_src_port_rewrite(dest, &flow_act, esw, chains, attr,
							       &i);
		else
			esw_setup_vport_dest(dest, &flow_act, esw, esw_attr, i, i, false);

		if (err) {
			rule = ERR_PTR(err);
			goto err_chain_src_rewrite;
		}
	}
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = fwd_fdb;
	i++;

	mlx5_eswitch_set_rule_source_port(esw, spec, attr,
					  esw_attr->in_mdev->priv.eswitch,
					  esw_attr->in_rep->vport);

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);

	if (IS_ERR(rule)) {
		i = esw_attr->split_count;
		goto err_chain_src_rewrite;
	}

	atomic64_inc(&esw->offloads.num_flows);

	return rule;
err_chain_src_rewrite:
	esw_put_dest_tables_loop(esw, attr, 0, i);
	mlx5_esw_vporttbl_put(esw, &fwd_attr);
err_get_fwd:
	mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
err_get_fast:
	return rule;
}

static void
__mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
			struct mlx5_flow_handle *rule,
			struct mlx5_flow_attr *attr,
			bool fwd_rule)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	bool split = (esw_attr->split_count > 0);
	struct mlx5_vport_tbl_attr fwd_attr;
	int i;

	mlx5_del_flow_rules(rule);

	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)) {
		/* unref the term table */
		for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
			if (esw_attr->dests[i].termtbl)
				mlx5_eswitch_termtbl_put(esw, esw_attr->dests[i].termtbl);
		}
	}

	atomic64_dec(&esw->offloads.num_flows);

	if (fwd_rule || split) {
		fwd_attr.chain = attr->chain;
		fwd_attr.prio = attr->prio;
		fwd_attr.vport = esw_attr->in_rep->vport;
		fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
	}

	if (fwd_rule) {
		mlx5_esw_vporttbl_put(esw, &fwd_attr);
		mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
		esw_put_dest_tables_loop(esw, attr, 0, esw_attr->split_count);
	} else {
		if (split)
			mlx5_esw_vporttbl_put(esw, &fwd_attr);
		else if (attr->chain || attr->prio)
			mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
		esw_cleanup_dests(esw, attr);
	}
}

void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, false);
}

void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, true);
}
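
/* VLAN push/pop emulation: when the FW cannot perform per-flow vlan actions,
 * the driver emulates them with per-vport vlan stripping/insertion. A sketch
 * of the scheme implemented by the helpers below: esw_set_global_vlan_pop()
 * applies a global pop (strip) policy to all host-function vports, while
 * per-vport push state is refcounted via rep->vlan_refcount in
 * mlx5_eswitch_add_vlan_action()/mlx5_eswitch_del_vlan_action().
 */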
"pop" : "none"); 735 mlx5_esw_for_each_host_func_vport(esw, i, rep, esw->esw_funcs.num_vfs) { 736 if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED) 737 continue; 738 739 err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val); 740 if (err) 741 goto out; 742 } 743 744 out: 745 return err; 746 } 747 748 static struct mlx5_eswitch_rep * 749 esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop) 750 { 751 struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL; 752 753 in_rep = attr->in_rep; 754 out_rep = attr->dests[0].rep; 755 756 if (push) 757 vport = in_rep; 758 else if (pop) 759 vport = out_rep; 760 else 761 vport = in_rep; 762 763 return vport; 764 } 765 766 static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr, 767 bool push, bool pop, bool fwd) 768 { 769 struct mlx5_eswitch_rep *in_rep, *out_rep; 770 771 if ((push || pop) && !fwd) 772 goto out_notsupp; 773 774 in_rep = attr->in_rep; 775 out_rep = attr->dests[0].rep; 776 777 if (push && in_rep->vport == MLX5_VPORT_UPLINK) 778 goto out_notsupp; 779 780 if (pop && out_rep->vport == MLX5_VPORT_UPLINK) 781 goto out_notsupp; 782 783 /* vport has vlan push configured, can't offload VF --> wire rules w.o it */ 784 if (!push && !pop && fwd) 785 if (in_rep->vlan && out_rep->vport == MLX5_VPORT_UPLINK) 786 goto out_notsupp; 787 788 /* protects against (1) setting rules with different vlans to push and 789 * (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0) 790 */ 791 if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0])) 792 goto out_notsupp; 793 794 return 0; 795 796 out_notsupp: 797 return -EOPNOTSUPP; 798 } 799 800 int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw, 801 struct mlx5_flow_attr *attr) 802 { 803 struct offloads_fdb *offloads = &esw->fdb_table.offloads; 804 struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; 805 struct mlx5_eswitch_rep *vport = NULL; 806 bool push, pop, fwd; 807 int err = 0; 808 809 /* nop if we're on the vlan push/pop non emulation mode */ 810 if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1)) 811 return 0; 812 813 push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH); 814 pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP); 815 fwd = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) && 816 !attr->dest_chain); 817 818 mutex_lock(&esw->state_lock); 819 820 err = esw_add_vlan_action_check(esw_attr, push, pop, fwd); 821 if (err) 822 goto unlock; 823 824 attr->flags &= ~MLX5_ESW_ATTR_FLAG_VLAN_HANDLED; 825 826 vport = esw_vlan_action_get_vport(esw_attr, push, pop); 827 828 if (!push && !pop && fwd) { 829 /* tracks VF --> wire rules without vlan push action */ 830 if (esw_attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) { 831 vport->vlan_refcount++; 832 attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED; 833 } 834 835 goto unlock; 836 } 837 838 if (!push && !pop) 839 goto unlock; 840 841 if (!(offloads->vlan_push_pop_refcount)) { 842 /* it's the 1st vlan rule, apply global vlan pop policy */ 843 err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP); 844 if (err) 845 goto out; 846 } 847 offloads->vlan_push_pop_refcount++; 848 849 if (push) { 850 if (vport->vlan_refcount) 851 goto skip_set_push; 852 853 err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, esw_attr->vlan_vid[0], 854 0, SET_VLAN_INSERT | SET_VLAN_STRIP); 855 if (err) 856 goto out; 857 vport->vlan = esw_attr->vlan_vid[0]; 858 skip_set_push: 859 vport->vlan_refcount++; 860 } 861 out: 862 if (!err) 863 attr->flags |= 
int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're on the vlan push/pop non-emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_VLAN_HANDLED))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	mutex_lock(&esw->state_lock);

	vport = esw_vlan_action_get_vport(esw_attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (esw_attr->dests[0].rep->vport == MLX5_VPORT_UPLINK)
			vport->vlan_refcount--;

		goto out;
	}

	if (push) {
		vport->vlan_refcount--;
		if (vport->vlan_refcount)
			goto skip_unset_push;

		vport->vlan = 0;
		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
						    0, 0, SET_VLAN_STRIP);
		if (err)
			goto out;
	}

skip_unset_push:
	offloads->vlan_push_pop_refcount--;
	if (offloads->vlan_push_pop_refcount)
		goto out;

	/* no more vlan rules, stop global vlan pop policy */
	err = esw_set_global_vlan_pop(esw, 0);

out:
	mutex_unlock(&esw->state_lock);
	return err;
}
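
/* A send-to-vport rule steers packets that the eswitch manager transmits
 * from a given SQ (matched by source_sqn plus the manager source_port) to
 * the represented vport. This is what makes traffic sent on a representor's
 * queues arrive at the corresponding function.
 */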
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
				    struct mlx5_eswitch_rep *rep,
				    u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	/* source vport is the esw manager */
	MLX5_SET(fte_match_set_misc, misc, source_port, rep->esw->manager_vport);
	if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch))
		MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(rep->esw->dev, vhca_id));

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
	if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch))
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = rep->vport;
	dest.vport.vhca_id = MLX5_CAP_GEN(rep->esw->dev, vhca_id);
	dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(on_esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(on_esw->dev, "FDB: Failed to add send to vport rule err %ld\n",
			 PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}
EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);

void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
	mlx5_del_flow_rules(rule);
}

static void mlx5_eswitch_del_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_handle **flows = esw->fdb_table.offloads.send_to_vport_meta_rules;
	int i = 0, num_vfs = esw->esw_funcs.num_vfs;

	if (!num_vfs || !flows)
		return;

	for (i = 0; i < num_vfs; i++)
		mlx5_del_flow_rules(flows[i]);

	kvfree(flows);
}

static int
mlx5_eswitch_add_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {0};
	int num_vfs, rule_idx = 0, err = 0;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_handle **flows;
	struct mlx5_flow_spec *spec;
	struct mlx5_vport *vport;
	unsigned long i;
	u16 vport_num;

	num_vfs = esw->esw_funcs.num_vfs;
	flows = kvzalloc(num_vfs * sizeof(*flows), GFP_KERNEL);
	if (!flows)
		return -ENOMEM;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto alloc_err;
	}

	MLX5_SET(fte_match_param, spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
	MLX5_SET(fte_match_param, spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
	MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_1,
		 ESW_TUN_SLOW_TABLE_GOTO_VPORT_MARK);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) {
		vport_num = vport->vport;
		MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));
		dest.vport.num = vport_num;

		flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
						spec, &flow_act, &dest, 1);
		if (IS_ERR(flow_rule)) {
			err = PTR_ERR(flow_rule);
			esw_warn(esw->dev, "FDB: Failed to add send to vport meta rule idx %d, err %ld\n",
				 rule_idx, PTR_ERR(flow_rule));
			goto rule_err;
		}
		flows[rule_idx++] = flow_rule;
	}

	esw->fdb_table.offloads.send_to_vport_meta_rules = flows;
	kvfree(spec);
	return 0;

rule_err:
	while (--rule_idx >= 0)
		mlx5_del_flow_rules(flows[rule_idx]);
	kvfree(spec);
alloc_err:
	kvfree(flows);
	return err;
}

static bool mlx5_eswitch_reg_c1_loopback_supported(struct mlx5_eswitch *esw)
{
	return MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
	       MLX5_FDB_TO_VPORT_REG_C_1;
}
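
/* Enable/disable copying of reg c0 (source port metadata) and, when
 * supported, reg c1 from the FDB to NIC RX of the vport, by toggling the
 * fdb_to_vport_reg_c_id field in the esw vport context.
 */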
static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
{
	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
	u32 min[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
	u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
	u8 curr, wanted;
	int err;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw) &&
	    !mlx5_eswitch_vport_match_metadata_enabled(esw))
		return 0;

	MLX5_SET(query_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
	err = mlx5_cmd_exec_inout(esw->dev, query_esw_vport_context, in, out);
	if (err)
		return err;

	curr = MLX5_GET(query_esw_vport_context_out, out,
			esw_vport_context.fdb_to_vport_reg_c_id);
	wanted = MLX5_FDB_TO_VPORT_REG_C_0;
	if (mlx5_eswitch_reg_c1_loopback_supported(esw))
		wanted |= MLX5_FDB_TO_VPORT_REG_C_1;

	if (enable)
		curr |= wanted;
	else
		curr &= ~wanted;

	MLX5_SET(modify_esw_vport_context_in, min,
		 esw_vport_context.fdb_to_vport_reg_c_id, curr);
	MLX5_SET(modify_esw_vport_context_in, min,
		 field_select.fdb_to_vport_reg_c_id, 1);

	err = mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false, min);
	if (!err) {
		if (enable && (curr & MLX5_FDB_TO_VPORT_REG_C_1))
			esw->flags |= MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
		else
			esw->flags &= ~MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
	}

	return err;
}

static void peer_miss_rules_setup(struct mlx5_eswitch *esw,
				  struct mlx5_core_dev *peer_dev,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_flow_destination *dest)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

		MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(peer_dev, vhca_id));

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);
	}

	dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest->vport.num = peer_dev->priv.eswitch->manager_vport;
	dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
	dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
}

static void esw_set_peer_miss_rule_source_port(struct mlx5_eswitch *esw,
					       struct mlx5_eswitch *peer_esw,
					       struct mlx5_flow_spec *spec,
					       u16 vport)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(peer_esw,
								   vport));
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);
	}
}

static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
				       struct mlx5_core_dev *peer_dev)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle **flows;
	/* total vports is the same for both e-switches */
	int nvports = esw->total_vports;
	struct mlx5_flow_handle *flow;
	struct mlx5_flow_spec *spec;
	struct mlx5_vport *vport;
	unsigned long i;
	void *misc;
	int err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	peer_miss_rules_setup(esw, peer_dev, spec, &dest);

	flows = kvzalloc(nvports * sizeof(*flows), GFP_KERNEL);
	if (!flows) {
		err = -ENOMEM;
		goto alloc_flows_err;
	}
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
		esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
						   spec, MLX5_VPORT_PF);

		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_pf_flow_err;
		}
		flows[vport->index] = flow;
	}

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
		MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_ecpf_flow_err;
		}
		flows[vport->index] = flow;
	}

	mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
		esw_set_peer_miss_rule_source_port(esw,
						   peer_dev->priv.eswitch,
						   spec, vport->vport);

		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_vf_flow_err;
		}
		flows[vport->index] = flow;
	}

	esw->fdb_table.offloads.peer_miss_rules = flows;

	kvfree(spec);
	return 0;

add_vf_flow_err:
	mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
		if (!flows[vport->index])
			continue;
		mlx5_del_flow_rules(flows[vport->index]);
	}
	if (mlx5_ecpf_vport_exists(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
		mlx5_del_flow_rules(flows[vport->index]);
	}
add_ecpf_flow_err:
	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
		mlx5_del_flow_rules(flows[vport->index]);
	}
add_pf_flow_err:
	esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
	kvfree(flows);
alloc_flows_err:
	kvfree(spec);
	return err;
}

static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_handle **flows;
	struct mlx5_vport *vport;
	unsigned long i;

	flows = esw->fdb_table.offloads.peer_miss_rules;

	mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev))
		mlx5_del_flow_rules(flows[vport->index]);

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
		mlx5_del_flow_rules(flows[vport->index]);
	}

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
		mlx5_del_flow_rules(flows[vport->index]);
	}
	kvfree(flows);
}

static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	void *headers_c;
	void *headers_v;
	int err = 0;
	u8 *dmac_c;
	u8 *dmac_v;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				 outer_headers);
	dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
			      outer_headers.dmac_47_16);
	dmac_c[0] = 0x01;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = esw->manager_vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_uni = flow_rule;

	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				 outer_headers);
	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
			      outer_headers.dmac_47_16);
	dmac_v[0] = 0x01;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
		mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_multi = flow_rule;

out:
	kvfree(spec);
	return err;
}

struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
{
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_flow_table *ft = esw->offloads.ft_offloads_restore;
	struct mlx5_flow_context *flow_context;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_destination dest;
	struct mlx5_flow_spec *spec;
	void *misc;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return ERR_PTR(-EOPNOTSUPP);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    misc_parameters_2);
	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
		 ESW_REG_C0_USER_DATA_METADATA_MASK);
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters_2);
	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, tag);
	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			  MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	flow_act.modify_hdr = esw->offloads.restore_copy_hdr_id;

	flow_context = &spec->flow_context;
	flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
	flow_context->flow_tag = tag;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = esw->offloads.ft_offloads;

	flow_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	kvfree(spec);

	if (IS_ERR(flow_rule))
		esw_warn(esw->dev,
			 "Failed to create restore rule for tag: %d, err(%d)\n",
			 tag, (int)PTR_ERR(flow_rule));

	return flow_rule;
}
#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32

static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
					   u32 *flow_group_in)
{
	void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					    flow_group_in,
					    match_criteria);

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS_2);

		MLX5_SET(fte_match_param, match_criteria,
			 misc_parameters_2.metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());
	} else {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_port);
	}
}

#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
static void esw_vport_tbl_put(struct mlx5_eswitch *esw)
{
	struct mlx5_vport_tbl_attr attr;
	struct mlx5_vport *vport;
	unsigned long i;

	attr.chain = 0;
	attr.prio = 1;
	mlx5_esw_for_each_vport(esw, i, vport) {
		attr.vport = vport->vport;
		attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
		mlx5_esw_vporttbl_put(esw, &attr);
	}
}

static int esw_vport_tbl_get(struct mlx5_eswitch *esw)
{
	struct mlx5_vport_tbl_attr attr;
	struct mlx5_flow_table *fdb;
	struct mlx5_vport *vport;
	unsigned long i;

	attr.chain = 0;
	attr.prio = 1;
	mlx5_esw_for_each_vport(esw, i, vport) {
		attr.vport = vport->vport;
		attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
		fdb = mlx5_esw_vporttbl_get(esw, &attr);
		if (IS_ERR(fdb))
			goto out;
	}
	return 0;

out:
	esw_vport_tbl_put(esw);
	return PTR_ERR(fdb);
}

#define fdb_modify_header_fwd_to_table_supported(esw) \
	(MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_modify_header_fwd_to_table))
static void esw_init_chains_offload_flags(struct mlx5_eswitch *esw, u32 *flags)
{
	struct mlx5_core_dev *dev = esw->dev;

	if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ignore_flow_level))
		*flags |= MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;

	if (!MLX5_CAP_ESW_FLOWTABLE(dev, multi_fdb_encap) &&
	    esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
		*flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
		esw_warn(dev, "Tc chains and priorities offload aren't supported, update firmware if needed\n");
	} else if (!mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
		*flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
		esw_warn(dev, "Tc chains and priorities offload aren't supported\n");
	} else if (!fdb_modify_header_fwd_to_table_supported(esw)) {
		/* Disabled when ttl workaround is needed, e.g
		 * when ESWITCH_IPV4_TTL_MODIFY_ENABLE = true in mlxconfig
		 */
		esw_warn(dev,
			 "Tc chains and priorities offload aren't supported, check firmware version, or mlxconfig settings\n");
		*flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
	} else {
		*flags |= MLX5_CHAINS_AND_PRIOS_SUPPORTED;
		esw_info(dev, "Supported tc chains and prios offload\n");
	}

	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		*flags |= MLX5_CHAINS_FT_TUNNEL_SUPPORTED;
}
static int
esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *nf_ft, *ft;
	struct mlx5_chains_attr attr = {};
	struct mlx5_fs_chains *chains;
	u32 fdb_max;
	int err;

	fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);

	esw_init_chains_offload_flags(esw, &attr.flags);
	attr.ns = MLX5_FLOW_NAMESPACE_FDB;
	attr.max_ft_sz = fdb_max;
	attr.max_grp_num = esw->params.large_group_num;
	attr.default_ft = miss_fdb;
	attr.mapping = esw->offloads.reg_c0_obj_pool;

	chains = mlx5_chains_create(dev, &attr);
	if (IS_ERR(chains)) {
		err = PTR_ERR(chains);
		esw_warn(dev, "Failed to create fdb chains err(%d)\n", err);
		return err;
	}

	esw->fdb_table.offloads.esw_chains_priv = chains;

	/* Create tc_end_ft, the ft chain that is always created */
	nf_ft = mlx5_chains_get_table(chains, mlx5_chains_get_nf_ft_chain(chains),
				      1, 0);
	if (IS_ERR(nf_ft)) {
		err = PTR_ERR(nf_ft);
		goto nf_ft_err;
	}

	/* Always open the root for fast path */
	ft = mlx5_chains_get_table(chains, 0, 1, 0);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto level_0_err;
	}

	/* Open level 1 for split fdb rules now if prios isn't supported */
	if (!mlx5_chains_prios_supported(chains)) {
		err = esw_vport_tbl_get(esw);
		if (err)
			goto level_1_err;
	}

	mlx5_chains_set_end_ft(chains, nf_ft);

	return 0;

level_1_err:
	mlx5_chains_put_table(chains, 0, 1, 0);
level_0_err:
	mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0);
nf_ft_err:
	mlx5_chains_destroy(chains);
	esw->fdb_table.offloads.esw_chains_priv = NULL;

	return err;
}

static void
esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
{
	if (!mlx5_chains_prios_supported(chains))
		esw_vport_tbl_put(esw);
	mlx5_chains_put_table(chains, 0, 1, 0);
	mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0);
	mlx5_chains_destroy(chains);
}

#else /* CONFIG_MLX5_CLS_ACT */

static int
esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
{ return 0; }

static void
esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
{}

#endif
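
/* Layout of the slow path FDB created below; the group order mirrors the
 * creation sequence in esw_create_offloads_fdb_tables():
 *   - send-to-vport group: total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ entries
 *   - send-to-vport metadata group: one entry per VF, only when source port
 *     rewrite is supported
 *   - peer eswitch miss group: total_vports entries, merged eswitch only
 *   - miss group: the MLX5_ESW_MISS_FLOWS match-all rules
 */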
static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	int num_vfs, table_size, ix, err = 0;
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	u32 flags = 0, *flow_group_in;
	struct mlx5_flow_group *g;
	void *match_criteria;
	u8 *dmac;

	esw_debug(esw->dev, "Create offloads FDB Tables\n");

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}
	esw->fdb_table.offloads.ns = root_ns;
	err = mlx5_flow_namespace_set_mode(root_ns,
					   esw->dev->priv.steering->mode);
	if (err) {
		esw_warn(dev, "Failed to set FDB namespace steering mode\n");
		goto ns_err;
	}

	table_size = esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ +
		     MLX5_ESW_MISS_FLOWS + esw->total_vports + esw->esw_funcs.num_vfs;

	/* create the slow path fdb with encap set, so further table instances
	 * can be created at run time while VFs are probed if the FW allows that.
	 */
	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	ft_attr.flags = flags;
	ft_attr.max_fte = table_size;
	ft_attr.prio = FDB_SLOW_PATH;

	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.slow_fdb = fdb;

	/* Create empty TC-miss managed table. This allows plugging in the
	 * following priorities without directly exposing their level 0 table
	 * to eswitch_offloads and passing it as miss_fdb to the following
	 * call to esw_chains_create().
	 */
	memset(&ft_attr, 0, sizeof(ft_attr));
	ft_attr.prio = FDB_TC_MISS;
	esw->fdb_table.offloads.tc_miss_table = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(esw->fdb_table.offloads.tc_miss_table)) {
		err = PTR_ERR(esw->fdb_table.offloads.tc_miss_table);
		esw_warn(dev, "Failed to create TC miss FDB Table err %d\n", err);
		goto tc_miss_table_err;
	}

	err = esw_chains_create(esw, esw->fdb_table.offloads.tc_miss_table);
	if (err) {
		esw_warn(dev, "Failed to open fdb chains err(%d)\n", err);
		goto fdb_chains_err;
	}

	/* create send-to-vport group */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_eswitch_owner_vhca_id);
		MLX5_SET(create_flow_group_in, flow_group_in,
			 source_eswitch_owner_vhca_id_valid, 1);
	}

	ix = esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ;
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto send_vport_err;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

	if (esw_src_port_rewrite_supported(esw)) {
		/* meta send to vport */
		memset(flow_group_in, 0, inlen);
		MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS_2);

		match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

		MLX5_SET(fte_match_param, match_criteria,
			 misc_parameters_2.metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());
		MLX5_SET(fte_match_param, match_criteria,
			 misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);

		num_vfs = esw->esw_funcs.num_vfs;
		if (num_vfs) {
			MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
			MLX5_SET(create_flow_group_in, flow_group_in,
				 end_flow_index, ix + num_vfs - 1);
			ix += num_vfs;

			g = mlx5_create_flow_group(fdb, flow_group_in);
			if (IS_ERR(g)) {
				err = PTR_ERR(g);
				esw_warn(dev, "Failed to create send-to-vport meta flow group err(%d)\n",
					 err);
				goto send_vport_meta_err;
			}
			esw->fdb_table.offloads.send_to_vport_meta_grp = g;

			err = mlx5_eswitch_add_send_to_vport_meta_rules(esw);
			if (err)
				goto meta_rule_err;
		}
	}

	if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
		/* create peer esw miss group */
		memset(flow_group_in, 0, inlen);

		esw_set_flow_group_source_port(esw, flow_group_in);

		if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
			match_criteria = MLX5_ADDR_OF(create_flow_group_in,
						      flow_group_in,
						      match_criteria);

			MLX5_SET_TO_ONES(fte_match_param, match_criteria,
					 misc_parameters.source_eswitch_owner_vhca_id);

			MLX5_SET(create_flow_group_in, flow_group_in,
				 source_eswitch_owner_vhca_id_valid, 1);
		}

		MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
		MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
			 ix + esw->total_vports - 1);
		ix += esw->total_vports;

		g = mlx5_create_flow_group(fdb, flow_group_in);
		if (IS_ERR(g)) {
			err = PTR_ERR(g);
			esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
			goto peer_miss_err;
		}
		esw->fdb_table.offloads.peer_miss_grp = g;
	}

	/* create miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    outer_headers.dmac_47_16);
	dmac[0] = 0x01;

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ix + MLX5_ESW_MISS_FLOWS);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
		goto miss_err;
	}
	esw->fdb_table.offloads.miss_grp = g;

	err = esw_add_fdb_miss_rule(esw);
	if (err)
		goto miss_rule_err;

	kvfree(flow_group_in);
	return 0;

miss_rule_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
peer_miss_err:
	mlx5_eswitch_del_send_to_vport_meta_rules(esw);
meta_rule_err:
	if (esw->fdb_table.offloads.send_to_vport_meta_grp)
		mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp);
send_vport_meta_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
	esw_chains_destroy(esw, esw_chains(esw));
fdb_chains_err:
	mlx5_destroy_flow_table(esw->fdb_table.offloads.tc_miss_table);
tc_miss_table_err:
	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
slow_fdb_err:
	/* Holds true only as long as DMFS is the default */
	mlx5_flow_namespace_set_mode(root_ns, MLX5_FLOW_STEERING_MODE_DMFS);
ns_err:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.offloads.slow_fdb)
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
mlx5_eswitch_del_send_to_vport_meta_rules(esw); 1816 mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp); 1817 if (esw->fdb_table.offloads.send_to_vport_meta_grp) 1818 mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp); 1819 if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) 1820 mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp); 1821 mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp); 1822 1823 esw_chains_destroy(esw, esw_chains(esw)); 1824 1825 mlx5_destroy_flow_table(esw->fdb_table.offloads.tc_miss_table); 1826 mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb); 1827 /* Holds true only as long as DMFS is the default */ 1828 mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns, 1829 MLX5_FLOW_STEERING_MODE_DMFS); 1830 atomic64_set(&esw->user_count, 0); 1831 } 1832 1833 static int esw_create_offloads_table(struct mlx5_eswitch *esw) 1834 { 1835 struct mlx5_flow_table_attr ft_attr = {}; 1836 struct mlx5_core_dev *dev = esw->dev; 1837 struct mlx5_flow_table *ft_offloads; 1838 struct mlx5_flow_namespace *ns; 1839 int err = 0; 1840 1841 ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS); 1842 if (!ns) { 1843 esw_warn(esw->dev, "Failed to get offloads flow namespace\n"); 1844 return -EOPNOTSUPP; 1845 } 1846 1847 ft_attr.max_fte = esw->total_vports + MLX5_ESW_MISS_FLOWS; 1848 ft_attr.prio = 1; 1849 1850 ft_offloads = mlx5_create_flow_table(ns, &ft_attr); 1851 if (IS_ERR(ft_offloads)) { 1852 err = PTR_ERR(ft_offloads); 1853 esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err); 1854 return err; 1855 } 1856 1857 esw->offloads.ft_offloads = ft_offloads; 1858 return 0; 1859 } 1860 1861 static void esw_destroy_offloads_table(struct mlx5_eswitch *esw) 1862 { 1863 struct mlx5_esw_offload *offloads = &esw->offloads; 1864 1865 mlx5_destroy_flow_table(offloads->ft_offloads); 1866 } 1867 1868 static int esw_create_vport_rx_group(struct mlx5_eswitch *esw) 1869 { 1870 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); 1871 struct mlx5_flow_group *g; 1872 u32 *flow_group_in; 1873 int nvports; 1874 int err = 0; 1875 1876 nvports = esw->total_vports + MLX5_ESW_MISS_FLOWS; 1877 flow_group_in = kvzalloc(inlen, GFP_KERNEL); 1878 if (!flow_group_in) 1879 return -ENOMEM; 1880 1881 /* create vport rx group */ 1882 esw_set_flow_group_source_port(esw, flow_group_in); 1883 1884 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); 1885 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1); 1886 1887 g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in); 1888 1889 if (IS_ERR(g)) { 1890 err = PTR_ERR(g); 1891 mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err); 1892 goto out; 1893 } 1894 1895 esw->offloads.vport_rx_group = g; 1896 out: 1897 kvfree(flow_group_in); 1898 return err; 1899 } 1900 1901 static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw) 1902 { 1903 mlx5_destroy_flow_group(esw->offloads.vport_rx_group); 1904 } 1905 1906 struct mlx5_flow_handle * 1907 mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport, 1908 struct mlx5_flow_destination *dest) 1909 { 1910 struct mlx5_flow_act flow_act = {0}; 1911 struct mlx5_flow_handle *flow_rule; 1912 struct mlx5_flow_spec *spec; 1913 void *misc; 1914 1915 spec = kvzalloc(sizeof(*spec), GFP_KERNEL); 1916 if (!spec) { 1917 flow_rule = ERR_PTR(-ENOMEM); 1918 goto out; 1919 } 1920 1921 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) { 1922 misc = MLX5_ADDR_OF(fte_match_param, 
spec->match_value, misc_parameters_2); 1923 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, 1924 mlx5_eswitch_get_vport_metadata_for_match(esw, vport)); 1925 1926 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2); 1927 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, 1928 mlx5_eswitch_get_vport_metadata_mask()); 1929 1930 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2; 1931 } else { 1932 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); 1933 MLX5_SET(fte_match_set_misc, misc, source_port, vport); 1934 1935 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); 1936 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); 1937 1938 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; 1939 } 1940 1941 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 1942 flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec, 1943 &flow_act, dest, 1); 1944 if (IS_ERR(flow_rule)) { 1945 esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule)); 1946 goto out; 1947 } 1948 1949 out: 1950 kvfree(spec); 1951 return flow_rule; 1952 } 1953 1954 static int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode) 1955 { 1956 u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2; 1957 struct mlx5_core_dev *dev = esw->dev; 1958 struct mlx5_vport *vport; 1959 unsigned long i; 1960 1961 if (!MLX5_CAP_GEN(dev, vport_group_manager)) 1962 return -EOPNOTSUPP; 1963 1964 if (esw->mode == MLX5_ESWITCH_NONE) 1965 return -EOPNOTSUPP; 1966 1967 switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) { 1968 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED: 1969 mlx5_mode = MLX5_INLINE_MODE_NONE; 1970 goto out; 1971 case MLX5_CAP_INLINE_MODE_L2: 1972 mlx5_mode = MLX5_INLINE_MODE_L2; 1973 goto out; 1974 case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT: 1975 goto query_vports; 1976 } 1977 1978 query_vports: 1979 mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode); 1980 mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) { 1981 mlx5_query_nic_vport_min_inline(dev, vport->vport, &mlx5_mode); 1982 if (prev_mlx5_mode != mlx5_mode) 1983 return -EINVAL; 1984 prev_mlx5_mode = mlx5_mode; 1985 } 1986 1987 out: 1988 *mode = mlx5_mode; 1989 return 0; 1990 } 1991 1992 static void esw_destroy_restore_table(struct mlx5_eswitch *esw) 1993 { 1994 struct mlx5_esw_offload *offloads = &esw->offloads; 1995 1996 if (!mlx5_eswitch_reg_c1_loopback_supported(esw)) 1997 return; 1998 1999 mlx5_modify_header_dealloc(esw->dev, offloads->restore_copy_hdr_id); 2000 mlx5_destroy_flow_group(offloads->restore_group); 2001 mlx5_destroy_flow_table(offloads->ft_offloads_restore); 2002 } 2003 2004 static int esw_create_restore_table(struct mlx5_eswitch *esw) 2005 { 2006 u8 modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {}; 2007 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); 2008 struct mlx5_flow_table_attr ft_attr = {}; 2009 struct mlx5_core_dev *dev = esw->dev; 2010 struct mlx5_flow_namespace *ns; 2011 struct mlx5_modify_hdr *mod_hdr; 2012 void *match_criteria, *misc; 2013 struct mlx5_flow_table *ft; 2014 struct mlx5_flow_group *g; 2015 u32 *flow_group_in; 2016 int err = 0; 2017 2018 if (!mlx5_eswitch_reg_c1_loopback_supported(esw)) 2019 return 0; 2020 2021 ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS); 2022 if (!ns) { 2023 esw_warn(esw->dev, "Failed to get offloads flow namespace\n"); 2024 return -EOPNOTSUPP; 2025 } 2026 2027 flow_group_in = kvzalloc(inlen, GFP_KERNEL); 
2028 if (!flow_group_in) { 2029 err = -ENOMEM; 2030 goto out_free; 2031 } 2032 2033 ft_attr.max_fte = 1 << ESW_REG_C0_USER_DATA_METADATA_BITS; 2034 ft = mlx5_create_flow_table(ns, &ft_attr); 2035 if (IS_ERR(ft)) { 2036 err = PTR_ERR(ft); 2037 esw_warn(esw->dev, "Failed to create restore table, err %d\n", 2038 err); 2039 goto out_free; 2040 } 2041 2042 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, 2043 match_criteria); 2044 misc = MLX5_ADDR_OF(fte_match_param, match_criteria, 2045 misc_parameters_2); 2046 2047 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, 2048 ESW_REG_C0_USER_DATA_METADATA_MASK); 2049 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); 2050 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2051 ft_attr.max_fte - 1); 2052 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, 2053 MLX5_MATCH_MISC_PARAMETERS_2); 2054 g = mlx5_create_flow_group(ft, flow_group_in); 2055 if (IS_ERR(g)) { 2056 err = PTR_ERR(g); 2057 esw_warn(dev, "Failed to create restore flow group, err: %d\n", 2058 err); 2059 goto err_group; 2060 } 2061 2062 MLX5_SET(copy_action_in, modact, action_type, MLX5_ACTION_TYPE_COPY); 2063 MLX5_SET(copy_action_in, modact, src_field, 2064 MLX5_ACTION_IN_FIELD_METADATA_REG_C_1); 2065 MLX5_SET(copy_action_in, modact, dst_field, 2066 MLX5_ACTION_IN_FIELD_METADATA_REG_B); 2067 mod_hdr = mlx5_modify_header_alloc(esw->dev, 2068 MLX5_FLOW_NAMESPACE_KERNEL, 1, 2069 modact); 2070 if (IS_ERR(mod_hdr)) { 2071 err = PTR_ERR(mod_hdr); 2072 esw_warn(dev, "Failed to create restore mod header, err: %d\n", 2073 err); 2074 goto err_mod_hdr; 2075 } 2076 2077 esw->offloads.ft_offloads_restore = ft; 2078 esw->offloads.restore_group = g; 2079 esw->offloads.restore_copy_hdr_id = mod_hdr; 2080 2081 kvfree(flow_group_in); 2082 2083 return 0; 2084 2085 err_mod_hdr: 2086 mlx5_destroy_flow_group(g); 2087 err_group: 2088 mlx5_destroy_flow_table(ft); 2089 out_free: 2090 kvfree(flow_group_in); 2091 2092 return err; 2093 } 2094 2095 static int esw_offloads_start(struct mlx5_eswitch *esw, 2096 struct netlink_ext_ack *extack) 2097 { 2098 int err, err1; 2099 2100 mlx5_eswitch_disable_locked(esw, false); 2101 err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS, 2102 esw->dev->priv.sriov.num_vfs); 2103 if (err) { 2104 NL_SET_ERR_MSG_MOD(extack, 2105 "Failed setting eswitch to offloads"); 2106 err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY, 2107 MLX5_ESWITCH_IGNORE_NUM_VFS); 2108 if (err1) { 2109 NL_SET_ERR_MSG_MOD(extack, 2110 "Failed setting eswitch back to legacy"); 2111 } 2112 } 2113 if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) { 2114 if (mlx5_eswitch_inline_mode_get(esw, 2115 &esw->offloads.inline_mode)) { 2116 esw->offloads.inline_mode = MLX5_INLINE_MODE_L2; 2117 NL_SET_ERR_MSG_MOD(extack, 2118 "Inline mode is different between vports"); 2119 } 2120 } 2121 return err; 2122 } 2123 2124 static void mlx5_esw_offloads_rep_mark_set(struct mlx5_eswitch *esw, 2125 struct mlx5_eswitch_rep *rep, 2126 xa_mark_t mark) 2127 { 2128 bool mark_set; 2129 2130 /* Copy the mark from vport to its rep */ 2131 mark_set = xa_get_mark(&esw->vports, rep->vport, mark); 2132 if (mark_set) 2133 xa_set_mark(&esw->offloads.vport_reps, rep->vport, mark); 2134 } 2135 2136 static int mlx5_esw_offloads_rep_init(struct mlx5_eswitch *esw, const struct mlx5_vport *vport) 2137 { 2138 struct mlx5_eswitch_rep *rep; 2139 int rep_type; 2140 int err; 2141 2142 rep = kzalloc(sizeof(*rep), GFP_KERNEL); 2143 if (!rep) 2144 return -ENOMEM; 2145 
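	/* The rep mirrors the vport's identity; every rep type starts out
	 * REP_UNREGISTERED until a driver registers ops for it through
	 * mlx5_eswitch_register_vport_reps().
	 */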
2146 rep->vport = vport->vport; 2147 rep->vport_index = vport->index; 2148 for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) 2149 atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED); 2150 2151 err = xa_insert(&esw->offloads.vport_reps, rep->vport, rep, GFP_KERNEL); 2152 if (err) 2153 goto insert_err; 2154 2155 mlx5_esw_offloads_rep_mark_set(esw, rep, MLX5_ESW_VPT_HOST_FN); 2156 mlx5_esw_offloads_rep_mark_set(esw, rep, MLX5_ESW_VPT_VF); 2157 mlx5_esw_offloads_rep_mark_set(esw, rep, MLX5_ESW_VPT_SF); 2158 return 0; 2159 2160 insert_err: 2161 kfree(rep); 2162 return err; 2163 } 2164 2165 static void mlx5_esw_offloads_rep_cleanup(struct mlx5_eswitch *esw, 2166 struct mlx5_eswitch_rep *rep) 2167 { 2168 xa_erase(&esw->offloads.vport_reps, rep->vport); 2169 kfree(rep); 2170 } 2171 2172 void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw) 2173 { 2174 struct mlx5_eswitch_rep *rep; 2175 unsigned long i; 2176 2177 mlx5_esw_for_each_rep(esw, i, rep) 2178 mlx5_esw_offloads_rep_cleanup(esw, rep); 2179 xa_destroy(&esw->offloads.vport_reps); 2180 } 2181 2182 int esw_offloads_init_reps(struct mlx5_eswitch *esw) 2183 { 2184 struct mlx5_vport *vport; 2185 unsigned long i; 2186 int err; 2187 2188 xa_init(&esw->offloads.vport_reps); 2189 2190 mlx5_esw_for_each_vport(esw, i, vport) { 2191 err = mlx5_esw_offloads_rep_init(esw, vport); 2192 if (err) 2193 goto err; 2194 } 2195 return 0; 2196 2197 err: 2198 esw_offloads_cleanup_reps(esw); 2199 return err; 2200 } 2201 2202 static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw, 2203 struct mlx5_eswitch_rep *rep, u8 rep_type) 2204 { 2205 if (atomic_cmpxchg(&rep->rep_data[rep_type].state, 2206 REP_LOADED, REP_REGISTERED) == REP_LOADED) 2207 esw->offloads.rep_ops[rep_type]->unload(rep); 2208 } 2209 2210 static void __unload_reps_sf_vport(struct mlx5_eswitch *esw, u8 rep_type) 2211 { 2212 struct mlx5_eswitch_rep *rep; 2213 unsigned long i; 2214 2215 mlx5_esw_for_each_sf_rep(esw, i, rep) 2216 __esw_offloads_unload_rep(esw, rep, rep_type); 2217 } 2218 2219 static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type) 2220 { 2221 struct mlx5_eswitch_rep *rep; 2222 unsigned long i; 2223 2224 __unload_reps_sf_vport(esw, rep_type); 2225 2226 mlx5_esw_for_each_vf_rep(esw, i, rep) 2227 __esw_offloads_unload_rep(esw, rep, rep_type); 2228 2229 if (mlx5_ecpf_vport_exists(esw->dev)) { 2230 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF); 2231 __esw_offloads_unload_rep(esw, rep, rep_type); 2232 } 2233 2234 if (mlx5_core_is_ecpf_esw_manager(esw->dev)) { 2235 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF); 2236 __esw_offloads_unload_rep(esw, rep, rep_type); 2237 } 2238 2239 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK); 2240 __esw_offloads_unload_rep(esw, rep, rep_type); 2241 } 2242 2243 int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num) 2244 { 2245 struct mlx5_eswitch_rep *rep; 2246 int rep_type; 2247 int err; 2248 2249 rep = mlx5_eswitch_get_rep(esw, vport_num); 2250 for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) 2251 if (atomic_cmpxchg(&rep->rep_data[rep_type].state, 2252 REP_REGISTERED, REP_LOADED) == REP_REGISTERED) { 2253 err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep); 2254 if (err) 2255 goto err_reps; 2256 } 2257 2258 return 0; 2259 2260 err_reps: 2261 atomic_set(&rep->rep_data[rep_type].state, REP_REGISTERED); 2262 for (--rep_type; rep_type >= 0; rep_type--) 2263 __esw_offloads_unload_rep(esw, rep, rep_type); 2264 return err; 2265 } 2266 2267 void mlx5_esw_offloads_rep_unload(struct 
mlx5_eswitch *esw, u16 vport_num) 2268 { 2269 struct mlx5_eswitch_rep *rep; 2270 int rep_type; 2271 2272 rep = mlx5_eswitch_get_rep(esw, vport_num); 2273 for (rep_type = NUM_REP_TYPES - 1; rep_type >= 0; rep_type--) 2274 __esw_offloads_unload_rep(esw, rep, rep_type); 2275 } 2276 2277 int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num) 2278 { 2279 int err; 2280 2281 if (esw->mode != MLX5_ESWITCH_OFFLOADS) 2282 return 0; 2283 2284 if (vport_num != MLX5_VPORT_UPLINK) { 2285 err = mlx5_esw_offloads_devlink_port_register(esw, vport_num); 2286 if (err) 2287 return err; 2288 } 2289 2290 err = mlx5_esw_offloads_rep_load(esw, vport_num); 2291 if (err) 2292 goto load_err; 2293 return err; 2294 2295 load_err: 2296 if (vport_num != MLX5_VPORT_UPLINK) 2297 mlx5_esw_offloads_devlink_port_unregister(esw, vport_num); 2298 return err; 2299 } 2300 2301 void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num) 2302 { 2303 if (esw->mode != MLX5_ESWITCH_OFFLOADS) 2304 return; 2305 2306 mlx5_esw_offloads_rep_unload(esw, vport_num); 2307 2308 if (vport_num != MLX5_VPORT_UPLINK) 2309 mlx5_esw_offloads_devlink_port_unregister(esw, vport_num); 2310 } 2311 2312 #define ESW_OFFLOADS_DEVCOM_PAIR (0) 2313 #define ESW_OFFLOADS_DEVCOM_UNPAIR (1) 2314 2315 static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw, 2316 struct mlx5_eswitch *peer_esw) 2317 { 2318 2319 return esw_add_fdb_peer_miss_rules(esw, peer_esw->dev); 2320 } 2321 2322 static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw) 2323 { 2324 #if IS_ENABLED(CONFIG_MLX5_CLS_ACT) 2325 mlx5e_tc_clean_fdb_peer_flows(esw); 2326 #endif 2327 esw_del_fdb_peer_miss_rules(esw); 2328 } 2329 2330 static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw, 2331 struct mlx5_eswitch *peer_esw, 2332 bool pair) 2333 { 2334 struct mlx5_flow_root_namespace *peer_ns; 2335 struct mlx5_flow_root_namespace *ns; 2336 int err; 2337 2338 peer_ns = peer_esw->dev->priv.steering->fdb_root_ns; 2339 ns = esw->dev->priv.steering->fdb_root_ns; 2340 2341 if (pair) { 2342 err = mlx5_flow_namespace_set_peer(ns, peer_ns); 2343 if (err) 2344 return err; 2345 2346 err = mlx5_flow_namespace_set_peer(peer_ns, ns); 2347 if (err) { 2348 mlx5_flow_namespace_set_peer(ns, NULL); 2349 return err; 2350 } 2351 } else { 2352 mlx5_flow_namespace_set_peer(ns, NULL); 2353 mlx5_flow_namespace_set_peer(peer_ns, NULL); 2354 } 2355 2356 return 0; 2357 } 2358 2359 static int mlx5_esw_offloads_devcom_event(int event, 2360 void *my_data, 2361 void *event_data) 2362 { 2363 struct mlx5_eswitch *esw = my_data; 2364 struct mlx5_devcom *devcom = esw->dev->priv.devcom; 2365 struct mlx5_eswitch *peer_esw = event_data; 2366 int err; 2367 2368 switch (event) { 2369 case ESW_OFFLOADS_DEVCOM_PAIR: 2370 if (mlx5_eswitch_vport_match_metadata_enabled(esw) != 2371 mlx5_eswitch_vport_match_metadata_enabled(peer_esw)) 2372 break; 2373 2374 err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true); 2375 if (err) 2376 goto err_out; 2377 err = mlx5_esw_offloads_pair(esw, peer_esw); 2378 if (err) 2379 goto err_peer; 2380 2381 err = mlx5_esw_offloads_pair(peer_esw, esw); 2382 if (err) 2383 goto err_pair; 2384 2385 mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true); 2386 break; 2387 2388 case ESW_OFFLOADS_DEVCOM_UNPAIR: 2389 if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS)) 2390 break; 2391 2392 mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false); 2393 mlx5_esw_offloads_unpair(peer_esw); 2394 mlx5_esw_offloads_unpair(esw); 2395 mlx5_esw_offloads_set_ns_peer(esw, 
peer_esw, false); 2396 break; 2397 } 2398 2399 return 0; 2400 2401 err_pair: 2402 mlx5_esw_offloads_unpair(esw); 2403 err_peer: 2404 mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false); 2405 err_out: 2406 mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d", 2407 event, err); 2408 return err; 2409 } 2410 2411 static void esw_offloads_devcom_init(struct mlx5_eswitch *esw) 2412 { 2413 struct mlx5_devcom *devcom = esw->dev->priv.devcom; 2414 2415 INIT_LIST_HEAD(&esw->offloads.peer_flows); 2416 mutex_init(&esw->offloads.peer_mutex); 2417 2418 if (!MLX5_CAP_ESW(esw->dev, merged_eswitch)) 2419 return; 2420 2421 mlx5_devcom_register_component(devcom, 2422 MLX5_DEVCOM_ESW_OFFLOADS, 2423 mlx5_esw_offloads_devcom_event, 2424 esw); 2425 2426 mlx5_devcom_send_event(devcom, 2427 MLX5_DEVCOM_ESW_OFFLOADS, 2428 ESW_OFFLOADS_DEVCOM_PAIR, esw); 2429 } 2430 2431 static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) 2432 { 2433 struct mlx5_devcom *devcom = esw->dev->priv.devcom; 2434 2435 if (!MLX5_CAP_ESW(esw->dev, merged_eswitch)) 2436 return; 2437 2438 mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS, 2439 ESW_OFFLOADS_DEVCOM_UNPAIR, esw); 2440 2441 mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS); 2442 } 2443 2444 bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw) 2445 { 2446 if (!MLX5_CAP_ESW(esw->dev, esw_uplink_ingress_acl)) 2447 return false; 2448 2449 if (!(MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) & 2450 MLX5_FDB_TO_VPORT_REG_C_0)) 2451 return false; 2452 2453 if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source)) 2454 return false; 2455 2456 if (mlx5_core_is_ecpf_esw_manager(esw->dev) || 2457 mlx5_ecpf_vport_exists(esw->dev)) 2458 return false; 2459 2460 return true; 2461 } 2462 2463 u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw) 2464 { 2465 u32 vport_end_ida = (1 << ESW_VPORT_BITS) - 1; 2466 u32 max_pf_num = (1 << ESW_PFNUM_BITS) - 1; 2467 u32 pf_num; 2468 int id; 2469 2470 /* Only 4 bits of pf_num */ 2471 pf_num = PCI_FUNC(esw->dev->pdev->devfn); 2472 if (pf_num > max_pf_num) 2473 return 0; 2474 2475 /* Metadata is 4 bits of PFNUM and 12 bits of unique id */ 2476 /* Use only non-zero vport_id (1-4095) for all PF's */ 2477 id = ida_alloc_range(&esw->offloads.vport_metadata_ida, 1, vport_end_ida, GFP_KERNEL); 2478 if (id < 0) 2479 return 0; 2480 id = (pf_num << ESW_VPORT_BITS) | id; 2481 return id; 2482 } 2483 2484 void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata) 2485 { 2486 u32 vport_bit_mask = (1 << ESW_VPORT_BITS) - 1; 2487 2488 /* Metadata contains only 12 bits of actual ida id */ 2489 ida_free(&esw->offloads.vport_metadata_ida, metadata & vport_bit_mask); 2490 } 2491 2492 static int esw_offloads_vport_metadata_setup(struct mlx5_eswitch *esw, 2493 struct mlx5_vport *vport) 2494 { 2495 vport->default_metadata = mlx5_esw_match_metadata_alloc(esw); 2496 vport->metadata = vport->default_metadata; 2497 return vport->metadata ? 
0 : -ENOSPC; 2498 } 2499 2500 static void esw_offloads_vport_metadata_cleanup(struct mlx5_eswitch *esw, 2501 struct mlx5_vport *vport) 2502 { 2503 if (!vport->default_metadata) 2504 return; 2505 2506 WARN_ON(vport->metadata != vport->default_metadata); 2507 mlx5_esw_match_metadata_free(esw, vport->default_metadata); 2508 } 2509 2510 static void esw_offloads_metadata_uninit(struct mlx5_eswitch *esw) 2511 { 2512 struct mlx5_vport *vport; 2513 unsigned long i; 2514 2515 if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) 2516 return; 2517 2518 mlx5_esw_for_each_vport(esw, i, vport) 2519 esw_offloads_vport_metadata_cleanup(esw, vport); 2520 } 2521 2522 static int esw_offloads_metadata_init(struct mlx5_eswitch *esw) 2523 { 2524 struct mlx5_vport *vport; 2525 unsigned long i; 2526 int err; 2527 2528 if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) 2529 return 0; 2530 2531 mlx5_esw_for_each_vport(esw, i, vport) { 2532 err = esw_offloads_vport_metadata_setup(esw, vport); 2533 if (err) 2534 goto metadata_err; 2535 } 2536 2537 return 0; 2538 2539 metadata_err: 2540 esw_offloads_metadata_uninit(esw); 2541 return err; 2542 } 2543 2544 int mlx5_esw_offloads_vport_metadata_set(struct mlx5_eswitch *esw, bool enable) 2545 { 2546 int err = 0; 2547 2548 down_write(&esw->mode_lock); 2549 if (esw->mode != MLX5_ESWITCH_NONE) { 2550 err = -EBUSY; 2551 goto done; 2552 } 2553 if (!mlx5_esw_vport_match_metadata_supported(esw)) { 2554 err = -EOPNOTSUPP; 2555 goto done; 2556 } 2557 if (enable) 2558 esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA; 2559 else 2560 esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA; 2561 done: 2562 up_write(&esw->mode_lock); 2563 return err; 2564 } 2565 2566 int 2567 esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw, 2568 struct mlx5_vport *vport) 2569 { 2570 int err; 2571 2572 err = esw_acl_ingress_ofld_setup(esw, vport); 2573 if (err) 2574 return err; 2575 2576 err = esw_acl_egress_ofld_setup(esw, vport); 2577 if (err) 2578 goto egress_err; 2579 2580 return 0; 2581 2582 egress_err: 2583 esw_acl_ingress_ofld_cleanup(esw, vport); 2584 return err; 2585 } 2586 2587 void 2588 esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw, 2589 struct mlx5_vport *vport) 2590 { 2591 esw_acl_egress_ofld_cleanup(vport); 2592 esw_acl_ingress_ofld_cleanup(esw, vport); 2593 } 2594 2595 static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw) 2596 { 2597 struct mlx5_vport *vport; 2598 2599 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK); 2600 if (IS_ERR(vport)) 2601 return PTR_ERR(vport); 2602 2603 return esw_vport_create_offloads_acl_tables(esw, vport); 2604 } 2605 2606 static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw) 2607 { 2608 struct mlx5_vport *vport; 2609 2610 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK); 2611 if (IS_ERR(vport)) 2612 return; 2613 2614 esw_vport_destroy_offloads_acl_tables(esw, vport); 2615 } 2616 2617 static int esw_offloads_steering_init(struct mlx5_eswitch *esw) 2618 { 2619 struct mlx5_esw_indir_table *indir; 2620 int err; 2621 2622 memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb)); 2623 mutex_init(&esw->fdb_table.offloads.vports.lock); 2624 hash_init(esw->fdb_table.offloads.vports.table); 2625 atomic64_set(&esw->user_count, 0); 2626 2627 indir = mlx5_esw_indir_table_init(); 2628 if (IS_ERR(indir)) { 2629 err = PTR_ERR(indir); 2630 goto create_indir_err; 2631 } 2632 esw->fdb_table.offloads.indir = indir; 2633 2634 err = esw_create_uplink_offloads_acl_tables(esw); 2635 if (err) 2636 
goto create_acl_err; 2637 2638 err = esw_create_offloads_table(esw); 2639 if (err) 2640 goto create_offloads_err; 2641 2642 err = esw_create_restore_table(esw); 2643 if (err) 2644 goto create_restore_err; 2645 2646 err = esw_create_offloads_fdb_tables(esw); 2647 if (err) 2648 goto create_fdb_err; 2649 2650 err = esw_create_vport_rx_group(esw); 2651 if (err) 2652 goto create_fg_err; 2653 2654 return 0; 2655 2656 create_fg_err: 2657 esw_destroy_offloads_fdb_tables(esw); 2658 create_fdb_err: 2659 esw_destroy_restore_table(esw); 2660 create_restore_err: 2661 esw_destroy_offloads_table(esw); 2662 create_offloads_err: 2663 esw_destroy_uplink_offloads_acl_tables(esw); 2664 create_acl_err: 2665 mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir); 2666 create_indir_err: 2667 mutex_destroy(&esw->fdb_table.offloads.vports.lock); 2668 return err; 2669 } 2670 2671 static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw) 2672 { 2673 esw_destroy_vport_rx_group(esw); 2674 esw_destroy_offloads_fdb_tables(esw); 2675 esw_destroy_restore_table(esw); 2676 esw_destroy_offloads_table(esw); 2677 esw_destroy_uplink_offloads_acl_tables(esw); 2678 mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir); 2679 mutex_destroy(&esw->fdb_table.offloads.vports.lock); 2680 } 2681 2682 static void 2683 esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out) 2684 { 2685 bool host_pf_disabled; 2686 u16 new_num_vfs; 2687 2688 new_num_vfs = MLX5_GET(query_esw_functions_out, out, 2689 host_params_context.host_num_of_vfs); 2690 host_pf_disabled = MLX5_GET(query_esw_functions_out, out, 2691 host_params_context.host_pf_disabled); 2692 2693 if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled) 2694 return; 2695 2696 /* Number of VFs can only change from "0 to x" or "x to 0".
 */ 2697 if (esw->esw_funcs.num_vfs > 0) { 2698 mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs); 2699 } else { 2700 int err; 2701 2702 err = mlx5_eswitch_load_vf_vports(esw, new_num_vfs, 2703 MLX5_VPORT_UC_ADDR_CHANGE); 2704 if (err) 2705 return; 2706 } 2707 esw->esw_funcs.num_vfs = new_num_vfs; 2708 } 2709 2710 static void esw_functions_changed_event_handler(struct work_struct *work) 2711 { 2712 struct mlx5_host_work *host_work; 2713 struct mlx5_eswitch *esw; 2714 const u32 *out; 2715 2716 host_work = container_of(work, struct mlx5_host_work, work); 2717 esw = host_work->esw; 2718 2719 out = mlx5_esw_query_functions(esw->dev); 2720 if (IS_ERR(out)) 2721 goto out; 2722 2723 esw_vfs_changed_event_handler(esw, out); 2724 kvfree(out); 2725 out: 2726 kfree(host_work); 2727 } 2728 2729 int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data) 2730 { 2731 struct mlx5_esw_functions *esw_funcs; 2732 struct mlx5_host_work *host_work; 2733 struct mlx5_eswitch *esw; 2734 2735 host_work = kzalloc(sizeof(*host_work), GFP_ATOMIC); 2736 if (!host_work) 2737 return NOTIFY_DONE; 2738 2739 esw_funcs = mlx5_nb_cof(nb, struct mlx5_esw_functions, nb); 2740 esw = container_of(esw_funcs, struct mlx5_eswitch, esw_funcs); 2741 2742 host_work->esw = esw; 2743 2744 INIT_WORK(&host_work->work, esw_functions_changed_event_handler); 2745 queue_work(esw->work_queue, &host_work->work); 2746 2747 return NOTIFY_OK; 2748 } 2749 2750 static int mlx5_esw_host_number_init(struct mlx5_eswitch *esw) 2751 { 2752 const u32 *query_host_out; 2753 2754 if (!mlx5_core_is_ecpf_esw_manager(esw->dev)) 2755 return 0; 2756 2757 query_host_out = mlx5_esw_query_functions(esw->dev); 2758 if (IS_ERR(query_host_out)) 2759 return PTR_ERR(query_host_out); 2760 2761 /* Mark a non-local controller with a non-zero controller number.
 */ 2762 esw->offloads.host_number = MLX5_GET(query_esw_functions_out, query_host_out, 2763 host_params_context.host_number); 2764 kvfree(query_host_out); 2765 return 0; 2766 } 2767 2768 bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 controller) 2769 { 2770 /* Local controller is always valid */ 2771 if (controller == 0) 2772 return true; 2773 2774 if (!mlx5_core_is_ecpf_esw_manager(esw->dev)) 2775 return false; 2776 2777 /* External host number starts with zero in device */ 2778 return (controller == esw->offloads.host_number + 1); 2779 } 2780 2781 int esw_offloads_enable(struct mlx5_eswitch *esw) 2782 { 2783 struct mapping_ctx *reg_c0_obj_pool; 2784 struct mlx5_vport *vport; 2785 unsigned long i; 2786 int err; 2787 2788 if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) && 2789 MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, decap)) 2790 esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC; 2791 else 2792 esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE; 2793 2794 mutex_init(&esw->offloads.termtbl_mutex); 2795 mlx5_rdma_enable_roce(esw->dev); 2796 2797 err = mlx5_esw_host_number_init(esw); 2798 if (err) 2799 goto err_metadata; 2800 2801 err = esw_offloads_metadata_init(esw); 2802 if (err) 2803 goto err_metadata; 2804 2805 err = esw_set_passing_vport_metadata(esw, true); 2806 if (err) 2807 goto err_vport_metadata; 2808 2809 reg_c0_obj_pool = mapping_create(sizeof(struct mlx5_mapped_obj), 2810 ESW_REG_C0_USER_DATA_METADATA_MASK, 2811 true); 2812 if (IS_ERR(reg_c0_obj_pool)) { 2813 err = PTR_ERR(reg_c0_obj_pool); 2814 goto err_pool; 2815 } 2816 esw->offloads.reg_c0_obj_pool = reg_c0_obj_pool; 2817 2818 err = esw_offloads_steering_init(esw); 2819 if (err) 2820 goto err_steering_init; 2821 2822 /* Representor will control the vport link state */ 2823 mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) 2824 vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN; 2825 2826 /* Uplink vport rep must load first.
*/ 2827 err = esw_offloads_load_rep(esw, MLX5_VPORT_UPLINK); 2828 if (err) 2829 goto err_uplink; 2830 2831 err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE); 2832 if (err) 2833 goto err_vports; 2834 2835 esw_offloads_devcom_init(esw); 2836 2837 return 0; 2838 2839 err_vports: 2840 esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK); 2841 err_uplink: 2842 esw_offloads_steering_cleanup(esw); 2843 err_steering_init: 2844 mapping_destroy(reg_c0_obj_pool); 2845 err_pool: 2846 esw_set_passing_vport_metadata(esw, false); 2847 err_vport_metadata: 2848 esw_offloads_metadata_uninit(esw); 2849 err_metadata: 2850 mlx5_rdma_disable_roce(esw->dev); 2851 mutex_destroy(&esw->offloads.termtbl_mutex); 2852 return err; 2853 } 2854 2855 static int esw_offloads_stop(struct mlx5_eswitch *esw, 2856 struct netlink_ext_ack *extack) 2857 { 2858 int err, err1; 2859 2860 mlx5_eswitch_disable_locked(esw, false); 2861 err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY, 2862 MLX5_ESWITCH_IGNORE_NUM_VFS); 2863 if (err) { 2864 NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy"); 2865 err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS, 2866 MLX5_ESWITCH_IGNORE_NUM_VFS); 2867 if (err1) { 2868 NL_SET_ERR_MSG_MOD(extack, 2869 "Failed setting eswitch back to offloads"); 2870 } 2871 } 2872 2873 return err; 2874 } 2875 2876 void esw_offloads_disable(struct mlx5_eswitch *esw) 2877 { 2878 esw_offloads_devcom_cleanup(esw); 2879 mlx5_eswitch_disable_pf_vf_vports(esw); 2880 esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK); 2881 esw_set_passing_vport_metadata(esw, false); 2882 esw_offloads_steering_cleanup(esw); 2883 mapping_destroy(esw->offloads.reg_c0_obj_pool); 2884 esw_offloads_metadata_uninit(esw); 2885 mlx5_rdma_disable_roce(esw->dev); 2886 mutex_destroy(&esw->offloads.termtbl_mutex); 2887 esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE; 2888 } 2889 2890 static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode) 2891 { 2892 switch (mode) { 2893 case DEVLINK_ESWITCH_MODE_LEGACY: 2894 *mlx5_mode = MLX5_ESWITCH_LEGACY; 2895 break; 2896 case DEVLINK_ESWITCH_MODE_SWITCHDEV: 2897 *mlx5_mode = MLX5_ESWITCH_OFFLOADS; 2898 break; 2899 default: 2900 return -EINVAL; 2901 } 2902 2903 return 0; 2904 } 2905 2906 static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode) 2907 { 2908 switch (mlx5_mode) { 2909 case MLX5_ESWITCH_LEGACY: 2910 *mode = DEVLINK_ESWITCH_MODE_LEGACY; 2911 break; 2912 case MLX5_ESWITCH_OFFLOADS: 2913 *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV; 2914 break; 2915 default: 2916 return -EINVAL; 2917 } 2918 2919 return 0; 2920 } 2921 2922 static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode) 2923 { 2924 switch (mode) { 2925 case DEVLINK_ESWITCH_INLINE_MODE_NONE: 2926 *mlx5_mode = MLX5_INLINE_MODE_NONE; 2927 break; 2928 case DEVLINK_ESWITCH_INLINE_MODE_LINK: 2929 *mlx5_mode = MLX5_INLINE_MODE_L2; 2930 break; 2931 case DEVLINK_ESWITCH_INLINE_MODE_NETWORK: 2932 *mlx5_mode = MLX5_INLINE_MODE_IP; 2933 break; 2934 case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT: 2935 *mlx5_mode = MLX5_INLINE_MODE_TCP_UDP; 2936 break; 2937 default: 2938 return -EINVAL; 2939 } 2940 2941 return 0; 2942 } 2943 2944 static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode) 2945 { 2946 switch (mlx5_mode) { 2947 case MLX5_INLINE_MODE_NONE: 2948 *mode = DEVLINK_ESWITCH_INLINE_MODE_NONE; 2949 break; 2950 case MLX5_INLINE_MODE_L2: 2951 *mode = DEVLINK_ESWITCH_INLINE_MODE_LINK; 2952 break; 2953 case MLX5_INLINE_MODE_IP: 2954 *mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK; 2955 break; 2956 case 
MLX5_INLINE_MODE_TCP_UDP: 2957 *mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT; 2958 break; 2959 default: 2960 return -EINVAL; 2961 } 2962 2963 return 0; 2964 } 2965 2966 static int eswitch_devlink_esw_mode_check(const struct mlx5_eswitch *esw) 2967 { 2968 /* devlink commands in NONE eswitch mode are currently supported only 2969 * on ECPF. 2970 */ 2971 return (esw->mode == MLX5_ESWITCH_NONE && 2972 !mlx5_core_is_ecpf_esw_manager(esw->dev)) ? -EOPNOTSUPP : 0; 2973 } 2974 2975 int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, 2976 struct netlink_ext_ack *extack) 2977 { 2978 u16 cur_mlx5_mode, mlx5_mode = 0; 2979 struct mlx5_eswitch *esw; 2980 int err = 0; 2981 2982 esw = mlx5_devlink_eswitch_get(devlink); 2983 if (IS_ERR(esw)) 2984 return PTR_ERR(esw); 2985 2986 if (esw_mode_from_devlink(mode, &mlx5_mode)) 2987 return -EINVAL; 2988 2989 err = mlx5_esw_try_lock(esw); 2990 if (err < 0) { 2991 NL_SET_ERR_MSG_MOD(extack, "Can't change mode, E-Switch is busy"); 2992 return err; 2993 } 2994 cur_mlx5_mode = err; 2995 err = 0; 2996 2997 if (cur_mlx5_mode == mlx5_mode) 2998 goto unlock; 2999 3000 if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) 3001 err = esw_offloads_start(esw, extack); 3002 else if (mode == DEVLINK_ESWITCH_MODE_LEGACY) 3003 err = esw_offloads_stop(esw, extack); 3004 else 3005 err = -EINVAL; 3006 3007 unlock: 3008 mlx5_esw_unlock(esw); 3009 return err; 3010 } 3011 3012 int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) 3013 { 3014 struct mlx5_eswitch *esw; 3015 int err; 3016 3017 esw = mlx5_devlink_eswitch_get(devlink); 3018 if (IS_ERR(esw)) 3019 return PTR_ERR(esw); 3020 3021 down_write(&esw->mode_lock); 3022 err = eswitch_devlink_esw_mode_check(esw); 3023 if (err) 3024 goto unlock; 3025 3026 err = esw_mode_to_devlink(esw->mode, mode); 3027 unlock: 3028 up_write(&esw->mode_lock); 3029 return err; 3030 } 3031 3032 static int mlx5_esw_vports_inline_set(struct mlx5_eswitch *esw, u8 mlx5_mode, 3033 struct netlink_ext_ack *extack) 3034 { 3035 struct mlx5_core_dev *dev = esw->dev; 3036 struct mlx5_vport *vport; 3037 u16 err_vport_num = 0; 3038 unsigned long i; 3039 int err = 0; 3040 3041 mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) { 3042 err = mlx5_modify_nic_vport_min_inline(dev, vport->vport, mlx5_mode); 3043 if (err) { 3044 err_vport_num = vport->vport; 3045 NL_SET_ERR_MSG_MOD(extack, 3046 "Failed to set min inline on vport"); 3047 goto revert_inline_mode; 3048 } 3049 } 3050 return 0; 3051 3052 revert_inline_mode: 3053 mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) { 3054 if (vport->vport == err_vport_num) 3055 break; 3056 mlx5_modify_nic_vport_min_inline(dev, 3057 vport->vport, 3058 esw->offloads.inline_mode); 3059 } 3060 return err; 3061 } 3062 3063 int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode, 3064 struct netlink_ext_ack *extack) 3065 { 3066 struct mlx5_core_dev *dev = devlink_priv(devlink); 3067 struct mlx5_eswitch *esw; 3068 u8 mlx5_mode; 3069 int err; 3070 3071 esw = mlx5_devlink_eswitch_get(devlink); 3072 if (IS_ERR(esw)) 3073 return PTR_ERR(esw); 3074 3075 down_write(&esw->mode_lock); 3076 err = eswitch_devlink_esw_mode_check(esw); 3077 if (err) 3078 goto out; 3079 3080 switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) { 3081 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED: 3082 if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE) 3083 goto out; 3084 fallthrough; 3085 case MLX5_CAP_INLINE_MODE_L2: 3086 NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set"); 3087 err = -EOPNOTSUPP; 
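		/* The device pins the inline mode (not required, or L2);
		 * it cannot be overridden per vport, so reject the request.
		 */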
3088 goto out; 3089 case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT: 3090 break; 3091 } 3092 3093 if (atomic64_read(&esw->offloads.num_flows) > 0) { 3094 NL_SET_ERR_MSG_MOD(extack, 3095 "Can't set inline mode when flows are configured"); 3096 err = -EOPNOTSUPP; 3097 goto out; 3098 } 3099 3100 err = esw_inline_mode_from_devlink(mode, &mlx5_mode); 3101 if (err) 3102 goto out; 3103 3104 err = mlx5_esw_vports_inline_set(esw, mlx5_mode, extack); 3105 if (err) 3106 goto out; 3107 3108 esw->offloads.inline_mode = mlx5_mode; 3109 up_write(&esw->mode_lock); 3110 return 0; 3111 3112 out: 3113 up_write(&esw->mode_lock); 3114 return err; 3115 } 3116 3117 int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode) 3118 { 3119 struct mlx5_eswitch *esw; 3120 int err; 3121 3122 esw = mlx5_devlink_eswitch_get(devlink); 3123 if (IS_ERR(esw)) 3124 return PTR_ERR(esw); 3125 3126 down_write(&esw->mode_lock); 3127 err = eswitch_devlink_esw_mode_check(esw); 3128 if (err) 3129 goto unlock; 3130 3131 err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode); 3132 unlock: 3133 up_write(&esw->mode_lock); 3134 return err; 3135 } 3136 3137 int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, 3138 enum devlink_eswitch_encap_mode encap, 3139 struct netlink_ext_ack *extack) 3140 { 3141 struct mlx5_core_dev *dev = devlink_priv(devlink); 3142 struct mlx5_eswitch *esw; 3143 int err; 3144 3145 esw = mlx5_devlink_eswitch_get(devlink); 3146 if (IS_ERR(esw)) 3147 return PTR_ERR(esw); 3148 3149 down_write(&esw->mode_lock); 3150 err = eswitch_devlink_esw_mode_check(esw); 3151 if (err) 3152 goto unlock; 3153 3154 if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE && 3155 (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) || 3156 !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))) { 3157 err = -EOPNOTSUPP; 3158 goto unlock; 3159 } 3160 3161 if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC) { 3162 err = -EOPNOTSUPP; 3163 goto unlock; 3164 } 3165 3166 if (esw->mode == MLX5_ESWITCH_LEGACY) { 3167 esw->offloads.encap = encap; 3168 goto unlock; 3169 } 3170 3171 if (esw->offloads.encap == encap) 3172 goto unlock; 3173 3174 if (atomic64_read(&esw->offloads.num_flows) > 0) { 3175 NL_SET_ERR_MSG_MOD(extack, 3176 "Can't set encapsulation when flows are configured"); 3177 err = -EOPNOTSUPP; 3178 goto unlock; 3179 } 3180 3181 esw_destroy_offloads_fdb_tables(esw); 3182 3183 esw->offloads.encap = encap; 3184 3185 err = esw_create_offloads_fdb_tables(esw); 3186 3187 if (err) { 3188 NL_SET_ERR_MSG_MOD(extack, 3189 "Failed re-creating fast FDB table"); 3190 esw->offloads.encap = !encap; 3191 (void)esw_create_offloads_fdb_tables(esw); 3192 } 3193 3194 unlock: 3195 up_write(&esw->mode_lock); 3196 return err; 3197 } 3198 3199 int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, 3200 enum devlink_eswitch_encap_mode *encap) 3201 { 3202 struct mlx5_eswitch *esw; 3203 int err; 3204 3205 esw = mlx5_devlink_eswitch_get(devlink); 3206 if (IS_ERR(esw)) 3207 return PTR_ERR(esw); 3208 3209 3210 down_write(&esw->mode_lock); 3211 err = eswitch_devlink_esw_mode_check(esw); 3212 if (err) 3213 goto unlock; 3214 3215 *encap = esw->offloads.encap; 3216 unlock: 3217 up_write(&esw->mode_lock); 3218 return err; 3219 } 3220 3221 static bool 3222 mlx5_eswitch_vport_has_rep(const struct mlx5_eswitch *esw, u16 vport_num) 3223 { 3224 /* Currently, only an ECPF-based device has a representor for the host PF.
*/ 3225 if (vport_num == MLX5_VPORT_PF && 3226 !mlx5_core_is_ecpf_esw_manager(esw->dev)) 3227 return false; 3228 3229 if (vport_num == MLX5_VPORT_ECPF && 3230 !mlx5_ecpf_vport_exists(esw->dev)) 3231 return false; 3232 3233 return true; 3234 } 3235 3236 void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw, 3237 const struct mlx5_eswitch_rep_ops *ops, 3238 u8 rep_type) 3239 { 3240 struct mlx5_eswitch_rep_data *rep_data; 3241 struct mlx5_eswitch_rep *rep; 3242 unsigned long i; 3243 3244 esw->offloads.rep_ops[rep_type] = ops; 3245 mlx5_esw_for_each_rep(esw, i, rep) { 3246 if (likely(mlx5_eswitch_vport_has_rep(esw, rep->vport))) { 3247 rep->esw = esw; 3248 rep_data = &rep->rep_data[rep_type]; 3249 atomic_set(&rep_data->state, REP_REGISTERED); 3250 } 3251 } 3252 } 3253 EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps); 3254 3255 void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type) 3256 { 3257 struct mlx5_eswitch_rep *rep; 3258 unsigned long i; 3259 3260 if (esw->mode == MLX5_ESWITCH_OFFLOADS) 3261 __unload_reps_all_vport(esw, rep_type); 3262 3263 mlx5_esw_for_each_rep(esw, i, rep) 3264 atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED); 3265 } 3266 EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps); 3267 3268 void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type) 3269 { 3270 struct mlx5_eswitch_rep *rep; 3271 3272 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK); 3273 return rep->rep_data[rep_type].priv; 3274 } 3275 3276 void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw, 3277 u16 vport, 3278 u8 rep_type) 3279 { 3280 struct mlx5_eswitch_rep *rep; 3281 3282 rep = mlx5_eswitch_get_rep(esw, vport); 3283 3284 if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED && 3285 esw->offloads.rep_ops[rep_type]->get_proto_dev) 3286 return esw->offloads.rep_ops[rep_type]->get_proto_dev(rep); 3287 return NULL; 3288 } 3289 EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev); 3290 3291 void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type) 3292 { 3293 return mlx5_eswitch_get_proto_dev(esw, MLX5_VPORT_UPLINK, rep_type); 3294 } 3295 EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev); 3296 3297 struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw, 3298 u16 vport) 3299 { 3300 return mlx5_eswitch_get_rep(esw, vport); 3301 } 3302 EXPORT_SYMBOL(mlx5_eswitch_vport_rep); 3303 3304 bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw) 3305 { 3306 return !!(esw->flags & MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED); 3307 } 3308 EXPORT_SYMBOL(mlx5_eswitch_reg_c1_loopback_enabled); 3309 3310 bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw) 3311 { 3312 return !!(esw->flags & MLX5_ESWITCH_VPORT_MATCH_METADATA); 3313 } 3314 EXPORT_SYMBOL(mlx5_eswitch_vport_match_metadata_enabled); 3315 3316 u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw, 3317 u16 vport_num) 3318 { 3319 struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num); 3320 3321 if (WARN_ON_ONCE(IS_ERR(vport))) 3322 return 0; 3323 3324 return vport->metadata << (32 - ESW_SOURCE_PORT_METADATA_BITS); 3325 } 3326 EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match); 3327 3328 int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port, 3329 u16 vport_num, u32 controller, u32 sfnum) 3330 { 3331 int err; 3332 3333 err = mlx5_esw_vport_enable(esw, vport_num, MLX5_VPORT_UC_ADDR_CHANGE); 3334 if (err) 3335 return err; 3336 3337 err = 
mlx5_esw_devlink_sf_port_register(esw, dl_port, vport_num, controller, sfnum); 3338 if (err) 3339 goto devlink_err; 3340 3341 err = mlx5_esw_offloads_rep_load(esw, vport_num); 3342 if (err) 3343 goto rep_err; 3344 return 0; 3345 3346 rep_err: 3347 mlx5_esw_devlink_sf_port_unregister(esw, vport_num); 3348 devlink_err: 3349 mlx5_esw_vport_disable(esw, vport_num); 3350 return err; 3351 } 3352 3353 void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num) 3354 { 3355 mlx5_esw_offloads_rep_unload(esw, vport_num); 3356 mlx5_esw_devlink_sf_port_unregister(esw, vport_num); 3357 mlx5_esw_vport_disable(esw, vport_num); 3358 } 3359 3360 static int mlx5_esw_query_vport_vhca_id(struct mlx5_eswitch *esw, u16 vport_num, u16 *vhca_id) 3361 { 3362 int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out); 3363 void *query_ctx; 3364 void *hca_caps; 3365 int err; 3366 3367 *vhca_id = 0; 3368 if (mlx5_esw_is_manager_vport(esw, vport_num) || 3369 !MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) 3370 return -EPERM; 3371 3372 query_ctx = kzalloc(query_out_sz, GFP_KERNEL); 3373 if (!query_ctx) 3374 return -ENOMEM; 3375 3376 err = mlx5_vport_get_other_func_cap(esw->dev, vport_num, query_ctx); 3377 if (err) 3378 goto out_free; 3379 3380 hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability); 3381 *vhca_id = MLX5_GET(cmd_hca_cap, hca_caps, vhca_id); 3382 3383 out_free: 3384 kfree(query_ctx); 3385 return err; 3386 } 3387 3388 int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num) 3389 { 3390 u16 *old_entry, *vhca_map_entry, vhca_id; 3391 int err; 3392 3393 err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id); 3394 if (err) { 3395 esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%u,err=%d)\n", 3396 vport_num, err); 3397 return err; 3398 } 3399 3400 vhca_map_entry = kmalloc(sizeof(*vhca_map_entry), GFP_KERNEL); 3401 if (!vhca_map_entry) 3402 return -ENOMEM; 3403 3404 *vhca_map_entry = vport_num; 3405 old_entry = xa_store(&esw->offloads.vhca_map, vhca_id, vhca_map_entry, GFP_KERNEL); 3406 if (xa_is_err(old_entry)) { 3407 kfree(vhca_map_entry); 3408 return xa_err(old_entry); 3409 } 3410 kfree(old_entry); 3411 return 0; 3412 } 3413 3414 void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num) 3415 { 3416 u16 *vhca_map_entry, vhca_id; 3417 int err; 3418 3419 err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id); 3420 if (err) 3421 esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%hu,err=%d)\n", 3422 vport_num, err); 3423 3424 vhca_map_entry = xa_erase(&esw->offloads.vhca_map, vhca_id); 3425 kfree(vhca_map_entry); 3426 } 3427 3428 int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num) 3429 { 3430 u16 *res = xa_load(&esw->offloads.vhca_map, vhca_id); 3431 3432 if (!res) 3433 return -ENOENT; 3434 3435 *vport_num = *res; 3436 return 0; 3437 } 3438 3439 u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw, 3440 u16 vport_num) 3441 { 3442 struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num); 3443 3444 if (WARN_ON_ONCE(IS_ERR(vport))) 3445 return 0; 3446 3447 return vport->metadata; 3448 } 3449 EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_set); 3450
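/* Illustrative sketch only, not part of the driver: a hypothetical helper
 * showing how the match metadata produced above is laid out, assuming the
 * scheme used by mlx5_esw_match_metadata_alloc() (ESW_PFNUM_BITS of PF
 * number above ESW_VPORT_BITS of per-PF id) and the shift applied by
 * mlx5_eswitch_get_vport_metadata_for_match(), which places the value in
 * the upper ESW_SOURCE_PORT_METADATA_BITS of reg_c_0.
 */
static inline u32 esw_example_metadata_for_match(u32 pf_num, u32 id)
{
	/* 4 bits of PF number, then 12 bits of per-PF unique id */
	u32 metadata = (pf_num << ESW_VPORT_BITS) | id;

	/* shift the 16-bit value into the high bits of reg_c_0;
	 * e.g. pf_num 0, id 1 yields 0x00010000
	 */
	return metadata << (32 - ESW_SOURCE_PORT_METADATA_BITS);
}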