/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/idr.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
#include "esw/indir_table.h"
#include "esw/acl/ofld.h"
#include "rdma.h"
#include "en.h"
#include "fs_core.h"
#include "lib/devcom.h"
#include "lib/eq.h"
#include "lib/fs_chains.h"
#include "en_tc.h"
#include "en/mapping.h"
#include "devlink.h"
#include "lag/lag.h"

#define mlx5_esw_for_each_rep(esw, i, rep) \
	xa_for_each(&((esw)->offloads.vport_reps), i, rep)

#define mlx5_esw_for_each_sf_rep(esw, i, rep) \
	xa_for_each_marked(&((esw)->offloads.vport_reps), i, rep, MLX5_ESW_VPT_SF)

#define mlx5_esw_for_each_vf_rep(esw, index, rep) \
	mlx5_esw_for_each_entry_marked(&((esw)->offloads.vport_reps), index, \
				       rep, (esw)->esw_funcs.num_vfs, MLX5_ESW_VPT_VF)

/* There are two match-all miss flows, one for unicast dst mac and
 * one for multicast.
 */
#define MLX5_ESW_MISS_FLOWS (2)
#define UPLINK_REP_INDEX 0

#define MLX5_ESW_VPORT_TBL_SIZE 128
#define MLX5_ESW_VPORT_TBL_NUM_GROUPS 4

#define MLX5_ESW_FT_OFFLOADS_DROP_RULE (1)

static const struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_mirror_ns = {
	.max_fte = MLX5_ESW_VPORT_TBL_SIZE,
	.max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS,
	.flags = 0,
};

static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
						     u16 vport_num)
{
	return xa_load(&esw->offloads.vport_reps, vport_num);
}

static void
mlx5_eswitch_set_rule_flow_source(struct mlx5_eswitch *esw,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_esw_flow_attr *attr)
{
	if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) || !attr || !attr->in_rep)
		return;

	if (attr->int_port) {
		spec->flow_context.flow_source = mlx5e_tc_int_port_get_flow_source(attr->int_port);

		return;
	}

	spec->flow_context.flow_source = (attr->in_rep->vport == MLX5_VPORT_UPLINK) ?
					 MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK :
					 MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
}

/* Only the upper 16 bits of reg c0 actually need to be cleared, but the lower
 * 16 bits are not used by the subsequent processing either, so clear the
 * whole register for simplicity.
 */
void
mlx5_eswitch_clear_rule_source_port(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec)
{
	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		void *misc2;

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, 0);

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, 0);

		if (!memchr_inv(misc2, 0, MLX5_ST_SZ_BYTES(fte_match_set_misc2)))
			spec->match_criteria_enable &= ~MLX5_MATCH_MISC_PARAMETERS_2;
	}
}

static void
mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_flow_attr *attr,
				  struct mlx5_eswitch *src_esw,
				  u16 vport)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	u32 metadata;
	void *misc2;
	void *misc;

	/* Use metadata matching because vport is not represented by single
	 * VHCA in dual-port RoCE mode, and matching on source vport may fail.
	 */
	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		if (mlx5_esw_indir_table_decap_vport(attr))
			vport = mlx5_esw_indir_table_decap_vport(attr);

		if (attr && !attr->chain && esw_attr->int_port)
			metadata =
				mlx5e_tc_int_port_get_metadata_for_match(esw_attr->int_port);
		else
			metadata =
				mlx5_eswitch_get_vport_metadata_for_match(src_esw, vport);

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, metadata);

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);

		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id,
				 MLX5_CAP_GEN(src_esw->dev, vhca_id));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET_TO_ONES(fte_match_set_misc, misc,
					 source_eswitch_owner_vhca_id);

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
	}
}

static int
esw_setup_decap_indir(struct mlx5_eswitch *esw,
		      struct mlx5_flow_attr *attr,
		      struct mlx5_flow_spec *spec)
{
	struct mlx5_flow_table *ft;

	if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE))
		return -EOPNOTSUPP;

	ft = mlx5_esw_indir_table_get(esw, attr, spec,
				      mlx5_esw_indir_table_decap_vport(attr), true);
	return PTR_ERR_OR_ZERO(ft);
}

static void
esw_cleanup_decap_indir(struct mlx5_eswitch *esw,
			struct mlx5_flow_attr *attr)
{
	if (mlx5_esw_indir_table_decap_vport(attr))
		mlx5_esw_indir_table_put(esw, attr,
					 mlx5_esw_indir_table_decap_vport(attr),
					 true);
}

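/* Point dest[i] at the flow sampler object identified by @sampler_id.
 * Sampler destinations sit outside the chain/prio table hierarchy, so the
 * rule must ignore flow level checks.
 */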
static int
esw_setup_sampler_dest(struct mlx5_flow_destination *dest,
		       struct mlx5_flow_act *flow_act,
		       u32 sampler_id,
		       int i)
{
	flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
	dest[i].sampler_id = sampler_id;

	return 0;
}

static int
esw_setup_ft_dest(struct mlx5_flow_destination *dest,
		  struct mlx5_flow_act *flow_act,
		  struct mlx5_eswitch *esw,
		  struct mlx5_flow_attr *attr,
		  struct mlx5_flow_spec *spec,
		  int i)
{
	flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = attr->dest_ft;

	if (mlx5_esw_indir_table_decap_vport(attr))
		return esw_setup_decap_indir(esw, attr, spec);
	return 0;
}

static void
esw_setup_accept_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
		      struct mlx5_fs_chains *chains, int i)
{
	if (mlx5_chains_ignore_flow_level_supported(chains))
		flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = mlx5_chains_get_tc_end_ft(chains);
}

static void
esw_setup_slow_path_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
			 struct mlx5_eswitch *esw, int i)
{
	if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level))
		flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = esw->fdb_table.offloads.slow_fdb;
}

static int
esw_setup_chain_dest(struct mlx5_flow_destination *dest,
		     struct mlx5_flow_act *flow_act,
		     struct mlx5_fs_chains *chains,
		     u32 chain, u32 prio, u32 level,
		     int i)
{
	struct mlx5_flow_table *ft;

	flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	ft = mlx5_chains_get_table(chains, chain, prio, level);
	if (IS_ERR(ft))
		return PTR_ERR(ft);

	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = ft;
	return 0;
}

static void esw_put_dest_tables_loop(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr,
				     int from, int to)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	int i;

	for (i = from; i < to; i++)
		if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
			mlx5_chains_put_table(chains, 0, 1, 0);
		else if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
						     esw_attr->dests[i].mdev))
			mlx5_esw_indir_table_put(esw, attr, esw_attr->dests[i].rep->vport,
						 false);
}

static bool
esw_is_chain_src_port_rewrite(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr)
{
	int i;

	for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
		if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
			return true;
	return false;
}

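/* Set up a destination chain for a rule whose source port metadata is
 * rewritten on the way there, attaching the pending pkt_reformat if one is
 * configured for the destination.
 */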
static int
esw_setup_chain_src_port_rewrite(struct mlx5_flow_destination *dest,
				 struct mlx5_flow_act *flow_act,
				 struct mlx5_eswitch *esw,
				 struct mlx5_fs_chains *chains,
				 struct mlx5_flow_attr *attr,
				 int *i)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	int err;

	if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE))
		return -EOPNOTSUPP;

	/* flow steering cannot handle more than one dest with the same ft
	 * in a single flow
	 */
	if (esw_attr->out_count - esw_attr->split_count > 1)
		return -EOPNOTSUPP;

	err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain, 1, 0, *i);
	if (err)
		return err;

	if (esw_attr->dests[esw_attr->split_count].pkt_reformat) {
		flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
		flow_act->pkt_reformat = esw_attr->dests[esw_attr->split_count].pkt_reformat;
	}
	(*i)++;

	return 0;
}

static void esw_cleanup_chain_src_port_rewrite(struct mlx5_eswitch *esw,
					       struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;

	esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, esw_attr->out_count);
}

static bool
esw_is_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	bool result = false;
	int i;

	/* An indirect table is supported only for flows whose in_port is the
	 * uplink and whose destination is a vport on the same eswitch as the
	 * uplink; return false if at least one destination doesn't meet this
	 * criterion.
	 */
	for (i = esw_attr->split_count; i < esw_attr->out_count; i++) {
		if (esw_attr->dests[i].rep &&
		    mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
						esw_attr->dests[i].mdev)) {
			result = true;
		} else {
			result = false;
			break;
		}
	}
	return result;
}

static int
esw_setup_indir_table(struct mlx5_flow_destination *dest,
		      struct mlx5_flow_act *flow_act,
		      struct mlx5_eswitch *esw,
		      struct mlx5_flow_attr *attr,
		      struct mlx5_flow_spec *spec,
		      bool ignore_flow_lvl,
		      int *i)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	int j, err;

	if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE))
		return -EOPNOTSUPP;

	for (j = esw_attr->split_count; j < esw_attr->out_count; j++, (*i)++) {
		if (ignore_flow_lvl)
			flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
		dest[*i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;

		dest[*i].ft = mlx5_esw_indir_table_get(esw, attr, spec,
						       esw_attr->dests[j].rep->vport, false);
		if (IS_ERR(dest[*i].ft)) {
			err = PTR_ERR(dest[*i].ft);
			goto err_indir_tbl_get;
		}
	}

	if (mlx5_esw_indir_table_decap_vport(attr)) {
		err = esw_setup_decap_indir(esw, attr, spec);
		if (err)
			goto err_indir_tbl_get;
	}

	return 0;

err_indir_tbl_get:
	esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, j);
	return err;
}

static void esw_cleanup_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;

	esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, esw_attr->out_count);
	esw_cleanup_decap_indir(esw, attr);
}

static void
esw_cleanup_chain_dest(struct mlx5_fs_chains *chains, u32 chain, u32 prio, u32 level)
{
	mlx5_chains_put_table(chains, chain, prio, level);
}

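/* Fill dest[dest_idx] with a vport destination taken from
 * esw_attr->dests[attr_idx], including the destination VHCA id on merged
 * eswitches and an optional encap reformat handle.
 */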
static void
esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
		     struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
		     int attr_idx, int dest_idx, bool pkt_reformat)
{
	dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest[dest_idx].vport.num = esw_attr->dests[attr_idx].rep->vport;
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
		dest[dest_idx].vport.vhca_id =
			MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
		dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
		if (dest[dest_idx].vport.num == MLX5_VPORT_UPLINK &&
		    mlx5_lag_mpesw_is_activated(esw->dev))
			dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
	}
	if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP) {
		if (pkt_reformat) {
			flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
			flow_act->pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
		}
		dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
		dest[dest_idx].vport.pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
	}
}

static int
esw_setup_vport_dests(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
		      struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
		      int i)
{
	int j;

	for (j = esw_attr->split_count; j < esw_attr->out_count; j++, i++)
		esw_setup_vport_dest(dest, flow_act, esw, esw_attr, j, i, true);
	return i;
}

static bool
esw_src_port_rewrite_supported(struct mlx5_eswitch *esw)
{
	return MLX5_CAP_GEN(esw->dev, reg_c_preserve) &&
	       mlx5_eswitch_vport_match_metadata_enabled(esw) &&
	       MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level);
}

static int
esw_setup_dests(struct mlx5_flow_destination *dest,
		struct mlx5_flow_act *flow_act,
		struct mlx5_eswitch *esw,
		struct mlx5_flow_attr *attr,
		struct mlx5_flow_spec *spec,
		int *i)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	int err = 0;

	if (!mlx5_eswitch_termtbl_required(esw, attr, flow_act, spec) &&
	    esw_src_port_rewrite_supported(esw))
		attr->flags |= MLX5_ATTR_FLAG_SRC_REWRITE;

	if (attr->flags & MLX5_ATTR_FLAG_SAMPLE &&
	    !(attr->flags & MLX5_ATTR_FLAG_SLOW_PATH)) {
		esw_setup_sampler_dest(dest, flow_act, attr->sample_attr.sampler_id, *i);
		(*i)++;
	} else if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH) {
		esw_setup_slow_path_dest(dest, flow_act, esw, *i);
		(*i)++;
	} else if (attr->flags & MLX5_ATTR_FLAG_ACCEPT) {
		esw_setup_accept_dest(dest, flow_act, chains, *i);
		(*i)++;
	} else if (esw_is_indir_table(esw, attr)) {
		err = esw_setup_indir_table(dest, flow_act, esw, attr, spec, true, i);
	} else if (esw_is_chain_src_port_rewrite(esw, esw_attr)) {
		err = esw_setup_chain_src_port_rewrite(dest, flow_act, esw, chains, attr, i);
	} else {
		*i = esw_setup_vport_dests(dest, flow_act, esw, esw_attr, *i);

		if (attr->dest_ft) {
			err = esw_setup_ft_dest(dest, flow_act, esw, attr, spec, *i);
			(*i)++;
		} else if (attr->dest_chain) {
			err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain,
						   1, 0, *i);
			(*i)++;
		}
	}

	return err;
}

static void
esw_cleanup_dests(struct mlx5_eswitch *esw,
		  struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);

	if (attr->dest_ft) {
		esw_cleanup_decap_indir(esw, attr);
	} else if (!mlx5e_tc_attr_flags_skip(attr->flags)) {
		if (attr->dest_chain)
			esw_cleanup_chain_dest(chains, attr->dest_chain, 1, 0);
		else if (esw_is_indir_table(esw, attr))
			esw_cleanup_indir_table(esw, attr);
		else if (esw_is_chain_src_port_rewrite(esw, esw_attr))
			esw_cleanup_chain_src_port_rewrite(esw, attr);
	}
}

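/* Translate the flow meter attached to @attr into an ASO execute action.
 * The meter starts in the green state and returns the packet color in
 * metadata register 5.
 */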
static void
esw_setup_meter(struct mlx5_flow_attr *attr, struct mlx5_flow_act *flow_act)
{
	struct mlx5e_flow_meter_handle *meter;

	meter = attr->meter_attr.meter;
	flow_act->exe_aso.type = attr->exe_aso_type;
	flow_act->exe_aso.object_id = meter->obj_id;
	flow_act->exe_aso.flow_meter.meter_idx = meter->idx;
	flow_act->exe_aso.flow_meter.init_color = MLX5_FLOW_METER_COLOR_GREEN;
	/* use metadata reg 5 for packet color */
	flow_act->exe_aso.return_reg_id = 5;
}

struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_flow_attr *attr)
{
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	bool split = !!(esw_attr->split_count);
	struct mlx5_vport_tbl_attr fwd_attr;
	struct mlx5_flow_destination *dest;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *fdb;
	int i = 0;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	dest = kcalloc(MLX5_MAX_FLOW_FWD_VPORTS + 1, sizeof(*dest), GFP_KERNEL);
	if (!dest)
		return ERR_PTR(-ENOMEM);

	flow_act.action = attr->action;
	/* if per flow vlan pop/push is emulated, don't set that into the firmware */
	if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
				     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
		flow_act.vlan[0].ethtype = ntohs(esw_attr->vlan_proto[0]);
		flow_act.vlan[0].vid = esw_attr->vlan_vid[0];
		flow_act.vlan[0].prio = esw_attr->vlan_prio[0];
		if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
			flow_act.vlan[1].ethtype = ntohs(esw_attr->vlan_proto[1]);
			flow_act.vlan[1].vid = esw_attr->vlan_vid[1];
			flow_act.vlan[1].prio = esw_attr->vlan_prio[1];
		}
	}

	mlx5_eswitch_set_rule_flow_source(esw, spec, esw_attr);

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		int err;

		err = esw_setup_dests(dest, &flow_act, esw, attr, spec, &i);
		if (err) {
			rule = ERR_PTR(err);
			goto err_create_goto_table;
		}
	}

	if (esw_attr->decap_pkt_reformat)
		flow_act.pkt_reformat = esw_attr->decap_pkt_reformat;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter_id = mlx5_fc_id(attr->counter);
		i++;
	}

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
	if (attr->inner_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_hdr = attr->modify_hdr;

	if ((flow_act.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) &&
	    attr->exe_aso_type == MLX5_EXE_ASO_FLOW_METER)
		esw_setup_meter(attr, &flow_act);

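	/* Split (mirror) rules are steered through the per-vport mirror
	 * table; everything else goes to the chain/prio table (or the
	 * caller-supplied table) and also matches on the rule's source port
	 * unless MLX5_ATTR_FLAG_NO_IN_PORT is set.
	 */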
	if (split) {
		fwd_attr.chain = attr->chain;
		fwd_attr.prio = attr->prio;
		fwd_attr.vport = esw_attr->in_rep->vport;
		fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;

		fdb = mlx5_esw_vporttbl_get(esw, &fwd_attr);
	} else {
		if (attr->chain || attr->prio)
			fdb = mlx5_chains_get_table(chains, attr->chain,
						    attr->prio, 0);
		else
			fdb = attr->ft;

		if (!(attr->flags & MLX5_ATTR_FLAG_NO_IN_PORT))
			mlx5_eswitch_set_rule_source_port(esw, spec, attr,
							  esw_attr->in_mdev->priv.eswitch,
							  esw_attr->in_rep->vport);
	}
	if (IS_ERR(fdb)) {
		rule = ERR_CAST(fdb);
		goto err_esw_get;
	}

	if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec))
		rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, esw_attr,
						     &flow_act, dest, i);
	else
		rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
	if (IS_ERR(rule))
		goto err_add_rule;
	else
		atomic64_inc(&esw->offloads.num_flows);

	kfree(dest);
	return rule;

err_add_rule:
	if (split)
		mlx5_esw_vporttbl_put(esw, &fwd_attr);
	else if (attr->chain || attr->prio)
		mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
err_esw_get:
	esw_cleanup_dests(esw, attr);
err_create_goto_table:
	kfree(dest);
	return rule;
}

struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_flow_attr *attr)
{
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	struct mlx5_vport_tbl_attr fwd_attr;
	struct mlx5_flow_destination *dest;
	struct mlx5_flow_table *fast_fdb;
	struct mlx5_flow_table *fwd_fdb;
	struct mlx5_flow_handle *rule;
	int i, err = 0;

	dest = kcalloc(MLX5_MAX_FLOW_FWD_VPORTS + 1, sizeof(*dest), GFP_KERNEL);
	if (!dest)
		return ERR_PTR(-ENOMEM);

	fast_fdb = mlx5_chains_get_table(chains, attr->chain, attr->prio, 0);
	if (IS_ERR(fast_fdb)) {
		rule = ERR_CAST(fast_fdb);
		goto err_get_fast;
	}

	fwd_attr.chain = attr->chain;
	fwd_attr.prio = attr->prio;
	fwd_attr.vport = esw_attr->in_rep->vport;
	fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
	fwd_fdb = mlx5_esw_vporttbl_get(esw, &fwd_attr);
	if (IS_ERR(fwd_fdb)) {
		rule = ERR_CAST(fwd_fdb);
		goto err_get_fwd;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	for (i = 0; i < esw_attr->split_count; i++) {
		if (esw_is_indir_table(esw, attr))
			err = esw_setup_indir_table(dest, &flow_act, esw, attr, spec, false, &i);
		else if (esw_is_chain_src_port_rewrite(esw, esw_attr))
			err = esw_setup_chain_src_port_rewrite(dest, &flow_act, esw, chains, attr,
							       &i);
		else
			esw_setup_vport_dest(dest, &flow_act, esw, esw_attr, i, i, false);

		if (err) {
			rule = ERR_PTR(err);
			goto err_chain_src_rewrite;
		}
	}
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = fwd_fdb;
	i++;

	mlx5_eswitch_set_rule_source_port(esw, spec, attr,
					  esw_attr->in_mdev->priv.eswitch,
					  esw_attr->in_rep->vport);

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);

	if (IS_ERR(rule)) {
		i = esw_attr->split_count;
		goto err_chain_src_rewrite;
	}

	atomic64_inc(&esw->offloads.num_flows);

	kfree(dest);
	return rule;
err_chain_src_rewrite:
	esw_put_dest_tables_loop(esw, attr, 0, i);
	mlx5_esw_vporttbl_put(esw, &fwd_attr);
err_get_fwd:
	mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
err_get_fast:
	kfree(dest);
	return rule;
}

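/* Common teardown for offloaded and fwd rules: delete the rule, release any
 * termination tables, and drop the references taken on the vport table,
 * chain tables and indirect/decap tables when the rule was added.
 */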
static void
__mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
			struct mlx5_flow_handle *rule,
			struct mlx5_flow_attr *attr,
			bool fwd_rule)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	bool split = (esw_attr->split_count > 0);
	struct mlx5_vport_tbl_attr fwd_attr;
	int i;

	mlx5_del_flow_rules(rule);

	if (!mlx5e_tc_attr_flags_skip(attr->flags)) {
		/* unref the term table */
		for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
			if (esw_attr->dests[i].termtbl)
				mlx5_eswitch_termtbl_put(esw, esw_attr->dests[i].termtbl);
		}
	}

	atomic64_dec(&esw->offloads.num_flows);

	if (fwd_rule || split) {
		fwd_attr.chain = attr->chain;
		fwd_attr.prio = attr->prio;
		fwd_attr.vport = esw_attr->in_rep->vport;
		fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
	}

	if (fwd_rule) {
		mlx5_esw_vporttbl_put(esw, &fwd_attr);
		mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
		esw_put_dest_tables_loop(esw, attr, 0, esw_attr->split_count);
	} else {
		if (split)
			mlx5_esw_vporttbl_put(esw, &fwd_attr);
		else if (attr->chain || attr->prio)
			mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
		esw_cleanup_dests(esw, attr);
	}
}

void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, false);
}

void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, true);
}

static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
	struct mlx5_eswitch_rep *rep;
	unsigned long i;
	int err = 0;

	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
	mlx5_esw_for_each_host_func_vport(esw, i, rep, esw->esw_funcs.num_vfs) {
		if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
			continue;

		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
		if (err)
			goto out;
	}

out:
	return err;
}

static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

	in_rep = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push)
		vport = in_rep;
	else if (pop)
		vport = out_rep;
	else
		vport = in_rep;

	return vport;
}

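/* Validate that the requested emulated vlan push/pop combination can be
 * offloaded for the given in/out reps; returns -EOPNOTSUPP otherwise.
 */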
static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
				     bool push, bool pop, bool fwd)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep;

	if ((push || pop) && !fwd)
		goto out_notsupp;

	in_rep = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push && in_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	if (pop && out_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	/* the vport has vlan push configured; we can't offload VF --> wire
	 * rules without it
	 */
	if (!push && !pop && fwd)
		if (in_rep->vlan && out_rep->vport == MLX5_VPORT_UPLINK)
			goto out_notsupp;

	/* protects against (1) setting rules with different vlans to push and
	 * (2) setting rules without vlans (attr->vlan == 0) alongside rules
	 * with vlans to push (!= 0)
	 */
	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0]))
		goto out_notsupp;

	return 0;

out_notsupp:
	return -EOPNOTSUPP;
}

int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* no-op if vlan push/pop is supported natively and not emulated */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
		 !attr->dest_chain);

	mutex_lock(&esw->state_lock);

	err = esw_add_vlan_action_check(esw_attr, push, pop, fwd);
	if (err)
		goto unlock;

	attr->flags &= ~MLX5_ATTR_FLAG_VLAN_HANDLED;

	vport = esw_vlan_action_get_vport(esw_attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (esw_attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
			vport->vlan_refcount++;
			attr->flags |= MLX5_ATTR_FLAG_VLAN_HANDLED;
		}

		goto unlock;
	}

	if (!push && !pop)
		goto unlock;

	if (!(offloads->vlan_push_pop_refcount)) {
		/* it's the 1st vlan rule, apply global vlan pop policy */
		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
		if (err)
			goto out;
	}
	offloads->vlan_push_pop_refcount++;

	if (push) {
		if (vport->vlan_refcount)
			goto skip_set_push;

		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, esw_attr->vlan_vid[0],
						    0, SET_VLAN_INSERT | SET_VLAN_STRIP);
		if (err)
			goto out;
		vport->vlan = esw_attr->vlan_vid[0];
skip_set_push:
		vport->vlan_refcount++;
	}
out:
	if (!err)
		attr->flags |= MLX5_ATTR_FLAG_VLAN_HANDLED;
unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}

int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* no-op if vlan push/pop is supported natively and not emulated */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	if (!(attr->flags & MLX5_ATTR_FLAG_VLAN_HANDLED))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	mutex_lock(&esw->state_lock);

	vport = esw_vlan_action_get_vport(esw_attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (esw_attr->dests[0].rep->vport == MLX5_VPORT_UPLINK)
			vport->vlan_refcount--;

		goto out;
	}

	if (push) {
		vport->vlan_refcount--;
		if (vport->vlan_refcount)
			goto skip_unset_push;

		vport->vlan = 0;
		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
						    0, 0, SET_VLAN_STRIP);
		if (err)
			goto out;
	}

skip_unset_push:
	offloads->vlan_push_pop_refcount--;
	if (offloads->vlan_push_pop_refcount)
		goto out;

	/* no more vlan rules, stop global vlan pop policy */
	err = esw_set_global_vlan_pop(esw, 0);

out:
	mutex_unlock(&esw->state_lock);
	return err;
}

struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
				    struct mlx5_eswitch *from_esw,
				    struct mlx5_eswitch_rep *rep,
				    u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	/* source vport is the esw manager */
	MLX5_SET(fte_match_set_misc, misc, source_port, from_esw->manager_vport);
	if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch))
		MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(from_esw->dev, vhca_id));

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
	if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch))
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = rep->vport;
	dest.vport.vhca_id = MLX5_CAP_GEN(rep->esw->dev, vhca_id);
	dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	if (rep->vport == MLX5_VPORT_UPLINK)
		spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;

	flow_rule = mlx5_add_flow_rules(on_esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(on_esw->dev, "FDB: Failed to add send to vport rule err %ld\n",
			 PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}
EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);

void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
	mlx5_del_flow_rules(rule);
}

void mlx5_eswitch_del_send_to_vport_meta_rule(struct mlx5_flow_handle *rule)
{
	if (rule)
		mlx5_del_flow_rules(rule);
}

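/* Install a slow path rule that matches the vport metadata in reg c0 and the
 * slow-table goto-vport mark in reg c1, and forwards such packets back to
 * the given vport.
 */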
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_meta_rule(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(fte_match_param, spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
	MLX5_SET(fte_match_param, spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
	MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_1,
		 ESW_TUN_SLOW_TABLE_GOTO_VPORT_MARK);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));
	dest.vport.num = vport_num;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev, "FDB: Failed to add send to vport meta rule vport %d, err %ld\n",
			 vport_num, PTR_ERR(flow_rule));

	kvfree(spec);
	return flow_rule;
}

static bool mlx5_eswitch_reg_c1_loopback_supported(struct mlx5_eswitch *esw)
{
	return MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
	       MLX5_FDB_TO_VPORT_REG_C_1;
}

static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
{
	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
	u32 min[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
	u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
	u8 curr, wanted;
	int err;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw) &&
	    !mlx5_eswitch_vport_match_metadata_enabled(esw))
		return 0;

	MLX5_SET(query_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
	err = mlx5_cmd_exec_inout(esw->dev, query_esw_vport_context, in, out);
	if (err)
		return err;

	curr = MLX5_GET(query_esw_vport_context_out, out,
			esw_vport_context.fdb_to_vport_reg_c_id);
	wanted = MLX5_FDB_TO_VPORT_REG_C_0;
	if (mlx5_eswitch_reg_c1_loopback_supported(esw))
		wanted |= MLX5_FDB_TO_VPORT_REG_C_1;

	if (enable)
		curr |= wanted;
	else
		curr &= ~wanted;

	MLX5_SET(modify_esw_vport_context_in, min,
		 esw_vport_context.fdb_to_vport_reg_c_id, curr);
	MLX5_SET(modify_esw_vport_context_in, min,
		 field_select.fdb_to_vport_reg_c_id, 1);

	err = mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false, min);
	if (!err) {
		if (enable && (curr & MLX5_FDB_TO_VPORT_REG_C_1))
			esw->flags |= MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
		else
			esw->flags &= ~MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
	}

	return err;
}

static void peer_miss_rules_setup(struct mlx5_eswitch *esw,
				  struct mlx5_core_dev *peer_dev,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_flow_destination *dest)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

		MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(peer_dev, vhca_id));

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);
	}

	dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest->vport.num = peer_dev->priv.eswitch->manager_vport;
	dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
	dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
}

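/* Set the source-port half of a peer miss rule match: the peer's vport
 * metadata in reg c0 when metadata matching is enabled, the raw source port
 * otherwise.
 */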
static void esw_set_peer_miss_rule_source_port(struct mlx5_eswitch *esw,
					       struct mlx5_eswitch *peer_esw,
					       struct mlx5_flow_spec *spec,
					       u16 vport)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(peer_esw,
								   vport));
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);
	}
}

static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
				       struct mlx5_core_dev *peer_dev)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle **flows;
	/* total vports is the same for both e-switches */
	int nvports = esw->total_vports;
	struct mlx5_flow_handle *flow;
	struct mlx5_flow_spec *spec;
	struct mlx5_vport *vport;
	unsigned long i;
	void *misc;
	int err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	peer_miss_rules_setup(esw, peer_dev, spec, &dest);

	flows = kvcalloc(nvports, sizeof(*flows), GFP_KERNEL);
	if (!flows) {
		err = -ENOMEM;
		goto alloc_flows_err;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
		esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
						   spec, MLX5_VPORT_PF);

		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_pf_flow_err;
		}
		flows[vport->index] = flow;
	}

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
		MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_ecpf_flow_err;
		}
		flows[vport->index] = flow;
	}

	mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
		esw_set_peer_miss_rule_source_port(esw,
						   peer_dev->priv.eswitch,
						   spec, vport->vport);

		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_vf_flow_err;
		}
		flows[vport->index] = flow;
	}

	esw->fdb_table.offloads.peer_miss_rules = flows;

	kvfree(spec);
	return 0;

add_vf_flow_err:
	mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
		if (!flows[vport->index])
			continue;
		mlx5_del_flow_rules(flows[vport->index]);
	}
	if (mlx5_ecpf_vport_exists(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
		mlx5_del_flow_rules(flows[vport->index]);
	}
add_ecpf_flow_err:
	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
		mlx5_del_flow_rules(flows[vport->index]);
	}
add_pf_flow_err:
	esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
	kvfree(flows);
alloc_flows_err:
	kvfree(spec);
	return err;
}

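/* Remove the peer miss rules: VF rules first, then ECPF and PF. */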
static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_handle **flows;
	struct mlx5_vport *vport;
	unsigned long i;

	flows = esw->fdb_table.offloads.peer_miss_rules;

	mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev))
		mlx5_del_flow_rules(flows[vport->index]);

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
		mlx5_del_flow_rules(flows[vport->index]);
	}

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
		mlx5_del_flow_rules(flows[vport->index]);
	}
	kvfree(flows);
}

static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	void *headers_c;
	void *headers_v;
	int err = 0;
	u8 *dmac_c;
	u8 *dmac_v;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				 outer_headers);
	dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
			      outer_headers.dmac_47_16);
	dmac_c[0] = 0x01;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = esw->manager_vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_uni = flow_rule;

	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				 outer_headers);
	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
			      outer_headers.dmac_47_16);
	dmac_v[0] = 0x01;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
		mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_multi = flow_rule;

out:
	kvfree(spec);
	return err;
}

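/* Add a rule to the restore table that matches @tag in the reg c0 user data
 * bits, copies it back via the restore modify header, sets it as the flow
 * tag and forwards the packet to the offloads table.
 */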
struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
{
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_flow_table *ft = esw->offloads.ft_offloads_restore;
	struct mlx5_flow_context *flow_context;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_destination dest;
	struct mlx5_flow_spec *spec;
	void *misc;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return ERR_PTR(-EOPNOTSUPP);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    misc_parameters_2);
	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
		 ESW_REG_C0_USER_DATA_METADATA_MASK);
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters_2);
	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, tag);
	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			  MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	flow_act.modify_hdr = esw->offloads.restore_copy_hdr_id;

	flow_context = &spec->flow_context;
	flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
	flow_context->flow_tag = tag;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = esw->offloads.ft_offloads;

	flow_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	kvfree(spec);

	if (IS_ERR(flow_rule))
		esw_warn(esw->dev,
			 "Failed to create restore rule for tag: %d, err(%d)\n",
			 tag, (int)PTR_ERR(flow_rule));

	return flow_rule;
}

#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32

static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
					   u32 *flow_group_in)
{
	void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					    flow_group_in,
					    match_criteria);

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS_2);

		MLX5_SET(fte_match_param, match_criteria,
			 misc_parameters_2.metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());
	} else {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_port);
	}
}

#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
static void esw_vport_tbl_put(struct mlx5_eswitch *esw)
{
	struct mlx5_vport_tbl_attr attr;
	struct mlx5_vport *vport;
	unsigned long i;

	attr.chain = 0;
	attr.prio = 1;
	mlx5_esw_for_each_vport(esw, i, vport) {
		attr.vport = vport->vport;
		attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
		mlx5_esw_vporttbl_put(esw, &attr);
	}
}

static int esw_vport_tbl_get(struct mlx5_eswitch *esw)
{
	struct mlx5_vport_tbl_attr attr;
	struct mlx5_flow_table *fdb;
	struct mlx5_vport *vport;
	unsigned long i;

	attr.chain = 0;
	attr.prio = 1;
	mlx5_esw_for_each_vport(esw, i, vport) {
		attr.vport = vport->vport;
		attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
		fdb = mlx5_esw_vporttbl_get(esw, &attr);
		if (IS_ERR(fdb))
			goto out;
	}
	return 0;

out:
	esw_vport_tbl_put(esw);
	return PTR_ERR(fdb);
}

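/* Work out which fs_chains features can be enabled: chains/prios offload
 * requires multi FDB encap (when encap is enabled), reg c1 loopback and FW
 * support for modify header followed by forward to table; tunnel flow
 * tables are advertised whenever encap is enabled.
 */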
#define fdb_modify_header_fwd_to_table_supported(esw) \
	(MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_modify_header_fwd_to_table))
static void esw_init_chains_offload_flags(struct mlx5_eswitch *esw, u32 *flags)
{
	struct mlx5_core_dev *dev = esw->dev;

	if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ignore_flow_level))
		*flags |= MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;

	if (!MLX5_CAP_ESW_FLOWTABLE(dev, multi_fdb_encap) &&
	    esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
		*flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
		esw_warn(dev, "Tc chains and priorities offload aren't supported, update firmware if needed\n");
	} else if (!mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
		*flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
		esw_warn(dev, "Tc chains and priorities offload aren't supported\n");
	} else if (!fdb_modify_header_fwd_to_table_supported(esw)) {
		/* Disabled when the ttl workaround is needed, e.g. when
		 * ESWITCH_IPV4_TTL_MODIFY_ENABLE = true in mlxconfig
		 */
		esw_warn(dev,
			 "Tc chains and priorities offload aren't supported, check firmware version, or mlxconfig settings\n");
		*flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
	} else {
		*flags |= MLX5_CHAINS_AND_PRIOS_SUPPORTED;
		esw_info(dev, "Supported tc chains and prios offload\n");
	}

	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		*flags |= MLX5_CHAINS_FT_TUNNEL_SUPPORTED;
}

static int
esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *nf_ft, *ft;
	struct mlx5_chains_attr attr = {};
	struct mlx5_fs_chains *chains;
	u32 fdb_max;
	int err;

	fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);

	esw_init_chains_offload_flags(esw, &attr.flags);
	attr.ns = MLX5_FLOW_NAMESPACE_FDB;
	attr.max_ft_sz = fdb_max;
	attr.max_grp_num = esw->params.large_group_num;
	attr.default_ft = miss_fdb;
	attr.mapping = esw->offloads.reg_c0_obj_pool;

	chains = mlx5_chains_create(dev, &attr);
	if (IS_ERR(chains)) {
		err = PTR_ERR(chains);
		esw_warn(dev, "Failed to create fdb chains err(%d)\n", err);
		return err;
	}

	esw->fdb_table.offloads.esw_chains_priv = chains;

	/* Create tc_end_ft, the chain that is always created */
	nf_ft = mlx5_chains_get_table(chains, mlx5_chains_get_nf_ft_chain(chains),
				      1, 0);
	if (IS_ERR(nf_ft)) {
		err = PTR_ERR(nf_ft);
		goto nf_ft_err;
	}

	/* Always open the root for fast path */
	ft = mlx5_chains_get_table(chains, 0, 1, 0);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto level_0_err;
	}

	/* Open level 1 for split fdb rules now if prios isn't supported */
	if (!mlx5_chains_prios_supported(chains)) {
		err = esw_vport_tbl_get(esw);
		if (err)
			goto level_1_err;
	}

	mlx5_chains_set_end_ft(chains, nf_ft);

	return 0;

level_1_err:
	mlx5_chains_put_table(chains, 0, 1, 0);
level_0_err:
	mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0);
nf_ft_err:
	mlx5_chains_destroy(chains);
	esw->fdb_table.offloads.esw_chains_priv = NULL;

	return err;
}

static void
esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
{
	if (!mlx5_chains_prios_supported(chains))
		esw_vport_tbl_put(esw);
	mlx5_chains_put_table(chains, 0, 1, 0);
	mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0);
	mlx5_chains_destroy(chains);
}

#else /* CONFIG_MLX5_CLS_ACT */

static int
esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
{ return 0; }

static void
esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
{}

#endif

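/* Create the flow group for send-to-vport rules, matching on source SQN and
 * source port (plus the owner VHCA id on merged eswitches). Its size covers
 * MAX_SQ_NVPORTS SQs per vport plus MAX_PF_SQ PF SQs, for both ports.
 */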
static int
esw_create_send_to_vport_group(struct mlx5_eswitch *esw,
			       struct mlx5_flow_table *fdb,
			       u32 *flow_group_in,
			       int *ix)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	void *match_criteria;
	int count, err = 0;

	memset(flow_group_in, 0, inlen);

	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_eswitch_owner_vhca_id);
		MLX5_SET(create_flow_group_in, flow_group_in,
			 source_eswitch_owner_vhca_id_valid, 1);
	}

	/* See comment at table_size calculation */
	count = MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, *ix + count - 1);
	*ix += count;

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(esw->dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

out:
	return err;
}

static int
esw_create_meta_send_to_vport_group(struct mlx5_eswitch *esw,
				    struct mlx5_flow_table *fdb,
				    u32 *flow_group_in,
				    int *ix)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	void *match_criteria;
	int err = 0;

	if (!esw_src_port_rewrite_supported(esw))
		return 0;

	memset(flow_group_in, 0, inlen);

	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS_2);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET(fte_match_param, match_criteria,
		 misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_mask());
	MLX5_SET(fte_match_param, match_criteria,
		 misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, *ix);
	MLX5_SET(create_flow_group_in, flow_group_in,
		 end_flow_index, *ix + esw->total_vports - 1);
	*ix += esw->total_vports;

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(esw->dev,
			 "Failed to create send-to-vport meta flow group err(%d)\n", err);
		goto send_vport_meta_err;
	}
	esw->fdb_table.offloads.send_to_vport_meta_grp = g;

	return 0;

send_vport_meta_err:
	return err;
}

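/* Create the flow group for peer eswitch miss rules (merged eswitch only):
 * one entry per vport, matched by source port or vport metadata and, when
 * metadata matching is disabled, the peer's owner VHCA id.
 */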
static int
esw_create_peer_esw_miss_group(struct mlx5_eswitch *esw,
			       struct mlx5_flow_table *fdb,
			       u32 *flow_group_in,
			       int *ix)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	void *match_criteria;
	int err = 0;

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return 0;

	memset(flow_group_in, 0, inlen);

	esw_set_flow_group_source_port(esw, flow_group_in);

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					      flow_group_in,
					      match_criteria);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_eswitch_owner_vhca_id);

		MLX5_SET(create_flow_group_in, flow_group_in,
			 source_eswitch_owner_vhca_id_valid, 1);
	}

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, *ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 *ix + esw->total_vports - 1);
	*ix += esw->total_vports;

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(esw->dev, "Failed to create peer miss flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.offloads.peer_miss_grp = g;

out:
	return err;
}

static int
esw_create_miss_group(struct mlx5_eswitch *esw,
		      struct mlx5_flow_table *fdb,
		      u32 *flow_group_in,
		      int *ix)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	void *match_criteria;
	int err = 0;
	u8 *dmac;

	memset(flow_group_in, 0, inlen);

	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    outer_headers.dmac_47_16);
	dmac[0] = 0x01;

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, *ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 *ix + MLX5_ESW_MISS_FLOWS);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(esw->dev, "Failed to create miss flow group err(%d)\n", err);
		goto miss_err;
	}
	esw->fdb_table.offloads.miss_grp = g;

	err = esw_add_fdb_miss_rule(esw);
	if (err)
		goto miss_rule_err;

	return 0;

miss_rule_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
	return err;
}

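/* Create the slow path FDB table and its flow groups (send-to-vport,
 * send-to-vport metadata, peer miss and miss groups), the TC miss table and
 * the fdb chains that hang off it.
 */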
static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	int table_size, ix = 0, err = 0;
	u32 flags = 0, *flow_group_in;

	esw_debug(esw->dev, "Create offloads FDB Tables\n");

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}
	esw->fdb_table.offloads.ns = root_ns;
	err = mlx5_flow_namespace_set_mode(root_ns,
					   esw->dev->priv.steering->mode);
	if (err) {
		esw_warn(dev, "Failed to set FDB namespace steering mode\n");
		goto ns_err;
	}

	/* To be strictly correct:
	 *	MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ)
	 * should be:
	 *	esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ +
	 *	peer_esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ
	 * but as the peer device might not be in switchdev mode that is not
	 * possible to know here. We rely on the fact that by default FW sets
	 * max vfs and max sfs to the same value on both devices. If this needs
	 * to change in the future, note that the peer miss group should also
	 * be created based on the number of total vports of the peer
	 * (currently it also uses esw->total_vports).
	 */
	table_size = MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ) +
		     esw->total_vports * 2 + MLX5_ESW_MISS_FLOWS;

	/* create the slow path fdb with encap set, so further table instances
	 * can be created at run time while VFs are probed if the FW allows that.
	 */
	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	ft_attr.flags = flags;
	ft_attr.max_fte = table_size;
	ft_attr.prio = FDB_SLOW_PATH;

	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.slow_fdb = fdb;

	/* Create empty TC-miss managed table. This allows plugging in following
	 * priorities without directly exposing their level 0 table to
	 * eswitch_offloads and passing it as miss_fdb to following call to
	 * esw_chains_create().
	 */
	memset(&ft_attr, 0, sizeof(ft_attr));
	ft_attr.prio = FDB_TC_MISS;
	esw->fdb_table.offloads.tc_miss_table = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(esw->fdb_table.offloads.tc_miss_table)) {
		err = PTR_ERR(esw->fdb_table.offloads.tc_miss_table);
		esw_warn(dev, "Failed to create TC miss FDB Table err %d\n", err);
		goto tc_miss_table_err;
	}

	err = esw_chains_create(esw, esw->fdb_table.offloads.tc_miss_table);
	if (err) {
		esw_warn(dev, "Failed to open fdb chains err(%d)\n", err);
		goto fdb_chains_err;
	}

	err = esw_create_send_to_vport_group(esw, fdb, flow_group_in, &ix);
	if (err)
		goto send_vport_err;

	err = esw_create_meta_send_to_vport_group(esw, fdb, flow_group_in, &ix);
	if (err)
		goto send_vport_meta_err;

	err = esw_create_peer_esw_miss_group(esw, fdb, flow_group_in, &ix);
	if (err)
		goto peer_miss_err;

	err = esw_create_miss_group(esw, fdb, flow_group_in, &ix);
	if (err)
		goto miss_err;

	kvfree(flow_group_in);
	return 0;

miss_err:
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
peer_miss_err:
	if (esw->fdb_table.offloads.send_to_vport_meta_grp)
		mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp);
send_vport_meta_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
	esw_chains_destroy(esw, esw_chains(esw));
fdb_chains_err:
	mlx5_destroy_flow_table(esw->fdb_table.offloads.tc_miss_table);
tc_miss_table_err:
	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
slow_fdb_err:
	/* Holds true only as long as DMFS is the default */
	mlx5_flow_namespace_set_mode(root_ns, MLX5_FLOW_STEERING_MODE_DMFS);
ns_err:
	kvfree(flow_group_in);
	return err;
}
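/* Worked example of the table_size formula above, with hypothetical
 * values (MLX5_MAX_PORTS = 2 assumed; the real MAX_SQ_NVPORTS/MAX_PF_SQ
 * values live in the driver headers and may differ). Illustrative only:
 */
#if 0
#include <stdio.h>

int main(void)
{
	int max_ports = 2, total_vports = 10;
	int max_sq_nvports = 32, max_pf_sq = 256;	/* assumed for illustration */
	int miss_flows = 2;
	int table_size;

	table_size = max_ports * (total_vports * max_sq_nvports + max_pf_sq) +
		     total_vports * 2 +	/* meta send-to-vport + peer miss groups */
		     miss_flows;

	printf("slow path FDB max_fte = %d\n", table_size);
	return 0;
}
#endif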
static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.offloads.slow_fdb)
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	if (esw->fdb_table.offloads.send_to_vport_meta_grp)
		mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp);
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

	esw_chains_destroy(esw, esw_chains(esw));

	mlx5_destroy_flow_table(esw->fdb_table.offloads.tc_miss_table);
	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
	/* Holds true only as long as DMFS is the default */
	mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
				     MLX5_FLOW_STEERING_MODE_DMFS);
	atomic64_set(&esw->user_count, 0);
}

static int esw_get_nr_ft_offloads_steering_src_ports(struct mlx5_eswitch *esw)
{
	int nvports;

	nvports = esw->total_vports + MLX5_ESW_MISS_FLOWS;
	if (mlx5e_tc_int_port_supported(esw))
		nvports += MLX5E_TC_MAX_INT_PORT_NUM;

	return nvports;
}

static int esw_create_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_namespace *ns;
	int err = 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.max_fte = esw_get_nr_ft_offloads_steering_src_ports(esw) +
			  MLX5_ESW_FT_OFFLOADS_DROP_RULE;
	ft_attr.prio = 1;

	ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft_offloads)) {
		err = PTR_ERR(ft_offloads);
		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
		return err;
	}

	esw->offloads.ft_offloads = ft_offloads;
	return 0;
}

static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	mlx5_destroy_flow_table(offloads->ft_offloads);
}

static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	int nvports;
	int err = 0;

	nvports = esw_get_nr_ft_offloads_steering_src_ports(esw);
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	/* create vport rx group */
	esw_set_flow_group_source_port(esw, flow_group_in);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);

	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_group = g;
out:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}
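/* ft_offloads layout implied by the sizing above: one RX group entry per
 * steering source (all vports, the two miss flows, and optionally the
 * internal ports), followed by a single trailing drop entry. A sketch of
 * the index math with assumed example counts (illustrative only):
 */
#if 0
#include <stdio.h>

int main(void)
{
	int total_vports = 10, miss_flows = 2, int_ports = 32;	/* assumed */
	int nvports = total_vports + miss_flows + int_ports;

	printf("vport rx group: FTEs [0, %d]\n", nvports - 1);
	printf("drop group/rule: FTE %d\n", nvports);
	printf("ft_offloads max_fte = %d\n", nvports + 1);
	return 0;
}
#endif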
static int esw_create_vport_rx_drop_rule_index(struct mlx5_eswitch *esw)
{
	/* ft_offloads table is enlarged by MLX5_ESW_FT_OFFLOADS_DROP_RULE (1)
	 * for the drop rule, which is placed at the end of the table.
	 * So return the total number of vport and int_port entries as the
	 * index of the drop rule.
	 */
	return esw_get_nr_ft_offloads_steering_src_ports(esw);
}

static int esw_create_vport_rx_drop_group(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	int flow_index;
	int err = 0;

	flow_index = esw_create_vport_rx_drop_rule_index(esw);

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);

	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx drop group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_drop_group = g;
out:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_vport_rx_drop_group(struct mlx5_eswitch *esw)
{
	if (esw->offloads.vport_rx_drop_group)
		mlx5_destroy_flow_group(esw->offloads.vport_rx_drop_group);
}

struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
				  struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(esw, vport));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
					&flow_act, dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
		goto out;
	}

out:
	kvfree(spec);
	return flow_rule;
}

static int esw_create_vport_rx_drop_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *flow_rule;

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, NULL,
					&flow_act, NULL, 0);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev,
			 "fs offloads: Failed to add vport rx drop rule err %ld\n",
			 PTR_ERR(flow_rule));
		return PTR_ERR(flow_rule);
	}

	esw->offloads.vport_rx_drop_rule = flow_rule;

	return 0;
}

static void esw_destroy_vport_rx_drop_rule(struct mlx5_eswitch *esw)
{
	if (esw->offloads.vport_rx_drop_rule)
		mlx5_del_flow_rules(esw->offloads.vport_rx_drop_rule);
}

static int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode)
{
	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_vport *vport;
	unsigned long i;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (!mlx5_esw_is_fdb_created(esw))
		return -EOPNOTSUPP;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		mlx5_mode = MLX5_INLINE_MODE_NONE;
		goto out;
	case MLX5_CAP_INLINE_MODE_L2:
		mlx5_mode = MLX5_INLINE_MODE_L2;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		goto query_vports;
	}

query_vports:
	mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode);
	mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
		mlx5_query_nic_vport_min_inline(dev, vport->vport, &mlx5_mode);
		if (prev_mlx5_mode != mlx5_mode)
			return -EINVAL;
		prev_mlx5_mode = mlx5_mode;
	}

out:
	*mode = mlx5_mode;
	return 0;
}

static void esw_destroy_restore_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return;

	mlx5_modify_header_dealloc(esw->dev, offloads->restore_copy_hdr_id);
	mlx5_destroy_flow_group(offloads->restore_group);
	mlx5_destroy_flow_table(offloads->ft_offloads_restore);
}
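/* The restore table created below has one entry per possible reg_c_0
 * "user data" value (max_fte = 1 << ESW_REG_C0_USER_DATA_METADATA_BITS)
 * and a modify header that copies reg_c_1 into reg_b, so software can
 * recover the mapped object id on receive. Sketch of the sizing, with
 * an assumed bit width (illustrative only):
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int user_data_bits = 16;	/* assumed width for illustration */
	unsigned int max_fte = 1u << user_data_bits;
	unsigned int mask = max_fte - 1;

	printf("restore table: max_fte=%u, reg_c_0 match mask=0x%x\n",
	       max_fte, mask);
	return 0;
}
#endif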
static int esw_create_restore_table(struct mlx5_eswitch *esw)
{
	u8 modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *ns;
	struct mlx5_modify_hdr *mod_hdr;
	void *match_criteria, *misc;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	int err = 0;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in) {
		err = -ENOMEM;
		goto out_free;
	}

	ft_attr.max_fte = 1 << ESW_REG_C0_USER_DATA_METADATA_BITS;
	ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		esw_warn(esw->dev, "Failed to create restore table, err %d\n",
			 err);
		goto out_free;
	}

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	misc = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    misc_parameters_2);

	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
		 ESW_REG_C0_USER_DATA_METADATA_MASK);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ft_attr.max_fte - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS_2);
	g = mlx5_create_flow_group(ft, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create restore flow group, err: %d\n",
			 err);
		goto err_group;
	}

	MLX5_SET(copy_action_in, modact, action_type, MLX5_ACTION_TYPE_COPY);
	MLX5_SET(copy_action_in, modact, src_field,
		 MLX5_ACTION_IN_FIELD_METADATA_REG_C_1);
	MLX5_SET(copy_action_in, modact, dst_field,
		 MLX5_ACTION_IN_FIELD_METADATA_REG_B);
	mod_hdr = mlx5_modify_header_alloc(esw->dev,
					   MLX5_FLOW_NAMESPACE_KERNEL, 1,
					   modact);
	if (IS_ERR(mod_hdr)) {
		err = PTR_ERR(mod_hdr);
		esw_warn(dev, "Failed to create restore mod header, err: %d\n",
			 err);
		goto err_mod_hdr;
	}

	esw->offloads.ft_offloads_restore = ft;
	esw->offloads.restore_group = g;
	esw->offloads.restore_copy_hdr_id = mod_hdr;

	kvfree(flow_group_in);

	return 0;

err_mod_hdr:
	mlx5_destroy_flow_group(g);
err_group:
	mlx5_destroy_flow_table(ft);
out_free:
	kvfree(flow_group_in);

	return err;
}

static int esw_offloads_start(struct mlx5_eswitch *esw,
			      struct netlink_ext_ack *extack)
{
	int err;

	esw->mode = MLX5_ESWITCH_OFFLOADS;
	err = mlx5_eswitch_enable_locked(esw, esw->dev->priv.sriov.num_vfs);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed setting eswitch to offloads");
		esw->mode = MLX5_ESWITCH_LEGACY;
		mlx5_rescan_drivers(esw->dev);
	}
	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
		if (mlx5_eswitch_inline_mode_get(esw,
						 &esw->offloads.inline_mode)) {
			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
			NL_SET_ERR_MSG_MOD(extack,
					   "Inline mode is different between vports");
		}
	}
	return err;
}

static void mlx5_esw_offloads_rep_mark_set(struct mlx5_eswitch *esw,
					   struct mlx5_eswitch_rep *rep,
					   xa_mark_t mark)
{
	bool mark_set;

	/* Copy the mark from vport to its rep */
	mark_set = xa_get_mark(&esw->vports, rep->vport, mark);
	if (mark_set)
		xa_set_mark(&esw->offloads.vport_reps, rep->vport, mark);
}

static int mlx5_esw_offloads_rep_init(struct mlx5_eswitch *esw, const struct mlx5_vport *vport)
{
	struct mlx5_eswitch_rep *rep;
	int rep_type;
	int err;

	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (!rep)
		return -ENOMEM;

	rep->vport = vport->vport;
	rep->vport_index = vport->index;
	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
		atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);

	err = xa_insert(&esw->offloads.vport_reps, rep->vport, rep, GFP_KERNEL);
	if (err)
		goto insert_err;

	mlx5_esw_offloads_rep_mark_set(esw, rep, MLX5_ESW_VPT_HOST_FN);
	mlx5_esw_offloads_rep_mark_set(esw, rep, MLX5_ESW_VPT_VF);
	mlx5_esw_offloads_rep_mark_set(esw, rep, MLX5_ESW_VPT_SF);
	return 0;

insert_err:
	kfree(rep);
	return err;
}

static void mlx5_esw_offloads_rep_cleanup(struct mlx5_eswitch *esw,
					  struct mlx5_eswitch_rep *rep)
{
	xa_erase(&esw->offloads.vport_reps, rep->vport);
	kfree(rep);
}

void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
{
	struct mlx5_eswitch_rep *rep;
	unsigned long i;

	mlx5_esw_for_each_rep(esw, i, rep)
		mlx5_esw_offloads_rep_cleanup(esw, rep);
	xa_destroy(&esw->offloads.vport_reps);
}
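/* Rep bookkeeping above keys each rep by vport number in an xarray and
 * copies the vport-type marks (host function / VF / SF) from esw->vports,
 * so iterators like mlx5_esw_for_each_sf_rep() can walk one class only.
 * A userspace analogue of "marked iteration" using a plain bitmask
 * (illustrative sketch, not the kernel xarray API):
 */
#if 0
#include <stdio.h>

#define MARK_VF	0x1
#define MARK_SF	0x2

struct rep { int vport; unsigned int marks; };

int main(void)
{
	struct rep reps[] = {
		{ 0, 0 },		/* uplink/PF-style entry, unmarked */
		{ 1, MARK_VF },
		{ 2, MARK_VF },
		{ 100, MARK_SF },
	};
	unsigned int i;

	for (i = 0; i < sizeof(reps) / sizeof(reps[0]); i++)
		if (reps[i].marks & MARK_SF)	/* for_each_sf_rep analogue */
			printf("SF rep at vport %d\n", reps[i].vport);
	return 0;
}
#endif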
int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	unsigned long i;
	int err;

	xa_init(&esw->offloads.vport_reps);

	mlx5_esw_for_each_vport(esw, i, vport) {
		err = mlx5_esw_offloads_rep_init(esw, vport);
		if (err)
			goto err;
	}
	return 0;

err:
	esw_offloads_cleanup_reps(esw);
	return err;
}

static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
				      struct mlx5_eswitch_rep *rep, u8 rep_type)
{
	if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
			   REP_LOADED, REP_REGISTERED) == REP_LOADED)
		esw->offloads.rep_ops[rep_type]->unload(rep);
}

static void __unload_reps_sf_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	unsigned long i;

	mlx5_esw_for_each_sf_rep(esw, i, rep)
		__esw_offloads_unload_rep(esw, rep, rep_type);
}

static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	unsigned long i;

	__unload_reps_sf_vport(esw, rep_type);

	mlx5_esw_for_each_vf_rep(esw, i, rep)
		__esw_offloads_unload_rep(esw, rep, rep_type);

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	__esw_offloads_unload_rep(esw, rep, rep_type);
}

int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_eswitch_rep *rep;
	int rep_type;
	int err;

	rep = mlx5_eswitch_get_rep(esw, vport_num);
	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
		if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
				   REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
			err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
			if (err)
				goto err_reps;
		}

	return 0;

err_reps:
	atomic_set(&rep->rep_data[rep_type].state, REP_REGISTERED);
	for (--rep_type; rep_type >= 0; rep_type--)
		__esw_offloads_unload_rep(esw, rep, rep_type);
	return err;
}

void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_eswitch_rep *rep;
	int rep_type;

	rep = mlx5_eswitch_get_rep(esw, vport_num);
	for (rep_type = NUM_REP_TYPES - 1; rep_type >= 0; rep_type--)
		__esw_offloads_unload_rep(esw, rep, rep_type);
}

int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num)
{
	int err;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return 0;

	if (vport_num != MLX5_VPORT_UPLINK) {
		err = mlx5_esw_offloads_devlink_port_register(esw, vport_num);
		if (err)
			return err;
	}

	err = mlx5_esw_offloads_rep_load(esw, vport_num);
	if (err)
		goto load_err;
	return err;

load_err:
	if (vport_num != MLX5_VPORT_UPLINK)
		mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
	return err;
}

void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num)
{
	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return;

	mlx5_esw_offloads_rep_unload(esw, vport_num);

	if (vport_num != MLX5_VPORT_UPLINK)
		mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
}
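/* The rep load/unload paths above use atomic_cmpxchg() so a rep moves
 * REP_REGISTERED -> REP_LOADED (and back) exactly once even if load and
 * unload race. A standalone C11 sketch of the same idea (illustrative,
 * not driver code):
 */
#if 0
#include <stdatomic.h>
#include <stdio.h>

enum { REP_UNREGISTERED, REP_REGISTERED, REP_LOADED };

static _Atomic int state = REP_REGISTERED;

static int rep_load(void)
{
	int expected = REP_REGISTERED;

	/* Only the caller that wins the transition runs ->load() */
	if (atomic_compare_exchange_strong(&state, &expected, REP_LOADED))
		return 0;	/* we loaded it */
	return -1;		/* someone else did, or wrong state */
}

int main(void)
{
	printf("first load: %d, second load: %d\n", rep_load(), rep_load());
	return 0;
}
#endif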
static int esw_set_slave_root_fdb(struct mlx5_core_dev *master,
				  struct mlx5_core_dev *slave)
{
	u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {};
	u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {};
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_namespace *ns;
	int err;

	MLX5_SET(set_flow_table_root_in, in, opcode,
		 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
	MLX5_SET(set_flow_table_root_in, in, table_type,
		 FS_FT_FDB);

	if (master) {
		ns = mlx5_get_flow_namespace(master,
					     MLX5_FLOW_NAMESPACE_FDB);
		root = find_root(&ns->node);
		mutex_lock(&root->chain_lock);
		MLX5_SET(set_flow_table_root_in, in,
			 table_eswitch_owner_vhca_id_valid, 1);
		MLX5_SET(set_flow_table_root_in, in,
			 table_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(master, vhca_id));
		MLX5_SET(set_flow_table_root_in, in, table_id,
			 root->root_ft->id);
	} else {
		ns = mlx5_get_flow_namespace(slave,
					     MLX5_FLOW_NAMESPACE_FDB);
		root = find_root(&ns->node);
		mutex_lock(&root->chain_lock);
		MLX5_SET(set_flow_table_root_in, in, table_id,
			 root->root_ft->id);
	}

	err = mlx5_cmd_exec(slave, in, sizeof(in), out, sizeof(out));
	mutex_unlock(&root->chain_lock);

	return err;
}

static int __esw_set_master_egress_rule(struct mlx5_core_dev *master,
					struct mlx5_core_dev *slave,
					struct mlx5_vport *vport,
					struct mlx5_flow_table *acl)
{
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_spec *spec;
	int err = 0;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_UPLINK);
	MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
		 MLX5_CAP_GEN(slave, vhca_id));

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc,
			 source_eswitch_owner_vhca_id);

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = slave->priv.eswitch->manager_vport;
	dest.vport.vhca_id = MLX5_CAP_GEN(slave, vhca_id);
	dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;

	flow_rule = mlx5_add_flow_rules(acl, spec, &flow_act,
					&dest, 1);
	if (IS_ERR(flow_rule))
		err = PTR_ERR(flow_rule);
	else
		vport->egress.offloads.bounce_rule = flow_rule;

	kvfree(spec);
	return err;
}

static int esw_set_master_egress_rule(struct mlx5_core_dev *master,
				      struct mlx5_core_dev *slave)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_eswitch *esw = master->priv.eswitch;
	struct mlx5_flow_table_attr ft_attr = {
		.max_fte = 1, .prio = 0, .level = 0,
		.flags = MLX5_FLOW_TABLE_OTHER_VPORT,
	};
	struct mlx5_flow_namespace *egress_ns;
	struct mlx5_flow_table *acl;
	struct mlx5_flow_group *g;
	struct mlx5_vport *vport;
	void *match_criteria;
	u32 *flow_group_in;
	int err;

	vport = mlx5_eswitch_get_vport(esw, esw->manager_vport);
	if (IS_ERR(vport))
		return PTR_ERR(vport);

	egress_ns = mlx5_get_flow_vport_acl_namespace(master,
						      MLX5_FLOW_NAMESPACE_ESW_EGRESS,
						      vport->index);
	if (!egress_ns)
		return -EINVAL;

	if (vport->egress.acl)
		return -EINVAL;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	acl = mlx5_create_vport_flow_table(egress_ns, &ft_attr, vport->vport);
	if (IS_ERR(acl)) {
		err = PTR_ERR(acl);
		goto out;
	}

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
			 misc_parameters.source_port);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
			 misc_parameters.source_eswitch_owner_vhca_id);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	MLX5_SET(create_flow_group_in, flow_group_in,
		 source_eswitch_owner_vhca_id_valid, 1);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);

	g = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		goto err_group;
	}

	err = __esw_set_master_egress_rule(master, slave, vport, acl);
	if (err)
		goto err_rule;

	vport->egress.acl = acl;
	vport->egress.offloads.bounce_grp = g;

	kvfree(flow_group_in);

	return 0;

err_rule:
	mlx5_destroy_flow_group(g);
err_group:
	mlx5_destroy_flow_table(acl);
out:
	kvfree(flow_group_in);
	return err;
}

static void esw_unset_master_egress_rule(struct mlx5_core_dev *dev)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(dev->priv.eswitch,
				       dev->priv.eswitch->manager_vport);

	esw_acl_egress_ofld_cleanup(vport);
}

int mlx5_eswitch_offloads_config_single_fdb(struct mlx5_eswitch *master_esw,
					    struct mlx5_eswitch *slave_esw)
{
	int err;

	err = esw_set_slave_root_fdb(master_esw->dev,
				     slave_esw->dev);
	if (err)
		return err;

	err = esw_set_master_egress_rule(master_esw->dev,
					 slave_esw->dev);
	if (err)
		goto err_acl;

	return err;

err_acl:
	esw_set_slave_root_fdb(NULL, slave_esw->dev);

	return err;
}

void mlx5_eswitch_offloads_destroy_single_fdb(struct mlx5_eswitch *master_esw,
					      struct mlx5_eswitch *slave_esw)
{
	esw_unset_master_egress_rule(master_esw->dev);
	esw_set_slave_root_fdb(NULL, slave_esw->dev);
}

#define ESW_OFFLOADS_DEVCOM_PAIR	(0)
#define ESW_OFFLOADS_DEVCOM_UNPAIR	(1)

static void mlx5_esw_offloads_rep_event_unpair(struct mlx5_eswitch *esw)
{
	const struct mlx5_eswitch_rep_ops *ops;
	struct mlx5_eswitch_rep *rep;
	unsigned long i;
	u8 rep_type;

	mlx5_esw_for_each_rep(esw, i, rep) {
		rep_type = NUM_REP_TYPES;
		while (rep_type--) {
			ops = esw->offloads.rep_ops[rep_type];
			if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
			    ops->event)
				ops->event(esw, rep, MLX5_SWITCHDEV_EVENT_UNPAIR, NULL);
		}
	}
}

static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
{
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
	mlx5e_tc_clean_fdb_peer_flows(esw);
#endif
	mlx5_esw_offloads_rep_event_unpair(esw);
	esw_del_fdb_peer_miss_rules(esw);
}
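/* Pairing below is symmetric: esw is paired to peer_esw and then
 * peer_esw back to esw, with the first direction rolled back if the
 * second fails (see the err_pair label in the devcom event handler).
 * Generic shape of that two-way setup with rollback (illustrative
 * sketch only):
 */
#if 0
#include <stdio.h>

static int pair(const char *a, const char *b)
{
	printf("pair %s -> %s\n", a, b);
	return 0;
}

static void unpair(const char *a)
{
	printf("unpair %s\n", a);
}

static int pair_both(void)
{
	if (pair("esw", "peer"))
		return -1;
	if (pair("peer", "esw")) {
		unpair("esw");	/* roll back the first direction */
		return -1;
	}
	return 0;
}

int main(void) { return pair_both(); }
#endif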
static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
				  struct mlx5_eswitch *peer_esw)
{
	const struct mlx5_eswitch_rep_ops *ops;
	struct mlx5_eswitch_rep *rep;
	unsigned long i;
	u8 rep_type;
	int err;

	err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
	if (err)
		return err;

	mlx5_esw_for_each_rep(esw, i, rep) {
		for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
			ops = esw->offloads.rep_ops[rep_type];
			if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
			    ops->event) {
				err = ops->event(esw, rep, MLX5_SWITCHDEV_EVENT_PAIR, peer_esw);
				if (err)
					goto err_out;
			}
		}
	}

	return 0;

err_out:
	mlx5_esw_offloads_unpair(esw);
	return err;
}

static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw,
					 struct mlx5_eswitch *peer_esw,
					 bool pair)
{
	struct mlx5_flow_root_namespace *peer_ns;
	struct mlx5_flow_root_namespace *ns;
	int err;

	peer_ns = peer_esw->dev->priv.steering->fdb_root_ns;
	ns = esw->dev->priv.steering->fdb_root_ns;

	if (pair) {
		err = mlx5_flow_namespace_set_peer(ns, peer_ns);
		if (err)
			return err;

		err = mlx5_flow_namespace_set_peer(peer_ns, ns);
		if (err) {
			mlx5_flow_namespace_set_peer(ns, NULL);
			return err;
		}
	} else {
		mlx5_flow_namespace_set_peer(ns, NULL);
		mlx5_flow_namespace_set_peer(peer_ns, NULL);
	}

	return 0;
}

static int mlx5_esw_offloads_devcom_event(int event,
					  void *my_data,
					  void *event_data)
{
	struct mlx5_eswitch *esw = my_data;
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;
	struct mlx5_eswitch *peer_esw = event_data;
	int err;

	switch (event) {
	case ESW_OFFLOADS_DEVCOM_PAIR:
		if (mlx5_eswitch_vport_match_metadata_enabled(esw) !=
		    mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
			break;

		err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true);
		if (err)
			goto err_out;
		err = mlx5_esw_offloads_pair(esw, peer_esw);
		if (err)
			goto err_peer;

		err = mlx5_esw_offloads_pair(peer_esw, esw);
		if (err)
			goto err_pair;

		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
		break;

	case ESW_OFFLOADS_DEVCOM_UNPAIR:
		if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
			break;

		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
		mlx5_esw_offloads_unpair(peer_esw);
		mlx5_esw_offloads_unpair(esw);
		mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
		break;
	}

	return 0;

err_pair:
	mlx5_esw_offloads_unpair(esw);
err_peer:
	mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
err_out:
	mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d",
		      event, err);
	return err;
}

static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
{
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;

	INIT_LIST_HEAD(&esw->offloads.peer_flows);
	mutex_init(&esw->offloads.peer_mutex);

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return;

	if (!mlx5_is_lag_supported(esw->dev))
		return;

	mlx5_devcom_register_component(devcom,
				       MLX5_DEVCOM_ESW_OFFLOADS,
				       mlx5_esw_offloads_devcom_event,
				       esw);

	mlx5_devcom_send_event(devcom,
			       MLX5_DEVCOM_ESW_OFFLOADS,
			       ESW_OFFLOADS_DEVCOM_PAIR, esw);
}

static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
{
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return;

	if (!mlx5_is_lag_supported(esw->dev))
		return;

	mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
			       ESW_OFFLOADS_DEVCOM_UNPAIR, esw);

	mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}

bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
{
	if (!MLX5_CAP_ESW(esw->dev, esw_uplink_ingress_acl))
		return false;

	if (!(MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
	      MLX5_FDB_TO_VPORT_REG_C_0))
		return false;

	if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
		return false;

	return true;
}

#define MLX5_ESW_METADATA_RSVD_UPLINK 1

/* Share the same metadata for both uplinks. This is fine because:
 * (a) In shared FDB mode (LAG) both uplinks are treated the
 *     same and tagged with the same metadata.
 * (b) In non-shared FDB mode, packets from physical port0
 *     cannot hit the eswitch of PF1 and vice versa.
 */
static u32 mlx5_esw_match_metadata_reserved(struct mlx5_eswitch *esw)
{
	return MLX5_ESW_METADATA_RSVD_UPLINK;
}

u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw)
{
	u32 vport_end_ida = (1 << ESW_VPORT_BITS) - 1;
	/* Reserve 0xf for internal port offload */
	u32 max_pf_num = (1 << ESW_PFNUM_BITS) - 2;
	u32 pf_num;
	int id;

	/* Only 4 bits of pf_num */
	pf_num = mlx5_get_dev_index(esw->dev);
	if (pf_num > max_pf_num)
		return 0;

	/* Metadata is 4 bits of PFNUM and 12 bits of unique id */
	/* Use only non-zero vport_id (2-4095) for all PFs */
	id = ida_alloc_range(&esw->offloads.vport_metadata_ida,
			     MLX5_ESW_METADATA_RSVD_UPLINK + 1,
			     vport_end_ida, GFP_KERNEL);
	if (id < 0)
		return 0;
	id = (pf_num << ESW_VPORT_BITS) | id;
	return id;
}

void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata)
{
	u32 vport_bit_mask = (1 << ESW_VPORT_BITS) - 1;

	/* Metadata contains only 12 bits of actual ida id */
	ida_free(&esw->offloads.vport_metadata_ida, metadata & vport_bit_mask);
}
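/* Worked example of the metadata layout used above: 4 PFNUM bits on top
 * of 12 vport-id bits, with id 1 reserved for the uplinks and ids 2-4095
 * handed out by the IDA. The bit widths mirror the ESW_PFNUM_BITS and
 * ESW_VPORT_BITS values described in the comments; treat them as assumed
 * here. Illustrative userspace sketch:
 */
#if 0
#include <stdio.h>

#define ESW_VPORT_BITS	12
#define ESW_PFNUM_BITS	4

static unsigned int metadata_encode(unsigned int pf_num, unsigned int id)
{
	return (pf_num << ESW_VPORT_BITS) | id;
}

int main(void)
{
	unsigned int md = metadata_encode(1, 2);	/* PF1, first dynamic id */

	printf("metadata=0x%x pf=%u id=%u\n", md,
	       md >> ESW_VPORT_BITS, md & ((1u << ESW_VPORT_BITS) - 1));
	return 0;
}
#endif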
static int esw_offloads_vport_metadata_setup(struct mlx5_eswitch *esw,
					     struct mlx5_vport *vport)
{
	if (vport->vport == MLX5_VPORT_UPLINK)
		vport->default_metadata = mlx5_esw_match_metadata_reserved(esw);
	else
		vport->default_metadata = mlx5_esw_match_metadata_alloc(esw);

	vport->metadata = vport->default_metadata;
	return vport->metadata ? 0 : -ENOSPC;
}

static void esw_offloads_vport_metadata_cleanup(struct mlx5_eswitch *esw,
						struct mlx5_vport *vport)
{
	if (!vport->default_metadata)
		return;

	if (vport->vport == MLX5_VPORT_UPLINK)
		return;

	WARN_ON(vport->metadata != vport->default_metadata);
	mlx5_esw_match_metadata_free(esw, vport->default_metadata);
}

static void esw_offloads_metadata_uninit(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	unsigned long i;

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
		return;

	mlx5_esw_for_each_vport(esw, i, vport)
		esw_offloads_vport_metadata_cleanup(esw, vport);
}

static int esw_offloads_metadata_init(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	unsigned long i;
	int err;

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
		return 0;

	mlx5_esw_for_each_vport(esw, i, vport) {
		err = esw_offloads_vport_metadata_setup(esw, vport);
		if (err)
			goto metadata_err;
	}

	return 0;

metadata_err:
	esw_offloads_metadata_uninit(esw);
	return err;
}

int mlx5_esw_offloads_vport_metadata_set(struct mlx5_eswitch *esw, bool enable)
{
	int err = 0;

	down_write(&esw->mode_lock);
	if (mlx5_esw_is_fdb_created(esw)) {
		err = -EBUSY;
		goto done;
	}
	if (!mlx5_esw_vport_match_metadata_supported(esw)) {
		err = -EOPNOTSUPP;
		goto done;
	}
	if (enable)
		esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
	else
		esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
done:
	up_write(&esw->mode_lock);
	return err;
}

int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
				     struct mlx5_vport *vport)
{
	int err;

	err = esw_acl_ingress_ofld_setup(esw, vport);
	if (err)
		return err;

	err = esw_acl_egress_ofld_setup(esw, vport);
	if (err)
		goto egress_err;

	return 0;

egress_err:
	esw_acl_ingress_ofld_cleanup(esw, vport);
	return err;
}

void
esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
				      struct mlx5_vport *vport)
{
	esw_acl_egress_ofld_cleanup(vport);
	esw_acl_ingress_ofld_cleanup(esw, vport);
}

static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
	if (IS_ERR(vport))
		return PTR_ERR(vport);

	return esw_vport_create_offloads_acl_tables(esw, vport);
}

static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
	if (IS_ERR(vport))
		return;

	esw_vport_destroy_offloads_acl_tables(esw, vport);
}

int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
{
	struct mlx5_eswitch_rep *rep;
	unsigned long i;
	int ret;

	if (!esw || esw->mode != MLX5_ESWITCH_OFFLOADS)
		return 0;

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
		return 0;

	ret = mlx5_esw_offloads_rep_load(esw, MLX5_VPORT_UPLINK);
	if (ret)
		return ret;

	mlx5_esw_for_each_rep(esw, i, rep) {
		if (atomic_read(&rep->rep_data[REP_ETH].state) == REP_LOADED)
			mlx5_esw_offloads_rep_load(esw, rep->vport);
	}

	return 0;
}

static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_indir_table *indir;
	int err;

	memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
	mutex_init(&esw->fdb_table.offloads.vports.lock);
	hash_init(esw->fdb_table.offloads.vports.table);
	atomic64_set(&esw->user_count, 0);

	indir = mlx5_esw_indir_table_init();
	if (IS_ERR(indir)) {
		err = PTR_ERR(indir);
		goto create_indir_err;
	}
	esw->fdb_table.offloads.indir = indir;

	err = esw_create_uplink_offloads_acl_tables(esw);
	if (err)
		goto create_acl_err;

	err = esw_create_offloads_table(esw);
	if (err)
		goto create_offloads_err;

	err = esw_create_restore_table(esw);
	if (err)
		goto create_restore_err;

	err = esw_create_offloads_fdb_tables(esw);
	if (err)
		goto create_fdb_err;

	err = esw_create_vport_rx_group(esw);
	if (err)
		goto create_fg_err;

	err = esw_create_vport_rx_drop_group(esw);
	if (err)
		goto create_rx_drop_fg_err;

	err = esw_create_vport_rx_drop_rule(esw);
	if (err)
		goto create_rx_drop_rule_err;

	return 0;

create_rx_drop_rule_err:
	esw_destroy_vport_rx_drop_group(esw);
create_rx_drop_fg_err:
	esw_destroy_vport_rx_group(esw);
create_fg_err:
	esw_destroy_offloads_fdb_tables(esw);
create_fdb_err:
	esw_destroy_restore_table(esw);
create_restore_err:
	esw_destroy_offloads_table(esw);
create_offloads_err:
	esw_destroy_uplink_offloads_acl_tables(esw);
create_acl_err:
	mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
create_indir_err:
	mutex_destroy(&esw->fdb_table.offloads.vports.lock);
	return err;
}

static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
{
	esw_destroy_vport_rx_drop_rule(esw);
	esw_destroy_vport_rx_drop_group(esw);
	esw_destroy_vport_rx_group(esw);
	esw_destroy_offloads_fdb_tables(esw);
	esw_destroy_restore_table(esw);
	esw_destroy_offloads_table(esw);
	esw_destroy_uplink_offloads_acl_tables(esw);
	mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
	mutex_destroy(&esw->fdb_table.offloads.vports.lock);
}
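/* esw_offloads_steering_init()/esw_offloads_steering_cleanup() above are
 * a textbook goto-unwind pair: resources are acquired in a fixed order
 * and the error labels release them in exact reverse order, so cleanup
 * mirrors init. Minimal shape of the pattern (illustrative sketch):
 */
#if 0
#include <stdio.h>

static int get_a(void) { printf("get A\n"); return 0; }
static int get_b(void) { printf("get B\n"); return -1; }	/* fails */
static void put_a(void) { printf("put A\n"); }

static int setup(void)
{
	int err;

	err = get_a();
	if (err)
		goto err_a;
	err = get_b();
	if (err)
		goto err_b;
	return 0;

err_b:
	put_a();	/* release in reverse order of acquisition */
err_a:
	return err;
}

int main(void) { return setup(); }
#endif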
static void
esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
{
	struct devlink *devlink;
	bool host_pf_disabled;
	u16 new_num_vfs;

	new_num_vfs = MLX5_GET(query_esw_functions_out, out,
			       host_params_context.host_num_of_vfs);
	host_pf_disabled = MLX5_GET(query_esw_functions_out, out,
				    host_params_context.host_pf_disabled);

	if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled)
		return;

	devlink = priv_to_devlink(esw->dev);
	devl_lock(devlink);
	/* Number of VFs can only change from "0 to x" or "x to 0". */
	if (esw->esw_funcs.num_vfs > 0) {
		mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
	} else {
		int err;

		err = mlx5_eswitch_load_vf_vports(esw, new_num_vfs,
						  MLX5_VPORT_UC_ADDR_CHANGE);
		if (err) {
			devl_unlock(devlink);
			return;
		}
	}
	esw->esw_funcs.num_vfs = new_num_vfs;
	devl_unlock(devlink);
}

static void esw_functions_changed_event_handler(struct work_struct *work)
{
	struct mlx5_host_work *host_work;
	struct mlx5_eswitch *esw;
	const u32 *out;

	host_work = container_of(work, struct mlx5_host_work, work);
	esw = host_work->esw;

	out = mlx5_esw_query_functions(esw->dev);
	if (IS_ERR(out))
		goto out;

	esw_vfs_changed_event_handler(esw, out);
	kvfree(out);
out:
	kfree(host_work);
}

int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data)
{
	struct mlx5_esw_functions *esw_funcs;
	struct mlx5_host_work *host_work;
	struct mlx5_eswitch *esw;

	host_work = kzalloc(sizeof(*host_work), GFP_ATOMIC);
	if (!host_work)
		return NOTIFY_DONE;

	esw_funcs = mlx5_nb_cof(nb, struct mlx5_esw_functions, nb);
	esw = container_of(esw_funcs, struct mlx5_eswitch, esw_funcs);

	host_work->esw = esw;

	INIT_WORK(&host_work->work, esw_functions_changed_event_handler);
	queue_work(esw->work_queue, &host_work->work);

	return NOTIFY_OK;
}

static int mlx5_esw_host_number_init(struct mlx5_eswitch *esw)
{
	const u32 *query_host_out;

	if (!mlx5_core_is_ecpf_esw_manager(esw->dev))
		return 0;

	query_host_out = mlx5_esw_query_functions(esw->dev);
	if (IS_ERR(query_host_out))
		return PTR_ERR(query_host_out);

	/* Mark non-local controllers with a non-zero controller number. */
	esw->offloads.host_number = MLX5_GET(query_esw_functions_out, query_host_out,
					     host_params_context.host_number);
	kvfree(query_host_out);
	return 0;
}

bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 controller)
{
	/* Local controller is always valid */
	if (controller == 0)
		return true;

	if (!mlx5_core_is_ecpf_esw_manager(esw->dev))
		return false;

	/* External host number starts with zero in device */
	return (controller == esw->offloads.host_number + 1);
}

int esw_offloads_enable(struct mlx5_eswitch *esw)
{
	struct mapping_ctx *reg_c0_obj_pool;
	struct mlx5_vport *vport;
	unsigned long i;
	u64 mapping_id;
	int err;

	mutex_init(&esw->offloads.termtbl_mutex);
	mlx5_rdma_enable_roce(esw->dev);

	err = mlx5_esw_host_number_init(esw);
	if (err)
		goto err_metadata;

	err = esw_offloads_metadata_init(esw);
	if (err)
		goto err_metadata;

	err = esw_set_passing_vport_metadata(esw, true);
	if (err)
		goto err_vport_metadata;

	mapping_id = mlx5_query_nic_system_image_guid(esw->dev);

	reg_c0_obj_pool = mapping_create_for_id(mapping_id, MAPPING_TYPE_CHAIN,
						sizeof(struct mlx5_mapped_obj),
						ESW_REG_C0_USER_DATA_METADATA_MASK,
						true);

	if (IS_ERR(reg_c0_obj_pool)) {
		err = PTR_ERR(reg_c0_obj_pool);
		goto err_pool;
	}
	esw->offloads.reg_c0_obj_pool = reg_c0_obj_pool;

	err = esw_offloads_steering_init(esw);
	if (err)
		goto err_steering_init;

	/* Representor will control the vport link state */
	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
		vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;

	/* Uplink vport rep must load first. */
	err = esw_offloads_load_rep(esw, MLX5_VPORT_UPLINK);
	if (err)
		goto err_uplink;

	err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
	if (err)
		goto err_vports;

	esw_offloads_devcom_init(esw);

	return 0;

err_vports:
	esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
err_uplink:
	esw_offloads_steering_cleanup(esw);
err_steering_init:
	mapping_destroy(reg_c0_obj_pool);
err_pool:
	esw_set_passing_vport_metadata(esw, false);
err_vport_metadata:
	esw_offloads_metadata_uninit(esw);
err_metadata:
	mlx5_rdma_disable_roce(esw->dev);
	mutex_destroy(&esw->offloads.termtbl_mutex);
	return err;
}

static int esw_offloads_stop(struct mlx5_eswitch *esw,
			     struct netlink_ext_ack *extack)
{
	int err;

	esw->mode = MLX5_ESWITCH_LEGACY;
	err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS);
	if (err)
		NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");

	return err;
}

void esw_offloads_disable(struct mlx5_eswitch *esw)
{
	esw_offloads_devcom_cleanup(esw);
	mlx5_eswitch_disable_pf_vf_vports(esw);
	esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
	esw_set_passing_vport_metadata(esw, false);
	esw_offloads_steering_cleanup(esw);
	mapping_destroy(esw->offloads.reg_c0_obj_pool);
	esw_offloads_metadata_uninit(esw);
	mlx5_rdma_disable_roce(esw->dev);
	mutex_destroy(&esw->offloads.termtbl_mutex);
}

static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		*mlx5_mode = MLX5_ESWITCH_LEGACY;
		break;
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		*mlx5_mode = MLX5_ESWITCH_OFFLOADS;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
{
	switch (mlx5_mode) {
	case MLX5_ESWITCH_LEGACY:
		*mode = DEVLINK_ESWITCH_MODE_LEGACY;
		break;
	case MLX5_ESWITCH_OFFLOADS:
		*mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_INLINE_MODE_NONE:
		*mlx5_mode = MLX5_INLINE_MODE_NONE;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_LINK:
		*mlx5_mode = MLX5_INLINE_MODE_L2;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
		*mlx5_mode = MLX5_INLINE_MODE_IP;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
		*mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
{
	switch (mlx5_mode) {
	case MLX5_INLINE_MODE_NONE:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
		break;
	case MLX5_INLINE_MODE_L2:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
		break;
	case MLX5_INLINE_MODE_IP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
		break;
	case MLX5_INLINE_MODE_TCP_UDP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
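/* The translation helpers above back the devlink eswitch API. From
 * userspace the usual sequence is (illustrative iproute2 invocations;
 * the PCI address is a placeholder):
 *
 *	devlink dev eswitch show pci/0000:03:00.0
 *	devlink dev eswitch set pci/0000:03:00.0 mode switchdev
 *	devlink dev eswitch set pci/0000:03:00.0 inline-mode transport
 *
 * which land in mlx5_devlink_eswitch_mode_set() and
 * mlx5_devlink_eswitch_inline_mode_set() below.
 */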
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
				  struct netlink_ext_ack *extack)
{
	u16 cur_mlx5_mode, mlx5_mode = 0;
	struct mlx5_eswitch *esw;
	int err = 0;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	if (esw_mode_from_devlink(mode, &mlx5_mode))
		return -EINVAL;

	mlx5_lag_disable_change(esw->dev);
	err = mlx5_esw_try_lock(esw);
	if (err < 0) {
		NL_SET_ERR_MSG_MOD(extack, "Can't change mode, E-Switch is busy");
		goto enable_lag;
	}
	cur_mlx5_mode = err;
	err = 0;

	if (cur_mlx5_mode == mlx5_mode)
		goto unlock;

	mlx5_eswitch_disable_locked(esw);
	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) {
		if (mlx5_devlink_trap_get_num_active(esw->dev)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Can't change mode while devlink traps are active");
			err = -EOPNOTSUPP;
			goto unlock;
		}
		err = esw_offloads_start(esw, extack);
	} else if (mode == DEVLINK_ESWITCH_MODE_LEGACY) {
		err = esw_offloads_stop(esw, extack);
		mlx5_rescan_drivers(esw->dev);
	} else {
		err = -EINVAL;
	}

unlock:
	mlx5_esw_unlock(esw);
enable_lag:
	mlx5_lag_enable_change(esw->dev);
	return err;
}

int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct mlx5_eswitch *esw;
	int err;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	down_write(&esw->mode_lock);
	err = esw_mode_to_devlink(esw->mode, mode);
	up_write(&esw->mode_lock);
	return err;
}

static int mlx5_esw_vports_inline_set(struct mlx5_eswitch *esw, u8 mlx5_mode,
				      struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_vport *vport;
	u16 err_vport_num = 0;
	unsigned long i;
	int err = 0;

	mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
		err = mlx5_modify_nic_vport_min_inline(dev, vport->vport, mlx5_mode);
		if (err) {
			err_vport_num = vport->vport;
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to set min inline on vport");
			goto revert_inline_mode;
		}
	}
	return 0;

revert_inline_mode:
	mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
		if (vport->vport == err_vport_num)
			break;
		mlx5_modify_nic_vport_min_inline(dev,
						 vport->vport,
						 esw->offloads.inline_mode);
	}
	return err;
}

int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
					 struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw;
	u8 mlx5_mode;
	int err;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	down_write(&esw->mode_lock);

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE) {
			err = 0;
			goto out;
		}

		fallthrough;
	case MLX5_CAP_INLINE_MODE_L2:
		NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
		err = -EOPNOTSUPP;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		break;
	}

	if (atomic64_read(&esw->offloads.num_flows) > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set inline mode when flows are configured");
		err = -EOPNOTSUPP;
		goto out;
	}

	err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
	if (err)
		goto out;

	err = mlx5_esw_vports_inline_set(esw, mlx5_mode, extack);
	if (err)
		goto out;

	esw->offloads.inline_mode = mlx5_mode;
	up_write(&esw->mode_lock);
	return 0;

out:
	up_write(&esw->mode_lock);
	return err;
}

int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
	struct mlx5_eswitch *esw;
	int err;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	down_write(&esw->mode_lock);
	err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
	up_write(&esw->mode_lock);
	return err;
}

int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
					enum devlink_eswitch_encap_mode encap,
					struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw;
	int err = 0;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	down_write(&esw->mode_lock);

	if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
	    (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
	     !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))) {
		err = -EOPNOTSUPP;
		goto unlock;
	}

	if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC) {
		err = -EOPNOTSUPP;
		goto unlock;
	}

	if (esw->mode == MLX5_ESWITCH_LEGACY) {
		esw->offloads.encap = encap;
		goto unlock;
	}

	if (esw->offloads.encap == encap)
		goto unlock;

	if (atomic64_read(&esw->offloads.num_flows) > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set encapsulation when flows are configured");
		err = -EOPNOTSUPP;
		goto unlock;
	}

	esw_destroy_offloads_fdb_tables(esw);

	esw->offloads.encap = encap;

	err = esw_create_offloads_fdb_tables(esw);

	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed re-creating fast FDB table");
		esw->offloads.encap = !encap;
		(void)esw_create_offloads_fdb_tables(esw);
	}

unlock:
	up_write(&esw->mode_lock);
	return err;
}

int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
					enum devlink_eswitch_encap_mode *encap)
{
	struct mlx5_eswitch *esw;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	down_write(&esw->mode_lock);
	*encap = esw->offloads.encap;
	up_write(&esw->mode_lock);
	return 0;
}
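/* Changing the encap mode while in switchdev mode, as done above,
 * recreates the slow path FDB (the tunnel flags are baked into the table
 * at creation) and, on failure, flips the mode back and recreates the
 * old table. Shape of that replace-or-revert pattern (illustrative
 * sketch only):
 */
#if 0
#include <stdio.h>

static int create_fdb(int encap)
{
	printf("create fdb, encap=%d\n", encap);
	return -1;	/* fail to exercise the revert path */
}

static void destroy_fdb(void)
{
	printf("destroy fdb\n");
}

static int set_encap(int *cur, int want)
{
	destroy_fdb();
	*cur = want;
	if (create_fdb(*cur)) {
		*cur = !want;		/* revert and restore the old table */
		(void)create_fdb(*cur);
		return -1;
	}
	return 0;
}

int main(void)
{
	int encap = 0;

	return set_encap(&encap, 1);
}
#endif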
*/ 3728 if (vport_num == MLX5_VPORT_PF && 3729 !mlx5_core_is_ecpf_esw_manager(esw->dev)) 3730 return false; 3731 3732 if (vport_num == MLX5_VPORT_ECPF && 3733 !mlx5_ecpf_vport_exists(esw->dev)) 3734 return false; 3735 3736 return true; 3737 } 3738 3739 void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw, 3740 const struct mlx5_eswitch_rep_ops *ops, 3741 u8 rep_type) 3742 { 3743 struct mlx5_eswitch_rep_data *rep_data; 3744 struct mlx5_eswitch_rep *rep; 3745 unsigned long i; 3746 3747 esw->offloads.rep_ops[rep_type] = ops; 3748 mlx5_esw_for_each_rep(esw, i, rep) { 3749 if (likely(mlx5_eswitch_vport_has_rep(esw, rep->vport))) { 3750 rep->esw = esw; 3751 rep_data = &rep->rep_data[rep_type]; 3752 atomic_set(&rep_data->state, REP_REGISTERED); 3753 } 3754 } 3755 } 3756 EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps); 3757 3758 void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type) 3759 { 3760 struct mlx5_eswitch_rep *rep; 3761 unsigned long i; 3762 3763 if (esw->mode == MLX5_ESWITCH_OFFLOADS) 3764 __unload_reps_all_vport(esw, rep_type); 3765 3766 mlx5_esw_for_each_rep(esw, i, rep) 3767 atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED); 3768 } 3769 EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps); 3770 3771 void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type) 3772 { 3773 struct mlx5_eswitch_rep *rep; 3774 3775 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK); 3776 return rep->rep_data[rep_type].priv; 3777 } 3778 3779 void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw, 3780 u16 vport, 3781 u8 rep_type) 3782 { 3783 struct mlx5_eswitch_rep *rep; 3784 3785 rep = mlx5_eswitch_get_rep(esw, vport); 3786 3787 if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED && 3788 esw->offloads.rep_ops[rep_type]->get_proto_dev) 3789 return esw->offloads.rep_ops[rep_type]->get_proto_dev(rep); 3790 return NULL; 3791 } 3792 EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev); 3793 3794 void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type) 3795 { 3796 return mlx5_eswitch_get_proto_dev(esw, MLX5_VPORT_UPLINK, rep_type); 3797 } 3798 EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev); 3799 3800 struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw, 3801 u16 vport) 3802 { 3803 return mlx5_eswitch_get_rep(esw, vport); 3804 } 3805 EXPORT_SYMBOL(mlx5_eswitch_vport_rep); 3806 3807 bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw) 3808 { 3809 return !!(esw->flags & MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED); 3810 } 3811 EXPORT_SYMBOL(mlx5_eswitch_reg_c1_loopback_enabled); 3812 3813 bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw) 3814 { 3815 return !!(esw->flags & MLX5_ESWITCH_VPORT_MATCH_METADATA); 3816 } 3817 EXPORT_SYMBOL(mlx5_eswitch_vport_match_metadata_enabled); 3818 3819 u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw, 3820 u16 vport_num) 3821 { 3822 struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num); 3823 3824 if (WARN_ON_ONCE(IS_ERR(vport))) 3825 return 0; 3826 3827 return vport->metadata << (32 - ESW_SOURCE_PORT_METADATA_BITS); 3828 } 3829 EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match); 3830 3831 int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port, 3832 u16 vport_num, u32 controller, u32 sfnum) 3833 { 3834 int err; 3835 3836 err = mlx5_esw_vport_enable(esw, vport_num, MLX5_VPORT_UC_ADDR_CHANGE); 3837 if (err) 3838 return err; 3839 3840 err = 
int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
				      u16 vport_num, u32 controller, u32 sfnum)
{
	int err;

	err = mlx5_esw_vport_enable(esw, vport_num, MLX5_VPORT_UC_ADDR_CHANGE);
	if (err)
		return err;

	err = mlx5_esw_devlink_sf_port_register(esw, dl_port, vport_num, controller, sfnum);
	if (err)
		goto devlink_err;

	mlx5_esw_vport_debugfs_create(esw, vport_num, true, sfnum);
	err = mlx5_esw_offloads_rep_load(esw, vport_num);
	if (err)
		goto rep_err;
	return 0;

rep_err:
	mlx5_esw_vport_debugfs_destroy(esw, vport_num);
	mlx5_esw_devlink_sf_port_unregister(esw, vport_num);
devlink_err:
	mlx5_esw_vport_disable(esw, vport_num);
	return err;
}

void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num)
{
	mlx5_esw_offloads_rep_unload(esw, vport_num);
	mlx5_esw_vport_debugfs_destroy(esw, vport_num);
	mlx5_esw_devlink_sf_port_unregister(esw, vport_num);
	mlx5_esw_vport_disable(esw, vport_num);
}

static int mlx5_esw_query_vport_vhca_id(struct mlx5_eswitch *esw, u16 vport_num, u16 *vhca_id)
{
	int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	void *query_ctx;
	void *hca_caps;
	int err;

	*vhca_id = 0;
	if (mlx5_esw_is_manager_vport(esw, vport_num) ||
	    !MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
		return -EPERM;

	query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
	if (!query_ctx)
		return -ENOMEM;

	err = mlx5_vport_get_other_func_cap(esw->dev, vport_num, query_ctx);
	if (err)
		goto out_free;

	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
	*vhca_id = MLX5_GET(cmd_hca_cap, hca_caps, vhca_id);

out_free:
	kfree(query_ctx);
	return err;
}

int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num)
{
	u16 *old_entry, *vhca_map_entry, vhca_id;
	int err;

	err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id);
	if (err) {
		esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%u,err=%d)\n",
			 vport_num, err);
		return err;
	}

	vhca_map_entry = kmalloc(sizeof(*vhca_map_entry), GFP_KERNEL);
	if (!vhca_map_entry)
		return -ENOMEM;

	*vhca_map_entry = vport_num;
	old_entry = xa_store(&esw->offloads.vhca_map, vhca_id, vhca_map_entry, GFP_KERNEL);
	if (xa_is_err(old_entry)) {
		kfree(vhca_map_entry);
		return xa_err(old_entry);
	}
	/* xa_store() returns the entry it replaced; free it so a previous
	 * mapping for this vhca_id is not leaked.
	 */
	kfree(old_entry);
	return 0;
}

void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num)
{
	u16 *vhca_map_entry, vhca_id;
	int err;

	err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id);
	if (err)
		esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%u,err=%d)\n",
			 vport_num, err);

	vhca_map_entry = xa_erase(&esw->offloads.vhca_map, vhca_id);
	kfree(vhca_map_entry);
}

int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num)
{
	u16 *res = xa_load(&esw->offloads.vhca_map, vhca_id);

	if (!res)
		return -ENOENT;

	*vport_num = *res;
	return 0;
}

u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
					    u16 vport_num)
{
	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);

	if (WARN_ON_ONCE(IS_ERR(vport)))
		return 0;

	return vport->metadata;
}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_set);
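/* Illustrative call pattern for the vhca_id map above (hypothetical caller
 * shown for documentation only; "peer_vhca_id" is a placeholder):
 *
 *	u16 vport_num;
 *	int err;
 *
 *	err = mlx5_eswitch_vhca_id_to_vport(esw, peer_vhca_id, &vport_num);
 *	if (!err)
 *		vport_num now names the vport that owns peer_vhca_id
 *
 * mlx5_esw_vport_vhca_id_set() and mlx5_esw_vport_vhca_id_clear() are
 * expected to bracket a vport's enabled lifetime so that lookups never
 * observe a stale mapping.
 */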
static bool
is_port_function_supported(struct mlx5_eswitch *esw, u16 vport_num)
{
	return vport_num == MLX5_VPORT_PF ||
	       mlx5_eswitch_is_vf_vport(esw, vport_num) ||
	       mlx5_esw_is_sf_vport(esw, vport_num);
}

int mlx5_devlink_port_function_hw_addr_get(struct devlink_port *port,
					   u8 *hw_addr, int *hw_addr_len,
					   struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw;
	struct mlx5_vport *vport;
	u16 vport_num;

	esw = mlx5_devlink_eswitch_get(port->devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
	if (!is_port_function_supported(esw, vport_num))
		return -EOPNOTSUPP;

	vport = mlx5_eswitch_get_vport(esw, vport_num);
	if (IS_ERR(vport)) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid port");
		return PTR_ERR(vport);
	}

	mutex_lock(&esw->state_lock);
	ether_addr_copy(hw_addr, vport->info.mac);
	*hw_addr_len = ETH_ALEN;
	mutex_unlock(&esw->state_lock);
	return 0;
}

int mlx5_devlink_port_function_hw_addr_set(struct devlink_port *port,
					   const u8 *hw_addr, int hw_addr_len,
					   struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw;
	u16 vport_num;

	esw = mlx5_devlink_eswitch_get(port->devlink);
	if (IS_ERR(esw)) {
		NL_SET_ERR_MSG_MOD(extack, "Eswitch doesn't support set hw_addr");
		return PTR_ERR(esw);
	}

	vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
	if (!is_port_function_supported(esw, vport_num)) {
		NL_SET_ERR_MSG_MOD(extack, "Port doesn't support set hw_addr");
		return -EINVAL;
	}

	return mlx5_eswitch_set_vport_mac(esw, vport_num, hw_addr);
}
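/* Illustrative userspace flow for the two hw_addr callbacks above, shown
 * for documentation only; the devlink port handle and MAC address are
 * placeholders:
 *
 *   devlink port show pci/0000:08:00.0/1
 *   devlink port function set pci/0000:08:00.0/1 hw_addr 00:00:00:00:88:88
 */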