/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/idr.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
#include "esw/indir_table.h"
#include "esw/acl/ofld.h"
#include "rdma.h"
#include "en.h"
#include "fs_core.h"
#include "lib/devcom.h"
#include "lib/eq.h"
#include "lib/fs_chains.h"
#include "en_tc.h"
#include "en/mapping.h"
#include "devlink.h"
#include "lag/lag.h"
#include "en/tc/post_meter.h"

#define mlx5_esw_for_each_rep(esw, i, rep) \
	xa_for_each(&((esw)->offloads.vport_reps), i, rep)

/* There are two match-all miss flows, one for unicast dst mac and
 * one for multicast.
 */
#define MLX5_ESW_MISS_FLOWS (2)
#define UPLINK_REP_INDEX 0

#define MLX5_ESW_VPORT_TBL_SIZE 128
#define MLX5_ESW_VPORT_TBL_NUM_GROUPS 4

#define MLX5_ESW_FT_OFFLOADS_DROP_RULE (1)

static struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_mirror_ns = {
	.max_fte = MLX5_ESW_VPORT_TBL_SIZE,
	.max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS,
	.flags = 0,
};

static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
						     u16 vport_num)
{
	return xa_load(&esw->offloads.vport_reps, vport_num);
}

static void
mlx5_eswitch_set_rule_flow_source(struct mlx5_eswitch *esw,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_esw_flow_attr *attr)
{
	if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) || !attr || !attr->in_rep)
		return;

	if (attr->int_port) {
		spec->flow_context.flow_source = mlx5e_tc_int_port_get_flow_source(attr->int_port);

		return;
	}

	spec->flow_context.flow_source = (attr->in_rep->vport == MLX5_VPORT_UPLINK) ?
					 MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK :
					 MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
}

/* Strictly speaking, only the upper 16 bits of reg c0 need to be cleared, but
 * the lower 16 bits are not used further down the pipeline either, so clear
 * the whole register for simplicity.
 */
void
mlx5_eswitch_clear_rule_source_port(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec)
{
	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		void *misc2;

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, 0);

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, 0);

		if (!memchr_inv(misc2, 0, MLX5_ST_SZ_BYTES(fte_match_set_misc2)))
			spec->match_criteria_enable &= ~MLX5_MATCH_MISC_PARAMETERS_2;
	}
}

static void
mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_flow_attr *attr,
				  struct mlx5_eswitch *src_esw,
				  u16 vport)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	u32 metadata;
	void *misc2;
	void *misc;

	/* Use metadata matching because vport is not represented by single
	 * VHCA in dual-port RoCE mode, and matching on source vport may fail.
	 */
	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		if (mlx5_esw_indir_table_decap_vport(attr))
			vport = mlx5_esw_indir_table_decap_vport(attr);

		if (!attr->chain && esw_attr && esw_attr->int_port)
			metadata =
				mlx5e_tc_int_port_get_metadata_for_match(esw_attr->int_port);
		else
			metadata =
				mlx5_eswitch_get_vport_metadata_for_match(src_esw, vport);

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, metadata);

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);

		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id,
				 MLX5_CAP_GEN(src_esw->dev, vhca_id));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET_TO_ONES(fte_match_set_misc, misc,
					 source_eswitch_owner_vhca_id);

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
	}
}
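/* Illustration (not part of the driver): with metadata matching enabled,
 * identifying traffic that entered from vport N of a given eswitch reduces
 * to a masked compare on metadata_reg_c_0, conceptually:
 *
 *	u32 meta = mlx5_eswitch_get_vport_metadata_for_match(src_esw, N);
 *	u32 mask = mlx5_eswitch_get_vport_metadata_mask();
 *
 *	the rule hits iff (reg_c_0 & mask) == (meta & mask)
 */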
static int
esw_setup_decap_indir(struct mlx5_eswitch *esw,
		      struct mlx5_flow_attr *attr)
{
	struct mlx5_flow_table *ft;

	if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE))
		return -EOPNOTSUPP;

	ft = mlx5_esw_indir_table_get(esw, attr,
				      mlx5_esw_indir_table_decap_vport(attr), true);
	return PTR_ERR_OR_ZERO(ft);
}

static void
esw_cleanup_decap_indir(struct mlx5_eswitch *esw,
			struct mlx5_flow_attr *attr)
{
	if (mlx5_esw_indir_table_decap_vport(attr))
		mlx5_esw_indir_table_put(esw,
					 mlx5_esw_indir_table_decap_vport(attr),
					 true);
}

static int
esw_setup_mtu_dest(struct mlx5_flow_destination *dest,
		   struct mlx5e_meter_attr *meter,
		   int i)
{
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_RANGE;
	dest[i].range.field = MLX5_FLOW_DEST_RANGE_FIELD_PKT_LEN;
	dest[i].range.min = 0;
	dest[i].range.max = meter->params.mtu;
	dest[i].range.hit_ft = mlx5e_post_meter_get_mtu_true_ft(meter->post_meter);
	dest[i].range.miss_ft = mlx5e_post_meter_get_mtu_false_ft(meter->post_meter);

	return 0;
}

static int
esw_setup_sampler_dest(struct mlx5_flow_destination *dest,
		       struct mlx5_flow_act *flow_act,
		       u32 sampler_id,
		       int i)
{
	flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
	dest[i].sampler_id = sampler_id;

	return 0;
}

static int
esw_setup_ft_dest(struct mlx5_flow_destination *dest,
		  struct mlx5_flow_act *flow_act,
		  struct mlx5_eswitch *esw,
		  struct mlx5_flow_attr *attr,
		  int i)
{
	flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = attr->dest_ft;

	if (mlx5_esw_indir_table_decap_vport(attr))
		return esw_setup_decap_indir(esw, attr);
	return 0;
}

static void
esw_setup_accept_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
		      struct mlx5_fs_chains *chains, int i)
{
	if (mlx5_chains_ignore_flow_level_supported(chains))
		flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = mlx5_chains_get_tc_end_ft(chains);
}

static void
esw_setup_slow_path_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
			 struct mlx5_eswitch *esw, int i)
{
	if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level))
		flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = mlx5_eswitch_get_slow_fdb(esw);
}

static int
esw_setup_chain_dest(struct mlx5_flow_destination *dest,
		     struct mlx5_flow_act *flow_act,
		     struct mlx5_fs_chains *chains,
		     u32 chain, u32 prio, u32 level,
		     int i)
{
	struct mlx5_flow_table *ft;

	flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	ft = mlx5_chains_get_table(chains, chain, prio, level);
	if (IS_ERR(ft))
		return PTR_ERR(ft);

	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = ft;
	return 0;
}

static void esw_put_dest_tables_loop(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr,
				     int from, int to)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	int i;

	for (i = from; i < to; i++)
		if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
			mlx5_chains_put_table(chains, 0, 1, 0);
		else if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
						     esw_attr->dests[i].mdev))
			mlx5_esw_indir_table_put(esw, esw_attr->dests[i].rep->vport,
						 false);
}

static bool
esw_is_chain_src_port_rewrite(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr)
{
	int i;

	for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
		if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
			return true;
	return false;
}

static int
esw_setup_chain_src_port_rewrite(struct mlx5_flow_destination *dest,
				 struct mlx5_flow_act *flow_act,
				 struct mlx5_eswitch *esw,
				 struct mlx5_fs_chains *chains,
				 struct mlx5_flow_attr *attr,
				 int *i)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	int err;

	if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE))
		return -EOPNOTSUPP;

	/* flow steering cannot handle more than one dest with the same ft
	 * in a single flow
	 */
	if (esw_attr->out_count - esw_attr->split_count > 1)
		return -EOPNOTSUPP;

	err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain, 1, 0, *i);
	if (err)
		return err;

	if (esw_attr->dests[esw_attr->split_count].pkt_reformat) {
		flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
		flow_act->pkt_reformat = esw_attr->dests[esw_attr->split_count].pkt_reformat;
	}
	(*i)++;

	return 0;
}

static void esw_cleanup_chain_src_port_rewrite(struct mlx5_eswitch *esw,
					       struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;

	esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, esw_attr->out_count);
}

static bool
esw_is_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	bool result = false;
	int i;

	/* Indirect table is supported only for flows with in_port uplink
	 * and the destination is vport on the same eswitch as the uplink,
	 * return false in case at least one of the destinations doesn't meet
	 * these criteria.
	 */
	for (i = esw_attr->split_count; i < esw_attr->out_count; i++) {
		if (esw_attr->dests[i].rep &&
		    mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
						esw_attr->dests[i].mdev)) {
			result = true;
		} else {
			result = false;
			break;
		}
	}
	return result;
}

static int
esw_setup_indir_table(struct mlx5_flow_destination *dest,
		      struct mlx5_flow_act *flow_act,
		      struct mlx5_eswitch *esw,
		      struct mlx5_flow_attr *attr,
		      bool ignore_flow_lvl,
		      int *i)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	int j, err;

	if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE))
		return -EOPNOTSUPP;

	for (j = esw_attr->split_count; j < esw_attr->out_count; j++, (*i)++) {
		if (ignore_flow_lvl)
			flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
		dest[*i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;

		dest[*i].ft = mlx5_esw_indir_table_get(esw, attr,
						       esw_attr->dests[j].rep->vport, false);
		if (IS_ERR(dest[*i].ft)) {
			err = PTR_ERR(dest[*i].ft);
			goto err_indir_tbl_get;
		}
	}

	if (mlx5_esw_indir_table_decap_vport(attr)) {
		err = esw_setup_decap_indir(esw, attr);
		if (err)
			goto err_indir_tbl_get;
	}

	return 0;

err_indir_tbl_get:
	esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, j);
	return err;
}

static void esw_cleanup_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;

	esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, esw_attr->out_count);
	esw_cleanup_decap_indir(esw, attr);
}

static void
esw_cleanup_chain_dest(struct mlx5_fs_chains *chains, u32 chain, u32 prio, u32 level)
{
	mlx5_chains_put_table(chains, chain, prio, level);
}

static void
esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
		     struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
		     int attr_idx, int dest_idx, bool pkt_reformat)
{
	dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest[dest_idx].vport.num = esw_attr->dests[attr_idx].rep->vport;
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
		dest[dest_idx].vport.vhca_id =
			MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
		dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
		if (dest[dest_idx].vport.num == MLX5_VPORT_UPLINK &&
		    mlx5_lag_is_mpesw(esw->dev))
			dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
	}
	if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP_VALID) {
		if (pkt_reformat) {
			flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
			flow_act->pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
		}
		dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
		dest[dest_idx].vport.pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
	}
}

static int
esw_setup_vport_dests(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
		      struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
		      int i)
{
	int j;

	for (j = esw_attr->split_count; j < esw_attr->out_count; j++, i++)
		esw_setup_vport_dest(dest, flow_act, esw, esw_attr, j, i, true);
	return i;
}

static bool
esw_src_port_rewrite_supported(struct mlx5_eswitch *esw)
{
	return MLX5_CAP_GEN(esw->dev, reg_c_preserve) &&
	       mlx5_eswitch_vport_match_metadata_enabled(esw) &&
	       MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level);
}

static int
esw_setup_dests(struct mlx5_flow_destination *dest,
		struct mlx5_flow_act *flow_act,
		struct mlx5_eswitch *esw,
		struct mlx5_flow_attr *attr,
		struct mlx5_flow_spec *spec,
		int *i)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	int err = 0;

	if (!mlx5_eswitch_termtbl_required(esw, attr, flow_act, spec) &&
	    esw_src_port_rewrite_supported(esw))
		attr->flags |= MLX5_ATTR_FLAG_SRC_REWRITE;

	if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH) {
		esw_setup_slow_path_dest(dest, flow_act, esw, *i);
		(*i)++;
		goto out;
	}

	if (attr->flags & MLX5_ATTR_FLAG_SAMPLE) {
		esw_setup_sampler_dest(dest, flow_act, attr->sample_attr.sampler_id, *i);
		(*i)++;
	} else if (attr->flags & MLX5_ATTR_FLAG_ACCEPT) {
		esw_setup_accept_dest(dest, flow_act, chains, *i);
		(*i)++;
	} else if (attr->flags & MLX5_ATTR_FLAG_MTU) {
		err = esw_setup_mtu_dest(dest, &attr->meter_attr, *i);
		(*i)++;
	} else if (esw_is_indir_table(esw, attr)) {
		err = esw_setup_indir_table(dest, flow_act, esw, attr, true, i);
	} else if (esw_is_chain_src_port_rewrite(esw, esw_attr)) {
		err = esw_setup_chain_src_port_rewrite(dest, flow_act, esw, chains, attr, i);
	} else {
		*i = esw_setup_vport_dests(dest, flow_act, esw, esw_attr, *i);

		if (attr->dest_ft) {
			err = esw_setup_ft_dest(dest, flow_act, esw, attr, *i);
			(*i)++;
		} else if (attr->dest_chain) {
			err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain,
						   1, 0, *i);
			(*i)++;
		}
	}

out:
	return err;
}

static void
esw_cleanup_dests(struct mlx5_eswitch *esw,
		  struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);

	if (attr->dest_ft) {
		esw_cleanup_decap_indir(esw, attr);
	} else if (!mlx5e_tc_attr_flags_skip(attr->flags)) {
		if (attr->dest_chain)
			esw_cleanup_chain_dest(chains, attr->dest_chain, 1, 0);
		else if (esw_is_indir_table(esw, attr))
			esw_cleanup_indir_table(esw, attr);
		else if (esw_is_chain_src_port_rewrite(esw, esw_attr))
			esw_cleanup_chain_src_port_rewrite(esw, attr);
	}
}

static void
esw_setup_meter(struct mlx5_flow_attr *attr, struct mlx5_flow_act *flow_act)
{
	struct mlx5e_flow_meter_handle *meter;

	meter = attr->meter_attr.meter;
	flow_act->exe_aso.type = attr->exe_aso_type;
	flow_act->exe_aso.object_id = meter->obj_id;
	flow_act->exe_aso.flow_meter.meter_idx = meter->idx;
	flow_act->exe_aso.flow_meter.init_color = MLX5_FLOW_METER_COLOR_GREEN;
	/* use metadata reg 5 for packet color */
	flow_act->exe_aso.return_reg_id = 5;
}
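/* Sketch (illustrative only, not driver code): a caller that simply forwards
 * lets esw_setup_dests() populate the destination array, e.g.
 *
 *	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
 *	int i = 0, err;
 *
 *	err = esw_setup_dests(dest, &flow_act, esw, attr, spec, &i);
 *	// on success, dest[0..i-1] holds the vport/table destinations and i
 *	// is positioned for an optional trailing counter destination
 */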
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_flow_attr *attr)
{
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	bool split = !!(esw_attr->split_count);
	struct mlx5_vport_tbl_attr fwd_attr;
	struct mlx5_flow_destination *dest;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *fdb;
	int i = 0;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return ERR_PTR(-EOPNOTSUPP);

	dest = kcalloc(MLX5_MAX_FLOW_FWD_VPORTS + 1, sizeof(*dest), GFP_KERNEL);
	if (!dest)
		return ERR_PTR(-ENOMEM);

	flow_act.action = attr->action;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
		flow_act.vlan[0].ethtype = ntohs(esw_attr->vlan_proto[0]);
		flow_act.vlan[0].vid = esw_attr->vlan_vid[0];
		flow_act.vlan[0].prio = esw_attr->vlan_prio[0];
		if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
			flow_act.vlan[1].ethtype = ntohs(esw_attr->vlan_proto[1]);
			flow_act.vlan[1].vid = esw_attr->vlan_vid[1];
			flow_act.vlan[1].prio = esw_attr->vlan_prio[1];
		}
	}

	mlx5_eswitch_set_rule_flow_source(esw, spec, esw_attr);

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		int err;

		err = esw_setup_dests(dest, &flow_act, esw, attr, spec, &i);
		if (err) {
			rule = ERR_PTR(err);
			goto err_create_goto_table;
		}
	}

	if (esw_attr->decap_pkt_reformat)
		flow_act.pkt_reformat = esw_attr->decap_pkt_reformat;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter_id = mlx5_fc_id(attr->counter);
		i++;
	}

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
	if (attr->inner_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_hdr = attr->modify_hdr;

	if ((flow_act.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) &&
	    attr->exe_aso_type == MLX5_EXE_ASO_FLOW_METER)
		esw_setup_meter(attr, &flow_act);

	if (split) {
		fwd_attr.chain = attr->chain;
		fwd_attr.prio = attr->prio;
		fwd_attr.vport = esw_attr->in_rep->vport;
		fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;

		fdb = mlx5_esw_vporttbl_get(esw, &fwd_attr);
	} else {
		if (attr->chain || attr->prio)
			fdb = mlx5_chains_get_table(chains, attr->chain,
						    attr->prio, 0);
		else
			fdb = attr->ft;

		if (!(attr->flags & MLX5_ATTR_FLAG_NO_IN_PORT))
			mlx5_eswitch_set_rule_source_port(esw, spec, attr,
							  esw_attr->in_mdev->priv.eswitch,
							  esw_attr->in_rep->vport);
	}
	if (IS_ERR(fdb)) {
		rule = ERR_CAST(fdb);
		goto err_esw_get;
	}

	if (!i) {
		kfree(dest);
		dest = NULL;
	}

	if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec))
		rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, esw_attr,
						     &flow_act, dest, i);
	else
		rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
	if (IS_ERR(rule))
		goto err_add_rule;
	else
		atomic64_inc(&esw->offloads.num_flows);

	kfree(dest);
	return rule;

err_add_rule:
	if (split)
		mlx5_esw_vporttbl_put(esw, &fwd_attr);
	else if (attr->chain || attr->prio)
		mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
err_esw_get:
	esw_cleanup_dests(esw, attr);
err_create_goto_table:
	kfree(dest);
	return rule;
}

struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_flow_attr *attr)
{
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	struct mlx5_vport_tbl_attr fwd_attr;
	struct mlx5_flow_destination *dest;
	struct mlx5_flow_table *fast_fdb;
	struct mlx5_flow_table *fwd_fdb;
	struct mlx5_flow_handle *rule;
	int i, err = 0;

	dest = kcalloc(MLX5_MAX_FLOW_FWD_VPORTS + 1, sizeof(*dest), GFP_KERNEL);
	if (!dest)
		return ERR_PTR(-ENOMEM);

	fast_fdb = mlx5_chains_get_table(chains, attr->chain, attr->prio, 0);
	if (IS_ERR(fast_fdb)) {
		rule = ERR_CAST(fast_fdb);
		goto err_get_fast;
	}

	fwd_attr.chain = attr->chain;
	fwd_attr.prio = attr->prio;
	fwd_attr.vport = esw_attr->in_rep->vport;
	fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
	fwd_fdb = mlx5_esw_vporttbl_get(esw, &fwd_attr);
	if (IS_ERR(fwd_fdb)) {
		rule = ERR_CAST(fwd_fdb);
		goto err_get_fwd;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	for (i = 0; i < esw_attr->split_count; i++) {
		if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
			/* Source port rewrite (forward to ovs internal port or
			 * stacked device) isn't supported in the rule of split
			 * action.
			 */
			err = -EOPNOTSUPP;
		else
			esw_setup_vport_dest(dest, &flow_act, esw, esw_attr, i, i, false);

		if (err) {
			rule = ERR_PTR(err);
			goto err_chain_src_rewrite;
		}
	}
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = fwd_fdb;
	i++;

	mlx5_eswitch_set_rule_source_port(esw, spec, attr,
					  esw_attr->in_mdev->priv.eswitch,
					  esw_attr->in_rep->vport);

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);

	if (IS_ERR(rule)) {
		i = esw_attr->split_count;
		goto err_chain_src_rewrite;
	}

	atomic64_inc(&esw->offloads.num_flows);

	kfree(dest);
	return rule;
err_chain_src_rewrite:
	mlx5_esw_vporttbl_put(esw, &fwd_attr);
err_get_fwd:
	mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
err_get_fast:
	kfree(dest);
	return rule;
}

static void
__mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
			struct mlx5_flow_handle *rule,
			struct mlx5_flow_attr *attr,
			bool fwd_rule)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	bool split = (esw_attr->split_count > 0);
	struct mlx5_vport_tbl_attr fwd_attr;
	int i;

	mlx5_del_flow_rules(rule);

	if (!mlx5e_tc_attr_flags_skip(attr->flags)) {
		/* unref the term table */
		for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
			if (esw_attr->dests[i].termtbl)
				mlx5_eswitch_termtbl_put(esw, esw_attr->dests[i].termtbl);
		}
	}

	atomic64_dec(&esw->offloads.num_flows);

	if (fwd_rule || split) {
		fwd_attr.chain = attr->chain;
		fwd_attr.prio = attr->prio;
		fwd_attr.vport = esw_attr->in_rep->vport;
		fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
	}

	if (fwd_rule) {
		mlx5_esw_vporttbl_put(esw, &fwd_attr);
		mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
	} else {
		if (split)
			mlx5_esw_vporttbl_put(esw, &fwd_attr);
		else if (attr->chain || attr->prio)
			mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
		esw_cleanup_dests(esw, attr);
	}
}

void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, false);
}

void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, true);
}
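/* Usage sketch (hypothetical caller): a rule created by
 * mlx5_eswitch_add_offloaded_rule() must be released through the matching
 * delete helper so chain and vport-table references are dropped as well:
 *
 *	rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
 *	if (IS_ERR(rule))
 *		return PTR_ERR(rule);
 *	...
 *	mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
 */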
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
				    struct mlx5_eswitch *from_esw,
				    struct mlx5_eswitch_rep *rep,
				    u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;
	u16 vport;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

	/* source vport is the esw manager */
	vport = from_esw->manager_vport;

	if (mlx5_eswitch_vport_match_metadata_enabled(on_esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(from_esw, vport));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);

		if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch))
			MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
				 MLX5_CAP_GEN(from_esw->dev, vhca_id));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

		if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch))
			MLX5_SET_TO_ONES(fte_match_set_misc, misc,
					 source_eswitch_owner_vhca_id);

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
	}

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = rep->vport;
	dest.vport.vhca_id = MLX5_CAP_GEN(rep->esw->dev, vhca_id);
	dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	if (MLX5_CAP_ESW_FLOWTABLE(on_esw->dev, flow_source) &&
	    rep->vport == MLX5_VPORT_UPLINK)
		spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;

	flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(on_esw),
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(on_esw->dev, "FDB: Failed to add send to vport rule err %ld\n",
			 PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}
EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);

void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
	mlx5_del_flow_rules(rule);
}

void mlx5_eswitch_del_send_to_vport_meta_rule(struct mlx5_flow_handle *rule)
{
	if (rule)
		mlx5_del_flow_rules(rule);
}
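/* Usage sketch (hypothetical caller, e.g. a representor binding one of its
 * SQs so that packets it transmits reach the represented vport):
 *
 *	flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw, esw, rep, sqn);
 *	if (IS_ERR(flow_rule))
 *		return PTR_ERR(flow_rule);
 *	...
 *	mlx5_eswitch_del_send_to_vport_rule(flow_rule);
 */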
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_meta_rule(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(fte_match_param, spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
	MLX5_SET(fte_match_param, spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
	MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_1,
		 ESW_TUN_SLOW_TABLE_GOTO_VPORT_MARK);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));
	dest.vport.num = vport_num;

	flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev, "FDB: Failed to add send to vport meta rule vport %d, err %ld\n",
			 vport_num, PTR_ERR(flow_rule));

	kvfree(spec);
	return flow_rule;
}

static bool mlx5_eswitch_reg_c1_loopback_supported(struct mlx5_eswitch *esw)
{
	return MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
	       MLX5_FDB_TO_VPORT_REG_C_1;
}

static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
{
	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
	u32 min[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
	u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
	u8 curr, wanted;
	int err;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw) &&
	    !mlx5_eswitch_vport_match_metadata_enabled(esw))
		return 0;

	MLX5_SET(query_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
	err = mlx5_cmd_exec_inout(esw->dev, query_esw_vport_context, in, out);
	if (err)
		return err;

	curr = MLX5_GET(query_esw_vport_context_out, out,
			esw_vport_context.fdb_to_vport_reg_c_id);
	wanted = MLX5_FDB_TO_VPORT_REG_C_0;
	if (mlx5_eswitch_reg_c1_loopback_supported(esw))
		wanted |= MLX5_FDB_TO_VPORT_REG_C_1;

	if (enable)
		curr |= wanted;
	else
		curr &= ~wanted;

	MLX5_SET(modify_esw_vport_context_in, min,
		 esw_vport_context.fdb_to_vport_reg_c_id, curr);
	MLX5_SET(modify_esw_vport_context_in, min,
		 field_select.fdb_to_vport_reg_c_id, 1);

	err = mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false, min);
	if (!err) {
		if (enable && (curr & MLX5_FDB_TO_VPORT_REG_C_1))
			esw->flags |= MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
		else
			esw->flags &= ~MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
	}

	return err;
}

static void peer_miss_rules_setup(struct mlx5_eswitch *esw,
				  struct mlx5_core_dev *peer_dev,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_flow_destination *dest)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

		MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(peer_dev, vhca_id));

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);
	}

	dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest->vport.num = peer_dev->priv.eswitch->manager_vport;
	dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
	dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
}

static void esw_set_peer_miss_rule_source_port(struct mlx5_eswitch *esw,
					       struct mlx5_eswitch *peer_esw,
					       struct mlx5_flow_spec *spec,
					       u16 vport)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(peer_esw,
								   vport));
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);
	}
}
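/* Note (illustration only): the peer miss rules built below match traffic
 * sourced from a peer eswitch vport that hit no offloaded rule, and forward
 * it to the peer's eswitch manager vport; conceptually, one rule per peer
 * vport v:
 *
 *	match(source == v on peer esw) -> fwd(peer manager vport)
 */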
static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
				       struct mlx5_core_dev *peer_dev)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle **flows;
	/* total vports is the same for both e-switches */
	int nvports = esw->total_vports;
	struct mlx5_flow_handle *flow;
	struct mlx5_flow_spec *spec;
	struct mlx5_vport *vport;
	unsigned long i;
	void *misc;
	int err;

	if (!MLX5_VPORT_MANAGER(esw->dev) && !mlx5_core_is_ecpf_esw_manager(esw->dev))
		return 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	peer_miss_rules_setup(esw, peer_dev, spec, &dest);

	flows = kvcalloc(nvports, sizeof(*flows), GFP_KERNEL);
	if (!flows) {
		err = -ENOMEM;
		goto alloc_flows_err;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
		esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
						   spec, MLX5_VPORT_PF);

		flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_pf_flow_err;
		}
		flows[vport->index] = flow;
	}

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
		MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
		flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_ecpf_flow_err;
		}
		flows[vport->index] = flow;
	}

	mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
		esw_set_peer_miss_rule_source_port(esw,
						   peer_dev->priv.eswitch,
						   spec, vport->vport);

		flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_vf_flow_err;
		}
		flows[vport->index] = flow;
	}

	if (mlx5_core_ec_sriov_enabled(esw->dev)) {
		mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) {
			if (i >= mlx5_core_max_ec_vfs(peer_dev))
				break;
			esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
							   spec, vport->vport);
			flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
						   spec, &flow_act, &dest, 1);
			if (IS_ERR(flow)) {
				err = PTR_ERR(flow);
				goto add_ec_vf_flow_err;
			}
			flows[vport->index] = flow;
		}
	}
	esw->fdb_table.offloads.peer_miss_rules[mlx5_get_dev_index(peer_dev)] = flows;

	kvfree(spec);
	return 0;

add_ec_vf_flow_err:
	mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) {
		if (!flows[vport->index])
			continue;
		mlx5_del_flow_rules(flows[vport->index]);
	}
add_vf_flow_err:
	mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
		if (!flows[vport->index])
			continue;
		mlx5_del_flow_rules(flows[vport->index]);
	}
	if (mlx5_ecpf_vport_exists(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
		mlx5_del_flow_rules(flows[vport->index]);
	}
add_ecpf_flow_err:
	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
		mlx5_del_flow_rules(flows[vport->index]);
	}
add_pf_flow_err:
	esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
	kvfree(flows);
alloc_flows_err:
	kvfree(spec);
	return err;
}

static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
					struct mlx5_core_dev *peer_dev)
{
	u16 peer_index = mlx5_get_dev_index(peer_dev);
	struct mlx5_flow_handle **flows;
	struct mlx5_vport *vport;
	unsigned long i;

	flows = esw->fdb_table.offloads.peer_miss_rules[peer_index];
	if (!flows)
		return;

	if (mlx5_core_ec_sriov_enabled(esw->dev)) {
		mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) {
			/* The flow for a particular vport could be NULL if the other ECPF
			 * has fewer or no VFs enabled
			 */
			if (!flows[vport->index])
				continue;
			mlx5_del_flow_rules(flows[vport->index]);
		}
	}

	mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev))
		mlx5_del_flow_rules(flows[vport->index]);

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
		mlx5_del_flow_rules(flows[vport->index]);
	}

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
		mlx5_del_flow_rules(flows[vport->index]);
	}

	kvfree(flows);
	esw->fdb_table.offloads.peer_miss_rules[peer_index] = NULL;
}
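/* Note on the miss rules added below (illustration): the dmac mask byte
 * dmac_c[0] = 0x01 keys on the Ethernet multicast/broadcast bit only, so a
 * match value of 0 catches all unicast misses and a value of 0x01 catches
 * all multicast misses - the two MLX5_ESW_MISS_FLOWS accounted for above.
 */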
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	void *headers_c;
	void *headers_v;
	int err = 0;
	u8 *dmac_c;
	u8 *dmac_v;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				 outer_headers);
	dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
			      outer_headers.dmac_47_16);
	dmac_c[0] = 0x01;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = esw->manager_vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_uni = flow_rule;

	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				 outer_headers);
	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
			      outer_headers.dmac_47_16);
	dmac_v[0] = 0x01;
	flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
		mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_multi = flow_rule;

out:
	kvfree(spec);
	return err;
}

struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
{
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_flow_table *ft = esw->offloads.ft_offloads_restore;
	struct mlx5_flow_context *flow_context;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_destination dest;
	struct mlx5_flow_spec *spec;
	void *misc;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return ERR_PTR(-EOPNOTSUPP);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    misc_parameters_2);
	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
		 ESW_REG_C0_USER_DATA_METADATA_MASK);
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters_2);
	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, tag);
	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			  MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	flow_act.modify_hdr = esw->offloads.restore_copy_hdr_id;

	flow_context = &spec->flow_context;
	flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
	flow_context->flow_tag = tag;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = esw->offloads.ft_offloads;

	flow_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	kvfree(spec);

	if (IS_ERR(flow_rule))
		esw_warn(esw->dev,
			 "Failed to create restore rule for tag: %d, err(%d)\n",
			 tag, (int)PTR_ERR(flow_rule));

	return flow_rule;
}

#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32

void
mlx5_esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
				    u32 *flow_group_in,
				    int match_params)
{
	void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					    flow_group_in,
					    match_criteria);

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS_2 | match_params);

		MLX5_SET(fte_match_param, match_criteria,
			 misc_parameters_2.metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());
	} else {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS | match_params);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_port);
	}
}
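/* Usage sketch (hypothetical caller): TC restore installs one such rule per
 * mapped tag; a packet carrying `tag` in reg_c_0 gets the tag copied into
 * its flow_tag and is forwarded to ft_offloads:
 *
 *	handle = esw_add_restore_rule(esw, tag);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 */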
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
static void esw_vport_tbl_put(struct mlx5_eswitch *esw)
{
	struct mlx5_vport_tbl_attr attr;
	struct mlx5_vport *vport;
	unsigned long i;

	attr.chain = 0;
	attr.prio = 1;
	mlx5_esw_for_each_vport(esw, i, vport) {
		attr.vport = vport->vport;
		attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
		mlx5_esw_vporttbl_put(esw, &attr);
	}
}

static int esw_vport_tbl_get(struct mlx5_eswitch *esw)
{
	struct mlx5_vport_tbl_attr attr;
	struct mlx5_flow_table *fdb;
	struct mlx5_vport *vport;
	unsigned long i;

	attr.chain = 0;
	attr.prio = 1;
	mlx5_esw_for_each_vport(esw, i, vport) {
		attr.vport = vport->vport;
		attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
		fdb = mlx5_esw_vporttbl_get(esw, &attr);
		if (IS_ERR(fdb))
			goto out;
	}
	return 0;

out:
	esw_vport_tbl_put(esw);
	return PTR_ERR(fdb);
}

#define fdb_modify_header_fwd_to_table_supported(esw) \
	(MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_modify_header_fwd_to_table))
static void esw_init_chains_offload_flags(struct mlx5_eswitch *esw, u32 *flags)
{
	struct mlx5_core_dev *dev = esw->dev;

	if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ignore_flow_level))
		*flags |= MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;

	if (!MLX5_CAP_ESW_FLOWTABLE(dev, multi_fdb_encap) &&
	    esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
		*flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
		esw_warn(dev, "Tc chains and priorities offload aren't supported, update firmware if needed\n");
	} else if (!mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
		*flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
		esw_warn(dev, "Tc chains and priorities offload aren't supported\n");
	} else if (!fdb_modify_header_fwd_to_table_supported(esw)) {
		/* Disabled when ttl workaround is needed, e.g.
		 * when ESWITCH_IPV4_TTL_MODIFY_ENABLE = true in mlxconfig
		 */
		esw_warn(dev,
			 "Tc chains and priorities offload aren't supported, check firmware version, or mlxconfig settings\n");
		*flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
	} else {
		*flags |= MLX5_CHAINS_AND_PRIOS_SUPPORTED;
		esw_info(dev, "Supported tc chains and prios offload\n");
	}

	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		*flags |= MLX5_CHAINS_FT_TUNNEL_SUPPORTED;
}

static int
esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *nf_ft, *ft;
	struct mlx5_chains_attr attr = {};
	struct mlx5_fs_chains *chains;
	int err;

	esw_init_chains_offload_flags(esw, &attr.flags);
	attr.ns = MLX5_FLOW_NAMESPACE_FDB;
	attr.fs_base_prio = FDB_TC_OFFLOAD;
	attr.max_grp_num = esw->params.large_group_num;
	attr.default_ft = miss_fdb;
	attr.mapping = esw->offloads.reg_c0_obj_pool;

	chains = mlx5_chains_create(dev, &attr);
	if (IS_ERR(chains)) {
		err = PTR_ERR(chains);
		esw_warn(dev, "Failed to create fdb chains err(%d)\n", err);
		return err;
	}
	mlx5_chains_print_info(chains);

	esw->fdb_table.offloads.esw_chains_priv = chains;

	/* Create tc_end_ft which is the always created ft chain */
	nf_ft = mlx5_chains_get_table(chains, mlx5_chains_get_nf_ft_chain(chains),
				      1, 0);
	if (IS_ERR(nf_ft)) {
		err = PTR_ERR(nf_ft);
		goto nf_ft_err;
	}

	/* Always open the root for fast path */
	ft = mlx5_chains_get_table(chains, 0, 1, 0);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto level_0_err;
	}

	/* Open level 1 for split fdb rules now if prios isn't supported */
	if (!mlx5_chains_prios_supported(chains)) {
		err = esw_vport_tbl_get(esw);
		if (err)
			goto level_1_err;
	}

	mlx5_chains_set_end_ft(chains, nf_ft);

	return 0;

level_1_err:
	mlx5_chains_put_table(chains, 0, 1, 0);
level_0_err:
	mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0);
nf_ft_err:
	mlx5_chains_destroy(chains);
	esw->fdb_table.offloads.esw_chains_priv = NULL;

	return err;
}

static void
esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
{
	if (!mlx5_chains_prios_supported(chains))
		esw_vport_tbl_put(esw);
	mlx5_chains_put_table(chains, 0, 1, 0);
	mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0);
	mlx5_chains_destroy(chains);
}

#else /* CONFIG_MLX5_CLS_ACT */

static int
esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
{ return 0; }

static void
esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
{}

#endif

static int
esw_create_send_to_vport_group(struct mlx5_eswitch *esw,
			       struct mlx5_flow_table *fdb,
			       u32 *flow_group_in,
			       int *ix)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	void *match_criteria;
	int count, err = 0;

	memset(flow_group_in, 0, inlen);

	mlx5_esw_set_flow_group_source_port(esw, flow_group_in, MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
	    MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_eswitch_owner_vhca_id);
		MLX5_SET(create_flow_group_in, flow_group_in,
			 source_eswitch_owner_vhca_id_valid, 1);
	}

	/* See comment at table_size calculation */
	count = MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, *ix + count - 1);
	*ix += count;

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(esw->dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

out:
	return err;
}
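/* Flow group layout note (illustration): the create helpers below carve the
 * slow path FDB into consecutive flow-index ranges by advancing *ix, roughly:
 *
 *	send-to-vport:      [0, count - 1]
 *	meta send-to-vport: the next esw->total_vports entries
 *	peer miss:          the next max_peer_ports + 1 entries
 *	miss:               the final MLX5_ESW_MISS_FLOWS + 1 entries
 */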
static int
esw_create_meta_send_to_vport_group(struct mlx5_eswitch *esw,
				    struct mlx5_flow_table *fdb,
				    u32 *flow_group_in,
				    int *ix)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	void *match_criteria;
	int err = 0;

	if (!esw_src_port_rewrite_supported(esw))
		return 0;

	memset(flow_group_in, 0, inlen);

	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS_2);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET(fte_match_param, match_criteria,
		 misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_mask());
	MLX5_SET(fte_match_param, match_criteria,
		 misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, *ix);
	MLX5_SET(create_flow_group_in, flow_group_in,
		 end_flow_index, *ix + esw->total_vports - 1);
	*ix += esw->total_vports;

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(esw->dev,
			 "Failed to create send-to-vport meta flow group err(%d)\n", err);
		goto send_vport_meta_err;
	}
	esw->fdb_table.offloads.send_to_vport_meta_grp = g;

	return 0;

send_vport_meta_err:
	return err;
}

static int
esw_create_peer_esw_miss_group(struct mlx5_eswitch *esw,
			       struct mlx5_flow_table *fdb,
			       u32 *flow_group_in,
			       int *ix)
{
	int max_peer_ports = (esw->total_vports - 1) * (MLX5_MAX_PORTS - 1);
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	void *match_criteria;
	int err = 0;

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return 0;

	memset(flow_group_in, 0, inlen);

	mlx5_esw_set_flow_group_source_port(esw, flow_group_in, 0);

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					      flow_group_in,
					      match_criteria);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_eswitch_owner_vhca_id);

		MLX5_SET(create_flow_group_in, flow_group_in,
			 source_eswitch_owner_vhca_id_valid, 1);
	}

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, *ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 *ix + max_peer_ports);
	*ix += max_peer_ports + 1;

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(esw->dev, "Failed to create peer miss flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.offloads.peer_miss_grp = g;

out:
	return err;
}

static int
esw_create_miss_group(struct mlx5_eswitch *esw,
		      struct mlx5_flow_table *fdb,
		      u32 *flow_group_in,
		      int *ix)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	void *match_criteria;
	int err = 0;
	u8 *dmac;

	memset(flow_group_in, 0, inlen);

	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    outer_headers.dmac_47_16);
	dmac[0] = 0x01;

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, *ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 *ix + MLX5_ESW_MISS_FLOWS);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(esw->dev, "Failed to create miss flow group err(%d)\n", err);
		goto miss_err;
	}
	esw->fdb_table.offloads.miss_grp = g;

	err = esw_add_fdb_miss_rule(esw);
	if (err)
		goto miss_rule_err;

	return 0;

miss_rule_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
	return err;
}

static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	int table_size, ix = 0, err = 0;
	u32 flags = 0, *flow_group_in;

	esw_debug(esw->dev, "Create offloads FDB Tables\n");

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}
	esw->fdb_table.offloads.ns = root_ns;
	err = mlx5_flow_namespace_set_mode(root_ns,
					   esw->dev->priv.steering->mode);
	if (err) {
		esw_warn(dev, "Failed to set FDB namespace steering mode\n");
		goto ns_err;
	}

	/* To be strictly correct:
	 *	MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ)
	 * should be:
	 *	esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ +
	 *	peer_esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ
	 * but as the peer device might not be in switchdev mode it's not
	 * possible. We use the fact that by default FW sets max vfs and max sfs
	 * to the same value on both devices. If it needs to be changed in the
	 * future note the peer miss group should also be created based on the
	 * number of total vports of the peer (currently it also uses
	 * esw->total_vports).
	 */
	table_size = MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ) +
		     esw->total_vports * MLX5_MAX_PORTS + MLX5_ESW_MISS_FLOWS;

	/* create the slow path fdb with encap set, so further table instances
	 * can be created at run time while VFs are probed if the FW allows that.
	 */
	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	ft_attr.flags = flags;
	ft_attr.max_fte = table_size;
	ft_attr.prio = FDB_SLOW_PATH;

	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.slow_fdb = fdb;

	/* Create empty TC-miss managed table. This allows plugging in following
	 * priorities without directly exposing their level 0 table to
	 * eswitch_offloads and passing it as miss_fdb to following call to
	 * esw_chains_create().
	 */
	memset(&ft_attr, 0, sizeof(ft_attr));
	ft_attr.prio = FDB_TC_MISS;
	esw->fdb_table.offloads.tc_miss_table = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(esw->fdb_table.offloads.tc_miss_table)) {
		err = PTR_ERR(esw->fdb_table.offloads.tc_miss_table);
		esw_warn(dev, "Failed to create TC miss FDB Table err %d\n", err);
		goto tc_miss_table_err;
	}

	err = esw_chains_create(esw, esw->fdb_table.offloads.tc_miss_table);
	if (err) {
		esw_warn(dev, "Failed to open fdb chains err(%d)\n", err);
		goto fdb_chains_err;
	}

	err = esw_create_send_to_vport_group(esw, fdb, flow_group_in, &ix);
	if (err)
		goto send_vport_err;

	err = esw_create_meta_send_to_vport_group(esw, fdb, flow_group_in, &ix);
	if (err)
		goto send_vport_meta_err;

	err = esw_create_peer_esw_miss_group(esw, fdb, flow_group_in, &ix);
	if (err)
		goto peer_miss_err;

	err = esw_create_miss_group(esw, fdb, flow_group_in, &ix);
	if (err)
		goto miss_err;

	kvfree(flow_group_in);
	return 0;

miss_err:
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
peer_miss_err:
	if (esw->fdb_table.offloads.send_to_vport_meta_grp)
		mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp);
send_vport_meta_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
	esw_chains_destroy(esw, esw_chains(esw));
fdb_chains_err:
	mlx5_destroy_flow_table(esw->fdb_table.offloads.tc_miss_table);
tc_miss_table_err:
	mlx5_destroy_flow_table(mlx5_eswitch_get_slow_fdb(esw));
slow_fdb_err:
	/* Holds true only as long as DMFS is the default */
	mlx5_flow_namespace_set_mode(root_ns, MLX5_FLOW_STEERING_MODE_DMFS);
ns_err:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	if (!mlx5_eswitch_get_slow_fdb(esw))
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	if (esw->fdb_table.offloads.send_to_vport_meta_grp)
		mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp);
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

	esw_chains_destroy(esw, esw_chains(esw));

	mlx5_destroy_flow_table(esw->fdb_table.offloads.tc_miss_table);
	mlx5_destroy_flow_table(mlx5_eswitch_get_slow_fdb(esw));
	/* Holds true only as long as DMFS is the default */
	mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
				     MLX5_FLOW_STEERING_MODE_DMFS);
	atomic64_set(&esw->user_count, 0);
}

static int esw_get_nr_ft_offloads_steering_src_ports(struct mlx5_eswitch *esw)
{
	int nvports;

	nvports = esw->total_vports + MLX5_ESW_MISS_FLOWS;
	if (mlx5e_tc_int_port_supported(esw))
		nvports += MLX5E_TC_MAX_INT_PORT_NUM;

	return nvports;
}
mlx5_flow_table *ft_offloads; 1863 struct mlx5_flow_namespace *ns; 1864 int err = 0; 1865 1866 ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS); 1867 if (!ns) { 1868 esw_warn(esw->dev, "Failed to get offloads flow namespace\n"); 1869 return -EOPNOTSUPP; 1870 } 1871 1872 ft_attr.max_fte = esw_get_nr_ft_offloads_steering_src_ports(esw) + 1873 MLX5_ESW_FT_OFFLOADS_DROP_RULE; 1874 ft_attr.prio = 1; 1875 1876 ft_offloads = mlx5_create_flow_table(ns, &ft_attr); 1877 if (IS_ERR(ft_offloads)) { 1878 err = PTR_ERR(ft_offloads); 1879 esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err); 1880 return err; 1881 } 1882 1883 esw->offloads.ft_offloads = ft_offloads; 1884 return 0; 1885 } 1886 1887 static void esw_destroy_offloads_table(struct mlx5_eswitch *esw) 1888 { 1889 struct mlx5_esw_offload *offloads = &esw->offloads; 1890 1891 mlx5_destroy_flow_table(offloads->ft_offloads); 1892 } 1893 1894 static int esw_create_vport_rx_group(struct mlx5_eswitch *esw) 1895 { 1896 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); 1897 struct mlx5_flow_group *g; 1898 u32 *flow_group_in; 1899 int nvports; 1900 int err = 0; 1901 1902 nvports = esw_get_nr_ft_offloads_steering_src_ports(esw); 1903 flow_group_in = kvzalloc(inlen, GFP_KERNEL); 1904 if (!flow_group_in) 1905 return -ENOMEM; 1906 1907 mlx5_esw_set_flow_group_source_port(esw, flow_group_in, 0); 1908 1909 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); 1910 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1); 1911 1912 g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in); 1913 1914 if (IS_ERR(g)) { 1915 err = PTR_ERR(g); 1916 mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err); 1917 goto out; 1918 } 1919 1920 esw->offloads.vport_rx_group = g; 1921 out: 1922 kvfree(flow_group_in); 1923 return err; 1924 } 1925 1926 static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw) 1927 { 1928 mlx5_destroy_flow_group(esw->offloads.vport_rx_group); 1929 } 1930 1931 static int esw_create_vport_rx_drop_rule_index(struct mlx5_eswitch *esw) 1932 { 1933 /* ft_offloads table is enlarged by MLX5_ESW_FT_OFFLOADS_DROP_RULE (1) 1934 * for the drop rule, which is placed at the end of the table. 1935 * So return the total number of vport and int_port entries as the drop rule's index.
1936 */ 1937 return esw_get_nr_ft_offloads_steering_src_ports(esw); 1938 } 1939 1940 static int esw_create_vport_rx_drop_group(struct mlx5_eswitch *esw) 1941 { 1942 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); 1943 struct mlx5_flow_group *g; 1944 u32 *flow_group_in; 1945 int flow_index; 1946 int err = 0; 1947 1948 flow_index = esw_create_vport_rx_drop_rule_index(esw); 1949 1950 flow_group_in = kvzalloc(inlen, GFP_KERNEL); 1951 if (!flow_group_in) 1952 return -ENOMEM; 1953 1954 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index); 1955 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index); 1956 1957 g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in); 1958 1959 if (IS_ERR(g)) { 1960 err = PTR_ERR(g); 1961 mlx5_core_warn(esw->dev, "Failed to create vport rx drop group err %d\n", err); 1962 goto out; 1963 } 1964 1965 esw->offloads.vport_rx_drop_group = g; 1966 out: 1967 kvfree(flow_group_in); 1968 return err; 1969 } 1970 1971 static void esw_destroy_vport_rx_drop_group(struct mlx5_eswitch *esw) 1972 { 1973 if (esw->offloads.vport_rx_drop_group) 1974 mlx5_destroy_flow_group(esw->offloads.vport_rx_drop_group); 1975 } 1976 1977 void 1978 mlx5_esw_set_spec_source_port(struct mlx5_eswitch *esw, 1979 u16 vport, 1980 struct mlx5_flow_spec *spec) 1981 { 1982 void *misc; 1983 1984 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) { 1985 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2); 1986 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, 1987 mlx5_eswitch_get_vport_metadata_for_match(esw, vport)); 1988 1989 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2); 1990 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, 1991 mlx5_eswitch_get_vport_metadata_mask()); 1992 1993 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2; 1994 } else { 1995 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); 1996 MLX5_SET(fte_match_set_misc, misc, source_port, vport); 1997 1998 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); 1999 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); 2000 2001 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; 2002 } 2003 } 2004 2005 struct mlx5_flow_handle * 2006 mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport, 2007 struct mlx5_flow_destination *dest) 2008 { 2009 struct mlx5_flow_act flow_act = {0}; 2010 struct mlx5_flow_handle *flow_rule; 2011 struct mlx5_flow_spec *spec; 2012 2013 spec = kvzalloc(sizeof(*spec), GFP_KERNEL); 2014 if (!spec) { 2015 flow_rule = ERR_PTR(-ENOMEM); 2016 goto out; 2017 } 2018 2019 mlx5_esw_set_spec_source_port(esw, vport, spec); 2020 2021 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 2022 flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec, 2023 &flow_act, dest, 1); 2024 if (IS_ERR(flow_rule)) { 2025 esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule)); 2026 goto out; 2027 } 2028 2029 out: 2030 kvfree(spec); 2031 return flow_rule; 2032 } 2033 2034 static int esw_create_vport_rx_drop_rule(struct mlx5_eswitch *esw) 2035 { 2036 struct mlx5_flow_act flow_act = {}; 2037 struct mlx5_flow_handle *flow_rule; 2038 2039 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP; 2040 flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, NULL, 2041 &flow_act, NULL, 0); 2042 if (IS_ERR(flow_rule)) { 2043 esw_warn(esw->dev, 2044 "fs offloads: Failed to add vport rx drop rule err %ld\n", 2045 
PTR_ERR(flow_rule)); 2046 return PTR_ERR(flow_rule); 2047 } 2048 2049 esw->offloads.vport_rx_drop_rule = flow_rule; 2050 2051 return 0; 2052 } 2053 2054 static void esw_destroy_vport_rx_drop_rule(struct mlx5_eswitch *esw) 2055 { 2056 if (esw->offloads.vport_rx_drop_rule) 2057 mlx5_del_flow_rules(esw->offloads.vport_rx_drop_rule); 2058 } 2059 2060 static int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode) 2061 { 2062 u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2; 2063 struct mlx5_core_dev *dev = esw->dev; 2064 struct mlx5_vport *vport; 2065 unsigned long i; 2066 2067 if (!MLX5_CAP_GEN(dev, vport_group_manager)) 2068 return -EOPNOTSUPP; 2069 2070 if (!mlx5_esw_is_fdb_created(esw)) 2071 return -EOPNOTSUPP; 2072 2073 switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) { 2074 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED: 2075 mlx5_mode = MLX5_INLINE_MODE_NONE; 2076 goto out; 2077 case MLX5_CAP_INLINE_MODE_L2: 2078 mlx5_mode = MLX5_INLINE_MODE_L2; 2079 goto out; 2080 case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT: 2081 goto query_vports; 2082 } 2083 2084 query_vports: 2085 mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode); 2086 mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) { 2087 mlx5_query_nic_vport_min_inline(dev, vport->vport, &mlx5_mode); 2088 if (prev_mlx5_mode != mlx5_mode) 2089 return -EINVAL; 2090 prev_mlx5_mode = mlx5_mode; 2091 } 2092 2093 out: 2094 *mode = mlx5_mode; 2095 return 0; 2096 } 2097 2098 static void esw_destroy_restore_table(struct mlx5_eswitch *esw) 2099 { 2100 struct mlx5_esw_offload *offloads = &esw->offloads; 2101 2102 if (!mlx5_eswitch_reg_c1_loopback_supported(esw)) 2103 return; 2104 2105 mlx5_modify_header_dealloc(esw->dev, offloads->restore_copy_hdr_id); 2106 mlx5_destroy_flow_group(offloads->restore_group); 2107 mlx5_destroy_flow_table(offloads->ft_offloads_restore); 2108 } 2109 2110 static int esw_create_restore_table(struct mlx5_eswitch *esw) 2111 { 2112 u8 modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {}; 2113 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); 2114 struct mlx5_flow_table_attr ft_attr = {}; 2115 struct mlx5_core_dev *dev = esw->dev; 2116 struct mlx5_flow_namespace *ns; 2117 struct mlx5_modify_hdr *mod_hdr; 2118 void *match_criteria, *misc; 2119 struct mlx5_flow_table *ft; 2120 struct mlx5_flow_group *g; 2121 u32 *flow_group_in; 2122 int err = 0; 2123 2124 if (!mlx5_eswitch_reg_c1_loopback_supported(esw)) 2125 return 0; 2126 2127 ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS); 2128 if (!ns) { 2129 esw_warn(esw->dev, "Failed to get offloads flow namespace\n"); 2130 return -EOPNOTSUPP; 2131 } 2132 2133 flow_group_in = kvzalloc(inlen, GFP_KERNEL); 2134 if (!flow_group_in) { 2135 err = -ENOMEM; 2136 goto out_free; 2137 } 2138 2139 ft_attr.max_fte = 1 << ESW_REG_C0_USER_DATA_METADATA_BITS; 2140 ft = mlx5_create_flow_table(ns, &ft_attr); 2141 if (IS_ERR(ft)) { 2142 err = PTR_ERR(ft); 2143 esw_warn(esw->dev, "Failed to create restore table, err %d\n", 2144 err); 2145 goto out_free; 2146 } 2147 2148 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, 2149 match_criteria); 2150 misc = MLX5_ADDR_OF(fte_match_param, match_criteria, 2151 misc_parameters_2); 2152 2153 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, 2154 ESW_REG_C0_USER_DATA_METADATA_MASK); 2155 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); 2156 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2157 ft_attr.max_fte - 1); 2158 MLX5_SET(create_flow_group_in, 
flow_group_in, match_criteria_enable, 2159 MLX5_MATCH_MISC_PARAMETERS_2); 2160 g = mlx5_create_flow_group(ft, flow_group_in); 2161 if (IS_ERR(g)) { 2162 err = PTR_ERR(g); 2163 esw_warn(dev, "Failed to create restore flow group, err: %d\n", 2164 err); 2165 goto err_group; 2166 } 2167 2168 MLX5_SET(copy_action_in, modact, action_type, MLX5_ACTION_TYPE_COPY); 2169 MLX5_SET(copy_action_in, modact, src_field, 2170 MLX5_ACTION_IN_FIELD_METADATA_REG_C_1); 2171 MLX5_SET(copy_action_in, modact, dst_field, 2172 MLX5_ACTION_IN_FIELD_METADATA_REG_B); 2173 mod_hdr = mlx5_modify_header_alloc(esw->dev, 2174 MLX5_FLOW_NAMESPACE_KERNEL, 1, 2175 modact); 2176 if (IS_ERR(mod_hdr)) { 2177 err = PTR_ERR(mod_hdr); 2178 esw_warn(dev, "Failed to create restore mod header, err: %d\n", 2179 err); 2180 goto err_mod_hdr; 2181 } 2182 2183 esw->offloads.ft_offloads_restore = ft; 2184 esw->offloads.restore_group = g; 2185 esw->offloads.restore_copy_hdr_id = mod_hdr; 2186 2187 kvfree(flow_group_in); 2188 2189 return 0; 2190 2191 err_mod_hdr: 2192 mlx5_destroy_flow_group(g); 2193 err_group: 2194 mlx5_destroy_flow_table(ft); 2195 out_free: 2196 kvfree(flow_group_in); 2197 2198 return err; 2199 } 2200 2201 static int esw_offloads_start(struct mlx5_eswitch *esw, 2202 struct netlink_ext_ack *extack) 2203 { 2204 int err; 2205 2206 esw->mode = MLX5_ESWITCH_OFFLOADS; 2207 err = mlx5_eswitch_enable_locked(esw, esw->dev->priv.sriov.num_vfs); 2208 if (err) { 2209 NL_SET_ERR_MSG_MOD(extack, 2210 "Failed setting eswitch to offloads"); 2211 esw->mode = MLX5_ESWITCH_LEGACY; 2212 mlx5_rescan_drivers(esw->dev); 2213 return err; 2214 } 2215 if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) { 2216 if (mlx5_eswitch_inline_mode_get(esw, 2217 &esw->offloads.inline_mode)) { 2218 esw->offloads.inline_mode = MLX5_INLINE_MODE_L2; 2219 NL_SET_ERR_MSG_MOD(extack, 2220 "Inline mode is different between vports"); 2221 } 2222 } 2223 return 0; 2224 } 2225 2226 static int mlx5_esw_offloads_rep_init(struct mlx5_eswitch *esw, const struct mlx5_vport *vport) 2227 { 2228 struct mlx5_eswitch_rep *rep; 2229 int rep_type; 2230 int err; 2231 2232 rep = kzalloc(sizeof(*rep), GFP_KERNEL); 2233 if (!rep) 2234 return -ENOMEM; 2235 2236 rep->vport = vport->vport; 2237 rep->vport_index = vport->index; 2238 for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) 2239 atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED); 2240 2241 err = xa_insert(&esw->offloads.vport_reps, rep->vport, rep, GFP_KERNEL); 2242 if (err) 2243 goto insert_err; 2244 2245 return 0; 2246 2247 insert_err: 2248 kfree(rep); 2249 return err; 2250 } 2251 2252 static void mlx5_esw_offloads_rep_cleanup(struct mlx5_eswitch *esw, 2253 struct mlx5_eswitch_rep *rep) 2254 { 2255 xa_erase(&esw->offloads.vport_reps, rep->vport); 2256 kfree(rep); 2257 } 2258 2259 static void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw) 2260 { 2261 struct mlx5_eswitch_rep *rep; 2262 unsigned long i; 2263 2264 mlx5_esw_for_each_rep(esw, i, rep) 2265 mlx5_esw_offloads_rep_cleanup(esw, rep); 2266 xa_destroy(&esw->offloads.vport_reps); 2267 } 2268 2269 static int esw_offloads_init_reps(struct mlx5_eswitch *esw) 2270 { 2271 struct mlx5_vport *vport; 2272 unsigned long i; 2273 int err; 2274 2275 xa_init(&esw->offloads.vport_reps); 2276 2277 mlx5_esw_for_each_vport(esw, i, vport) { 2278 err = mlx5_esw_offloads_rep_init(esw, vport); 2279 if (err) 2280 goto err; 2281 } 2282 return 0; 2283 2284 err: 2285 esw_offloads_cleanup_reps(esw); 2286 return err; 2287 } 2288 2289 static int esw_port_metadata_set(struct 
devlink *devlink, u32 id, 2290 struct devlink_param_gset_ctx *ctx) 2291 { 2292 struct mlx5_core_dev *dev = devlink_priv(devlink); 2293 struct mlx5_eswitch *esw = dev->priv.eswitch; 2294 int err = 0; 2295 2296 down_write(&esw->mode_lock); 2297 if (mlx5_esw_is_fdb_created(esw)) { 2298 err = -EBUSY; 2299 goto done; 2300 } 2301 if (!mlx5_esw_vport_match_metadata_supported(esw)) { 2302 err = -EOPNOTSUPP; 2303 goto done; 2304 } 2305 if (ctx->val.vbool) 2306 esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA; 2307 else 2308 esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA; 2309 done: 2310 up_write(&esw->mode_lock); 2311 return err; 2312 } 2313 2314 static int esw_port_metadata_get(struct devlink *devlink, u32 id, 2315 struct devlink_param_gset_ctx *ctx) 2316 { 2317 struct mlx5_core_dev *dev = devlink_priv(devlink); 2318 2319 ctx->val.vbool = mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch); 2320 return 0; 2321 } 2322 2323 static int esw_port_metadata_validate(struct devlink *devlink, u32 id, 2324 union devlink_param_value val, 2325 struct netlink_ext_ack *extack) 2326 { 2327 struct mlx5_core_dev *dev = devlink_priv(devlink); 2328 u8 esw_mode; 2329 2330 esw_mode = mlx5_eswitch_mode(dev); 2331 if (esw_mode == MLX5_ESWITCH_OFFLOADS) { 2332 NL_SET_ERR_MSG_MOD(extack, 2333 "E-Switch must either be disabled or in non-switchdev mode"); 2334 return -EBUSY; 2335 } 2336 return 0; 2337 } 2338 2339 static const struct devlink_param esw_devlink_params[] = { 2340 DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_ESW_PORT_METADATA, 2341 "esw_port_metadata", DEVLINK_PARAM_TYPE_BOOL, 2342 BIT(DEVLINK_PARAM_CMODE_RUNTIME), 2343 esw_port_metadata_get, 2344 esw_port_metadata_set, 2345 esw_port_metadata_validate), 2346 }; 2347 2348 int esw_offloads_init(struct mlx5_eswitch *esw) 2349 { 2350 int err; 2351 2352 err = esw_offloads_init_reps(esw); 2353 if (err) 2354 return err; 2355 2356 err = devl_params_register(priv_to_devlink(esw->dev), 2357 esw_devlink_params, 2358 ARRAY_SIZE(esw_devlink_params)); 2359 if (err) 2360 goto err_params; 2361 2362 return 0; 2363 2364 err_params: 2365 esw_offloads_cleanup_reps(esw); 2366 return err; 2367 } 2368 2369 void esw_offloads_cleanup(struct mlx5_eswitch *esw) 2370 { 2371 devl_params_unregister(priv_to_devlink(esw->dev), 2372 esw_devlink_params, 2373 ARRAY_SIZE(esw_devlink_params)); 2374 esw_offloads_cleanup_reps(esw); 2375 } 2376 2377 static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw, 2378 struct mlx5_eswitch_rep *rep, u8 rep_type) 2379 { 2380 if (atomic_cmpxchg(&rep->rep_data[rep_type].state, 2381 REP_LOADED, REP_REGISTERED) == REP_LOADED) 2382 esw->offloads.rep_ops[rep_type]->unload(rep); 2383 } 2384 2385 static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type) 2386 { 2387 struct mlx5_eswitch_rep *rep; 2388 unsigned long i; 2389 2390 mlx5_esw_for_each_rep(esw, i, rep) 2391 __esw_offloads_unload_rep(esw, rep, rep_type); 2392 } 2393 2394 int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num) 2395 { 2396 struct mlx5_eswitch_rep *rep; 2397 int rep_type; 2398 int err; 2399 2400 rep = mlx5_eswitch_get_rep(esw, vport_num); 2401 for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) 2402 if (atomic_cmpxchg(&rep->rep_data[rep_type].state, 2403 REP_REGISTERED, REP_LOADED) == REP_REGISTERED) { 2404 err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep); 2405 if (err) 2406 goto err_reps; 2407 } 2408 2409 return 0; 2410 2411 err_reps: 2412 atomic_set(&rep->rep_data[rep_type].state, REP_REGISTERED); 2413 for (--rep_type; rep_type >= 0;
rep_type--) 2414 __esw_offloads_unload_rep(esw, rep, rep_type); 2415 return err; 2416 } 2417 2418 void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num) 2419 { 2420 struct mlx5_eswitch_rep *rep; 2421 int rep_type; 2422 2423 rep = mlx5_eswitch_get_rep(esw, vport_num); 2424 for (rep_type = NUM_REP_TYPES - 1; rep_type >= 0; rep_type--) 2425 __esw_offloads_unload_rep(esw, rep, rep_type); 2426 } 2427 2428 int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num) 2429 { 2430 int err; 2431 2432 if (esw->mode != MLX5_ESWITCH_OFFLOADS) 2433 return 0; 2434 2435 if (vport_num != MLX5_VPORT_UPLINK) { 2436 err = mlx5_esw_offloads_devlink_port_register(esw, vport_num); 2437 if (err) 2438 return err; 2439 } 2440 2441 err = mlx5_esw_offloads_rep_load(esw, vport_num); 2442 if (err) 2443 goto load_err; 2444 return err; 2445 2446 load_err: 2447 if (vport_num != MLX5_VPORT_UPLINK) 2448 mlx5_esw_offloads_devlink_port_unregister(esw, vport_num); 2449 return err; 2450 } 2451 2452 void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num) 2453 { 2454 if (esw->mode != MLX5_ESWITCH_OFFLOADS) 2455 return; 2456 2457 mlx5_esw_offloads_rep_unload(esw, vport_num); 2458 2459 if (vport_num != MLX5_VPORT_UPLINK) 2460 mlx5_esw_offloads_devlink_port_unregister(esw, vport_num); 2461 } 2462 2463 static int esw_set_slave_root_fdb(struct mlx5_core_dev *master, 2464 struct mlx5_core_dev *slave) 2465 { 2466 u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {}; 2467 u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {}; 2468 struct mlx5_flow_root_namespace *root; 2469 struct mlx5_flow_namespace *ns; 2470 int err; 2471 2472 MLX5_SET(set_flow_table_root_in, in, opcode, 2473 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT); 2474 MLX5_SET(set_flow_table_root_in, in, table_type, 2475 FS_FT_FDB); 2476 2477 if (master) { 2478 ns = mlx5_get_flow_namespace(master, 2479 MLX5_FLOW_NAMESPACE_FDB); 2480 root = find_root(&ns->node); 2481 mutex_lock(&root->chain_lock); 2482 MLX5_SET(set_flow_table_root_in, in, 2483 table_eswitch_owner_vhca_id_valid, 1); 2484 MLX5_SET(set_flow_table_root_in, in, 2485 table_eswitch_owner_vhca_id, 2486 MLX5_CAP_GEN(master, vhca_id)); 2487 MLX5_SET(set_flow_table_root_in, in, table_id, 2488 root->root_ft->id); 2489 } else { 2490 ns = mlx5_get_flow_namespace(slave, 2491 MLX5_FLOW_NAMESPACE_FDB); 2492 root = find_root(&ns->node); 2493 mutex_lock(&root->chain_lock); 2494 MLX5_SET(set_flow_table_root_in, in, table_id, 2495 root->root_ft->id); 2496 } 2497 2498 err = mlx5_cmd_exec(slave, in, sizeof(in), out, sizeof(out)); 2499 mutex_unlock(&root->chain_lock); 2500 2501 return err; 2502 } 2503 2504 static int __esw_set_master_egress_rule(struct mlx5_core_dev *master, 2505 struct mlx5_core_dev *slave, 2506 struct mlx5_vport *vport, 2507 struct mlx5_flow_table *acl) 2508 { 2509 u16 slave_index = MLX5_CAP_GEN(slave, vhca_id); 2510 struct mlx5_flow_handle *flow_rule = NULL; 2511 struct mlx5_flow_destination dest = {}; 2512 struct mlx5_flow_act flow_act = {}; 2513 struct mlx5_flow_spec *spec; 2514 int err = 0; 2515 void *misc; 2516 2517 spec = kvzalloc(sizeof(*spec), GFP_KERNEL); 2518 if (!spec) 2519 return -ENOMEM; 2520 2521 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; 2522 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, 2523 misc_parameters); 2524 MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_UPLINK); 2525 MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id, slave_index); 2526 2527 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); 
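/* Match on both the source port and the source eswitch owner vhca_id below,
 * so only traffic that ingressed through the slave's uplink hits the bounce
 * rule added to the master's egress ACL.
 */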
2528 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); 2529 MLX5_SET_TO_ONES(fte_match_set_misc, misc, 2530 source_eswitch_owner_vhca_id); 2531 2532 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 2533 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; 2534 dest.vport.num = slave->priv.eswitch->manager_vport; 2535 dest.vport.vhca_id = MLX5_CAP_GEN(slave, vhca_id); 2536 dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID; 2537 2538 flow_rule = mlx5_add_flow_rules(acl, spec, &flow_act, 2539 &dest, 1); 2540 if (IS_ERR(flow_rule)) { 2541 err = PTR_ERR(flow_rule); 2542 } else { 2543 err = xa_insert(&vport->egress.offloads.bounce_rules, 2544 slave_index, flow_rule, GFP_KERNEL); 2545 if (err) 2546 mlx5_del_flow_rules(flow_rule); 2547 } 2548 2549 kvfree(spec); 2550 return err; 2551 } 2552 2553 static int esw_master_egress_create_resources(struct mlx5_eswitch *esw, 2554 struct mlx5_flow_namespace *egress_ns, 2555 struct mlx5_vport *vport, size_t count) 2556 { 2557 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); 2558 struct mlx5_flow_table_attr ft_attr = { 2559 .max_fte = count, .prio = 0, .level = 0, 2560 }; 2561 struct mlx5_flow_table *acl; 2562 struct mlx5_flow_group *g; 2563 void *match_criteria; 2564 u32 *flow_group_in; 2565 int err; 2566 2567 if (vport->egress.acl) 2568 return 0; 2569 2570 flow_group_in = kvzalloc(inlen, GFP_KERNEL); 2571 if (!flow_group_in) 2572 return -ENOMEM; 2573 2574 if (vport->vport || mlx5_core_is_ecpf(esw->dev)) 2575 ft_attr.flags = MLX5_FLOW_TABLE_OTHER_VPORT; 2576 2577 acl = mlx5_create_vport_flow_table(egress_ns, &ft_attr, vport->vport); 2578 if (IS_ERR(acl)) { 2579 err = PTR_ERR(acl); 2580 goto out; 2581 } 2582 2583 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, 2584 match_criteria); 2585 MLX5_SET_TO_ONES(fte_match_param, match_criteria, 2586 misc_parameters.source_port); 2587 MLX5_SET_TO_ONES(fte_match_param, match_criteria, 2588 misc_parameters.source_eswitch_owner_vhca_id); 2589 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, 2590 MLX5_MATCH_MISC_PARAMETERS); 2591 2592 MLX5_SET(create_flow_group_in, flow_group_in, 2593 source_eswitch_owner_vhca_id_valid, 1); 2594 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); 2595 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, count); 2596 2597 g = mlx5_create_flow_group(acl, flow_group_in); 2598 if (IS_ERR(g)) { 2599 err = PTR_ERR(g); 2600 goto err_group; 2601 } 2602 2603 vport->egress.acl = acl; 2604 vport->egress.offloads.bounce_grp = g; 2605 vport->egress.type = VPORT_EGRESS_ACL_TYPE_SHARED_FDB; 2606 xa_init_flags(&vport->egress.offloads.bounce_rules, XA_FLAGS_ALLOC); 2607 2608 kvfree(flow_group_in); 2609 2610 return 0; 2611 2612 err_group: 2613 mlx5_destroy_flow_table(acl); 2614 out: 2615 kvfree(flow_group_in); 2616 return err; 2617 } 2618 2619 static void esw_master_egress_destroy_resources(struct mlx5_vport *vport) 2620 { 2621 if (!xa_empty(&vport->egress.offloads.bounce_rules)) 2622 return; 2623 mlx5_destroy_flow_group(vport->egress.offloads.bounce_grp); 2624 vport->egress.offloads.bounce_grp = NULL; 2625 mlx5_destroy_flow_table(vport->egress.acl); 2626 vport->egress.acl = NULL; 2627 } 2628 2629 static int esw_set_master_egress_rule(struct mlx5_core_dev *master, 2630 struct mlx5_core_dev *slave, size_t count) 2631 { 2632 struct mlx5_eswitch *esw = master->priv.eswitch; 2633 u16 slave_index = MLX5_CAP_GEN(slave, vhca_id); 2634 struct mlx5_flow_namespace *egress_ns; 2635 struct mlx5_vport *vport; 2636 int err; 2637 2638 vport = 
mlx5_eswitch_get_vport(esw, esw->manager_vport); 2639 if (IS_ERR(vport)) 2640 return PTR_ERR(vport); 2641 2642 egress_ns = mlx5_get_flow_vport_acl_namespace(master, 2643 MLX5_FLOW_NAMESPACE_ESW_EGRESS, 2644 vport->index); 2645 if (!egress_ns) 2646 return -EINVAL; 2647 2648 if (vport->egress.acl && vport->egress.type != VPORT_EGRESS_ACL_TYPE_SHARED_FDB) 2649 return 0; 2650 2651 err = esw_master_egress_create_resources(esw, egress_ns, vport, count); 2652 if (err) 2653 return err; 2654 2655 if (xa_load(&vport->egress.offloads.bounce_rules, slave_index)) 2656 return -EINVAL; 2657 2658 err = __esw_set_master_egress_rule(master, slave, vport, vport->egress.acl); 2659 if (err) 2660 goto err_rule; 2661 2662 return 0; 2663 2664 err_rule: 2665 esw_master_egress_destroy_resources(vport); 2666 return err; 2667 } 2668 2669 static void esw_unset_master_egress_rule(struct mlx5_core_dev *dev, 2670 struct mlx5_core_dev *slave_dev) 2671 { 2672 struct mlx5_vport *vport; 2673 2674 vport = mlx5_eswitch_get_vport(dev->priv.eswitch, 2675 dev->priv.eswitch->manager_vport); 2676 2677 esw_acl_egress_ofld_bounce_rule_destroy(vport, MLX5_CAP_GEN(slave_dev, vhca_id)); 2678 2679 if (xa_empty(&vport->egress.offloads.bounce_rules)) { 2680 esw_acl_egress_ofld_cleanup(vport); 2681 xa_destroy(&vport->egress.offloads.bounce_rules); 2682 } 2683 } 2684 2685 int mlx5_eswitch_offloads_single_fdb_add_one(struct mlx5_eswitch *master_esw, 2686 struct mlx5_eswitch *slave_esw, int max_slaves) 2687 { 2688 int err; 2689 2690 err = esw_set_slave_root_fdb(master_esw->dev, 2691 slave_esw->dev); 2692 if (err) 2693 return err; 2694 2695 err = esw_set_master_egress_rule(master_esw->dev, 2696 slave_esw->dev, max_slaves); 2697 if (err) 2698 goto err_acl; 2699 2700 return err; 2701 2702 err_acl: 2703 esw_set_slave_root_fdb(NULL, slave_esw->dev); 2704 return err; 2705 } 2706 2707 void mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw, 2708 struct mlx5_eswitch *slave_esw) 2709 { 2710 esw_set_slave_root_fdb(NULL, slave_esw->dev); 2711 esw_unset_master_egress_rule(master_esw->dev, slave_esw->dev); 2712 } 2713 2714 #define ESW_OFFLOADS_DEVCOM_PAIR (0) 2715 #define ESW_OFFLOADS_DEVCOM_UNPAIR (1) 2716 2717 static void mlx5_esw_offloads_rep_event_unpair(struct mlx5_eswitch *esw, 2718 struct mlx5_eswitch *peer_esw) 2719 { 2720 const struct mlx5_eswitch_rep_ops *ops; 2721 struct mlx5_eswitch_rep *rep; 2722 unsigned long i; 2723 u8 rep_type; 2724 2725 mlx5_esw_for_each_rep(esw, i, rep) { 2726 rep_type = NUM_REP_TYPES; 2727 while (rep_type--) { 2728 ops = esw->offloads.rep_ops[rep_type]; 2729 if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED && 2730 ops->event) 2731 ops->event(esw, rep, MLX5_SWITCHDEV_EVENT_UNPAIR, peer_esw); 2732 } 2733 } 2734 } 2735 2736 static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw, 2737 struct mlx5_eswitch *peer_esw) 2738 { 2739 #if IS_ENABLED(CONFIG_MLX5_CLS_ACT) 2740 mlx5e_tc_clean_fdb_peer_flows(esw); 2741 #endif 2742 mlx5_esw_offloads_rep_event_unpair(esw, peer_esw); 2743 esw_del_fdb_peer_miss_rules(esw, peer_esw->dev); 2744 } 2745 2746 static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw, 2747 struct mlx5_eswitch *peer_esw) 2748 { 2749 const struct mlx5_eswitch_rep_ops *ops; 2750 struct mlx5_eswitch_rep *rep; 2751 unsigned long i; 2752 u8 rep_type; 2753 int err; 2754 2755 err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev); 2756 if (err) 2757 return err; 2758 2759 mlx5_esw_for_each_rep(esw, i, rep) { 2760 for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) { 2761 ops 
= esw->offloads.rep_ops[rep_type]; 2762 if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED && 2763 ops->event) { 2764 err = ops->event(esw, rep, MLX5_SWITCHDEV_EVENT_PAIR, peer_esw); 2765 if (err) 2766 goto err_out; 2767 } 2768 } 2769 } 2770 2771 return 0; 2772 2773 err_out: 2774 mlx5_esw_offloads_unpair(esw, peer_esw); 2775 return err; 2776 } 2777 2778 static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw, 2779 struct mlx5_eswitch *peer_esw, 2780 bool pair) 2781 { 2782 u8 peer_idx = mlx5_get_dev_index(peer_esw->dev); 2783 struct mlx5_flow_root_namespace *peer_ns; 2784 u8 idx = mlx5_get_dev_index(esw->dev); 2785 struct mlx5_flow_root_namespace *ns; 2786 int err; 2787 2788 peer_ns = peer_esw->dev->priv.steering->fdb_root_ns; 2789 ns = esw->dev->priv.steering->fdb_root_ns; 2790 2791 if (pair) { 2792 err = mlx5_flow_namespace_set_peer(ns, peer_ns, peer_idx); 2793 if (err) 2794 return err; 2795 2796 err = mlx5_flow_namespace_set_peer(peer_ns, ns, idx); 2797 if (err) { 2798 mlx5_flow_namespace_set_peer(ns, NULL, peer_idx); 2799 return err; 2800 } 2801 } else { 2802 mlx5_flow_namespace_set_peer(ns, NULL, peer_idx); 2803 mlx5_flow_namespace_set_peer(peer_ns, NULL, idx); 2804 } 2805 2806 return 0; 2807 } 2808 2809 static int mlx5_esw_offloads_devcom_event(int event, 2810 void *my_data, 2811 void *event_data) 2812 { 2813 struct mlx5_eswitch *esw = my_data; 2814 struct mlx5_devcom *devcom = esw->dev->priv.devcom; 2815 struct mlx5_eswitch *peer_esw = event_data; 2816 u16 esw_i, peer_esw_i; 2817 bool esw_paired; 2818 int err; 2819 2820 peer_esw_i = MLX5_CAP_GEN(peer_esw->dev, vhca_id); 2821 esw_i = MLX5_CAP_GEN(esw->dev, vhca_id); 2822 esw_paired = !!xa_load(&esw->paired, peer_esw_i); 2823 2824 switch (event) { 2825 case ESW_OFFLOADS_DEVCOM_PAIR: 2826 if (mlx5_eswitch_vport_match_metadata_enabled(esw) != 2827 mlx5_eswitch_vport_match_metadata_enabled(peer_esw)) 2828 break; 2829 2830 if (esw_paired) 2831 break; 2832 2833 err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true); 2834 if (err) 2835 goto err_out; 2836 err = mlx5_esw_offloads_pair(esw, peer_esw); 2837 if (err) 2838 goto err_peer; 2839 2840 err = mlx5_esw_offloads_pair(peer_esw, esw); 2841 if (err) 2842 goto err_pair; 2843 2844 err = xa_insert(&esw->paired, peer_esw_i, peer_esw, GFP_KERNEL); 2845 if (err) 2846 goto err_xa; 2847 2848 err = xa_insert(&peer_esw->paired, esw_i, esw, GFP_KERNEL); 2849 if (err) 2850 goto err_peer_xa; 2851 2852 esw->num_peers++; 2853 peer_esw->num_peers++; 2854 mlx5_devcom_comp_set_ready(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true); 2855 break; 2856 2857 case ESW_OFFLOADS_DEVCOM_UNPAIR: 2858 if (!esw_paired) 2859 break; 2860 2861 peer_esw->num_peers--; 2862 esw->num_peers--; 2863 if (!esw->num_peers && !peer_esw->num_peers) 2864 mlx5_devcom_comp_set_ready(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false); 2865 xa_erase(&peer_esw->paired, esw_i); 2866 xa_erase(&esw->paired, peer_esw_i); 2867 mlx5_esw_offloads_unpair(peer_esw, esw); 2868 mlx5_esw_offloads_unpair(esw, peer_esw); 2869 mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false); 2870 break; 2871 } 2872 2873 return 0; 2874 2875 err_peer_xa: 2876 xa_erase(&esw->paired, peer_esw_i); 2877 err_xa: 2878 mlx5_esw_offloads_unpair(peer_esw, esw); 2879 err_pair: 2880 mlx5_esw_offloads_unpair(esw, peer_esw); 2881 err_peer: 2882 mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false); 2883 err_out: 2884 mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d", 2885 event, err); 2886 return err; 2887 } 2888 2889 void 
mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw) 2890 { 2891 struct mlx5_devcom *devcom = esw->dev->priv.devcom; 2892 int i; 2893 2894 for (i = 0; i < MLX5_MAX_PORTS; i++) 2895 INIT_LIST_HEAD(&esw->offloads.peer_flows[i]); 2896 mutex_init(&esw->offloads.peer_mutex); 2897 2898 if (!MLX5_CAP_ESW(esw->dev, merged_eswitch)) 2899 return; 2900 2901 if (!mlx5_lag_is_supported(esw->dev)) 2902 return; 2903 2904 xa_init(&esw->paired); 2905 mlx5_devcom_register_component(devcom, 2906 MLX5_DEVCOM_ESW_OFFLOADS, 2907 mlx5_esw_offloads_devcom_event, 2908 esw); 2909 2910 esw->num_peers = 0; 2911 mlx5_devcom_send_event(devcom, 2912 MLX5_DEVCOM_ESW_OFFLOADS, 2913 ESW_OFFLOADS_DEVCOM_PAIR, 2914 ESW_OFFLOADS_DEVCOM_UNPAIR, esw); 2915 } 2916 2917 void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) 2918 { 2919 struct mlx5_devcom *devcom = esw->dev->priv.devcom; 2920 2921 if (!MLX5_CAP_ESW(esw->dev, merged_eswitch)) 2922 return; 2923 2924 if (!mlx5_lag_is_supported(esw->dev)) 2925 return; 2926 2927 mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS, 2928 ESW_OFFLOADS_DEVCOM_UNPAIR, 2929 ESW_OFFLOADS_DEVCOM_UNPAIR, esw); 2930 2931 mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS); 2932 xa_destroy(&esw->paired); 2933 } 2934 2935 bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw) 2936 { 2937 if (!MLX5_CAP_ESW(esw->dev, esw_uplink_ingress_acl)) 2938 return false; 2939 2940 if (!(MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) & 2941 MLX5_FDB_TO_VPORT_REG_C_0)) 2942 return false; 2943 2944 return true; 2945 } 2946 2947 #define MLX5_ESW_METADATA_RSVD_UPLINK 1 2948 2949 /* Share the same metadata for uplinks. This is fine because: 2950 * (a) In shared FDB mode (LAG) both uplinks are treated the 2951 * same and tagged with the same metadata. 2952 * (b) In non-shared FDB mode, packets from physical port0 2953 * cannot hit eswitch of PF1 and vice versa. 2954 */ 2955 static u32 mlx5_esw_match_metadata_reserved(struct mlx5_eswitch *esw) 2956 { 2957 return MLX5_ESW_METADATA_RSVD_UPLINK; 2958 } 2959 2960 u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw) 2961 { 2962 u32 vport_end_ida = (1 << ESW_VPORT_BITS) - 1; 2963 /* Reserve 0xf for internal port offload */ 2964 u32 max_pf_num = (1 << ESW_PFNUM_BITS) - 2; 2965 u32 pf_num; 2966 int id; 2967 2968 /* Only 4 bits of pf_num */ 2969 pf_num = mlx5_get_dev_index(esw->dev); 2970 if (pf_num > max_pf_num) 2971 return 0; 2972 2973 /* Metadata is 4 bits of PFNUM and 12 bits of unique id */ 2974 /* Use only non-zero vport_id (2-4095) for all PFs */ 2975 id = ida_alloc_range(&esw->offloads.vport_metadata_ida, 2976 MLX5_ESW_METADATA_RSVD_UPLINK + 1, 2977 vport_end_ida, GFP_KERNEL); 2978 if (id < 0) 2979 return 0; 2980 /* Compose the final value, e.g. pf_num 1 and ida id 5 give 0x1005 with the 4/12 bit split above */ id = (pf_num << ESW_VPORT_BITS) | id; 2981 return id; 2982 } 2983 2984 void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata) 2985 { 2986 u32 vport_bit_mask = (1 << ESW_VPORT_BITS) - 1; 2987 2988 /* Metadata contains only 12 bits of actual ida id */ 2989 ida_free(&esw->offloads.vport_metadata_ida, metadata & vport_bit_mask); 2990 } 2991 2992 static int esw_offloads_vport_metadata_setup(struct mlx5_eswitch *esw, 2993 struct mlx5_vport *vport) 2994 { 2995 if (vport->vport == MLX5_VPORT_UPLINK) 2996 vport->default_metadata = mlx5_esw_match_metadata_reserved(esw); 2997 else 2998 vport->default_metadata = mlx5_esw_match_metadata_alloc(esw); 2999 3000 vport->metadata = vport->default_metadata; 3001 return vport->metadata ?
0 : -ENOSPC; 3002 } 3003 3004 static void esw_offloads_vport_metadata_cleanup(struct mlx5_eswitch *esw, 3005 struct mlx5_vport *vport) 3006 { 3007 if (!vport->default_metadata) 3008 return; 3009 3010 if (vport->vport == MLX5_VPORT_UPLINK) 3011 return; 3012 3013 WARN_ON(vport->metadata != vport->default_metadata); 3014 mlx5_esw_match_metadata_free(esw, vport->default_metadata); 3015 } 3016 3017 static void esw_offloads_metadata_uninit(struct mlx5_eswitch *esw) 3018 { 3019 struct mlx5_vport *vport; 3020 unsigned long i; 3021 3022 if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) 3023 return; 3024 3025 mlx5_esw_for_each_vport(esw, i, vport) 3026 esw_offloads_vport_metadata_cleanup(esw, vport); 3027 } 3028 3029 static int esw_offloads_metadata_init(struct mlx5_eswitch *esw) 3030 { 3031 struct mlx5_vport *vport; 3032 unsigned long i; 3033 int err; 3034 3035 if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) 3036 return 0; 3037 3038 mlx5_esw_for_each_vport(esw, i, vport) { 3039 err = esw_offloads_vport_metadata_setup(esw, vport); 3040 if (err) 3041 goto metadata_err; 3042 } 3043 3044 return 0; 3045 3046 metadata_err: 3047 esw_offloads_metadata_uninit(esw); 3048 return err; 3049 } 3050 3051 int 3052 esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw, 3053 struct mlx5_vport *vport) 3054 { 3055 int err; 3056 3057 err = esw_acl_ingress_ofld_setup(esw, vport); 3058 if (err) 3059 return err; 3060 3061 err = esw_acl_egress_ofld_setup(esw, vport); 3062 if (err) 3063 goto egress_err; 3064 3065 return 0; 3066 3067 egress_err: 3068 esw_acl_ingress_ofld_cleanup(esw, vport); 3069 return err; 3070 } 3071 3072 void 3073 esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw, 3074 struct mlx5_vport *vport) 3075 { 3076 esw_acl_egress_ofld_cleanup(vport); 3077 esw_acl_ingress_ofld_cleanup(esw, vport); 3078 } 3079 3080 static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw) 3081 { 3082 struct mlx5_vport *vport; 3083 3084 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK); 3085 if (IS_ERR(vport)) 3086 return PTR_ERR(vport); 3087 3088 return esw_vport_create_offloads_acl_tables(esw, vport); 3089 } 3090 3091 static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw) 3092 { 3093 struct mlx5_vport *vport; 3094 3095 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK); 3096 if (IS_ERR(vport)) 3097 return; 3098 3099 esw_vport_destroy_offloads_acl_tables(esw, vport); 3100 } 3101 3102 int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw) 3103 { 3104 struct mlx5_eswitch_rep *rep; 3105 unsigned long i; 3106 int ret; 3107 3108 if (!esw || esw->mode != MLX5_ESWITCH_OFFLOADS) 3109 return 0; 3110 3111 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK); 3112 if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED) 3113 return 0; 3114 3115 ret = mlx5_esw_offloads_rep_load(esw, MLX5_VPORT_UPLINK); 3116 if (ret) 3117 return ret; 3118 3119 mlx5_esw_for_each_rep(esw, i, rep) { 3120 if (atomic_read(&rep->rep_data[REP_ETH].state) == REP_LOADED) 3121 mlx5_esw_offloads_rep_load(esw, rep->vport); 3122 } 3123 3124 return 0; 3125 } 3126 3127 static int esw_offloads_steering_init(struct mlx5_eswitch *esw) 3128 { 3129 struct mlx5_esw_indir_table *indir; 3130 int err; 3131 3132 memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb)); 3133 mutex_init(&esw->fdb_table.offloads.vports.lock); 3134 hash_init(esw->fdb_table.offloads.vports.table); 3135 atomic64_set(&esw->user_count, 0); 3136 3137 indir = mlx5_esw_indir_table_init(); 3138 if (IS_ERR(indir)) { 3139 err = 
PTR_ERR(indir); 3140 goto create_indir_err; 3141 } 3142 esw->fdb_table.offloads.indir = indir; 3143 3144 err = esw_create_uplink_offloads_acl_tables(esw); 3145 if (err) 3146 goto create_acl_err; 3147 3148 err = esw_create_offloads_table(esw); 3149 if (err) 3150 goto create_offloads_err; 3151 3152 err = esw_create_restore_table(esw); 3153 if (err) 3154 goto create_restore_err; 3155 3156 err = esw_create_offloads_fdb_tables(esw); 3157 if (err) 3158 goto create_fdb_err; 3159 3160 err = esw_create_vport_rx_group(esw); 3161 if (err) 3162 goto create_fg_err; 3163 3164 err = esw_create_vport_rx_drop_group(esw); 3165 if (err) 3166 goto create_rx_drop_fg_err; 3167 3168 err = esw_create_vport_rx_drop_rule(esw); 3169 if (err) 3170 goto create_rx_drop_rule_err; 3171 3172 return 0; 3173 3174 create_rx_drop_rule_err: 3175 esw_destroy_vport_rx_drop_group(esw); 3176 create_rx_drop_fg_err: 3177 esw_destroy_vport_rx_group(esw); 3178 create_fg_err: 3179 esw_destroy_offloads_fdb_tables(esw); 3180 create_fdb_err: 3181 esw_destroy_restore_table(esw); 3182 create_restore_err: 3183 esw_destroy_offloads_table(esw); 3184 create_offloads_err: 3185 esw_destroy_uplink_offloads_acl_tables(esw); 3186 create_acl_err: 3187 mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir); 3188 create_indir_err: 3189 mutex_destroy(&esw->fdb_table.offloads.vports.lock); 3190 return err; 3191 } 3192 3193 static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw) 3194 { 3195 esw_destroy_vport_rx_drop_rule(esw); 3196 esw_destroy_vport_rx_drop_group(esw); 3197 esw_destroy_vport_rx_group(esw); 3198 esw_destroy_offloads_fdb_tables(esw); 3199 esw_destroy_restore_table(esw); 3200 esw_destroy_offloads_table(esw); 3201 esw_destroy_uplink_offloads_acl_tables(esw); 3202 mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir); 3203 mutex_destroy(&esw->fdb_table.offloads.vports.lock); 3204 } 3205 3206 static void 3207 esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out) 3208 { 3209 struct devlink *devlink; 3210 bool host_pf_disabled; 3211 u16 new_num_vfs; 3212 3213 new_num_vfs = MLX5_GET(query_esw_functions_out, out, 3214 host_params_context.host_num_of_vfs); 3215 host_pf_disabled = MLX5_GET(query_esw_functions_out, out, 3216 host_params_context.host_pf_disabled); 3217 3218 if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled) 3219 return; 3220 3221 devlink = priv_to_devlink(esw->dev); 3222 devl_lock(devlink); 3223 /* Number of VFs can only change from "0 to x" or "x to 0". 
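 * Hence the handler below either unloads all currently loaded VF vports
 * or loads the full new set; no partial transition needs handling.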
*/ 3224 if (esw->esw_funcs.num_vfs > 0) { 3225 mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs); 3226 } else { 3227 int err; 3228 3229 err = mlx5_eswitch_load_vf_vports(esw, new_num_vfs, 3230 MLX5_VPORT_UC_ADDR_CHANGE); 3231 if (err) { 3232 devl_unlock(devlink); 3233 return; 3234 } 3235 } 3236 esw->esw_funcs.num_vfs = new_num_vfs; 3237 devl_unlock(devlink); 3238 } 3239 3240 static void esw_functions_changed_event_handler(struct work_struct *work) 3241 { 3242 struct mlx5_host_work *host_work; 3243 struct mlx5_eswitch *esw; 3244 const u32 *out; 3245 3246 host_work = container_of(work, struct mlx5_host_work, work); 3247 esw = host_work->esw; 3248 3249 out = mlx5_esw_query_functions(esw->dev); 3250 if (IS_ERR(out)) 3251 goto out; 3252 3253 esw_vfs_changed_event_handler(esw, out); 3254 kvfree(out); 3255 out: 3256 kfree(host_work); 3257 } 3258 3259 int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data) 3260 { 3261 struct mlx5_esw_functions *esw_funcs; 3262 struct mlx5_host_work *host_work; 3263 struct mlx5_eswitch *esw; 3264 3265 host_work = kzalloc(sizeof(*host_work), GFP_ATOMIC); 3266 if (!host_work) 3267 return NOTIFY_DONE; 3268 3269 esw_funcs = mlx5_nb_cof(nb, struct mlx5_esw_functions, nb); 3270 esw = container_of(esw_funcs, struct mlx5_eswitch, esw_funcs); 3271 3272 host_work->esw = esw; 3273 3274 INIT_WORK(&host_work->work, esw_functions_changed_event_handler); 3275 queue_work(esw->work_queue, &host_work->work); 3276 3277 return NOTIFY_OK; 3278 } 3279 3280 static int mlx5_esw_host_number_init(struct mlx5_eswitch *esw) 3281 { 3282 const u32 *query_host_out; 3283 3284 if (!mlx5_core_is_ecpf_esw_manager(esw->dev)) 3285 return 0; 3286 3287 query_host_out = mlx5_esw_query_functions(esw->dev); 3288 if (IS_ERR(query_host_out)) 3289 return PTR_ERR(query_host_out); 3290 3291 /* Mark non local controller with non zero controller number. 
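 * The device reports the external host number starting from zero, so the
 * user-visible controller number maps to host_number + 1 (see
 * mlx5_esw_offloads_controller_valid()).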
*/ 3292 esw->offloads.host_number = MLX5_GET(query_esw_functions_out, query_host_out, 3293 host_params_context.host_number); 3294 kvfree(query_host_out); 3295 return 0; 3296 } 3297 3298 bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 controller) 3299 { 3300 /* Local controller is always valid */ 3301 if (controller == 0) 3302 return true; 3303 3304 if (!mlx5_core_is_ecpf_esw_manager(esw->dev)) 3305 return false; 3306 3307 /* External host number starts with zero in device */ 3308 return (controller == esw->offloads.host_number + 1); 3309 } 3310 3311 int esw_offloads_enable(struct mlx5_eswitch *esw) 3312 { 3313 struct mapping_ctx *reg_c0_obj_pool; 3314 struct mlx5_vport *vport; 3315 unsigned long i; 3316 u64 mapping_id; 3317 int err; 3318 3319 mutex_init(&esw->offloads.termtbl_mutex); 3320 mlx5_rdma_enable_roce(esw->dev); 3321 3322 err = mlx5_esw_host_number_init(esw); 3323 if (err) 3324 goto err_metadata; 3325 3326 err = esw_offloads_metadata_init(esw); 3327 if (err) 3328 goto err_metadata; 3329 3330 err = esw_set_passing_vport_metadata(esw, true); 3331 if (err) 3332 goto err_vport_metadata; 3333 3334 mapping_id = mlx5_query_nic_system_image_guid(esw->dev); 3335 3336 reg_c0_obj_pool = mapping_create_for_id(mapping_id, MAPPING_TYPE_CHAIN, 3337 sizeof(struct mlx5_mapped_obj), 3338 ESW_REG_C0_USER_DATA_METADATA_MASK, 3339 true); 3340 3341 if (IS_ERR(reg_c0_obj_pool)) { 3342 err = PTR_ERR(reg_c0_obj_pool); 3343 goto err_pool; 3344 } 3345 esw->offloads.reg_c0_obj_pool = reg_c0_obj_pool; 3346 3347 err = esw_offloads_steering_init(esw); 3348 if (err) 3349 goto err_steering_init; 3350 3351 /* Representor will control the vport link state */ 3352 mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) 3353 vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN; 3354 if (mlx5_core_ec_sriov_enabled(esw->dev)) 3355 mlx5_esw_for_each_ec_vf_vport(esw, i, vport, esw->esw_funcs.num_ec_vfs) 3356 vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN; 3357 3358 /* Uplink vport rep must load first. */ 3359 err = esw_offloads_load_rep(esw, MLX5_VPORT_UPLINK); 3360 if (err) 3361 goto err_uplink; 3362 3363 err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE); 3364 if (err) 3365 goto err_vports; 3366 3367 return 0; 3368 3369 err_vports: 3370 esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK); 3371 err_uplink: 3372 esw_offloads_steering_cleanup(esw); 3373 err_steering_init: 3374 mapping_destroy(reg_c0_obj_pool); 3375 err_pool: 3376 esw_set_passing_vport_metadata(esw, false); 3377 err_vport_metadata: 3378 esw_offloads_metadata_uninit(esw); 3379 err_metadata: 3380 mlx5_rdma_disable_roce(esw->dev); 3381 mutex_destroy(&esw->offloads.termtbl_mutex); 3382 return err; 3383 } 3384 3385 static int esw_offloads_stop(struct mlx5_eswitch *esw, 3386 struct netlink_ext_ack *extack) 3387 { 3388 int err; 3389 3390 esw->mode = MLX5_ESWITCH_LEGACY; 3391 3392 /* If changing from switchdev to legacy mode without sriov enabled, 3393 * no need to create legacy fdb. 
3394 */ 3395 if (!mlx5_core_is_pf(esw->dev) || !mlx5_sriov_is_enabled(esw->dev)) 3396 return 0; 3397 3398 err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS); 3399 if (err) 3400 NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy"); 3401 3402 return err; 3403 } 3404 3405 void esw_offloads_disable(struct mlx5_eswitch *esw) 3406 { 3407 mlx5_eswitch_disable_pf_vf_vports(esw); 3408 esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK); 3409 esw_set_passing_vport_metadata(esw, false); 3410 esw_offloads_steering_cleanup(esw); 3411 mapping_destroy(esw->offloads.reg_c0_obj_pool); 3412 esw_offloads_metadata_uninit(esw); 3413 mlx5_rdma_disable_roce(esw->dev); 3414 mutex_destroy(&esw->offloads.termtbl_mutex); 3415 } 3416 3417 static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode) 3418 { 3419 switch (mode) { 3420 case DEVLINK_ESWITCH_MODE_LEGACY: 3421 *mlx5_mode = MLX5_ESWITCH_LEGACY; 3422 break; 3423 case DEVLINK_ESWITCH_MODE_SWITCHDEV: 3424 *mlx5_mode = MLX5_ESWITCH_OFFLOADS; 3425 break; 3426 default: 3427 return -EINVAL; 3428 } 3429 3430 return 0; 3431 } 3432 3433 static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode) 3434 { 3435 switch (mlx5_mode) { 3436 case MLX5_ESWITCH_LEGACY: 3437 *mode = DEVLINK_ESWITCH_MODE_LEGACY; 3438 break; 3439 case MLX5_ESWITCH_OFFLOADS: 3440 *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV; 3441 break; 3442 default: 3443 return -EINVAL; 3444 } 3445 3446 return 0; 3447 } 3448 3449 static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode) 3450 { 3451 switch (mode) { 3452 case DEVLINK_ESWITCH_INLINE_MODE_NONE: 3453 *mlx5_mode = MLX5_INLINE_MODE_NONE; 3454 break; 3455 case DEVLINK_ESWITCH_INLINE_MODE_LINK: 3456 *mlx5_mode = MLX5_INLINE_MODE_L2; 3457 break; 3458 case DEVLINK_ESWITCH_INLINE_MODE_NETWORK: 3459 *mlx5_mode = MLX5_INLINE_MODE_IP; 3460 break; 3461 case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT: 3462 *mlx5_mode = MLX5_INLINE_MODE_TCP_UDP; 3463 break; 3464 default: 3465 return -EINVAL; 3466 } 3467 3468 return 0; 3469 } 3470 3471 static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode) 3472 { 3473 switch (mlx5_mode) { 3474 case MLX5_INLINE_MODE_NONE: 3475 *mode = DEVLINK_ESWITCH_INLINE_MODE_NONE; 3476 break; 3477 case MLX5_INLINE_MODE_L2: 3478 *mode = DEVLINK_ESWITCH_INLINE_MODE_LINK; 3479 break; 3480 case MLX5_INLINE_MODE_IP: 3481 *mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK; 3482 break; 3483 case MLX5_INLINE_MODE_TCP_UDP: 3484 *mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT; 3485 break; 3486 default: 3487 return -EINVAL; 3488 } 3489 3490 return 0; 3491 } 3492 3493 static bool esw_offloads_devlink_ns_eq_netdev_ns(struct devlink *devlink) 3494 { 3495 struct net *devl_net, *netdev_net; 3496 struct mlx5_eswitch *esw; 3497 3498 esw = mlx5_devlink_eswitch_get(devlink); 3499 netdev_net = dev_net(esw->dev->mlx5e_res.uplink_netdev); 3500 devl_net = devlink_net(devlink); 3501 3502 return net_eq(devl_net, netdev_net); 3503 } 3504 3505 int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, 3506 struct netlink_ext_ack *extack) 3507 { 3508 u16 cur_mlx5_mode, mlx5_mode = 0; 3509 struct mlx5_eswitch *esw; 3510 int err = 0; 3511 3512 esw = mlx5_devlink_eswitch_get(devlink); 3513 if (IS_ERR(esw)) 3514 return PTR_ERR(esw); 3515 3516 if (esw_mode_from_devlink(mode, &mlx5_mode)) 3517 return -EINVAL; 3518 3519 if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV && 3520 !esw_offloads_devlink_ns_eq_netdev_ns(devlink)) { 3521 NL_SET_ERR_MSG_MOD(extack, 3522 "Can't change E-Switch mode to switchdev when netdev net namespace has diverged from the devlink's."); 3523 
return -EPERM; 3524 } 3525 3526 mlx5_lag_disable_change(esw->dev); 3527 err = mlx5_esw_try_lock(esw); 3528 if (err < 0) { 3529 NL_SET_ERR_MSG_MOD(extack, "Can't change mode, E-Switch is busy"); 3530 goto enable_lag; 3531 } 3532 cur_mlx5_mode = err; 3533 err = 0; 3534 3535 if (cur_mlx5_mode == mlx5_mode) 3536 goto unlock; 3537 3538 mlx5_eswitch_disable_locked(esw); 3539 if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) { 3540 if (mlx5_devlink_trap_get_num_active(esw->dev)) { 3541 NL_SET_ERR_MSG_MOD(extack, 3542 "Can't change mode while devlink traps are active"); 3543 err = -EOPNOTSUPP; 3544 goto unlock; 3545 } 3546 err = esw_offloads_start(esw, extack); 3547 } else if (mode == DEVLINK_ESWITCH_MODE_LEGACY) { 3548 err = esw_offloads_stop(esw, extack); 3549 mlx5_rescan_drivers(esw->dev); 3550 } else { 3551 err = -EINVAL; 3552 } 3553 3554 unlock: 3555 mlx5_esw_unlock(esw); 3556 enable_lag: 3557 mlx5_lag_enable_change(esw->dev); 3558 return err; 3559 } 3560 3561 int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) 3562 { 3563 struct mlx5_eswitch *esw; 3564 int err; 3565 3566 esw = mlx5_devlink_eswitch_get(devlink); 3567 if (IS_ERR(esw)) 3568 return PTR_ERR(esw); 3569 3570 down_read(&esw->mode_lock); 3571 err = esw_mode_to_devlink(esw->mode, mode); 3572 up_read(&esw->mode_lock); 3573 return err; 3574 } 3575 3576 static int mlx5_esw_vports_inline_set(struct mlx5_eswitch *esw, u8 mlx5_mode, 3577 struct netlink_ext_ack *extack) 3578 { 3579 struct mlx5_core_dev *dev = esw->dev; 3580 struct mlx5_vport *vport; 3581 u16 err_vport_num = 0; 3582 unsigned long i; 3583 int err = 0; 3584 3585 mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) { 3586 err = mlx5_modify_nic_vport_min_inline(dev, vport->vport, mlx5_mode); 3587 if (err) { 3588 err_vport_num = vport->vport; 3589 NL_SET_ERR_MSG_MOD(extack, 3590 "Failed to set min inline on vport"); 3591 goto revert_inline_mode; 3592 } 3593 } 3594 if (mlx5_core_ec_sriov_enabled(esw->dev)) { 3595 mlx5_esw_for_each_ec_vf_vport(esw, i, vport, esw->esw_funcs.num_ec_vfs) { 3596 err = mlx5_modify_nic_vport_min_inline(dev, vport->vport, mlx5_mode); 3597 if (err) { 3598 err_vport_num = vport->vport; 3599 NL_SET_ERR_MSG_MOD(extack, 3600 "Failed to set min inline on vport"); 3601 goto revert_ec_vf_inline_mode; 3602 } 3603 } 3604 } 3605 return 0; 3606 3607 revert_ec_vf_inline_mode: 3608 mlx5_esw_for_each_ec_vf_vport(esw, i, vport, esw->esw_funcs.num_ec_vfs) { 3609 if (vport->vport == err_vport_num) 3610 break; 3611 mlx5_modify_nic_vport_min_inline(dev, 3612 vport->vport, 3613 esw->offloads.inline_mode); 3614 } 3615 revert_inline_mode: 3616 mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) { 3617 if (vport->vport == err_vport_num) 3618 break; 3619 mlx5_modify_nic_vport_min_inline(dev, 3620 vport->vport, 3621 esw->offloads.inline_mode); 3622 } 3623 return err; 3624 } 3625 3626 int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode, 3627 struct netlink_ext_ack *extack) 3628 { 3629 struct mlx5_core_dev *dev = devlink_priv(devlink); 3630 struct mlx5_eswitch *esw; 3631 u8 mlx5_mode; 3632 int err; 3633 3634 esw = mlx5_devlink_eswitch_get(devlink); 3635 if (IS_ERR(esw)) 3636 return PTR_ERR(esw); 3637 3638 down_write(&esw->mode_lock); 3639 3640 switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) { 3641 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED: 3642 if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE) { 3643 err = 0; 3644 goto out; 3645 } 3646 3647 fallthrough; 3648 case MLX5_CAP_INLINE_MODE_L2: 3649 NL_SET_ERR_MSG_MOD(extack, 
"Inline mode can't be set"); 3650 err = -EOPNOTSUPP; 3651 goto out; 3652 case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT: 3653 break; 3654 } 3655 3656 if (atomic64_read(&esw->offloads.num_flows) > 0) { 3657 NL_SET_ERR_MSG_MOD(extack, 3658 "Can't set inline mode when flows are configured"); 3659 err = -EOPNOTSUPP; 3660 goto out; 3661 } 3662 3663 err = esw_inline_mode_from_devlink(mode, &mlx5_mode); 3664 if (err) 3665 goto out; 3666 3667 err = mlx5_esw_vports_inline_set(esw, mlx5_mode, extack); 3668 if (err) 3669 goto out; 3670 3671 esw->offloads.inline_mode = mlx5_mode; 3672 up_write(&esw->mode_lock); 3673 return 0; 3674 3675 out: 3676 up_write(&esw->mode_lock); 3677 return err; 3678 } 3679 3680 int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode) 3681 { 3682 struct mlx5_eswitch *esw; 3683 int err; 3684 3685 esw = mlx5_devlink_eswitch_get(devlink); 3686 if (IS_ERR(esw)) 3687 return PTR_ERR(esw); 3688 3689 down_read(&esw->mode_lock); 3690 err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode); 3691 up_read(&esw->mode_lock); 3692 return err; 3693 } 3694 3695 bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev) 3696 { 3697 struct devlink *devlink = priv_to_devlink(dev); 3698 struct mlx5_eswitch *esw; 3699 3700 devl_lock(devlink); 3701 esw = mlx5_devlink_eswitch_get(devlink); 3702 if (IS_ERR(esw)) { 3703 devl_unlock(devlink); 3704 /* Failure means no eswitch => not possible to change encap */ 3705 return true; 3706 } 3707 3708 down_write(&esw->mode_lock); 3709 if (esw->mode != MLX5_ESWITCH_LEGACY && 3710 esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) { 3711 up_write(&esw->mode_lock); 3712 devl_unlock(devlink); 3713 return false; 3714 } 3715 3716 esw->offloads.num_block_encap++; 3717 up_write(&esw->mode_lock); 3718 devl_unlock(devlink); 3719 return true; 3720 } 3721 3722 void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev) 3723 { 3724 struct devlink *devlink = priv_to_devlink(dev); 3725 struct mlx5_eswitch *esw; 3726 3727 esw = mlx5_devlink_eswitch_get(devlink); 3728 if (IS_ERR(esw)) 3729 return; 3730 3731 down_write(&esw->mode_lock); 3732 esw->offloads.num_block_encap--; 3733 up_write(&esw->mode_lock); 3734 } 3735 3736 int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, 3737 enum devlink_eswitch_encap_mode encap, 3738 struct netlink_ext_ack *extack) 3739 { 3740 struct mlx5_core_dev *dev = devlink_priv(devlink); 3741 struct mlx5_eswitch *esw; 3742 int err = 0; 3743 3744 esw = mlx5_devlink_eswitch_get(devlink); 3745 if (IS_ERR(esw)) 3746 return PTR_ERR(esw); 3747 3748 down_write(&esw->mode_lock); 3749 3750 if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE && 3751 (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) || 3752 !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))) { 3753 err = -EOPNOTSUPP; 3754 goto unlock; 3755 } 3756 3757 if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC) { 3758 err = -EOPNOTSUPP; 3759 goto unlock; 3760 } 3761 3762 if (esw->mode == MLX5_ESWITCH_LEGACY) { 3763 esw->offloads.encap = encap; 3764 goto unlock; 3765 } 3766 3767 if (esw->offloads.encap == encap) 3768 goto unlock; 3769 3770 if (atomic64_read(&esw->offloads.num_flows) > 0) { 3771 NL_SET_ERR_MSG_MOD(extack, 3772 "Can't set encapsulation when flows are configured"); 3773 err = -EOPNOTSUPP; 3774 goto unlock; 3775 } 3776 3777 if (esw->offloads.num_block_encap) { 3778 NL_SET_ERR_MSG_MOD(extack, 3779 "Can't set encapsulation when IPsec SA and/or policies are configured"); 3780 err = -EOPNOTSUPP; 3781 goto unlock; 3782 } 3783 3784 esw_destroy_offloads_fdb_tables(esw); 3785 3786 
esw->offloads.encap = encap; 3787 3788 err = esw_create_offloads_fdb_tables(esw); 3789 3790 if (err) { 3791 NL_SET_ERR_MSG_MOD(extack, 3792 "Failed re-creating fast FDB table"); 3793 esw->offloads.encap = !encap; 3794 (void)esw_create_offloads_fdb_tables(esw); 3795 } 3796 3797 unlock: 3798 up_write(&esw->mode_lock); 3799 return err; 3800 } 3801 3802 int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, 3803 enum devlink_eswitch_encap_mode *encap) 3804 { 3805 struct mlx5_eswitch *esw; 3806 3807 esw = mlx5_devlink_eswitch_get(devlink); 3808 if (IS_ERR(esw)) 3809 return PTR_ERR(esw); 3810 3811 down_read(&esw->mode_lock); 3812 *encap = esw->offloads.encap; 3813 up_read(&esw->mode_lock); 3814 return 0; 3815 } 3816 3817 static bool 3818 mlx5_eswitch_vport_has_rep(const struct mlx5_eswitch *esw, u16 vport_num) 3819 { 3820 /* Currently, only ECPF based device has representor for host PF. */ 3821 if (vport_num == MLX5_VPORT_PF && 3822 !mlx5_core_is_ecpf_esw_manager(esw->dev)) 3823 return false; 3824 3825 if (vport_num == MLX5_VPORT_ECPF && 3826 !mlx5_ecpf_vport_exists(esw->dev)) 3827 return false; 3828 3829 return true; 3830 } 3831 3832 void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw, 3833 const struct mlx5_eswitch_rep_ops *ops, 3834 u8 rep_type) 3835 { 3836 struct mlx5_eswitch_rep_data *rep_data; 3837 struct mlx5_eswitch_rep *rep; 3838 unsigned long i; 3839 3840 esw->offloads.rep_ops[rep_type] = ops; 3841 mlx5_esw_for_each_rep(esw, i, rep) { 3842 if (likely(mlx5_eswitch_vport_has_rep(esw, rep->vport))) { 3843 rep->esw = esw; 3844 rep_data = &rep->rep_data[rep_type]; 3845 atomic_set(&rep_data->state, REP_REGISTERED); 3846 } 3847 } 3848 } 3849 EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps); 3850 3851 void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type) 3852 { 3853 struct mlx5_eswitch_rep *rep; 3854 unsigned long i; 3855 3856 if (esw->mode == MLX5_ESWITCH_OFFLOADS) 3857 __unload_reps_all_vport(esw, rep_type); 3858 3859 mlx5_esw_for_each_rep(esw, i, rep) 3860 atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED); 3861 } 3862 EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps); 3863 3864 void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type) 3865 { 3866 struct mlx5_eswitch_rep *rep; 3867 3868 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK); 3869 return rep->rep_data[rep_type].priv; 3870 } 3871 3872 void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw, 3873 u16 vport, 3874 u8 rep_type) 3875 { 3876 struct mlx5_eswitch_rep *rep; 3877 3878 rep = mlx5_eswitch_get_rep(esw, vport); 3879 3880 if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED && 3881 esw->offloads.rep_ops[rep_type]->get_proto_dev) 3882 return esw->offloads.rep_ops[rep_type]->get_proto_dev(rep); 3883 return NULL; 3884 } 3885 EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev); 3886 3887 void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type) 3888 { 3889 return mlx5_eswitch_get_proto_dev(esw, MLX5_VPORT_UPLINK, rep_type); 3890 } 3891 EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev); 3892 3893 struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw, 3894 u16 vport) 3895 { 3896 return mlx5_eswitch_get_rep(esw, vport); 3897 } 3898 EXPORT_SYMBOL(mlx5_eswitch_vport_rep); 3899 3900 bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw) 3901 { 3902 return !!(esw->flags & MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED); 3903 } 3904 EXPORT_SYMBOL(mlx5_eswitch_reg_c1_loopback_enabled); 3905 3906 bool 
mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw) 3907 { 3908 return !!(esw->flags & MLX5_ESWITCH_VPORT_MATCH_METADATA); 3909 } 3910 EXPORT_SYMBOL(mlx5_eswitch_vport_match_metadata_enabled); 3911 3912 u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw, 3913 u16 vport_num) 3914 { 3915 struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num); 3916 3917 if (WARN_ON_ONCE(IS_ERR(vport))) 3918 return 0; 3919 3920 return vport->metadata << (32 - ESW_SOURCE_PORT_METADATA_BITS); 3921 } 3922 EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match); 3923 3924 int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port, 3925 u16 vport_num, u32 controller, u32 sfnum) 3926 { 3927 int err; 3928 3929 err = mlx5_esw_vport_enable(esw, vport_num, MLX5_VPORT_UC_ADDR_CHANGE); 3930 if (err) 3931 return err; 3932 3933 err = mlx5_esw_devlink_sf_port_register(esw, dl_port, vport_num, controller, sfnum); 3934 if (err) 3935 goto devlink_err; 3936 3937 err = mlx5_esw_offloads_rep_load(esw, vport_num); 3938 if (err) 3939 goto rep_err; 3940 return 0; 3941 3942 rep_err: 3943 mlx5_esw_devlink_sf_port_unregister(esw, vport_num); 3944 devlink_err: 3945 mlx5_esw_vport_disable(esw, vport_num); 3946 return err; 3947 } 3948 3949 void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num) 3950 { 3951 mlx5_esw_offloads_rep_unload(esw, vport_num); 3952 mlx5_esw_devlink_sf_port_unregister(esw, vport_num); 3953 mlx5_esw_vport_disable(esw, vport_num); 3954 } 3955 3956 static int mlx5_esw_query_vport_vhca_id(struct mlx5_eswitch *esw, u16 vport_num, u16 *vhca_id) 3957 { 3958 int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out); 3959 void *query_ctx; 3960 void *hca_caps; 3961 int err; 3962 3963 *vhca_id = 0; 3964 3965 query_ctx = kzalloc(query_out_sz, GFP_KERNEL); 3966 if (!query_ctx) 3967 return -ENOMEM; 3968 3969 err = mlx5_vport_get_other_func_general_cap(esw->dev, vport_num, query_ctx); 3970 if (err) 3971 goto out_free; 3972 3973 hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability); 3974 *vhca_id = MLX5_GET(cmd_hca_cap, hca_caps, vhca_id); 3975 3976 out_free: 3977 kfree(query_ctx); 3978 return err; 3979 } 3980 3981 int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num) 3982 { 3983 u16 *old_entry, *vhca_map_entry, vhca_id; 3984 int err; 3985 3986 err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id); 3987 if (err) { 3988 esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%u,err=%d)\n", 3989 vport_num, err); 3990 return err; 3991 } 3992 3993 vhca_map_entry = kmalloc(sizeof(*vhca_map_entry), GFP_KERNEL); 3994 if (!vhca_map_entry) 3995 return -ENOMEM; 3996 3997 *vhca_map_entry = vport_num; 3998 old_entry = xa_store(&esw->offloads.vhca_map, vhca_id, vhca_map_entry, GFP_KERNEL); 3999 if (xa_is_err(old_entry)) { 4000 kfree(vhca_map_entry); 4001 return xa_err(old_entry); 4002 } 4003 kfree(old_entry); 4004 return 0; 4005 } 4006 4007 void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num) 4008 { 4009 u16 *vhca_map_entry, vhca_id; 4010 int err; 4011 4012 err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id); 4013 if (err) 4014 esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%hu,err=%d)\n", 4015 vport_num, err); 4016 4017 vhca_map_entry = xa_erase(&esw->offloads.vhca_map, vhca_id); 4018 kfree(vhca_map_entry); 4019 } 4020 4021 int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num) 4022 { 4023 u16 *res = 
xa_load(&esw->offloads.vhca_map, vhca_id); 4024 4025 if (!res) 4026 return -ENOENT; 4027 4028 *vport_num = *res; 4029 return 0; 4030 } 4031 4032 u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw, 4033 u16 vport_num) 4034 { 4035 struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num); 4036 4037 if (WARN_ON_ONCE(IS_ERR(vport))) 4038 return 0; 4039 4040 return vport->metadata; 4041 } 4042 EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_set); 4043 4044 static bool 4045 is_port_function_supported(struct mlx5_eswitch *esw, u16 vport_num) 4046 { 4047 return vport_num == MLX5_VPORT_PF || 4048 mlx5_eswitch_is_vf_vport(esw, vport_num) || 4049 mlx5_esw_is_sf_vport(esw, vport_num); 4050 } 4051 4052 int mlx5_devlink_port_fn_hw_addr_get(struct devlink_port *port, 4053 u8 *hw_addr, int *hw_addr_len, 4054 struct netlink_ext_ack *extack) 4055 { 4056 struct mlx5_eswitch *esw; 4057 struct mlx5_vport *vport; 4058 u16 vport_num; 4059 4060 esw = mlx5_devlink_eswitch_get(port->devlink); 4061 if (IS_ERR(esw)) 4062 return PTR_ERR(esw); 4063 4064 vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index); 4065 if (!is_port_function_supported(esw, vport_num)) 4066 return -EOPNOTSUPP; 4067 4068 vport = mlx5_eswitch_get_vport(esw, vport_num); 4069 if (IS_ERR(vport)) { 4070 NL_SET_ERR_MSG_MOD(extack, "Invalid port"); 4071 return PTR_ERR(vport); 4072 } 4073 4074 mutex_lock(&esw->state_lock); 4075 ether_addr_copy(hw_addr, vport->info.mac); 4076 *hw_addr_len = ETH_ALEN; 4077 mutex_unlock(&esw->state_lock); 4078 return 0; 4079 } 4080 4081 int mlx5_devlink_port_fn_hw_addr_set(struct devlink_port *port, 4082 const u8 *hw_addr, int hw_addr_len, 4083 struct netlink_ext_ack *extack) 4084 { 4085 struct mlx5_eswitch *esw; 4086 u16 vport_num; 4087 4088 esw = mlx5_devlink_eswitch_get(port->devlink); 4089 if (IS_ERR(esw)) { 4090 NL_SET_ERR_MSG_MOD(extack, "Eswitch doesn't support set hw_addr"); 4091 return PTR_ERR(esw); 4092 } 4093 4094 vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index); 4095 if (!is_port_function_supported(esw, vport_num)) { 4096 NL_SET_ERR_MSG_MOD(extack, "Port doesn't support set hw_addr"); 4097 return -EINVAL; 4098 } 4099 4100 return mlx5_eswitch_set_vport_mac(esw, vport_num, hw_addr); 4101 } 4102 4103 static struct mlx5_vport * 4104 mlx5_devlink_port_fn_get_vport(struct devlink_port *port, struct mlx5_eswitch *esw) 4105 { 4106 u16 vport_num; 4107 4108 if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) 4109 return ERR_PTR(-EOPNOTSUPP); 4110 4111 vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index); 4112 if (!is_port_function_supported(esw, vport_num)) 4113 return ERR_PTR(-EOPNOTSUPP); 4114 4115 return mlx5_eswitch_get_vport(esw, vport_num); 4116 } 4117 4118 int mlx5_devlink_port_fn_migratable_get(struct devlink_port *port, bool *is_enabled, 4119 struct netlink_ext_ack *extack) 4120 { 4121 struct mlx5_eswitch *esw; 4122 struct mlx5_vport *vport; 4123 int err = -EOPNOTSUPP; 4124 4125 esw = mlx5_devlink_eswitch_get(port->devlink); 4126 if (IS_ERR(esw)) 4127 return PTR_ERR(esw); 4128 4129 if (!MLX5_CAP_GEN(esw->dev, migration)) { 4130 NL_SET_ERR_MSG_MOD(extack, "Device doesn't support migration"); 4131 return err; 4132 } 4133 4134 vport = mlx5_devlink_port_fn_get_vport(port, esw); 4135 if (IS_ERR(vport)) { 4136 NL_SET_ERR_MSG_MOD(extack, "Invalid port"); 4137 return PTR_ERR(vport); 4138 } 4139 4140 mutex_lock(&esw->state_lock); 4141 if (vport->enabled) { 4142 *is_enabled = vport->info.mig_enabled; 4143 err = 0; 4144 } 4145 
mutex_unlock(&esw->state_lock); 4146 return err; 4147 } 4148 4149 int mlx5_devlink_port_fn_migratable_set(struct devlink_port *port, bool enable, 4150 struct netlink_ext_ack *extack) 4151 { 4152 int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out); 4153 struct mlx5_eswitch *esw; 4154 struct mlx5_vport *vport; 4155 void *query_ctx; 4156 void *hca_caps; 4157 int err = -EOPNOTSUPP; 4158 4159 esw = mlx5_devlink_eswitch_get(port->devlink); 4160 if (IS_ERR(esw)) 4161 return PTR_ERR(esw); 4162 4163 if (!MLX5_CAP_GEN(esw->dev, migration)) { 4164 NL_SET_ERR_MSG_MOD(extack, "Device doesn't support migration"); 4165 return err; 4166 } 4167 4168 vport = mlx5_devlink_port_fn_get_vport(port, esw); 4169 if (IS_ERR(vport)) { 4170 NL_SET_ERR_MSG_MOD(extack, "Invalid port"); 4171 return PTR_ERR(vport); 4172 } 4173 4174 mutex_lock(&esw->state_lock); 4175 if (!vport->enabled) { 4176 NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled"); 4177 goto out; 4178 } 4179 4180 if (vport->info.mig_enabled == enable) { 4181 err = 0; 4182 goto out; 4183 } 4184 4185 query_ctx = kzalloc(query_out_sz, GFP_KERNEL); 4186 if (!query_ctx) { 4187 err = -ENOMEM; 4188 goto out; 4189 } 4190 4191 err = mlx5_vport_get_other_func_cap(esw->dev, vport->vport, query_ctx, 4192 MLX5_CAP_GENERAL_2); 4193 if (err) { 4194 NL_SET_ERR_MSG_MOD(extack, "Failed getting HCA caps"); 4195 goto out_free; 4196 } 4197 4198 hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability); 4199 MLX5_SET(cmd_hca_cap_2, hca_caps, migratable, 1); 4200 4201 err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport->vport, 4202 MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE2); 4203 if (err) { 4204 NL_SET_ERR_MSG_MOD(extack, "Failed setting HCA migratable cap"); 4205 goto out_free; 4206 } 4207 4208 vport->info.mig_enabled = enable; 4209 4210 out_free: 4211 kfree(query_ctx); 4212 out: 4213 mutex_unlock(&esw->state_lock); 4214 return err; 4215 } 4216 4217 int mlx5_devlink_port_fn_roce_get(struct devlink_port *port, bool *is_enabled, 4218 struct netlink_ext_ack *extack) 4219 { 4220 struct mlx5_eswitch *esw; 4221 struct mlx5_vport *vport; 4222 int err = -EOPNOTSUPP; 4223 4224 esw = mlx5_devlink_eswitch_get(port->devlink); 4225 if (IS_ERR(esw)) 4226 return PTR_ERR(esw); 4227 4228 vport = mlx5_devlink_port_fn_get_vport(port, esw); 4229 if (IS_ERR(vport)) { 4230 NL_SET_ERR_MSG_MOD(extack, "Invalid port"); 4231 return PTR_ERR(vport); 4232 } 4233 4234 mutex_lock(&esw->state_lock); 4235 if (vport->enabled) { 4236 *is_enabled = vport->info.roce_enabled; 4237 err = 0; 4238 } 4239 mutex_unlock(&esw->state_lock); 4240 return err; 4241 } 4242 4243 int mlx5_devlink_port_fn_roce_set(struct devlink_port *port, bool enable, 4244 struct netlink_ext_ack *extack) 4245 { 4246 int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out); 4247 struct mlx5_eswitch *esw; 4248 struct mlx5_vport *vport; 4249 int err = -EOPNOTSUPP; 4250 void *query_ctx; 4251 void *hca_caps; 4252 u16 vport_num; 4253 4254 esw = mlx5_devlink_eswitch_get(port->devlink); 4255 if (IS_ERR(esw)) 4256 return PTR_ERR(esw); 4257 4258 vport = mlx5_devlink_port_fn_get_vport(port, esw); 4259 if (IS_ERR(vport)) { 4260 NL_SET_ERR_MSG_MOD(extack, "Invalid port"); 4261 return PTR_ERR(vport); 4262 } 4263 vport_num = vport->vport; 4264 4265 mutex_lock(&esw->state_lock); 4266 if (!vport->enabled) { 4267 NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled"); 4268 goto out; 4269 } 4270 4271 if (vport->info.roce_enabled == enable) { 4272 err = 0; 4273 goto out; 4274 } 4275 4276 query_ctx = kzalloc(query_out_sz, GFP_KERNEL); 4277 if 
(!query_ctx) { 4278 err = -ENOMEM; 4279 goto out; 4280 } 4281 4282 err = mlx5_vport_get_other_func_cap(esw->dev, vport_num, query_ctx, 4283 MLX5_CAP_GENERAL); 4284 if (err) { 4285 NL_SET_ERR_MSG_MOD(extack, "Failed getting HCA caps"); 4286 goto out_free; 4287 } 4288 4289 hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability); 4290 MLX5_SET(cmd_hca_cap, hca_caps, roce, enable); 4291 4292 err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport_num, 4293 MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE); 4294 if (err) { 4295 NL_SET_ERR_MSG_MOD(extack, "Failed setting HCA roce cap"); 4296 goto out_free; 4297 } 4298 4299 vport->info.roce_enabled = enable; 4300 4301 out_free: 4302 kfree(query_ctx); 4303 out: 4304 mutex_unlock(&esw->state_lock); 4305 return err; 4306 } 4307