1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB 2 /* Copyright (c) 2019 Mellanox Technologies */ 3 4 #include <linux/mlx5/vport.h> 5 #include "mlx5_core.h" 6 #include "fs_core.h" 7 #include "fs_cmd.h" 8 #include "mlx5dr.h" 9 #include "fs_dr.h" 10 11 static bool mlx5_dr_is_fw_table(u32 flags) 12 { 13 if (flags & MLX5_FLOW_TABLE_TERMINATION) 14 return true; 15 16 return false; 17 } 18 19 static int mlx5_cmd_dr_update_root_ft(struct mlx5_flow_root_namespace *ns, 20 struct mlx5_flow_table *ft, 21 u32 underlay_qpn, 22 bool disconnect) 23 { 24 return mlx5_fs_cmd_get_fw_cmds()->update_root_ft(ns, ft, underlay_qpn, 25 disconnect); 26 } 27 28 static int set_miss_action(struct mlx5_flow_root_namespace *ns, 29 struct mlx5_flow_table *ft, 30 struct mlx5_flow_table *next_ft) 31 { 32 struct mlx5dr_action *old_miss_action; 33 struct mlx5dr_action *action = NULL; 34 struct mlx5dr_table *next_tbl; 35 int err; 36 37 next_tbl = next_ft ? next_ft->fs_dr_table.dr_table : NULL; 38 if (next_tbl) { 39 action = mlx5dr_action_create_dest_table(next_tbl); 40 if (!action) 41 return -EINVAL; 42 } 43 old_miss_action = ft->fs_dr_table.miss_action; 44 err = mlx5dr_table_set_miss_action(ft->fs_dr_table.dr_table, action); 45 if (err && action) { 46 err = mlx5dr_action_destroy(action); 47 if (err) { 48 action = NULL; 49 mlx5_core_err(ns->dev, "Failed to destroy action (%d)\n", 50 err); 51 } 52 } 53 ft->fs_dr_table.miss_action = action; 54 if (old_miss_action) { 55 err = mlx5dr_action_destroy(old_miss_action); 56 if (err) 57 mlx5_core_err(ns->dev, "Failed to destroy action (%d)\n", 58 err); 59 } 60 61 return err; 62 } 63 64 static int mlx5_cmd_dr_create_flow_table(struct mlx5_flow_root_namespace *ns, 65 struct mlx5_flow_table *ft, 66 unsigned int size, 67 struct mlx5_flow_table *next_ft) 68 { 69 struct mlx5dr_table *tbl; 70 u32 flags; 71 int err; 72 73 if (mlx5_dr_is_fw_table(ft->flags)) 74 return mlx5_fs_cmd_get_fw_cmds()->create_flow_table(ns, ft, 75 size, 76 next_ft); 77 flags = ft->flags; 
	/* turn off encap/decap if not supported for sw-str by fw */
	if (!MLX5_CAP_FLOWTABLE(ns->dev, sw_owner_reformat_supported))
		flags = ft->flags & ~(MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
				      MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	tbl = mlx5dr_table_create(ns->fs_dr_domain.dr_domain, ft->level, flags);
	if (!tbl) {
		mlx5_core_err(ns->dev, "Failed creating dr flow_table\n");
		return -EINVAL;
	}

	ft->fs_dr_table.dr_table = tbl;
	ft->id = mlx5dr_table_get_id(tbl);

	if (next_ft) {
		err = set_miss_action(ns, ft, next_ft);
		if (err) {
			/* Roll back the table creation on miss-chain failure */
			mlx5dr_table_destroy(tbl);
			ft->fs_dr_table.dr_table = NULL;
			return err;
		}
	}

	/* SW steering tables are not pre-sized; advertise unlimited FTEs */
	ft->max_fte = INT_MAX;

	return 0;
}

/* Destroy a flow table and its cached miss action (FW-owned tables are
 * delegated to the FW command set).
 */
static int mlx5_cmd_dr_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
					  struct mlx5_flow_table *ft)
{
	struct mlx5dr_action *action = ft->fs_dr_table.miss_action;
	int err;

	if (mlx5_dr_is_fw_table(ft->flags))
		return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_table(ns, ft);

	err = mlx5dr_table_destroy(ft->fs_dr_table.dr_table);
	if (err) {
		mlx5_core_err(ns->dev, "Failed to destroy flow_table (%d)\n",
			      err);
		return err;
	}
	/* The miss action (if any) can only be freed after the table that
	 * referenced it is gone.
	 */
	if (action) {
		err = mlx5dr_action_destroy(action);
		if (err) {
			mlx5_core_err(ns->dev, "Failed to destroy action(%d)\n",
				      err);
			return err;
		}
	}

	return err;
}

/* "Modify" for a SW-steering table only means re-pointing its miss path. */
static int mlx5_cmd_dr_modify_flow_table(struct mlx5_flow_root_namespace *ns,
					 struct mlx5_flow_table *ft,
					 struct mlx5_flow_table *next_ft)
{
	if (mlx5_dr_is_fw_table(ft->flags))
		return mlx5_fs_cmd_get_fw_cmds()->modify_flow_table(ns, ft, next_ft);

	return set_miss_action(ns, ft, next_ft);
}

/* Create a flow group by instantiating a dr matcher. The group's
 * start_flow_index doubles as the matcher priority so that matcher order
 * follows group order inside the table.
 */
static int mlx5_cmd_dr_create_flow_group(struct mlx5_flow_root_namespace *ns,
					 struct mlx5_flow_table *ft,
					 u32 *in,
					 struct mlx5_flow_group *fg)
{
	struct mlx5dr_matcher *matcher;
	u32 priority = MLX5_GET(create_flow_group_in, in,
				start_flow_index);
	u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
					    in,
					    match_criteria_enable);
	struct mlx5dr_match_parameters mask;

	if (mlx5_dr_is_fw_table(ft->flags))
		return mlx5_fs_cmd_get_fw_cmds()->create_flow_group(ns, ft, in,
								    fg);

	mask.match_buf = MLX5_ADDR_OF(create_flow_group_in,
				      in, match_criteria);
	mask.match_sz = sizeof(fg->mask.match_criteria);

	matcher = mlx5dr_matcher_create(ft->fs_dr_table.dr_table,
					priority,
					match_criteria_enable,
					&mask);
	if (!matcher) {
		mlx5_core_err(ns->dev, "Failed creating matcher\n");
		return -EINVAL;
	}

	fg->fs_dr_matcher.dr_matcher = matcher;
	return 0;
}

static int mlx5_cmd_dr_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
					  struct mlx5_flow_table *ft,
					  struct mlx5_flow_group *fg)
{
	if (mlx5_dr_is_fw_table(ft->flags))
		return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_group(ns, ft, fg);

	return mlx5dr_matcher_destroy(fg->fs_dr_matcher.dr_matcher);
}

/* Build a forward-to-vport action; the VHCA id is only passed down when
 * the destination explicitly carries one.
 */
static struct mlx5dr_action *create_vport_action(struct mlx5dr_domain *domain,
						 struct mlx5_flow_rule *dst)
{
	struct mlx5_flow_destination *dest_attr = &dst->dest_attr;

	return mlx5dr_action_create_dest_vport(domain, dest_attr->vport.num,
					       dest_attr->vport.flags &
					       MLX5_FLOW_DEST_VPORT_VHCA_ID,
					       dest_attr->vport.vhca_id);
}

/* Uplink is modeled as a fixed vport number; the VHCA id is always valid
 * here (third argument hard-coded to 1).
 */
static struct mlx5dr_action *create_uplink_action(struct mlx5dr_domain *domain,
						  struct mlx5_flow_rule *dst)
{
	struct mlx5_flow_destination *dest_attr = &dst->dest_attr;

	return mlx5dr_action_create_dest_vport(domain, MLX5_VPORT_UPLINK, 1,
					       dest_attr->vport.vhca_id);
}

/* Forward-to-table action; FW-owned destination tables need the dedicated
 * FW-table action variant.
 */
static struct mlx5dr_action *create_ft_action(struct mlx5dr_domain *domain,
					      struct mlx5_flow_rule *dst)
{
	struct mlx5_flow_table *dest_ft = dst->dest_attr.ft;

	if (mlx5_dr_is_fw_table(dest_ft->flags))
		return mlx5dr_action_create_dest_flow_fw_table(domain, dest_ft);
	return mlx5dr_action_create_dest_table(dest_ft->fs_dr_table.dr_table);
}

/* Pack ethtype/prio/vid into the 32-bit VLAN header layout expected by
 * the push-vlan action (network byte order).
 */
static struct mlx5dr_action *create_action_push_vlan(struct mlx5dr_domain *domain,
						     struct mlx5_fs_vlan *vlan)
{
	u16 n_ethtype = vlan->ethtype;
	u8 prio = vlan->prio;
	u16 vid = vlan->vid;
	u32 vlan_hdr;

	vlan_hdr = (u32)n_ethtype << 16 | (u32)(prio) << 12 | (u32)vid;
	return mlx5dr_action_create_push_vlan(domain, htonl(vlan_hdr));
}

/* True when the destination is a vport/uplink that carries its own
 * per-destination reformat id.
 */
static bool contain_vport_reformat_action(struct mlx5_flow_rule *dst)
{
	return (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
		dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) &&
		dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
}

/* We want to support a rule with 32 destinations, which means we need to
 * account for 32 destinations plus usually a counter plus one more action
 * for a multi-destination flow table.
 */
#define MLX5_FLOW_CONTEXT_ACTION_MAX  34
/* Translate an FTE (flow-table entry) into a dr rule: build the ordered
 * non-terminating action list plus the terminating destination list, then
 * create the rule on the group's matcher. On success ownership of the
 * locally created actions moves to fte->fs_dr_rule; on failure everything
 * allocated here is released.
 */
static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
				  struct mlx5_flow_table *ft,
				  struct mlx5_flow_group *group,
				  struct fs_fte *fte)
{
	struct mlx5dr_domain *domain = ns->fs_dr_domain.dr_domain;
	struct mlx5dr_action_dest *term_actions;
	struct mlx5dr_match_parameters params;
	struct mlx5_core_dev *dev = ns->dev;
	struct mlx5dr_action **fs_dr_actions;
	struct mlx5dr_action *tmp_action;
	struct mlx5dr_action **actions;
	bool delay_encap_set = false;
	struct mlx5dr_rule *rule;
	struct mlx5_flow_rule *dst;
	int fs_dr_num_actions = 0;
	int num_term_actions = 0;
	int num_actions = 0;
	size_t match_sz;
	int err = 0;
	int i;

	if (mlx5_dr_is_fw_table(ft->flags))
		return mlx5_fs_cmd_get_fw_cmds()->create_fte(ns, ft, group, fte);

	/* actions: ordered list handed to mlx5dr_rule_create.
	 * fs_dr_actions: every action created here, kept for teardown.
	 * term_actions: terminating destinations, folded into 'actions' later.
	 */
	actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX, sizeof(*actions),
			  GFP_KERNEL);
	if (!actions) {
		err = -ENOMEM;
		goto out_err;
	}

	fs_dr_actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX,
				sizeof(*fs_dr_actions), GFP_KERNEL);
	if (!fs_dr_actions) {
		err = -ENOMEM;
		goto free_actions_alloc;
	}

	term_actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX,
			       sizeof(*term_actions), GFP_KERNEL);
	if (!term_actions) {
		err = -ENOMEM;
		goto free_fs_dr_actions_alloc;
	}

	match_sz = sizeof(fte->val);

	/* Drop reformat action bit if destination vport set with reformat */
	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		list_for_each_entry(dst, &fte->node.children, node.list) {
			if (!contain_vport_reformat_action(dst))
				continue;

			fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
			break;
		}
	}

	/* The order of the actions must be kept; only the following
	 * order is supported by SW steering:
	 * TX: modify header -> push vlan -> encap
	 * RX: decap -> pop vlan -> modify header
	 */
	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
		enum mlx5dr_action_reformat_type decap_type =
			DR_ACTION_REFORMAT_TYP_TNL_L2_TO_L2;

		tmp_action = mlx5dr_action_create_packet_reformat(domain,
								  decap_type,
								  0, 0, 0,
								  NULL);
		if (!tmp_action) {
			err = -ENOMEM;
			goto free_actions;
		}
		fs_dr_actions[fs_dr_num_actions++] = tmp_action;
		actions[num_actions++] = tmp_action;
	}

	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
		bool is_decap = fte->action.pkt_reformat->reformat_type ==
			MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2;

		/* Decap goes first in the RX order; encap must be delayed
		 * until after modify-header/push-vlan (TX order above).
		 * The pre-built pkt_reformat action is NOT added to
		 * fs_dr_actions: its lifetime is owned by the caller.
		 */
		if (is_decap)
			actions[num_actions++] =
				fte->action.pkt_reformat->action.dr_action;
		else
			delay_encap_set = true;
	}

	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
		tmp_action =
			mlx5dr_action_create_pop_vlan();
		if (!tmp_action) {
			err = -ENOMEM;
			goto free_actions;
		}
		fs_dr_actions[fs_dr_num_actions++] = tmp_action;
		actions[num_actions++] = tmp_action;
	}

	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2) {
		tmp_action =
			mlx5dr_action_create_pop_vlan();
		if (!tmp_action) {
			err = -ENOMEM;
			goto free_actions;
		}
		fs_dr_actions[fs_dr_num_actions++] = tmp_action;
		actions[num_actions++] = tmp_action;
	}

	/* modify_hdr action is owned by the caller, not tracked here */
	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		actions[num_actions++] =
			fte->action.modify_hdr->action.dr_action;

	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
		tmp_action = create_action_push_vlan(domain, &fte->action.vlan[0]);
		if (!tmp_action) {
			err = -ENOMEM;
			goto free_actions;
		}
		fs_dr_actions[fs_dr_num_actions++] = tmp_action;
		actions[num_actions++] = tmp_action;
	}

	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
		tmp_action = create_action_push_vlan(domain, &fte->action.vlan[1]);
		if (!tmp_action) {
			err = -ENOMEM;
			goto free_actions;
		}
		fs_dr_actions[fs_dr_num_actions++] = tmp_action;
		actions[num_actions++] = tmp_action;
	}

	/* Encap deferred from the PACKET_REFORMAT handling above */
	if (delay_encap_set)
		actions[num_actions++] =
			fte->action.pkt_reformat->action.dr_action;

	/* The order of the actions below is not important */

	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
		tmp_action = mlx5dr_action_create_drop();
		if (!tmp_action) {
			err = -ENOMEM;
			goto free_actions;
		}
		fs_dr_actions[fs_dr_num_actions++] = tmp_action;
		term_actions[num_term_actions++].dest = tmp_action;
	}

	if (fte->flow_context.flow_tag) {
		tmp_action =
			mlx5dr_action_create_tag(fte->flow_context.flow_tag);
		if (!tmp_action) {
			err = -ENOMEM;
			goto free_actions;
		}
		fs_dr_actions[fs_dr_num_actions++] = tmp_action;
		actions[num_actions++] = tmp_action;
	}

	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		list_for_each_entry(dst, &fte->node.children, node.list) {
			enum mlx5_flow_destination_type type = dst->dest_attr.type;
			u32 id;

			/* Guard both arrays before any branch can append */
			if (fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
			    num_term_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
				err = -EOPNOTSUPP;
				goto free_actions;
			}

			/* Counters are handled by the COUNT pass below */
			if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			switch (type) {
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
				tmp_action = create_ft_action(domain, dst);
				if (!tmp_action) {
					err = -ENOMEM;
					goto free_actions;
				}
				fs_dr_actions[fs_dr_num_actions++] = tmp_action;
				term_actions[num_term_actions++].dest = tmp_action;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
			case MLX5_FLOW_DESTINATION_TYPE_VPORT:
				tmp_action = type == MLX5_FLOW_DESTINATION_TYPE_VPORT ?
					create_vport_action(domain, dst) :
					create_uplink_action(domain, dst);
				if (!tmp_action) {
					err = -ENOMEM;
					goto free_actions;
				}
				fs_dr_actions[fs_dr_num_actions++] = tmp_action;
				term_actions[num_term_actions].dest = tmp_action;

				/* Per-destination reformat rides along in the
				 * same term_actions slot as the vport dest.
				 */
				if (dst->dest_attr.vport.flags &
				    MLX5_FLOW_DEST_VPORT_REFORMAT_ID)
					term_actions[num_term_actions].reformat =
						dst->dest_attr.vport.pkt_reformat->action.dr_action;

				num_term_actions++;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
				id = dst->dest_attr.ft_num;
				tmp_action = mlx5dr_action_create_dest_table_num(domain,
										 id);
				if (!tmp_action) {
					err = -ENOMEM;
					goto free_actions;
				}
				fs_dr_actions[fs_dr_num_actions++] = tmp_action;
				term_actions[num_term_actions++].dest = tmp_action;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
				id = dst->dest_attr.sampler_id;
				tmp_action = mlx5dr_action_create_flow_sampler(domain,
									       id);
				if (!tmp_action) {
					err = -ENOMEM;
					goto free_actions;
				}
				fs_dr_actions[fs_dr_num_actions++] = tmp_action;
				term_actions[num_term_actions++].dest = tmp_action;
				break;
			default:
				err = -EOPNOTSUPP;
				goto free_actions;
			}
		}
	}

	/* Second pass over the children: counters only */
	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		list_for_each_entry(dst, &fte->node.children, node.list) {
			u32 id;

			if (dst->dest_attr.type !=
			    MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
			    fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
				err = -EOPNOTSUPP;
				goto free_actions;
			}

			id = dst->dest_attr.counter_id;
			tmp_action =
				mlx5dr_action_create_flow_counter(id);
			if (!tmp_action) {
				err = -ENOMEM;
				goto free_actions;
			}

			fs_dr_actions[fs_dr_num_actions++] = tmp_action;
			actions[num_actions++] = tmp_action;
		}
	}

	params.match_sz = match_sz;
	params.match_buf = (u64 *)fte->val;
	if (num_term_actions == 1) {
		/* Single destination: append it (and its optional reformat)
		 * directly to the ordered action list.
		 */
		if (term_actions->reformat) {
			if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
				err = -EOPNOTSUPP;
				goto free_actions;
			}
			actions[num_actions++] = term_actions->reformat;
		}

		if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
			err = -EOPNOTSUPP;
			goto free_actions;
		}
		actions[num_actions++] = term_actions->dest;
	} else if (num_term_actions > 1) {
		/* Multiple destinations need a multi-dest table action */
		bool ignore_flow_level =
			!!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL);

		if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
		    fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
			err = -EOPNOTSUPP;
			goto free_actions;
		}
		tmp_action = mlx5dr_action_create_mult_dest_tbl(domain,
								term_actions,
								num_term_actions,
								ignore_flow_level);
		if (!tmp_action) {
			err = -EOPNOTSUPP;
			goto free_actions;
		}
		fs_dr_actions[fs_dr_num_actions++] = tmp_action;
		actions[num_actions++] = tmp_action;
	}

	rule = mlx5dr_rule_create(group->fs_dr_matcher.dr_matcher,
				  &params,
				  num_actions,
				  actions,
				  fte->flow_context.flow_source);
	if (!rule) {
		err = -EINVAL;
		goto free_actions;
	}

	/* Ownership of fs_dr_actions transfers to the FTE; the two scratch
	 * arrays are no longer needed.
	 */
	kfree(term_actions);
	kfree(actions);

	fte->fs_dr_rule.dr_rule = rule;
	fte->fs_dr_rule.num_actions = fs_dr_num_actions;
	fte->fs_dr_rule.dr_actions = fs_dr_actions;

	return 0;

free_actions:
	/* Free in reverse order to handle action dependencies */
	for (i = fs_dr_num_actions - 1; i >= 0; i--)
		if (!IS_ERR_OR_NULL(fs_dr_actions[i]))
			mlx5dr_action_destroy(fs_dr_actions[i]);

	kfree(term_actions);
free_fs_dr_actions_alloc:
	kfree(fs_dr_actions);
free_actions_alloc:
	kfree(actions);
out_err:
	mlx5_core_err(dev, "Failed to create dr rule err(%d)\n", err);
	return err;
}

static int mlx5_cmd_dr_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
					     struct mlx5_pkt_reformat_params *params,
					     enum
mlx5_flow_namespace_type namespace, 579 struct mlx5_pkt_reformat *pkt_reformat) 580 { 581 struct mlx5dr_domain *dr_domain = ns->fs_dr_domain.dr_domain; 582 struct mlx5dr_action *action; 583 int dr_reformat; 584 585 switch (params->type) { 586 case MLX5_REFORMAT_TYPE_L2_TO_VXLAN: 587 case MLX5_REFORMAT_TYPE_L2_TO_NVGRE: 588 case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL: 589 dr_reformat = DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L2; 590 break; 591 case MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2: 592 dr_reformat = DR_ACTION_REFORMAT_TYP_TNL_L3_TO_L2; 593 break; 594 case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL: 595 dr_reformat = DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L3; 596 break; 597 case MLX5_REFORMAT_TYPE_INSERT_HDR: 598 dr_reformat = DR_ACTION_REFORMAT_TYP_INSERT_HDR; 599 break; 600 case MLX5_REFORMAT_TYPE_REMOVE_HDR: 601 dr_reformat = DR_ACTION_REFORMAT_TYP_REMOVE_HDR; 602 break; 603 default: 604 mlx5_core_err(ns->dev, "Packet-reformat not supported(%d)\n", 605 params->type); 606 return -EOPNOTSUPP; 607 } 608 609 action = mlx5dr_action_create_packet_reformat(dr_domain, 610 dr_reformat, 611 params->param_0, 612 params->param_1, 613 params->size, 614 params->data); 615 if (!action) { 616 mlx5_core_err(ns->dev, "Failed allocating packet-reformat action\n"); 617 return -EINVAL; 618 } 619 620 pkt_reformat->action.dr_action = action; 621 622 return 0; 623 } 624 625 static void mlx5_cmd_dr_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns, 626 struct mlx5_pkt_reformat *pkt_reformat) 627 { 628 mlx5dr_action_destroy(pkt_reformat->action.dr_action); 629 } 630 631 static int mlx5_cmd_dr_modify_header_alloc(struct mlx5_flow_root_namespace *ns, 632 u8 namespace, u8 num_actions, 633 void *modify_actions, 634 struct mlx5_modify_hdr *modify_hdr) 635 { 636 struct mlx5dr_domain *dr_domain = ns->fs_dr_domain.dr_domain; 637 struct mlx5dr_action *action; 638 size_t actions_sz; 639 640 actions_sz = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto) * 641 num_actions; 642 action = 
mlx5dr_action_create_modify_header(dr_domain, 0, 643 actions_sz, 644 modify_actions); 645 if (!action) { 646 mlx5_core_err(ns->dev, "Failed allocating modify-header action\n"); 647 return -EINVAL; 648 } 649 650 modify_hdr->action.dr_action = action; 651 652 return 0; 653 } 654 655 static void mlx5_cmd_dr_modify_header_dealloc(struct mlx5_flow_root_namespace *ns, 656 struct mlx5_modify_hdr *modify_hdr) 657 { 658 mlx5dr_action_destroy(modify_hdr->action.dr_action); 659 } 660 661 static int 662 mlx5_cmd_dr_destroy_match_definer(struct mlx5_flow_root_namespace *ns, 663 int definer_id) 664 { 665 return -EOPNOTSUPP; 666 } 667 668 static int mlx5_cmd_dr_create_match_definer(struct mlx5_flow_root_namespace *ns, 669 u16 format_id, u32 *match_mask) 670 { 671 return -EOPNOTSUPP; 672 } 673 674 static int mlx5_cmd_dr_delete_fte(struct mlx5_flow_root_namespace *ns, 675 struct mlx5_flow_table *ft, 676 struct fs_fte *fte) 677 { 678 struct mlx5_fs_dr_rule *rule = &fte->fs_dr_rule; 679 int err; 680 int i; 681 682 if (mlx5_dr_is_fw_table(ft->flags)) 683 return mlx5_fs_cmd_get_fw_cmds()->delete_fte(ns, ft, fte); 684 685 err = mlx5dr_rule_destroy(rule->dr_rule); 686 if (err) 687 return err; 688 689 /* Free in reverse order to handle action dependencies */ 690 for (i = rule->num_actions - 1; i >= 0; i--) 691 if (!IS_ERR_OR_NULL(rule->dr_actions[i])) 692 mlx5dr_action_destroy(rule->dr_actions[i]); 693 694 kfree(rule->dr_actions); 695 return 0; 696 } 697 698 static int mlx5_cmd_dr_update_fte(struct mlx5_flow_root_namespace *ns, 699 struct mlx5_flow_table *ft, 700 struct mlx5_flow_group *group, 701 int modify_mask, 702 struct fs_fte *fte) 703 { 704 struct fs_fte fte_tmp = {}; 705 int ret; 706 707 if (mlx5_dr_is_fw_table(ft->flags)) 708 return mlx5_fs_cmd_get_fw_cmds()->update_fte(ns, ft, group, modify_mask, fte); 709 710 /* Backup current dr rule details */ 711 fte_tmp.fs_dr_rule = fte->fs_dr_rule; 712 memset(&fte->fs_dr_rule, 0, sizeof(struct mlx5_fs_dr_rule)); 713 714 /* First add the 
new updated rule, then delete the old rule */ 715 ret = mlx5_cmd_dr_create_fte(ns, ft, group, fte); 716 if (ret) 717 goto restore_fte; 718 719 ret = mlx5_cmd_dr_delete_fte(ns, ft, &fte_tmp); 720 WARN_ONCE(ret, "dr update fte duplicate rule deletion failed\n"); 721 return ret; 722 723 restore_fte: 724 fte->fs_dr_rule = fte_tmp.fs_dr_rule; 725 return ret; 726 } 727 728 static int mlx5_cmd_dr_set_peer(struct mlx5_flow_root_namespace *ns, 729 struct mlx5_flow_root_namespace *peer_ns) 730 { 731 struct mlx5dr_domain *peer_domain = NULL; 732 733 if (peer_ns) 734 peer_domain = peer_ns->fs_dr_domain.dr_domain; 735 mlx5dr_domain_set_peer(ns->fs_dr_domain.dr_domain, 736 peer_domain); 737 return 0; 738 } 739 740 static int mlx5_cmd_dr_create_ns(struct mlx5_flow_root_namespace *ns) 741 { 742 ns->fs_dr_domain.dr_domain = 743 mlx5dr_domain_create(ns->dev, 744 MLX5DR_DOMAIN_TYPE_FDB); 745 if (!ns->fs_dr_domain.dr_domain) { 746 mlx5_core_err(ns->dev, "Failed to create dr flow namespace\n"); 747 return -EOPNOTSUPP; 748 } 749 return 0; 750 } 751 752 static int mlx5_cmd_dr_destroy_ns(struct mlx5_flow_root_namespace *ns) 753 { 754 return mlx5dr_domain_destroy(ns->fs_dr_domain.dr_domain); 755 } 756 757 static u32 mlx5_cmd_dr_get_capabilities(struct mlx5_flow_root_namespace *ns, 758 enum fs_flow_table_type ft_type) 759 { 760 if (ft_type != FS_FT_FDB || 761 MLX5_CAP_GEN(ns->dev, steering_format_version) == MLX5_STEERING_FORMAT_CONNECTX_5) 762 return 0; 763 764 return MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX | MLX5_FLOW_STEERING_CAP_VLAN_POP_ON_TX; 765 } 766 767 bool mlx5_fs_dr_is_supported(struct mlx5_core_dev *dev) 768 { 769 return mlx5dr_is_supported(dev); 770 } 771 772 static const struct mlx5_flow_cmds mlx5_flow_cmds_dr = { 773 .create_flow_table = mlx5_cmd_dr_create_flow_table, 774 .destroy_flow_table = mlx5_cmd_dr_destroy_flow_table, 775 .modify_flow_table = mlx5_cmd_dr_modify_flow_table, 776 .create_flow_group = mlx5_cmd_dr_create_flow_group, 777 .destroy_flow_group = 
mlx5_cmd_dr_destroy_flow_group, 778 .create_fte = mlx5_cmd_dr_create_fte, 779 .update_fte = mlx5_cmd_dr_update_fte, 780 .delete_fte = mlx5_cmd_dr_delete_fte, 781 .update_root_ft = mlx5_cmd_dr_update_root_ft, 782 .packet_reformat_alloc = mlx5_cmd_dr_packet_reformat_alloc, 783 .packet_reformat_dealloc = mlx5_cmd_dr_packet_reformat_dealloc, 784 .modify_header_alloc = mlx5_cmd_dr_modify_header_alloc, 785 .modify_header_dealloc = mlx5_cmd_dr_modify_header_dealloc, 786 .create_match_definer = mlx5_cmd_dr_create_match_definer, 787 .destroy_match_definer = mlx5_cmd_dr_destroy_match_definer, 788 .set_peer = mlx5_cmd_dr_set_peer, 789 .create_ns = mlx5_cmd_dr_create_ns, 790 .destroy_ns = mlx5_cmd_dr_destroy_ns, 791 .get_capabilities = mlx5_cmd_dr_get_capabilities, 792 }; 793 794 const struct mlx5_flow_cmds *mlx5_fs_cmd_get_dr_cmds(void) 795 { 796 return &mlx5_flow_cmds_dr; 797 } 798