/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/flow_dissector.h>
#include <net/flow_offload.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <linux/refcount.h>
#include <linux/completion.h>
#include <net/arp.h>
#include <net/ipv6_stubs.h>
#include <net/bareudp.h>
#include <net/bonding.h>
#include <net/dst_metadata.h>
#include "devlink.h"
#include "en.h"
#include "en/tc/post_act.h"
#include "en/tc/act_stats.h"
#include "en_rep.h"
#include "en/rep/tc.h"
#include "en/rep/neigh.h"
#include "en_tc.h"
#include "eswitch.h"
#include "fs_core.h"
#include "en/port.h"
#include "en/tc_tun.h"
#include "en/mapping.h"
#include "en/tc_ct.h"
#include "en/mod_hdr.h"
#include "en/tc_tun_encap.h"
#include "en/tc/sample.h"
#include "en/tc/act/act.h"
#include "en/tc/post_meter.h"
#include "lib/devcom.h"
#include "lib/geneve.h"
#include "lib/fs_chains.h"
#include "diag/en_tc_tracepoint.h"
#include <asm/div64.h>
#include "lag/lag.h"
#include "lag/mp.h"

#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(18)
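/* Per-netdev TC offload state. Allocated by mlx5e_tc_table_alloc() and freed
 * by mlx5e_tc_table_free() below. In switchdev mode most of the equivalent
 * state hangs off the uplink representor instead; see the get_*() helpers
 * further down.
 */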
struct mlx5e_tc_table {
	/* Protects the dynamic assignment of the t parameter
	 * which is the nic tc root table.
	 */
	struct mutex t_lock;
	struct mlx5e_priv *priv;
	struct mlx5_flow_table *t;
	struct mlx5_flow_table *miss_t;
	struct mlx5_fs_chains *chains;
	struct mlx5e_post_act *post_act;

	struct rhashtable ht;

	struct mod_hdr_tbl mod_hdr;
	struct mutex hairpin_tbl_lock; /* protects hairpin_tbl */
	DECLARE_HASHTABLE(hairpin_tbl, 8);

	struct notifier_block netdevice_nb;
	struct netdev_net_notifier netdevice_nn;

	struct mlx5_tc_ct_priv *ct;
	struct mapping_ctx *mapping;
	struct dentry *dfs_root;

	/* tc action stats */
	struct mlx5e_tc_act_stats_handle *action_stats_handle;
};

struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
	[MAPPED_OBJ_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
		.moffset = 0,
		.mlen = 16,
	},
	[VPORT_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
		.moffset = 16,
		.mlen = 16,
	},
	[TUNNEL_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1,
		.moffset = 8,
		.mlen = ESW_TUN_OPTS_BITS + ESW_TUN_ID_BITS,
		.soffset = MLX5_BYTE_OFF(fte_match_param,
					 misc_parameters_2.metadata_reg_c_1),
	},
	[ZONE_TO_REG] = zone_to_reg_ct,
	[ZONE_RESTORE_TO_REG] = zone_restore_to_reg_ct,
	[CTSTATE_TO_REG] = ctstate_to_reg_ct,
	[MARK_TO_REG] = mark_to_reg_ct,
	[LABELS_TO_REG] = labels_to_reg_ct,
	[FTEID_TO_REG] = fteid_to_reg_ct,
	/* For NIC rules we store the restore metadata directly
	 * into reg_b that is passed to SW since we don't
	 * jump between steering domains.
	 */
	[NIC_MAPPED_OBJ_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_B,
		.moffset = 0,
		.mlen = 16,
	},
	[NIC_ZONE_RESTORE_TO_REG] = nic_zone_restore_to_reg_ct,
	[PACKET_COLOR_TO_REG] = packet_color_to_reg,
};

struct mlx5e_tc_jump_state {
	u32 jump_count;
	bool jump_target;
	struct mlx5_flow_attr *jumping_attr;

	enum flow_action_id last_id;
	u32 last_index;
};

struct mlx5e_tc_table *mlx5e_tc_table_alloc(void)
{
	struct mlx5e_tc_table *tc;

	tc = kvzalloc(sizeof(*tc), GFP_KERNEL);
	return tc ? tc : ERR_PTR(-ENOMEM);
}

void mlx5e_tc_table_free(struct mlx5e_tc_table *tc)
{
	kvfree(tc);
}

struct mlx5_fs_chains *mlx5e_nic_chains(struct mlx5e_tc_table *tc)
{
	return tc->chains;
}

/* To avoid a false lock dependency warning, set the tc_ht lock class to be
 * different from the lock class of the ht being used when deleting the last
 * flow from a group and then deleting the group: in that case we get into
 * del_sw_flow_group(), which calls rhashtable_destroy() on fg->ftes_hash.
 * That takes an ht->mutex, but a different ht->mutex than the one here.
 */
static struct lock_class_key tc_ht_lock_key;
static struct lock_class_key tc_ht_wq_key;

static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
static void free_flow_post_acts(struct mlx5e_tc_flow *flow);
static void mlx5_free_flow_attr_actions(struct mlx5e_tc_flow *flow,
					struct mlx5_flow_attr *attr);
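/* Write a (val, mask) pair for register mapping @type into @spec. The mapped
 * register is matched as a 32-bit big-endian field in the match param; the
 * value and mask are shifted to the mapping's bit offset (moffset) and merged
 * with whatever is already present in the other bits of that register, so
 * several mappings can share one register (e.g. reg_c_0 is split 16/16
 * between MAPPED_OBJ_TO_REG and VPORT_TO_REG above).
 */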
void
mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
			    enum mlx5e_tc_attr_to_reg type,
			    u32 val,
			    u32 mask)
{
	void *headers_c = spec->match_criteria, *headers_v = spec->match_value, *fmask, *fval;
	int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	u32 max_mask = GENMASK(match_len - 1, 0);
	__be32 curr_mask_be, curr_val_be;
	u32 curr_mask, curr_val;

	fmask = headers_c + soffset;
	fval = headers_v + soffset;

	memcpy(&curr_mask_be, fmask, 4);
	memcpy(&curr_val_be, fval, 4);

	curr_mask = be32_to_cpu(curr_mask_be);
	curr_val = be32_to_cpu(curr_val_be);

	/* move to correct offset */
	WARN_ON(mask > max_mask);
	mask <<= moffset;
	val <<= moffset;
	max_mask <<= moffset;

	/* zero val and mask */
	curr_mask &= ~max_mask;
	curr_val &= ~max_mask;

	/* add current to mask */
	curr_mask |= mask;
	curr_val |= val;

	/* back to be32 and write */
	curr_mask_be = cpu_to_be32(curr_mask);
	curr_val_be = cpu_to_be32(curr_val);

	memcpy(fmask, &curr_mask_be, 4);
	memcpy(fval, &curr_val_be, 4);

	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
}

void
mlx5e_tc_match_to_reg_get_match(struct mlx5_flow_spec *spec,
				enum mlx5e_tc_attr_to_reg type,
				u32 *val,
				u32 *mask)
{
	void *headers_c = spec->match_criteria, *headers_v = spec->match_value, *fmask, *fval;
	int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	u32 max_mask = GENMASK(match_len - 1, 0);
	__be32 curr_mask_be, curr_val_be;
	u32 curr_mask, curr_val;

	fmask = headers_c + soffset;
	fval = headers_v + soffset;

	memcpy(&curr_mask_be, fmask, 4);
	memcpy(&curr_val_be, fval, 4);

	curr_mask = be32_to_cpu(curr_mask_be);
	curr_val = be32_to_cpu(curr_val_be);

	*mask = (curr_mask >> moffset) & max_mask;
	*val = (curr_val >> moffset) & max_mask;
}

int
mlx5e_tc_match_to_reg_set_and_get_id(struct mlx5_core_dev *mdev,
				     struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
				     enum mlx5_flow_namespace_type ns,
				     enum mlx5e_tc_attr_to_reg type,
				     u32 data)
{
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield;
	int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	char *modact;
	int err;

	modact = mlx5e_mod_hdr_alloc(mdev, ns, mod_hdr_acts);
	if (IS_ERR(modact))
		return PTR_ERR(modact);

	/* Firmware has a 5-bit length field, and 0 means 32 bits */
	if (mlen == 32)
		mlen = 0;

	MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, modact, field, mfield);
	MLX5_SET(set_action_in, modact, offset, moffset);
	MLX5_SET(set_action_in, modact, length, mlen);
	MLX5_SET(set_action_in, modact, data, data);
	err = mod_hdr_acts->num_actions;
	mod_hdr_acts->num_actions++;

	return err;
}
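/* The get_*() helpers below all follow the same dispatch rule: in switchdev
 * mode the state hangs off the uplink representor's uplink_priv, otherwise it
 * lives in the per-netdev tc table (or is simply unavailable and NULL is
 * returned).
 */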
static struct mlx5e_tc_act_stats_handle *
get_act_stats_handle(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;

		return uplink_priv->action_stats_handle;
	}

	return tc->action_stats_handle;
}

struct mlx5e_tc_int_port_priv *
mlx5e_get_int_port_priv(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;

		return uplink_priv->int_port_priv;
	}

	return NULL;
}

struct mlx5e_flow_meters *
mlx5e_get_flow_meters(struct mlx5_core_dev *dev)
{
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct mlx5e_priv *priv;

	if (is_mdev_switchdev_mode(dev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;
		priv = netdev_priv(uplink_rpriv->netdev);
		if (!uplink_priv->flow_meters)
			uplink_priv->flow_meters =
				mlx5e_flow_meters_init(priv,
						       MLX5_FLOW_NAMESPACE_FDB,
						       uplink_priv->post_act);
		if (!IS_ERR(uplink_priv->flow_meters))
			return uplink_priv->flow_meters;
	}

	return NULL;
}

static struct mlx5_tc_ct_priv *
get_ct_priv(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;

		return uplink_priv->ct_priv;
	}

	return tc->ct;
}

static struct mlx5e_tc_psample *
get_sample_priv(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;

		return uplink_priv->tc_psample;
	}

	return NULL;
}

static struct mlx5e_post_act *
get_post_action(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;

		return uplink_priv->post_act;
	}

	return tc->post_act;
}

struct mlx5_flow_handle *
mlx5_tc_rule_insert(struct mlx5e_priv *priv,
		    struct mlx5_flow_spec *spec,
		    struct mlx5_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (is_mdev_switchdev_mode(priv->mdev))
		return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);

	return mlx5e_add_offloaded_nic_rule(priv, spec, attr);
}

void
mlx5_tc_rule_delete(struct mlx5e_priv *priv,
		    struct mlx5_flow_handle *rule,
		    struct mlx5_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
		return;
	}

	mlx5e_del_offloaded_nic_rule(priv, rule, attr);
}

static bool
is_flow_meter_action(struct mlx5_flow_attr *attr)
{
	return (((attr->action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) &&
		 (attr->exe_aso_type == MLX5_EXE_ASO_FLOW_METER)) ||
		attr->flags & MLX5_ATTR_FLAG_MTU);
}

static int
mlx5e_tc_add_flow_meter(struct mlx5e_priv *priv,
			struct mlx5_flow_attr *attr)
{
	struct mlx5e_post_act *post_act = get_post_action(priv);
	struct mlx5e_post_meter_priv *post_meter;
	enum mlx5_flow_namespace_type ns_type;
	struct mlx5e_flow_meter_handle *meter;
	enum mlx5e_post_meter_type type;

	meter = mlx5e_tc_meter_replace(priv->mdev, &attr->meter_attr.params);
	if (IS_ERR(meter)) {
		mlx5_core_err(priv->mdev, "Failed to get flow meter\n");
		return PTR_ERR(meter);
	}

	ns_type = mlx5e_tc_meter_get_namespace(meter->flow_meters);
	type = meter->params.mtu ? MLX5E_POST_METER_MTU : MLX5E_POST_METER_RATE;
	post_meter = mlx5e_post_meter_init(priv, ns_type, post_act,
					   type,
					   meter->act_counter, meter->drop_counter,
					   attr->branch_true, attr->branch_false);
	if (IS_ERR(post_meter)) {
		mlx5_core_err(priv->mdev, "Failed to init post meter\n");
		goto err_meter_init;
	}

	attr->meter_attr.meter = meter;
	attr->meter_attr.post_meter = post_meter;
	attr->dest_ft = mlx5e_post_meter_get_ft(post_meter);
	attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	return 0;

err_meter_init:
	mlx5e_tc_meter_put(meter);
	return PTR_ERR(post_meter);
}

static void
mlx5e_tc_del_flow_meter(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
{
	mlx5e_post_meter_cleanup(esw, attr->meter_attr.post_meter);
	mlx5e_tc_meter_put(attr->meter_attr.meter);
}

struct mlx5_flow_handle *
mlx5e_tc_rule_offload(struct mlx5e_priv *priv,
		      struct mlx5_flow_spec *spec,
		      struct mlx5_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int err;

	if (!is_mdev_switchdev_mode(priv->mdev))
		return mlx5e_add_offloaded_nic_rule(priv, spec, attr);

	if (attr->flags & MLX5_ATTR_FLAG_SAMPLE)
		return mlx5e_tc_sample_offload(get_sample_priv(priv), spec, attr);

	if (is_flow_meter_action(attr)) {
		err = mlx5e_tc_add_flow_meter(priv, attr);
		if (err)
			return ERR_PTR(err);
	}

	return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
}

void
mlx5e_tc_rule_unoffload(struct mlx5e_priv *priv,
			struct mlx5_flow_handle *rule,
			struct mlx5_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (!is_mdev_switchdev_mode(priv->mdev)) {
		mlx5e_del_offloaded_nic_rule(priv, rule, attr);
		return;
	}

	if (attr->flags & MLX5_ATTR_FLAG_SAMPLE) {
		mlx5e_tc_sample_unoffload(get_sample_priv(priv), rule, attr);
		return;
	}

	mlx5_eswitch_del_offloaded_rule(esw, rule, attr);

	if (attr->meter_attr.meter)
		mlx5e_tc_del_flow_meter(esw, attr);
}
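/* mlx5e_tc_match_to_reg_set_and_get_id() returns the index of the SET action
 * it appended; the wrapper below discards that index. Keep the index when the
 * action's data may need to be rewritten later with
 * mlx5e_tc_match_to_reg_mod_hdr_change().
 *
 * Illustrative pairing of the write and match halves (a sketch, not a
 * verbatim quote of a caller; the mask depends on the mapping's width, here
 * 16 bits for MAPPED_OBJ_TO_REG):
 *
 *	err = mlx5e_tc_match_to_reg_set(esw->dev, &mod_acts,
 *					MLX5_FLOW_NAMESPACE_FDB,
 *					MAPPED_OBJ_TO_REG, chain_mapping);
 *	...
 *	mlx5e_tc_match_to_reg_match(spec, MAPPED_OBJ_TO_REG,
 *				    chain_mapping, 0xFFFF);
 */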
int
mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
			  struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
			  enum mlx5_flow_namespace_type ns,
			  enum mlx5e_tc_attr_to_reg type,
			  u32 data)
{
	int ret = mlx5e_tc_match_to_reg_set_and_get_id(mdev, mod_hdr_acts, ns, type, data);

	return ret < 0 ? ret : 0;
}

void mlx5e_tc_match_to_reg_mod_hdr_change(struct mlx5_core_dev *mdev,
					  struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
					  enum mlx5e_tc_attr_to_reg type,
					  int act_id, u32 data)
{
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield;
	int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	char *modact;

	modact = mlx5e_mod_hdr_get_item(mod_hdr_acts, act_id);

	/* Firmware has a 5-bit length field, and 0 means 32 bits */
	if (mlen == 32)
		mlen = 0;

	MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, modact, field, mfield);
	MLX5_SET(set_action_in, modact, offset, moffset);
	MLX5_SET(set_action_in, modact, length, mlen);
	MLX5_SET(set_action_in, modact, data, data);
}
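/* Hairpin: forward packets from the RQs of one function directly to the SQs
 * of a peer function without a trip through software. One mlx5e_hairpin
 * instance (the hairpin queue pair plus the TIR/RQT/TTC steering objects
 * built on top of it) is shared by all flows with the same peer vhca id and
 * PCP priority; the sharing is tracked by mlx5e_hairpin_entry in
 * tc->hairpin_tbl.
 */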
struct mlx5e_hairpin {
	struct mlx5_hairpin *pair;

	struct mlx5_core_dev *func_mdev;
	struct mlx5e_priv *func_priv;
	u32 tdn;
	struct mlx5e_tir direct_tir;

	int num_channels;
	u8 log_num_packets;
	struct mlx5e_rqt indir_rqt;
	struct mlx5e_tir indir_tir[MLX5E_NUM_INDIR_TIRS];
	struct mlx5_ttc_table *ttc;
};

struct mlx5e_hairpin_entry {
	/* a node of a hash table which keeps all the hairpin entries */
	struct hlist_node hairpin_hlist;

	/* protects flows list */
	spinlock_t flows_lock;
	/* flows sharing the same hairpin */
	struct list_head flows;
	/* hpe's that were not fully initialized when dead peer update event
	 * function traversed them.
	 */
	struct list_head dead_peer_wait_list;

	u16 peer_vhca_id;
	u8 prio;
	struct mlx5e_hairpin *hp;
	refcount_t refcnt;
	struct completion res_ready;
};

static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow);

struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow)
{
	if (!flow || !refcount_inc_not_zero(&flow->refcnt))
		return ERR_PTR(-EINVAL);
	return flow;
}

void mlx5e_flow_put(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
{
	if (refcount_dec_and_test(&flow->refcnt)) {
		mlx5e_tc_del_flow(priv, flow);
		kfree_rcu(flow, rcu_head);
	}
}

bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, ESWITCH);
}

bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, FT);
}

bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, OFFLOADED);
}

int mlx5e_get_flow_namespace(struct mlx5e_tc_flow *flow)
{
	return mlx5e_is_eswitch_flow(flow) ?
		MLX5_FLOW_NAMESPACE_FDB : MLX5_FLOW_NAMESPACE_KERNEL;
}

static struct mlx5_core_dev *
get_flow_counter_dev(struct mlx5e_tc_flow *flow)
{
	return mlx5e_is_eswitch_flow(flow) ? flow->attr->esw_attr->counter_dev :
					     flow->priv->mdev;
}

static struct mod_hdr_tbl *
get_mod_hdr_table(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	return mlx5e_get_flow_namespace(flow) == MLX5_FLOW_NAMESPACE_FDB ?
		&esw->offloads.mod_hdr :
		&tc->mod_hdr;
}

int mlx5e_tc_attach_mod_hdr(struct mlx5e_priv *priv,
			    struct mlx5e_tc_flow *flow,
			    struct mlx5_flow_attr *attr)
{
	struct mlx5e_mod_hdr_handle *mh;

	mh = mlx5e_mod_hdr_attach(priv->mdev, get_mod_hdr_table(priv, flow),
				  mlx5e_get_flow_namespace(flow),
				  &attr->parse_attr->mod_hdr_acts);
	if (IS_ERR(mh))
		return PTR_ERR(mh);

	WARN_ON(attr->modify_hdr);
	attr->modify_hdr = mlx5e_mod_hdr_get(mh);
	attr->mh = mh;

	return 0;
}

void mlx5e_tc_detach_mod_hdr(struct mlx5e_priv *priv,
			     struct mlx5e_tc_flow *flow,
			     struct mlx5_flow_attr *attr)
{
	/* flow wasn't fully initialized */
	if (!attr->mh)
		return;

	mlx5e_mod_hdr_detach(priv->mdev, get_mod_hdr_table(priv, flow),
			     attr->mh);
	attr->mh = NULL;
}

static
struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
{
	struct mlx5_core_dev *mdev;
	struct net_device *netdev;
	struct mlx5e_priv *priv;

	netdev = dev_get_by_index(net, ifindex);
	if (!netdev)
		return ERR_PTR(-ENODEV);

	priv = netdev_priv(netdev);
	mdev = priv->mdev;
	dev_put(netdev);

	/* Mirred tc action holds a refcount on the ifindex net_device (see
	 * net/sched/act_mirred.c:tcf_mirred_get_dev). So, it's okay to continue using mdev
	 * after dev_put(netdev), while we're in the context of adding a tc flow.
	 *
	 * The mdev pointer corresponds to the peer/out net_device of a hairpin. It is then
	 * stored in a hairpin object, which exists until all flows, that refer to it, get
	 * removed.
	 *
	 * On the other hand, after a hairpin object has been created, the peer net_device may
	 * be removed/unbound while there are still some hairpin flows that are using it. This
	 * case is handled by mlx5e_tc_hairpin_update_dead_peer, which is hooked to
	 * NETDEV_UNREGISTER event of the peer net_device.
	 */
	return mdev;
}
static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
{
	struct mlx5e_tir_builder *builder;
	int err;

	builder = mlx5e_tir_builder_alloc(false);
	if (!builder)
		return -ENOMEM;

	err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn);
	if (err)
		goto out;

	mlx5e_tir_builder_build_inline(builder, hp->tdn, hp->pair->rqn[0]);
	err = mlx5e_tir_init(&hp->direct_tir, builder, hp->func_mdev, false);
	if (err)
		goto create_tir_err;

out:
	mlx5e_tir_builder_free(builder);
	return err;

create_tir_err:
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);

	goto out;
}

static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
{
	mlx5e_tir_destroy(&hp->direct_tir);
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
}

static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_rss_params_indir *indir;
	int err;

	indir = kvmalloc(sizeof(*indir), GFP_KERNEL);
	if (!indir)
		return -ENOMEM;

	mlx5e_rss_params_indir_init_uniform(indir, hp->num_channels);
	err = mlx5e_rqt_init_indir(&hp->indir_rqt, mdev, hp->pair->rqn, hp->num_channels,
				   mlx5e_rx_res_get_current_hash(priv->rx_res).hfunc,
				   indir);

	kvfree(indir);
	return err;
}

static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	struct mlx5e_rss_params_hash rss_hash;
	enum mlx5_traffic_types tt, max_tt;
	struct mlx5e_tir_builder *builder;
	int err = 0;

	builder = mlx5e_tir_builder_alloc(false);
	if (!builder)
		return -ENOMEM;

	rss_hash = mlx5e_rx_res_get_current_hash(priv->rx_res);

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		struct mlx5e_rss_params_traffic_type rss_tt;

		rss_tt = mlx5e_rss_get_default_tt_config(tt);

		mlx5e_tir_builder_build_rqt(builder, hp->tdn,
					    mlx5e_rqt_get_rqtn(&hp->indir_rqt),
					    false);
		mlx5e_tir_builder_build_rss(builder, &rss_hash, &rss_tt, false);

		err = mlx5e_tir_init(&hp->indir_tir[tt], builder, hp->func_mdev, false);
		if (err) {
			mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err);
			goto err_destroy_tirs;
		}

		mlx5e_tir_builder_clear(builder);
	}

out:
	mlx5e_tir_builder_free(builder);
	return err;

err_destroy_tirs:
	max_tt = tt;
	for (tt = 0; tt < max_tt; tt++)
		mlx5e_tir_destroy(&hp->indir_tir[tt]);

	goto out;
}

static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp)
{
	int tt;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		mlx5e_tir_destroy(&hp->indir_tir[tt]);
}
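/* Steer hairpinned traffic by traffic type (TTC): every known traffic type
 * goes to the matching RSS indirect TIR, while MLX5_TT_ANY falls back to the
 * direct TIR that targets the hairpin pair's first RQ.
 */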
static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
					 struct ttc_params *ttc_params)
{
	struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
	int tt;

	memset(ttc_params, 0, sizeof(*ttc_params));

	ttc_params->ns = mlx5_get_flow_namespace(hp->func_mdev,
						 MLX5_FLOW_NAMESPACE_KERNEL);
	for (tt = 0; tt < MLX5_NUM_TT; tt++) {
		ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		ttc_params->dests[tt].tir_num =
			tt == MLX5_TT_ANY ?
				mlx5e_tir_get_tirn(&hp->direct_tir) :
				mlx5e_tir_get_tirn(&hp->indir_tir[tt]);
	}

	ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
	ft_attr->prio = MLX5E_TC_PRIO;
}

static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	struct ttc_params ttc_params;
	struct mlx5_ttc_table *ttc;
	int err;

	err = mlx5e_hairpin_create_indirect_rqt(hp);
	if (err)
		return err;

	err = mlx5e_hairpin_create_indirect_tirs(hp);
	if (err)
		goto err_create_indirect_tirs;

	mlx5e_hairpin_set_ttc_params(hp, &ttc_params);
	hp->ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params);
	if (IS_ERR(hp->ttc)) {
		err = PTR_ERR(hp->ttc);
		goto err_create_ttc_table;
	}

	ttc = mlx5e_fs_get_ttc(priv->fs, false);
	netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
		   hp->num_channels,
		   mlx5_get_ttc_flow_table(ttc)->id);

	return 0;

err_create_ttc_table:
	mlx5e_hairpin_destroy_indirect_tirs(hp);
err_create_indirect_tirs:
	mlx5e_rqt_destroy(&hp->indir_rqt);

	return err;
}

static void mlx5e_hairpin_rss_cleanup(struct mlx5e_hairpin *hp)
{
	mlx5_destroy_ttc_table(hp->ttc);
	mlx5e_hairpin_destroy_indirect_tirs(hp);
	mlx5e_rqt_destroy(&hp->indir_rqt);
}

static struct mlx5e_hairpin *
mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params,
		     int peer_ifindex)
{
	struct mlx5_core_dev *func_mdev, *peer_mdev;
	struct mlx5e_hairpin *hp;
	struct mlx5_hairpin *pair;
	int err;

	hp = kzalloc(sizeof(*hp), GFP_KERNEL);
	if (!hp)
		return ERR_PTR(-ENOMEM);

	func_mdev = priv->mdev;
	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
	if (IS_ERR(peer_mdev)) {
		err = PTR_ERR(peer_mdev);
		goto create_pair_err;
	}

	pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
	if (IS_ERR(pair)) {
		err = PTR_ERR(pair);
		goto create_pair_err;
	}
	hp->pair = pair;
	hp->func_mdev = func_mdev;
	hp->func_priv = priv;
	hp->num_channels = params->num_channels;
	hp->log_num_packets = params->log_num_packets;

	err = mlx5e_hairpin_create_transport(hp);
	if (err)
		goto create_transport_err;

	if (hp->num_channels > 1) {
		err = mlx5e_hairpin_rss_init(hp);
		if (err)
			goto rss_init_err;
	}

	return hp;

rss_init_err:
	mlx5e_hairpin_destroy_transport(hp);
create_transport_err:
	mlx5_core_hairpin_destroy(hp->pair);
create_pair_err:
	kfree(hp);
	return ERR_PTR(err);
}

static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp)
{
	if (hp->num_channels > 1)
		mlx5e_hairpin_rss_cleanup(hp);
	mlx5e_hairpin_destroy_transport(hp);
	mlx5_core_hairpin_destroy(hp->pair);
	kvfree(hp);
}

static inline u32 hash_hairpin_info(u16 peer_vhca_id, u8 prio)
{
	return (peer_vhca_id << 16 | prio);
}

static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
						     u16 peer_vhca_id, u8 prio)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5e_hairpin_entry *hpe;
	u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);

	hash_for_each_possible(tc->hairpin_tbl, hpe,
			       hairpin_hlist, hash_key) {
		if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio) {
			refcount_inc(&hpe->refcnt);
			return hpe;
		}
	}

	return NULL;
}
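/* Drop a reference to a hairpin entry. The last reference removes the entry
 * from the hash table (under hairpin_tbl_lock) and destroys the underlying
 * hairpin pair, if it was successfully created.
 */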
static void mlx5e_hairpin_put(struct mlx5e_priv *priv,
			      struct mlx5e_hairpin_entry *hpe)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);

	/* no more hairpin flows for us, release the hairpin pair */
	if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &tc->hairpin_tbl_lock))
		return;
	hash_del(&hpe->hairpin_hlist);
	mutex_unlock(&tc->hairpin_tbl_lock);

	if (!IS_ERR_OR_NULL(hpe->hp)) {
		netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
			   dev_name(hpe->hp->pair->peer_mdev->device));

		mlx5e_hairpin_destroy(hpe->hp);
	}

	WARN_ON(!list_empty(&hpe->flows));
	kfree(hpe);
}

#define UNKNOWN_MATCH_PRIO 8

static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
				  struct mlx5_flow_spec *spec, u8 *match_prio,
				  struct netlink_ext_ack *extack)
{
	void *headers_c, *headers_v;
	u8 prio_val, prio_mask = 0;
	bool vlan_present;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "only PCP trust state supported for hairpin");
		return -EOPNOTSUPP;
	}
#endif
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);

	vlan_present = MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag);
	if (vlan_present) {
		prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
		prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
	}

	if (!vlan_present || !prio_mask) {
		prio_val = UNKNOWN_MATCH_PRIO;
	} else if (prio_mask != 0x7) {
		NL_SET_ERR_MSG_MOD(extack,
				   "masked priority match not supported for hairpin");
		return -EOPNOTSUPP;
	}

	*match_prio = prio_val;
	return 0;
}

static int debugfs_hairpin_num_active_get(void *data, u64 *val)
{
	struct mlx5e_tc_table *tc = data;
	struct mlx5e_hairpin_entry *hpe;
	u32 cnt = 0;
	u32 bkt;

	mutex_lock(&tc->hairpin_tbl_lock);
	hash_for_each(tc->hairpin_tbl, bkt, hpe, hairpin_hlist)
		cnt++;
	mutex_unlock(&tc->hairpin_tbl_lock);

	*val = cnt;

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_hairpin_num_active,
			 debugfs_hairpin_num_active_get, NULL, "%llu\n");

static int debugfs_hairpin_table_dump_show(struct seq_file *file, void *priv)
{
	struct mlx5e_tc_table *tc = file->private;
	struct mlx5e_hairpin_entry *hpe;
	u32 bkt;

	mutex_lock(&tc->hairpin_tbl_lock);
	hash_for_each(tc->hairpin_tbl, bkt, hpe, hairpin_hlist)
		seq_printf(file,
			   "Hairpin peer_vhca_id %u prio %u refcnt %u num_channels %u num_packets %lu\n",
			   hpe->peer_vhca_id, hpe->prio,
			   refcount_read(&hpe->refcnt), hpe->hp->num_channels,
			   BIT(hpe->hp->log_num_packets));
	mutex_unlock(&tc->hairpin_tbl_lock);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(debugfs_hairpin_table_dump);

static void mlx5e_tc_debugfs_init(struct mlx5e_tc_table *tc,
				  struct dentry *dfs_root)
{
	if (IS_ERR_OR_NULL(dfs_root))
		return;

	tc->dfs_root = debugfs_create_dir("tc", dfs_root);

	debugfs_create_file("hairpin_num_active", 0444, tc->dfs_root, tc,
			    &fops_hairpin_num_active);
	debugfs_create_file("hairpin_table_dump", 0444, tc->dfs_root, tc,
			    &debugfs_hairpin_table_dump_fops);
}
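/* Get-or-create the hairpin entry for a flow. Lookup and insertion happen
 * under hairpin_tbl_lock, but the (slow) hardware setup happens outside it;
 * concurrent users of the same entry wait on res_ready and then check
 * whether hpe->hp was actually created.
 */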
static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5e_tc_flow_parse_attr *parse_attr,
				  struct netlink_ext_ack *extack)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct devlink *devlink = priv_to_devlink(priv->mdev);
	int peer_ifindex = parse_attr->mirred_ifindex[0];
	union devlink_param_value val = {};
	struct mlx5_hairpin_params params;
	struct mlx5_core_dev *peer_mdev;
	struct mlx5e_hairpin_entry *hpe;
	struct mlx5e_hairpin *hp;
	u8 match_prio;
	u16 peer_id;
	int err;

	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
	if (IS_ERR(peer_mdev)) {
		NL_SET_ERR_MSG_MOD(extack, "invalid ifindex of mirred device");
		return PTR_ERR(peer_mdev);
	}

	if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
		NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
		return -EOPNOTSUPP;
	}

	peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
	err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio,
				     extack);
	if (err)
		return err;

	mutex_lock(&tc->hairpin_tbl_lock);
	hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
	if (hpe) {
		mutex_unlock(&tc->hairpin_tbl_lock);
		wait_for_completion(&hpe->res_ready);

		if (IS_ERR(hpe->hp)) {
			err = -EREMOTEIO;
			goto out_err;
		}
		goto attach_flow;
	}

	hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
	if (!hpe) {
		mutex_unlock(&tc->hairpin_tbl_lock);
		return -ENOMEM;
	}

	spin_lock_init(&hpe->flows_lock);
	INIT_LIST_HEAD(&hpe->flows);
	INIT_LIST_HEAD(&hpe->dead_peer_wait_list);
	hpe->peer_vhca_id = peer_id;
	hpe->prio = match_prio;
	refcount_set(&hpe->refcnt, 1);
	init_completion(&hpe->res_ready);

	hash_add(tc->hairpin_tbl, &hpe->hairpin_hlist,
		 hash_hairpin_info(peer_id, match_prio));
	mutex_unlock(&tc->hairpin_tbl_lock);

	err = devl_param_driverinit_value_get(
		devlink, MLX5_DEVLINK_PARAM_ID_HAIRPIN_QUEUE_SIZE, &val);
	if (err) {
		err = -ENOMEM;
		goto out_err;
	}

	params.log_num_packets = ilog2(val.vu32);
	params.log_data_size =
		clamp_t(u32,
			params.log_num_packets +
				MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev),
			MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz),
			MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));

	params.q_counter = priv->q_counter;
	err = devl_param_driverinit_value_get(
		devlink, MLX5_DEVLINK_PARAM_ID_HAIRPIN_NUM_QUEUES, &val);
	if (err) {
		err = -ENOMEM;
		goto out_err;
	}

	params.num_channels = val.vu32;

	hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
	hpe->hp = hp;
	complete_all(&hpe->res_ready);
	if (IS_ERR(hp)) {
		err = PTR_ERR(hp);
		goto out_err;
	}

	netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
		   mlx5e_tir_get_tirn(&hp->direct_tir), hp->pair->rqn[0],
		   dev_name(hp->pair->peer_mdev->device),
		   hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);

attach_flow:
	if (hpe->hp->num_channels > 1) {
		flow_flag_set(flow, HAIRPIN_RSS);
		flow->attr->nic_attr->hairpin_ft =
			mlx5_get_ttc_flow_table(hpe->hp->ttc);
	} else {
		flow->attr->nic_attr->hairpin_tirn = mlx5e_tir_get_tirn(&hpe->hp->direct_tir);
	}

	flow->hpe = hpe;
	spin_lock(&hpe->flows_lock);
	list_add(&flow->hairpin, &hpe->flows);
	spin_unlock(&hpe->flows_lock);

	return 0;

out_err:
	mlx5e_hairpin_put(priv, hpe);
	return err;
}

static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
				   struct mlx5e_tc_flow *flow)
{
	/* flow wasn't fully initialized */
	if (!flow->hpe)
		return;

	spin_lock(&flow->hpe->flows_lock);
	list_del(&flow->hairpin);
	spin_unlock(&flow->hpe->flows_lock);

	mlx5e_hairpin_put(priv, flow->hpe);
	flow->hpe = NULL;
}
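/* Build and insert a NIC-domain offloaded rule. The forward destination is
 * chosen with the following precedence: an explicit destination table
 * (attr->dest_ft), then the hairpin TTC table or TIR, then the destination
 * chain's table, and finally the VLAN table as the default; a flow counter
 * may be appended as a second destination. The tc root table is created
 * lazily on first use.
 */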
struct mlx5_flow_handle *
mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
			     struct mlx5_flow_spec *spec,
			     struct mlx5_flow_attr *attr)
{
	struct mlx5_flow_context *flow_context = &spec->flow_context;
	struct mlx5e_vlan_table *vlan = mlx5e_fs_get_vlan(priv->fs);
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_nic_flow_attr *nic_attr = attr->nic_attr;
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_fs_chains *nic_chains;
	struct mlx5_flow_act flow_act = {
		.action = attr->action,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *ft;
	int dest_ix = 0;

	nic_chains = mlx5e_nic_chains(tc);
	flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
	flow_context->flow_tag = nic_attr->flow_tag;

	if (attr->dest_ft) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest[dest_ix].ft = attr->dest_ft;
		dest_ix++;
	} else if (nic_attr->hairpin_ft) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest[dest_ix].ft = nic_attr->hairpin_ft;
		dest_ix++;
	} else if (nic_attr->hairpin_tirn) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		dest[dest_ix].tir_num = nic_attr->hairpin_tirn;
		dest_ix++;
	} else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		if (attr->dest_chain) {
			dest[dest_ix].ft = mlx5_chains_get_table(nic_chains,
								 attr->dest_chain, 1,
								 MLX5E_TC_FT_LEVEL);
			if (IS_ERR(dest[dest_ix].ft))
				return ERR_CAST(dest[dest_ix].ft);
		} else {
			dest[dest_ix].ft = mlx5e_vlan_get_flowtable(vlan);
		}
		dest_ix++;
	}

	if (dest[0].type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
	    MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level))
		flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[dest_ix].counter_id = mlx5_fc_id(attr->counter);
		dest_ix++;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_hdr = attr->modify_hdr;

	mutex_lock(&tc->t_lock);
	if (IS_ERR_OR_NULL(tc->t)) {
		/* Create the root table here if it doesn't exist yet */
		tc->t =
			mlx5_chains_get_table(nic_chains, 0, 1, MLX5E_TC_FT_LEVEL);

		if (IS_ERR(tc->t)) {
			mutex_unlock(&tc->t_lock);
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			rule = ERR_CAST(tc->t);
			goto err_ft_get;
		}
	}
	mutex_unlock(&tc->t_lock);

	if (attr->chain || attr->prio)
		ft = mlx5_chains_get_table(nic_chains,
					   attr->chain, attr->prio,
					   MLX5E_TC_FT_LEVEL);
	else
		ft = attr->ft;

	if (IS_ERR(ft)) {
		rule = ERR_CAST(ft);
		goto err_ft_get;
	}

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	rule = mlx5_add_flow_rules(ft, spec,
				   &flow_act, dest, dest_ix);
	if (IS_ERR(rule))
		goto err_rule;

	return rule;

err_rule:
	if (attr->chain || attr->prio)
		mlx5_chains_put_table(nic_chains,
				      attr->chain, attr->prio,
				      MLX5E_TC_FT_LEVEL);
err_ft_get:
	if (attr->dest_chain)
		mlx5_chains_put_table(nic_chains,
				      attr->dest_chain, 1,
				      MLX5E_TC_FT_LEVEL);

	return ERR_CAST(rule);
}

static int
alloc_flow_attr_counter(struct mlx5_core_dev *counter_dev,
			struct mlx5_flow_attr *attr)
{
	struct mlx5_fc *counter;

	counter = mlx5_fc_create(counter_dev, true);
	if (IS_ERR(counter))
		return PTR_ERR(counter);

	attr->counter = counter;
	return 0;
}

static int
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow *flow,
		      struct netlink_ext_ack *extack)
{
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5_core_dev *dev = priv->mdev;
	int err;

	parse_attr = attr->parse_attr;

	if (flow_flag_test(flow, HAIRPIN)) {
		err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
		if (err)
			return err;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		err = alloc_flow_attr_counter(dev, attr);
		if (err)
			return err;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_tc_attach_mod_hdr(priv, flow, attr);
		if (err)
			return err;
	}

	flow->rule[0] = mlx5e_add_offloaded_nic_rule(priv, &parse_attr->spec, attr);
	return PTR_ERR_OR_ZERO(flow->rule[0]);
}

void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv,
				  struct mlx5_flow_handle *rule,
				  struct mlx5_flow_attr *attr)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_fs_chains *nic_chains;

	nic_chains = mlx5e_nic_chains(tc);
	mlx5_del_flow_rules(rule);

	if (attr->chain || attr->prio)
		mlx5_chains_put_table(nic_chains, attr->chain, attr->prio,
				      MLX5E_TC_FT_LEVEL);

	if (attr->dest_chain)
		mlx5_chains_put_table(nic_chains, attr->dest_chain, 1,
				      MLX5E_TC_FT_LEVEL);
}

static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_flow_attr *attr = flow->attr;

	flow_flag_clear(flow, OFFLOADED);

	if (!IS_ERR_OR_NULL(flow->rule[0]))
		mlx5e_del_offloaded_nic_rule(priv, flow->rule[0], attr);

	/* Remove root table if no rules are left to avoid
	 * extra steering hops.
	 */
	mutex_lock(&tc->t_lock);
	if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) &&
	    !IS_ERR_OR_NULL(tc->t)) {
		mlx5_chains_put_table(mlx5e_nic_chains(tc), 0, 1, MLX5E_TC_FT_LEVEL);
		tc->t = NULL;
	}
	mutex_unlock(&tc->t_lock);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
		mlx5e_tc_detach_mod_hdr(priv, flow, attr);
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
		mlx5_fc_destroy(priv->mdev, attr->counter);

	if (flow_flag_test(flow, HAIRPIN))
		mlx5e_hairpin_flow_del(priv, flow);

	free_flow_post_acts(flow);
	mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), attr);

	kvfree(attr->parse_attr);
	kfree(flow->attr);
}

struct mlx5_flow_handle *
mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
			   struct mlx5e_tc_flow *flow,
			   struct mlx5_flow_spec *spec,
			   struct mlx5_flow_attr *attr)
{
	struct mlx5_flow_handle *rule;

	if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH)
		return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);

	rule = mlx5e_tc_rule_offload(flow->priv, spec, attr);

	if (IS_ERR(rule))
		return rule;

	if (attr->esw_attr->split_count) {
		flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
		if (IS_ERR(flow->rule[1]))
			goto err_rule1;
	}

	return rule;

err_rule1:
	mlx5e_tc_rule_unoffload(flow->priv, rule, attr);
	return flow->rule[1];
}

void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5_flow_attr *attr)
{
	flow_flag_clear(flow, OFFLOADED);

	if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH)
		return mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);

	if (attr->esw_attr->split_count)
		mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);

	mlx5e_tc_rule_unoffload(flow->priv, flow->rule[0], attr);
}
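/* Slow-path offload: used while a flow cannot be fully offloaded (e.g. an
 * encap destination has no valid neighbour yet). The rule forwards to the
 * slow table so packets reach software, and, when the firmware supports
 * forwarding after a header rewrite, the chain id is stored in the mapped
 * register so handling can later be restored to the right chain.
 */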
struct mlx5_flow_handle *
mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
			      struct mlx5e_tc_flow *flow,
			      struct mlx5_flow_spec *spec)
{
	struct mlx5e_tc_mod_hdr_acts mod_acts = {};
	struct mlx5e_mod_hdr_handle *mh = NULL;
	struct mlx5_flow_attr *slow_attr;
	struct mlx5_flow_handle *rule;
	bool fwd_and_modify_cap;
	u32 chain_mapping = 0;
	int err;

	slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
	if (!slow_attr)
		return ERR_PTR(-ENOMEM);

	memcpy(slow_attr, flow->attr, ESW_FLOW_ATTR_SZ);
	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	slow_attr->esw_attr->split_count = 0;
	slow_attr->flags |= MLX5_ATTR_FLAG_SLOW_PATH;

	fwd_and_modify_cap = MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_modify_header_fwd_to_table);
	if (!fwd_and_modify_cap)
		goto skip_restore;

	err = mlx5_chains_get_chain_mapping(esw_chains(esw), flow->attr->chain, &chain_mapping);
	if (err)
		goto err_get_chain;

	err = mlx5e_tc_match_to_reg_set(esw->dev, &mod_acts, MLX5_FLOW_NAMESPACE_FDB,
					MAPPED_OBJ_TO_REG, chain_mapping);
	if (err)
		goto err_reg_set;

	mh = mlx5e_mod_hdr_attach(esw->dev, get_mod_hdr_table(flow->priv, flow),
				  MLX5_FLOW_NAMESPACE_FDB, &mod_acts);
	if (IS_ERR(mh)) {
		err = PTR_ERR(mh);
		goto err_attach;
	}

	slow_attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	slow_attr->modify_hdr = mlx5e_mod_hdr_get(mh);

skip_restore:
	rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto err_offload;
	}

	flow->attr->slow_mh = mh;
	flow->chain_mapping = chain_mapping;
	flow_flag_set(flow, SLOW);

	mlx5e_mod_hdr_dealloc(&mod_acts);
	kfree(slow_attr);

	return rule;

err_offload:
	if (fwd_and_modify_cap)
		mlx5e_mod_hdr_detach(esw->dev, get_mod_hdr_table(flow->priv, flow), mh);
err_attach:
err_reg_set:
	if (fwd_and_modify_cap)
		mlx5_chains_put_chain_mapping(esw_chains(esw), chain_mapping);
err_get_chain:
	mlx5e_mod_hdr_dealloc(&mod_acts);
	kfree(slow_attr);
	return ERR_PTR(err);
}

void mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
				       struct mlx5e_tc_flow *flow)
{
	struct mlx5e_mod_hdr_handle *slow_mh = flow->attr->slow_mh;
	struct mlx5_flow_attr *slow_attr;

	slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
	if (!slow_attr) {
		mlx5_core_warn(flow->priv->mdev, "Unable to alloc attr to unoffload slow path rule\n");
		return;
	}

	memcpy(slow_attr, flow->attr, ESW_FLOW_ATTR_SZ);
	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	slow_attr->esw_attr->split_count = 0;
	slow_attr->flags |= MLX5_ATTR_FLAG_SLOW_PATH;
	if (slow_mh) {
		slow_attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
		slow_attr->modify_hdr = mlx5e_mod_hdr_get(slow_mh);
	}
	mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
	if (slow_mh) {
		mlx5e_mod_hdr_detach(esw->dev, get_mod_hdr_table(flow->priv, flow), slow_mh);
		mlx5_chains_put_chain_mapping(esw_chains(esw), flow->chain_mapping);
		flow->chain_mapping = 0;
		flow->attr->slow_mh = NULL;
	}
	flow_flag_clear(flow, SLOW);
	kfree(slow_attr);
}

/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
 * function.
 */
static void unready_flow_add(struct mlx5e_tc_flow *flow,
			     struct list_head *unready_flows)
{
	flow_flag_set(flow, NOT_READY);
	list_add_tail(&flow->unready, unready_flows);
}

/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
 * function.
 */
static void unready_flow_del(struct mlx5e_tc_flow *flow)
{
	list_del(&flow->unready);
	flow_flag_clear(flow, NOT_READY);
}

static void add_unready_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &rpriv->uplink_priv;

	mutex_lock(&uplink_priv->unready_flows_lock);
	unready_flow_add(flow, &uplink_priv->unready_flows);
	mutex_unlock(&uplink_priv->unready_flows_lock);
}

static void remove_unready_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &rpriv->uplink_priv;

	mutex_lock(&uplink_priv->unready_flows_lock);
	if (flow_flag_test(flow, NOT_READY))
		unready_flow_del(flow);
	mutex_unlock(&uplink_priv->unready_flows_lock);
}

bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_dev)
{
	struct mlx5_core_dev *out_mdev, *route_mdev;
	struct mlx5e_priv *out_priv, *route_priv;

	out_priv = netdev_priv(out_dev);
	out_mdev = out_priv->mdev;
	route_priv = netdev_priv(route_dev);
	route_mdev = route_priv->mdev;

	if (out_mdev->coredev_type != MLX5_COREDEV_PF)
		return false;

	if (route_mdev->coredev_type != MLX5_COREDEV_VF &&
	    route_mdev->coredev_type != MLX5_COREDEV_SF)
		return false;

	return mlx5e_same_hw_devs(out_priv, route_priv);
}

int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev, u16 *vport)
{
	struct mlx5e_priv *out_priv, *route_priv;
	struct mlx5_core_dev *route_mdev;
	struct mlx5_devcom_comp_dev *pos;
	struct mlx5_eswitch *esw;
	u16 vhca_id;
	int err;

	out_priv = netdev_priv(out_dev);
	esw = out_priv->mdev->priv.eswitch;
	route_priv = netdev_priv(route_dev);
	route_mdev = route_priv->mdev;

	vhca_id = MLX5_CAP_GEN(route_mdev, vhca_id);
	err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
	if (!err)
		return err;

	if (!mlx5_lag_is_active(out_priv->mdev))
		return err;

	rcu_read_lock();
	err = -ENODEV;
	mlx5_devcom_for_each_peer_entry_rcu(esw->devcom, esw, pos) {
		err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
		if (!err)
			break;
	}
	rcu_read_unlock();

	return err;
}

static int
verify_attr_actions(u32 actions, struct netlink_ext_ack *extack)
{
	if (!(actions &
	      (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
		NL_SET_ERR_MSG_MOD(extack, "Rule must have at least one forward/drop action");
		return -EOPNOTSUPP;
	}

	if (!(~actions &
	      (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
		NL_SET_ERR_MSG_MOD(extack, "Rule cannot support forward+drop action");
		return -EOPNOTSUPP;
	}

	if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
	    actions & MLX5_FLOW_CONTEXT_ACTION_DROP) {
		NL_SET_ERR_MSG_MOD(extack, "Drop with modify header action is not supported");
		return -EOPNOTSUPP;
	}

	return 0;
}

static bool
has_encap_dests(struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	int out_index;

	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
		if (esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP)
			return true;

	return false;
}
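/* Finish translating a parsed flow attr into hardware objects: validate the
 * action combination, resolve encap destinations, attach modify-header
 * contexts for the attr and its branch attrs, and allocate the flow counter.
 */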
static int
post_process_attr(struct mlx5e_tc_flow *flow,
		  struct mlx5_flow_attr *attr,
		  struct netlink_ext_ack *extack)
{
	bool vf_tun;
	int err = 0;

	err = verify_attr_actions(attr->action, extack);
	if (err)
		goto err_out;

	if (mlx5e_is_eswitch_flow(flow) && has_encap_dests(attr)) {
		err = mlx5e_tc_tun_encap_dests_set(flow->priv, flow, attr, extack, &vf_tun);
		if (err)
			goto err_out;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_tc_attach_mod_hdr(flow->priv, flow, attr);
		if (err)
			goto err_out;
	}

	if (attr->branch_true &&
	    attr->branch_true->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_tc_attach_mod_hdr(flow->priv, flow, attr->branch_true);
		if (err)
			goto err_out;
	}

	if (attr->branch_false &&
	    attr->branch_false->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_tc_attach_mod_hdr(flow->priv, flow, attr->branch_false);
		if (err)
			goto err_out;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		err = alloc_flow_attr_counter(get_flow_counter_dev(flow), attr);
		if (err)
			goto err_out;
	}

err_out:
	return err;
}

static int
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow *flow,
		      struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5_esw_flow_attr *esw_attr;
	u32 max_prio, max_chain;
	int err = 0;

	parse_attr = attr->parse_attr;
	esw_attr = attr->esw_attr;

	/* We check chain range only for tc flows.
	 * For ft flows, we checked attr->chain was originally 0 and set it to
	 * FDB_FT_CHAIN which is outside tc range.
	 * See mlx5e_rep_setup_ft_cb().
	 */
	max_chain = mlx5_chains_get_chain_range(esw_chains(esw));
	if (!mlx5e_is_ft_flow(flow) && attr->chain > max_chain) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Requested chain is out of supported range");
		err = -EOPNOTSUPP;
		goto err_out;
	}

	max_prio = mlx5_chains_get_prio_range(esw_chains(esw));
	if (attr->prio > max_prio) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Requested priority is out of supported range");
		err = -EOPNOTSUPP;
		goto err_out;
	}

	if (flow_flag_test(flow, TUN_RX)) {
		err = mlx5e_attach_decap_route(priv, flow);
		if (err)
			goto err_out;

		if (!attr->chain && esw_attr->int_port &&
		    attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
			/* If the decap route device is an internal port, change
			 * the source vport value in reg_c0 back to the uplink,
			 * in case the rule performs goto chain > 0. On a miss
			 * in chain > 0 we want the metadata regs to hold the
			 * chain id, so SW will resume handling of this packet
			 * from the proper chain.
			 */
			u32 metadata = mlx5_eswitch_get_vport_metadata_for_set(esw,
									       esw_attr->in_rep->vport);

			err = mlx5e_tc_match_to_reg_set(priv->mdev, &parse_attr->mod_hdr_acts,
							MLX5_FLOW_NAMESPACE_FDB, VPORT_TO_REG,
							metadata);
			if (err)
				goto err_out;

			attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
		}
	}

	if (flow_flag_test(flow, L3_TO_L2_DECAP)) {
		err = mlx5e_attach_decap(priv, flow, extack);
		if (err)
			goto err_out;
	}

	if (netif_is_ovs_master(parse_attr->filter_dev)) {
		struct mlx5e_tc_int_port *int_port;

		if (attr->chain) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Internal port rule is only supported on chain 0");
			err = -EOPNOTSUPP;
			goto err_out;
		}

		if (attr->dest_chain) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Internal port rule offload doesn't support goto action");
			err = -EOPNOTSUPP;
			goto err_out;
		}

		int_port = mlx5e_tc_int_port_get(mlx5e_get_int_port_priv(priv),
						 parse_attr->filter_dev->ifindex,
						 flow_flag_test(flow, EGRESS) ?
						 MLX5E_TC_INT_PORT_EGRESS :
						 MLX5E_TC_INT_PORT_INGRESS);
		if (IS_ERR(int_port)) {
			err = PTR_ERR(int_port);
			goto err_out;
		}

		esw_attr->int_port = int_port;
	}

	err = post_process_attr(flow, attr, extack);
	if (err)
		goto err_out;

	err = mlx5e_tc_act_stats_add_flow(get_act_stats_handle(priv), flow);
	if (err)
		goto err_out;

	/* we get here if one of the following takes place:
	 * (1) there's no error
	 * (2) there's an encap action and we don't have a valid neigh
	 */
	if (flow_flag_test(flow, SLOW))
		flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec);
	else
		flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);

	if (IS_ERR(flow->rule[0])) {
		err = PTR_ERR(flow->rule[0]);
		goto err_out;
	}
	flow_flag_set(flow, OFFLOADED);

	return 0;

err_out:
	flow_flag_set(flow, FAILED);
	return err;
}

static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow)
{
	struct mlx5_flow_spec *spec = &flow->attr->parse_attr->spec;
	void *headers_v = MLX5_ADDR_OF(fte_match_param,
				       spec->match_value,
				       misc_parameters_3);
	u32 geneve_tlv_opt_0_data = MLX5_GET(fte_match_set_misc3,
					     headers_v,
					     geneve_tlv_option_0_data);

	return !!geneve_tlv_opt_0_data;
}

static void free_branch_attr(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr)
{
	if (!attr)
		return;

	mlx5_free_flow_attr_actions(flow, attr);
	kvfree(attr->parse_attr);
	kfree(attr);
}

static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_flow_attr *attr = flow->attr;

	mlx5e_put_flow_tunnel_id(flow);

	remove_unready_flow(flow);

	if (mlx5e_is_offloaded_flow(flow)) {
		if (flow_flag_test(flow, SLOW))
			mlx5e_tc_unoffload_from_slow_path(esw, flow);
		else
			mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
	}
	complete_all(&flow->del_hw_done);

	if (mlx5_flow_has_geneve_opt(flow))
		mlx5_geneve_tlv_option_del(priv->mdev->geneve);

	if (flow->decap_route)
		mlx5e_detach_decap_route(priv, flow);

	mlx5_tc_ct_match_del(get_ct_priv(priv), &flow->attr->ct_attr);

	if (flow_flag_test(flow, L3_TO_L2_DECAP))
(flow_flag_test(flow, L3_TO_L2_DECAP)) 1965 mlx5e_detach_decap(priv, flow); 1966 1967 mlx5e_tc_act_stats_del_flow(get_act_stats_handle(priv), flow); 1968 1969 free_flow_post_acts(flow); 1970 mlx5_free_flow_attr_actions(flow, attr); 1971 1972 kvfree(attr->esw_attr->rx_tun_attr); 1973 kvfree(attr->parse_attr); 1974 kfree(flow->attr); 1975 } 1976 1977 struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow) 1978 { 1979 struct mlx5_flow_attr *attr; 1980 1981 attr = list_first_entry(&flow->attrs, struct mlx5_flow_attr, list); 1982 return attr->counter; 1983 } 1984 1985 /* Iterate over tmp_list of flows attached to flow_list head. */ 1986 void mlx5e_put_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list) 1987 { 1988 struct mlx5e_tc_flow *flow, *tmp; 1989 1990 list_for_each_entry_safe(flow, tmp, flow_list, tmp_list) 1991 mlx5e_flow_put(priv, flow); 1992 } 1993 1994 static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow, 1995 int peer_index) 1996 { 1997 struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch; 1998 struct mlx5e_tc_flow *peer_flow; 1999 struct mlx5e_tc_flow *tmp; 2000 2001 if (!flow_flag_test(flow, ESWITCH) || 2002 !flow_flag_test(flow, DUP)) 2003 return; 2004 2005 mutex_lock(&esw->offloads.peer_mutex); 2006 list_del(&flow->peer[peer_index]); 2007 mutex_unlock(&esw->offloads.peer_mutex); 2008 2009 list_for_each_entry_safe(peer_flow, tmp, &flow->peer_flows, peer_flows) { 2010 if (peer_index != mlx5_get_dev_index(peer_flow->priv->mdev)) 2011 continue; 2012 if (refcount_dec_and_test(&peer_flow->refcnt)) { 2013 mlx5e_tc_del_fdb_flow(peer_flow->priv, peer_flow); 2014 list_del(&peer_flow->peer_flows); 2015 kfree(peer_flow); 2016 } 2017 } 2018 2019 if (list_empty(&flow->peer_flows)) 2020 flow_flag_clear(flow, DUP); 2021 } 2022 2023 static void mlx5e_tc_del_fdb_peers_flow(struct mlx5e_tc_flow *flow) 2024 { 2025 int i; 2026 2027 for (i = 0; i < MLX5_MAX_PORTS; i++) { 2028 if (i == mlx5_get_dev_index(flow->priv->mdev)) 2029 continue; 2030 mlx5e_tc_del_fdb_peer_flow(flow, i); 2031 } 2032 } 2033 2034 static void mlx5e_tc_del_flow(struct mlx5e_priv *priv, 2035 struct mlx5e_tc_flow *flow) 2036 { 2037 if (mlx5e_is_eswitch_flow(flow)) { 2038 struct mlx5_devcom_comp_dev *devcom = flow->priv->mdev->priv.eswitch->devcom; 2039 2040 if (!mlx5_devcom_for_each_peer_begin(devcom)) { 2041 mlx5e_tc_del_fdb_flow(priv, flow); 2042 return; 2043 } 2044 2045 mlx5e_tc_del_fdb_peers_flow(flow); 2046 mlx5_devcom_for_each_peer_end(devcom); 2047 mlx5e_tc_del_fdb_flow(priv, flow); 2048 } else { 2049 mlx5e_tc_del_nic_flow(priv, flow); 2050 } 2051 } 2052 2053 static bool flow_requires_tunnel_mapping(u32 chain, struct flow_cls_offload *f) 2054 { 2055 struct flow_rule *rule = flow_cls_offload_flow_rule(f); 2056 struct flow_action *flow_action = &rule->action; 2057 const struct flow_action_entry *act; 2058 int i; 2059 2060 if (chain) 2061 return false; 2062 2063 flow_action_for_each(i, act, flow_action) { 2064 switch (act->id) { 2065 case FLOW_ACTION_GOTO: 2066 return true; 2067 case FLOW_ACTION_SAMPLE: 2068 return true; 2069 default: 2070 continue; 2071 } 2072 } 2073 2074 return false; 2075 } 2076 2077 static int 2078 enc_opts_is_dont_care_or_full_match(struct mlx5e_priv *priv, 2079 struct flow_dissector_key_enc_opts *opts, 2080 struct netlink_ext_ack *extack, 2081 bool *dont_care) 2082 { 2083 struct geneve_opt *opt; 2084 int off = 0; 2085 2086 *dont_care = true; 2087 2088 while (opts->len > off) { 2089 opt = (struct geneve_opt *)&opts->data[off]; 2090 2091 if (!(*dont_care) || 
opt->opt_class || opt->type || 2092 memchr_inv(opt->opt_data, 0, opt->length * 4)) { 2093 *dont_care = false; 2094 2095 if (opt->opt_class != htons(U16_MAX) || 2096 opt->type != U8_MAX) { 2097 NL_SET_ERR_MSG_MOD(extack, 2098 "Partial match of tunnel options in chain > 0 isn't supported"); 2099 netdev_warn(priv->netdev, 2100 "Partial match of tunnel options in chain > 0 isn't supported"); 2101 return -EOPNOTSUPP; 2102 } 2103 } 2104 2105 off += sizeof(struct geneve_opt) + opt->length * 4; 2106 } 2107 2108 return 0; 2109 } 2110 2111 #define COPY_DISSECTOR(rule, diss_key, dst)\ 2112 ({ \ 2113 struct flow_rule *__rule = (rule);\ 2114 typeof(dst) __dst = dst;\ 2115 \ 2116 memcpy(__dst,\ 2117 skb_flow_dissector_target(__rule->match.dissector,\ 2118 diss_key,\ 2119 __rule->match.key),\ 2120 sizeof(*__dst));\ 2121 }) 2122 2123 static int mlx5e_get_flow_tunnel_id(struct mlx5e_priv *priv, 2124 struct mlx5e_tc_flow *flow, 2125 struct flow_cls_offload *f, 2126 struct net_device *filter_dev) 2127 { 2128 struct flow_rule *rule = flow_cls_offload_flow_rule(f); 2129 struct netlink_ext_ack *extack = f->common.extack; 2130 struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts; 2131 struct flow_match_enc_opts enc_opts_match; 2132 struct tunnel_match_enc_opts tun_enc_opts; 2133 struct mlx5_rep_uplink_priv *uplink_priv; 2134 struct mlx5_flow_attr *attr = flow->attr; 2135 struct mlx5e_rep_priv *uplink_rpriv; 2136 struct tunnel_match_key tunnel_key; 2137 bool enc_opts_is_dont_care = true; 2138 u32 tun_id, enc_opts_id = 0; 2139 struct mlx5_eswitch *esw; 2140 u32 value, mask; 2141 int err; 2142 2143 esw = priv->mdev->priv.eswitch; 2144 uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); 2145 uplink_priv = &uplink_rpriv->uplink_priv; 2146 2147 memset(&tunnel_key, 0, sizeof(tunnel_key)); 2148 COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, 2149 &tunnel_key.enc_control); 2150 if (tunnel_key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) 2151 COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, 2152 &tunnel_key.enc_ipv4); 2153 else 2154 COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, 2155 &tunnel_key.enc_ipv6); 2156 COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IP, &tunnel_key.enc_ip); 2157 COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, 2158 &tunnel_key.enc_tp); 2159 COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_KEYID, 2160 &tunnel_key.enc_key_id); 2161 tunnel_key.filter_ifindex = filter_dev->ifindex; 2162 2163 err = mapping_add(uplink_priv->tunnel_mapping, &tunnel_key, &tun_id); 2164 if (err) 2165 return err; 2166 2167 flow_rule_match_enc_opts(rule, &enc_opts_match); 2168 err = enc_opts_is_dont_care_or_full_match(priv, 2169 enc_opts_match.mask, 2170 extack, 2171 &enc_opts_is_dont_care); 2172 if (err) 2173 goto err_enc_opts; 2174 2175 if (!enc_opts_is_dont_care) { 2176 memset(&tun_enc_opts, 0, sizeof(tun_enc_opts)); 2177 memcpy(&tun_enc_opts.key, enc_opts_match.key, 2178 sizeof(*enc_opts_match.key)); 2179 memcpy(&tun_enc_opts.mask, enc_opts_match.mask, 2180 sizeof(*enc_opts_match.mask)); 2181 2182 err = mapping_add(uplink_priv->tunnel_enc_opts_mapping, 2183 &tun_enc_opts, &enc_opts_id); 2184 if (err) 2185 goto err_enc_opts; 2186 } 2187 2188 value = tun_id << ENC_OPTS_BITS | enc_opts_id; 2189 mask = enc_opts_id ? 
TUNNEL_ID_MASK : 2190 (TUNNEL_ID_MASK & ~ENC_OPTS_BITS_MASK); 2191 2192 if (attr->chain) { 2193 mlx5e_tc_match_to_reg_match(&attr->parse_attr->spec, 2194 TUNNEL_TO_REG, value, mask); 2195 } else { 2196 mod_hdr_acts = &attr->parse_attr->mod_hdr_acts; 2197 err = mlx5e_tc_match_to_reg_set(priv->mdev, 2198 mod_hdr_acts, MLX5_FLOW_NAMESPACE_FDB, 2199 TUNNEL_TO_REG, value); 2200 if (err) 2201 goto err_set; 2202 2203 attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; 2204 } 2205 2206 flow->attr->tunnel_id = value; 2207 return 0; 2208 2209 err_set: 2210 if (enc_opts_id) 2211 mapping_remove(uplink_priv->tunnel_enc_opts_mapping, 2212 enc_opts_id); 2213 err_enc_opts: 2214 mapping_remove(uplink_priv->tunnel_mapping, tun_id); 2215 return err; 2216 } 2217 2218 static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow) 2219 { 2220 u32 enc_opts_id = flow->attr->tunnel_id & ENC_OPTS_BITS_MASK; 2221 u32 tun_id = flow->attr->tunnel_id >> ENC_OPTS_BITS; 2222 struct mlx5_rep_uplink_priv *uplink_priv; 2223 struct mlx5e_rep_priv *uplink_rpriv; 2224 struct mlx5_eswitch *esw; 2225 2226 esw = flow->priv->mdev->priv.eswitch; 2227 uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); 2228 uplink_priv = &uplink_rpriv->uplink_priv; 2229 2230 if (tun_id) 2231 mapping_remove(uplink_priv->tunnel_mapping, tun_id); 2232 if (enc_opts_id) 2233 mapping_remove(uplink_priv->tunnel_enc_opts_mapping, 2234 enc_opts_id); 2235 } 2236 2237 void mlx5e_tc_set_ethertype(struct mlx5_core_dev *mdev, 2238 struct flow_match_basic *match, bool outer, 2239 void *headers_c, void *headers_v) 2240 { 2241 bool ip_version_cap; 2242 2243 ip_version_cap = outer ? 2244 MLX5_CAP_FLOWTABLE_NIC_RX(mdev, 2245 ft_field_support.outer_ip_version) : 2246 MLX5_CAP_FLOWTABLE_NIC_RX(mdev, 2247 ft_field_support.inner_ip_version); 2248 2249 if (ip_version_cap && match->mask->n_proto == htons(0xFFFF) && 2250 (match->key->n_proto == htons(ETH_P_IP) || 2251 match->key->n_proto == htons(ETH_P_IPV6))) { 2252 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_version); 2253 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 2254 match->key->n_proto == htons(ETH_P_IP) ? 4 : 6); 2255 } else { 2256 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype, 2257 ntohs(match->mask->n_proto)); 2258 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 2259 ntohs(match->key->n_proto)); 2260 } 2261 } 2262 2263 u8 mlx5e_tc_get_ip_version(struct mlx5_flow_spec *spec, bool outer) 2264 { 2265 void *headers_v; 2266 u16 ethertype; 2267 u8 ip_version; 2268 2269 if (outer) 2270 headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers); 2271 else 2272 headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, inner_headers); 2273 2274 ip_version = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_version); 2275 /* Return ip_version converted from ethertype anyway */ 2276 if (!ip_version) { 2277 ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype); 2278 if (ethertype == ETH_P_IP || ethertype == ETH_P_ARP) 2279 ip_version = 4; 2280 else if (ethertype == ETH_P_IPV6) 2281 ip_version = 6; 2282 } 2283 return ip_version; 2284 } 2285 2286 /* Tunnel device follows RFC 6040, see include/net/inet_ecn.h. 
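 */

/*
 * A minimal sketch of the RFC 6040 decap rule (illustration only; this
 * helper is not used by the driver and its name is made up). It returns
 * the inner ECN codepoint after decap, or -1 where RFC 6040 mandates a
 * drop, and reproduces the table in the comment below.
 */
static inline int example_rfc6040_decap_ecn(u8 outer, u8 inner)
{
	outer &= INET_ECN_MASK;
	inner &= INET_ECN_MASK;

	if (outer == INET_ECN_CE) /* CE propagates, except to Not-ECT */
		return inner == INET_ECN_NOT_ECT ? -1 : INET_ECN_CE;
	if (outer == INET_ECN_ECT_1 && inner == INET_ECN_ECT_0)
		return INET_ECN_ECT_1; /* the only other value rewrite */
	return inner; /* every other cell keeps the inner value */
}

/*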
2287 * The tunnel device changes the inner ip_ecn depending on the inner and outer ip_ecn as follows: 2288 * +---------+----------------------------------------+ 2289 * |Arriving | Arriving Outer Header | 2290 * | Inner +---------+---------+---------+----------+ 2291 * | Header | Not-ECT | ECT(0) | ECT(1) | CE | 2292 * +---------+---------+---------+---------+----------+ 2293 * | Not-ECT | Not-ECT | Not-ECT | Not-ECT | <drop> | 2294 * | ECT(0) | ECT(0) | ECT(0) | ECT(1) | CE* | 2295 * | ECT(1) | ECT(1) | ECT(1) | ECT(1)* | CE* | 2296 * | CE | CE | CE | CE | CE | 2297 * +---------+---------+---------+---------+----------+ 2298 * 2299 * TC matches on the inner header after decapsulation on the tunnel device, but HW offload matches 2300 * the inner ip_ecn value before the hardware decap action. 2301 * 2302 * Cells marked with * are changed from the original inner packet ip_ecn value during decap, so 2303 * matching on those values in the inner ip_ecn before decap will fail. 2304 * 2305 * The following helper allows offload when the inner ip_ecn won't be changed by the outer ip_ecn, 2306 * except for outer ip_ecn = CE, where in all cases the inner ip_ecn will be changed to CE, 2307 * and as such we can drop the inner ip_ecn = CE match. 2308 */ 2309 2310 static int mlx5e_tc_verify_tunnel_ecn(struct mlx5e_priv *priv, 2311 struct flow_cls_offload *f, 2312 bool *match_inner_ecn) 2313 { 2314 u8 outer_ecn_mask = 0, outer_ecn_key = 0, inner_ecn_mask = 0, inner_ecn_key = 0; 2315 struct flow_rule *rule = flow_cls_offload_flow_rule(f); 2316 struct netlink_ext_ack *extack = f->common.extack; 2317 struct flow_match_ip match; 2318 2319 *match_inner_ecn = true; 2320 2321 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) { 2322 flow_rule_match_enc_ip(rule, &match); 2323 outer_ecn_key = match.key->tos & INET_ECN_MASK; 2324 outer_ecn_mask = match.mask->tos & INET_ECN_MASK; 2325 } 2326 2327 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) { 2328 flow_rule_match_ip(rule, &match); 2329 inner_ecn_key = match.key->tos & INET_ECN_MASK; 2330 inner_ecn_mask = match.mask->tos & INET_ECN_MASK; 2331 } 2332 2333 if (outer_ecn_mask != 0 && outer_ecn_mask != INET_ECN_MASK) { 2334 NL_SET_ERR_MSG_MOD(extack, "Partial match on enc_tos ecn bits isn't supported"); 2335 netdev_warn(priv->netdev, "Partial match on enc_tos ecn bits isn't supported\n"); 2336 return -EOPNOTSUPP; 2337 } 2338 2339 if (!outer_ecn_mask) { 2340 if (!inner_ecn_mask) 2341 return 0; 2342 2343 NL_SET_ERR_MSG_MOD(extack, 2344 "Matching on tos ecn bits without also matching enc_tos ecn bits isn't supported"); 2345 netdev_warn(priv->netdev, 2346 "Matching on tos ecn bits without also matching enc_tos ecn bits isn't supported\n"); 2347 return -EOPNOTSUPP; 2348 } 2349 2350 if (inner_ecn_mask && inner_ecn_mask != INET_ECN_MASK) { 2351 NL_SET_ERR_MSG_MOD(extack, 2352 "Partial match on tos ecn bits with match on enc_tos ecn bits isn't supported"); 2353 netdev_warn(priv->netdev, 2354 "Partial match on tos ecn bits with match on enc_tos ecn bits isn't supported\n"); 2355 return -EOPNOTSUPP; 2356 } 2357 2358 if (!inner_ecn_mask) 2359 return 0; 2360 2361 /* Both inner and outer have a full mask on ecn */ 2362 2363 if (outer_ecn_key == INET_ECN_ECT_1) { 2364 /* inner ecn might be changed by the DECAP action */ 2365 2366 NL_SET_ERR_MSG_MOD(extack, "Match on enc_tos ecn = ECT(1) isn't supported"); 2367 netdev_warn(priv->netdev, "Match on enc_tos ecn = ECT(1) isn't supported\n"); 2368 return -EOPNOTSUPP; 2369 } 2370 2371 if (outer_ecn_key != INET_ECN_CE) 2372 return 0; 2373 2374 if (inner_ecn_key != INET_ECN_CE) { 2375 /* Can't happen in
software, as packet ecn will be changed to CE after decap */ 2376 NL_SET_ERR_MSG_MOD(extack, 2377 "Match on tos enc_tos ecn = CE while match on tos ecn != CE isn't supported"); 2378 netdev_warn(priv->netdev, 2379 "Match on tos enc_tos ecn = CE while match on tos ecn != CE isn't supported"); 2380 return -EOPNOTSUPP; 2381 } 2382 2383 /* outer ecn = CE, inner ecn = CE, as decap will change inner ecn to CE in anycase, 2384 * drop match on inner ecn 2385 */ 2386 *match_inner_ecn = false; 2387 2388 return 0; 2389 } 2390 2391 static int parse_tunnel_attr(struct mlx5e_priv *priv, 2392 struct mlx5e_tc_flow *flow, 2393 struct mlx5_flow_spec *spec, 2394 struct flow_cls_offload *f, 2395 struct net_device *filter_dev, 2396 u8 *match_level, 2397 bool *match_inner) 2398 { 2399 struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(filter_dev); 2400 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; 2401 struct netlink_ext_ack *extack = f->common.extack; 2402 bool needs_mapping, sets_mapping; 2403 int err; 2404 2405 if (!mlx5e_is_eswitch_flow(flow)) { 2406 NL_SET_ERR_MSG_MOD(extack, "Match on tunnel is not supported"); 2407 return -EOPNOTSUPP; 2408 } 2409 2410 needs_mapping = !!flow->attr->chain; 2411 sets_mapping = flow_requires_tunnel_mapping(flow->attr->chain, f); 2412 *match_inner = !needs_mapping; 2413 2414 if ((needs_mapping || sets_mapping) && 2415 !mlx5_eswitch_reg_c1_loopback_enabled(esw)) { 2416 NL_SET_ERR_MSG_MOD(extack, 2417 "Chains on tunnel devices isn't supported without register loopback support"); 2418 netdev_warn(priv->netdev, 2419 "Chains on tunnel devices isn't supported without register loopback support"); 2420 return -EOPNOTSUPP; 2421 } 2422 2423 if (!flow->attr->chain) { 2424 err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f, 2425 match_level); 2426 if (err) { 2427 NL_SET_ERR_MSG_MOD(extack, 2428 "Failed to parse tunnel attributes"); 2429 netdev_warn(priv->netdev, 2430 "Failed to parse tunnel attributes"); 2431 return err; 2432 } 2433 2434 /* With mpls over udp we decapsulate using packet reformat 2435 * object 2436 */ 2437 if (!netif_is_bareudp(filter_dev)) 2438 flow->attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP; 2439 err = mlx5e_tc_set_attr_rx_tun(flow, spec); 2440 if (err) 2441 return err; 2442 } else if (tunnel) { 2443 struct mlx5_flow_spec *tmp_spec; 2444 2445 tmp_spec = kvzalloc(sizeof(*tmp_spec), GFP_KERNEL); 2446 if (!tmp_spec) { 2447 NL_SET_ERR_MSG_MOD(extack, "Failed to allocate memory for tunnel tmp spec"); 2448 netdev_warn(priv->netdev, "Failed to allocate memory for tunnel tmp spec"); 2449 return -ENOMEM; 2450 } 2451 memcpy(tmp_spec, spec, sizeof(*tmp_spec)); 2452 2453 err = mlx5e_tc_tun_parse(filter_dev, priv, tmp_spec, f, match_level); 2454 if (err) { 2455 kvfree(tmp_spec); 2456 NL_SET_ERR_MSG_MOD(extack, "Failed to parse tunnel attributes"); 2457 netdev_warn(priv->netdev, "Failed to parse tunnel attributes"); 2458 return err; 2459 } 2460 err = mlx5e_tc_set_attr_rx_tun(flow, tmp_spec); 2461 kvfree(tmp_spec); 2462 if (err) 2463 return err; 2464 } 2465 2466 if (!needs_mapping && !sets_mapping) 2467 return 0; 2468 2469 return mlx5e_get_flow_tunnel_id(priv, flow, f, filter_dev); 2470 } 2471 2472 static void *get_match_inner_headers_criteria(struct mlx5_flow_spec *spec) 2473 { 2474 return MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 2475 inner_headers); 2476 } 2477 2478 static void *get_match_inner_headers_value(struct mlx5_flow_spec *spec) 2479 { 2480 return MLX5_ADDR_OF(fte_match_param, spec->match_value, 2481 inner_headers); 2482 } 2483 2484 static void 
*get_match_outer_headers_criteria(struct mlx5_flow_spec *spec) 2485 { 2486 return MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 2487 outer_headers); 2488 } 2489 2490 static void *get_match_outer_headers_value(struct mlx5_flow_spec *spec) 2491 { 2492 return MLX5_ADDR_OF(fte_match_param, spec->match_value, 2493 outer_headers); 2494 } 2495 2496 void *mlx5e_get_match_headers_value(u32 flags, struct mlx5_flow_spec *spec) 2497 { 2498 return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ? 2499 get_match_inner_headers_value(spec) : 2500 get_match_outer_headers_value(spec); 2501 } 2502 2503 void *mlx5e_get_match_headers_criteria(u32 flags, struct mlx5_flow_spec *spec) 2504 { 2505 return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ? 2506 get_match_inner_headers_criteria(spec) : 2507 get_match_outer_headers_criteria(spec); 2508 } 2509 2510 static int mlx5e_flower_parse_meta(struct net_device *filter_dev, 2511 struct flow_cls_offload *f) 2512 { 2513 struct flow_rule *rule = flow_cls_offload_flow_rule(f); 2514 struct netlink_ext_ack *extack = f->common.extack; 2515 struct net_device *ingress_dev; 2516 struct flow_match_meta match; 2517 2518 if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) 2519 return 0; 2520 2521 flow_rule_match_meta(rule, &match); 2522 2523 if (match.mask->l2_miss) { 2524 NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on \"l2_miss\""); 2525 return -EOPNOTSUPP; 2526 } 2527 2528 if (!match.mask->ingress_ifindex) 2529 return 0; 2530 2531 if (match.mask->ingress_ifindex != 0xFFFFFFFF) { 2532 NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask"); 2533 return -EOPNOTSUPP; 2534 } 2535 2536 ingress_dev = __dev_get_by_index(dev_net(filter_dev), 2537 match.key->ingress_ifindex); 2538 if (!ingress_dev) { 2539 NL_SET_ERR_MSG_MOD(extack, 2540 "Can't find the ingress port to match on"); 2541 return -ENOENT; 2542 } 2543 2544 if (ingress_dev != filter_dev) { 2545 NL_SET_ERR_MSG_MOD(extack, 2546 "Can't match on the ingress filter port"); 2547 return -EOPNOTSUPP; 2548 } 2549 2550 return 0; 2551 } 2552 2553 static bool skip_key_basic(struct net_device *filter_dev, 2554 struct flow_cls_offload *f) 2555 { 2556 /* When doing mpls over udp decap, the user needs to provide 2557 * MPLS_UC as the protocol in order to be able to match on mpls 2558 * label fields. However, the actual ethertype is IP so we want to 2559 * avoid matching on this, otherwise we'll fail the match. 
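 *
 * For example (illustrative only), such a filter is typically added as
 *   tc filter add dev bareudp0 ... protocol mpls_uc flower mpls_label 100 ...
 * so basic.n_proto arrives as MPLS_UC, while the packets the HW sees on
 * the bareudp device carry an IP ethertype; offloading the MPLS_UC
 * ethertype match would therefore never hit.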
2560 */ 2561 if (netif_is_bareudp(filter_dev) && f->common.chain_index == 0) 2562 return true; 2563 2564 return false; 2565 } 2566 2567 static int __parse_cls_flower(struct mlx5e_priv *priv, 2568 struct mlx5e_tc_flow *flow, 2569 struct mlx5_flow_spec *spec, 2570 struct flow_cls_offload *f, 2571 struct net_device *filter_dev, 2572 u8 *inner_match_level, u8 *outer_match_level) 2573 { 2574 struct netlink_ext_ack *extack = f->common.extack; 2575 void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 2576 outer_headers); 2577 void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, 2578 outer_headers); 2579 void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 2580 misc_parameters); 2581 void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, 2582 misc_parameters); 2583 void *misc_c_3 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 2584 misc_parameters_3); 2585 void *misc_v_3 = MLX5_ADDR_OF(fte_match_param, spec->match_value, 2586 misc_parameters_3); 2587 struct flow_rule *rule = flow_cls_offload_flow_rule(f); 2588 struct flow_dissector *dissector = rule->match.dissector; 2589 enum fs_flow_table_type fs_type; 2590 bool match_inner_ecn = true; 2591 u16 addr_type = 0; 2592 u8 ip_proto = 0; 2593 u8 *match_level; 2594 int err; 2595 2596 fs_type = mlx5e_is_eswitch_flow(flow) ? FS_FT_FDB : FS_FT_NIC_RX; 2597 match_level = outer_match_level; 2598 2599 if (dissector->used_keys & 2600 ~(BIT_ULL(FLOW_DISSECTOR_KEY_META) | 2601 BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) | 2602 BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | 2603 BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) | 2604 BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) | 2605 BIT_ULL(FLOW_DISSECTOR_KEY_CVLAN) | 2606 BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | 2607 BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | 2608 BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) | 2609 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) | 2610 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | 2611 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | 2612 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS) | 2613 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL) | 2614 BIT_ULL(FLOW_DISSECTOR_KEY_TCP) | 2615 BIT_ULL(FLOW_DISSECTOR_KEY_IP) | 2616 BIT_ULL(FLOW_DISSECTOR_KEY_CT) | 2617 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IP) | 2618 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_OPTS) | 2619 BIT_ULL(FLOW_DISSECTOR_KEY_ICMP) | 2620 BIT_ULL(FLOW_DISSECTOR_KEY_MPLS))) { 2621 NL_SET_ERR_MSG_MOD(extack, "Unsupported key"); 2622 netdev_dbg(priv->netdev, "Unsupported key used: 0x%llx\n", 2623 dissector->used_keys); 2624 return -EOPNOTSUPP; 2625 } 2626 2627 if (mlx5e_get_tc_tun(filter_dev)) { 2628 bool match_inner = false; 2629 2630 err = parse_tunnel_attr(priv, flow, spec, f, filter_dev, 2631 outer_match_level, &match_inner); 2632 if (err) 2633 return err; 2634 2635 if (match_inner) { 2636 /* header pointers should point to the inner headers 2637 * if the packet was decapsulated already. 2638 * outer headers are set by parse_tunnel_attr. 
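 *
 * Illustrative sketch of the effect (mirroring the code below):
 *   headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, inner_headers);
 *   headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, inner_headers);
 * so every L2-L4 key parsed from this point on lands in the inner
 * header match, while the outer (tunnel) headers were already written
 * by parse_tunnel_attr().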
2639 */ 2640 match_level = inner_match_level; 2641 headers_c = get_match_inner_headers_criteria(spec); 2642 headers_v = get_match_inner_headers_value(spec); 2643 } 2644 2645 err = mlx5e_tc_verify_tunnel_ecn(priv, f, &match_inner_ecn); 2646 if (err) 2647 return err; 2648 } 2649 2650 err = mlx5e_flower_parse_meta(filter_dev, f); 2651 if (err) 2652 return err; 2653 2654 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC) && 2655 !skip_key_basic(filter_dev, f)) { 2656 struct flow_match_basic match; 2657 2658 flow_rule_match_basic(rule, &match); 2659 mlx5e_tc_set_ethertype(priv->mdev, &match, 2660 match_level == outer_match_level, 2661 headers_c, headers_v); 2662 2663 if (match.mask->n_proto) 2664 *match_level = MLX5_MATCH_L2; 2665 } 2666 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN) || 2667 is_vlan_dev(filter_dev)) { 2668 struct flow_dissector_key_vlan filter_dev_mask; 2669 struct flow_dissector_key_vlan filter_dev_key; 2670 struct flow_match_vlan match; 2671 2672 if (is_vlan_dev(filter_dev)) { 2673 match.key = &filter_dev_key; 2674 match.key->vlan_id = vlan_dev_vlan_id(filter_dev); 2675 match.key->vlan_tpid = vlan_dev_vlan_proto(filter_dev); 2676 match.key->vlan_priority = 0; 2677 match.mask = &filter_dev_mask; 2678 memset(match.mask, 0xff, sizeof(*match.mask)); 2679 match.mask->vlan_priority = 0; 2680 } else { 2681 flow_rule_match_vlan(rule, &match); 2682 } 2683 if (match.mask->vlan_id || 2684 match.mask->vlan_priority || 2685 match.mask->vlan_tpid) { 2686 if (match.key->vlan_tpid == htons(ETH_P_8021AD)) { 2687 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 2688 svlan_tag, 1); 2689 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 2690 svlan_tag, 1); 2691 } else { 2692 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 2693 cvlan_tag, 1); 2694 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 2695 cvlan_tag, 1); 2696 } 2697 2698 MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, 2699 match.mask->vlan_id); 2700 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, 2701 match.key->vlan_id); 2702 2703 MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, 2704 match.mask->vlan_priority); 2705 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, 2706 match.key->vlan_priority); 2707 2708 *match_level = MLX5_MATCH_L2; 2709 2710 if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN) && 2711 match.mask->vlan_eth_type && 2712 MLX5_CAP_FLOWTABLE_TYPE(priv->mdev, 2713 ft_field_support.outer_second_vid, 2714 fs_type)) { 2715 MLX5_SET(fte_match_set_misc, misc_c, 2716 outer_second_cvlan_tag, 1); 2717 spec->match_criteria_enable |= 2718 MLX5_MATCH_MISC_PARAMETERS; 2719 } 2720 } 2721 } else if (*match_level != MLX5_MATCH_NONE) { 2722 /* cvlan_tag enabled in match criteria and 2723 * disabled in match value means both S & C tags 2724 * don't exist (untagged of both) 2725 */ 2726 MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1); 2727 *match_level = MLX5_MATCH_L2; 2728 } 2729 2730 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) { 2731 struct flow_match_vlan match; 2732 2733 flow_rule_match_cvlan(rule, &match); 2734 if (match.mask->vlan_id || 2735 match.mask->vlan_priority || 2736 match.mask->vlan_tpid) { 2737 if (!MLX5_CAP_FLOWTABLE_TYPE(priv->mdev, ft_field_support.outer_second_vid, 2738 fs_type)) { 2739 NL_SET_ERR_MSG_MOD(extack, 2740 "Matching on CVLAN is not supported"); 2741 return -EOPNOTSUPP; 2742 } 2743 2744 if (match.key->vlan_tpid == htons(ETH_P_8021AD)) { 2745 MLX5_SET(fte_match_set_misc, misc_c, 2746 outer_second_svlan_tag, 1); 2747 MLX5_SET(fte_match_set_misc, misc_v, 2748 
outer_second_svlan_tag, 1); 2749 } else { 2750 MLX5_SET(fte_match_set_misc, misc_c, 2751 outer_second_cvlan_tag, 1); 2752 MLX5_SET(fte_match_set_misc, misc_v, 2753 outer_second_cvlan_tag, 1); 2754 } 2755 2756 MLX5_SET(fte_match_set_misc, misc_c, outer_second_vid, 2757 match.mask->vlan_id); 2758 MLX5_SET(fte_match_set_misc, misc_v, outer_second_vid, 2759 match.key->vlan_id); 2760 MLX5_SET(fte_match_set_misc, misc_c, outer_second_prio, 2761 match.mask->vlan_priority); 2762 MLX5_SET(fte_match_set_misc, misc_v, outer_second_prio, 2763 match.key->vlan_priority); 2764 2765 *match_level = MLX5_MATCH_L2; 2766 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS; 2767 } 2768 } 2769 2770 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { 2771 struct flow_match_eth_addrs match; 2772 2773 flow_rule_match_eth_addrs(rule, &match); 2774 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 2775 dmac_47_16), 2776 match.mask->dst); 2777 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 2778 dmac_47_16), 2779 match.key->dst); 2780 2781 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 2782 smac_47_16), 2783 match.mask->src); 2784 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 2785 smac_47_16), 2786 match.key->src); 2787 2788 if (!is_zero_ether_addr(match.mask->src) || 2789 !is_zero_ether_addr(match.mask->dst)) 2790 *match_level = MLX5_MATCH_L2; 2791 } 2792 2793 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { 2794 struct flow_match_control match; 2795 2796 flow_rule_match_control(rule, &match); 2797 addr_type = match.key->addr_type; 2798 2799 /* the HW doesn't support frag first/later */ 2800 if (match.mask->flags & FLOW_DIS_FIRST_FRAG) { 2801 NL_SET_ERR_MSG_MOD(extack, "Match on frag first/later is not supported"); 2802 return -EOPNOTSUPP; 2803 } 2804 2805 if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) { 2806 MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1); 2807 MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 2808 match.key->flags & FLOW_DIS_IS_FRAGMENT); 2809 2810 /* the HW doesn't need L3 inline to match on frag=no */ 2811 if (!(match.key->flags & FLOW_DIS_IS_FRAGMENT)) 2812 *match_level = MLX5_MATCH_L2; 2813 /* *** L2 attributes parsing up to here *** */ 2814 else 2815 *match_level = MLX5_MATCH_L3; 2816 } 2817 } 2818 2819 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { 2820 struct flow_match_basic match; 2821 2822 flow_rule_match_basic(rule, &match); 2823 ip_proto = match.key->ip_proto; 2824 2825 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol, 2826 match.mask->ip_proto); 2827 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, 2828 match.key->ip_proto); 2829 2830 if (match.mask->ip_proto) 2831 *match_level = MLX5_MATCH_L3; 2832 } 2833 2834 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { 2835 struct flow_match_ipv4_addrs match; 2836 2837 flow_rule_match_ipv4_addrs(rule, &match); 2838 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 2839 src_ipv4_src_ipv6.ipv4_layout.ipv4), 2840 &match.mask->src, sizeof(match.mask->src)); 2841 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 2842 src_ipv4_src_ipv6.ipv4_layout.ipv4), 2843 &match.key->src, sizeof(match.key->src)); 2844 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 2845 dst_ipv4_dst_ipv6.ipv4_layout.ipv4), 2846 &match.mask->dst, sizeof(match.mask->dst)); 2847 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 2848 dst_ipv4_dst_ipv6.ipv4_layout.ipv4), 2849 &match.key->dst, sizeof(match.key->dst)); 2850 2851 if (match.mask->src || 
match.mask->dst) 2852 *match_level = MLX5_MATCH_L3; 2853 } 2854 2855 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { 2856 struct flow_match_ipv6_addrs match; 2857 2858 flow_rule_match_ipv6_addrs(rule, &match); 2859 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 2860 src_ipv4_src_ipv6.ipv6_layout.ipv6), 2861 &match.mask->src, sizeof(match.mask->src)); 2862 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 2863 src_ipv4_src_ipv6.ipv6_layout.ipv6), 2864 &match.key->src, sizeof(match.key->src)); 2865 2866 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 2867 dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 2868 &match.mask->dst, sizeof(match.mask->dst)); 2869 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 2870 dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 2871 &match.key->dst, sizeof(match.key->dst)); 2872 2873 if (ipv6_addr_type(&match.mask->src) != IPV6_ADDR_ANY || 2874 ipv6_addr_type(&match.mask->dst) != IPV6_ADDR_ANY) 2875 *match_level = MLX5_MATCH_L3; 2876 } 2877 2878 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) { 2879 struct flow_match_ip match; 2880 2881 flow_rule_match_ip(rule, &match); 2882 if (match_inner_ecn) { 2883 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, 2884 match.mask->tos & 0x3); 2885 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, 2886 match.key->tos & 0x3); 2887 } 2888 2889 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, 2890 match.mask->tos >> 2); 2891 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, 2892 match.key->tos >> 2); 2893 2894 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, 2895 match.mask->ttl); 2896 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, 2897 match.key->ttl); 2898 2899 if (match.mask->ttl && 2900 !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, 2901 ft_field_support.outer_ipv4_ttl)) { 2902 NL_SET_ERR_MSG_MOD(extack, 2903 "Matching on TTL is not supported"); 2904 return -EOPNOTSUPP; 2905 } 2906 2907 if (match.mask->tos || match.mask->ttl) 2908 *match_level = MLX5_MATCH_L3; 2909 } 2910 2911 /* *** L3 attributes parsing up to here *** */ 2912 2913 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { 2914 struct flow_match_ports match; 2915 2916 flow_rule_match_ports(rule, &match); 2917 switch (ip_proto) { 2918 case IPPROTO_TCP: 2919 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 2920 tcp_sport, ntohs(match.mask->src)); 2921 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 2922 tcp_sport, ntohs(match.key->src)); 2923 2924 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 2925 tcp_dport, ntohs(match.mask->dst)); 2926 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 2927 tcp_dport, ntohs(match.key->dst)); 2928 break; 2929 2930 case IPPROTO_UDP: 2931 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 2932 udp_sport, ntohs(match.mask->src)); 2933 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 2934 udp_sport, ntohs(match.key->src)); 2935 2936 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 2937 udp_dport, ntohs(match.mask->dst)); 2938 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 2939 udp_dport, ntohs(match.key->dst)); 2940 break; 2941 default: 2942 NL_SET_ERR_MSG_MOD(extack, 2943 "Only UDP and TCP transports are supported for L4 matching"); 2944 netdev_err(priv->netdev, 2945 "Only UDP and TCP transport are supported\n"); 2946 return -EINVAL; 2947 } 2948 2949 if (match.mask->src || match.mask->dst) 2950 *match_level = MLX5_MATCH_L4; 2951 } 2952 2953 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) { 2954 struct flow_match_tcp match; 2955 2956 flow_rule_match_tcp(rule, &match); 2957 MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags, 2958 
ntohs(match.mask->flags)); 2959 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags, 2960 ntohs(match.key->flags)); 2961 2962 if (match.mask->flags) 2963 *match_level = MLX5_MATCH_L4; 2964 } 2965 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP)) { 2966 struct flow_match_icmp match; 2967 2968 flow_rule_match_icmp(rule, &match); 2969 switch (ip_proto) { 2970 case IPPROTO_ICMP: 2971 if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) & 2972 MLX5_FLEX_PROTO_ICMP)) { 2973 NL_SET_ERR_MSG_MOD(extack, 2974 "Match on Flex protocols for ICMP is not supported"); 2975 return -EOPNOTSUPP; 2976 } 2977 MLX5_SET(fte_match_set_misc3, misc_c_3, icmp_type, 2978 match.mask->type); 2979 MLX5_SET(fte_match_set_misc3, misc_v_3, icmp_type, 2980 match.key->type); 2981 MLX5_SET(fte_match_set_misc3, misc_c_3, icmp_code, 2982 match.mask->code); 2983 MLX5_SET(fte_match_set_misc3, misc_v_3, icmp_code, 2984 match.key->code); 2985 break; 2986 case IPPROTO_ICMPV6: 2987 if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) & 2988 MLX5_FLEX_PROTO_ICMPV6)) { 2989 NL_SET_ERR_MSG_MOD(extack, 2990 "Match on Flex protocols for ICMPV6 is not supported"); 2991 return -EOPNOTSUPP; 2992 } 2993 MLX5_SET(fte_match_set_misc3, misc_c_3, icmpv6_type, 2994 match.mask->type); 2995 MLX5_SET(fte_match_set_misc3, misc_v_3, icmpv6_type, 2996 match.key->type); 2997 MLX5_SET(fte_match_set_misc3, misc_c_3, icmpv6_code, 2998 match.mask->code); 2999 MLX5_SET(fte_match_set_misc3, misc_v_3, icmpv6_code, 3000 match.key->code); 3001 break; 3002 default: 3003 NL_SET_ERR_MSG_MOD(extack, 3004 "Code and type matching only with ICMP and ICMPv6"); 3005 netdev_err(priv->netdev, 3006 "Code and type matching only with ICMP and ICMPv6\n"); 3007 return -EINVAL; 3008 } 3009 if (match.mask->code || match.mask->type) { 3010 *match_level = MLX5_MATCH_L4; 3011 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_3; 3012 } 3013 } 3014 /* Currently supported only for MPLS over UDP */ 3015 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS) && 3016 !netif_is_bareudp(filter_dev)) { 3017 NL_SET_ERR_MSG_MOD(extack, 3018 "Matching on MPLS is supported only for MPLS over UDP"); 3019 netdev_err(priv->netdev, 3020 "Matching on MPLS is supported only for MPLS over UDP\n"); 3021 return -EOPNOTSUPP; 3022 } 3023 3024 return 0; 3025 } 3026 3027 static int parse_cls_flower(struct mlx5e_priv *priv, 3028 struct mlx5e_tc_flow *flow, 3029 struct mlx5_flow_spec *spec, 3030 struct flow_cls_offload *f, 3031 struct net_device *filter_dev) 3032 { 3033 u8 inner_match_level, outer_match_level, non_tunnel_match_level; 3034 struct netlink_ext_ack *extack = f->common.extack; 3035 struct mlx5_core_dev *dev = priv->mdev; 3036 struct mlx5_eswitch *esw = dev->priv.eswitch; 3037 struct mlx5e_rep_priv *rpriv = priv->ppriv; 3038 struct mlx5_eswitch_rep *rep; 3039 bool is_eswitch_flow; 3040 int err; 3041 3042 inner_match_level = MLX5_MATCH_NONE; 3043 outer_match_level = MLX5_MATCH_NONE; 3044 3045 err = __parse_cls_flower(priv, flow, spec, f, filter_dev, 3046 &inner_match_level, &outer_match_level); 3047 non_tunnel_match_level = (inner_match_level == MLX5_MATCH_NONE) ? 
3048 outer_match_level : inner_match_level; 3049 3050 is_eswitch_flow = mlx5e_is_eswitch_flow(flow); 3051 if (!err && is_eswitch_flow) { 3052 rep = rpriv->rep; 3053 if (rep->vport != MLX5_VPORT_UPLINK && 3054 (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE && 3055 esw->offloads.inline_mode < non_tunnel_match_level)) { 3056 NL_SET_ERR_MSG_MOD(extack, 3057 "Flow is not offloaded due to min inline setting"); 3058 netdev_warn(priv->netdev, 3059 "Flow is not offloaded due to min inline setting, required %d actual %d\n", 3060 non_tunnel_match_level, esw->offloads.inline_mode); 3061 return -EOPNOTSUPP; 3062 } 3063 } 3064 3065 flow->attr->inner_match_level = inner_match_level; 3066 flow->attr->outer_match_level = outer_match_level; 3067 3068 3069 return err; 3070 } 3071 3072 struct mlx5_fields { 3073 u8 field; 3074 u8 field_bsize; 3075 u32 field_mask; 3076 u32 offset; 3077 u32 match_offset; 3078 }; 3079 3080 #define OFFLOAD(fw_field, field_bsize, field_mask, field, off, match_field) \ 3081 {MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, field_bsize, field_mask, \ 3082 offsetof(struct pedit_headers, field) + (off), \ 3083 MLX5_BYTE_OFF(fte_match_set_lyr_2_4, match_field)} 3084 3085 /* masked values are the same and there are no rewrites that do not have a 3086 * match. 3087 */ 3088 #define SAME_VAL_MASK(type, valp, maskp, matchvalp, matchmaskp) ({ \ 3089 type matchmaskx = *(type *)(matchmaskp); \ 3090 type matchvalx = *(type *)(matchvalp); \ 3091 type maskx = *(type *)(maskp); \ 3092 type valx = *(type *)(valp); \ 3093 \ 3094 (valx & maskx) == (matchvalx & matchmaskx) && !(maskx & (maskx ^ \ 3095 matchmaskx)); \ 3096 }) 3097 3098 static bool cmp_val_mask(void *valp, void *maskp, void *matchvalp, 3099 void *matchmaskp, u8 bsize) 3100 { 3101 bool same = false; 3102 3103 switch (bsize) { 3104 case 8: 3105 same = SAME_VAL_MASK(u8, valp, maskp, matchvalp, matchmaskp); 3106 break; 3107 case 16: 3108 same = SAME_VAL_MASK(u16, valp, maskp, matchvalp, matchmaskp); 3109 break; 3110 case 32: 3111 same = SAME_VAL_MASK(u32, valp, maskp, matchvalp, matchmaskp); 3112 break; 3113 } 3114 3115 return same; 3116 } 3117 3118 static struct mlx5_fields fields[] = { 3119 OFFLOAD(DMAC_47_16, 32, U32_MAX, eth.h_dest[0], 0, dmac_47_16), 3120 OFFLOAD(DMAC_15_0, 16, U16_MAX, eth.h_dest[4], 0, dmac_15_0), 3121 OFFLOAD(SMAC_47_16, 32, U32_MAX, eth.h_source[0], 0, smac_47_16), 3122 OFFLOAD(SMAC_15_0, 16, U16_MAX, eth.h_source[4], 0, smac_15_0), 3123 OFFLOAD(ETHERTYPE, 16, U16_MAX, eth.h_proto, 0, ethertype), 3124 OFFLOAD(FIRST_VID, 16, U16_MAX, vlan.h_vlan_TCI, 0, first_vid), 3125 3126 OFFLOAD(IP_DSCP, 8, 0xfc, ip4.tos, 0, ip_dscp), 3127 OFFLOAD(IP_TTL, 8, U8_MAX, ip4.ttl, 0, ttl_hoplimit), 3128 OFFLOAD(SIPV4, 32, U32_MAX, ip4.saddr, 0, src_ipv4_src_ipv6.ipv4_layout.ipv4), 3129 OFFLOAD(DIPV4, 32, U32_MAX, ip4.daddr, 0, dst_ipv4_dst_ipv6.ipv4_layout.ipv4), 3130 3131 OFFLOAD(SIPV6_127_96, 32, U32_MAX, ip6.saddr.s6_addr32[0], 0, 3132 src_ipv4_src_ipv6.ipv6_layout.ipv6[0]), 3133 OFFLOAD(SIPV6_95_64, 32, U32_MAX, ip6.saddr.s6_addr32[1], 0, 3134 src_ipv4_src_ipv6.ipv6_layout.ipv6[4]), 3135 OFFLOAD(SIPV6_63_32, 32, U32_MAX, ip6.saddr.s6_addr32[2], 0, 3136 src_ipv4_src_ipv6.ipv6_layout.ipv6[8]), 3137 OFFLOAD(SIPV6_31_0, 32, U32_MAX, ip6.saddr.s6_addr32[3], 0, 3138 src_ipv4_src_ipv6.ipv6_layout.ipv6[12]), 3139 OFFLOAD(DIPV6_127_96, 32, U32_MAX, ip6.daddr.s6_addr32[0], 0, 3140 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[0]), 3141 OFFLOAD(DIPV6_95_64, 32, U32_MAX, ip6.daddr.s6_addr32[1], 0, 3142 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[4]), 3143 
OFFLOAD(DIPV6_63_32, 32, U32_MAX, ip6.daddr.s6_addr32[2], 0, 3144 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[8]), 3145 OFFLOAD(DIPV6_31_0, 32, U32_MAX, ip6.daddr.s6_addr32[3], 0, 3146 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]), 3147 OFFLOAD(IPV6_HOPLIMIT, 8, U8_MAX, ip6.hop_limit, 0, ttl_hoplimit), 3148 OFFLOAD(IP_DSCP, 16, 0xc00f, ip6, 0, ip_dscp), 3149 3150 OFFLOAD(TCP_SPORT, 16, U16_MAX, tcp.source, 0, tcp_sport), 3151 OFFLOAD(TCP_DPORT, 16, U16_MAX, tcp.dest, 0, tcp_dport), 3152 /* in linux iphdr tcp_flags is 8 bits long */ 3153 OFFLOAD(TCP_FLAGS, 8, U8_MAX, tcp.ack_seq, 5, tcp_flags), 3154 3155 OFFLOAD(UDP_SPORT, 16, U16_MAX, udp.source, 0, udp_sport), 3156 OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest, 0, udp_dport), 3157 }; 3158 3159 static unsigned long mask_to_le(unsigned long mask, int size) 3160 { 3161 __be32 mask_be32; 3162 __be16 mask_be16; 3163 3164 if (size == 32) { 3165 mask_be32 = (__force __be32)(mask); 3166 mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32)); 3167 } else if (size == 16) { 3168 mask_be32 = (__force __be32)(mask); 3169 mask_be16 = *(__be16 *)&mask_be32; 3170 mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16)); 3171 } 3172 3173 return mask; 3174 } 3175 3176 static int offload_pedit_fields(struct mlx5e_priv *priv, 3177 int namespace, 3178 struct mlx5e_tc_flow_parse_attr *parse_attr, 3179 u32 *action_flags, 3180 struct netlink_ext_ack *extack) 3181 { 3182 struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals; 3183 struct pedit_headers_action *hdrs = parse_attr->hdrs; 3184 void *headers_c, *headers_v, *action, *vals_p; 3185 u32 *s_masks_p, *a_masks_p, s_mask, a_mask; 3186 struct mlx5e_tc_mod_hdr_acts *mod_acts; 3187 unsigned long mask, field_mask; 3188 int i, first, last, next_z; 3189 struct mlx5_fields *f; 3190 u8 cmd; 3191 3192 mod_acts = &parse_attr->mod_hdr_acts; 3193 headers_c = mlx5e_get_match_headers_criteria(*action_flags, &parse_attr->spec); 3194 headers_v = mlx5e_get_match_headers_value(*action_flags, &parse_attr->spec); 3195 3196 set_masks = &hdrs[0].masks; 3197 add_masks = &hdrs[1].masks; 3198 set_vals = &hdrs[0].vals; 3199 add_vals = &hdrs[1].vals; 3200 3201 for (i = 0; i < ARRAY_SIZE(fields); i++) { 3202 bool skip; 3203 3204 f = &fields[i]; 3205 /* avoid seeing bits set from previous iterations */ 3206 s_mask = 0; 3207 a_mask = 0; 3208 3209 s_masks_p = (void *)set_masks + f->offset; 3210 a_masks_p = (void *)add_masks + f->offset; 3211 3212 s_mask = *s_masks_p & f->field_mask; 3213 a_mask = *a_masks_p & f->field_mask; 3214 3215 if (!s_mask && !a_mask) /* nothing to offload here */ 3216 continue; 3217 3218 if (s_mask && a_mask) { 3219 NL_SET_ERR_MSG_MOD(extack, 3220 "can't set and add to the same HW field"); 3221 netdev_warn(priv->netdev, 3222 "mlx5: can't set and add to the same HW field (%x)\n", 3223 f->field); 3224 return -EOPNOTSUPP; 3225 } 3226 3227 skip = false; 3228 if (s_mask) { 3229 void *match_mask = headers_c + f->match_offset; 3230 void *match_val = headers_v + f->match_offset; 3231 3232 cmd = MLX5_ACTION_TYPE_SET; 3233 mask = s_mask; 3234 vals_p = (void *)set_vals + f->offset; 3235 /* don't rewrite if we have a match on the same value */ 3236 if (cmp_val_mask(vals_p, s_masks_p, match_val, 3237 match_mask, f->field_bsize)) 3238 skip = true; 3239 /* clear to denote we consumed this field */ 3240 *s_masks_p &= ~f->field_mask; 3241 } else { 3242 cmd = MLX5_ACTION_TYPE_ADD; 3243 mask = a_mask; 3244 vals_p = (void *)add_vals + f->offset; 3245 /* add 0 is no change */ 3246 if ((*(u32 *)vals_p & f->field_mask) == 0) 
3247 skip = true; 3248 /* clear to denote we consumed this field */ 3249 *a_masks_p &= ~f->field_mask; 3250 } 3251 if (skip) 3252 continue; 3253 3254 mask = mask_to_le(mask, f->field_bsize); 3255 3256 first = find_first_bit(&mask, f->field_bsize); 3257 next_z = find_next_zero_bit(&mask, f->field_bsize, first); 3258 last = find_last_bit(&mask, f->field_bsize); 3259 if (first < next_z && next_z < last) { 3260 NL_SET_ERR_MSG_MOD(extack, 3261 "rewrite of few sub-fields isn't supported"); 3262 netdev_warn(priv->netdev, 3263 "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n", 3264 mask); 3265 return -EOPNOTSUPP; 3266 } 3267 3268 action = mlx5e_mod_hdr_alloc(priv->mdev, namespace, mod_acts); 3269 if (IS_ERR(action)) { 3270 NL_SET_ERR_MSG_MOD(extack, 3271 "too many pedit actions, can't offload"); 3272 mlx5_core_warn(priv->mdev, 3273 "mlx5: parsed %d pedit actions, can't do more\n", 3274 mod_acts->num_actions); 3275 return PTR_ERR(action); 3276 } 3277 3278 MLX5_SET(set_action_in, action, action_type, cmd); 3279 MLX5_SET(set_action_in, action, field, f->field); 3280 3281 if (cmd == MLX5_ACTION_TYPE_SET) { 3282 int start; 3283 3284 field_mask = mask_to_le(f->field_mask, f->field_bsize); 3285 3286 /* if field is bit sized it can start not from first bit */ 3287 start = find_first_bit(&field_mask, f->field_bsize); 3288 3289 MLX5_SET(set_action_in, action, offset, first - start); 3290 /* length is num of bits to be written, zero means length of 32 */ 3291 MLX5_SET(set_action_in, action, length, (last - first + 1)); 3292 } 3293 3294 if (f->field_bsize == 32) 3295 MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p) >> first); 3296 else if (f->field_bsize == 16) 3297 MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p) >> first); 3298 else if (f->field_bsize == 8) 3299 MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first); 3300 3301 ++mod_acts->num_actions; 3302 } 3303 3304 return 0; 3305 } 3306 3307 static const struct pedit_headers zero_masks = {}; 3308 3309 static int verify_offload_pedit_fields(struct mlx5e_priv *priv, 3310 struct mlx5e_tc_flow_parse_attr *parse_attr, 3311 struct netlink_ext_ack *extack) 3312 { 3313 struct pedit_headers *cmd_masks; 3314 u8 cmd; 3315 3316 for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) { 3317 cmd_masks = &parse_attr->hdrs[cmd].masks; 3318 if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) { 3319 NL_SET_ERR_MSG_MOD(extack, "attempt to offload an unsupported field"); 3320 netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd); 3321 print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS, 3322 16, 1, cmd_masks, sizeof(zero_masks), true); 3323 return -EOPNOTSUPP; 3324 } 3325 } 3326 3327 return 0; 3328 } 3329 3330 static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace, 3331 struct mlx5e_tc_flow_parse_attr *parse_attr, 3332 u32 *action_flags, 3333 struct netlink_ext_ack *extack) 3334 { 3335 int err; 3336 3337 err = offload_pedit_fields(priv, namespace, parse_attr, action_flags, extack); 3338 if (err) 3339 goto out_dealloc_parsed_actions; 3340 3341 err = verify_offload_pedit_fields(priv, parse_attr, extack); 3342 if (err) 3343 goto out_dealloc_parsed_actions; 3344 3345 return 0; 3346 3347 out_dealloc_parsed_actions: 3348 mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts); 3349 return err; 3350 } 3351 3352 struct ip_ttl_word { 3353 __u8 ttl; 3354 __u8 protocol; 3355 __sum16 check; 3356 }; 3357 3358 struct ipv6_hoplimit_word { 3359 __be16 payload_len; 3360 __u8 nexthdr; 3361 __u8 hop_limit; 
3362 }; 3363 3364 static bool 3365 is_flow_action_modify_ip_header(struct flow_action *flow_action) 3366 { 3367 const struct flow_action_entry *act; 3368 u32 mask, offset; 3369 u8 htype; 3370 int i; 3371 3372 /* For IPv4 & IPv6 headers, check the 4-byte word that holds 3373 * ttl/hop_limit to determine whether any field other than 3374 * ttl & hop_limit is modified. 3375 */ 3376 flow_action_for_each(i, act, flow_action) { 3377 if (act->id != FLOW_ACTION_MANGLE && 3378 act->id != FLOW_ACTION_ADD) 3379 continue; 3380 3381 htype = act->mangle.htype; 3382 offset = act->mangle.offset; 3383 mask = ~act->mangle.mask; 3384 3385 if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4) { 3386 struct ip_ttl_word *ttl_word = 3387 (struct ip_ttl_word *)&mask; 3388 3389 if (offset != offsetof(struct iphdr, ttl) || 3390 ttl_word->protocol || 3391 ttl_word->check) 3392 return true; 3393 } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) { 3394 struct ipv6_hoplimit_word *hoplimit_word = 3395 (struct ipv6_hoplimit_word *)&mask; 3396 3397 if (offset != offsetof(struct ipv6hdr, payload_len) || 3398 hoplimit_word->payload_len || 3399 hoplimit_word->nexthdr) 3400 return true; 3401 } 3402 } 3403 3404 return false; 3405 } 3406 3407 static bool modify_header_match_supported(struct mlx5e_priv *priv, 3408 struct mlx5_flow_spec *spec, 3409 struct flow_action *flow_action, 3410 u32 actions, 3411 struct netlink_ext_ack *extack) 3412 { 3413 bool modify_ip_header; 3414 void *headers_c; 3415 void *headers_v; 3416 u16 ethertype; 3417 u8 ip_proto; 3418 3419 headers_c = mlx5e_get_match_headers_criteria(actions, spec); 3420 headers_v = mlx5e_get_match_headers_value(actions, spec); 3421 ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype); 3422 3423 /* for non-IP we only re-write MACs, so we're okay */ 3424 if (MLX5_GET(fte_match_set_lyr_2_4, headers_c, ip_version) == 0 && 3425 ethertype != ETH_P_IP && ethertype != ETH_P_IPV6) 3426 goto out_ok; 3427 3428 modify_ip_header = is_flow_action_modify_ip_header(flow_action); 3429 ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol); 3430 if (modify_ip_header && ip_proto != IPPROTO_TCP && 3431 ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) { 3432 NL_SET_ERR_MSG_MOD(extack, 3433 "can't offload re-write of non TCP/UDP/ICMP"); 3434 netdev_info(priv->netdev, "can't offload re-write of ip proto %d\n", 3435 ip_proto); 3436 return false; 3437 } 3438 3439 out_ok: 3440 return true; 3441 } 3442 3443 static bool 3444 actions_match_supported_fdb(struct mlx5e_priv *priv, 3445 struct mlx5e_tc_flow *flow, 3446 struct netlink_ext_ack *extack) 3447 { 3448 struct mlx5_esw_flow_attr *esw_attr = flow->attr->esw_attr; 3449 3450 if (esw_attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) { 3451 NL_SET_ERR_MSG_MOD(extack, 3452 "current firmware doesn't support split rule for port mirroring"); 3453 netdev_warn_once(priv->netdev, 3454 "current firmware doesn't support split rule for port mirroring\n"); 3455 return false; 3456 } 3457 3458 return true; 3459 } 3460 3461 static bool 3462 actions_match_supported(struct mlx5e_priv *priv, 3463 struct flow_action *flow_action, 3464 u32 actions, 3465 struct mlx5e_tc_flow_parse_attr *parse_attr, 3466 struct mlx5e_tc_flow *flow, 3467 struct netlink_ext_ack *extack) 3468 { 3469 if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR && 3470 !modify_header_match_supported(priv, &parse_attr->spec, flow_action, actions, 3471 extack)) 3472 return false; 3473 3474 if (mlx5e_is_eswitch_flow(flow) && 3475 !actions_match_supported_fdb(priv, flow, extack)) 3476 return false; 3477 3478 return true; 3479 }
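/*
 * A minimal, self-contained illustration (not used by the driver; the
 * helper name is made up): how a single IPv4 ttl pedit looks to
 * is_flow_action_modify_ip_header() above. pedit keeps the bits that
 * are set in act->mangle.mask, so ~mask selects the rewritten bits;
 * the action is "ttl only" when those bits stay inside the ttl byte
 * of the 4-byte word at offsetof(struct iphdr, ttl).
 */
static inline bool example_ipv4_pedit_is_ttl_only(const struct flow_action_entry *act)
{
	u32 modified = ~act->mangle.mask;
	struct ip_ttl_word *ttl_word = (struct ip_ttl_word *)&modified;

	return act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4 &&
	       act->mangle.offset == offsetof(struct iphdr, ttl) &&
	       !ttl_word->protocol && !ttl_word->check;
}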
3480 3481 static bool same_port_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv) 3482 { 3483 return priv->mdev == peer_priv->mdev; 3484 } 3485 3486 bool mlx5e_same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv) 3487 { 3488 struct mlx5_core_dev *fmdev, *pmdev; 3489 u64 fsystem_guid, psystem_guid; 3490 3491 fmdev = priv->mdev; 3492 pmdev = peer_priv->mdev; 3493 3494 fsystem_guid = mlx5_query_nic_system_image_guid(fmdev); 3495 psystem_guid = mlx5_query_nic_system_image_guid(pmdev); 3496 3497 return (fsystem_guid == psystem_guid); 3498 } 3499 3500 static int 3501 actions_prepare_mod_hdr_actions(struct mlx5e_priv *priv, 3502 struct mlx5e_tc_flow *flow, 3503 struct mlx5_flow_attr *attr, 3504 struct netlink_ext_ack *extack) 3505 { 3506 struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr; 3507 struct pedit_headers_action *hdrs = parse_attr->hdrs; 3508 enum mlx5_flow_namespace_type ns_type; 3509 int err; 3510 3511 if (!hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits && 3512 !hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) 3513 return 0; 3514 3515 ns_type = mlx5e_get_flow_namespace(flow); 3516 3517 err = alloc_tc_pedit_action(priv, ns_type, parse_attr, &attr->action, extack); 3518 if (err) 3519 return err; 3520 3521 if (parse_attr->mod_hdr_acts.num_actions > 0) 3522 return 0; 3523 3524 /* In case all pedit actions are skipped, remove the MOD_HDR flag. */ 3525 attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; 3526 mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts); 3527 3528 if (ns_type != MLX5_FLOW_NAMESPACE_FDB) 3529 return 0; 3530 3531 if (!((attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) || 3532 (attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH))) 3533 attr->esw_attr->split_count = 0; 3534 3535 return 0; 3536 } 3537 3538 static struct mlx5_flow_attr* 3539 mlx5e_clone_flow_attr_for_post_act(struct mlx5_flow_attr *attr, 3540 enum mlx5_flow_namespace_type ns_type) 3541 { 3542 struct mlx5e_tc_flow_parse_attr *parse_attr; 3543 u32 attr_sz = ns_to_attr_sz(ns_type); 3544 struct mlx5_flow_attr *attr2; 3545 3546 attr2 = mlx5_alloc_flow_attr(ns_type); 3547 parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL); 3548 if (!attr2 || !parse_attr) { 3549 kvfree(parse_attr); 3550 kfree(attr2); 3551 return NULL; 3552 } 3553 3554 memcpy(attr2, attr, attr_sz); 3555 INIT_LIST_HEAD(&attr2->list); 3556 parse_attr->filter_dev = attr->parse_attr->filter_dev; 3557 attr2->action = 0; 3558 attr2->counter = NULL; 3559 attr2->tc_act_cookies_count = 0; 3560 attr2->flags = 0; 3561 attr2->parse_attr = parse_attr; 3562 attr2->dest_chain = 0; 3563 attr2->dest_ft = NULL; 3564 attr2->act_id_restore_rule = NULL; 3565 memset(&attr2->ct_attr, 0, sizeof(attr2->ct_attr)); 3566 3567 if (ns_type == MLX5_FLOW_NAMESPACE_FDB) { 3568 attr2->esw_attr->out_count = 0; 3569 attr2->esw_attr->split_count = 0; 3570 } 3571 3572 attr2->branch_true = NULL; 3573 attr2->branch_false = NULL; 3574 attr2->jumping_attr = NULL; 3575 return attr2; 3576 } 3577 3578 struct mlx5_flow_attr * 3579 mlx5e_tc_get_encap_attr(struct mlx5e_tc_flow *flow) 3580 { 3581 struct mlx5_esw_flow_attr *esw_attr; 3582 struct mlx5_flow_attr *attr; 3583 int i; 3584 3585 list_for_each_entry(attr, &flow->attrs, list) { 3586 esw_attr = attr->esw_attr; 3587 for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) { 3588 if (esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP) 3589 return attr; 3590 } 3591 } 3592 3593 return NULL; 3594 } 3595 3596 void 3597 mlx5e_tc_unoffload_flow_post_acts(struct mlx5e_tc_flow *flow) 3598 { 3599 struct mlx5e_post_act *post_act = 
get_post_action(flow->priv); 3600 struct mlx5_flow_attr *attr; 3601 3602 list_for_each_entry(attr, &flow->attrs, list) { 3603 if (list_is_last(&attr->list, &flow->attrs)) 3604 break; 3605 3606 mlx5e_tc_post_act_unoffload(post_act, attr->post_act_handle); 3607 } 3608 } 3609 3610 static void 3611 free_flow_post_acts(struct mlx5e_tc_flow *flow) 3612 { 3613 struct mlx5_flow_attr *attr, *tmp; 3614 3615 list_for_each_entry_safe(attr, tmp, &flow->attrs, list) { 3616 if (list_is_last(&attr->list, &flow->attrs)) 3617 break; 3618 3619 mlx5_free_flow_attr_actions(flow, attr); 3620 3621 list_del(&attr->list); 3622 kvfree(attr->parse_attr); 3623 kfree(attr); 3624 } 3625 } 3626 3627 int 3628 mlx5e_tc_offload_flow_post_acts(struct mlx5e_tc_flow *flow) 3629 { 3630 struct mlx5e_post_act *post_act = get_post_action(flow->priv); 3631 struct mlx5_flow_attr *attr; 3632 int err = 0; 3633 3634 list_for_each_entry(attr, &flow->attrs, list) { 3635 if (list_is_last(&attr->list, &flow->attrs)) 3636 break; 3637 3638 err = mlx5e_tc_post_act_offload(post_act, attr->post_act_handle); 3639 if (err) 3640 break; 3641 } 3642 3643 return err; 3644 } 3645 3646 /* TC filter rule HW translation: 3647 * 3648 * +---------------------+ 3649 * + ft prio (tc chain) + 3650 * + original match + 3651 * +---------------------+ 3652 * | 3653 * | if multi table action 3654 * | 3655 * v 3656 * +---------------------+ 3657 * + post act ft |<----. 3658 * + match fte id | | split on multi table action 3659 * + do actions |-----' 3660 * +---------------------+ 3661 * | 3662 * | 3663 * v 3664 * Do rest of the actions after last multi table action. 3665 */ 3666 static int 3667 alloc_flow_post_acts(struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack) 3668 { 3669 struct mlx5e_post_act *post_act = get_post_action(flow->priv); 3670 struct mlx5_flow_attr *attr, *next_attr = NULL; 3671 struct mlx5e_post_act_handle *handle; 3672 int err; 3673 3674 /* This is going in reverse order as needed. 3675 * The first entry is the last attribute. 3676 */ 3677 list_for_each_entry(attr, &flow->attrs, list) { 3678 if (!next_attr) { 3679 /* Set counter action on last post act rule. */ 3680 attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; 3681 } 3682 3683 if (next_attr && !(attr->flags & MLX5_ATTR_FLAG_TERMINATING)) { 3684 err = mlx5e_tc_act_set_next_post_act(flow, attr, next_attr); 3685 if (err) 3686 goto out_free; 3687 } 3688 3689 /* Don't add post_act rule for first attr (last in the list). 3690 * It's being handled by the caller. 
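 *
 * Illustrative walk-through: for a filter split into segments
 * s1 -> s2 -> s3, flow->attrs holds [s3, s2, s1] (newest first).
 * s3 and s2 each get a post act table rule here, matched on the
 * fte_id that the preceding segment writes, and s3 (iterated first)
 * also carries the counter action. s1, reached last, is skipped and
 * offloaded by the caller into the original chain/prio table, from
 * where it forwards into the post act table.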
3691 */ 3692 if (list_is_last(&attr->list, &flow->attrs)) 3693 break; 3694 3695 err = actions_prepare_mod_hdr_actions(flow->priv, flow, attr, extack); 3696 if (err) 3697 goto out_free; 3698 3699 err = post_process_attr(flow, attr, extack); 3700 if (err) 3701 goto out_free; 3702 3703 handle = mlx5e_tc_post_act_add(post_act, attr); 3704 if (IS_ERR(handle)) { 3705 err = PTR_ERR(handle); 3706 goto out_free; 3707 } 3708 3709 attr->post_act_handle = handle; 3710 3711 if (attr->jumping_attr) { 3712 err = mlx5e_tc_act_set_next_post_act(flow, attr->jumping_attr, attr); 3713 if (err) 3714 goto out_free; 3715 } 3716 3717 next_attr = attr; 3718 } 3719 3720 if (flow_flag_test(flow, SLOW)) 3721 goto out; 3722 3723 err = mlx5e_tc_offload_flow_post_acts(flow); 3724 if (err) 3725 goto out_free; 3726 3727 out: 3728 return 0; 3729 3730 out_free: 3731 free_flow_post_acts(flow); 3732 return err; 3733 } 3734 3735 static int 3736 alloc_branch_attr(struct mlx5e_tc_flow *flow, 3737 struct mlx5e_tc_act_branch_ctrl *cond, 3738 struct mlx5_flow_attr **cond_attr, 3739 u32 *jump_count, 3740 struct netlink_ext_ack *extack) 3741 { 3742 struct mlx5_flow_attr *attr; 3743 int err = 0; 3744 3745 *cond_attr = mlx5e_clone_flow_attr_for_post_act(flow->attr, 3746 mlx5e_get_flow_namespace(flow)); 3747 if (!(*cond_attr)) 3748 return -ENOMEM; 3749 3750 attr = *cond_attr; 3751 3752 switch (cond->act_id) { 3753 case FLOW_ACTION_DROP: 3754 attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP; 3755 break; 3756 case FLOW_ACTION_ACCEPT: 3757 case FLOW_ACTION_PIPE: 3758 attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 3759 attr->dest_ft = mlx5e_tc_post_act_get_ft(get_post_action(flow->priv)); 3760 break; 3761 case FLOW_ACTION_JUMP: 3762 if (*jump_count) { 3763 NL_SET_ERR_MSG_MOD(extack, "Cannot offload flows with nested jumps"); 3764 err = -EOPNOTSUPP; 3765 goto out_err; 3766 } 3767 *jump_count = cond->extval; 3768 attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 3769 attr->dest_ft = mlx5e_tc_post_act_get_ft(get_post_action(flow->priv)); 3770 break; 3771 default: 3772 err = -EOPNOTSUPP; 3773 goto out_err; 3774 } 3775 3776 return err; 3777 out_err: 3778 kfree(*cond_attr); 3779 *cond_attr = NULL; 3780 return err; 3781 } 3782 3783 static void 3784 dec_jump_count(struct flow_action_entry *act, struct mlx5e_tc_act *tc_act, 3785 struct mlx5_flow_attr *attr, struct mlx5e_priv *priv, 3786 struct mlx5e_tc_jump_state *jump_state) 3787 { 3788 if (!jump_state->jump_count) 3789 return; 3790 3791 /* Single tc action can instantiate multiple offload actions (e.g. 
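			/*
			 * (Illustrative note) tc_act is the per-namespace
			 * parser looked up for this action id; an action
			 * with no parser registered for the flow's
			 * namespace fails the whole filter here instead
			 * of being silently skipped.
			 */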
static void
dec_jump_count(struct flow_action_entry *act, struct mlx5e_tc_act *tc_act,
	       struct mlx5_flow_attr *attr, struct mlx5e_priv *priv,
	       struct mlx5e_tc_jump_state *jump_state)
{
	if (!jump_state->jump_count)
		return;

	/* A single tc action can instantiate multiple offload actions (e.g.
	 * pedit). Only jump over whole tc actions.
	 */
	if (act->id == jump_state->last_id && act->hw_index == jump_state->last_index)
		return;

	jump_state->last_id = act->id;
	jump_state->last_index = act->hw_index;

	/* nothing to do for intermediate actions */
	if (--jump_state->jump_count > 1)
		return;

	if (jump_state->jump_count == 1) { /* last action in the jump action list */

		/* create a new attribute after this action */
		jump_state->jump_target = true;

		if (tc_act->is_terminating_action) { /* the branch ends here */
			attr->flags |= MLX5_ATTR_FLAG_TERMINATING;
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
		} else { /* the branch continues executing the rest of the actions */
			struct mlx5e_post_act *post_act;

			attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			post_act = get_post_action(priv);
			attr->dest_ft = mlx5e_tc_post_act_get_ft(post_act);
		}
	} else if (jump_state->jump_count == 0) { /* first attr after the jump action list */
		/* This is the post action for the jumping attribute (either red or green).
		 * Use the stored jumping_attr to set the post act id on the jumping attribute.
		 */
		attr->jumping_attr = jump_state->jumping_attr;
	}
}

static int
parse_branch_ctrl(struct flow_action_entry *act, struct mlx5e_tc_act *tc_act,
		  struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr,
		  struct mlx5e_tc_jump_state *jump_state,
		  struct netlink_ext_ack *extack)
{
	struct mlx5e_tc_act_branch_ctrl cond_true, cond_false;
	u32 jump_count = jump_state->jump_count;
	int err;

	if (!tc_act->get_branch_ctrl)
		return 0;

	tc_act->get_branch_ctrl(act, &cond_true, &cond_false);

	err = alloc_branch_attr(flow, &cond_true,
				&attr->branch_true, &jump_count, extack);
	if (err)
		goto out_err;

	if (jump_count)
		jump_state->jumping_attr = attr->branch_true;

	err = alloc_branch_attr(flow, &cond_false,
				&attr->branch_false, &jump_count, extack);
	if (err)
		goto err_branch_false;

	if (jump_count && !jump_state->jumping_attr)
		jump_state->jumping_attr = attr->branch_false;

	jump_state->jump_count = jump_count;

	/* branching action requires its own counter */
	attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
	flow_flag_set(flow, USE_ACT_STATS);

	return 0;

err_branch_false:
	free_branch_attr(flow, attr->branch_true);
out_err:
	return err;
}
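
/* Editor's note, illustrative and not from the original source: for a
 * metering police action, get_branch_ctrl() typically reports the conform
 * (green) verdict as cond_true and the exceed (red) verdict as cond_false,
 * e.g. pipe/drop. Each verdict gets its own branch attr via
 * alloc_branch_attr() above, and a jump verdict arms jump_state so that
 * dec_jump_count() can later close the branch scope.
 */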
static int
parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state,
		 struct flow_action *flow_action)
{
	struct netlink_ext_ack *extack = parse_state->extack;
	struct mlx5e_tc_flow *flow = parse_state->flow;
	struct mlx5e_tc_jump_state jump_state = {};
	struct mlx5_flow_attr *attr = flow->attr;
	enum mlx5_flow_namespace_type ns_type;
	struct mlx5e_priv *priv = flow->priv;
	struct mlx5_flow_attr *prev_attr;
	struct flow_action_entry *act;
	struct mlx5e_tc_act *tc_act;
	int err, i, i_split = 0;
	bool is_missable;

	ns_type = mlx5e_get_flow_namespace(flow);
	list_add(&attr->list, &flow->attrs);

	flow_action_for_each(i, act, flow_action) {
		jump_state.jump_target = false;
		is_missable = false;
		prev_attr = attr;

		tc_act = mlx5e_tc_act_get(act->id, ns_type);
		if (!tc_act) {
			NL_SET_ERR_MSG_MOD(extack, "Not implemented offload action");
			err = -EOPNOTSUPP;
			goto out_free_post_acts;
		}

		if (tc_act->can_offload && !tc_act->can_offload(parse_state, act, i, attr)) {
			err = -EOPNOTSUPP;
			goto out_free_post_acts;
		}

		err = tc_act->parse_action(parse_state, act, priv, attr);
		if (err)
			goto out_free_post_acts;

		dec_jump_count(act, tc_act, attr, priv, &jump_state);

		err = parse_branch_ctrl(act, tc_act, flow, attr, &jump_state, extack);
		if (err)
			goto out_free_post_acts;

		parse_state->actions |= attr->action;

		/* Split attr for multi table act if not the last act. */
		if (jump_state.jump_target ||
		    (tc_act->is_multi_table_act &&
		     tc_act->is_multi_table_act(priv, act, attr) &&
		     i < flow_action->num_entries - 1)) {
			is_missable = tc_act->is_missable ? tc_act->is_missable(act) : false;

			err = mlx5e_tc_act_post_parse(parse_state, flow_action, i_split, i, attr,
						      ns_type);
			if (err)
				goto out_free_post_acts;

			attr = mlx5e_clone_flow_attr_for_post_act(flow->attr, ns_type);
			if (!attr) {
				err = -ENOMEM;
				goto out_free_post_acts;
			}

			i_split = i + 1;
			parse_state->if_count = 0;
			list_add(&attr->list, &flow->attrs);
		}

		if (is_missable) {
			/* Add counter to prev, and assign act to new (next) attr */
			prev_attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
			flow_flag_set(flow, USE_ACT_STATS);

			attr->tc_act_cookies[attr->tc_act_cookies_count++] = act->cookie;
		} else if (!tc_act->stats_action) {
			prev_attr->tc_act_cookies[prev_attr->tc_act_cookies_count++] = act->cookie;
		}
	}

	err = mlx5e_tc_act_post_parse(parse_state, flow_action, i_split, i, attr, ns_type);
	if (err)
		goto out_free_post_acts;

	err = alloc_flow_post_acts(flow, extack);
	if (err)
		goto out_free_post_acts;

	return 0;

out_free_post_acts:
	free_flow_post_acts(flow);

	return err;
}
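
/* Editor's note, illustrative and not from the original source: i_split
 * above tracks the first action index that still belongs to the current
 * attr, so each mlx5e_tc_act_post_parse() call post-processes exactly the
 * sub-range [i_split, i] of actions that was parsed into that attr before
 * the split.
 */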
static int
flow_action_supported(struct flow_action *flow_action,
		      struct netlink_ext_ack *extack)
{
	if (!flow_action_has_entries(flow_action)) {
		NL_SET_ERR_MSG_MOD(extack, "Flow action doesn't have any entries");
		return -EINVAL;
	}

	if (!flow_action_hw_stats_check(flow_action, extack,
					FLOW_ACTION_HW_STATS_DELAYED_BIT)) {
		NL_SET_ERR_MSG_MOD(extack, "Flow action HW stats type is not supported");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int
parse_tc_nic_actions(struct mlx5e_priv *priv,
		     struct flow_action *flow_action,
		     struct mlx5e_tc_flow *flow,
		     struct netlink_ext_ack *extack)
{
	struct mlx5e_tc_act_parse_state *parse_state;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5_flow_attr *attr = flow->attr;
	int err;

	err = flow_action_supported(flow_action, extack);
	if (err)
		return err;

	attr->nic_attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
	parse_attr = attr->parse_attr;
	parse_state = &parse_attr->parse_state;
	mlx5e_tc_act_init_parse_state(parse_state, flow, flow_action, extack);
	parse_state->ct_priv = get_ct_priv(priv);

	err = parse_tc_actions(parse_state, flow_action);
	if (err)
		return err;

	err = actions_prepare_mod_hdr_actions(priv, flow, attr, extack);
	if (err)
		return err;

	err = verify_attr_actions(attr->action, extack);
	if (err)
		return err;

	if (!actions_match_supported(priv, flow_action, parse_state->actions,
				     parse_attr, flow, extack))
		return -EOPNOTSUPP;

	return 0;
}

static bool is_merged_eswitch_vfs(struct mlx5e_priv *priv,
				  struct net_device *peer_netdev)
{
	struct mlx5e_priv *peer_priv;

	peer_priv = netdev_priv(peer_netdev);

	return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) &&
		mlx5e_eswitch_vf_rep(priv->netdev) &&
		mlx5e_eswitch_vf_rep(peer_netdev) &&
		mlx5e_same_hw_devs(priv, peer_priv));
}

static bool same_hw_reps(struct mlx5e_priv *priv,
			 struct net_device *peer_netdev)
{
	struct mlx5e_priv *peer_priv;

	peer_priv = netdev_priv(peer_netdev);

	return mlx5e_eswitch_rep(priv->netdev) &&
	       mlx5e_eswitch_rep(peer_netdev) &&
	       mlx5e_same_hw_devs(priv, peer_priv);
}

static bool is_lag_dev(struct mlx5e_priv *priv,
		       struct net_device *peer_netdev)
{
	return ((mlx5_lag_is_sriov(priv->mdev) ||
		 mlx5_lag_is_multipath(priv->mdev)) &&
		same_hw_reps(priv, peer_netdev));
}

static bool is_multiport_eligible(struct mlx5e_priv *priv, struct net_device *out_dev)
{
	return same_hw_reps(priv, out_dev) && mlx5_lag_is_mpesw(priv->mdev);
}

bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
				    struct net_device *out_dev)
{
	if (is_merged_eswitch_vfs(priv, out_dev))
		return true;

	if (is_multiport_eligible(priv, out_dev))
		return true;

	if (is_lag_dev(priv, out_dev))
		return true;

	return mlx5e_eswitch_rep(out_dev) &&
	       same_port_devs(priv, netdev_priv(out_dev));
}

int mlx5e_set_fwd_to_int_port_actions(struct mlx5e_priv *priv,
				      struct mlx5_flow_attr *attr,
				      int ifindex,
				      enum mlx5e_tc_int_port_type type,
				      u32 *action,
				      int out_index)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5e_tc_int_port_priv *int_port_priv;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_int_port *dest_int_port;
	int err;

	parse_attr = attr->parse_attr;
	int_port_priv = mlx5e_get_int_port_priv(priv);

	dest_int_port = mlx5e_tc_int_port_get(int_port_priv, ifindex, type);
	if (IS_ERR(dest_int_port))
		return PTR_ERR(dest_int_port);

	err = mlx5e_tc_match_to_reg_set(priv->mdev, &parse_attr->mod_hdr_acts,
					MLX5_FLOW_NAMESPACE_FDB, VPORT_TO_REG,
					mlx5e_tc_int_port_get_metadata(dest_int_port));
	if (err) {
		mlx5e_tc_int_port_put(int_port_priv, dest_int_port);
		return err;
	}

	*action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;

	esw_attr->dest_int_port = dest_int_port;
	esw_attr->dests[out_index].flags |= MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE;
	esw_attr->split_count = out_index;

	/* Forward to root fdb for matching against the new source vport */
	attr->dest_chain = 0;

	return 0;
}

static int
parse_tc_fdb_actions(struct mlx5e_priv *priv,
		     struct flow_action *flow_action,
		     struct mlx5e_tc_flow *flow,
		     struct netlink_ext_ack *extack)
{
	struct mlx5e_tc_act_parse_state *parse_state;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5_esw_flow_attr *esw_attr;
	struct net_device *filter_dev;
	int err;

	err = flow_action_supported(flow_action, extack);
	if (err)
		return err;

	esw_attr = attr->esw_attr;
	parse_attr = attr->parse_attr;
	filter_dev = parse_attr->filter_dev;
	parse_state = &parse_attr->parse_state;
	mlx5e_tc_act_init_parse_state(parse_state, flow, flow_action, extack);
	parse_state->ct_priv = get_ct_priv(priv);

	err = parse_tc_actions(parse_state, flow_action);
	if (err)
		return err;

	/* Forward to/from internal port can only have 1 dest */
	if ((netif_is_ovs_master(filter_dev) || esw_attr->dest_int_port) &&
	    esw_attr->out_count > 1) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Rules with internal port can have only one destination");
		return -EOPNOTSUPP;
	}

	/* Forward from tunnel/internal port to internal port is not supported */
	if ((mlx5e_get_tc_tun(filter_dev) || netif_is_ovs_master(filter_dev)) &&
	    esw_attr->dest_int_port) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Forwarding from tunnel/internal port to internal port is not supported");
		return -EOPNOTSUPP;
	}

	err = actions_prepare_mod_hdr_actions(priv, flow, attr, extack);
	if (err)
		return err;

	if (!actions_match_supported(priv, flow_action, parse_state->actions,
				     parse_attr, flow, extack))
		return -EOPNOTSUPP;

	return 0;
}

static void get_flags(int flags, unsigned long *flow_flags)
{
	unsigned long __flow_flags = 0;

	if (flags & MLX5_TC_FLAG(INGRESS))
		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_INGRESS);
	if (flags & MLX5_TC_FLAG(EGRESS))
		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_EGRESS);

	if (flags & MLX5_TC_FLAG(ESW_OFFLOAD))
		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
	if (flags & MLX5_TC_FLAG(NIC_OFFLOAD))
		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
	if (flags & MLX5_TC_FLAG(FT_OFFLOAD))
		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_FT);

	*flow_flags = __flow_flags;
}

static const struct rhashtable_params tc_ht_params = {
	.head_offset = offsetof(struct mlx5e_tc_flow, node),
	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};

static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv,
				    unsigned long flags)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5e_rep_priv *rpriv;

	if (flags & MLX5_TC_FLAG(ESW_OFFLOAD)) {
		rpriv = priv->ppriv;
		return &rpriv->tc_ht;
	} else /* NIC offload */
		return &tc->ht;
}

static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
{
	struct mlx5_esw_flow_attr *esw_attr = flow->attr->esw_attr;
	struct mlx5_flow_attr *attr = flow->attr;
	bool is_rep_ingress = esw_attr->in_rep->vport != MLX5_VPORT_UPLINK &&
		flow_flag_test(flow, INGRESS);
	bool act_is_encap = !!(attr->action &
			       MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT);
	bool esw_paired = mlx5_devcom_comp_is_ready(esw_attr->in_mdev->priv.eswitch->devcom);

	if (!esw_paired)
		return false;

	if ((mlx5_lag_is_sriov(esw_attr->in_mdev) ||
	     mlx5_lag_is_multipath(esw_attr->in_mdev)) &&
	    (is_rep_ingress || act_is_encap))
		return true;

	if (mlx5_lag_is_mpesw(esw_attr->in_mdev))
		return true;

	return false;
}
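
/* Editor's note, illustrative and not from the original source:
 * mlx5_alloc_flow_attr() below allocates the generic attr and a namespace
 * specific trailer in one allocation, roughly
 *
 *     | struct mlx5_flow_attr | esw_attr or nic_attr |
 *
 * which is why the size of the FDB or NIC specific struct is added to the
 * kzalloc() size and later reached through attr->esw_attr / attr->nic_attr.
 */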
struct mlx5_flow_attr *
mlx5_alloc_flow_attr(enum mlx5_flow_namespace_type type)
{
	u32 ex_attr_size = (type == MLX5_FLOW_NAMESPACE_FDB) ?
				sizeof(struct mlx5_esw_flow_attr) :
				sizeof(struct mlx5_nic_flow_attr);
	struct mlx5_flow_attr *attr;

	attr = kzalloc(sizeof(*attr) + ex_attr_size, GFP_KERNEL);
	if (!attr)
		return attr;

	INIT_LIST_HEAD(&attr->list);
	return attr;
}

static void
mlx5_free_flow_attr_actions(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr)
{
	struct mlx5_core_dev *counter_dev = get_flow_counter_dev(flow);
	struct mlx5_esw_flow_attr *esw_attr;

	if (!attr)
		return;

	if (attr->post_act_handle)
		mlx5e_tc_post_act_del(get_post_action(flow->priv), attr->post_act_handle);

	mlx5e_tc_tun_encap_dests_unset(flow->priv, flow, attr);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
		mlx5_fc_destroy(counter_dev, attr->counter);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
		mlx5e_tc_detach_mod_hdr(flow->priv, flow, attr);
	}

	if (mlx5e_is_eswitch_flow(flow)) {
		esw_attr = attr->esw_attr;

		if (esw_attr->int_port)
			mlx5e_tc_int_port_put(mlx5e_get_int_port_priv(flow->priv),
					      esw_attr->int_port);

		if (esw_attr->dest_int_port)
			mlx5e_tc_int_port_put(mlx5e_get_int_port_priv(flow->priv),
					      esw_attr->dest_int_port);
	}

	mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), attr);

	free_branch_attr(flow, attr->branch_true);
	free_branch_attr(flow, attr->branch_false);
}

static int
mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
		 struct flow_cls_offload *f, unsigned long flow_flags,
		 struct mlx5e_tc_flow_parse_attr **__parse_attr,
		 struct mlx5e_tc_flow **__flow)
{
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5_flow_attr *attr;
	struct mlx5e_tc_flow *flow;
	int err = -ENOMEM;
	int out_index;

	flow = kzalloc(sizeof(*flow), GFP_KERNEL);
	parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
	if (!parse_attr || !flow)
		goto err_free;

	flow->flags = flow_flags;
	flow->cookie = f->cookie;
	flow->priv = priv;

	attr = mlx5_alloc_flow_attr(mlx5e_get_flow_namespace(flow));
	if (!attr)
		goto err_free;

	flow->attr = attr;

	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
		INIT_LIST_HEAD(&flow->encaps[out_index].list);
	INIT_LIST_HEAD(&flow->hairpin);
	INIT_LIST_HEAD(&flow->l3_to_l2_reformat);
	INIT_LIST_HEAD(&flow->attrs);
	INIT_LIST_HEAD(&flow->peer_flows);
	refcount_set(&flow->refcnt, 1);
	init_completion(&flow->init_done);
	init_completion(&flow->del_hw_done);

	*__flow = flow;
	*__parse_attr = parse_attr;

	return 0;

err_free:
	kfree(flow);
	kvfree(parse_attr);
	return err;
}

static void
mlx5e_flow_attr_init(struct mlx5_flow_attr *attr,
		     struct mlx5e_tc_flow_parse_attr *parse_attr,
		     struct flow_cls_offload *f)
{
	attr->parse_attr = parse_attr;
	attr->chain = f->common.chain_index;
	attr->prio = f->common.prio;
}

static void
mlx5e_flow_esw_attr_init(struct mlx5_flow_attr *attr,
			 struct mlx5e_priv *priv,
			 struct mlx5e_tc_flow_parse_attr *parse_attr,
			 struct flow_cls_offload *f,
			 struct mlx5_eswitch_rep *in_rep,
			 struct mlx5_core_dev *in_mdev)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	mlx5e_flow_attr_init(attr, parse_attr, f);

	esw_attr->in_rep = in_rep;
	esw_attr->in_mdev = in_mdev;

	if (MLX5_CAP_ESW(esw->dev, counter_eswitch_affinity) ==
	    MLX5_COUNTER_SOURCE_ESWITCH)
		esw_attr->counter_dev = in_mdev;
	else
		esw_attr->counter_dev = priv->mdev;
}

static struct mlx5e_tc_flow *
__mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
		     struct flow_cls_offload *f,
		     unsigned long flow_flags,
		     struct net_device *filter_dev,
		     struct mlx5_eswitch_rep *in_rep,
		     struct mlx5_core_dev *in_mdev)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_flow *flow;
	int attr_size, err;

	flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
	attr_size = sizeof(struct mlx5_esw_flow_attr);
	err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
			       &parse_attr, &flow);
	if (err)
		goto out;

	parse_attr->filter_dev = filter_dev;
	mlx5e_flow_esw_attr_init(flow->attr,
				 priv, parse_attr,
				 f, in_rep, in_mdev);

	err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
			       f, filter_dev);
	if (err)
		goto err_free;

	/* actions validation depends on parsing the ct matches first */
	err = mlx5_tc_ct_match_add(get_ct_priv(priv), &parse_attr->spec, f,
				   &flow->attr->ct_attr, extack);
	if (err)
		goto err_free;

	err = parse_tc_fdb_actions(priv, &rule->action, flow, extack);
	if (err)
		goto err_free;

	err = mlx5e_tc_add_fdb_flow(priv, flow, extack);
	complete_all(&flow->init_done);
	if (err) {
		if (!(err == -ENETUNREACH && mlx5_lag_is_multipath(in_mdev)))
			goto err_free;

		add_unready_flow(flow);
	}

	return flow;

err_free:
	mlx5e_flow_put(priv, flow);
out:
	return ERR_PTR(err);
}

static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f,
				      struct mlx5e_tc_flow *flow,
				      unsigned long flow_flags,
				      struct mlx5_eswitch *peer_esw)
{
	struct mlx5e_priv *priv = flow->priv, *peer_priv;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	int i = mlx5_get_dev_index(peer_esw->dev);
	struct mlx5e_rep_priv *peer_urpriv;
	struct mlx5e_tc_flow *peer_flow;
	struct mlx5_core_dev *in_mdev;
	int err = 0;

	peer_urpriv = mlx5_eswitch_get_uplink_priv(peer_esw, REP_ETH);
	peer_priv = netdev_priv(peer_urpriv->netdev);

	/* in_mdev is assigned the mdev from which the packet originated.
	 * So packets redirected to the uplink use the same mdev as the
	 * original flow, and packets redirected from the uplink use the
	 * peer mdev.
	 * Multiport eswitch is a special case where we keep the original
	 * mdev.
	 */
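	/* Editor's note, illustrative and not from the original source: with
	 * a two-port bond, a rule added on a VF rep of port 1 is duplicated
	 * here on port 2's eswitch with in_mdev still set to port 1's mdev,
	 * so counters and unready-flow handling stay tied to the device the
	 * packet actually enters on.
	 */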
	if (attr->in_rep->vport == MLX5_VPORT_UPLINK && !mlx5_lag_is_mpesw(priv->mdev))
		in_mdev = peer_priv->mdev;
	else
		in_mdev = priv->mdev;

	parse_attr = flow->attr->parse_attr;
	peer_flow = __mlx5e_add_fdb_flow(peer_priv, f, flow_flags,
					 parse_attr->filter_dev,
					 attr->in_rep, in_mdev);
	if (IS_ERR(peer_flow)) {
		err = PTR_ERR(peer_flow);
		goto out;
	}

	list_add_tail(&peer_flow->peer_flows, &flow->peer_flows);
	flow_flag_set(flow, DUP);
	mutex_lock(&esw->offloads.peer_mutex);
	list_add_tail(&flow->peer[i], &esw->offloads.peer_flows[i]);
	mutex_unlock(&esw->offloads.peer_mutex);

out:
	return err;
}

static int
mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
		   struct flow_cls_offload *f,
		   unsigned long flow_flags,
		   struct net_device *filter_dev,
		   struct mlx5e_tc_flow **__flow)
{
	struct mlx5_devcom_comp_dev *devcom = priv->mdev->priv.eswitch->devcom, *pos;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *in_rep = rpriv->rep;
	struct mlx5_core_dev *in_mdev = priv->mdev;
	struct mlx5_eswitch *peer_esw;
	struct mlx5e_tc_flow *flow;
	int err;

	flow = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep,
				    in_mdev);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	if (!is_peer_flow_needed(flow)) {
		*__flow = flow;
		return 0;
	}

	if (!mlx5_devcom_for_each_peer_begin(devcom)) {
		err = -ENODEV;
		goto clean_flow;
	}

	mlx5_devcom_for_each_peer_entry(devcom, peer_esw, pos) {
		err = mlx5e_tc_add_fdb_peer_flow(f, flow, flow_flags, peer_esw);
		if (err)
			goto peer_clean;
	}

	mlx5_devcom_for_each_peer_end(devcom);

	*__flow = flow;
	return 0;

peer_clean:
	mlx5e_tc_del_fdb_peers_flow(flow);
	mlx5_devcom_for_each_peer_end(devcom);
clean_flow:
	mlx5e_tc_del_fdb_flow(priv, flow);
	return err;
}

static int
mlx5e_add_nic_flow(struct mlx5e_priv *priv,
		   struct flow_cls_offload *f,
		   unsigned long flow_flags,
		   struct net_device *filter_dev,
		   struct mlx5e_tc_flow **__flow)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_flow *flow;
	int attr_size, err;

	if (!MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level)) {
		if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
			return -EOPNOTSUPP;
	} else if (!tc_can_offload_extack(priv->netdev, f->common.extack)) {
		return -EOPNOTSUPP;
	}

	flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
	attr_size = sizeof(struct mlx5_nic_flow_attr);
	err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
			       &parse_attr, &flow);
	if (err)
		goto out;

	parse_attr->filter_dev = filter_dev;
	mlx5e_flow_attr_init(flow->attr, parse_attr, f);

	err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
			       f, filter_dev);
	if (err)
		goto err_free;

	err = mlx5_tc_ct_match_add(get_ct_priv(priv), &parse_attr->spec, f,
				   &flow->attr->ct_attr, extack);
	if (err)
		goto err_free;

	err = parse_tc_nic_actions(priv, &rule->action, flow, extack);
	if (err)
		goto err_free;

	err = mlx5e_tc_add_nic_flow(priv, flow, extack);
	if (err)
		goto err_free;

	flow_flag_set(flow, OFFLOADED);
	*__flow = flow;

	return 0;

err_free:
	flow_flag_set(flow, FAILED);
	mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
	mlx5e_flow_put(priv, flow);
out:
	return err;
}

static int
mlx5e_tc_add_flow(struct mlx5e_priv *priv,
		  struct flow_cls_offload *f,
		  unsigned long flags,
		  struct net_device *filter_dev,
		  struct mlx5e_tc_flow **flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	unsigned long flow_flags;
	int err;

	get_flags(flags, &flow_flags);

	if (!tc_can_offload_extack(priv->netdev, f->common.extack))
		return -EOPNOTSUPP;

	if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
		err = mlx5e_add_fdb_flow(priv, f, flow_flags,
					 filter_dev, flow);
	else
		err = mlx5e_add_nic_flow(priv, f, flow_flags,
					 filter_dev, flow);

	return err;
}

static bool is_flow_rule_duplicate_allowed(struct net_device *dev,
					   struct mlx5e_rep_priv *rpriv)
{
	/* An offloaded flow rule is allowed to duplicate on a non-uplink
	 * representor sharing a tc block with other slaves of a lag device.
	 * rpriv can be NULL if this function is called from NIC mode.
	 */
	return netif_is_lag_port(dev) && rpriv && rpriv->rep->vport != MLX5_VPORT_UPLINK;
}

/* As IPsec and TC ordering is not aligned between software and hardware
 * offload, either IPsec offload or TC offload, not both, is allowed for a
 * specific interface.
 */
static bool is_tc_ipsec_order_check_needed(struct net_device *filter, struct mlx5e_priv *priv)
{
	if (!IS_ENABLED(CONFIG_MLX5_EN_IPSEC))
		return false;

	if (filter != priv->netdev)
		return false;

	if (mlx5e_eswitch_vf_rep(priv->netdev))
		return false;

	return true;
}

static int mlx5e_tc_block_ipsec_offload(struct net_device *filter, struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!is_tc_ipsec_order_check_needed(filter, priv))
		return 0;

	if (mdev->num_block_tc)
		return -EBUSY;

	mdev->num_block_ipsec++;

	return 0;
}

static void mlx5e_tc_unblock_ipsec_offload(struct net_device *filter, struct mlx5e_priv *priv)
{
	if (!is_tc_ipsec_order_check_needed(filter, priv))
		return;

	priv->mdev->num_block_ipsec--;
}
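
/* Editor's note, illustrative and not from the original source: the two
 * counters form a mutual exclusion. IPsec offload raises num_block_tc
 * while installed, and each offloaded TC filter holds a reference on
 * num_block_ipsec, so whichever feature is configured first blocks the
 * other until its last user is removed.
 */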
int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
			   struct flow_cls_offload *f, unsigned long flags)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5e_tc_flow *flow;
	int err = 0;

	if (!mlx5_esw_hold(priv->mdev))
		return -EBUSY;

	err = mlx5e_tc_block_ipsec_offload(dev, priv);
	if (err)
		goto esw_release;

	mlx5_esw_get(priv->mdev);

	rcu_read_lock();
	flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
	if (flow) {
		/* The same flow rule was offloaded to a non-uplink representor
		 * sharing the tc block; just return 0.
		 */
		if (is_flow_rule_duplicate_allowed(dev, rpriv) && flow->orig_dev != dev)
			goto rcu_unlock;

		NL_SET_ERR_MSG_MOD(extack,
				   "flow cookie already exists, ignoring");
		netdev_warn_once(priv->netdev,
				 "flow cookie %lx already exists, ignoring\n",
				 f->cookie);
		err = -EEXIST;
		goto rcu_unlock;
	}
rcu_unlock:
	rcu_read_unlock();
	if (flow)
		goto out;

	trace_mlx5e_configure_flower(f);
	err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow);
	if (err)
		goto out;

	/* Flow rule offloaded to non-uplink representor sharing tc block,
	 * set the flow's owner dev.
	 */
	if (is_flow_rule_duplicate_allowed(dev, rpriv))
		flow->orig_dev = dev;

	err = rhashtable_lookup_insert_fast(tc_ht, &flow->node, tc_ht_params);
	if (err)
		goto err_free;

	mlx5_esw_release(priv->mdev);
	return 0;

err_free:
	mlx5e_flow_put(priv, flow);
out:
	mlx5e_tc_unblock_ipsec_offload(dev, priv);
	mlx5_esw_put(priv->mdev);
esw_release:
	mlx5_esw_release(priv->mdev);
	return err;
}

static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags)
{
	bool dir_ingress = !!(flags & MLX5_TC_FLAG(INGRESS));
	bool dir_egress = !!(flags & MLX5_TC_FLAG(EGRESS));

	return flow_flag_test(flow, INGRESS) == dir_ingress &&
	       flow_flag_test(flow, EGRESS) == dir_egress;
}

int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
			struct flow_cls_offload *f, unsigned long flags)
{
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5e_tc_flow *flow;
	int err;

	rcu_read_lock();
	flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
	if (!flow || !same_flow_direction(flow, flags)) {
		err = -EINVAL;
		goto errout;
	}

	/* Only delete the flow if it doesn't have MLX5E_TC_FLOW_DELETED flag
	 * set.
	 */
	if (flow_flag_test_and_set(flow, DELETED)) {
		err = -EINVAL;
		goto errout;
	}
	rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params);
	rcu_read_unlock();

	trace_mlx5e_delete_flower(f);
	mlx5e_flow_put(priv, flow);

	mlx5e_tc_unblock_ipsec_offload(dev, priv);
	mlx5_esw_put(priv->mdev);
	return 0;

errout:
	rcu_read_unlock();
	return err;
}

int mlx5e_tc_fill_action_stats(struct mlx5e_priv *priv,
			       struct flow_offload_action *fl_act)
{
	return mlx5e_tc_act_stats_fill_stats(get_act_stats_handle(priv), fl_act);
}

int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
		       struct flow_cls_offload *f, unsigned long flags)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5e_tc_flow *flow;
	struct mlx5_fc *counter;
	u64 lastuse = 0;
	u64 packets = 0;
	u64 bytes = 0;
	int err = 0;

	rcu_read_lock();
	flow = mlx5e_flow_get(rhashtable_lookup(tc_ht, &f->cookie,
						tc_ht_params));
	rcu_read_unlock();
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	if (!same_flow_direction(flow, flags)) {
		err = -EINVAL;
		goto errout;
	}

	if (mlx5e_is_offloaded_flow(flow)) {
		if (flow_flag_test(flow, USE_ACT_STATS)) {
			f->use_act_stats = true;
		} else {
			counter = mlx5e_tc_get_counter(flow);
			if (!counter)
				goto errout;

			mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
		}
	}

	/* Under multipath it's possible for one rule to be currently
	 * un-offloaded while the other rule is offloaded.
	 */
	if (esw && !mlx5_devcom_for_each_peer_begin(esw->devcom))
		goto out;

	if (flow_flag_test(flow, DUP)) {
		struct mlx5e_tc_flow *peer_flow;

		list_for_each_entry(peer_flow, &flow->peer_flows, peer_flows) {
			u64 packets2;
			u64 lastuse2;
			u64 bytes2;

			if (!flow_flag_test(peer_flow, OFFLOADED))
				continue;
			if (flow_flag_test(flow, USE_ACT_STATS)) {
				f->use_act_stats = true;
				break;
			}

			counter = mlx5e_tc_get_counter(peer_flow);
			if (!counter)
				goto no_peer_counter;
			mlx5_fc_query_cached(counter, &bytes2, &packets2,
					     &lastuse2);

			bytes += bytes2;
			packets += packets2;
			lastuse = max_t(u64, lastuse, lastuse2);
		}
	}

no_peer_counter:
	if (esw)
		mlx5_devcom_for_each_peer_end(esw->devcom);
out:
	flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
			  FLOW_ACTION_HW_STATS_DELAYED);
	trace_mlx5e_stats_flower(f);
errout:
	mlx5e_flow_put(priv, flow);
	return err;
}

static int apply_police_params(struct mlx5e_priv *priv, u64 rate,
			       struct netlink_ext_ack *extack)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch *esw;
	u32 rate_mbps = 0;
	u16 vport_num;
	int err;

	vport_num = rpriv->rep->vport;
	if (vport_num >= MLX5_VPORT_ECPF) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Ingress rate limit is supported only for Eswitch ports connected to VFs");
		return -EOPNOTSUPP;
	}

	esw = priv->mdev->priv.eswitch;
	/* rate is given in bytes/sec.
	 * First convert to bits/sec, then round to the nearest Mbit/sec,
	 * where Mbit means 10^6 bits.
	 * Moreover, if rate is non-zero we choose to configure a minimum of
	 * 1 Mbit/sec.
	 */
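	/* Editor's note, a worked example that is not from the original
	 * source: for rate = 1,500,000 bytes/sec the conversion below
	 * computes 1,500,000 * 8 + 500,000 = 12,500,000, and
	 * 12,500,000 / 10^6 = 12, i.e. 12 Mbit/sec; the added 500,000
	 * implements round-to-nearest before the integer division.
	 */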
	if (rate) {
		rate = (rate * BITS_PER_BYTE) + 500000;
		do_div(rate, 1000000);
		rate_mbps = max_t(u32, rate, 1);
	}

	err = mlx5_esw_qos_modify_vport_rate(esw, vport_num, rate_mbps);
	if (err)
		NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware");

	return err;
}

static int
tc_matchall_police_validate(const struct flow_action *action,
			    const struct flow_action_entry *act,
			    struct netlink_ext_ack *extack)
{
	if (act->police.notexceed.act_id != FLOW_ACTION_CONTINUE &&
	    act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is not continue or ok");
		return -EOPNOTSUPP;
	}

	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when exceed action is not drop");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
	    !flow_action_is_last_entry(action, act)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is ok, but action is not last");
		return -EOPNOTSUPP;
	}

	if (act->police.peakrate_bytes_ps ||
	    act->police.avrate || act->police.overhead) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when peakrate/avrate/overhead is configured");
		return -EOPNOTSUPP;
	}

	return 0;
}
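
/* Editor's note, illustrative and not from the original source: a matchall
 * police rule this validator accepts could look like
 *
 *     tc filter add dev $REP ingress prio 1 matchall \
 *             action police rate 10mbit burst 16k conform-exceed drop/continue
 *
 * i.e. conforming traffic continues while exceeding traffic is dropped;
 * peakrate, avrate and overhead must be left unset.
 */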
static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
					struct flow_action *flow_action,
					struct netlink_ext_ack *extack)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	const struct flow_action_entry *act;
	int err;
	int i;

	if (!flow_action_has_entries(flow_action)) {
		NL_SET_ERR_MSG_MOD(extack, "matchall called with no action");
		return -EINVAL;
	}

	if (!flow_offload_has_one_action(flow_action)) {
		NL_SET_ERR_MSG_MOD(extack, "matchall policing supports only a single action");
		return -EOPNOTSUPP;
	}

	if (!flow_action_basic_hw_stats_check(flow_action, extack)) {
		NL_SET_ERR_MSG_MOD(extack, "Flow action HW stats type is not supported");
		return -EOPNOTSUPP;
	}

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_POLICE:
			err = tc_matchall_police_validate(flow_action, act, extack);
			if (err)
				return err;

			err = apply_police_params(priv, act->police.rate_bytes_ps, extack);
			if (err)
				return err;

			rpriv->prev_vf_vport_stats = priv->stats.vf_vport;
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "mlx5 supports only police action for matchall");
			return -EOPNOTSUPP;
		}
	}

	return 0;
}

int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
				struct tc_cls_matchall_offload *ma)
{
	struct netlink_ext_ack *extack = ma->common.extack;

	if (ma->common.prio != 1) {
		NL_SET_ERR_MSG_MOD(extack, "only priority 1 is supported");
		return -EINVAL;
	}

	return scan_tc_matchall_fdb_actions(priv, &ma->rule->action, extack);
}

int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv,
			     struct tc_cls_matchall_offload *ma)
{
	struct netlink_ext_ack *extack = ma->common.extack;

	return apply_police_params(priv, 0, extack);
}

void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
			     struct tc_cls_matchall_offload *ma)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct rtnl_link_stats64 cur_stats;
	u64 dbytes;
	u64 dpkts;

	cur_stats = priv->stats.vf_vport;
	dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets;
	dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes;
	rpriv->prev_vf_vport_stats = cur_stats;
	flow_stats_update(&ma->stats, dbytes, dpkts, 0, jiffies,
			  FLOW_ACTION_HW_STATS_DELAYED);
}

static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
					      struct mlx5e_priv *peer_priv)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
	struct mlx5e_hairpin_entry *hpe, *tmp;
	LIST_HEAD(init_wait_list);
	u16 peer_vhca_id;
	int bkt;

	if (!mlx5e_same_hw_devs(priv, peer_priv))
		return;

	peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);

	mutex_lock(&tc->hairpin_tbl_lock);
	hash_for_each(tc->hairpin_tbl, bkt, hpe, hairpin_hlist)
		if (refcount_inc_not_zero(&hpe->refcnt))
			list_add(&hpe->dead_peer_wait_list, &init_wait_list);
	mutex_unlock(&tc->hairpin_tbl_lock);

	list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) {
		wait_for_completion(&hpe->res_ready);
		if (!IS_ERR_OR_NULL(hpe->hp) && hpe->peer_vhca_id == peer_vhca_id)
			mlx5_core_hairpin_clear_dead_peer(hpe->hp->pair);

		mlx5e_hairpin_put(priv, hpe);
	}
}

static int mlx5e_tc_netdev_event(struct notifier_block *this,
				 unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct mlx5e_priv *peer_priv;
	struct mlx5e_tc_table *tc;
	struct mlx5e_priv *priv;

	if (ndev->netdev_ops != &mlx5e_netdev_ops ||
	    event != NETDEV_UNREGISTER ||
	    ndev->reg_state == NETREG_REGISTERED)
		return NOTIFY_DONE;

	tc = container_of(this, struct mlx5e_tc_table, netdevice_nb);
	priv = tc->priv;
	peer_priv = netdev_priv(ndev);
	if (priv == peer_priv ||
	    !(priv->netdev->features & NETIF_F_HW_TC))
		return NOTIFY_DONE;

	mlx5e_tc_hairpin_update_dead_peer(priv, peer_priv);

	return NOTIFY_DONE;
}

static int mlx5e_tc_nic_create_miss_table(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_flow_table **ft = &tc->miss_t;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_namespace *ns;
	int err = 0;

	ft_attr.max_fte = 1;
	ft_attr.autogroup.max_num_groups = 1;
	ft_attr.level = MLX5E_TC_MISS_LEVEL;
	ft_attr.prio = 0;
	ns = mlx5_get_flow_namespace(priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL);

	*ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
	if (IS_ERR(*ft)) {
		err = PTR_ERR(*ft);
		netdev_err(priv->netdev, "failed to create tc nic miss table err=%d\n", err);
	}

	return err;
}

static void mlx5e_tc_nic_destroy_miss_table(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);

	mlx5_destroy_flow_table(tc->miss_t);
}
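
/* Editor's note, illustrative and not from the original source: the single
 * entry miss table created above becomes attr.default_ft of the chains
 * object in mlx5e_tc_nic_init() below, so traffic that misses every tc
 * chain has a well-defined destination instead of being dropped by the
 * chains infrastructure.
 */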
int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_core_dev *dev = priv->mdev;
	struct mapping_ctx *chains_mapping;
	struct mlx5_chains_attr attr = {};
	u64 mapping_id;
	int err;

	mlx5e_mod_hdr_tbl_init(&tc->mod_hdr);
	mutex_init(&tc->t_lock);
	mutex_init(&tc->hairpin_tbl_lock);
	hash_init(tc->hairpin_tbl);
	tc->priv = priv;

	err = rhashtable_init(&tc->ht, &tc_ht_params);
	if (err)
		return err;

	lockdep_set_class(&tc->ht.mutex, &tc_ht_lock_key);
	lockdep_init_map(&tc->ht.run_work.lockdep_map, "tc_ht_wq_key", &tc_ht_wq_key, 0);

	mapping_id = mlx5_query_nic_system_image_guid(dev);

	chains_mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_CHAIN,
					       sizeof(struct mlx5_mapped_obj),
					       MLX5E_TC_TABLE_CHAIN_TAG_MASK, true);

	if (IS_ERR(chains_mapping)) {
		err = PTR_ERR(chains_mapping);
		goto err_mapping;
	}
	tc->mapping = chains_mapping;

	err = mlx5e_tc_nic_create_miss_table(priv);
	if (err)
		goto err_chains;

	if (MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level))
		attr.flags = MLX5_CHAINS_AND_PRIOS_SUPPORTED |
			MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
	attr.ns = MLX5_FLOW_NAMESPACE_KERNEL;
	attr.max_grp_num = MLX5E_TC_TABLE_NUM_GROUPS;
	attr.default_ft = tc->miss_t;
	attr.mapping = chains_mapping;
	attr.fs_base_prio = MLX5E_TC_PRIO;

	tc->chains = mlx5_chains_create(dev, &attr);
	if (IS_ERR(tc->chains)) {
		err = PTR_ERR(tc->chains);
		goto err_miss;
	}

	mlx5_chains_print_info(tc->chains);

	tc->post_act = mlx5e_tc_post_act_init(priv, tc->chains, MLX5_FLOW_NAMESPACE_KERNEL);
	tc->ct = mlx5_tc_ct_init(priv, tc->chains, &tc->mod_hdr,
				 MLX5_FLOW_NAMESPACE_KERNEL, tc->post_act);

	tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
	err = register_netdevice_notifier_dev_net(priv->netdev,
						  &tc->netdevice_nb,
						  &tc->netdevice_nn);
	if (err) {
		tc->netdevice_nb.notifier_call = NULL;
		mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
		goto err_reg;
	}

	mlx5e_tc_debugfs_init(tc, mlx5e_fs_get_debugfs_root(priv->fs));

	tc->action_stats_handle = mlx5e_tc_act_stats_create();
	if (IS_ERR(tc->action_stats_handle)) {
		err = PTR_ERR(tc->action_stats_handle);
		goto err_act_stats;
	}

	return 0;

err_act_stats:
	unregister_netdevice_notifier_dev_net(priv->netdev,
					      &tc->netdevice_nb,
					      &tc->netdevice_nn);
err_reg:
	mlx5_tc_ct_clean(tc->ct);
	mlx5e_tc_post_act_destroy(tc->post_act);
	mlx5_chains_destroy(tc->chains);
err_miss:
	mlx5e_tc_nic_destroy_miss_table(priv);
err_chains:
	mapping_destroy(chains_mapping);
err_mapping:
	rhashtable_destroy(&tc->ht);
	return err;
}

static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
	struct mlx5e_tc_flow *flow = ptr;
	struct mlx5e_priv *priv = flow->priv;

	mlx5e_tc_del_flow(priv, flow);
	kfree(flow);
}

void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);

	debugfs_remove_recursive(tc->dfs_root);

	if (tc->netdevice_nb.notifier_call)
		unregister_netdevice_notifier_dev_net(priv->netdev,
						      &tc->netdevice_nb,
						      &tc->netdevice_nn);

	mlx5e_mod_hdr_tbl_destroy(&tc->mod_hdr);
	mutex_destroy(&tc->hairpin_tbl_lock);

	rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, NULL);

	if (!IS_ERR_OR_NULL(tc->t)) {
		mlx5_chains_put_table(tc->chains, 0, 1, MLX5E_TC_FT_LEVEL);
		tc->t = NULL;
	}
	mutex_destroy(&tc->t_lock);

	mlx5_tc_ct_clean(tc->ct);
	mlx5e_tc_post_act_destroy(tc->post_act);
	mapping_destroy(tc->mapping);
	mlx5_chains_destroy(tc->chains);
	mlx5e_tc_nic_destroy_miss_table(priv);
	mlx5e_tc_act_stats_free(tc->action_stats_handle);
}

int mlx5e_tc_ht_init(struct rhashtable *tc_ht)
{
	int err;

	err = rhashtable_init(tc_ht, &tc_ht_params);
	if (err)
		return err;

	lockdep_set_class(&tc_ht->mutex, &tc_ht_lock_key);
	lockdep_init_map(&tc_ht->run_work.lockdep_map, "tc_ht_wq_key", &tc_ht_wq_key, 0);

	return 0;
}

void mlx5e_tc_ht_cleanup(struct rhashtable *tc_ht)
{
	rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
}

int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv)
{
	const size_t sz_enc_opts = sizeof(struct tunnel_match_enc_opts);
	struct netdev_phys_item_id ppid;
	struct mlx5e_rep_priv *rpriv;
	struct mapping_ctx *mapping;
	struct mlx5_eswitch *esw;
	struct mlx5e_priv *priv;
	u64 mapping_id, key;
	int err = 0;

	rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
	priv = netdev_priv(rpriv->netdev);
	esw = priv->mdev->priv.eswitch;

	uplink_priv->post_act = mlx5e_tc_post_act_init(priv, esw_chains(esw),
						       MLX5_FLOW_NAMESPACE_FDB);
	uplink_priv->ct_priv = mlx5_tc_ct_init(netdev_priv(priv->netdev),
					       esw_chains(esw),
					       &esw->offloads.mod_hdr,
					       MLX5_FLOW_NAMESPACE_FDB,
					       uplink_priv->post_act);

	uplink_priv->int_port_priv = mlx5e_tc_int_port_init(netdev_priv(priv->netdev));

	uplink_priv->tc_psample = mlx5e_tc_sample_init(esw, uplink_priv->post_act);

	mapping_id = mlx5_query_nic_system_image_guid(esw->dev);

	mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_TUNNEL,
					sizeof(struct tunnel_match_key),
					TUNNEL_INFO_BITS_MASK, true);

	if (IS_ERR(mapping)) {
		err = PTR_ERR(mapping);
		goto err_tun_mapping;
	}
	uplink_priv->tunnel_mapping = mapping;

	/* The two last values are reserved for the stack devices slow path
	 * table mark and the bridge ingress push mark.
	 */
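	/* Editor's note, illustrative and not from the original source:
	 * because the top two IDs of the enc_opts space are reserved, the
	 * mapping below is created with a ceiling of ENC_OPTS_BITS_MASK - 2,
	 * i.e. the largest ID it may hand out is two below the raw bit-mask
	 * maximum.
	 */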
	mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_TUNNEL_ENC_OPTS,
					sz_enc_opts, ENC_OPTS_BITS_MASK - 2, true);
	if (IS_ERR(mapping)) {
		err = PTR_ERR(mapping);
		goto err_enc_opts_mapping;
	}
	uplink_priv->tunnel_enc_opts_mapping = mapping;

	uplink_priv->encap = mlx5e_tc_tun_init(priv);
	if (IS_ERR(uplink_priv->encap)) {
		err = PTR_ERR(uplink_priv->encap);
		goto err_register_fib_notifier;
	}

	uplink_priv->action_stats_handle = mlx5e_tc_act_stats_create();
	if (IS_ERR(uplink_priv->action_stats_handle)) {
		err = PTR_ERR(uplink_priv->action_stats_handle);
		goto err_action_counter;
	}

	err = dev_get_port_parent_id(priv->netdev, &ppid, false);
	if (!err) {
		memcpy(&key, &ppid.id, sizeof(key));
		mlx5_esw_offloads_devcom_init(esw, key);
	}

	return 0;

err_action_counter:
	mlx5e_tc_tun_cleanup(uplink_priv->encap);
err_register_fib_notifier:
	mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
err_enc_opts_mapping:
	mapping_destroy(uplink_priv->tunnel_mapping);
err_tun_mapping:
	mlx5e_tc_sample_cleanup(uplink_priv->tc_psample);
	mlx5e_tc_int_port_cleanup(uplink_priv->int_port_priv);
	mlx5_tc_ct_clean(uplink_priv->ct_priv);
	netdev_warn(priv->netdev,
		    "Failed to initialize tc (eswitch), err: %d", err);
	mlx5e_tc_post_act_destroy(uplink_priv->post_act);
	return err;
}

void mlx5e_tc_esw_cleanup(struct mlx5_rep_uplink_priv *uplink_priv)
{
	struct mlx5e_rep_priv *rpriv;
	struct mlx5_eswitch *esw;
	struct mlx5e_priv *priv;

	rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
	priv = netdev_priv(rpriv->netdev);
	esw = priv->mdev->priv.eswitch;

	mlx5_esw_offloads_devcom_cleanup(esw);

	mlx5e_tc_tun_cleanup(uplink_priv->encap);

	mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
	mapping_destroy(uplink_priv->tunnel_mapping);

	mlx5e_tc_sample_cleanup(uplink_priv->tc_psample);
	mlx5e_tc_int_port_cleanup(uplink_priv->int_port_priv);
	mlx5_tc_ct_clean(uplink_priv->ct_priv);
	mlx5e_flow_meters_cleanup(uplink_priv->flow_meters);
	mlx5e_tc_post_act_destroy(uplink_priv->post_act);
	mlx5e_tc_act_stats_free(uplink_priv->action_stats_handle);
}

int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags)
{
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);

	return atomic_read(&tc_ht->nelems);
}

void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw)
{
	struct mlx5e_tc_flow *flow, *tmp;
	int i;

	for (i = 0; i < MLX5_MAX_PORTS; i++) {
		if (i == mlx5_get_dev_index(esw->dev))
			continue;
		list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows[i], peer[i])
			mlx5e_tc_del_fdb_peers_flow(flow);
	}
}

void mlx5e_tc_reoffload_flows_work(struct work_struct *work)
{
	struct mlx5_rep_uplink_priv *rpriv =
		container_of(work, struct mlx5_rep_uplink_priv,
			     reoffload_flows_work);
	struct mlx5e_tc_flow *flow, *tmp;

	mutex_lock(&rpriv->unready_flows_lock);
	list_for_each_entry_safe(flow, tmp, &rpriv->unready_flows, unready) {
		if (!mlx5e_tc_add_fdb_flow(flow->priv, flow, NULL))
			unready_flow_del(flow);
	}
	mutex_unlock(&rpriv->unready_flows_lock);
}

static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
				     struct flow_cls_offload *cls_flower,
				     unsigned long flags)
{
	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
					      flags);
	case FLOW_CLS_DESTROY:
		return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
					   flags);
	case FLOW_CLS_STATS:
		return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
					  flags);
	default:
		return -EOPNOTSUPP;
	}
}

int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
			    void *cb_priv)
{
	unsigned long flags = MLX5_TC_FLAG(INGRESS);
	struct mlx5e_priv *priv = cb_priv;

	if (!priv->netdev || !netif_device_present(priv->netdev))
		return -EOPNOTSUPP;

	if (mlx5e_is_uplink_rep(priv))
		flags |= MLX5_TC_FLAG(ESW_OFFLOAD);
	else
		flags |= MLX5_TC_FLAG(NIC_OFFLOAD);

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return mlx5e_setup_tc_cls_flower(priv, type_data, flags);
	default:
		return -EOPNOTSUPP;
	}
}
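
/* Editor's note, illustrative and not from the original source: the
 * tunnel_id handled below is a packed restore value, roughly
 *
 *     tunnel_id = (tun_id << ENC_OPTS_BITS) | enc_opts_id;
 *
 * so mlx5e_tc_restore_tunnel() first peels off the enc_opts_id with
 * ENC_OPTS_BITS_MASK and then looks both parts up in the two mappings
 * created by mlx5e_tc_esw_init().
 */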
static bool mlx5e_tc_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb,
				    struct mlx5e_tc_update_priv *tc_priv,
				    u32 tunnel_id)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct tunnel_match_enc_opts enc_opts = {};
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct metadata_dst *tun_dst;
	struct tunnel_match_key key;
	u32 tun_id, enc_opts_id;
	struct net_device *dev;
	int err;

	enc_opts_id = tunnel_id & ENC_OPTS_BITS_MASK;
	tun_id = tunnel_id >> ENC_OPTS_BITS;

	if (!tun_id)
		return true;

	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &uplink_rpriv->uplink_priv;

	err = mapping_find(uplink_priv->tunnel_mapping, tun_id, &key);
	if (err) {
		netdev_dbg(priv->netdev,
			   "Couldn't find tunnel for tun_id: %d, err: %d\n",
			   tun_id, err);
		return false;
	}

	if (enc_opts_id) {
		err = mapping_find(uplink_priv->tunnel_enc_opts_mapping,
				   enc_opts_id, &enc_opts);
		if (err) {
			netdev_dbg(priv->netdev,
				   "Couldn't find tunnel (opts) for tun_id: %d, err: %d\n",
				   enc_opts_id, err);
			return false;
		}
	}

	switch (key.enc_control.addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		tun_dst = __ip_tun_set_dst(key.enc_ipv4.src, key.enc_ipv4.dst,
					   key.enc_ip.tos, key.enc_ip.ttl,
					   key.enc_tp.dst, TUNNEL_KEY,
					   key32_to_tunnel_id(key.enc_key_id.keyid),
					   enc_opts.key.len);
		break;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		tun_dst = __ipv6_tun_set_dst(&key.enc_ipv6.src, &key.enc_ipv6.dst,
					     key.enc_ip.tos, key.enc_ip.ttl,
					     key.enc_tp.dst, 0, TUNNEL_KEY,
					     key32_to_tunnel_id(key.enc_key_id.keyid),
					     enc_opts.key.len);
		break;
	default:
		netdev_dbg(priv->netdev,
			   "Couldn't restore tunnel, unsupported addr_type: %d\n",
			   key.enc_control.addr_type);
		return false;
	}

	if (!tun_dst) {
		netdev_dbg(priv->netdev, "Couldn't restore tunnel, no tun_dst\n");
		return false;
	}

	tun_dst->u.tun_info.key.tp_src = key.enc_tp.src;

	if (enc_opts.key.len)
		ip_tunnel_info_opts_set(&tun_dst->u.tun_info,
					enc_opts.key.data,
					enc_opts.key.len,
					enc_opts.key.dst_opt_type);

	skb_dst_set(skb, (struct dst_entry *)tun_dst);
	dev = dev_get_by_index(&init_net, key.filter_ifindex);
	if (!dev) {
		netdev_dbg(priv->netdev,
			   "Couldn't find tunnel device with ifindex: %d\n",
			   key.filter_ifindex);
		return false;
	}

	/* Set fwd_dev so we do dev_put() after datapath */
	tc_priv->fwd_dev = dev;

	skb->dev = dev;

	return true;
}

static bool mlx5e_tc_restore_skb_tc_meta(struct sk_buff *skb, struct mlx5_tc_ct_priv *ct_priv,
					 struct mlx5_mapped_obj *mapped_obj, u32 zone_restore_id,
					 u32 tunnel_id, struct mlx5e_tc_update_priv *tc_priv)
{
	struct mlx5e_priv *priv = netdev_priv(skb->dev);
	struct tc_skb_ext *tc_skb_ext;
	u64 act_miss_cookie;
	u32 chain;

	chain = mapped_obj->type == MLX5_MAPPED_OBJ_CHAIN ? mapped_obj->chain : 0;
	act_miss_cookie = mapped_obj->type == MLX5_MAPPED_OBJ_ACT_MISS ?
			  mapped_obj->act_miss_cookie : 0;
	if (chain || act_miss_cookie) {
		if (!mlx5e_tc_ct_restore_flow(ct_priv, skb, zone_restore_id))
			return false;

		tc_skb_ext = tc_skb_ext_alloc(skb);
		if (!tc_skb_ext) {
			WARN_ON(1);
			return false;
		}

		if (act_miss_cookie) {
			tc_skb_ext->act_miss_cookie = act_miss_cookie;
			tc_skb_ext->act_miss = 1;
		} else {
			tc_skb_ext->chain = chain;
		}
	}

	if (tc_priv)
		return mlx5e_tc_restore_tunnel(priv, skb, tc_priv, tunnel_id);

	return true;
}

static void mlx5e_tc_restore_skb_sample(struct mlx5e_priv *priv, struct sk_buff *skb,
					struct mlx5_mapped_obj *mapped_obj,
					struct mlx5e_tc_update_priv *tc_priv)
{
	if (!mlx5e_tc_restore_tunnel(priv, skb, tc_priv, mapped_obj->sample.tunnel_id)) {
		netdev_dbg(priv->netdev,
			   "Failed to restore tunnel info for sampled packet\n");
		return;
	}
	mlx5e_tc_sample_skb(skb, mapped_obj);
}

static bool mlx5e_tc_restore_skb_int_port(struct mlx5e_priv *priv, struct sk_buff *skb,
					  struct mlx5_mapped_obj *mapped_obj,
					  struct mlx5e_tc_update_priv *tc_priv,
					  u32 tunnel_id)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;
	bool forward_tx = false;

	/* Tunnel restore takes precedence over int port restore */
	if (tunnel_id)
		return mlx5e_tc_restore_tunnel(priv, skb, tc_priv, tunnel_id);

	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &uplink_rpriv->uplink_priv;

	if (mlx5e_tc_int_port_dev_fwd(uplink_priv->int_port_priv, skb,
				      mapped_obj->int_port_metadata, &forward_tx)) {
		/* Set fwd_dev for future dev_put */
		tc_priv->fwd_dev = skb->dev;
		tc_priv->forward_tx = forward_tx;

		return true;
	}

	return false;
}

bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb,
			 struct mapping_ctx *mapping_ctx, u32 mapped_obj_id,
			 struct mlx5_tc_ct_priv *ct_priv,
			 u32 zone_restore_id, u32 tunnel_id,
			 struct mlx5e_tc_update_priv *tc_priv)
{
	struct mlx5e_priv *priv = netdev_priv(skb->dev);
	struct mlx5_mapped_obj mapped_obj;
	int err;

	err = mapping_find(mapping_ctx, mapped_obj_id, &mapped_obj);
	if (err) {
		netdev_dbg(skb->dev,
			   "Couldn't find mapped object for mapped_obj_id: %d, err: %d\n",
			   mapped_obj_id, err);
		return false;
	}

	switch (mapped_obj.type) {
	case MLX5_MAPPED_OBJ_CHAIN:
	case MLX5_MAPPED_OBJ_ACT_MISS:
		return mlx5e_tc_restore_skb_tc_meta(skb, ct_priv, &mapped_obj, zone_restore_id,
						    tunnel_id, tc_priv);
	case MLX5_MAPPED_OBJ_SAMPLE:
		mlx5e_tc_restore_skb_sample(priv, skb, &mapped_obj, tc_priv);
		tc_priv->skb_done = true;
		return true;
	case MLX5_MAPPED_OBJ_INT_PORT_METADATA:
		return mlx5e_tc_restore_skb_int_port(priv, skb, &mapped_obj, tc_priv, tunnel_id);
	default:
		netdev_dbg(priv->netdev, "Invalid mapped object type: %d\n", mapped_obj.type);
		return false;
	}

	return false;
}
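
/* Editor's note, illustrative and not from the original source: on the NIC
 * RX path the restore metadata arrives in reg_b (see the
 * NIC_MAPPED_OBJ_TO_REG mapping), with the mapped object id in the low
 * bits covered by MLX5E_TC_TABLE_CHAIN_TAG_MASK and the CT zone restore
 * value above them; mlx5e_tc_update_skb_nic() below unpacks both and
 * delegates to mlx5e_tc_update_skb().
 */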
bool mlx5e_tc_update_skb_nic(struct mlx5_cqe64 *cqe, struct sk_buff *skb)
{
	struct mlx5e_priv *priv = netdev_priv(skb->dev);
	u32 mapped_obj_id, reg_b, zone_restore_id;
	struct mlx5_tc_ct_priv *ct_priv;
	struct mapping_ctx *mapping_ctx;
	struct mlx5e_tc_table *tc;

	reg_b = be32_to_cpu(cqe->ft_metadata);
	tc = mlx5e_fs_get_tc(priv->fs);
	mapped_obj_id = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK;
	zone_restore_id = (reg_b >> MLX5_REG_MAPPING_MOFFSET(NIC_ZONE_RESTORE_TO_REG)) &
			  ESW_ZONE_ID_MASK;
	ct_priv = tc->ct;
	mapping_ctx = tc->mapping;

	return mlx5e_tc_update_skb(cqe, skb, mapping_ctx, mapped_obj_id, ct_priv, zone_restore_id,
				   0, NULL);
}

static struct mapping_ctx *
mlx5e_get_priv_obj_mapping(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc;
	struct mlx5_eswitch *esw;
	struct mapping_ctx *ctx;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		esw = priv->mdev->priv.eswitch;
		ctx = esw->offloads.reg_c0_obj_pool;
	} else {
		tc = mlx5e_fs_get_tc(priv->fs);
		ctx = tc->mapping;
	}

	return ctx;
}

int mlx5e_tc_action_miss_mapping_get(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr,
				     u64 act_miss_cookie, u32 *act_miss_mapping)
{
	struct mlx5_mapped_obj mapped_obj = {};
	struct mlx5_eswitch *esw;
	struct mapping_ctx *ctx;
	int err;

	ctx = mlx5e_get_priv_obj_mapping(priv);
	mapped_obj.type = MLX5_MAPPED_OBJ_ACT_MISS;
	mapped_obj.act_miss_cookie = act_miss_cookie;
	err = mapping_add(ctx, &mapped_obj, act_miss_mapping);
	if (err)
		return err;

	if (!is_mdev_switchdev_mode(priv->mdev))
		return 0;

	esw = priv->mdev->priv.eswitch;
	attr->act_id_restore_rule = esw_add_restore_rule(esw, *act_miss_mapping);
	if (IS_ERR(attr->act_id_restore_rule)) {
		err = PTR_ERR(attr->act_id_restore_rule);
		goto err_rule;
	}

	return 0;

err_rule:
	mapping_remove(ctx, *act_miss_mapping);
	return err;
}

void mlx5e_tc_action_miss_mapping_put(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr,
				      u32 act_miss_mapping)
{
	struct mapping_ctx *ctx = mlx5e_get_priv_obj_mapping(priv);

	if (is_mdev_switchdev_mode(priv->mdev))
		mlx5_del_flow_rules(attr->act_id_restore_rule);
	mapping_remove(ctx, act_miss_mapping);
}