/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/flow_dissector.h>
#include <net/flow_offload.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <linux/refcount.h>
#include <linux/completion.h>
#include <net/arp.h>
#include <net/ipv6_stubs.h>
#include <net/bareudp.h>
#include <net/bonding.h>
#include "en.h"
#include "en/tc/post_act.h"
#include "en_rep.h"
#include "en/rep/tc.h"
#include "en/rep/neigh.h"
#include "en_tc.h"
#include "eswitch.h"
#include "fs_core.h"
#include "en/port.h"
#include "en/tc_tun.h"
#include "en/mapping.h"
#include "en/tc_ct.h"
#include "en/mod_hdr.h"
#include "en/tc_tun_encap.h"
#include "en/tc/sample.h"
#include "en/tc/act/act.h"
#include "lib/devcom.h"
#include "lib/geneve.h"
#include "lib/fs_chains.h"
#include "diag/en_tc_tracepoint.h"
#include <asm/div64.h>
#include "lag/lag.h"
#include "lag/mp.h"

#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(18)

struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
	[CHAIN_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
		.moffset = 0,
		.mlen = 16,
	},
	[VPORT_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
		.moffset = 16,
		.mlen = 16,
	},
	[TUNNEL_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1,
		.moffset = 8,
		.mlen = ESW_TUN_OPTS_BITS + ESW_TUN_ID_BITS,
		.soffset = MLX5_BYTE_OFF(fte_match_param,
					 misc_parameters_2.metadata_reg_c_1),
	},
	[ZONE_TO_REG] = zone_to_reg_ct,
	[ZONE_RESTORE_TO_REG] = zone_restore_to_reg_ct,
	[CTSTATE_TO_REG] = ctstate_to_reg_ct,
	[MARK_TO_REG] = mark_to_reg_ct,
	[LABELS_TO_REG] = labels_to_reg_ct,
	[FTEID_TO_REG] = fteid_to_reg_ct,
	/* For NIC rules we store the restore metadata directly
	 * into reg_b that is passed to SW since we don't
	 * jump between steering domains.
	 */
	[NIC_CHAIN_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_B,
		.moffset = 0,
		.mlen = 16,
	},
	[NIC_ZONE_RESTORE_TO_REG] = nic_zone_restore_to_reg_ct,
};

/* To avoid false lock dependency warnings, give the tc_ht lock a lock class
 * different from that of the hash tables used inside it: when the last flow
 * of a group is deleted and the group itself is then deleted, we end up in
 * del_sw_flow_group(), which calls rhashtable_destroy() on fg->ftes_hash.
 * That takes its ht->mutex, which is a different mutex than the tc_ht one
 * here.
 */
static struct lock_class_key tc_ht_lock_key;

static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);

void
mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
			    enum mlx5e_tc_attr_to_reg type,
			    u32 val,
			    u32 mask)
{
	void *headers_c = spec->match_criteria, *headers_v = spec->match_value, *fmask, *fval;
	int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	u32 max_mask = GENMASK(match_len - 1, 0);
	__be32 curr_mask_be, curr_val_be;
	u32 curr_mask, curr_val;

	fmask = headers_c + soffset;
	fval = headers_v + soffset;

	memcpy(&curr_mask_be, fmask, 4);
	memcpy(&curr_val_be, fval, 4);

	curr_mask = be32_to_cpu(curr_mask_be);
	curr_val = be32_to_cpu(curr_val_be);

	/* move to the correct offset */
	WARN_ON(mask > max_mask);
	mask <<= moffset;
	val <<= moffset;
	max_mask <<= moffset;

	/* zero out the current val and mask bits */
	curr_mask &= ~max_mask;
	curr_val &= ~max_mask;

	/* OR in the new val and mask */
	curr_mask |= mask;
	curr_val |= val;

	/* back to be32 and write */
	curr_mask_be = cpu_to_be32(curr_mask);
	curr_val_be = cpu_to_be32(curr_val);

	memcpy(fmask, &curr_mask_be, 4);
	memcpy(fval, &curr_val_be, 4);

	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
}

void
mlx5e_tc_match_to_reg_get_match(struct mlx5_flow_spec *spec,
				enum mlx5e_tc_attr_to_reg type,
				u32 *val,
				u32 *mask)
{
	void *headers_c = spec->match_criteria, *headers_v = spec->match_value, *fmask, *fval;
	int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	u32 max_mask = GENMASK(match_len - 1, 0);
	__be32 curr_mask_be, curr_val_be;
	u32 curr_mask, curr_val;

	fmask = headers_c + soffset;
	fval = headers_v + soffset;

	memcpy(&curr_mask_be, fmask, 4);
	memcpy(&curr_val_be, fval, 4);

	curr_mask = be32_to_cpu(curr_mask_be);
	curr_val = be32_to_cpu(curr_val_be);

	*mask = (curr_mask >> moffset) & max_mask;
	*val = (curr_val >> moffset) & max_mask;
}

int
mlx5e_tc_match_to_reg_set_and_get_id(struct mlx5_core_dev *mdev,
				     struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
				     enum mlx5_flow_namespace_type ns,
				     enum mlx5e_tc_attr_to_reg type,
				     u32 data)
{
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield;
	int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	char *modact;
	int err;

	modact = mlx5e_mod_hdr_alloc(mdev, ns, mod_hdr_acts);
	if (IS_ERR(modact))
		return PTR_ERR(modact);

	/* The firmware length field is 5 bits wide; 0 means 32 bits */
	if (mlen == 32)
		mlen = 0;

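	/* Build one set_action_in entry that writes 'data' into 'mlen' bits
	 * of 'mfield', starting at bit 'moffset'. For example, VPORT_TO_REG
	 * (reg_c_0, moffset 16, mlen 16) with data 5 leaves hardware with
	 * reg_c_0 = (reg_c_0 & 0xffff) | (5 << 16).
	 */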
	MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, modact, field, mfield);
	MLX5_SET(set_action_in, modact, offset, moffset);
	MLX5_SET(set_action_in, modact, length, mlen);
	MLX5_SET(set_action_in, modact, data, data);
	err = mod_hdr_acts->num_actions;
	mod_hdr_acts->num_actions++;

	return err;
}

struct mlx5e_tc_int_port_priv *
mlx5e_get_int_port_priv(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;

		return uplink_priv->int_port_priv;
	}

	return NULL;
}

static struct mlx5_tc_ct_priv *
get_ct_priv(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;

		return uplink_priv->ct_priv;
	}

	return priv->fs.tc.ct;
}

static struct mlx5e_tc_psample *
get_sample_priv(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;

		return uplink_priv->tc_psample;
	}

	return NULL;
}

struct mlx5_flow_handle *
mlx5_tc_rule_insert(struct mlx5e_priv *priv,
		    struct mlx5_flow_spec *spec,
		    struct mlx5_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (is_mdev_switchdev_mode(priv->mdev))
		return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);

	return mlx5e_add_offloaded_nic_rule(priv, spec, attr);
}

void
mlx5_tc_rule_delete(struct mlx5e_priv *priv,
		    struct mlx5_flow_handle *rule,
		    struct mlx5_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		mlx5_eswitch_del_offloaded_rule(esw, rule, attr);

		return;
	}

	mlx5e_del_offloaded_nic_rule(priv, rule, attr);
}

int
mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
			  struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
			  enum mlx5_flow_namespace_type ns,
			  enum mlx5e_tc_attr_to_reg type,
			  u32 data)
{
	int ret = mlx5e_tc_match_to_reg_set_and_get_id(mdev, mod_hdr_acts, ns, type, data);

	return ret < 0 ? ret : 0;
}
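
/* Rewrite an already-allocated set action in place: 'act_id' is the index
 * returned by mlx5e_tc_match_to_reg_set_and_get_id(); the field, offset and
 * length still come from the mapping table, only 'data' changes.
 */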
void mlx5e_tc_match_to_reg_mod_hdr_change(struct mlx5_core_dev *mdev,
					  struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
					  enum mlx5e_tc_attr_to_reg type,
					  int act_id, u32 data)
{
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield;
	int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	char *modact;

	modact = mlx5e_mod_hdr_get_item(mod_hdr_acts, act_id);

	/* The firmware length field is 5 bits wide; 0 means 32 bits */
	if (mlen == 32)
		mlen = 0;

	MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, modact, field, mfield);
	MLX5_SET(set_action_in, modact, offset, moffset);
	MLX5_SET(set_action_in, modact, length, mlen);
	MLX5_SET(set_action_in, modact, data, data);
}

struct mlx5e_hairpin {
	struct mlx5_hairpin *pair;

	struct mlx5_core_dev *func_mdev;
	struct mlx5e_priv *func_priv;
	u32 tdn;
	struct mlx5e_tir direct_tir;

	int num_channels;
	struct mlx5e_rqt indir_rqt;
	struct mlx5e_tir indir_tir[MLX5E_NUM_INDIR_TIRS];
	struct mlx5_ttc_table *ttc;
};

struct mlx5e_hairpin_entry {
	/* a node of a hash table which keeps all the hairpin entries */
	struct hlist_node hairpin_hlist;

	/* protects flows list */
	spinlock_t flows_lock;
	/* flows sharing the same hairpin */
	struct list_head flows;
	/* hpe's that were not fully initialized when dead peer update event
	 * function traversed them.
	 */
	struct list_head dead_peer_wait_list;

	u16 peer_vhca_id;
	u8 prio;
	struct mlx5e_hairpin *hp;
	refcount_t refcnt;
	struct completion res_ready;
};

static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow);

struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow)
{
	if (!flow || !refcount_inc_not_zero(&flow->refcnt))
		return ERR_PTR(-EINVAL);
	return flow;
}

void mlx5e_flow_put(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
{
	if (refcount_dec_and_test(&flow->refcnt)) {
		mlx5e_tc_del_flow(priv, flow);
		kfree_rcu(flow, rcu_head);
	}
}

bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, ESWITCH);
}

bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, FT);
}

bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, OFFLOADED);
}

int mlx5e_get_flow_namespace(struct mlx5e_tc_flow *flow)
{
	return mlx5e_is_eswitch_flow(flow) ?
		MLX5_FLOW_NAMESPACE_FDB : MLX5_FLOW_NAMESPACE_KERNEL;
}

static struct mod_hdr_tbl *
get_mod_hdr_table(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	return mlx5e_get_flow_namespace(flow) == MLX5_FLOW_NAMESPACE_FDB ?
		&esw->offloads.mod_hdr :
		&priv->fs.tc.mod_hdr;
}
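
/* Modify-header handles are shared per table: FDB flows use the eswitch
 * offloads table, NIC flows the per-priv tc table. mlx5e_mod_hdr_attach()
 * below can then reuse an existing handle when an identical action list is
 * already installed in that table.
 */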
static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
				struct mlx5e_tc_flow *flow,
				struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	struct mlx5_modify_hdr *modify_hdr;
	struct mlx5e_mod_hdr_handle *mh;

	mh = mlx5e_mod_hdr_attach(priv->mdev, get_mod_hdr_table(priv, flow),
				  mlx5e_get_flow_namespace(flow),
				  &parse_attr->mod_hdr_acts);
	if (IS_ERR(mh))
		return PTR_ERR(mh);

	modify_hdr = mlx5e_mod_hdr_get(mh);
	flow->attr->modify_hdr = modify_hdr;
	flow->mh = mh;

	return 0;
}

static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
				 struct mlx5e_tc_flow *flow)
{
	/* flow wasn't fully initialized */
	if (!flow->mh)
		return;

	mlx5e_mod_hdr_detach(priv->mdev, get_mod_hdr_table(priv, flow),
			     flow->mh);
	flow->mh = NULL;
}

static
struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
{
	struct mlx5_core_dev *mdev;
	struct net_device *netdev;
	struct mlx5e_priv *priv;

	netdev = dev_get_by_index(net, ifindex);
	if (!netdev)
		return ERR_PTR(-ENODEV);

	priv = netdev_priv(netdev);
	mdev = priv->mdev;
	dev_put(netdev);

	/* Mirred tc action holds a refcount on the ifindex net_device (see
	 * net/sched/act_mirred.c:tcf_mirred_get_dev). So, it's okay to continue using mdev
	 * after dev_put(netdev), while we're in the context of adding a tc flow.
	 *
	 * The mdev pointer corresponds to the peer/out net_device of a hairpin. It is then
	 * stored in a hairpin object, which exists until all flows that refer to it get
	 * removed.
	 *
	 * On the other hand, after a hairpin object has been created, the peer net_device may
	 * be removed/unbound while there are still some hairpin flows that are using it. This
	 * case is handled by mlx5e_tc_hairpin_update_dead_peer, which is hooked to the
	 * NETDEV_UNREGISTER event of the peer net_device.
	 */
	return mdev;
}
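
/* A hairpin needs its own transport domain plus a TIR on the function side,
 * so flow rules can steer received packets straight into the hairpin RQ
 * (pair->rqn[0]).
 */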
482 */ 483 return mdev; 484 } 485 486 static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp) 487 { 488 struct mlx5e_tir_builder *builder; 489 int err; 490 491 builder = mlx5e_tir_builder_alloc(false); 492 if (!builder) 493 return -ENOMEM; 494 495 err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn); 496 if (err) 497 goto out; 498 499 mlx5e_tir_builder_build_inline(builder, hp->tdn, hp->pair->rqn[0]); 500 err = mlx5e_tir_init(&hp->direct_tir, builder, hp->func_mdev, false); 501 if (err) 502 goto create_tir_err; 503 504 out: 505 mlx5e_tir_builder_free(builder); 506 return err; 507 508 create_tir_err: 509 mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn); 510 511 goto out; 512 } 513 514 static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp) 515 { 516 mlx5e_tir_destroy(&hp->direct_tir); 517 mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn); 518 } 519 520 static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp) 521 { 522 struct mlx5e_priv *priv = hp->func_priv; 523 struct mlx5_core_dev *mdev = priv->mdev; 524 struct mlx5e_rss_params_indir *indir; 525 int err; 526 527 indir = kvmalloc(sizeof(*indir), GFP_KERNEL); 528 if (!indir) 529 return -ENOMEM; 530 531 mlx5e_rss_params_indir_init_uniform(indir, hp->num_channels); 532 err = mlx5e_rqt_init_indir(&hp->indir_rqt, mdev, hp->pair->rqn, hp->num_channels, 533 mlx5e_rx_res_get_current_hash(priv->rx_res).hfunc, 534 indir); 535 536 kvfree(indir); 537 return err; 538 } 539 540 static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp) 541 { 542 struct mlx5e_priv *priv = hp->func_priv; 543 struct mlx5e_rss_params_hash rss_hash; 544 enum mlx5_traffic_types tt, max_tt; 545 struct mlx5e_tir_builder *builder; 546 int err = 0; 547 548 builder = mlx5e_tir_builder_alloc(false); 549 if (!builder) 550 return -ENOMEM; 551 552 rss_hash = mlx5e_rx_res_get_current_hash(priv->rx_res); 553 554 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { 555 struct mlx5e_rss_params_traffic_type rss_tt; 556 557 rss_tt = mlx5e_rss_get_default_tt_config(tt); 558 559 mlx5e_tir_builder_build_rqt(builder, hp->tdn, 560 mlx5e_rqt_get_rqtn(&hp->indir_rqt), 561 false); 562 mlx5e_tir_builder_build_rss(builder, &rss_hash, &rss_tt, false); 563 564 err = mlx5e_tir_init(&hp->indir_tir[tt], builder, hp->func_mdev, false); 565 if (err) { 566 mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err); 567 goto err_destroy_tirs; 568 } 569 570 mlx5e_tir_builder_clear(builder); 571 } 572 573 out: 574 mlx5e_tir_builder_free(builder); 575 return err; 576 577 err_destroy_tirs: 578 max_tt = tt; 579 for (tt = 0; tt < max_tt; tt++) 580 mlx5e_tir_destroy(&hp->indir_tir[tt]); 581 582 goto out; 583 } 584 585 static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp) 586 { 587 int tt; 588 589 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) 590 mlx5e_tir_destroy(&hp->indir_tir[tt]); 591 } 592 593 static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp, 594 struct ttc_params *ttc_params) 595 { 596 struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr; 597 int tt; 598 599 memset(ttc_params, 0, sizeof(*ttc_params)); 600 601 ttc_params->ns = mlx5_get_flow_namespace(hp->func_mdev, 602 MLX5_FLOW_NAMESPACE_KERNEL); 603 for (tt = 0; tt < MLX5_NUM_TT; tt++) { 604 ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR; 605 ttc_params->dests[tt].tir_num = 606 tt == MLX5_TT_ANY ? 
	for (tt = 0; tt < MLX5_NUM_TT; tt++) {
		ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		ttc_params->dests[tt].tir_num =
			tt == MLX5_TT_ANY ?
				mlx5e_tir_get_tirn(&hp->direct_tir) :
				mlx5e_tir_get_tirn(&hp->indir_tir[tt]);
	}

	ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
	ft_attr->prio = MLX5E_TC_PRIO;
}

static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	struct ttc_params ttc_params;
	int err;

	err = mlx5e_hairpin_create_indirect_rqt(hp);
	if (err)
		return err;

	err = mlx5e_hairpin_create_indirect_tirs(hp);
	if (err)
		goto err_create_indirect_tirs;

	mlx5e_hairpin_set_ttc_params(hp, &ttc_params);
	hp->ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params);
	if (IS_ERR(hp->ttc)) {
		err = PTR_ERR(hp->ttc);
		goto err_create_ttc_table;
	}

	netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
		   hp->num_channels,
		   mlx5_get_ttc_flow_table(hp->ttc)->id);

	return 0;

err_create_ttc_table:
	mlx5e_hairpin_destroy_indirect_tirs(hp);
err_create_indirect_tirs:
	mlx5e_rqt_destroy(&hp->indir_rqt);

	return err;
}

static void mlx5e_hairpin_rss_cleanup(struct mlx5e_hairpin *hp)
{
	mlx5_destroy_ttc_table(hp->ttc);
	mlx5e_hairpin_destroy_indirect_tirs(hp);
	mlx5e_rqt_destroy(&hp->indir_rqt);
}

static struct mlx5e_hairpin *
mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params,
		     int peer_ifindex)
{
	struct mlx5_core_dev *func_mdev, *peer_mdev;
	struct mlx5e_hairpin *hp;
	struct mlx5_hairpin *pair;
	int err;

	hp = kzalloc(sizeof(*hp), GFP_KERNEL);
	if (!hp)
		return ERR_PTR(-ENOMEM);

	func_mdev = priv->mdev;
	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
	if (IS_ERR(peer_mdev)) {
		err = PTR_ERR(peer_mdev);
		goto create_pair_err;
	}

	pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
	if (IS_ERR(pair)) {
		err = PTR_ERR(pair);
		goto create_pair_err;
	}
	hp->pair = pair;
	hp->func_mdev = func_mdev;
	hp->func_priv = priv;
	hp->num_channels = params->num_channels;

	err = mlx5e_hairpin_create_transport(hp);
	if (err)
		goto create_transport_err;

	if (hp->num_channels > 1) {
		err = mlx5e_hairpin_rss_init(hp);
		if (err)
			goto rss_init_err;
	}

	return hp;

rss_init_err:
	mlx5e_hairpin_destroy_transport(hp);
create_transport_err:
	mlx5_core_hairpin_destroy(hp->pair);
create_pair_err:
	kfree(hp);
	return ERR_PTR(err);
}

static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp)
{
	if (hp->num_channels > 1)
		mlx5e_hairpin_rss_cleanup(hp);
	mlx5e_hairpin_destroy_transport(hp);
	mlx5_core_hairpin_destroy(hp->pair);
	kvfree(hp);
}

static inline u32 hash_hairpin_info(u16 peer_vhca_id, u8 prio)
{
	return (peer_vhca_id << 16 | prio);
}

static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
						     u16 peer_vhca_id, u8 prio)
{
	struct mlx5e_hairpin_entry *hpe;
	u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);

	hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe,
			       hairpin_hlist, hash_key) {
		if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio) {
			refcount_inc(&hpe->refcnt);
			return hpe;
		}
	}

	return NULL;
}

static void mlx5e_hairpin_put(struct mlx5e_priv *priv,
			      struct mlx5e_hairpin_entry *hpe)
{
	/* no more hairpin flows for us, release the hairpin pair */
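	/* refcount_dec_and_mutex_lock() takes hairpin_tbl_lock only when the
	 * refcount actually drops to zero, so a concurrent mlx5e_hairpin_get()
	 * either bumps the count first or no longer finds the entry hashed.
	 */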
	if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &priv->fs.tc.hairpin_tbl_lock))
		return;
	hash_del(&hpe->hairpin_hlist);
	mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);

	if (!IS_ERR_OR_NULL(hpe->hp)) {
		netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
			   dev_name(hpe->hp->pair->peer_mdev->device));

		mlx5e_hairpin_destroy(hpe->hp);
	}

	WARN_ON(!list_empty(&hpe->flows));
	kfree(hpe);
}

#define UNKNOWN_MATCH_PRIO 8

static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
				  struct mlx5_flow_spec *spec, u8 *match_prio,
				  struct netlink_ext_ack *extack)
{
	void *headers_c, *headers_v;
	u8 prio_val, prio_mask = 0;
	bool vlan_present;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "only PCP trust state supported for hairpin");
		return -EOPNOTSUPP;
	}
#endif
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);

	vlan_present = MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag);
	if (vlan_present) {
		prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
		prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
	}

	if (!vlan_present || !prio_mask) {
		prio_val = UNKNOWN_MATCH_PRIO;
	} else if (prio_mask != 0x7) {
		NL_SET_ERR_MSG_MOD(extack,
				   "masked priority match not supported for hairpin");
		return -EOPNOTSUPP;
	}

	*match_prio = prio_val;
	return 0;
}

static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5e_tc_flow_parse_attr *parse_attr,
				  struct netlink_ext_ack *extack)
{
	int peer_ifindex = parse_attr->mirred_ifindex[0];
	struct mlx5_hairpin_params params;
	struct mlx5_core_dev *peer_mdev;
	struct mlx5e_hairpin_entry *hpe;
	struct mlx5e_hairpin *hp;
	u64 link_speed64;
	u32 link_speed;
	u8 match_prio;
	u16 peer_id;
	int err;

	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
	if (IS_ERR(peer_mdev)) {
		NL_SET_ERR_MSG_MOD(extack, "invalid ifindex of mirred device");
		return PTR_ERR(peer_mdev);
	}

	if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
		NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
		return -EOPNOTSUPP;
	}

	peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
	err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio,
				     extack);
	if (err)
		return err;

	mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
	hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
	if (hpe) {
		mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
		wait_for_completion(&hpe->res_ready);

		if (IS_ERR(hpe->hp)) {
			err = -EREMOTEIO;
			goto out_err;
		}
		goto attach_flow;
	}

	hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
	if (!hpe) {
		mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
		return -ENOMEM;
	}

	spin_lock_init(&hpe->flows_lock);
	INIT_LIST_HEAD(&hpe->flows);
	INIT_LIST_HEAD(&hpe->dead_peer_wait_list);
	hpe->peer_vhca_id = peer_id;
	hpe->prio = match_prio;
	refcount_set(&hpe->refcnt, 1);
	init_completion(&hpe->res_ready);

	hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist,
		 hash_hairpin_info(peer_id, match_prio));
	mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);

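	/* Size the hairpin queues: start from a 64KB data buffer (log 16),
	 * clamp to the device min/max, then derive the packet count from the
	 * data size and the minimal MPWRQ stride size.
	 */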
	params.log_data_size = 16;
	params.log_data_size = min_t(u8, params.log_data_size,
				     MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
	params.log_data_size = max_t(u8, params.log_data_size,
				     MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz));

	params.log_num_packets = params.log_data_size -
				 MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev);
	params.log_num_packets = min_t(u8, params.log_num_packets,
				       MLX5_CAP_GEN(priv->mdev, log_max_hairpin_num_packets));

	params.q_counter = priv->q_counter;
	/* set one hairpin pair per each 50Gbps share of the link speed */
	mlx5e_port_max_linkspeed(priv->mdev, &link_speed);
	link_speed = max_t(u32, link_speed, 50000);
	link_speed64 = link_speed;
	do_div(link_speed64, 50000);
	params.num_channels = link_speed64;

	hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
	hpe->hp = hp;
	complete_all(&hpe->res_ready);
	if (IS_ERR(hp)) {
		err = PTR_ERR(hp);
		goto out_err;
	}

	netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
		   mlx5e_tir_get_tirn(&hp->direct_tir), hp->pair->rqn[0],
		   dev_name(hp->pair->peer_mdev->device),
		   hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);

attach_flow:
	if (hpe->hp->num_channels > 1) {
		flow_flag_set(flow, HAIRPIN_RSS);
		flow->attr->nic_attr->hairpin_ft =
			mlx5_get_ttc_flow_table(hpe->hp->ttc);
	} else {
		flow->attr->nic_attr->hairpin_tirn = mlx5e_tir_get_tirn(&hpe->hp->direct_tir);
	}

	flow->hpe = hpe;
	spin_lock(&hpe->flows_lock);
	list_add(&flow->hairpin, &hpe->flows);
	spin_unlock(&hpe->flows_lock);

	return 0;

out_err:
	mlx5e_hairpin_put(priv, hpe);
	return err;
}

static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
				   struct mlx5e_tc_flow *flow)
{
	/* flow wasn't fully initialized */
	if (!flow->hpe)
		return;

	spin_lock(&flow->hpe->flows_lock);
	list_del(&flow->hairpin);
	spin_unlock(&flow->hpe->flows_lock);

	mlx5e_hairpin_put(priv, flow->hpe);
	flow->hpe = NULL;
}
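
/* Install a NIC (non-eswitch) offload rule. Destinations are picked in
 * priority order: an explicit dest_ft, the hairpin table or TIR, or the
 * goto-chain / default vlan table, plus an optional flow counter.
 */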
struct mlx5_flow_handle *
mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
			     struct mlx5_flow_spec *spec,
			     struct mlx5_flow_attr *attr)
{
	struct mlx5_flow_context *flow_context = &spec->flow_context;
	struct mlx5_fs_chains *nic_chains = mlx5e_nic_chains(priv);
	struct mlx5_nic_flow_attr *nic_attr = attr->nic_attr;
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_flow_act flow_act = {
		.action = attr->action,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *ft;
	int dest_ix = 0;

	flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
	flow_context->flow_tag = nic_attr->flow_tag;

	if (attr->dest_ft) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest[dest_ix].ft = attr->dest_ft;
		dest_ix++;
	} else if (nic_attr->hairpin_ft) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest[dest_ix].ft = nic_attr->hairpin_ft;
		dest_ix++;
	} else if (nic_attr->hairpin_tirn) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		dest[dest_ix].tir_num = nic_attr->hairpin_tirn;
		dest_ix++;
	} else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		if (attr->dest_chain) {
			dest[dest_ix].ft = mlx5_chains_get_table(nic_chains,
								 attr->dest_chain, 1,
								 MLX5E_TC_FT_LEVEL);
			if (IS_ERR(dest[dest_ix].ft))
				return ERR_CAST(dest[dest_ix].ft);
		} else {
			dest[dest_ix].ft = mlx5e_vlan_get_flowtable(priv->fs.vlan);
		}
		dest_ix++;
	}

	if (dest[0].type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
	    MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level))
		flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[dest_ix].counter_id = mlx5_fc_id(attr->counter);
		dest_ix++;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_hdr = attr->modify_hdr;

	mutex_lock(&tc->t_lock);
	if (IS_ERR_OR_NULL(tc->t)) {
		/* Create the root table here if it doesn't exist yet */
		tc->t =
			mlx5_chains_get_table(nic_chains, 0, 1, MLX5E_TC_FT_LEVEL);

		if (IS_ERR(tc->t)) {
			mutex_unlock(&tc->t_lock);
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			rule = ERR_CAST(priv->fs.tc.t);
			goto err_ft_get;
		}
	}
	mutex_unlock(&tc->t_lock);

	if (attr->chain || attr->prio)
		ft = mlx5_chains_get_table(nic_chains,
					   attr->chain, attr->prio,
					   MLX5E_TC_FT_LEVEL);
	else
		ft = attr->ft;

	if (IS_ERR(ft)) {
		rule = ERR_CAST(ft);
		goto err_ft_get;
	}

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	rule = mlx5_add_flow_rules(ft, spec,
				   &flow_act, dest, dest_ix);
	if (IS_ERR(rule))
		goto err_rule;

	return rule;

err_rule:
	if (attr->chain || attr->prio)
		mlx5_chains_put_table(nic_chains,
				      attr->chain, attr->prio,
				      MLX5E_TC_FT_LEVEL);
err_ft_get:
	if (attr->dest_chain)
		mlx5_chains_put_table(nic_chains,
				      attr->dest_chain, 1,
				      MLX5E_TC_FT_LEVEL);

	return ERR_CAST(rule);
}

static int
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow *flow,
		      struct netlink_ext_ack *extack)
{
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_fc *counter;
	int err;

	parse_attr = attr->parse_attr;

	if (flow_flag_test(flow, HAIRPIN)) {
		err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
		if (err)
			return err;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(dev, true);
		if (IS_ERR(counter))
			return PTR_ERR(counter);

		attr->counter = counter;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
		mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
		if (err)
			return err;
	}

	if (flow_flag_test(flow, CT))
		flow->rule[0] = mlx5_tc_ct_flow_offload(get_ct_priv(priv), flow, &parse_attr->spec,
							attr, &parse_attr->mod_hdr_acts);
	else
		flow->rule[0] = mlx5e_add_offloaded_nic_rule(priv, &parse_attr->spec,
							     attr);

	return PTR_ERR_OR_ZERO(flow->rule[0]);
}

void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv,
				  struct mlx5_flow_handle *rule,
				  struct mlx5_flow_attr *attr)
{
	struct mlx5_fs_chains *nic_chains = mlx5e_nic_chains(priv);

	mlx5_del_flow_rules(rule);

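	/* Drop the references taken on the chain/prio table and, for goto
	 * rules, on the destination chain table in mlx5e_add_offloaded_nic_rule().
	 */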
	if (attr->chain || attr->prio)
		mlx5_chains_put_table(nic_chains, attr->chain, attr->prio,
				      MLX5E_TC_FT_LEVEL);

	if (attr->dest_chain)
		mlx5_chains_put_table(nic_chains, attr->dest_chain, 1,
				      MLX5E_TC_FT_LEVEL);
}

static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	flow_flag_clear(flow, OFFLOADED);

	if (flow_flag_test(flow, CT))
		mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
	else if (!IS_ERR_OR_NULL(flow->rule[0]))
		mlx5e_del_offloaded_nic_rule(priv, flow->rule[0], attr);

	/* Remove root table if no rules are left to avoid
	 * extra steering hops.
	 */
	mutex_lock(&priv->fs.tc.t_lock);
	if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) &&
	    !IS_ERR_OR_NULL(tc->t)) {
		mlx5_chains_put_table(mlx5e_nic_chains(priv), 0, 1, MLX5E_TC_FT_LEVEL);
		priv->fs.tc.t = NULL;
	}
	mutex_unlock(&priv->fs.tc.t_lock);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
		mlx5_fc_destroy(priv->mdev, attr->counter);

	if (flow_flag_test(flow, HAIRPIN))
		mlx5e_hairpin_flow_del(priv, flow);

	kvfree(attr->parse_attr);
	kfree(flow->attr);
}

struct mlx5_flow_handle *
mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
			   struct mlx5e_tc_flow *flow,
			   struct mlx5_flow_spec *spec,
			   struct mlx5_flow_attr *attr)
{
	struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
	struct mlx5_flow_handle *rule;

	if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)
		return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);

	if (flow_flag_test(flow, CT)) {
		mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;

		rule = mlx5_tc_ct_flow_offload(get_ct_priv(flow->priv),
					       flow, spec, attr,
					       mod_hdr_acts);
	} else if (flow_flag_test(flow, SAMPLE)) {
		rule = mlx5e_tc_sample_offload(get_sample_priv(flow->priv), spec, attr,
					       mlx5e_tc_get_flow_tun_id(flow));
	} else {
		rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
	}

	if (IS_ERR(rule))
		return rule;

	if (attr->esw_attr->split_count) {
		flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
		if (IS_ERR(flow->rule[1])) {
			if (flow_flag_test(flow, CT))
				mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
			else
				mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
			return flow->rule[1];
		}
	}

	return rule;
}

void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5_flow_attr *attr)
{
	flow_flag_clear(flow, OFFLOADED);

	if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)
		goto offload_rule_0;

	if (attr->esw_attr->split_count)
		mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);

	if (flow_flag_test(flow, CT))
		mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
	else if (flow_flag_test(flow, SAMPLE))
		mlx5e_tc_sample_unoffload(get_sample_priv(flow->priv), flow->rule[0], attr);
	else
offload_rule_0:
		mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
}
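
/* "Slow path" rules clone the flow attr with plain forwarding only and the
 * SLOW_PATH flag set, so traffic goes to software (e.g. while a tunnel
 * neighbour is still unresolved) instead of the fast-path destinations.
 */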
struct mlx5_flow_handle *
mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
			      struct mlx5e_tc_flow *flow,
			      struct mlx5_flow_spec *spec)
{
	struct mlx5_flow_attr *slow_attr;
	struct mlx5_flow_handle *rule;

	slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
	if (!slow_attr)
		return ERR_PTR(-ENOMEM);

	memcpy(slow_attr, flow->attr, ESW_FLOW_ATTR_SZ);
	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	slow_attr->esw_attr->split_count = 0;
	slow_attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;

	rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
	if (!IS_ERR(rule))
		flow_flag_set(flow, SLOW);

	kfree(slow_attr);

	return rule;
}

void mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
				       struct mlx5e_tc_flow *flow)
{
	struct mlx5_flow_attr *slow_attr;

	slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
	if (!slow_attr) {
		mlx5_core_warn(flow->priv->mdev, "Unable to alloc attr to unoffload slow path rule\n");
		return;
	}

	memcpy(slow_attr, flow->attr, ESW_FLOW_ATTR_SZ);
	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	slow_attr->esw_attr->split_count = 0;
	slow_attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
	mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
	flow_flag_clear(flow, SLOW);
	kfree(slow_attr);
}

/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
 * function.
 */
static void unready_flow_add(struct mlx5e_tc_flow *flow,
			     struct list_head *unready_flows)
{
	flow_flag_set(flow, NOT_READY);
	list_add_tail(&flow->unready, unready_flows);
}

/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
 * function.
 */
static void unready_flow_del(struct mlx5e_tc_flow *flow)
{
	list_del(&flow->unready);
	flow_flag_clear(flow, NOT_READY);
}

static void add_unready_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &rpriv->uplink_priv;

	mutex_lock(&uplink_priv->unready_flows_lock);
	unready_flow_add(flow, &uplink_priv->unready_flows);
	mutex_unlock(&uplink_priv->unready_flows_lock);
}

static void remove_unready_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &rpriv->uplink_priv;

	mutex_lock(&uplink_priv->unready_flows_lock);
	unready_flow_del(flow);
	mutex_unlock(&uplink_priv->unready_flows_lock);
}

bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_dev)
{
	struct mlx5_core_dev *out_mdev, *route_mdev;
	struct mlx5e_priv *out_priv, *route_priv;

	out_priv = netdev_priv(out_dev);
	out_mdev = out_priv->mdev;
	route_priv = netdev_priv(route_dev);
	route_mdev = route_priv->mdev;

	if (out_mdev->coredev_type != MLX5_COREDEV_PF ||
	    route_mdev->coredev_type != MLX5_COREDEV_VF)
		return false;

	return mlx5e_same_hw_devs(out_priv, route_priv);
}

int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev, u16 *vport)
{
	struct mlx5e_priv *out_priv, *route_priv;
	struct mlx5_devcom *devcom = NULL;
	struct mlx5_core_dev *route_mdev;
	struct mlx5_eswitch *esw;
	u16 vhca_id;
	int err;

	out_priv = netdev_priv(out_dev);
	esw = out_priv->mdev->priv.eswitch;
	route_priv = netdev_priv(route_dev);
	route_mdev = route_priv->mdev;

	vhca_id = MLX5_CAP_GEN(route_mdev, vhca_id);
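	/* Map the route device's vhca_id to a vport on this eswitch; under
	 * LAG the mapping may only exist on the bonded peer's eswitch, which
	 * is reached through devcom below.
	 */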
	if (mlx5_lag_is_active(out_priv->mdev)) {
		/* In the lag case we may get devices from different eswitch
		 * instances. If we failed to get the vport num, it most
		 * likely means that we are on the wrong eswitch.
		 */
		err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
		if (err != -ENOENT)
			return err;

		devcom = out_priv->mdev->priv.devcom;
		esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
		if (!esw)
			return -ENODEV;
	}

	err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
	if (devcom)
		mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	return err;
}

int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow_parse_attr *parse_attr,
			      struct mlx5e_tc_flow *flow)
{
	struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts = &parse_attr->mod_hdr_acts;
	struct mlx5_modify_hdr *mod_hdr;

	mod_hdr = mlx5_modify_header_alloc(priv->mdev,
					   mlx5e_get_flow_namespace(flow),
					   mod_hdr_acts->num_actions,
					   mod_hdr_acts->actions);
	if (IS_ERR(mod_hdr))
		return PTR_ERR(mod_hdr);

	WARN_ON(flow->attr->modify_hdr);
	flow->attr->modify_hdr = mod_hdr;

	return 0;
}

static int
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow *flow,
		      struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5_flow_attr *attr = flow->attr;
	bool vf_tun = false, encap_valid = true;
	struct net_device *encap_dev = NULL;
	struct mlx5_esw_flow_attr *esw_attr;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_priv *out_priv;
	struct mlx5_fc *counter;
	u32 max_prio, max_chain;
	int err = 0;
	int out_index;

	parse_attr = attr->parse_attr;
	esw_attr = attr->esw_attr;

	/* We check chain range only for tc flows.
	 * For ft flows, we checked attr->chain was originally 0 and set it to
	 * FDB_FT_CHAIN which is outside tc range.
	 * See mlx5e_rep_setup_ft_cb().
	 */
	max_chain = mlx5_chains_get_chain_range(esw_chains(esw));
	if (!mlx5e_is_ft_flow(flow) && attr->chain > max_chain) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Requested chain is out of supported range");
		err = -EOPNOTSUPP;
		goto err_out;
	}

	max_prio = mlx5_chains_get_prio_range(esw_chains(esw));
	if (attr->prio > max_prio) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Requested priority is out of supported range");
		err = -EOPNOTSUPP;
		goto err_out;
	}

	if (flow_flag_test(flow, TUN_RX)) {
		err = mlx5e_attach_decap_route(priv, flow);
		if (err)
			goto err_out;

		if (!attr->chain && esw_attr->int_port) {
			/* If the decap route device is an internal port, change the
			 * source vport value in reg_c0 back to uplink just in
			 * case the rule performs goto chain > 0. If we have a miss
			 * on chain > 0 we want the metadata regs to hold the
			 * chain id so SW will resume handling of this packet
			 * from the proper chain.
			 */
			u32 metadata = mlx5_eswitch_get_vport_metadata_for_set(esw,
									       esw_attr->in_rep->vport);

			err = mlx5e_tc_match_to_reg_set(priv->mdev, &parse_attr->mod_hdr_acts,
							MLX5_FLOW_NAMESPACE_FDB, VPORT_TO_REG,
							metadata);
			if (err)
				goto err_out;

			attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
		}
	}

	if (flow_flag_test(flow, L3_TO_L2_DECAP)) {
		err = mlx5e_attach_decap(priv, flow, extack);
		if (err)
			goto err_out;
	}

	if (netif_is_ovs_master(parse_attr->filter_dev)) {
		struct mlx5e_tc_int_port *int_port;

		if (attr->chain) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Internal port rule is only supported on chain 0");
			err = -EOPNOTSUPP;
			goto err_out;
		}

		if (attr->dest_chain) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Internal port rule offload doesn't support goto action");
			err = -EOPNOTSUPP;
			goto err_out;
		}

		int_port = mlx5e_tc_int_port_get(mlx5e_get_int_port_priv(priv),
						 parse_attr->filter_dev->ifindex,
						 flow_flag_test(flow, EGRESS) ?
						 MLX5E_TC_INT_PORT_EGRESS :
						 MLX5E_TC_INT_PORT_INGRESS);
		if (IS_ERR(int_port)) {
			err = PTR_ERR(int_port);
			goto err_out;
		}

		esw_attr->int_port = int_port;
	}

	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
		struct net_device *out_dev;
		int mirred_ifindex;

		if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
			continue;

		mirred_ifindex = parse_attr->mirred_ifindex[out_index];
		out_dev = dev_get_by_index(dev_net(priv->netdev), mirred_ifindex);
		if (!out_dev) {
			NL_SET_ERR_MSG_MOD(extack, "Requested mirred device not found");
			err = -ENODEV;
			goto err_out;
		}
		err = mlx5e_attach_encap(priv, flow, out_dev, out_index,
					 extack, &encap_dev, &encap_valid);
		dev_put(out_dev);
		if (err)
			goto err_out;

		if (esw_attr->dests[out_index].flags &
		    MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE &&
		    !esw_attr->dest_int_port)
			vf_tun = true;
		out_priv = netdev_priv(encap_dev);
		rpriv = out_priv->ppriv;
		esw_attr->dests[out_index].rep = rpriv->rep;
		esw_attr->dests[out_index].mdev = out_priv->mdev;
	}

	if (vf_tun && esw_attr->out_count > 1) {
		NL_SET_ERR_MSG_MOD(extack, "VF tunnel encap with mirroring is not supported");
		err = -EOPNOTSUPP;
		goto err_out;
	}

	err = mlx5_eswitch_add_vlan_action(esw, attr);
	if (err)
		goto err_out;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
	    !(attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR)) {
		if (vf_tun) {
			err = mlx5e_tc_add_flow_mod_hdr(priv, parse_attr, flow);
			if (err)
				goto err_out;
		} else {
			err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
			if (err)
				goto err_out;
		}
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(esw_attr->counter_dev, true);
		if (IS_ERR(counter)) {
			err = PTR_ERR(counter);
			goto err_out;
		}

		attr->counter = counter;
	}

	/* we get here if one of the following takes place:
	 * (1) there's no error
	 * (2) there's an encap action and we don't have a valid neigh
	 */
	if (!encap_valid)
		flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec);
	else
		flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);

	if (IS_ERR(flow->rule[0])) {
		err = PTR_ERR(flow->rule[0]);
		goto err_out;
	}
	flow_flag_set(flow, OFFLOADED);

	return 0;

err_out:
	flow_flag_set(flow, FAILED);
	return err;
}

static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow)
{
	struct mlx5_flow_spec *spec = &flow->attr->parse_attr->spec;
	void *headers_v = MLX5_ADDR_OF(fte_match_param,
				       spec->match_value,
				       misc_parameters_3);
	u32 geneve_tlv_opt_0_data = MLX5_GET(fte_match_set_misc3,
					     headers_v,
					     geneve_tlv_option_0_data);

	return !!geneve_tlv_opt_0_data;
}

static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5_esw_flow_attr *esw_attr;
	bool vf_tun = false;
	int out_index;

	esw_attr = attr->esw_attr;
	mlx5e_put_flow_tunnel_id(flow);

	if (flow_flag_test(flow, NOT_READY))
		remove_unready_flow(flow);

	if (mlx5e_is_offloaded_flow(flow)) {
		if (flow_flag_test(flow, SLOW))
			mlx5e_tc_unoffload_from_slow_path(esw, flow);
		else
			mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
	}
	complete_all(&flow->del_hw_done);

	if (mlx5_flow_has_geneve_opt(flow))
		mlx5_geneve_tlv_option_del(priv->mdev->geneve);

	mlx5_eswitch_del_vlan_action(esw, attr);

	if (flow->decap_route)
		mlx5e_detach_decap_route(priv, flow);

	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
		if (esw_attr->dests[out_index].flags &
		    MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE &&
		    !esw_attr->dest_int_port)
			vf_tun = true;
		if (esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP) {
			mlx5e_detach_encap(priv, flow, out_index);
			kfree(attr->parse_attr->tun_info[out_index]);
		}
	}

	mlx5_tc_ct_match_del(get_ct_priv(priv), &flow->attr->ct_attr);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
		if (vf_tun && attr->modify_hdr)
			mlx5_modify_header_dealloc(priv->mdev, attr->modify_hdr);
		else
			mlx5e_detach_mod_hdr(priv, flow);
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
		mlx5_fc_destroy(esw_attr->counter_dev, attr->counter);

	if (esw_attr->int_port)
		mlx5e_tc_int_port_put(mlx5e_get_int_port_priv(priv), esw_attr->int_port);

	if (esw_attr->dest_int_port)
		mlx5e_tc_int_port_put(mlx5e_get_int_port_priv(priv), esw_attr->dest_int_port);

	if (flow_flag_test(flow, L3_TO_L2_DECAP))
		mlx5e_detach_decap(priv, flow);

	kfree(attr->sample_attr);
	kvfree(attr->esw_attr->rx_tun_attr);
	kvfree(attr->parse_attr);
	kfree(flow->attr);
}

struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
{
	return flow->attr->counter;
}

/* Iterate over tmp_list of flows attached to flow_list head. */
void mlx5e_put_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list)
{
	struct mlx5e_tc_flow *flow, *tmp;

	list_for_each_entry_safe(flow, tmp, flow_list, tmp_list)
		mlx5e_flow_put(priv, flow);
}

static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;

	if (!flow_flag_test(flow, ESWITCH) ||
	    !flow_flag_test(flow, DUP))
		return;

	mutex_lock(&esw->offloads.peer_mutex);
	list_del(&flow->peer);
	mutex_unlock(&esw->offloads.peer_mutex);

	flow_flag_clear(flow, DUP);

	if (refcount_dec_and_test(&flow->peer_flow->refcnt)) {
		mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow);
		kfree(flow->peer_flow);
	}

	flow->peer_flow = NULL;
}

static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_core_dev *dev = flow->priv->mdev;
	struct mlx5_devcom *devcom = dev->priv.devcom;
	struct mlx5_eswitch *peer_esw;

	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer_esw)
		return;

	__mlx5e_tc_del_fdb_peer_flow(flow);
	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}

static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow)
{
	if (mlx5e_is_eswitch_flow(flow)) {
		mlx5e_tc_del_fdb_peer_flow(flow);
		mlx5e_tc_del_fdb_flow(priv, flow);
	} else {
		mlx5e_tc_del_nic_flow(priv, flow);
	}
}

static bool flow_requires_tunnel_mapping(u32 chain, struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_action *flow_action = &rule->action;
	const struct flow_action_entry *act;
	int i;

	if (chain)
		return false;

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_GOTO:
			return true;
		case FLOW_ACTION_SAMPLE:
			return true;
		default:
			continue;
		}
	}

	return false;
}

static int
enc_opts_is_dont_care_or_full_match(struct mlx5e_priv *priv,
				    struct flow_dissector_key_enc_opts *opts,
				    struct netlink_ext_ack *extack,
				    bool *dont_care)
{
	struct geneve_opt *opt;
	int off = 0;

	*dont_care = true;

	while (opts->len > off) {
		opt = (struct geneve_opt *)&opts->data[off];

		if (!(*dont_care) || opt->opt_class || opt->type ||
		    memchr_inv(opt->opt_data, 0, opt->length * 4)) {
			*dont_care = false;

			if (opt->opt_class != htons(U16_MAX) ||
			    opt->type != U8_MAX) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Partial match of tunnel options in chain > 0 isn't supported");
				netdev_warn(priv->netdev,
					    "Partial match of tunnel options in chain > 0 isn't supported");
				return -EOPNOTSUPP;
			}
		}

		off += sizeof(struct geneve_opt) + opt->length * 4;
	}

	return 0;
}

#define COPY_DISSECTOR(rule, diss_key, dst)\
({ \
	struct flow_rule *__rule = (rule);\
	typeof(dst) __dst = dst;\
\
	memcpy(__dst,\
	       skb_flow_dissector_target(__rule->match.dissector,\
					 diss_key,\
					 __rule->match.key),\
	       sizeof(*__dst));\
})
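
/* COPY_DISSECTOR() copies the dissector key of the given type into the
 * destination struct, using the destination's own type for the size, e.g.
 * COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, &tunnel_key.enc_control).
 */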
static int mlx5e_get_flow_tunnel_id(struct mlx5e_priv *priv,
				    struct mlx5e_tc_flow *flow,
				    struct flow_cls_offload *f,
				    struct net_device *filter_dev)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
	struct flow_match_enc_opts enc_opts_match;
	struct tunnel_match_enc_opts tun_enc_opts;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct tunnel_match_key tunnel_key;
	bool enc_opts_is_dont_care = true;
	u32 tun_id, enc_opts_id = 0;
	struct mlx5_eswitch *esw;
	u32 value, mask;
	int err;

	esw = priv->mdev->priv.eswitch;
	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &uplink_rpriv->uplink_priv;

	memset(&tunnel_key, 0, sizeof(tunnel_key));
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL,
		       &tunnel_key.enc_control);
	if (tunnel_key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
		COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
			       &tunnel_key.enc_ipv4);
	else
		COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
			       &tunnel_key.enc_ipv6);
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IP, &tunnel_key.enc_ip);
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_PORTS,
		       &tunnel_key.enc_tp);
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_KEYID,
		       &tunnel_key.enc_key_id);
	tunnel_key.filter_ifindex = filter_dev->ifindex;

	err = mapping_add(uplink_priv->tunnel_mapping, &tunnel_key, &tun_id);
	if (err)
		return err;

	flow_rule_match_enc_opts(rule, &enc_opts_match);
	err = enc_opts_is_dont_care_or_full_match(priv,
						  enc_opts_match.mask,
						  extack,
						  &enc_opts_is_dont_care);
	if (err)
		goto err_enc_opts;

	if (!enc_opts_is_dont_care) {
		memset(&tun_enc_opts, 0, sizeof(tun_enc_opts));
		memcpy(&tun_enc_opts.key, enc_opts_match.key,
		       sizeof(*enc_opts_match.key));
		memcpy(&tun_enc_opts.mask, enc_opts_match.mask,
		       sizeof(*enc_opts_match.mask));

		err = mapping_add(uplink_priv->tunnel_enc_opts_mapping,
				  &tun_enc_opts, &enc_opts_id);
		if (err)
			goto err_enc_opts;
	}

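	/* Pack both mapping ids into one register value: the tunnel id goes
	 * in the high bits, the enc_opts id in the low ENC_OPTS_BITS. When no
	 * options are matched, the enc_opts bits are excluded from the mask.
	 */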
	value = tun_id << ENC_OPTS_BITS | enc_opts_id;
	mask = enc_opts_id ? TUNNEL_ID_MASK :
			     (TUNNEL_ID_MASK & ~ENC_OPTS_BITS_MASK);

	if (attr->chain) {
		mlx5e_tc_match_to_reg_match(&attr->parse_attr->spec,
					    TUNNEL_TO_REG, value, mask);
	} else {
		mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
		err = mlx5e_tc_match_to_reg_set(priv->mdev,
						mod_hdr_acts, MLX5_FLOW_NAMESPACE_FDB,
						TUNNEL_TO_REG, value);
		if (err)
			goto err_set;

		attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	}

	flow->tunnel_id = value;
	return 0;

err_set:
	if (enc_opts_id)
		mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
			       enc_opts_id);
err_enc_opts:
	mapping_remove(uplink_priv->tunnel_mapping, tun_id);
	return err;
}

static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow)
{
	u32 enc_opts_id = flow->tunnel_id & ENC_OPTS_BITS_MASK;
	u32 tun_id = flow->tunnel_id >> ENC_OPTS_BITS;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &uplink_rpriv->uplink_priv;

	if (tun_id)
		mapping_remove(uplink_priv->tunnel_mapping, tun_id);
	if (enc_opts_id)
		mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
			       enc_opts_id);
}

u32 mlx5e_tc_get_flow_tun_id(struct mlx5e_tc_flow *flow)
{
	return flow->tunnel_id;
}

void mlx5e_tc_set_ethertype(struct mlx5_core_dev *mdev,
			    struct flow_match_basic *match, bool outer,
			    void *headers_c, void *headers_v)
{
	bool ip_version_cap;

	ip_version_cap = outer ?
		MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					  ft_field_support.outer_ip_version) :
		MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					  ft_field_support.inner_ip_version);

	if (ip_version_cap && match->mask->n_proto == htons(0xFFFF) &&
	    (match->key->n_proto == htons(ETH_P_IP) ||
	     match->key->n_proto == htons(ETH_P_IPV6))) {
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_version);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version,
			 match->key->n_proto == htons(ETH_P_IP) ? 4 : 6);
	} else {
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(match->mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(match->key->n_proto));
	}
}

u8 mlx5e_tc_get_ip_version(struct mlx5_flow_spec *spec, bool outer)
{
	void *headers_v;
	u16 ethertype;
	u8 ip_version;

	if (outer)
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
	else
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, inner_headers);

	ip_version = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_version);
	/* Return ip_version converted from ethertype anyway */
	if (!ip_version) {
		ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
		if (ethertype == ETH_P_IP || ethertype == ETH_P_ARP)
			ip_version = 4;
		else if (ethertype == ETH_P_IPV6)
			ip_version = 6;
	}
	return ip_version;
}
 * It changes the inner ip_ecn depending on the inner and outer ip_ecn
 * as follows:
 * +---------+---------+---------+---------+----------+
 * |Arriving |          Arriving Outer Header          |
 * | Inner   +---------+---------+---------+----------+
 * | Header  | Not-ECT | ECT(0)  | ECT(1)  |    CE    |
 * +---------+---------+---------+---------+----------+
 * | Not-ECT | Not-ECT | Not-ECT | Not-ECT | <drop>   |
 * | ECT(0)  | ECT(0)  | ECT(0)  | ECT(1)  | CE*      |
 * | ECT(1)  | ECT(1)  | ECT(1)  | ECT(1)* | CE*      |
 * | CE      | CE      | CE      | CE      | CE       |
 * +---------+---------+---------+---------+----------+
 *
 * TC matches on the inner headers after decapsulation on the tunnel
 * device, but HW offload matches the inner ip_ecn value before the
 * hardware decap action.
 *
 * Cells marked with an asterisk (and the <drop> cell) are changed from
 * the original inner packet ip_ecn value during decap, so matching those
 * values on the inner ip_ecn before decap will fail.
 *
 * The following helper allows offload when the inner ip_ecn won't be
 * changed by the outer ip_ecn, except for outer ip_ecn = CE: there the
 * inner ip_ecn is changed to CE in all cases, so we can drop the inner
 * ip_ecn = CE match.
 */

static int mlx5e_tc_verify_tunnel_ecn(struct mlx5e_priv *priv,
				      struct flow_cls_offload *f,
				      bool *match_inner_ecn)
{
	u8 outer_ecn_mask = 0, outer_ecn_key = 0, inner_ecn_mask = 0, inner_ecn_key = 0;
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct flow_match_ip match;

	*match_inner_ecn = true;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
		flow_rule_match_enc_ip(rule, &match);
		outer_ecn_key = match.key->tos & INET_ECN_MASK;
		outer_ecn_mask = match.mask->tos & INET_ECN_MASK;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		flow_rule_match_ip(rule, &match);
		inner_ecn_key = match.key->tos & INET_ECN_MASK;
		inner_ecn_mask = match.mask->tos & INET_ECN_MASK;
	}

	if (outer_ecn_mask != 0 && outer_ecn_mask != INET_ECN_MASK) {
		NL_SET_ERR_MSG_MOD(extack, "Partial match on enc_tos ecn bits isn't supported");
		netdev_warn(priv->netdev, "Partial match on enc_tos ecn bits isn't supported");
		return -EOPNOTSUPP;
	}

	if (!outer_ecn_mask) {
		if (!inner_ecn_mask)
			return 0;

		NL_SET_ERR_MSG_MOD(extack,
				   "Matching on tos ecn bits without also matching enc_tos ecn bits isn't supported");
		netdev_warn(priv->netdev,
			    "Matching on tos ecn bits without also matching enc_tos ecn bits isn't supported");
		return -EOPNOTSUPP;
	}

	if (inner_ecn_mask && inner_ecn_mask != INET_ECN_MASK) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Partial match on tos ecn bits with match on enc_tos ecn bits isn't supported");
		netdev_warn(priv->netdev,
			    "Partial match on tos ecn bits with match on enc_tos ecn bits isn't supported");
		return -EOPNOTSUPP;
	}

	if (!inner_ecn_mask)
		return 0;

	/* Both inner and outer have a full mask on ecn */

	if (outer_ecn_key == INET_ECN_ECT_1) {
		/* inner ecn may be changed by the decap action */
		NL_SET_ERR_MSG_MOD(extack, "Match on enc_tos ecn = ECT(1) isn't supported");
		netdev_warn(priv->netdev, "Match on enc_tos ecn = ECT(1) isn't supported");
		return -EOPNOTSUPP;
	}

	if (outer_ecn_key != INET_ECN_CE)
		return 0;

	if (inner_ecn_key != INET_ECN_CE) {
		/* Can't happen in
software, as the packet ecn will be changed to CE after decap */
		NL_SET_ERR_MSG_MOD(extack,
				   "Match on enc_tos ecn = CE while match on tos ecn != CE isn't supported");
		netdev_warn(priv->netdev,
			    "Match on enc_tos ecn = CE while match on tos ecn != CE isn't supported");
		return -EOPNOTSUPP;
	}

	/* outer ecn = CE, inner ecn = CE: as decap will change the inner ecn
	 * to CE in any case, drop the match on the inner ecn
	 */
	*match_inner_ecn = false;

	return 0;
}

static int parse_tunnel_attr(struct mlx5e_priv *priv,
			     struct mlx5e_tc_flow *flow,
			     struct mlx5_flow_spec *spec,
			     struct flow_cls_offload *f,
			     struct net_device *filter_dev,
			     u8 *match_level,
			     bool *match_inner)
{
	struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(filter_dev);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct netlink_ext_ack *extack = f->common.extack;
	bool needs_mapping, sets_mapping;
	int err;

	if (!mlx5e_is_eswitch_flow(flow)) {
		NL_SET_ERR_MSG_MOD(extack, "Match on tunnel is not supported");
		return -EOPNOTSUPP;
	}

	needs_mapping = !!flow->attr->chain;
	sets_mapping = flow_requires_tunnel_mapping(flow->attr->chain, f);
	*match_inner = !needs_mapping;

	if ((needs_mapping || sets_mapping) &&
	    !mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Chains on tunnel devices aren't supported without register loopback support");
		netdev_warn(priv->netdev,
			    "Chains on tunnel devices aren't supported without register loopback support");
		return -EOPNOTSUPP;
	}

	if (!flow->attr->chain) {
		err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
					 match_level);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to parse tunnel attributes");
			netdev_warn(priv->netdev,
				    "Failed to parse tunnel attributes");
			return err;
		}

		/* With MPLS over UDP we decapsulate using a packet reformat
		 * object
		 */
		if (!netif_is_bareudp(filter_dev))
			flow->attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
		err = mlx5e_tc_set_attr_rx_tun(flow, spec);
		if (err)
			return err;
	} else if (tunnel && tunnel->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
		struct mlx5_flow_spec *tmp_spec;

		tmp_spec = kvzalloc(sizeof(*tmp_spec), GFP_KERNEL);
		if (!tmp_spec) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to allocate memory for vxlan tmp spec");
			netdev_warn(priv->netdev, "Failed to allocate memory for vxlan tmp spec");
			return -ENOMEM;
		}
		memcpy(tmp_spec, spec, sizeof(*tmp_spec));

		err = mlx5e_tc_tun_parse(filter_dev, priv, tmp_spec, f, match_level);
		if (err) {
			kvfree(tmp_spec);
			NL_SET_ERR_MSG_MOD(extack, "Failed to parse tunnel attributes");
			netdev_warn(priv->netdev, "Failed to parse tunnel attributes");
			return err;
		}
		err = mlx5e_tc_set_attr_rx_tun(flow, tmp_spec);
		kvfree(tmp_spec);
		if (err)
			return err;
	}

	if (!needs_mapping && !sets_mapping)
		return 0;

	return mlx5e_get_flow_tunnel_id(priv, flow, f, filter_dev);
}

static void *get_match_inner_headers_criteria(struct mlx5_flow_spec *spec)
{
	return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    inner_headers);
}

static void *get_match_inner_headers_value(struct mlx5_flow_spec *spec)
{
	return MLX5_ADDR_OF(fte_match_param, spec->match_value,
inner_headers); 2137 } 2138 2139 static void *get_match_outer_headers_criteria(struct mlx5_flow_spec *spec) 2140 { 2141 return MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 2142 outer_headers); 2143 } 2144 2145 static void *get_match_outer_headers_value(struct mlx5_flow_spec *spec) 2146 { 2147 return MLX5_ADDR_OF(fte_match_param, spec->match_value, 2148 outer_headers); 2149 } 2150 2151 void *mlx5e_get_match_headers_value(u32 flags, struct mlx5_flow_spec *spec) 2152 { 2153 return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ? 2154 get_match_inner_headers_value(spec) : 2155 get_match_outer_headers_value(spec); 2156 } 2157 2158 void *mlx5e_get_match_headers_criteria(u32 flags, struct mlx5_flow_spec *spec) 2159 { 2160 return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ? 2161 get_match_inner_headers_criteria(spec) : 2162 get_match_outer_headers_criteria(spec); 2163 } 2164 2165 static int mlx5e_flower_parse_meta(struct net_device *filter_dev, 2166 struct flow_cls_offload *f) 2167 { 2168 struct flow_rule *rule = flow_cls_offload_flow_rule(f); 2169 struct netlink_ext_ack *extack = f->common.extack; 2170 struct net_device *ingress_dev; 2171 struct flow_match_meta match; 2172 2173 if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) 2174 return 0; 2175 2176 flow_rule_match_meta(rule, &match); 2177 if (!match.mask->ingress_ifindex) 2178 return 0; 2179 2180 if (match.mask->ingress_ifindex != 0xFFFFFFFF) { 2181 NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask"); 2182 return -EOPNOTSUPP; 2183 } 2184 2185 ingress_dev = __dev_get_by_index(dev_net(filter_dev), 2186 match.key->ingress_ifindex); 2187 if (!ingress_dev) { 2188 NL_SET_ERR_MSG_MOD(extack, 2189 "Can't find the ingress port to match on"); 2190 return -ENOENT; 2191 } 2192 2193 if (ingress_dev != filter_dev) { 2194 NL_SET_ERR_MSG_MOD(extack, 2195 "Can't match on the ingress filter port"); 2196 return -EOPNOTSUPP; 2197 } 2198 2199 return 0; 2200 } 2201 2202 static bool skip_key_basic(struct net_device *filter_dev, 2203 struct flow_cls_offload *f) 2204 { 2205 /* When doing mpls over udp decap, the user needs to provide 2206 * MPLS_UC as the protocol in order to be able to match on mpls 2207 * label fields. However, the actual ethertype is IP so we want to 2208 * avoid matching on this, otherwise we'll fail the match. 
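 * E.g., a "protocol mpls_uc flower mpls_label 100" filter on a bareudp
 * device is still expected to match, even though the actual ethertype
 * carried is IP.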
2209 */ 2210 if (netif_is_bareudp(filter_dev) && f->common.chain_index == 0) 2211 return true; 2212 2213 return false; 2214 } 2215 2216 static int __parse_cls_flower(struct mlx5e_priv *priv, 2217 struct mlx5e_tc_flow *flow, 2218 struct mlx5_flow_spec *spec, 2219 struct flow_cls_offload *f, 2220 struct net_device *filter_dev, 2221 u8 *inner_match_level, u8 *outer_match_level) 2222 { 2223 struct netlink_ext_ack *extack = f->common.extack; 2224 void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 2225 outer_headers); 2226 void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, 2227 outer_headers); 2228 void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 2229 misc_parameters); 2230 void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, 2231 misc_parameters); 2232 void *misc_c_3 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 2233 misc_parameters_3); 2234 void *misc_v_3 = MLX5_ADDR_OF(fte_match_param, spec->match_value, 2235 misc_parameters_3); 2236 struct flow_rule *rule = flow_cls_offload_flow_rule(f); 2237 struct flow_dissector *dissector = rule->match.dissector; 2238 enum fs_flow_table_type fs_type; 2239 bool match_inner_ecn = true; 2240 u16 addr_type = 0; 2241 u8 ip_proto = 0; 2242 u8 *match_level; 2243 int err; 2244 2245 fs_type = mlx5e_is_eswitch_flow(flow) ? FS_FT_FDB : FS_FT_NIC_RX; 2246 match_level = outer_match_level; 2247 2248 if (dissector->used_keys & 2249 ~(BIT(FLOW_DISSECTOR_KEY_META) | 2250 BIT(FLOW_DISSECTOR_KEY_CONTROL) | 2251 BIT(FLOW_DISSECTOR_KEY_BASIC) | 2252 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | 2253 BIT(FLOW_DISSECTOR_KEY_VLAN) | 2254 BIT(FLOW_DISSECTOR_KEY_CVLAN) | 2255 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | 2256 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | 2257 BIT(FLOW_DISSECTOR_KEY_PORTS) | 2258 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | 2259 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | 2260 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | 2261 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | 2262 BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | 2263 BIT(FLOW_DISSECTOR_KEY_TCP) | 2264 BIT(FLOW_DISSECTOR_KEY_IP) | 2265 BIT(FLOW_DISSECTOR_KEY_CT) | 2266 BIT(FLOW_DISSECTOR_KEY_ENC_IP) | 2267 BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | 2268 BIT(FLOW_DISSECTOR_KEY_ICMP) | 2269 BIT(FLOW_DISSECTOR_KEY_MPLS))) { 2270 NL_SET_ERR_MSG_MOD(extack, "Unsupported key"); 2271 netdev_dbg(priv->netdev, "Unsupported key used: 0x%x\n", 2272 dissector->used_keys); 2273 return -EOPNOTSUPP; 2274 } 2275 2276 if (mlx5e_get_tc_tun(filter_dev)) { 2277 bool match_inner = false; 2278 2279 err = parse_tunnel_attr(priv, flow, spec, f, filter_dev, 2280 outer_match_level, &match_inner); 2281 if (err) 2282 return err; 2283 2284 if (match_inner) { 2285 /* header pointers should point to the inner headers 2286 * if the packet was decapsulated already. 2287 * outer headers are set by parse_tunnel_attr. 
2288 */ 2289 match_level = inner_match_level; 2290 headers_c = get_match_inner_headers_criteria(spec); 2291 headers_v = get_match_inner_headers_value(spec); 2292 } 2293 2294 err = mlx5e_tc_verify_tunnel_ecn(priv, f, &match_inner_ecn); 2295 if (err) 2296 return err; 2297 } 2298 2299 err = mlx5e_flower_parse_meta(filter_dev, f); 2300 if (err) 2301 return err; 2302 2303 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC) && 2304 !skip_key_basic(filter_dev, f)) { 2305 struct flow_match_basic match; 2306 2307 flow_rule_match_basic(rule, &match); 2308 mlx5e_tc_set_ethertype(priv->mdev, &match, 2309 match_level == outer_match_level, 2310 headers_c, headers_v); 2311 2312 if (match.mask->n_proto) 2313 *match_level = MLX5_MATCH_L2; 2314 } 2315 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN) || 2316 is_vlan_dev(filter_dev)) { 2317 struct flow_dissector_key_vlan filter_dev_mask; 2318 struct flow_dissector_key_vlan filter_dev_key; 2319 struct flow_match_vlan match; 2320 2321 if (is_vlan_dev(filter_dev)) { 2322 match.key = &filter_dev_key; 2323 match.key->vlan_id = vlan_dev_vlan_id(filter_dev); 2324 match.key->vlan_tpid = vlan_dev_vlan_proto(filter_dev); 2325 match.key->vlan_priority = 0; 2326 match.mask = &filter_dev_mask; 2327 memset(match.mask, 0xff, sizeof(*match.mask)); 2328 match.mask->vlan_priority = 0; 2329 } else { 2330 flow_rule_match_vlan(rule, &match); 2331 } 2332 if (match.mask->vlan_id || 2333 match.mask->vlan_priority || 2334 match.mask->vlan_tpid) { 2335 if (match.key->vlan_tpid == htons(ETH_P_8021AD)) { 2336 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 2337 svlan_tag, 1); 2338 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 2339 svlan_tag, 1); 2340 } else { 2341 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 2342 cvlan_tag, 1); 2343 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 2344 cvlan_tag, 1); 2345 } 2346 2347 MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, 2348 match.mask->vlan_id); 2349 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, 2350 match.key->vlan_id); 2351 2352 MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, 2353 match.mask->vlan_priority); 2354 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, 2355 match.key->vlan_priority); 2356 2357 *match_level = MLX5_MATCH_L2; 2358 } 2359 } else if (*match_level != MLX5_MATCH_NONE) { 2360 /* cvlan_tag enabled in match criteria and 2361 * disabled in match value means both S & C tags 2362 * don't exist (untagged of both) 2363 */ 2364 MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1); 2365 *match_level = MLX5_MATCH_L2; 2366 } 2367 2368 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) { 2369 struct flow_match_vlan match; 2370 2371 flow_rule_match_cvlan(rule, &match); 2372 if (match.mask->vlan_id || 2373 match.mask->vlan_priority || 2374 match.mask->vlan_tpid) { 2375 if (!MLX5_CAP_FLOWTABLE_TYPE(priv->mdev, ft_field_support.outer_second_vid, 2376 fs_type)) { 2377 NL_SET_ERR_MSG_MOD(extack, 2378 "Matching on CVLAN is not supported"); 2379 return -EOPNOTSUPP; 2380 } 2381 2382 if (match.key->vlan_tpid == htons(ETH_P_8021AD)) { 2383 MLX5_SET(fte_match_set_misc, misc_c, 2384 outer_second_svlan_tag, 1); 2385 MLX5_SET(fte_match_set_misc, misc_v, 2386 outer_second_svlan_tag, 1); 2387 } else { 2388 MLX5_SET(fte_match_set_misc, misc_c, 2389 outer_second_cvlan_tag, 1); 2390 MLX5_SET(fte_match_set_misc, misc_v, 2391 outer_second_cvlan_tag, 1); 2392 } 2393 2394 MLX5_SET(fte_match_set_misc, misc_c, outer_second_vid, 2395 match.mask->vlan_id); 2396 MLX5_SET(fte_match_set_misc, misc_v, outer_second_vid, 2397 
match.key->vlan_id); 2398 MLX5_SET(fte_match_set_misc, misc_c, outer_second_prio, 2399 match.mask->vlan_priority); 2400 MLX5_SET(fte_match_set_misc, misc_v, outer_second_prio, 2401 match.key->vlan_priority); 2402 2403 *match_level = MLX5_MATCH_L2; 2404 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS; 2405 } 2406 } 2407 2408 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { 2409 struct flow_match_eth_addrs match; 2410 2411 flow_rule_match_eth_addrs(rule, &match); 2412 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 2413 dmac_47_16), 2414 match.mask->dst); 2415 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 2416 dmac_47_16), 2417 match.key->dst); 2418 2419 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 2420 smac_47_16), 2421 match.mask->src); 2422 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 2423 smac_47_16), 2424 match.key->src); 2425 2426 if (!is_zero_ether_addr(match.mask->src) || 2427 !is_zero_ether_addr(match.mask->dst)) 2428 *match_level = MLX5_MATCH_L2; 2429 } 2430 2431 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { 2432 struct flow_match_control match; 2433 2434 flow_rule_match_control(rule, &match); 2435 addr_type = match.key->addr_type; 2436 2437 /* the HW doesn't support frag first/later */ 2438 if (match.mask->flags & FLOW_DIS_FIRST_FRAG) { 2439 NL_SET_ERR_MSG_MOD(extack, "Match on frag first/later is not supported"); 2440 return -EOPNOTSUPP; 2441 } 2442 2443 if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) { 2444 MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1); 2445 MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 2446 match.key->flags & FLOW_DIS_IS_FRAGMENT); 2447 2448 /* the HW doesn't need L3 inline to match on frag=no */ 2449 if (!(match.key->flags & FLOW_DIS_IS_FRAGMENT)) 2450 *match_level = MLX5_MATCH_L2; 2451 /* *** L2 attributes parsing up to here *** */ 2452 else 2453 *match_level = MLX5_MATCH_L3; 2454 } 2455 } 2456 2457 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { 2458 struct flow_match_basic match; 2459 2460 flow_rule_match_basic(rule, &match); 2461 ip_proto = match.key->ip_proto; 2462 2463 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol, 2464 match.mask->ip_proto); 2465 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, 2466 match.key->ip_proto); 2467 2468 if (match.mask->ip_proto) 2469 *match_level = MLX5_MATCH_L3; 2470 } 2471 2472 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { 2473 struct flow_match_ipv4_addrs match; 2474 2475 flow_rule_match_ipv4_addrs(rule, &match); 2476 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 2477 src_ipv4_src_ipv6.ipv4_layout.ipv4), 2478 &match.mask->src, sizeof(match.mask->src)); 2479 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 2480 src_ipv4_src_ipv6.ipv4_layout.ipv4), 2481 &match.key->src, sizeof(match.key->src)); 2482 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 2483 dst_ipv4_dst_ipv6.ipv4_layout.ipv4), 2484 &match.mask->dst, sizeof(match.mask->dst)); 2485 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 2486 dst_ipv4_dst_ipv6.ipv4_layout.ipv4), 2487 &match.key->dst, sizeof(match.key->dst)); 2488 2489 if (match.mask->src || match.mask->dst) 2490 *match_level = MLX5_MATCH_L3; 2491 } 2492 2493 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { 2494 struct flow_match_ipv6_addrs match; 2495 2496 flow_rule_match_ipv6_addrs(rule, &match); 2497 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 2498 src_ipv4_src_ipv6.ipv6_layout.ipv6), 2499 &match.mask->src, 
sizeof(match.mask->src)); 2500 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 2501 src_ipv4_src_ipv6.ipv6_layout.ipv6), 2502 &match.key->src, sizeof(match.key->src)); 2503 2504 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 2505 dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 2506 &match.mask->dst, sizeof(match.mask->dst)); 2507 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 2508 dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 2509 &match.key->dst, sizeof(match.key->dst)); 2510 2511 if (ipv6_addr_type(&match.mask->src) != IPV6_ADDR_ANY || 2512 ipv6_addr_type(&match.mask->dst) != IPV6_ADDR_ANY) 2513 *match_level = MLX5_MATCH_L3; 2514 } 2515 2516 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) { 2517 struct flow_match_ip match; 2518 2519 flow_rule_match_ip(rule, &match); 2520 if (match_inner_ecn) { 2521 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, 2522 match.mask->tos & 0x3); 2523 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, 2524 match.key->tos & 0x3); 2525 } 2526 2527 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, 2528 match.mask->tos >> 2); 2529 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, 2530 match.key->tos >> 2); 2531 2532 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, 2533 match.mask->ttl); 2534 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, 2535 match.key->ttl); 2536 2537 if (match.mask->ttl && 2538 !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, 2539 ft_field_support.outer_ipv4_ttl)) { 2540 NL_SET_ERR_MSG_MOD(extack, 2541 "Matching on TTL is not supported"); 2542 return -EOPNOTSUPP; 2543 } 2544 2545 if (match.mask->tos || match.mask->ttl) 2546 *match_level = MLX5_MATCH_L3; 2547 } 2548 2549 /* *** L3 attributes parsing up to here *** */ 2550 2551 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { 2552 struct flow_match_ports match; 2553 2554 flow_rule_match_ports(rule, &match); 2555 switch (ip_proto) { 2556 case IPPROTO_TCP: 2557 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 2558 tcp_sport, ntohs(match.mask->src)); 2559 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 2560 tcp_sport, ntohs(match.key->src)); 2561 2562 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 2563 tcp_dport, ntohs(match.mask->dst)); 2564 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 2565 tcp_dport, ntohs(match.key->dst)); 2566 break; 2567 2568 case IPPROTO_UDP: 2569 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 2570 udp_sport, ntohs(match.mask->src)); 2571 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 2572 udp_sport, ntohs(match.key->src)); 2573 2574 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 2575 udp_dport, ntohs(match.mask->dst)); 2576 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 2577 udp_dport, ntohs(match.key->dst)); 2578 break; 2579 default: 2580 NL_SET_ERR_MSG_MOD(extack, 2581 "Only UDP and TCP transports are supported for L4 matching"); 2582 netdev_err(priv->netdev, 2583 "Only UDP and TCP transport are supported\n"); 2584 return -EINVAL; 2585 } 2586 2587 if (match.mask->src || match.mask->dst) 2588 *match_level = MLX5_MATCH_L4; 2589 } 2590 2591 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) { 2592 struct flow_match_tcp match; 2593 2594 flow_rule_match_tcp(rule, &match); 2595 MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags, 2596 ntohs(match.mask->flags)); 2597 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags, 2598 ntohs(match.key->flags)); 2599 2600 if (match.mask->flags) 2601 *match_level = MLX5_MATCH_L4; 2602 } 2603 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP)) { 2604 struct flow_match_icmp match; 2605 2606 flow_rule_match_icmp(rule, &match); 2607 switch 
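/* ICMP type/code are matched via misc_parameters_3 and need flex
		 * parser support for the respective protocol, checked per
		 * ICMP/ICMPv6 below.
		 */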
(ip_proto) { 2608 case IPPROTO_ICMP: 2609 if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) & 2610 MLX5_FLEX_PROTO_ICMP)) { 2611 NL_SET_ERR_MSG_MOD(extack, 2612 "Match on Flex protocols for ICMP is not supported"); 2613 return -EOPNOTSUPP; 2614 } 2615 MLX5_SET(fte_match_set_misc3, misc_c_3, icmp_type, 2616 match.mask->type); 2617 MLX5_SET(fte_match_set_misc3, misc_v_3, icmp_type, 2618 match.key->type); 2619 MLX5_SET(fte_match_set_misc3, misc_c_3, icmp_code, 2620 match.mask->code); 2621 MLX5_SET(fte_match_set_misc3, misc_v_3, icmp_code, 2622 match.key->code); 2623 break; 2624 case IPPROTO_ICMPV6: 2625 if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) & 2626 MLX5_FLEX_PROTO_ICMPV6)) { 2627 NL_SET_ERR_MSG_MOD(extack, 2628 "Match on Flex protocols for ICMPV6 is not supported"); 2629 return -EOPNOTSUPP; 2630 } 2631 MLX5_SET(fte_match_set_misc3, misc_c_3, icmpv6_type, 2632 match.mask->type); 2633 MLX5_SET(fte_match_set_misc3, misc_v_3, icmpv6_type, 2634 match.key->type); 2635 MLX5_SET(fte_match_set_misc3, misc_c_3, icmpv6_code, 2636 match.mask->code); 2637 MLX5_SET(fte_match_set_misc3, misc_v_3, icmpv6_code, 2638 match.key->code); 2639 break; 2640 default: 2641 NL_SET_ERR_MSG_MOD(extack, 2642 "Code and type matching only with ICMP and ICMPv6"); 2643 netdev_err(priv->netdev, 2644 "Code and type matching only with ICMP and ICMPv6\n"); 2645 return -EINVAL; 2646 } 2647 if (match.mask->code || match.mask->type) { 2648 *match_level = MLX5_MATCH_L4; 2649 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_3; 2650 } 2651 } 2652 /* Currently supported only for MPLS over UDP */ 2653 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS) && 2654 !netif_is_bareudp(filter_dev)) { 2655 NL_SET_ERR_MSG_MOD(extack, 2656 "Matching on MPLS is supported only for MPLS over UDP"); 2657 netdev_err(priv->netdev, 2658 "Matching on MPLS is supported only for MPLS over UDP\n"); 2659 return -EOPNOTSUPP; 2660 } 2661 2662 return 0; 2663 } 2664 2665 static int parse_cls_flower(struct mlx5e_priv *priv, 2666 struct mlx5e_tc_flow *flow, 2667 struct mlx5_flow_spec *spec, 2668 struct flow_cls_offload *f, 2669 struct net_device *filter_dev) 2670 { 2671 u8 inner_match_level, outer_match_level, non_tunnel_match_level; 2672 struct netlink_ext_ack *extack = f->common.extack; 2673 struct mlx5_core_dev *dev = priv->mdev; 2674 struct mlx5_eswitch *esw = dev->priv.eswitch; 2675 struct mlx5e_rep_priv *rpriv = priv->ppriv; 2676 struct mlx5_eswitch_rep *rep; 2677 bool is_eswitch_flow; 2678 int err; 2679 2680 inner_match_level = MLX5_MATCH_NONE; 2681 outer_match_level = MLX5_MATCH_NONE; 2682 2683 err = __parse_cls_flower(priv, flow, spec, f, filter_dev, 2684 &inner_match_level, &outer_match_level); 2685 non_tunnel_match_level = (inner_match_level == MLX5_MATCH_NONE) ? 
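/* no match on inner headers means no decap took place, so
				 * the min-inline check below applies to the outer
				 * match level
				 */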
2686 outer_match_level : inner_match_level; 2687 2688 is_eswitch_flow = mlx5e_is_eswitch_flow(flow); 2689 if (!err && is_eswitch_flow) { 2690 rep = rpriv->rep; 2691 if (rep->vport != MLX5_VPORT_UPLINK && 2692 (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE && 2693 esw->offloads.inline_mode < non_tunnel_match_level)) { 2694 NL_SET_ERR_MSG_MOD(extack, 2695 "Flow is not offloaded due to min inline setting"); 2696 netdev_warn(priv->netdev, 2697 "Flow is not offloaded due to min inline setting, required %d actual %d\n", 2698 non_tunnel_match_level, esw->offloads.inline_mode); 2699 return -EOPNOTSUPP; 2700 } 2701 } 2702 2703 flow->attr->inner_match_level = inner_match_level; 2704 flow->attr->outer_match_level = outer_match_level; 2705 2706 2707 return err; 2708 } 2709 2710 struct mlx5_fields { 2711 u8 field; 2712 u8 field_bsize; 2713 u32 field_mask; 2714 u32 offset; 2715 u32 match_offset; 2716 }; 2717 2718 #define OFFLOAD(fw_field, field_bsize, field_mask, field, off, match_field) \ 2719 {MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, field_bsize, field_mask, \ 2720 offsetof(struct pedit_headers, field) + (off), \ 2721 MLX5_BYTE_OFF(fte_match_set_lyr_2_4, match_field)} 2722 2723 /* masked values are the same and there are no rewrites that do not have a 2724 * match. 2725 */ 2726 #define SAME_VAL_MASK(type, valp, maskp, matchvalp, matchmaskp) ({ \ 2727 type matchmaskx = *(type *)(matchmaskp); \ 2728 type matchvalx = *(type *)(matchvalp); \ 2729 type maskx = *(type *)(maskp); \ 2730 type valx = *(type *)(valp); \ 2731 \ 2732 (valx & maskx) == (matchvalx & matchmaskx) && !(maskx & (maskx ^ \ 2733 matchmaskx)); \ 2734 }) 2735 2736 static bool cmp_val_mask(void *valp, void *maskp, void *matchvalp, 2737 void *matchmaskp, u8 bsize) 2738 { 2739 bool same = false; 2740 2741 switch (bsize) { 2742 case 8: 2743 same = SAME_VAL_MASK(u8, valp, maskp, matchvalp, matchmaskp); 2744 break; 2745 case 16: 2746 same = SAME_VAL_MASK(u16, valp, maskp, matchvalp, matchmaskp); 2747 break; 2748 case 32: 2749 same = SAME_VAL_MASK(u32, valp, maskp, matchvalp, matchmaskp); 2750 break; 2751 } 2752 2753 return same; 2754 } 2755 2756 static struct mlx5_fields fields[] = { 2757 OFFLOAD(DMAC_47_16, 32, U32_MAX, eth.h_dest[0], 0, dmac_47_16), 2758 OFFLOAD(DMAC_15_0, 16, U16_MAX, eth.h_dest[4], 0, dmac_15_0), 2759 OFFLOAD(SMAC_47_16, 32, U32_MAX, eth.h_source[0], 0, smac_47_16), 2760 OFFLOAD(SMAC_15_0, 16, U16_MAX, eth.h_source[4], 0, smac_15_0), 2761 OFFLOAD(ETHERTYPE, 16, U16_MAX, eth.h_proto, 0, ethertype), 2762 OFFLOAD(FIRST_VID, 16, U16_MAX, vlan.h_vlan_TCI, 0, first_vid), 2763 2764 OFFLOAD(IP_DSCP, 8, 0xfc, ip4.tos, 0, ip_dscp), 2765 OFFLOAD(IP_TTL, 8, U8_MAX, ip4.ttl, 0, ttl_hoplimit), 2766 OFFLOAD(SIPV4, 32, U32_MAX, ip4.saddr, 0, src_ipv4_src_ipv6.ipv4_layout.ipv4), 2767 OFFLOAD(DIPV4, 32, U32_MAX, ip4.daddr, 0, dst_ipv4_dst_ipv6.ipv4_layout.ipv4), 2768 2769 OFFLOAD(SIPV6_127_96, 32, U32_MAX, ip6.saddr.s6_addr32[0], 0, 2770 src_ipv4_src_ipv6.ipv6_layout.ipv6[0]), 2771 OFFLOAD(SIPV6_95_64, 32, U32_MAX, ip6.saddr.s6_addr32[1], 0, 2772 src_ipv4_src_ipv6.ipv6_layout.ipv6[4]), 2773 OFFLOAD(SIPV6_63_32, 32, U32_MAX, ip6.saddr.s6_addr32[2], 0, 2774 src_ipv4_src_ipv6.ipv6_layout.ipv6[8]), 2775 OFFLOAD(SIPV6_31_0, 32, U32_MAX, ip6.saddr.s6_addr32[3], 0, 2776 src_ipv4_src_ipv6.ipv6_layout.ipv6[12]), 2777 OFFLOAD(DIPV6_127_96, 32, U32_MAX, ip6.daddr.s6_addr32[0], 0, 2778 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[0]), 2779 OFFLOAD(DIPV6_95_64, 32, U32_MAX, ip6.daddr.s6_addr32[1], 0, 2780 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[4]), 2781 
	OFFLOAD(DIPV6_63_32, 32, U32_MAX, ip6.daddr.s6_addr32[2], 0,
		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[8]),
	OFFLOAD(DIPV6_31_0, 32, U32_MAX, ip6.daddr.s6_addr32[3], 0,
		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]),
	OFFLOAD(IPV6_HOPLIMIT, 8, U8_MAX, ip6.hop_limit, 0, ttl_hoplimit),
	OFFLOAD(IP_DSCP, 16, 0xc00f, ip6, 0, ip_dscp),

	OFFLOAD(TCP_SPORT, 16, U16_MAX, tcp.source, 0, tcp_sport),
	OFFLOAD(TCP_DPORT, 16, U16_MAX, tcp.dest, 0, tcp_dport),
	/* tcp_flags lives in struct tcphdr (not iphdr) and is 8 bits long;
	 * it is addressed here as ack_seq plus a 5 byte offset
	 */
	OFFLOAD(TCP_FLAGS, 8, U8_MAX, tcp.ack_seq, 5, tcp_flags),

	OFFLOAD(UDP_SPORT, 16, U16_MAX, udp.source, 0, udp_sport),
	OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest, 0, udp_dport),
};

static unsigned long mask_to_le(unsigned long mask, int size)
{
	__be32 mask_be32;
	__be16 mask_be16;

	if (size == 32) {
		mask_be32 = (__force __be32)(mask);
		mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
	} else if (size == 16) {
		mask_be32 = (__force __be32)(mask);
		mask_be16 = *(__be16 *)&mask_be32;
		mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
	}

	return mask;
}

static int offload_pedit_fields(struct mlx5e_priv *priv,
				int namespace,
				struct pedit_headers_action *hdrs,
				struct mlx5e_tc_flow_parse_attr *parse_attr,
				u32 *action_flags,
				struct netlink_ext_ack *extack)
{
	struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
	void *headers_c, *headers_v, *action, *vals_p;
	u32 *s_masks_p, *a_masks_p, s_mask, a_mask;
	struct mlx5e_tc_mod_hdr_acts *mod_acts;
	unsigned long mask, field_mask;
	int i, first, last, next_z;
	struct mlx5_fields *f;
	u8 cmd;

	mod_acts = &parse_attr->mod_hdr_acts;
	headers_c = mlx5e_get_match_headers_criteria(*action_flags, &parse_attr->spec);
	headers_v = mlx5e_get_match_headers_value(*action_flags, &parse_attr->spec);

	set_masks = &hdrs[0].masks;
	add_masks = &hdrs[1].masks;
	set_vals = &hdrs[0].vals;
	add_vals = &hdrs[1].vals;

	for (i = 0; i < ARRAY_SIZE(fields); i++) {
		bool skip;

		f = &fields[i];
		/* avoid seeing bits set from previous iterations */
		s_mask = 0;
		a_mask = 0;

		s_masks_p = (void *)set_masks + f->offset;
		a_masks_p = (void *)add_masks + f->offset;

		s_mask = *s_masks_p & f->field_mask;
		a_mask = *a_masks_p & f->field_mask;

		if (!s_mask && !a_mask) /* nothing to offload here */
			continue;

		if (s_mask && a_mask) {
			NL_SET_ERR_MSG_MOD(extack,
					   "can't set and add to the same HW field");
			netdev_warn(priv->netdev,
				    "mlx5: can't set and add to the same HW field (%x)\n",
				    f->field);
			return -EOPNOTSUPP;
		}

		skip = false;
		if (s_mask) {
			void *match_mask = headers_c + f->match_offset;
			void *match_val = headers_v + f->match_offset;

			cmd = MLX5_ACTION_TYPE_SET;
			mask = s_mask;
			vals_p = (void *)set_vals + f->offset;
			/* don't rewrite if we have a match on the same value */
			if (cmp_val_mask(vals_p, s_masks_p, match_val,
					 match_mask, f->field_bsize))
				skip = true;
			/* clear to denote we consumed this field */
			*s_masks_p &= ~f->field_mask;
		} else {
			cmd = MLX5_ACTION_TYPE_ADD;
			mask = a_mask;
			vals_p = (void *)add_vals + f->offset;
			/* add 0 is no change */
			if ((*(u32 *)vals_p & f->field_mask) == 0)
				skip = true;
			/* 
clear to denote we consumed this field */ 2886 *a_masks_p &= ~f->field_mask; 2887 } 2888 if (skip) 2889 continue; 2890 2891 mask = mask_to_le(mask, f->field_bsize); 2892 2893 first = find_first_bit(&mask, f->field_bsize); 2894 next_z = find_next_zero_bit(&mask, f->field_bsize, first); 2895 last = find_last_bit(&mask, f->field_bsize); 2896 if (first < next_z && next_z < last) { 2897 NL_SET_ERR_MSG_MOD(extack, 2898 "rewrite of few sub-fields isn't supported"); 2899 netdev_warn(priv->netdev, 2900 "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n", 2901 mask); 2902 return -EOPNOTSUPP; 2903 } 2904 2905 action = mlx5e_mod_hdr_alloc(priv->mdev, namespace, mod_acts); 2906 if (IS_ERR(action)) { 2907 NL_SET_ERR_MSG_MOD(extack, 2908 "too many pedit actions, can't offload"); 2909 mlx5_core_warn(priv->mdev, 2910 "mlx5: parsed %d pedit actions, can't do more\n", 2911 mod_acts->num_actions); 2912 return PTR_ERR(action); 2913 } 2914 2915 MLX5_SET(set_action_in, action, action_type, cmd); 2916 MLX5_SET(set_action_in, action, field, f->field); 2917 2918 if (cmd == MLX5_ACTION_TYPE_SET) { 2919 int start; 2920 2921 field_mask = mask_to_le(f->field_mask, f->field_bsize); 2922 2923 /* if field is bit sized it can start not from first bit */ 2924 start = find_first_bit(&field_mask, f->field_bsize); 2925 2926 MLX5_SET(set_action_in, action, offset, first - start); 2927 /* length is num of bits to be written, zero means length of 32 */ 2928 MLX5_SET(set_action_in, action, length, (last - first + 1)); 2929 } 2930 2931 if (f->field_bsize == 32) 2932 MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p) >> first); 2933 else if (f->field_bsize == 16) 2934 MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p) >> first); 2935 else if (f->field_bsize == 8) 2936 MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first); 2937 2938 ++mod_acts->num_actions; 2939 } 2940 2941 return 0; 2942 } 2943 2944 static const struct pedit_headers zero_masks = {}; 2945 2946 static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace, 2947 struct mlx5e_tc_flow_parse_attr *parse_attr, 2948 struct pedit_headers_action *hdrs, 2949 u32 *action_flags, 2950 struct netlink_ext_ack *extack) 2951 { 2952 struct pedit_headers *cmd_masks; 2953 int err; 2954 u8 cmd; 2955 2956 err = offload_pedit_fields(priv, namespace, hdrs, parse_attr, 2957 action_flags, extack); 2958 if (err < 0) 2959 goto out_dealloc_parsed_actions; 2960 2961 for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) { 2962 cmd_masks = &hdrs[cmd].masks; 2963 if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) { 2964 NL_SET_ERR_MSG_MOD(extack, 2965 "attempt to offload an unsupported field"); 2966 netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd); 2967 print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS, 2968 16, 1, cmd_masks, sizeof(zero_masks), true); 2969 err = -EOPNOTSUPP; 2970 goto out_dealloc_parsed_actions; 2971 } 2972 } 2973 2974 return 0; 2975 2976 out_dealloc_parsed_actions: 2977 mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts); 2978 return err; 2979 } 2980 2981 struct ip_ttl_word { 2982 __u8 ttl; 2983 __u8 protocol; 2984 __sum16 check; 2985 }; 2986 2987 struct ipv6_hoplimit_word { 2988 __be16 payload_len; 2989 __u8 nexthdr; 2990 __u8 hop_limit; 2991 }; 2992 2993 static bool 2994 is_action_keys_supported(const struct flow_action_entry *act, bool ct_flow, 2995 bool *modify_ip_header, bool *modify_tuple, 2996 struct netlink_ext_ack *extack) 2997 { 2998 u32 mask, offset; 2999 u8 htype; 3000 3001 htype 
= act->mangle.htype; 3002 offset = act->mangle.offset; 3003 mask = ~act->mangle.mask; 3004 /* For IPv4 & IPv6 header check 4 byte word, 3005 * to determine that modified fields 3006 * are NOT ttl & hop_limit only. 3007 */ 3008 if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4) { 3009 struct ip_ttl_word *ttl_word = 3010 (struct ip_ttl_word *)&mask; 3011 3012 if (offset != offsetof(struct iphdr, ttl) || 3013 ttl_word->protocol || 3014 ttl_word->check) { 3015 *modify_ip_header = true; 3016 } 3017 3018 if (offset >= offsetof(struct iphdr, saddr)) 3019 *modify_tuple = true; 3020 3021 if (ct_flow && *modify_tuple) { 3022 NL_SET_ERR_MSG_MOD(extack, 3023 "can't offload re-write of ipv4 address with action ct"); 3024 return false; 3025 } 3026 } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) { 3027 struct ipv6_hoplimit_word *hoplimit_word = 3028 (struct ipv6_hoplimit_word *)&mask; 3029 3030 if (offset != offsetof(struct ipv6hdr, payload_len) || 3031 hoplimit_word->payload_len || 3032 hoplimit_word->nexthdr) { 3033 *modify_ip_header = true; 3034 } 3035 3036 if (ct_flow && offset >= offsetof(struct ipv6hdr, saddr)) 3037 *modify_tuple = true; 3038 3039 if (ct_flow && *modify_tuple) { 3040 NL_SET_ERR_MSG_MOD(extack, 3041 "can't offload re-write of ipv6 address with action ct"); 3042 return false; 3043 } 3044 } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_TCP || 3045 htype == FLOW_ACT_MANGLE_HDR_TYPE_UDP) { 3046 *modify_tuple = true; 3047 if (ct_flow) { 3048 NL_SET_ERR_MSG_MOD(extack, 3049 "can't offload re-write of transport header ports with action ct"); 3050 return false; 3051 } 3052 } 3053 3054 return true; 3055 } 3056 3057 static bool modify_tuple_supported(bool modify_tuple, bool ct_clear, 3058 bool ct_flow, struct netlink_ext_ack *extack, 3059 struct mlx5e_priv *priv, 3060 struct mlx5_flow_spec *spec) 3061 { 3062 if (!modify_tuple || ct_clear) 3063 return true; 3064 3065 if (ct_flow) { 3066 NL_SET_ERR_MSG_MOD(extack, 3067 "can't offload tuple modification with non-clear ct()"); 3068 netdev_info(priv->netdev, 3069 "can't offload tuple modification with non-clear ct()"); 3070 return false; 3071 } 3072 3073 /* Add ct_state=-trk match so it will be offloaded for non ct flows 3074 * (or after clear action), as otherwise, since the tuple is changed, 3075 * we can't restore ct state 3076 */ 3077 if (mlx5_tc_ct_add_no_trk_match(spec)) { 3078 NL_SET_ERR_MSG_MOD(extack, 3079 "can't offload tuple modification with ct matches and no ct(clear) action"); 3080 netdev_info(priv->netdev, 3081 "can't offload tuple modification with ct matches and no ct(clear) action"); 3082 return false; 3083 } 3084 3085 return true; 3086 } 3087 3088 static bool modify_header_match_supported(struct mlx5e_priv *priv, 3089 struct mlx5_flow_spec *spec, 3090 struct flow_action *flow_action, 3091 u32 actions, bool ct_flow, 3092 bool ct_clear, 3093 struct netlink_ext_ack *extack) 3094 { 3095 const struct flow_action_entry *act; 3096 bool modify_ip_header, modify_tuple; 3097 void *headers_c; 3098 void *headers_v; 3099 u16 ethertype; 3100 u8 ip_proto; 3101 int i; 3102 3103 headers_c = mlx5e_get_match_headers_criteria(actions, spec); 3104 headers_v = mlx5e_get_match_headers_value(actions, spec); 3105 ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype); 3106 3107 /* for non-IP we only re-write MACs, so we're okay */ 3108 if (MLX5_GET(fte_match_set_lyr_2_4, headers_c, ip_version) == 0 && 3109 ethertype != ETH_P_IP && ethertype != ETH_P_IPV6) 3110 goto out_ok; 3111 3112 modify_ip_header = false; 3113 modify_tuple = false; 3114 
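
	/* Go over all mangle/add actions and record whether they rewrite IP
	 * headers or the 5-tuple; unsupported combinations (e.g. a tuple
	 * rewrite together with non-clear ct) are rejected by the helpers.
	 */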
flow_action_for_each(i, act, flow_action) { 3115 if (act->id != FLOW_ACTION_MANGLE && 3116 act->id != FLOW_ACTION_ADD) 3117 continue; 3118 3119 if (!is_action_keys_supported(act, ct_flow, 3120 &modify_ip_header, 3121 &modify_tuple, extack)) 3122 return false; 3123 } 3124 3125 if (!modify_tuple_supported(modify_tuple, ct_clear, ct_flow, extack, 3126 priv, spec)) 3127 return false; 3128 3129 ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol); 3130 if (modify_ip_header && ip_proto != IPPROTO_TCP && 3131 ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) { 3132 NL_SET_ERR_MSG_MOD(extack, 3133 "can't offload re-write of non TCP/UDP"); 3134 netdev_info(priv->netdev, "can't offload re-write of ip proto %d\n", 3135 ip_proto); 3136 return false; 3137 } 3138 3139 out_ok: 3140 return true; 3141 } 3142 3143 static bool 3144 actions_match_supported_fdb(struct mlx5e_priv *priv, 3145 struct mlx5e_tc_flow_parse_attr *parse_attr, 3146 struct mlx5e_tc_flow *flow, 3147 struct netlink_ext_ack *extack) 3148 { 3149 struct mlx5_esw_flow_attr *esw_attr = flow->attr->esw_attr; 3150 bool ct_flow, ct_clear; 3151 3152 ct_clear = flow->attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR; 3153 ct_flow = flow_flag_test(flow, CT) && !ct_clear; 3154 3155 if (esw_attr->split_count && ct_flow && 3156 !MLX5_CAP_GEN(esw_attr->in_mdev, reg_c_preserve)) { 3157 /* All registers used by ct are cleared when using 3158 * split rules. 3159 */ 3160 NL_SET_ERR_MSG_MOD(extack, "Can't offload mirroring with action ct"); 3161 return false; 3162 } 3163 3164 if (esw_attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) { 3165 NL_SET_ERR_MSG_MOD(extack, 3166 "current firmware doesn't support split rule for port mirroring"); 3167 netdev_warn_once(priv->netdev, 3168 "current firmware doesn't support split rule for port mirroring\n"); 3169 return false; 3170 } 3171 3172 return true; 3173 } 3174 3175 static bool 3176 actions_match_supported(struct mlx5e_priv *priv, 3177 struct flow_action *flow_action, 3178 struct mlx5e_tc_flow_parse_attr *parse_attr, 3179 struct mlx5e_tc_flow *flow, 3180 struct netlink_ext_ack *extack) 3181 { 3182 u32 actions = flow->attr->action; 3183 bool ct_flow, ct_clear; 3184 3185 ct_clear = flow->attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR; 3186 ct_flow = flow_flag_test(flow, CT) && !ct_clear; 3187 3188 if (!(actions & 3189 (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) { 3190 NL_SET_ERR_MSG_MOD(extack, "Rule must have at least one forward/drop action"); 3191 return false; 3192 } 3193 3194 if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR && 3195 !modify_header_match_supported(priv, &parse_attr->spec, flow_action, 3196 actions, ct_flow, ct_clear, extack)) 3197 return false; 3198 3199 if (mlx5e_is_eswitch_flow(flow) && 3200 !actions_match_supported_fdb(priv, parse_attr, flow, extack)) 3201 return false; 3202 3203 return true; 3204 } 3205 3206 static bool same_port_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv) 3207 { 3208 return priv->mdev == peer_priv->mdev; 3209 } 3210 3211 bool mlx5e_same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv) 3212 { 3213 struct mlx5_core_dev *fmdev, *pmdev; 3214 u64 fsystem_guid, psystem_guid; 3215 3216 fmdev = priv->mdev; 3217 pmdev = peer_priv->mdev; 3218 3219 fsystem_guid = mlx5_query_nic_system_image_guid(fmdev); 3220 psystem_guid = mlx5_query_nic_system_image_guid(pmdev); 3221 3222 return (fsystem_guid == psystem_guid); 3223 } 3224 3225 static int 3226 parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state, 3227 struct 
flow_action *flow_action) 3228 { 3229 struct netlink_ext_ack *extack = parse_state->extack; 3230 struct mlx5e_tc_flow *flow = parse_state->flow; 3231 struct mlx5_flow_attr *attr = flow->attr; 3232 enum mlx5_flow_namespace_type ns_type; 3233 struct mlx5e_priv *priv = flow->priv; 3234 const struct flow_action_entry *act; 3235 struct mlx5e_tc_act *tc_act; 3236 int err, i; 3237 3238 ns_type = mlx5e_get_flow_namespace(flow); 3239 3240 flow_action_for_each(i, act, flow_action) { 3241 tc_act = mlx5e_tc_act_get(act->id, ns_type); 3242 if (!tc_act) { 3243 NL_SET_ERR_MSG_MOD(extack, "Not implemented offload action"); 3244 return -EOPNOTSUPP; 3245 } 3246 3247 if (!tc_act->can_offload(parse_state, act, i)) 3248 return -EOPNOTSUPP; 3249 3250 err = tc_act->parse_action(parse_state, act, priv, attr); 3251 if (err) 3252 return err; 3253 } 3254 3255 flow_action_for_each(i, act, flow_action) { 3256 tc_act = mlx5e_tc_act_get(act->id, ns_type); 3257 if (!tc_act || !tc_act->post_parse || 3258 !tc_act->can_offload(parse_state, act, i)) 3259 continue; 3260 3261 err = tc_act->post_parse(parse_state, priv, attr); 3262 if (err) 3263 return err; 3264 } 3265 3266 return 0; 3267 } 3268 3269 static int 3270 actions_prepare_mod_hdr_actions(struct mlx5e_priv *priv, 3271 struct mlx5e_tc_flow *flow, 3272 struct mlx5_flow_attr *attr, 3273 struct pedit_headers_action *hdrs, 3274 struct netlink_ext_ack *extack) 3275 { 3276 struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr; 3277 enum mlx5_flow_namespace_type ns_type; 3278 int err; 3279 3280 if (!hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits && 3281 !hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) 3282 return 0; 3283 3284 ns_type = mlx5e_get_flow_namespace(flow); 3285 3286 err = alloc_tc_pedit_action(priv, ns_type, parse_attr, hdrs, 3287 &attr->action, extack); 3288 if (err) 3289 return err; 3290 3291 if (parse_attr->mod_hdr_acts.num_actions > 0) 3292 return 0; 3293 3294 /* In case all pedit actions are skipped, remove the MOD_HDR flag. 
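 * The allocated mod header actions are freed as well, and for FDB rules
 * without a VLAN push/pop the mirror split count is reset, since no
 * header rewrite remains that would require a split rule.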
*/ 3295 attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; 3296 mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts); 3297 3298 if (ns_type != MLX5_FLOW_NAMESPACE_FDB) 3299 return 0; 3300 3301 if (!((attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) || 3302 (attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH))) 3303 attr->esw_attr->split_count = 0; 3304 3305 return 0; 3306 } 3307 3308 static int 3309 flow_action_supported(struct flow_action *flow_action, 3310 struct netlink_ext_ack *extack) 3311 { 3312 if (!flow_action_has_entries(flow_action)) { 3313 NL_SET_ERR_MSG_MOD(extack, "Flow action doesn't have any entries"); 3314 return -EINVAL; 3315 } 3316 3317 if (!flow_action_hw_stats_check(flow_action, extack, 3318 FLOW_ACTION_HW_STATS_DELAYED_BIT)) { 3319 NL_SET_ERR_MSG_MOD(extack, "Flow action HW stats type is not supported"); 3320 return -EOPNOTSUPP; 3321 } 3322 3323 return 0; 3324 } 3325 3326 static int 3327 parse_tc_nic_actions(struct mlx5e_priv *priv, 3328 struct flow_action *flow_action, 3329 struct mlx5e_tc_flow *flow, 3330 struct netlink_ext_ack *extack) 3331 { 3332 struct mlx5e_tc_act_parse_state *parse_state; 3333 struct mlx5e_tc_flow_parse_attr *parse_attr; 3334 struct mlx5_flow_attr *attr = flow->attr; 3335 struct pedit_headers_action *hdrs; 3336 int err; 3337 3338 err = flow_action_supported(flow_action, extack); 3339 if (err) 3340 return err; 3341 3342 attr->nic_attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG; 3343 parse_attr = attr->parse_attr; 3344 parse_state = &parse_attr->parse_state; 3345 mlx5e_tc_act_init_parse_state(parse_state, flow, flow_action, extack); 3346 parse_state->ct_priv = get_ct_priv(priv); 3347 hdrs = parse_state->hdrs; 3348 3349 err = parse_tc_actions(parse_state, flow_action); 3350 if (err) 3351 return err; 3352 3353 err = actions_prepare_mod_hdr_actions(priv, flow, attr, hdrs, extack); 3354 if (err) 3355 return err; 3356 3357 if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack)) 3358 return -EOPNOTSUPP; 3359 3360 return 0; 3361 } 3362 3363 static bool is_merged_eswitch_vfs(struct mlx5e_priv *priv, 3364 struct net_device *peer_netdev) 3365 { 3366 struct mlx5e_priv *peer_priv; 3367 3368 peer_priv = netdev_priv(peer_netdev); 3369 3370 return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) && 3371 mlx5e_eswitch_vf_rep(priv->netdev) && 3372 mlx5e_eswitch_vf_rep(peer_netdev) && 3373 mlx5e_same_hw_devs(priv, peer_priv)); 3374 } 3375 3376 static bool same_hw_reps(struct mlx5e_priv *priv, 3377 struct net_device *peer_netdev) 3378 { 3379 struct mlx5e_priv *peer_priv; 3380 3381 peer_priv = netdev_priv(peer_netdev); 3382 3383 return mlx5e_eswitch_rep(priv->netdev) && 3384 mlx5e_eswitch_rep(peer_netdev) && 3385 mlx5e_same_hw_devs(priv, peer_priv); 3386 } 3387 3388 static bool is_lag_dev(struct mlx5e_priv *priv, 3389 struct net_device *peer_netdev) 3390 { 3391 return ((mlx5_lag_is_sriov(priv->mdev) || 3392 mlx5_lag_is_multipath(priv->mdev)) && 3393 same_hw_reps(priv, peer_netdev)); 3394 } 3395 3396 bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv, 3397 struct net_device *out_dev) 3398 { 3399 if (is_merged_eswitch_vfs(priv, out_dev)) 3400 return true; 3401 3402 if (is_lag_dev(priv, out_dev)) 3403 return true; 3404 3405 return mlx5e_eswitch_rep(out_dev) && 3406 same_port_devs(priv, netdev_priv(out_dev)); 3407 } 3408 3409 int mlx5e_set_fwd_to_int_port_actions(struct mlx5e_priv *priv, 3410 struct mlx5_flow_attr *attr, 3411 int ifindex, 3412 enum mlx5e_tc_int_port_type type, 3413 u32 *action, 3414 int out_index) 3415 { 3416 struct mlx5_esw_flow_attr 
*esw_attr = attr->esw_attr; 3417 struct mlx5e_tc_int_port_priv *int_port_priv; 3418 struct mlx5e_tc_flow_parse_attr *parse_attr; 3419 struct mlx5e_tc_int_port *dest_int_port; 3420 int err; 3421 3422 parse_attr = attr->parse_attr; 3423 int_port_priv = mlx5e_get_int_port_priv(priv); 3424 3425 dest_int_port = mlx5e_tc_int_port_get(int_port_priv, ifindex, type); 3426 if (IS_ERR(dest_int_port)) 3427 return PTR_ERR(dest_int_port); 3428 3429 err = mlx5e_tc_match_to_reg_set(priv->mdev, &parse_attr->mod_hdr_acts, 3430 MLX5_FLOW_NAMESPACE_FDB, VPORT_TO_REG, 3431 mlx5e_tc_int_port_get_metadata(dest_int_port)); 3432 if (err) { 3433 mlx5e_tc_int_port_put(int_port_priv, dest_int_port); 3434 return err; 3435 } 3436 3437 *action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; 3438 3439 esw_attr->dest_int_port = dest_int_port; 3440 esw_attr->dests[out_index].flags |= MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE; 3441 3442 /* Forward to root fdb for matching against the new source vport */ 3443 attr->dest_chain = 0; 3444 3445 return 0; 3446 } 3447 3448 static int 3449 parse_tc_fdb_actions(struct mlx5e_priv *priv, 3450 struct flow_action *flow_action, 3451 struct mlx5e_tc_flow *flow, 3452 struct netlink_ext_ack *extack) 3453 { 3454 struct mlx5e_tc_act_parse_state *parse_state; 3455 struct mlx5e_tc_flow_parse_attr *parse_attr; 3456 struct mlx5_flow_attr *attr = flow->attr; 3457 struct mlx5_esw_flow_attr *esw_attr; 3458 struct pedit_headers_action *hdrs; 3459 int err; 3460 3461 err = flow_action_supported(flow_action, extack); 3462 if (err) 3463 return err; 3464 3465 esw_attr = attr->esw_attr; 3466 parse_attr = attr->parse_attr; 3467 parse_state = &parse_attr->parse_state; 3468 mlx5e_tc_act_init_parse_state(parse_state, flow, flow_action, extack); 3469 parse_state->ct_priv = get_ct_priv(priv); 3470 hdrs = parse_state->hdrs; 3471 3472 err = parse_tc_actions(parse_state, flow_action); 3473 if (err) 3474 return err; 3475 3476 /* Forward to/from internal port can only have 1 dest */ 3477 if ((netif_is_ovs_master(parse_attr->filter_dev) || esw_attr->dest_int_port) && 3478 esw_attr->out_count > 1) { 3479 NL_SET_ERR_MSG_MOD(extack, 3480 "Rules with internal port can have only one destination"); 3481 return -EOPNOTSUPP; 3482 } 3483 3484 err = actions_prepare_mod_hdr_actions(priv, flow, attr, hdrs, extack); 3485 if (err) 3486 return err; 3487 3488 if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack)) 3489 return -EOPNOTSUPP; 3490 3491 return 0; 3492 } 3493 3494 static void get_flags(int flags, unsigned long *flow_flags) 3495 { 3496 unsigned long __flow_flags = 0; 3497 3498 if (flags & MLX5_TC_FLAG(INGRESS)) 3499 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_INGRESS); 3500 if (flags & MLX5_TC_FLAG(EGRESS)) 3501 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_EGRESS); 3502 3503 if (flags & MLX5_TC_FLAG(ESW_OFFLOAD)) 3504 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH); 3505 if (flags & MLX5_TC_FLAG(NIC_OFFLOAD)) 3506 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC); 3507 if (flags & MLX5_TC_FLAG(FT_OFFLOAD)) 3508 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_FT); 3509 3510 *flow_flags = __flow_flags; 3511 } 3512 3513 static const struct rhashtable_params tc_ht_params = { 3514 .head_offset = offsetof(struct mlx5e_tc_flow, node), 3515 .key_offset = offsetof(struct mlx5e_tc_flow, cookie), 3516 .key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie), 3517 .automatic_shrinking = true, 3518 }; 3519 3520 static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv, 3521 unsigned long flags) 3522 { 3523 struct mlx5_eswitch *esw = 
priv->mdev->priv.eswitch; 3524 struct mlx5e_rep_priv *uplink_rpriv; 3525 3526 if (flags & MLX5_TC_FLAG(ESW_OFFLOAD)) { 3527 uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); 3528 return &uplink_rpriv->uplink_priv.tc_ht; 3529 } else /* NIC offload */ 3530 return &priv->fs.tc.ht; 3531 } 3532 3533 static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow) 3534 { 3535 struct mlx5_esw_flow_attr *esw_attr = flow->attr->esw_attr; 3536 struct mlx5_flow_attr *attr = flow->attr; 3537 bool is_rep_ingress = esw_attr->in_rep->vport != MLX5_VPORT_UPLINK && 3538 flow_flag_test(flow, INGRESS); 3539 bool act_is_encap = !!(attr->action & 3540 MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT); 3541 bool esw_paired = mlx5_devcom_is_paired(esw_attr->in_mdev->priv.devcom, 3542 MLX5_DEVCOM_ESW_OFFLOADS); 3543 3544 if (!esw_paired) 3545 return false; 3546 3547 if ((mlx5_lag_is_sriov(esw_attr->in_mdev) || 3548 mlx5_lag_is_multipath(esw_attr->in_mdev)) && 3549 (is_rep_ingress || act_is_encap)) 3550 return true; 3551 3552 return false; 3553 } 3554 3555 struct mlx5_flow_attr * 3556 mlx5_alloc_flow_attr(enum mlx5_flow_namespace_type type) 3557 { 3558 u32 ex_attr_size = (type == MLX5_FLOW_NAMESPACE_FDB) ? 3559 sizeof(struct mlx5_esw_flow_attr) : 3560 sizeof(struct mlx5_nic_flow_attr); 3561 struct mlx5_flow_attr *attr; 3562 3563 return kzalloc(sizeof(*attr) + ex_attr_size, GFP_KERNEL); 3564 } 3565 3566 static int 3567 mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size, 3568 struct flow_cls_offload *f, unsigned long flow_flags, 3569 struct mlx5e_tc_flow_parse_attr **__parse_attr, 3570 struct mlx5e_tc_flow **__flow) 3571 { 3572 struct mlx5e_tc_flow_parse_attr *parse_attr; 3573 struct mlx5_flow_attr *attr; 3574 struct mlx5e_tc_flow *flow; 3575 int err = -ENOMEM; 3576 int out_index; 3577 3578 flow = kzalloc(sizeof(*flow), GFP_KERNEL); 3579 parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL); 3580 if (!parse_attr || !flow) 3581 goto err_free; 3582 3583 flow->flags = flow_flags; 3584 flow->cookie = f->cookie; 3585 flow->priv = priv; 3586 3587 attr = mlx5_alloc_flow_attr(mlx5e_get_flow_namespace(flow)); 3588 if (!attr) 3589 goto err_free; 3590 3591 flow->attr = attr; 3592 3593 for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) 3594 INIT_LIST_HEAD(&flow->encaps[out_index].list); 3595 INIT_LIST_HEAD(&flow->hairpin); 3596 INIT_LIST_HEAD(&flow->l3_to_l2_reformat); 3597 refcount_set(&flow->refcnt, 1); 3598 init_completion(&flow->init_done); 3599 init_completion(&flow->del_hw_done); 3600 3601 *__flow = flow; 3602 *__parse_attr = parse_attr; 3603 3604 return 0; 3605 3606 err_free: 3607 kfree(flow); 3608 kvfree(parse_attr); 3609 return err; 3610 } 3611 3612 static void 3613 mlx5e_flow_attr_init(struct mlx5_flow_attr *attr, 3614 struct mlx5e_tc_flow_parse_attr *parse_attr, 3615 struct flow_cls_offload *f) 3616 { 3617 attr->parse_attr = parse_attr; 3618 attr->chain = f->common.chain_index; 3619 attr->prio = f->common.prio; 3620 } 3621 3622 static void 3623 mlx5e_flow_esw_attr_init(struct mlx5_flow_attr *attr, 3624 struct mlx5e_priv *priv, 3625 struct mlx5e_tc_flow_parse_attr *parse_attr, 3626 struct flow_cls_offload *f, 3627 struct mlx5_eswitch_rep *in_rep, 3628 struct mlx5_core_dev *in_mdev) 3629 { 3630 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; 3631 struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; 3632 3633 mlx5e_flow_attr_init(attr, parse_attr, f); 3634 3635 esw_attr->in_rep = in_rep; 3636 esw_attr->in_mdev = in_mdev; 3637 3638 if (MLX5_CAP_ESW(esw->dev, counter_eswitch_affinity) == 3639 
MLX5_COUNTER_SOURCE_ESWITCH)
		esw_attr->counter_dev = in_mdev;
	else
		esw_attr->counter_dev = priv->mdev;
}

static struct mlx5e_tc_flow *
__mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
		     struct flow_cls_offload *f,
		     unsigned long flow_flags,
		     struct net_device *filter_dev,
		     struct mlx5_eswitch_rep *in_rep,
		     struct mlx5_core_dev *in_mdev)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_flow *flow;
	int attr_size, err;

	flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
	attr_size = sizeof(struct mlx5_esw_flow_attr);
	err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
			       &parse_attr, &flow);
	if (err)
		goto out;

	parse_attr->filter_dev = filter_dev;
	mlx5e_flow_esw_attr_init(flow->attr,
				 priv, parse_attr,
				 f, in_rep, in_mdev);

	err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
			       f, filter_dev);
	if (err)
		goto err_free;

	/* actions validation depends on parsing the ct matches first */
	err = mlx5_tc_ct_match_add(get_ct_priv(priv), &parse_attr->spec, f,
				   &flow->attr->ct_attr, extack);
	if (err)
		goto err_free;

	/* always set IP version for indirect table handling */
	flow->attr->ip_version = mlx5e_tc_get_ip_version(&parse_attr->spec, true);

	err = parse_tc_fdb_actions(priv, &rule->action, flow, extack);
	if (err)
		goto err_free;

	err = mlx5e_tc_add_fdb_flow(priv, flow, extack);
	complete_all(&flow->init_done);
	if (err) {
		if (!(err == -ENETUNREACH && mlx5_lag_is_multipath(in_mdev)))
			goto err_free;

		add_unready_flow(flow);
	}

	return flow;

err_free:
	mlx5e_flow_put(priv, flow);
out:
	return ERR_PTR(err);
}

static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f,
				      struct mlx5e_tc_flow *flow,
				      unsigned long flow_flags)
{
	struct mlx5e_priv *priv = flow->priv, *peer_priv;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch, *peer_esw;
	struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
	struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_rep_priv *peer_urpriv;
	struct mlx5e_tc_flow *peer_flow;
	struct mlx5_core_dev *in_mdev;
	int err = 0;

	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer_esw)
		return -ENODEV;

	peer_urpriv = mlx5_eswitch_get_uplink_priv(peer_esw, REP_ETH);
	peer_priv = netdev_priv(peer_urpriv->netdev);

	/* in_mdev is assigned the mdev of the port from which the packet
	 * originated. Packets redirected to the uplink therefore use the
	 * same mdev as the original flow, while packets redirected from
	 * the uplink use the peer mdev.
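	 * (The peer rule below keeps the original in_rep; only the mdev
	 * differs.)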
3731      */
3732     if (attr->in_rep->vport == MLX5_VPORT_UPLINK)
3733         in_mdev = peer_priv->mdev;
3734     else
3735         in_mdev = priv->mdev;
3736
3737     parse_attr = flow->attr->parse_attr;
3738     peer_flow = __mlx5e_add_fdb_flow(peer_priv, f, flow_flags,
3739                      parse_attr->filter_dev,
3740                      attr->in_rep, in_mdev);
3741     if (IS_ERR(peer_flow)) {
3742         err = PTR_ERR(peer_flow);
3743         goto out;
3744     }
3745
3746     flow->peer_flow = peer_flow;
3747     flow_flag_set(flow, DUP);
3748     mutex_lock(&esw->offloads.peer_mutex);
3749     list_add_tail(&flow->peer, &esw->offloads.peer_flows);
3750     mutex_unlock(&esw->offloads.peer_mutex);
3751
3752 out:
3753     mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
3754     return err;
3755 }
3756
3757 static int
3758 mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
3759            struct flow_cls_offload *f,
3760            unsigned long flow_flags,
3761            struct net_device *filter_dev,
3762            struct mlx5e_tc_flow **__flow)
3763 {
3764     struct mlx5e_rep_priv *rpriv = priv->ppriv;
3765     struct mlx5_eswitch_rep *in_rep = rpriv->rep;
3766     struct mlx5_core_dev *in_mdev = priv->mdev;
3767     struct mlx5e_tc_flow *flow;
3768     int err;
3769
3770     flow = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep,
3771                     in_mdev);
3772     if (IS_ERR(flow))
3773         return PTR_ERR(flow);
3774
3775     if (is_peer_flow_needed(flow)) {
3776         err = mlx5e_tc_add_fdb_peer_flow(f, flow, flow_flags);
3777         if (err) {
3778             mlx5e_tc_del_fdb_flow(priv, flow);
3779             goto out;
3780         }
3781     }
3782
3783     *__flow = flow;
3784
3785     return 0;
3786
3787 out:
3788     return err;
3789 }
3790
3791 static int
3792 mlx5e_add_nic_flow(struct mlx5e_priv *priv,
3793            struct flow_cls_offload *f,
3794            unsigned long flow_flags,
3795            struct net_device *filter_dev,
3796            struct mlx5e_tc_flow **__flow)
3797 {
3798     struct flow_rule *rule = flow_cls_offload_flow_rule(f);
3799     struct netlink_ext_ack *extack = f->common.extack;
3800     struct mlx5e_tc_flow_parse_attr *parse_attr;
3801     struct mlx5e_tc_flow *flow;
3802     int attr_size, err;
3803
3804     if (!MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level)) {
3805         if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
3806             return -EOPNOTSUPP;
3807     } else if (!tc_can_offload_extack(priv->netdev, f->common.extack)) {
3808         return -EOPNOTSUPP;
3809     }
3810
3811     flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
3812     attr_size = sizeof(struct mlx5_nic_flow_attr);
3813     err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
3814                    &parse_attr, &flow);
3815     if (err)
3816         goto out;
3817
3818     parse_attr->filter_dev = filter_dev;
3819     mlx5e_flow_attr_init(flow->attr, parse_attr, f);
3820
3821     err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
3822                    f, filter_dev);
3823     if (err)
3824         goto err_free;
3825
3826     err = mlx5_tc_ct_match_add(get_ct_priv(priv), &parse_attr->spec, f,
3827                    &flow->attr->ct_attr, extack);
3828     if (err)
3829         goto err_free;
3830
3831     err = parse_tc_nic_actions(priv, &rule->action, flow, extack);
3832     if (err)
3833         goto err_free;
3834
3835     err = mlx5e_tc_add_nic_flow(priv, flow, extack);
3836     if (err)
3837         goto err_free;
3838
3839     flow_flag_set(flow, OFFLOADED);
3840     *__flow = flow;
3841
3842     return 0;
3843
3844 err_free:
3845     flow_flag_set(flow, FAILED);
3846     mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
3847     mlx5e_flow_put(priv, flow);
3848 out:
3849     return err;
3850 }
3851
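/* Note: common entry point for both offload modes. Rules are offloaded
 * to the FDB when the eswitch is in switchdev (offloads) mode, and to
 * the NIC RX tables otherwise; both paths fill *flow on success.
 */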
3852 static int
3853 mlx5e_tc_add_flow(struct mlx5e_priv *priv,
3854           struct flow_cls_offload *f,
3855           unsigned long flags,
3856           struct net_device *filter_dev,
3857           struct mlx5e_tc_flow **flow)
3858 {
3859     struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3860     unsigned long flow_flags;
3861     int err;
3862
3863     get_flags(flags, &flow_flags);
3864
3865     if (!tc_can_offload_extack(priv->netdev, f->common.extack))
3866         return -EOPNOTSUPP;
3867
3868     if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
3869         err = mlx5e_add_fdb_flow(priv, f, flow_flags,
3870                      filter_dev, flow);
3871     else
3872         err = mlx5e_add_nic_flow(priv, f, flow_flags,
3873                      filter_dev, flow);
3874
3875     return err;
3876 }
3877
3878 static bool is_flow_rule_duplicate_allowed(struct net_device *dev,
3879                        struct mlx5e_rep_priv *rpriv)
3880 {
3881     /* An offloaded flow rule may be duplicated on a non-uplink representor
3882      * that shares a tc block with other slaves of a lag device. rpriv can
3883      * be NULL if this function is called from NIC mode.
3884      */
3885     return netif_is_lag_port(dev) && rpriv && rpriv->rep->vport != MLX5_VPORT_UPLINK;
3886 }
3887
3888 int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
3889                struct flow_cls_offload *f, unsigned long flags)
3890 {
3891     struct netlink_ext_ack *extack = f->common.extack;
3892     struct rhashtable *tc_ht = get_tc_ht(priv, flags);
3893     struct mlx5e_rep_priv *rpriv = priv->ppriv;
3894     struct mlx5e_tc_flow *flow;
3895     int err = 0;
3896
3897     if (!mlx5_esw_hold(priv->mdev))
3898         return -EAGAIN;
3899
3900     mlx5_esw_get(priv->mdev);
3901
3902     rcu_read_lock();
3903     flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
3904     if (flow) {
3905         /* The same flow rule was already offloaded to a non-uplink
3906          * representor sharing the tc block; just return 0.
3907          */
3908         if (is_flow_rule_duplicate_allowed(dev, rpriv) && flow->orig_dev != dev)
3909             goto rcu_unlock;
3910
3911         NL_SET_ERR_MSG_MOD(extack,
3912                    "flow cookie already exists, ignoring");
3913         netdev_warn_once(priv->netdev,
3914                  "flow cookie %lx already exists, ignoring\n",
3915                  f->cookie);
3916         err = -EEXIST;
3917         goto rcu_unlock;
3918     }
3919 rcu_unlock:
3920     rcu_read_unlock();
3921     if (flow)
3922         goto out;
3923
3924     trace_mlx5e_configure_flower(f);
3925     err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow);
3926     if (err)
3927         goto out;
3928
3929     /* The flow rule was offloaded to a non-uplink representor sharing
3930      * the tc block; set the flow's owner dev.
3931      */
3932     if (is_flow_rule_duplicate_allowed(dev, rpriv))
3933         flow->orig_dev = dev;
3934
3935     err = rhashtable_lookup_insert_fast(tc_ht, &flow->node, tc_ht_params);
3936     if (err)
3937         goto err_free;
3938
3939     mlx5_esw_release(priv->mdev);
3940     return 0;
3941
3942 err_free:
3943     mlx5e_flow_put(priv, flow);
3944 out:
3945     mlx5_esw_put(priv->mdev);
3946     mlx5_esw_release(priv->mdev);
3947     return err;
3948 }
3949
3950 static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags)
3951 {
3952     bool dir_ingress = !!(flags & MLX5_TC_FLAG(INGRESS));
3953     bool dir_egress = !!(flags & MLX5_TC_FLAG(EGRESS));
3954
3955     return flow_flag_test(flow, INGRESS) == dir_ingress &&
3956            flow_flag_test(flow, EGRESS) == dir_egress;
3957 }
3958
3959 int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
3960             struct flow_cls_offload *f, unsigned long flags)
3961 {
3962     struct rhashtable *tc_ht = get_tc_ht(priv, flags);
3963     struct mlx5e_tc_flow *flow;
3964     int err;
3965
3966     rcu_read_lock();
3967     flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
3968     if (!flow || !same_flow_direction(flow, flags)) {
3969         err = -EINVAL;
3970         goto errout;
3971     }
3972
3973     /* Only delete the flow if it doesn't have the MLX5E_TC_FLOW_DELETED
3974      * flag set.
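     * flow_flag_test_and_set() makes the check-and-mark atomic, so two
     * concurrent delete requests for the same cookie cannot both remove
     * the flow from tc_ht.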
3975      */
3976     if (flow_flag_test_and_set(flow, DELETED)) {
3977         err = -EINVAL;
3978         goto errout;
3979     }
3980     rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params);
3981     rcu_read_unlock();
3982
3983     trace_mlx5e_delete_flower(f);
3984     mlx5e_flow_put(priv, flow);
3985
3986     mlx5_esw_put(priv->mdev);
3987     return 0;
3988
3989 errout:
3990     rcu_read_unlock();
3991     return err;
3992 }
3993
3994 int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
3995                struct flow_cls_offload *f, unsigned long flags)
3996 {
3997     struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
3998     struct rhashtable *tc_ht = get_tc_ht(priv, flags);
3999     struct mlx5_eswitch *peer_esw;
4000     struct mlx5e_tc_flow *flow;
4001     struct mlx5_fc *counter;
4002     u64 lastuse = 0;
4003     u64 packets = 0;
4004     u64 bytes = 0;
4005     int err = 0;
4006
4007     rcu_read_lock();
4008     flow = mlx5e_flow_get(rhashtable_lookup(tc_ht, &f->cookie,
4009                         tc_ht_params));
4010     rcu_read_unlock();
4011     if (IS_ERR(flow))
4012         return PTR_ERR(flow);
4013
4014     if (!same_flow_direction(flow, flags)) {
4015         err = -EINVAL;
4016         goto errout;
4017     }
4018
4019     if (mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, CT)) {
4020         counter = mlx5e_tc_get_counter(flow);
4021         if (!counter)
4022             goto errout;
4023
4024         mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
4025     }
4026
4027     /* Under multipath it's possible for one rule to be currently
4028      * un-offloaded while the other rule is offloaded.
4029      */
4030     peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
4031     if (!peer_esw)
4032         goto out;
4033
4034     if (flow_flag_test(flow, DUP) &&
4035         flow_flag_test(flow->peer_flow, OFFLOADED)) {
4036         u64 bytes2;
4037         u64 packets2;
4038         u64 lastuse2;
4039
4040         counter = mlx5e_tc_get_counter(flow->peer_flow);
4041         if (!counter)
4042             goto no_peer_counter;
4043         mlx5_fc_query_cached(counter, &bytes2, &packets2, &lastuse2);
4044
4045         bytes += bytes2;
4046         packets += packets2;
4047         lastuse = max_t(u64, lastuse, lastuse2);
4048     }
4049
4050 no_peer_counter:
4051     mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
4052 out:
4053     flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
4054               FLOW_ACTION_HW_STATS_DELAYED);
4055     trace_mlx5e_stats_flower(f);
4056 errout:
4057     mlx5e_flow_put(priv, flow);
4058     return err;
4059 }
4060
4061 static int apply_police_params(struct mlx5e_priv *priv, u64 rate,
4062                    struct netlink_ext_ack *extack)
4063 {
4064     struct mlx5e_rep_priv *rpriv = priv->ppriv;
4065     struct mlx5_eswitch *esw;
4066     u32 rate_mbps = 0;
4067     u16 vport_num;
4068     int err;
4069
4070     vport_num = rpriv->rep->vport;
4071     if (vport_num >= MLX5_VPORT_ECPF) {
4072         NL_SET_ERR_MSG_MOD(extack,
4073                    "Ingress rate limit is supported only for Eswitch ports connected to VFs");
4074         return -EOPNOTSUPP;
4075     }
4076
4077     esw = priv->mdev->priv.eswitch;
4078     /* rate is given in bytes/sec.
4079      * First convert to bits/sec and then round to the nearest Mbit/sec,
4080      * where Mbit means a million bits.
4081      * Moreover, if rate is non-zero we configure a minimum of
4082      * 1 Mbit/sec.
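     * Worked example: rate = 1000 bytes/sec -> 8000 bits/sec; 8000 +
     * 500000 divided by 1000000 rounds down to 0, so rate_mbps becomes
     * max(0, 1) = 1. rate = 25000000 bytes/sec -> 200 Mbit/sec.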
4083      */
4084     if (rate) {
4085         rate = (rate * BITS_PER_BYTE) + 500000;
4086         do_div(rate, 1000000);
4087         rate_mbps = max_t(u32, rate, 1);
4088     }
4089
4090     err = mlx5_esw_qos_modify_vport_rate(esw, vport_num, rate_mbps);
4091     if (err)
4092         NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware");
4093
4094     return err;
4095 }
4096
4097 static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
4098                     struct flow_action *flow_action,
4099                     struct netlink_ext_ack *extack)
4100 {
4101     struct mlx5e_rep_priv *rpriv = priv->ppriv;
4102     const struct flow_action_entry *act;
4103     int err;
4104     int i;
4105
4106     if (!flow_action_has_entries(flow_action)) {
4107         NL_SET_ERR_MSG_MOD(extack, "matchall called with no action");
4108         return -EINVAL;
4109     }
4110
4111     if (!flow_offload_has_one_action(flow_action)) {
4112         NL_SET_ERR_MSG_MOD(extack, "matchall policing supports only a single action");
4113         return -EOPNOTSUPP;
4114     }
4115
4116     if (!flow_action_basic_hw_stats_check(flow_action, extack)) {
4117         NL_SET_ERR_MSG_MOD(extack, "Flow action HW stats type is not supported");
4118         return -EOPNOTSUPP;
4119     }
4120
4121     flow_action_for_each(i, act, flow_action) {
4122         switch (act->id) {
4123         case FLOW_ACTION_POLICE:
4124             if (act->police.rate_pkt_ps) {
4125                 NL_SET_ERR_MSG_MOD(extack, "QoS offload does not support packets-per-second rates");
4126                 return -EOPNOTSUPP;
4127             }
4128             err = apply_police_params(priv, act->police.rate_bytes_ps, extack);
4129             if (err)
4130                 return err;
4131
4132             rpriv->prev_vf_vport_stats = priv->stats.vf_vport;
4133             break;
4134         default:
4135             NL_SET_ERR_MSG_MOD(extack, "mlx5 supports only the police action for matchall");
4136             return -EOPNOTSUPP;
4137         }
4138     }
4139
4140     return 0;
4141 }
4142
4143 int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
4144                 struct tc_cls_matchall_offload *ma)
4145 {
4146     struct netlink_ext_ack *extack = ma->common.extack;
4147
4148     if (ma->common.prio != 1) {
4149         NL_SET_ERR_MSG_MOD(extack, "only priority 1 is supported");
4150         return -EINVAL;
4151     }
4152
4153     return scan_tc_matchall_fdb_actions(priv, &ma->rule->action, extack);
4154 }
4155
4156 int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv,
4157                  struct tc_cls_matchall_offload *ma)
4158 {
4159     struct netlink_ext_ack *extack = ma->common.extack;
4160
4161     return apply_police_params(priv, 0, extack);
4162 }
4163
4164 void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
4165                  struct tc_cls_matchall_offload *ma)
4166 {
4167     struct mlx5e_rep_priv *rpriv = priv->ppriv;
4168     struct rtnl_link_stats64 cur_stats;
4169     u64 dbytes;
4170     u64 dpkts;
4171
4172     cur_stats = priv->stats.vf_vport;
4173     dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets;
4174     dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes;
4175     rpriv->prev_vf_vport_stats = cur_stats;
4176     flow_stats_update(&ma->stats, dbytes, dpkts, 0, jiffies,
4177               FLOW_ACTION_HW_STATS_DELAYED);
4178 }
4179
4180 static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
4181                           struct mlx5e_priv *peer_priv)
4182 {
4183     struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
4184     struct mlx5e_hairpin_entry *hpe, *tmp;
4185     LIST_HEAD(init_wait_list);
4186     u16 peer_vhca_id;
4187     int bkt;
4188
4189     if (!mlx5e_same_hw_devs(priv, peer_priv))
4190         return;
4191
4192     peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
4193
4194     mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
4195     hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist)
4196         if (refcount_inc_not_zero(&hpe->refcnt))
4197             list_add(&hpe->dead_peer_wait_list, &init_wait_list);
4198
mutex_unlock(&priv->fs.tc.hairpin_tbl_lock); 4199 4200 list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) { 4201 wait_for_completion(&hpe->res_ready); 4202 if (!IS_ERR_OR_NULL(hpe->hp) && hpe->peer_vhca_id == peer_vhca_id) 4203 mlx5_core_hairpin_clear_dead_peer(hpe->hp->pair); 4204 4205 mlx5e_hairpin_put(priv, hpe); 4206 } 4207 } 4208 4209 static int mlx5e_tc_netdev_event(struct notifier_block *this, 4210 unsigned long event, void *ptr) 4211 { 4212 struct net_device *ndev = netdev_notifier_info_to_dev(ptr); 4213 struct mlx5e_flow_steering *fs; 4214 struct mlx5e_priv *peer_priv; 4215 struct mlx5e_tc_table *tc; 4216 struct mlx5e_priv *priv; 4217 4218 if (ndev->netdev_ops != &mlx5e_netdev_ops || 4219 event != NETDEV_UNREGISTER || 4220 ndev->reg_state == NETREG_REGISTERED) 4221 return NOTIFY_DONE; 4222 4223 tc = container_of(this, struct mlx5e_tc_table, netdevice_nb); 4224 fs = container_of(tc, struct mlx5e_flow_steering, tc); 4225 priv = container_of(fs, struct mlx5e_priv, fs); 4226 peer_priv = netdev_priv(ndev); 4227 if (priv == peer_priv || 4228 !(priv->netdev->features & NETIF_F_HW_TC)) 4229 return NOTIFY_DONE; 4230 4231 mlx5e_tc_hairpin_update_dead_peer(priv, peer_priv); 4232 4233 return NOTIFY_DONE; 4234 } 4235 4236 static int mlx5e_tc_nic_get_ft_size(struct mlx5_core_dev *dev) 4237 { 4238 int tc_grp_size, tc_tbl_size; 4239 u32 max_flow_counter; 4240 4241 max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) | 4242 MLX5_CAP_GEN(dev, max_flow_counter_15_0); 4243 4244 tc_grp_size = min_t(int, max_flow_counter, MLX5E_TC_TABLE_MAX_GROUP_SIZE); 4245 4246 tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS, 4247 BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size))); 4248 4249 return tc_tbl_size; 4250 } 4251 4252 int mlx5e_tc_nic_init(struct mlx5e_priv *priv) 4253 { 4254 struct mlx5e_tc_table *tc = &priv->fs.tc; 4255 struct mlx5_core_dev *dev = priv->mdev; 4256 struct mapping_ctx *chains_mapping; 4257 struct mlx5_chains_attr attr = {}; 4258 u64 mapping_id; 4259 int err; 4260 4261 mlx5e_mod_hdr_tbl_init(&tc->mod_hdr); 4262 mutex_init(&tc->t_lock); 4263 mutex_init(&tc->hairpin_tbl_lock); 4264 hash_init(tc->hairpin_tbl); 4265 4266 err = rhashtable_init(&tc->ht, &tc_ht_params); 4267 if (err) 4268 return err; 4269 4270 lockdep_set_class(&tc->ht.mutex, &tc_ht_lock_key); 4271 4272 mapping_id = mlx5_query_nic_system_image_guid(dev); 4273 4274 chains_mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_CHAIN, 4275 sizeof(struct mlx5_mapped_obj), 4276 MLX5E_TC_TABLE_CHAIN_TAG_MASK, true); 4277 4278 if (IS_ERR(chains_mapping)) { 4279 err = PTR_ERR(chains_mapping); 4280 goto err_mapping; 4281 } 4282 tc->mapping = chains_mapping; 4283 4284 if (MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level)) 4285 attr.flags = MLX5_CHAINS_AND_PRIOS_SUPPORTED | 4286 MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED; 4287 attr.ns = MLX5_FLOW_NAMESPACE_KERNEL; 4288 attr.max_ft_sz = mlx5e_tc_nic_get_ft_size(dev); 4289 attr.max_grp_num = MLX5E_TC_TABLE_NUM_GROUPS; 4290 attr.default_ft = mlx5e_vlan_get_flowtable(priv->fs.vlan); 4291 attr.mapping = chains_mapping; 4292 4293 tc->chains = mlx5_chains_create(dev, &attr); 4294 if (IS_ERR(tc->chains)) { 4295 err = PTR_ERR(tc->chains); 4296 goto err_chains; 4297 } 4298 4299 tc->post_act = mlx5e_tc_post_act_init(priv, tc->chains, MLX5_FLOW_NAMESPACE_KERNEL); 4300 tc->ct = mlx5_tc_ct_init(priv, tc->chains, &priv->fs.tc.mod_hdr, 4301 MLX5_FLOW_NAMESPACE_KERNEL, tc->post_act); 4302 4303 tc->netdevice_nb.notifier_call = 
mlx5e_tc_netdev_event;
4304     err = register_netdevice_notifier_dev_net(priv->netdev,
4305                           &tc->netdevice_nb,
4306                           &tc->netdevice_nn);
4307     if (err) {
4308         tc->netdevice_nb.notifier_call = NULL;
4309         mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
4310         goto err_reg;
4311     }
4312
4313     return 0;
4314
4315 err_reg:
4316     mlx5_tc_ct_clean(tc->ct);
4317     mlx5e_tc_post_act_destroy(tc->post_act);
4318     mlx5_chains_destroy(tc->chains);
4319 err_chains:
4320     mapping_destroy(chains_mapping);
4321 err_mapping:
4322     rhashtable_destroy(&tc->ht);
4323     return err;
4324 }
4325
4326 static void _mlx5e_tc_del_flow(void *ptr, void *arg)
4327 {
4328     struct mlx5e_tc_flow *flow = ptr;
4329     struct mlx5e_priv *priv = flow->priv;
4330
4331     mlx5e_tc_del_flow(priv, flow);
4332     kfree(flow);
4333 }
4334
4335 void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
4336 {
4337     struct mlx5e_tc_table *tc = &priv->fs.tc;
4338
4339     if (tc->netdevice_nb.notifier_call)
4340         unregister_netdevice_notifier_dev_net(priv->netdev,
4341                               &tc->netdevice_nb,
4342                               &tc->netdevice_nn);
4343
4344     mlx5e_mod_hdr_tbl_destroy(&tc->mod_hdr);
4345     mutex_destroy(&tc->hairpin_tbl_lock);
4346
4347     rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, NULL);
4348
4349     if (!IS_ERR_OR_NULL(tc->t)) {
4350         mlx5_chains_put_table(tc->chains, 0, 1, MLX5E_TC_FT_LEVEL);
4351         tc->t = NULL;
4352     }
4353     mutex_destroy(&tc->t_lock);
4354
4355     mlx5_tc_ct_clean(tc->ct);
4356     mlx5e_tc_post_act_destroy(tc->post_act);
4357     mapping_destroy(tc->mapping);
4358     mlx5_chains_destroy(tc->chains);
4359 }
4360
4361 int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
4362 {
4363     const size_t sz_enc_opts = sizeof(struct tunnel_match_enc_opts);
4364     struct mlx5_rep_uplink_priv *uplink_priv;
4365     struct mlx5e_rep_priv *rpriv;
4366     struct mapping_ctx *mapping;
4367     struct mlx5_eswitch *esw;
4368     struct mlx5e_priv *priv;
4369     u64 mapping_id;
4370     int err = 0;
4371
4372     uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht);
4373     rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
4374     priv = netdev_priv(rpriv->netdev);
4375     esw = priv->mdev->priv.eswitch;
4376
4377     uplink_priv->post_act = mlx5e_tc_post_act_init(priv, esw_chains(esw),
4378                                MLX5_FLOW_NAMESPACE_FDB);
4379     uplink_priv->ct_priv = mlx5_tc_ct_init(netdev_priv(priv->netdev),
4380                        esw_chains(esw),
4381                        &esw->offloads.mod_hdr,
4382                        MLX5_FLOW_NAMESPACE_FDB,
4383                        uplink_priv->post_act);
4384
4385     uplink_priv->int_port_priv = mlx5e_tc_int_port_init(netdev_priv(priv->netdev));
4386
4387     uplink_priv->tc_psample = mlx5e_tc_sample_init(esw, uplink_priv->post_act);
4388
4389     mapping_id = mlx5_query_nic_system_image_guid(esw->dev);
4390
4391     mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_TUNNEL,
4392                     sizeof(struct tunnel_match_key),
4393                     TUNNEL_INFO_BITS_MASK, true);
4394
4395     if (IS_ERR(mapping)) {
4396         err = PTR_ERR(mapping);
4397         goto err_tun_mapping;
4398     }
4399     uplink_priv->tunnel_mapping = mapping;
4400
4401     /* The last two values are reserved for the stack devices' slow-path
4402      * table mark and the bridge ingress push mark.
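     * This is why the enc_opts mapping below is created with
     * ENC_OPTS_BITS_MASK - 2 usable ids.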
4403 */ 4404 mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_TUNNEL_ENC_OPTS, 4405 sz_enc_opts, ENC_OPTS_BITS_MASK - 2, true); 4406 if (IS_ERR(mapping)) { 4407 err = PTR_ERR(mapping); 4408 goto err_enc_opts_mapping; 4409 } 4410 uplink_priv->tunnel_enc_opts_mapping = mapping; 4411 4412 err = rhashtable_init(tc_ht, &tc_ht_params); 4413 if (err) 4414 goto err_ht_init; 4415 4416 lockdep_set_class(&tc_ht->mutex, &tc_ht_lock_key); 4417 4418 uplink_priv->encap = mlx5e_tc_tun_init(priv); 4419 if (IS_ERR(uplink_priv->encap)) { 4420 err = PTR_ERR(uplink_priv->encap); 4421 goto err_register_fib_notifier; 4422 } 4423 4424 return 0; 4425 4426 err_register_fib_notifier: 4427 rhashtable_destroy(tc_ht); 4428 err_ht_init: 4429 mapping_destroy(uplink_priv->tunnel_enc_opts_mapping); 4430 err_enc_opts_mapping: 4431 mapping_destroy(uplink_priv->tunnel_mapping); 4432 err_tun_mapping: 4433 mlx5e_tc_sample_cleanup(uplink_priv->tc_psample); 4434 mlx5e_tc_int_port_cleanup(uplink_priv->int_port_priv); 4435 mlx5_tc_ct_clean(uplink_priv->ct_priv); 4436 netdev_warn(priv->netdev, 4437 "Failed to initialize tc (eswitch), err: %d", err); 4438 mlx5e_tc_post_act_destroy(uplink_priv->post_act); 4439 return err; 4440 } 4441 4442 void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht) 4443 { 4444 struct mlx5_rep_uplink_priv *uplink_priv; 4445 4446 uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht); 4447 4448 rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL); 4449 mlx5e_tc_tun_cleanup(uplink_priv->encap); 4450 4451 mapping_destroy(uplink_priv->tunnel_enc_opts_mapping); 4452 mapping_destroy(uplink_priv->tunnel_mapping); 4453 4454 mlx5e_tc_sample_cleanup(uplink_priv->tc_psample); 4455 mlx5e_tc_int_port_cleanup(uplink_priv->int_port_priv); 4456 mlx5_tc_ct_clean(uplink_priv->ct_priv); 4457 mlx5e_tc_post_act_destroy(uplink_priv->post_act); 4458 } 4459 4460 int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags) 4461 { 4462 struct rhashtable *tc_ht = get_tc_ht(priv, flags); 4463 4464 return atomic_read(&tc_ht->nelems); 4465 } 4466 4467 void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw) 4468 { 4469 struct mlx5e_tc_flow *flow, *tmp; 4470 4471 list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows, peer) 4472 __mlx5e_tc_del_fdb_peer_flow(flow); 4473 } 4474 4475 void mlx5e_tc_reoffload_flows_work(struct work_struct *work) 4476 { 4477 struct mlx5_rep_uplink_priv *rpriv = 4478 container_of(work, struct mlx5_rep_uplink_priv, 4479 reoffload_flows_work); 4480 struct mlx5e_tc_flow *flow, *tmp; 4481 4482 mutex_lock(&rpriv->unready_flows_lock); 4483 list_for_each_entry_safe(flow, tmp, &rpriv->unready_flows, unready) { 4484 if (!mlx5e_tc_add_fdb_flow(flow->priv, flow, NULL)) 4485 unready_flow_del(flow); 4486 } 4487 mutex_unlock(&rpriv->unready_flows_lock); 4488 } 4489 4490 static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv, 4491 struct flow_cls_offload *cls_flower, 4492 unsigned long flags) 4493 { 4494 switch (cls_flower->command) { 4495 case FLOW_CLS_REPLACE: 4496 return mlx5e_configure_flower(priv->netdev, priv, cls_flower, 4497 flags); 4498 case FLOW_CLS_DESTROY: 4499 return mlx5e_delete_flower(priv->netdev, priv, cls_flower, 4500 flags); 4501 case FLOW_CLS_STATS: 4502 return mlx5e_stats_flower(priv->netdev, priv, cls_flower, 4503 flags); 4504 default: 4505 return -EOPNOTSUPP; 4506 } 4507 } 4508 4509 int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, 4510 void *cb_priv) 4511 { 4512 unsigned long flags = MLX5_TC_FLAG(INGRESS); 4513 struct mlx5e_priv 
*priv = cb_priv; 4514 4515 if (!priv->netdev || !netif_device_present(priv->netdev)) 4516 return -EOPNOTSUPP; 4517 4518 if (mlx5e_is_uplink_rep(priv)) 4519 flags |= MLX5_TC_FLAG(ESW_OFFLOAD); 4520 else 4521 flags |= MLX5_TC_FLAG(NIC_OFFLOAD); 4522 4523 switch (type) { 4524 case TC_SETUP_CLSFLOWER: 4525 return mlx5e_setup_tc_cls_flower(priv, type_data, flags); 4526 default: 4527 return -EOPNOTSUPP; 4528 } 4529 } 4530 4531 bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe, 4532 struct sk_buff *skb) 4533 { 4534 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) 4535 u32 chain = 0, chain_tag, reg_b, zone_restore_id; 4536 struct mlx5e_priv *priv = netdev_priv(skb->dev); 4537 struct mlx5e_tc_table *tc = &priv->fs.tc; 4538 struct mlx5_mapped_obj mapped_obj; 4539 struct tc_skb_ext *tc_skb_ext; 4540 int err; 4541 4542 reg_b = be32_to_cpu(cqe->ft_metadata); 4543 4544 chain_tag = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK; 4545 4546 err = mapping_find(tc->mapping, chain_tag, &mapped_obj); 4547 if (err) { 4548 netdev_dbg(priv->netdev, 4549 "Couldn't find chain for chain tag: %d, err: %d\n", 4550 chain_tag, err); 4551 return false; 4552 } 4553 4554 if (mapped_obj.type == MLX5_MAPPED_OBJ_CHAIN) { 4555 chain = mapped_obj.chain; 4556 tc_skb_ext = tc_skb_ext_alloc(skb); 4557 if (WARN_ON(!tc_skb_ext)) 4558 return false; 4559 4560 tc_skb_ext->chain = chain; 4561 4562 zone_restore_id = (reg_b >> REG_MAPPING_MOFFSET(NIC_ZONE_RESTORE_TO_REG)) & 4563 ESW_ZONE_ID_MASK; 4564 4565 if (!mlx5e_tc_ct_restore_flow(tc->ct, skb, 4566 zone_restore_id)) 4567 return false; 4568 } else { 4569 netdev_dbg(priv->netdev, "Invalid mapped object type: %d\n", mapped_obj.type); 4570 return false; 4571 } 4572 #endif /* CONFIG_NET_TC_SKB_EXT */ 4573 4574 return true; 4575 } 4576
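/* Usage note: mlx5e_tc_update_skb() is expected to be called from the
 * driver's RX path for CQEs carrying restore metadata in reg_b; a false
 * return tells the caller that chain/CT state could not be restored and
 * the skb should be dropped. When CONFIG_NET_TC_SKB_EXT is disabled the
 * function compiles down to an unconditional "return true".
 */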