1 /* 2 * Copyright (c) 2015, Mellanox Technologies. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 31 */ 32 33 #include <linux/list.h> 34 #include <linux/ip.h> 35 #include <linux/ipv6.h> 36 #include <linux/tcp.h> 37 #include <linux/mlx5/fs.h> 38 #include "en.h" 39 #include "lib/mpfs.h" 40 41 static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv, 42 struct mlx5e_l2_rule *ai, int type); 43 static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv, 44 struct mlx5e_l2_rule *ai); 45 46 enum { 47 MLX5E_FULLMATCH = 0, 48 MLX5E_ALLMULTI = 1, 49 }; 50 51 enum { 52 MLX5E_UC = 0, 53 MLX5E_MC_IPV4 = 1, 54 MLX5E_MC_IPV6 = 2, 55 MLX5E_MC_OTHER = 3, 56 }; 57 58 enum { 59 MLX5E_ACTION_NONE = 0, 60 MLX5E_ACTION_ADD = 1, 61 MLX5E_ACTION_DEL = 2, 62 }; 63 64 struct mlx5e_l2_hash_node { 65 struct hlist_node hlist; 66 u8 action; 67 struct mlx5e_l2_rule ai; 68 bool mpfs; 69 }; 70 71 static inline int mlx5e_hash_l2(u8 *addr) 72 { 73 return addr[5]; 74 } 75 76 static void mlx5e_add_l2_to_hash(struct hlist_head *hash, u8 *addr) 77 { 78 struct mlx5e_l2_hash_node *hn; 79 int ix = mlx5e_hash_l2(addr); 80 int found = 0; 81 82 hlist_for_each_entry(hn, &hash[ix], hlist) 83 if (ether_addr_equal_64bits(hn->ai.addr, addr)) { 84 found = 1; 85 break; 86 } 87 88 if (found) { 89 hn->action = MLX5E_ACTION_NONE; 90 return; 91 } 92 93 hn = kzalloc(sizeof(*hn), GFP_ATOMIC); 94 if (!hn) 95 return; 96 97 ether_addr_copy(hn->ai.addr, addr); 98 hn->action = MLX5E_ACTION_ADD; 99 100 hlist_add_head(&hn->hlist, &hash[ix]); 101 } 102 103 static void mlx5e_del_l2_from_hash(struct mlx5e_l2_hash_node *hn) 104 { 105 hlist_del(&hn->hlist); 106 kfree(hn); 107 } 108 109 static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv) 110 { 111 struct net_device *ndev = priv->netdev; 112 int max_list_size; 113 int list_size; 114 u16 *vlans; 115 int vlan; 116 int err; 117 int i; 118 119 list_size = 0; 120 for_each_set_bit(vlan, priv->fs.vlan.active_cvlans, VLAN_N_VID) 121 list_size++; 122 123 max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list); 124 125 if (list_size > max_list_size) { 126 netdev_warn(ndev, 127 "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n", 128 list_size, max_list_size); 129 list_size = max_list_size; 130 } 131 132 vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL); 133 if (!vlans) 134 return -ENOMEM; 135 136 i = 0; 137 for_each_set_bit(vlan, priv->fs.vlan.active_cvlans, VLAN_N_VID) { 138 if (i >= list_size) 139 break; 140 vlans[i++] = vlan; 141 } 142 143 err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size); 144 if (err) 145 netdev_err(ndev, "Failed to modify vport vlans list err(%d)\n", 146 err); 147 148 kfree(vlans); 149 return err; 150 } 151 152 enum mlx5e_vlan_rule_type { 153 MLX5E_VLAN_RULE_TYPE_UNTAGGED, 154 MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 155 MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 156 MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, 157 MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, 158 }; 159 160 static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv, 161 enum mlx5e_vlan_rule_type rule_type, 162 u16 vid, struct mlx5_flow_spec *spec) 163 { 164 struct mlx5_flow_table *ft = priv->fs.vlan.ft.t; 165 struct mlx5_flow_destination dest = {}; 166 struct mlx5_flow_handle **rule_p; 167 MLX5_DECLARE_FLOW_ACT(flow_act); 168 int err = 0; 169 170 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; 171 dest.ft = priv->fs.l2.ft.t; 172 173 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; 174 175 switch (rule_type) { 176 case MLX5E_VLAN_RULE_TYPE_UNTAGGED: 177 /* cvlan_tag enabled in match criteria and 178 * disabled in match value means both S & C tags 179 * don't exist (untagged of both) 180 */ 181 rule_p = &priv->fs.vlan.untagged_rule; 182 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, 183 outer_headers.cvlan_tag); 184 break; 185 case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID: 186 rule_p = &priv->fs.vlan.any_cvlan_rule; 187 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, 188 outer_headers.cvlan_tag); 189 MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1); 190 break; 191 case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID: 192 rule_p = &priv->fs.vlan.any_svlan_rule; 193 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, 194 outer_headers.svlan_tag); 195 MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1); 196 break; 197 case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID: 198 rule_p = &priv->fs.vlan.active_svlans_rule[vid]; 199 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, 200 outer_headers.svlan_tag); 201 MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1); 202 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, 203 outer_headers.first_vid); 204 MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, 205 vid); 206 break; 207 default: /* MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID */ 208 rule_p = &priv->fs.vlan.active_cvlans_rule[vid]; 209 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, 210 outer_headers.cvlan_tag); 211 MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1); 212 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, 213 outer_headers.first_vid); 214 MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, 215 vid); 216 break; 217 } 218 219 if (WARN_ONCE(*rule_p, "VLAN rule already exists type %d", rule_type)) 220 return 0; 221 222 *rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); 223 224 if (IS_ERR(*rule_p)) { 225 err = PTR_ERR(*rule_p); 226 *rule_p = NULL; 227 netdev_err(priv->netdev, "%s: add rule failed\n", __func__); 228 } 229 230 return err; 231 } 232 233 static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv, 234 enum mlx5e_vlan_rule_type rule_type, u16 vid) 235 { 236 struct mlx5_flow_spec *spec; 237 int err = 0; 238 239 spec = kvzalloc(sizeof(*spec), GFP_KERNEL); 240 if (!spec) 241 return -ENOMEM; 242 243 if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID) 244 mlx5e_vport_context_update_vlans(priv); 245 246 err = __mlx5e_add_vlan_rule(priv, rule_type, vid, spec); 247 248 kvfree(spec); 249 250 return err; 251 } 252 253 static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv, 254 enum mlx5e_vlan_rule_type rule_type, u16 vid) 255 { 256 switch (rule_type) { 257 case MLX5E_VLAN_RULE_TYPE_UNTAGGED: 258 if (priv->fs.vlan.untagged_rule) { 259 mlx5_del_flow_rules(priv->fs.vlan.untagged_rule); 260 priv->fs.vlan.untagged_rule = NULL; 261 } 262 break; 263 case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID: 264 if (priv->fs.vlan.any_cvlan_rule) { 265 mlx5_del_flow_rules(priv->fs.vlan.any_cvlan_rule); 266 priv->fs.vlan.any_cvlan_rule = NULL; 267 } 268 break; 269 case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID: 270 if (priv->fs.vlan.any_svlan_rule) { 271 mlx5_del_flow_rules(priv->fs.vlan.any_svlan_rule); 272 priv->fs.vlan.any_svlan_rule = NULL; 273 } 274 break; 275 case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID: 276 if (priv->fs.vlan.active_svlans_rule[vid]) { 277 mlx5_del_flow_rules(priv->fs.vlan.active_svlans_rule[vid]); 278 priv->fs.vlan.active_svlans_rule[vid] = NULL; 279 } 280 break; 281 case MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID: 282 if (priv->fs.vlan.active_cvlans_rule[vid]) { 283 mlx5_del_flow_rules(priv->fs.vlan.active_cvlans_rule[vid]); 284 priv->fs.vlan.active_cvlans_rule[vid] = NULL; 285 } 286 mlx5e_vport_context_update_vlans(priv); 287 break; 288 } 289 } 290 291 static void mlx5e_del_any_vid_rules(struct mlx5e_priv *priv) 292 { 293 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0); 294 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0); 295 } 296 297 static int mlx5e_add_any_vid_rules(struct mlx5e_priv *priv) 298 { 299 int err; 300 301 err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0); 302 if (err) 303 return err; 304 305 return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0); 306 } 307 308 static struct mlx5_flow_handle * 309 mlx5e_add_trap_rule(struct mlx5_flow_table *ft, int trap_id, int tir_num) 310 { 311 struct mlx5_flow_destination dest = {}; 312 MLX5_DECLARE_FLOW_ACT(flow_act); 313 struct mlx5_flow_handle *rule; 314 struct mlx5_flow_spec *spec; 315 316 spec = kvzalloc(sizeof(*spec), GFP_KERNEL); 317 if (!spec) 318 return ERR_PTR(-ENOMEM); 319 spec->flow_context.flags |= FLOW_CONTEXT_HAS_TAG; 320 spec->flow_context.flow_tag = trap_id; 321 dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; 322 dest.tir_num = tir_num; 323 324 rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); 325 kvfree(spec); 326 return rule; 327 } 328 329 int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int trap_id, int tir_num) 330 { 331 struct mlx5_flow_table *ft = priv->fs.vlan.ft.t; 332 struct mlx5_flow_handle *rule; 333 int err; 334 335 rule = mlx5e_add_trap_rule(ft, trap_id, tir_num); 336 if (IS_ERR(rule)) { 337 err = PTR_ERR(rule); 338 priv->fs.vlan.trap_rule = NULL; 339 netdev_err(priv->netdev, "%s: add VLAN trap rule failed, err %d\n", 340 __func__, err); 341 return err; 342 } 343 priv->fs.vlan.trap_rule = rule; 344 return 0; 345 } 346 347 void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv) 348 { 349 if (priv->fs.vlan.trap_rule) { 350 mlx5_del_flow_rules(priv->fs.vlan.trap_rule); 351 priv->fs.vlan.trap_rule = NULL; 352 } 353 } 354 355 int mlx5e_add_mac_trap(struct mlx5e_priv *priv, int trap_id, int tir_num) 356 { 357 struct mlx5_flow_table *ft = priv->fs.l2.ft.t; 358 struct mlx5_flow_handle *rule; 359 int err; 360 361 rule = mlx5e_add_trap_rule(ft, trap_id, tir_num); 362 if (IS_ERR(rule)) { 363 err = PTR_ERR(rule); 364 priv->fs.l2.trap_rule = NULL; 365 netdev_err(priv->netdev, "%s: add MAC trap rule failed, err %d\n", 366 __func__, err); 367 return err; 368 } 369 priv->fs.l2.trap_rule = rule; 370 return 0; 371 } 372 373 void mlx5e_remove_mac_trap(struct mlx5e_priv *priv) 374 { 375 if (priv->fs.l2.trap_rule) { 376 mlx5_del_flow_rules(priv->fs.l2.trap_rule); 377 priv->fs.l2.trap_rule = NULL; 378 } 379 } 380 381 void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv) 382 { 383 if (!priv->fs.vlan.cvlan_filter_disabled) 384 return; 385 386 priv->fs.vlan.cvlan_filter_disabled = false; 387 if (priv->netdev->flags & IFF_PROMISC) 388 return; 389 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0); 390 } 391 392 void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv) 393 { 394 if (priv->fs.vlan.cvlan_filter_disabled) 395 return; 396 397 priv->fs.vlan.cvlan_filter_disabled = true; 398 if (priv->netdev->flags & IFF_PROMISC) 399 return; 400 mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0); 401 } 402 403 static int mlx5e_vlan_rx_add_cvid(struct mlx5e_priv *priv, u16 vid) 404 { 405 int err; 406 407 set_bit(vid, priv->fs.vlan.active_cvlans); 408 409 err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid); 410 if (err) 411 clear_bit(vid, priv->fs.vlan.active_cvlans); 412 413 return err; 414 } 415 416 static int mlx5e_vlan_rx_add_svid(struct mlx5e_priv *priv, u16 vid) 417 { 418 struct net_device *netdev = priv->netdev; 419 int err; 420 421 set_bit(vid, priv->fs.vlan.active_svlans); 422 423 err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid); 424 if (err) { 425 clear_bit(vid, priv->fs.vlan.active_svlans); 426 return err; 427 } 428 429 /* Need to fix some features.. */ 430 netdev_update_features(netdev); 431 return err; 432 } 433 434 int mlx5e_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) 435 { 436 struct mlx5e_priv *priv = netdev_priv(dev); 437 438 if (be16_to_cpu(proto) == ETH_P_8021Q) 439 return mlx5e_vlan_rx_add_cvid(priv, vid); 440 else if (be16_to_cpu(proto) == ETH_P_8021AD) 441 return mlx5e_vlan_rx_add_svid(priv, vid); 442 443 return -EOPNOTSUPP; 444 } 445 446 int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) 447 { 448 struct mlx5e_priv *priv = netdev_priv(dev); 449 450 if (be16_to_cpu(proto) == ETH_P_8021Q) { 451 clear_bit(vid, priv->fs.vlan.active_cvlans); 452 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid); 453 } else if (be16_to_cpu(proto) == ETH_P_8021AD) { 454 clear_bit(vid, priv->fs.vlan.active_svlans); 455 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid); 456 netdev_update_features(dev); 457 } 458 459 return 0; 460 } 461 462 static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv) 463 { 464 int i; 465 466 mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0); 467 468 for_each_set_bit(i, priv->fs.vlan.active_cvlans, VLAN_N_VID) { 469 mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i); 470 } 471 472 for_each_set_bit(i, priv->fs.vlan.active_svlans, VLAN_N_VID) 473 mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i); 474 475 if (priv->fs.vlan.cvlan_filter_disabled) 476 mlx5e_add_any_vid_rules(priv); 477 } 478 479 static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv) 480 { 481 int i; 482 483 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0); 484 485 for_each_set_bit(i, priv->fs.vlan.active_cvlans, VLAN_N_VID) { 486 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i); 487 } 488 489 for_each_set_bit(i, priv->fs.vlan.active_svlans, VLAN_N_VID) 490 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i); 491 492 WARN_ON_ONCE(!(test_bit(MLX5E_STATE_DESTROYING, &priv->state))); 493 494 mlx5e_remove_vlan_trap(priv); 495 496 /* must be called after DESTROY bit is set and 497 * set_rx_mode is called and flushed 498 */ 499 if (priv->fs.vlan.cvlan_filter_disabled) 500 mlx5e_del_any_vid_rules(priv); 501 } 502 503 #define mlx5e_for_each_hash_node(hn, tmp, hash, i) \ 504 for (i = 0; i < MLX5E_L2_ADDR_HASH_SIZE; i++) \ 505 hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist) 506 507 static void mlx5e_execute_l2_action(struct mlx5e_priv *priv, 508 struct mlx5e_l2_hash_node *hn) 509 { 510 u8 action = hn->action; 511 u8 mac_addr[ETH_ALEN]; 512 int l2_err = 0; 513 514 ether_addr_copy(mac_addr, hn->ai.addr); 515 516 switch (action) { 517 case MLX5E_ACTION_ADD: 518 mlx5e_add_l2_flow_rule(priv, &hn->ai, MLX5E_FULLMATCH); 519 if (!is_multicast_ether_addr(mac_addr)) { 520 l2_err = mlx5_mpfs_add_mac(priv->mdev, mac_addr); 521 hn->mpfs = !l2_err; 522 } 523 hn->action = MLX5E_ACTION_NONE; 524 break; 525 526 case MLX5E_ACTION_DEL: 527 if (!is_multicast_ether_addr(mac_addr) && hn->mpfs) 528 l2_err = mlx5_mpfs_del_mac(priv->mdev, mac_addr); 529 mlx5e_del_l2_flow_rule(priv, &hn->ai); 530 mlx5e_del_l2_from_hash(hn); 531 break; 532 } 533 534 if (l2_err) 535 netdev_warn(priv->netdev, "MPFS, failed to %s mac %pM, err(%d)\n", 536 action == MLX5E_ACTION_ADD ? "add" : "del", mac_addr, l2_err); 537 } 538 539 static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv) 540 { 541 struct net_device *netdev = priv->netdev; 542 struct netdev_hw_addr *ha; 543 544 netif_addr_lock_bh(netdev); 545 546 mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc, 547 priv->netdev->dev_addr); 548 549 netdev_for_each_uc_addr(ha, netdev) 550 mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc, ha->addr); 551 552 netdev_for_each_mc_addr(ha, netdev) 553 mlx5e_add_l2_to_hash(priv->fs.l2.netdev_mc, ha->addr); 554 555 netif_addr_unlock_bh(netdev); 556 } 557 558 static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type, 559 u8 addr_array[][ETH_ALEN], int size) 560 { 561 bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC); 562 struct net_device *ndev = priv->netdev; 563 struct mlx5e_l2_hash_node *hn; 564 struct hlist_head *addr_list; 565 struct hlist_node *tmp; 566 int i = 0; 567 int hi; 568 569 addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc; 570 571 if (is_uc) /* Make sure our own address is pushed first */ 572 ether_addr_copy(addr_array[i++], ndev->dev_addr); 573 else if (priv->fs.l2.broadcast_enabled) 574 ether_addr_copy(addr_array[i++], ndev->broadcast); 575 576 mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) { 577 if (ether_addr_equal(ndev->dev_addr, hn->ai.addr)) 578 continue; 579 if (i >= size) 580 break; 581 ether_addr_copy(addr_array[i++], hn->ai.addr); 582 } 583 } 584 585 static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv, 586 int list_type) 587 { 588 bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC); 589 struct mlx5e_l2_hash_node *hn; 590 u8 (*addr_array)[ETH_ALEN] = NULL; 591 struct hlist_head *addr_list; 592 struct hlist_node *tmp; 593 int max_size; 594 int size; 595 int err; 596 int hi; 597 598 size = is_uc ? 0 : (priv->fs.l2.broadcast_enabled ? 1 : 0); 599 max_size = is_uc ? 600 1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) : 601 1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list); 602 603 addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc; 604 mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) 605 size++; 606 607 if (size > max_size) { 608 netdev_warn(priv->netdev, 609 "netdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n", 610 is_uc ? "UC" : "MC", size, max_size); 611 size = max_size; 612 } 613 614 if (size) { 615 addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL); 616 if (!addr_array) { 617 err = -ENOMEM; 618 goto out; 619 } 620 mlx5e_fill_addr_array(priv, list_type, addr_array, size); 621 } 622 623 err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size); 624 out: 625 if (err) 626 netdev_err(priv->netdev, 627 "Failed to modify vport %s list err(%d)\n", 628 is_uc ? "UC" : "MC", err); 629 kfree(addr_array); 630 } 631 632 static void mlx5e_vport_context_update(struct mlx5e_priv *priv) 633 { 634 struct mlx5e_l2_table *ea = &priv->fs.l2; 635 636 mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_UC); 637 mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_MC); 638 mlx5_modify_nic_vport_promisc(priv->mdev, 0, 639 ea->allmulti_enabled, 640 ea->promisc_enabled); 641 } 642 643 static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv) 644 { 645 struct mlx5e_l2_hash_node *hn; 646 struct hlist_node *tmp; 647 int i; 648 649 mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i) 650 mlx5e_execute_l2_action(priv, hn); 651 652 mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i) 653 mlx5e_execute_l2_action(priv, hn); 654 } 655 656 static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv) 657 { 658 struct mlx5e_l2_hash_node *hn; 659 struct hlist_node *tmp; 660 int i; 661 662 mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i) 663 hn->action = MLX5E_ACTION_DEL; 664 mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i) 665 hn->action = MLX5E_ACTION_DEL; 666 667 if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state)) 668 mlx5e_sync_netdev_addr(priv); 669 670 mlx5e_apply_netdev_addr(priv); 671 } 672 673 #define MLX5E_PROMISC_GROUP0_SIZE BIT(0) 674 #define MLX5E_PROMISC_TABLE_SIZE MLX5E_PROMISC_GROUP0_SIZE 675 676 static int mlx5e_add_promisc_rule(struct mlx5e_priv *priv) 677 { 678 struct mlx5_flow_table *ft = priv->fs.promisc.ft.t; 679 struct mlx5_flow_destination dest = {}; 680 struct mlx5_flow_handle **rule_p; 681 MLX5_DECLARE_FLOW_ACT(flow_act); 682 struct mlx5_flow_spec *spec; 683 int err = 0; 684 685 spec = kvzalloc(sizeof(*spec), GFP_KERNEL); 686 if (!spec) 687 return -ENOMEM; 688 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; 689 dest.ft = priv->fs.ttc.ft.t; 690 691 rule_p = &priv->fs.promisc.rule; 692 *rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); 693 if (IS_ERR(*rule_p)) { 694 err = PTR_ERR(*rule_p); 695 *rule_p = NULL; 696 netdev_err(priv->netdev, "%s: add promiscuous rule failed\n", __func__); 697 } 698 kvfree(spec); 699 return err; 700 } 701 702 static int mlx5e_create_promisc_table(struct mlx5e_priv *priv) 703 { 704 struct mlx5e_flow_table *ft = &priv->fs.promisc.ft; 705 struct mlx5_flow_table_attr ft_attr = {}; 706 int err; 707 708 ft_attr.max_fte = MLX5E_PROMISC_TABLE_SIZE; 709 ft_attr.autogroup.max_num_groups = 1; 710 ft_attr.level = MLX5E_PROMISC_FT_LEVEL; 711 ft_attr.prio = MLX5E_NIC_PRIO; 712 713 ft->t = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr); 714 if (IS_ERR(ft->t)) { 715 err = PTR_ERR(ft->t); 716 netdev_err(priv->netdev, "fail to create promisc table err=%d\n", err); 717 return err; 718 } 719 720 err = mlx5e_add_promisc_rule(priv); 721 if (err) 722 goto err_destroy_promisc_table; 723 724 return 0; 725 726 err_destroy_promisc_table: 727 mlx5_destroy_flow_table(ft->t); 728 ft->t = NULL; 729 730 return err; 731 } 732 733 static void mlx5e_del_promisc_rule(struct mlx5e_priv *priv) 734 { 735 if (WARN(!priv->fs.promisc.rule, "Trying to remove non-existing promiscuous rule")) 736 return; 737 mlx5_del_flow_rules(priv->fs.promisc.rule); 738 priv->fs.promisc.rule = NULL; 739 } 740 741 static void mlx5e_destroy_promisc_table(struct mlx5e_priv *priv) 742 { 743 if (WARN(!priv->fs.promisc.ft.t, "Trying to remove non-existing promiscuous table")) 744 return; 745 mlx5e_del_promisc_rule(priv); 746 mlx5_destroy_flow_table(priv->fs.promisc.ft.t); 747 priv->fs.promisc.ft.t = NULL; 748 } 749 750 void mlx5e_set_rx_mode_work(struct work_struct *work) 751 { 752 struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv, 753 set_rx_mode_work); 754 755 struct mlx5e_l2_table *ea = &priv->fs.l2; 756 struct net_device *ndev = priv->netdev; 757 758 bool rx_mode_enable = !test_bit(MLX5E_STATE_DESTROYING, &priv->state); 759 bool promisc_enabled = rx_mode_enable && (ndev->flags & IFF_PROMISC); 760 bool allmulti_enabled = rx_mode_enable && (ndev->flags & IFF_ALLMULTI); 761 bool broadcast_enabled = rx_mode_enable; 762 763 bool enable_promisc = !ea->promisc_enabled && promisc_enabled; 764 bool disable_promisc = ea->promisc_enabled && !promisc_enabled; 765 bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled; 766 bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled; 767 bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled; 768 bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled; 769 int err; 770 771 if (enable_promisc) { 772 err = mlx5e_create_promisc_table(priv); 773 if (err) 774 enable_promisc = false; 775 if (!priv->channels.params.vlan_strip_disable && !err) 776 netdev_warn_once(ndev, 777 "S-tagged traffic will be dropped while C-tag vlan stripping is enabled\n"); 778 } 779 if (enable_allmulti) 780 mlx5e_add_l2_flow_rule(priv, &ea->allmulti, MLX5E_ALLMULTI); 781 if (enable_broadcast) 782 mlx5e_add_l2_flow_rule(priv, &ea->broadcast, MLX5E_FULLMATCH); 783 784 mlx5e_handle_netdev_addr(priv); 785 786 if (disable_broadcast) 787 mlx5e_del_l2_flow_rule(priv, &ea->broadcast); 788 if (disable_allmulti) 789 mlx5e_del_l2_flow_rule(priv, &ea->allmulti); 790 if (disable_promisc) 791 mlx5e_destroy_promisc_table(priv); 792 793 ea->promisc_enabled = promisc_enabled; 794 ea->allmulti_enabled = allmulti_enabled; 795 ea->broadcast_enabled = broadcast_enabled; 796 797 mlx5e_vport_context_update(priv); 798 } 799 800 static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft) 801 { 802 int i; 803 804 for (i = ft->num_groups - 1; i >= 0; i--) { 805 if (!IS_ERR_OR_NULL(ft->g[i])) 806 mlx5_destroy_flow_group(ft->g[i]); 807 ft->g[i] = NULL; 808 } 809 ft->num_groups = 0; 810 } 811 812 void mlx5e_init_l2_addr(struct mlx5e_priv *priv) 813 { 814 ether_addr_copy(priv->fs.l2.broadcast.addr, priv->netdev->broadcast); 815 } 816 817 void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft) 818 { 819 mlx5e_destroy_groups(ft); 820 kfree(ft->g); 821 mlx5_destroy_flow_table(ft->t); 822 ft->t = NULL; 823 } 824 825 static void mlx5e_cleanup_ttc_rules(struct mlx5e_ttc_table *ttc) 826 { 827 int i; 828 829 for (i = 0; i < MLX5E_NUM_TT; i++) { 830 if (!IS_ERR_OR_NULL(ttc->rules[i].rule)) { 831 mlx5_del_flow_rules(ttc->rules[i].rule); 832 ttc->rules[i].rule = NULL; 833 } 834 } 835 836 for (i = 0; i < MLX5E_NUM_TUNNEL_TT; i++) { 837 if (!IS_ERR_OR_NULL(ttc->tunnel_rules[i])) { 838 mlx5_del_flow_rules(ttc->tunnel_rules[i]); 839 ttc->tunnel_rules[i] = NULL; 840 } 841 } 842 } 843 844 struct mlx5e_etype_proto { 845 u16 etype; 846 u8 proto; 847 }; 848 849 static struct mlx5e_etype_proto ttc_rules[] = { 850 [MLX5E_TT_IPV4_TCP] = { 851 .etype = ETH_P_IP, 852 .proto = IPPROTO_TCP, 853 }, 854 [MLX5E_TT_IPV6_TCP] = { 855 .etype = ETH_P_IPV6, 856 .proto = IPPROTO_TCP, 857 }, 858 [MLX5E_TT_IPV4_UDP] = { 859 .etype = ETH_P_IP, 860 .proto = IPPROTO_UDP, 861 }, 862 [MLX5E_TT_IPV6_UDP] = { 863 .etype = ETH_P_IPV6, 864 .proto = IPPROTO_UDP, 865 }, 866 [MLX5E_TT_IPV4_IPSEC_AH] = { 867 .etype = ETH_P_IP, 868 .proto = IPPROTO_AH, 869 }, 870 [MLX5E_TT_IPV6_IPSEC_AH] = { 871 .etype = ETH_P_IPV6, 872 .proto = IPPROTO_AH, 873 }, 874 [MLX5E_TT_IPV4_IPSEC_ESP] = { 875 .etype = ETH_P_IP, 876 .proto = IPPROTO_ESP, 877 }, 878 [MLX5E_TT_IPV6_IPSEC_ESP] = { 879 .etype = ETH_P_IPV6, 880 .proto = IPPROTO_ESP, 881 }, 882 [MLX5E_TT_IPV4] = { 883 .etype = ETH_P_IP, 884 .proto = 0, 885 }, 886 [MLX5E_TT_IPV6] = { 887 .etype = ETH_P_IPV6, 888 .proto = 0, 889 }, 890 [MLX5E_TT_ANY] = { 891 .etype = 0, 892 .proto = 0, 893 }, 894 }; 895 896 static struct mlx5e_etype_proto ttc_tunnel_rules[] = { 897 [MLX5E_TT_IPV4_GRE] = { 898 .etype = ETH_P_IP, 899 .proto = IPPROTO_GRE, 900 }, 901 [MLX5E_TT_IPV6_GRE] = { 902 .etype = ETH_P_IPV6, 903 .proto = IPPROTO_GRE, 904 }, 905 [MLX5E_TT_IPV4_IPIP] = { 906 .etype = ETH_P_IP, 907 .proto = IPPROTO_IPIP, 908 }, 909 [MLX5E_TT_IPV6_IPIP] = { 910 .etype = ETH_P_IPV6, 911 .proto = IPPROTO_IPIP, 912 }, 913 [MLX5E_TT_IPV4_IPV6] = { 914 .etype = ETH_P_IP, 915 .proto = IPPROTO_IPV6, 916 }, 917 [MLX5E_TT_IPV6_IPV6] = { 918 .etype = ETH_P_IPV6, 919 .proto = IPPROTO_IPV6, 920 }, 921 922 }; 923 924 u8 mlx5e_get_proto_by_tunnel_type(enum mlx5e_tunnel_types tt) 925 { 926 return ttc_tunnel_rules[tt].proto; 927 } 928 929 static bool mlx5e_tunnel_proto_supported_rx(struct mlx5_core_dev *mdev, u8 proto_type) 930 { 931 switch (proto_type) { 932 case IPPROTO_GRE: 933 return MLX5_CAP_ETH(mdev, tunnel_stateless_gre); 934 case IPPROTO_IPIP: 935 case IPPROTO_IPV6: 936 return (MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip) || 937 MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip_rx)); 938 default: 939 return false; 940 } 941 } 942 943 static bool mlx5e_tunnel_any_rx_proto_supported(struct mlx5_core_dev *mdev) 944 { 945 int tt; 946 947 for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) { 948 if (mlx5e_tunnel_proto_supported_rx(mdev, ttc_tunnel_rules[tt].proto)) 949 return true; 950 } 951 return false; 952 } 953 954 bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev) 955 { 956 return (mlx5e_tunnel_any_rx_proto_supported(mdev) && 957 MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.inner_ip_version)); 958 } 959 960 static u8 mlx5e_etype_to_ipv(u16 ethertype) 961 { 962 if (ethertype == ETH_P_IP) 963 return 4; 964 965 if (ethertype == ETH_P_IPV6) 966 return 6; 967 968 return 0; 969 } 970 971 static struct mlx5_flow_handle * 972 mlx5e_generate_ttc_rule(struct mlx5e_priv *priv, 973 struct mlx5_flow_table *ft, 974 struct mlx5_flow_destination *dest, 975 u16 etype, 976 u8 proto) 977 { 978 int match_ipv_outer = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version); 979 MLX5_DECLARE_FLOW_ACT(flow_act); 980 struct mlx5_flow_handle *rule; 981 struct mlx5_flow_spec *spec; 982 int err = 0; 983 u8 ipv; 984 985 spec = kvzalloc(sizeof(*spec), GFP_KERNEL); 986 if (!spec) 987 return ERR_PTR(-ENOMEM); 988 989 if (proto) { 990 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; 991 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol); 992 MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, proto); 993 } 994 995 ipv = mlx5e_etype_to_ipv(etype); 996 if (match_ipv_outer && ipv) { 997 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; 998 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version); 999 MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, ipv); 1000 } else if (etype) { 1001 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; 1002 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype); 1003 MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, etype); 1004 } 1005 1006 rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1); 1007 if (IS_ERR(rule)) { 1008 err = PTR_ERR(rule); 1009 netdev_err(priv->netdev, "%s: add rule failed\n", __func__); 1010 } 1011 1012 kvfree(spec); 1013 return err ? ERR_PTR(err) : rule; 1014 } 1015 1016 static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv, 1017 struct ttc_params *params, 1018 struct mlx5e_ttc_table *ttc) 1019 { 1020 struct mlx5_flow_destination dest = {}; 1021 struct mlx5_flow_handle **trules; 1022 struct mlx5e_ttc_rule *rules; 1023 struct mlx5_flow_table *ft; 1024 int tt; 1025 int err; 1026 1027 ft = ttc->ft.t; 1028 rules = ttc->rules; 1029 1030 dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; 1031 for (tt = 0; tt < MLX5E_NUM_TT; tt++) { 1032 struct mlx5e_ttc_rule *rule = &rules[tt]; 1033 1034 if (tt == MLX5E_TT_ANY) 1035 dest.tir_num = params->any_tt_tirn; 1036 else 1037 dest.tir_num = params->indir_tirn[tt]; 1038 1039 rule->rule = mlx5e_generate_ttc_rule(priv, ft, &dest, 1040 ttc_rules[tt].etype, 1041 ttc_rules[tt].proto); 1042 if (IS_ERR(rule->rule)) { 1043 err = PTR_ERR(rule->rule); 1044 rule->rule = NULL; 1045 goto del_rules; 1046 } 1047 rule->default_dest = dest; 1048 } 1049 1050 if (!params->inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev)) 1051 return 0; 1052 1053 trules = ttc->tunnel_rules; 1054 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; 1055 dest.ft = params->inner_ttc->ft.t; 1056 for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) { 1057 if (!mlx5e_tunnel_proto_supported_rx(priv->mdev, 1058 ttc_tunnel_rules[tt].proto)) 1059 continue; 1060 trules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest, 1061 ttc_tunnel_rules[tt].etype, 1062 ttc_tunnel_rules[tt].proto); 1063 if (IS_ERR(trules[tt])) { 1064 err = PTR_ERR(trules[tt]); 1065 trules[tt] = NULL; 1066 goto del_rules; 1067 } 1068 } 1069 1070 return 0; 1071 1072 del_rules: 1073 mlx5e_cleanup_ttc_rules(ttc); 1074 return err; 1075 } 1076 1077 static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc, 1078 bool use_ipv) 1079 { 1080 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); 1081 struct mlx5e_flow_table *ft = &ttc->ft; 1082 int ix = 0; 1083 u32 *in; 1084 int err; 1085 u8 *mc; 1086 1087 ft->g = kcalloc(MLX5E_TTC_NUM_GROUPS, 1088 sizeof(*ft->g), GFP_KERNEL); 1089 if (!ft->g) 1090 return -ENOMEM; 1091 in = kvzalloc(inlen, GFP_KERNEL); 1092 if (!in) { 1093 kfree(ft->g); 1094 ft->g = NULL; 1095 return -ENOMEM; 1096 } 1097 1098 /* L4 Group */ 1099 mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); 1100 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol); 1101 if (use_ipv) 1102 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_version); 1103 else 1104 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); 1105 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); 1106 MLX5_SET_CFG(in, start_flow_index, ix); 1107 ix += MLX5E_TTC_GROUP1_SIZE; 1108 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1109 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1110 if (IS_ERR(ft->g[ft->num_groups])) 1111 goto err; 1112 ft->num_groups++; 1113 1114 /* L3 Group */ 1115 MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0); 1116 MLX5_SET_CFG(in, start_flow_index, ix); 1117 ix += MLX5E_TTC_GROUP2_SIZE; 1118 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1119 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1120 if (IS_ERR(ft->g[ft->num_groups])) 1121 goto err; 1122 ft->num_groups++; 1123 1124 /* Any Group */ 1125 memset(in, 0, inlen); 1126 MLX5_SET_CFG(in, start_flow_index, ix); 1127 ix += MLX5E_TTC_GROUP3_SIZE; 1128 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1129 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1130 if (IS_ERR(ft->g[ft->num_groups])) 1131 goto err; 1132 ft->num_groups++; 1133 1134 kvfree(in); 1135 return 0; 1136 1137 err: 1138 err = PTR_ERR(ft->g[ft->num_groups]); 1139 ft->g[ft->num_groups] = NULL; 1140 kvfree(in); 1141 1142 return err; 1143 } 1144 1145 static struct mlx5_flow_handle * 1146 mlx5e_generate_inner_ttc_rule(struct mlx5e_priv *priv, 1147 struct mlx5_flow_table *ft, 1148 struct mlx5_flow_destination *dest, 1149 u16 etype, u8 proto) 1150 { 1151 MLX5_DECLARE_FLOW_ACT(flow_act); 1152 struct mlx5_flow_handle *rule; 1153 struct mlx5_flow_spec *spec; 1154 int err = 0; 1155 u8 ipv; 1156 1157 spec = kvzalloc(sizeof(*spec), GFP_KERNEL); 1158 if (!spec) 1159 return ERR_PTR(-ENOMEM); 1160 1161 ipv = mlx5e_etype_to_ipv(etype); 1162 if (etype && ipv) { 1163 spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS; 1164 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, inner_headers.ip_version); 1165 MLX5_SET(fte_match_param, spec->match_value, inner_headers.ip_version, ipv); 1166 } 1167 1168 if (proto) { 1169 spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS; 1170 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, inner_headers.ip_protocol); 1171 MLX5_SET(fte_match_param, spec->match_value, inner_headers.ip_protocol, proto); 1172 } 1173 1174 rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1); 1175 if (IS_ERR(rule)) { 1176 err = PTR_ERR(rule); 1177 netdev_err(priv->netdev, "%s: add rule failed\n", __func__); 1178 } 1179 1180 kvfree(spec); 1181 return err ? ERR_PTR(err) : rule; 1182 } 1183 1184 static int mlx5e_generate_inner_ttc_table_rules(struct mlx5e_priv *priv, 1185 struct ttc_params *params, 1186 struct mlx5e_ttc_table *ttc) 1187 { 1188 struct mlx5_flow_destination dest = {}; 1189 struct mlx5e_ttc_rule *rules; 1190 struct mlx5_flow_table *ft; 1191 int err; 1192 int tt; 1193 1194 ft = ttc->ft.t; 1195 rules = ttc->rules; 1196 dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; 1197 1198 for (tt = 0; tt < MLX5E_NUM_TT; tt++) { 1199 struct mlx5e_ttc_rule *rule = &rules[tt]; 1200 1201 if (tt == MLX5E_TT_ANY) 1202 dest.tir_num = params->any_tt_tirn; 1203 else 1204 dest.tir_num = params->indir_tirn[tt]; 1205 1206 rule->rule = mlx5e_generate_inner_ttc_rule(priv, ft, &dest, 1207 ttc_rules[tt].etype, 1208 ttc_rules[tt].proto); 1209 if (IS_ERR(rule->rule)) { 1210 err = PTR_ERR(rule->rule); 1211 rule->rule = NULL; 1212 goto del_rules; 1213 } 1214 rule->default_dest = dest; 1215 } 1216 1217 return 0; 1218 1219 del_rules: 1220 1221 mlx5e_cleanup_ttc_rules(ttc); 1222 return err; 1223 } 1224 1225 static int mlx5e_create_inner_ttc_table_groups(struct mlx5e_ttc_table *ttc) 1226 { 1227 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); 1228 struct mlx5e_flow_table *ft = &ttc->ft; 1229 int ix = 0; 1230 u32 *in; 1231 int err; 1232 u8 *mc; 1233 1234 ft->g = kcalloc(MLX5E_INNER_TTC_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL); 1235 if (!ft->g) 1236 return -ENOMEM; 1237 in = kvzalloc(inlen, GFP_KERNEL); 1238 if (!in) { 1239 kfree(ft->g); 1240 ft->g = NULL; 1241 return -ENOMEM; 1242 } 1243 1244 /* L4 Group */ 1245 mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); 1246 MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol); 1247 MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_version); 1248 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS); 1249 MLX5_SET_CFG(in, start_flow_index, ix); 1250 ix += MLX5E_INNER_TTC_GROUP1_SIZE; 1251 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1252 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1253 if (IS_ERR(ft->g[ft->num_groups])) 1254 goto err; 1255 ft->num_groups++; 1256 1257 /* L3 Group */ 1258 MLX5_SET(fte_match_param, mc, inner_headers.ip_protocol, 0); 1259 MLX5_SET_CFG(in, start_flow_index, ix); 1260 ix += MLX5E_INNER_TTC_GROUP2_SIZE; 1261 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1262 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1263 if (IS_ERR(ft->g[ft->num_groups])) 1264 goto err; 1265 ft->num_groups++; 1266 1267 /* Any Group */ 1268 memset(in, 0, inlen); 1269 MLX5_SET_CFG(in, start_flow_index, ix); 1270 ix += MLX5E_INNER_TTC_GROUP3_SIZE; 1271 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1272 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1273 if (IS_ERR(ft->g[ft->num_groups])) 1274 goto err; 1275 ft->num_groups++; 1276 1277 kvfree(in); 1278 return 0; 1279 1280 err: 1281 err = PTR_ERR(ft->g[ft->num_groups]); 1282 ft->g[ft->num_groups] = NULL; 1283 kvfree(in); 1284 1285 return err; 1286 } 1287 1288 void mlx5e_set_ttc_basic_params(struct mlx5e_priv *priv, 1289 struct ttc_params *ttc_params) 1290 { 1291 ttc_params->any_tt_tirn = priv->direct_tir[0].tirn; 1292 ttc_params->inner_ttc = &priv->fs.inner_ttc; 1293 } 1294 1295 void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params) 1296 { 1297 struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr; 1298 1299 ft_attr->max_fte = MLX5E_INNER_TTC_TABLE_SIZE; 1300 ft_attr->level = MLX5E_INNER_TTC_FT_LEVEL; 1301 ft_attr->prio = MLX5E_NIC_PRIO; 1302 } 1303 1304 void mlx5e_set_ttc_ft_params(struct ttc_params *ttc_params) 1305 1306 { 1307 struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr; 1308 1309 ft_attr->max_fte = MLX5E_TTC_TABLE_SIZE; 1310 ft_attr->level = MLX5E_TTC_FT_LEVEL; 1311 ft_attr->prio = MLX5E_NIC_PRIO; 1312 } 1313 1314 int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params, 1315 struct mlx5e_ttc_table *ttc) 1316 { 1317 struct mlx5e_flow_table *ft = &ttc->ft; 1318 int err; 1319 1320 if (!mlx5e_tunnel_inner_ft_supported(priv->mdev)) 1321 return 0; 1322 1323 ft->t = mlx5_create_flow_table(priv->fs.ns, ¶ms->ft_attr); 1324 if (IS_ERR(ft->t)) { 1325 err = PTR_ERR(ft->t); 1326 ft->t = NULL; 1327 return err; 1328 } 1329 1330 err = mlx5e_create_inner_ttc_table_groups(ttc); 1331 if (err) 1332 goto err; 1333 1334 err = mlx5e_generate_inner_ttc_table_rules(priv, params, ttc); 1335 if (err) 1336 goto err; 1337 1338 return 0; 1339 1340 err: 1341 mlx5e_destroy_flow_table(ft); 1342 return err; 1343 } 1344 1345 void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv, 1346 struct mlx5e_ttc_table *ttc) 1347 { 1348 if (!mlx5e_tunnel_inner_ft_supported(priv->mdev)) 1349 return; 1350 1351 mlx5e_cleanup_ttc_rules(ttc); 1352 mlx5e_destroy_flow_table(&ttc->ft); 1353 } 1354 1355 void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv, 1356 struct mlx5e_ttc_table *ttc) 1357 { 1358 mlx5e_cleanup_ttc_rules(ttc); 1359 mlx5e_destroy_flow_table(&ttc->ft); 1360 } 1361 1362 int mlx5e_create_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params, 1363 struct mlx5e_ttc_table *ttc) 1364 { 1365 bool match_ipv_outer = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version); 1366 struct mlx5e_flow_table *ft = &ttc->ft; 1367 int err; 1368 1369 ft->t = mlx5_create_flow_table(priv->fs.ns, ¶ms->ft_attr); 1370 if (IS_ERR(ft->t)) { 1371 err = PTR_ERR(ft->t); 1372 ft->t = NULL; 1373 return err; 1374 } 1375 1376 err = mlx5e_create_ttc_table_groups(ttc, match_ipv_outer); 1377 if (err) 1378 goto err; 1379 1380 err = mlx5e_generate_ttc_table_rules(priv, params, ttc); 1381 if (err) 1382 goto err; 1383 1384 return 0; 1385 err: 1386 mlx5e_destroy_flow_table(ft); 1387 return err; 1388 } 1389 1390 int mlx5e_ttc_fwd_dest(struct mlx5e_priv *priv, enum mlx5e_traffic_types type, 1391 struct mlx5_flow_destination *new_dest) 1392 { 1393 return mlx5_modify_rule_destination(priv->fs.ttc.rules[type].rule, new_dest, NULL); 1394 } 1395 1396 struct mlx5_flow_destination 1397 mlx5e_ttc_get_default_dest(struct mlx5e_priv *priv, enum mlx5e_traffic_types type) 1398 { 1399 struct mlx5_flow_destination *dest = &priv->fs.ttc.rules[type].default_dest; 1400 1401 WARN_ONCE(dest->type != MLX5_FLOW_DESTINATION_TYPE_TIR, 1402 "TTC[%d] default dest is not setup yet", type); 1403 1404 return *dest; 1405 } 1406 1407 int mlx5e_ttc_fwd_default_dest(struct mlx5e_priv *priv, enum mlx5e_traffic_types type) 1408 { 1409 struct mlx5_flow_destination dest = mlx5e_ttc_get_default_dest(priv, type); 1410 1411 return mlx5e_ttc_fwd_dest(priv, type, &dest); 1412 } 1413 1414 static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv, 1415 struct mlx5e_l2_rule *ai) 1416 { 1417 if (!IS_ERR_OR_NULL(ai->rule)) { 1418 mlx5_del_flow_rules(ai->rule); 1419 ai->rule = NULL; 1420 } 1421 } 1422 1423 static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv, 1424 struct mlx5e_l2_rule *ai, int type) 1425 { 1426 struct mlx5_flow_table *ft = priv->fs.l2.ft.t; 1427 struct mlx5_flow_destination dest = {}; 1428 MLX5_DECLARE_FLOW_ACT(flow_act); 1429 struct mlx5_flow_spec *spec; 1430 int err = 0; 1431 u8 *mc_dmac; 1432 u8 *mv_dmac; 1433 1434 spec = kvzalloc(sizeof(*spec), GFP_KERNEL); 1435 if (!spec) 1436 return -ENOMEM; 1437 1438 mc_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 1439 outer_headers.dmac_47_16); 1440 mv_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_value, 1441 outer_headers.dmac_47_16); 1442 1443 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; 1444 dest.ft = priv->fs.ttc.ft.t; 1445 1446 switch (type) { 1447 case MLX5E_FULLMATCH: 1448 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; 1449 eth_broadcast_addr(mc_dmac); 1450 ether_addr_copy(mv_dmac, ai->addr); 1451 break; 1452 1453 case MLX5E_ALLMULTI: 1454 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; 1455 mc_dmac[0] = 0x01; 1456 mv_dmac[0] = 0x01; 1457 break; 1458 } 1459 1460 ai->rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); 1461 if (IS_ERR(ai->rule)) { 1462 netdev_err(priv->netdev, "%s: add l2 rule(mac:%pM) failed\n", 1463 __func__, mv_dmac); 1464 err = PTR_ERR(ai->rule); 1465 ai->rule = NULL; 1466 } 1467 1468 kvfree(spec); 1469 1470 return err; 1471 } 1472 1473 #define MLX5E_NUM_L2_GROUPS 3 1474 #define MLX5E_L2_GROUP1_SIZE BIT(15) 1475 #define MLX5E_L2_GROUP2_SIZE BIT(0) 1476 #define MLX5E_L2_GROUP_TRAP_SIZE BIT(0) /* must be last */ 1477 #define MLX5E_L2_TABLE_SIZE (MLX5E_L2_GROUP1_SIZE +\ 1478 MLX5E_L2_GROUP2_SIZE +\ 1479 MLX5E_L2_GROUP_TRAP_SIZE) 1480 static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table) 1481 { 1482 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); 1483 struct mlx5e_flow_table *ft = &l2_table->ft; 1484 int ix = 0; 1485 u8 *mc_dmac; 1486 u32 *in; 1487 int err; 1488 u8 *mc; 1489 1490 ft->g = kcalloc(MLX5E_NUM_L2_GROUPS, sizeof(*ft->g), GFP_KERNEL); 1491 if (!ft->g) 1492 return -ENOMEM; 1493 in = kvzalloc(inlen, GFP_KERNEL); 1494 if (!in) { 1495 kfree(ft->g); 1496 return -ENOMEM; 1497 } 1498 1499 mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); 1500 mc_dmac = MLX5_ADDR_OF(fte_match_param, mc, 1501 outer_headers.dmac_47_16); 1502 /* Flow Group for full match */ 1503 eth_broadcast_addr(mc_dmac); 1504 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); 1505 MLX5_SET_CFG(in, start_flow_index, ix); 1506 ix += MLX5E_L2_GROUP1_SIZE; 1507 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1508 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1509 if (IS_ERR(ft->g[ft->num_groups])) 1510 goto err_destroy_groups; 1511 ft->num_groups++; 1512 1513 /* Flow Group for allmulti */ 1514 eth_zero_addr(mc_dmac); 1515 mc_dmac[0] = 0x01; 1516 MLX5_SET_CFG(in, start_flow_index, ix); 1517 ix += MLX5E_L2_GROUP2_SIZE; 1518 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1519 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1520 if (IS_ERR(ft->g[ft->num_groups])) 1521 goto err_destroy_groups; 1522 ft->num_groups++; 1523 1524 /* Flow Group for l2 traps */ 1525 memset(in, 0, inlen); 1526 MLX5_SET_CFG(in, start_flow_index, ix); 1527 ix += MLX5E_L2_GROUP_TRAP_SIZE; 1528 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1529 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1530 if (IS_ERR(ft->g[ft->num_groups])) 1531 goto err_destroy_groups; 1532 ft->num_groups++; 1533 1534 kvfree(in); 1535 return 0; 1536 1537 err_destroy_groups: 1538 err = PTR_ERR(ft->g[ft->num_groups]); 1539 ft->g[ft->num_groups] = NULL; 1540 mlx5e_destroy_groups(ft); 1541 kvfree(in); 1542 kfree(ft->g); 1543 1544 return err; 1545 } 1546 1547 static void mlx5e_destroy_l2_table(struct mlx5e_priv *priv) 1548 { 1549 mlx5e_destroy_flow_table(&priv->fs.l2.ft); 1550 } 1551 1552 static int mlx5e_create_l2_table(struct mlx5e_priv *priv) 1553 { 1554 struct mlx5e_l2_table *l2_table = &priv->fs.l2; 1555 struct mlx5e_flow_table *ft = &l2_table->ft; 1556 struct mlx5_flow_table_attr ft_attr = {}; 1557 int err; 1558 1559 ft->num_groups = 0; 1560 1561 ft_attr.max_fte = MLX5E_L2_TABLE_SIZE; 1562 ft_attr.level = MLX5E_L2_FT_LEVEL; 1563 ft_attr.prio = MLX5E_NIC_PRIO; 1564 1565 ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr); 1566 if (IS_ERR(ft->t)) { 1567 err = PTR_ERR(ft->t); 1568 ft->t = NULL; 1569 return err; 1570 } 1571 1572 err = mlx5e_create_l2_table_groups(l2_table); 1573 if (err) 1574 goto err_destroy_flow_table; 1575 1576 return 0; 1577 1578 err_destroy_flow_table: 1579 mlx5_destroy_flow_table(ft->t); 1580 ft->t = NULL; 1581 1582 return err; 1583 } 1584 1585 #define MLX5E_NUM_VLAN_GROUPS 5 1586 #define MLX5E_VLAN_GROUP0_SIZE BIT(12) 1587 #define MLX5E_VLAN_GROUP1_SIZE BIT(12) 1588 #define MLX5E_VLAN_GROUP2_SIZE BIT(1) 1589 #define MLX5E_VLAN_GROUP3_SIZE BIT(0) 1590 #define MLX5E_VLAN_GROUP_TRAP_SIZE BIT(0) /* must be last */ 1591 #define MLX5E_VLAN_TABLE_SIZE (MLX5E_VLAN_GROUP0_SIZE +\ 1592 MLX5E_VLAN_GROUP1_SIZE +\ 1593 MLX5E_VLAN_GROUP2_SIZE +\ 1594 MLX5E_VLAN_GROUP3_SIZE +\ 1595 MLX5E_VLAN_GROUP_TRAP_SIZE) 1596 1597 static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in, 1598 int inlen) 1599 { 1600 int err; 1601 int ix = 0; 1602 u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); 1603 1604 memset(in, 0, inlen); 1605 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); 1606 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag); 1607 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid); 1608 MLX5_SET_CFG(in, start_flow_index, ix); 1609 ix += MLX5E_VLAN_GROUP0_SIZE; 1610 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1611 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1612 if (IS_ERR(ft->g[ft->num_groups])) 1613 goto err_destroy_groups; 1614 ft->num_groups++; 1615 1616 memset(in, 0, inlen); 1617 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); 1618 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag); 1619 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid); 1620 MLX5_SET_CFG(in, start_flow_index, ix); 1621 ix += MLX5E_VLAN_GROUP1_SIZE; 1622 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1623 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1624 if (IS_ERR(ft->g[ft->num_groups])) 1625 goto err_destroy_groups; 1626 ft->num_groups++; 1627 1628 memset(in, 0, inlen); 1629 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); 1630 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag); 1631 MLX5_SET_CFG(in, start_flow_index, ix); 1632 ix += MLX5E_VLAN_GROUP2_SIZE; 1633 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1634 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1635 if (IS_ERR(ft->g[ft->num_groups])) 1636 goto err_destroy_groups; 1637 ft->num_groups++; 1638 1639 memset(in, 0, inlen); 1640 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); 1641 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag); 1642 MLX5_SET_CFG(in, start_flow_index, ix); 1643 ix += MLX5E_VLAN_GROUP3_SIZE; 1644 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1645 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1646 if (IS_ERR(ft->g[ft->num_groups])) 1647 goto err_destroy_groups; 1648 ft->num_groups++; 1649 1650 memset(in, 0, inlen); 1651 MLX5_SET_CFG(in, start_flow_index, ix); 1652 ix += MLX5E_VLAN_GROUP_TRAP_SIZE; 1653 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1654 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1655 if (IS_ERR(ft->g[ft->num_groups])) 1656 goto err_destroy_groups; 1657 ft->num_groups++; 1658 1659 return 0; 1660 1661 err_destroy_groups: 1662 err = PTR_ERR(ft->g[ft->num_groups]); 1663 ft->g[ft->num_groups] = NULL; 1664 mlx5e_destroy_groups(ft); 1665 1666 return err; 1667 } 1668 1669 static int mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft) 1670 { 1671 u32 *in; 1672 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); 1673 int err; 1674 1675 in = kvzalloc(inlen, GFP_KERNEL); 1676 if (!in) 1677 return -ENOMEM; 1678 1679 err = __mlx5e_create_vlan_table_groups(ft, in, inlen); 1680 1681 kvfree(in); 1682 return err; 1683 } 1684 1685 static int mlx5e_create_vlan_table(struct mlx5e_priv *priv) 1686 { 1687 struct mlx5e_flow_table *ft = &priv->fs.vlan.ft; 1688 struct mlx5_flow_table_attr ft_attr = {}; 1689 int err; 1690 1691 ft->num_groups = 0; 1692 1693 ft_attr.max_fte = MLX5E_VLAN_TABLE_SIZE; 1694 ft_attr.level = MLX5E_VLAN_FT_LEVEL; 1695 ft_attr.prio = MLX5E_NIC_PRIO; 1696 1697 ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr); 1698 1699 if (IS_ERR(ft->t)) { 1700 err = PTR_ERR(ft->t); 1701 ft->t = NULL; 1702 return err; 1703 } 1704 ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL); 1705 if (!ft->g) { 1706 err = -ENOMEM; 1707 goto err_destroy_vlan_table; 1708 } 1709 1710 err = mlx5e_create_vlan_table_groups(ft); 1711 if (err) 1712 goto err_free_g; 1713 1714 mlx5e_add_vlan_rules(priv); 1715 1716 return 0; 1717 1718 err_free_g: 1719 kfree(ft->g); 1720 err_destroy_vlan_table: 1721 mlx5_destroy_flow_table(ft->t); 1722 ft->t = NULL; 1723 1724 return err; 1725 } 1726 1727 static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv) 1728 { 1729 mlx5e_del_vlan_rules(priv); 1730 mlx5e_destroy_flow_table(&priv->fs.vlan.ft); 1731 } 1732 1733 int mlx5e_create_flow_steering(struct mlx5e_priv *priv) 1734 { 1735 struct ttc_params ttc_params = {}; 1736 int tt, err; 1737 1738 priv->fs.ns = mlx5_get_flow_namespace(priv->mdev, 1739 MLX5_FLOW_NAMESPACE_KERNEL); 1740 1741 if (!priv->fs.ns) 1742 return -EOPNOTSUPP; 1743 1744 err = mlx5e_arfs_create_tables(priv); 1745 if (err) { 1746 netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n", 1747 err); 1748 priv->netdev->hw_features &= ~NETIF_F_NTUPLE; 1749 } 1750 1751 mlx5e_set_ttc_basic_params(priv, &ttc_params); 1752 mlx5e_set_inner_ttc_ft_params(&ttc_params); 1753 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) 1754 ttc_params.indir_tirn[tt] = priv->inner_indir_tir[tt].tirn; 1755 1756 err = mlx5e_create_inner_ttc_table(priv, &ttc_params, &priv->fs.inner_ttc); 1757 if (err) { 1758 netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n", 1759 err); 1760 goto err_destroy_arfs_tables; 1761 } 1762 1763 mlx5e_set_ttc_ft_params(&ttc_params); 1764 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) 1765 ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn; 1766 1767 err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc); 1768 if (err) { 1769 netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n", 1770 err); 1771 goto err_destroy_inner_ttc_table; 1772 } 1773 1774 err = mlx5e_create_l2_table(priv); 1775 if (err) { 1776 netdev_err(priv->netdev, "Failed to create l2 table, err=%d\n", 1777 err); 1778 goto err_destroy_ttc_table; 1779 } 1780 1781 err = mlx5e_create_vlan_table(priv); 1782 if (err) { 1783 netdev_err(priv->netdev, "Failed to create vlan table, err=%d\n", 1784 err); 1785 goto err_destroy_l2_table; 1786 } 1787 1788 mlx5e_ethtool_init_steering(priv); 1789 1790 return 0; 1791 1792 err_destroy_l2_table: 1793 mlx5e_destroy_l2_table(priv); 1794 err_destroy_ttc_table: 1795 mlx5e_destroy_ttc_table(priv, &priv->fs.ttc); 1796 err_destroy_inner_ttc_table: 1797 mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc); 1798 err_destroy_arfs_tables: 1799 mlx5e_arfs_destroy_tables(priv); 1800 1801 return err; 1802 } 1803 1804 void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv) 1805 { 1806 mlx5e_destroy_vlan_table(priv); 1807 mlx5e_destroy_l2_table(priv); 1808 mlx5e_destroy_ttc_table(priv, &priv->fs.ttc); 1809 mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc); 1810 mlx5e_arfs_destroy_tables(priv); 1811 mlx5e_ethtool_cleanup_steering(priv); 1812 } 1813