1e8f887acSAmir Vadai /* 2e8f887acSAmir Vadai * Copyright (c) 2016, Mellanox Technologies. All rights reserved. 3e8f887acSAmir Vadai * 4e8f887acSAmir Vadai * This software is available to you under a choice of one of two 5e8f887acSAmir Vadai * licenses. You may choose to be licensed under the terms of the GNU 6e8f887acSAmir Vadai * General Public License (GPL) Version 2, available from the file 7e8f887acSAmir Vadai * COPYING in the main directory of this source tree, or the 8e8f887acSAmir Vadai * OpenIB.org BSD license below: 9e8f887acSAmir Vadai * 10e8f887acSAmir Vadai * Redistribution and use in source and binary forms, with or 11e8f887acSAmir Vadai * without modification, are permitted provided that the following 12e8f887acSAmir Vadai * conditions are met: 13e8f887acSAmir Vadai * 14e8f887acSAmir Vadai * - Redistributions of source code must retain the above 15e8f887acSAmir Vadai * copyright notice, this list of conditions and the following 16e8f887acSAmir Vadai * disclaimer. 17e8f887acSAmir Vadai * 18e8f887acSAmir Vadai * - Redistributions in binary form must reproduce the above 19e8f887acSAmir Vadai * copyright notice, this list of conditions and the following 20e8f887acSAmir Vadai * disclaimer in the documentation and/or other materials 21e8f887acSAmir Vadai * provided with the distribution. 22e8f887acSAmir Vadai * 23e8f887acSAmir Vadai * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24e8f887acSAmir Vadai * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25e8f887acSAmir Vadai * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26e8f887acSAmir Vadai * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27e8f887acSAmir Vadai * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28e8f887acSAmir Vadai * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29e8f887acSAmir Vadai * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30e8f887acSAmir Vadai * SOFTWARE. 
31e8f887acSAmir Vadai */ 32e8f887acSAmir Vadai 33e3a2b7edSAmir Vadai #include <net/flow_dissector.h> 343f7d0eb4SOr Gerlitz #include <net/sch_generic.h> 35e3a2b7edSAmir Vadai #include <net/pkt_cls.h> 36e3a2b7edSAmir Vadai #include <net/tc_act/tc_gact.h> 3712185a9fSAmir Vadai #include <net/tc_act/tc_skbedit.h> 38e8f887acSAmir Vadai #include <linux/mlx5/fs.h> 39e8f887acSAmir Vadai #include <linux/mlx5/device.h> 40e8f887acSAmir Vadai #include <linux/rhashtable.h> 415a7e5bcbSVlad Buslov #include <linux/refcount.h> 4203a9d11eSOr Gerlitz #include <net/tc_act/tc_mirred.h> 43776b12b6SOr Gerlitz #include <net/tc_act/tc_vlan.h> 44bbd00f7eSHadar Hen Zion #include <net/tc_act/tc_tunnel_key.h> 45d79b6df6SOr Gerlitz #include <net/tc_act/tc_pedit.h> 4626c02749SOr Gerlitz #include <net/tc_act/tc_csum.h> 47f6dfb4c3SHadar Hen Zion #include <net/arp.h> 483616d08bSDavid Ahern #include <net/ipv6_stubs.h> 49e8f887acSAmir Vadai #include "en.h" 501d447a39SSaeed Mahameed #include "en_rep.h" 51232c0013SHadar Hen Zion #include "en_tc.h" 5203a9d11eSOr Gerlitz #include "eswitch.h" 533f6d08d1SOr Gerlitz #include "fs_core.h" 542c81bfd5SHuy Nguyen #include "en/port.h" 55101f4de9SOz Shlomo #include "en/tc_tun.h" 5604de7ddaSRoi Dayan #include "lib/devcom.h" 579272e3dfSYevgeny Kliteynik #include "lib/geneve.h" 58e8f887acSAmir Vadai 593bc4b7bfSOr Gerlitz struct mlx5_nic_flow_attr { 603bc4b7bfSOr Gerlitz u32 action; 613bc4b7bfSOr Gerlitz u32 flow_tag; 622f4fe4caSOr Gerlitz u32 mod_hdr_id; 635c65c564SOr Gerlitz u32 hairpin_tirn; 6438aa51c1SOr Gerlitz u8 match_level; 653f6d08d1SOr Gerlitz struct mlx5_flow_table *hairpin_ft; 66b8aee822SMark Bloch struct mlx5_fc *counter; 673bc4b7bfSOr Gerlitz }; 683bc4b7bfSOr Gerlitz 69226f2ca3SVlad Buslov #define MLX5E_TC_FLOW_BASE (MLX5E_TC_FLAG_LAST_EXPORTED_BIT + 1) 7060bd4af8SOr Gerlitz 7165ba8fb7SOr Gerlitz enum { 72226f2ca3SVlad Buslov MLX5E_TC_FLOW_FLAG_INGRESS = MLX5E_TC_FLAG_INGRESS_BIT, 73226f2ca3SVlad Buslov MLX5E_TC_FLOW_FLAG_EGRESS = 
MLX5E_TC_FLAG_EGRESS_BIT, 74226f2ca3SVlad Buslov MLX5E_TC_FLOW_FLAG_ESWITCH = MLX5E_TC_FLAG_ESW_OFFLOAD_BIT, 75226f2ca3SVlad Buslov MLX5E_TC_FLOW_FLAG_NIC = MLX5E_TC_FLAG_NIC_OFFLOAD_BIT, 76226f2ca3SVlad Buslov MLX5E_TC_FLOW_FLAG_OFFLOADED = MLX5E_TC_FLOW_BASE, 77226f2ca3SVlad Buslov MLX5E_TC_FLOW_FLAG_HAIRPIN = MLX5E_TC_FLOW_BASE + 1, 78226f2ca3SVlad Buslov MLX5E_TC_FLOW_FLAG_HAIRPIN_RSS = MLX5E_TC_FLOW_BASE + 2, 79226f2ca3SVlad Buslov MLX5E_TC_FLOW_FLAG_SLOW = MLX5E_TC_FLOW_BASE + 3, 80226f2ca3SVlad Buslov MLX5E_TC_FLOW_FLAG_DUP = MLX5E_TC_FLOW_BASE + 4, 81226f2ca3SVlad Buslov MLX5E_TC_FLOW_FLAG_NOT_READY = MLX5E_TC_FLOW_BASE + 5, 82c5d326b2SVlad Buslov MLX5E_TC_FLOW_FLAG_DELETED = MLX5E_TC_FLOW_BASE + 6, 8365ba8fb7SOr Gerlitz }; 8465ba8fb7SOr Gerlitz 85e4ad91f2SChris Mi #define MLX5E_TC_MAX_SPLITS 1 86e4ad91f2SChris Mi 8779baaec7SEli Britstein /* Helper struct for accessing a struct containing list_head array. 8879baaec7SEli Britstein * Containing struct 8979baaec7SEli Britstein * |- Helper array 9079baaec7SEli Britstein * [0] Helper item 0 9179baaec7SEli Britstein * |- list_head item 0 9279baaec7SEli Britstein * |- index (0) 9379baaec7SEli Britstein * [1] Helper item 1 9479baaec7SEli Britstein * |- list_head item 1 9579baaec7SEli Britstein * |- index (1) 9679baaec7SEli Britstein * To access the containing struct from one of the list_head items: 9779baaec7SEli Britstein * 1. Get the helper item from the list_head item using 9879baaec7SEli Britstein * helper item = 9979baaec7SEli Britstein * container_of(list_head item, helper struct type, list_head field) 10079baaec7SEli Britstein * 2. 
Get the contining struct from the helper item and its index in the array: 10179baaec7SEli Britstein * containing struct = 10279baaec7SEli Britstein * container_of(helper item, containing struct type, helper field[index]) 10379baaec7SEli Britstein */ 10479baaec7SEli Britstein struct encap_flow_item { 10579baaec7SEli Britstein struct list_head list; 10679baaec7SEli Britstein int index; 10779baaec7SEli Britstein }; 10879baaec7SEli Britstein 109e8f887acSAmir Vadai struct mlx5e_tc_flow { 110e8f887acSAmir Vadai struct rhash_head node; 111655dc3d2SOr Gerlitz struct mlx5e_priv *priv; 112e8f887acSAmir Vadai u64 cookie; 113226f2ca3SVlad Buslov unsigned long flags; 114e4ad91f2SChris Mi struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1]; 11579baaec7SEli Britstein /* Flow can be associated with multiple encap IDs. 11679baaec7SEli Britstein * The number of encaps is bounded by the number of supported 11779baaec7SEli Britstein * destinations. 11879baaec7SEli Britstein */ 11979baaec7SEli Britstein struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS]; 12004de7ddaSRoi Dayan struct mlx5e_tc_flow *peer_flow; 12111c9c548SOr Gerlitz struct list_head mod_hdr; /* flows sharing the same mod hdr ID */ 122e4f9abbdSVlad Buslov struct mlx5e_hairpin_entry *hpe; /* attached hairpin instance */ 1235c65c564SOr Gerlitz struct list_head hairpin; /* flows sharing the same hairpin */ 12404de7ddaSRoi Dayan struct list_head peer; /* flows with peer flow */ 125b4a23329SRoi Dayan struct list_head unready; /* flows not ready to be offloaded (e.g due to missing route) */ 1265a7e5bcbSVlad Buslov refcount_t refcnt; 127c5d326b2SVlad Buslov struct rcu_head rcu_head; 1283bc4b7bfSOr Gerlitz union { 129ecf5bb79SOr Gerlitz struct mlx5_esw_flow_attr esw_attr[0]; 1303bc4b7bfSOr Gerlitz struct mlx5_nic_flow_attr nic_attr[0]; 1313bc4b7bfSOr Gerlitz }; 132e8f887acSAmir Vadai }; 133e8f887acSAmir Vadai 13417091853SOr Gerlitz struct mlx5e_tc_flow_parse_attr { 1351f6da306SYevgeny Kliteynik const struct ip_tunnel_info 
*tun_info[MLX5_MAX_FLOW_FWD_VPORTS]; 136d11afc26SOz Shlomo struct net_device *filter_dev; 13717091853SOr Gerlitz struct mlx5_flow_spec spec; 138d79b6df6SOr Gerlitz int num_mod_hdr_actions; 139218d05ceSTonghao Zhang int max_mod_hdr_actions; 140d79b6df6SOr Gerlitz void *mod_hdr_actions; 14198b66cb1SEli Britstein int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS]; 14217091853SOr Gerlitz }; 14317091853SOr Gerlitz 144acff797cSMaor Gottlieb #define MLX5E_TC_TABLE_NUM_GROUPS 4 145b3a433deSOr Gerlitz #define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(16) 146e8f887acSAmir Vadai 14777ab67b7SOr Gerlitz struct mlx5e_hairpin { 14877ab67b7SOr Gerlitz struct mlx5_hairpin *pair; 14977ab67b7SOr Gerlitz 15077ab67b7SOr Gerlitz struct mlx5_core_dev *func_mdev; 1513f6d08d1SOr Gerlitz struct mlx5e_priv *func_priv; 15277ab67b7SOr Gerlitz u32 tdn; 15377ab67b7SOr Gerlitz u32 tirn; 1543f6d08d1SOr Gerlitz 1553f6d08d1SOr Gerlitz int num_channels; 1563f6d08d1SOr Gerlitz struct mlx5e_rqt indir_rqt; 1573f6d08d1SOr Gerlitz u32 indir_tirn[MLX5E_NUM_INDIR_TIRS]; 1583f6d08d1SOr Gerlitz struct mlx5e_ttc_table ttc; 15977ab67b7SOr Gerlitz }; 16077ab67b7SOr Gerlitz 1615c65c564SOr Gerlitz struct mlx5e_hairpin_entry { 1625c65c564SOr Gerlitz /* a node of a hash table which keeps all the hairpin entries */ 1635c65c564SOr Gerlitz struct hlist_node hairpin_hlist; 1645c65c564SOr Gerlitz 16573edca73SVlad Buslov /* protects flows list */ 16673edca73SVlad Buslov spinlock_t flows_lock; 1675c65c564SOr Gerlitz /* flows sharing the same hairpin */ 1685c65c564SOr Gerlitz struct list_head flows; 1695c65c564SOr Gerlitz 170d8822868SOr Gerlitz u16 peer_vhca_id; 171106be53bSOr Gerlitz u8 prio; 1725c65c564SOr Gerlitz struct mlx5e_hairpin *hp; 173e4f9abbdSVlad Buslov refcount_t refcnt; 1745c65c564SOr Gerlitz }; 1755c65c564SOr Gerlitz 17611c9c548SOr Gerlitz struct mod_hdr_key { 17711c9c548SOr Gerlitz int num_actions; 17811c9c548SOr Gerlitz void *actions; 17911c9c548SOr Gerlitz }; 18011c9c548SOr Gerlitz 18111c9c548SOr Gerlitz struct 
mlx5e_mod_hdr_entry { 18211c9c548SOr Gerlitz /* a node of a hash table which keeps all the mod_hdr entries */ 18311c9c548SOr Gerlitz struct hlist_node mod_hdr_hlist; 18411c9c548SOr Gerlitz 18511c9c548SOr Gerlitz /* flows sharing the same mod_hdr entry */ 18611c9c548SOr Gerlitz struct list_head flows; 18711c9c548SOr Gerlitz 18811c9c548SOr Gerlitz struct mod_hdr_key key; 18911c9c548SOr Gerlitz 19011c9c548SOr Gerlitz u32 mod_hdr_id; 19111c9c548SOr Gerlitz }; 19211c9c548SOr Gerlitz 19311c9c548SOr Gerlitz #define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto) 19411c9c548SOr Gerlitz 1955a7e5bcbSVlad Buslov static void mlx5e_tc_del_flow(struct mlx5e_priv *priv, 1965a7e5bcbSVlad Buslov struct mlx5e_tc_flow *flow); 1975a7e5bcbSVlad Buslov 1985a7e5bcbSVlad Buslov static struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow) 1995a7e5bcbSVlad Buslov { 2005a7e5bcbSVlad Buslov if (!flow || !refcount_inc_not_zero(&flow->refcnt)) 2015a7e5bcbSVlad Buslov return ERR_PTR(-EINVAL); 2025a7e5bcbSVlad Buslov return flow; 2035a7e5bcbSVlad Buslov } 2045a7e5bcbSVlad Buslov 2055a7e5bcbSVlad Buslov static void mlx5e_flow_put(struct mlx5e_priv *priv, 2065a7e5bcbSVlad Buslov struct mlx5e_tc_flow *flow) 2075a7e5bcbSVlad Buslov { 2085a7e5bcbSVlad Buslov if (refcount_dec_and_test(&flow->refcnt)) { 2095a7e5bcbSVlad Buslov mlx5e_tc_del_flow(priv, flow); 210c5d326b2SVlad Buslov kfree_rcu(flow, rcu_head); 2115a7e5bcbSVlad Buslov } 2125a7e5bcbSVlad Buslov } 2135a7e5bcbSVlad Buslov 214226f2ca3SVlad Buslov static void __flow_flag_set(struct mlx5e_tc_flow *flow, unsigned long flag) 215226f2ca3SVlad Buslov { 216226f2ca3SVlad Buslov /* Complete all memory stores before setting bit. 
*/ 217226f2ca3SVlad Buslov smp_mb__before_atomic(); 218226f2ca3SVlad Buslov set_bit(flag, &flow->flags); 219226f2ca3SVlad Buslov } 220226f2ca3SVlad Buslov 221226f2ca3SVlad Buslov #define flow_flag_set(flow, flag) __flow_flag_set(flow, MLX5E_TC_FLOW_FLAG_##flag) 222226f2ca3SVlad Buslov 223c5d326b2SVlad Buslov static bool __flow_flag_test_and_set(struct mlx5e_tc_flow *flow, 224c5d326b2SVlad Buslov unsigned long flag) 225c5d326b2SVlad Buslov { 226c5d326b2SVlad Buslov /* test_and_set_bit() provides all necessary barriers */ 227c5d326b2SVlad Buslov return test_and_set_bit(flag, &flow->flags); 228c5d326b2SVlad Buslov } 229c5d326b2SVlad Buslov 230c5d326b2SVlad Buslov #define flow_flag_test_and_set(flow, flag) \ 231c5d326b2SVlad Buslov __flow_flag_test_and_set(flow, \ 232c5d326b2SVlad Buslov MLX5E_TC_FLOW_FLAG_##flag) 233c5d326b2SVlad Buslov 234226f2ca3SVlad Buslov static void __flow_flag_clear(struct mlx5e_tc_flow *flow, unsigned long flag) 235226f2ca3SVlad Buslov { 236226f2ca3SVlad Buslov /* Complete all memory stores before clearing bit. */ 237226f2ca3SVlad Buslov smp_mb__before_atomic(); 238226f2ca3SVlad Buslov clear_bit(flag, &flow->flags); 239226f2ca3SVlad Buslov } 240226f2ca3SVlad Buslov 241226f2ca3SVlad Buslov #define flow_flag_clear(flow, flag) __flow_flag_clear(flow, \ 242226f2ca3SVlad Buslov MLX5E_TC_FLOW_FLAG_##flag) 243226f2ca3SVlad Buslov 244226f2ca3SVlad Buslov static bool __flow_flag_test(struct mlx5e_tc_flow *flow, unsigned long flag) 245226f2ca3SVlad Buslov { 246226f2ca3SVlad Buslov bool ret = test_bit(flag, &flow->flags); 247226f2ca3SVlad Buslov 248226f2ca3SVlad Buslov /* Read fields of flow structure only after checking flags. 
*/ 249226f2ca3SVlad Buslov smp_mb__after_atomic(); 250226f2ca3SVlad Buslov return ret; 251226f2ca3SVlad Buslov } 252226f2ca3SVlad Buslov 253226f2ca3SVlad Buslov #define flow_flag_test(flow, flag) __flow_flag_test(flow, \ 254226f2ca3SVlad Buslov MLX5E_TC_FLOW_FLAG_##flag) 255226f2ca3SVlad Buslov 256226f2ca3SVlad Buslov static bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow) 257226f2ca3SVlad Buslov { 258226f2ca3SVlad Buslov return flow_flag_test(flow, ESWITCH); 259226f2ca3SVlad Buslov } 260226f2ca3SVlad Buslov 261226f2ca3SVlad Buslov static bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow) 262226f2ca3SVlad Buslov { 263226f2ca3SVlad Buslov return flow_flag_test(flow, OFFLOADED); 264226f2ca3SVlad Buslov } 265226f2ca3SVlad Buslov 26611c9c548SOr Gerlitz static inline u32 hash_mod_hdr_info(struct mod_hdr_key *key) 26711c9c548SOr Gerlitz { 26811c9c548SOr Gerlitz return jhash(key->actions, 26911c9c548SOr Gerlitz key->num_actions * MLX5_MH_ACT_SZ, 0); 27011c9c548SOr Gerlitz } 27111c9c548SOr Gerlitz 27211c9c548SOr Gerlitz static inline int cmp_mod_hdr_info(struct mod_hdr_key *a, 27311c9c548SOr Gerlitz struct mod_hdr_key *b) 27411c9c548SOr Gerlitz { 27511c9c548SOr Gerlitz if (a->num_actions != b->num_actions) 27611c9c548SOr Gerlitz return 1; 27711c9c548SOr Gerlitz 27811c9c548SOr Gerlitz return memcmp(a->actions, b->actions, a->num_actions * MLX5_MH_ACT_SZ); 27911c9c548SOr Gerlitz } 28011c9c548SOr Gerlitz 28111c9c548SOr Gerlitz static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv, 28211c9c548SOr Gerlitz struct mlx5e_tc_flow *flow, 28311c9c548SOr Gerlitz struct mlx5e_tc_flow_parse_attr *parse_attr) 28411c9c548SOr Gerlitz { 28511c9c548SOr Gerlitz struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; 28611c9c548SOr Gerlitz int num_actions, actions_size, namespace, err; 287226f2ca3SVlad Buslov bool found = false, is_eswitch_flow; 28811c9c548SOr Gerlitz struct mlx5e_mod_hdr_entry *mh; 28911c9c548SOr Gerlitz struct mod_hdr_key key; 29011c9c548SOr Gerlitz u32 hash_key; 
29111c9c548SOr Gerlitz 29211c9c548SOr Gerlitz num_actions = parse_attr->num_mod_hdr_actions; 29311c9c548SOr Gerlitz actions_size = MLX5_MH_ACT_SZ * num_actions; 29411c9c548SOr Gerlitz 29511c9c548SOr Gerlitz key.actions = parse_attr->mod_hdr_actions; 29611c9c548SOr Gerlitz key.num_actions = num_actions; 29711c9c548SOr Gerlitz 29811c9c548SOr Gerlitz hash_key = hash_mod_hdr_info(&key); 29911c9c548SOr Gerlitz 300226f2ca3SVlad Buslov is_eswitch_flow = mlx5e_is_eswitch_flow(flow); 301226f2ca3SVlad Buslov if (is_eswitch_flow) { 30211c9c548SOr Gerlitz namespace = MLX5_FLOW_NAMESPACE_FDB; 30311c9c548SOr Gerlitz hash_for_each_possible(esw->offloads.mod_hdr_tbl, mh, 30411c9c548SOr Gerlitz mod_hdr_hlist, hash_key) { 30511c9c548SOr Gerlitz if (!cmp_mod_hdr_info(&mh->key, &key)) { 30611c9c548SOr Gerlitz found = true; 30711c9c548SOr Gerlitz break; 30811c9c548SOr Gerlitz } 30911c9c548SOr Gerlitz } 31011c9c548SOr Gerlitz } else { 31111c9c548SOr Gerlitz namespace = MLX5_FLOW_NAMESPACE_KERNEL; 31211c9c548SOr Gerlitz hash_for_each_possible(priv->fs.tc.mod_hdr_tbl, mh, 31311c9c548SOr Gerlitz mod_hdr_hlist, hash_key) { 31411c9c548SOr Gerlitz if (!cmp_mod_hdr_info(&mh->key, &key)) { 31511c9c548SOr Gerlitz found = true; 31611c9c548SOr Gerlitz break; 31711c9c548SOr Gerlitz } 31811c9c548SOr Gerlitz } 31911c9c548SOr Gerlitz } 32011c9c548SOr Gerlitz 32111c9c548SOr Gerlitz if (found) 32211c9c548SOr Gerlitz goto attach_flow; 32311c9c548SOr Gerlitz 32411c9c548SOr Gerlitz mh = kzalloc(sizeof(*mh) + actions_size, GFP_KERNEL); 32511c9c548SOr Gerlitz if (!mh) 32611c9c548SOr Gerlitz return -ENOMEM; 32711c9c548SOr Gerlitz 32811c9c548SOr Gerlitz mh->key.actions = (void *)mh + sizeof(*mh); 32911c9c548SOr Gerlitz memcpy(mh->key.actions, key.actions, actions_size); 33011c9c548SOr Gerlitz mh->key.num_actions = num_actions; 33111c9c548SOr Gerlitz INIT_LIST_HEAD(&mh->flows); 33211c9c548SOr Gerlitz 33311c9c548SOr Gerlitz err = mlx5_modify_header_alloc(priv->mdev, namespace, 33411c9c548SOr Gerlitz 
mh->key.num_actions, 33511c9c548SOr Gerlitz mh->key.actions, 33611c9c548SOr Gerlitz &mh->mod_hdr_id); 33711c9c548SOr Gerlitz if (err) 33811c9c548SOr Gerlitz goto out_err; 33911c9c548SOr Gerlitz 340226f2ca3SVlad Buslov if (is_eswitch_flow) 34111c9c548SOr Gerlitz hash_add(esw->offloads.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key); 34211c9c548SOr Gerlitz else 34311c9c548SOr Gerlitz hash_add(priv->fs.tc.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key); 34411c9c548SOr Gerlitz 34511c9c548SOr Gerlitz attach_flow: 34611c9c548SOr Gerlitz list_add(&flow->mod_hdr, &mh->flows); 347226f2ca3SVlad Buslov if (is_eswitch_flow) 34811c9c548SOr Gerlitz flow->esw_attr->mod_hdr_id = mh->mod_hdr_id; 34911c9c548SOr Gerlitz else 35011c9c548SOr Gerlitz flow->nic_attr->mod_hdr_id = mh->mod_hdr_id; 35111c9c548SOr Gerlitz 35211c9c548SOr Gerlitz return 0; 35311c9c548SOr Gerlitz 35411c9c548SOr Gerlitz out_err: 35511c9c548SOr Gerlitz kfree(mh); 35611c9c548SOr Gerlitz return err; 35711c9c548SOr Gerlitz } 35811c9c548SOr Gerlitz 35911c9c548SOr Gerlitz static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv, 36011c9c548SOr Gerlitz struct mlx5e_tc_flow *flow) 36111c9c548SOr Gerlitz { 36211c9c548SOr Gerlitz struct list_head *next = flow->mod_hdr.next; 36311c9c548SOr Gerlitz 3645a7e5bcbSVlad Buslov /* flow wasn't fully initialized */ 3655a7e5bcbSVlad Buslov if (list_empty(&flow->mod_hdr)) 3665a7e5bcbSVlad Buslov return; 3675a7e5bcbSVlad Buslov 36811c9c548SOr Gerlitz list_del(&flow->mod_hdr); 36911c9c548SOr Gerlitz 37011c9c548SOr Gerlitz if (list_empty(next)) { 37111c9c548SOr Gerlitz struct mlx5e_mod_hdr_entry *mh; 37211c9c548SOr Gerlitz 37311c9c548SOr Gerlitz mh = list_entry(next, struct mlx5e_mod_hdr_entry, flows); 37411c9c548SOr Gerlitz 37511c9c548SOr Gerlitz mlx5_modify_header_dealloc(priv->mdev, mh->mod_hdr_id); 37611c9c548SOr Gerlitz hash_del(&mh->mod_hdr_hlist); 37711c9c548SOr Gerlitz kfree(mh); 37811c9c548SOr Gerlitz } 37911c9c548SOr Gerlitz } 38011c9c548SOr Gerlitz 38177ab67b7SOr Gerlitz static 
38277ab67b7SOr Gerlitz struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex) 38377ab67b7SOr Gerlitz { 38477ab67b7SOr Gerlitz struct net_device *netdev; 38577ab67b7SOr Gerlitz struct mlx5e_priv *priv; 38677ab67b7SOr Gerlitz 38777ab67b7SOr Gerlitz netdev = __dev_get_by_index(net, ifindex); 38877ab67b7SOr Gerlitz priv = netdev_priv(netdev); 38977ab67b7SOr Gerlitz return priv->mdev; 39077ab67b7SOr Gerlitz } 39177ab67b7SOr Gerlitz 39277ab67b7SOr Gerlitz static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp) 39377ab67b7SOr Gerlitz { 39477ab67b7SOr Gerlitz u32 in[MLX5_ST_SZ_DW(create_tir_in)] = {0}; 39577ab67b7SOr Gerlitz void *tirc; 39677ab67b7SOr Gerlitz int err; 39777ab67b7SOr Gerlitz 39877ab67b7SOr Gerlitz err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn); 39977ab67b7SOr Gerlitz if (err) 40077ab67b7SOr Gerlitz goto alloc_tdn_err; 40177ab67b7SOr Gerlitz 40277ab67b7SOr Gerlitz tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); 40377ab67b7SOr Gerlitz 40477ab67b7SOr Gerlitz MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT); 405ddae74acSOr Gerlitz MLX5_SET(tirc, tirc, inline_rqn, hp->pair->rqn[0]); 40677ab67b7SOr Gerlitz MLX5_SET(tirc, tirc, transport_domain, hp->tdn); 40777ab67b7SOr Gerlitz 40877ab67b7SOr Gerlitz err = mlx5_core_create_tir(hp->func_mdev, in, MLX5_ST_SZ_BYTES(create_tir_in), &hp->tirn); 40977ab67b7SOr Gerlitz if (err) 41077ab67b7SOr Gerlitz goto create_tir_err; 41177ab67b7SOr Gerlitz 41277ab67b7SOr Gerlitz return 0; 41377ab67b7SOr Gerlitz 41477ab67b7SOr Gerlitz create_tir_err: 41577ab67b7SOr Gerlitz mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn); 41677ab67b7SOr Gerlitz alloc_tdn_err: 41777ab67b7SOr Gerlitz return err; 41877ab67b7SOr Gerlitz } 41977ab67b7SOr Gerlitz 42077ab67b7SOr Gerlitz static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp) 42177ab67b7SOr Gerlitz { 42277ab67b7SOr Gerlitz mlx5_core_destroy_tir(hp->func_mdev, hp->tirn); 42377ab67b7SOr Gerlitz 
mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn); 42477ab67b7SOr Gerlitz } 42577ab67b7SOr Gerlitz 4263f6d08d1SOr Gerlitz static void mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc) 4273f6d08d1SOr Gerlitz { 4283f6d08d1SOr Gerlitz u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE], rqn; 4293f6d08d1SOr Gerlitz struct mlx5e_priv *priv = hp->func_priv; 4303f6d08d1SOr Gerlitz int i, ix, sz = MLX5E_INDIR_RQT_SIZE; 4313f6d08d1SOr Gerlitz 4323f6d08d1SOr Gerlitz mlx5e_build_default_indir_rqt(indirection_rqt, sz, 4333f6d08d1SOr Gerlitz hp->num_channels); 4343f6d08d1SOr Gerlitz 4353f6d08d1SOr Gerlitz for (i = 0; i < sz; i++) { 4363f6d08d1SOr Gerlitz ix = i; 437bbeb53b8SAya Levin if (priv->rss_params.hfunc == ETH_RSS_HASH_XOR) 4383f6d08d1SOr Gerlitz ix = mlx5e_bits_invert(i, ilog2(sz)); 4393f6d08d1SOr Gerlitz ix = indirection_rqt[ix]; 4403f6d08d1SOr Gerlitz rqn = hp->pair->rqn[ix]; 4413f6d08d1SOr Gerlitz MLX5_SET(rqtc, rqtc, rq_num[i], rqn); 4423f6d08d1SOr Gerlitz } 4433f6d08d1SOr Gerlitz } 4443f6d08d1SOr Gerlitz 4453f6d08d1SOr Gerlitz static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp) 4463f6d08d1SOr Gerlitz { 4473f6d08d1SOr Gerlitz int inlen, err, sz = MLX5E_INDIR_RQT_SIZE; 4483f6d08d1SOr Gerlitz struct mlx5e_priv *priv = hp->func_priv; 4493f6d08d1SOr Gerlitz struct mlx5_core_dev *mdev = priv->mdev; 4503f6d08d1SOr Gerlitz void *rqtc; 4513f6d08d1SOr Gerlitz u32 *in; 4523f6d08d1SOr Gerlitz 4533f6d08d1SOr Gerlitz inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz; 4543f6d08d1SOr Gerlitz in = kvzalloc(inlen, GFP_KERNEL); 4553f6d08d1SOr Gerlitz if (!in) 4563f6d08d1SOr Gerlitz return -ENOMEM; 4573f6d08d1SOr Gerlitz 4583f6d08d1SOr Gerlitz rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context); 4593f6d08d1SOr Gerlitz 4603f6d08d1SOr Gerlitz MLX5_SET(rqtc, rqtc, rqt_actual_size, sz); 4613f6d08d1SOr Gerlitz MLX5_SET(rqtc, rqtc, rqt_max_size, sz); 4623f6d08d1SOr Gerlitz 4633f6d08d1SOr Gerlitz mlx5e_hairpin_fill_rqt_rqns(hp, rqtc); 4643f6d08d1SOr 
Gerlitz 4653f6d08d1SOr Gerlitz err = mlx5_core_create_rqt(mdev, in, inlen, &hp->indir_rqt.rqtn); 4663f6d08d1SOr Gerlitz if (!err) 4673f6d08d1SOr Gerlitz hp->indir_rqt.enabled = true; 4683f6d08d1SOr Gerlitz 4693f6d08d1SOr Gerlitz kvfree(in); 4703f6d08d1SOr Gerlitz return err; 4713f6d08d1SOr Gerlitz } 4723f6d08d1SOr Gerlitz 4733f6d08d1SOr Gerlitz static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp) 4743f6d08d1SOr Gerlitz { 4753f6d08d1SOr Gerlitz struct mlx5e_priv *priv = hp->func_priv; 4763f6d08d1SOr Gerlitz u32 in[MLX5_ST_SZ_DW(create_tir_in)]; 4773f6d08d1SOr Gerlitz int tt, i, err; 4783f6d08d1SOr Gerlitz void *tirc; 4793f6d08d1SOr Gerlitz 4803f6d08d1SOr Gerlitz for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { 481d930ac79SAya Levin struct mlx5e_tirc_config ttconfig = mlx5e_tirc_get_default_config(tt); 482d930ac79SAya Levin 4833f6d08d1SOr Gerlitz memset(in, 0, MLX5_ST_SZ_BYTES(create_tir_in)); 4843f6d08d1SOr Gerlitz tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); 4853f6d08d1SOr Gerlitz 4863f6d08d1SOr Gerlitz MLX5_SET(tirc, tirc, transport_domain, hp->tdn); 4873f6d08d1SOr Gerlitz MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT); 4883f6d08d1SOr Gerlitz MLX5_SET(tirc, tirc, indirect_table, hp->indir_rqt.rqtn); 489bbeb53b8SAya Levin mlx5e_build_indir_tir_ctx_hash(&priv->rss_params, &ttconfig, tirc, false); 490bbeb53b8SAya Levin 4913f6d08d1SOr Gerlitz err = mlx5_core_create_tir(hp->func_mdev, in, 4923f6d08d1SOr Gerlitz MLX5_ST_SZ_BYTES(create_tir_in), &hp->indir_tirn[tt]); 4933f6d08d1SOr Gerlitz if (err) { 4943f6d08d1SOr Gerlitz mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err); 4953f6d08d1SOr Gerlitz goto err_destroy_tirs; 4963f6d08d1SOr Gerlitz } 4973f6d08d1SOr Gerlitz } 4983f6d08d1SOr Gerlitz return 0; 4993f6d08d1SOr Gerlitz 5003f6d08d1SOr Gerlitz err_destroy_tirs: 5013f6d08d1SOr Gerlitz for (i = 0; i < tt; i++) 5023f6d08d1SOr Gerlitz mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[i]); 5033f6d08d1SOr Gerlitz return 
err; 5043f6d08d1SOr Gerlitz } 5053f6d08d1SOr Gerlitz 5063f6d08d1SOr Gerlitz static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp) 5073f6d08d1SOr Gerlitz { 5083f6d08d1SOr Gerlitz int tt; 5093f6d08d1SOr Gerlitz 5103f6d08d1SOr Gerlitz for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) 5113f6d08d1SOr Gerlitz mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[tt]); 5123f6d08d1SOr Gerlitz } 5133f6d08d1SOr Gerlitz 5143f6d08d1SOr Gerlitz static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp, 5153f6d08d1SOr Gerlitz struct ttc_params *ttc_params) 5163f6d08d1SOr Gerlitz { 5173f6d08d1SOr Gerlitz struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr; 5183f6d08d1SOr Gerlitz int tt; 5193f6d08d1SOr Gerlitz 5203f6d08d1SOr Gerlitz memset(ttc_params, 0, sizeof(*ttc_params)); 5213f6d08d1SOr Gerlitz 5223f6d08d1SOr Gerlitz ttc_params->any_tt_tirn = hp->tirn; 5233f6d08d1SOr Gerlitz 5243f6d08d1SOr Gerlitz for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) 5253f6d08d1SOr Gerlitz ttc_params->indir_tirn[tt] = hp->indir_tirn[tt]; 5263f6d08d1SOr Gerlitz 5273f6d08d1SOr Gerlitz ft_attr->max_fte = MLX5E_NUM_TT; 5283f6d08d1SOr Gerlitz ft_attr->level = MLX5E_TC_TTC_FT_LEVEL; 5293f6d08d1SOr Gerlitz ft_attr->prio = MLX5E_TC_PRIO; 5303f6d08d1SOr Gerlitz } 5313f6d08d1SOr Gerlitz 5323f6d08d1SOr Gerlitz static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp) 5333f6d08d1SOr Gerlitz { 5343f6d08d1SOr Gerlitz struct mlx5e_priv *priv = hp->func_priv; 5353f6d08d1SOr Gerlitz struct ttc_params ttc_params; 5363f6d08d1SOr Gerlitz int err; 5373f6d08d1SOr Gerlitz 5383f6d08d1SOr Gerlitz err = mlx5e_hairpin_create_indirect_rqt(hp); 5393f6d08d1SOr Gerlitz if (err) 5403f6d08d1SOr Gerlitz return err; 5413f6d08d1SOr Gerlitz 5423f6d08d1SOr Gerlitz err = mlx5e_hairpin_create_indirect_tirs(hp); 5433f6d08d1SOr Gerlitz if (err) 5443f6d08d1SOr Gerlitz goto err_create_indirect_tirs; 5453f6d08d1SOr Gerlitz 5463f6d08d1SOr Gerlitz mlx5e_hairpin_set_ttc_params(hp, &ttc_params); 5473f6d08d1SOr Gerlitz 
err = mlx5e_create_ttc_table(priv, &ttc_params, &hp->ttc); 5483f6d08d1SOr Gerlitz if (err) 5493f6d08d1SOr Gerlitz goto err_create_ttc_table; 5503f6d08d1SOr Gerlitz 5513f6d08d1SOr Gerlitz netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n", 5523f6d08d1SOr Gerlitz hp->num_channels, hp->ttc.ft.t->id); 5533f6d08d1SOr Gerlitz 5543f6d08d1SOr Gerlitz return 0; 5553f6d08d1SOr Gerlitz 5563f6d08d1SOr Gerlitz err_create_ttc_table: 5573f6d08d1SOr Gerlitz mlx5e_hairpin_destroy_indirect_tirs(hp); 5583f6d08d1SOr Gerlitz err_create_indirect_tirs: 5593f6d08d1SOr Gerlitz mlx5e_destroy_rqt(priv, &hp->indir_rqt); 5603f6d08d1SOr Gerlitz 5613f6d08d1SOr Gerlitz return err; 5623f6d08d1SOr Gerlitz } 5633f6d08d1SOr Gerlitz 5643f6d08d1SOr Gerlitz static void mlx5e_hairpin_rss_cleanup(struct mlx5e_hairpin *hp) 5653f6d08d1SOr Gerlitz { 5663f6d08d1SOr Gerlitz struct mlx5e_priv *priv = hp->func_priv; 5673f6d08d1SOr Gerlitz 5683f6d08d1SOr Gerlitz mlx5e_destroy_ttc_table(priv, &hp->ttc); 5693f6d08d1SOr Gerlitz mlx5e_hairpin_destroy_indirect_tirs(hp); 5703f6d08d1SOr Gerlitz mlx5e_destroy_rqt(priv, &hp->indir_rqt); 5713f6d08d1SOr Gerlitz } 5723f6d08d1SOr Gerlitz 57377ab67b7SOr Gerlitz static struct mlx5e_hairpin * 57477ab67b7SOr Gerlitz mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params, 57577ab67b7SOr Gerlitz int peer_ifindex) 57677ab67b7SOr Gerlitz { 57777ab67b7SOr Gerlitz struct mlx5_core_dev *func_mdev, *peer_mdev; 57877ab67b7SOr Gerlitz struct mlx5e_hairpin *hp; 57977ab67b7SOr Gerlitz struct mlx5_hairpin *pair; 58077ab67b7SOr Gerlitz int err; 58177ab67b7SOr Gerlitz 58277ab67b7SOr Gerlitz hp = kzalloc(sizeof(*hp), GFP_KERNEL); 58377ab67b7SOr Gerlitz if (!hp) 58477ab67b7SOr Gerlitz return ERR_PTR(-ENOMEM); 58577ab67b7SOr Gerlitz 58677ab67b7SOr Gerlitz func_mdev = priv->mdev; 58777ab67b7SOr Gerlitz peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex); 58877ab67b7SOr Gerlitz 58977ab67b7SOr Gerlitz pair = 
mlx5_core_hairpin_create(func_mdev, peer_mdev, params); 59077ab67b7SOr Gerlitz if (IS_ERR(pair)) { 59177ab67b7SOr Gerlitz err = PTR_ERR(pair); 59277ab67b7SOr Gerlitz goto create_pair_err; 59377ab67b7SOr Gerlitz } 59477ab67b7SOr Gerlitz hp->pair = pair; 59577ab67b7SOr Gerlitz hp->func_mdev = func_mdev; 5963f6d08d1SOr Gerlitz hp->func_priv = priv; 5973f6d08d1SOr Gerlitz hp->num_channels = params->num_channels; 59877ab67b7SOr Gerlitz 59977ab67b7SOr Gerlitz err = mlx5e_hairpin_create_transport(hp); 60077ab67b7SOr Gerlitz if (err) 60177ab67b7SOr Gerlitz goto create_transport_err; 60277ab67b7SOr Gerlitz 6033f6d08d1SOr Gerlitz if (hp->num_channels > 1) { 6043f6d08d1SOr Gerlitz err = mlx5e_hairpin_rss_init(hp); 6053f6d08d1SOr Gerlitz if (err) 6063f6d08d1SOr Gerlitz goto rss_init_err; 6073f6d08d1SOr Gerlitz } 6083f6d08d1SOr Gerlitz 60977ab67b7SOr Gerlitz return hp; 61077ab67b7SOr Gerlitz 6113f6d08d1SOr Gerlitz rss_init_err: 6123f6d08d1SOr Gerlitz mlx5e_hairpin_destroy_transport(hp); 61377ab67b7SOr Gerlitz create_transport_err: 61477ab67b7SOr Gerlitz mlx5_core_hairpin_destroy(hp->pair); 61577ab67b7SOr Gerlitz create_pair_err: 61677ab67b7SOr Gerlitz kfree(hp); 61777ab67b7SOr Gerlitz return ERR_PTR(err); 61877ab67b7SOr Gerlitz } 61977ab67b7SOr Gerlitz 62077ab67b7SOr Gerlitz static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp) 62177ab67b7SOr Gerlitz { 6223f6d08d1SOr Gerlitz if (hp->num_channels > 1) 6233f6d08d1SOr Gerlitz mlx5e_hairpin_rss_cleanup(hp); 62477ab67b7SOr Gerlitz mlx5e_hairpin_destroy_transport(hp); 62577ab67b7SOr Gerlitz mlx5_core_hairpin_destroy(hp->pair); 62677ab67b7SOr Gerlitz kvfree(hp); 62777ab67b7SOr Gerlitz } 62877ab67b7SOr Gerlitz 629106be53bSOr Gerlitz static inline u32 hash_hairpin_info(u16 peer_vhca_id, u8 prio) 630106be53bSOr Gerlitz { 631106be53bSOr Gerlitz return (peer_vhca_id << 16 | prio); 632106be53bSOr Gerlitz } 633106be53bSOr Gerlitz 6345c65c564SOr Gerlitz static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv, 
635106be53bSOr Gerlitz u16 peer_vhca_id, u8 prio) 6365c65c564SOr Gerlitz { 6375c65c564SOr Gerlitz struct mlx5e_hairpin_entry *hpe; 638106be53bSOr Gerlitz u32 hash_key = hash_hairpin_info(peer_vhca_id, prio); 6395c65c564SOr Gerlitz 6405c65c564SOr Gerlitz hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe, 641106be53bSOr Gerlitz hairpin_hlist, hash_key) { 642e4f9abbdSVlad Buslov if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio) { 643e4f9abbdSVlad Buslov refcount_inc(&hpe->refcnt); 6445c65c564SOr Gerlitz return hpe; 6455c65c564SOr Gerlitz } 646e4f9abbdSVlad Buslov } 6475c65c564SOr Gerlitz 6485c65c564SOr Gerlitz return NULL; 6495c65c564SOr Gerlitz } 6505c65c564SOr Gerlitz 651e4f9abbdSVlad Buslov static void mlx5e_hairpin_put(struct mlx5e_priv *priv, 652e4f9abbdSVlad Buslov struct mlx5e_hairpin_entry *hpe) 653e4f9abbdSVlad Buslov { 654e4f9abbdSVlad Buslov /* no more hairpin flows for us, release the hairpin pair */ 655e4f9abbdSVlad Buslov if (!refcount_dec_and_test(&hpe->refcnt)) 656e4f9abbdSVlad Buslov return; 657e4f9abbdSVlad Buslov 658e4f9abbdSVlad Buslov netdev_dbg(priv->netdev, "del hairpin: peer %s\n", 659e4f9abbdSVlad Buslov dev_name(hpe->hp->pair->peer_mdev->device)); 660e4f9abbdSVlad Buslov 661e4f9abbdSVlad Buslov WARN_ON(!list_empty(&hpe->flows)); 662e4f9abbdSVlad Buslov mlx5e_hairpin_destroy(hpe->hp); 663e4f9abbdSVlad Buslov hash_del(&hpe->hairpin_hlist); 664e4f9abbdSVlad Buslov kfree(hpe); 665e4f9abbdSVlad Buslov } 666e4f9abbdSVlad Buslov 667106be53bSOr Gerlitz #define UNKNOWN_MATCH_PRIO 8 668106be53bSOr Gerlitz 669106be53bSOr Gerlitz static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv, 670e98bedf5SEli Britstein struct mlx5_flow_spec *spec, u8 *match_prio, 671e98bedf5SEli Britstein struct netlink_ext_ack *extack) 672106be53bSOr Gerlitz { 673106be53bSOr Gerlitz void *headers_c, *headers_v; 674106be53bSOr Gerlitz u8 prio_val, prio_mask = 0; 675106be53bSOr Gerlitz bool vlan_present; 676106be53bSOr Gerlitz 677106be53bSOr Gerlitz #ifdef 
CONFIG_MLX5_CORE_EN_DCB
	/* hairpin pairs are keyed on PCP, so only the PCP trust model works */
	if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "only PCP trust state supported for hairpin");
		return -EOPNOTSUPP;
	}
#endif
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);

	vlan_present = MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag);
	if (vlan_present) {
		prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
		prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
	}

	if (!vlan_present || !prio_mask) {
		prio_val = UNKNOWN_MATCH_PRIO;
	} else if (prio_mask != 0x7) {
		/* a partial mask can't be mapped to a single hairpin prio */
		NL_SET_ERR_MSG_MOD(extack,
				   "masked priority match not supported for hairpin");
		return -EOPNOTSUPP;
	}

	*match_prio = prio_val;
	return 0;
}

/* Attach a NIC flow to a hairpin pair towards its mirred peer device,
 * creating the per-(peer vhca_id, prio) hairpin instance on first use and
 * linking the flow on the entry's flow list.
 */
static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5e_tc_flow_parse_attr *parse_attr,
				  struct netlink_ext_ack *extack)
{
	int peer_ifindex = parse_attr->mirred_ifindex[0];
	struct mlx5_hairpin_params params;
	struct mlx5_core_dev *peer_mdev;
	struct mlx5e_hairpin_entry *hpe;
	struct mlx5e_hairpin *hp;
	u64 link_speed64;
	u32 link_speed;
	u8 match_prio;
	u16 peer_id;
	int err;

	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
	if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
		NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
		return -EOPNOTSUPP;
	}

	peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
	err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio,
				     extack);
	if (err)
		return err;
	/* reuse an existing hairpin towards this (peer, prio) if there is one */
	hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
	if (hpe)
		goto attach_flow;

	hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
	if (!hpe)
		return -ENOMEM;

	spin_lock_init(&hpe->flows_lock);
	INIT_LIST_HEAD(&hpe->flows);
	hpe->peer_vhca_id = peer_id;
	hpe->prio = match_prio;
	refcount_set(&hpe->refcnt, 1);

	/* clamp the hairpin queue size between the device min/max caps */
	params.log_data_size = 15;
	params.log_data_size = min_t(u8, params.log_data_size,
				     MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
	params.log_data_size = max_t(u8, params.log_data_size,
				     MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz));

	params.log_num_packets = params.log_data_size -
				 MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev);
	params.log_num_packets = min_t(u8, params.log_num_packets,
				       MLX5_CAP_GEN(priv->mdev, log_max_hairpin_num_packets));

	params.q_counter = priv->q_counter;
	/* set hairpin pair per each 50Gbs share of the link */
	mlx5e_port_max_linkspeed(priv->mdev, &link_speed);
	link_speed = max_t(u32, link_speed, 50000);
	link_speed64 = link_speed;
	do_div(link_speed64, 50000);
	params.num_channels = link_speed64;

	hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
	if (IS_ERR(hp)) {
		err = PTR_ERR(hp);
		goto create_hairpin_err;
	}

	netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
		   hp->tirn, hp->pair->rqn[0],
		   dev_name(hp->pair->peer_mdev->device),
		   hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);

	hpe->hp = hp;
	hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist,
		 hash_hairpin_info(peer_id, match_prio));

attach_flow:
	/* multi-channel pairs steer through the RSS TTC table, single-channel
	 * pairs go straight to the hairpin TIR
	 */
	if (hpe->hp->num_channels > 1) {
		flow_flag_set(flow, HAIRPIN_RSS);
		flow->nic_attr->hairpin_ft = hpe->hp->ttc.ft.t;
	} else {
		flow->nic_attr->hairpin_tirn = hpe->hp->tirn;
	}
	flow->hpe = hpe;
	spin_lock(&hpe->flows_lock);
	list_add(&flow->hairpin, &hpe->flows);
spin_unlock(&hpe->flows_lock); 7913f6d08d1SOr Gerlitz 7925c65c564SOr Gerlitz return 0; 7935c65c564SOr Gerlitz 7945c65c564SOr Gerlitz create_hairpin_err: 7955c65c564SOr Gerlitz kfree(hpe); 7965c65c564SOr Gerlitz return err; 7975c65c564SOr Gerlitz } 7985c65c564SOr Gerlitz 7995c65c564SOr Gerlitz static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv, 8005c65c564SOr Gerlitz struct mlx5e_tc_flow *flow) 8015c65c564SOr Gerlitz { 8025a7e5bcbSVlad Buslov /* flow wasn't fully initialized */ 803e4f9abbdSVlad Buslov if (!flow->hpe) 8045a7e5bcbSVlad Buslov return; 8055a7e5bcbSVlad Buslov 80673edca73SVlad Buslov spin_lock(&flow->hpe->flows_lock); 8075c65c564SOr Gerlitz list_del(&flow->hairpin); 80873edca73SVlad Buslov spin_unlock(&flow->hpe->flows_lock); 80973edca73SVlad Buslov 810e4f9abbdSVlad Buslov mlx5e_hairpin_put(priv, flow->hpe); 811e4f9abbdSVlad Buslov flow->hpe = NULL; 8125c65c564SOr Gerlitz } 8135c65c564SOr Gerlitz 814c83954abSRabie Loulou static int 81574491de9SMark Bloch mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, 81617091853SOr Gerlitz struct mlx5e_tc_flow_parse_attr *parse_attr, 817e98bedf5SEli Britstein struct mlx5e_tc_flow *flow, 818e98bedf5SEli Britstein struct netlink_ext_ack *extack) 819e8f887acSAmir Vadai { 820bb0ee7dcSJianbo Liu struct mlx5_flow_context *flow_context = &parse_attr->spec.flow_context; 821aa0cbbaeSOr Gerlitz struct mlx5_nic_flow_attr *attr = flow->nic_attr; 822aad7e08dSAmir Vadai struct mlx5_core_dev *dev = priv->mdev; 8235c65c564SOr Gerlitz struct mlx5_flow_destination dest[2] = {}; 82466958ed9SHadar Hen Zion struct mlx5_flow_act flow_act = { 8253bc4b7bfSOr Gerlitz .action = attr->action, 82660786f09SMark Bloch .reformat_id = 0, 827bb0ee7dcSJianbo Liu .flags = FLOW_ACT_NO_APPEND, 82866958ed9SHadar Hen Zion }; 829aad7e08dSAmir Vadai struct mlx5_fc *counter = NULL; 8305c65c564SOr Gerlitz int err, dest_ix = 0; 831e8f887acSAmir Vadai 832bb0ee7dcSJianbo Liu flow_context->flags |= FLOW_CONTEXT_HAS_TAG; 833bb0ee7dcSJianbo Liu 
flow_context->flow_tag = attr->flow_tag; 834bb0ee7dcSJianbo Liu 835226f2ca3SVlad Buslov if (flow_flag_test(flow, HAIRPIN)) { 836e98bedf5SEli Britstein err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack); 8375a7e5bcbSVlad Buslov if (err) 8385a7e5bcbSVlad Buslov return err; 8395a7e5bcbSVlad Buslov 840226f2ca3SVlad Buslov if (flow_flag_test(flow, HAIRPIN_RSS)) { 8413f6d08d1SOr Gerlitz dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; 8423f6d08d1SOr Gerlitz dest[dest_ix].ft = attr->hairpin_ft; 8433f6d08d1SOr Gerlitz } else { 8445c65c564SOr Gerlitz dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR; 8455c65c564SOr Gerlitz dest[dest_ix].tir_num = attr->hairpin_tirn; 8463f6d08d1SOr Gerlitz } 8473f6d08d1SOr Gerlitz dest_ix++; 8483f6d08d1SOr Gerlitz } else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { 8495c65c564SOr Gerlitz dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; 8505c65c564SOr Gerlitz dest[dest_ix].ft = priv->fs.vlan.ft.t; 8515c65c564SOr Gerlitz dest_ix++; 8525c65c564SOr Gerlitz } 853aad7e08dSAmir Vadai 8545c65c564SOr Gerlitz if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { 8555c65c564SOr Gerlitz counter = mlx5_fc_create(dev, true); 8565a7e5bcbSVlad Buslov if (IS_ERR(counter)) 8575a7e5bcbSVlad Buslov return PTR_ERR(counter); 8585a7e5bcbSVlad Buslov 8595c65c564SOr Gerlitz dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; 860171c7625SMark Bloch dest[dest_ix].counter_id = mlx5_fc_id(counter); 8615c65c564SOr Gerlitz dest_ix++; 862b8aee822SMark Bloch attr->counter = counter; 863aad7e08dSAmir Vadai } 864aad7e08dSAmir Vadai 8652f4fe4caSOr Gerlitz if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) { 8663099eb5aSOr Gerlitz err = mlx5e_attach_mod_hdr(priv, flow, parse_attr); 867d7e75a32SOr Gerlitz flow_act.modify_id = attr->mod_hdr_id; 8682f4fe4caSOr Gerlitz kfree(parse_attr->mod_hdr_actions); 869c83954abSRabie Loulou if (err) 8705a7e5bcbSVlad Buslov return err; 8712f4fe4caSOr Gerlitz } 8722f4fe4caSOr Gerlitz 
873b6fac0b4SVlad Buslov mutex_lock(&priv->fs.tc.t_lock); 874acff797cSMaor Gottlieb if (IS_ERR_OR_NULL(priv->fs.tc.t)) { 87521b9c144SOr Gerlitz int tc_grp_size, tc_tbl_size; 87621b9c144SOr Gerlitz u32 max_flow_counter; 87721b9c144SOr Gerlitz 87821b9c144SOr Gerlitz max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) | 87921b9c144SOr Gerlitz MLX5_CAP_GEN(dev, max_flow_counter_15_0); 88021b9c144SOr Gerlitz 88121b9c144SOr Gerlitz tc_grp_size = min_t(int, max_flow_counter, MLX5E_TC_TABLE_MAX_GROUP_SIZE); 88221b9c144SOr Gerlitz 88321b9c144SOr Gerlitz tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS, 88421b9c144SOr Gerlitz BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size))); 88521b9c144SOr Gerlitz 886acff797cSMaor Gottlieb priv->fs.tc.t = 887acff797cSMaor Gottlieb mlx5_create_auto_grouped_flow_table(priv->fs.ns, 888acff797cSMaor Gottlieb MLX5E_TC_PRIO, 88921b9c144SOr Gerlitz tc_tbl_size, 890acff797cSMaor Gottlieb MLX5E_TC_TABLE_NUM_GROUPS, 8913f6d08d1SOr Gerlitz MLX5E_TC_FT_LEVEL, 0); 892acff797cSMaor Gottlieb if (IS_ERR(priv->fs.tc.t)) { 893b6fac0b4SVlad Buslov mutex_unlock(&priv->fs.tc.t_lock); 894e98bedf5SEli Britstein NL_SET_ERR_MSG_MOD(extack, 895e98bedf5SEli Britstein "Failed to create tc offload table\n"); 896e8f887acSAmir Vadai netdev_err(priv->netdev, 897e8f887acSAmir Vadai "Failed to create tc offload table\n"); 8985a7e5bcbSVlad Buslov return PTR_ERR(priv->fs.tc.t); 899e8f887acSAmir Vadai } 900e8f887acSAmir Vadai } 901e8f887acSAmir Vadai 90238aa51c1SOr Gerlitz if (attr->match_level != MLX5_MATCH_NONE) 903d4a18e16SYevgeny Kliteynik parse_attr->spec.match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS; 90438aa51c1SOr Gerlitz 905c83954abSRabie Loulou flow->rule[0] = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec, 9065c65c564SOr Gerlitz &flow_act, dest, dest_ix); 907b6fac0b4SVlad Buslov mutex_unlock(&priv->fs.tc.t_lock); 908e8f887acSAmir Vadai 9095a7e5bcbSVlad Buslov if (IS_ERR(flow->rule[0])) 9105a7e5bcbSVlad Buslov return 
PTR_ERR(flow->rule[0]); 911aad7e08dSAmir Vadai 912c83954abSRabie Loulou return 0; 913e8f887acSAmir Vadai } 914e8f887acSAmir Vadai 915d85cdccbSOr Gerlitz static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv, 916d85cdccbSOr Gerlitz struct mlx5e_tc_flow *flow) 917d85cdccbSOr Gerlitz { 918513f8f7fSOr Gerlitz struct mlx5_nic_flow_attr *attr = flow->nic_attr; 919d85cdccbSOr Gerlitz struct mlx5_fc *counter = NULL; 920d85cdccbSOr Gerlitz 921b8aee822SMark Bloch counter = attr->counter; 9225a7e5bcbSVlad Buslov if (!IS_ERR_OR_NULL(flow->rule[0])) 923e4ad91f2SChris Mi mlx5_del_flow_rules(flow->rule[0]); 924d85cdccbSOr Gerlitz mlx5_fc_destroy(priv->mdev, counter); 925d85cdccbSOr Gerlitz 926b6fac0b4SVlad Buslov mutex_lock(&priv->fs.tc.t_lock); 927226f2ca3SVlad Buslov if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) && priv->fs.tc.t) { 928d85cdccbSOr Gerlitz mlx5_destroy_flow_table(priv->fs.tc.t); 929d85cdccbSOr Gerlitz priv->fs.tc.t = NULL; 930d85cdccbSOr Gerlitz } 931b6fac0b4SVlad Buslov mutex_unlock(&priv->fs.tc.t_lock); 9322f4fe4caSOr Gerlitz 933513f8f7fSOr Gerlitz if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) 9343099eb5aSOr Gerlitz mlx5e_detach_mod_hdr(priv, flow); 9355c65c564SOr Gerlitz 936226f2ca3SVlad Buslov if (flow_flag_test(flow, HAIRPIN)) 9375c65c564SOr Gerlitz mlx5e_hairpin_flow_del(priv, flow); 938d85cdccbSOr Gerlitz } 939d85cdccbSOr Gerlitz 940aa0cbbaeSOr Gerlitz static void mlx5e_detach_encap(struct mlx5e_priv *priv, 9418c4dc42bSEli Britstein struct mlx5e_tc_flow *flow, int out_index); 942aa0cbbaeSOr Gerlitz 9433c37745eSOr Gerlitz static int mlx5e_attach_encap(struct mlx5e_priv *priv, 944e98bedf5SEli Britstein struct mlx5e_tc_flow *flow, 945733d4f36SRoi Dayan struct net_device *mirred_dev, 946733d4f36SRoi Dayan int out_index, 9478c4dc42bSEli Britstein struct netlink_ext_ack *extack, 9480ad060eeSRoi Dayan struct net_device **encap_dev, 9490ad060eeSRoi Dayan bool *encap_valid); 9503c37745eSOr Gerlitz 9516d2a3ed0SOr Gerlitz static struct 
mlx5_flow_handle *
mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
			   struct mlx5e_tc_flow *flow,
			   struct mlx5_flow_spec *spec,
			   struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_handle *rule;

	rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
	if (IS_ERR(rule))
		return rule;

	/* flows with actions after a mirror need a second (fwd) rule */
	if (attr->split_count) {
		flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
		if (IS_ERR(flow->rule[1])) {
			mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
			return flow->rule[1];
		}
	}

	return rule;
}

/* Remove the eswitch rule(s) installed by mlx5e_tc_offload_fdb_rules()
 * and clear the flow's OFFLOADED state.
 */
static void
mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
			     struct mlx5e_tc_flow *flow,
			     struct mlx5_esw_flow_attr *attr)
{
	flow_flag_clear(flow, OFFLOADED);

	if (attr->split_count)
		mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);

	mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
}

/* Install the flow in the FDB slow path chain: copy its attributes into
 * *slow_attr, override the action to a plain forward towards
 * FDB_SLOW_PATH_CHAIN and offload with those. On success the flow is
 * marked SLOW.
 */
static struct mlx5_flow_handle *
mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
			      struct mlx5e_tc_flow *flow,
			      struct mlx5_flow_spec *spec,
			      struct mlx5_esw_flow_attr *slow_attr)
{
	struct mlx5_flow_handle *rule;

	memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	slow_attr->split_count = 0;
	slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN;

	rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
	if (!IS_ERR(rule))
		flow_flag_set(flow, SLOW);

	return rule;
}

/* Remove the slow path rule installed by mlx5e_tc_offload_to_slow_path(),
 * rebuilding the same slow-path attributes used at install time.
 */
static void
mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5_esw_flow_attr *slow_attr)
{
	memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	slow_attr->split_count = 0;
	slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN;
	mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
	flow_flag_clear(flow, SLOW);
}

/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
 * function.
 */
static void unready_flow_add(struct mlx5e_tc_flow *flow,
			     struct list_head *unready_flows)
{
	flow_flag_set(flow, NOT_READY);
	list_add_tail(&flow->unready, unready_flows);
}

/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
 * function.
 */
static void unready_flow_del(struct mlx5e_tc_flow *flow)
{
	list_del(&flow->unready);
	flow_flag_clear(flow, NOT_READY);
}

/* Add the flow to the uplink's unready flows list, taking the list lock. */
static void add_unready_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &rpriv->uplink_priv;

	mutex_lock(&uplink_priv->unready_flows_lock);
	unready_flow_add(flow, &uplink_priv->unready_flows);
	mutex_unlock(&uplink_priv->unready_flows_lock);
}

/* Remove the flow from the uplink's unready flows list, taking the list
 * lock.
 */
static void remove_unready_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &rpriv->uplink_priv;

	mutex_lock(&uplink_priv->unready_flows_lock);
	unready_flow_del(flow);
	mutex_unlock(&uplink_priv->unready_flows_lock);
}

/* Offload an eswitch (FDB) TC flow: validate the requested chain/prio
 * against the eswitch ranges, attach encap entries for every encap
 * destination, add the vlan action, modify header and counter, then
 * install either the fast path rule or - when some encap neighbour is
 * not yet resolved - the slow path rule.
 */
static int
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow *flow,
		      struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	u32 max_chain = mlx5_eswitch_get_chain_range(esw);
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
	u16 max_prio = mlx5_eswitch_get_prio_range(esw);
	struct net_device *out_dev, *encap_dev = NULL;
	struct mlx5_fc *counter = NULL;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_priv *out_priv;
	bool encap_valid = true;
	int err = 0;
	int out_index;

	if (!mlx5_eswitch_prios_supported(esw) && attr->prio != 1) {
		NL_SET_ERR_MSG(extack, "E-switch priorities unsupported, upgrade FW");
		return -EOPNOTSUPP;
	}

	if (attr->chain > max_chain) {
		NL_SET_ERR_MSG(extack, "Requested chain is out of supported range");
		return -EOPNOTSUPP;
	}

	if (attr->prio > max_prio) {
NL_SET_ERR_MSG(extack, "Requested priority is out of supported range");
		return -EOPNOTSUPP;
	}

	/* attach an encap entry for every destination flagged as encap */
	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
		int mirred_ifindex;

		if (!(attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
			continue;

		mirred_ifindex = parse_attr->mirred_ifindex[out_index];
		out_dev = __dev_get_by_index(dev_net(priv->netdev),
					     mirred_ifindex);
		err = mlx5e_attach_encap(priv, flow, out_dev, out_index,
					 extack, &encap_dev, &encap_valid);
		if (err)
			return err;

		out_priv = netdev_priv(encap_dev);
		rpriv = out_priv->ppriv;
		attr->dests[out_index].rep = rpriv->rep;
		attr->dests[out_index].mdev = out_priv->mdev;
	}

	err = mlx5_eswitch_add_vlan_action(esw, attr);
	if (err)
		return err;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
		/* parsed actions are consumed by the attach, free regardless
		 * of the outcome
		 */
		kfree(parse_attr->mod_hdr_actions);
		if (err)
			return err;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(attr->counter_dev, true);
		if (IS_ERR(counter))
			return PTR_ERR(counter);

		attr->counter = counter;
	}

	/* we get here if one of the following takes place:
	 * (1) there's no error
	 * (2) there's an encap action and we don't have valid neigh
	 */
	if (!encap_valid) {
		/* continue with goto slow path rule instead */
		struct mlx5_esw_flow_attr slow_attr;

		flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec, &slow_attr);
	} else {
		flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);
	}

	if (IS_ERR(flow->rule[0]))
		return PTR_ERR(flow->rule[0]);
	else
		flow_flag_set(flow, OFFLOADED);

	return 0;
}

/* Returns true when the flow's match spec matches on geneve TLV option
 * data (a non-zero geneve_tlv_option_0_data match value).
 */
static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow)
{
	struct mlx5_flow_spec *spec = &flow->esw_attr->parse_attr->spec;
	void *headers_v = MLX5_ADDR_OF(fte_match_param,
				       spec->match_value,
				       misc_parameters_3);
	u32 geneve_tlv_opt_0_data = MLX5_GET(fte_match_set_misc3,
					     headers_v,
					     geneve_tlv_option_0_data);

	return !!geneve_tlv_opt_0_data;
}

/* Undo mlx5e_tc_add_fdb_flow(): remove the fast or slow path rule,
 * release the geneve TLV option, vlan action, encap entries, modify
 * header and counter. Flows that never completed initialization
 * (NOT_READY) only need to be unlinked and have their parse_attr freed.
 */
static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5_esw_flow_attr slow_attr;
	int out_index;

	if (flow_flag_test(flow, NOT_READY)) {
		remove_unready_flow(flow);
		kvfree(attr->parse_attr);
		return;
	}

	if (mlx5e_is_offloaded_flow(flow)) {
		if (flow_flag_test(flow, SLOW))
			mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr);
		else
			mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
	}

	if (mlx5_flow_has_geneve_opt(flow))
		mlx5_geneve_tlv_option_del(priv->mdev->geneve);

	mlx5_eswitch_del_vlan_action(esw, attr);

	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
		if (attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP)
			mlx5e_detach_encap(priv, flow, out_index);
	kvfree(attr->parse_attr);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
		mlx5_fc_destroy(attr->counter_dev, attr->counter);
}

/* An encap entry's neighbour became valid: allocate the packet reformat
 * and move every flow on the entry from the slow path to the fast
 * (encap) path, skipping flows that still have other unresolved encap
 * destinations.
 */
void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
			      struct mlx5e_encap_entry *e)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr slow_attr, *esw_attr;
	struct encap_flow_item *efi, *tmp;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	struct mlx5e_tc_flow *flow;
	int err;

	err = mlx5_packet_reformat_alloc(priv->mdev,
					 e->reformat_type,
					 e->encap_size, e->encap_header,
					 MLX5_FLOW_NAMESPACE_FDB,
					 &e->encap_id);
	if (err) {
		mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %d\n",
			       err);
		return;
	}
	e->flags |= MLX5_ENCAP_ENTRY_VALID;
	mlx5e_rep_queue_neigh_stats_work(priv);

	list_for_each_entry_safe(efi, tmp, &e->flows, list) {
		bool all_flow_encaps_valid = true;
		int i;

		flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
		/* skip flows that are concurrently being deleted */
		if (IS_ERR(mlx5e_flow_get(flow)))
			continue;

		esw_attr = flow->esw_attr;
		spec = &esw_attr->parse_attr->spec;

		esw_attr->dests[efi->index].encap_id = e->encap_id;
		esw_attr->dests[efi->index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
		/* Flow can be associated with multiple encap entries.
		 * Before offloading the flow verify that all of them have
		 * a valid neighbour.
		 */
		for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
			if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP))
				continue;
			if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP_VALID)) {
				all_flow_encaps_valid = false;
				break;
			}
		}
		/* Do not offload flows with unresolved neighbors */
		if (!all_flow_encaps_valid)
			goto loop_cont;
		/* update from slow path rule to encap rule */
		rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, esw_attr);
		if (IS_ERR(rule)) {
			err = PTR_ERR(rule);
			mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
				       err);
			goto loop_cont;
		}

		mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr);
		flow->rule[0] = rule;
		/* was unset when slow path rule removed */
		flow_flag_set(flow, OFFLOADED);

loop_cont:
		mlx5e_flow_put(priv, flow);
	}
}

/* An encap entry's neighbour became invalid: move every flow on the entry
 * from the fast (encap) path back to the slow path, mark the encap
 * destinations invalid and release the packet reformat.
 */
void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
			      struct mlx5e_encap_entry *e)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr slow_attr;
	struct encap_flow_item *efi, *tmp;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	struct mlx5e_tc_flow *flow;
	int err;

	list_for_each_entry_safe(efi, tmp, &e->flows, list) {
		flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
		/* skip flows that are concurrently being deleted */
		if (IS_ERR(mlx5e_flow_get(flow)))
			continue;

		spec = &flow->esw_attr->parse_attr->spec;

		/* update from encap rule to slow path rule */
		rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec, &slow_attr);
		/* mark the flow's encap dest as non-valid */
		flow->esw_attr->dests[efi->index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;

		if (IS_ERR(rule)) {
			err = PTR_ERR(rule);
			mlx5_core_warn(priv->mdev, "Failed to update slow path (encap) flow, %d\n",
				       err);
			goto loop_cont;
		}

		mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->esw_attr);
		flow->rule[0] = rule;
		/* was unset when fast path rule removed */
		flow_flag_set(flow, OFFLOADED);

loop_cont:
		mlx5e_flow_put(priv, flow);
	}

	/* we know that the encap is valid */
	e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
	mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id);
}

/* Return the flow counter attached to the flow, picking the eswitch or
 * NIC attribute depending on the flow type.
 */
static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
{
	if (mlx5e_is_eswitch_flow(flow))
		return flow->esw_attr->counter;
	else
		return flow->nic_attr->counter;
}

void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
{
	struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
	struct mlx5e_tc_flow *flow;
	struct mlx5e_encap_entry *e;
	struct mlx5_fc *counter;
	struct neigh_table *tbl;
	bool neigh_used = false;
	struct neighbour *n;
	u64 lastuse;

	/* only IPv4/IPv6 neighbours are handled */
	if (m_neigh->family == AF_INET)
		tbl = &arp_tbl;
#if IS_ENABLED(CONFIG_IPV6)
	else if (m_neigh->family == AF_INET6)
		tbl = &nd_tbl;
#endif
	else
		return;

	list_for_each_entry(e, &nhe->encap_list, encap_list) {
		struct encap_flow_item *efi, *tmp;
		if (!(e->flags & MLX5_ENCAP_ENTRY_VALID))
			continue;
		list_for_each_entry_safe(efi, tmp, &e->flows, list) {
			flow = container_of(efi, struct mlx5e_tc_flow,
					    encaps[efi->index]);
			if (IS_ERR(mlx5e_flow_get(flow)))
				continue;
Buslov 1366226f2ca3SVlad Buslov if (mlx5e_is_offloaded_flow(flow)) { 1367b8aee822SMark Bloch counter = mlx5e_tc_get_counter(flow); 136890bb7692SAriel Levkovich lastuse = mlx5_fc_query_lastuse(counter); 1369f6dfb4c3SHadar Hen Zion if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) { 13705a7e5bcbSVlad Buslov mlx5e_flow_put(netdev_priv(e->out_dev), flow); 1371f6dfb4c3SHadar Hen Zion neigh_used = true; 1372f6dfb4c3SHadar Hen Zion break; 1373f6dfb4c3SHadar Hen Zion } 1374f6dfb4c3SHadar Hen Zion } 13755a7e5bcbSVlad Buslov 13765a7e5bcbSVlad Buslov mlx5e_flow_put(netdev_priv(e->out_dev), flow); 1377f6dfb4c3SHadar Hen Zion } 1378e36d4810SRoi Dayan if (neigh_used) 1379e36d4810SRoi Dayan break; 1380f6dfb4c3SHadar Hen Zion } 1381f6dfb4c3SHadar Hen Zion 1382f6dfb4c3SHadar Hen Zion if (neigh_used) { 1383f6dfb4c3SHadar Hen Zion nhe->reported_lastuse = jiffies; 1384f6dfb4c3SHadar Hen Zion 1385f6dfb4c3SHadar Hen Zion /* find the relevant neigh according to the cached device and 1386f6dfb4c3SHadar Hen Zion * dst ip pair 1387f6dfb4c3SHadar Hen Zion */ 1388f6dfb4c3SHadar Hen Zion n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev); 1389c7f7ba8dSRoi Dayan if (!n) 1390f6dfb4c3SHadar Hen Zion return; 1391f6dfb4c3SHadar Hen Zion 1392f6dfb4c3SHadar Hen Zion neigh_event_send(n, NULL); 1393f6dfb4c3SHadar Hen Zion neigh_release(n); 1394f6dfb4c3SHadar Hen Zion } 1395f6dfb4c3SHadar Hen Zion } 1396f6dfb4c3SHadar Hen Zion 1397d85cdccbSOr Gerlitz static void mlx5e_detach_encap(struct mlx5e_priv *priv, 13988c4dc42bSEli Britstein struct mlx5e_tc_flow *flow, int out_index) 1399d85cdccbSOr Gerlitz { 14008c4dc42bSEli Britstein struct list_head *next = flow->encaps[out_index].list.next; 14015067b602SRoi Dayan 14025a7e5bcbSVlad Buslov /* flow wasn't fully initialized */ 14035a7e5bcbSVlad Buslov if (list_empty(&flow->encaps[out_index].list)) 14045a7e5bcbSVlad Buslov return; 14055a7e5bcbSVlad Buslov 14068c4dc42bSEli Britstein list_del(&flow->encaps[out_index].list); 14075067b602SRoi Dayan if 
(list_empty(next)) { 1408c1ae1152SOr Gerlitz struct mlx5e_encap_entry *e; 14095067b602SRoi Dayan 1410c1ae1152SOr Gerlitz e = list_entry(next, struct mlx5e_encap_entry, flows); 1411232c0013SHadar Hen Zion mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e); 1412232c0013SHadar Hen Zion 1413232c0013SHadar Hen Zion if (e->flags & MLX5_ENCAP_ENTRY_VALID) 141460786f09SMark Bloch mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id); 1415232c0013SHadar Hen Zion 1416cdc5a7f3SOr Gerlitz hash_del_rcu(&e->encap_hlist); 1417232c0013SHadar Hen Zion kfree(e->encap_header); 14185067b602SRoi Dayan kfree(e); 14195067b602SRoi Dayan } 14205067b602SRoi Dayan } 14215067b602SRoi Dayan 142204de7ddaSRoi Dayan static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow) 142304de7ddaSRoi Dayan { 142404de7ddaSRoi Dayan struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch; 142504de7ddaSRoi Dayan 1426226f2ca3SVlad Buslov if (!flow_flag_test(flow, ESWITCH) || 1427226f2ca3SVlad Buslov !flow_flag_test(flow, DUP)) 142804de7ddaSRoi Dayan return; 142904de7ddaSRoi Dayan 143004de7ddaSRoi Dayan mutex_lock(&esw->offloads.peer_mutex); 143104de7ddaSRoi Dayan list_del(&flow->peer); 143204de7ddaSRoi Dayan mutex_unlock(&esw->offloads.peer_mutex); 143304de7ddaSRoi Dayan 1434226f2ca3SVlad Buslov flow_flag_clear(flow, DUP); 143504de7ddaSRoi Dayan 143604de7ddaSRoi Dayan mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow); 143704de7ddaSRoi Dayan kvfree(flow->peer_flow); 143804de7ddaSRoi Dayan flow->peer_flow = NULL; 143904de7ddaSRoi Dayan } 144004de7ddaSRoi Dayan 144104de7ddaSRoi Dayan static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow) 144204de7ddaSRoi Dayan { 144304de7ddaSRoi Dayan struct mlx5_core_dev *dev = flow->priv->mdev; 144404de7ddaSRoi Dayan struct mlx5_devcom *devcom = dev->priv.devcom; 144504de7ddaSRoi Dayan struct mlx5_eswitch *peer_esw; 144604de7ddaSRoi Dayan 144704de7ddaSRoi Dayan peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS); 
144804de7ddaSRoi Dayan if (!peer_esw) 144904de7ddaSRoi Dayan return; 145004de7ddaSRoi Dayan 145104de7ddaSRoi Dayan __mlx5e_tc_del_fdb_peer_flow(flow); 145204de7ddaSRoi Dayan mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS); 145304de7ddaSRoi Dayan } 145404de7ddaSRoi Dayan 1455e8f887acSAmir Vadai static void mlx5e_tc_del_flow(struct mlx5e_priv *priv, 1456961e8979SRoi Dayan struct mlx5e_tc_flow *flow) 1457e8f887acSAmir Vadai { 1458226f2ca3SVlad Buslov if (mlx5e_is_eswitch_flow(flow)) { 145904de7ddaSRoi Dayan mlx5e_tc_del_fdb_peer_flow(flow); 1460d85cdccbSOr Gerlitz mlx5e_tc_del_fdb_flow(priv, flow); 146104de7ddaSRoi Dayan } else { 1462d85cdccbSOr Gerlitz mlx5e_tc_del_nic_flow(priv, flow); 1463e8f887acSAmir Vadai } 146404de7ddaSRoi Dayan } 1465e8f887acSAmir Vadai 1466bbd00f7eSHadar Hen Zion 1467bbd00f7eSHadar Hen Zion static int parse_tunnel_attr(struct mlx5e_priv *priv, 1468bbd00f7eSHadar Hen Zion struct mlx5_flow_spec *spec, 1469f9e30088SPablo Neira Ayuso struct flow_cls_offload *f, 14706363651dSOr Gerlitz struct net_device *filter_dev, u8 *match_level) 1471bbd00f7eSHadar Hen Zion { 1472e98bedf5SEli Britstein struct netlink_ext_ack *extack = f->common.extack; 1473bbd00f7eSHadar Hen Zion void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 1474bbd00f7eSHadar Hen Zion outer_headers); 1475bbd00f7eSHadar Hen Zion void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, 1476bbd00f7eSHadar Hen Zion outer_headers); 1477f9e30088SPablo Neira Ayuso struct flow_rule *rule = flow_cls_offload_flow_rule(f); 14788f256622SPablo Neira Ayuso int err; 1479bbd00f7eSHadar Hen Zion 1480101f4de9SOz Shlomo err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f, 14816363651dSOr Gerlitz headers_c, headers_v, match_level); 148254c177caSOz Shlomo if (err) { 148354c177caSOz Shlomo NL_SET_ERR_MSG_MOD(extack, 148454c177caSOz Shlomo "failed to parse tunnel attributes"); 1485101f4de9SOz Shlomo return err; 1486bbd00f7eSHadar Hen Zion } 1487bbd00f7eSHadar Hen Zion 
1488d1bda7eeSTonghao Zhang if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) { 14898f256622SPablo Neira Ayuso struct flow_match_ipv4_addrs match; 14908f256622SPablo Neira Ayuso 14918f256622SPablo Neira Ayuso flow_rule_match_enc_ipv4_addrs(rule, &match); 1492bbd00f7eSHadar Hen Zion MLX5_SET(fte_match_set_lyr_2_4, headers_c, 1493bbd00f7eSHadar Hen Zion src_ipv4_src_ipv6.ipv4_layout.ipv4, 14948f256622SPablo Neira Ayuso ntohl(match.mask->src)); 1495bbd00f7eSHadar Hen Zion MLX5_SET(fte_match_set_lyr_2_4, headers_v, 1496bbd00f7eSHadar Hen Zion src_ipv4_src_ipv6.ipv4_layout.ipv4, 14978f256622SPablo Neira Ayuso ntohl(match.key->src)); 1498bbd00f7eSHadar Hen Zion 1499bbd00f7eSHadar Hen Zion MLX5_SET(fte_match_set_lyr_2_4, headers_c, 1500bbd00f7eSHadar Hen Zion dst_ipv4_dst_ipv6.ipv4_layout.ipv4, 15018f256622SPablo Neira Ayuso ntohl(match.mask->dst)); 1502bbd00f7eSHadar Hen Zion MLX5_SET(fte_match_set_lyr_2_4, headers_v, 1503bbd00f7eSHadar Hen Zion dst_ipv4_dst_ipv6.ipv4_layout.ipv4, 15048f256622SPablo Neira Ayuso ntohl(match.key->dst)); 1505bbd00f7eSHadar Hen Zion 1506bbd00f7eSHadar Hen Zion MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype); 1507bbd00f7eSHadar Hen Zion MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP); 1508d1bda7eeSTonghao Zhang } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) { 15098f256622SPablo Neira Ayuso struct flow_match_ipv6_addrs match; 151019f44401SOr Gerlitz 15118f256622SPablo Neira Ayuso flow_rule_match_enc_ipv6_addrs(rule, &match); 151219f44401SOr Gerlitz memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 151319f44401SOr Gerlitz src_ipv4_src_ipv6.ipv6_layout.ipv6), 15148f256622SPablo Neira Ayuso &match.mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); 151519f44401SOr Gerlitz memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 151619f44401SOr Gerlitz src_ipv4_src_ipv6.ipv6_layout.ipv6), 15178f256622SPablo Neira Ayuso &match.key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); 
151819f44401SOr Gerlitz 151919f44401SOr Gerlitz memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 152019f44401SOr Gerlitz dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 15218f256622SPablo Neira Ayuso &match.mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); 152219f44401SOr Gerlitz memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 152319f44401SOr Gerlitz dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 15248f256622SPablo Neira Ayuso &match.key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); 152519f44401SOr Gerlitz 152619f44401SOr Gerlitz MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype); 152719f44401SOr Gerlitz MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6); 15282e72eb43SOr Gerlitz } 1529bbd00f7eSHadar Hen Zion 15308f256622SPablo Neira Ayuso if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) { 15318f256622SPablo Neira Ayuso struct flow_match_ip match; 1532bcef735cSOr Gerlitz 15338f256622SPablo Neira Ayuso flow_rule_match_enc_ip(rule, &match); 15348f256622SPablo Neira Ayuso MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, 15358f256622SPablo Neira Ayuso match.mask->tos & 0x3); 15368f256622SPablo Neira Ayuso MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, 15378f256622SPablo Neira Ayuso match.key->tos & 0x3); 1538bcef735cSOr Gerlitz 15398f256622SPablo Neira Ayuso MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, 15408f256622SPablo Neira Ayuso match.mask->tos >> 2); 15418f256622SPablo Neira Ayuso MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, 15428f256622SPablo Neira Ayuso match.key->tos >> 2); 1543bcef735cSOr Gerlitz 15448f256622SPablo Neira Ayuso MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, 15458f256622SPablo Neira Ayuso match.mask->ttl); 15468f256622SPablo Neira Ayuso MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, 15478f256622SPablo Neira Ayuso match.key->ttl); 1548e98bedf5SEli Britstein 15498f256622SPablo Neira Ayuso if (match.mask->ttl && 1550e98bedf5SEli Britstein !MLX5_CAP_ESW_FLOWTABLE_FDB 1551e98bedf5SEli 
Britstein (priv->mdev, 1552e98bedf5SEli Britstein ft_field_support.outer_ipv4_ttl)) { 1553e98bedf5SEli Britstein NL_SET_ERR_MSG_MOD(extack, 1554e98bedf5SEli Britstein "Matching on TTL is not supported"); 1555e98bedf5SEli Britstein return -EOPNOTSUPP; 1556e98bedf5SEli Britstein } 1557e98bedf5SEli Britstein 1558bcef735cSOr Gerlitz } 1559bcef735cSOr Gerlitz 1560bbd00f7eSHadar Hen Zion /* Enforce DMAC when offloading incoming tunneled flows. 1561bbd00f7eSHadar Hen Zion * Flow counters require a match on the DMAC. 1562bbd00f7eSHadar Hen Zion */ 1563bbd00f7eSHadar Hen Zion MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16); 1564bbd00f7eSHadar Hen Zion MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0); 1565bbd00f7eSHadar Hen Zion ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 1566bbd00f7eSHadar Hen Zion dmac_47_16), priv->netdev->dev_addr); 1567bbd00f7eSHadar Hen Zion 1568bbd00f7eSHadar Hen Zion /* let software handle IP fragments */ 1569bbd00f7eSHadar Hen Zion MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1); 1570bbd00f7eSHadar Hen Zion MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0); 1571bbd00f7eSHadar Hen Zion 1572bbd00f7eSHadar Hen Zion return 0; 1573bbd00f7eSHadar Hen Zion } 1574bbd00f7eSHadar Hen Zion 15758377629eSEli Britstein static void *get_match_headers_criteria(u32 flags, 15768377629eSEli Britstein struct mlx5_flow_spec *spec) 15778377629eSEli Britstein { 15788377629eSEli Britstein return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ? 
15798377629eSEli Britstein MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 15808377629eSEli Britstein inner_headers) : 15818377629eSEli Britstein MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 15828377629eSEli Britstein outer_headers); 15838377629eSEli Britstein } 15848377629eSEli Britstein 15858377629eSEli Britstein static void *get_match_headers_value(u32 flags, 15868377629eSEli Britstein struct mlx5_flow_spec *spec) 15878377629eSEli Britstein { 15888377629eSEli Britstein return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ? 15898377629eSEli Britstein MLX5_ADDR_OF(fte_match_param, spec->match_value, 15908377629eSEli Britstein inner_headers) : 15918377629eSEli Britstein MLX5_ADDR_OF(fte_match_param, spec->match_value, 15928377629eSEli Britstein outer_headers); 15938377629eSEli Britstein } 15948377629eSEli Britstein 1595de0af0bfSRoi Dayan static int __parse_cls_flower(struct mlx5e_priv *priv, 1596de0af0bfSRoi Dayan struct mlx5_flow_spec *spec, 1597f9e30088SPablo Neira Ayuso struct flow_cls_offload *f, 159854c177caSOz Shlomo struct net_device *filter_dev, 15996363651dSOr Gerlitz u8 *match_level, u8 *tunnel_match_level) 1600e3a2b7edSAmir Vadai { 1601e98bedf5SEli Britstein struct netlink_ext_ack *extack = f->common.extack; 1602c5bb1730SMaor Gottlieb void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 1603c5bb1730SMaor Gottlieb outer_headers); 1604c5bb1730SMaor Gottlieb void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, 1605c5bb1730SMaor Gottlieb outer_headers); 1606699e96ddSJianbo Liu void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 1607699e96ddSJianbo Liu misc_parameters); 1608699e96ddSJianbo Liu void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, 1609699e96ddSJianbo Liu misc_parameters); 1610f9e30088SPablo Neira Ayuso struct flow_rule *rule = flow_cls_offload_flow_rule(f); 16118f256622SPablo Neira Ayuso struct flow_dissector *dissector = rule->match.dissector; 1612e3a2b7edSAmir Vadai u16 addr_type 
= 0; 1613e3a2b7edSAmir Vadai u8 ip_proto = 0; 1614e3a2b7edSAmir Vadai 1615d708f902SOr Gerlitz *match_level = MLX5_MATCH_NONE; 1616de0af0bfSRoi Dayan 16178f256622SPablo Neira Ayuso if (dissector->used_keys & 16183d144578SVlad Buslov ~(BIT(FLOW_DISSECTOR_KEY_META) | 16193d144578SVlad Buslov BIT(FLOW_DISSECTOR_KEY_CONTROL) | 1620e3a2b7edSAmir Vadai BIT(FLOW_DISSECTOR_KEY_BASIC) | 1621e3a2b7edSAmir Vadai BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | 1622095b6cfdSOr Gerlitz BIT(FLOW_DISSECTOR_KEY_VLAN) | 1623699e96ddSJianbo Liu BIT(FLOW_DISSECTOR_KEY_CVLAN) | 1624e3a2b7edSAmir Vadai BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | 1625e3a2b7edSAmir Vadai BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | 1626bbd00f7eSHadar Hen Zion BIT(FLOW_DISSECTOR_KEY_PORTS) | 1627bbd00f7eSHadar Hen Zion BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | 1628bbd00f7eSHadar Hen Zion BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | 1629bbd00f7eSHadar Hen Zion BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | 1630bbd00f7eSHadar Hen Zion BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | 1631e77834ecSOr Gerlitz BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | 1632fd7da28bSOr Gerlitz BIT(FLOW_DISSECTOR_KEY_TCP) | 1633bcef735cSOr Gerlitz BIT(FLOW_DISSECTOR_KEY_IP) | 16349272e3dfSYevgeny Kliteynik BIT(FLOW_DISSECTOR_KEY_ENC_IP) | 16359272e3dfSYevgeny Kliteynik BIT(FLOW_DISSECTOR_KEY_ENC_OPTS))) { 1636e98bedf5SEli Britstein NL_SET_ERR_MSG_MOD(extack, "Unsupported key"); 1637e3a2b7edSAmir Vadai netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n", 16388f256622SPablo Neira Ayuso dissector->used_keys); 1639e3a2b7edSAmir Vadai return -EOPNOTSUPP; 1640e3a2b7edSAmir Vadai } 1641e3a2b7edSAmir Vadai 1642075973c7SVlad Buslov if (mlx5e_get_tc_tun(filter_dev)) { 16436363651dSOr Gerlitz if (parse_tunnel_attr(priv, spec, f, filter_dev, tunnel_match_level)) 1644bbd00f7eSHadar Hen Zion return -EOPNOTSUPP; 1645bbd00f7eSHadar Hen Zion 1646bbd00f7eSHadar Hen Zion /* In decap flow, header pointers should point to the inner 1647bbd00f7eSHadar Hen Zion * headers, outer header were already set by 
parse_tunnel_attr 1648bbd00f7eSHadar Hen Zion */ 16498377629eSEli Britstein headers_c = get_match_headers_criteria(MLX5_FLOW_CONTEXT_ACTION_DECAP, 16508377629eSEli Britstein spec); 16518377629eSEli Britstein headers_v = get_match_headers_value(MLX5_FLOW_CONTEXT_ACTION_DECAP, 16528377629eSEli Britstein spec); 1653bbd00f7eSHadar Hen Zion } 1654bbd00f7eSHadar Hen Zion 16558f256622SPablo Neira Ayuso if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { 16568f256622SPablo Neira Ayuso struct flow_match_basic match; 1657e3a2b7edSAmir Vadai 16588f256622SPablo Neira Ayuso flow_rule_match_basic(rule, &match); 16598f256622SPablo Neira Ayuso MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype, 16608f256622SPablo Neira Ayuso ntohs(match.mask->n_proto)); 16618f256622SPablo Neira Ayuso MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 16628f256622SPablo Neira Ayuso ntohs(match.key->n_proto)); 16638f256622SPablo Neira Ayuso 16648f256622SPablo Neira Ayuso if (match.mask->n_proto) 1665d708f902SOr Gerlitz *match_level = MLX5_MATCH_L2; 1666e3a2b7edSAmir Vadai } 166735a605dbSEli Britstein if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN) || 166835a605dbSEli Britstein is_vlan_dev(filter_dev)) { 166935a605dbSEli Britstein struct flow_dissector_key_vlan filter_dev_mask; 167035a605dbSEli Britstein struct flow_dissector_key_vlan filter_dev_key; 16718f256622SPablo Neira Ayuso struct flow_match_vlan match; 16728f256622SPablo Neira Ayuso 167335a605dbSEli Britstein if (is_vlan_dev(filter_dev)) { 167435a605dbSEli Britstein match.key = &filter_dev_key; 167535a605dbSEli Britstein match.key->vlan_id = vlan_dev_vlan_id(filter_dev); 167635a605dbSEli Britstein match.key->vlan_tpid = vlan_dev_vlan_proto(filter_dev); 167735a605dbSEli Britstein match.key->vlan_priority = 0; 167835a605dbSEli Britstein match.mask = &filter_dev_mask; 167935a605dbSEli Britstein memset(match.mask, 0xff, sizeof(*match.mask)); 168035a605dbSEli Britstein match.mask->vlan_priority = 0; 168135a605dbSEli Britstein } 
else { 16828f256622SPablo Neira Ayuso flow_rule_match_vlan(rule, &match); 168335a605dbSEli Britstein } 16848f256622SPablo Neira Ayuso if (match.mask->vlan_id || 16858f256622SPablo Neira Ayuso match.mask->vlan_priority || 16868f256622SPablo Neira Ayuso match.mask->vlan_tpid) { 16878f256622SPablo Neira Ayuso if (match.key->vlan_tpid == htons(ETH_P_8021AD)) { 1688699e96ddSJianbo Liu MLX5_SET(fte_match_set_lyr_2_4, headers_c, 1689699e96ddSJianbo Liu svlan_tag, 1); 1690699e96ddSJianbo Liu MLX5_SET(fte_match_set_lyr_2_4, headers_v, 1691699e96ddSJianbo Liu svlan_tag, 1); 1692699e96ddSJianbo Liu } else { 1693699e96ddSJianbo Liu MLX5_SET(fte_match_set_lyr_2_4, headers_c, 1694699e96ddSJianbo Liu cvlan_tag, 1); 1695699e96ddSJianbo Liu MLX5_SET(fte_match_set_lyr_2_4, headers_v, 1696699e96ddSJianbo Liu cvlan_tag, 1); 1697699e96ddSJianbo Liu } 1698095b6cfdSOr Gerlitz 16998f256622SPablo Neira Ayuso MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, 17008f256622SPablo Neira Ayuso match.mask->vlan_id); 17018f256622SPablo Neira Ayuso MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, 17028f256622SPablo Neira Ayuso match.key->vlan_id); 1703358d79a4SOr Gerlitz 17048f256622SPablo Neira Ayuso MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, 17058f256622SPablo Neira Ayuso match.mask->vlan_priority); 17068f256622SPablo Neira Ayuso MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, 17078f256622SPablo Neira Ayuso match.key->vlan_priority); 170854782900SOr Gerlitz 1709d708f902SOr Gerlitz *match_level = MLX5_MATCH_L2; 1710095b6cfdSOr Gerlitz } 1711d3a80bb5SOr Gerlitz } else if (*match_level != MLX5_MATCH_NONE) { 1712cee26487SJianbo Liu MLX5_SET(fte_match_set_lyr_2_4, headers_c, svlan_tag, 1); 1713cee26487SJianbo Liu MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1); 1714d3a80bb5SOr Gerlitz *match_level = MLX5_MATCH_L2; 1715095b6cfdSOr Gerlitz } 1716095b6cfdSOr Gerlitz 17178f256622SPablo Neira Ayuso if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) { 
17188f256622SPablo Neira Ayuso struct flow_match_vlan match; 17198f256622SPablo Neira Ayuso 172012d5cbf8SJianbo Liu flow_rule_match_cvlan(rule, &match); 17218f256622SPablo Neira Ayuso if (match.mask->vlan_id || 17228f256622SPablo Neira Ayuso match.mask->vlan_priority || 17238f256622SPablo Neira Ayuso match.mask->vlan_tpid) { 17248f256622SPablo Neira Ayuso if (match.key->vlan_tpid == htons(ETH_P_8021AD)) { 1725699e96ddSJianbo Liu MLX5_SET(fte_match_set_misc, misc_c, 1726699e96ddSJianbo Liu outer_second_svlan_tag, 1); 1727699e96ddSJianbo Liu MLX5_SET(fte_match_set_misc, misc_v, 1728699e96ddSJianbo Liu outer_second_svlan_tag, 1); 1729699e96ddSJianbo Liu } else { 1730699e96ddSJianbo Liu MLX5_SET(fte_match_set_misc, misc_c, 1731699e96ddSJianbo Liu outer_second_cvlan_tag, 1); 1732699e96ddSJianbo Liu MLX5_SET(fte_match_set_misc, misc_v, 1733699e96ddSJianbo Liu outer_second_cvlan_tag, 1); 1734699e96ddSJianbo Liu } 1735699e96ddSJianbo Liu 1736699e96ddSJianbo Liu MLX5_SET(fte_match_set_misc, misc_c, outer_second_vid, 17378f256622SPablo Neira Ayuso match.mask->vlan_id); 1738699e96ddSJianbo Liu MLX5_SET(fte_match_set_misc, misc_v, outer_second_vid, 17398f256622SPablo Neira Ayuso match.key->vlan_id); 1740699e96ddSJianbo Liu MLX5_SET(fte_match_set_misc, misc_c, outer_second_prio, 17418f256622SPablo Neira Ayuso match.mask->vlan_priority); 1742699e96ddSJianbo Liu MLX5_SET(fte_match_set_misc, misc_v, outer_second_prio, 17438f256622SPablo Neira Ayuso match.key->vlan_priority); 1744699e96ddSJianbo Liu 1745699e96ddSJianbo Liu *match_level = MLX5_MATCH_L2; 1746699e96ddSJianbo Liu } 1747699e96ddSJianbo Liu } 1748699e96ddSJianbo Liu 17498f256622SPablo Neira Ayuso if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { 17508f256622SPablo Neira Ayuso struct flow_match_eth_addrs match; 175154782900SOr Gerlitz 17528f256622SPablo Neira Ayuso flow_rule_match_eth_addrs(rule, &match); 1753d3a80bb5SOr Gerlitz ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 1754d3a80bb5SOr 
Gerlitz dmac_47_16), 17558f256622SPablo Neira Ayuso match.mask->dst); 1756d3a80bb5SOr Gerlitz ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 1757d3a80bb5SOr Gerlitz dmac_47_16), 17588f256622SPablo Neira Ayuso match.key->dst); 1759d3a80bb5SOr Gerlitz 1760d3a80bb5SOr Gerlitz ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 1761d3a80bb5SOr Gerlitz smac_47_16), 17628f256622SPablo Neira Ayuso match.mask->src); 1763d3a80bb5SOr Gerlitz ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 1764d3a80bb5SOr Gerlitz smac_47_16), 17658f256622SPablo Neira Ayuso match.key->src); 1766d3a80bb5SOr Gerlitz 17678f256622SPablo Neira Ayuso if (!is_zero_ether_addr(match.mask->src) || 17688f256622SPablo Neira Ayuso !is_zero_ether_addr(match.mask->dst)) 1769d708f902SOr Gerlitz *match_level = MLX5_MATCH_L2; 177054782900SOr Gerlitz } 177154782900SOr Gerlitz 17728f256622SPablo Neira Ayuso if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { 17738f256622SPablo Neira Ayuso struct flow_match_control match; 177454782900SOr Gerlitz 17758f256622SPablo Neira Ayuso flow_rule_match_control(rule, &match); 17768f256622SPablo Neira Ayuso addr_type = match.key->addr_type; 177754782900SOr Gerlitz 177854782900SOr Gerlitz /* the HW doesn't support frag first/later */ 17798f256622SPablo Neira Ayuso if (match.mask->flags & FLOW_DIS_FIRST_FRAG) 178054782900SOr Gerlitz return -EOPNOTSUPP; 178154782900SOr Gerlitz 17828f256622SPablo Neira Ayuso if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) { 178354782900SOr Gerlitz MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1); 178454782900SOr Gerlitz MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 17858f256622SPablo Neira Ayuso match.key->flags & FLOW_DIS_IS_FRAGMENT); 178654782900SOr Gerlitz 178754782900SOr Gerlitz /* the HW doesn't need L3 inline to match on frag=no */ 17888f256622SPablo Neira Ayuso if (!(match.key->flags & FLOW_DIS_IS_FRAGMENT)) 178983621b7dSOr Gerlitz *match_level = MLX5_MATCH_L2; 179054782900SOr Gerlitz 
/* *** L2 attributes parsing up to here *** */ 179154782900SOr Gerlitz else 179283621b7dSOr Gerlitz *match_level = MLX5_MATCH_L3; 179354782900SOr Gerlitz } 179454782900SOr Gerlitz } 179554782900SOr Gerlitz 17968f256622SPablo Neira Ayuso if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { 17978f256622SPablo Neira Ayuso struct flow_match_basic match; 17988f256622SPablo Neira Ayuso 17998f256622SPablo Neira Ayuso flow_rule_match_basic(rule, &match); 18008f256622SPablo Neira Ayuso ip_proto = match.key->ip_proto; 180154782900SOr Gerlitz 180254782900SOr Gerlitz MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol, 18038f256622SPablo Neira Ayuso match.mask->ip_proto); 180454782900SOr Gerlitz MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, 18058f256622SPablo Neira Ayuso match.key->ip_proto); 180654782900SOr Gerlitz 18078f256622SPablo Neira Ayuso if (match.mask->ip_proto) 1808d708f902SOr Gerlitz *match_level = MLX5_MATCH_L3; 180954782900SOr Gerlitz } 181054782900SOr Gerlitz 1811e3a2b7edSAmir Vadai if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { 18128f256622SPablo Neira Ayuso struct flow_match_ipv4_addrs match; 1813e3a2b7edSAmir Vadai 18148f256622SPablo Neira Ayuso flow_rule_match_ipv4_addrs(rule, &match); 1815e3a2b7edSAmir Vadai memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 1816e3a2b7edSAmir Vadai src_ipv4_src_ipv6.ipv4_layout.ipv4), 18178f256622SPablo Neira Ayuso &match.mask->src, sizeof(match.mask->src)); 1818e3a2b7edSAmir Vadai memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 1819e3a2b7edSAmir Vadai src_ipv4_src_ipv6.ipv4_layout.ipv4), 18208f256622SPablo Neira Ayuso &match.key->src, sizeof(match.key->src)); 1821e3a2b7edSAmir Vadai memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 1822e3a2b7edSAmir Vadai dst_ipv4_dst_ipv6.ipv4_layout.ipv4), 18238f256622SPablo Neira Ayuso &match.mask->dst, sizeof(match.mask->dst)); 1824e3a2b7edSAmir Vadai memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 1825e3a2b7edSAmir Vadai 
dst_ipv4_dst_ipv6.ipv4_layout.ipv4), 18268f256622SPablo Neira Ayuso &match.key->dst, sizeof(match.key->dst)); 1827de0af0bfSRoi Dayan 18288f256622SPablo Neira Ayuso if (match.mask->src || match.mask->dst) 1829d708f902SOr Gerlitz *match_level = MLX5_MATCH_L3; 1830e3a2b7edSAmir Vadai } 1831e3a2b7edSAmir Vadai 1832e3a2b7edSAmir Vadai if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { 18338f256622SPablo Neira Ayuso struct flow_match_ipv6_addrs match; 1834e3a2b7edSAmir Vadai 18358f256622SPablo Neira Ayuso flow_rule_match_ipv6_addrs(rule, &match); 1836e3a2b7edSAmir Vadai memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 1837e3a2b7edSAmir Vadai src_ipv4_src_ipv6.ipv6_layout.ipv6), 18388f256622SPablo Neira Ayuso &match.mask->src, sizeof(match.mask->src)); 1839e3a2b7edSAmir Vadai memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 1840e3a2b7edSAmir Vadai src_ipv4_src_ipv6.ipv6_layout.ipv6), 18418f256622SPablo Neira Ayuso &match.key->src, sizeof(match.key->src)); 1842e3a2b7edSAmir Vadai 1843e3a2b7edSAmir Vadai memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 1844e3a2b7edSAmir Vadai dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 18458f256622SPablo Neira Ayuso &match.mask->dst, sizeof(match.mask->dst)); 1846e3a2b7edSAmir Vadai memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 1847e3a2b7edSAmir Vadai dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 18488f256622SPablo Neira Ayuso &match.key->dst, sizeof(match.key->dst)); 1849de0af0bfSRoi Dayan 18508f256622SPablo Neira Ayuso if (ipv6_addr_type(&match.mask->src) != IPV6_ADDR_ANY || 18518f256622SPablo Neira Ayuso ipv6_addr_type(&match.mask->dst) != IPV6_ADDR_ANY) 1852d708f902SOr Gerlitz *match_level = MLX5_MATCH_L3; 1853e3a2b7edSAmir Vadai } 1854e3a2b7edSAmir Vadai 18558f256622SPablo Neira Ayuso if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) { 18568f256622SPablo Neira Ayuso struct flow_match_ip match; 18571f97a526SOr Gerlitz 18588f256622SPablo Neira Ayuso flow_rule_match_ip(rule, &match); 18598f256622SPablo Neira Ayuso 
MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, 18608f256622SPablo Neira Ayuso match.mask->tos & 0x3); 18618f256622SPablo Neira Ayuso MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, 18628f256622SPablo Neira Ayuso match.key->tos & 0x3); 18631f97a526SOr Gerlitz 18648f256622SPablo Neira Ayuso MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, 18658f256622SPablo Neira Ayuso match.mask->tos >> 2); 18668f256622SPablo Neira Ayuso MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, 18678f256622SPablo Neira Ayuso match.key->tos >> 2); 18681f97a526SOr Gerlitz 18698f256622SPablo Neira Ayuso MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, 18708f256622SPablo Neira Ayuso match.mask->ttl); 18718f256622SPablo Neira Ayuso MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, 18728f256622SPablo Neira Ayuso match.key->ttl); 18731f97a526SOr Gerlitz 18748f256622SPablo Neira Ayuso if (match.mask->ttl && 1875a8ade55fSOr Gerlitz !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, 1876e98bedf5SEli Britstein ft_field_support.outer_ipv4_ttl)) { 1877e98bedf5SEli Britstein NL_SET_ERR_MSG_MOD(extack, 1878e98bedf5SEli Britstein "Matching on TTL is not supported"); 18791f97a526SOr Gerlitz return -EOPNOTSUPP; 1880e98bedf5SEli Britstein } 1881a8ade55fSOr Gerlitz 18828f256622SPablo Neira Ayuso if (match.mask->tos || match.mask->ttl) 1883d708f902SOr Gerlitz *match_level = MLX5_MATCH_L3; 18841f97a526SOr Gerlitz } 18851f97a526SOr Gerlitz 188654782900SOr Gerlitz /* *** L3 attributes parsing up to here *** */ 188754782900SOr Gerlitz 18888f256622SPablo Neira Ayuso if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { 18898f256622SPablo Neira Ayuso struct flow_match_ports match; 18908f256622SPablo Neira Ayuso 18918f256622SPablo Neira Ayuso flow_rule_match_ports(rule, &match); 1892e3a2b7edSAmir Vadai switch (ip_proto) { 1893e3a2b7edSAmir Vadai case IPPROTO_TCP: 1894e3a2b7edSAmir Vadai MLX5_SET(fte_match_set_lyr_2_4, headers_c, 18958f256622SPablo Neira Ayuso tcp_sport, ntohs(match.mask->src)); 
1896e3a2b7edSAmir Vadai MLX5_SET(fte_match_set_lyr_2_4, headers_v, 18978f256622SPablo Neira Ayuso tcp_sport, ntohs(match.key->src)); 1898e3a2b7edSAmir Vadai 1899e3a2b7edSAmir Vadai MLX5_SET(fte_match_set_lyr_2_4, headers_c, 19008f256622SPablo Neira Ayuso tcp_dport, ntohs(match.mask->dst)); 1901e3a2b7edSAmir Vadai MLX5_SET(fte_match_set_lyr_2_4, headers_v, 19028f256622SPablo Neira Ayuso tcp_dport, ntohs(match.key->dst)); 1903e3a2b7edSAmir Vadai break; 1904e3a2b7edSAmir Vadai 1905e3a2b7edSAmir Vadai case IPPROTO_UDP: 1906e3a2b7edSAmir Vadai MLX5_SET(fte_match_set_lyr_2_4, headers_c, 19078f256622SPablo Neira Ayuso udp_sport, ntohs(match.mask->src)); 1908e3a2b7edSAmir Vadai MLX5_SET(fte_match_set_lyr_2_4, headers_v, 19098f256622SPablo Neira Ayuso udp_sport, ntohs(match.key->src)); 1910e3a2b7edSAmir Vadai 1911e3a2b7edSAmir Vadai MLX5_SET(fte_match_set_lyr_2_4, headers_c, 19128f256622SPablo Neira Ayuso udp_dport, ntohs(match.mask->dst)); 1913e3a2b7edSAmir Vadai MLX5_SET(fte_match_set_lyr_2_4, headers_v, 19148f256622SPablo Neira Ayuso udp_dport, ntohs(match.key->dst)); 1915e3a2b7edSAmir Vadai break; 1916e3a2b7edSAmir Vadai default: 1917e98bedf5SEli Britstein NL_SET_ERR_MSG_MOD(extack, 1918e98bedf5SEli Britstein "Only UDP and TCP transports are supported for L4 matching"); 1919e3a2b7edSAmir Vadai netdev_err(priv->netdev, 1920e3a2b7edSAmir Vadai "Only UDP and TCP transport are supported\n"); 1921e3a2b7edSAmir Vadai return -EINVAL; 1922e3a2b7edSAmir Vadai } 1923de0af0bfSRoi Dayan 19248f256622SPablo Neira Ayuso if (match.mask->src || match.mask->dst) 1925d708f902SOr Gerlitz *match_level = MLX5_MATCH_L4; 1926e3a2b7edSAmir Vadai } 1927e3a2b7edSAmir Vadai 19288f256622SPablo Neira Ayuso if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) { 19298f256622SPablo Neira Ayuso struct flow_match_tcp match; 1930e77834ecSOr Gerlitz 19318f256622SPablo Neira Ayuso flow_rule_match_tcp(rule, &match); 1932e77834ecSOr Gerlitz MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags, 
19338f256622SPablo Neira Ayuso ntohs(match.mask->flags)); 1934e77834ecSOr Gerlitz MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags, 19358f256622SPablo Neira Ayuso ntohs(match.key->flags)); 1936e77834ecSOr Gerlitz 19378f256622SPablo Neira Ayuso if (match.mask->flags) 1938d708f902SOr Gerlitz *match_level = MLX5_MATCH_L4; 1939e77834ecSOr Gerlitz } 1940e77834ecSOr Gerlitz 1941e3a2b7edSAmir Vadai return 0; 1942e3a2b7edSAmir Vadai } 1943e3a2b7edSAmir Vadai 1944de0af0bfSRoi Dayan static int parse_cls_flower(struct mlx5e_priv *priv, 194565ba8fb7SOr Gerlitz struct mlx5e_tc_flow *flow, 1946de0af0bfSRoi Dayan struct mlx5_flow_spec *spec, 1947f9e30088SPablo Neira Ayuso struct flow_cls_offload *f, 194854c177caSOz Shlomo struct net_device *filter_dev) 1949de0af0bfSRoi Dayan { 1950e98bedf5SEli Britstein struct netlink_ext_ack *extack = f->common.extack; 1951de0af0bfSRoi Dayan struct mlx5_core_dev *dev = priv->mdev; 1952de0af0bfSRoi Dayan struct mlx5_eswitch *esw = dev->priv.eswitch; 19531d447a39SSaeed Mahameed struct mlx5e_rep_priv *rpriv = priv->ppriv; 19546363651dSOr Gerlitz u8 match_level, tunnel_match_level = MLX5_MATCH_NONE; 19551d447a39SSaeed Mahameed struct mlx5_eswitch_rep *rep; 1956226f2ca3SVlad Buslov bool is_eswitch_flow; 1957de0af0bfSRoi Dayan int err; 1958de0af0bfSRoi Dayan 19596363651dSOr Gerlitz err = __parse_cls_flower(priv, spec, f, filter_dev, &match_level, &tunnel_match_level); 1960de0af0bfSRoi Dayan 1961226f2ca3SVlad Buslov is_eswitch_flow = mlx5e_is_eswitch_flow(flow); 1962226f2ca3SVlad Buslov if (!err && is_eswitch_flow) { 19631d447a39SSaeed Mahameed rep = rpriv->rep; 1964b05af6aaSBodong Wang if (rep->vport != MLX5_VPORT_UPLINK && 19651d447a39SSaeed Mahameed (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE && 1966d708f902SOr Gerlitz esw->offloads.inline_mode < match_level)) { 1967e98bedf5SEli Britstein NL_SET_ERR_MSG_MOD(extack, 1968e98bedf5SEli Britstein "Flow is not offloaded due to min inline setting"); 1969de0af0bfSRoi Dayan 
netdev_warn(priv->netdev, 1970de0af0bfSRoi Dayan "Flow is not offloaded due to min inline setting, required %d actual %d\n", 1971d708f902SOr Gerlitz match_level, esw->offloads.inline_mode); 1972de0af0bfSRoi Dayan return -EOPNOTSUPP; 1973de0af0bfSRoi Dayan } 1974de0af0bfSRoi Dayan } 1975de0af0bfSRoi Dayan 1976226f2ca3SVlad Buslov if (is_eswitch_flow) { 197738aa51c1SOr Gerlitz flow->esw_attr->match_level = match_level; 19786363651dSOr Gerlitz flow->esw_attr->tunnel_match_level = tunnel_match_level; 19796363651dSOr Gerlitz } else { 198038aa51c1SOr Gerlitz flow->nic_attr->match_level = match_level; 19816363651dSOr Gerlitz } 198238aa51c1SOr Gerlitz 1983de0af0bfSRoi Dayan return err; 1984de0af0bfSRoi Dayan } 1985de0af0bfSRoi Dayan 1986d79b6df6SOr Gerlitz struct pedit_headers { 1987d79b6df6SOr Gerlitz struct ethhdr eth; 19880eb69bb9SEli Britstein struct vlan_hdr vlan; 1989d79b6df6SOr Gerlitz struct iphdr ip4; 1990d79b6df6SOr Gerlitz struct ipv6hdr ip6; 1991d79b6df6SOr Gerlitz struct tcphdr tcp; 1992d79b6df6SOr Gerlitz struct udphdr udp; 1993d79b6df6SOr Gerlitz }; 1994d79b6df6SOr Gerlitz 1995c500c86bSPablo Neira Ayuso struct pedit_headers_action { 1996c500c86bSPablo Neira Ayuso struct pedit_headers vals; 1997c500c86bSPablo Neira Ayuso struct pedit_headers masks; 1998c500c86bSPablo Neira Ayuso u32 pedits; 1999c500c86bSPablo Neira Ayuso }; 2000c500c86bSPablo Neira Ayuso 2001d79b6df6SOr Gerlitz static int pedit_header_offsets[] = { 200273867881SPablo Neira Ayuso [FLOW_ACT_MANGLE_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth), 200373867881SPablo Neira Ayuso [FLOW_ACT_MANGLE_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4), 200473867881SPablo Neira Ayuso [FLOW_ACT_MANGLE_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6), 200573867881SPablo Neira Ayuso [FLOW_ACT_MANGLE_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp), 200673867881SPablo Neira Ayuso [FLOW_ACT_MANGLE_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp), 2007d79b6df6SOr Gerlitz }; 2008d79b6df6SOr 
Gerlitz 2009d79b6df6SOr Gerlitz #define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype]) 2010d79b6df6SOr Gerlitz 2011d79b6df6SOr Gerlitz static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset, 2012c500c86bSPablo Neira Ayuso struct pedit_headers_action *hdrs) 2013d79b6df6SOr Gerlitz { 2014d79b6df6SOr Gerlitz u32 *curr_pmask, *curr_pval; 2015d79b6df6SOr Gerlitz 2016c500c86bSPablo Neira Ayuso curr_pmask = (u32 *)(pedit_header(&hdrs->masks, hdr_type) + offset); 2017c500c86bSPablo Neira Ayuso curr_pval = (u32 *)(pedit_header(&hdrs->vals, hdr_type) + offset); 2018d79b6df6SOr Gerlitz 2019d79b6df6SOr Gerlitz if (*curr_pmask & mask) /* disallow acting twice on the same location */ 2020d79b6df6SOr Gerlitz goto out_err; 2021d79b6df6SOr Gerlitz 2022d79b6df6SOr Gerlitz *curr_pmask |= mask; 2023d79b6df6SOr Gerlitz *curr_pval |= (val & mask); 2024d79b6df6SOr Gerlitz 2025d79b6df6SOr Gerlitz return 0; 2026d79b6df6SOr Gerlitz 2027d79b6df6SOr Gerlitz out_err: 2028d79b6df6SOr Gerlitz return -EOPNOTSUPP; 2029d79b6df6SOr Gerlitz } 2030d79b6df6SOr Gerlitz 2031d79b6df6SOr Gerlitz struct mlx5_fields { 2032d79b6df6SOr Gerlitz u8 field; 2033d79b6df6SOr Gerlitz u8 size; 2034d79b6df6SOr Gerlitz u32 offset; 203527c11b6bSEli Britstein u32 match_offset; 2036d79b6df6SOr Gerlitz }; 2037d79b6df6SOr Gerlitz 203827c11b6bSEli Britstein #define OFFLOAD(fw_field, size, field, off, match_field) \ 203927c11b6bSEli Britstein {MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, size, \ 204027c11b6bSEli Britstein offsetof(struct pedit_headers, field) + (off), \ 204127c11b6bSEli Britstein MLX5_BYTE_OFF(fte_match_set_lyr_2_4, match_field)} 204227c11b6bSEli Britstein 20432ef86872SEli Britstein /* masked values are the same and there are no rewrites that do not have a 20442ef86872SEli Britstein * match. 
20452ef86872SEli Britstein */ 20462ef86872SEli Britstein #define SAME_VAL_MASK(type, valp, maskp, matchvalp, matchmaskp) ({ \ 20472ef86872SEli Britstein type matchmaskx = *(type *)(matchmaskp); \ 20482ef86872SEli Britstein type matchvalx = *(type *)(matchvalp); \ 20492ef86872SEli Britstein type maskx = *(type *)(maskp); \ 20502ef86872SEli Britstein type valx = *(type *)(valp); \ 20512ef86872SEli Britstein \ 20522ef86872SEli Britstein (valx & maskx) == (matchvalx & matchmaskx) && !(maskx & (maskx ^ \ 20532ef86872SEli Britstein matchmaskx)); \ 20542ef86872SEli Britstein }) 20552ef86872SEli Britstein 205627c11b6bSEli Britstein static bool cmp_val_mask(void *valp, void *maskp, void *matchvalp, 205727c11b6bSEli Britstein void *matchmaskp, int size) 205827c11b6bSEli Britstein { 205927c11b6bSEli Britstein bool same = false; 206027c11b6bSEli Britstein 206127c11b6bSEli Britstein switch (size) { 206227c11b6bSEli Britstein case sizeof(u8): 20632ef86872SEli Britstein same = SAME_VAL_MASK(u8, valp, maskp, matchvalp, matchmaskp); 206427c11b6bSEli Britstein break; 206527c11b6bSEli Britstein case sizeof(u16): 20662ef86872SEli Britstein same = SAME_VAL_MASK(u16, valp, maskp, matchvalp, matchmaskp); 206727c11b6bSEli Britstein break; 206827c11b6bSEli Britstein case sizeof(u32): 20692ef86872SEli Britstein same = SAME_VAL_MASK(u32, valp, maskp, matchvalp, matchmaskp); 207027c11b6bSEli Britstein break; 207127c11b6bSEli Britstein } 207227c11b6bSEli Britstein 207327c11b6bSEli Britstein return same; 207427c11b6bSEli Britstein } 2075a8e4f0c4SOr Gerlitz 2076d79b6df6SOr Gerlitz static struct mlx5_fields fields[] = { 207727c11b6bSEli Britstein OFFLOAD(DMAC_47_16, 4, eth.h_dest[0], 0, dmac_47_16), 207827c11b6bSEli Britstein OFFLOAD(DMAC_15_0, 2, eth.h_dest[4], 0, dmac_15_0), 207927c11b6bSEli Britstein OFFLOAD(SMAC_47_16, 4, eth.h_source[0], 0, smac_47_16), 208027c11b6bSEli Britstein OFFLOAD(SMAC_15_0, 2, eth.h_source[4], 0, smac_15_0), 208127c11b6bSEli Britstein OFFLOAD(ETHERTYPE, 2, 
eth.h_proto, 0, ethertype), 208227c11b6bSEli Britstein OFFLOAD(FIRST_VID, 2, vlan.h_vlan_TCI, 0, first_vid), 2083d79b6df6SOr Gerlitz 208427c11b6bSEli Britstein OFFLOAD(IP_TTL, 1, ip4.ttl, 0, ttl_hoplimit), 208527c11b6bSEli Britstein OFFLOAD(SIPV4, 4, ip4.saddr, 0, src_ipv4_src_ipv6.ipv4_layout.ipv4), 208627c11b6bSEli Britstein OFFLOAD(DIPV4, 4, ip4.daddr, 0, dst_ipv4_dst_ipv6.ipv4_layout.ipv4), 2087d79b6df6SOr Gerlitz 208827c11b6bSEli Britstein OFFLOAD(SIPV6_127_96, 4, ip6.saddr.s6_addr32[0], 0, 208927c11b6bSEli Britstein src_ipv4_src_ipv6.ipv6_layout.ipv6[0]), 209027c11b6bSEli Britstein OFFLOAD(SIPV6_95_64, 4, ip6.saddr.s6_addr32[1], 0, 209127c11b6bSEli Britstein src_ipv4_src_ipv6.ipv6_layout.ipv6[4]), 209227c11b6bSEli Britstein OFFLOAD(SIPV6_63_32, 4, ip6.saddr.s6_addr32[2], 0, 209327c11b6bSEli Britstein src_ipv4_src_ipv6.ipv6_layout.ipv6[8]), 209427c11b6bSEli Britstein OFFLOAD(SIPV6_31_0, 4, ip6.saddr.s6_addr32[3], 0, 209527c11b6bSEli Britstein src_ipv4_src_ipv6.ipv6_layout.ipv6[12]), 209627c11b6bSEli Britstein OFFLOAD(DIPV6_127_96, 4, ip6.daddr.s6_addr32[0], 0, 209727c11b6bSEli Britstein dst_ipv4_dst_ipv6.ipv6_layout.ipv6[0]), 209827c11b6bSEli Britstein OFFLOAD(DIPV6_95_64, 4, ip6.daddr.s6_addr32[1], 0, 209927c11b6bSEli Britstein dst_ipv4_dst_ipv6.ipv6_layout.ipv6[4]), 210027c11b6bSEli Britstein OFFLOAD(DIPV6_63_32, 4, ip6.daddr.s6_addr32[2], 0, 210127c11b6bSEli Britstein dst_ipv4_dst_ipv6.ipv6_layout.ipv6[8]), 210227c11b6bSEli Britstein OFFLOAD(DIPV6_31_0, 4, ip6.daddr.s6_addr32[3], 0, 210327c11b6bSEli Britstein dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]), 210427c11b6bSEli Britstein OFFLOAD(IPV6_HOPLIMIT, 1, ip6.hop_limit, 0, ttl_hoplimit), 2105d79b6df6SOr Gerlitz 210627c11b6bSEli Britstein OFFLOAD(TCP_SPORT, 2, tcp.source, 0, tcp_sport), 210727c11b6bSEli Britstein OFFLOAD(TCP_DPORT, 2, tcp.dest, 0, tcp_dport), 210827c11b6bSEli Britstein OFFLOAD(TCP_FLAGS, 1, tcp.ack_seq, 5, tcp_flags), 2109d79b6df6SOr Gerlitz 211027c11b6bSEli Britstein OFFLOAD(UDP_SPORT, 2, 
udp.source, 0, udp_sport), 211127c11b6bSEli Britstein OFFLOAD(UDP_DPORT, 2, udp.dest, 0, udp_dport), 2112d79b6df6SOr Gerlitz }; 2113d79b6df6SOr Gerlitz 2114218d05ceSTonghao Zhang /* On input attr->max_mod_hdr_actions tells how many HW actions can be parsed at 2115218d05ceSTonghao Zhang * max from the SW pedit action. On success, attr->num_mod_hdr_actions 2116218d05ceSTonghao Zhang * says how many HW actions were actually parsed. 2117d79b6df6SOr Gerlitz */ 2118c500c86bSPablo Neira Ayuso static int offload_pedit_fields(struct pedit_headers_action *hdrs, 2119e98bedf5SEli Britstein struct mlx5e_tc_flow_parse_attr *parse_attr, 212027c11b6bSEli Britstein u32 *action_flags, 2121e98bedf5SEli Britstein struct netlink_ext_ack *extack) 2122d79b6df6SOr Gerlitz { 2123d79b6df6SOr Gerlitz struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals; 212427c11b6bSEli Britstein void *headers_c = get_match_headers_criteria(*action_flags, 212527c11b6bSEli Britstein &parse_attr->spec); 212627c11b6bSEli Britstein void *headers_v = get_match_headers_value(*action_flags, 212727c11b6bSEli Britstein &parse_attr->spec); 21282b64bebaSOr Gerlitz int i, action_size, nactions, max_actions, first, last, next_z; 2129d79b6df6SOr Gerlitz void *s_masks_p, *a_masks_p, *vals_p; 2130d79b6df6SOr Gerlitz struct mlx5_fields *f; 2131d79b6df6SOr Gerlitz u8 cmd, field_bsize; 2132e3ca4e05SOr Gerlitz u32 s_mask, a_mask; 2133d79b6df6SOr Gerlitz unsigned long mask; 21342b64bebaSOr Gerlitz __be32 mask_be32; 21352b64bebaSOr Gerlitz __be16 mask_be16; 2136d79b6df6SOr Gerlitz void *action; 2137d79b6df6SOr Gerlitz 213873867881SPablo Neira Ayuso set_masks = &hdrs[0].masks; 213973867881SPablo Neira Ayuso add_masks = &hdrs[1].masks; 214073867881SPablo Neira Ayuso set_vals = &hdrs[0].vals; 214173867881SPablo Neira Ayuso add_vals = &hdrs[1].vals; 2142d79b6df6SOr Gerlitz 2143d79b6df6SOr Gerlitz action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto); 2144218d05ceSTonghao Zhang action = 
parse_attr->mod_hdr_actions + 2145218d05ceSTonghao Zhang parse_attr->num_mod_hdr_actions * action_size; 2146218d05ceSTonghao Zhang 2147218d05ceSTonghao Zhang max_actions = parse_attr->max_mod_hdr_actions; 2148218d05ceSTonghao Zhang nactions = parse_attr->num_mod_hdr_actions; 2149d79b6df6SOr Gerlitz 2150d79b6df6SOr Gerlitz for (i = 0; i < ARRAY_SIZE(fields); i++) { 215127c11b6bSEli Britstein bool skip; 215227c11b6bSEli Britstein 2153d79b6df6SOr Gerlitz f = &fields[i]; 2154d79b6df6SOr Gerlitz /* avoid seeing bits set from previous iterations */ 2155e3ca4e05SOr Gerlitz s_mask = 0; 2156e3ca4e05SOr Gerlitz a_mask = 0; 2157d79b6df6SOr Gerlitz 2158d79b6df6SOr Gerlitz s_masks_p = (void *)set_masks + f->offset; 2159d79b6df6SOr Gerlitz a_masks_p = (void *)add_masks + f->offset; 2160d79b6df6SOr Gerlitz 2161d79b6df6SOr Gerlitz memcpy(&s_mask, s_masks_p, f->size); 2162d79b6df6SOr Gerlitz memcpy(&a_mask, a_masks_p, f->size); 2163d79b6df6SOr Gerlitz 2164d79b6df6SOr Gerlitz if (!s_mask && !a_mask) /* nothing to offload here */ 2165d79b6df6SOr Gerlitz continue; 2166d79b6df6SOr Gerlitz 2167d79b6df6SOr Gerlitz if (s_mask && a_mask) { 2168e98bedf5SEli Britstein NL_SET_ERR_MSG_MOD(extack, 2169e98bedf5SEli Britstein "can't set and add to the same HW field"); 2170d79b6df6SOr Gerlitz printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field); 2171d79b6df6SOr Gerlitz return -EOPNOTSUPP; 2172d79b6df6SOr Gerlitz } 2173d79b6df6SOr Gerlitz 2174d79b6df6SOr Gerlitz if (nactions == max_actions) { 2175e98bedf5SEli Britstein NL_SET_ERR_MSG_MOD(extack, 2176e98bedf5SEli Britstein "too many pedit actions, can't offload"); 2177d79b6df6SOr Gerlitz printk(KERN_WARNING "mlx5: parsed %d pedit actions, can't do more\n", nactions); 2178d79b6df6SOr Gerlitz return -EOPNOTSUPP; 2179d79b6df6SOr Gerlitz } 2180d79b6df6SOr Gerlitz 218127c11b6bSEli Britstein skip = false; 2182d79b6df6SOr Gerlitz if (s_mask) { 218327c11b6bSEli Britstein void *match_mask = headers_c + f->match_offset; 
218427c11b6bSEli Britstein void *match_val = headers_v + f->match_offset; 218527c11b6bSEli Britstein 2186d79b6df6SOr Gerlitz cmd = MLX5_ACTION_TYPE_SET; 2187d79b6df6SOr Gerlitz mask = s_mask; 2188d79b6df6SOr Gerlitz vals_p = (void *)set_vals + f->offset; 218927c11b6bSEli Britstein /* don't rewrite if we have a match on the same value */ 219027c11b6bSEli Britstein if (cmp_val_mask(vals_p, s_masks_p, match_val, 219127c11b6bSEli Britstein match_mask, f->size)) 219227c11b6bSEli Britstein skip = true; 2193d79b6df6SOr Gerlitz /* clear to denote we consumed this field */ 2194d79b6df6SOr Gerlitz memset(s_masks_p, 0, f->size); 2195d79b6df6SOr Gerlitz } else { 219627c11b6bSEli Britstein u32 zero = 0; 219727c11b6bSEli Britstein 2198d79b6df6SOr Gerlitz cmd = MLX5_ACTION_TYPE_ADD; 2199d79b6df6SOr Gerlitz mask = a_mask; 2200d79b6df6SOr Gerlitz vals_p = (void *)add_vals + f->offset; 220127c11b6bSEli Britstein /* add 0 is no change */ 220227c11b6bSEli Britstein if (!memcmp(vals_p, &zero, f->size)) 220327c11b6bSEli Britstein skip = true; 2204d79b6df6SOr Gerlitz /* clear to denote we consumed this field */ 2205d79b6df6SOr Gerlitz memset(a_masks_p, 0, f->size); 2206d79b6df6SOr Gerlitz } 220727c11b6bSEli Britstein if (skip) 220827c11b6bSEli Britstein continue; 2209d79b6df6SOr Gerlitz 2210d79b6df6SOr Gerlitz field_bsize = f->size * BITS_PER_BYTE; 2211e3ca4e05SOr Gerlitz 22122b64bebaSOr Gerlitz if (field_bsize == 32) { 22132b64bebaSOr Gerlitz mask_be32 = *(__be32 *)&mask; 22142b64bebaSOr Gerlitz mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32)); 22152b64bebaSOr Gerlitz } else if (field_bsize == 16) { 22162b64bebaSOr Gerlitz mask_be16 = *(__be16 *)&mask; 22172b64bebaSOr Gerlitz mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16)); 22182b64bebaSOr Gerlitz } 22192b64bebaSOr Gerlitz 2220d79b6df6SOr Gerlitz first = find_first_bit(&mask, field_bsize); 22212b64bebaSOr Gerlitz next_z = find_next_zero_bit(&mask, field_bsize, first); 2222d79b6df6SOr Gerlitz last = 
find_last_bit(&mask, field_bsize); 22232b64bebaSOr Gerlitz if (first < next_z && next_z < last) { 2224e98bedf5SEli Britstein NL_SET_ERR_MSG_MOD(extack, 2225e98bedf5SEli Britstein "rewrite of few sub-fields isn't supported"); 22262b64bebaSOr Gerlitz printk(KERN_WARNING "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n", 2227d79b6df6SOr Gerlitz mask); 2228d79b6df6SOr Gerlitz return -EOPNOTSUPP; 2229d79b6df6SOr Gerlitz } 2230d79b6df6SOr Gerlitz 2231d79b6df6SOr Gerlitz MLX5_SET(set_action_in, action, action_type, cmd); 2232d79b6df6SOr Gerlitz MLX5_SET(set_action_in, action, field, f->field); 2233d79b6df6SOr Gerlitz 2234d79b6df6SOr Gerlitz if (cmd == MLX5_ACTION_TYPE_SET) { 22352b64bebaSOr Gerlitz MLX5_SET(set_action_in, action, offset, first); 2236d79b6df6SOr Gerlitz /* length is num of bits to be written, zero means length of 32 */ 22372b64bebaSOr Gerlitz MLX5_SET(set_action_in, action, length, (last - first + 1)); 2238d79b6df6SOr Gerlitz } 2239d79b6df6SOr Gerlitz 2240d79b6df6SOr Gerlitz if (field_bsize == 32) 22412b64bebaSOr Gerlitz MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p) >> first); 2242d79b6df6SOr Gerlitz else if (field_bsize == 16) 22432b64bebaSOr Gerlitz MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p) >> first); 2244d79b6df6SOr Gerlitz else if (field_bsize == 8) 22452b64bebaSOr Gerlitz MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first); 2246d79b6df6SOr Gerlitz 2247d79b6df6SOr Gerlitz action += action_size; 2248d79b6df6SOr Gerlitz nactions++; 2249d79b6df6SOr Gerlitz } 2250d79b6df6SOr Gerlitz 2251d79b6df6SOr Gerlitz parse_attr->num_mod_hdr_actions = nactions; 2252d79b6df6SOr Gerlitz return 0; 2253d79b6df6SOr Gerlitz } 2254d79b6df6SOr Gerlitz 22552cc1cb1dSTonghao Zhang static int mlx5e_flow_namespace_max_modify_action(struct mlx5_core_dev *mdev, 22562cc1cb1dSTonghao Zhang int namespace) 22572cc1cb1dSTonghao Zhang { 22582cc1cb1dSTonghao Zhang if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */ 
22592cc1cb1dSTonghao Zhang return MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, max_modify_header_actions); 22602cc1cb1dSTonghao Zhang else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */ 22612cc1cb1dSTonghao Zhang return MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_modify_header_actions); 22622cc1cb1dSTonghao Zhang } 22632cc1cb1dSTonghao Zhang 2264d79b6df6SOr Gerlitz static int alloc_mod_hdr_actions(struct mlx5e_priv *priv, 2265c500c86bSPablo Neira Ayuso struct pedit_headers_action *hdrs, 2266c500c86bSPablo Neira Ayuso int namespace, 2267d79b6df6SOr Gerlitz struct mlx5e_tc_flow_parse_attr *parse_attr) 2268d79b6df6SOr Gerlitz { 2269d79b6df6SOr Gerlitz int nkeys, action_size, max_actions; 2270d79b6df6SOr Gerlitz 2271c500c86bSPablo Neira Ayuso nkeys = hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits + 2272c500c86bSPablo Neira Ayuso hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits; 2273d79b6df6SOr Gerlitz action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto); 2274d79b6df6SOr Gerlitz 22752cc1cb1dSTonghao Zhang max_actions = mlx5e_flow_namespace_max_modify_action(priv->mdev, namespace); 2276d79b6df6SOr Gerlitz /* can get up to crazingly 16 HW actions in 32 bits pedit SW key */ 2277d79b6df6SOr Gerlitz max_actions = min(max_actions, nkeys * 16); 2278d79b6df6SOr Gerlitz 2279d79b6df6SOr Gerlitz parse_attr->mod_hdr_actions = kcalloc(max_actions, action_size, GFP_KERNEL); 2280d79b6df6SOr Gerlitz if (!parse_attr->mod_hdr_actions) 2281d79b6df6SOr Gerlitz return -ENOMEM; 2282d79b6df6SOr Gerlitz 2283218d05ceSTonghao Zhang parse_attr->max_mod_hdr_actions = max_actions; 2284d79b6df6SOr Gerlitz return 0; 2285d79b6df6SOr Gerlitz } 2286d79b6df6SOr Gerlitz 2287d79b6df6SOr Gerlitz static const struct pedit_headers zero_masks = {}; 2288d79b6df6SOr Gerlitz 2289d79b6df6SOr Gerlitz static int parse_tc_pedit_action(struct mlx5e_priv *priv, 229073867881SPablo Neira Ayuso const struct flow_action_entry *act, int namespace, 2291e98bedf5SEli Britstein struct mlx5e_tc_flow_parse_attr *parse_attr, 
2292c500c86bSPablo Neira Ayuso struct pedit_headers_action *hdrs, 2293e98bedf5SEli Britstein struct netlink_ext_ack *extack) 2294d79b6df6SOr Gerlitz { 229573867881SPablo Neira Ayuso u8 cmd = (act->id == FLOW_ACTION_MANGLE) ? 0 : 1; 229673867881SPablo Neira Ayuso int err = -EOPNOTSUPP; 2297d79b6df6SOr Gerlitz u32 mask, val, offset; 229873867881SPablo Neira Ayuso u8 htype; 2299d79b6df6SOr Gerlitz 230073867881SPablo Neira Ayuso htype = act->mangle.htype; 2301d79b6df6SOr Gerlitz err = -EOPNOTSUPP; /* can't be all optimistic */ 2302d79b6df6SOr Gerlitz 230373867881SPablo Neira Ayuso if (htype == FLOW_ACT_MANGLE_UNSPEC) { 230473867881SPablo Neira Ayuso NL_SET_ERR_MSG_MOD(extack, "legacy pedit isn't offloaded"); 2305d79b6df6SOr Gerlitz goto out_err; 2306d79b6df6SOr Gerlitz } 2307d79b6df6SOr Gerlitz 23082cc1cb1dSTonghao Zhang if (!mlx5e_flow_namespace_max_modify_action(priv->mdev, namespace)) { 23092cc1cb1dSTonghao Zhang NL_SET_ERR_MSG_MOD(extack, 23102cc1cb1dSTonghao Zhang "The pedit offload action is not supported"); 23112cc1cb1dSTonghao Zhang goto out_err; 23122cc1cb1dSTonghao Zhang } 23132cc1cb1dSTonghao Zhang 231473867881SPablo Neira Ayuso mask = act->mangle.mask; 231573867881SPablo Neira Ayuso val = act->mangle.val; 231673867881SPablo Neira Ayuso offset = act->mangle.offset; 2317d79b6df6SOr Gerlitz 2318c500c86bSPablo Neira Ayuso err = set_pedit_val(htype, ~mask, val, offset, &hdrs[cmd]); 2319d79b6df6SOr Gerlitz if (err) 2320d79b6df6SOr Gerlitz goto out_err; 2321c500c86bSPablo Neira Ayuso 2322c500c86bSPablo Neira Ayuso hdrs[cmd].pedits++; 2323d79b6df6SOr Gerlitz 2324c500c86bSPablo Neira Ayuso return 0; 2325c500c86bSPablo Neira Ayuso out_err: 2326c500c86bSPablo Neira Ayuso return err; 2327c500c86bSPablo Neira Ayuso } 2328c500c86bSPablo Neira Ayuso 2329c500c86bSPablo Neira Ayuso static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace, 2330c500c86bSPablo Neira Ayuso struct mlx5e_tc_flow_parse_attr *parse_attr, 2331c500c86bSPablo Neira Ayuso struct 
pedit_headers_action *hdrs, 233227c11b6bSEli Britstein u32 *action_flags, 2333c500c86bSPablo Neira Ayuso struct netlink_ext_ack *extack) 2334c500c86bSPablo Neira Ayuso { 2335c500c86bSPablo Neira Ayuso struct pedit_headers *cmd_masks; 2336c500c86bSPablo Neira Ayuso int err; 2337c500c86bSPablo Neira Ayuso u8 cmd; 2338c500c86bSPablo Neira Ayuso 2339218d05ceSTonghao Zhang if (!parse_attr->mod_hdr_actions) { 2340c500c86bSPablo Neira Ayuso err = alloc_mod_hdr_actions(priv, hdrs, namespace, parse_attr); 2341d79b6df6SOr Gerlitz if (err) 2342d79b6df6SOr Gerlitz goto out_err; 2343218d05ceSTonghao Zhang } 2344d79b6df6SOr Gerlitz 234527c11b6bSEli Britstein err = offload_pedit_fields(hdrs, parse_attr, action_flags, extack); 2346d79b6df6SOr Gerlitz if (err < 0) 2347d79b6df6SOr Gerlitz goto out_dealloc_parsed_actions; 2348d79b6df6SOr Gerlitz 2349d79b6df6SOr Gerlitz for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) { 2350c500c86bSPablo Neira Ayuso cmd_masks = &hdrs[cmd].masks; 2351d79b6df6SOr Gerlitz if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) { 2352e98bedf5SEli Britstein NL_SET_ERR_MSG_MOD(extack, 2353e98bedf5SEli Britstein "attempt to offload an unsupported field"); 2354b3a433deSOr Gerlitz netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd); 2355d79b6df6SOr Gerlitz print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS, 2356d79b6df6SOr Gerlitz 16, 1, cmd_masks, sizeof(zero_masks), true); 2357d79b6df6SOr Gerlitz err = -EOPNOTSUPP; 2358d79b6df6SOr Gerlitz goto out_dealloc_parsed_actions; 2359d79b6df6SOr Gerlitz } 2360d79b6df6SOr Gerlitz } 2361d79b6df6SOr Gerlitz 2362d79b6df6SOr Gerlitz return 0; 2363d79b6df6SOr Gerlitz 2364d79b6df6SOr Gerlitz out_dealloc_parsed_actions: 2365d79b6df6SOr Gerlitz kfree(parse_attr->mod_hdr_actions); 2366d79b6df6SOr Gerlitz out_err: 2367d79b6df6SOr Gerlitz return err; 2368d79b6df6SOr Gerlitz } 2369d79b6df6SOr Gerlitz 2370e98bedf5SEli Britstein static bool csum_offload_supported(struct mlx5e_priv *priv, 
2371e98bedf5SEli Britstein u32 action, 2372e98bedf5SEli Britstein u32 update_flags, 2373e98bedf5SEli Britstein struct netlink_ext_ack *extack) 237426c02749SOr Gerlitz { 237526c02749SOr Gerlitz u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP | 237626c02749SOr Gerlitz TCA_CSUM_UPDATE_FLAG_UDP; 237726c02749SOr Gerlitz 237826c02749SOr Gerlitz /* The HW recalcs checksums only if re-writing headers */ 237926c02749SOr Gerlitz if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) { 2380e98bedf5SEli Britstein NL_SET_ERR_MSG_MOD(extack, 2381e98bedf5SEli Britstein "TC csum action is only offloaded with pedit"); 238226c02749SOr Gerlitz netdev_warn(priv->netdev, 238326c02749SOr Gerlitz "TC csum action is only offloaded with pedit\n"); 238426c02749SOr Gerlitz return false; 238526c02749SOr Gerlitz } 238626c02749SOr Gerlitz 238726c02749SOr Gerlitz if (update_flags & ~prot_flags) { 2388e98bedf5SEli Britstein NL_SET_ERR_MSG_MOD(extack, 2389e98bedf5SEli Britstein "can't offload TC csum action for some header/s"); 239026c02749SOr Gerlitz netdev_warn(priv->netdev, 239126c02749SOr Gerlitz "can't offload TC csum action for some header/s - flags %#x\n", 239226c02749SOr Gerlitz update_flags); 239326c02749SOr Gerlitz return false; 239426c02749SOr Gerlitz } 239526c02749SOr Gerlitz 239626c02749SOr Gerlitz return true; 239726c02749SOr Gerlitz } 239826c02749SOr Gerlitz 23998998576bSDmytro Linkin struct ip_ttl_word { 24008998576bSDmytro Linkin __u8 ttl; 24018998576bSDmytro Linkin __u8 protocol; 24028998576bSDmytro Linkin __sum16 check; 24038998576bSDmytro Linkin }; 24048998576bSDmytro Linkin 24058998576bSDmytro Linkin struct ipv6_hoplimit_word { 24068998576bSDmytro Linkin __be16 payload_len; 24078998576bSDmytro Linkin __u8 nexthdr; 24088998576bSDmytro Linkin __u8 hop_limit; 24098998576bSDmytro Linkin }; 24108998576bSDmytro Linkin 24118998576bSDmytro Linkin static bool is_action_keys_supported(const struct flow_action_entry *act) 24128998576bSDmytro Linkin { 
24138998576bSDmytro Linkin u32 mask, offset; 24148998576bSDmytro Linkin u8 htype; 24158998576bSDmytro Linkin 24168998576bSDmytro Linkin htype = act->mangle.htype; 24178998576bSDmytro Linkin offset = act->mangle.offset; 24188998576bSDmytro Linkin mask = ~act->mangle.mask; 24198998576bSDmytro Linkin /* For IPv4 & IPv6 header check 4 byte word, 24208998576bSDmytro Linkin * to determine that modified fields 24218998576bSDmytro Linkin * are NOT ttl & hop_limit only. 24228998576bSDmytro Linkin */ 24238998576bSDmytro Linkin if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4) { 24248998576bSDmytro Linkin struct ip_ttl_word *ttl_word = 24258998576bSDmytro Linkin (struct ip_ttl_word *)&mask; 24268998576bSDmytro Linkin 24278998576bSDmytro Linkin if (offset != offsetof(struct iphdr, ttl) || 24288998576bSDmytro Linkin ttl_word->protocol || 24298998576bSDmytro Linkin ttl_word->check) { 24308998576bSDmytro Linkin return true; 24318998576bSDmytro Linkin } 24328998576bSDmytro Linkin } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) { 24338998576bSDmytro Linkin struct ipv6_hoplimit_word *hoplimit_word = 24348998576bSDmytro Linkin (struct ipv6_hoplimit_word *)&mask; 24358998576bSDmytro Linkin 24368998576bSDmytro Linkin if (offset != offsetof(struct ipv6hdr, payload_len) || 24378998576bSDmytro Linkin hoplimit_word->payload_len || 24388998576bSDmytro Linkin hoplimit_word->nexthdr) { 24398998576bSDmytro Linkin return true; 24408998576bSDmytro Linkin } 24418998576bSDmytro Linkin } 24428998576bSDmytro Linkin return false; 24438998576bSDmytro Linkin } 24448998576bSDmytro Linkin 2445bdd66ac0SOr Gerlitz static bool modify_header_match_supported(struct mlx5_flow_spec *spec, 244673867881SPablo Neira Ayuso struct flow_action *flow_action, 24471651925dSGuy Shattah u32 actions, 2448e98bedf5SEli Britstein struct netlink_ext_ack *extack) 2449bdd66ac0SOr Gerlitz { 245073867881SPablo Neira Ayuso const struct flow_action_entry *act; 2451bdd66ac0SOr Gerlitz bool modify_ip_header; 2452bdd66ac0SOr Gerlitz void 
*headers_v; 2453bdd66ac0SOr Gerlitz u16 ethertype; 24548998576bSDmytro Linkin u8 ip_proto; 245573867881SPablo Neira Ayuso int i; 2456bdd66ac0SOr Gerlitz 24578377629eSEli Britstein headers_v = get_match_headers_value(actions, spec); 2458bdd66ac0SOr Gerlitz ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype); 2459bdd66ac0SOr Gerlitz 2460bdd66ac0SOr Gerlitz /* for non-IP we only re-write MACs, so we're okay */ 2461bdd66ac0SOr Gerlitz if (ethertype != ETH_P_IP && ethertype != ETH_P_IPV6) 2462bdd66ac0SOr Gerlitz goto out_ok; 2463bdd66ac0SOr Gerlitz 2464bdd66ac0SOr Gerlitz modify_ip_header = false; 246573867881SPablo Neira Ayuso flow_action_for_each(i, act, flow_action) { 246673867881SPablo Neira Ayuso if (act->id != FLOW_ACTION_MANGLE && 246773867881SPablo Neira Ayuso act->id != FLOW_ACTION_ADD) 2468bdd66ac0SOr Gerlitz continue; 2469bdd66ac0SOr Gerlitz 24708998576bSDmytro Linkin if (is_action_keys_supported(act)) { 2471bdd66ac0SOr Gerlitz modify_ip_header = true; 2472bdd66ac0SOr Gerlitz break; 2473bdd66ac0SOr Gerlitz } 2474bdd66ac0SOr Gerlitz } 2475bdd66ac0SOr Gerlitz 2476bdd66ac0SOr Gerlitz ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol); 24771ccef350SJianbo Liu if (modify_ip_header && ip_proto != IPPROTO_TCP && 24781ccef350SJianbo Liu ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) { 2479e98bedf5SEli Britstein NL_SET_ERR_MSG_MOD(extack, 2480e98bedf5SEli Britstein "can't offload re-write of non TCP/UDP"); 2481bdd66ac0SOr Gerlitz pr_info("can't offload re-write of ip proto %d\n", ip_proto); 2482bdd66ac0SOr Gerlitz return false; 2483bdd66ac0SOr Gerlitz } 2484bdd66ac0SOr Gerlitz 2485bdd66ac0SOr Gerlitz out_ok: 2486bdd66ac0SOr Gerlitz return true; 2487bdd66ac0SOr Gerlitz } 2488bdd66ac0SOr Gerlitz 2489bdd66ac0SOr Gerlitz static bool actions_match_supported(struct mlx5e_priv *priv, 249073867881SPablo Neira Ayuso struct flow_action *flow_action, 2491bdd66ac0SOr Gerlitz struct mlx5e_tc_flow_parse_attr *parse_attr, 2492e98bedf5SEli 
Britstein struct mlx5e_tc_flow *flow, 2493e98bedf5SEli Britstein struct netlink_ext_ack *extack) 2494bdd66ac0SOr Gerlitz { 2495bdd66ac0SOr Gerlitz u32 actions; 2496bdd66ac0SOr Gerlitz 2497226f2ca3SVlad Buslov if (mlx5e_is_eswitch_flow(flow)) 2498bdd66ac0SOr Gerlitz actions = flow->esw_attr->action; 2499bdd66ac0SOr Gerlitz else 2500bdd66ac0SOr Gerlitz actions = flow->nic_attr->action; 2501bdd66ac0SOr Gerlitz 2502226f2ca3SVlad Buslov if (flow_flag_test(flow, EGRESS) && 250335a605dbSEli Britstein !((actions & MLX5_FLOW_CONTEXT_ACTION_DECAP) || 25046830b468STonghao Zhang (actions & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) || 25056830b468STonghao Zhang (actions & MLX5_FLOW_CONTEXT_ACTION_DROP))) 25067e29392eSRoi Dayan return false; 25077e29392eSRoi Dayan 2508bdd66ac0SOr Gerlitz if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) 250973867881SPablo Neira Ayuso return modify_header_match_supported(&parse_attr->spec, 2510a655fe9fSDavid S. Miller flow_action, actions, 2511e98bedf5SEli Britstein extack); 2512bdd66ac0SOr Gerlitz 2513bdd66ac0SOr Gerlitz return true; 2514bdd66ac0SOr Gerlitz } 2515bdd66ac0SOr Gerlitz 25165c65c564SOr Gerlitz static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv) 25175c65c564SOr Gerlitz { 25185c65c564SOr Gerlitz struct mlx5_core_dev *fmdev, *pmdev; 2519816f6706SOr Gerlitz u64 fsystem_guid, psystem_guid; 25205c65c564SOr Gerlitz 25215c65c564SOr Gerlitz fmdev = priv->mdev; 25225c65c564SOr Gerlitz pmdev = peer_priv->mdev; 25235c65c564SOr Gerlitz 252459c9d35eSAlaa Hleihel fsystem_guid = mlx5_query_nic_system_image_guid(fmdev); 252559c9d35eSAlaa Hleihel psystem_guid = mlx5_query_nic_system_image_guid(pmdev); 25265c65c564SOr Gerlitz 2527816f6706SOr Gerlitz return (fsystem_guid == psystem_guid); 25285c65c564SOr Gerlitz } 25295c65c564SOr Gerlitz 2530bdc837eeSEli Britstein static int add_vlan_rewrite_action(struct mlx5e_priv *priv, int namespace, 2531bdc837eeSEli Britstein const struct flow_action_entry *act, 2532bdc837eeSEli Britstein struct 
mlx5e_tc_flow_parse_attr *parse_attr,
				   struct pedit_headers_action *hdrs,
				   u32 *action, struct netlink_ext_ack *extack)
{
	u16 mask16 = VLAN_VID_MASK;
	u16 val16 = act->vlan.vid & VLAN_VID_MASK;
	/* Express the VLAN VID rewrite as an equivalent pedit (MANGLE) on the
	 * TCI halfword of the 802.1Q header, so it reuses the generic
	 * modify-header machinery below.
	 */
	const struct flow_action_entry pedit_act = {
		.id = FLOW_ACTION_MANGLE,
		.mangle.htype = FLOW_ACT_MANGLE_HDR_TYPE_ETH,
		.mangle.offset = offsetof(struct vlan_ethhdr, h_vlan_TCI),
		.mangle.mask = ~(u32)be16_to_cpu(*(__be16 *)&mask16),
		.mangle.val = (u32)be16_to_cpu(*(__be16 *)&val16),
	};
	u8 match_prio_mask, match_prio_val;
	void *headers_c, *headers_v;
	int err;

	headers_c = get_match_headers_criteria(*action, &parse_attr->spec);
	headers_v = get_match_headers_value(*action, &parse_attr->spec);

	/* Rewriting the TCI is only well-defined when the rule actually
	 * matches on a VLAN-tagged packet.
	 */
	if (!(MLX5_GET(fte_match_set_lyr_2_4, headers_c, cvlan_tag) &&
	      MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag))) {
		NL_SET_ERR_MSG_MOD(extack,
				   "VLAN rewrite action must have VLAN protocol match");
		return -EOPNOTSUPP;
	}

	/* Only the VID bits are mangled above; refuse a rewrite that would
	 * need to change the PCP (prio) bits relative to what was matched.
	 */
	match_prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
	match_prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
	if (act->vlan.prio != (match_prio_val & match_prio_mask)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Changing VLAN prio is not supported");
		return -EOPNOTSUPP;
	}

	err = parse_tc_pedit_action(priv, &pedit_act, namespace, parse_attr,
				    hdrs, NULL);
	*action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;

	return err;
}

/* Build a VLAN rewrite that zeroes the VID while keeping the priority bits
 * taken from the rule's own VLAN-prio match, then funnel it through
 * add_vlan_rewrite_action().  Used (FDB namespace) when the device runs in
 * prio-tag mode and a plain VLAN pop must be replaced by a rewrite.
 */
static int
add_vlan_prio_tag_rewrite_action(struct mlx5e_priv *priv,
				 struct mlx5e_tc_flow_parse_attr *parse_attr,
				 struct pedit_headers_action *hdrs,
				 u32 *action, struct netlink_ext_ack *extack)
{
	const struct flow_action_entry prio_tag_act = {
		.vlan.vid = 0,
		.vlan.prio =
			MLX5_GET(fte_match_set_lyr_2_4,
				 get_match_headers_value(*action,
							 &parse_attr->spec),
				 first_prio) &
			MLX5_GET(fte_match_set_lyr_2_4,
				 get_match_headers_criteria(*action,
							    &parse_attr->spec),
				 first_prio),
	};

	return add_vlan_rewrite_action(priv, MLX5_FLOW_NAMESPACE_FDB,
				       &prio_tag_act, parse_attr, hdrs, action,
				       extack);
}

/* Translate a TC flow_action list into NIC (non-eswitch) mlx5 flow
 * attributes: accumulates MLX5_FLOW_CONTEXT_ACTION_* bits into attr->action
 * and fills attr->flow_tag / parse_attr as needed.
 *
 * Returns 0 on success or a negative errno (with extack populated) when an
 * action cannot be offloaded.
 */
static int parse_tc_nic_actions(struct mlx5e_priv *priv,
				struct flow_action *flow_action,
				struct mlx5e_tc_flow_parse_attr *parse_attr,
				struct mlx5e_tc_flow *flow,
				struct netlink_ext_ack *extack)
{
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	struct pedit_headers_action hdrs[2] = {};
	const struct flow_action_entry *act;
	u32 action = 0;
	int err, i;

	if (!flow_action_has_entries(flow_action))
		return -EINVAL;

	attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_DROP:
			action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
			/* Attach a counter to the drop only if the NIC RX
			 * flow table supports flow counters.
			 */
			if (MLX5_CAP_FLOWTABLE(priv->mdev,
					       flow_table_properties_nic_receive.flow_counter))
				action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
			break;
		case FLOW_ACTION_MANGLE:
		case FLOW_ACTION_ADD:
			err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_KERNEL,
						    parse_attr, hdrs, extack);
			if (err)
				return err;

			action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
				  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			break;
		case FLOW_ACTION_VLAN_MANGLE:
			err = add_vlan_rewrite_action(priv,
						      MLX5_FLOW_NAMESPACE_KERNEL,
						      act, parse_attr, hdrs,
						      &action, extack);
			if (err)
				return err;

			break;
		case FLOW_ACTION_CSUM:
			/* csum is offloadable only when it recomputes fields
			 * this rule already rewrites; otherwise reject.
			 */
			if (csum_offload_supported(priv, action,
						   act->csum_flags,
						   extack))
				break;

			return -EOPNOTSUPP;
		case FLOW_ACTION_REDIRECT: {
			struct net_device *peer_dev = act->dev;

			/* NIC-mode redirect is only supported as hairpin
			 * between two mlx5 netdevs on the same HW.
			 */
			if (priv->netdev->netdev_ops == peer_dev->netdev_ops &&
			    same_hw_devs(priv, netdev_priv(peer_dev))) {
				parse_attr->mirred_ifindex[0] = peer_dev->ifindex;
				flow_flag_set(flow, HAIRPIN);
				action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
					  MLX5_FLOW_CONTEXT_ACTION_COUNT;
			} else {
				NL_SET_ERR_MSG_MOD(extack,
						   "device is not on same HW, can't offload");
				netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
					    peer_dev->name);
				return -EINVAL;
			}
			}
			break;
		case FLOW_ACTION_MARK: {
			u32 mark = act->mark;

			/* The flow tag carrying the mark is limited to 16
			 * bits (MLX5E_TC_FLOW_ID_MASK).
			 */
			if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Bad flow mark - only 16 bit is supported");
				return -EINVAL;
			}

			attr->flow_tag = mark;
			action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			}
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
			return -EOPNOTSUPP;
		}
	}

	if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
	    hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
		err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL,
					    parse_attr, hdrs, &action, extack);
		if (err)
			return err;
		/* in case all pedit actions are skipped, remove the MOD_HDR
		 * flag.
		 */
		if (parse_attr->num_mod_hdr_actions == 0) {
			action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
			kfree(parse_attr->mod_hdr_actions);
		}
	}

	attr->action = action;
	if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
		return -EOPNOTSUPP;

	return 0;
}

/* Lookup key for the eswitch encap table: full tunnel key plus tunnel type. */
struct encap_key {
	const struct ip_tunnel_key *ip_tun_key;
	struct mlx5e_tc_tunnel *tc_tunnel;
};

/* Returns nonzero when the two keys differ (memcmp-style), 0 when equal. */
static inline int cmp_encap_info(struct encap_key *a,
				 struct encap_key *b)
{
	return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) ||
	       a->tc_tunnel->tunnel_type != b->tc_tunnel->tunnel_type;
}

static inline int
hash_encap_info(struct encap_key *key)
{
	/* Hash the raw tunnel key, seeded by the tunnel type so that e.g. two
	 * tunnel kinds with identical key bytes land in different buckets.
	 */
	return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key),
		     key->tc_tunnel->tunnel_type);
}


/* True when both netdevs are eswitch representors of the same HW and the
 * device reports the merged_eswitch capability.
 */
static bool is_merged_eswitch_dev(struct mlx5e_priv *priv,
				  struct net_device *peer_netdev)
{
	struct mlx5e_priv *peer_priv;

	peer_priv = netdev_priv(peer_netdev);

	return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) &&
		mlx5e_eswitch_rep(priv->netdev) &&
		mlx5e_eswitch_rep(peer_netdev) &&
		same_hw_devs(priv, peer_priv));
}



/* Find or create the encap entry matching this flow's tunnel info in
 * esw->offloads.encap_tbl and link the flow onto it.
 *
 * On success *encap_dev is set to the entry's egress device and *encap_valid
 * reports whether the entry already has a valid HW encap id
 * (MLX5_ENCAP_ENTRY_VALID); an invalid entry means neighbour resolution is
 * still pending.  Returns 0 or a negative errno.
 */
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow,
			      struct net_device *mirred_dev,
			      int out_index,
			      struct netlink_ext_ack *extack,
			      struct net_device **encap_dev,
			      bool *encap_valid)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	const struct ip_tunnel_info *tun_info;
	struct encap_key key, e_key;
	struct mlx5e_encap_entry *e;
	unsigned short family;
	uintptr_t hash_key;
	bool found = false;
	int err = 0;

	parse_attr = attr->parse_attr;
	tun_info = parse_attr->tun_info[out_index];
	family = ip_tunnel_info_af(tun_info);
	key.ip_tun_key = &tun_info->key;
	key.tc_tunnel = mlx5e_get_tc_tun(mirred_dev);
	if (!key.tc_tunnel) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported tunnel");
		return -EOPNOTSUPP;
	}

	hash_key = hash_encap_info(&key);

	hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
				   encap_hlist, hash_key) {
		e_key.ip_tun_key = &e->tun_info->key;
		e_key.tc_tunnel = e->tunnel;
		if (!cmp_encap_info(&e_key, &key)) {
			found = true;
			break;
		}
	}

	/* must verify if encap is valid or not */
	if (found)
		goto attach_flow;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->tun_info = tun_info;
	err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack);
	if (err)
		goto out_err;

	INIT_LIST_HEAD(&e->flows);

	/* Resolve the route/neighbour and build the encap header for the
	 * tunnel's address family.
	 */
	if (family == AF_INET)
		err = mlx5e_tc_tun_create_header_ipv4(priv, mirred_dev, e);
	else if (family == AF_INET6)
		err = mlx5e_tc_tun_create_header_ipv6(priv, mirred_dev, e);

	if (err)
		goto out_err;

	hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);

attach_flow:
	list_add(&flow->encaps[out_index].list, &e->flows);
	flow->encaps[out_index].index = out_index;
	*encap_dev = e->out_dev;
	if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
		attr->dests[out_index].encap_id = e->encap_id;
		attr->dests[out_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
		*encap_valid = true;
	} else {
		*encap_valid = false;
	}

	return err;

out_err:
	kfree(e);
	return err;
}

/* Translate one TC vlan push/pop action into eswitch attr vlan fields and
 * MLX5_FLOW_CONTEXT_ACTION_VLAN_* bits.  Supports up to MLX5_FS_VLAN_DEPTH
 * stacked vlan actions; the second level requires explicit device support.
 */
static int parse_tc_vlan_action(struct mlx5e_priv *priv,
				const struct flow_action_entry *act,
				struct mlx5_esw_flow_attr *attr,
				u32 *action)
{
	u8 vlan_idx = attr->total_vlan;

	if (vlan_idx >= MLX5_FS_VLAN_DEPTH)
		return -EOPNOTSUPP;

	switch (act->id) {
	case FLOW_ACTION_VLAN_POP:
		if (vlan_idx) {
			if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
								 MLX5_FS_VLAN_DEPTH))
				return -EOPNOTSUPP;

			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2;
		} else {
			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
		}
		break;
	case FLOW_ACTION_VLAN_PUSH:
		attr->vlan_vid[vlan_idx] = act->vlan.vid;
		attr->vlan_prio[vlan_idx] = act->vlan.prio;
		attr->vlan_proto[vlan_idx] = act->vlan.proto;
		if (!attr->vlan_proto[vlan_idx])
			attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q);

		if (vlan_idx) {
			if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
								 MLX5_FS_VLAN_DEPTH))
				return -EOPNOTSUPP;

			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
		} else {
			/* Devices without vlan-action support can still do a
			 * basic 802.1Q push with prio 0 (legacy path).
			 */
			if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) &&
			    (act->vlan.proto != htons(ETH_P_8021Q) ||
			     act->vlan.prio))
				return -EOPNOTSUPP;

			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
		}
		break;
	default:
		return -EINVAL;
	}

	attr->total_vlan = vlan_idx + 1;

	return 0;
}

/* Synthesize a vlan push for each vlan device layered on *out_dev, recursing
 * down to the real (iflink) device, which is returned through *out_dev.
 * NOTE(review): the dev_get_by_index_rcu() result is not NULL-checked before
 * is_vlan_dev() — presumably the iflink of a live vlan dev always resolves
 * under RCU; confirm.
 */
static int add_vlan_push_action(struct mlx5e_priv *priv,
				struct mlx5_esw_flow_attr *attr,
				struct net_device **out_dev,
				u32 *action)
{
	struct net_device *vlan_dev = *out_dev;
	struct flow_action_entry vlan_act = {
		.id = FLOW_ACTION_VLAN_PUSH,
		.vlan.vid = vlan_dev_vlan_id(vlan_dev),
		.vlan.proto = vlan_dev_vlan_proto(vlan_dev),
		.vlan.prio = 0,
	};
	int err;

	err = parse_tc_vlan_action(priv, &vlan_act, attr, action);
	if (err)
		return err;

	*out_dev = dev_get_by_index_rcu(dev_net(vlan_dev),
					dev_get_iflink(vlan_dev));
	if (is_vlan_dev(*out_dev))
		err = add_vlan_push_action(priv, attr, out_dev, action);

	return err;
}

/* Emit one vlan pop per vlan-encap level of the flow's filter device. */
static int add_vlan_pop_action(struct mlx5e_priv *priv,
			       struct mlx5_esw_flow_attr *attr,
			       u32 *action)
{
	int nest_level = vlan_get_encap_level(attr->parse_attr->filter_dev);
	struct flow_action_entry vlan_act = {
		.id = FLOW_ACTION_VLAN_POP,
	};
	int err = 0;

	while (nest_level--) {
		err = parse_tc_vlan_action(priv, &vlan_act, attr, action);
		if (err)
			return err;
	}

	return err;
}

/* True when out_dev is a legal eswitch forwarding target for priv: either a
 * merged-eswitch peer or a representor on the same HW.
 */
bool
mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
			       struct net_device *out_dev)
{
	if (is_merged_eswitch_dev(priv, out_dev))
		return true;

	return mlx5e_eswitch_rep(out_dev) &&
	       same_hw_devs(priv, netdev_priv(out_dev));
}

/* Translate a TC flow_action list into eswitch (FDB) mlx5 flow attributes:
 * forwarding destinations, vlan push/pop/rewrite, encap/decap, pedit,
 * goto-chain.  Accumulates MLX5_FLOW_CONTEXT_ACTION_* bits into attr->action.
 *
 * Returns 0 on success or a negative errno (with extack populated) when an
 * action combination cannot be offloaded.
 */
static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
				struct flow_action *flow_action,
				struct mlx5e_tc_flow *flow,
				struct netlink_ext_ack *extack)
{
	struct pedit_headers_action hdrs[2] = {};
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	const struct ip_tunnel_info *info = NULL;
	const struct flow_action_entry *act;
	bool encap = false;
	u32 action = 0;
	int err, i;

	if (!flow_action_has_entries(flow_action))
		return -EINVAL;

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_DROP:
			action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
				  MLX5_FLOW_CONTEXT_ACTION_COUNT;
			break;
		case FLOW_ACTION_MANGLE:
		case FLOW_ACTION_ADD:
			err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_FDB,
						    parse_attr, hdrs, extack);
			if (err)
				return err;

			action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
			/* destinations added so far must be reached before
			 * this header rewrite takes effect
			 */
			attr->split_count = attr->out_count;
			break;
		case FLOW_ACTION_CSUM:
			if (csum_offload_supported(priv, action,
						   act->csum_flags, extack))
				break;

			return -EOPNOTSUPP;
		case FLOW_ACTION_REDIRECT:
		case FLOW_ACTION_MIRRED: {
			struct mlx5e_priv *out_priv;
			struct net_device *out_dev;

			out_dev = act->dev;
			if (!out_dev) {
				/* out_dev is NULL when filters with
				 * non-existing mirred device are replayed to
				 * the driver.
				 */
				return -EINVAL;
			}

			if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
				NL_SET_ERR_MSG_MOD(extack,
						   "can't support more output ports, can't offload forwarding");
				pr_err("can't support more than %d output ports, can't offload forwarding\n",
				       attr->out_count);
				return -EOPNOTSUPP;
			}

			action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
				  MLX5_FLOW_CONTEXT_ACTION_COUNT;
			if (netdev_port_same_parent_id(priv->netdev, out_dev)) {
				struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
				struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
				struct net_device *uplink_upper;

				/* Redirect to a LAG master of our uplink is
				 * treated as redirect to the uplink itself.
				 */
				rcu_read_lock();
				uplink_upper =
					netdev_master_upper_dev_get_rcu(uplink_dev);
				if (uplink_upper &&
				    netif_is_lag_master(uplink_upper) &&
				    uplink_upper == out_dev)
					out_dev = uplink_dev;
				rcu_read_unlock();

				if (is_vlan_dev(out_dev)) {
					err = add_vlan_push_action(priv, attr,
								   &out_dev,
								   &action);
					if (err)
						return err;
				}

				if (is_vlan_dev(parse_attr->filter_dev)) {
					err = add_vlan_pop_action(priv, attr,
								  &action);
					if (err)
						return err;
				}

				if (!mlx5e_is_valid_eswitch_fwd_dev(priv, out_dev)) {
					NL_SET_ERR_MSG_MOD(extack,
							   "devices are not on same switch HW, can't offload forwarding");
					pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
					       priv->netdev->name, out_dev->name);
					return -EOPNOTSUPP;
				}

				out_priv = netdev_priv(out_dev);
				rpriv = out_priv->ppriv;
				attr->dests[attr->out_count].rep = rpriv->rep;
				attr->dests[attr->out_count].mdev = out_priv->mdev;
				attr->out_count++;
			} else if (encap) {
				/* A preceding tunnel_encap action marks this
				 * redirect as the encap egress device.
				 */
				parse_attr->mirred_ifindex[attr->out_count] =
					out_dev->ifindex;
				parse_attr->tun_info[attr->out_count] = info;
				encap = false;
				attr->dests[attr->out_count].flags |=
					MLX5_ESW_DEST_ENCAP;
				attr->out_count++;
				/* attr->dests[].rep is resolved when we
				 * handle encap
				 */
			} else if (parse_attr->filter_dev != priv->netdev) {
				/* All mlx5 devices are called to configure
				 * high level device filters. Therefore, the
				 * *attempt* to  install a filter on invalid
				 * eswitch should not trigger an explicit error
				 */
				return -EINVAL;
			} else {
				NL_SET_ERR_MSG_MOD(extack,
						   "devices are not on same switch HW, can't offload forwarding");
				pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
				       priv->netdev->name, out_dev->name);
				return -EINVAL;
			}
			}
			break;
		case FLOW_ACTION_TUNNEL_ENCAP:
			/* Remember the tunnel info; it is consumed by the
			 * following redirect/mirred action.
			 */
			info = act->tunnel;
			if (info)
				encap = true;
			else
				return -EOPNOTSUPP;

			break;
		case FLOW_ACTION_VLAN_PUSH:
		case FLOW_ACTION_VLAN_POP:
			if (act->id == FLOW_ACTION_VLAN_PUSH &&
			    (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP)) {
				/* Replace vlan pop+push with vlan modify */
				action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
				err = add_vlan_rewrite_action(priv,
							      MLX5_FLOW_NAMESPACE_FDB,
							      act, parse_attr, hdrs,
							      &action, extack);
			} else {
				err = parse_tc_vlan_action(priv, act, attr, &action);
			}
			if (err)
				return err;

			attr->split_count = attr->out_count;
			break;
		case FLOW_ACTION_VLAN_MANGLE:
			err = add_vlan_rewrite_action(priv,
						      MLX5_FLOW_NAMESPACE_FDB,
						      act, parse_attr, hdrs,
						      &action, extack);
			if (err)
				return err;

			attr->split_count = attr->out_count;
			break;
		case FLOW_ACTION_TUNNEL_DECAP:
			action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
			break;
		case FLOW_ACTION_GOTO: {
			u32 dest_chain = act->chain_index;
			u32 max_chain = mlx5_eswitch_get_chain_range(esw);

			if (dest_chain <= attr->chain) {
				NL_SET_ERR_MSG(extack, "Goto earlier chain isn't supported");
				return -EOPNOTSUPP;
			}
			if (dest_chain > max_chain) {
				NL_SET_ERR_MSG(extack, "Requested destination chain is out of supported range");
				return -EOPNOTSUPP;
			}
			action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
			attr->dest_chain = dest_chain;
			break;
			}
		default:
			NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
			return -EOPNOTSUPP;
		}
	}

	if (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
	    action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
		/* For prio tag mode, replace vlan pop with a vlan prio tag
		 * rewrite.
		 */
		action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
		err = add_vlan_prio_tag_rewrite_action(priv, parse_attr, hdrs,
						       &action, extack);
		if (err)
			return err;
	}

	if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
	    hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
		err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_FDB,
					    parse_attr, hdrs, &action, extack);
		if (err)
			return err;
		/* in case all pedit actions are skipped, remove the MOD_HDR
		 * flag. we might have set split_count either by pedit or
		 * pop/push. if there is no pop/push either, reset it too.
		 */
		if (parse_attr->num_mod_hdr_actions == 0) {
			action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
			kfree(parse_attr->mod_hdr_actions);
			if (!((action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
			      (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)))
				attr->split_count = 0;
		}
	}

	attr->action = action;
	if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
		return -EOPNOTSUPP;

	if (attr->dest_chain) {
		/* goto-chain implies a forward destination but cannot be
		 * combined with an explicit one (mirroring).
		 */
		if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
			NL_SET_ERR_MSG(extack, "Mirroring goto chain rules isn't supported");
			return -EOPNOTSUPP;
		}
		attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	}

	if (attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "current firmware doesn't support split rule for port mirroring");
		netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
		return -EOPNOTSUPP;
	}

	return 0;
}

/* Convert the driver-API MLX5_TC_FLAG() bits into internal
 * MLX5E_TC_FLOW_FLAG_* bits.
 */
static void get_flags(int flags, unsigned long *flow_flags)
{
	unsigned long __flow_flags = 0;

	if (flags & MLX5_TC_FLAG(INGRESS))
		__flow_flags
			|= BIT(MLX5E_TC_FLOW_FLAG_INGRESS);
	if (flags & MLX5_TC_FLAG(EGRESS))
		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_EGRESS);

	if (flags & MLX5_TC_FLAG(ESW_OFFLOAD))
		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
	if (flags & MLX5_TC_FLAG(NIC_OFFLOAD))
		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);

	*flow_flags = __flow_flags;
}

/* Offloaded-flow hashtable parameters: flows are keyed by the TC cookie. */
static const struct rhashtable_params tc_ht_params = {
	.head_offset = offsetof(struct mlx5e_tc_flow, node),
	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};

/* Pick the flow hashtable matching the offload type: the uplink rep's table
 * for eswitch offloads, the netdev-private table for NIC offloads.
 */
static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv,
				    unsigned long flags)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (flags & MLX5_TC_FLAG(ESW_OFFLOAD)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		return &uplink_rpriv->uplink_priv.tc_ht;
	} else /* NIC offload */
		return &priv->fs.tc.ht;
}

/* Decide whether this eswitch flow must be mirrored on the paired (devcom)
 * eswitch: only when the eswitches are paired, the bond/multipath LAG is
 * active, and the flow either ingresses a non-uplink rep or performs encap
 * (packet reformat).
 */
static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
{
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	bool is_rep_ingress = attr->in_rep->vport != MLX5_VPORT_UPLINK &&
		flow_flag_test(flow, INGRESS);
	bool act_is_encap = !!(attr->action &
			       MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT);
	bool esw_paired = mlx5_devcom_is_paired(attr->in_mdev->priv.devcom,
						MLX5_DEVCOM_ESW_OFFLOADS);

	if (!esw_paired)
		return false;

	if ((mlx5_lag_is_sriov(attr->in_mdev) ||
	     mlx5_lag_is_multipath(attr->in_mdev)) &&
	    (is_rep_ingress || act_is_encap))
		return true;

	return false;
}

/* Allocate and initialize a flow object (sized for the attr of the offload
 * type via attr_size) together with its parse_attr.  The refcount starts at
 * 1; on success ownership of both objects passes to the caller through
 * *__flow / *__parse_attr.  Returns 0 or -ENOMEM.
 */
static int
mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
		 struct flow_cls_offload *f, unsigned long flow_flags,
		 struct mlx5e_tc_flow_parse_attr **__parse_attr,
		 struct mlx5e_tc_flow **__flow)
{
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_flow *flow;
	int out_index, err;

	flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
	parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
	if (!parse_attr || !flow) {
		err = -ENOMEM;
		goto err_free;
	}

	flow->cookie = f->cookie;
	flow->flags = flow_flags;
	flow->priv = priv;
	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
		INIT_LIST_HEAD(&flow->encaps[out_index].list);
	INIT_LIST_HEAD(&flow->mod_hdr);
	INIT_LIST_HEAD(&flow->hairpin);
	refcount_set(&flow->refcnt, 1);

	*__flow = flow;
	*__parse_attr = parse_attr;

	return 0;

err_free:
	/* kfree(NULL)/kvfree(NULL) are no-ops, so partial allocation is safe */
	kfree(flow);
	kvfree(parse_attr);
	return err;
}

/* Seed an eswitch flow attr with the classifier's chain/prio, the ingress
 * rep/mdev, and the device that owns the flow counter (per the
 * counter_eswitch_affinity capability).
 */
static void
mlx5e_flow_esw_attr_init(struct mlx5_esw_flow_attr *esw_attr,
			 struct mlx5e_priv *priv,
			 struct mlx5e_tc_flow_parse_attr *parse_attr,
			 struct flow_cls_offload *f,
			 struct mlx5_eswitch_rep *in_rep,
			 struct mlx5_core_dev *in_mdev)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	esw_attr->parse_attr = parse_attr;
	esw_attr->chain = f->common.chain_index;
	esw_attr->prio = TC_H_MAJ(f->common.prio) >> 16;

	esw_attr->in_rep = in_rep;
	esw_attr->in_mdev = in_mdev;

	if (MLX5_CAP_ESW(esw->dev, counter_eswitch_affinity) ==
	    MLX5_COUNTER_SOURCE_ESWITCH)
		esw_attr->counter_dev = in_mdev;
	else
		esw_attr->counter_dev = priv->mdev;
}

static struct mlx5e_tc_flow *
__mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
		     struct
flow_cls_offload *f, 3305226f2ca3SVlad Buslov unsigned long flow_flags, 3306d11afc26SOz Shlomo struct net_device *filter_dev, 330704de7ddaSRoi Dayan struct mlx5_eswitch_rep *in_rep, 330871129676SJason Gunthorpe struct mlx5_core_dev *in_mdev) 3309a88780a9SRoi Dayan { 3310f9e30088SPablo Neira Ayuso struct flow_rule *rule = flow_cls_offload_flow_rule(f); 3311a88780a9SRoi Dayan struct netlink_ext_ack *extack = f->common.extack; 3312a88780a9SRoi Dayan struct mlx5e_tc_flow_parse_attr *parse_attr; 3313a88780a9SRoi Dayan struct mlx5e_tc_flow *flow; 3314a88780a9SRoi Dayan int attr_size, err; 3315a88780a9SRoi Dayan 3316226f2ca3SVlad Buslov flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH); 3317a88780a9SRoi Dayan attr_size = sizeof(struct mlx5_esw_flow_attr); 3318a88780a9SRoi Dayan err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags, 3319a88780a9SRoi Dayan &parse_attr, &flow); 3320a88780a9SRoi Dayan if (err) 3321a88780a9SRoi Dayan goto out; 3322988ab9c7STonghao Zhang 3323d11afc26SOz Shlomo parse_attr->filter_dev = filter_dev; 3324988ab9c7STonghao Zhang mlx5e_flow_esw_attr_init(flow->esw_attr, 3325988ab9c7STonghao Zhang priv, parse_attr, 3326988ab9c7STonghao Zhang f, in_rep, in_mdev); 3327988ab9c7STonghao Zhang 332854c177caSOz Shlomo err = parse_cls_flower(flow->priv, flow, &parse_attr->spec, 332954c177caSOz Shlomo f, filter_dev); 3330d11afc26SOz Shlomo if (err) 3331d11afc26SOz Shlomo goto err_free; 3332a88780a9SRoi Dayan 33336f9af8ffSTonghao Zhang err = parse_tc_fdb_actions(priv, &rule->action, flow, extack); 3334a88780a9SRoi Dayan if (err) 3335a88780a9SRoi Dayan goto err_free; 3336a88780a9SRoi Dayan 33377040632dSTonghao Zhang err = mlx5e_tc_add_fdb_flow(priv, flow, extack); 3338ef06c9eeSRoi Dayan if (err) { 3339ef06c9eeSRoi Dayan if (!(err == -ENETUNREACH && mlx5_lag_is_multipath(in_mdev))) 3340aa0cbbaeSOr Gerlitz goto err_free; 33415c40348cSOr Gerlitz 3342b4a23329SRoi Dayan add_unready_flow(flow); 3343ef06c9eeSRoi Dayan } 3344ef06c9eeSRoi Dayan 334571129676SJason Gunthorpe 
return flow; 3346e3a2b7edSAmir Vadai 3347e3a2b7edSAmir Vadai err_free: 33485a7e5bcbSVlad Buslov mlx5e_flow_put(priv, flow); 3349a88780a9SRoi Dayan out: 335071129676SJason Gunthorpe return ERR_PTR(err); 3351a88780a9SRoi Dayan } 3352a88780a9SRoi Dayan 3353f9e30088SPablo Neira Ayuso static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f, 335495dc1902SRoi Dayan struct mlx5e_tc_flow *flow, 3355226f2ca3SVlad Buslov unsigned long flow_flags) 335604de7ddaSRoi Dayan { 335704de7ddaSRoi Dayan struct mlx5e_priv *priv = flow->priv, *peer_priv; 335804de7ddaSRoi Dayan struct mlx5_eswitch *esw = priv->mdev->priv.eswitch, *peer_esw; 335904de7ddaSRoi Dayan struct mlx5_devcom *devcom = priv->mdev->priv.devcom; 336004de7ddaSRoi Dayan struct mlx5e_tc_flow_parse_attr *parse_attr; 336104de7ddaSRoi Dayan struct mlx5e_rep_priv *peer_urpriv; 336204de7ddaSRoi Dayan struct mlx5e_tc_flow *peer_flow; 336304de7ddaSRoi Dayan struct mlx5_core_dev *in_mdev; 336404de7ddaSRoi Dayan int err = 0; 336504de7ddaSRoi Dayan 336604de7ddaSRoi Dayan peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS); 336704de7ddaSRoi Dayan if (!peer_esw) 336804de7ddaSRoi Dayan return -ENODEV; 336904de7ddaSRoi Dayan 337004de7ddaSRoi Dayan peer_urpriv = mlx5_eswitch_get_uplink_priv(peer_esw, REP_ETH); 337104de7ddaSRoi Dayan peer_priv = netdev_priv(peer_urpriv->netdev); 337204de7ddaSRoi Dayan 337304de7ddaSRoi Dayan /* in_mdev is assigned of which the packet originated from. 337404de7ddaSRoi Dayan * So packets redirected to uplink use the same mdev of the 337504de7ddaSRoi Dayan * original flow and packets redirected from uplink use the 337604de7ddaSRoi Dayan * peer mdev. 
337704de7ddaSRoi Dayan */ 3378b05af6aaSBodong Wang if (flow->esw_attr->in_rep->vport == MLX5_VPORT_UPLINK) 337904de7ddaSRoi Dayan in_mdev = peer_priv->mdev; 338004de7ddaSRoi Dayan else 338104de7ddaSRoi Dayan in_mdev = priv->mdev; 338204de7ddaSRoi Dayan 338304de7ddaSRoi Dayan parse_attr = flow->esw_attr->parse_attr; 338495dc1902SRoi Dayan peer_flow = __mlx5e_add_fdb_flow(peer_priv, f, flow_flags, 338504de7ddaSRoi Dayan parse_attr->filter_dev, 338671129676SJason Gunthorpe flow->esw_attr->in_rep, in_mdev); 338771129676SJason Gunthorpe if (IS_ERR(peer_flow)) { 338871129676SJason Gunthorpe err = PTR_ERR(peer_flow); 338904de7ddaSRoi Dayan goto out; 339071129676SJason Gunthorpe } 339104de7ddaSRoi Dayan 339204de7ddaSRoi Dayan flow->peer_flow = peer_flow; 3393226f2ca3SVlad Buslov flow_flag_set(flow, DUP); 339404de7ddaSRoi Dayan mutex_lock(&esw->offloads.peer_mutex); 339504de7ddaSRoi Dayan list_add_tail(&flow->peer, &esw->offloads.peer_flows); 339604de7ddaSRoi Dayan mutex_unlock(&esw->offloads.peer_mutex); 339704de7ddaSRoi Dayan 339804de7ddaSRoi Dayan out: 339904de7ddaSRoi Dayan mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS); 340004de7ddaSRoi Dayan return err; 340104de7ddaSRoi Dayan } 340204de7ddaSRoi Dayan 340304de7ddaSRoi Dayan static int 340404de7ddaSRoi Dayan mlx5e_add_fdb_flow(struct mlx5e_priv *priv, 3405f9e30088SPablo Neira Ayuso struct flow_cls_offload *f, 3406226f2ca3SVlad Buslov unsigned long flow_flags, 340704de7ddaSRoi Dayan struct net_device *filter_dev, 340804de7ddaSRoi Dayan struct mlx5e_tc_flow **__flow) 340904de7ddaSRoi Dayan { 341004de7ddaSRoi Dayan struct mlx5e_rep_priv *rpriv = priv->ppriv; 341104de7ddaSRoi Dayan struct mlx5_eswitch_rep *in_rep = rpriv->rep; 341204de7ddaSRoi Dayan struct mlx5_core_dev *in_mdev = priv->mdev; 341304de7ddaSRoi Dayan struct mlx5e_tc_flow *flow; 341404de7ddaSRoi Dayan int err; 341504de7ddaSRoi Dayan 341671129676SJason Gunthorpe flow = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep, 
341771129676SJason Gunthorpe in_mdev); 341871129676SJason Gunthorpe if (IS_ERR(flow)) 341971129676SJason Gunthorpe return PTR_ERR(flow); 342004de7ddaSRoi Dayan 342104de7ddaSRoi Dayan if (is_peer_flow_needed(flow)) { 342295dc1902SRoi Dayan err = mlx5e_tc_add_fdb_peer_flow(f, flow, flow_flags); 342304de7ddaSRoi Dayan if (err) { 342404de7ddaSRoi Dayan mlx5e_tc_del_fdb_flow(priv, flow); 342504de7ddaSRoi Dayan goto out; 342604de7ddaSRoi Dayan } 342704de7ddaSRoi Dayan } 342804de7ddaSRoi Dayan 342904de7ddaSRoi Dayan *__flow = flow; 343004de7ddaSRoi Dayan 343104de7ddaSRoi Dayan return 0; 343204de7ddaSRoi Dayan 343304de7ddaSRoi Dayan out: 343404de7ddaSRoi Dayan return err; 343504de7ddaSRoi Dayan } 343604de7ddaSRoi Dayan 3437a88780a9SRoi Dayan static int 3438a88780a9SRoi Dayan mlx5e_add_nic_flow(struct mlx5e_priv *priv, 3439f9e30088SPablo Neira Ayuso struct flow_cls_offload *f, 3440226f2ca3SVlad Buslov unsigned long flow_flags, 3441d11afc26SOz Shlomo struct net_device *filter_dev, 3442a88780a9SRoi Dayan struct mlx5e_tc_flow **__flow) 3443a88780a9SRoi Dayan { 3444f9e30088SPablo Neira Ayuso struct flow_rule *rule = flow_cls_offload_flow_rule(f); 3445a88780a9SRoi Dayan struct netlink_ext_ack *extack = f->common.extack; 3446a88780a9SRoi Dayan struct mlx5e_tc_flow_parse_attr *parse_attr; 3447a88780a9SRoi Dayan struct mlx5e_tc_flow *flow; 3448a88780a9SRoi Dayan int attr_size, err; 3449a88780a9SRoi Dayan 3450bf07aa73SPaul Blakey /* multi-chain not supported for NIC rules */ 3451bf07aa73SPaul Blakey if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common)) 3452bf07aa73SPaul Blakey return -EOPNOTSUPP; 3453bf07aa73SPaul Blakey 3454226f2ca3SVlad Buslov flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC); 3455a88780a9SRoi Dayan attr_size = sizeof(struct mlx5_nic_flow_attr); 3456a88780a9SRoi Dayan err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags, 3457a88780a9SRoi Dayan &parse_attr, &flow); 3458a88780a9SRoi Dayan if (err) 3459a88780a9SRoi Dayan goto out; 3460a88780a9SRoi Dayan 
3461d11afc26SOz Shlomo parse_attr->filter_dev = filter_dev; 346254c177caSOz Shlomo err = parse_cls_flower(flow->priv, flow, &parse_attr->spec, 346354c177caSOz Shlomo f, filter_dev); 3464d11afc26SOz Shlomo if (err) 3465d11afc26SOz Shlomo goto err_free; 3466d11afc26SOz Shlomo 346773867881SPablo Neira Ayuso err = parse_tc_nic_actions(priv, &rule->action, parse_attr, flow, extack); 3468a88780a9SRoi Dayan if (err) 3469a88780a9SRoi Dayan goto err_free; 3470a88780a9SRoi Dayan 3471a88780a9SRoi Dayan err = mlx5e_tc_add_nic_flow(priv, parse_attr, flow, extack); 3472a88780a9SRoi Dayan if (err) 3473a88780a9SRoi Dayan goto err_free; 3474a88780a9SRoi Dayan 3475226f2ca3SVlad Buslov flow_flag_set(flow, OFFLOADED); 3476a88780a9SRoi Dayan kvfree(parse_attr); 3477a88780a9SRoi Dayan *__flow = flow; 3478a88780a9SRoi Dayan 3479a88780a9SRoi Dayan return 0; 3480a88780a9SRoi Dayan 3481a88780a9SRoi Dayan err_free: 34825a7e5bcbSVlad Buslov mlx5e_flow_put(priv, flow); 3483a88780a9SRoi Dayan kvfree(parse_attr); 3484a88780a9SRoi Dayan out: 3485a88780a9SRoi Dayan return err; 3486a88780a9SRoi Dayan } 3487a88780a9SRoi Dayan 3488a88780a9SRoi Dayan static int 3489a88780a9SRoi Dayan mlx5e_tc_add_flow(struct mlx5e_priv *priv, 3490f9e30088SPablo Neira Ayuso struct flow_cls_offload *f, 3491226f2ca3SVlad Buslov unsigned long flags, 3492d11afc26SOz Shlomo struct net_device *filter_dev, 3493a88780a9SRoi Dayan struct mlx5e_tc_flow **flow) 3494a88780a9SRoi Dayan { 3495a88780a9SRoi Dayan struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; 3496226f2ca3SVlad Buslov unsigned long flow_flags; 3497a88780a9SRoi Dayan int err; 3498a88780a9SRoi Dayan 3499a88780a9SRoi Dayan get_flags(flags, &flow_flags); 3500a88780a9SRoi Dayan 3501bf07aa73SPaul Blakey if (!tc_can_offload_extack(priv->netdev, f->common.extack)) 3502bf07aa73SPaul Blakey return -EOPNOTSUPP; 3503bf07aa73SPaul Blakey 3504f6455de0SBodong Wang if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS) 3505d11afc26SOz Shlomo err = mlx5e_add_fdb_flow(priv, f, flow_flags, 
3506d11afc26SOz Shlomo filter_dev, flow); 3507a88780a9SRoi Dayan else 3508d11afc26SOz Shlomo err = mlx5e_add_nic_flow(priv, f, flow_flags, 3509d11afc26SOz Shlomo filter_dev, flow); 3510a88780a9SRoi Dayan 3511a88780a9SRoi Dayan return err; 3512a88780a9SRoi Dayan } 3513a88780a9SRoi Dayan 351471d82d2aSOz Shlomo int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv, 3515226f2ca3SVlad Buslov struct flow_cls_offload *f, unsigned long flags) 3516a88780a9SRoi Dayan { 3517a88780a9SRoi Dayan struct netlink_ext_ack *extack = f->common.extack; 3518d9ee0491SOr Gerlitz struct rhashtable *tc_ht = get_tc_ht(priv, flags); 3519a88780a9SRoi Dayan struct mlx5e_tc_flow *flow; 3520a88780a9SRoi Dayan int err = 0; 3521a88780a9SRoi Dayan 3522c5d326b2SVlad Buslov rcu_read_lock(); 3523c5d326b2SVlad Buslov flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params); 3524c5d326b2SVlad Buslov rcu_read_unlock(); 3525a88780a9SRoi Dayan if (flow) { 3526a88780a9SRoi Dayan NL_SET_ERR_MSG_MOD(extack, 3527a88780a9SRoi Dayan "flow cookie already exists, ignoring"); 3528a88780a9SRoi Dayan netdev_warn_once(priv->netdev, 3529a88780a9SRoi Dayan "flow cookie %lx already exists, ignoring\n", 3530a88780a9SRoi Dayan f->cookie); 35310e1c1a2fSVlad Buslov err = -EEXIST; 3532a88780a9SRoi Dayan goto out; 3533a88780a9SRoi Dayan } 3534a88780a9SRoi Dayan 3535d11afc26SOz Shlomo err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow); 3536a88780a9SRoi Dayan if (err) 3537a88780a9SRoi Dayan goto out; 3538a88780a9SRoi Dayan 3539c5d326b2SVlad Buslov err = rhashtable_lookup_insert_fast(tc_ht, &flow->node, tc_ht_params); 3540a88780a9SRoi Dayan if (err) 3541a88780a9SRoi Dayan goto err_free; 3542a88780a9SRoi Dayan 3543a88780a9SRoi Dayan return 0; 3544a88780a9SRoi Dayan 3545a88780a9SRoi Dayan err_free: 35465a7e5bcbSVlad Buslov mlx5e_flow_put(priv, flow); 3547a88780a9SRoi Dayan out: 3548e3a2b7edSAmir Vadai return err; 3549e3a2b7edSAmir Vadai } 3550e3a2b7edSAmir Vadai 35518f8ae895SOr Gerlitz static bool 
same_flow_direction(struct mlx5e_tc_flow *flow, int flags) 35528f8ae895SOr Gerlitz { 3553226f2ca3SVlad Buslov bool dir_ingress = !!(flags & MLX5_TC_FLAG(INGRESS)); 3554226f2ca3SVlad Buslov bool dir_egress = !!(flags & MLX5_TC_FLAG(EGRESS)); 35558f8ae895SOr Gerlitz 3556226f2ca3SVlad Buslov return flow_flag_test(flow, INGRESS) == dir_ingress && 3557226f2ca3SVlad Buslov flow_flag_test(flow, EGRESS) == dir_egress; 35588f8ae895SOr Gerlitz } 35598f8ae895SOr Gerlitz 356071d82d2aSOz Shlomo int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv, 3561226f2ca3SVlad Buslov struct flow_cls_offload *f, unsigned long flags) 3562e3a2b7edSAmir Vadai { 3563d9ee0491SOr Gerlitz struct rhashtable *tc_ht = get_tc_ht(priv, flags); 3564e3a2b7edSAmir Vadai struct mlx5e_tc_flow *flow; 3565c5d326b2SVlad Buslov int err; 3566e3a2b7edSAmir Vadai 3567c5d326b2SVlad Buslov rcu_read_lock(); 356805866c82SOr Gerlitz flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params); 3569c5d326b2SVlad Buslov if (!flow || !same_flow_direction(flow, flags)) { 3570c5d326b2SVlad Buslov err = -EINVAL; 3571c5d326b2SVlad Buslov goto errout; 3572c5d326b2SVlad Buslov } 3573e3a2b7edSAmir Vadai 3574c5d326b2SVlad Buslov /* Only delete the flow if it doesn't have MLX5E_TC_FLOW_DELETED flag 3575c5d326b2SVlad Buslov * set. 
3576c5d326b2SVlad Buslov */ 3577c5d326b2SVlad Buslov if (flow_flag_test_and_set(flow, DELETED)) { 3578c5d326b2SVlad Buslov err = -EINVAL; 3579c5d326b2SVlad Buslov goto errout; 3580c5d326b2SVlad Buslov } 358105866c82SOr Gerlitz rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params); 3582c5d326b2SVlad Buslov rcu_read_unlock(); 3583e3a2b7edSAmir Vadai 35845a7e5bcbSVlad Buslov mlx5e_flow_put(priv, flow); 3585e3a2b7edSAmir Vadai 3586e3a2b7edSAmir Vadai return 0; 3587c5d326b2SVlad Buslov 3588c5d326b2SVlad Buslov errout: 3589c5d326b2SVlad Buslov rcu_read_unlock(); 3590c5d326b2SVlad Buslov return err; 3591e3a2b7edSAmir Vadai } 3592e3a2b7edSAmir Vadai 359371d82d2aSOz Shlomo int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv, 3594226f2ca3SVlad Buslov struct flow_cls_offload *f, unsigned long flags) 3595aad7e08dSAmir Vadai { 359604de7ddaSRoi Dayan struct mlx5_devcom *devcom = priv->mdev->priv.devcom; 3597d9ee0491SOr Gerlitz struct rhashtable *tc_ht = get_tc_ht(priv, flags); 359804de7ddaSRoi Dayan struct mlx5_eswitch *peer_esw; 3599aad7e08dSAmir Vadai struct mlx5e_tc_flow *flow; 3600aad7e08dSAmir Vadai struct mlx5_fc *counter; 3601316d5f72SRoi Dayan u64 lastuse = 0; 3602316d5f72SRoi Dayan u64 packets = 0; 3603316d5f72SRoi Dayan u64 bytes = 0; 36045a7e5bcbSVlad Buslov int err = 0; 3605aad7e08dSAmir Vadai 3606c5d326b2SVlad Buslov rcu_read_lock(); 3607c5d326b2SVlad Buslov flow = mlx5e_flow_get(rhashtable_lookup(tc_ht, &f->cookie, 36085a7e5bcbSVlad Buslov tc_ht_params)); 3609c5d326b2SVlad Buslov rcu_read_unlock(); 36105a7e5bcbSVlad Buslov if (IS_ERR(flow)) 36115a7e5bcbSVlad Buslov return PTR_ERR(flow); 36125a7e5bcbSVlad Buslov 36135a7e5bcbSVlad Buslov if (!same_flow_direction(flow, flags)) { 36145a7e5bcbSVlad Buslov err = -EINVAL; 36155a7e5bcbSVlad Buslov goto errout; 36165a7e5bcbSVlad Buslov } 3617aad7e08dSAmir Vadai 3618226f2ca3SVlad Buslov if (mlx5e_is_offloaded_flow(flow)) { 3619b8aee822SMark Bloch counter = mlx5e_tc_get_counter(flow); 3620aad7e08dSAmir 
Vadai if (!counter) 36215a7e5bcbSVlad Buslov goto errout; 3622aad7e08dSAmir Vadai 3623aad7e08dSAmir Vadai mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse); 3624316d5f72SRoi Dayan } 3625aad7e08dSAmir Vadai 3626316d5f72SRoi Dayan /* Under multipath it's possible for one rule to be currently 3627316d5f72SRoi Dayan * un-offloaded while the other rule is offloaded. 3628316d5f72SRoi Dayan */ 362904de7ddaSRoi Dayan peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS); 363004de7ddaSRoi Dayan if (!peer_esw) 363104de7ddaSRoi Dayan goto out; 363204de7ddaSRoi Dayan 3633226f2ca3SVlad Buslov if (flow_flag_test(flow, DUP) && 3634226f2ca3SVlad Buslov flow_flag_test(flow->peer_flow, OFFLOADED)) { 363504de7ddaSRoi Dayan u64 bytes2; 363604de7ddaSRoi Dayan u64 packets2; 363704de7ddaSRoi Dayan u64 lastuse2; 363804de7ddaSRoi Dayan 363904de7ddaSRoi Dayan counter = mlx5e_tc_get_counter(flow->peer_flow); 3640316d5f72SRoi Dayan if (!counter) 3641316d5f72SRoi Dayan goto no_peer_counter; 364204de7ddaSRoi Dayan mlx5_fc_query_cached(counter, &bytes2, &packets2, &lastuse2); 364304de7ddaSRoi Dayan 364404de7ddaSRoi Dayan bytes += bytes2; 364504de7ddaSRoi Dayan packets += packets2; 364604de7ddaSRoi Dayan lastuse = max_t(u64, lastuse, lastuse2); 364704de7ddaSRoi Dayan } 364804de7ddaSRoi Dayan 3649316d5f72SRoi Dayan no_peer_counter: 365004de7ddaSRoi Dayan mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS); 365104de7ddaSRoi Dayan out: 36523b1903efSPablo Neira Ayuso flow_stats_update(&f->stats, bytes, packets, lastuse); 36535a7e5bcbSVlad Buslov errout: 36545a7e5bcbSVlad Buslov mlx5e_flow_put(priv, flow); 36555a7e5bcbSVlad Buslov return err; 3656aad7e08dSAmir Vadai } 3657aad7e08dSAmir Vadai 3658fcb64c0fSEli Cohen static int apply_police_params(struct mlx5e_priv *priv, u32 rate, 3659fcb64c0fSEli Cohen struct netlink_ext_ack *extack) 3660fcb64c0fSEli Cohen { 3661fcb64c0fSEli Cohen struct mlx5e_rep_priv *rpriv = priv->ppriv; 3662fcb64c0fSEli Cohen struct mlx5_eswitch 
*esw; 3663fcb64c0fSEli Cohen u16 vport_num; 3664fcb64c0fSEli Cohen u32 rate_mbps; 3665fcb64c0fSEli Cohen int err; 3666fcb64c0fSEli Cohen 3667fcb64c0fSEli Cohen esw = priv->mdev->priv.eswitch; 3668fcb64c0fSEli Cohen /* rate is given in bytes/sec. 3669fcb64c0fSEli Cohen * First convert to bits/sec and then round to the nearest mbit/secs. 3670fcb64c0fSEli Cohen * mbit means million bits. 3671fcb64c0fSEli Cohen * Moreover, if rate is non zero we choose to configure to a minimum of 3672fcb64c0fSEli Cohen * 1 mbit/sec. 3673fcb64c0fSEli Cohen */ 3674fcb64c0fSEli Cohen rate_mbps = rate ? max_t(u32, (rate * 8 + 500000) / 1000000, 1) : 0; 3675fcb64c0fSEli Cohen vport_num = rpriv->rep->vport; 3676fcb64c0fSEli Cohen 3677fcb64c0fSEli Cohen err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps); 3678fcb64c0fSEli Cohen if (err) 3679fcb64c0fSEli Cohen NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware"); 3680fcb64c0fSEli Cohen 3681fcb64c0fSEli Cohen return err; 3682fcb64c0fSEli Cohen } 3683fcb64c0fSEli Cohen 3684fcb64c0fSEli Cohen static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv, 3685fcb64c0fSEli Cohen struct flow_action *flow_action, 3686fcb64c0fSEli Cohen struct netlink_ext_ack *extack) 3687fcb64c0fSEli Cohen { 3688fcb64c0fSEli Cohen struct mlx5e_rep_priv *rpriv = priv->ppriv; 3689fcb64c0fSEli Cohen const struct flow_action_entry *act; 3690fcb64c0fSEli Cohen int err; 3691fcb64c0fSEli Cohen int i; 3692fcb64c0fSEli Cohen 3693fcb64c0fSEli Cohen if (!flow_action_has_entries(flow_action)) { 3694fcb64c0fSEli Cohen NL_SET_ERR_MSG_MOD(extack, "matchall called with no action"); 3695fcb64c0fSEli Cohen return -EINVAL; 3696fcb64c0fSEli Cohen } 3697fcb64c0fSEli Cohen 3698fcb64c0fSEli Cohen if (!flow_offload_has_one_action(flow_action)) { 3699fcb64c0fSEli Cohen NL_SET_ERR_MSG_MOD(extack, "matchall policing support only a single action"); 3700fcb64c0fSEli Cohen return -EOPNOTSUPP; 3701fcb64c0fSEli Cohen } 3702fcb64c0fSEli Cohen 3703fcb64c0fSEli Cohen 
flow_action_for_each(i, act, flow_action) { 3704fcb64c0fSEli Cohen switch (act->id) { 3705fcb64c0fSEli Cohen case FLOW_ACTION_POLICE: 3706fcb64c0fSEli Cohen err = apply_police_params(priv, act->police.rate_bytes_ps, extack); 3707fcb64c0fSEli Cohen if (err) 3708fcb64c0fSEli Cohen return err; 3709fcb64c0fSEli Cohen 3710fcb64c0fSEli Cohen rpriv->prev_vf_vport_stats = priv->stats.vf_vport; 3711fcb64c0fSEli Cohen break; 3712fcb64c0fSEli Cohen default: 3713fcb64c0fSEli Cohen NL_SET_ERR_MSG_MOD(extack, "mlx5 supports only police action for matchall"); 3714fcb64c0fSEli Cohen return -EOPNOTSUPP; 3715fcb64c0fSEli Cohen } 3716fcb64c0fSEli Cohen } 3717fcb64c0fSEli Cohen 3718fcb64c0fSEli Cohen return 0; 3719fcb64c0fSEli Cohen } 3720fcb64c0fSEli Cohen 3721fcb64c0fSEli Cohen int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv, 3722fcb64c0fSEli Cohen struct tc_cls_matchall_offload *ma) 3723fcb64c0fSEli Cohen { 3724fcb64c0fSEli Cohen struct netlink_ext_ack *extack = ma->common.extack; 3725fcb64c0fSEli Cohen int prio = TC_H_MAJ(ma->common.prio) >> 16; 3726fcb64c0fSEli Cohen 3727fcb64c0fSEli Cohen if (prio != 1) { 3728fcb64c0fSEli Cohen NL_SET_ERR_MSG_MOD(extack, "only priority 1 is supported"); 3729fcb64c0fSEli Cohen return -EINVAL; 3730fcb64c0fSEli Cohen } 3731fcb64c0fSEli Cohen 3732fcb64c0fSEli Cohen return scan_tc_matchall_fdb_actions(priv, &ma->rule->action, extack); 3733fcb64c0fSEli Cohen } 3734fcb64c0fSEli Cohen 3735fcb64c0fSEli Cohen int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv, 3736fcb64c0fSEli Cohen struct tc_cls_matchall_offload *ma) 3737fcb64c0fSEli Cohen { 3738fcb64c0fSEli Cohen struct netlink_ext_ack *extack = ma->common.extack; 3739fcb64c0fSEli Cohen 3740fcb64c0fSEli Cohen return apply_police_params(priv, 0, extack); 3741fcb64c0fSEli Cohen } 3742fcb64c0fSEli Cohen 3743fcb64c0fSEli Cohen void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv, 3744fcb64c0fSEli Cohen struct tc_cls_matchall_offload *ma) 3745fcb64c0fSEli Cohen { 3746fcb64c0fSEli Cohen struct 
mlx5e_rep_priv *rpriv = priv->ppriv; 3747fcb64c0fSEli Cohen struct rtnl_link_stats64 cur_stats; 3748fcb64c0fSEli Cohen u64 dbytes; 3749fcb64c0fSEli Cohen u64 dpkts; 3750fcb64c0fSEli Cohen 3751fcb64c0fSEli Cohen cur_stats = priv->stats.vf_vport; 3752fcb64c0fSEli Cohen dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets; 3753fcb64c0fSEli Cohen dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes; 3754fcb64c0fSEli Cohen rpriv->prev_vf_vport_stats = cur_stats; 3755fcb64c0fSEli Cohen flow_stats_update(&ma->stats, dpkts, dbytes, jiffies); 3756fcb64c0fSEli Cohen } 3757fcb64c0fSEli Cohen 37584d8fcf21SAlaa Hleihel static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv, 37594d8fcf21SAlaa Hleihel struct mlx5e_priv *peer_priv) 37604d8fcf21SAlaa Hleihel { 37614d8fcf21SAlaa Hleihel struct mlx5_core_dev *peer_mdev = peer_priv->mdev; 37624d8fcf21SAlaa Hleihel struct mlx5e_hairpin_entry *hpe; 37634d8fcf21SAlaa Hleihel u16 peer_vhca_id; 37644d8fcf21SAlaa Hleihel int bkt; 37654d8fcf21SAlaa Hleihel 37664d8fcf21SAlaa Hleihel if (!same_hw_devs(priv, peer_priv)) 37674d8fcf21SAlaa Hleihel return; 37684d8fcf21SAlaa Hleihel 37694d8fcf21SAlaa Hleihel peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id); 37704d8fcf21SAlaa Hleihel 37714d8fcf21SAlaa Hleihel hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist) { 37724d8fcf21SAlaa Hleihel if (hpe->peer_vhca_id == peer_vhca_id) 37734d8fcf21SAlaa Hleihel hpe->hp->pair->peer_gone = true; 37744d8fcf21SAlaa Hleihel } 37754d8fcf21SAlaa Hleihel } 37764d8fcf21SAlaa Hleihel 37774d8fcf21SAlaa Hleihel static int mlx5e_tc_netdev_event(struct notifier_block *this, 37784d8fcf21SAlaa Hleihel unsigned long event, void *ptr) 37794d8fcf21SAlaa Hleihel { 37804d8fcf21SAlaa Hleihel struct net_device *ndev = netdev_notifier_info_to_dev(ptr); 37814d8fcf21SAlaa Hleihel struct mlx5e_flow_steering *fs; 37824d8fcf21SAlaa Hleihel struct mlx5e_priv *peer_priv; 37834d8fcf21SAlaa Hleihel struct mlx5e_tc_table *tc; 
37844d8fcf21SAlaa Hleihel struct mlx5e_priv *priv; 37854d8fcf21SAlaa Hleihel 37864d8fcf21SAlaa Hleihel if (ndev->netdev_ops != &mlx5e_netdev_ops || 37874d8fcf21SAlaa Hleihel event != NETDEV_UNREGISTER || 37884d8fcf21SAlaa Hleihel ndev->reg_state == NETREG_REGISTERED) 37894d8fcf21SAlaa Hleihel return NOTIFY_DONE; 37904d8fcf21SAlaa Hleihel 37914d8fcf21SAlaa Hleihel tc = container_of(this, struct mlx5e_tc_table, netdevice_nb); 37924d8fcf21SAlaa Hleihel fs = container_of(tc, struct mlx5e_flow_steering, tc); 37934d8fcf21SAlaa Hleihel priv = container_of(fs, struct mlx5e_priv, fs); 37944d8fcf21SAlaa Hleihel peer_priv = netdev_priv(ndev); 37954d8fcf21SAlaa Hleihel if (priv == peer_priv || 37964d8fcf21SAlaa Hleihel !(priv->netdev->features & NETIF_F_HW_TC)) 37974d8fcf21SAlaa Hleihel return NOTIFY_DONE; 37984d8fcf21SAlaa Hleihel 37994d8fcf21SAlaa Hleihel mlx5e_tc_hairpin_update_dead_peer(priv, peer_priv); 38004d8fcf21SAlaa Hleihel 38014d8fcf21SAlaa Hleihel return NOTIFY_DONE; 38024d8fcf21SAlaa Hleihel } 38034d8fcf21SAlaa Hleihel 3804655dc3d2SOr Gerlitz int mlx5e_tc_nic_init(struct mlx5e_priv *priv) 3805e8f887acSAmir Vadai { 3806acff797cSMaor Gottlieb struct mlx5e_tc_table *tc = &priv->fs.tc; 38074d8fcf21SAlaa Hleihel int err; 3808e8f887acSAmir Vadai 3809b6fac0b4SVlad Buslov mutex_init(&tc->t_lock); 381011c9c548SOr Gerlitz hash_init(tc->mod_hdr_tbl); 38115c65c564SOr Gerlitz hash_init(tc->hairpin_tbl); 381211c9c548SOr Gerlitz 38134d8fcf21SAlaa Hleihel err = rhashtable_init(&tc->ht, &tc_ht_params); 38144d8fcf21SAlaa Hleihel if (err) 38154d8fcf21SAlaa Hleihel return err; 38164d8fcf21SAlaa Hleihel 38174d8fcf21SAlaa Hleihel tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event; 38184d8fcf21SAlaa Hleihel if (register_netdevice_notifier(&tc->netdevice_nb)) { 38194d8fcf21SAlaa Hleihel tc->netdevice_nb.notifier_call = NULL; 38204d8fcf21SAlaa Hleihel mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n"); 38214d8fcf21SAlaa Hleihel } 38224d8fcf21SAlaa Hleihel 
38234d8fcf21SAlaa Hleihel return err; 3824e8f887acSAmir Vadai } 3825e8f887acSAmir Vadai 3826e8f887acSAmir Vadai static void _mlx5e_tc_del_flow(void *ptr, void *arg) 3827e8f887acSAmir Vadai { 3828e8f887acSAmir Vadai struct mlx5e_tc_flow *flow = ptr; 3829655dc3d2SOr Gerlitz struct mlx5e_priv *priv = flow->priv; 3830e8f887acSAmir Vadai 3831961e8979SRoi Dayan mlx5e_tc_del_flow(priv, flow); 3832e8f887acSAmir Vadai kfree(flow); 3833e8f887acSAmir Vadai } 3834e8f887acSAmir Vadai 3835655dc3d2SOr Gerlitz void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) 3836e8f887acSAmir Vadai { 3837acff797cSMaor Gottlieb struct mlx5e_tc_table *tc = &priv->fs.tc; 3838e8f887acSAmir Vadai 38394d8fcf21SAlaa Hleihel if (tc->netdevice_nb.notifier_call) 38404d8fcf21SAlaa Hleihel unregister_netdevice_notifier(&tc->netdevice_nb); 38414d8fcf21SAlaa Hleihel 3842d9ee0491SOr Gerlitz rhashtable_destroy(&tc->ht); 3843e8f887acSAmir Vadai 3844acff797cSMaor Gottlieb if (!IS_ERR_OR_NULL(tc->t)) { 3845acff797cSMaor Gottlieb mlx5_destroy_flow_table(tc->t); 3846acff797cSMaor Gottlieb tc->t = NULL; 3847e8f887acSAmir Vadai } 3848b6fac0b4SVlad Buslov mutex_destroy(&tc->t_lock); 3849e8f887acSAmir Vadai } 3850655dc3d2SOr Gerlitz 3851655dc3d2SOr Gerlitz int mlx5e_tc_esw_init(struct rhashtable *tc_ht) 3852655dc3d2SOr Gerlitz { 3853655dc3d2SOr Gerlitz return rhashtable_init(tc_ht, &tc_ht_params); 3854655dc3d2SOr Gerlitz } 3855655dc3d2SOr Gerlitz 3856655dc3d2SOr Gerlitz void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht) 3857655dc3d2SOr Gerlitz { 3858655dc3d2SOr Gerlitz rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL); 3859655dc3d2SOr Gerlitz } 386001252a27SOr Gerlitz 3861226f2ca3SVlad Buslov int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags) 386201252a27SOr Gerlitz { 3863d9ee0491SOr Gerlitz struct rhashtable *tc_ht = get_tc_ht(priv, flags); 386401252a27SOr Gerlitz 386501252a27SOr Gerlitz return atomic_read(&tc_ht->nelems); 386601252a27SOr Gerlitz } 386704de7ddaSRoi Dayan 386804de7ddaSRoi 
Dayan void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw) 386904de7ddaSRoi Dayan { 387004de7ddaSRoi Dayan struct mlx5e_tc_flow *flow, *tmp; 387104de7ddaSRoi Dayan 387204de7ddaSRoi Dayan list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows, peer) 387304de7ddaSRoi Dayan __mlx5e_tc_del_fdb_peer_flow(flow); 387404de7ddaSRoi Dayan } 3875b4a23329SRoi Dayan 3876b4a23329SRoi Dayan void mlx5e_tc_reoffload_flows_work(struct work_struct *work) 3877b4a23329SRoi Dayan { 3878b4a23329SRoi Dayan struct mlx5_rep_uplink_priv *rpriv = 3879b4a23329SRoi Dayan container_of(work, struct mlx5_rep_uplink_priv, 3880b4a23329SRoi Dayan reoffload_flows_work); 3881b4a23329SRoi Dayan struct mlx5e_tc_flow *flow, *tmp; 3882b4a23329SRoi Dayan 3883ad86755bSVlad Buslov mutex_lock(&rpriv->unready_flows_lock); 3884b4a23329SRoi Dayan list_for_each_entry_safe(flow, tmp, &rpriv->unready_flows, unready) { 3885b4a23329SRoi Dayan if (!mlx5e_tc_add_fdb_flow(flow->priv, flow, NULL)) 3886ad86755bSVlad Buslov unready_flow_del(flow); 3887b4a23329SRoi Dayan } 3888ad86755bSVlad Buslov mutex_unlock(&rpriv->unready_flows_lock); 3889b4a23329SRoi Dayan } 3890