/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/flow_dissector.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <linux/refcount.h>
#include <linux/completion.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_csum.h>
#include <net/arp.h>
#include <net/ipv6_stubs.h>
#include "en.h"
#include "en_rep.h"
#include "en_tc.h"
#include "eswitch.h"
#include "fs_core.h"
#include "en/port.h"
#include "en/tc_tun.h"
#include "lib/devcom.h"
#include "lib/geneve.h"

struct mlx5_nic_flow_attr {
	u32 action;
	u32 flow_tag;
	u32 mod_hdr_id;
	u32 hairpin_tirn;
	u8 match_level;
	struct mlx5_flow_table	*hairpin_ft;
	struct mlx5_fc		*counter;
};

#define MLX5E_TC_FLOW_BASE (MLX5E_TC_FLAG_LAST_EXPORTED_BIT + 1)

enum {
	MLX5E_TC_FLOW_FLAG_INGRESS	= MLX5E_TC_FLAG_INGRESS_BIT,
	MLX5E_TC_FLOW_FLAG_EGRESS	= MLX5E_TC_FLAG_EGRESS_BIT,
	MLX5E_TC_FLOW_FLAG_ESWITCH	= MLX5E_TC_FLAG_ESW_OFFLOAD_BIT,
	MLX5E_TC_FLOW_FLAG_NIC		= MLX5E_TC_FLAG_NIC_OFFLOAD_BIT,
	MLX5E_TC_FLOW_FLAG_OFFLOADED	= MLX5E_TC_FLOW_BASE,
	MLX5E_TC_FLOW_FLAG_HAIRPIN	= MLX5E_TC_FLOW_BASE + 1,
	MLX5E_TC_FLOW_FLAG_HAIRPIN_RSS	= MLX5E_TC_FLOW_BASE + 2,
	MLX5E_TC_FLOW_FLAG_SLOW		= MLX5E_TC_FLOW_BASE + 3,
	MLX5E_TC_FLOW_FLAG_DUP		= MLX5E_TC_FLOW_BASE + 4,
	MLX5E_TC_FLOW_FLAG_NOT_READY	= MLX5E_TC_FLOW_BASE + 5,
	MLX5E_TC_FLOW_FLAG_DELETED	= MLX5E_TC_FLOW_BASE + 6,
};

#define MLX5E_TC_MAX_SPLITS 1

/* Helper struct for accessing a struct containing list_head array.
 * Containing struct
 *   |- Helper array
 *      [0] Helper item 0
 *          |- list_head item 0
 *          |- index (0)
 *      [1] Helper item 1
 *          |- list_head item 1
 *          |- index (1)
 * To access the containing struct from one of the list_head items:
 * 1. Get the helper item from the list_head item using
 *    helper item =
 *        container_of(list_head item, helper struct type, list_head field)
 * 2. Get the containing struct from the helper item and its index in the array:
 *    containing struct =
 *        container_of(helper item, containing struct type, helper field[index])
 */
struct encap_flow_item {
	struct mlx5e_encap_entry *e; /* attached encap instance */
	struct list_head list;
	int index;
};
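
/* Illustrative sketch only (not called anywhere): recovering the flow that
 * owns an encap_flow_item embedded in struct mlx5e_tc_flow below, following
 * the two container_of() steps described above:
 *
 *	struct encap_flow_item *efi;
 *	struct mlx5e_tc_flow *flow;
 *
 *	efi = container_of(list_head_item, struct encap_flow_item, list);
 *	flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
 */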

struct mlx5e_tc_flow {
	struct rhash_head	node;
	struct mlx5e_priv	*priv;
	u64			cookie;
	unsigned long		flags;
	struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1];
	/* Flow can be associated with multiple encap IDs.
	 * The number of encaps is bounded by the number of supported
	 * destinations.
	 */
	struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS];
	struct mlx5e_tc_flow    *peer_flow;
	struct mlx5e_mod_hdr_entry *mh; /* attached mod header instance */
	struct list_head	mod_hdr; /* flows sharing the same mod hdr ID */
	struct mlx5e_hairpin_entry *hpe; /* attached hairpin instance */
	struct list_head	hairpin; /* flows sharing the same hairpin */
	struct list_head	peer;    /* flows with peer flow */
	struct list_head	unready; /* flows not ready to be offloaded (e.g. due to missing route) */
	int			tmp_efi_index;
	struct list_head	tmp_list; /* temporary flow list used by neigh update */
	refcount_t		refcnt;
	struct rcu_head		rcu_head;
	union {
		struct mlx5_esw_flow_attr esw_attr[0];
		struct mlx5_nic_flow_attr nic_attr[0];
	};
};

struct mlx5e_tc_flow_parse_attr {
	const struct ip_tunnel_info *tun_info[MLX5_MAX_FLOW_FWD_VPORTS];
	struct net_device *filter_dev;
	struct mlx5_flow_spec spec;
	int num_mod_hdr_actions;
	int max_mod_hdr_actions;
	void *mod_hdr_actions;
	int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS];
};

#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(16)

struct mlx5e_hairpin {
	struct mlx5_hairpin *pair;

	struct mlx5_core_dev *func_mdev;
	struct mlx5e_priv *func_priv;
	u32 tdn;
	u32 tirn;

	int num_channels;
	struct mlx5e_rqt indir_rqt;
	u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
	struct mlx5e_ttc_table ttc;
};

struct mlx5e_hairpin_entry {
	/* a node of a hash table which keeps all the hairpin entries */
	struct hlist_node hairpin_hlist;

	/* protects flows list */
	spinlock_t flows_lock;
	/* flows sharing the same hairpin */
	struct list_head flows;
	/* hpe's that were not fully initialized when dead peer update event
	 * function traversed them.
	 */
	struct list_head dead_peer_wait_list;

	u16 peer_vhca_id;
	u8 prio;
	struct mlx5e_hairpin *hp;
	refcount_t refcnt;
	struct completion res_ready;
};

struct mod_hdr_key {
	int num_actions;
	void *actions;
};

struct mlx5e_mod_hdr_entry {
	/* a node of a hash table which keeps all the mod_hdr entries */
	struct hlist_node mod_hdr_hlist;

	/* protects flows list */
	spinlock_t flows_lock;
	/* flows sharing the same mod_hdr entry */
	struct list_head flows;

	struct mod_hdr_key key;

	u32 mod_hdr_id;

	refcount_t refcnt;
	struct completion res_ready;
	int compl_result;
};

#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)

static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow);

static struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow)
{
	if (!flow || !refcount_inc_not_zero(&flow->refcnt))
		return ERR_PTR(-EINVAL);
	return flow;
}

static void mlx5e_flow_put(struct mlx5e_priv *priv,
			   struct mlx5e_tc_flow *flow)
{
	if (refcount_dec_and_test(&flow->refcnt)) {
		mlx5e_tc_del_flow(priv, flow);
		kfree_rcu(flow, rcu_head);
	}
}

static void __flow_flag_set(struct mlx5e_tc_flow *flow, unsigned long flag)
{
	/* Complete all memory stores before setting bit. */
	smp_mb__before_atomic();
	set_bit(flag, &flow->flags);
}

#define flow_flag_set(flow, flag) __flow_flag_set(flow, MLX5E_TC_FLOW_FLAG_##flag)

static bool __flow_flag_test_and_set(struct mlx5e_tc_flow *flow,
				     unsigned long flag)
{
	/* test_and_set_bit() provides all necessary barriers */
	return test_and_set_bit(flag, &flow->flags);
}

#define flow_flag_test_and_set(flow, flag)			\
	__flow_flag_test_and_set(flow,				\
				 MLX5E_TC_FLOW_FLAG_##flag)

static void __flow_flag_clear(struct mlx5e_tc_flow *flow, unsigned long flag)
{
	/* Complete all memory stores before clearing bit. */
	smp_mb__before_atomic();
	clear_bit(flag, &flow->flags);
}

#define flow_flag_clear(flow, flag) __flow_flag_clear(flow, \
						      MLX5E_TC_FLOW_FLAG_##flag)

static bool __flow_flag_test(struct mlx5e_tc_flow *flow, unsigned long flag)
{
	bool ret = test_bit(flag, &flow->flags);

	/* Read fields of flow structure only after checking flags. */
	smp_mb__after_atomic();
	return ret;
}

#define flow_flag_test(flow, flag) __flow_flag_test(flow, \
						    MLX5E_TC_FLOW_FLAG_##flag)
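
/* For illustration only: the wrappers above token-paste the short flag name
 * onto the MLX5E_TC_FLOW_FLAG_ prefix, so
 *
 *	flow_flag_set(flow, OFFLOADED);
 *	if (flow_flag_test(flow, HAIRPIN)) { ... }
 *
 * expand to
 *
 *	__flow_flag_set(flow, MLX5E_TC_FLOW_FLAG_OFFLOADED);
 *	if (__flow_flag_test(flow, MLX5E_TC_FLOW_FLAG_HAIRPIN)) { ... }
 *
 * The smp_mb__before_atomic()/smp_mb__after_atomic() pairing orders writes to
 * flow fields before a flag is published against reads of those fields after
 * the flag is observed.
 */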

static bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, ESWITCH);
}

static bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, OFFLOADED);
}

static inline u32 hash_mod_hdr_info(struct mod_hdr_key *key)
{
	return jhash(key->actions,
		     key->num_actions * MLX5_MH_ACT_SZ, 0);
}

static inline int cmp_mod_hdr_info(struct mod_hdr_key *a,
				   struct mod_hdr_key *b)
{
	if (a->num_actions != b->num_actions)
		return 1;

	return memcmp(a->actions, b->actions, a->num_actions * MLX5_MH_ACT_SZ);
}

static struct mod_hdr_tbl *
get_mod_hdr_table(struct mlx5e_priv *priv, int namespace)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	return namespace == MLX5_FLOW_NAMESPACE_FDB ? &esw->offloads.mod_hdr :
		&priv->fs.tc.mod_hdr;
}

static struct mlx5e_mod_hdr_entry *
mlx5e_mod_hdr_get(struct mod_hdr_tbl *tbl, struct mod_hdr_key *key, u32 hash_key)
{
	struct mlx5e_mod_hdr_entry *mh, *found = NULL;

	hash_for_each_possible(tbl->hlist, mh, mod_hdr_hlist, hash_key) {
		if (!cmp_mod_hdr_info(&mh->key, key)) {
			refcount_inc(&mh->refcnt);
			found = mh;
			break;
		}
	}

	return found;
}

static void mlx5e_mod_hdr_put(struct mlx5e_priv *priv,
			      struct mlx5e_mod_hdr_entry *mh,
			      int namespace)
{
	struct mod_hdr_tbl *tbl = get_mod_hdr_table(priv, namespace);

	if (!refcount_dec_and_mutex_lock(&mh->refcnt, &tbl->lock))
		return;
	hash_del(&mh->mod_hdr_hlist);
	mutex_unlock(&tbl->lock);

	WARN_ON(!list_empty(&mh->flows));
	if (mh->compl_result > 0)
		mlx5_modify_header_dealloc(priv->mdev, mh->mod_hdr_id);

	kfree(mh);
}

static int get_flow_name_space(struct mlx5e_tc_flow *flow)
{
	return mlx5e_is_eswitch_flow(flow) ?
		MLX5_FLOW_NAMESPACE_FDB : MLX5_FLOW_NAMESPACE_KERNEL;
}

static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
				struct mlx5e_tc_flow *flow,
				struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	int num_actions, actions_size, namespace, err;
	struct mlx5e_mod_hdr_entry *mh;
	struct mod_hdr_tbl *tbl;
	struct mod_hdr_key key;
	u32 hash_key;

	num_actions  = parse_attr->num_mod_hdr_actions;
	actions_size = MLX5_MH_ACT_SZ * num_actions;

	key.actions = parse_attr->mod_hdr_actions;
	key.num_actions = num_actions;

	hash_key = hash_mod_hdr_info(&key);

	namespace = get_flow_name_space(flow);
	tbl = get_mod_hdr_table(priv, namespace);

	mutex_lock(&tbl->lock);
	mh = mlx5e_mod_hdr_get(tbl, &key, hash_key);
	if (mh) {
		mutex_unlock(&tbl->lock);
		wait_for_completion(&mh->res_ready);

		if (mh->compl_result < 0) {
			err = -EREMOTEIO;
			goto attach_header_err;
		}
		goto attach_flow;
	}

	mh = kzalloc(sizeof(*mh) + actions_size, GFP_KERNEL);
	if (!mh) {
		mutex_unlock(&tbl->lock);
		return -ENOMEM;
	}

	mh->key.actions = (void *)mh + sizeof(*mh);
	memcpy(mh->key.actions, key.actions, actions_size);
	mh->key.num_actions = num_actions;
	spin_lock_init(&mh->flows_lock);
	INIT_LIST_HEAD(&mh->flows);
	refcount_set(&mh->refcnt, 1);
	init_completion(&mh->res_ready);

	hash_add(tbl->hlist, &mh->mod_hdr_hlist, hash_key);
	mutex_unlock(&tbl->lock);

	err = mlx5_modify_header_alloc(priv->mdev, namespace,
				       mh->key.num_actions,
				       mh->key.actions,
				       &mh->mod_hdr_id);
	if (err) {
		mh->compl_result = err;
		goto alloc_header_err;
	}
	mh->compl_result = 1;
	complete_all(&mh->res_ready);

attach_flow:
	flow->mh = mh;
	spin_lock(&mh->flows_lock);
	list_add(&flow->mod_hdr, &mh->flows);
	spin_unlock(&mh->flows_lock);
	if (mlx5e_is_eswitch_flow(flow))
		flow->esw_attr->mod_hdr_id = mh->mod_hdr_id;
	else
		flow->nic_attr->mod_hdr_id = mh->mod_hdr_id;

	return 0;

alloc_header_err:
	complete_all(&mh->res_ready);
attach_header_err:
	mlx5e_mod_hdr_put(priv, mh, namespace);
	return err;
}
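
/* Descriptive note on the scheme above: the table mutex is held only long
 * enough to either find an existing entry or insert a new, not yet
 * initialized one.  Concurrent users that find the entry wait on res_ready
 * and then read compl_result: a positive value means the device modify-header
 * object was allocated and can be shared, a negative value propagates the
 * original allocation error to every waiter.
 */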

static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
				 struct mlx5e_tc_flow *flow)
{
	/* flow wasn't fully initialized */
	if (!flow->mh)
		return;

	spin_lock(&flow->mh->flows_lock);
	list_del(&flow->mod_hdr);
	spin_unlock(&flow->mh->flows_lock);

	mlx5e_mod_hdr_put(priv, flow->mh, get_flow_name_space(flow));
	flow->mh = NULL;
}

static
struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
{
	struct net_device *netdev;
	struct mlx5e_priv *priv;

	netdev = __dev_get_by_index(net, ifindex);
	priv = netdev_priv(netdev);
	return priv->mdev;
}

static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
{
	u32 in[MLX5_ST_SZ_DW(create_tir_in)] = {0};
	void *tirc;
	int err;

	err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn);
	if (err)
		goto alloc_tdn_err;

	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
	MLX5_SET(tirc, tirc, inline_rqn, hp->pair->rqn[0]);
	MLX5_SET(tirc, tirc, transport_domain, hp->tdn);

	err = mlx5_core_create_tir(hp->func_mdev, in, MLX5_ST_SZ_BYTES(create_tir_in), &hp->tirn);
	if (err)
		goto create_tir_err;

	return 0;

create_tir_err:
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
alloc_tdn_err:
	return err;
}

static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
{
	mlx5_core_destroy_tir(hp->func_mdev, hp->tirn);
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
}

static void mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
{
	u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE], rqn;
	struct mlx5e_priv *priv = hp->func_priv;
	int i, ix, sz = MLX5E_INDIR_RQT_SIZE;

	mlx5e_build_default_indir_rqt(indirection_rqt, sz,
				      hp->num_channels);

	for (i = 0; i < sz; i++) {
		ix = i;
		if (priv->rss_params.hfunc == ETH_RSS_HASH_XOR)
			ix = mlx5e_bits_invert(i, ilog2(sz));
		ix = indirection_rqt[ix];
		rqn = hp->pair->rqn[ix];
		MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
	}
}
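
/* Illustrative walk-through (assumption: mlx5e_build_default_indir_rqt()
 * spreads channels round-robin, roughly indirection_rqt[i] == i % num_channels):
 * with num_channels == 2 the loop programs each RQT entry with the RQN of
 * hairpin RQ 0 or 1 alternately; when the XOR hash function is used, the table
 * index is first bit-reversed by mlx5e_bits_invert() so adjacent hash results
 * still land on different RQs.
 */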

static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
{
	int inlen, err, sz = MLX5E_INDIR_RQT_SIZE;
	struct mlx5e_priv *priv = hp->func_priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqtc;
	u32 *in;

	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

	mlx5e_hairpin_fill_rqt_rqns(hp, rqtc);

	err = mlx5_core_create_rqt(mdev, in, inlen, &hp->indir_rqt.rqtn);
	if (!err)
		hp->indir_rqt.enabled = true;

	kvfree(in);
	return err;
}

static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	u32 in[MLX5_ST_SZ_DW(create_tir_in)];
	int tt, i, err;
	void *tirc;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		struct mlx5e_tirc_config ttconfig = mlx5e_tirc_get_default_config(tt);

		memset(in, 0, MLX5_ST_SZ_BYTES(create_tir_in));
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

		MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
		MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
		MLX5_SET(tirc, tirc, indirect_table, hp->indir_rqt.rqtn);
		mlx5e_build_indir_tir_ctx_hash(&priv->rss_params, &ttconfig, tirc, false);

		err = mlx5_core_create_tir(hp->func_mdev, in,
					   MLX5_ST_SZ_BYTES(create_tir_in), &hp->indir_tirn[tt]);
		if (err) {
			mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err);
			goto err_destroy_tirs;
		}
	}
	return 0;

err_destroy_tirs:
	for (i = 0; i < tt; i++)
		mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[i]);
	return err;
}

static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp)
{
	int tt;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[tt]);
}

static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
					 struct ttc_params *ttc_params)
{
	struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
	int tt;

	memset(ttc_params, 0, sizeof(*ttc_params));

	ttc_params->any_tt_tirn = hp->tirn;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		ttc_params->indir_tirn[tt] = hp->indir_tirn[tt];

	ft_attr->max_fte = MLX5E_NUM_TT;
	ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
	ft_attr->prio = MLX5E_TC_PRIO;
}

static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	struct ttc_params ttc_params;
	int err;

	err = mlx5e_hairpin_create_indirect_rqt(hp);
	if (err)
		return err;

	err = mlx5e_hairpin_create_indirect_tirs(hp);
	if (err)
		goto err_create_indirect_tirs;

	mlx5e_hairpin_set_ttc_params(hp, &ttc_params);
	err = mlx5e_create_ttc_table(priv, &ttc_params, &hp->ttc);
	if (err)
		goto err_create_ttc_table;

	netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
		   hp->num_channels, hp->ttc.ft.t->id);

	return 0;

err_create_ttc_table:
	mlx5e_hairpin_destroy_indirect_tirs(hp);
err_create_indirect_tirs:
	mlx5e_destroy_rqt(priv, &hp->indir_rqt);

	return err;
}

static void mlx5e_hairpin_rss_cleanup(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;

	mlx5e_destroy_ttc_table(priv, &hp->ttc);
	mlx5e_hairpin_destroy_indirect_tirs(hp);
	mlx5e_destroy_rqt(priv, &hp->indir_rqt);
}

static struct mlx5e_hairpin *
mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params,
		     int peer_ifindex)
{
	struct mlx5_core_dev *func_mdev, *peer_mdev;
	struct mlx5e_hairpin *hp;
	struct mlx5_hairpin *pair;
	int err;

	hp = kzalloc(sizeof(*hp), GFP_KERNEL);
	if (!hp)
		return ERR_PTR(-ENOMEM);

	func_mdev = priv->mdev;
	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);

	pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
	if (IS_ERR(pair)) {
		err = PTR_ERR(pair);
		goto create_pair_err;
	}
	hp->pair = pair;
	hp->func_mdev = func_mdev;
	hp->func_priv = priv;
	hp->num_channels = params->num_channels;

	err = mlx5e_hairpin_create_transport(hp);
	if (err)
		goto create_transport_err;

	if (hp->num_channels > 1) {
		err = mlx5e_hairpin_rss_init(hp);
		if (err)
			goto rss_init_err;
	}

	return hp;

rss_init_err:
	mlx5e_hairpin_destroy_transport(hp);
create_transport_err:
	mlx5_core_hairpin_destroy(hp->pair);
create_pair_err:
	kfree(hp);
	return ERR_PTR(err);
}

static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp)
{
	if (hp->num_channels > 1)
		mlx5e_hairpin_rss_cleanup(hp);
	mlx5e_hairpin_destroy_transport(hp);
	mlx5_core_hairpin_destroy(hp->pair);
	kvfree(hp);
}

static inline u32 hash_hairpin_info(u16 peer_vhca_id, u8 prio)
{
	return (peer_vhca_id << 16 | prio);
}
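
/* For example (illustration only): peer_vhca_id 0x0005 and prio 3 hash to
 * 0x00050003, so hairpin entries are bucketed per (peer device, PCP priority)
 * pair.
 */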

static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
						     u16 peer_vhca_id, u8 prio)
{
	struct mlx5e_hairpin_entry *hpe;
	u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);

	hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe,
			       hairpin_hlist, hash_key) {
		if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio) {
			refcount_inc(&hpe->refcnt);
			return hpe;
		}
	}

	return NULL;
}

static void mlx5e_hairpin_put(struct mlx5e_priv *priv,
			      struct mlx5e_hairpin_entry *hpe)
{
	/* no more hairpin flows for us, release the hairpin pair */
	if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &priv->fs.tc.hairpin_tbl_lock))
		return;
	hash_del(&hpe->hairpin_hlist);
	mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);

	if (!IS_ERR_OR_NULL(hpe->hp)) {
		netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
			   dev_name(hpe->hp->pair->peer_mdev->device));

		mlx5e_hairpin_destroy(hpe->hp);
	}

	WARN_ON(!list_empty(&hpe->flows));
	kfree(hpe);
}

#define UNKNOWN_MATCH_PRIO 8

static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
				  struct mlx5_flow_spec *spec, u8 *match_prio,
				  struct netlink_ext_ack *extack)
{
	void *headers_c, *headers_v;
	u8 prio_val, prio_mask = 0;
	bool vlan_present;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "only PCP trust state supported for hairpin");
		return -EOPNOTSUPP;
	}
#endif
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);

	vlan_present = MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag);
	if (vlan_present) {
		prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
		prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
	}

	if (!vlan_present || !prio_mask) {
		prio_val = UNKNOWN_MATCH_PRIO;
	} else if (prio_mask != 0x7) {
		NL_SET_ERR_MSG_MOD(extack,
				   "masked priority match not supported for hairpin");
		return -EOPNOTSUPP;
	}

	*match_prio = prio_val;
	return 0;
}

static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5e_tc_flow_parse_attr *parse_attr,
				  struct netlink_ext_ack *extack)
{
	int peer_ifindex = parse_attr->mirred_ifindex[0];
	struct mlx5_hairpin_params params;
	struct mlx5_core_dev *peer_mdev;
	struct mlx5e_hairpin_entry *hpe;
	struct mlx5e_hairpin *hp;
	u64 link_speed64;
	u32 link_speed;
	u8 match_prio;
	u16 peer_id;
	int err;

	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
	if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
		NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
		return -EOPNOTSUPP;
	}

	peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
	err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio,
				     extack);
	if (err)
		return err;

	mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
	hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
	if (hpe) {
		mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
		wait_for_completion(&hpe->res_ready);

		if (IS_ERR(hpe->hp)) {
			err = -EREMOTEIO;
			goto out_err;
		}
		goto attach_flow;
	}

	hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
	if (!hpe) {
		mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
		return -ENOMEM;
	}

	spin_lock_init(&hpe->flows_lock);
	INIT_LIST_HEAD(&hpe->flows);
	INIT_LIST_HEAD(&hpe->dead_peer_wait_list);
	hpe->peer_vhca_id = peer_id;
	hpe->prio = match_prio;
	refcount_set(&hpe->refcnt, 1);
	init_completion(&hpe->res_ready);

	hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist,
		 hash_hairpin_info(peer_id, match_prio));
	mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);

	params.log_data_size = 15;
	params.log_data_size = min_t(u8, params.log_data_size,
				     MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
	params.log_data_size = max_t(u8, params.log_data_size,
				     MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz));

	params.log_num_packets = params.log_data_size -
				 MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev);
	params.log_num_packets = min_t(u8, params.log_num_packets,
				       MLX5_CAP_GEN(priv->mdev, log_max_hairpin_num_packets));

	params.q_counter = priv->q_counter;
	/* set one hairpin pair for each 50 Gbps share of the link */
	mlx5e_port_max_linkspeed(priv->mdev, &link_speed);
	link_speed = max_t(u32, link_speed, 50000);
	link_speed64 = link_speed;
	do_div(link_speed64, 50000);
	params.num_channels = link_speed64;
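	/* Worked example (illustration only, assuming mlx5e_port_max_linkspeed()
	 * reports the speed in Mb/s, consistent with the 50000 constant above):
	 * a 100 Gbps port gives link_speed = 100000, hence 100000 / 50000 = 2
	 * hairpin channels; the max_t() clamp guarantees at least one channel
	 * for links slower than 50 Gbps.
	 */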

	hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
	hpe->hp = hp;
	complete_all(&hpe->res_ready);
	if (IS_ERR(hp)) {
		err = PTR_ERR(hp);
		goto out_err;
	}

	netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
		   hp->tirn, hp->pair->rqn[0],
		   dev_name(hp->pair->peer_mdev->device),
		   hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);

attach_flow:
	if (hpe->hp->num_channels > 1) {
		flow_flag_set(flow, HAIRPIN_RSS);
		flow->nic_attr->hairpin_ft = hpe->hp->ttc.ft.t;
	} else {
		flow->nic_attr->hairpin_tirn = hpe->hp->tirn;
	}

	flow->hpe = hpe;
	spin_lock(&hpe->flows_lock);
	list_add(&flow->hairpin, &hpe->flows);
	spin_unlock(&hpe->flows_lock);

	return 0;

out_err:
	mlx5e_hairpin_put(priv, hpe);
	return err;
}

static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
				   struct mlx5e_tc_flow *flow)
{
	/* flow wasn't fully initialized */
	if (!flow->hpe)
		return;

	spin_lock(&flow->hpe->flows_lock);
	list_del(&flow->hairpin);
	spin_unlock(&flow->hpe->flows_lock);

	mlx5e_hairpin_put(priv, flow->hpe);
	flow->hpe = NULL;
}

static int
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow_parse_attr *parse_attr,
		      struct mlx5e_tc_flow *flow,
		      struct netlink_ext_ack *extack)
{
	struct mlx5_flow_context *flow_context = &parse_attr->spec.flow_context;
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_flow_act flow_act = {
		.action = attr->action,
		.reformat_id = 0,
		.flags    = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_fc *counter = NULL;
	int err, dest_ix = 0;

	flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
	flow_context->flow_tag = attr->flow_tag;

	if (flow_flag_test(flow, HAIRPIN)) {
		err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
		if (err)
			return err;

		if (flow_flag_test(flow, HAIRPIN_RSS)) {
			dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[dest_ix].ft = attr->hairpin_ft;
		} else {
			dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
			dest[dest_ix].tir_num = attr->hairpin_tirn;
		}
		dest_ix++;
	} else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest[dest_ix].ft = priv->fs.vlan.ft.t;
		dest_ix++;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(dev, true);
		if (IS_ERR(counter))
			return PTR_ERR(counter);

		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[dest_ix].counter_id = mlx5_fc_id(counter);
		dest_ix++;
		attr->counter = counter;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
		flow_act.modify_id = attr->mod_hdr_id;
		kfree(parse_attr->mod_hdr_actions);
		if (err)
			return err;
	}

	mutex_lock(&priv->fs.tc.t_lock);
	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
		int tc_grp_size, tc_tbl_size;
		u32 max_flow_counter;

		max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
				    MLX5_CAP_GEN(dev, max_flow_counter_15_0);

		tc_grp_size = min_t(int, max_flow_counter, MLX5E_TC_TABLE_MAX_GROUP_SIZE);

		tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
				    BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));

		priv->fs.tc.t =
			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
							    MLX5E_TC_PRIO,
							    tc_tbl_size,
							    MLX5E_TC_TABLE_NUM_GROUPS,
							    MLX5E_TC_FT_LEVEL, 0);
		if (IS_ERR(priv->fs.tc.t)) {
			mutex_unlock(&priv->fs.tc.t_lock);
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to create tc offload table\n");
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			return PTR_ERR(priv->fs.tc.t);
		}
	}

	if (attr->match_level != MLX5_MATCH_NONE)
		parse_attr->spec.match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	flow->rule[0] = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
					    &flow_act, dest, dest_ix);
	mutex_unlock(&priv->fs.tc.t_lock);

	if (IS_ERR(flow->rule[0]))
		return PTR_ERR(flow->rule[0]);

	return 0;
}

static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	struct mlx5_fc *counter = NULL;

	counter = attr->counter;
	if (!IS_ERR_OR_NULL(flow->rule[0]))
		mlx5_del_flow_rules(flow->rule[0]);
	mlx5_fc_destroy(priv->mdev, counter);

	mutex_lock(&priv->fs.tc.t_lock);
	if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) && priv->fs.tc.t) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
	mutex_unlock(&priv->fs.tc.t_lock);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);

	if (flow_flag_test(flow, HAIRPIN))
		mlx5e_hairpin_flow_del(priv, flow);
}

static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow, int out_index);

static int mlx5e_attach_encap(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow,
			      struct net_device *mirred_dev,
			      int out_index,
			      struct netlink_ext_ack *extack,
			      struct net_device **encap_dev,
			      bool *encap_valid);

static struct mlx5_flow_handle *
mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
			   struct mlx5e_tc_flow *flow,
			   struct mlx5_flow_spec *spec,
			   struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_handle *rule;

	rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
	if (IS_ERR(rule))
		return rule;

	if (attr->split_count) {
		flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
		if (IS_ERR(flow->rule[1])) {
			mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
			return flow->rule[1];
		}
	}

	return rule;
}

static void
mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
			     struct mlx5e_tc_flow *flow,
			     struct mlx5_esw_flow_attr *attr)
{
	flow_flag_clear(flow, OFFLOADED);

	if (attr->split_count)
		mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);

	mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
}

static struct mlx5_flow_handle *
mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
			      struct mlx5e_tc_flow *flow,
			      struct mlx5_flow_spec *spec,
			      struct mlx5_esw_flow_attr *slow_attr)
{
	struct mlx5_flow_handle *rule;

	memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	slow_attr->split_count = 0;
	slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN;

	rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
	if (!IS_ERR(rule))
		flow_flag_set(flow, SLOW);

	return rule;
}

static void
mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5_esw_flow_attr *slow_attr)
{
	memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	slow_attr->split_count = 0;
	slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN;
	mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
	flow_flag_clear(flow, SLOW);
}
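
/* Descriptive note: the two helpers above reprogram a flow to merely forward
 * to the slow-path FDB chain, with no splits, so traffic falls back to
 * software handling (e.g. while an encap neighbour is still unresolved); the
 * SLOW flag records which representation is currently installed in hardware.
 */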
11005dbe906fSPaul Blakey 
1101ad86755bSVlad Buslov /* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
1102ad86755bSVlad Buslov  * function.
1103ad86755bSVlad Buslov  */
1104ad86755bSVlad Buslov static void unready_flow_add(struct mlx5e_tc_flow *flow,
1105ad86755bSVlad Buslov 			     struct list_head *unready_flows)
1106ad86755bSVlad Buslov {
1107ad86755bSVlad Buslov 	flow_flag_set(flow, NOT_READY);
1108ad86755bSVlad Buslov 	list_add_tail(&flow->unready, unready_flows);
1109ad86755bSVlad Buslov }
1110ad86755bSVlad Buslov 
1111ad86755bSVlad Buslov /* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
1112ad86755bSVlad Buslov  * function.
1113ad86755bSVlad Buslov  */
1114ad86755bSVlad Buslov static void unready_flow_del(struct mlx5e_tc_flow *flow)
1115ad86755bSVlad Buslov {
1116ad86755bSVlad Buslov 	list_del(&flow->unready);
1117ad86755bSVlad Buslov 	flow_flag_clear(flow, NOT_READY);
1118ad86755bSVlad Buslov }
1119ad86755bSVlad Buslov 
1120b4a23329SRoi Dayan static void add_unready_flow(struct mlx5e_tc_flow *flow)
1121b4a23329SRoi Dayan {
1122b4a23329SRoi Dayan 	struct mlx5_rep_uplink_priv *uplink_priv;
1123b4a23329SRoi Dayan 	struct mlx5e_rep_priv *rpriv;
1124b4a23329SRoi Dayan 	struct mlx5_eswitch *esw;
1125b4a23329SRoi Dayan 
1126b4a23329SRoi Dayan 	esw = flow->priv->mdev->priv.eswitch;
1127b4a23329SRoi Dayan 	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
1128b4a23329SRoi Dayan 	uplink_priv = &rpriv->uplink_priv;
1129b4a23329SRoi Dayan 
1130ad86755bSVlad Buslov 	mutex_lock(&uplink_priv->unready_flows_lock);
1131ad86755bSVlad Buslov 	unready_flow_add(flow, &uplink_priv->unready_flows);
1132ad86755bSVlad Buslov 	mutex_unlock(&uplink_priv->unready_flows_lock);
1133b4a23329SRoi Dayan }
1134b4a23329SRoi Dayan 
1135b4a23329SRoi Dayan static void remove_unready_flow(struct mlx5e_tc_flow *flow)
1136b4a23329SRoi Dayan {
1137ad86755bSVlad Buslov 	struct mlx5_rep_uplink_priv *uplink_priv;
1138ad86755bSVlad Buslov 	struct mlx5e_rep_priv *rpriv;
1139ad86755bSVlad Buslov 	struct mlx5_eswitch *esw;
1140ad86755bSVlad Buslov 
1141ad86755bSVlad Buslov 	esw = flow->priv->mdev->priv.eswitch;
1142ad86755bSVlad Buslov 	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
1143ad86755bSVlad Buslov 	uplink_priv = &rpriv->uplink_priv;
1144ad86755bSVlad Buslov 
1145ad86755bSVlad Buslov 	mutex_lock(&uplink_priv->unready_flows_lock);
1146ad86755bSVlad Buslov 	unready_flow_del(flow);
1147ad86755bSVlad Buslov 	mutex_unlock(&uplink_priv->unready_flows_lock);
1148b4a23329SRoi Dayan }
1149b4a23329SRoi Dayan 
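/* Offload a flow into the eswitch FDB tables. Chain and priority are checked
 * against the device's supported range, encap destinations are attached, a
 * modify-header context and a flow counter are set up when the actions need
 * them, and the rule is installed either on the fast path or, if an encap
 * neighbour is not yet resolved, on the slow path.
 */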
1150c83954abSRabie Loulou static int
115174491de9SMark Bloch mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
1152e98bedf5SEli Britstein 		      struct mlx5e_tc_flow *flow,
1153e98bedf5SEli Britstein 		      struct netlink_ext_ack *extack)
1154adb4c123SOr Gerlitz {
1155adb4c123SOr Gerlitz 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1156bf07aa73SPaul Blakey 	u32 max_chain = mlx5_eswitch_get_chain_range(esw);
1157aa0cbbaeSOr Gerlitz 	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
11587040632dSTonghao Zhang 	struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
1159bf07aa73SPaul Blakey 	u16 max_prio = mlx5_eswitch_get_prio_range(esw);
11603c37745eSOr Gerlitz 	struct net_device *out_dev, *encap_dev = NULL;
1161b8aee822SMark Bloch 	struct mlx5_fc *counter = NULL;
11623c37745eSOr Gerlitz 	struct mlx5e_rep_priv *rpriv;
11633c37745eSOr Gerlitz 	struct mlx5e_priv *out_priv;
11640ad060eeSRoi Dayan 	bool encap_valid = true;
11650ad060eeSRoi Dayan 	int err = 0;
1166f493f155SEli Britstein 	int out_index;
11678b32580dSOr Gerlitz 
1168d14f6f2aSOr Gerlitz 	if (!mlx5_eswitch_prios_supported(esw) && attr->prio != 1) {
1169d14f6f2aSOr Gerlitz 		NL_SET_ERR_MSG(extack, "E-switch priorities unsupported, upgrade FW");
1170d14f6f2aSOr Gerlitz 		return -EOPNOTSUPP;
1171d14f6f2aSOr Gerlitz 	}
1172e52c2802SPaul Blakey 
1173bf07aa73SPaul Blakey 	if (attr->chain > max_chain) {
1174bf07aa73SPaul Blakey 		NL_SET_ERR_MSG(extack, "Requested chain is out of supported range");
11755a7e5bcbSVlad Buslov 		return -EOPNOTSUPP;
1176bf07aa73SPaul Blakey 	}
1177bf07aa73SPaul Blakey 
1178bf07aa73SPaul Blakey 	if (attr->prio > max_prio) {
1179bf07aa73SPaul Blakey 		NL_SET_ERR_MSG(extack, "Requested priority is out of supported range");
11805a7e5bcbSVlad Buslov 		return -EOPNOTSUPP;
1181bf07aa73SPaul Blakey 	}
1182bf07aa73SPaul Blakey 
1183f493f155SEli Britstein 	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
11848c4dc42bSEli Britstein 		int mirred_ifindex;
11858c4dc42bSEli Britstein 
1186f493f155SEli Britstein 		if (!(attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
1187f493f155SEli Britstein 			continue;
1188f493f155SEli Britstein 
11897040632dSTonghao Zhang 		mirred_ifindex = parse_attr->mirred_ifindex[out_index];
11903c37745eSOr Gerlitz 		out_dev = __dev_get_by_index(dev_net(priv->netdev),
11918c4dc42bSEli Britstein 					     mirred_ifindex);
1192733d4f36SRoi Dayan 		err = mlx5e_attach_encap(priv, flow, out_dev, out_index,
11930ad060eeSRoi Dayan 					 extack, &encap_dev, &encap_valid);
11940ad060eeSRoi Dayan 		if (err)
11955a7e5bcbSVlad Buslov 			return err;
11960ad060eeSRoi Dayan 
11973c37745eSOr Gerlitz 		out_priv = netdev_priv(encap_dev);
11983c37745eSOr Gerlitz 		rpriv = out_priv->ppriv;
11991cc26d74SEli Britstein 		attr->dests[out_index].rep = rpriv->rep;
12001cc26d74SEli Britstein 		attr->dests[out_index].mdev = out_priv->mdev;
12013c37745eSOr Gerlitz 	}
12023c37745eSOr Gerlitz 
12038b32580dSOr Gerlitz 	err = mlx5_eswitch_add_vlan_action(esw, attr);
1204c83954abSRabie Loulou 	if (err)
12055a7e5bcbSVlad Buslov 		return err;
1206adb4c123SOr Gerlitz 
1207d7e75a32SOr Gerlitz 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
12081a9527bbSOr Gerlitz 		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
1209d7e75a32SOr Gerlitz 		kfree(parse_attr->mod_hdr_actions);
1210c83954abSRabie Loulou 		if (err)
12115a7e5bcbSVlad Buslov 			return err;
1212d7e75a32SOr Gerlitz 	}
1213d7e75a32SOr Gerlitz 
1214b8aee822SMark Bloch 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
1215f9392795SShahar Klein 		counter = mlx5_fc_create(attr->counter_dev, true);
12165a7e5bcbSVlad Buslov 		if (IS_ERR(counter))
12175a7e5bcbSVlad Buslov 			return PTR_ERR(counter);
1218b8aee822SMark Bloch 
1219b8aee822SMark Bloch 		attr->counter = counter;
1220b8aee822SMark Bloch 	}
1221b8aee822SMark Bloch 
12220ad060eeSRoi Dayan 	/* we get here if one of the following takes place:
12230ad060eeSRoi Dayan 	 * (1) there's no error
12240ad060eeSRoi Dayan 	 * (2) there's an encap action and we don't have a valid neighbour
12253c37745eSOr Gerlitz 	 */
12260ad060eeSRoi Dayan 	if (!encap_valid) {
12275dbe906fSPaul Blakey 		/* continue with goto slow path rule instead */
12285dbe906fSPaul Blakey 		struct mlx5_esw_flow_attr slow_attr;
12295dbe906fSPaul Blakey 
12305dbe906fSPaul Blakey 		flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec, &slow_attr);
12315dbe906fSPaul Blakey 	} else {
12326d2a3ed0SOr Gerlitz 		flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);
12335dbe906fSPaul Blakey 	}
12345dbe906fSPaul Blakey 
12355a7e5bcbSVlad Buslov 	if (IS_ERR(flow->rule[0]))
12365a7e5bcbSVlad Buslov 		return PTR_ERR(flow->rule[0]);
1237226f2ca3SVlad Buslov 	else
1238226f2ca3SVlad Buslov 		flow_flag_set(flow, OFFLOADED);
1239c83954abSRabie Loulou 
12405dbe906fSPaul Blakey 	return 0;
1241aa0cbbaeSOr Gerlitz }
1242d85cdccbSOr Gerlitz 
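/* Return true if the flow spec matches on GENEVE TLV option data. */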
12439272e3dfSYevgeny Kliteynik static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow)
12449272e3dfSYevgeny Kliteynik {
12459272e3dfSYevgeny Kliteynik 	struct mlx5_flow_spec *spec = &flow->esw_attr->parse_attr->spec;
12469272e3dfSYevgeny Kliteynik 	void *headers_v = MLX5_ADDR_OF(fte_match_param,
12479272e3dfSYevgeny Kliteynik 				       spec->match_value,
12489272e3dfSYevgeny Kliteynik 				       misc_parameters_3);
12499272e3dfSYevgeny Kliteynik 	u32 geneve_tlv_opt_0_data = MLX5_GET(fte_match_set_misc3,
12509272e3dfSYevgeny Kliteynik 					     headers_v,
12519272e3dfSYevgeny Kliteynik 					     geneve_tlv_option_0_data);
12529272e3dfSYevgeny Kliteynik 
12539272e3dfSYevgeny Kliteynik 	return !!geneve_tlv_opt_0_data;
12549272e3dfSYevgeny Kliteynik }
12559272e3dfSYevgeny Kliteynik 
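/* Tear down an FDB offloaded flow: drop it from the unready list if it was
 * never offloaded, otherwise remove its fast or slow path rule, and release
 * the GENEVE option, vlan action, encap entries, modify-header context and
 * flow counter acquired at add time.
 */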
1256d85cdccbSOr Gerlitz static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
1257d85cdccbSOr Gerlitz 				  struct mlx5e_tc_flow *flow)
1258d85cdccbSOr Gerlitz {
1259d85cdccbSOr Gerlitz 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1260d7e75a32SOr Gerlitz 	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
12615dbe906fSPaul Blakey 	struct mlx5_esw_flow_attr slow_attr;
1262f493f155SEli Britstein 	int out_index;
1263d85cdccbSOr Gerlitz 
1264226f2ca3SVlad Buslov 	if (flow_flag_test(flow, NOT_READY)) {
1265b4a23329SRoi Dayan 		remove_unready_flow(flow);
1266ef06c9eeSRoi Dayan 		kvfree(attr->parse_attr);
1267ef06c9eeSRoi Dayan 		return;
1268ef06c9eeSRoi Dayan 	}
1269ef06c9eeSRoi Dayan 
1270226f2ca3SVlad Buslov 	if (mlx5e_is_offloaded_flow(flow)) {
1271226f2ca3SVlad Buslov 		if (flow_flag_test(flow, SLOW))
12725dbe906fSPaul Blakey 			mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr);
12735dbe906fSPaul Blakey 		else
12745dbe906fSPaul Blakey 			mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
12755dbe906fSPaul Blakey 	}
1276d85cdccbSOr Gerlitz 
12779272e3dfSYevgeny Kliteynik 	if (mlx5_flow_has_geneve_opt(flow))
12789272e3dfSYevgeny Kliteynik 		mlx5_geneve_tlv_option_del(priv->mdev->geneve);
12799272e3dfSYevgeny Kliteynik 
1280513f8f7fSOr Gerlitz 	mlx5_eswitch_del_vlan_action(esw, attr);
1281d85cdccbSOr Gerlitz 
1282f493f155SEli Britstein 	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
12838c4dc42bSEli Britstein 		if (attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP)
12848c4dc42bSEli Britstein 			mlx5e_detach_encap(priv, flow, out_index);
1285f493f155SEli Britstein 	kvfree(attr->parse_attr);
1286d7e75a32SOr Gerlitz 
1287513f8f7fSOr Gerlitz 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
12881a9527bbSOr Gerlitz 		mlx5e_detach_mod_hdr(priv, flow);
1289b8aee822SMark Bloch 
1290b8aee822SMark Bloch 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
1291f9392795SShahar Klein 		mlx5_fc_destroy(attr->counter_dev, attr->counter);
1292d85cdccbSOr Gerlitz }
1293d85cdccbSOr Gerlitz 
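/* Called when the neighbour behind an encap entry becomes valid: allocate
 * the packet reformat context in HW and, for every flow on @flow_list whose
 * encap destinations all have resolved neighbours, replace the slow path
 * rule with the regular encap rule.
 */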
1294232c0013SHadar Hen Zion void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
12952a1f1768SVlad Buslov 			      struct mlx5e_encap_entry *e,
12962a1f1768SVlad Buslov 			      struct list_head *flow_list)
1297232c0013SHadar Hen Zion {
12983c37745eSOr Gerlitz 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
12995dbe906fSPaul Blakey 	struct mlx5_esw_flow_attr slow_attr, *esw_attr;
13006d2a3ed0SOr Gerlitz 	struct mlx5_flow_handle *rule;
13016d2a3ed0SOr Gerlitz 	struct mlx5_flow_spec *spec;
1302232c0013SHadar Hen Zion 	struct mlx5e_tc_flow *flow;
1303232c0013SHadar Hen Zion 	int err;
1304232c0013SHadar Hen Zion 
130554c177caSOz Shlomo 	err = mlx5_packet_reformat_alloc(priv->mdev,
130654c177caSOz Shlomo 					 e->reformat_type,
1307232c0013SHadar Hen Zion 					 e->encap_size, e->encap_header,
130831ca3648SMark Bloch 					 MLX5_FLOW_NAMESPACE_FDB,
1309232c0013SHadar Hen Zion 					 &e->encap_id);
1310232c0013SHadar Hen Zion 	if (err) {
1311232c0013SHadar Hen Zion 		mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %d\n",
1312232c0013SHadar Hen Zion 			       err);
1313232c0013SHadar Hen Zion 		return;
1314232c0013SHadar Hen Zion 	}
1315232c0013SHadar Hen Zion 	e->flags |= MLX5_ENCAP_ENTRY_VALID;
1316f6dfb4c3SHadar Hen Zion 	mlx5e_rep_queue_neigh_stats_work(priv);
1317232c0013SHadar Hen Zion 
13182a1f1768SVlad Buslov 	list_for_each_entry(flow, flow_list, tmp_list) {
13198c4dc42bSEli Britstein 		bool all_flow_encaps_valid = true;
13208c4dc42bSEli Britstein 		int i;
13218c4dc42bSEli Britstein 
13223c37745eSOr Gerlitz 		esw_attr = flow->esw_attr;
13236d2a3ed0SOr Gerlitz 		spec = &esw_attr->parse_attr->spec;
13246d2a3ed0SOr Gerlitz 
13252a1f1768SVlad Buslov 		esw_attr->dests[flow->tmp_efi_index].encap_id = e->encap_id;
13262a1f1768SVlad Buslov 		esw_attr->dests[flow->tmp_efi_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
13278c4dc42bSEli Britstein 		/* Flow can be associated with multiple encap entries.
13288c4dc42bSEli Britstein 		 * Before offloading the flow verify that all of them have
13298c4dc42bSEli Britstein 		 * a valid neighbour.
13308c4dc42bSEli Britstein 		 */
13318c4dc42bSEli Britstein 		for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
13328c4dc42bSEli Britstein 			if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP))
13338c4dc42bSEli Britstein 				continue;
13348c4dc42bSEli Britstein 			if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP_VALID)) {
13358c4dc42bSEli Britstein 				all_flow_encaps_valid = false;
13368c4dc42bSEli Britstein 				break;
13378c4dc42bSEli Britstein 			}
13388c4dc42bSEli Britstein 		}
13398c4dc42bSEli Britstein 		/* Do not offload flows with unresolved neighbors */
13408c4dc42bSEli Britstein 		if (!all_flow_encaps_valid)
13412a1f1768SVlad Buslov 			continue;
13425dbe906fSPaul Blakey 		/* update from slow path rule to encap rule */
13436d2a3ed0SOr Gerlitz 		rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, esw_attr);
13446d2a3ed0SOr Gerlitz 		if (IS_ERR(rule)) {
13456d2a3ed0SOr Gerlitz 			err = PTR_ERR(rule);
1346232c0013SHadar Hen Zion 			mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
1347232c0013SHadar Hen Zion 				       err);
13482a1f1768SVlad Buslov 			continue;
1349232c0013SHadar Hen Zion 		}
13505dbe906fSPaul Blakey 
13515dbe906fSPaul Blakey 		mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr);
13526d2a3ed0SOr Gerlitz 		flow->rule[0] = rule;
1353226f2ca3SVlad Buslov 		/* was unset when slow path rule removed */
1354226f2ca3SVlad Buslov 		flow_flag_set(flow, OFFLOADED);
1355232c0013SHadar Hen Zion 	}
1356232c0013SHadar Hen Zion }
1357232c0013SHadar Hen Zion 
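/* Called when the neighbour behind an encap entry becomes invalid: move
 * every flow on @flow_list back to a slow path rule, clear the encap-valid
 * flag on the affected destination and release the HW packet reformat
 * context.
 */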
1358232c0013SHadar Hen Zion void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
13592a1f1768SVlad Buslov 			      struct mlx5e_encap_entry *e,
13602a1f1768SVlad Buslov 			      struct list_head *flow_list)
1361232c0013SHadar Hen Zion {
13623c37745eSOr Gerlitz 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
13635dbe906fSPaul Blakey 	struct mlx5_esw_flow_attr slow_attr;
13645dbe906fSPaul Blakey 	struct mlx5_flow_handle *rule;
13655dbe906fSPaul Blakey 	struct mlx5_flow_spec *spec;
1366232c0013SHadar Hen Zion 	struct mlx5e_tc_flow *flow;
13675dbe906fSPaul Blakey 	int err;
1368232c0013SHadar Hen Zion 
13692a1f1768SVlad Buslov 	list_for_each_entry(flow, flow_list, tmp_list) {
13705dbe906fSPaul Blakey 		spec = &flow->esw_attr->parse_attr->spec;
13715dbe906fSPaul Blakey 
13725dbe906fSPaul Blakey 		/* update from encap rule to slow path rule */
13735dbe906fSPaul Blakey 		rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec, &slow_attr);
13748c4dc42bSEli Britstein 		/* mark the flow's encap dest as invalid */
13752a1f1768SVlad Buslov 		flow->esw_attr->dests[flow->tmp_efi_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;
13765dbe906fSPaul Blakey 
13775dbe906fSPaul Blakey 		if (IS_ERR(rule)) {
13785dbe906fSPaul Blakey 			err = PTR_ERR(rule);
13795dbe906fSPaul Blakey 			mlx5_core_warn(priv->mdev, "Failed to update slow path (encap) flow, %d\n",
13805dbe906fSPaul Blakey 				       err);
13812a1f1768SVlad Buslov 			continue;
13825dbe906fSPaul Blakey 		}
13835dbe906fSPaul Blakey 
13846d2a3ed0SOr Gerlitz 		mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->esw_attr);
13855dbe906fSPaul Blakey 		flow->rule[0] = rule;
1386226f2ca3SVlad Buslov 		/* was unset when fast path rule removed */
1387226f2ca3SVlad Buslov 		flow_flag_set(flow, OFFLOADED);
1388232c0013SHadar Hen Zion 	}
1389232c0013SHadar Hen Zion 
139061c806daSOr Gerlitz 	/* we know that the encap is valid */
1391232c0013SHadar Hen Zion 	e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
139260786f09SMark Bloch 	mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id);
1393232c0013SHadar Hen Zion }
1394232c0013SHadar Hen Zion 
1395b8aee822SMark Bloch static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
1396b8aee822SMark Bloch {
1397226f2ca3SVlad Buslov 	if (mlx5e_is_eswitch_flow(flow))
1398b8aee822SMark Bloch 		return flow->esw_attr->counter;
1399b8aee822SMark Bloch 	else
1400b8aee822SMark Bloch 		return flow->nic_attr->counter;
1401b8aee822SMark Bloch }
1402b8aee822SMark Bloch 
14032a1f1768SVlad Buslov /* Takes reference to all flows attached to encap and adds the flows to
14042a1f1768SVlad Buslov  * flow_list using 'tmp_list' list_head in mlx5e_tc_flow.
14052a1f1768SVlad Buslov  */
14062a1f1768SVlad Buslov void mlx5e_take_all_encap_flows(struct mlx5e_encap_entry *e, struct list_head *flow_list)
14072a1f1768SVlad Buslov {
14082a1f1768SVlad Buslov 	struct encap_flow_item *efi;
14092a1f1768SVlad Buslov 	struct mlx5e_tc_flow *flow;
14102a1f1768SVlad Buslov 
14112a1f1768SVlad Buslov 	list_for_each_entry(efi, &e->flows, list) {
14122a1f1768SVlad Buslov 		flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
14132a1f1768SVlad Buslov 		if (IS_ERR(mlx5e_flow_get(flow)))
14142a1f1768SVlad Buslov 			continue;
14152a1f1768SVlad Buslov 
14162a1f1768SVlad Buslov 		flow->tmp_efi_index = efi->index;
14172a1f1768SVlad Buslov 		list_add(&flow->tmp_list, flow_list);
14182a1f1768SVlad Buslov 	}
14192a1f1768SVlad Buslov }
14202a1f1768SVlad Buslov 
14216a06c2f7SVlad Buslov /* Iterate over tmp_list of flows attached to flow_list head and put each flow. */
14222a1f1768SVlad Buslov void mlx5e_put_encap_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list)
14236a06c2f7SVlad Buslov {
14246a06c2f7SVlad Buslov 	struct mlx5e_tc_flow *flow, *tmp;
14256a06c2f7SVlad Buslov 
14266a06c2f7SVlad Buslov 	list_for_each_entry_safe(flow, tmp, flow_list, tmp_list)
14276a06c2f7SVlad Buslov 		mlx5e_flow_put(priv, flow);
14286a06c2f7SVlad Buslov }
14296a06c2f7SVlad Buslov 
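/* Walk nhe->encap_list under RCU and return the next encap entry whose
 * reference could be taken, waiting for it to finish initialization and
 * skipping entries that did not reach the VALID state. The entry passed in
 * as @e is released before the next one is returned.
 */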
1430ac0d9176SVlad Buslov static struct mlx5e_encap_entry *
1431ac0d9176SVlad Buslov mlx5e_get_next_valid_encap(struct mlx5e_neigh_hash_entry *nhe,
1432ac0d9176SVlad Buslov 			   struct mlx5e_encap_entry *e)
1433ac0d9176SVlad Buslov {
1434ac0d9176SVlad Buslov 	struct mlx5e_encap_entry *next = NULL;
1435ac0d9176SVlad Buslov 
1436ac0d9176SVlad Buslov retry:
1437ac0d9176SVlad Buslov 	rcu_read_lock();
1438ac0d9176SVlad Buslov 
1439ac0d9176SVlad Buslov 	/* find encap with non-zero reference counter value */
1440ac0d9176SVlad Buslov 	for (next = e ?
1441ac0d9176SVlad Buslov 		     list_next_or_null_rcu(&nhe->encap_list,
1442ac0d9176SVlad Buslov 					   &e->encap_list,
1443ac0d9176SVlad Buslov 					   struct mlx5e_encap_entry,
1444ac0d9176SVlad Buslov 					   encap_list) :
1445ac0d9176SVlad Buslov 		     list_first_or_null_rcu(&nhe->encap_list,
1446ac0d9176SVlad Buslov 					    struct mlx5e_encap_entry,
1447ac0d9176SVlad Buslov 					    encap_list);
1448ac0d9176SVlad Buslov 	     next;
1449ac0d9176SVlad Buslov 	     next = list_next_or_null_rcu(&nhe->encap_list,
1450ac0d9176SVlad Buslov 					  &next->encap_list,
1451ac0d9176SVlad Buslov 					  struct mlx5e_encap_entry,
1452ac0d9176SVlad Buslov 					  encap_list))
1453ac0d9176SVlad Buslov 		if (mlx5e_encap_take(next))
1454ac0d9176SVlad Buslov 			break;
1455ac0d9176SVlad Buslov 
1456ac0d9176SVlad Buslov 	rcu_read_unlock();
1457ac0d9176SVlad Buslov 
1458ac0d9176SVlad Buslov 	/* release starting encap */
1459ac0d9176SVlad Buslov 	if (e)
1460ac0d9176SVlad Buslov 		mlx5e_encap_put(netdev_priv(e->out_dev), e);
1461ac0d9176SVlad Buslov 	if (!next)
1462ac0d9176SVlad Buslov 		return next;
1463ac0d9176SVlad Buslov 
1464ac0d9176SVlad Buslov 	/* wait for encap to be fully initialized */
1465ac0d9176SVlad Buslov 	wait_for_completion(&next->res_ready);
1466ac0d9176SVlad Buslov 	/* continue searching if encap entry is not in valid state after completion */
1467ac0d9176SVlad Buslov 	if (!(next->flags & MLX5_ENCAP_ENTRY_VALID)) {
1468ac0d9176SVlad Buslov 		e = next;
1469ac0d9176SVlad Buslov 		goto retry;
1470ac0d9176SVlad Buslov 	}
1471ac0d9176SVlad Buslov 
1472ac0d9176SVlad Buslov 	return next;
1473ac0d9176SVlad Buslov }
1474ac0d9176SVlad Buslov 
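/* Check whether any offloaded flow using this neighbour has passed traffic
 * since the last report (by comparing flow counter lastuse values); if so,
 * update the reported timestamp and send a neigh event for the cached
 * device/destination IP pair.
 */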
1475f6dfb4c3SHadar Hen Zion void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
1476f6dfb4c3SHadar Hen Zion {
1477f6dfb4c3SHadar Hen Zion 	struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
1478ac0d9176SVlad Buslov 	struct mlx5e_encap_entry *e = NULL;
1479f6dfb4c3SHadar Hen Zion 	struct mlx5e_tc_flow *flow;
1480f6dfb4c3SHadar Hen Zion 	struct mlx5_fc *counter;
1481f6dfb4c3SHadar Hen Zion 	struct neigh_table *tbl;
1482f6dfb4c3SHadar Hen Zion 	bool neigh_used = false;
1483f6dfb4c3SHadar Hen Zion 	struct neighbour *n;
148490bb7692SAriel Levkovich 	u64 lastuse;
1485f6dfb4c3SHadar Hen Zion 
1486f6dfb4c3SHadar Hen Zion 	if (m_neigh->family == AF_INET)
1487f6dfb4c3SHadar Hen Zion 		tbl = &arp_tbl;
1488f6dfb4c3SHadar Hen Zion #if IS_ENABLED(CONFIG_IPV6)
1489f6dfb4c3SHadar Hen Zion 	else if (m_neigh->family == AF_INET6)
1490423c9db2SOr Gerlitz 		tbl = &nd_tbl;
1491f6dfb4c3SHadar Hen Zion #endif
1492f6dfb4c3SHadar Hen Zion 	else
1493f6dfb4c3SHadar Hen Zion 		return;
1494f6dfb4c3SHadar Hen Zion 
1495ac0d9176SVlad Buslov 	/* mlx5e_get_next_valid_encap() releases previous encap before returning
1496ac0d9176SVlad Buslov 	 * next one.
1497ac0d9176SVlad Buslov 	 */
1498ac0d9176SVlad Buslov 	while ((e = mlx5e_get_next_valid_encap(nhe, e)) != NULL) {
14996a06c2f7SVlad Buslov 		struct mlx5e_priv *priv = netdev_priv(e->out_dev);
15005a7e5bcbSVlad Buslov 		struct encap_flow_item *efi, *tmp;
15016a06c2f7SVlad Buslov 		struct mlx5_eswitch *esw;
15026a06c2f7SVlad Buslov 		LIST_HEAD(flow_list);
1503948993f2SVlad Buslov 
15046a06c2f7SVlad Buslov 		esw = priv->mdev->priv.eswitch;
15056a06c2f7SVlad Buslov 		mutex_lock(&esw->offloads.encap_tbl_lock);
15065a7e5bcbSVlad Buslov 		list_for_each_entry_safe(efi, tmp, &e->flows, list) {
150779baaec7SEli Britstein 			flow = container_of(efi, struct mlx5e_tc_flow,
150879baaec7SEli Britstein 					    encaps[efi->index]);
15095a7e5bcbSVlad Buslov 			if (IS_ERR(mlx5e_flow_get(flow)))
15105a7e5bcbSVlad Buslov 				continue;
15116a06c2f7SVlad Buslov 			list_add(&flow->tmp_list, &flow_list);
15125a7e5bcbSVlad Buslov 
1513226f2ca3SVlad Buslov 			if (mlx5e_is_offloaded_flow(flow)) {
1514b8aee822SMark Bloch 				counter = mlx5e_tc_get_counter(flow);
151590bb7692SAriel Levkovich 				lastuse = mlx5_fc_query_lastuse(counter);
1516f6dfb4c3SHadar Hen Zion 				if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
1517f6dfb4c3SHadar Hen Zion 					neigh_used = true;
1518f6dfb4c3SHadar Hen Zion 					break;
1519f6dfb4c3SHadar Hen Zion 				}
1520f6dfb4c3SHadar Hen Zion 			}
1521f6dfb4c3SHadar Hen Zion 		}
15226a06c2f7SVlad Buslov 		mutex_unlock(&esw->offloads.encap_tbl_lock);
1523948993f2SVlad Buslov 
15246a06c2f7SVlad Buslov 		mlx5e_put_encap_flow_list(priv, &flow_list);
1525ac0d9176SVlad Buslov 		if (neigh_used) {
1526ac0d9176SVlad Buslov 			/* release current encap before breaking the loop */
15276a06c2f7SVlad Buslov 			mlx5e_encap_put(priv, e);
1528e36d4810SRoi Dayan 			break;
1529f6dfb4c3SHadar Hen Zion 		}
1530ac0d9176SVlad Buslov 	}
1531f6dfb4c3SHadar Hen Zion 
1532f6dfb4c3SHadar Hen Zion 	if (neigh_used) {
1533f6dfb4c3SHadar Hen Zion 		nhe->reported_lastuse = jiffies;
1534f6dfb4c3SHadar Hen Zion 
1535f6dfb4c3SHadar Hen Zion 		/* find the relevant neigh according to the cached device and
1536f6dfb4c3SHadar Hen Zion 		 * dst ip pair
1537f6dfb4c3SHadar Hen Zion 		 */
1538f6dfb4c3SHadar Hen Zion 		n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev);
1539c7f7ba8dSRoi Dayan 		if (!n)
1540f6dfb4c3SHadar Hen Zion 			return;
1541f6dfb4c3SHadar Hen Zion 
1542f6dfb4c3SHadar Hen Zion 		neigh_event_send(n, NULL);
1543f6dfb4c3SHadar Hen Zion 		neigh_release(n);
1544f6dfb4c3SHadar Hen Zion 	}
1545f6dfb4c3SHadar Hen Zion }
1546f6dfb4c3SHadar Hen Zion 
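/* Free an encap entry once its last reference is gone: detach it from the
 * representor, release the HW packet reformat context if one was allocated,
 * free the cached encap header and free the entry itself via RCU.
 */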
154761086f39SVlad Buslov static void mlx5e_encap_dealloc(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
1548d85cdccbSOr Gerlitz {
1549948993f2SVlad Buslov 	WARN_ON(!list_empty(&e->flows));
15503c140dd5SVlad Buslov 
15513c140dd5SVlad Buslov 	if (e->compl_result > 0) {
1552232c0013SHadar Hen Zion 		mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
1553232c0013SHadar Hen Zion 
1554232c0013SHadar Hen Zion 		if (e->flags & MLX5_ENCAP_ENTRY_VALID)
155560786f09SMark Bloch 			mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id);
15563c140dd5SVlad Buslov 	}
1557232c0013SHadar Hen Zion 
1558232c0013SHadar Hen Zion 	kfree(e->encap_header);
1559ac0d9176SVlad Buslov 	kfree_rcu(e, rcu);
15605067b602SRoi Dayan }
1561948993f2SVlad Buslov 
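/* Drop a reference on an encap entry; on the last put the entry is removed
 * from the eswitch encap table and freed.
 */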
156261086f39SVlad Buslov void mlx5e_encap_put(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
156361086f39SVlad Buslov {
156461086f39SVlad Buslov 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
156561086f39SVlad Buslov 
156661086f39SVlad Buslov 	if (!refcount_dec_and_mutex_lock(&e->refcnt, &esw->offloads.encap_tbl_lock))
156761086f39SVlad Buslov 		return;
156861086f39SVlad Buslov 	hash_del_rcu(&e->encap_hlist);
156961086f39SVlad Buslov 	mutex_unlock(&esw->offloads.encap_tbl_lock);
157061086f39SVlad Buslov 
157161086f39SVlad Buslov 	mlx5e_encap_dealloc(priv, e);
157261086f39SVlad Buslov }
157361086f39SVlad Buslov 
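/* Detach a flow from one of its encap destinations and drop the flow's
 * reference on the encap entry, freeing the entry if this was the last user.
 */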
1574948993f2SVlad Buslov static void mlx5e_detach_encap(struct mlx5e_priv *priv,
1575948993f2SVlad Buslov 			       struct mlx5e_tc_flow *flow, int out_index)
1576948993f2SVlad Buslov {
157761086f39SVlad Buslov 	struct mlx5e_encap_entry *e = flow->encaps[out_index].e;
157861086f39SVlad Buslov 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
157961086f39SVlad Buslov 
1580948993f2SVlad Buslov 	/* flow wasn't fully initialized */
158161086f39SVlad Buslov 	if (!e)
1582948993f2SVlad Buslov 		return;
1583948993f2SVlad Buslov 
158461086f39SVlad Buslov 	mutex_lock(&esw->offloads.encap_tbl_lock);
1585948993f2SVlad Buslov 	list_del(&flow->encaps[out_index].list);
1586948993f2SVlad Buslov 	flow->encaps[out_index].e = NULL;
158761086f39SVlad Buslov 	if (!refcount_dec_and_test(&e->refcnt)) {
158861086f39SVlad Buslov 		mutex_unlock(&esw->offloads.encap_tbl_lock);
158961086f39SVlad Buslov 		return;
159061086f39SVlad Buslov 	}
159161086f39SVlad Buslov 	hash_del_rcu(&e->encap_hlist);
159261086f39SVlad Buslov 	mutex_unlock(&esw->offloads.encap_tbl_lock);
159361086f39SVlad Buslov 
159461086f39SVlad Buslov 	mlx5e_encap_dealloc(priv, e);
15955067b602SRoi Dayan }
15965067b602SRoi Dayan 
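/* Delete the duplicate of this flow that was installed on the peer eswitch,
 * if one exists.
 */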
159704de7ddaSRoi Dayan static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
159804de7ddaSRoi Dayan {
159904de7ddaSRoi Dayan 	struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;
160004de7ddaSRoi Dayan 
1601226f2ca3SVlad Buslov 	if (!flow_flag_test(flow, ESWITCH) ||
1602226f2ca3SVlad Buslov 	    !flow_flag_test(flow, DUP))
160304de7ddaSRoi Dayan 		return;
160404de7ddaSRoi Dayan 
160504de7ddaSRoi Dayan 	mutex_lock(&esw->offloads.peer_mutex);
160604de7ddaSRoi Dayan 	list_del(&flow->peer);
160704de7ddaSRoi Dayan 	mutex_unlock(&esw->offloads.peer_mutex);
160804de7ddaSRoi Dayan 
1609226f2ca3SVlad Buslov 	flow_flag_clear(flow, DUP);
161004de7ddaSRoi Dayan 
161104de7ddaSRoi Dayan 	mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow);
161204de7ddaSRoi Dayan 	kvfree(flow->peer_flow);
161304de7ddaSRoi Dayan 	flow->peer_flow = NULL;
161404de7ddaSRoi Dayan }
161504de7ddaSRoi Dayan 
161604de7ddaSRoi Dayan static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
161704de7ddaSRoi Dayan {
161804de7ddaSRoi Dayan 	struct mlx5_core_dev *dev = flow->priv->mdev;
161904de7ddaSRoi Dayan 	struct mlx5_devcom *devcom = dev->priv.devcom;
162004de7ddaSRoi Dayan 	struct mlx5_eswitch *peer_esw;
162104de7ddaSRoi Dayan 
162204de7ddaSRoi Dayan 	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
162304de7ddaSRoi Dayan 	if (!peer_esw)
162404de7ddaSRoi Dayan 		return;
162504de7ddaSRoi Dayan 
162604de7ddaSRoi Dayan 	__mlx5e_tc_del_fdb_peer_flow(flow);
162704de7ddaSRoi Dayan 	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
162804de7ddaSRoi Dayan }
162904de7ddaSRoi Dayan 
1630e8f887acSAmir Vadai static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
1631961e8979SRoi Dayan 			      struct mlx5e_tc_flow *flow)
1632e8f887acSAmir Vadai {
1633226f2ca3SVlad Buslov 	if (mlx5e_is_eswitch_flow(flow)) {
163404de7ddaSRoi Dayan 		mlx5e_tc_del_fdb_peer_flow(flow);
1635d85cdccbSOr Gerlitz 		mlx5e_tc_del_fdb_flow(priv, flow);
163604de7ddaSRoi Dayan 	} else {
1637d85cdccbSOr Gerlitz 		mlx5e_tc_del_nic_flow(priv, flow);
1638e8f887acSAmir Vadai 	}
163904de7ddaSRoi Dayan }
1640e8f887acSAmir Vadai 
1641bbd00f7eSHadar Hen Zion 
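/* Parse the tunnel (outer header) part of a classifier match into the flow
 * spec: tunnel-type specific fields are handled by the tc_tun parser, then
 * outer IPv4/IPv6 addresses and IP ECN/DSCP/TTL are added, a DMAC match is
 * enforced and IP fragments are left to software.
 */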
1642bbd00f7eSHadar Hen Zion static int parse_tunnel_attr(struct mlx5e_priv *priv,
1643bbd00f7eSHadar Hen Zion 			     struct mlx5_flow_spec *spec,
1644f9e30088SPablo Neira Ayuso 			     struct flow_cls_offload *f,
16456363651dSOr Gerlitz 			     struct net_device *filter_dev, u8 *match_level)
1646bbd00f7eSHadar Hen Zion {
1647e98bedf5SEli Britstein 	struct netlink_ext_ack *extack = f->common.extack;
1648bbd00f7eSHadar Hen Zion 	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
1649bbd00f7eSHadar Hen Zion 				       outer_headers);
1650bbd00f7eSHadar Hen Zion 	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
1651bbd00f7eSHadar Hen Zion 				       outer_headers);
1652f9e30088SPablo Neira Ayuso 	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
16538f256622SPablo Neira Ayuso 	int err;
1654bbd00f7eSHadar Hen Zion 
1655101f4de9SOz Shlomo 	err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
16566363651dSOr Gerlitz 				 headers_c, headers_v, match_level);
165754c177caSOz Shlomo 	if (err) {
165854c177caSOz Shlomo 		NL_SET_ERR_MSG_MOD(extack,
165954c177caSOz Shlomo 				   "failed to parse tunnel attributes");
1660101f4de9SOz Shlomo 		return err;
1661bbd00f7eSHadar Hen Zion 	}
1662bbd00f7eSHadar Hen Zion 
1663d1bda7eeSTonghao Zhang 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
16648f256622SPablo Neira Ayuso 		struct flow_match_ipv4_addrs match;
16658f256622SPablo Neira Ayuso 
16668f256622SPablo Neira Ayuso 		flow_rule_match_enc_ipv4_addrs(rule, &match);
1667bbd00f7eSHadar Hen Zion 		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
1668bbd00f7eSHadar Hen Zion 			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
16698f256622SPablo Neira Ayuso 			 ntohl(match.mask->src));
1670bbd00f7eSHadar Hen Zion 		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
1671bbd00f7eSHadar Hen Zion 			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
16728f256622SPablo Neira Ayuso 			 ntohl(match.key->src));
1673bbd00f7eSHadar Hen Zion 
1674bbd00f7eSHadar Hen Zion 		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
1675bbd00f7eSHadar Hen Zion 			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
16768f256622SPablo Neira Ayuso 			 ntohl(match.mask->dst));
1677bbd00f7eSHadar Hen Zion 		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
1678bbd00f7eSHadar Hen Zion 			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
16798f256622SPablo Neira Ayuso 			 ntohl(match.key->dst));
1680bbd00f7eSHadar Hen Zion 
1681bbd00f7eSHadar Hen Zion 		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
1682bbd00f7eSHadar Hen Zion 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
1683d1bda7eeSTonghao Zhang 	} else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
16848f256622SPablo Neira Ayuso 		struct flow_match_ipv6_addrs match;
168519f44401SOr Gerlitz 
16868f256622SPablo Neira Ayuso 		flow_rule_match_enc_ipv6_addrs(rule, &match);
168719f44401SOr Gerlitz 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
168819f44401SOr Gerlitz 				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
16898f256622SPablo Neira Ayuso 		       &match.mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
169019f44401SOr Gerlitz 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
169119f44401SOr Gerlitz 				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
16928f256622SPablo Neira Ayuso 		       &match.key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
169319f44401SOr Gerlitz 
169419f44401SOr Gerlitz 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
169519f44401SOr Gerlitz 				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
16968f256622SPablo Neira Ayuso 		       &match.mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
169719f44401SOr Gerlitz 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
169819f44401SOr Gerlitz 				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
16998f256622SPablo Neira Ayuso 		       &match.key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
170019f44401SOr Gerlitz 
170119f44401SOr Gerlitz 		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
170219f44401SOr Gerlitz 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6);
17032e72eb43SOr Gerlitz 	}
1704bbd00f7eSHadar Hen Zion 
17058f256622SPablo Neira Ayuso 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
17068f256622SPablo Neira Ayuso 		struct flow_match_ip match;
1707bcef735cSOr Gerlitz 
17088f256622SPablo Neira Ayuso 		flow_rule_match_enc_ip(rule, &match);
17098f256622SPablo Neira Ayuso 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn,
17108f256622SPablo Neira Ayuso 			 match.mask->tos & 0x3);
17118f256622SPablo Neira Ayuso 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn,
17128f256622SPablo Neira Ayuso 			 match.key->tos & 0x3);
1713bcef735cSOr Gerlitz 
17148f256622SPablo Neira Ayuso 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp,
17158f256622SPablo Neira Ayuso 			 match.mask->tos >> 2);
17168f256622SPablo Neira Ayuso 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp,
17178f256622SPablo Neira Ayuso 			 match.key->tos >> 2);
1718bcef735cSOr Gerlitz 
17198f256622SPablo Neira Ayuso 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit,
17208f256622SPablo Neira Ayuso 			 match.mask->ttl);
17218f256622SPablo Neira Ayuso 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit,
17228f256622SPablo Neira Ayuso 			 match.key->ttl);
1723e98bedf5SEli Britstein 
17248f256622SPablo Neira Ayuso 		if (match.mask->ttl &&
1725e98bedf5SEli Britstein 		    !MLX5_CAP_ESW_FLOWTABLE_FDB
1726e98bedf5SEli Britstein 			(priv->mdev,
1727e98bedf5SEli Britstein 			 ft_field_support.outer_ipv4_ttl)) {
1728e98bedf5SEli Britstein 			NL_SET_ERR_MSG_MOD(extack,
1729e98bedf5SEli Britstein 					   "Matching on TTL is not supported");
1730e98bedf5SEli Britstein 			return -EOPNOTSUPP;
1731e98bedf5SEli Britstein 		}
1732e98bedf5SEli Britstein 
1733bcef735cSOr Gerlitz 	}
1734bcef735cSOr Gerlitz 
1735bbd00f7eSHadar Hen Zion 	/* Enforce DMAC when offloading incoming tunneled flows.
1736bbd00f7eSHadar Hen Zion 	 * Flow counters require a match on the DMAC.
1737bbd00f7eSHadar Hen Zion 	 */
1738bbd00f7eSHadar Hen Zion 	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
1739bbd00f7eSHadar Hen Zion 	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
1740bbd00f7eSHadar Hen Zion 	ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1741bbd00f7eSHadar Hen Zion 				     dmac_47_16), priv->netdev->dev_addr);
1742bbd00f7eSHadar Hen Zion 
1743bbd00f7eSHadar Hen Zion 	/* let software handle IP fragments */
1744bbd00f7eSHadar Hen Zion 	MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
1745bbd00f7eSHadar Hen Zion 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);
1746bbd00f7eSHadar Hen Zion 
1747bbd00f7eSHadar Hen Zion 	return 0;
1748bbd00f7eSHadar Hen Zion }
1749bbd00f7eSHadar Hen Zion 
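/* Helpers returning the inner or outer headers of the match criteria/value,
 * depending on whether the action flags include a decap.
 */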
17508377629eSEli Britstein static void *get_match_headers_criteria(u32 flags,
17518377629eSEli Britstein 					struct mlx5_flow_spec *spec)
17528377629eSEli Britstein {
17538377629eSEli Britstein 	return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
17548377629eSEli Britstein 		MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
17558377629eSEli Britstein 			     inner_headers) :
17568377629eSEli Britstein 		MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
17578377629eSEli Britstein 			     outer_headers);
17588377629eSEli Britstein }
17598377629eSEli Britstein 
17608377629eSEli Britstein static void *get_match_headers_value(u32 flags,
17618377629eSEli Britstein 				     struct mlx5_flow_spec *spec)
17628377629eSEli Britstein {
17638377629eSEli Britstein 	return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
17648377629eSEli Britstein 		MLX5_ADDR_OF(fte_match_param, spec->match_value,
17658377629eSEli Britstein 			     inner_headers) :
17668377629eSEli Britstein 		MLX5_ADDR_OF(fte_match_param, spec->match_value,
17678377629eSEli Britstein 			     outer_headers);
17688377629eSEli Britstein }
17698377629eSEli Britstein 
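/* Translate a flow_cls_offload match into the mlx5 flow spec. Tunnel matches
 * are parsed first (redirecting the header pointers to the inner headers),
 * followed by L2 (ethertype, MACs, vlan/cvlan), L3 (addresses, IP protocol,
 * tos/ttl) and L4 (TCP/UDP ports, TCP flags) matches. The deepest layer
 * matched is reported via the inner and outer match levels.
 */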
1770de0af0bfSRoi Dayan static int __parse_cls_flower(struct mlx5e_priv *priv,
1771de0af0bfSRoi Dayan 			      struct mlx5_flow_spec *spec,
1772f9e30088SPablo Neira Ayuso 			      struct flow_cls_offload *f,
177354c177caSOz Shlomo 			      struct net_device *filter_dev,
177493b3586eSHuy Nguyen 			      u8 *inner_match_level, u8 *outer_match_level)
1775e3a2b7edSAmir Vadai {
1776e98bedf5SEli Britstein 	struct netlink_ext_ack *extack = f->common.extack;
1777c5bb1730SMaor Gottlieb 	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
1778c5bb1730SMaor Gottlieb 				       outer_headers);
1779c5bb1730SMaor Gottlieb 	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
1780c5bb1730SMaor Gottlieb 				       outer_headers);
1781699e96ddSJianbo Liu 	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
1782699e96ddSJianbo Liu 				    misc_parameters);
1783699e96ddSJianbo Liu 	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
1784699e96ddSJianbo Liu 				    misc_parameters);
1785f9e30088SPablo Neira Ayuso 	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
17868f256622SPablo Neira Ayuso 	struct flow_dissector *dissector = rule->match.dissector;
1787e3a2b7edSAmir Vadai 	u16 addr_type = 0;
1788e3a2b7edSAmir Vadai 	u8 ip_proto = 0;
178993b3586eSHuy Nguyen 	u8 *match_level;
1790e3a2b7edSAmir Vadai 
179193b3586eSHuy Nguyen 	match_level = outer_match_level;
1792de0af0bfSRoi Dayan 
17938f256622SPablo Neira Ayuso 	if (dissector->used_keys &
17943d144578SVlad Buslov 	    ~(BIT(FLOW_DISSECTOR_KEY_META) |
17953d144578SVlad Buslov 	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
1796e3a2b7edSAmir Vadai 	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
1797e3a2b7edSAmir Vadai 	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
1798095b6cfdSOr Gerlitz 	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
1799699e96ddSJianbo Liu 	      BIT(FLOW_DISSECTOR_KEY_CVLAN) |
1800e3a2b7edSAmir Vadai 	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
1801e3a2b7edSAmir Vadai 	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
1802bbd00f7eSHadar Hen Zion 	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
1803bbd00f7eSHadar Hen Zion 	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
1804bbd00f7eSHadar Hen Zion 	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
1805bbd00f7eSHadar Hen Zion 	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
1806bbd00f7eSHadar Hen Zion 	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS)	|
1807e77834ecSOr Gerlitz 	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
1808fd7da28bSOr Gerlitz 	      BIT(FLOW_DISSECTOR_KEY_TCP) |
1809bcef735cSOr Gerlitz 	      BIT(FLOW_DISSECTOR_KEY_IP)  |
18109272e3dfSYevgeny Kliteynik 	      BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
18119272e3dfSYevgeny Kliteynik 	      BIT(FLOW_DISSECTOR_KEY_ENC_OPTS))) {
1812e98bedf5SEli Britstein 		NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
1813e3a2b7edSAmir Vadai 		netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
18148f256622SPablo Neira Ayuso 			    dissector->used_keys);
1815e3a2b7edSAmir Vadai 		return -EOPNOTSUPP;
1816e3a2b7edSAmir Vadai 	}
1817e3a2b7edSAmir Vadai 
1818075973c7SVlad Buslov 	if (mlx5e_get_tc_tun(filter_dev)) {
181993b3586eSHuy Nguyen 		if (parse_tunnel_attr(priv, spec, f, filter_dev,
182093b3586eSHuy Nguyen 				      outer_match_level))
1821bbd00f7eSHadar Hen Zion 			return -EOPNOTSUPP;
1822bbd00f7eSHadar Hen Zion 
182393b3586eSHuy Nguyen 		/* At this point, header pointers should point to the inner
1824bbd00f7eSHadar Hen Zion 		 * headers, outer headers were already set by parse_tunnel_attr
1825bbd00f7eSHadar Hen Zion 		 */
182693b3586eSHuy Nguyen 		match_level = inner_match_level;
18278377629eSEli Britstein 		headers_c = get_match_headers_criteria(MLX5_FLOW_CONTEXT_ACTION_DECAP,
18288377629eSEli Britstein 						       spec);
18298377629eSEli Britstein 		headers_v = get_match_headers_value(MLX5_FLOW_CONTEXT_ACTION_DECAP,
18308377629eSEli Britstein 						    spec);
1831bbd00f7eSHadar Hen Zion 	}
1832bbd00f7eSHadar Hen Zion 
18338f256622SPablo Neira Ayuso 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
18348f256622SPablo Neira Ayuso 		struct flow_match_basic match;
1835e3a2b7edSAmir Vadai 
18368f256622SPablo Neira Ayuso 		flow_rule_match_basic(rule, &match);
18378f256622SPablo Neira Ayuso 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
18388f256622SPablo Neira Ayuso 			 ntohs(match.mask->n_proto));
18398f256622SPablo Neira Ayuso 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
18408f256622SPablo Neira Ayuso 			 ntohs(match.key->n_proto));
18418f256622SPablo Neira Ayuso 
18428f256622SPablo Neira Ayuso 		if (match.mask->n_proto)
1843d708f902SOr Gerlitz 			*match_level = MLX5_MATCH_L2;
1844e3a2b7edSAmir Vadai 	}
184535a605dbSEli Britstein 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN) ||
184635a605dbSEli Britstein 	    is_vlan_dev(filter_dev)) {
184735a605dbSEli Britstein 		struct flow_dissector_key_vlan filter_dev_mask;
184835a605dbSEli Britstein 		struct flow_dissector_key_vlan filter_dev_key;
18498f256622SPablo Neira Ayuso 		struct flow_match_vlan match;
18508f256622SPablo Neira Ayuso 
185135a605dbSEli Britstein 		if (is_vlan_dev(filter_dev)) {
185235a605dbSEli Britstein 			match.key = &filter_dev_key;
185335a605dbSEli Britstein 			match.key->vlan_id = vlan_dev_vlan_id(filter_dev);
185435a605dbSEli Britstein 			match.key->vlan_tpid = vlan_dev_vlan_proto(filter_dev);
185535a605dbSEli Britstein 			match.key->vlan_priority = 0;
185635a605dbSEli Britstein 			match.mask = &filter_dev_mask;
185735a605dbSEli Britstein 			memset(match.mask, 0xff, sizeof(*match.mask));
185835a605dbSEli Britstein 			match.mask->vlan_priority = 0;
185935a605dbSEli Britstein 		} else {
18608f256622SPablo Neira Ayuso 			flow_rule_match_vlan(rule, &match);
186135a605dbSEli Britstein 		}
18628f256622SPablo Neira Ayuso 		if (match.mask->vlan_id ||
18638f256622SPablo Neira Ayuso 		    match.mask->vlan_priority ||
18648f256622SPablo Neira Ayuso 		    match.mask->vlan_tpid) {
18658f256622SPablo Neira Ayuso 			if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
1866699e96ddSJianbo Liu 				MLX5_SET(fte_match_set_lyr_2_4, headers_c,
1867699e96ddSJianbo Liu 					 svlan_tag, 1);
1868699e96ddSJianbo Liu 				MLX5_SET(fte_match_set_lyr_2_4, headers_v,
1869699e96ddSJianbo Liu 					 svlan_tag, 1);
1870699e96ddSJianbo Liu 			} else {
1871699e96ddSJianbo Liu 				MLX5_SET(fte_match_set_lyr_2_4, headers_c,
1872699e96ddSJianbo Liu 					 cvlan_tag, 1);
1873699e96ddSJianbo Liu 				MLX5_SET(fte_match_set_lyr_2_4, headers_v,
1874699e96ddSJianbo Liu 					 cvlan_tag, 1);
1875699e96ddSJianbo Liu 			}
1876095b6cfdSOr Gerlitz 
18778f256622SPablo Neira Ayuso 			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid,
18788f256622SPablo Neira Ayuso 				 match.mask->vlan_id);
18798f256622SPablo Neira Ayuso 			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid,
18808f256622SPablo Neira Ayuso 				 match.key->vlan_id);
1881358d79a4SOr Gerlitz 
18828f256622SPablo Neira Ayuso 			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio,
18838f256622SPablo Neira Ayuso 				 match.mask->vlan_priority);
18848f256622SPablo Neira Ayuso 			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio,
18858f256622SPablo Neira Ayuso 				 match.key->vlan_priority);
188654782900SOr Gerlitz 
1887d708f902SOr Gerlitz 			*match_level = MLX5_MATCH_L2;
1888095b6cfdSOr Gerlitz 		}
1889d3a80bb5SOr Gerlitz 	} else if (*match_level != MLX5_MATCH_NONE) {
1890cee26487SJianbo Liu 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, svlan_tag, 1);
1891cee26487SJianbo Liu 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
1892d3a80bb5SOr Gerlitz 		*match_level = MLX5_MATCH_L2;
1893095b6cfdSOr Gerlitz 	}
1894095b6cfdSOr Gerlitz 
18958f256622SPablo Neira Ayuso 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
18968f256622SPablo Neira Ayuso 		struct flow_match_vlan match;
18978f256622SPablo Neira Ayuso 
189812d5cbf8SJianbo Liu 		flow_rule_match_cvlan(rule, &match);
18998f256622SPablo Neira Ayuso 		if (match.mask->vlan_id ||
19008f256622SPablo Neira Ayuso 		    match.mask->vlan_priority ||
19018f256622SPablo Neira Ayuso 		    match.mask->vlan_tpid) {
19028f256622SPablo Neira Ayuso 			if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
1903699e96ddSJianbo Liu 				MLX5_SET(fte_match_set_misc, misc_c,
1904699e96ddSJianbo Liu 					 outer_second_svlan_tag, 1);
1905699e96ddSJianbo Liu 				MLX5_SET(fte_match_set_misc, misc_v,
1906699e96ddSJianbo Liu 					 outer_second_svlan_tag, 1);
1907699e96ddSJianbo Liu 			} else {
1908699e96ddSJianbo Liu 				MLX5_SET(fte_match_set_misc, misc_c,
1909699e96ddSJianbo Liu 					 outer_second_cvlan_tag, 1);
1910699e96ddSJianbo Liu 				MLX5_SET(fte_match_set_misc, misc_v,
1911699e96ddSJianbo Liu 					 outer_second_cvlan_tag, 1);
1912699e96ddSJianbo Liu 			}
1913699e96ddSJianbo Liu 
1914699e96ddSJianbo Liu 			MLX5_SET(fte_match_set_misc, misc_c, outer_second_vid,
19158f256622SPablo Neira Ayuso 				 match.mask->vlan_id);
1916699e96ddSJianbo Liu 			MLX5_SET(fte_match_set_misc, misc_v, outer_second_vid,
19178f256622SPablo Neira Ayuso 				 match.key->vlan_id);
1918699e96ddSJianbo Liu 			MLX5_SET(fte_match_set_misc, misc_c, outer_second_prio,
19198f256622SPablo Neira Ayuso 				 match.mask->vlan_priority);
1920699e96ddSJianbo Liu 			MLX5_SET(fte_match_set_misc, misc_v, outer_second_prio,
19218f256622SPablo Neira Ayuso 				 match.key->vlan_priority);
1922699e96ddSJianbo Liu 
1923699e96ddSJianbo Liu 			*match_level = MLX5_MATCH_L2;
1924699e96ddSJianbo Liu 		}
1925699e96ddSJianbo Liu 	}
1926699e96ddSJianbo Liu 
19278f256622SPablo Neira Ayuso 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
19288f256622SPablo Neira Ayuso 		struct flow_match_eth_addrs match;
192954782900SOr Gerlitz 
19308f256622SPablo Neira Ayuso 		flow_rule_match_eth_addrs(rule, &match);
1931d3a80bb5SOr Gerlitz 		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
1932d3a80bb5SOr Gerlitz 					     dmac_47_16),
19338f256622SPablo Neira Ayuso 				match.mask->dst);
1934d3a80bb5SOr Gerlitz 		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1935d3a80bb5SOr Gerlitz 					     dmac_47_16),
19368f256622SPablo Neira Ayuso 				match.key->dst);
1937d3a80bb5SOr Gerlitz 
1938d3a80bb5SOr Gerlitz 		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
1939d3a80bb5SOr Gerlitz 					     smac_47_16),
19408f256622SPablo Neira Ayuso 				match.mask->src);
1941d3a80bb5SOr Gerlitz 		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1942d3a80bb5SOr Gerlitz 					     smac_47_16),
19438f256622SPablo Neira Ayuso 				match.key->src);
1944d3a80bb5SOr Gerlitz 
19458f256622SPablo Neira Ayuso 		if (!is_zero_ether_addr(match.mask->src) ||
19468f256622SPablo Neira Ayuso 		    !is_zero_ether_addr(match.mask->dst))
1947d708f902SOr Gerlitz 			*match_level = MLX5_MATCH_L2;
194854782900SOr Gerlitz 	}
194954782900SOr Gerlitz 
19508f256622SPablo Neira Ayuso 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
19518f256622SPablo Neira Ayuso 		struct flow_match_control match;
195254782900SOr Gerlitz 
19538f256622SPablo Neira Ayuso 		flow_rule_match_control(rule, &match);
19548f256622SPablo Neira Ayuso 		addr_type = match.key->addr_type;
195554782900SOr Gerlitz 
195654782900SOr Gerlitz 		/* the HW doesn't support frag first/later */
19578f256622SPablo Neira Ayuso 		if (match.mask->flags & FLOW_DIS_FIRST_FRAG)
195854782900SOr Gerlitz 			return -EOPNOTSUPP;
195954782900SOr Gerlitz 
19608f256622SPablo Neira Ayuso 		if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
196154782900SOr Gerlitz 			MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
196254782900SOr Gerlitz 			MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
19638f256622SPablo Neira Ayuso 				 match.key->flags & FLOW_DIS_IS_FRAGMENT);
196454782900SOr Gerlitz 
196554782900SOr Gerlitz 			/* the HW doesn't need L3 inline to match on frag=no */
19668f256622SPablo Neira Ayuso 			if (!(match.key->flags & FLOW_DIS_IS_FRAGMENT))
196783621b7dSOr Gerlitz 				*match_level = MLX5_MATCH_L2;
196854782900SOr Gerlitz 	/* ***  L2 attributes parsing up to here *** */
196954782900SOr Gerlitz 			else
197083621b7dSOr Gerlitz 				*match_level = MLX5_MATCH_L3;
197154782900SOr Gerlitz 		}
197254782900SOr Gerlitz 	}
197354782900SOr Gerlitz 
19748f256622SPablo Neira Ayuso 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
19758f256622SPablo Neira Ayuso 		struct flow_match_basic match;
19768f256622SPablo Neira Ayuso 
19778f256622SPablo Neira Ayuso 		flow_rule_match_basic(rule, &match);
19788f256622SPablo Neira Ayuso 		ip_proto = match.key->ip_proto;
197954782900SOr Gerlitz 
198054782900SOr Gerlitz 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
19818f256622SPablo Neira Ayuso 			 match.mask->ip_proto);
198254782900SOr Gerlitz 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
19838f256622SPablo Neira Ayuso 			 match.key->ip_proto);
198454782900SOr Gerlitz 
19858f256622SPablo Neira Ayuso 		if (match.mask->ip_proto)
1986d708f902SOr Gerlitz 			*match_level = MLX5_MATCH_L3;
198754782900SOr Gerlitz 	}
198854782900SOr Gerlitz 
1989e3a2b7edSAmir Vadai 	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
19908f256622SPablo Neira Ayuso 		struct flow_match_ipv4_addrs match;
1991e3a2b7edSAmir Vadai 
19928f256622SPablo Neira Ayuso 		flow_rule_match_ipv4_addrs(rule, &match);
1993e3a2b7edSAmir Vadai 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
1994e3a2b7edSAmir Vadai 				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
19958f256622SPablo Neira Ayuso 		       &match.mask->src, sizeof(match.mask->src));
1996e3a2b7edSAmir Vadai 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1997e3a2b7edSAmir Vadai 				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
19988f256622SPablo Neira Ayuso 		       &match.key->src, sizeof(match.key->src));
1999e3a2b7edSAmir Vadai 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2000e3a2b7edSAmir Vadai 				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
20018f256622SPablo Neira Ayuso 		       &match.mask->dst, sizeof(match.mask->dst));
2002e3a2b7edSAmir Vadai 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2003e3a2b7edSAmir Vadai 				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
20048f256622SPablo Neira Ayuso 		       &match.key->dst, sizeof(match.key->dst));
2005de0af0bfSRoi Dayan 
20068f256622SPablo Neira Ayuso 		if (match.mask->src || match.mask->dst)
2007d708f902SOr Gerlitz 			*match_level = MLX5_MATCH_L3;
2008e3a2b7edSAmir Vadai 	}
2009e3a2b7edSAmir Vadai 
2010e3a2b7edSAmir Vadai 	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
20118f256622SPablo Neira Ayuso 		struct flow_match_ipv6_addrs match;
2012e3a2b7edSAmir Vadai 
20138f256622SPablo Neira Ayuso 		flow_rule_match_ipv6_addrs(rule, &match);
2014e3a2b7edSAmir Vadai 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2015e3a2b7edSAmir Vadai 				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
20168f256622SPablo Neira Ayuso 		       &match.mask->src, sizeof(match.mask->src));
2017e3a2b7edSAmir Vadai 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2018e3a2b7edSAmir Vadai 				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
20198f256622SPablo Neira Ayuso 		       &match.key->src, sizeof(match.key->src));
2020e3a2b7edSAmir Vadai 
2021e3a2b7edSAmir Vadai 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2022e3a2b7edSAmir Vadai 				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
20238f256622SPablo Neira Ayuso 		       &match.mask->dst, sizeof(match.mask->dst));
2024e3a2b7edSAmir Vadai 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2025e3a2b7edSAmir Vadai 				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
20268f256622SPablo Neira Ayuso 		       &match.key->dst, sizeof(match.key->dst));
2027de0af0bfSRoi Dayan 
20288f256622SPablo Neira Ayuso 		if (ipv6_addr_type(&match.mask->src) != IPV6_ADDR_ANY ||
20298f256622SPablo Neira Ayuso 		    ipv6_addr_type(&match.mask->dst) != IPV6_ADDR_ANY)
2030d708f902SOr Gerlitz 			*match_level = MLX5_MATCH_L3;
2031e3a2b7edSAmir Vadai 	}
2032e3a2b7edSAmir Vadai 
20338f256622SPablo Neira Ayuso 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
20348f256622SPablo Neira Ayuso 		struct flow_match_ip match;
20351f97a526SOr Gerlitz 
20368f256622SPablo Neira Ayuso 		flow_rule_match_ip(rule, &match);
20378f256622SPablo Neira Ayuso 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn,
20388f256622SPablo Neira Ayuso 			 match.mask->tos & 0x3);
20398f256622SPablo Neira Ayuso 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn,
20408f256622SPablo Neira Ayuso 			 match.key->tos & 0x3);
20411f97a526SOr Gerlitz 
20428f256622SPablo Neira Ayuso 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp,
20438f256622SPablo Neira Ayuso 			 match.mask->tos >> 2);
20448f256622SPablo Neira Ayuso 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp,
20458f256622SPablo Neira Ayuso 			 match.key->tos >> 2);
20461f97a526SOr Gerlitz 
20478f256622SPablo Neira Ayuso 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit,
20488f256622SPablo Neira Ayuso 			 match.mask->ttl);
20498f256622SPablo Neira Ayuso 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit,
20508f256622SPablo Neira Ayuso 			 match.key->ttl);
20511f97a526SOr Gerlitz 
20528f256622SPablo Neira Ayuso 		if (match.mask->ttl &&
2053a8ade55fSOr Gerlitz 		    !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
2054e98bedf5SEli Britstein 						ft_field_support.outer_ipv4_ttl)) {
2055e98bedf5SEli Britstein 			NL_SET_ERR_MSG_MOD(extack,
2056e98bedf5SEli Britstein 					   "Matching on TTL is not supported");
20571f97a526SOr Gerlitz 			return -EOPNOTSUPP;
2058e98bedf5SEli Britstein 		}
2059a8ade55fSOr Gerlitz 
20608f256622SPablo Neira Ayuso 		if (match.mask->tos || match.mask->ttl)
2061d708f902SOr Gerlitz 			*match_level = MLX5_MATCH_L3;
20621f97a526SOr Gerlitz 	}
20631f97a526SOr Gerlitz 
206454782900SOr Gerlitz 	/* ***  L3 attributes parsing up to here *** */
206554782900SOr Gerlitz 
20668f256622SPablo Neira Ayuso 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
20678f256622SPablo Neira Ayuso 		struct flow_match_ports match;
20688f256622SPablo Neira Ayuso 
20698f256622SPablo Neira Ayuso 		flow_rule_match_ports(rule, &match);
2070e3a2b7edSAmir Vadai 		switch (ip_proto) {
2071e3a2b7edSAmir Vadai 		case IPPROTO_TCP:
2072e3a2b7edSAmir Vadai 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
20738f256622SPablo Neira Ayuso 				 tcp_sport, ntohs(match.mask->src));
2074e3a2b7edSAmir Vadai 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
20758f256622SPablo Neira Ayuso 				 tcp_sport, ntohs(match.key->src));
2076e3a2b7edSAmir Vadai 
2077e3a2b7edSAmir Vadai 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
20788f256622SPablo Neira Ayuso 				 tcp_dport, ntohs(match.mask->dst));
2079e3a2b7edSAmir Vadai 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
20808f256622SPablo Neira Ayuso 				 tcp_dport, ntohs(match.key->dst));
2081e3a2b7edSAmir Vadai 			break;
2082e3a2b7edSAmir Vadai 
2083e3a2b7edSAmir Vadai 		case IPPROTO_UDP:
2084e3a2b7edSAmir Vadai 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
20858f256622SPablo Neira Ayuso 				 udp_sport, ntohs(match.mask->src));
2086e3a2b7edSAmir Vadai 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
20878f256622SPablo Neira Ayuso 				 udp_sport, ntohs(match.key->src));
2088e3a2b7edSAmir Vadai 
2089e3a2b7edSAmir Vadai 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
20908f256622SPablo Neira Ayuso 				 udp_dport, ntohs(match.mask->dst));
2091e3a2b7edSAmir Vadai 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
20928f256622SPablo Neira Ayuso 				 udp_dport, ntohs(match.key->dst));
2093e3a2b7edSAmir Vadai 			break;
2094e3a2b7edSAmir Vadai 		default:
2095e98bedf5SEli Britstein 			NL_SET_ERR_MSG_MOD(extack,
2096e98bedf5SEli Britstein 					   "Only UDP and TCP transports are supported for L4 matching");
2097e3a2b7edSAmir Vadai 			netdev_err(priv->netdev,
2098e3a2b7edSAmir Vadai 				   "Only UDP and TCP transports are supported\n");
2099e3a2b7edSAmir Vadai 			return -EINVAL;
2100e3a2b7edSAmir Vadai 		}
2101de0af0bfSRoi Dayan 
21028f256622SPablo Neira Ayuso 		if (match.mask->src || match.mask->dst)
2103d708f902SOr Gerlitz 			*match_level = MLX5_MATCH_L4;
2104e3a2b7edSAmir Vadai 	}
2105e3a2b7edSAmir Vadai 
21068f256622SPablo Neira Ayuso 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
21078f256622SPablo Neira Ayuso 		struct flow_match_tcp match;
2108e77834ecSOr Gerlitz 
21098f256622SPablo Neira Ayuso 		flow_rule_match_tcp(rule, &match);
2110e77834ecSOr Gerlitz 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
21118f256622SPablo Neira Ayuso 			 ntohs(match.mask->flags));
2112e77834ecSOr Gerlitz 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
21138f256622SPablo Neira Ayuso 			 ntohs(match.key->flags));
2114e77834ecSOr Gerlitz 
21158f256622SPablo Neira Ayuso 		if (match.mask->flags)
2116d708f902SOr Gerlitz 			*match_level = MLX5_MATCH_L4;
2117e77834ecSOr Gerlitz 	}
2118e77834ecSOr Gerlitz 
2119e3a2b7edSAmir Vadai 	return 0;
2120e3a2b7edSAmir Vadai }
2121e3a2b7edSAmir Vadai 
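/* Wrapper around __parse_cls_flower() that also validates the eswitch inline
 * mode against the deepest non-tunnel match level and stores the resulting
 * match levels in the flow attributes.
 */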
2122de0af0bfSRoi Dayan static int parse_cls_flower(struct mlx5e_priv *priv,
212365ba8fb7SOr Gerlitz 			    struct mlx5e_tc_flow *flow,
2124de0af0bfSRoi Dayan 			    struct mlx5_flow_spec *spec,
2125f9e30088SPablo Neira Ayuso 			    struct flow_cls_offload *f,
212654c177caSOz Shlomo 			    struct net_device *filter_dev)
2127de0af0bfSRoi Dayan {
212893b3586eSHuy Nguyen 	u8 inner_match_level, outer_match_level, non_tunnel_match_level;
2129e98bedf5SEli Britstein 	struct netlink_ext_ack *extack = f->common.extack;
2130de0af0bfSRoi Dayan 	struct mlx5_core_dev *dev = priv->mdev;
2131de0af0bfSRoi Dayan 	struct mlx5_eswitch *esw = dev->priv.eswitch;
21321d447a39SSaeed Mahameed 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
21331d447a39SSaeed Mahameed 	struct mlx5_eswitch_rep *rep;
2134226f2ca3SVlad Buslov 	bool is_eswitch_flow;
2135de0af0bfSRoi Dayan 	int err;
2136de0af0bfSRoi Dayan 
213793b3586eSHuy Nguyen 	inner_match_level = MLX5_MATCH_NONE;
213893b3586eSHuy Nguyen 	outer_match_level = MLX5_MATCH_NONE;
213993b3586eSHuy Nguyen 
214093b3586eSHuy Nguyen 	err = __parse_cls_flower(priv, spec, f, filter_dev, &inner_match_level,
214193b3586eSHuy Nguyen 				 &outer_match_level);
214293b3586eSHuy Nguyen 	non_tunnel_match_level = (inner_match_level == MLX5_MATCH_NONE) ?
214393b3586eSHuy Nguyen 				 outer_match_level : inner_match_level;
2144de0af0bfSRoi Dayan 
2145226f2ca3SVlad Buslov 	is_eswitch_flow = mlx5e_is_eswitch_flow(flow);
2146226f2ca3SVlad Buslov 	if (!err && is_eswitch_flow) {
21471d447a39SSaeed Mahameed 		rep = rpriv->rep;
2148b05af6aaSBodong Wang 		if (rep->vport != MLX5_VPORT_UPLINK &&
21491d447a39SSaeed Mahameed 		    (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
215093b3586eSHuy Nguyen 		    esw->offloads.inline_mode < non_tunnel_match_level)) {
2151e98bedf5SEli Britstein 			NL_SET_ERR_MSG_MOD(extack,
2152e98bedf5SEli Britstein 					   "Flow is not offloaded due to min inline setting");
2153de0af0bfSRoi Dayan 			netdev_warn(priv->netdev,
2154de0af0bfSRoi Dayan 				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
215593b3586eSHuy Nguyen 				    non_tunnel_match_level, esw->offloads.inline_mode);
2156de0af0bfSRoi Dayan 			return -EOPNOTSUPP;
2157de0af0bfSRoi Dayan 		}
2158de0af0bfSRoi Dayan 	}
2159de0af0bfSRoi Dayan 
2160226f2ca3SVlad Buslov 	if (is_eswitch_flow) {
216193b3586eSHuy Nguyen 		flow->esw_attr->inner_match_level = inner_match_level;
216293b3586eSHuy Nguyen 		flow->esw_attr->outer_match_level = outer_match_level;
21636363651dSOr Gerlitz 	} else {
216493b3586eSHuy Nguyen 		flow->nic_attr->match_level = non_tunnel_match_level;
21656363651dSOr Gerlitz 	}
216638aa51c1SOr Gerlitz 
2167de0af0bfSRoi Dayan 	return err;
2168de0af0bfSRoi Dayan }
2169de0af0bfSRoi Dayan 
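/* Scratch copy of every header a pedit action may rewrite.  One instance
 * accumulates values and another accumulates masks, kept separately for
 * the SET and ADD pedit commands (struct pedit_headers_action below).
 */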
2170d79b6df6SOr Gerlitz struct pedit_headers {
2171d79b6df6SOr Gerlitz 	struct ethhdr  eth;
21720eb69bb9SEli Britstein 	struct vlan_hdr vlan;
2173d79b6df6SOr Gerlitz 	struct iphdr   ip4;
2174d79b6df6SOr Gerlitz 	struct ipv6hdr ip6;
2175d79b6df6SOr Gerlitz 	struct tcphdr  tcp;
2176d79b6df6SOr Gerlitz 	struct udphdr  udp;
2177d79b6df6SOr Gerlitz };
2178d79b6df6SOr Gerlitz 
2179c500c86bSPablo Neira Ayuso struct pedit_headers_action {
2180c500c86bSPablo Neira Ayuso 	struct pedit_headers	vals;
2181c500c86bSPablo Neira Ayuso 	struct pedit_headers	masks;
2182c500c86bSPablo Neira Ayuso 	u32			pedits;
2183c500c86bSPablo Neira Ayuso };
2184c500c86bSPablo Neira Ayuso 
2185d79b6df6SOr Gerlitz static int pedit_header_offsets[] = {
218673867881SPablo Neira Ayuso 	[FLOW_ACT_MANGLE_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
218773867881SPablo Neira Ayuso 	[FLOW_ACT_MANGLE_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
218873867881SPablo Neira Ayuso 	[FLOW_ACT_MANGLE_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
218973867881SPablo Neira Ayuso 	[FLOW_ACT_MANGLE_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
219073867881SPablo Neira Ayuso 	[FLOW_ACT_MANGLE_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
2191d79b6df6SOr Gerlitz };
2192d79b6df6SOr Gerlitz 
2193d79b6df6SOr Gerlitz #define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])
2194d79b6df6SOr Gerlitz 
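/* Record one 32-bit pedit mask/value pair at @offset within the per-header
 * scratch area; acting twice on the same bits of the same location is
 * rejected.
 */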
2195d79b6df6SOr Gerlitz static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
2196c500c86bSPablo Neira Ayuso 			 struct pedit_headers_action *hdrs)
2197d79b6df6SOr Gerlitz {
2198d79b6df6SOr Gerlitz 	u32 *curr_pmask, *curr_pval;
2199d79b6df6SOr Gerlitz 
2200c500c86bSPablo Neira Ayuso 	curr_pmask = (u32 *)(pedit_header(&hdrs->masks, hdr_type) + offset);
2201c500c86bSPablo Neira Ayuso 	curr_pval  = (u32 *)(pedit_header(&hdrs->vals, hdr_type) + offset);
2202d79b6df6SOr Gerlitz 
2203d79b6df6SOr Gerlitz 	if (*curr_pmask & mask)  /* disallow acting twice on the same location */
2204d79b6df6SOr Gerlitz 		goto out_err;
2205d79b6df6SOr Gerlitz 
2206d79b6df6SOr Gerlitz 	*curr_pmask |= mask;
2207d79b6df6SOr Gerlitz 	*curr_pval  |= (val & mask);
2208d79b6df6SOr Gerlitz 
2209d79b6df6SOr Gerlitz 	return 0;
2210d79b6df6SOr Gerlitz 
2211d79b6df6SOr Gerlitz out_err:
2212d79b6df6SOr Gerlitz 	return -EOPNOTSUPP;
2213d79b6df6SOr Gerlitz }
2214d79b6df6SOr Gerlitz 
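/* Describes one rewritable field: the FW modify-header field id and size,
 * its offset within struct pedit_headers, and the offset of the matching
 * field inside the flow match criteria (used to skip rewrites that the
 * match already guarantees).
 */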
2215d79b6df6SOr Gerlitz struct mlx5_fields {
2216d79b6df6SOr Gerlitz 	u8  field;
2217d79b6df6SOr Gerlitz 	u8  size;
2218d79b6df6SOr Gerlitz 	u32 offset;
221927c11b6bSEli Britstein 	u32 match_offset;
2220d79b6df6SOr Gerlitz };
2221d79b6df6SOr Gerlitz 
222227c11b6bSEli Britstein #define OFFLOAD(fw_field, size, field, off, match_field) \
222327c11b6bSEli Britstein 		{MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, size, \
222427c11b6bSEli Britstein 		 offsetof(struct pedit_headers, field) + (off), \
222527c11b6bSEli Britstein 		 MLX5_BYTE_OFF(fte_match_set_lyr_2_4, match_field)}
222627c11b6bSEli Britstein 
22272ef86872SEli Britstein /* Evaluate to true when the masked values are equal and the rewrite mask
22282ef86872SEli Britstein  * does not cover any bits that are not also covered by the match mask.
22292ef86872SEli Britstein  */
22302ef86872SEli Britstein #define SAME_VAL_MASK(type, valp, maskp, matchvalp, matchmaskp) ({ \
22312ef86872SEli Britstein 	type matchmaskx = *(type *)(matchmaskp); \
22322ef86872SEli Britstein 	type matchvalx = *(type *)(matchvalp); \
22332ef86872SEli Britstein 	type maskx = *(type *)(maskp); \
22342ef86872SEli Britstein 	type valx = *(type *)(valp); \
22352ef86872SEli Britstein 	\
22362ef86872SEli Britstein 	(valx & maskx) == (matchvalx & matchmaskx) && !(maskx & (maskx ^ \
22372ef86872SEli Britstein 								 matchmaskx)); \
22382ef86872SEli Britstein })
22392ef86872SEli Britstein 
224027c11b6bSEli Britstein static bool cmp_val_mask(void *valp, void *maskp, void *matchvalp,
224127c11b6bSEli Britstein 			 void *matchmaskp, int size)
224227c11b6bSEli Britstein {
224327c11b6bSEli Britstein 	bool same = false;
224427c11b6bSEli Britstein 
224527c11b6bSEli Britstein 	switch (size) {
224627c11b6bSEli Britstein 	case sizeof(u8):
22472ef86872SEli Britstein 		same = SAME_VAL_MASK(u8, valp, maskp, matchvalp, matchmaskp);
224827c11b6bSEli Britstein 		break;
224927c11b6bSEli Britstein 	case sizeof(u16):
22502ef86872SEli Britstein 		same = SAME_VAL_MASK(u16, valp, maskp, matchvalp, matchmaskp);
225127c11b6bSEli Britstein 		break;
225227c11b6bSEli Britstein 	case sizeof(u32):
22532ef86872SEli Britstein 		same = SAME_VAL_MASK(u32, valp, maskp, matchvalp, matchmaskp);
225427c11b6bSEli Britstein 		break;
225527c11b6bSEli Britstein 	}
225627c11b6bSEli Britstein 
225727c11b6bSEli Britstein 	return same;
225827c11b6bSEli Britstein }
2259a8e4f0c4SOr Gerlitz 
2260d79b6df6SOr Gerlitz static struct mlx5_fields fields[] = {
226127c11b6bSEli Britstein 	OFFLOAD(DMAC_47_16, 4, eth.h_dest[0], 0, dmac_47_16),
226227c11b6bSEli Britstein 	OFFLOAD(DMAC_15_0,  2, eth.h_dest[4], 0, dmac_15_0),
226327c11b6bSEli Britstein 	OFFLOAD(SMAC_47_16, 4, eth.h_source[0], 0, smac_47_16),
226427c11b6bSEli Britstein 	OFFLOAD(SMAC_15_0,  2, eth.h_source[4], 0, smac_15_0),
226527c11b6bSEli Britstein 	OFFLOAD(ETHERTYPE,  2, eth.h_proto, 0, ethertype),
226627c11b6bSEli Britstein 	OFFLOAD(FIRST_VID,  2, vlan.h_vlan_TCI, 0, first_vid),
2267d79b6df6SOr Gerlitz 
226827c11b6bSEli Britstein 	OFFLOAD(IP_TTL, 1, ip4.ttl,   0, ttl_hoplimit),
226927c11b6bSEli Britstein 	OFFLOAD(SIPV4,  4, ip4.saddr, 0, src_ipv4_src_ipv6.ipv4_layout.ipv4),
227027c11b6bSEli Britstein 	OFFLOAD(DIPV4,  4, ip4.daddr, 0, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2271d79b6df6SOr Gerlitz 
227227c11b6bSEli Britstein 	OFFLOAD(SIPV6_127_96, 4, ip6.saddr.s6_addr32[0], 0,
227327c11b6bSEli Britstein 		src_ipv4_src_ipv6.ipv6_layout.ipv6[0]),
227427c11b6bSEli Britstein 	OFFLOAD(SIPV6_95_64,  4, ip6.saddr.s6_addr32[1], 0,
227527c11b6bSEli Britstein 		src_ipv4_src_ipv6.ipv6_layout.ipv6[4]),
227627c11b6bSEli Britstein 	OFFLOAD(SIPV6_63_32,  4, ip6.saddr.s6_addr32[2], 0,
227727c11b6bSEli Britstein 		src_ipv4_src_ipv6.ipv6_layout.ipv6[8]),
227827c11b6bSEli Britstein 	OFFLOAD(SIPV6_31_0,   4, ip6.saddr.s6_addr32[3], 0,
227927c11b6bSEli Britstein 		src_ipv4_src_ipv6.ipv6_layout.ipv6[12]),
228027c11b6bSEli Britstein 	OFFLOAD(DIPV6_127_96, 4, ip6.daddr.s6_addr32[0], 0,
228127c11b6bSEli Britstein 		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[0]),
228227c11b6bSEli Britstein 	OFFLOAD(DIPV6_95_64,  4, ip6.daddr.s6_addr32[1], 0,
228327c11b6bSEli Britstein 		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[4]),
228427c11b6bSEli Britstein 	OFFLOAD(DIPV6_63_32,  4, ip6.daddr.s6_addr32[2], 0,
228527c11b6bSEli Britstein 		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[8]),
228627c11b6bSEli Britstein 	OFFLOAD(DIPV6_31_0,   4, ip6.daddr.s6_addr32[3], 0,
228727c11b6bSEli Britstein 		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]),
228827c11b6bSEli Britstein 	OFFLOAD(IPV6_HOPLIMIT, 1, ip6.hop_limit, 0, ttl_hoplimit),
2289d79b6df6SOr Gerlitz 
229027c11b6bSEli Britstein 	OFFLOAD(TCP_SPORT, 2, tcp.source,  0, tcp_sport),
229127c11b6bSEli Britstein 	OFFLOAD(TCP_DPORT, 2, tcp.dest,    0, tcp_dport),
229227c11b6bSEli Britstein 	OFFLOAD(TCP_FLAGS, 1, tcp.ack_seq, 5, tcp_flags),
2293d79b6df6SOr Gerlitz 
229427c11b6bSEli Britstein 	OFFLOAD(UDP_SPORT, 2, udp.source, 0, udp_sport),
229527c11b6bSEli Britstein 	OFFLOAD(UDP_DPORT, 2, udp.dest,   0, udp_dport),
2296d79b6df6SOr Gerlitz };
2297d79b6df6SOr Gerlitz 
2298218d05ceSTonghao Zhang /* On input, parse_attr->max_mod_hdr_actions tells how many HW actions can be
2299218d05ceSTonghao Zhang  * parsed at most from the SW pedit actions. On success,
2300218d05ceSTonghao Zhang  * parse_attr->num_mod_hdr_actions says how many HW actions were actually parsed.
2301d79b6df6SOr Gerlitz  */
2302c500c86bSPablo Neira Ayuso static int offload_pedit_fields(struct pedit_headers_action *hdrs,
2303e98bedf5SEli Britstein 				struct mlx5e_tc_flow_parse_attr *parse_attr,
230427c11b6bSEli Britstein 				u32 *action_flags,
2305e98bedf5SEli Britstein 				struct netlink_ext_ack *extack)
2306d79b6df6SOr Gerlitz {
2307d79b6df6SOr Gerlitz 	struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
230827c11b6bSEli Britstein 	void *headers_c = get_match_headers_criteria(*action_flags,
230927c11b6bSEli Britstein 						     &parse_attr->spec);
231027c11b6bSEli Britstein 	void *headers_v = get_match_headers_value(*action_flags,
231127c11b6bSEli Britstein 						  &parse_attr->spec);
23122b64bebaSOr Gerlitz 	int i, action_size, nactions, max_actions, first, last, next_z;
2313d79b6df6SOr Gerlitz 	void *s_masks_p, *a_masks_p, *vals_p;
2314d79b6df6SOr Gerlitz 	struct mlx5_fields *f;
2315d79b6df6SOr Gerlitz 	u8 cmd, field_bsize;
2316e3ca4e05SOr Gerlitz 	u32 s_mask, a_mask;
2317d79b6df6SOr Gerlitz 	unsigned long mask;
23182b64bebaSOr Gerlitz 	__be32 mask_be32;
23192b64bebaSOr Gerlitz 	__be16 mask_be16;
2320d79b6df6SOr Gerlitz 	void *action;
2321d79b6df6SOr Gerlitz 
232273867881SPablo Neira Ayuso 	set_masks = &hdrs[0].masks;
232373867881SPablo Neira Ayuso 	add_masks = &hdrs[1].masks;
232473867881SPablo Neira Ayuso 	set_vals = &hdrs[0].vals;
232573867881SPablo Neira Ayuso 	add_vals = &hdrs[1].vals;
2326d79b6df6SOr Gerlitz 
2327d79b6df6SOr Gerlitz 	action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
2328218d05ceSTonghao Zhang 	action = parse_attr->mod_hdr_actions +
2329218d05ceSTonghao Zhang 		 parse_attr->num_mod_hdr_actions * action_size;
2330218d05ceSTonghao Zhang 
2331218d05ceSTonghao Zhang 	max_actions = parse_attr->max_mod_hdr_actions;
2332218d05ceSTonghao Zhang 	nactions = parse_attr->num_mod_hdr_actions;
2333d79b6df6SOr Gerlitz 
2334d79b6df6SOr Gerlitz 	for (i = 0; i < ARRAY_SIZE(fields); i++) {
233527c11b6bSEli Britstein 		bool skip;
233627c11b6bSEli Britstein 
2337d79b6df6SOr Gerlitz 		f = &fields[i];
2338d79b6df6SOr Gerlitz 		/* avoid seeing bits set from previous iterations */
2339e3ca4e05SOr Gerlitz 		s_mask = 0;
2340e3ca4e05SOr Gerlitz 		a_mask = 0;
2341d79b6df6SOr Gerlitz 
2342d79b6df6SOr Gerlitz 		s_masks_p = (void *)set_masks + f->offset;
2343d79b6df6SOr Gerlitz 		a_masks_p = (void *)add_masks + f->offset;
2344d79b6df6SOr Gerlitz 
2345d79b6df6SOr Gerlitz 		memcpy(&s_mask, s_masks_p, f->size);
2346d79b6df6SOr Gerlitz 		memcpy(&a_mask, a_masks_p, f->size);
2347d79b6df6SOr Gerlitz 
2348d79b6df6SOr Gerlitz 		if (!s_mask && !a_mask) /* nothing to offload here */
2349d79b6df6SOr Gerlitz 			continue;
2350d79b6df6SOr Gerlitz 
2351d79b6df6SOr Gerlitz 		if (s_mask && a_mask) {
2352e98bedf5SEli Britstein 			NL_SET_ERR_MSG_MOD(extack,
2353e98bedf5SEli Britstein 					   "can't set and add to the same HW field");
2354d79b6df6SOr Gerlitz 			printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field);
2355d79b6df6SOr Gerlitz 			return -EOPNOTSUPP;
2356d79b6df6SOr Gerlitz 		}
2357d79b6df6SOr Gerlitz 
2358d79b6df6SOr Gerlitz 		if (nactions == max_actions) {
2359e98bedf5SEli Britstein 			NL_SET_ERR_MSG_MOD(extack,
2360e98bedf5SEli Britstein 					   "too many pedit actions, can't offload");
2361d79b6df6SOr Gerlitz 			printk(KERN_WARNING "mlx5: parsed %d pedit actions, can't do more\n", nactions);
2362d79b6df6SOr Gerlitz 			return -EOPNOTSUPP;
2363d79b6df6SOr Gerlitz 		}
2364d79b6df6SOr Gerlitz 
236527c11b6bSEli Britstein 		skip = false;
2366d79b6df6SOr Gerlitz 		if (s_mask) {
236727c11b6bSEli Britstein 			void *match_mask = headers_c + f->match_offset;
236827c11b6bSEli Britstein 			void *match_val = headers_v + f->match_offset;
236927c11b6bSEli Britstein 
2370d79b6df6SOr Gerlitz 			cmd  = MLX5_ACTION_TYPE_SET;
2371d79b6df6SOr Gerlitz 			mask = s_mask;
2372d79b6df6SOr Gerlitz 			vals_p = (void *)set_vals + f->offset;
237327c11b6bSEli Britstein 			/* don't rewrite if we have a match on the same value */
237427c11b6bSEli Britstein 			if (cmp_val_mask(vals_p, s_masks_p, match_val,
237527c11b6bSEli Britstein 					 match_mask, f->size))
237627c11b6bSEli Britstein 				skip = true;
2377d79b6df6SOr Gerlitz 			/* clear to denote we consumed this field */
2378d79b6df6SOr Gerlitz 			memset(s_masks_p, 0, f->size);
2379d79b6df6SOr Gerlitz 		} else {
238027c11b6bSEli Britstein 			u32 zero = 0;
238127c11b6bSEli Britstein 
2382d79b6df6SOr Gerlitz 			cmd  = MLX5_ACTION_TYPE_ADD;
2383d79b6df6SOr Gerlitz 			mask = a_mask;
2384d79b6df6SOr Gerlitz 			vals_p = (void *)add_vals + f->offset;
238527c11b6bSEli Britstein 			/* add 0 is no change */
238627c11b6bSEli Britstein 			if (!memcmp(vals_p, &zero, f->size))
238727c11b6bSEli Britstein 				skip = true;
2388d79b6df6SOr Gerlitz 			/* clear to denote we consumed this field */
2389d79b6df6SOr Gerlitz 			memset(a_masks_p, 0, f->size);
2390d79b6df6SOr Gerlitz 		}
239127c11b6bSEli Britstein 		if (skip)
239227c11b6bSEli Britstein 			continue;
2393d79b6df6SOr Gerlitz 
2394d79b6df6SOr Gerlitz 		field_bsize = f->size * BITS_PER_BYTE;
2395e3ca4e05SOr Gerlitz 
23962b64bebaSOr Gerlitz 		if (field_bsize == 32) {
23972b64bebaSOr Gerlitz 			mask_be32 = *(__be32 *)&mask;
23982b64bebaSOr Gerlitz 			mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
23992b64bebaSOr Gerlitz 		} else if (field_bsize == 16) {
24002b64bebaSOr Gerlitz 			mask_be16 = *(__be16 *)&mask;
24012b64bebaSOr Gerlitz 			mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
24022b64bebaSOr Gerlitz 		}
24032b64bebaSOr Gerlitz 
2404d79b6df6SOr Gerlitz 		first = find_first_bit(&mask, field_bsize);
24052b64bebaSOr Gerlitz 		next_z = find_next_zero_bit(&mask, field_bsize, first);
2406d79b6df6SOr Gerlitz 		last  = find_last_bit(&mask, field_bsize);
24072b64bebaSOr Gerlitz 		if (first < next_z && next_z < last) {
2408e98bedf5SEli Britstein 			NL_SET_ERR_MSG_MOD(extack,
2409e98bedf5SEli Britstein 					   "rewrite of few sub-fields isn't supported");
24102b64bebaSOr Gerlitz 			printk(KERN_WARNING "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n",
2411d79b6df6SOr Gerlitz 			       mask);
2412d79b6df6SOr Gerlitz 			return -EOPNOTSUPP;
2413d79b6df6SOr Gerlitz 		}
2414d79b6df6SOr Gerlitz 
2415d79b6df6SOr Gerlitz 		MLX5_SET(set_action_in, action, action_type, cmd);
2416d79b6df6SOr Gerlitz 		MLX5_SET(set_action_in, action, field, f->field);
2417d79b6df6SOr Gerlitz 
2418d79b6df6SOr Gerlitz 		if (cmd == MLX5_ACTION_TYPE_SET) {
24192b64bebaSOr Gerlitz 			MLX5_SET(set_action_in, action, offset, first);
2420d79b6df6SOr Gerlitz 			/* length is num of bits to be written, zero means length of 32 */
24212b64bebaSOr Gerlitz 			MLX5_SET(set_action_in, action, length, (last - first + 1));
2422d79b6df6SOr Gerlitz 		}
2423d79b6df6SOr Gerlitz 
2424d79b6df6SOr Gerlitz 		if (field_bsize == 32)
24252b64bebaSOr Gerlitz 			MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p) >> first);
2426d79b6df6SOr Gerlitz 		else if (field_bsize == 16)
24272b64bebaSOr Gerlitz 			MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p) >> first);
2428d79b6df6SOr Gerlitz 		else if (field_bsize == 8)
24292b64bebaSOr Gerlitz 			MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first);
2430d79b6df6SOr Gerlitz 
2431d79b6df6SOr Gerlitz 		action += action_size;
2432d79b6df6SOr Gerlitz 		nactions++;
2433d79b6df6SOr Gerlitz 	}
2434d79b6df6SOr Gerlitz 
2435d79b6df6SOr Gerlitz 	parse_attr->num_mod_hdr_actions = nactions;
2436d79b6df6SOr Gerlitz 	return 0;
2437d79b6df6SOr Gerlitz }
2438d79b6df6SOr Gerlitz 
24392cc1cb1dSTonghao Zhang static int mlx5e_flow_namespace_max_modify_action(struct mlx5_core_dev *mdev,
24402cc1cb1dSTonghao Zhang 						  int namespace)
24412cc1cb1dSTonghao Zhang {
24422cc1cb1dSTonghao Zhang 	if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
24432cc1cb1dSTonghao Zhang 		return MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, max_modify_header_actions);
24442cc1cb1dSTonghao Zhang 	else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
24452cc1cb1dSTonghao Zhang 		return MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_modify_header_actions);
24462cc1cb1dSTonghao Zhang }
24472cc1cb1dSTonghao Zhang 
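/* Allocate the modify-header action buffer for this flow.  Its size is
 * bounded by the device capability for the target namespace and by the
 * number of SET/ADD pedit keys times the worst case of 16 HW actions per
 * key.
 */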
2448d79b6df6SOr Gerlitz static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
2449c500c86bSPablo Neira Ayuso 				 struct pedit_headers_action *hdrs,
2450c500c86bSPablo Neira Ayuso 				 int namespace,
2451d79b6df6SOr Gerlitz 				 struct mlx5e_tc_flow_parse_attr *parse_attr)
2452d79b6df6SOr Gerlitz {
2453d79b6df6SOr Gerlitz 	int nkeys, action_size, max_actions;
2454d79b6df6SOr Gerlitz 
2455c500c86bSPablo Neira Ayuso 	nkeys = hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits +
2456c500c86bSPablo Neira Ayuso 		hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits;
2457d79b6df6SOr Gerlitz 	action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
2458d79b6df6SOr Gerlitz 
24592cc1cb1dSTonghao Zhang 	max_actions = mlx5e_flow_namespace_max_modify_action(priv->mdev, namespace);
2460d79b6df6SOr Gerlitz 	/* each 32-bit pedit SW key can translate to at most 16 HW actions */
2461d79b6df6SOr Gerlitz 	max_actions = min(max_actions, nkeys * 16);
2462d79b6df6SOr Gerlitz 
2463d79b6df6SOr Gerlitz 	parse_attr->mod_hdr_actions = kcalloc(max_actions, action_size, GFP_KERNEL);
2464d79b6df6SOr Gerlitz 	if (!parse_attr->mod_hdr_actions)
2465d79b6df6SOr Gerlitz 		return -ENOMEM;
2466d79b6df6SOr Gerlitz 
2467218d05ceSTonghao Zhang 	parse_attr->max_mod_hdr_actions = max_actions;
2468d79b6df6SOr Gerlitz 	return 0;
2469d79b6df6SOr Gerlitz }
2470d79b6df6SOr Gerlitz 
2471d79b6df6SOr Gerlitz static const struct pedit_headers zero_masks = {};
2472d79b6df6SOr Gerlitz 
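/* Validate one pedit (mangle/add) entry and accumulate its mask and value
 * into the per-command scratch headers; the HW modify-header actions are
 * built later from the accumulated state.
 */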
2473d79b6df6SOr Gerlitz static int parse_tc_pedit_action(struct mlx5e_priv *priv,
247473867881SPablo Neira Ayuso 				 const struct flow_action_entry *act, int namespace,
2475e98bedf5SEli Britstein 				 struct mlx5e_tc_flow_parse_attr *parse_attr,
2476c500c86bSPablo Neira Ayuso 				 struct pedit_headers_action *hdrs,
2477e98bedf5SEli Britstein 				 struct netlink_ext_ack *extack)
2478d79b6df6SOr Gerlitz {
247973867881SPablo Neira Ayuso 	u8 cmd = (act->id == FLOW_ACTION_MANGLE) ? 0 : 1;
248073867881SPablo Neira Ayuso 	int err = -EOPNOTSUPP;
2481d79b6df6SOr Gerlitz 	u32 mask, val, offset;
248273867881SPablo Neira Ayuso 	u8 htype;
2483d79b6df6SOr Gerlitz 
248473867881SPablo Neira Ayuso 	htype = act->mangle.htype;
2486d79b6df6SOr Gerlitz 
248773867881SPablo Neira Ayuso 	if (htype == FLOW_ACT_MANGLE_UNSPEC) {
248873867881SPablo Neira Ayuso 		NL_SET_ERR_MSG_MOD(extack, "legacy pedit isn't offloaded");
2489d79b6df6SOr Gerlitz 		goto out_err;
2490d79b6df6SOr Gerlitz 	}
2491d79b6df6SOr Gerlitz 
24922cc1cb1dSTonghao Zhang 	if (!mlx5e_flow_namespace_max_modify_action(priv->mdev, namespace)) {
24932cc1cb1dSTonghao Zhang 		NL_SET_ERR_MSG_MOD(extack,
24942cc1cb1dSTonghao Zhang 				   "The pedit offload action is not supported");
24952cc1cb1dSTonghao Zhang 		goto out_err;
24962cc1cb1dSTonghao Zhang 	}
24972cc1cb1dSTonghao Zhang 
249873867881SPablo Neira Ayuso 	mask = act->mangle.mask;
249973867881SPablo Neira Ayuso 	val = act->mangle.val;
250073867881SPablo Neira Ayuso 	offset = act->mangle.offset;
2501d79b6df6SOr Gerlitz 
2502c500c86bSPablo Neira Ayuso 	err = set_pedit_val(htype, ~mask, val, offset, &hdrs[cmd]);
2503d79b6df6SOr Gerlitz 	if (err)
2504d79b6df6SOr Gerlitz 		goto out_err;
2505c500c86bSPablo Neira Ayuso 
2506c500c86bSPablo Neira Ayuso 	hdrs[cmd].pedits++;
2507d79b6df6SOr Gerlitz 
2508c500c86bSPablo Neira Ayuso 	return 0;
2509c500c86bSPablo Neira Ayuso out_err:
2510c500c86bSPablo Neira Ayuso 	return err;
2511c500c86bSPablo Neira Ayuso }
2512c500c86bSPablo Neira Ayuso 
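/* Convert the accumulated pedit keys into HW modify-header actions:
 * allocate the action buffer on first use, let offload_pedit_fields()
 * consume the masks, and fail if any mask bits are left over, i.e. some
 * field is not offloadable.
 */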
2513c500c86bSPablo Neira Ayuso static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
2514c500c86bSPablo Neira Ayuso 				 struct mlx5e_tc_flow_parse_attr *parse_attr,
2515c500c86bSPablo Neira Ayuso 				 struct pedit_headers_action *hdrs,
251627c11b6bSEli Britstein 				 u32 *action_flags,
2517c500c86bSPablo Neira Ayuso 				 struct netlink_ext_ack *extack)
2518c500c86bSPablo Neira Ayuso {
2519c500c86bSPablo Neira Ayuso 	struct pedit_headers *cmd_masks;
2520c500c86bSPablo Neira Ayuso 	int err;
2521c500c86bSPablo Neira Ayuso 	u8 cmd;
2522c500c86bSPablo Neira Ayuso 
2523218d05ceSTonghao Zhang 	if (!parse_attr->mod_hdr_actions) {
2524c500c86bSPablo Neira Ayuso 		err = alloc_mod_hdr_actions(priv, hdrs, namespace, parse_attr);
2525d79b6df6SOr Gerlitz 		if (err)
2526d79b6df6SOr Gerlitz 			goto out_err;
2527218d05ceSTonghao Zhang 	}
2528d79b6df6SOr Gerlitz 
252927c11b6bSEli Britstein 	err = offload_pedit_fields(hdrs, parse_attr, action_flags, extack);
2530d79b6df6SOr Gerlitz 	if (err < 0)
2531d79b6df6SOr Gerlitz 		goto out_dealloc_parsed_actions;
2532d79b6df6SOr Gerlitz 
2533d79b6df6SOr Gerlitz 	for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
2534c500c86bSPablo Neira Ayuso 		cmd_masks = &hdrs[cmd].masks;
2535d79b6df6SOr Gerlitz 		if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
2536e98bedf5SEli Britstein 			NL_SET_ERR_MSG_MOD(extack,
2537e98bedf5SEli Britstein 					   "attempt to offload an unsupported field");
2538b3a433deSOr Gerlitz 			netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd);
2539d79b6df6SOr Gerlitz 			print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
2540d79b6df6SOr Gerlitz 				       16, 1, cmd_masks, sizeof(zero_masks), true);
2541d79b6df6SOr Gerlitz 			err = -EOPNOTSUPP;
2542d79b6df6SOr Gerlitz 			goto out_dealloc_parsed_actions;
2543d79b6df6SOr Gerlitz 		}
2544d79b6df6SOr Gerlitz 	}
2545d79b6df6SOr Gerlitz 
2546d79b6df6SOr Gerlitz 	return 0;
2547d79b6df6SOr Gerlitz 
2548d79b6df6SOr Gerlitz out_dealloc_parsed_actions:
2549d79b6df6SOr Gerlitz 	kfree(parse_attr->mod_hdr_actions);
2550d79b6df6SOr Gerlitz out_err:
2551d79b6df6SOr Gerlitz 	return err;
2552d79b6df6SOr Gerlitz }
2553d79b6df6SOr Gerlitz 
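/* Checksum recalculation is only offloaded as a side effect of header
 * rewrite, and only for the IPv4/TCP/UDP update flags.
 */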
2554e98bedf5SEli Britstein static bool csum_offload_supported(struct mlx5e_priv *priv,
2555e98bedf5SEli Britstein 				   u32 action,
2556e98bedf5SEli Britstein 				   u32 update_flags,
2557e98bedf5SEli Britstein 				   struct netlink_ext_ack *extack)
255826c02749SOr Gerlitz {
255926c02749SOr Gerlitz 	u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
256026c02749SOr Gerlitz 			 TCA_CSUM_UPDATE_FLAG_UDP;
256126c02749SOr Gerlitz 
256226c02749SOr Gerlitz 	/* The HW recalculates checksums only when re-writing headers */
256326c02749SOr Gerlitz 	if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
2564e98bedf5SEli Britstein 		NL_SET_ERR_MSG_MOD(extack,
2565e98bedf5SEli Britstein 				   "TC csum action is only offloaded with pedit");
256626c02749SOr Gerlitz 		netdev_warn(priv->netdev,
256726c02749SOr Gerlitz 			    "TC csum action is only offloaded with pedit\n");
256826c02749SOr Gerlitz 		return false;
256926c02749SOr Gerlitz 	}
257026c02749SOr Gerlitz 
257126c02749SOr Gerlitz 	if (update_flags & ~prot_flags) {
2572e98bedf5SEli Britstein 		NL_SET_ERR_MSG_MOD(extack,
2573e98bedf5SEli Britstein 				   "can't offload TC csum action for some header/s");
257426c02749SOr Gerlitz 		netdev_warn(priv->netdev,
257526c02749SOr Gerlitz 			    "can't offload TC csum action for some header/s - flags %#x\n",
257626c02749SOr Gerlitz 			    update_flags);
257726c02749SOr Gerlitz 		return false;
257826c02749SOr Gerlitz 	}
257926c02749SOr Gerlitz 
258026c02749SOr Gerlitz 	return true;
258126c02749SOr Gerlitz }
258226c02749SOr Gerlitz 
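/* Layouts of the 4-byte words holding the IPv4 ttl and the IPv6 hop_limit,
 * used below to tell whether a pedit touches anything else in those words.
 */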
25838998576bSDmytro Linkin struct ip_ttl_word {
25848998576bSDmytro Linkin 	__u8	ttl;
25858998576bSDmytro Linkin 	__u8	protocol;
25868998576bSDmytro Linkin 	__sum16	check;
25878998576bSDmytro Linkin };
25888998576bSDmytro Linkin 
25898998576bSDmytro Linkin struct ipv6_hoplimit_word {
25908998576bSDmytro Linkin 	__be16	payload_len;
25918998576bSDmytro Linkin 	__u8	nexthdr;
25928998576bSDmytro Linkin 	__u8	hop_limit;
25938998576bSDmytro Linkin };
25948998576bSDmytro Linkin 
25958998576bSDmytro Linkin static bool is_action_keys_supported(const struct flow_action_entry *act)
25968998576bSDmytro Linkin {
25978998576bSDmytro Linkin 	u32 mask, offset;
25988998576bSDmytro Linkin 	u8 htype;
25998998576bSDmytro Linkin 
26008998576bSDmytro Linkin 	htype = act->mangle.htype;
26018998576bSDmytro Linkin 	offset = act->mangle.offset;
26028998576bSDmytro Linkin 	mask = ~act->mangle.mask;
26038998576bSDmytro Linkin 	/* For the IPv4 and IPv6 headers, inspect the 4-byte word that
26048998576bSDmytro Linkin 	 * holds ttl/hop_limit to determine whether any field other
26058998576bSDmytro Linkin 	 * than ttl/hop_limit is being modified.
26068998576bSDmytro Linkin 	 */
26078998576bSDmytro Linkin 	if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4) {
26088998576bSDmytro Linkin 		struct ip_ttl_word *ttl_word =
26098998576bSDmytro Linkin 			(struct ip_ttl_word *)&mask;
26108998576bSDmytro Linkin 
26118998576bSDmytro Linkin 		if (offset != offsetof(struct iphdr, ttl) ||
26128998576bSDmytro Linkin 		    ttl_word->protocol ||
26138998576bSDmytro Linkin 		    ttl_word->check) {
26148998576bSDmytro Linkin 			return true;
26158998576bSDmytro Linkin 		}
26168998576bSDmytro Linkin 	} else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
26178998576bSDmytro Linkin 		struct ipv6_hoplimit_word *hoplimit_word =
26188998576bSDmytro Linkin 			(struct ipv6_hoplimit_word *)&mask;
26198998576bSDmytro Linkin 
26208998576bSDmytro Linkin 		if (offset != offsetof(struct ipv6hdr, payload_len) ||
26218998576bSDmytro Linkin 		    hoplimit_word->payload_len ||
26228998576bSDmytro Linkin 		    hoplimit_word->nexthdr) {
26238998576bSDmytro Linkin 			return true;
26248998576bSDmytro Linkin 		}
26258998576bSDmytro Linkin 	}
26268998576bSDmytro Linkin 	return false;
26278998576bSDmytro Linkin }
26288998576bSDmytro Linkin 
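/* Rewrites of IP-level fields (beyond ttl/hop_limit) are only offloaded
 * when the flow is matched as TCP, UDP or ICMP; non-IP flows only rewrite
 * MACs and are always accepted.
 */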
2629bdd66ac0SOr Gerlitz static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
263073867881SPablo Neira Ayuso 					  struct flow_action *flow_action,
26311651925dSGuy Shattah 					  u32 actions,
2632e98bedf5SEli Britstein 					  struct netlink_ext_ack *extack)
2633bdd66ac0SOr Gerlitz {
263473867881SPablo Neira Ayuso 	const struct flow_action_entry *act;
2635bdd66ac0SOr Gerlitz 	bool modify_ip_header;
2636bdd66ac0SOr Gerlitz 	void *headers_v;
2637bdd66ac0SOr Gerlitz 	u16 ethertype;
26388998576bSDmytro Linkin 	u8 ip_proto;
263973867881SPablo Neira Ayuso 	int i;
2640bdd66ac0SOr Gerlitz 
26418377629eSEli Britstein 	headers_v = get_match_headers_value(actions, spec);
2642bdd66ac0SOr Gerlitz 	ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
2643bdd66ac0SOr Gerlitz 
2644bdd66ac0SOr Gerlitz 	/* for non-IP we only re-write MACs, so we're okay */
2645bdd66ac0SOr Gerlitz 	if (ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)
2646bdd66ac0SOr Gerlitz 		goto out_ok;
2647bdd66ac0SOr Gerlitz 
2648bdd66ac0SOr Gerlitz 	modify_ip_header = false;
264973867881SPablo Neira Ayuso 	flow_action_for_each(i, act, flow_action) {
265073867881SPablo Neira Ayuso 		if (act->id != FLOW_ACTION_MANGLE &&
265173867881SPablo Neira Ayuso 		    act->id != FLOW_ACTION_ADD)
2652bdd66ac0SOr Gerlitz 			continue;
2653bdd66ac0SOr Gerlitz 
26548998576bSDmytro Linkin 		if (is_action_keys_supported(act)) {
2655bdd66ac0SOr Gerlitz 			modify_ip_header = true;
2656bdd66ac0SOr Gerlitz 			break;
2657bdd66ac0SOr Gerlitz 		}
2658bdd66ac0SOr Gerlitz 	}
2659bdd66ac0SOr Gerlitz 
2660bdd66ac0SOr Gerlitz 	ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
26611ccef350SJianbo Liu 	if (modify_ip_header && ip_proto != IPPROTO_TCP &&
26621ccef350SJianbo Liu 	    ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
2663e98bedf5SEli Britstein 		NL_SET_ERR_MSG_MOD(extack,
2664e98bedf5SEli Britstein 				   "can't offload re-write of non TCP/UDP/ICMP");
2665bdd66ac0SOr Gerlitz 		pr_info("can't offload re-write of ip proto %d\n", ip_proto);
2666bdd66ac0SOr Gerlitz 		return false;
2667bdd66ac0SOr Gerlitz 	}
2668bdd66ac0SOr Gerlitz 
2669bdd66ac0SOr Gerlitz out_ok:
2670bdd66ac0SOr Gerlitz 	return true;
2671bdd66ac0SOr Gerlitz }
2672bdd66ac0SOr Gerlitz 
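/* Final check of the parsed actions against the match: egress flows must
 * decap, pop a VLAN or drop, and header rewrites must pass
 * modify_header_match_supported().
 */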
2673bdd66ac0SOr Gerlitz static bool actions_match_supported(struct mlx5e_priv *priv,
267473867881SPablo Neira Ayuso 				    struct flow_action *flow_action,
2675bdd66ac0SOr Gerlitz 				    struct mlx5e_tc_flow_parse_attr *parse_attr,
2676e98bedf5SEli Britstein 				    struct mlx5e_tc_flow *flow,
2677e98bedf5SEli Britstein 				    struct netlink_ext_ack *extack)
2678bdd66ac0SOr Gerlitz {
2679bdd66ac0SOr Gerlitz 	u32 actions;
2680bdd66ac0SOr Gerlitz 
2681226f2ca3SVlad Buslov 	if (mlx5e_is_eswitch_flow(flow))
2682bdd66ac0SOr Gerlitz 		actions = flow->esw_attr->action;
2683bdd66ac0SOr Gerlitz 	else
2684bdd66ac0SOr Gerlitz 		actions = flow->nic_attr->action;
2685bdd66ac0SOr Gerlitz 
2686226f2ca3SVlad Buslov 	if (flow_flag_test(flow, EGRESS) &&
268735a605dbSEli Britstein 	    !((actions & MLX5_FLOW_CONTEXT_ACTION_DECAP) ||
26886830b468STonghao Zhang 	      (actions & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
26896830b468STonghao Zhang 	      (actions & MLX5_FLOW_CONTEXT_ACTION_DROP)))
26907e29392eSRoi Dayan 		return false;
26917e29392eSRoi Dayan 
2692bdd66ac0SOr Gerlitz 	if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
269373867881SPablo Neira Ayuso 		return modify_header_match_supported(&parse_attr->spec,
2694a655fe9fSDavid S. Miller 						     flow_action, actions,
2695e98bedf5SEli Britstein 						     extack);
2696bdd66ac0SOr Gerlitz 
2697bdd66ac0SOr Gerlitz 	return true;
2698bdd66ac0SOr Gerlitz }
2699bdd66ac0SOr Gerlitz 
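/* Two net devices belong to the same HW when their NIC system image GUIDs match. */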
27005c65c564SOr Gerlitz static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
27015c65c564SOr Gerlitz {
27025c65c564SOr Gerlitz 	struct mlx5_core_dev *fmdev, *pmdev;
2703816f6706SOr Gerlitz 	u64 fsystem_guid, psystem_guid;
27045c65c564SOr Gerlitz 
27055c65c564SOr Gerlitz 	fmdev = priv->mdev;
27065c65c564SOr Gerlitz 	pmdev = peer_priv->mdev;
27075c65c564SOr Gerlitz 
270859c9d35eSAlaa Hleihel 	fsystem_guid = mlx5_query_nic_system_image_guid(fmdev);
270959c9d35eSAlaa Hleihel 	psystem_guid = mlx5_query_nic_system_image_guid(pmdev);
27105c65c564SOr Gerlitz 
2711816f6706SOr Gerlitz 	return (fsystem_guid == psystem_guid);
27125c65c564SOr Gerlitz }
27135c65c564SOr Gerlitz 
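/* Offload a VLAN VID rewrite as a pedit of the 802.1Q TCI inside the
 * ethernet header.  The rule must match on the VLAN tag, and changing the
 * VLAN priority is not supported.
 */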
2714bdc837eeSEli Britstein static int add_vlan_rewrite_action(struct mlx5e_priv *priv, int namespace,
2715bdc837eeSEli Britstein 				   const struct flow_action_entry *act,
2716bdc837eeSEli Britstein 				   struct mlx5e_tc_flow_parse_attr *parse_attr,
2717bdc837eeSEli Britstein 				   struct pedit_headers_action *hdrs,
2718bdc837eeSEli Britstein 				   u32 *action, struct netlink_ext_ack *extack)
2719bdc837eeSEli Britstein {
2720bdc837eeSEli Britstein 	u16 mask16 = VLAN_VID_MASK;
2721bdc837eeSEli Britstein 	u16 val16 = act->vlan.vid & VLAN_VID_MASK;
2722bdc837eeSEli Britstein 	const struct flow_action_entry pedit_act = {
2723bdc837eeSEli Britstein 		.id = FLOW_ACTION_MANGLE,
2724bdc837eeSEli Britstein 		.mangle.htype = FLOW_ACT_MANGLE_HDR_TYPE_ETH,
2725bdc837eeSEli Britstein 		.mangle.offset = offsetof(struct vlan_ethhdr, h_vlan_TCI),
2726bdc837eeSEli Britstein 		.mangle.mask = ~(u32)be16_to_cpu(*(__be16 *)&mask16),
2727bdc837eeSEli Britstein 		.mangle.val = (u32)be16_to_cpu(*(__be16 *)&val16),
2728bdc837eeSEli Britstein 	};
27296fca9d1eSEli Britstein 	u8 match_prio_mask, match_prio_val;
2730bf2f3bcaSEli Britstein 	void *headers_c, *headers_v;
2731bdc837eeSEli Britstein 	int err;
2732bdc837eeSEli Britstein 
2733bf2f3bcaSEli Britstein 	headers_c = get_match_headers_criteria(*action, &parse_attr->spec);
2734bf2f3bcaSEli Britstein 	headers_v = get_match_headers_value(*action, &parse_attr->spec);
2735bf2f3bcaSEli Britstein 
2736bf2f3bcaSEli Britstein 	if (!(MLX5_GET(fte_match_set_lyr_2_4, headers_c, cvlan_tag) &&
2737bf2f3bcaSEli Britstein 	      MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag))) {
2738bf2f3bcaSEli Britstein 		NL_SET_ERR_MSG_MOD(extack,
2739bf2f3bcaSEli Britstein 				   "VLAN rewrite action must have VLAN protocol match");
2740bf2f3bcaSEli Britstein 		return -EOPNOTSUPP;
2741bf2f3bcaSEli Britstein 	}
2742bf2f3bcaSEli Britstein 
27436fca9d1eSEli Britstein 	match_prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
27446fca9d1eSEli Britstein 	match_prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
27456fca9d1eSEli Britstein 	if (act->vlan.prio != (match_prio_val & match_prio_mask)) {
27466fca9d1eSEli Britstein 		NL_SET_ERR_MSG_MOD(extack,
27476fca9d1eSEli Britstein 				   "Changing VLAN prio is not supported");
2748bdc837eeSEli Britstein 		return -EOPNOTSUPP;
2749bdc837eeSEli Britstein 	}
2750bdc837eeSEli Britstein 
2751bdc837eeSEli Britstein 	err = parse_tc_pedit_action(priv, &pedit_act, namespace, parse_attr,
2752bdc837eeSEli Britstein 				    hdrs, NULL);
2753bdc837eeSEli Britstein 	*action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
2754bdc837eeSEli Britstein 
2755bdc837eeSEli Britstein 	return err;
2756bdc837eeSEli Britstein }
2757bdc837eeSEli Britstein 
27580bac1194SEli Britstein static int
27590bac1194SEli Britstein add_vlan_prio_tag_rewrite_action(struct mlx5e_priv *priv,
27600bac1194SEli Britstein 				 struct mlx5e_tc_flow_parse_attr *parse_attr,
27610bac1194SEli Britstein 				 struct pedit_headers_action *hdrs,
27620bac1194SEli Britstein 				 u32 *action, struct netlink_ext_ack *extack)
27630bac1194SEli Britstein {
27640bac1194SEli Britstein 	const struct flow_action_entry prio_tag_act = {
27650bac1194SEli Britstein 		.vlan.vid = 0,
27660bac1194SEli Britstein 		.vlan.prio =
27670bac1194SEli Britstein 			MLX5_GET(fte_match_set_lyr_2_4,
27680bac1194SEli Britstein 				 get_match_headers_value(*action,
27690bac1194SEli Britstein 							 &parse_attr->spec),
27700bac1194SEli Britstein 				 first_prio) &
27710bac1194SEli Britstein 			MLX5_GET(fte_match_set_lyr_2_4,
27720bac1194SEli Britstein 				 get_match_headers_criteria(*action,
27730bac1194SEli Britstein 							    &parse_attr->spec),
27740bac1194SEli Britstein 				 first_prio),
27750bac1194SEli Britstein 	};
27760bac1194SEli Britstein 
27770bac1194SEli Britstein 	return add_vlan_rewrite_action(priv, MLX5_FLOW_NAMESPACE_FDB,
27780bac1194SEli Britstein 				       &prio_tag_act, parse_attr, hdrs, action,
27790bac1194SEli Britstein 				       extack);
27800bac1194SEli Britstein }
27810bac1194SEli Britstein 
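/* Parse TC actions for a NIC (non-eswitch) flow: drop, header rewrite,
 * VLAN VID rewrite, checksum, hairpin redirect to a device on the same HW,
 * and flow mark.  The result is stored in the NIC flow attribute.
 */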
278273867881SPablo Neira Ayuso static int parse_tc_nic_actions(struct mlx5e_priv *priv,
278373867881SPablo Neira Ayuso 				struct flow_action *flow_action,
2784aa0cbbaeSOr Gerlitz 				struct mlx5e_tc_flow_parse_attr *parse_attr,
2785e98bedf5SEli Britstein 				struct mlx5e_tc_flow *flow,
2786e98bedf5SEli Britstein 				struct netlink_ext_ack *extack)
2787e3a2b7edSAmir Vadai {
2788aa0cbbaeSOr Gerlitz 	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
278973867881SPablo Neira Ayuso 	struct pedit_headers_action hdrs[2] = {};
279073867881SPablo Neira Ayuso 	const struct flow_action_entry *act;
27911cab1cd7SOr Gerlitz 	u32 action = 0;
2792244cd96aSCong Wang 	int err, i;
2793e3a2b7edSAmir Vadai 
279473867881SPablo Neira Ayuso 	if (!flow_action_has_entries(flow_action))
2795e3a2b7edSAmir Vadai 		return -EINVAL;
2796e3a2b7edSAmir Vadai 
27973bc4b7bfSOr Gerlitz 	attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
2798e3a2b7edSAmir Vadai 
279973867881SPablo Neira Ayuso 	flow_action_for_each(i, act, flow_action) {
280073867881SPablo Neira Ayuso 		switch (act->id) {
280173867881SPablo Neira Ayuso 		case FLOW_ACTION_DROP:
28021cab1cd7SOr Gerlitz 			action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
2803aad7e08dSAmir Vadai 			if (MLX5_CAP_FLOWTABLE(priv->mdev,
2804aad7e08dSAmir Vadai 					       flow_table_properties_nic_receive.flow_counter))
28051cab1cd7SOr Gerlitz 				action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
280673867881SPablo Neira Ayuso 			break;
280773867881SPablo Neira Ayuso 		case FLOW_ACTION_MANGLE:
280873867881SPablo Neira Ayuso 		case FLOW_ACTION_ADD:
280973867881SPablo Neira Ayuso 			err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_KERNEL,
2810c500c86bSPablo Neira Ayuso 						    parse_attr, hdrs, extack);
28112f4fe4caSOr Gerlitz 			if (err)
28122f4fe4caSOr Gerlitz 				return err;
28132f4fe4caSOr Gerlitz 
28141cab1cd7SOr Gerlitz 			action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
28152f4fe4caSOr Gerlitz 				  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
281673867881SPablo Neira Ayuso 			break;
2817bdc837eeSEli Britstein 		case FLOW_ACTION_VLAN_MANGLE:
2818bdc837eeSEli Britstein 			err = add_vlan_rewrite_action(priv,
2819bdc837eeSEli Britstein 						      MLX5_FLOW_NAMESPACE_KERNEL,
2820bdc837eeSEli Britstein 						      act, parse_attr, hdrs,
2821bdc837eeSEli Britstein 						      &action, extack);
2822bdc837eeSEli Britstein 			if (err)
2823bdc837eeSEli Britstein 				return err;
2824bdc837eeSEli Britstein 
2825bdc837eeSEli Britstein 			break;
282673867881SPablo Neira Ayuso 		case FLOW_ACTION_CSUM:
28271cab1cd7SOr Gerlitz 			if (csum_offload_supported(priv, action,
282873867881SPablo Neira Ayuso 						   act->csum_flags,
2829e98bedf5SEli Britstein 						   extack))
283073867881SPablo Neira Ayuso 				break;
283126c02749SOr Gerlitz 
283226c02749SOr Gerlitz 			return -EOPNOTSUPP;
283373867881SPablo Neira Ayuso 		case FLOW_ACTION_REDIRECT: {
283473867881SPablo Neira Ayuso 			struct net_device *peer_dev = act->dev;
28355c65c564SOr Gerlitz 
28365c65c564SOr Gerlitz 			if (priv->netdev->netdev_ops == peer_dev->netdev_ops &&
28375c65c564SOr Gerlitz 			    same_hw_devs(priv, netdev_priv(peer_dev))) {
283898b66cb1SEli Britstein 				parse_attr->mirred_ifindex[0] = peer_dev->ifindex;
2839226f2ca3SVlad Buslov 				flow_flag_set(flow, HAIRPIN);
28401cab1cd7SOr Gerlitz 				action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
28415c65c564SOr Gerlitz 					  MLX5_FLOW_CONTEXT_ACTION_COUNT;
28425c65c564SOr Gerlitz 			} else {
2843e98bedf5SEli Britstein 				NL_SET_ERR_MSG_MOD(extack,
2844e98bedf5SEli Britstein 						   "device is not on same HW, can't offload");
28455c65c564SOr Gerlitz 				netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
28465c65c564SOr Gerlitz 					    peer_dev->name);
28475c65c564SOr Gerlitz 				return -EINVAL;
28485c65c564SOr Gerlitz 			}
28495c65c564SOr Gerlitz 			}
285073867881SPablo Neira Ayuso 			break;
285173867881SPablo Neira Ayuso 		case FLOW_ACTION_MARK: {
285273867881SPablo Neira Ayuso 			u32 mark = act->mark;
2853e3a2b7edSAmir Vadai 
2854e3a2b7edSAmir Vadai 			if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
2855e98bedf5SEli Britstein 				NL_SET_ERR_MSG_MOD(extack,
2856e98bedf5SEli Britstein 						   "Bad flow mark - only 16 bit is supported");
2857e3a2b7edSAmir Vadai 				return -EINVAL;
2858e3a2b7edSAmir Vadai 			}
2859e3a2b7edSAmir Vadai 
28603bc4b7bfSOr Gerlitz 			attr->flow_tag = mark;
28611cab1cd7SOr Gerlitz 			action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
2862e3a2b7edSAmir Vadai 			}
286373867881SPablo Neira Ayuso 			break;
286473867881SPablo Neira Ayuso 		default:
28652cc1cb1dSTonghao Zhang 			NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
28662cc1cb1dSTonghao Zhang 			return -EOPNOTSUPP;
2867e3a2b7edSAmir Vadai 		}
286873867881SPablo Neira Ayuso 	}
2869e3a2b7edSAmir Vadai 
2870c500c86bSPablo Neira Ayuso 	if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
2871c500c86bSPablo Neira Ayuso 	    hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
2872c500c86bSPablo Neira Ayuso 		err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL,
287327c11b6bSEli Britstein 					    parse_attr, hdrs, &action, extack);
2874c500c86bSPablo Neira Ayuso 		if (err)
2875c500c86bSPablo Neira Ayuso 			return err;
287627c11b6bSEli Britstein 		/* in case all pedit actions are skipped, remove the MOD_HDR
287727c11b6bSEli Britstein 		 * flag.
287827c11b6bSEli Britstein 		 */
2879e7739a60SEli Britstein 		if (parse_attr->num_mod_hdr_actions == 0) {
288027c11b6bSEli Britstein 			action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
2881e7739a60SEli Britstein 			kfree(parse_attr->mod_hdr_actions);
2882e7739a60SEli Britstein 		}
2883c500c86bSPablo Neira Ayuso 	}
2884c500c86bSPablo Neira Ayuso 
28851cab1cd7SOr Gerlitz 	attr->action = action;
288673867881SPablo Neira Ayuso 	if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
2887bdd66ac0SOr Gerlitz 		return -EOPNOTSUPP;
2888bdd66ac0SOr Gerlitz 
2889e3a2b7edSAmir Vadai 	return 0;
2890e3a2b7edSAmir Vadai }
2891e3a2b7edSAmir Vadai 
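/* Key identifying a shareable encap entry: the tunnel destination key and
 * the tunnel type.  Entries are hashed on the eswitch encap table so flows
 * using the same tunnel destination share one encap header.
 */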
28927f1a546eSEli Britstein struct encap_key {
28931f6da306SYevgeny Kliteynik 	const struct ip_tunnel_key *ip_tun_key;
2894d386939aSYevgeny Kliteynik 	struct mlx5e_tc_tunnel *tc_tunnel;
28957f1a546eSEli Britstein };
28967f1a546eSEli Britstein 
28977f1a546eSEli Britstein static inline int cmp_encap_info(struct encap_key *a,
28987f1a546eSEli Britstein 				 struct encap_key *b)
2899a54e20b4SHadar Hen Zion {
29007f1a546eSEli Britstein 	return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) ||
2901d386939aSYevgeny Kliteynik 	       a->tc_tunnel->tunnel_type != b->tc_tunnel->tunnel_type;
2902a54e20b4SHadar Hen Zion }
2903a54e20b4SHadar Hen Zion 
29047f1a546eSEli Britstein static inline int hash_encap_info(struct encap_key *key)
2905a54e20b4SHadar Hen Zion {
29067f1a546eSEli Britstein 	return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key),
2907d386939aSYevgeny Kliteynik 		     key->tc_tunnel->tunnel_type);
2908a54e20b4SHadar Hen Zion }
2909a54e20b4SHadar Hen Zion 
2911b1d90e6bSRabie Loulou static bool is_merged_eswitch_dev(struct mlx5e_priv *priv,
2912b1d90e6bSRabie Loulou 				  struct net_device *peer_netdev)
2913b1d90e6bSRabie Loulou {
2914b1d90e6bSRabie Loulou 	struct mlx5e_priv *peer_priv;
2915b1d90e6bSRabie Loulou 
2916b1d90e6bSRabie Loulou 	peer_priv = netdev_priv(peer_netdev);
2917b1d90e6bSRabie Loulou 
2918b1d90e6bSRabie Loulou 	return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) &&
291968931c7dSRoi Dayan 		mlx5e_eswitch_rep(priv->netdev) &&
292068931c7dSRoi Dayan 		mlx5e_eswitch_rep(peer_netdev) &&
292168931c7dSRoi Dayan 		same_hw_devs(priv, peer_priv));
2922b1d90e6bSRabie Loulou }
2923b1d90e6bSRabie Loulou 
2924ce99f6b9SOr Gerlitz 
2926948993f2SVlad Buslov bool mlx5e_encap_take(struct mlx5e_encap_entry *e)
2927948993f2SVlad Buslov {
2928948993f2SVlad Buslov 	return refcount_inc_not_zero(&e->refcnt);
2929948993f2SVlad Buslov }
2930948993f2SVlad Buslov 
2931948993f2SVlad Buslov static struct mlx5e_encap_entry *
2932948993f2SVlad Buslov mlx5e_encap_get(struct mlx5e_priv *priv, struct encap_key *key,
2933948993f2SVlad Buslov 		uintptr_t hash_key)
2934948993f2SVlad Buslov {
2935948993f2SVlad Buslov 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
2936948993f2SVlad Buslov 	struct mlx5e_encap_entry *e;
2937948993f2SVlad Buslov 	struct encap_key e_key;
2938948993f2SVlad Buslov 
2939948993f2SVlad Buslov 	hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
2940948993f2SVlad Buslov 				   encap_hlist, hash_key) {
2941948993f2SVlad Buslov 		e_key.ip_tun_key = &e->tun_info->key;
2942948993f2SVlad Buslov 		e_key.tc_tunnel = e->tunnel;
2943948993f2SVlad Buslov 		if (!cmp_encap_info(&e_key, key) &&
2944948993f2SVlad Buslov 		    mlx5e_encap_take(e))
2945948993f2SVlad Buslov 			return e;
2946948993f2SVlad Buslov 	}
2947948993f2SVlad Buslov 
2948948993f2SVlad Buslov 	return NULL;
2949948993f2SVlad Buslov }
2950948993f2SVlad Buslov 
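/* Attach the flow to the encap entry for its tunnel destination, creating
 * the entry and building its encapsulation header if none exists yet.
 * *encap_valid tells the caller whether the encap entry is already valid
 * and the offloaded rule can point at its encap header.
 */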
2951a54e20b4SHadar Hen Zion static int mlx5e_attach_encap(struct mlx5e_priv *priv,
2952e98bedf5SEli Britstein 			      struct mlx5e_tc_flow *flow,
2953733d4f36SRoi Dayan 			      struct net_device *mirred_dev,
2954733d4f36SRoi Dayan 			      int out_index,
29558c4dc42bSEli Britstein 			      struct netlink_ext_ack *extack,
29560ad060eeSRoi Dayan 			      struct net_device **encap_dev,
29570ad060eeSRoi Dayan 			      bool *encap_valid)
295803a9d11eSOr Gerlitz {
2959a54e20b4SHadar Hen Zion 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
296045247bf2SOr Gerlitz 	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
2961733d4f36SRoi Dayan 	struct mlx5e_tc_flow_parse_attr *parse_attr;
29621f6da306SYevgeny Kliteynik 	const struct ip_tunnel_info *tun_info;
2963948993f2SVlad Buslov 	struct encap_key key;
2964c1ae1152SOr Gerlitz 	struct mlx5e_encap_entry *e;
2965733d4f36SRoi Dayan 	unsigned short family;
2966a54e20b4SHadar Hen Zion 	uintptr_t hash_key;
296754c177caSOz Shlomo 	int err = 0;
2968a54e20b4SHadar Hen Zion 
2969733d4f36SRoi Dayan 	parse_attr = attr->parse_attr;
29701f6da306SYevgeny Kliteynik 	tun_info = parse_attr->tun_info[out_index];
2971733d4f36SRoi Dayan 	family = ip_tunnel_info_af(tun_info);
29727f1a546eSEli Britstein 	key.ip_tun_key = &tun_info->key;
2973d386939aSYevgeny Kliteynik 	key.tc_tunnel = mlx5e_get_tc_tun(mirred_dev);
2974d71f895cSEli Cohen 	if (!key.tc_tunnel) {
2975d71f895cSEli Cohen 		NL_SET_ERR_MSG_MOD(extack, "Unsupported tunnel");
2976d71f895cSEli Cohen 		return -EOPNOTSUPP;
2977d71f895cSEli Cohen 	}
2978733d4f36SRoi Dayan 
29797f1a546eSEli Britstein 	hash_key = hash_encap_info(&key);
2980a54e20b4SHadar Hen Zion 
298161086f39SVlad Buslov 	mutex_lock(&esw->offloads.encap_tbl_lock);
2982948993f2SVlad Buslov 	e = mlx5e_encap_get(priv, &key, hash_key);
2983a54e20b4SHadar Hen Zion 
2984b2812089SVlad Buslov 	/* must verify if encap is valid or not */
2985d589e785SVlad Buslov 	if (e) {
2986d589e785SVlad Buslov 		mutex_unlock(&esw->offloads.encap_tbl_lock);
2987d589e785SVlad Buslov 		wait_for_completion(&e->res_ready);
2988d589e785SVlad Buslov 
2989d589e785SVlad Buslov 		/* Protect against concurrent neigh update. */
2990d589e785SVlad Buslov 		mutex_lock(&esw->offloads.encap_tbl_lock);
29913c140dd5SVlad Buslov 		if (e->compl_result < 0) {
2992d589e785SVlad Buslov 			err = -EREMOTEIO;
2993d589e785SVlad Buslov 			goto out_err;
2994d589e785SVlad Buslov 		}
299545247bf2SOr Gerlitz 		goto attach_flow;
2996d589e785SVlad Buslov 	}
2997a54e20b4SHadar Hen Zion 
2998a54e20b4SHadar Hen Zion 	e = kzalloc(sizeof(*e), GFP_KERNEL);
299961086f39SVlad Buslov 	if (!e) {
300061086f39SVlad Buslov 		err = -ENOMEM;
300161086f39SVlad Buslov 		goto out_err;
300261086f39SVlad Buslov 	}
3003a54e20b4SHadar Hen Zion 
3004948993f2SVlad Buslov 	refcount_set(&e->refcnt, 1);
3005d589e785SVlad Buslov 	init_completion(&e->res_ready);
3006d589e785SVlad Buslov 
30071f6da306SYevgeny Kliteynik 	e->tun_info = tun_info;
3008101f4de9SOz Shlomo 	err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack);
3009d589e785SVlad Buslov 	if (err) {
3010d589e785SVlad Buslov 		kfree(e);
3011d589e785SVlad Buslov 		e = NULL;
301254c177caSOz Shlomo 		goto out_err;
3013d589e785SVlad Buslov 	}
301454c177caSOz Shlomo 
3015a54e20b4SHadar Hen Zion 	INIT_LIST_HEAD(&e->flows);
3016d589e785SVlad Buslov 	hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
3017d589e785SVlad Buslov 	mutex_unlock(&esw->offloads.encap_tbl_lock);
3018a54e20b4SHadar Hen Zion 
3019ce99f6b9SOr Gerlitz 	if (family == AF_INET)
3020101f4de9SOz Shlomo 		err = mlx5e_tc_tun_create_header_ipv4(priv, mirred_dev, e);
3021ce99f6b9SOr Gerlitz 	else if (family == AF_INET6)
3022101f4de9SOz Shlomo 		err = mlx5e_tc_tun_create_header_ipv6(priv, mirred_dev, e);
3023ce99f6b9SOr Gerlitz 
3024d589e785SVlad Buslov 	/* Protect against concurrent neigh update. */
3025d589e785SVlad Buslov 	mutex_lock(&esw->offloads.encap_tbl_lock);
3026d589e785SVlad Buslov 	complete_all(&e->res_ready);
3027d589e785SVlad Buslov 	if (err) {
3028d589e785SVlad Buslov 		e->compl_result = err;
3029a54e20b4SHadar Hen Zion 		goto out_err;
3030d589e785SVlad Buslov 	}
30313c140dd5SVlad Buslov 	e->compl_result = 1;
3032a54e20b4SHadar Hen Zion 
303345247bf2SOr Gerlitz attach_flow:
3034948993f2SVlad Buslov 	flow->encaps[out_index].e = e;
30358c4dc42bSEli Britstein 	list_add(&flow->encaps[out_index].list, &e->flows);
30368c4dc42bSEli Britstein 	flow->encaps[out_index].index = out_index;
303745247bf2SOr Gerlitz 	*encap_dev = e->out_dev;
30388c4dc42bSEli Britstein 	if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
30398c4dc42bSEli Britstein 		attr->dests[out_index].encap_id = e->encap_id;
30408c4dc42bSEli Britstein 		attr->dests[out_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
30410ad060eeSRoi Dayan 		*encap_valid = true;
30428c4dc42bSEli Britstein 	} else {
30430ad060eeSRoi Dayan 		*encap_valid = false;
30448c4dc42bSEli Britstein 	}
304561086f39SVlad Buslov 	mutex_unlock(&esw->offloads.encap_tbl_lock);
304645247bf2SOr Gerlitz 
3047232c0013SHadar Hen Zion 	return err;
3048a54e20b4SHadar Hen Zion 
3049a54e20b4SHadar Hen Zion out_err:
305061086f39SVlad Buslov 	mutex_unlock(&esw->offloads.encap_tbl_lock);
3051d589e785SVlad Buslov 	if (e)
3052d589e785SVlad Buslov 		mlx5e_encap_put(priv, e);
3053a54e20b4SHadar Hen Zion 	return err;
3054a54e20b4SHadar Hen Zion }
3055a54e20b4SHadar Hen Zion 
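/* Translate one VLAN push/pop TC action into eswitch VLAN attributes,
 * supporting up to MLX5_FS_VLAN_DEPTH stacked tags when the device
 * advertises the required capabilities.
 */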
30561482bd3dSJianbo Liu static int parse_tc_vlan_action(struct mlx5e_priv *priv,
305773867881SPablo Neira Ayuso 				const struct flow_action_entry *act,
30581482bd3dSJianbo Liu 				struct mlx5_esw_flow_attr *attr,
30591482bd3dSJianbo Liu 				u32 *action)
30601482bd3dSJianbo Liu {
3061cc495188SJianbo Liu 	u8 vlan_idx = attr->total_vlan;
3062cc495188SJianbo Liu 
3063cc495188SJianbo Liu 	if (vlan_idx >= MLX5_FS_VLAN_DEPTH)
30641482bd3dSJianbo Liu 		return -EOPNOTSUPP;
3065cc495188SJianbo Liu 
306673867881SPablo Neira Ayuso 	switch (act->id) {
306773867881SPablo Neira Ayuso 	case FLOW_ACTION_VLAN_POP:
3068cc495188SJianbo Liu 		if (vlan_idx) {
3069cc495188SJianbo Liu 			if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
3070cc495188SJianbo Liu 								 MLX5_FS_VLAN_DEPTH))
3071cc495188SJianbo Liu 				return -EOPNOTSUPP;
3072cc495188SJianbo Liu 
3073cc495188SJianbo Liu 			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2;
3074cc495188SJianbo Liu 		} else {
3075cc495188SJianbo Liu 			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
3076cc495188SJianbo Liu 		}
307773867881SPablo Neira Ayuso 		break;
307873867881SPablo Neira Ayuso 	case FLOW_ACTION_VLAN_PUSH:
307973867881SPablo Neira Ayuso 		attr->vlan_vid[vlan_idx] = act->vlan.vid;
308073867881SPablo Neira Ayuso 		attr->vlan_prio[vlan_idx] = act->vlan.prio;
308173867881SPablo Neira Ayuso 		attr->vlan_proto[vlan_idx] = act->vlan.proto;
3082cc495188SJianbo Liu 		if (!attr->vlan_proto[vlan_idx])
3083cc495188SJianbo Liu 			attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q);
3084cc495188SJianbo Liu 
3085cc495188SJianbo Liu 		if (vlan_idx) {
3086cc495188SJianbo Liu 			if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
3087cc495188SJianbo Liu 								 MLX5_FS_VLAN_DEPTH))
3088cc495188SJianbo Liu 				return -EOPNOTSUPP;
3089cc495188SJianbo Liu 
3090cc495188SJianbo Liu 			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
3091cc495188SJianbo Liu 		} else {
3092cc495188SJianbo Liu 			if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) &&
309373867881SPablo Neira Ayuso 			    (act->vlan.proto != htons(ETH_P_8021Q) ||
309473867881SPablo Neira Ayuso 			     act->vlan.prio))
3095cc495188SJianbo Liu 				return -EOPNOTSUPP;
3096cc495188SJianbo Liu 
3097cc495188SJianbo Liu 			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
30981482bd3dSJianbo Liu 		}
309973867881SPablo Neira Ayuso 		break;
310073867881SPablo Neira Ayuso 	default:
3101bdc837eeSEli Britstein 		return -EINVAL;
31021482bd3dSJianbo Liu 	}
31031482bd3dSJianbo Liu 
3104cc495188SJianbo Liu 	attr->total_vlan = vlan_idx + 1;
3105cc495188SJianbo Liu 
31061482bd3dSJianbo Liu 	return 0;
31071482bd3dSJianbo Liu }
31081482bd3dSJianbo Liu 
3109278748a9SEli Britstein static int add_vlan_push_action(struct mlx5e_priv *priv,
3110278748a9SEli Britstein 				struct mlx5_esw_flow_attr *attr,
3111278748a9SEli Britstein 				struct net_device **out_dev,
3112278748a9SEli Britstein 				u32 *action)
3113278748a9SEli Britstein {
3114278748a9SEli Britstein 	struct net_device *vlan_dev = *out_dev;
3115278748a9SEli Britstein 	struct flow_action_entry vlan_act = {
3116278748a9SEli Britstein 		.id = FLOW_ACTION_VLAN_PUSH,
3117278748a9SEli Britstein 		.vlan.vid = vlan_dev_vlan_id(vlan_dev),
3118278748a9SEli Britstein 		.vlan.proto = vlan_dev_vlan_proto(vlan_dev),
3119278748a9SEli Britstein 		.vlan.prio = 0,
3120278748a9SEli Britstein 	};
3121278748a9SEli Britstein 	int err;
3122278748a9SEli Britstein 
3123278748a9SEli Britstein 	err = parse_tc_vlan_action(priv, &vlan_act, attr, action);
3124278748a9SEli Britstein 	if (err)
3125278748a9SEli Britstein 		return err;
3126278748a9SEli Britstein 
3127278748a9SEli Britstein 	*out_dev = dev_get_by_index_rcu(dev_net(vlan_dev),
3128278748a9SEli Britstein 					dev_get_iflink(vlan_dev));
3129278748a9SEli Britstein 	if (is_vlan_dev(*out_dev))
3130278748a9SEli Britstein 		err = add_vlan_push_action(priv, attr, out_dev, action);
3131278748a9SEli Britstein 
3132278748a9SEli Britstein 	return err;
3133278748a9SEli Britstein }
3134278748a9SEli Britstein 
313535a605dbSEli Britstein static int add_vlan_pop_action(struct mlx5e_priv *priv,
313635a605dbSEli Britstein 			       struct mlx5_esw_flow_attr *attr,
313735a605dbSEli Britstein 			       u32 *action)
313835a605dbSEli Britstein {
313935a605dbSEli Britstein 	int nest_level = vlan_get_encap_level(attr->parse_attr->filter_dev);
314035a605dbSEli Britstein 	struct flow_action_entry vlan_act = {
314135a605dbSEli Britstein 		.id = FLOW_ACTION_VLAN_POP,
314235a605dbSEli Britstein 	};
314335a605dbSEli Britstein 	int err = 0;
314435a605dbSEli Britstein 
314535a605dbSEli Britstein 	while (nest_level--) {
314635a605dbSEli Britstein 		err = parse_tc_vlan_action(priv, &vlan_act, attr, action);
314735a605dbSEli Britstein 		if (err)
314835a605dbSEli Britstein 			return err;
314935a605dbSEli Britstein 	}
315035a605dbSEli Britstein 
315135a605dbSEli Britstein 	return err;
315235a605dbSEli Britstein }
315335a605dbSEli Britstein 
3154f6dc1264SPaul Blakey bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
3155f6dc1264SPaul Blakey 				    struct net_device *out_dev)
3156f6dc1264SPaul Blakey {
3157f6dc1264SPaul Blakey 	if (is_merged_eswitch_dev(priv, out_dev))
3158f6dc1264SPaul Blakey 		return true;
3159f6dc1264SPaul Blakey 
3160f6dc1264SPaul Blakey 	return mlx5e_eswitch_rep(out_dev) &&
3161f6dc1264SPaul Blakey 	       same_hw_devs(priv, netdev_priv(out_dev));
3162f6dc1264SPaul Blakey }
3163f6dc1264SPaul Blakey 
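/* Parse TC actions for an eswitch (FDB) flow: drop, header rewrite,
 * checksum, and redirect/mirror to other ports, resolving uplink LAG and
 * stacked VLAN devices along the way.
 */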
316473867881SPablo Neira Ayuso static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
316573867881SPablo Neira Ayuso 				struct flow_action *flow_action,
3166e98bedf5SEli Britstein 				struct mlx5e_tc_flow *flow,
3167e98bedf5SEli Britstein 				struct netlink_ext_ack *extack)
3168a54e20b4SHadar Hen Zion {
316973867881SPablo Neira Ayuso 	struct pedit_headers_action hdrs[2] = {};
3170bf07aa73SPaul Blakey 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3171ecf5bb79SOr Gerlitz 	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
31726f9af8ffSTonghao Zhang 	struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
31731d447a39SSaeed Mahameed 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
317473867881SPablo Neira Ayuso 	const struct ip_tunnel_info *info = NULL;
317573867881SPablo Neira Ayuso 	const struct flow_action_entry *act;
3176a54e20b4SHadar Hen Zion 	bool encap = false;
31771cab1cd7SOr Gerlitz 	u32 action = 0;
3178244cd96aSCong Wang 	int err, i;
317903a9d11eSOr Gerlitz 
318073867881SPablo Neira Ayuso 	if (!flow_action_has_entries(flow_action))
318103a9d11eSOr Gerlitz 		return -EINVAL;
318203a9d11eSOr Gerlitz 
318373867881SPablo Neira Ayuso 	flow_action_for_each(i, act, flow_action) {
318473867881SPablo Neira Ayuso 		switch (act->id) {
318573867881SPablo Neira Ayuso 		case FLOW_ACTION_DROP:
31861cab1cd7SOr Gerlitz 			action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
318703a9d11eSOr Gerlitz 				  MLX5_FLOW_CONTEXT_ACTION_COUNT;
318873867881SPablo Neira Ayuso 			break;
318973867881SPablo Neira Ayuso 		case FLOW_ACTION_MANGLE:
319073867881SPablo Neira Ayuso 		case FLOW_ACTION_ADD:
319173867881SPablo Neira Ayuso 			err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_FDB,
3192c500c86bSPablo Neira Ayuso 						    parse_attr, hdrs, extack);
3193d7e75a32SOr Gerlitz 			if (err)
3194d7e75a32SOr Gerlitz 				return err;
3195d7e75a32SOr Gerlitz 
31961cab1cd7SOr Gerlitz 			action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
3197e85e02baSEli Britstein 			attr->split_count = attr->out_count;
319873867881SPablo Neira Ayuso 			break;
319973867881SPablo Neira Ayuso 		case FLOW_ACTION_CSUM:
32001cab1cd7SOr Gerlitz 			if (csum_offload_supported(priv, action,
320173867881SPablo Neira Ayuso 						   act->csum_flags, extack))
320273867881SPablo Neira Ayuso 				break;
320326c02749SOr Gerlitz 
320426c02749SOr Gerlitz 			return -EOPNOTSUPP;
320573867881SPablo Neira Ayuso 		case FLOW_ACTION_REDIRECT:
320673867881SPablo Neira Ayuso 		case FLOW_ACTION_MIRRED: {
320703a9d11eSOr Gerlitz 			struct mlx5e_priv *out_priv;
3208592d3651SChris Mi 			struct net_device *out_dev;
320903a9d11eSOr Gerlitz 
321073867881SPablo Neira Ayuso 			out_dev = act->dev;
3211ef381359SOz Shlomo 			if (!out_dev) {
3212ef381359SOz Shlomo 				/* out_dev is NULL when filters with a
3213ef381359SOz Shlomo 				 * non-existing mirred device are replayed to
3214ef381359SOz Shlomo 				 * the driver.
3215ef381359SOz Shlomo 				 */
3216ef381359SOz Shlomo 				return -EINVAL;
3217ef381359SOz Shlomo 			}
321803a9d11eSOr Gerlitz 
3219592d3651SChris Mi 			if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
3220e98bedf5SEli Britstein 				NL_SET_ERR_MSG_MOD(extack,
3221e98bedf5SEli Britstein 						   "can't support more output ports, can't offload forwarding");
3222592d3651SChris Mi 				pr_err("can't support more than %d output ports, can't offload forwarding\n",
3223592d3651SChris Mi 				       attr->out_count);
3224592d3651SChris Mi 				return -EOPNOTSUPP;
3225592d3651SChris Mi 			}
3226592d3651SChris Mi 
3227f493f155SEli Britstein 			action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
3228f493f155SEli Britstein 				  MLX5_FLOW_CONTEXT_ACTION_COUNT;
3229f6dc1264SPaul Blakey 			if (netdev_port_same_parent_id(priv->netdev, out_dev)) {
32307ba58ba7SRabie Loulou 				struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
32317ba58ba7SRabie Loulou 				struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
3232fa833bd5SVlad Buslov 				struct net_device *uplink_upper;
32337ba58ba7SRabie Loulou 
3234fa833bd5SVlad Buslov 				rcu_read_lock();
3235fa833bd5SVlad Buslov 				uplink_upper =
3236fa833bd5SVlad Buslov 					netdev_master_upper_dev_get_rcu(uplink_dev);
32377ba58ba7SRabie Loulou 				if (uplink_upper &&
32387ba58ba7SRabie Loulou 				    netif_is_lag_master(uplink_upper) &&
32397ba58ba7SRabie Loulou 				    uplink_upper == out_dev)
32407ba58ba7SRabie Loulou 					out_dev = uplink_dev;
3241fa833bd5SVlad Buslov 				rcu_read_unlock();
32427ba58ba7SRabie Loulou 
3243278748a9SEli Britstein 				if (is_vlan_dev(out_dev)) {
3244278748a9SEli Britstein 					err = add_vlan_push_action(priv, attr,
3245278748a9SEli Britstein 								   &out_dev,
3246278748a9SEli Britstein 								   &action);
3247278748a9SEli Britstein 					if (err)
3248278748a9SEli Britstein 						return err;
3249278748a9SEli Britstein 				}
3250f6dc1264SPaul Blakey 
325135a605dbSEli Britstein 				if (is_vlan_dev(parse_attr->filter_dev)) {
325235a605dbSEli Britstein 					err = add_vlan_pop_action(priv, attr,
325335a605dbSEli Britstein 								  &action);
325435a605dbSEli Britstein 					if (err)
325535a605dbSEli Britstein 						return err;
325635a605dbSEli Britstein 				}
3257278748a9SEli Britstein 
3258f6dc1264SPaul Blakey 				if (!mlx5e_is_valid_eswitch_fwd_dev(priv, out_dev)) {
3259f6dc1264SPaul Blakey 					NL_SET_ERR_MSG_MOD(extack,
3260f6dc1264SPaul Blakey 							   "devices are not on same switch HW, can't offload forwarding");
3261f6dc1264SPaul Blakey 					pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
3262f6dc1264SPaul Blakey 					       priv->netdev->name, out_dev->name);
3263a0646c88SEli Britstein 					return -EOPNOTSUPP;
3264f6dc1264SPaul Blakey 				}
3265a0646c88SEli Britstein 
326603a9d11eSOr Gerlitz 				out_priv = netdev_priv(out_dev);
32671d447a39SSaeed Mahameed 				rpriv = out_priv->ppriv;
3268df65a573SEli Britstein 				attr->dests[attr->out_count].rep = rpriv->rep;
3269df65a573SEli Britstein 				attr->dests[attr->out_count].mdev = out_priv->mdev;
3270df65a573SEli Britstein 				attr->out_count++;
3271a54e20b4SHadar Hen Zion 			} else if (encap) {
32728c4dc42bSEli Britstein 				parse_attr->mirred_ifindex[attr->out_count] =
32738c4dc42bSEli Britstein 					out_dev->ifindex;
32741f6da306SYevgeny Kliteynik 				parse_attr->tun_info[attr->out_count] = info;
32758c4dc42bSEli Britstein 				encap = false;
3276f493f155SEli Britstein 				attr->dests[attr->out_count].flags |=
3277f493f155SEli Britstein 					MLX5_ESW_DEST_ENCAP;
32781cc26d74SEli Britstein 				attr->out_count++;
3279df65a573SEli Britstein 				/* attr->dests[].rep is resolved when we
3280df65a573SEli Britstein 				 * handle encap
3281df65a573SEli Britstein 				 */
3282ef381359SOz Shlomo 			} else if (parse_attr->filter_dev != priv->netdev) {
3283ef381359SOz Shlomo 				/* All mlx5 devices are called to configure
3284ef381359SOz Shlomo 				 * high level device filters. Therefore, the
3285ef381359SOz Shlomo 				 * *attempt* to install a filter on an invalid
3286ef381359SOz Shlomo 				 * eswitch should not trigger an explicit error.
3287ef381359SOz Shlomo 				 */
3288ef381359SOz Shlomo 				return -EINVAL;
3289a54e20b4SHadar Hen Zion 			} else {
3290e98bedf5SEli Britstein 				NL_SET_ERR_MSG_MOD(extack,
3291e98bedf5SEli Britstein 						   "devices are not on same switch HW, can't offload forwarding");
3292a54e20b4SHadar Hen Zion 				pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
3293a54e20b4SHadar Hen Zion 				       priv->netdev->name, out_dev->name);
3294a54e20b4SHadar Hen Zion 				return -EINVAL;
3295a54e20b4SHadar Hen Zion 			}
3296a54e20b4SHadar Hen Zion 			}
329773867881SPablo Neira Ayuso 			break;
329873867881SPablo Neira Ayuso 		case FLOW_ACTION_TUNNEL_ENCAP:
329973867881SPablo Neira Ayuso 			info = act->tunnel;
3300a54e20b4SHadar Hen Zion 			if (info)
3301a54e20b4SHadar Hen Zion 				encap = true;
3302a54e20b4SHadar Hen Zion 			else
3303a54e20b4SHadar Hen Zion 				return -EOPNOTSUPP;
330403a9d11eSOr Gerlitz 
330573867881SPablo Neira Ayuso 			break;
330673867881SPablo Neira Ayuso 		case FLOW_ACTION_VLAN_PUSH:
330773867881SPablo Neira Ayuso 		case FLOW_ACTION_VLAN_POP:
330876b496b1SEli Britstein 			if (act->id == FLOW_ACTION_VLAN_PUSH &&
330976b496b1SEli Britstein 			    (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP)) {
331076b496b1SEli Britstein 				/* Replace vlan pop+push with vlan modify */
331176b496b1SEli Britstein 				action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
331276b496b1SEli Britstein 				err = add_vlan_rewrite_action(priv,
331376b496b1SEli Britstein 							      MLX5_FLOW_NAMESPACE_FDB,
331476b496b1SEli Britstein 							      act, parse_attr, hdrs,
331576b496b1SEli Britstein 							      &action, extack);
331676b496b1SEli Britstein 			} else {
331773867881SPablo Neira Ayuso 				err = parse_tc_vlan_action(priv, act, attr, &action);
331876b496b1SEli Britstein 			}
33191482bd3dSJianbo Liu 			if (err)
33201482bd3dSJianbo Liu 				return err;
33211482bd3dSJianbo Liu 
3322e85e02baSEli Britstein 			attr->split_count = attr->out_count;
332373867881SPablo Neira Ayuso 			break;
3324bdc837eeSEli Britstein 		case FLOW_ACTION_VLAN_MANGLE:
3325bdc837eeSEli Britstein 			err = add_vlan_rewrite_action(priv,
3326bdc837eeSEli Britstein 						      MLX5_FLOW_NAMESPACE_FDB,
3327bdc837eeSEli Britstein 						      act, parse_attr, hdrs,
3328bdc837eeSEli Britstein 						      &action, extack);
3329bdc837eeSEli Britstein 			if (err)
3330bdc837eeSEli Britstein 				return err;
3331bdc837eeSEli Britstein 
3332bdc837eeSEli Britstein 			attr->split_count = attr->out_count;
3333bdc837eeSEli Britstein 			break;
333473867881SPablo Neira Ayuso 		case FLOW_ACTION_TUNNEL_DECAP:
33351cab1cd7SOr Gerlitz 			action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
333673867881SPablo Neira Ayuso 			break;
333773867881SPablo Neira Ayuso 		case FLOW_ACTION_GOTO: {
333873867881SPablo Neira Ayuso 			u32 dest_chain = act->chain_index;
3339bf07aa73SPaul Blakey 			u32 max_chain = mlx5_eswitch_get_chain_range(esw);
3340bf07aa73SPaul Blakey 
3341bf07aa73SPaul Blakey 			if (dest_chain <= attr->chain) {
3342bf07aa73SPaul Blakey 				NL_SET_ERR_MSG(extack, "Goto earlier chain isn't supported");
3343bf07aa73SPaul Blakey 				return -EOPNOTSUPP;
3344bf07aa73SPaul Blakey 			}
3345bf07aa73SPaul Blakey 			if (dest_chain > max_chain) {
3346bf07aa73SPaul Blakey 				NL_SET_ERR_MSG(extack, "Requested destination chain is out of supported range");
3347bf07aa73SPaul Blakey 				return -EOPNOTSUPP;
3348bf07aa73SPaul Blakey 			}
3349e88afe75SOr Gerlitz 			action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
3350bf07aa73SPaul Blakey 			attr->dest_chain = dest_chain;
335173867881SPablo Neira Ayuso 			break;
3352bf07aa73SPaul Blakey 			}
335373867881SPablo Neira Ayuso 		default:
33542cc1cb1dSTonghao Zhang 			NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
33552cc1cb1dSTonghao Zhang 			return -EOPNOTSUPP;
335603a9d11eSOr Gerlitz 		}
335773867881SPablo Neira Ayuso 	}
3358bdd66ac0SOr Gerlitz 
33590bac1194SEli Britstein 	if (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
33600bac1194SEli Britstein 	    action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
33610bac1194SEli Britstein 	/* For prio tag mode, replace vlan pop with vlan prio tag
33620bac1194SEli Britstein 	 * rewrite.
33630bac1194SEli Britstein 		 */
33640bac1194SEli Britstein 		action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
33650bac1194SEli Britstein 		err = add_vlan_prio_tag_rewrite_action(priv, parse_attr, hdrs,
33660bac1194SEli Britstein 						       &action, extack);
33670bac1194SEli Britstein 		if (err)
33680bac1194SEli Britstein 			return err;
33690bac1194SEli Britstein 	}
33700bac1194SEli Britstein 
3371c500c86bSPablo Neira Ayuso 	if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
3372c500c86bSPablo Neira Ayuso 	    hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
337384be899fSTonghao Zhang 		err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_FDB,
337427c11b6bSEli Britstein 					    parse_attr, hdrs, &action, extack);
3375c500c86bSPablo Neira Ayuso 		if (err)
3376c500c86bSPablo Neira Ayuso 			return err;
337727c11b6bSEli Britstein 		/* In case all pedit actions are skipped, remove the MOD_HDR
337827c11b6bSEli Britstein 		 * flag. We might have set split_count either by pedit or
337927c11b6bSEli Britstein 		 * vlan pop/push; if there is no pop/push either, reset it too.
338027c11b6bSEli Britstein 		 */
338127c11b6bSEli Britstein 		if (parse_attr->num_mod_hdr_actions == 0) {
338227c11b6bSEli Britstein 			action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
3383e7739a60SEli Britstein 			kfree(parse_attr->mod_hdr_actions);
338427c11b6bSEli Britstein 			if (!((action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
338527c11b6bSEli Britstein 			      (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)))
338627c11b6bSEli Britstein 				attr->split_count = 0;
338727c11b6bSEli Britstein 		}
3388c500c86bSPablo Neira Ayuso 	}
3389c500c86bSPablo Neira Ayuso 
33901cab1cd7SOr Gerlitz 	attr->action = action;
339173867881SPablo Neira Ayuso 	if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
3392bdd66ac0SOr Gerlitz 		return -EOPNOTSUPP;
3393bdd66ac0SOr Gerlitz 
3394e88afe75SOr Gerlitz 	if (attr->dest_chain) {
3395e88afe75SOr Gerlitz 		if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
3396e88afe75SOr Gerlitz 			NL_SET_ERR_MSG(extack, "Mirroring goto chain rules isn't supported");
3397e88afe75SOr Gerlitz 			return -EOPNOTSUPP;
3398e88afe75SOr Gerlitz 		}
3399e88afe75SOr Gerlitz 		attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
3400e88afe75SOr Gerlitz 	}
3401e88afe75SOr Gerlitz 
3402e85e02baSEli Britstein 	if (attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
3403e98bedf5SEli Britstein 		NL_SET_ERR_MSG_MOD(extack,
3404e98bedf5SEli Britstein 				   "current firmware doesn't support split rule for port mirroring");
3405592d3651SChris Mi 		netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
3406592d3651SChris Mi 		return -EOPNOTSUPP;
3407592d3651SChris Mi 	}
3408592d3651SChris Mi 
340931c8eba5SOr Gerlitz 	return 0;
341003a9d11eSOr Gerlitz }
341103a9d11eSOr Gerlitz 
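/* Convert MLX5_TC_FLAG() classification flags into per-flow
 * MLX5E_TC_FLOW_FLAG_* bits.
 */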
3412226f2ca3SVlad Buslov static void get_flags(int flags, unsigned long *flow_flags)
341360bd4af8SOr Gerlitz {
3414226f2ca3SVlad Buslov 	unsigned long __flow_flags = 0;
341560bd4af8SOr Gerlitz 
3416226f2ca3SVlad Buslov 	if (flags & MLX5_TC_FLAG(INGRESS))
3417226f2ca3SVlad Buslov 		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_INGRESS);
3418226f2ca3SVlad Buslov 	if (flags & MLX5_TC_FLAG(EGRESS))
3419226f2ca3SVlad Buslov 		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_EGRESS);
342060bd4af8SOr Gerlitz 
3421226f2ca3SVlad Buslov 	if (flags & MLX5_TC_FLAG(ESW_OFFLOAD))
3422226f2ca3SVlad Buslov 		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
3423226f2ca3SVlad Buslov 	if (flags & MLX5_TC_FLAG(NIC_OFFLOAD))
3424226f2ca3SVlad Buslov 		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
3425d9ee0491SOr Gerlitz 
342660bd4af8SOr Gerlitz 	*flow_flags = __flow_flags;
342760bd4af8SOr Gerlitz }
342860bd4af8SOr Gerlitz 
342905866c82SOr Gerlitz static const struct rhashtable_params tc_ht_params = {
343005866c82SOr Gerlitz 	.head_offset = offsetof(struct mlx5e_tc_flow, node),
343105866c82SOr Gerlitz 	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
343205866c82SOr Gerlitz 	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
343305866c82SOr Gerlitz 	.automatic_shrinking = true,
343405866c82SOr Gerlitz };
343505866c82SOr Gerlitz 
3436226f2ca3SVlad Buslov static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv,
3437226f2ca3SVlad Buslov 				    unsigned long flags)
343805866c82SOr Gerlitz {
3439655dc3d2SOr Gerlitz 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3440655dc3d2SOr Gerlitz 	struct mlx5e_rep_priv *uplink_rpriv;
3441655dc3d2SOr Gerlitz 
3442226f2ca3SVlad Buslov 	if (flags & MLX5_TC_FLAG(ESW_OFFLOAD)) {
3443655dc3d2SOr Gerlitz 		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
3444ec1366c2SOz Shlomo 		return &uplink_rpriv->uplink_priv.tc_ht;
3445d9ee0491SOr Gerlitz 	} else /* NIC offload */
344605866c82SOr Gerlitz 		return &priv->fs.tc.ht;
344705866c82SOr Gerlitz }
344805866c82SOr Gerlitz 
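/* A duplicate flow on the peer eswitch is needed when the two eswitches are
 * devcom-paired and the device is in SR-IOV LAG or multipath mode, for
 * non-uplink representor ingress rules or rules with an encap action.
 */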
344904de7ddaSRoi Dayan static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
345004de7ddaSRoi Dayan {
34511418ddd9SAviv Heller 	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
3452b05af6aaSBodong Wang 	bool is_rep_ingress = attr->in_rep->vport != MLX5_VPORT_UPLINK &&
3453226f2ca3SVlad Buslov 		flow_flag_test(flow, INGRESS);
34541418ddd9SAviv Heller 	bool act_is_encap = !!(attr->action &
34551418ddd9SAviv Heller 			       MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT);
34561418ddd9SAviv Heller 	bool esw_paired = mlx5_devcom_is_paired(attr->in_mdev->priv.devcom,
34571418ddd9SAviv Heller 						MLX5_DEVCOM_ESW_OFFLOADS);
34581418ddd9SAviv Heller 
345910fbb1cdSRoi Dayan 	if (!esw_paired)
346010fbb1cdSRoi Dayan 		return false;
346110fbb1cdSRoi Dayan 
346210fbb1cdSRoi Dayan 	if ((mlx5_lag_is_sriov(attr->in_mdev) ||
346310fbb1cdSRoi Dayan 	     mlx5_lag_is_multipath(attr->in_mdev)) &&
346410fbb1cdSRoi Dayan 	    (is_rep_ingress || act_is_encap))
346510fbb1cdSRoi Dayan 		return true;
346610fbb1cdSRoi Dayan 
346710fbb1cdSRoi Dayan 	return false;
346804de7ddaSRoi Dayan }
346904de7ddaSRoi Dayan 
3470a88780a9SRoi Dayan static int
3471a88780a9SRoi Dayan mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
3472226f2ca3SVlad Buslov 		 struct flow_cls_offload *f, unsigned long flow_flags,
3473a88780a9SRoi Dayan 		 struct mlx5e_tc_flow_parse_attr **__parse_attr,
3474a88780a9SRoi Dayan 		 struct mlx5e_tc_flow **__flow)
3475e3a2b7edSAmir Vadai {
347617091853SOr Gerlitz 	struct mlx5e_tc_flow_parse_attr *parse_attr;
34773bc4b7bfSOr Gerlitz 	struct mlx5e_tc_flow *flow;
34785a7e5bcbSVlad Buslov 	int out_index, err;
3479776b12b6SOr Gerlitz 
348065ba8fb7SOr Gerlitz 	flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
34811b9a07eeSLeon Romanovsky 	parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
348217091853SOr Gerlitz 	if (!parse_attr || !flow) {
3483e3a2b7edSAmir Vadai 		err = -ENOMEM;
3484e3a2b7edSAmir Vadai 		goto err_free;
3485e3a2b7edSAmir Vadai 	}
3486e3a2b7edSAmir Vadai 
3487e3a2b7edSAmir Vadai 	flow->cookie = f->cookie;
348865ba8fb7SOr Gerlitz 	flow->flags = flow_flags;
3489655dc3d2SOr Gerlitz 	flow->priv = priv;
34905a7e5bcbSVlad Buslov 	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
34915a7e5bcbSVlad Buslov 		INIT_LIST_HEAD(&flow->encaps[out_index].list);
34925a7e5bcbSVlad Buslov 	INIT_LIST_HEAD(&flow->mod_hdr);
34935a7e5bcbSVlad Buslov 	INIT_LIST_HEAD(&flow->hairpin);
34945a7e5bcbSVlad Buslov 	refcount_set(&flow->refcnt, 1);
3495e3a2b7edSAmir Vadai 
3496a88780a9SRoi Dayan 	*__flow = flow;
3497a88780a9SRoi Dayan 	*__parse_attr = parse_attr;
3498a88780a9SRoi Dayan 
3499a88780a9SRoi Dayan 	return 0;
3500a88780a9SRoi Dayan 
3501a88780a9SRoi Dayan err_free:
3502a88780a9SRoi Dayan 	kfree(flow);
3503a88780a9SRoi Dayan 	kvfree(parse_attr);
3504a88780a9SRoi Dayan 	return err;
3505adb4c123SOr Gerlitz }
3506adb4c123SOr Gerlitz 
3507988ab9c7STonghao Zhang static void
3508988ab9c7STonghao Zhang mlx5e_flow_esw_attr_init(struct mlx5_esw_flow_attr *esw_attr,
3509988ab9c7STonghao Zhang 			 struct mlx5e_priv *priv,
3510988ab9c7STonghao Zhang 			 struct mlx5e_tc_flow_parse_attr *parse_attr,
3511f9e30088SPablo Neira Ayuso 			 struct flow_cls_offload *f,
3512988ab9c7STonghao Zhang 			 struct mlx5_eswitch_rep *in_rep,
3513988ab9c7STonghao Zhang 			 struct mlx5_core_dev *in_mdev)
3514988ab9c7STonghao Zhang {
3515988ab9c7STonghao Zhang 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3516988ab9c7STonghao Zhang 
3517988ab9c7STonghao Zhang 	esw_attr->parse_attr = parse_attr;
3518988ab9c7STonghao Zhang 	esw_attr->chain = f->common.chain_index;
3519ef01adaeSPablo Neira Ayuso 	esw_attr->prio = f->common.prio;
3520988ab9c7STonghao Zhang 
3521988ab9c7STonghao Zhang 	esw_attr->in_rep = in_rep;
3522988ab9c7STonghao Zhang 	esw_attr->in_mdev = in_mdev;
3523988ab9c7STonghao Zhang 
3524988ab9c7STonghao Zhang 	if (MLX5_CAP_ESW(esw->dev, counter_eswitch_affinity) ==
3525988ab9c7STonghao Zhang 	    MLX5_COUNTER_SOURCE_ESWITCH)
3526988ab9c7STonghao Zhang 		esw_attr->counter_dev = in_mdev;
3527988ab9c7STonghao Zhang 	else
3528988ab9c7STonghao Zhang 		esw_attr->counter_dev = priv->mdev;
3529988ab9c7STonghao Zhang }
3530988ab9c7STonghao Zhang 
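/* Allocate an FDB flow, parse its match and actions and offload it to the
 * eswitch. Flows that fail to offload with -ENETUNREACH under multipath are
 * queued on the unready list and retried later.
 */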
353171129676SJason Gunthorpe static struct mlx5e_tc_flow *
353204de7ddaSRoi Dayan __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
3533f9e30088SPablo Neira Ayuso 		     struct flow_cls_offload *f,
3534226f2ca3SVlad Buslov 		     unsigned long flow_flags,
3535d11afc26SOz Shlomo 		     struct net_device *filter_dev,
353604de7ddaSRoi Dayan 		     struct mlx5_eswitch_rep *in_rep,
353771129676SJason Gunthorpe 		     struct mlx5_core_dev *in_mdev)
3538a88780a9SRoi Dayan {
3539f9e30088SPablo Neira Ayuso 	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
3540a88780a9SRoi Dayan 	struct netlink_ext_ack *extack = f->common.extack;
3541a88780a9SRoi Dayan 	struct mlx5e_tc_flow_parse_attr *parse_attr;
3542a88780a9SRoi Dayan 	struct mlx5e_tc_flow *flow;
3543a88780a9SRoi Dayan 	int attr_size, err;
3544a88780a9SRoi Dayan 
3545226f2ca3SVlad Buslov 	flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
3546a88780a9SRoi Dayan 	attr_size  = sizeof(struct mlx5_esw_flow_attr);
3547a88780a9SRoi Dayan 	err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
3548a88780a9SRoi Dayan 			       &parse_attr, &flow);
3549a88780a9SRoi Dayan 	if (err)
3550a88780a9SRoi Dayan 		goto out;
3551988ab9c7STonghao Zhang 
3552d11afc26SOz Shlomo 	parse_attr->filter_dev = filter_dev;
3553988ab9c7STonghao Zhang 	mlx5e_flow_esw_attr_init(flow->esw_attr,
3554988ab9c7STonghao Zhang 				 priv, parse_attr,
3555988ab9c7STonghao Zhang 				 f, in_rep, in_mdev);
3556988ab9c7STonghao Zhang 
355754c177caSOz Shlomo 	err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
355854c177caSOz Shlomo 			       f, filter_dev);
3559d11afc26SOz Shlomo 	if (err)
3560d11afc26SOz Shlomo 		goto err_free;
3561a88780a9SRoi Dayan 
35626f9af8ffSTonghao Zhang 	err = parse_tc_fdb_actions(priv, &rule->action, flow, extack);
3563a88780a9SRoi Dayan 	if (err)
3564a88780a9SRoi Dayan 		goto err_free;
3565a88780a9SRoi Dayan 
35667040632dSTonghao Zhang 	err = mlx5e_tc_add_fdb_flow(priv, flow, extack);
3567ef06c9eeSRoi Dayan 	if (err) {
3568ef06c9eeSRoi Dayan 		if (!(err == -ENETUNREACH && mlx5_lag_is_multipath(in_mdev)))
3569aa0cbbaeSOr Gerlitz 			goto err_free;
35705c40348cSOr Gerlitz 
3571b4a23329SRoi Dayan 		add_unready_flow(flow);
3572ef06c9eeSRoi Dayan 	}
3573ef06c9eeSRoi Dayan 
357471129676SJason Gunthorpe 	return flow;
3575e3a2b7edSAmir Vadai 
3576e3a2b7edSAmir Vadai err_free:
35775a7e5bcbSVlad Buslov 	mlx5e_flow_put(priv, flow);
3578a88780a9SRoi Dayan out:
357971129676SJason Gunthorpe 	return ERR_PTR(err);
3580a88780a9SRoi Dayan }
3581a88780a9SRoi Dayan 
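/* Offload a duplicate of the flow on the devcom-paired peer eswitch and link
 * the two flows together.
 */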
3582f9e30088SPablo Neira Ayuso static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f,
358395dc1902SRoi Dayan 				      struct mlx5e_tc_flow *flow,
3584226f2ca3SVlad Buslov 				      unsigned long flow_flags)
358504de7ddaSRoi Dayan {
358604de7ddaSRoi Dayan 	struct mlx5e_priv *priv = flow->priv, *peer_priv;
358704de7ddaSRoi Dayan 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch, *peer_esw;
358804de7ddaSRoi Dayan 	struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
358904de7ddaSRoi Dayan 	struct mlx5e_tc_flow_parse_attr *parse_attr;
359004de7ddaSRoi Dayan 	struct mlx5e_rep_priv *peer_urpriv;
359104de7ddaSRoi Dayan 	struct mlx5e_tc_flow *peer_flow;
359204de7ddaSRoi Dayan 	struct mlx5_core_dev *in_mdev;
359304de7ddaSRoi Dayan 	int err = 0;
359404de7ddaSRoi Dayan 
359504de7ddaSRoi Dayan 	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
359604de7ddaSRoi Dayan 	if (!peer_esw)
359704de7ddaSRoi Dayan 		return -ENODEV;
359804de7ddaSRoi Dayan 
359904de7ddaSRoi Dayan 	peer_urpriv = mlx5_eswitch_get_uplink_priv(peer_esw, REP_ETH);
360004de7ddaSRoi Dayan 	peer_priv = netdev_priv(peer_urpriv->netdev);
360104de7ddaSRoi Dayan 
360204de7ddaSRoi Dayan 	/* in_mdev is assigned the mdev from which the packet originated.
360304de7ddaSRoi Dayan 	 * Packets redirected to the uplink use the same mdev as the
360404de7ddaSRoi Dayan 	 * original flow, while packets redirected from the uplink use
360504de7ddaSRoi Dayan 	 * the peer mdev.
360604de7ddaSRoi Dayan 	 */
3607b05af6aaSBodong Wang 	if (flow->esw_attr->in_rep->vport == MLX5_VPORT_UPLINK)
360804de7ddaSRoi Dayan 		in_mdev = peer_priv->mdev;
360904de7ddaSRoi Dayan 	else
361004de7ddaSRoi Dayan 		in_mdev = priv->mdev;
361104de7ddaSRoi Dayan 
361204de7ddaSRoi Dayan 	parse_attr = flow->esw_attr->parse_attr;
361395dc1902SRoi Dayan 	peer_flow = __mlx5e_add_fdb_flow(peer_priv, f, flow_flags,
361404de7ddaSRoi Dayan 					 parse_attr->filter_dev,
361571129676SJason Gunthorpe 					 flow->esw_attr->in_rep, in_mdev);
361671129676SJason Gunthorpe 	if (IS_ERR(peer_flow)) {
361771129676SJason Gunthorpe 		err = PTR_ERR(peer_flow);
361804de7ddaSRoi Dayan 		goto out;
361971129676SJason Gunthorpe 	}
362004de7ddaSRoi Dayan 
362104de7ddaSRoi Dayan 	flow->peer_flow = peer_flow;
3622226f2ca3SVlad Buslov 	flow_flag_set(flow, DUP);
362304de7ddaSRoi Dayan 	mutex_lock(&esw->offloads.peer_mutex);
362404de7ddaSRoi Dayan 	list_add_tail(&flow->peer, &esw->offloads.peer_flows);
362504de7ddaSRoi Dayan 	mutex_unlock(&esw->offloads.peer_mutex);
362604de7ddaSRoi Dayan 
362704de7ddaSRoi Dayan out:
362804de7ddaSRoi Dayan 	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
362904de7ddaSRoi Dayan 	return err;
363004de7ddaSRoi Dayan }
363104de7ddaSRoi Dayan 
363204de7ddaSRoi Dayan static int
363304de7ddaSRoi Dayan mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
3634f9e30088SPablo Neira Ayuso 		   struct flow_cls_offload *f,
3635226f2ca3SVlad Buslov 		   unsigned long flow_flags,
363604de7ddaSRoi Dayan 		   struct net_device *filter_dev,
363704de7ddaSRoi Dayan 		   struct mlx5e_tc_flow **__flow)
363804de7ddaSRoi Dayan {
363904de7ddaSRoi Dayan 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
364004de7ddaSRoi Dayan 	struct mlx5_eswitch_rep *in_rep = rpriv->rep;
364104de7ddaSRoi Dayan 	struct mlx5_core_dev *in_mdev = priv->mdev;
364204de7ddaSRoi Dayan 	struct mlx5e_tc_flow *flow;
364304de7ddaSRoi Dayan 	int err;
364404de7ddaSRoi Dayan 
364571129676SJason Gunthorpe 	flow = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep,
364671129676SJason Gunthorpe 				    in_mdev);
364771129676SJason Gunthorpe 	if (IS_ERR(flow))
364871129676SJason Gunthorpe 		return PTR_ERR(flow);
364904de7ddaSRoi Dayan 
365004de7ddaSRoi Dayan 	if (is_peer_flow_needed(flow)) {
365195dc1902SRoi Dayan 		err = mlx5e_tc_add_fdb_peer_flow(f, flow, flow_flags);
365204de7ddaSRoi Dayan 		if (err) {
365304de7ddaSRoi Dayan 			mlx5e_tc_del_fdb_flow(priv, flow);
365404de7ddaSRoi Dayan 			goto out;
365504de7ddaSRoi Dayan 		}
365604de7ddaSRoi Dayan 	}
365704de7ddaSRoi Dayan 
365804de7ddaSRoi Dayan 	*__flow = flow;
365904de7ddaSRoi Dayan 
366004de7ddaSRoi Dayan 	return 0;
366104de7ddaSRoi Dayan 
366204de7ddaSRoi Dayan out:
366304de7ddaSRoi Dayan 	return err;
366404de7ddaSRoi Dayan }
366504de7ddaSRoi Dayan 
3666a88780a9SRoi Dayan static int
3667a88780a9SRoi Dayan mlx5e_add_nic_flow(struct mlx5e_priv *priv,
3668f9e30088SPablo Neira Ayuso 		   struct flow_cls_offload *f,
3669226f2ca3SVlad Buslov 		   unsigned long flow_flags,
3670d11afc26SOz Shlomo 		   struct net_device *filter_dev,
3671a88780a9SRoi Dayan 		   struct mlx5e_tc_flow **__flow)
3672a88780a9SRoi Dayan {
3673f9e30088SPablo Neira Ayuso 	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
3674a88780a9SRoi Dayan 	struct netlink_ext_ack *extack = f->common.extack;
3675a88780a9SRoi Dayan 	struct mlx5e_tc_flow_parse_attr *parse_attr;
3676a88780a9SRoi Dayan 	struct mlx5e_tc_flow *flow;
3677a88780a9SRoi Dayan 	int attr_size, err;
3678a88780a9SRoi Dayan 
3679bf07aa73SPaul Blakey 	/* multi-chain not supported for NIC rules */
3680bf07aa73SPaul Blakey 	if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
3681bf07aa73SPaul Blakey 		return -EOPNOTSUPP;
3682bf07aa73SPaul Blakey 
3683226f2ca3SVlad Buslov 	flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
3684a88780a9SRoi Dayan 	attr_size  = sizeof(struct mlx5_nic_flow_attr);
3685a88780a9SRoi Dayan 	err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
3686a88780a9SRoi Dayan 			       &parse_attr, &flow);
3687a88780a9SRoi Dayan 	if (err)
3688a88780a9SRoi Dayan 		goto out;
3689a88780a9SRoi Dayan 
3690d11afc26SOz Shlomo 	parse_attr->filter_dev = filter_dev;
369154c177caSOz Shlomo 	err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
369254c177caSOz Shlomo 			       f, filter_dev);
3693d11afc26SOz Shlomo 	if (err)
3694d11afc26SOz Shlomo 		goto err_free;
3695d11afc26SOz Shlomo 
369673867881SPablo Neira Ayuso 	err = parse_tc_nic_actions(priv, &rule->action, parse_attr, flow, extack);
3697a88780a9SRoi Dayan 	if (err)
3698a88780a9SRoi Dayan 		goto err_free;
3699a88780a9SRoi Dayan 
3700a88780a9SRoi Dayan 	err = mlx5e_tc_add_nic_flow(priv, parse_attr, flow, extack);
3701a88780a9SRoi Dayan 	if (err)
3702a88780a9SRoi Dayan 		goto err_free;
3703a88780a9SRoi Dayan 
3704226f2ca3SVlad Buslov 	flow_flag_set(flow, OFFLOADED);
3705a88780a9SRoi Dayan 	kvfree(parse_attr);
3706a88780a9SRoi Dayan 	*__flow = flow;
3707a88780a9SRoi Dayan 
3708a88780a9SRoi Dayan 	return 0;
3709a88780a9SRoi Dayan 
3710a88780a9SRoi Dayan err_free:
37115a7e5bcbSVlad Buslov 	mlx5e_flow_put(priv, flow);
3712a88780a9SRoi Dayan 	kvfree(parse_attr);
3713a88780a9SRoi Dayan out:
3714a88780a9SRoi Dayan 	return err;
3715a88780a9SRoi Dayan }
3716a88780a9SRoi Dayan 
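/* Add a flow through the FDB path when the eswitch is in switchdev (offloads)
 * mode, otherwise through the NIC offload path.
 */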
3717a88780a9SRoi Dayan static int
3718a88780a9SRoi Dayan mlx5e_tc_add_flow(struct mlx5e_priv *priv,
3719f9e30088SPablo Neira Ayuso 		  struct flow_cls_offload *f,
3720226f2ca3SVlad Buslov 		  unsigned long flags,
3721d11afc26SOz Shlomo 		  struct net_device *filter_dev,
3722a88780a9SRoi Dayan 		  struct mlx5e_tc_flow **flow)
3723a88780a9SRoi Dayan {
3724a88780a9SRoi Dayan 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3725226f2ca3SVlad Buslov 	unsigned long flow_flags;
3726a88780a9SRoi Dayan 	int err;
3727a88780a9SRoi Dayan 
3728a88780a9SRoi Dayan 	get_flags(flags, &flow_flags);
3729a88780a9SRoi Dayan 
3730bf07aa73SPaul Blakey 	if (!tc_can_offload_extack(priv->netdev, f->common.extack))
3731bf07aa73SPaul Blakey 		return -EOPNOTSUPP;
3732bf07aa73SPaul Blakey 
3733f6455de0SBodong Wang 	if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
3734d11afc26SOz Shlomo 		err = mlx5e_add_fdb_flow(priv, f, flow_flags,
3735d11afc26SOz Shlomo 					 filter_dev, flow);
3736a88780a9SRoi Dayan 	else
3737d11afc26SOz Shlomo 		err = mlx5e_add_nic_flow(priv, f, flow_flags,
3738d11afc26SOz Shlomo 					 filter_dev, flow);
3739a88780a9SRoi Dayan 
3740a88780a9SRoi Dayan 	return err;
3741a88780a9SRoi Dayan }
3742a88780a9SRoi Dayan 
374371d82d2aSOz Shlomo int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
3744226f2ca3SVlad Buslov 			   struct flow_cls_offload *f, unsigned long flags)
3745a88780a9SRoi Dayan {
3746a88780a9SRoi Dayan 	struct netlink_ext_ack *extack = f->common.extack;
3747d9ee0491SOr Gerlitz 	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
3748a88780a9SRoi Dayan 	struct mlx5e_tc_flow *flow;
3749a88780a9SRoi Dayan 	int err = 0;
3750a88780a9SRoi Dayan 
3751c5d326b2SVlad Buslov 	rcu_read_lock();
3752c5d326b2SVlad Buslov 	flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
3753c5d326b2SVlad Buslov 	rcu_read_unlock();
3754a88780a9SRoi Dayan 	if (flow) {
3755a88780a9SRoi Dayan 		NL_SET_ERR_MSG_MOD(extack,
3756a88780a9SRoi Dayan 				   "flow cookie already exists, ignoring");
3757a88780a9SRoi Dayan 		netdev_warn_once(priv->netdev,
3758a88780a9SRoi Dayan 				 "flow cookie %lx already exists, ignoring\n",
3759a88780a9SRoi Dayan 				 f->cookie);
37600e1c1a2fSVlad Buslov 		err = -EEXIST;
3761a88780a9SRoi Dayan 		goto out;
3762a88780a9SRoi Dayan 	}
3763a88780a9SRoi Dayan 
3764d11afc26SOz Shlomo 	err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow);
3765a88780a9SRoi Dayan 	if (err)
3766a88780a9SRoi Dayan 		goto out;
3767a88780a9SRoi Dayan 
3768c5d326b2SVlad Buslov 	err = rhashtable_lookup_insert_fast(tc_ht, &flow->node, tc_ht_params);
3769a88780a9SRoi Dayan 	if (err)
3770a88780a9SRoi Dayan 		goto err_free;
3771a88780a9SRoi Dayan 
3772a88780a9SRoi Dayan 	return 0;
3773a88780a9SRoi Dayan 
3774a88780a9SRoi Dayan err_free:
37755a7e5bcbSVlad Buslov 	mlx5e_flow_put(priv, flow);
3776a88780a9SRoi Dayan out:
3777e3a2b7edSAmir Vadai 	return err;
3778e3a2b7edSAmir Vadai }
3779e3a2b7edSAmir Vadai 
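/* Check that the ingress/egress direction requested in @flags matches the
 * direction the flow was added with.
 */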
37808f8ae895SOr Gerlitz static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags)
37818f8ae895SOr Gerlitz {
3782226f2ca3SVlad Buslov 	bool dir_ingress = !!(flags & MLX5_TC_FLAG(INGRESS));
3783226f2ca3SVlad Buslov 	bool dir_egress = !!(flags & MLX5_TC_FLAG(EGRESS));
37848f8ae895SOr Gerlitz 
3785226f2ca3SVlad Buslov 	return flow_flag_test(flow, INGRESS) == dir_ingress &&
3786226f2ca3SVlad Buslov 		flow_flag_test(flow, EGRESS) == dir_egress;
37878f8ae895SOr Gerlitz }
37888f8ae895SOr Gerlitz 
378971d82d2aSOz Shlomo int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
3790226f2ca3SVlad Buslov 			struct flow_cls_offload *f, unsigned long flags)
3791e3a2b7edSAmir Vadai {
3792d9ee0491SOr Gerlitz 	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
3793e3a2b7edSAmir Vadai 	struct mlx5e_tc_flow *flow;
3794c5d326b2SVlad Buslov 	int err;
3795e3a2b7edSAmir Vadai 
3796c5d326b2SVlad Buslov 	rcu_read_lock();
379705866c82SOr Gerlitz 	flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
3798c5d326b2SVlad Buslov 	if (!flow || !same_flow_direction(flow, flags)) {
3799c5d326b2SVlad Buslov 		err = -EINVAL;
3800c5d326b2SVlad Buslov 		goto errout;
3801c5d326b2SVlad Buslov 	}
3802e3a2b7edSAmir Vadai 
3803c5d326b2SVlad Buslov 	/* Only delete the flow if it doesn't have MLX5E_TC_FLOW_DELETED flag
3804c5d326b2SVlad Buslov 	 * set.
3805c5d326b2SVlad Buslov 	 */
3806c5d326b2SVlad Buslov 	if (flow_flag_test_and_set(flow, DELETED)) {
3807c5d326b2SVlad Buslov 		err = -EINVAL;
3808c5d326b2SVlad Buslov 		goto errout;
3809c5d326b2SVlad Buslov 	}
381005866c82SOr Gerlitz 	rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params);
3811c5d326b2SVlad Buslov 	rcu_read_unlock();
3812e3a2b7edSAmir Vadai 
38135a7e5bcbSVlad Buslov 	mlx5e_flow_put(priv, flow);
3814e3a2b7edSAmir Vadai 
3815e3a2b7edSAmir Vadai 	return 0;
3816c5d326b2SVlad Buslov 
3817c5d326b2SVlad Buslov errout:
3818c5d326b2SVlad Buslov 	rcu_read_unlock();
3819c5d326b2SVlad Buslov 	return err;
3820e3a2b7edSAmir Vadai }
3821e3a2b7edSAmir Vadai 
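/* Query the flow's HW counter and report the stats to TC. When the flow is
 * duplicated on the peer eswitch, the counters of both flows are summed.
 */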
382271d82d2aSOz Shlomo int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
3823226f2ca3SVlad Buslov 		       struct flow_cls_offload *f, unsigned long flags)
3824aad7e08dSAmir Vadai {
382504de7ddaSRoi Dayan 	struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
3826d9ee0491SOr Gerlitz 	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
382704de7ddaSRoi Dayan 	struct mlx5_eswitch *peer_esw;
3828aad7e08dSAmir Vadai 	struct mlx5e_tc_flow *flow;
3829aad7e08dSAmir Vadai 	struct mlx5_fc *counter;
3830316d5f72SRoi Dayan 	u64 lastuse = 0;
3831316d5f72SRoi Dayan 	u64 packets = 0;
3832316d5f72SRoi Dayan 	u64 bytes = 0;
38335a7e5bcbSVlad Buslov 	int err = 0;
3834aad7e08dSAmir Vadai 
3835c5d326b2SVlad Buslov 	rcu_read_lock();
3836c5d326b2SVlad Buslov 	flow = mlx5e_flow_get(rhashtable_lookup(tc_ht, &f->cookie,
38375a7e5bcbSVlad Buslov 						tc_ht_params));
3838c5d326b2SVlad Buslov 	rcu_read_unlock();
38395a7e5bcbSVlad Buslov 	if (IS_ERR(flow))
38405a7e5bcbSVlad Buslov 		return PTR_ERR(flow);
38415a7e5bcbSVlad Buslov 
38425a7e5bcbSVlad Buslov 	if (!same_flow_direction(flow, flags)) {
38435a7e5bcbSVlad Buslov 		err = -EINVAL;
38445a7e5bcbSVlad Buslov 		goto errout;
38455a7e5bcbSVlad Buslov 	}
3846aad7e08dSAmir Vadai 
3847226f2ca3SVlad Buslov 	if (mlx5e_is_offloaded_flow(flow)) {
3848b8aee822SMark Bloch 		counter = mlx5e_tc_get_counter(flow);
3849aad7e08dSAmir Vadai 		if (!counter)
38505a7e5bcbSVlad Buslov 			goto errout;
3851aad7e08dSAmir Vadai 
3852aad7e08dSAmir Vadai 		mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
3853316d5f72SRoi Dayan 	}
3854aad7e08dSAmir Vadai 
3855316d5f72SRoi Dayan 	/* Under multipath it's possible for one rule to be currently
3856316d5f72SRoi Dayan 	 * un-offloaded while the other rule is offloaded.
3857316d5f72SRoi Dayan 	 */
385804de7ddaSRoi Dayan 	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
385904de7ddaSRoi Dayan 	if (!peer_esw)
386004de7ddaSRoi Dayan 		goto out;
386104de7ddaSRoi Dayan 
3862226f2ca3SVlad Buslov 	if (flow_flag_test(flow, DUP) &&
3863226f2ca3SVlad Buslov 	    flow_flag_test(flow->peer_flow, OFFLOADED)) {
386404de7ddaSRoi Dayan 		u64 bytes2;
386504de7ddaSRoi Dayan 		u64 packets2;
386604de7ddaSRoi Dayan 		u64 lastuse2;
386704de7ddaSRoi Dayan 
386804de7ddaSRoi Dayan 		counter = mlx5e_tc_get_counter(flow->peer_flow);
3869316d5f72SRoi Dayan 		if (!counter)
3870316d5f72SRoi Dayan 			goto no_peer_counter;
387104de7ddaSRoi Dayan 		mlx5_fc_query_cached(counter, &bytes2, &packets2, &lastuse2);
387204de7ddaSRoi Dayan 
387304de7ddaSRoi Dayan 		bytes += bytes2;
387404de7ddaSRoi Dayan 		packets += packets2;
387504de7ddaSRoi Dayan 		lastuse = max_t(u64, lastuse, lastuse2);
387604de7ddaSRoi Dayan 	}
387704de7ddaSRoi Dayan 
3878316d5f72SRoi Dayan no_peer_counter:
387904de7ddaSRoi Dayan 	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
388004de7ddaSRoi Dayan out:
38813b1903efSPablo Neira Ayuso 	flow_stats_update(&f->stats, bytes, packets, lastuse);
38825a7e5bcbSVlad Buslov errout:
38835a7e5bcbSVlad Buslov 	mlx5e_flow_put(priv, flow);
38845a7e5bcbSVlad Buslov 	return err;
3885aad7e08dSAmir Vadai }
3886aad7e08dSAmir Vadai 
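/* Convert the matchall police rate from bytes/sec to Mbit/sec and apply it as
 * the rate limit of this representor's vport.
 */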
3887fcb64c0fSEli Cohen static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
3888fcb64c0fSEli Cohen 			       struct netlink_ext_ack *extack)
3889fcb64c0fSEli Cohen {
3890fcb64c0fSEli Cohen 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
3891fcb64c0fSEli Cohen 	struct mlx5_eswitch *esw;
3892fcb64c0fSEli Cohen 	u16 vport_num;
3893fcb64c0fSEli Cohen 	u32 rate_mbps;
3894fcb64c0fSEli Cohen 	int err;
3895fcb64c0fSEli Cohen 
3896fcb64c0fSEli Cohen 	esw = priv->mdev->priv.eswitch;
3897fcb64c0fSEli Cohen 	/* rate is given in bytes/sec.
3898fcb64c0fSEli Cohen 	 * First convert to bits/sec and then round to the nearest Mbit/sec
3899fcb64c0fSEli Cohen 	 * (1 Mbit = 10^6 bits).
3900fcb64c0fSEli Cohen 	 * Moreover, if rate is non-zero we choose to configure a minimum of
3901fcb64c0fSEli Cohen 	 * 1 Mbit/sec.
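	 * For example, rate = 200000 bytes/sec is 1600000 bits/sec and rounds
	 * to 2 Mbit/sec.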
3902fcb64c0fSEli Cohen 	 */
3903fcb64c0fSEli Cohen 	rate_mbps = rate ? max_t(u32, (rate * 8 + 500000) / 1000000, 1) : 0;
3904fcb64c0fSEli Cohen 	vport_num = rpriv->rep->vport;
3905fcb64c0fSEli Cohen 
3906fcb64c0fSEli Cohen 	err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps);
3907fcb64c0fSEli Cohen 	if (err)
3908fcb64c0fSEli Cohen 		NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware");
3909fcb64c0fSEli Cohen 
3910fcb64c0fSEli Cohen 	return err;
3911fcb64c0fSEli Cohen }
3912fcb64c0fSEli Cohen 
3913fcb64c0fSEli Cohen static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
3914fcb64c0fSEli Cohen 					struct flow_action *flow_action,
3915fcb64c0fSEli Cohen 					struct netlink_ext_ack *extack)
3916fcb64c0fSEli Cohen {
3917fcb64c0fSEli Cohen 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
3918fcb64c0fSEli Cohen 	const struct flow_action_entry *act;
3919fcb64c0fSEli Cohen 	int err;
3920fcb64c0fSEli Cohen 	int i;
3921fcb64c0fSEli Cohen 
3922fcb64c0fSEli Cohen 	if (!flow_action_has_entries(flow_action)) {
3923fcb64c0fSEli Cohen 		NL_SET_ERR_MSG_MOD(extack, "matchall called with no action");
3924fcb64c0fSEli Cohen 		return -EINVAL;
3925fcb64c0fSEli Cohen 	}
3926fcb64c0fSEli Cohen 
3927fcb64c0fSEli Cohen 	if (!flow_offload_has_one_action(flow_action)) {
3928fcb64c0fSEli Cohen 		NL_SET_ERR_MSG_MOD(extack, "matchall policing support only a single action");
3929fcb64c0fSEli Cohen 		return -EOPNOTSUPP;
3930fcb64c0fSEli Cohen 	}
3931fcb64c0fSEli Cohen 
3932fcb64c0fSEli Cohen 	flow_action_for_each(i, act, flow_action) {
3933fcb64c0fSEli Cohen 		switch (act->id) {
3934fcb64c0fSEli Cohen 		case FLOW_ACTION_POLICE:
3935fcb64c0fSEli Cohen 			err = apply_police_params(priv, act->police.rate_bytes_ps, extack);
3936fcb64c0fSEli Cohen 			if (err)
3937fcb64c0fSEli Cohen 				return err;
3938fcb64c0fSEli Cohen 
3939fcb64c0fSEli Cohen 			rpriv->prev_vf_vport_stats = priv->stats.vf_vport;
3940fcb64c0fSEli Cohen 			break;
3941fcb64c0fSEli Cohen 		default:
3942fcb64c0fSEli Cohen 			NL_SET_ERR_MSG_MOD(extack, "mlx5 supports only police action for matchall");
3943fcb64c0fSEli Cohen 			return -EOPNOTSUPP;
3944fcb64c0fSEli Cohen 		}
3945fcb64c0fSEli Cohen 	}
3946fcb64c0fSEli Cohen 
3947fcb64c0fSEli Cohen 	return 0;
3948fcb64c0fSEli Cohen }
3949fcb64c0fSEli Cohen 
3950fcb64c0fSEli Cohen int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
3951fcb64c0fSEli Cohen 				struct tc_cls_matchall_offload *ma)
3952fcb64c0fSEli Cohen {
3953fcb64c0fSEli Cohen 	struct netlink_ext_ack *extack = ma->common.extack;
3954fcb64c0fSEli Cohen 	int prio = TC_H_MAJ(ma->common.prio) >> 16;
3955fcb64c0fSEli Cohen 
3956fcb64c0fSEli Cohen 	if (prio != 1) {
3957fcb64c0fSEli Cohen 		NL_SET_ERR_MSG_MOD(extack, "only priority 1 is supported");
3958fcb64c0fSEli Cohen 		return -EINVAL;
3959fcb64c0fSEli Cohen 	}
3960fcb64c0fSEli Cohen 
3961fcb64c0fSEli Cohen 	return scan_tc_matchall_fdb_actions(priv, &ma->rule->action, extack);
3962fcb64c0fSEli Cohen }
3963fcb64c0fSEli Cohen 
3964fcb64c0fSEli Cohen int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv,
3965fcb64c0fSEli Cohen 			     struct tc_cls_matchall_offload *ma)
3966fcb64c0fSEli Cohen {
3967fcb64c0fSEli Cohen 	struct netlink_ext_ack *extack = ma->common.extack;
3968fcb64c0fSEli Cohen 
3969fcb64c0fSEli Cohen 	return apply_police_params(priv, 0, extack);
3970fcb64c0fSEli Cohen }
3971fcb64c0fSEli Cohen 
3972fcb64c0fSEli Cohen void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
3973fcb64c0fSEli Cohen 			     struct tc_cls_matchall_offload *ma)
3974fcb64c0fSEli Cohen {
3975fcb64c0fSEli Cohen 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
3976fcb64c0fSEli Cohen 	struct rtnl_link_stats64 cur_stats;
3977fcb64c0fSEli Cohen 	u64 dbytes;
3978fcb64c0fSEli Cohen 	u64 dpkts;
3979fcb64c0fSEli Cohen 
3980fcb64c0fSEli Cohen 	cur_stats = priv->stats.vf_vport;
3981fcb64c0fSEli Cohen 	dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets;
3982fcb64c0fSEli Cohen 	dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes;
3983fcb64c0fSEli Cohen 	rpriv->prev_vf_vport_stats = cur_stats;
3984fcb64c0fSEli Cohen 	flow_stats_update(&ma->stats, dpkts, dbytes, jiffies);
3985fcb64c0fSEli Cohen }
3986fcb64c0fSEli Cohen 
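/* When a netdev sharing the same HW is unregistered, mark the hairpin pairs
 * whose peer matches that device's vhca id as having a gone peer.
 */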
39874d8fcf21SAlaa Hleihel static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
39884d8fcf21SAlaa Hleihel 					      struct mlx5e_priv *peer_priv)
39894d8fcf21SAlaa Hleihel {
39904d8fcf21SAlaa Hleihel 	struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
3991db76ca24SVlad Buslov 	struct mlx5e_hairpin_entry *hpe, *tmp;
3992db76ca24SVlad Buslov 	LIST_HEAD(init_wait_list);
39934d8fcf21SAlaa Hleihel 	u16 peer_vhca_id;
39944d8fcf21SAlaa Hleihel 	int bkt;
39954d8fcf21SAlaa Hleihel 
39964d8fcf21SAlaa Hleihel 	if (!same_hw_devs(priv, peer_priv))
39974d8fcf21SAlaa Hleihel 		return;
39984d8fcf21SAlaa Hleihel 
39994d8fcf21SAlaa Hleihel 	peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
40004d8fcf21SAlaa Hleihel 
4001b32accdaSVlad Buslov 	mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
4002db76ca24SVlad Buslov 	hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist)
4003db76ca24SVlad Buslov 		if (refcount_inc_not_zero(&hpe->refcnt))
4004db76ca24SVlad Buslov 			list_add(&hpe->dead_peer_wait_list, &init_wait_list);
4005b32accdaSVlad Buslov 	mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
4006db76ca24SVlad Buslov 
4007db76ca24SVlad Buslov 	list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) {
4008db76ca24SVlad Buslov 		wait_for_completion(&hpe->res_ready);
4009db76ca24SVlad Buslov 		if (!IS_ERR_OR_NULL(hpe->hp) && hpe->peer_vhca_id == peer_vhca_id)
4010db76ca24SVlad Buslov 			hpe->hp->pair->peer_gone = true;
4011db76ca24SVlad Buslov 
4012db76ca24SVlad Buslov 		mlx5e_hairpin_put(priv, hpe);
4013db76ca24SVlad Buslov 	}
40144d8fcf21SAlaa Hleihel }
40154d8fcf21SAlaa Hleihel 
40164d8fcf21SAlaa Hleihel static int mlx5e_tc_netdev_event(struct notifier_block *this,
40174d8fcf21SAlaa Hleihel 				 unsigned long event, void *ptr)
40184d8fcf21SAlaa Hleihel {
40194d8fcf21SAlaa Hleihel 	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
40204d8fcf21SAlaa Hleihel 	struct mlx5e_flow_steering *fs;
40214d8fcf21SAlaa Hleihel 	struct mlx5e_priv *peer_priv;
40224d8fcf21SAlaa Hleihel 	struct mlx5e_tc_table *tc;
40234d8fcf21SAlaa Hleihel 	struct mlx5e_priv *priv;
40244d8fcf21SAlaa Hleihel 
40254d8fcf21SAlaa Hleihel 	if (ndev->netdev_ops != &mlx5e_netdev_ops ||
40264d8fcf21SAlaa Hleihel 	    event != NETDEV_UNREGISTER ||
40274d8fcf21SAlaa Hleihel 	    ndev->reg_state == NETREG_REGISTERED)
40284d8fcf21SAlaa Hleihel 		return NOTIFY_DONE;
40294d8fcf21SAlaa Hleihel 
40304d8fcf21SAlaa Hleihel 	tc = container_of(this, struct mlx5e_tc_table, netdevice_nb);
40314d8fcf21SAlaa Hleihel 	fs = container_of(tc, struct mlx5e_flow_steering, tc);
40324d8fcf21SAlaa Hleihel 	priv = container_of(fs, struct mlx5e_priv, fs);
40334d8fcf21SAlaa Hleihel 	peer_priv = netdev_priv(ndev);
40344d8fcf21SAlaa Hleihel 	if (priv == peer_priv ||
40354d8fcf21SAlaa Hleihel 	    !(priv->netdev->features & NETIF_F_HW_TC))
40364d8fcf21SAlaa Hleihel 		return NOTIFY_DONE;
40374d8fcf21SAlaa Hleihel 
40384d8fcf21SAlaa Hleihel 	mlx5e_tc_hairpin_update_dead_peer(priv, peer_priv);
40394d8fcf21SAlaa Hleihel 
40404d8fcf21SAlaa Hleihel 	return NOTIFY_DONE;
40414d8fcf21SAlaa Hleihel }
40424d8fcf21SAlaa Hleihel 
4043655dc3d2SOr Gerlitz int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
4044e8f887acSAmir Vadai {
4045acff797cSMaor Gottlieb 	struct mlx5e_tc_table *tc = &priv->fs.tc;
40464d8fcf21SAlaa Hleihel 	int err;
4047e8f887acSAmir Vadai 
4048b6fac0b4SVlad Buslov 	mutex_init(&tc->t_lock);
4049d2faae25SVlad Buslov 	mutex_init(&tc->mod_hdr.lock);
4050dd58edc3SVlad Buslov 	hash_init(tc->mod_hdr.hlist);
4051b32accdaSVlad Buslov 	mutex_init(&tc->hairpin_tbl_lock);
40525c65c564SOr Gerlitz 	hash_init(tc->hairpin_tbl);
405311c9c548SOr Gerlitz 
40544d8fcf21SAlaa Hleihel 	err = rhashtable_init(&tc->ht, &tc_ht_params);
40554d8fcf21SAlaa Hleihel 	if (err)
40564d8fcf21SAlaa Hleihel 		return err;
40574d8fcf21SAlaa Hleihel 
40584d8fcf21SAlaa Hleihel 	tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
40594d8fcf21SAlaa Hleihel 	if (register_netdevice_notifier(&tc->netdevice_nb)) {
40604d8fcf21SAlaa Hleihel 		tc->netdevice_nb.notifier_call = NULL;
40614d8fcf21SAlaa Hleihel 		mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
40624d8fcf21SAlaa Hleihel 	}
40634d8fcf21SAlaa Hleihel 
40644d8fcf21SAlaa Hleihel 	return err;
4065e8f887acSAmir Vadai }
4066e8f887acSAmir Vadai 
4067e8f887acSAmir Vadai static void _mlx5e_tc_del_flow(void *ptr, void *arg)
4068e8f887acSAmir Vadai {
4069e8f887acSAmir Vadai 	struct mlx5e_tc_flow *flow = ptr;
4070655dc3d2SOr Gerlitz 	struct mlx5e_priv *priv = flow->priv;
4071e8f887acSAmir Vadai 
4072961e8979SRoi Dayan 	mlx5e_tc_del_flow(priv, flow);
4073e8f887acSAmir Vadai 	kfree(flow);
4074e8f887acSAmir Vadai }
4075e8f887acSAmir Vadai 
4076655dc3d2SOr Gerlitz void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
4077e8f887acSAmir Vadai {
4078acff797cSMaor Gottlieb 	struct mlx5e_tc_table *tc = &priv->fs.tc;
4079e8f887acSAmir Vadai 
40804d8fcf21SAlaa Hleihel 	if (tc->netdevice_nb.notifier_call)
40814d8fcf21SAlaa Hleihel 		unregister_netdevice_notifier(&tc->netdevice_nb);
40824d8fcf21SAlaa Hleihel 
4083d2faae25SVlad Buslov 	mutex_destroy(&tc->mod_hdr.lock);
4084b32accdaSVlad Buslov 	mutex_destroy(&tc->hairpin_tbl_lock);
4085b32accdaSVlad Buslov 
4086d9ee0491SOr Gerlitz 	rhashtable_destroy(&tc->ht);
4087e8f887acSAmir Vadai 
4088acff797cSMaor Gottlieb 	if (!IS_ERR_OR_NULL(tc->t)) {
4089acff797cSMaor Gottlieb 		mlx5_destroy_flow_table(tc->t);
4090acff797cSMaor Gottlieb 		tc->t = NULL;
4091e8f887acSAmir Vadai 	}
4092b6fac0b4SVlad Buslov 	mutex_destroy(&tc->t_lock);
4093e8f887acSAmir Vadai }
4094655dc3d2SOr Gerlitz 
4095655dc3d2SOr Gerlitz int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
4096655dc3d2SOr Gerlitz {
4097655dc3d2SOr Gerlitz 	return rhashtable_init(tc_ht, &tc_ht_params);
4098655dc3d2SOr Gerlitz }
4099655dc3d2SOr Gerlitz 
4100655dc3d2SOr Gerlitz void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
4101655dc3d2SOr Gerlitz {
4102655dc3d2SOr Gerlitz 	rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
4103655dc3d2SOr Gerlitz }
410401252a27SOr Gerlitz 
4105226f2ca3SVlad Buslov int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags)
410601252a27SOr Gerlitz {
4107d9ee0491SOr Gerlitz 	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
410801252a27SOr Gerlitz 
410901252a27SOr Gerlitz 	return atomic_read(&tc_ht->nelems);
411001252a27SOr Gerlitz }
411104de7ddaSRoi Dayan 
411204de7ddaSRoi Dayan void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw)
411304de7ddaSRoi Dayan {
411404de7ddaSRoi Dayan 	struct mlx5e_tc_flow *flow, *tmp;
411504de7ddaSRoi Dayan 
411604de7ddaSRoi Dayan 	list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows, peer)
411704de7ddaSRoi Dayan 		__mlx5e_tc_del_fdb_peer_flow(flow);
411804de7ddaSRoi Dayan }
4119b4a23329SRoi Dayan 
4120b4a23329SRoi Dayan void mlx5e_tc_reoffload_flows_work(struct work_struct *work)
4121b4a23329SRoi Dayan {
4122b4a23329SRoi Dayan 	struct mlx5_rep_uplink_priv *rpriv =
4123b4a23329SRoi Dayan 		container_of(work, struct mlx5_rep_uplink_priv,
4124b4a23329SRoi Dayan 			     reoffload_flows_work);
4125b4a23329SRoi Dayan 	struct mlx5e_tc_flow *flow, *tmp;
4126b4a23329SRoi Dayan 
4127ad86755bSVlad Buslov 	mutex_lock(&rpriv->unready_flows_lock);
4128b4a23329SRoi Dayan 	list_for_each_entry_safe(flow, tmp, &rpriv->unready_flows, unready) {
4129b4a23329SRoi Dayan 		if (!mlx5e_tc_add_fdb_flow(flow->priv, flow, NULL))
4130ad86755bSVlad Buslov 			unready_flow_del(flow);
4131b4a23329SRoi Dayan 	}
4132ad86755bSVlad Buslov 	mutex_unlock(&rpriv->unready_flows_lock);
4133b4a23329SRoi Dayan }
4134