1e8f887acSAmir Vadai /*
2e8f887acSAmir Vadai  * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
3e8f887acSAmir Vadai  *
4e8f887acSAmir Vadai  * This software is available to you under a choice of one of two
5e8f887acSAmir Vadai  * licenses.  You may choose to be licensed under the terms of the GNU
6e8f887acSAmir Vadai  * General Public License (GPL) Version 2, available from the file
7e8f887acSAmir Vadai  * COPYING in the main directory of this source tree, or the
8e8f887acSAmir Vadai  * OpenIB.org BSD license below:
9e8f887acSAmir Vadai  *
10e8f887acSAmir Vadai  *     Redistribution and use in source and binary forms, with or
11e8f887acSAmir Vadai  *     without modification, are permitted provided that the following
12e8f887acSAmir Vadai  *     conditions are met:
13e8f887acSAmir Vadai  *
14e8f887acSAmir Vadai  *      - Redistributions of source code must retain the above
15e8f887acSAmir Vadai  *        copyright notice, this list of conditions and the following
16e8f887acSAmir Vadai  *        disclaimer.
17e8f887acSAmir Vadai  *
18e8f887acSAmir Vadai  *      - Redistributions in binary form must reproduce the above
19e8f887acSAmir Vadai  *        copyright notice, this list of conditions and the following
20e8f887acSAmir Vadai  *        disclaimer in the documentation and/or other materials
21e8f887acSAmir Vadai  *        provided with the distribution.
22e8f887acSAmir Vadai  *
23e8f887acSAmir Vadai  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24e8f887acSAmir Vadai  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25e8f887acSAmir Vadai  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26e8f887acSAmir Vadai  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27e8f887acSAmir Vadai  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28e8f887acSAmir Vadai  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29e8f887acSAmir Vadai  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30e8f887acSAmir Vadai  * SOFTWARE.
31e8f887acSAmir Vadai  */
32e8f887acSAmir Vadai 
33e3a2b7edSAmir Vadai #include <net/flow_dissector.h>
34e2394a61SVlad Buslov #include <net/flow_offload.h>
353f7d0eb4SOr Gerlitz #include <net/sch_generic.h>
36e3a2b7edSAmir Vadai #include <net/pkt_cls.h>
37e3a2b7edSAmir Vadai #include <net/tc_act/tc_gact.h>
3812185a9fSAmir Vadai #include <net/tc_act/tc_skbedit.h>
39e8f887acSAmir Vadai #include <linux/mlx5/fs.h>
40e8f887acSAmir Vadai #include <linux/mlx5/device.h>
41e8f887acSAmir Vadai #include <linux/rhashtable.h>
425a7e5bcbSVlad Buslov #include <linux/refcount.h>
43db76ca24SVlad Buslov #include <linux/completion.h>
4403a9d11eSOr Gerlitz #include <net/tc_act/tc_mirred.h>
45776b12b6SOr Gerlitz #include <net/tc_act/tc_vlan.h>
46bbd00f7eSHadar Hen Zion #include <net/tc_act/tc_tunnel_key.h>
47d79b6df6SOr Gerlitz #include <net/tc_act/tc_pedit.h>
4826c02749SOr Gerlitz #include <net/tc_act/tc_csum.h>
49f6dfb4c3SHadar Hen Zion #include <net/arp.h>
503616d08bSDavid Ahern #include <net/ipv6_stubs.h>
51f828ca6aSEli Cohen #include <net/bareudp.h>
52e8f887acSAmir Vadai #include "en.h"
531d447a39SSaeed Mahameed #include "en_rep.h"
54768c3667SVlad Buslov #include "en/rep/tc.h"
55e2394a61SVlad Buslov #include "en/rep/neigh.h"
56232c0013SHadar Hen Zion #include "en_tc.h"
5703a9d11eSOr Gerlitz #include "eswitch.h"
5849964352SSaeed Mahameed #include "esw/chains.h"
593f6d08d1SOr Gerlitz #include "fs_core.h"
602c81bfd5SHuy Nguyen #include "en/port.h"
61101f4de9SOz Shlomo #include "en/tc_tun.h"
620a7fcb78SPaul Blakey #include "en/mapping.h"
634c3844d9SPaul Blakey #include "en/tc_ct.h"
6404de7ddaSRoi Dayan #include "lib/devcom.h"
659272e3dfSYevgeny Kliteynik #include "lib/geneve.h"
667a978759SDmytro Linkin #include "diag/en_tc_tracepoint.h"
67e8f887acSAmir Vadai 
68d65dbedfSHuy Nguyen #define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)
690a7fcb78SPaul Blakey 
703bc4b7bfSOr Gerlitz struct mlx5_nic_flow_attr {
713bc4b7bfSOr Gerlitz 	u32 action;
723bc4b7bfSOr Gerlitz 	u32 flow_tag;
732b688ea5SMaor Gottlieb 	struct mlx5_modify_hdr *modify_hdr;
745c65c564SOr Gerlitz 	u32 hairpin_tirn;
7538aa51c1SOr Gerlitz 	u8 match_level;
763f6d08d1SOr Gerlitz 	struct mlx5_flow_table	*hairpin_ft;
77b8aee822SMark Bloch 	struct mlx5_fc		*counter;
783bc4b7bfSOr Gerlitz };
793bc4b7bfSOr Gerlitz 
80226f2ca3SVlad Buslov #define MLX5E_TC_FLOW_BASE (MLX5E_TC_FLAG_LAST_EXPORTED_BIT + 1)
8160bd4af8SOr Gerlitz 
8265ba8fb7SOr Gerlitz enum {
83226f2ca3SVlad Buslov 	MLX5E_TC_FLOW_FLAG_INGRESS	= MLX5E_TC_FLAG_INGRESS_BIT,
84226f2ca3SVlad Buslov 	MLX5E_TC_FLOW_FLAG_EGRESS	= MLX5E_TC_FLAG_EGRESS_BIT,
85226f2ca3SVlad Buslov 	MLX5E_TC_FLOW_FLAG_ESWITCH	= MLX5E_TC_FLAG_ESW_OFFLOAD_BIT,
8684179981SPaul Blakey 	MLX5E_TC_FLOW_FLAG_FT		= MLX5E_TC_FLAG_FT_OFFLOAD_BIT,
87226f2ca3SVlad Buslov 	MLX5E_TC_FLOW_FLAG_NIC		= MLX5E_TC_FLAG_NIC_OFFLOAD_BIT,
88226f2ca3SVlad Buslov 	MLX5E_TC_FLOW_FLAG_OFFLOADED	= MLX5E_TC_FLOW_BASE,
89226f2ca3SVlad Buslov 	MLX5E_TC_FLOW_FLAG_HAIRPIN	= MLX5E_TC_FLOW_BASE + 1,
90226f2ca3SVlad Buslov 	MLX5E_TC_FLOW_FLAG_HAIRPIN_RSS	= MLX5E_TC_FLOW_BASE + 2,
91226f2ca3SVlad Buslov 	MLX5E_TC_FLOW_FLAG_SLOW		= MLX5E_TC_FLOW_BASE + 3,
92226f2ca3SVlad Buslov 	MLX5E_TC_FLOW_FLAG_DUP		= MLX5E_TC_FLOW_BASE + 4,
93226f2ca3SVlad Buslov 	MLX5E_TC_FLOW_FLAG_NOT_READY	= MLX5E_TC_FLOW_BASE + 5,
94c5d326b2SVlad Buslov 	MLX5E_TC_FLOW_FLAG_DELETED	= MLX5E_TC_FLOW_BASE + 6,
954c3844d9SPaul Blakey 	MLX5E_TC_FLOW_FLAG_CT		= MLX5E_TC_FLOW_BASE + 7,
9665ba8fb7SOr Gerlitz };
9765ba8fb7SOr Gerlitz 
98e4ad91f2SChris Mi #define MLX5E_TC_MAX_SPLITS 1
99e4ad91f2SChris Mi 
10079baaec7SEli Britstein /* Helper struct for accessing a struct that contains an array of list_heads.
10179baaec7SEli Britstein  * Containing struct
10279baaec7SEli Britstein  *   |- Helper array
10379baaec7SEli Britstein  *      [0] Helper item 0
10479baaec7SEli Britstein  *          |- list_head item 0
10579baaec7SEli Britstein  *          |- index (0)
10679baaec7SEli Britstein  *      [1] Helper item 1
10779baaec7SEli Britstein  *          |- list_head item 1
10879baaec7SEli Britstein  *          |- index (1)
10979baaec7SEli Britstein  * To access the containing struct from one of the list_head items:
11079baaec7SEli Britstein  * 1. Get the helper item from the list_head item using
11179baaec7SEli Britstein  *    helper item =
11279baaec7SEli Britstein  *        container_of(list_head item, helper struct type, list_head field)
11379baaec7SEli Britstein  * 2. Get the containing struct from the helper item and its index in the array:
11479baaec7SEli Britstein  *    containing struct =
11579baaec7SEli Britstein  *        container_of(helper item, containing struct type, helper field[index])
11679baaec7SEli Britstein  */
11779baaec7SEli Britstein struct encap_flow_item {
118948993f2SVlad Buslov 	struct mlx5e_encap_entry *e; /* attached encap instance */
11979baaec7SEli Britstein 	struct list_head list;
12079baaec7SEli Britstein 	int index;
12179baaec7SEli Britstein };
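/* Illustrative sketch, not part of the driver: recovering the flow that
 * owns an encap_flow_item reached through an encap entry's flow list.
 * Here "e" is assumed to be a struct mlx5e_encap_entry whose ->flows list
 * links encap_flow_item::list nodes; "efi" and "flow" are example names.
 *
 *	struct encap_flow_item *efi;
 *	struct mlx5e_tc_flow *flow;
 *
 *	list_for_each_entry(efi, &e->flows, list)
 *		flow = container_of(efi, struct mlx5e_tc_flow,
 *				    encaps[efi->index]);
 *
 * list_for_each_entry() performs step 1 (container_of() on the list_head),
 * and the explicit container_of() performs step 2 using efi->index.
 */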
12279baaec7SEli Britstein 
123e8f887acSAmir Vadai struct mlx5e_tc_flow {
124e8f887acSAmir Vadai 	struct rhash_head	node;
125655dc3d2SOr Gerlitz 	struct mlx5e_priv	*priv;
126e8f887acSAmir Vadai 	u64			cookie;
127226f2ca3SVlad Buslov 	unsigned long		flags;
128e4ad91f2SChris Mi 	struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1];
12979baaec7SEli Britstein 	/* A flow can be associated with multiple encap IDs.
13079baaec7SEli Britstein 	 * The number of encaps is bounded by the number of supported
13179baaec7SEli Britstein 	 * destinations.
13279baaec7SEli Britstein 	 */
13379baaec7SEli Britstein 	struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS];
13404de7ddaSRoi Dayan 	struct mlx5e_tc_flow    *peer_flow;
135dd58edc3SVlad Buslov 	struct mlx5e_mod_hdr_entry *mh; /* attached mod header instance */
13611c9c548SOr Gerlitz 	struct list_head	mod_hdr; /* flows sharing the same mod hdr ID */
137e4f9abbdSVlad Buslov 	struct mlx5e_hairpin_entry *hpe; /* attached hairpin instance */
1385c65c564SOr Gerlitz 	struct list_head	hairpin; /* flows sharing the same hairpin */
13904de7ddaSRoi Dayan 	struct list_head	peer;    /* flows with peer flow */
140b4a23329SRoi Dayan 	struct list_head	unready; /* flows not ready to be offloaded (e.g. due to a missing route) */
1412a1f1768SVlad Buslov 	int			tmp_efi_index;
1426a06c2f7SVlad Buslov 	struct list_head	tmp_list; /* temporary flow list used by neigh update */
1435a7e5bcbSVlad Buslov 	refcount_t		refcnt;
144c5d326b2SVlad Buslov 	struct rcu_head		rcu_head;
14595435ad7SVlad Buslov 	struct completion	init_done;
1460a7fcb78SPaul Blakey 	int tunnel_id; /* the mapped tunnel id of this flow */
1470a7fcb78SPaul Blakey 
1483bc4b7bfSOr Gerlitz 	union {
149ecf5bb79SOr Gerlitz 		struct mlx5_esw_flow_attr esw_attr[0];
1503bc4b7bfSOr Gerlitz 		struct mlx5_nic_flow_attr nic_attr[0];
1513bc4b7bfSOr Gerlitz 	};
152e8f887acSAmir Vadai };
153e8f887acSAmir Vadai 
15417091853SOr Gerlitz struct mlx5e_tc_flow_parse_attr {
1551f6da306SYevgeny Kliteynik 	const struct ip_tunnel_info *tun_info[MLX5_MAX_FLOW_FWD_VPORTS];
156d11afc26SOz Shlomo 	struct net_device *filter_dev;
15717091853SOr Gerlitz 	struct mlx5_flow_spec spec;
1586ae4a6a5SPaul Blakey 	struct mlx5e_tc_mod_hdr_acts mod_hdr_acts;
15998b66cb1SEli Britstein 	int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS];
16017091853SOr Gerlitz };
16117091853SOr Gerlitz 
162acff797cSMaor Gottlieb #define MLX5E_TC_TABLE_NUM_GROUPS 4
163b3a433deSOr Gerlitz #define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(16)
164e8f887acSAmir Vadai 
1658f1e0b97SPaul Blakey struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
1668f1e0b97SPaul Blakey 	[CHAIN_TO_REG] = {
1678f1e0b97SPaul Blakey 		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
1688f1e0b97SPaul Blakey 		.moffset = 0,
1698f1e0b97SPaul Blakey 		.mlen = 2,
1708f1e0b97SPaul Blakey 	},
1710a7fcb78SPaul Blakey 	[TUNNEL_TO_REG] = {
1720a7fcb78SPaul Blakey 		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1,
1730a7fcb78SPaul Blakey 		.moffset = 3,
1740a7fcb78SPaul Blakey 		.mlen = 1,
1750a7fcb78SPaul Blakey 		.soffset = MLX5_BYTE_OFF(fte_match_param,
1760a7fcb78SPaul Blakey 					 misc_parameters_2.metadata_reg_c_1),
1770a7fcb78SPaul Blakey 	},
1784c3844d9SPaul Blakey 	[ZONE_TO_REG] = zone_to_reg_ct,
1794c3844d9SPaul Blakey 	[CTSTATE_TO_REG] = ctstate_to_reg_ct,
1804c3844d9SPaul Blakey 	[MARK_TO_REG] = mark_to_reg_ct,
1814c3844d9SPaul Blakey 	[LABELS_TO_REG] = labels_to_reg_ct,
1824c3844d9SPaul Blakey 	[FTEID_TO_REG] = fteid_to_reg_ct,
1835c6b9460SPaul Blakey 	[TUPLEID_TO_REG] = tupleid_to_reg_ct,
1848f1e0b97SPaul Blakey };
1858f1e0b97SPaul Blakey 
1860a7fcb78SPaul Blakey static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
1870a7fcb78SPaul Blakey 
1880a7fcb78SPaul Blakey void
1890a7fcb78SPaul Blakey mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
1900a7fcb78SPaul Blakey 			    enum mlx5e_tc_attr_to_reg type,
1910a7fcb78SPaul Blakey 			    u32 data,
1920a7fcb78SPaul Blakey 			    u32 mask)
1930a7fcb78SPaul Blakey {
1940a7fcb78SPaul Blakey 	int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
1950a7fcb78SPaul Blakey 	int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
1960a7fcb78SPaul Blakey 	void *headers_c = spec->match_criteria;
1970a7fcb78SPaul Blakey 	void *headers_v = spec->match_value;
1980a7fcb78SPaul Blakey 	void *fmask, *fval;
1990a7fcb78SPaul Blakey 
2000a7fcb78SPaul Blakey 	fmask = headers_c + soffset;
2010a7fcb78SPaul Blakey 	fval = headers_v + soffset;
2020a7fcb78SPaul Blakey 
2030a7fcb78SPaul Blakey 	mask = cpu_to_be32(mask) >> (32 - (match_len * 8));
2040a7fcb78SPaul Blakey 	data = cpu_to_be32(data) >> (32 - (match_len * 8));
2050a7fcb78SPaul Blakey 
2060a7fcb78SPaul Blakey 	memcpy(fmask, &mask, match_len);
2070a7fcb78SPaul Blakey 	memcpy(fval, &data, match_len);
2080a7fcb78SPaul Blakey 
2090a7fcb78SPaul Blakey 	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
2100a7fcb78SPaul Blakey }
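/* Illustrative usage sketch, not part of the driver: constrain a flow spec
 * so it only matches packets whose reg_c_1 tunnel mapping byte was set to a
 * given id earlier in the pipeline. "spec" and the id/mask values below are
 * placeholders for this example.
 *
 *	mlx5e_tc_match_to_reg_match(spec, TUNNEL_TO_REG, tun_id, 0xFF);
 *
 * For TUNNEL_TO_REG (mlen == 1, soffset at metadata_reg_c_1) this copies the
 * least-significant byte of tun_id and of the mask into the match value and
 * match criteria, and enables MLX5_MATCH_MISC_PARAMETERS_2.
 */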
2110a7fcb78SPaul Blakey 
2120a7fcb78SPaul Blakey int
2130a7fcb78SPaul Blakey mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
2140a7fcb78SPaul Blakey 			  struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
2150a7fcb78SPaul Blakey 			  enum mlx5e_tc_attr_to_reg type,
2160a7fcb78SPaul Blakey 			  u32 data)
2170a7fcb78SPaul Blakey {
2180a7fcb78SPaul Blakey 	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
2190a7fcb78SPaul Blakey 	int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield;
2200a7fcb78SPaul Blakey 	int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen;
2210a7fcb78SPaul Blakey 	char *modact;
2220a7fcb78SPaul Blakey 	int err;
2230a7fcb78SPaul Blakey 
2240a7fcb78SPaul Blakey 	err = alloc_mod_hdr_actions(mdev, MLX5_FLOW_NAMESPACE_FDB,
2250a7fcb78SPaul Blakey 				    mod_hdr_acts);
2260a7fcb78SPaul Blakey 	if (err)
2270a7fcb78SPaul Blakey 		return err;
2280a7fcb78SPaul Blakey 
2290a7fcb78SPaul Blakey 	modact = mod_hdr_acts->actions +
2300a7fcb78SPaul Blakey 		 (mod_hdr_acts->num_actions * MLX5_MH_ACT_SZ);
2310a7fcb78SPaul Blakey 
2320a7fcb78SPaul Blakey 	/* The firmware length field is 5 bits wide; the value 0 encodes a full 32-bit write */
2330a7fcb78SPaul Blakey 	if (mlen == 4)
2340a7fcb78SPaul Blakey 		mlen = 0;
2350a7fcb78SPaul Blakey 
2360a7fcb78SPaul Blakey 	MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
2370a7fcb78SPaul Blakey 	MLX5_SET(set_action_in, modact, field, mfield);
2380a7fcb78SPaul Blakey 	MLX5_SET(set_action_in, modact, offset, moffset * 8);
2390a7fcb78SPaul Blakey 	MLX5_SET(set_action_in, modact, length, mlen * 8);
2400a7fcb78SPaul Blakey 	MLX5_SET(set_action_in, modact, data, data);
2410a7fcb78SPaul Blakey 	mod_hdr_acts->num_actions++;
2420a7fcb78SPaul Blakey 
2430a7fcb78SPaul Blakey 	return 0;
2440a7fcb78SPaul Blakey }
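/* Illustrative usage sketch, not part of the driver: append a modify-header
 * action that loads a TC chain id into reg_c_0. "mdev", "mod_acts" and
 * "chain" are placeholders for this example.
 *
 *	err = mlx5e_tc_match_to_reg_set(mdev, mod_acts, CHAIN_TO_REG, chain);
 *	if (err)
 *		return err;
 *
 * For CHAIN_TO_REG (moffset == 0, mlen == 2) this emits a set_action_in
 * entry writing the low 16 bits of "chain" to bits 0..15 of metadata
 * register C0.
 */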
2450a7fcb78SPaul Blakey 
24677ab67b7SOr Gerlitz struct mlx5e_hairpin {
24777ab67b7SOr Gerlitz 	struct mlx5_hairpin *pair;
24877ab67b7SOr Gerlitz 
24977ab67b7SOr Gerlitz 	struct mlx5_core_dev *func_mdev;
2503f6d08d1SOr Gerlitz 	struct mlx5e_priv *func_priv;
25177ab67b7SOr Gerlitz 	u32 tdn;
25277ab67b7SOr Gerlitz 	u32 tirn;
2533f6d08d1SOr Gerlitz 
2543f6d08d1SOr Gerlitz 	int num_channels;
2553f6d08d1SOr Gerlitz 	struct mlx5e_rqt indir_rqt;
2563f6d08d1SOr Gerlitz 	u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
2573f6d08d1SOr Gerlitz 	struct mlx5e_ttc_table ttc;
25877ab67b7SOr Gerlitz };
25977ab67b7SOr Gerlitz 
2605c65c564SOr Gerlitz struct mlx5e_hairpin_entry {
2615c65c564SOr Gerlitz 	/* a node of a hash table which keeps all the hairpin entries */
2625c65c564SOr Gerlitz 	struct hlist_node hairpin_hlist;
2635c65c564SOr Gerlitz 
26473edca73SVlad Buslov 	/* protects flows list */
26573edca73SVlad Buslov 	spinlock_t flows_lock;
2665c65c564SOr Gerlitz 	/* flows sharing the same hairpin */
2675c65c564SOr Gerlitz 	struct list_head flows;
268db76ca24SVlad Buslov 	/* hpe entries that were not fully initialized when the dead peer
269db76ca24SVlad Buslov 	 * update event handler traversed them.
270db76ca24SVlad Buslov 	 */
271db76ca24SVlad Buslov 	struct list_head dead_peer_wait_list;
2725c65c564SOr Gerlitz 
273d8822868SOr Gerlitz 	u16 peer_vhca_id;
274106be53bSOr Gerlitz 	u8 prio;
2755c65c564SOr Gerlitz 	struct mlx5e_hairpin *hp;
276e4f9abbdSVlad Buslov 	refcount_t refcnt;
277db76ca24SVlad Buslov 	struct completion res_ready;
2785c65c564SOr Gerlitz };
2795c65c564SOr Gerlitz 
28011c9c548SOr Gerlitz struct mod_hdr_key {
28111c9c548SOr Gerlitz 	int num_actions;
28211c9c548SOr Gerlitz 	void *actions;
28311c9c548SOr Gerlitz };
28411c9c548SOr Gerlitz 
28511c9c548SOr Gerlitz struct mlx5e_mod_hdr_entry {
28611c9c548SOr Gerlitz 	/* a node of a hash table which keeps all the mod_hdr entries */
28711c9c548SOr Gerlitz 	struct hlist_node mod_hdr_hlist;
28811c9c548SOr Gerlitz 
28983a52f0dSVlad Buslov 	/* protects flows list */
29083a52f0dSVlad Buslov 	spinlock_t flows_lock;
29111c9c548SOr Gerlitz 	/* flows sharing the same mod_hdr entry */
29211c9c548SOr Gerlitz 	struct list_head flows;
29311c9c548SOr Gerlitz 
29411c9c548SOr Gerlitz 	struct mod_hdr_key key;
29511c9c548SOr Gerlitz 
2962b688ea5SMaor Gottlieb 	struct mlx5_modify_hdr *modify_hdr;
297dd58edc3SVlad Buslov 
298dd58edc3SVlad Buslov 	refcount_t refcnt;
299a734d007SVlad Buslov 	struct completion res_ready;
300a734d007SVlad Buslov 	int compl_result;
30111c9c548SOr Gerlitz };
30211c9c548SOr Gerlitz 
3035a7e5bcbSVlad Buslov static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
3045a7e5bcbSVlad Buslov 			      struct mlx5e_tc_flow *flow);
3055a7e5bcbSVlad Buslov 
3065a7e5bcbSVlad Buslov static struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow)
3075a7e5bcbSVlad Buslov {
3085a7e5bcbSVlad Buslov 	if (!flow || !refcount_inc_not_zero(&flow->refcnt))
3095a7e5bcbSVlad Buslov 		return ERR_PTR(-EINVAL);
3105a7e5bcbSVlad Buslov 	return flow;
3115a7e5bcbSVlad Buslov }
3125a7e5bcbSVlad Buslov 
3135a7e5bcbSVlad Buslov static void mlx5e_flow_put(struct mlx5e_priv *priv,
3145a7e5bcbSVlad Buslov 			   struct mlx5e_tc_flow *flow)
3155a7e5bcbSVlad Buslov {
3165a7e5bcbSVlad Buslov 	if (refcount_dec_and_test(&flow->refcnt)) {
3175a7e5bcbSVlad Buslov 		mlx5e_tc_del_flow(priv, flow);
318c5d326b2SVlad Buslov 		kfree_rcu(flow, rcu_head);
3195a7e5bcbSVlad Buslov 	}
3205a7e5bcbSVlad Buslov }
3215a7e5bcbSVlad Buslov 
322226f2ca3SVlad Buslov static void __flow_flag_set(struct mlx5e_tc_flow *flow, unsigned long flag)
323226f2ca3SVlad Buslov {
324226f2ca3SVlad Buslov 	/* Complete all memory stores before setting bit. */
325226f2ca3SVlad Buslov 	smp_mb__before_atomic();
326226f2ca3SVlad Buslov 	set_bit(flag, &flow->flags);
327226f2ca3SVlad Buslov }
328226f2ca3SVlad Buslov 
329226f2ca3SVlad Buslov #define flow_flag_set(flow, flag) __flow_flag_set(flow, MLX5E_TC_FLOW_FLAG_##flag)
330226f2ca3SVlad Buslov 
331c5d326b2SVlad Buslov static bool __flow_flag_test_and_set(struct mlx5e_tc_flow *flow,
332c5d326b2SVlad Buslov 				     unsigned long flag)
333c5d326b2SVlad Buslov {
334c5d326b2SVlad Buslov 	/* test_and_set_bit() provides all necessary barriers */
335c5d326b2SVlad Buslov 	return test_and_set_bit(flag, &flow->flags);
336c5d326b2SVlad Buslov }
337c5d326b2SVlad Buslov 
338c5d326b2SVlad Buslov #define flow_flag_test_and_set(flow, flag)			\
339c5d326b2SVlad Buslov 	__flow_flag_test_and_set(flow,				\
340c5d326b2SVlad Buslov 				 MLX5E_TC_FLOW_FLAG_##flag)
341c5d326b2SVlad Buslov 
342226f2ca3SVlad Buslov static void __flow_flag_clear(struct mlx5e_tc_flow *flow, unsigned long flag)
343226f2ca3SVlad Buslov {
344226f2ca3SVlad Buslov 	/* Complete all memory stores before clearing bit. */
345226f2ca3SVlad Buslov 	smp_mb__before_atomic();
346226f2ca3SVlad Buslov 	clear_bit(flag, &flow->flags);
347226f2ca3SVlad Buslov }
348226f2ca3SVlad Buslov 
349226f2ca3SVlad Buslov #define flow_flag_clear(flow, flag) __flow_flag_clear(flow, \
350226f2ca3SVlad Buslov 						      MLX5E_TC_FLOW_FLAG_##flag)
351226f2ca3SVlad Buslov 
352226f2ca3SVlad Buslov static bool __flow_flag_test(struct mlx5e_tc_flow *flow, unsigned long flag)
353226f2ca3SVlad Buslov {
354226f2ca3SVlad Buslov 	bool ret = test_bit(flag, &flow->flags);
355226f2ca3SVlad Buslov 
356226f2ca3SVlad Buslov 	/* Read fields of flow structure only after checking flags. */
357226f2ca3SVlad Buslov 	smp_mb__after_atomic();
358226f2ca3SVlad Buslov 	return ret;
359226f2ca3SVlad Buslov }
360226f2ca3SVlad Buslov 
361226f2ca3SVlad Buslov #define flow_flag_test(flow, flag) __flow_flag_test(flow, \
362226f2ca3SVlad Buslov 						    MLX5E_TC_FLOW_FLAG_##flag)
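/* Illustrative usage sketch, not part of the driver: the wrappers above take
 * the short flag name and expand it to MLX5E_TC_FLOW_FLAG_*; "flow" is a
 * placeholder in this example.
 *
 *	flow_flag_set(flow, OFFLOADED);
 *	...
 *	if (flow_flag_test(flow, OFFLOADED))
 *		use(flow->rule[0]);
 *
 * where use() stands for any reader of the rule. smp_mb__before_atomic() in
 * __flow_flag_set() orders earlier stores (e.g. to flow->rule[]) before the
 * flag becomes visible, while smp_mb__after_atomic() in __flow_flag_test()
 * keeps subsequent reads of the flow from being reordered before the flag
 * check.
 */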
363226f2ca3SVlad Buslov 
364226f2ca3SVlad Buslov static bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow)
365226f2ca3SVlad Buslov {
366226f2ca3SVlad Buslov 	return flow_flag_test(flow, ESWITCH);
367226f2ca3SVlad Buslov }
368226f2ca3SVlad Buslov 
36984179981SPaul Blakey static bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow)
37084179981SPaul Blakey {
37184179981SPaul Blakey 	return flow_flag_test(flow, FT);
37284179981SPaul Blakey }
37384179981SPaul Blakey 
374226f2ca3SVlad Buslov static bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow)
375226f2ca3SVlad Buslov {
376226f2ca3SVlad Buslov 	return flow_flag_test(flow, OFFLOADED);
377226f2ca3SVlad Buslov }
378226f2ca3SVlad Buslov 
37911c9c548SOr Gerlitz static inline u32 hash_mod_hdr_info(struct mod_hdr_key *key)
38011c9c548SOr Gerlitz {
38111c9c548SOr Gerlitz 	return jhash(key->actions,
38211c9c548SOr Gerlitz 		     key->num_actions * MLX5_MH_ACT_SZ, 0);
38311c9c548SOr Gerlitz }
38411c9c548SOr Gerlitz 
38511c9c548SOr Gerlitz static inline int cmp_mod_hdr_info(struct mod_hdr_key *a,
38611c9c548SOr Gerlitz 				   struct mod_hdr_key *b)
38711c9c548SOr Gerlitz {
38811c9c548SOr Gerlitz 	if (a->num_actions != b->num_actions)
38911c9c548SOr Gerlitz 		return 1;
39011c9c548SOr Gerlitz 
39111c9c548SOr Gerlitz 	return memcmp(a->actions, b->actions, a->num_actions * MLX5_MH_ACT_SZ);
39211c9c548SOr Gerlitz }
39311c9c548SOr Gerlitz 
394dd58edc3SVlad Buslov static struct mod_hdr_tbl *
395dd58edc3SVlad Buslov get_mod_hdr_table(struct mlx5e_priv *priv, int namespace)
396dd58edc3SVlad Buslov {
397dd58edc3SVlad Buslov 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
398dd58edc3SVlad Buslov 
399dd58edc3SVlad Buslov 	return namespace == MLX5_FLOW_NAMESPACE_FDB ? &esw->offloads.mod_hdr :
400dd58edc3SVlad Buslov 		&priv->fs.tc.mod_hdr;
401dd58edc3SVlad Buslov }
402dd58edc3SVlad Buslov 
403dd58edc3SVlad Buslov static struct mlx5e_mod_hdr_entry *
404dd58edc3SVlad Buslov mlx5e_mod_hdr_get(struct mod_hdr_tbl *tbl, struct mod_hdr_key *key, u32 hash_key)
405dd58edc3SVlad Buslov {
406dd58edc3SVlad Buslov 	struct mlx5e_mod_hdr_entry *mh, *found = NULL;
407dd58edc3SVlad Buslov 
408dd58edc3SVlad Buslov 	hash_for_each_possible(tbl->hlist, mh, mod_hdr_hlist, hash_key) {
409dd58edc3SVlad Buslov 		if (!cmp_mod_hdr_info(&mh->key, key)) {
410dd58edc3SVlad Buslov 			refcount_inc(&mh->refcnt);
411dd58edc3SVlad Buslov 			found = mh;
412dd58edc3SVlad Buslov 			break;
413dd58edc3SVlad Buslov 		}
414dd58edc3SVlad Buslov 	}
415dd58edc3SVlad Buslov 
416dd58edc3SVlad Buslov 	return found;
417dd58edc3SVlad Buslov }
418dd58edc3SVlad Buslov 
419dd58edc3SVlad Buslov static void mlx5e_mod_hdr_put(struct mlx5e_priv *priv,
420d2faae25SVlad Buslov 			      struct mlx5e_mod_hdr_entry *mh,
421d2faae25SVlad Buslov 			      int namespace)
422dd58edc3SVlad Buslov {
423d2faae25SVlad Buslov 	struct mod_hdr_tbl *tbl = get_mod_hdr_table(priv, namespace);
424d2faae25SVlad Buslov 
425d2faae25SVlad Buslov 	if (!refcount_dec_and_mutex_lock(&mh->refcnt, &tbl->lock))
426dd58edc3SVlad Buslov 		return;
427d2faae25SVlad Buslov 	hash_del(&mh->mod_hdr_hlist);
428d2faae25SVlad Buslov 	mutex_unlock(&tbl->lock);
429dd58edc3SVlad Buslov 
430dd58edc3SVlad Buslov 	WARN_ON(!list_empty(&mh->flows));
431a734d007SVlad Buslov 	if (mh->compl_result > 0)
4322b688ea5SMaor Gottlieb 		mlx5_modify_header_dealloc(priv->mdev, mh->modify_hdr);
433d2faae25SVlad Buslov 
434dd58edc3SVlad Buslov 	kfree(mh);
435dd58edc3SVlad Buslov }
436dd58edc3SVlad Buslov 
437d2faae25SVlad Buslov static int get_flow_name_space(struct mlx5e_tc_flow *flow)
438d2faae25SVlad Buslov {
439d2faae25SVlad Buslov 	return mlx5e_is_eswitch_flow(flow) ?
440d2faae25SVlad Buslov 		MLX5_FLOW_NAMESPACE_FDB : MLX5_FLOW_NAMESPACE_KERNEL;
441d2faae25SVlad Buslov }
44211c9c548SOr Gerlitz static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
44311c9c548SOr Gerlitz 				struct mlx5e_tc_flow *flow,
44411c9c548SOr Gerlitz 				struct mlx5e_tc_flow_parse_attr *parse_attr)
44511c9c548SOr Gerlitz {
44611c9c548SOr Gerlitz 	int num_actions, actions_size, namespace, err;
44711c9c548SOr Gerlitz 	struct mlx5e_mod_hdr_entry *mh;
448dd58edc3SVlad Buslov 	struct mod_hdr_tbl *tbl;
44911c9c548SOr Gerlitz 	struct mod_hdr_key key;
45011c9c548SOr Gerlitz 	u32 hash_key;
45111c9c548SOr Gerlitz 
4526ae4a6a5SPaul Blakey 	num_actions  = parse_attr->mod_hdr_acts.num_actions;
45311c9c548SOr Gerlitz 	actions_size = MLX5_MH_ACT_SZ * num_actions;
45411c9c548SOr Gerlitz 
4556ae4a6a5SPaul Blakey 	key.actions = parse_attr->mod_hdr_acts.actions;
45611c9c548SOr Gerlitz 	key.num_actions = num_actions;
45711c9c548SOr Gerlitz 
45811c9c548SOr Gerlitz 	hash_key = hash_mod_hdr_info(&key);
45911c9c548SOr Gerlitz 
460d2faae25SVlad Buslov 	namespace = get_flow_name_space(flow);
461dd58edc3SVlad Buslov 	tbl = get_mod_hdr_table(priv, namespace);
46211c9c548SOr Gerlitz 
463d2faae25SVlad Buslov 	mutex_lock(&tbl->lock);
464dd58edc3SVlad Buslov 	mh = mlx5e_mod_hdr_get(tbl, &key, hash_key);
465a734d007SVlad Buslov 	if (mh) {
466a734d007SVlad Buslov 		mutex_unlock(&tbl->lock);
467a734d007SVlad Buslov 		wait_for_completion(&mh->res_ready);
468a734d007SVlad Buslov 
469a734d007SVlad Buslov 		if (mh->compl_result < 0) {
470a734d007SVlad Buslov 			err = -EREMOTEIO;
471a734d007SVlad Buslov 			goto attach_header_err;
472a734d007SVlad Buslov 		}
47311c9c548SOr Gerlitz 		goto attach_flow;
474a734d007SVlad Buslov 	}
47511c9c548SOr Gerlitz 
47611c9c548SOr Gerlitz 	mh = kzalloc(sizeof(*mh) + actions_size, GFP_KERNEL);
477d2faae25SVlad Buslov 	if (!mh) {
478a734d007SVlad Buslov 		mutex_unlock(&tbl->lock);
479a734d007SVlad Buslov 		return -ENOMEM;
480d2faae25SVlad Buslov 	}
48111c9c548SOr Gerlitz 
48211c9c548SOr Gerlitz 	mh->key.actions = (void *)mh + sizeof(*mh);
48311c9c548SOr Gerlitz 	memcpy(mh->key.actions, key.actions, actions_size);
48411c9c548SOr Gerlitz 	mh->key.num_actions = num_actions;
48583a52f0dSVlad Buslov 	spin_lock_init(&mh->flows_lock);
48611c9c548SOr Gerlitz 	INIT_LIST_HEAD(&mh->flows);
487dd58edc3SVlad Buslov 	refcount_set(&mh->refcnt, 1);
488a734d007SVlad Buslov 	init_completion(&mh->res_ready);
489a734d007SVlad Buslov 
490a734d007SVlad Buslov 	hash_add(tbl->hlist, &mh->mod_hdr_hlist, hash_key);
491a734d007SVlad Buslov 	mutex_unlock(&tbl->lock);
49211c9c548SOr Gerlitz 
4932b688ea5SMaor Gottlieb 	mh->modify_hdr = mlx5_modify_header_alloc(priv->mdev, namespace,
49411c9c548SOr Gerlitz 						  mh->key.num_actions,
4952b688ea5SMaor Gottlieb 						  mh->key.actions);
4962b688ea5SMaor Gottlieb 	if (IS_ERR(mh->modify_hdr)) {
4972b688ea5SMaor Gottlieb 		err = PTR_ERR(mh->modify_hdr);
498a734d007SVlad Buslov 		mh->compl_result = err;
499a734d007SVlad Buslov 		goto alloc_header_err;
500a734d007SVlad Buslov 	}
501a734d007SVlad Buslov 	mh->compl_result = 1;
502a734d007SVlad Buslov 	complete_all(&mh->res_ready);
50311c9c548SOr Gerlitz 
50411c9c548SOr Gerlitz attach_flow:
505dd58edc3SVlad Buslov 	flow->mh = mh;
50683a52f0dSVlad Buslov 	spin_lock(&mh->flows_lock);
50711c9c548SOr Gerlitz 	list_add(&flow->mod_hdr, &mh->flows);
50883a52f0dSVlad Buslov 	spin_unlock(&mh->flows_lock);
509d2faae25SVlad Buslov 	if (mlx5e_is_eswitch_flow(flow))
5102b688ea5SMaor Gottlieb 		flow->esw_attr->modify_hdr = mh->modify_hdr;
51111c9c548SOr Gerlitz 	else
5122b688ea5SMaor Gottlieb 		flow->nic_attr->modify_hdr = mh->modify_hdr;
51311c9c548SOr Gerlitz 
51411c9c548SOr Gerlitz 	return 0;
51511c9c548SOr Gerlitz 
516a734d007SVlad Buslov alloc_header_err:
517a734d007SVlad Buslov 	complete_all(&mh->res_ready);
518a734d007SVlad Buslov attach_header_err:
519a734d007SVlad Buslov 	mlx5e_mod_hdr_put(priv, mh, namespace);
52011c9c548SOr Gerlitz 	return err;
52111c9c548SOr Gerlitz }
52211c9c548SOr Gerlitz 
52311c9c548SOr Gerlitz static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
52411c9c548SOr Gerlitz 				 struct mlx5e_tc_flow *flow)
52511c9c548SOr Gerlitz {
5265a7e5bcbSVlad Buslov 	/* flow wasn't fully initialized */
527dd58edc3SVlad Buslov 	if (!flow->mh)
5285a7e5bcbSVlad Buslov 		return;
5295a7e5bcbSVlad Buslov 
53083a52f0dSVlad Buslov 	spin_lock(&flow->mh->flows_lock);
53111c9c548SOr Gerlitz 	list_del(&flow->mod_hdr);
53283a52f0dSVlad Buslov 	spin_unlock(&flow->mh->flows_lock);
53311c9c548SOr Gerlitz 
534d2faae25SVlad Buslov 	mlx5e_mod_hdr_put(priv, flow->mh, get_flow_name_space(flow));
535dd58edc3SVlad Buslov 	flow->mh = NULL;
53611c9c548SOr Gerlitz }
53711c9c548SOr Gerlitz 
53877ab67b7SOr Gerlitz static
53977ab67b7SOr Gerlitz struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
54077ab67b7SOr Gerlitz {
54177ab67b7SOr Gerlitz 	struct net_device *netdev;
54277ab67b7SOr Gerlitz 	struct mlx5e_priv *priv;
54377ab67b7SOr Gerlitz 
54477ab67b7SOr Gerlitz 	netdev = __dev_get_by_index(net, ifindex);
54577ab67b7SOr Gerlitz 	priv = netdev_priv(netdev);
54677ab67b7SOr Gerlitz 	return priv->mdev;
54777ab67b7SOr Gerlitz }
54877ab67b7SOr Gerlitz 
54977ab67b7SOr Gerlitz static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
55077ab67b7SOr Gerlitz {
551e0b4b472SLeon Romanovsky 	u32 in[MLX5_ST_SZ_DW(create_tir_in)] = {};
55277ab67b7SOr Gerlitz 	void *tirc;
55377ab67b7SOr Gerlitz 	int err;
55477ab67b7SOr Gerlitz 
55577ab67b7SOr Gerlitz 	err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn);
55677ab67b7SOr Gerlitz 	if (err)
55777ab67b7SOr Gerlitz 		goto alloc_tdn_err;
55877ab67b7SOr Gerlitz 
55977ab67b7SOr Gerlitz 	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
56077ab67b7SOr Gerlitz 
56177ab67b7SOr Gerlitz 	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
562ddae74acSOr Gerlitz 	MLX5_SET(tirc, tirc, inline_rqn, hp->pair->rqn[0]);
56377ab67b7SOr Gerlitz 	MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
56477ab67b7SOr Gerlitz 
565e0b4b472SLeon Romanovsky 	err = mlx5_core_create_tir(hp->func_mdev, in, &hp->tirn);
56677ab67b7SOr Gerlitz 	if (err)
56777ab67b7SOr Gerlitz 		goto create_tir_err;
56877ab67b7SOr Gerlitz 
56977ab67b7SOr Gerlitz 	return 0;
57077ab67b7SOr Gerlitz 
57177ab67b7SOr Gerlitz create_tir_err:
57277ab67b7SOr Gerlitz 	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
57377ab67b7SOr Gerlitz alloc_tdn_err:
57477ab67b7SOr Gerlitz 	return err;
57577ab67b7SOr Gerlitz }
57677ab67b7SOr Gerlitz 
57777ab67b7SOr Gerlitz static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
57877ab67b7SOr Gerlitz {
57977ab67b7SOr Gerlitz 	mlx5_core_destroy_tir(hp->func_mdev, hp->tirn);
58077ab67b7SOr Gerlitz 	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
58177ab67b7SOr Gerlitz }
58277ab67b7SOr Gerlitz 
5833f6d08d1SOr Gerlitz static void mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
5843f6d08d1SOr Gerlitz {
5853f6d08d1SOr Gerlitz 	u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE], rqn;
5863f6d08d1SOr Gerlitz 	struct mlx5e_priv *priv = hp->func_priv;
5873f6d08d1SOr Gerlitz 	int i, ix, sz = MLX5E_INDIR_RQT_SIZE;
5883f6d08d1SOr Gerlitz 
5893f6d08d1SOr Gerlitz 	mlx5e_build_default_indir_rqt(indirection_rqt, sz,
5903f6d08d1SOr Gerlitz 				      hp->num_channels);
5913f6d08d1SOr Gerlitz 
5923f6d08d1SOr Gerlitz 	for (i = 0; i < sz; i++) {
5933f6d08d1SOr Gerlitz 		ix = i;
594bbeb53b8SAya Levin 		if (priv->rss_params.hfunc == ETH_RSS_HASH_XOR)
5953f6d08d1SOr Gerlitz 			ix = mlx5e_bits_invert(i, ilog2(sz));
5963f6d08d1SOr Gerlitz 		ix = indirection_rqt[ix];
5973f6d08d1SOr Gerlitz 		rqn = hp->pair->rqn[ix];
5983f6d08d1SOr Gerlitz 		MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
5993f6d08d1SOr Gerlitz 	}
6003f6d08d1SOr Gerlitz }
6013f6d08d1SOr Gerlitz 
6023f6d08d1SOr Gerlitz static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
6033f6d08d1SOr Gerlitz {
6043f6d08d1SOr Gerlitz 	int inlen, err, sz = MLX5E_INDIR_RQT_SIZE;
6053f6d08d1SOr Gerlitz 	struct mlx5e_priv *priv = hp->func_priv;
6063f6d08d1SOr Gerlitz 	struct mlx5_core_dev *mdev = priv->mdev;
6073f6d08d1SOr Gerlitz 	void *rqtc;
6083f6d08d1SOr Gerlitz 	u32 *in;
6093f6d08d1SOr Gerlitz 
6103f6d08d1SOr Gerlitz 	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
6113f6d08d1SOr Gerlitz 	in = kvzalloc(inlen, GFP_KERNEL);
6123f6d08d1SOr Gerlitz 	if (!in)
6133f6d08d1SOr Gerlitz 		return -ENOMEM;
6143f6d08d1SOr Gerlitz 
6153f6d08d1SOr Gerlitz 	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
6163f6d08d1SOr Gerlitz 
6173f6d08d1SOr Gerlitz 	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
6183f6d08d1SOr Gerlitz 	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
6193f6d08d1SOr Gerlitz 
6203f6d08d1SOr Gerlitz 	mlx5e_hairpin_fill_rqt_rqns(hp, rqtc);
6213f6d08d1SOr Gerlitz 
6223f6d08d1SOr Gerlitz 	err = mlx5_core_create_rqt(mdev, in, inlen, &hp->indir_rqt.rqtn);
6233f6d08d1SOr Gerlitz 	if (!err)
6243f6d08d1SOr Gerlitz 		hp->indir_rqt.enabled = true;
6253f6d08d1SOr Gerlitz 
6263f6d08d1SOr Gerlitz 	kvfree(in);
6273f6d08d1SOr Gerlitz 	return err;
6283f6d08d1SOr Gerlitz }
6293f6d08d1SOr Gerlitz 
6303f6d08d1SOr Gerlitz static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
6313f6d08d1SOr Gerlitz {
6323f6d08d1SOr Gerlitz 	struct mlx5e_priv *priv = hp->func_priv;
6333f6d08d1SOr Gerlitz 	u32 in[MLX5_ST_SZ_DW(create_tir_in)];
6343f6d08d1SOr Gerlitz 	int tt, i, err;
6353f6d08d1SOr Gerlitz 	void *tirc;
6363f6d08d1SOr Gerlitz 
6373f6d08d1SOr Gerlitz 	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
638d930ac79SAya Levin 		struct mlx5e_tirc_config ttconfig = mlx5e_tirc_get_default_config(tt);
639d930ac79SAya Levin 
6403f6d08d1SOr Gerlitz 		memset(in, 0, MLX5_ST_SZ_BYTES(create_tir_in));
6413f6d08d1SOr Gerlitz 		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
6423f6d08d1SOr Gerlitz 
6433f6d08d1SOr Gerlitz 		MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
6443f6d08d1SOr Gerlitz 		MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
6453f6d08d1SOr Gerlitz 		MLX5_SET(tirc, tirc, indirect_table, hp->indir_rqt.rqtn);
646bbeb53b8SAya Levin 		mlx5e_build_indir_tir_ctx_hash(&priv->rss_params, &ttconfig, tirc, false);
647bbeb53b8SAya Levin 
6483f6d08d1SOr Gerlitz 		err = mlx5_core_create_tir(hp->func_mdev, in,
649e0b4b472SLeon Romanovsky 					   &hp->indir_tirn[tt]);
6503f6d08d1SOr Gerlitz 		if (err) {
6513f6d08d1SOr Gerlitz 			mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err);
6523f6d08d1SOr Gerlitz 			goto err_destroy_tirs;
6533f6d08d1SOr Gerlitz 		}
6543f6d08d1SOr Gerlitz 	}
6553f6d08d1SOr Gerlitz 	return 0;
6563f6d08d1SOr Gerlitz 
6573f6d08d1SOr Gerlitz err_destroy_tirs:
6583f6d08d1SOr Gerlitz 	for (i = 0; i < tt; i++)
6593f6d08d1SOr Gerlitz 		mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[i]);
6603f6d08d1SOr Gerlitz 	return err;
6613f6d08d1SOr Gerlitz }
6623f6d08d1SOr Gerlitz 
6633f6d08d1SOr Gerlitz static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp)
6643f6d08d1SOr Gerlitz {
6653f6d08d1SOr Gerlitz 	int tt;
6663f6d08d1SOr Gerlitz 
6673f6d08d1SOr Gerlitz 	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
6683f6d08d1SOr Gerlitz 		mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[tt]);
6693f6d08d1SOr Gerlitz }
6703f6d08d1SOr Gerlitz 
6713f6d08d1SOr Gerlitz static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
6723f6d08d1SOr Gerlitz 					 struct ttc_params *ttc_params)
6733f6d08d1SOr Gerlitz {
6743f6d08d1SOr Gerlitz 	struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
6753f6d08d1SOr Gerlitz 	int tt;
6763f6d08d1SOr Gerlitz 
6773f6d08d1SOr Gerlitz 	memset(ttc_params, 0, sizeof(*ttc_params));
6783f6d08d1SOr Gerlitz 
6793f6d08d1SOr Gerlitz 	ttc_params->any_tt_tirn = hp->tirn;
6803f6d08d1SOr Gerlitz 
6813f6d08d1SOr Gerlitz 	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
6823f6d08d1SOr Gerlitz 		ttc_params->indir_tirn[tt] = hp->indir_tirn[tt];
6833f6d08d1SOr Gerlitz 
6846412bb39SEli Cohen 	ft_attr->max_fte = MLX5E_TTC_TABLE_SIZE;
6853f6d08d1SOr Gerlitz 	ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
6863f6d08d1SOr Gerlitz 	ft_attr->prio = MLX5E_TC_PRIO;
6873f6d08d1SOr Gerlitz }
6883f6d08d1SOr Gerlitz 
6893f6d08d1SOr Gerlitz static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
6903f6d08d1SOr Gerlitz {
6913f6d08d1SOr Gerlitz 	struct mlx5e_priv *priv = hp->func_priv;
6923f6d08d1SOr Gerlitz 	struct ttc_params ttc_params;
6933f6d08d1SOr Gerlitz 	int err;
6943f6d08d1SOr Gerlitz 
6953f6d08d1SOr Gerlitz 	err = mlx5e_hairpin_create_indirect_rqt(hp);
6963f6d08d1SOr Gerlitz 	if (err)
6973f6d08d1SOr Gerlitz 		return err;
6983f6d08d1SOr Gerlitz 
6993f6d08d1SOr Gerlitz 	err = mlx5e_hairpin_create_indirect_tirs(hp);
7003f6d08d1SOr Gerlitz 	if (err)
7013f6d08d1SOr Gerlitz 		goto err_create_indirect_tirs;
7023f6d08d1SOr Gerlitz 
7033f6d08d1SOr Gerlitz 	mlx5e_hairpin_set_ttc_params(hp, &ttc_params);
7043f6d08d1SOr Gerlitz 	err = mlx5e_create_ttc_table(priv, &ttc_params, &hp->ttc);
7053f6d08d1SOr Gerlitz 	if (err)
7063f6d08d1SOr Gerlitz 		goto err_create_ttc_table;
7073f6d08d1SOr Gerlitz 
7083f6d08d1SOr Gerlitz 	netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
7093f6d08d1SOr Gerlitz 		   hp->num_channels, hp->ttc.ft.t->id);
7103f6d08d1SOr Gerlitz 
7113f6d08d1SOr Gerlitz 	return 0;
7123f6d08d1SOr Gerlitz 
7133f6d08d1SOr Gerlitz err_create_ttc_table:
7143f6d08d1SOr Gerlitz 	mlx5e_hairpin_destroy_indirect_tirs(hp);
7153f6d08d1SOr Gerlitz err_create_indirect_tirs:
7163f6d08d1SOr Gerlitz 	mlx5e_destroy_rqt(priv, &hp->indir_rqt);
7173f6d08d1SOr Gerlitz 
7183f6d08d1SOr Gerlitz 	return err;
7193f6d08d1SOr Gerlitz }
7203f6d08d1SOr Gerlitz 
7213f6d08d1SOr Gerlitz static void mlx5e_hairpin_rss_cleanup(struct mlx5e_hairpin *hp)
7223f6d08d1SOr Gerlitz {
7233f6d08d1SOr Gerlitz 	struct mlx5e_priv *priv = hp->func_priv;
7243f6d08d1SOr Gerlitz 
7253f6d08d1SOr Gerlitz 	mlx5e_destroy_ttc_table(priv, &hp->ttc);
7263f6d08d1SOr Gerlitz 	mlx5e_hairpin_destroy_indirect_tirs(hp);
7273f6d08d1SOr Gerlitz 	mlx5e_destroy_rqt(priv, &hp->indir_rqt);
7283f6d08d1SOr Gerlitz }
7293f6d08d1SOr Gerlitz 
73077ab67b7SOr Gerlitz static struct mlx5e_hairpin *
73177ab67b7SOr Gerlitz mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params,
73277ab67b7SOr Gerlitz 		     int peer_ifindex)
73377ab67b7SOr Gerlitz {
73477ab67b7SOr Gerlitz 	struct mlx5_core_dev *func_mdev, *peer_mdev;
73577ab67b7SOr Gerlitz 	struct mlx5e_hairpin *hp;
73677ab67b7SOr Gerlitz 	struct mlx5_hairpin *pair;
73777ab67b7SOr Gerlitz 	int err;
73877ab67b7SOr Gerlitz 
73977ab67b7SOr Gerlitz 	hp = kzalloc(sizeof(*hp), GFP_KERNEL);
74077ab67b7SOr Gerlitz 	if (!hp)
74177ab67b7SOr Gerlitz 		return ERR_PTR(-ENOMEM);
74277ab67b7SOr Gerlitz 
74377ab67b7SOr Gerlitz 	func_mdev = priv->mdev;
74477ab67b7SOr Gerlitz 	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
74577ab67b7SOr Gerlitz 
74677ab67b7SOr Gerlitz 	pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
74777ab67b7SOr Gerlitz 	if (IS_ERR(pair)) {
74877ab67b7SOr Gerlitz 		err = PTR_ERR(pair);
74977ab67b7SOr Gerlitz 		goto create_pair_err;
75077ab67b7SOr Gerlitz 	}
75177ab67b7SOr Gerlitz 	hp->pair = pair;
75277ab67b7SOr Gerlitz 	hp->func_mdev = func_mdev;
7533f6d08d1SOr Gerlitz 	hp->func_priv = priv;
7543f6d08d1SOr Gerlitz 	hp->num_channels = params->num_channels;
75577ab67b7SOr Gerlitz 
75677ab67b7SOr Gerlitz 	err = mlx5e_hairpin_create_transport(hp);
75777ab67b7SOr Gerlitz 	if (err)
75877ab67b7SOr Gerlitz 		goto create_transport_err;
75977ab67b7SOr Gerlitz 
7603f6d08d1SOr Gerlitz 	if (hp->num_channels > 1) {
7613f6d08d1SOr Gerlitz 		err = mlx5e_hairpin_rss_init(hp);
7623f6d08d1SOr Gerlitz 		if (err)
7633f6d08d1SOr Gerlitz 			goto rss_init_err;
7643f6d08d1SOr Gerlitz 	}
7653f6d08d1SOr Gerlitz 
76677ab67b7SOr Gerlitz 	return hp;
76777ab67b7SOr Gerlitz 
7683f6d08d1SOr Gerlitz rss_init_err:
7693f6d08d1SOr Gerlitz 	mlx5e_hairpin_destroy_transport(hp);
77077ab67b7SOr Gerlitz create_transport_err:
77177ab67b7SOr Gerlitz 	mlx5_core_hairpin_destroy(hp->pair);
77277ab67b7SOr Gerlitz create_pair_err:
77377ab67b7SOr Gerlitz 	kfree(hp);
77477ab67b7SOr Gerlitz 	return ERR_PTR(err);
77577ab67b7SOr Gerlitz }
77677ab67b7SOr Gerlitz 
77777ab67b7SOr Gerlitz static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp)
77877ab67b7SOr Gerlitz {
7793f6d08d1SOr Gerlitz 	if (hp->num_channels > 1)
7803f6d08d1SOr Gerlitz 		mlx5e_hairpin_rss_cleanup(hp);
78177ab67b7SOr Gerlitz 	mlx5e_hairpin_destroy_transport(hp);
78277ab67b7SOr Gerlitz 	mlx5_core_hairpin_destroy(hp->pair);
78377ab67b7SOr Gerlitz 	kvfree(hp);
78477ab67b7SOr Gerlitz }
78577ab67b7SOr Gerlitz 
786106be53bSOr Gerlitz static inline u32 hash_hairpin_info(u16 peer_vhca_id, u8 prio)
787106be53bSOr Gerlitz {
788106be53bSOr Gerlitz 	return (peer_vhca_id << 16 | prio);
789106be53bSOr Gerlitz }
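/* For example (illustrative values): peer_vhca_id 5 and prio 3 hash to
 * key 0x50003, which mlx5e_hairpin_get() below uses to look up the entry.
 */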
790106be53bSOr Gerlitz 
7915c65c564SOr Gerlitz static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
792106be53bSOr Gerlitz 						     u16 peer_vhca_id, u8 prio)
7935c65c564SOr Gerlitz {
7945c65c564SOr Gerlitz 	struct mlx5e_hairpin_entry *hpe;
795106be53bSOr Gerlitz 	u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);
7965c65c564SOr Gerlitz 
7975c65c564SOr Gerlitz 	hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe,
798106be53bSOr Gerlitz 			       hairpin_hlist, hash_key) {
799e4f9abbdSVlad Buslov 		if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio) {
800e4f9abbdSVlad Buslov 			refcount_inc(&hpe->refcnt);
8015c65c564SOr Gerlitz 			return hpe;
8025c65c564SOr Gerlitz 		}
803e4f9abbdSVlad Buslov 	}
8045c65c564SOr Gerlitz 
8055c65c564SOr Gerlitz 	return NULL;
8065c65c564SOr Gerlitz }
8075c65c564SOr Gerlitz 
808e4f9abbdSVlad Buslov static void mlx5e_hairpin_put(struct mlx5e_priv *priv,
809e4f9abbdSVlad Buslov 			      struct mlx5e_hairpin_entry *hpe)
810e4f9abbdSVlad Buslov {
811e4f9abbdSVlad Buslov 	/* no more hairpin flows for us, release the hairpin pair */
812b32accdaSVlad Buslov 	if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &priv->fs.tc.hairpin_tbl_lock))
813e4f9abbdSVlad Buslov 		return;
814b32accdaSVlad Buslov 	hash_del(&hpe->hairpin_hlist);
815b32accdaSVlad Buslov 	mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
816e4f9abbdSVlad Buslov 
817db76ca24SVlad Buslov 	if (!IS_ERR_OR_NULL(hpe->hp)) {
818e4f9abbdSVlad Buslov 		netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
819e4f9abbdSVlad Buslov 			   dev_name(hpe->hp->pair->peer_mdev->device));
820e4f9abbdSVlad Buslov 
821e4f9abbdSVlad Buslov 		mlx5e_hairpin_destroy(hpe->hp);
822db76ca24SVlad Buslov 	}
823db76ca24SVlad Buslov 
824db76ca24SVlad Buslov 	WARN_ON(!list_empty(&hpe->flows));
825e4f9abbdSVlad Buslov 	kfree(hpe);
826e4f9abbdSVlad Buslov }
827e4f9abbdSVlad Buslov 
828106be53bSOr Gerlitz #define UNKNOWN_MATCH_PRIO 8
829106be53bSOr Gerlitz 
830106be53bSOr Gerlitz static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
831e98bedf5SEli Britstein 				  struct mlx5_flow_spec *spec, u8 *match_prio,
832e98bedf5SEli Britstein 				  struct netlink_ext_ack *extack)
833106be53bSOr Gerlitz {
834106be53bSOr Gerlitz 	void *headers_c, *headers_v;
835106be53bSOr Gerlitz 	u8 prio_val, prio_mask = 0;
836106be53bSOr Gerlitz 	bool vlan_present;
837106be53bSOr Gerlitz 
838106be53bSOr Gerlitz #ifdef CONFIG_MLX5_CORE_EN_DCB
839106be53bSOr Gerlitz 	if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) {
840e98bedf5SEli Britstein 		NL_SET_ERR_MSG_MOD(extack,
841e98bedf5SEli Britstein 				   "only PCP trust state supported for hairpin");
842106be53bSOr Gerlitz 		return -EOPNOTSUPP;
843106be53bSOr Gerlitz 	}
844106be53bSOr Gerlitz #endif
845106be53bSOr Gerlitz 	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
846106be53bSOr Gerlitz 	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
847106be53bSOr Gerlitz 
848106be53bSOr Gerlitz 	vlan_present = MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag);
849106be53bSOr Gerlitz 	if (vlan_present) {
850106be53bSOr Gerlitz 		prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
851106be53bSOr Gerlitz 		prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
852106be53bSOr Gerlitz 	}
853106be53bSOr Gerlitz 
854106be53bSOr Gerlitz 	if (!vlan_present || !prio_mask) {
855106be53bSOr Gerlitz 		prio_val = UNKNOWN_MATCH_PRIO;
856106be53bSOr Gerlitz 	} else if (prio_mask != 0x7) {
857e98bedf5SEli Britstein 		NL_SET_ERR_MSG_MOD(extack,
858e98bedf5SEli Britstein 				   "masked priority match not supported for hairpin");
859106be53bSOr Gerlitz 		return -EOPNOTSUPP;
860106be53bSOr Gerlitz 	}
861106be53bSOr Gerlitz 
862106be53bSOr Gerlitz 	*match_prio = prio_val;
863106be53bSOr Gerlitz 	return 0;
864106be53bSOr Gerlitz }
865106be53bSOr Gerlitz 
8665c65c564SOr Gerlitz static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
8675c65c564SOr Gerlitz 				  struct mlx5e_tc_flow *flow,
868e98bedf5SEli Britstein 				  struct mlx5e_tc_flow_parse_attr *parse_attr,
869e98bedf5SEli Britstein 				  struct netlink_ext_ack *extack)
8705c65c564SOr Gerlitz {
87198b66cb1SEli Britstein 	int peer_ifindex = parse_attr->mirred_ifindex[0];
8725c65c564SOr Gerlitz 	struct mlx5_hairpin_params params;
873d8822868SOr Gerlitz 	struct mlx5_core_dev *peer_mdev;
8745c65c564SOr Gerlitz 	struct mlx5e_hairpin_entry *hpe;
8755c65c564SOr Gerlitz 	struct mlx5e_hairpin *hp;
8763f6d08d1SOr Gerlitz 	u64 link_speed64;
8773f6d08d1SOr Gerlitz 	u32 link_speed;
878106be53bSOr Gerlitz 	u8 match_prio;
879d8822868SOr Gerlitz 	u16 peer_id;
8805c65c564SOr Gerlitz 	int err;
8815c65c564SOr Gerlitz 
882d8822868SOr Gerlitz 	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
883d8822868SOr Gerlitz 	if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
884e98bedf5SEli Britstein 		NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
8855c65c564SOr Gerlitz 		return -EOPNOTSUPP;
8865c65c564SOr Gerlitz 	}
8875c65c564SOr Gerlitz 
888d8822868SOr Gerlitz 	peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
889e98bedf5SEli Britstein 	err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio,
890e98bedf5SEli Britstein 				     extack);
891106be53bSOr Gerlitz 	if (err)
892106be53bSOr Gerlitz 		return err;
893b32accdaSVlad Buslov 
894b32accdaSVlad Buslov 	mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
895106be53bSOr Gerlitz 	hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
896db76ca24SVlad Buslov 	if (hpe) {
897db76ca24SVlad Buslov 		mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
898db76ca24SVlad Buslov 		wait_for_completion(&hpe->res_ready);
899db76ca24SVlad Buslov 
900db76ca24SVlad Buslov 		if (IS_ERR(hpe->hp)) {
901db76ca24SVlad Buslov 			err = -EREMOTEIO;
902db76ca24SVlad Buslov 			goto out_err;
903db76ca24SVlad Buslov 		}
9045c65c564SOr Gerlitz 		goto attach_flow;
905db76ca24SVlad Buslov 	}
9065c65c564SOr Gerlitz 
9075c65c564SOr Gerlitz 	hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
908b32accdaSVlad Buslov 	if (!hpe) {
909db76ca24SVlad Buslov 		mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
910db76ca24SVlad Buslov 		return -ENOMEM;
911b32accdaSVlad Buslov 	}
9125c65c564SOr Gerlitz 
91373edca73SVlad Buslov 	spin_lock_init(&hpe->flows_lock);
9145c65c564SOr Gerlitz 	INIT_LIST_HEAD(&hpe->flows);
915db76ca24SVlad Buslov 	INIT_LIST_HEAD(&hpe->dead_peer_wait_list);
916d8822868SOr Gerlitz 	hpe->peer_vhca_id = peer_id;
917106be53bSOr Gerlitz 	hpe->prio = match_prio;
918e4f9abbdSVlad Buslov 	refcount_set(&hpe->refcnt, 1);
919db76ca24SVlad Buslov 	init_completion(&hpe->res_ready);
920db76ca24SVlad Buslov 
921db76ca24SVlad Buslov 	hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist,
922db76ca24SVlad Buslov 		 hash_hairpin_info(peer_id, match_prio));
923db76ca24SVlad Buslov 	mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
9245c65c564SOr Gerlitz 
9255c65c564SOr Gerlitz 	params.log_data_size = 15;
9265c65c564SOr Gerlitz 	params.log_data_size = min_t(u8, params.log_data_size,
9275c65c564SOr Gerlitz 				     MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
9285c65c564SOr Gerlitz 	params.log_data_size = max_t(u8, params.log_data_size,
9295c65c564SOr Gerlitz 				     MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz));
9305c65c564SOr Gerlitz 
931eb9180f7SOr Gerlitz 	params.log_num_packets = params.log_data_size -
932eb9180f7SOr Gerlitz 				 MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev);
933eb9180f7SOr Gerlitz 	params.log_num_packets = min_t(u8, params.log_num_packets,
934eb9180f7SOr Gerlitz 				       MLX5_CAP_GEN(priv->mdev, log_max_hairpin_num_packets));
935eb9180f7SOr Gerlitz 
936eb9180f7SOr Gerlitz 	params.q_counter = priv->q_counter;
9373f6d08d1SOr Gerlitz 	/* use one hairpin channel for each 50Gbps share of the link speed */
9382c81bfd5SHuy Nguyen 	mlx5e_port_max_linkspeed(priv->mdev, &link_speed);
9393f6d08d1SOr Gerlitz 	link_speed = max_t(u32, link_speed, 50000);
9403f6d08d1SOr Gerlitz 	link_speed64 = link_speed;
9413f6d08d1SOr Gerlitz 	do_div(link_speed64, 50000);
9423f6d08d1SOr Gerlitz 	params.num_channels = link_speed64;
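	/* Worked example (illustrative): a 100Gbps port reports
	 * link_speed = 100000 (Mbps), so do_div() by 50000 yields
	 * params.num_channels = 2; ports at or below 50Gbps clamp to a
	 * single channel via the max_t() above.
	 */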
9433f6d08d1SOr Gerlitz 
9445c65c564SOr Gerlitz 	hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
945db76ca24SVlad Buslov 	hpe->hp = hp;
946db76ca24SVlad Buslov 	complete_all(&hpe->res_ready);
9475c65c564SOr Gerlitz 	if (IS_ERR(hp)) {
9485c65c564SOr Gerlitz 		err = PTR_ERR(hp);
949db76ca24SVlad Buslov 		goto out_err;
9505c65c564SOr Gerlitz 	}
9515c65c564SOr Gerlitz 
952eb9180f7SOr Gerlitz 	netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
95327b942fbSParav Pandit 		   hp->tirn, hp->pair->rqn[0],
95427b942fbSParav Pandit 		   dev_name(hp->pair->peer_mdev->device),
955eb9180f7SOr Gerlitz 		   hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);
9565c65c564SOr Gerlitz 
9575c65c564SOr Gerlitz attach_flow:
9583f6d08d1SOr Gerlitz 	if (hpe->hp->num_channels > 1) {
959226f2ca3SVlad Buslov 		flow_flag_set(flow, HAIRPIN_RSS);
9603f6d08d1SOr Gerlitz 		flow->nic_attr->hairpin_ft = hpe->hp->ttc.ft.t;
9613f6d08d1SOr Gerlitz 	} else {
9625c65c564SOr Gerlitz 		flow->nic_attr->hairpin_tirn = hpe->hp->tirn;
9633f6d08d1SOr Gerlitz 	}
964b32accdaSVlad Buslov 
965e4f9abbdSVlad Buslov 	flow->hpe = hpe;
96673edca73SVlad Buslov 	spin_lock(&hpe->flows_lock);
9675c65c564SOr Gerlitz 	list_add(&flow->hairpin, &hpe->flows);
96873edca73SVlad Buslov 	spin_unlock(&hpe->flows_lock);
9693f6d08d1SOr Gerlitz 
9705c65c564SOr Gerlitz 	return 0;
9715c65c564SOr Gerlitz 
972db76ca24SVlad Buslov out_err:
973db76ca24SVlad Buslov 	mlx5e_hairpin_put(priv, hpe);
9745c65c564SOr Gerlitz 	return err;
9755c65c564SOr Gerlitz }
9765c65c564SOr Gerlitz 
9775c65c564SOr Gerlitz static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
9785c65c564SOr Gerlitz 				   struct mlx5e_tc_flow *flow)
9795c65c564SOr Gerlitz {
9805a7e5bcbSVlad Buslov 	/* flow wasn't fully initialized */
981e4f9abbdSVlad Buslov 	if (!flow->hpe)
9825a7e5bcbSVlad Buslov 		return;
9835a7e5bcbSVlad Buslov 
98473edca73SVlad Buslov 	spin_lock(&flow->hpe->flows_lock);
9855c65c564SOr Gerlitz 	list_del(&flow->hairpin);
98673edca73SVlad Buslov 	spin_unlock(&flow->hpe->flows_lock);
98773edca73SVlad Buslov 
988e4f9abbdSVlad Buslov 	mlx5e_hairpin_put(priv, flow->hpe);
989e4f9abbdSVlad Buslov 	flow->hpe = NULL;
9905c65c564SOr Gerlitz }
9915c65c564SOr Gerlitz 
992c83954abSRabie Loulou static int
99374491de9SMark Bloch mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
99417091853SOr Gerlitz 		      struct mlx5e_tc_flow_parse_attr *parse_attr,
995e98bedf5SEli Britstein 		      struct mlx5e_tc_flow *flow,
996e98bedf5SEli Britstein 		      struct netlink_ext_ack *extack)
997e8f887acSAmir Vadai {
998bb0ee7dcSJianbo Liu 	struct mlx5_flow_context *flow_context = &parse_attr->spec.flow_context;
999aa0cbbaeSOr Gerlitz 	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
1000aad7e08dSAmir Vadai 	struct mlx5_core_dev *dev = priv->mdev;
10015c65c564SOr Gerlitz 	struct mlx5_flow_destination dest[2] = {};
100266958ed9SHadar Hen Zion 	struct mlx5_flow_act flow_act = {
10033bc4b7bfSOr Gerlitz 		.action = attr->action,
1004bb0ee7dcSJianbo Liu 		.flags    = FLOW_ACT_NO_APPEND,
100566958ed9SHadar Hen Zion 	};
1006aad7e08dSAmir Vadai 	struct mlx5_fc *counter = NULL;
10075c65c564SOr Gerlitz 	int err, dest_ix = 0;
1008e8f887acSAmir Vadai 
1009bb0ee7dcSJianbo Liu 	flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
1010bb0ee7dcSJianbo Liu 	flow_context->flow_tag = attr->flow_tag;
1011bb0ee7dcSJianbo Liu 
1012226f2ca3SVlad Buslov 	if (flow_flag_test(flow, HAIRPIN)) {
1013e98bedf5SEli Britstein 		err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
10145a7e5bcbSVlad Buslov 		if (err)
10155a7e5bcbSVlad Buslov 			return err;
10165a7e5bcbSVlad Buslov 
1017226f2ca3SVlad Buslov 		if (flow_flag_test(flow, HAIRPIN_RSS)) {
10183f6d08d1SOr Gerlitz 			dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
10193f6d08d1SOr Gerlitz 			dest[dest_ix].ft = attr->hairpin_ft;
10203f6d08d1SOr Gerlitz 		} else {
10215c65c564SOr Gerlitz 			dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
10225c65c564SOr Gerlitz 			dest[dest_ix].tir_num = attr->hairpin_tirn;
10233f6d08d1SOr Gerlitz 		}
10243f6d08d1SOr Gerlitz 		dest_ix++;
10253f6d08d1SOr Gerlitz 	} else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
10265c65c564SOr Gerlitz 		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
10275c65c564SOr Gerlitz 		dest[dest_ix].ft = priv->fs.vlan.ft.t;
10285c65c564SOr Gerlitz 		dest_ix++;
10295c65c564SOr Gerlitz 	}
1030aad7e08dSAmir Vadai 
10315c65c564SOr Gerlitz 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
10325c65c564SOr Gerlitz 		counter = mlx5_fc_create(dev, true);
10335a7e5bcbSVlad Buslov 		if (IS_ERR(counter))
10345a7e5bcbSVlad Buslov 			return PTR_ERR(counter);
10355a7e5bcbSVlad Buslov 
10365c65c564SOr Gerlitz 		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
1037171c7625SMark Bloch 		dest[dest_ix].counter_id = mlx5_fc_id(counter);
10385c65c564SOr Gerlitz 		dest_ix++;
1039b8aee822SMark Bloch 		attr->counter = counter;
1040aad7e08dSAmir Vadai 	}
1041aad7e08dSAmir Vadai 
10422f4fe4caSOr Gerlitz 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
10433099eb5aSOr Gerlitz 		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
10442b688ea5SMaor Gottlieb 		flow_act.modify_hdr = attr->modify_hdr;
10456ae4a6a5SPaul Blakey 		dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
1046c83954abSRabie Loulou 		if (err)
10475a7e5bcbSVlad Buslov 			return err;
10482f4fe4caSOr Gerlitz 	}
10492f4fe4caSOr Gerlitz 
1050b6fac0b4SVlad Buslov 	mutex_lock(&priv->fs.tc.t_lock);
1051acff797cSMaor Gottlieb 	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
105261dc7b01SPaul Blakey 		struct mlx5_flow_table_attr ft_attr = {};
105361dc7b01SPaul Blakey 		int tc_grp_size, tc_tbl_size, tc_num_grps;
105421b9c144SOr Gerlitz 		u32 max_flow_counter;
105521b9c144SOr Gerlitz 
105621b9c144SOr Gerlitz 		max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
105721b9c144SOr Gerlitz 				    MLX5_CAP_GEN(dev, max_flow_counter_15_0);
105821b9c144SOr Gerlitz 
105921b9c144SOr Gerlitz 		tc_grp_size = min_t(int, max_flow_counter, MLX5E_TC_TABLE_MAX_GROUP_SIZE);
106021b9c144SOr Gerlitz 
106121b9c144SOr Gerlitz 		tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
106221b9c144SOr Gerlitz 				    BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));
106361dc7b01SPaul Blakey 		tc_num_grps = MLX5E_TC_TABLE_NUM_GROUPS;
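		/* Worked example (illustrative): a device reporting 16M flow
		 * counters and log_max_ft_size = 22 gets
		 * tc_grp_size = min(16M, 64K) = 64K and
		 * tc_tbl_size = min(64K * 4, 4M) = 256K entries, split over
		 * the 4 autogroups requested below.
		 */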
106421b9c144SOr Gerlitz 
106561dc7b01SPaul Blakey 		ft_attr.prio = MLX5E_TC_PRIO;
106661dc7b01SPaul Blakey 		ft_attr.max_fte = tc_tbl_size;
106761dc7b01SPaul Blakey 		ft_attr.level = MLX5E_TC_FT_LEVEL;
106861dc7b01SPaul Blakey 		ft_attr.autogroup.max_num_groups = tc_num_grps;
1069acff797cSMaor Gottlieb 		priv->fs.tc.t =
1070acff797cSMaor Gottlieb 			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
107161dc7b01SPaul Blakey 							    &ft_attr);
1072acff797cSMaor Gottlieb 		if (IS_ERR(priv->fs.tc.t)) {
1073b6fac0b4SVlad Buslov 			mutex_unlock(&priv->fs.tc.t_lock);
1074e98bedf5SEli Britstein 			NL_SET_ERR_MSG_MOD(extack,
1075c75a33c8SJacob Keller 					   "Failed to create tc offload table");
1076e8f887acSAmir Vadai 			netdev_err(priv->netdev,
1077e8f887acSAmir Vadai 				   "Failed to create tc offload table\n");
10785a7e5bcbSVlad Buslov 			return PTR_ERR(priv->fs.tc.t);
1079e8f887acSAmir Vadai 		}
1080e8f887acSAmir Vadai 	}
1081e8f887acSAmir Vadai 
108238aa51c1SOr Gerlitz 	if (attr->match_level != MLX5_MATCH_NONE)
1083d4a18e16SYevgeny Kliteynik 		parse_attr->spec.match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
108438aa51c1SOr Gerlitz 
1085c83954abSRabie Loulou 	flow->rule[0] = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
10865c65c564SOr Gerlitz 					    &flow_act, dest, dest_ix);
1087b6fac0b4SVlad Buslov 	mutex_unlock(&priv->fs.tc.t_lock);
1088e8f887acSAmir Vadai 
1089a2b7189bSzhong jiang 	return PTR_ERR_OR_ZERO(flow->rule[0]);
1090e8f887acSAmir Vadai }
1091e8f887acSAmir Vadai 
1092d85cdccbSOr Gerlitz static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
1093d85cdccbSOr Gerlitz 				  struct mlx5e_tc_flow *flow)
1094d85cdccbSOr Gerlitz {
1095513f8f7fSOr Gerlitz 	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
1096d85cdccbSOr Gerlitz 	struct mlx5_fc *counter = NULL;
1097d85cdccbSOr Gerlitz 
1098b8aee822SMark Bloch 	counter = attr->counter;
10995a7e5bcbSVlad Buslov 	if (!IS_ERR_OR_NULL(flow->rule[0]))
1100e4ad91f2SChris Mi 		mlx5_del_flow_rules(flow->rule[0]);
1101d85cdccbSOr Gerlitz 	mlx5_fc_destroy(priv->mdev, counter);
1102d85cdccbSOr Gerlitz 
1103b6fac0b4SVlad Buslov 	mutex_lock(&priv->fs.tc.t_lock);
1104226f2ca3SVlad Buslov 	if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) && priv->fs.tc.t) {
1105d85cdccbSOr Gerlitz 		mlx5_destroy_flow_table(priv->fs.tc.t);
1106d85cdccbSOr Gerlitz 		priv->fs.tc.t = NULL;
1107d85cdccbSOr Gerlitz 	}
1108b6fac0b4SVlad Buslov 	mutex_unlock(&priv->fs.tc.t_lock);
11092f4fe4caSOr Gerlitz 
1110513f8f7fSOr Gerlitz 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
11113099eb5aSOr Gerlitz 		mlx5e_detach_mod_hdr(priv, flow);
11125c65c564SOr Gerlitz 
1113226f2ca3SVlad Buslov 	if (flow_flag_test(flow, HAIRPIN))
11145c65c564SOr Gerlitz 		mlx5e_hairpin_flow_del(priv, flow);
1115d85cdccbSOr Gerlitz }
1116d85cdccbSOr Gerlitz 
1117aa0cbbaeSOr Gerlitz static void mlx5e_detach_encap(struct mlx5e_priv *priv,
11188c4dc42bSEli Britstein 			       struct mlx5e_tc_flow *flow, int out_index);
1119aa0cbbaeSOr Gerlitz 
11203c37745eSOr Gerlitz static int mlx5e_attach_encap(struct mlx5e_priv *priv,
1121e98bedf5SEli Britstein 			      struct mlx5e_tc_flow *flow,
1122733d4f36SRoi Dayan 			      struct net_device *mirred_dev,
1123733d4f36SRoi Dayan 			      int out_index,
11248c4dc42bSEli Britstein 			      struct netlink_ext_ack *extack,
11250ad060eeSRoi Dayan 			      struct net_device **encap_dev,
11260ad060eeSRoi Dayan 			      bool *encap_valid);
11273c37745eSOr Gerlitz 
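/* Install the FDB rule(s) of an eswitch flow. Connection tracking flows are
 * delegated to the CT module; otherwise the offloaded rule is added and,
 * when the actions are split across tables (attr->split_count), a second
 * forwarding rule is stored in flow->rule[1].
 */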
11286d2a3ed0SOr Gerlitz static struct mlx5_flow_handle *
11296d2a3ed0SOr Gerlitz mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
11306d2a3ed0SOr Gerlitz 			   struct mlx5e_tc_flow *flow,
11316d2a3ed0SOr Gerlitz 			   struct mlx5_flow_spec *spec,
11326d2a3ed0SOr Gerlitz 			   struct mlx5_esw_flow_attr *attr)
11336d2a3ed0SOr Gerlitz {
11341ef3018fSPaul Blakey 	struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
11356d2a3ed0SOr Gerlitz 	struct mlx5_flow_handle *rule;
11364c3844d9SPaul Blakey 
11371ef3018fSPaul Blakey 	if (flow_flag_test(flow, CT)) {
11381ef3018fSPaul Blakey 		mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
11391ef3018fSPaul Blakey 
11401ef3018fSPaul Blakey 		return mlx5_tc_ct_flow_offload(flow->priv, flow, spec, attr,
11411ef3018fSPaul Blakey 					       mod_hdr_acts);
11421ef3018fSPaul Blakey 	}
11436d2a3ed0SOr Gerlitz 
11446d2a3ed0SOr Gerlitz 	rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
11456d2a3ed0SOr Gerlitz 	if (IS_ERR(rule))
11466d2a3ed0SOr Gerlitz 		return rule;
11476d2a3ed0SOr Gerlitz 
1148e85e02baSEli Britstein 	if (attr->split_count) {
11496d2a3ed0SOr Gerlitz 		flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
11506d2a3ed0SOr Gerlitz 		if (IS_ERR(flow->rule[1])) {
11516d2a3ed0SOr Gerlitz 			mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
11526d2a3ed0SOr Gerlitz 			return flow->rule[1];
11536d2a3ed0SOr Gerlitz 		}
11546d2a3ed0SOr Gerlitz 	}
11556d2a3ed0SOr Gerlitz 
11566d2a3ed0SOr Gerlitz 	return rule;
11576d2a3ed0SOr Gerlitz }
11586d2a3ed0SOr Gerlitz 
11596d2a3ed0SOr Gerlitz static void
11606d2a3ed0SOr Gerlitz mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
11616d2a3ed0SOr Gerlitz 			     struct mlx5e_tc_flow *flow,
11626d2a3ed0SOr Gerlitz 			     struct mlx5_esw_flow_attr *attr)
11636d2a3ed0SOr Gerlitz {
1164226f2ca3SVlad Buslov 	flow_flag_clear(flow, OFFLOADED);
11656d2a3ed0SOr Gerlitz 
11664c3844d9SPaul Blakey 	if (flow_flag_test(flow, CT)) {
11674c3844d9SPaul Blakey 		mlx5_tc_ct_delete_flow(flow->priv, flow, attr);
11684c3844d9SPaul Blakey 		return;
11694c3844d9SPaul Blakey 	}
11704c3844d9SPaul Blakey 
1171e85e02baSEli Britstein 	if (attr->split_count)
11726d2a3ed0SOr Gerlitz 		mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);
11736d2a3ed0SOr Gerlitz 
11746d2a3ed0SOr Gerlitz 	mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
11756d2a3ed0SOr Gerlitz }
11766d2a3ed0SOr Gerlitz 
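/* Install the flow in the slow path: the attribute is copied with encap and
 * split actions dropped and the SLOW_PATH flag set, so only a plain forward
 * action remains (used e.g. while an encap neighbour is unresolved). The
 * flow is marked SLOW on success.
 */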
11775dbe906fSPaul Blakey static struct mlx5_flow_handle *
11785dbe906fSPaul Blakey mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
11795dbe906fSPaul Blakey 			      struct mlx5e_tc_flow *flow,
1180178f69b4SEli Cohen 			      struct mlx5_flow_spec *spec)
11815dbe906fSPaul Blakey {
1182178f69b4SEli Cohen 	struct mlx5_esw_flow_attr slow_attr;
11835dbe906fSPaul Blakey 	struct mlx5_flow_handle *rule;
11845dbe906fSPaul Blakey 
1185178f69b4SEli Cohen 	memcpy(&slow_attr, flow->esw_attr, sizeof(slow_attr));
1186178f69b4SEli Cohen 	slow_attr.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1187178f69b4SEli Cohen 	slow_attr.split_count = 0;
1188178f69b4SEli Cohen 	slow_attr.flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
11895dbe906fSPaul Blakey 
1190178f69b4SEli Cohen 	rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, &slow_attr);
11915dbe906fSPaul Blakey 	if (!IS_ERR(rule))
1192226f2ca3SVlad Buslov 		flow_flag_set(flow, SLOW);
11935dbe906fSPaul Blakey 
11945dbe906fSPaul Blakey 	return rule;
11955dbe906fSPaul Blakey }
11965dbe906fSPaul Blakey 
11975dbe906fSPaul Blakey static void
11985dbe906fSPaul Blakey mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
1199178f69b4SEli Cohen 				  struct mlx5e_tc_flow *flow)
12005dbe906fSPaul Blakey {
1201178f69b4SEli Cohen 	struct mlx5_esw_flow_attr slow_attr;
1202178f69b4SEli Cohen 
1203178f69b4SEli Cohen 	memcpy(&slow_attr, flow->esw_attr, sizeof(slow_attr));
1204178f69b4SEli Cohen 	slow_attr.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1205178f69b4SEli Cohen 	slow_attr.split_count = 0;
1206178f69b4SEli Cohen 	slow_attr.flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
1207178f69b4SEli Cohen 	mlx5e_tc_unoffload_fdb_rules(esw, flow, &slow_attr);
1208226f2ca3SVlad Buslov 	flow_flag_clear(flow, SLOW);
12095dbe906fSPaul Blakey }
12105dbe906fSPaul Blakey 
1211ad86755bSVlad Buslov /* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
1212ad86755bSVlad Buslov  * function.
1213ad86755bSVlad Buslov  */
1214ad86755bSVlad Buslov static void unready_flow_add(struct mlx5e_tc_flow *flow,
1215ad86755bSVlad Buslov 			     struct list_head *unready_flows)
1216ad86755bSVlad Buslov {
1217ad86755bSVlad Buslov 	flow_flag_set(flow, NOT_READY);
1218ad86755bSVlad Buslov 	list_add_tail(&flow->unready, unready_flows);
1219ad86755bSVlad Buslov }
1220ad86755bSVlad Buslov 
1221ad86755bSVlad Buslov /* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
1222ad86755bSVlad Buslov  * function.
1223ad86755bSVlad Buslov  */
1224ad86755bSVlad Buslov static void unready_flow_del(struct mlx5e_tc_flow *flow)
1225ad86755bSVlad Buslov {
1226ad86755bSVlad Buslov 	list_del(&flow->unready);
1227ad86755bSVlad Buslov 	flow_flag_clear(flow, NOT_READY);
1228ad86755bSVlad Buslov }
1229ad86755bSVlad Buslov 
1230b4a23329SRoi Dayan static void add_unready_flow(struct mlx5e_tc_flow *flow)
1231b4a23329SRoi Dayan {
1232b4a23329SRoi Dayan 	struct mlx5_rep_uplink_priv *uplink_priv;
1233b4a23329SRoi Dayan 	struct mlx5e_rep_priv *rpriv;
1234b4a23329SRoi Dayan 	struct mlx5_eswitch *esw;
1235b4a23329SRoi Dayan 
1236b4a23329SRoi Dayan 	esw = flow->priv->mdev->priv.eswitch;
1237b4a23329SRoi Dayan 	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
1238b4a23329SRoi Dayan 	uplink_priv = &rpriv->uplink_priv;
1239b4a23329SRoi Dayan 
1240ad86755bSVlad Buslov 	mutex_lock(&uplink_priv->unready_flows_lock);
1241ad86755bSVlad Buslov 	unready_flow_add(flow, &uplink_priv->unready_flows);
1242ad86755bSVlad Buslov 	mutex_unlock(&uplink_priv->unready_flows_lock);
1243b4a23329SRoi Dayan }
1244b4a23329SRoi Dayan 
1245b4a23329SRoi Dayan static void remove_unready_flow(struct mlx5e_tc_flow *flow)
1246b4a23329SRoi Dayan {
1247ad86755bSVlad Buslov 	struct mlx5_rep_uplink_priv *uplink_priv;
1248ad86755bSVlad Buslov 	struct mlx5e_rep_priv *rpriv;
1249ad86755bSVlad Buslov 	struct mlx5_eswitch *esw;
1250ad86755bSVlad Buslov 
1251ad86755bSVlad Buslov 	esw = flow->priv->mdev->priv.eswitch;
1252ad86755bSVlad Buslov 	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
1253ad86755bSVlad Buslov 	uplink_priv = &rpriv->uplink_priv;
1254ad86755bSVlad Buslov 
1255ad86755bSVlad Buslov 	mutex_lock(&uplink_priv->unready_flows_lock);
1256ad86755bSVlad Buslov 	unready_flow_del(flow);
1257ad86755bSVlad Buslov 	mutex_unlock(&uplink_priv->unready_flows_lock);
1258b4a23329SRoi Dayan }
1259b4a23329SRoi Dayan 
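/* Add an eswitch (FDB) flow: validate the requested chain and priority,
 * attach encap entries for encapsulating destinations, add the vlan action,
 * attach a modify-header context and a counter when requested, and install
 * the rule in the fast path, or in the slow path if an encap neighbour is
 * not yet valid.
 */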
1260c83954abSRabie Loulou static int
126174491de9SMark Bloch mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
1262e98bedf5SEli Britstein 		      struct mlx5e_tc_flow *flow,
1263e98bedf5SEli Britstein 		      struct netlink_ext_ack *extack)
1264adb4c123SOr Gerlitz {
1265adb4c123SOr Gerlitz 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1266aa0cbbaeSOr Gerlitz 	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
12677040632dSTonghao Zhang 	struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
12683c37745eSOr Gerlitz 	struct net_device *out_dev, *encap_dev = NULL;
1269b8aee822SMark Bloch 	struct mlx5_fc *counter = NULL;
12703c37745eSOr Gerlitz 	struct mlx5e_rep_priv *rpriv;
12713c37745eSOr Gerlitz 	struct mlx5e_priv *out_priv;
12720ad060eeSRoi Dayan 	bool encap_valid = true;
127339ac237cSPaul Blakey 	u32 max_prio, max_chain;
12740ad060eeSRoi Dayan 	int err = 0;
1275f493f155SEli Britstein 	int out_index;
12768b32580dSOr Gerlitz 
127739ac237cSPaul Blakey 	if (!mlx5_esw_chains_prios_supported(esw) && attr->prio != 1) {
127861644c3dSRoi Dayan 		NL_SET_ERR_MSG_MOD(extack,
127961644c3dSRoi Dayan 				   "E-switch priorities unsupported, upgrade FW");
1280d14f6f2aSOr Gerlitz 		return -EOPNOTSUPP;
1281d14f6f2aSOr Gerlitz 	}
1282e52c2802SPaul Blakey 
128384179981SPaul Blakey 	/* We check chain range only for tc flows.
128484179981SPaul Blakey 	 * For ft flows, we checked attr->chain was originally 0 and set it to
128584179981SPaul Blakey 	 * FDB_FT_CHAIN which is outside tc range.
128684179981SPaul Blakey 	 * See mlx5e_rep_setup_ft_cb().
128784179981SPaul Blakey 	 */
128839ac237cSPaul Blakey 	max_chain = mlx5_esw_chains_get_chain_range(esw);
128984179981SPaul Blakey 	if (!mlx5e_is_ft_flow(flow) && attr->chain > max_chain) {
129061644c3dSRoi Dayan 		NL_SET_ERR_MSG_MOD(extack,
129161644c3dSRoi Dayan 				   "Requested chain is out of supported range");
12925a7e5bcbSVlad Buslov 		return -EOPNOTSUPP;
1293bf07aa73SPaul Blakey 	}
1294bf07aa73SPaul Blakey 
129539ac237cSPaul Blakey 	max_prio = mlx5_esw_chains_get_prio_range(esw);
1296bf07aa73SPaul Blakey 	if (attr->prio > max_prio) {
129761644c3dSRoi Dayan 		NL_SET_ERR_MSG_MOD(extack,
129861644c3dSRoi Dayan 				   "Requested priority is out of supported range");
12995a7e5bcbSVlad Buslov 		return -EOPNOTSUPP;
1300bf07aa73SPaul Blakey 	}
1301bf07aa73SPaul Blakey 
1302f493f155SEli Britstein 	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
13038c4dc42bSEli Britstein 		int mirred_ifindex;
13048c4dc42bSEli Britstein 
1305f493f155SEli Britstein 		if (!(attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
1306f493f155SEli Britstein 			continue;
1307f493f155SEli Britstein 
13087040632dSTonghao Zhang 		mirred_ifindex = parse_attr->mirred_ifindex[out_index];
13093c37745eSOr Gerlitz 		out_dev = __dev_get_by_index(dev_net(priv->netdev),
13108c4dc42bSEli Britstein 					     mirred_ifindex);
1311733d4f36SRoi Dayan 		err = mlx5e_attach_encap(priv, flow, out_dev, out_index,
13120ad060eeSRoi Dayan 					 extack, &encap_dev, &encap_valid);
13130ad060eeSRoi Dayan 		if (err)
13145a7e5bcbSVlad Buslov 			return err;
13150ad060eeSRoi Dayan 
13163c37745eSOr Gerlitz 		out_priv = netdev_priv(encap_dev);
13173c37745eSOr Gerlitz 		rpriv = out_priv->ppriv;
13181cc26d74SEli Britstein 		attr->dests[out_index].rep = rpriv->rep;
13191cc26d74SEli Britstein 		attr->dests[out_index].mdev = out_priv->mdev;
13203c37745eSOr Gerlitz 	}
13213c37745eSOr Gerlitz 
13228b32580dSOr Gerlitz 	err = mlx5_eswitch_add_vlan_action(esw, attr);
1323c83954abSRabie Loulou 	if (err)
13245a7e5bcbSVlad Buslov 		return err;
1325adb4c123SOr Gerlitz 
1326d5a3c2b6SRoi Dayan 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
1327d5a3c2b6SRoi Dayan 	    !(attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR)) {
13281a9527bbSOr Gerlitz 		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
13296ae4a6a5SPaul Blakey 		dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
1330c83954abSRabie Loulou 		if (err)
13315a7e5bcbSVlad Buslov 			return err;
1332d7e75a32SOr Gerlitz 	}
1333d7e75a32SOr Gerlitz 
1334b8aee822SMark Bloch 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
1335f9392795SShahar Klein 		counter = mlx5_fc_create(attr->counter_dev, true);
13365a7e5bcbSVlad Buslov 		if (IS_ERR(counter))
13375a7e5bcbSVlad Buslov 			return PTR_ERR(counter);
1338b8aee822SMark Bloch 
1339b8aee822SMark Bloch 		attr->counter = counter;
1340b8aee822SMark Bloch 	}
1341b8aee822SMark Bloch 
13420ad060eeSRoi Dayan 	/* we get here if one of the following takes place:
13430ad060eeSRoi Dayan 	 * (1) there's no error
13440ad060eeSRoi Dayan 	 * (2) there's an encap action and we don't have a valid neighbour yet
13453c37745eSOr Gerlitz 	 */
1346bc1d75faSRoi Dayan 	if (!encap_valid)
1347178f69b4SEli Cohen 		flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec);
1348bc1d75faSRoi Dayan 	else
13496d2a3ed0SOr Gerlitz 		flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);
13505dbe906fSPaul Blakey 
13515a7e5bcbSVlad Buslov 	if (IS_ERR(flow->rule[0]))
13525a7e5bcbSVlad Buslov 		return PTR_ERR(flow->rule[0]);
1353226f2ca3SVlad Buslov 	else
1354226f2ca3SVlad Buslov 		flow_flag_set(flow, OFFLOADED);
1355c83954abSRabie Loulou 
13565dbe906fSPaul Blakey 	return 0;
1357aa0cbbaeSOr Gerlitz }
1358d85cdccbSOr Gerlitz 
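/* Return true if the flow spec matches on GENEVE TLV option data. */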
13599272e3dfSYevgeny Kliteynik static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow)
13609272e3dfSYevgeny Kliteynik {
13619272e3dfSYevgeny Kliteynik 	struct mlx5_flow_spec *spec = &flow->esw_attr->parse_attr->spec;
13629272e3dfSYevgeny Kliteynik 	void *headers_v = MLX5_ADDR_OF(fte_match_param,
13639272e3dfSYevgeny Kliteynik 				       spec->match_value,
13649272e3dfSYevgeny Kliteynik 				       misc_parameters_3);
13659272e3dfSYevgeny Kliteynik 	u32 geneve_tlv_opt_0_data = MLX5_GET(fte_match_set_misc3,
13669272e3dfSYevgeny Kliteynik 					     headers_v,
13679272e3dfSYevgeny Kliteynik 					     geneve_tlv_option_0_data);
13689272e3dfSYevgeny Kliteynik 
13699272e3dfSYevgeny Kliteynik 	return !!geneve_tlv_opt_0_data;
13709272e3dfSYevgeny Kliteynik }
13719272e3dfSYevgeny Kliteynik 
1372d85cdccbSOr Gerlitz static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
1373d85cdccbSOr Gerlitz 				  struct mlx5e_tc_flow *flow)
1374d85cdccbSOr Gerlitz {
1375d85cdccbSOr Gerlitz 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1376d7e75a32SOr Gerlitz 	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
1377f493f155SEli Britstein 	int out_index;
1378d85cdccbSOr Gerlitz 
13790a7fcb78SPaul Blakey 	mlx5e_put_flow_tunnel_id(flow);
13800a7fcb78SPaul Blakey 
1381226f2ca3SVlad Buslov 	if (flow_flag_test(flow, NOT_READY)) {
1382b4a23329SRoi Dayan 		remove_unready_flow(flow);
1383ef06c9eeSRoi Dayan 		kvfree(attr->parse_attr);
1384ef06c9eeSRoi Dayan 		return;
1385ef06c9eeSRoi Dayan 	}
1386ef06c9eeSRoi Dayan 
1387226f2ca3SVlad Buslov 	if (mlx5e_is_offloaded_flow(flow)) {
1388226f2ca3SVlad Buslov 		if (flow_flag_test(flow, SLOW))
1389178f69b4SEli Cohen 			mlx5e_tc_unoffload_from_slow_path(esw, flow);
13905dbe906fSPaul Blakey 		else
13915dbe906fSPaul Blakey 			mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
13925dbe906fSPaul Blakey 	}
1393d85cdccbSOr Gerlitz 
13949272e3dfSYevgeny Kliteynik 	if (mlx5_flow_has_geneve_opt(flow))
13959272e3dfSYevgeny Kliteynik 		mlx5_geneve_tlv_option_del(priv->mdev->geneve);
13969272e3dfSYevgeny Kliteynik 
1397513f8f7fSOr Gerlitz 	mlx5_eswitch_del_vlan_action(esw, attr);
1398d85cdccbSOr Gerlitz 
1399f493f155SEli Britstein 	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
14002a4b6526SVlad Buslov 		if (attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP) {
14018c4dc42bSEli Britstein 			mlx5e_detach_encap(priv, flow, out_index);
14022a4b6526SVlad Buslov 			kfree(attr->parse_attr->tun_info[out_index]);
14032a4b6526SVlad Buslov 		}
1404f493f155SEli Britstein 	kvfree(attr->parse_attr);
1405d7e75a32SOr Gerlitz 
1406513f8f7fSOr Gerlitz 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
14071a9527bbSOr Gerlitz 		mlx5e_detach_mod_hdr(priv, flow);
1408b8aee822SMark Bloch 
1409b8aee822SMark Bloch 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
1410f9392795SShahar Klein 		mlx5_fc_destroy(attr->counter_dev, attr->counter);
1411d85cdccbSOr Gerlitz }
1412d85cdccbSOr Gerlitz 
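/* Offload the cached encapsulation header of @e and switch every offloaded
 * flow on @flow_list from its slow path rule to the encap (fast path) rule,
 * provided all of the flow's encap destinations have a valid neighbour.
 */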
1413232c0013SHadar Hen Zion void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
14142a1f1768SVlad Buslov 			      struct mlx5e_encap_entry *e,
14152a1f1768SVlad Buslov 			      struct list_head *flow_list)
1416232c0013SHadar Hen Zion {
14173c37745eSOr Gerlitz 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1418178f69b4SEli Cohen 	struct mlx5_esw_flow_attr *esw_attr;
14196d2a3ed0SOr Gerlitz 	struct mlx5_flow_handle *rule;
14206d2a3ed0SOr Gerlitz 	struct mlx5_flow_spec *spec;
1421232c0013SHadar Hen Zion 	struct mlx5e_tc_flow *flow;
1422232c0013SHadar Hen Zion 	int err;
1423232c0013SHadar Hen Zion 
14242b688ea5SMaor Gottlieb 	e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
142554c177caSOz Shlomo 						     e->reformat_type,
1426232c0013SHadar Hen Zion 						     e->encap_size, e->encap_header,
14272b688ea5SMaor Gottlieb 						     MLX5_FLOW_NAMESPACE_FDB);
14282b688ea5SMaor Gottlieb 	if (IS_ERR(e->pkt_reformat)) {
14292b688ea5SMaor Gottlieb 		mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %lu\n",
14302b688ea5SMaor Gottlieb 			       PTR_ERR(e->pkt_reformat));
1431232c0013SHadar Hen Zion 		return;
1432232c0013SHadar Hen Zion 	}
1433232c0013SHadar Hen Zion 	e->flags |= MLX5_ENCAP_ENTRY_VALID;
1434f6dfb4c3SHadar Hen Zion 	mlx5e_rep_queue_neigh_stats_work(priv);
1435232c0013SHadar Hen Zion 
14362a1f1768SVlad Buslov 	list_for_each_entry(flow, flow_list, tmp_list) {
14378c4dc42bSEli Britstein 		bool all_flow_encaps_valid = true;
14388c4dc42bSEli Britstein 		int i;
14398c4dc42bSEli Britstein 
144095435ad7SVlad Buslov 		if (!mlx5e_is_offloaded_flow(flow))
144195435ad7SVlad Buslov 			continue;
14423c37745eSOr Gerlitz 		esw_attr = flow->esw_attr;
14436d2a3ed0SOr Gerlitz 		spec = &esw_attr->parse_attr->spec;
14446d2a3ed0SOr Gerlitz 
14452b688ea5SMaor Gottlieb 		esw_attr->dests[flow->tmp_efi_index].pkt_reformat = e->pkt_reformat;
14462a1f1768SVlad Buslov 		esw_attr->dests[flow->tmp_efi_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
14478c4dc42bSEli Britstein 		/* Flow can be associated with multiple encap entries.
14488c4dc42bSEli Britstein 		 * Before offloading the flow, verify that all of them have
14498c4dc42bSEli Britstein 		 * a valid neighbour.
14508c4dc42bSEli Britstein 		 */
14518c4dc42bSEli Britstein 		for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
14528c4dc42bSEli Britstein 			if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP))
14538c4dc42bSEli Britstein 				continue;
14548c4dc42bSEli Britstein 			if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP_VALID)) {
14558c4dc42bSEli Britstein 				all_flow_encaps_valid = false;
14568c4dc42bSEli Britstein 				break;
14578c4dc42bSEli Britstein 			}
14588c4dc42bSEli Britstein 		}
14598c4dc42bSEli Britstein 		/* Do not offload flows with unresolved neighbors */
14608c4dc42bSEli Britstein 		if (!all_flow_encaps_valid)
14612a1f1768SVlad Buslov 			continue;
14625dbe906fSPaul Blakey 		/* update from slow path rule to encap rule */
14636d2a3ed0SOr Gerlitz 		rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, esw_attr);
14646d2a3ed0SOr Gerlitz 		if (IS_ERR(rule)) {
14656d2a3ed0SOr Gerlitz 			err = PTR_ERR(rule);
1466232c0013SHadar Hen Zion 			mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
1467232c0013SHadar Hen Zion 				       err);
14682a1f1768SVlad Buslov 			continue;
1469232c0013SHadar Hen Zion 		}
14705dbe906fSPaul Blakey 
1471178f69b4SEli Cohen 		mlx5e_tc_unoffload_from_slow_path(esw, flow);
14726d2a3ed0SOr Gerlitz 		flow->rule[0] = rule;
1473226f2ca3SVlad Buslov 		/* was unset when slow path rule removed */
1474226f2ca3SVlad Buslov 		flow_flag_set(flow, OFFLOADED);
1475232c0013SHadar Hen Zion 	}
1476232c0013SHadar Hen Zion }
1477232c0013SHadar Hen Zion 
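/* Reverse of mlx5e_tc_encap_flows_add(): move the offloaded flows back to
 * slow path rules, mark their encap destination as no longer valid, and
 * release the encap entry's packet reformat context.
 */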
1478232c0013SHadar Hen Zion void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
14792a1f1768SVlad Buslov 			      struct mlx5e_encap_entry *e,
14802a1f1768SVlad Buslov 			      struct list_head *flow_list)
1481232c0013SHadar Hen Zion {
14823c37745eSOr Gerlitz 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
14835dbe906fSPaul Blakey 	struct mlx5_flow_handle *rule;
14845dbe906fSPaul Blakey 	struct mlx5_flow_spec *spec;
1485232c0013SHadar Hen Zion 	struct mlx5e_tc_flow *flow;
14865dbe906fSPaul Blakey 	int err;
1487232c0013SHadar Hen Zion 
14882a1f1768SVlad Buslov 	list_for_each_entry(flow, flow_list, tmp_list) {
148995435ad7SVlad Buslov 		if (!mlx5e_is_offloaded_flow(flow))
149095435ad7SVlad Buslov 			continue;
14915dbe906fSPaul Blakey 		spec = &flow->esw_attr->parse_attr->spec;
14925dbe906fSPaul Blakey 
14935dbe906fSPaul Blakey 		/* update from encap rule to slow path rule */
1494178f69b4SEli Cohen 		rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec);
14958c4dc42bSEli Britstein 		/* mark the flow's encap dest as non-valid */
14962a1f1768SVlad Buslov 		flow->esw_attr->dests[flow->tmp_efi_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;
14975dbe906fSPaul Blakey 
14985dbe906fSPaul Blakey 		if (IS_ERR(rule)) {
14995dbe906fSPaul Blakey 			err = PTR_ERR(rule);
15005dbe906fSPaul Blakey 			mlx5_core_warn(priv->mdev, "Failed to update slow path (encap) flow, %d\n",
15015dbe906fSPaul Blakey 				       err);
15022a1f1768SVlad Buslov 			continue;
15035dbe906fSPaul Blakey 		}
15045dbe906fSPaul Blakey 
15056d2a3ed0SOr Gerlitz 		mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->esw_attr);
15065dbe906fSPaul Blakey 		flow->rule[0] = rule;
1507226f2ca3SVlad Buslov 		/* was unset when fast path rule removed */
1508226f2ca3SVlad Buslov 		flow_flag_set(flow, OFFLOADED);
1509232c0013SHadar Hen Zion 	}
1510232c0013SHadar Hen Zion 
151161c806daSOr Gerlitz 	/* we know that the encap is valid */
1512232c0013SHadar Hen Zion 	e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
15132b688ea5SMaor Gottlieb 	mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
1514232c0013SHadar Hen Zion }
1515232c0013SHadar Hen Zion 
1516b8aee822SMark Bloch static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
1517b8aee822SMark Bloch {
1518226f2ca3SVlad Buslov 	if (mlx5e_is_eswitch_flow(flow))
1519b8aee822SMark Bloch 		return flow->esw_attr->counter;
1520b8aee822SMark Bloch 	else
1521b8aee822SMark Bloch 		return flow->nic_attr->counter;
1522b8aee822SMark Bloch }
1523b8aee822SMark Bloch 
15242a1f1768SVlad Buslov /* Takes reference to all flows attached to encap and adds the flows to
15252a1f1768SVlad Buslov  * flow_list using 'tmp_list' list_head in mlx5e_tc_flow.
15262a1f1768SVlad Buslov  */
15272a1f1768SVlad Buslov void mlx5e_take_all_encap_flows(struct mlx5e_encap_entry *e, struct list_head *flow_list)
15282a1f1768SVlad Buslov {
15292a1f1768SVlad Buslov 	struct encap_flow_item *efi;
15302a1f1768SVlad Buslov 	struct mlx5e_tc_flow *flow;
15312a1f1768SVlad Buslov 
15322a1f1768SVlad Buslov 	list_for_each_entry(efi, &e->flows, list) {
15332a1f1768SVlad Buslov 		flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
15342a1f1768SVlad Buslov 		if (IS_ERR(mlx5e_flow_get(flow)))
15352a1f1768SVlad Buslov 			continue;
153695435ad7SVlad Buslov 		wait_for_completion(&flow->init_done);
15372a1f1768SVlad Buslov 
15382a1f1768SVlad Buslov 		flow->tmp_efi_index = efi->index;
15392a1f1768SVlad Buslov 		list_add(&flow->tmp_list, flow_list);
15402a1f1768SVlad Buslov 	}
15412a1f1768SVlad Buslov }
15422a1f1768SVlad Buslov 
15436a06c2f7SVlad Buslov /* Iterate over tmp_list of flows attached to flow_list head. */
15442a1f1768SVlad Buslov void mlx5e_put_encap_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list)
15456a06c2f7SVlad Buslov {
15466a06c2f7SVlad Buslov 	struct mlx5e_tc_flow *flow, *tmp;
15476a06c2f7SVlad Buslov 
15486a06c2f7SVlad Buslov 	list_for_each_entry_safe(flow, tmp, flow_list, tmp_list)
15496a06c2f7SVlad Buslov 		mlx5e_flow_put(priv, flow);
15506a06c2f7SVlad Buslov }
15516a06c2f7SVlad Buslov 
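/* Walk nhe->encap_list under RCU and return the next encap entry after @e
 * on which a reference could be taken, releasing the reference on the
 * starting entry. Entries that do not reach the VALID state once their
 * initialization completes are skipped.
 */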
1552ac0d9176SVlad Buslov static struct mlx5e_encap_entry *
1553ac0d9176SVlad Buslov mlx5e_get_next_valid_encap(struct mlx5e_neigh_hash_entry *nhe,
1554ac0d9176SVlad Buslov 			   struct mlx5e_encap_entry *e)
1555ac0d9176SVlad Buslov {
1556ac0d9176SVlad Buslov 	struct mlx5e_encap_entry *next = NULL;
1557ac0d9176SVlad Buslov 
1558ac0d9176SVlad Buslov retry:
1559ac0d9176SVlad Buslov 	rcu_read_lock();
1560ac0d9176SVlad Buslov 
1561ac0d9176SVlad Buslov 	/* find encap with non-zero reference counter value */
1562ac0d9176SVlad Buslov 	for (next = e ?
1563ac0d9176SVlad Buslov 		     list_next_or_null_rcu(&nhe->encap_list,
1564ac0d9176SVlad Buslov 					   &e->encap_list,
1565ac0d9176SVlad Buslov 					   struct mlx5e_encap_entry,
1566ac0d9176SVlad Buslov 					   encap_list) :
1567ac0d9176SVlad Buslov 		     list_first_or_null_rcu(&nhe->encap_list,
1568ac0d9176SVlad Buslov 					    struct mlx5e_encap_entry,
1569ac0d9176SVlad Buslov 					    encap_list);
1570ac0d9176SVlad Buslov 	     next;
1571ac0d9176SVlad Buslov 	     next = list_next_or_null_rcu(&nhe->encap_list,
1572ac0d9176SVlad Buslov 					  &next->encap_list,
1573ac0d9176SVlad Buslov 					  struct mlx5e_encap_entry,
1574ac0d9176SVlad Buslov 					  encap_list))
1575ac0d9176SVlad Buslov 		if (mlx5e_encap_take(next))
1576ac0d9176SVlad Buslov 			break;
1577ac0d9176SVlad Buslov 
1578ac0d9176SVlad Buslov 	rcu_read_unlock();
1579ac0d9176SVlad Buslov 
1580ac0d9176SVlad Buslov 	/* release starting encap */
1581ac0d9176SVlad Buslov 	if (e)
1582ac0d9176SVlad Buslov 		mlx5e_encap_put(netdev_priv(e->out_dev), e);
1583ac0d9176SVlad Buslov 	if (!next)
1584ac0d9176SVlad Buslov 		return next;
1585ac0d9176SVlad Buslov 
1586ac0d9176SVlad Buslov 	/* wait for encap to be fully initialized */
1587ac0d9176SVlad Buslov 	wait_for_completion(&next->res_ready);
1588ac0d9176SVlad Buslov 	/* continue searching if encap entry is not in valid state after completion */
1589ac0d9176SVlad Buslov 	if (!(next->flags & MLX5_ENCAP_ENTRY_VALID)) {
1590ac0d9176SVlad Buslov 		e = next;
1591ac0d9176SVlad Buslov 		goto retry;
1592ac0d9176SVlad Buslov 	}
1593ac0d9176SVlad Buslov 
1594ac0d9176SVlad Buslov 	return next;
1595ac0d9176SVlad Buslov }
1596ac0d9176SVlad Buslov 
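/* Check whether any offloaded flow using one of the neigh entry's encaps saw
 * traffic since the last report (based on the flow counter lastuse) and, if
 * so, send an event on the corresponding neighbour.
 */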
1597f6dfb4c3SHadar Hen Zion void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
1598f6dfb4c3SHadar Hen Zion {
1599f6dfb4c3SHadar Hen Zion 	struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
1600ac0d9176SVlad Buslov 	struct mlx5e_encap_entry *e = NULL;
1601f6dfb4c3SHadar Hen Zion 	struct mlx5e_tc_flow *flow;
1602f6dfb4c3SHadar Hen Zion 	struct mlx5_fc *counter;
1603f6dfb4c3SHadar Hen Zion 	struct neigh_table *tbl;
1604f6dfb4c3SHadar Hen Zion 	bool neigh_used = false;
1605f6dfb4c3SHadar Hen Zion 	struct neighbour *n;
160690bb7692SAriel Levkovich 	u64 lastuse;
1607f6dfb4c3SHadar Hen Zion 
1608f6dfb4c3SHadar Hen Zion 	if (m_neigh->family == AF_INET)
1609f6dfb4c3SHadar Hen Zion 		tbl = &arp_tbl;
1610f6dfb4c3SHadar Hen Zion #if IS_ENABLED(CONFIG_IPV6)
1611f6dfb4c3SHadar Hen Zion 	else if (m_neigh->family == AF_INET6)
16125cc3a8c6SSaeed Mahameed 		tbl = ipv6_stub->nd_tbl;
1613f6dfb4c3SHadar Hen Zion #endif
1614f6dfb4c3SHadar Hen Zion 	else
1615f6dfb4c3SHadar Hen Zion 		return;
1616f6dfb4c3SHadar Hen Zion 
1617ac0d9176SVlad Buslov 	/* mlx5e_get_next_valid_encap() releases previous encap before returning
1618ac0d9176SVlad Buslov 	 * next one.
1619ac0d9176SVlad Buslov 	 */
1620ac0d9176SVlad Buslov 	while ((e = mlx5e_get_next_valid_encap(nhe, e)) != NULL) {
16216a06c2f7SVlad Buslov 		struct mlx5e_priv *priv = netdev_priv(e->out_dev);
16225a7e5bcbSVlad Buslov 		struct encap_flow_item *efi, *tmp;
16236a06c2f7SVlad Buslov 		struct mlx5_eswitch *esw;
16246a06c2f7SVlad Buslov 		LIST_HEAD(flow_list);
1625948993f2SVlad Buslov 
16266a06c2f7SVlad Buslov 		esw = priv->mdev->priv.eswitch;
16276a06c2f7SVlad Buslov 		mutex_lock(&esw->offloads.encap_tbl_lock);
16285a7e5bcbSVlad Buslov 		list_for_each_entry_safe(efi, tmp, &e->flows, list) {
162979baaec7SEli Britstein 			flow = container_of(efi, struct mlx5e_tc_flow,
163079baaec7SEli Britstein 					    encaps[efi->index]);
16315a7e5bcbSVlad Buslov 			if (IS_ERR(mlx5e_flow_get(flow)))
16325a7e5bcbSVlad Buslov 				continue;
16336a06c2f7SVlad Buslov 			list_add(&flow->tmp_list, &flow_list);
16345a7e5bcbSVlad Buslov 
1635226f2ca3SVlad Buslov 			if (mlx5e_is_offloaded_flow(flow)) {
1636b8aee822SMark Bloch 				counter = mlx5e_tc_get_counter(flow);
163790bb7692SAriel Levkovich 				lastuse = mlx5_fc_query_lastuse(counter);
1638f6dfb4c3SHadar Hen Zion 				if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
1639f6dfb4c3SHadar Hen Zion 					neigh_used = true;
1640f6dfb4c3SHadar Hen Zion 					break;
1641f6dfb4c3SHadar Hen Zion 				}
1642f6dfb4c3SHadar Hen Zion 			}
1643f6dfb4c3SHadar Hen Zion 		}
16446a06c2f7SVlad Buslov 		mutex_unlock(&esw->offloads.encap_tbl_lock);
1645948993f2SVlad Buslov 
16466a06c2f7SVlad Buslov 		mlx5e_put_encap_flow_list(priv, &flow_list);
1647ac0d9176SVlad Buslov 		if (neigh_used) {
1648ac0d9176SVlad Buslov 			/* release current encap before breaking the loop */
16496a06c2f7SVlad Buslov 			mlx5e_encap_put(priv, e);
1650e36d4810SRoi Dayan 			break;
1651f6dfb4c3SHadar Hen Zion 		}
1652ac0d9176SVlad Buslov 	}
1653f6dfb4c3SHadar Hen Zion 
1654c786fe59SVlad Buslov 	trace_mlx5e_tc_update_neigh_used_value(nhe, neigh_used);
1655c786fe59SVlad Buslov 
1656f6dfb4c3SHadar Hen Zion 	if (neigh_used) {
1657f6dfb4c3SHadar Hen Zion 		nhe->reported_lastuse = jiffies;
1658f6dfb4c3SHadar Hen Zion 
1659f6dfb4c3SHadar Hen Zion 		/* find the relevant neigh according to the cached device and
1660f6dfb4c3SHadar Hen Zion 		 * dst ip pair
1661f6dfb4c3SHadar Hen Zion 		 */
1662f6dfb4c3SHadar Hen Zion 		n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev);
1663c7f7ba8dSRoi Dayan 		if (!n)
1664f6dfb4c3SHadar Hen Zion 			return;
1665f6dfb4c3SHadar Hen Zion 
1666f6dfb4c3SHadar Hen Zion 		neigh_event_send(n, NULL);
1667f6dfb4c3SHadar Hen Zion 		neigh_release(n);
1668f6dfb4c3SHadar Hen Zion 	}
1669f6dfb4c3SHadar Hen Zion }
1670f6dfb4c3SHadar Hen Zion 
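/* Free an encap entry that has no flows attached: detach it from the
 * representor, release its packet reformat context if it was offloaded, and
 * free the cached tunnel info and encap header.
 */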
167161086f39SVlad Buslov static void mlx5e_encap_dealloc(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
1672d85cdccbSOr Gerlitz {
1673948993f2SVlad Buslov 	WARN_ON(!list_empty(&e->flows));
16743c140dd5SVlad Buslov 
16753c140dd5SVlad Buslov 	if (e->compl_result > 0) {
1676232c0013SHadar Hen Zion 		mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
1677232c0013SHadar Hen Zion 
1678232c0013SHadar Hen Zion 		if (e->flags & MLX5_ENCAP_ENTRY_VALID)
16792b688ea5SMaor Gottlieb 			mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
16803c140dd5SVlad Buslov 	}
1681232c0013SHadar Hen Zion 
16822a4b6526SVlad Buslov 	kfree(e->tun_info);
1683232c0013SHadar Hen Zion 	kfree(e->encap_header);
1684ac0d9176SVlad Buslov 	kfree_rcu(e, rcu);
16855067b602SRoi Dayan }
1686948993f2SVlad Buslov 
168761086f39SVlad Buslov void mlx5e_encap_put(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
168861086f39SVlad Buslov {
168961086f39SVlad Buslov 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
169061086f39SVlad Buslov 
169161086f39SVlad Buslov 	if (!refcount_dec_and_mutex_lock(&e->refcnt, &esw->offloads.encap_tbl_lock))
169261086f39SVlad Buslov 		return;
169361086f39SVlad Buslov 	hash_del_rcu(&e->encap_hlist);
169461086f39SVlad Buslov 	mutex_unlock(&esw->offloads.encap_tbl_lock);
169561086f39SVlad Buslov 
169661086f39SVlad Buslov 	mlx5e_encap_dealloc(priv, e);
169761086f39SVlad Buslov }
169861086f39SVlad Buslov 
1699948993f2SVlad Buslov static void mlx5e_detach_encap(struct mlx5e_priv *priv,
1700948993f2SVlad Buslov 			       struct mlx5e_tc_flow *flow, int out_index)
1701948993f2SVlad Buslov {
170261086f39SVlad Buslov 	struct mlx5e_encap_entry *e = flow->encaps[out_index].e;
170361086f39SVlad Buslov 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
170461086f39SVlad Buslov 
1705948993f2SVlad Buslov 	/* flow wasn't fully initialized */
170661086f39SVlad Buslov 	if (!e)
1707948993f2SVlad Buslov 		return;
1708948993f2SVlad Buslov 
170961086f39SVlad Buslov 	mutex_lock(&esw->offloads.encap_tbl_lock);
1710948993f2SVlad Buslov 	list_del(&flow->encaps[out_index].list);
1711948993f2SVlad Buslov 	flow->encaps[out_index].e = NULL;
171261086f39SVlad Buslov 	if (!refcount_dec_and_test(&e->refcnt)) {
171361086f39SVlad Buslov 		mutex_unlock(&esw->offloads.encap_tbl_lock);
171461086f39SVlad Buslov 		return;
171561086f39SVlad Buslov 	}
171661086f39SVlad Buslov 	hash_del_rcu(&e->encap_hlist);
171761086f39SVlad Buslov 	mutex_unlock(&esw->offloads.encap_tbl_lock);
171861086f39SVlad Buslov 
171961086f39SVlad Buslov 	mlx5e_encap_dealloc(priv, e);
17205067b602SRoi Dayan }
17215067b602SRoi Dayan 
172204de7ddaSRoi Dayan static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
172304de7ddaSRoi Dayan {
172404de7ddaSRoi Dayan 	struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;
172504de7ddaSRoi Dayan 
1726226f2ca3SVlad Buslov 	if (!flow_flag_test(flow, ESWITCH) ||
1727226f2ca3SVlad Buslov 	    !flow_flag_test(flow, DUP))
172804de7ddaSRoi Dayan 		return;
172904de7ddaSRoi Dayan 
173004de7ddaSRoi Dayan 	mutex_lock(&esw->offloads.peer_mutex);
173104de7ddaSRoi Dayan 	list_del(&flow->peer);
173204de7ddaSRoi Dayan 	mutex_unlock(&esw->offloads.peer_mutex);
173304de7ddaSRoi Dayan 
1734226f2ca3SVlad Buslov 	flow_flag_clear(flow, DUP);
173504de7ddaSRoi Dayan 
1736eb252c3aSRoi Dayan 	if (refcount_dec_and_test(&flow->peer_flow->refcnt)) {
173704de7ddaSRoi Dayan 		mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow);
1738a23dae79SRoi Dayan 		kfree(flow->peer_flow);
1739eb252c3aSRoi Dayan 	}
1740eb252c3aSRoi Dayan 
174104de7ddaSRoi Dayan 	flow->peer_flow = NULL;
174204de7ddaSRoi Dayan }
174304de7ddaSRoi Dayan 
174404de7ddaSRoi Dayan static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
174504de7ddaSRoi Dayan {
174604de7ddaSRoi Dayan 	struct mlx5_core_dev *dev = flow->priv->mdev;
174704de7ddaSRoi Dayan 	struct mlx5_devcom *devcom = dev->priv.devcom;
174804de7ddaSRoi Dayan 	struct mlx5_eswitch *peer_esw;
174904de7ddaSRoi Dayan 
175004de7ddaSRoi Dayan 	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
175104de7ddaSRoi Dayan 	if (!peer_esw)
175204de7ddaSRoi Dayan 		return;
175304de7ddaSRoi Dayan 
175404de7ddaSRoi Dayan 	__mlx5e_tc_del_fdb_peer_flow(flow);
175504de7ddaSRoi Dayan 	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
175604de7ddaSRoi Dayan }
175704de7ddaSRoi Dayan 
1758e8f887acSAmir Vadai static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
1759961e8979SRoi Dayan 			      struct mlx5e_tc_flow *flow)
1760e8f887acSAmir Vadai {
1761226f2ca3SVlad Buslov 	if (mlx5e_is_eswitch_flow(flow)) {
176204de7ddaSRoi Dayan 		mlx5e_tc_del_fdb_peer_flow(flow);
1763d85cdccbSOr Gerlitz 		mlx5e_tc_del_fdb_flow(priv, flow);
176404de7ddaSRoi Dayan 	} else {
1765d85cdccbSOr Gerlitz 		mlx5e_tc_del_nic_flow(priv, flow);
1766e8f887acSAmir Vadai 	}
176704de7ddaSRoi Dayan }
1768e8f887acSAmir Vadai 
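/* Return true if the tc rule contains a goto (forward to chain) action. */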
17690a7fcb78SPaul Blakey static bool flow_has_tc_fwd_action(struct flow_cls_offload *f)
1770bbd00f7eSHadar Hen Zion {
1771f9e30088SPablo Neira Ayuso 	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
17720a7fcb78SPaul Blakey 	struct flow_action *flow_action = &rule->action;
17730a7fcb78SPaul Blakey 	const struct flow_action_entry *act;
17740a7fcb78SPaul Blakey 	int i;
1775bbd00f7eSHadar Hen Zion 
17760a7fcb78SPaul Blakey 	flow_action_for_each(i, act, flow_action) {
17770a7fcb78SPaul Blakey 		switch (act->id) {
17780a7fcb78SPaul Blakey 		case FLOW_ACTION_GOTO:
17790a7fcb78SPaul Blakey 			return true;
17800a7fcb78SPaul Blakey 		default:
17810a7fcb78SPaul Blakey 			continue;
1782fe1587a7SDmytro Linkin 		}
17832e72eb43SOr Gerlitz 	}
1784bbd00f7eSHadar Hen Zion 
17850a7fcb78SPaul Blakey 	return false;
17860a7fcb78SPaul Blakey }
1787bcef735cSOr Gerlitz 
17880a7fcb78SPaul Blakey static int
17890a7fcb78SPaul Blakey enc_opts_is_dont_care_or_full_match(struct mlx5e_priv *priv,
17900a7fcb78SPaul Blakey 				    struct flow_dissector_key_enc_opts *opts,
17910a7fcb78SPaul Blakey 				    struct netlink_ext_ack *extack,
17920a7fcb78SPaul Blakey 				    bool *dont_care)
17930a7fcb78SPaul Blakey {
17940a7fcb78SPaul Blakey 	struct geneve_opt *opt;
17950a7fcb78SPaul Blakey 	int off = 0;
1796bcef735cSOr Gerlitz 
17970a7fcb78SPaul Blakey 	*dont_care = true;
1798bcef735cSOr Gerlitz 
17990a7fcb78SPaul Blakey 	while (opts->len > off) {
18000a7fcb78SPaul Blakey 		opt = (struct geneve_opt *)&opts->data[off];
1801e98bedf5SEli Britstein 
18020a7fcb78SPaul Blakey 		if (!(*dont_care) || opt->opt_class || opt->type ||
18030a7fcb78SPaul Blakey 		    memchr_inv(opt->opt_data, 0, opt->length * 4)) {
18040a7fcb78SPaul Blakey 			*dont_care = false;
18050a7fcb78SPaul Blakey 
18060a7fcb78SPaul Blakey 			if (opt->opt_class != U16_MAX ||
1807d7a42ad0SRoi Dayan 			    opt->type != U8_MAX) {
18080a7fcb78SPaul Blakey 				NL_SET_ERR_MSG(extack,
18090a7fcb78SPaul Blakey 					       "Partial match of tunnel options in chain > 0 isn't supported");
18100a7fcb78SPaul Blakey 				netdev_warn(priv->netdev,
18110a7fcb78SPaul Blakey 					    "Partial match of tunnel options in chain > 0 isn't supported");
1812e98bedf5SEli Britstein 				return -EOPNOTSUPP;
1813e98bedf5SEli Britstein 			}
1814bcef735cSOr Gerlitz 		}
1815bcef735cSOr Gerlitz 
18160a7fcb78SPaul Blakey 		off += sizeof(struct geneve_opt) + opt->length * 4;
1817bbd00f7eSHadar Hen Zion 	}
1818bbd00f7eSHadar Hen Zion 
1819bbd00f7eSHadar Hen Zion 	return 0;
1820bbd00f7eSHadar Hen Zion }
1821bbd00f7eSHadar Hen Zion 
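/* Copy the dissector key identified by diss_key from the tc rule's match
 * into dst, sized according to the destination type.
 */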
18220a7fcb78SPaul Blakey #define COPY_DISSECTOR(rule, diss_key, dst)\
18230a7fcb78SPaul Blakey ({ \
18240a7fcb78SPaul Blakey 	struct flow_rule *__rule = (rule);\
18250a7fcb78SPaul Blakey 	typeof(dst) __dst = dst;\
18260a7fcb78SPaul Blakey \
18270a7fcb78SPaul Blakey 	memcpy(__dst,\
18280a7fcb78SPaul Blakey 	       skb_flow_dissector_target(__rule->match.dissector,\
18290a7fcb78SPaul Blakey 					 diss_key,\
18300a7fcb78SPaul Blakey 					 __rule->match.key),\
18310a7fcb78SPaul Blakey 	       sizeof(*__dst));\
18320a7fcb78SPaul Blakey })
18330a7fcb78SPaul Blakey 
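/* Allocate a unique tunnel id for the flow: the outer tunnel match key (and
 * the tunnel options, unless they are don't care) are mapped to ids which
 * are combined into flow->tunnel_id. On chain > 0 the id is matched via
 * TUNNEL_TO_REG; on chain 0 it is written to that register with a
 * modify-header action instead.
 */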
18340a7fcb78SPaul Blakey static int mlx5e_get_flow_tunnel_id(struct mlx5e_priv *priv,
18350a7fcb78SPaul Blakey 				    struct mlx5e_tc_flow *flow,
18360a7fcb78SPaul Blakey 				    struct flow_cls_offload *f,
18370a7fcb78SPaul Blakey 				    struct net_device *filter_dev)
18388377629eSEli Britstein {
18390a7fcb78SPaul Blakey 	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
18400a7fcb78SPaul Blakey 	struct netlink_ext_ack *extack = f->common.extack;
18410a7fcb78SPaul Blakey 	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
18420a7fcb78SPaul Blakey 	struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
18430a7fcb78SPaul Blakey 	struct flow_match_enc_opts enc_opts_match;
1844d7a42ad0SRoi Dayan 	struct tunnel_match_enc_opts tun_enc_opts;
18450a7fcb78SPaul Blakey 	struct mlx5_rep_uplink_priv *uplink_priv;
18460a7fcb78SPaul Blakey 	struct mlx5e_rep_priv *uplink_rpriv;
18470a7fcb78SPaul Blakey 	struct tunnel_match_key tunnel_key;
18480a7fcb78SPaul Blakey 	bool enc_opts_is_dont_care = true;
18490a7fcb78SPaul Blakey 	u32 tun_id, enc_opts_id = 0;
18500a7fcb78SPaul Blakey 	struct mlx5_eswitch *esw;
18510a7fcb78SPaul Blakey 	u32 value, mask;
18520a7fcb78SPaul Blakey 	int err;
18530a7fcb78SPaul Blakey 
18540a7fcb78SPaul Blakey 	esw = priv->mdev->priv.eswitch;
18550a7fcb78SPaul Blakey 	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
18560a7fcb78SPaul Blakey 	uplink_priv = &uplink_rpriv->uplink_priv;
18570a7fcb78SPaul Blakey 
18580a7fcb78SPaul Blakey 	memset(&tunnel_key, 0, sizeof(tunnel_key));
18590a7fcb78SPaul Blakey 	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL,
18600a7fcb78SPaul Blakey 		       &tunnel_key.enc_control);
18610a7fcb78SPaul Blakey 	if (tunnel_key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
18620a7fcb78SPaul Blakey 		COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
18630a7fcb78SPaul Blakey 			       &tunnel_key.enc_ipv4);
18640a7fcb78SPaul Blakey 	else
18650a7fcb78SPaul Blakey 		COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
18660a7fcb78SPaul Blakey 			       &tunnel_key.enc_ipv6);
18670a7fcb78SPaul Blakey 	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IP, &tunnel_key.enc_ip);
18680a7fcb78SPaul Blakey 	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_PORTS,
18690a7fcb78SPaul Blakey 		       &tunnel_key.enc_tp);
18700a7fcb78SPaul Blakey 	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_KEYID,
18710a7fcb78SPaul Blakey 		       &tunnel_key.enc_key_id);
18720a7fcb78SPaul Blakey 	tunnel_key.filter_ifindex = filter_dev->ifindex;
18730a7fcb78SPaul Blakey 
18740a7fcb78SPaul Blakey 	err = mapping_add(uplink_priv->tunnel_mapping, &tunnel_key, &tun_id);
18750a7fcb78SPaul Blakey 	if (err)
18760a7fcb78SPaul Blakey 		return err;
18770a7fcb78SPaul Blakey 
18780a7fcb78SPaul Blakey 	flow_rule_match_enc_opts(rule, &enc_opts_match);
18790a7fcb78SPaul Blakey 	err = enc_opts_is_dont_care_or_full_match(priv,
18800a7fcb78SPaul Blakey 						  enc_opts_match.mask,
18810a7fcb78SPaul Blakey 						  extack,
18820a7fcb78SPaul Blakey 						  &enc_opts_is_dont_care);
18830a7fcb78SPaul Blakey 	if (err)
18840a7fcb78SPaul Blakey 		goto err_enc_opts;
18850a7fcb78SPaul Blakey 
18860a7fcb78SPaul Blakey 	if (!enc_opts_is_dont_care) {
1887d7a42ad0SRoi Dayan 		memset(&tun_enc_opts, 0, sizeof(tun_enc_opts));
1888d7a42ad0SRoi Dayan 		memcpy(&tun_enc_opts.key, enc_opts_match.key,
1889d7a42ad0SRoi Dayan 		       sizeof(*enc_opts_match.key));
1890d7a42ad0SRoi Dayan 		memcpy(&tun_enc_opts.mask, enc_opts_match.mask,
1891d7a42ad0SRoi Dayan 		       sizeof(*enc_opts_match.mask));
1892d7a42ad0SRoi Dayan 
18930a7fcb78SPaul Blakey 		err = mapping_add(uplink_priv->tunnel_enc_opts_mapping,
1894d7a42ad0SRoi Dayan 				  &tun_enc_opts, &enc_opts_id);
18950a7fcb78SPaul Blakey 		if (err)
18960a7fcb78SPaul Blakey 			goto err_enc_opts;
18970a7fcb78SPaul Blakey 	}
18980a7fcb78SPaul Blakey 
18990a7fcb78SPaul Blakey 	value = tun_id << ENC_OPTS_BITS | enc_opts_id;
19000a7fcb78SPaul Blakey 	mask = enc_opts_id ? TUNNEL_ID_MASK :
19010a7fcb78SPaul Blakey 			     (TUNNEL_ID_MASK & ~ENC_OPTS_BITS_MASK);
19020a7fcb78SPaul Blakey 
19030a7fcb78SPaul Blakey 	if (attr->chain) {
19040a7fcb78SPaul Blakey 		mlx5e_tc_match_to_reg_match(&attr->parse_attr->spec,
19050a7fcb78SPaul Blakey 					    TUNNEL_TO_REG, value, mask);
19060a7fcb78SPaul Blakey 	} else {
19070a7fcb78SPaul Blakey 		mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
19080a7fcb78SPaul Blakey 		err = mlx5e_tc_match_to_reg_set(priv->mdev,
19090a7fcb78SPaul Blakey 						mod_hdr_acts,
19100a7fcb78SPaul Blakey 						TUNNEL_TO_REG, value);
19110a7fcb78SPaul Blakey 		if (err)
19120a7fcb78SPaul Blakey 			goto err_set;
19130a7fcb78SPaul Blakey 
19140a7fcb78SPaul Blakey 		attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
19150a7fcb78SPaul Blakey 	}
19160a7fcb78SPaul Blakey 
19170a7fcb78SPaul Blakey 	flow->tunnel_id = value;
19180a7fcb78SPaul Blakey 	return 0;
19190a7fcb78SPaul Blakey 
19200a7fcb78SPaul Blakey err_set:
19210a7fcb78SPaul Blakey 	if (enc_opts_id)
19220a7fcb78SPaul Blakey 		mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
19230a7fcb78SPaul Blakey 			       enc_opts_id);
19240a7fcb78SPaul Blakey err_enc_opts:
19250a7fcb78SPaul Blakey 	mapping_remove(uplink_priv->tunnel_mapping, tun_id);
19260a7fcb78SPaul Blakey 	return err;
19270a7fcb78SPaul Blakey }
19280a7fcb78SPaul Blakey 
19290a7fcb78SPaul Blakey static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow)
19300a7fcb78SPaul Blakey {
19310a7fcb78SPaul Blakey 	u32 enc_opts_id = flow->tunnel_id & ENC_OPTS_BITS_MASK;
19320a7fcb78SPaul Blakey 	u32 tun_id = flow->tunnel_id >> ENC_OPTS_BITS;
19330a7fcb78SPaul Blakey 	struct mlx5_rep_uplink_priv *uplink_priv;
19340a7fcb78SPaul Blakey 	struct mlx5e_rep_priv *uplink_rpriv;
19350a7fcb78SPaul Blakey 	struct mlx5_eswitch *esw;
19360a7fcb78SPaul Blakey 
19370a7fcb78SPaul Blakey 	esw = flow->priv->mdev->priv.eswitch;
19380a7fcb78SPaul Blakey 	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
19390a7fcb78SPaul Blakey 	uplink_priv = &uplink_rpriv->uplink_priv;
19400a7fcb78SPaul Blakey 
19410a7fcb78SPaul Blakey 	if (tun_id)
19420a7fcb78SPaul Blakey 		mapping_remove(uplink_priv->tunnel_mapping, tun_id);
19430a7fcb78SPaul Blakey 	if (enc_opts_id)
19440a7fcb78SPaul Blakey 		mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
19450a7fcb78SPaul Blakey 			       enc_opts_id);
19460a7fcb78SPaul Blakey }
19470a7fcb78SPaul Blakey 
19484c3844d9SPaul Blakey u32 mlx5e_tc_get_flow_tun_id(struct mlx5e_tc_flow *flow)
19494c3844d9SPaul Blakey {
19504c3844d9SPaul Blakey 	return flow->tunnel_id;
19514c3844d9SPaul Blakey }
19524c3844d9SPaul Blakey 
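/* Parse the tunnel part of an eswitch flow match. On chain 0 the tunnel
 * headers are matched directly and a decap action is added; on higher
 * chains the tunnel id register is matched instead. When a goto action
 * needs the tunnel info in a later chain, the tunnel id mapping is set up
 * as well.
 */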
19530a7fcb78SPaul Blakey static int parse_tunnel_attr(struct mlx5e_priv *priv,
19540a7fcb78SPaul Blakey 			     struct mlx5e_tc_flow *flow,
19550a7fcb78SPaul Blakey 			     struct mlx5_flow_spec *spec,
19560a7fcb78SPaul Blakey 			     struct flow_cls_offload *f,
19570a7fcb78SPaul Blakey 			     struct net_device *filter_dev,
19580a7fcb78SPaul Blakey 			     u8 *match_level,
19590a7fcb78SPaul Blakey 			     bool *match_inner)
19600a7fcb78SPaul Blakey {
19610a7fcb78SPaul Blakey 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
19620a7fcb78SPaul Blakey 	struct netlink_ext_ack *extack = f->common.extack;
19630a7fcb78SPaul Blakey 	bool needs_mapping, sets_mapping;
19640a7fcb78SPaul Blakey 	int err;
19650a7fcb78SPaul Blakey 
19660a7fcb78SPaul Blakey 	if (!mlx5e_is_eswitch_flow(flow))
19670a7fcb78SPaul Blakey 		return -EOPNOTSUPP;
19680a7fcb78SPaul Blakey 
19690a7fcb78SPaul Blakey 	needs_mapping = !!flow->esw_attr->chain;
19700a7fcb78SPaul Blakey 	sets_mapping = !flow->esw_attr->chain && flow_has_tc_fwd_action(f);
19710a7fcb78SPaul Blakey 	*match_inner = !needs_mapping;
19720a7fcb78SPaul Blakey 
19730a7fcb78SPaul Blakey 	if ((needs_mapping || sets_mapping) &&
1974636bb968SPaul Blakey 	    !mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
19750a7fcb78SPaul Blakey 		NL_SET_ERR_MSG(extack,
1976636bb968SPaul Blakey 			       "Chains on tunnel devices aren't supported without register loopback support");
19770a7fcb78SPaul Blakey 		netdev_warn(priv->netdev,
1978636bb968SPaul Blakey 			    "Chains on tunnel devices aren't supported without register loopback support");
19790a7fcb78SPaul Blakey 		return -EOPNOTSUPP;
19800a7fcb78SPaul Blakey 	}
19810a7fcb78SPaul Blakey 
19820a7fcb78SPaul Blakey 	if (!flow->esw_attr->chain) {
19830a7fcb78SPaul Blakey 		err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
19840a7fcb78SPaul Blakey 					 match_level);
19850a7fcb78SPaul Blakey 		if (err) {
19860a7fcb78SPaul Blakey 			NL_SET_ERR_MSG_MOD(extack,
19870a7fcb78SPaul Blakey 					   "Failed to parse tunnel attributes");
19880a7fcb78SPaul Blakey 			netdev_warn(priv->netdev,
19890a7fcb78SPaul Blakey 				    "Failed to parse tunnel attributes");
19900a7fcb78SPaul Blakey 			return err;
19910a7fcb78SPaul Blakey 		}
19920a7fcb78SPaul Blakey 
19930a7fcb78SPaul Blakey 		flow->esw_attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
19940a7fcb78SPaul Blakey 	}
19950a7fcb78SPaul Blakey 
19960a7fcb78SPaul Blakey 	if (!needs_mapping && !sets_mapping)
19970a7fcb78SPaul Blakey 		return 0;
19980a7fcb78SPaul Blakey 
19990a7fcb78SPaul Blakey 	return mlx5e_get_flow_tunnel_id(priv, flow, f, filter_dev);
20000a7fcb78SPaul Blakey }
20010a7fcb78SPaul Blakey 
20020a7fcb78SPaul Blakey static void *get_match_inner_headers_criteria(struct mlx5_flow_spec *spec)
20030a7fcb78SPaul Blakey {
20040a7fcb78SPaul Blakey 	return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
20050a7fcb78SPaul Blakey 			    inner_headers);
20060a7fcb78SPaul Blakey }
20070a7fcb78SPaul Blakey 
20080a7fcb78SPaul Blakey static void *get_match_inner_headers_value(struct mlx5_flow_spec *spec)
20090a7fcb78SPaul Blakey {
20100a7fcb78SPaul Blakey 	return MLX5_ADDR_OF(fte_match_param, spec->match_value,
20110a7fcb78SPaul Blakey 			    inner_headers);
20120a7fcb78SPaul Blakey }
20130a7fcb78SPaul Blakey 
20140a7fcb78SPaul Blakey static void *get_match_outer_headers_criteria(struct mlx5_flow_spec *spec)
20150a7fcb78SPaul Blakey {
20160a7fcb78SPaul Blakey 	return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
20170a7fcb78SPaul Blakey 			    outer_headers);
20180a7fcb78SPaul Blakey }
20190a7fcb78SPaul Blakey 
20200a7fcb78SPaul Blakey static void *get_match_outer_headers_value(struct mlx5_flow_spec *spec)
20210a7fcb78SPaul Blakey {
20220a7fcb78SPaul Blakey 	return MLX5_ADDR_OF(fte_match_param, spec->match_value,
20238377629eSEli Britstein 			    outer_headers);
20248377629eSEli Britstein }
20258377629eSEli Britstein 
20268377629eSEli Britstein static void *get_match_headers_value(u32 flags,
20278377629eSEli Britstein 				     struct mlx5_flow_spec *spec)
20288377629eSEli Britstein {
20298377629eSEli Britstein 	return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
20300a7fcb78SPaul Blakey 		get_match_inner_headers_value(spec) :
20310a7fcb78SPaul Blakey 		get_match_outer_headers_value(spec);
20320a7fcb78SPaul Blakey }
20330a7fcb78SPaul Blakey 
20340a7fcb78SPaul Blakey static void *get_match_headers_criteria(u32 flags,
20350a7fcb78SPaul Blakey 					struct mlx5_flow_spec *spec)
20360a7fcb78SPaul Blakey {
20370a7fcb78SPaul Blakey 	return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
20380a7fcb78SPaul Blakey 		get_match_inner_headers_criteria(spec) :
20390a7fcb78SPaul Blakey 		get_match_outer_headers_criteria(spec);
20408377629eSEli Britstein }
20418377629eSEli Britstein 
20426d65bc64Swenxu static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
20436d65bc64Swenxu 				   struct flow_cls_offload *f)
20446d65bc64Swenxu {
20456d65bc64Swenxu 	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
20466d65bc64Swenxu 	struct netlink_ext_ack *extack = f->common.extack;
20476d65bc64Swenxu 	struct net_device *ingress_dev;
20486d65bc64Swenxu 	struct flow_match_meta match;
20496d65bc64Swenxu 
20506d65bc64Swenxu 	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
20516d65bc64Swenxu 		return 0;
20526d65bc64Swenxu 
20536d65bc64Swenxu 	flow_rule_match_meta(rule, &match);
20546d65bc64Swenxu 	if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
20556d65bc64Swenxu 		NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask");
20566d65bc64Swenxu 		return -EINVAL;
20576d65bc64Swenxu 	}
20586d65bc64Swenxu 
20596d65bc64Swenxu 	ingress_dev = __dev_get_by_index(dev_net(filter_dev),
20606d65bc64Swenxu 					 match.key->ingress_ifindex);
20616d65bc64Swenxu 	if (!ingress_dev) {
20626d65bc64Swenxu 		NL_SET_ERR_MSG_MOD(extack,
20636d65bc64Swenxu 				   "Can't find the ingress port to match on");
20646d65bc64Swenxu 		return -EINVAL;
20656d65bc64Swenxu 	}
20666d65bc64Swenxu 
20676d65bc64Swenxu 	if (ingress_dev != filter_dev) {
20686d65bc64Swenxu 		NL_SET_ERR_MSG_MOD(extack,
20696d65bc64Swenxu 				   "Can't match on the ingress filter port");
20706d65bc64Swenxu 		return -EINVAL;
20716d65bc64Swenxu 	}
20726d65bc64Swenxu 
20736d65bc64Swenxu 	return 0;
20746d65bc64Swenxu }
20756d65bc64Swenxu 
207672046a91SEli Cohen static bool skip_key_basic(struct net_device *filter_dev,
207772046a91SEli Cohen 			   struct flow_cls_offload *f)
207872046a91SEli Cohen {
207972046a91SEli Cohen 	/* When doing mpls over udp decap, the user needs to provide
208072046a91SEli Cohen 	 * MPLS_UC as the protocol in order to be able to match on mpls
208172046a91SEli Cohen 	 * label fields. However, the actual ethertype is IP, so we want to
208272046a91SEli Cohen 	 * avoid matching on it; otherwise we'll fail the match.
208372046a91SEli Cohen 	 */
208472046a91SEli Cohen 	if (netif_is_bareudp(filter_dev) && f->common.chain_index == 0)
208572046a91SEli Cohen 		return true;
208672046a91SEli Cohen 
208772046a91SEli Cohen 	return false;
208872046a91SEli Cohen }
208972046a91SEli Cohen 
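/* Translate a tc flower match into the mlx5 flow spec: tunnel matches are
 * handled first (possibly redirecting the header pointers to the inner
 * headers), then each supported dissector key is copied into the header and
 * misc match criteria/values, recording the match level reached for the
 * inner and outer parts.
 */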
2090de0af0bfSRoi Dayan static int __parse_cls_flower(struct mlx5e_priv *priv,
20910a7fcb78SPaul Blakey 			      struct mlx5e_tc_flow *flow,
2092de0af0bfSRoi Dayan 			      struct mlx5_flow_spec *spec,
2093f9e30088SPablo Neira Ayuso 			      struct flow_cls_offload *f,
209454c177caSOz Shlomo 			      struct net_device *filter_dev,
209593b3586eSHuy Nguyen 			      u8 *inner_match_level, u8 *outer_match_level)
2096e3a2b7edSAmir Vadai {
2097e98bedf5SEli Britstein 	struct netlink_ext_ack *extack = f->common.extack;
2098c5bb1730SMaor Gottlieb 	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2099c5bb1730SMaor Gottlieb 				       outer_headers);
2100c5bb1730SMaor Gottlieb 	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
2101c5bb1730SMaor Gottlieb 				       outer_headers);
2102699e96ddSJianbo Liu 	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2103699e96ddSJianbo Liu 				    misc_parameters);
2104699e96ddSJianbo Liu 	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
2105699e96ddSJianbo Liu 				    misc_parameters);
2106f9e30088SPablo Neira Ayuso 	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
21078f256622SPablo Neira Ayuso 	struct flow_dissector *dissector = rule->match.dissector;
2108e3a2b7edSAmir Vadai 	u16 addr_type = 0;
2109e3a2b7edSAmir Vadai 	u8 ip_proto = 0;
211093b3586eSHuy Nguyen 	u8 *match_level;
21116d65bc64Swenxu 	int err;
2112e3a2b7edSAmir Vadai 
211393b3586eSHuy Nguyen 	match_level = outer_match_level;
2114de0af0bfSRoi Dayan 
21158f256622SPablo Neira Ayuso 	if (dissector->used_keys &
21163d144578SVlad Buslov 	    ~(BIT(FLOW_DISSECTOR_KEY_META) |
21173d144578SVlad Buslov 	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
2118e3a2b7edSAmir Vadai 	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
2119e3a2b7edSAmir Vadai 	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
2120095b6cfdSOr Gerlitz 	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
2121699e96ddSJianbo Liu 	      BIT(FLOW_DISSECTOR_KEY_CVLAN) |
2122e3a2b7edSAmir Vadai 	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
2123e3a2b7edSAmir Vadai 	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
2124bbd00f7eSHadar Hen Zion 	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
2125bbd00f7eSHadar Hen Zion 	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
2126bbd00f7eSHadar Hen Zion 	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
2127bbd00f7eSHadar Hen Zion 	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
2128bbd00f7eSHadar Hen Zion 	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS)	|
2129e77834ecSOr Gerlitz 	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
2130fd7da28bSOr Gerlitz 	      BIT(FLOW_DISSECTOR_KEY_TCP) |
2131bcef735cSOr Gerlitz 	      BIT(FLOW_DISSECTOR_KEY_IP)  |
21324c3844d9SPaul Blakey 	      BIT(FLOW_DISSECTOR_KEY_CT) |
21339272e3dfSYevgeny Kliteynik 	      BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
213472046a91SEli Cohen 	      BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
213572046a91SEli Cohen 	      BIT(FLOW_DISSECTOR_KEY_MPLS))) {
2136e98bedf5SEli Britstein 		NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
2137e3a2b7edSAmir Vadai 		netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
21388f256622SPablo Neira Ayuso 			    dissector->used_keys);
2139e3a2b7edSAmir Vadai 		return -EOPNOTSUPP;
2140e3a2b7edSAmir Vadai 	}
2141e3a2b7edSAmir Vadai 
2142075973c7SVlad Buslov 	if (mlx5e_get_tc_tun(filter_dev)) {
21430a7fcb78SPaul Blakey 		bool match_inner = false;
2144bbd00f7eSHadar Hen Zion 
21450a7fcb78SPaul Blakey 		err = parse_tunnel_attr(priv, flow, spec, f, filter_dev,
21460a7fcb78SPaul Blakey 					outer_match_level, &match_inner);
21470a7fcb78SPaul Blakey 		if (err)
21480a7fcb78SPaul Blakey 			return err;
21490a7fcb78SPaul Blakey 
21500a7fcb78SPaul Blakey 		if (match_inner) {
21510a7fcb78SPaul Blakey 			/* Header pointers should point to the inner headers
21520a7fcb78SPaul Blakey 			 * if the packet was already decapsulated.
21530a7fcb78SPaul Blakey 			 * Outer headers are set by parse_tunnel_attr.
2154bbd00f7eSHadar Hen Zion 			 */
215593b3586eSHuy Nguyen 			match_level = inner_match_level;
21560a7fcb78SPaul Blakey 			headers_c = get_match_inner_headers_criteria(spec);
21570a7fcb78SPaul Blakey 			headers_v = get_match_inner_headers_value(spec);
21580a7fcb78SPaul Blakey 		}
2159bbd00f7eSHadar Hen Zion 	}
2160bbd00f7eSHadar Hen Zion 
21616d65bc64Swenxu 	err = mlx5e_flower_parse_meta(filter_dev, f);
21626d65bc64Swenxu 	if (err)
21636d65bc64Swenxu 		return err;
21646d65bc64Swenxu 
216572046a91SEli Cohen 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC) &&
216672046a91SEli Cohen 	    !skip_key_basic(filter_dev, f)) {
21678f256622SPablo Neira Ayuso 		struct flow_match_basic match;
2168e3a2b7edSAmir Vadai 
21698f256622SPablo Neira Ayuso 		flow_rule_match_basic(rule, &match);
21708f256622SPablo Neira Ayuso 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
21718f256622SPablo Neira Ayuso 			 ntohs(match.mask->n_proto));
21728f256622SPablo Neira Ayuso 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
21738f256622SPablo Neira Ayuso 			 ntohs(match.key->n_proto));
21748f256622SPablo Neira Ayuso 
21758f256622SPablo Neira Ayuso 		if (match.mask->n_proto)
2176d708f902SOr Gerlitz 			*match_level = MLX5_MATCH_L2;
2177e3a2b7edSAmir Vadai 	}
217835a605dbSEli Britstein 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN) ||
217935a605dbSEli Britstein 	    is_vlan_dev(filter_dev)) {
218035a605dbSEli Britstein 		struct flow_dissector_key_vlan filter_dev_mask;
218135a605dbSEli Britstein 		struct flow_dissector_key_vlan filter_dev_key;
21828f256622SPablo Neira Ayuso 		struct flow_match_vlan match;
21838f256622SPablo Neira Ayuso 
218435a605dbSEli Britstein 		if (is_vlan_dev(filter_dev)) {
218535a605dbSEli Britstein 			match.key = &filter_dev_key;
218635a605dbSEli Britstein 			match.key->vlan_id = vlan_dev_vlan_id(filter_dev);
218735a605dbSEli Britstein 			match.key->vlan_tpid = vlan_dev_vlan_proto(filter_dev);
218835a605dbSEli Britstein 			match.key->vlan_priority = 0;
218935a605dbSEli Britstein 			match.mask = &filter_dev_mask;
219035a605dbSEli Britstein 			memset(match.mask, 0xff, sizeof(*match.mask));
219135a605dbSEli Britstein 			match.mask->vlan_priority = 0;
219235a605dbSEli Britstein 		} else {
21938f256622SPablo Neira Ayuso 			flow_rule_match_vlan(rule, &match);
219435a605dbSEli Britstein 		}
21958f256622SPablo Neira Ayuso 		if (match.mask->vlan_id ||
21968f256622SPablo Neira Ayuso 		    match.mask->vlan_priority ||
21978f256622SPablo Neira Ayuso 		    match.mask->vlan_tpid) {
21988f256622SPablo Neira Ayuso 			if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
2199699e96ddSJianbo Liu 				MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2200699e96ddSJianbo Liu 					 svlan_tag, 1);
2201699e96ddSJianbo Liu 				MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2202699e96ddSJianbo Liu 					 svlan_tag, 1);
2203699e96ddSJianbo Liu 			} else {
2204699e96ddSJianbo Liu 				MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2205699e96ddSJianbo Liu 					 cvlan_tag, 1);
2206699e96ddSJianbo Liu 				MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2207699e96ddSJianbo Liu 					 cvlan_tag, 1);
2208699e96ddSJianbo Liu 			}
2209095b6cfdSOr Gerlitz 
22108f256622SPablo Neira Ayuso 			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid,
22118f256622SPablo Neira Ayuso 				 match.mask->vlan_id);
22128f256622SPablo Neira Ayuso 			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid,
22138f256622SPablo Neira Ayuso 				 match.key->vlan_id);
2214358d79a4SOr Gerlitz 
22158f256622SPablo Neira Ayuso 			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio,
22168f256622SPablo Neira Ayuso 				 match.mask->vlan_priority);
22178f256622SPablo Neira Ayuso 			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio,
22188f256622SPablo Neira Ayuso 				 match.key->vlan_priority);
221954782900SOr Gerlitz 
2220d708f902SOr Gerlitz 			*match_level = MLX5_MATCH_L2;
2221095b6cfdSOr Gerlitz 		}
2222d3a80bb5SOr Gerlitz 	} else if (*match_level != MLX5_MATCH_NONE) {
2223fc603294SMark Bloch 		/* cvlan_tag enabled in the match criteria and
2224fc603294SMark Bloch 		 * disabled in the match value means neither an S-tag
2225fc603294SMark Bloch 		 * nor a C-tag is present (the packet is untagged)
2226fc603294SMark Bloch 		 */
2227cee26487SJianbo Liu 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
2228d3a80bb5SOr Gerlitz 		*match_level = MLX5_MATCH_L2;
2229095b6cfdSOr Gerlitz 	}
2230095b6cfdSOr Gerlitz 
22318f256622SPablo Neira Ayuso 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
22328f256622SPablo Neira Ayuso 		struct flow_match_vlan match;
22338f256622SPablo Neira Ayuso 
223412d5cbf8SJianbo Liu 		flow_rule_match_cvlan(rule, &match);
22358f256622SPablo Neira Ayuso 		if (match.mask->vlan_id ||
22368f256622SPablo Neira Ayuso 		    match.mask->vlan_priority ||
22378f256622SPablo Neira Ayuso 		    match.mask->vlan_tpid) {
22388f256622SPablo Neira Ayuso 			if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
2239699e96ddSJianbo Liu 				MLX5_SET(fte_match_set_misc, misc_c,
2240699e96ddSJianbo Liu 					 outer_second_svlan_tag, 1);
2241699e96ddSJianbo Liu 				MLX5_SET(fte_match_set_misc, misc_v,
2242699e96ddSJianbo Liu 					 outer_second_svlan_tag, 1);
2243699e96ddSJianbo Liu 			} else {
2244699e96ddSJianbo Liu 				MLX5_SET(fte_match_set_misc, misc_c,
2245699e96ddSJianbo Liu 					 outer_second_cvlan_tag, 1);
2246699e96ddSJianbo Liu 				MLX5_SET(fte_match_set_misc, misc_v,
2247699e96ddSJianbo Liu 					 outer_second_cvlan_tag, 1);
2248699e96ddSJianbo Liu 			}
2249699e96ddSJianbo Liu 
2250699e96ddSJianbo Liu 			MLX5_SET(fte_match_set_misc, misc_c, outer_second_vid,
22518f256622SPablo Neira Ayuso 				 match.mask->vlan_id);
2252699e96ddSJianbo Liu 			MLX5_SET(fte_match_set_misc, misc_v, outer_second_vid,
22538f256622SPablo Neira Ayuso 				 match.key->vlan_id);
2254699e96ddSJianbo Liu 			MLX5_SET(fte_match_set_misc, misc_c, outer_second_prio,
22558f256622SPablo Neira Ayuso 				 match.mask->vlan_priority);
2256699e96ddSJianbo Liu 			MLX5_SET(fte_match_set_misc, misc_v, outer_second_prio,
22578f256622SPablo Neira Ayuso 				 match.key->vlan_priority);
2258699e96ddSJianbo Liu 
2259699e96ddSJianbo Liu 			*match_level = MLX5_MATCH_L2;
2260699e96ddSJianbo Liu 		}
2261699e96ddSJianbo Liu 	}
2262699e96ddSJianbo Liu 
22638f256622SPablo Neira Ayuso 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
22648f256622SPablo Neira Ayuso 		struct flow_match_eth_addrs match;
226554782900SOr Gerlitz 
22668f256622SPablo Neira Ayuso 		flow_rule_match_eth_addrs(rule, &match);
2267d3a80bb5SOr Gerlitz 		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2268d3a80bb5SOr Gerlitz 					     dmac_47_16),
22698f256622SPablo Neira Ayuso 				match.mask->dst);
2270d3a80bb5SOr Gerlitz 		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2271d3a80bb5SOr Gerlitz 					     dmac_47_16),
22728f256622SPablo Neira Ayuso 				match.key->dst);
2273d3a80bb5SOr Gerlitz 
2274d3a80bb5SOr Gerlitz 		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2275d3a80bb5SOr Gerlitz 					     smac_47_16),
22768f256622SPablo Neira Ayuso 				match.mask->src);
2277d3a80bb5SOr Gerlitz 		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2278d3a80bb5SOr Gerlitz 					     smac_47_16),
22798f256622SPablo Neira Ayuso 				match.key->src);
2280d3a80bb5SOr Gerlitz 
22818f256622SPablo Neira Ayuso 		if (!is_zero_ether_addr(match.mask->src) ||
22828f256622SPablo Neira Ayuso 		    !is_zero_ether_addr(match.mask->dst))
2283d708f902SOr Gerlitz 			*match_level = MLX5_MATCH_L2;
228454782900SOr Gerlitz 	}
228554782900SOr Gerlitz 
22868f256622SPablo Neira Ayuso 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
22878f256622SPablo Neira Ayuso 		struct flow_match_control match;
228854782900SOr Gerlitz 
22898f256622SPablo Neira Ayuso 		flow_rule_match_control(rule, &match);
22908f256622SPablo Neira Ayuso 		addr_type = match.key->addr_type;
229154782900SOr Gerlitz 
229254782900SOr Gerlitz 		/* the HW doesn't support matching on frag first/later */
22938f256622SPablo Neira Ayuso 		if (match.mask->flags & FLOW_DIS_FIRST_FRAG)
229454782900SOr Gerlitz 			return -EOPNOTSUPP;
229554782900SOr Gerlitz 
22968f256622SPablo Neira Ayuso 		if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
229754782900SOr Gerlitz 			MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
229854782900SOr Gerlitz 			MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
22998f256622SPablo Neira Ayuso 				 match.key->flags & FLOW_DIS_IS_FRAGMENT);
230054782900SOr Gerlitz 
230154782900SOr Gerlitz 			/* the HW doesn't need L3 inline to match on frag=no */
23028f256622SPablo Neira Ayuso 			if (!(match.key->flags & FLOW_DIS_IS_FRAGMENT))
230383621b7dSOr Gerlitz 				*match_level = MLX5_MATCH_L2;
230554782900SOr Gerlitz 			else
230683621b7dSOr Gerlitz 				*match_level = MLX5_MATCH_L3;
230754782900SOr Gerlitz 		}
230854782900SOr Gerlitz 	}
230454782900SOr Gerlitz 
230454782900SOr Gerlitz 	/* ***  L2 attributes parsing up to here *** */
230954782900SOr Gerlitz 
23108f256622SPablo Neira Ayuso 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
23118f256622SPablo Neira Ayuso 		struct flow_match_basic match;
23128f256622SPablo Neira Ayuso 
23138f256622SPablo Neira Ayuso 		flow_rule_match_basic(rule, &match);
23148f256622SPablo Neira Ayuso 		ip_proto = match.key->ip_proto;
231554782900SOr Gerlitz 
231654782900SOr Gerlitz 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
23178f256622SPablo Neira Ayuso 			 match.mask->ip_proto);
231854782900SOr Gerlitz 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
23198f256622SPablo Neira Ayuso 			 match.key->ip_proto);
232054782900SOr Gerlitz 
23218f256622SPablo Neira Ayuso 		if (match.mask->ip_proto)
2322d708f902SOr Gerlitz 			*match_level = MLX5_MATCH_L3;
232354782900SOr Gerlitz 	}
232454782900SOr Gerlitz 
2325e3a2b7edSAmir Vadai 	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
23268f256622SPablo Neira Ayuso 		struct flow_match_ipv4_addrs match;
2327e3a2b7edSAmir Vadai 
23288f256622SPablo Neira Ayuso 		flow_rule_match_ipv4_addrs(rule, &match);
2329e3a2b7edSAmir Vadai 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2330e3a2b7edSAmir Vadai 				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
23318f256622SPablo Neira Ayuso 		       &match.mask->src, sizeof(match.mask->src));
2332e3a2b7edSAmir Vadai 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2333e3a2b7edSAmir Vadai 				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
23348f256622SPablo Neira Ayuso 		       &match.key->src, sizeof(match.key->src));
2335e3a2b7edSAmir Vadai 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2336e3a2b7edSAmir Vadai 				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
23378f256622SPablo Neira Ayuso 		       &match.mask->dst, sizeof(match.mask->dst));
2338e3a2b7edSAmir Vadai 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2339e3a2b7edSAmir Vadai 				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
23408f256622SPablo Neira Ayuso 		       &match.key->dst, sizeof(match.key->dst));
2341de0af0bfSRoi Dayan 
23428f256622SPablo Neira Ayuso 		if (match.mask->src || match.mask->dst)
2343d708f902SOr Gerlitz 			*match_level = MLX5_MATCH_L3;
2344e3a2b7edSAmir Vadai 	}
2345e3a2b7edSAmir Vadai 
2346e3a2b7edSAmir Vadai 	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
23478f256622SPablo Neira Ayuso 		struct flow_match_ipv6_addrs match;
2348e3a2b7edSAmir Vadai 
23498f256622SPablo Neira Ayuso 		flow_rule_match_ipv6_addrs(rule, &match);
2350e3a2b7edSAmir Vadai 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2351e3a2b7edSAmir Vadai 				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
23528f256622SPablo Neira Ayuso 		       &match.mask->src, sizeof(match.mask->src));
2353e3a2b7edSAmir Vadai 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2354e3a2b7edSAmir Vadai 				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
23558f256622SPablo Neira Ayuso 		       &match.key->src, sizeof(match.key->src));
2356e3a2b7edSAmir Vadai 
2357e3a2b7edSAmir Vadai 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2358e3a2b7edSAmir Vadai 				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
23598f256622SPablo Neira Ayuso 		       &match.mask->dst, sizeof(match.mask->dst));
2360e3a2b7edSAmir Vadai 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2361e3a2b7edSAmir Vadai 				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
23628f256622SPablo Neira Ayuso 		       &match.key->dst, sizeof(match.key->dst));
2363de0af0bfSRoi Dayan 
23648f256622SPablo Neira Ayuso 		if (ipv6_addr_type(&match.mask->src) != IPV6_ADDR_ANY ||
23658f256622SPablo Neira Ayuso 		    ipv6_addr_type(&match.mask->dst) != IPV6_ADDR_ANY)
2366d708f902SOr Gerlitz 			*match_level = MLX5_MATCH_L3;
2367e3a2b7edSAmir Vadai 	}
2368e3a2b7edSAmir Vadai 
23698f256622SPablo Neira Ayuso 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
23708f256622SPablo Neira Ayuso 		struct flow_match_ip match;
23711f97a526SOr Gerlitz 
23728f256622SPablo Neira Ayuso 		flow_rule_match_ip(rule, &match);
23738f256622SPablo Neira Ayuso 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn,
23748f256622SPablo Neira Ayuso 			 match.mask->tos & 0x3);
23758f256622SPablo Neira Ayuso 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn,
23768f256622SPablo Neira Ayuso 			 match.key->tos & 0x3);
23771f97a526SOr Gerlitz 
23788f256622SPablo Neira Ayuso 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp,
23798f256622SPablo Neira Ayuso 			 match.mask->tos >> 2);
23808f256622SPablo Neira Ayuso 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp,
23818f256622SPablo Neira Ayuso 			 match.key->tos  >> 2);
23821f97a526SOr Gerlitz 
23838f256622SPablo Neira Ayuso 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit,
23848f256622SPablo Neira Ayuso 			 match.mask->ttl);
23858f256622SPablo Neira Ayuso 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit,
23868f256622SPablo Neira Ayuso 			 match.key->ttl);
23871f97a526SOr Gerlitz 
23888f256622SPablo Neira Ayuso 		if (match.mask->ttl &&
2389a8ade55fSOr Gerlitz 		    !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
2390e98bedf5SEli Britstein 						ft_field_support.outer_ipv4_ttl)) {
2391e98bedf5SEli Britstein 			NL_SET_ERR_MSG_MOD(extack,
2392e98bedf5SEli Britstein 					   "Matching on TTL is not supported");
23931f97a526SOr Gerlitz 			return -EOPNOTSUPP;
2394e98bedf5SEli Britstein 		}
2395a8ade55fSOr Gerlitz 
23968f256622SPablo Neira Ayuso 		if (match.mask->tos || match.mask->ttl)
2397d708f902SOr Gerlitz 			*match_level = MLX5_MATCH_L3;
23981f97a526SOr Gerlitz 	}
23991f97a526SOr Gerlitz 
240054782900SOr Gerlitz 	/* ***  L3 attributes parsing up to here *** */
240154782900SOr Gerlitz 
24028f256622SPablo Neira Ayuso 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
24038f256622SPablo Neira Ayuso 		struct flow_match_ports match;
24048f256622SPablo Neira Ayuso 
24058f256622SPablo Neira Ayuso 		flow_rule_match_ports(rule, &match);
2406e3a2b7edSAmir Vadai 		switch (ip_proto) {
2407e3a2b7edSAmir Vadai 		case IPPROTO_TCP:
2408e3a2b7edSAmir Vadai 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
24098f256622SPablo Neira Ayuso 				 tcp_sport, ntohs(match.mask->src));
2410e3a2b7edSAmir Vadai 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
24118f256622SPablo Neira Ayuso 				 tcp_sport, ntohs(match.key->src));
2412e3a2b7edSAmir Vadai 
2413e3a2b7edSAmir Vadai 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
24148f256622SPablo Neira Ayuso 				 tcp_dport, ntohs(match.mask->dst));
2415e3a2b7edSAmir Vadai 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
24168f256622SPablo Neira Ayuso 				 tcp_dport, ntohs(match.key->dst));
2417e3a2b7edSAmir Vadai 			break;
2418e3a2b7edSAmir Vadai 
2419e3a2b7edSAmir Vadai 		case IPPROTO_UDP:
2420e3a2b7edSAmir Vadai 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
24218f256622SPablo Neira Ayuso 				 udp_sport, ntohs(match.mask->src));
2422e3a2b7edSAmir Vadai 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
24238f256622SPablo Neira Ayuso 				 udp_sport, ntohs(match.key->src));
2424e3a2b7edSAmir Vadai 
2425e3a2b7edSAmir Vadai 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
24268f256622SPablo Neira Ayuso 				 udp_dport, ntohs(match.mask->dst));
2427e3a2b7edSAmir Vadai 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
24288f256622SPablo Neira Ayuso 				 udp_dport, ntohs(match.key->dst));
2429e3a2b7edSAmir Vadai 			break;
2430e3a2b7edSAmir Vadai 		default:
2431e98bedf5SEli Britstein 			NL_SET_ERR_MSG_MOD(extack,
2432e98bedf5SEli Britstein 					   "Only UDP and TCP transports are supported for L4 matching");
2433e3a2b7edSAmir Vadai 			netdev_err(priv->netdev,
2434e3a2b7edSAmir Vadai 				   "Only UDP and TCP transports are supported\n");
2435e3a2b7edSAmir Vadai 			return -EINVAL;
2436e3a2b7edSAmir Vadai 		}
2437de0af0bfSRoi Dayan 
24388f256622SPablo Neira Ayuso 		if (match.mask->src || match.mask->dst)
2439d708f902SOr Gerlitz 			*match_level = MLX5_MATCH_L4;
2440e3a2b7edSAmir Vadai 	}
2441e3a2b7edSAmir Vadai 
24428f256622SPablo Neira Ayuso 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
24438f256622SPablo Neira Ayuso 		struct flow_match_tcp match;
2444e77834ecSOr Gerlitz 
24458f256622SPablo Neira Ayuso 		flow_rule_match_tcp(rule, &match);
2446e77834ecSOr Gerlitz 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
24478f256622SPablo Neira Ayuso 			 ntohs(match.mask->flags));
2448e77834ecSOr Gerlitz 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
24498f256622SPablo Neira Ayuso 			 ntohs(match.key->flags));
2450e77834ecSOr Gerlitz 
24518f256622SPablo Neira Ayuso 		if (match.mask->flags)
2452d708f902SOr Gerlitz 			*match_level = MLX5_MATCH_L4;
2453e77834ecSOr Gerlitz 	}
2454e77834ecSOr Gerlitz 
2455e3a2b7edSAmir Vadai 	return 0;
2456e3a2b7edSAmir Vadai }
2457e3a2b7edSAmir Vadai 
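/* Top-level classifier parsing: delegate the dissector walk to
 * __parse_cls_flower() and then, for eswitch (FDB) flows on non-uplink
 * vports, verify that the eswitch inline mode covers the deepest
 * non-tunnel header layer the rule matches on.  The resulting inner/outer
 * match levels are recorded in the flow's eswitch or NIC attributes.
 */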
2458de0af0bfSRoi Dayan static int parse_cls_flower(struct mlx5e_priv *priv,
245965ba8fb7SOr Gerlitz 			    struct mlx5e_tc_flow *flow,
2460de0af0bfSRoi Dayan 			    struct mlx5_flow_spec *spec,
2461f9e30088SPablo Neira Ayuso 			    struct flow_cls_offload *f,
246254c177caSOz Shlomo 			    struct net_device *filter_dev)
2463de0af0bfSRoi Dayan {
246493b3586eSHuy Nguyen 	u8 inner_match_level, outer_match_level, non_tunnel_match_level;
2465e98bedf5SEli Britstein 	struct netlink_ext_ack *extack = f->common.extack;
2466de0af0bfSRoi Dayan 	struct mlx5_core_dev *dev = priv->mdev;
2467de0af0bfSRoi Dayan 	struct mlx5_eswitch *esw = dev->priv.eswitch;
24681d447a39SSaeed Mahameed 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
24691d447a39SSaeed Mahameed 	struct mlx5_eswitch_rep *rep;
2470226f2ca3SVlad Buslov 	bool is_eswitch_flow;
2471de0af0bfSRoi Dayan 	int err;
2472de0af0bfSRoi Dayan 
247393b3586eSHuy Nguyen 	inner_match_level = MLX5_MATCH_NONE;
247493b3586eSHuy Nguyen 	outer_match_level = MLX5_MATCH_NONE;
247593b3586eSHuy Nguyen 
24760a7fcb78SPaul Blakey 	err = __parse_cls_flower(priv, flow, spec, f, filter_dev,
24770a7fcb78SPaul Blakey 				 &inner_match_level, &outer_match_level);
247893b3586eSHuy Nguyen 	non_tunnel_match_level = (inner_match_level == MLX5_MATCH_NONE) ?
247993b3586eSHuy Nguyen 				 outer_match_level : inner_match_level;
2480de0af0bfSRoi Dayan 
2481226f2ca3SVlad Buslov 	is_eswitch_flow = mlx5e_is_eswitch_flow(flow);
2482226f2ca3SVlad Buslov 	if (!err && is_eswitch_flow) {
24831d447a39SSaeed Mahameed 		rep = rpriv->rep;
2484b05af6aaSBodong Wang 		if (rep->vport != MLX5_VPORT_UPLINK &&
24851d447a39SSaeed Mahameed 		    (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
248693b3586eSHuy Nguyen 		    esw->offloads.inline_mode < non_tunnel_match_level)) {
2487e98bedf5SEli Britstein 			NL_SET_ERR_MSG_MOD(extack,
2488e98bedf5SEli Britstein 					   "Flow is not offloaded due to min inline setting");
2489de0af0bfSRoi Dayan 			netdev_warn(priv->netdev,
2490de0af0bfSRoi Dayan 				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
249193b3586eSHuy Nguyen 				    non_tunnel_match_level, esw->offloads.inline_mode);
2492de0af0bfSRoi Dayan 			return -EOPNOTSUPP;
2493de0af0bfSRoi Dayan 		}
2494de0af0bfSRoi Dayan 	}
2495de0af0bfSRoi Dayan 
2496226f2ca3SVlad Buslov 	if (is_eswitch_flow) {
249793b3586eSHuy Nguyen 		flow->esw_attr->inner_match_level = inner_match_level;
249893b3586eSHuy Nguyen 		flow->esw_attr->outer_match_level = outer_match_level;
24996363651dSOr Gerlitz 	} else {
250093b3586eSHuy Nguyen 		flow->nic_attr->match_level = non_tunnel_match_level;
25016363651dSOr Gerlitz 	}
250238aa51c1SOr Gerlitz 
2503de0af0bfSRoi Dayan 	return err;
2504de0af0bfSRoi Dayan }
2505de0af0bfSRoi Dayan 
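/* Scratch copy of every header type that tc pedit can mangle; the requested
 * values and masks are accumulated here (see set_pedit_val()) before being
 * translated into device modify-header actions.
 */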
2506d79b6df6SOr Gerlitz struct pedit_headers {
2507d79b6df6SOr Gerlitz 	struct ethhdr  eth;
25080eb69bb9SEli Britstein 	struct vlan_hdr vlan;
2509d79b6df6SOr Gerlitz 	struct iphdr   ip4;
2510d79b6df6SOr Gerlitz 	struct ipv6hdr ip6;
2511d79b6df6SOr Gerlitz 	struct tcphdr  tcp;
2512d79b6df6SOr Gerlitz 	struct udphdr  udp;
2513d79b6df6SOr Gerlitz };
2514d79b6df6SOr Gerlitz 
2515c500c86bSPablo Neira Ayuso struct pedit_headers_action {
2516c500c86bSPablo Neira Ayuso 	struct pedit_headers	vals;
2517c500c86bSPablo Neira Ayuso 	struct pedit_headers	masks;
2518c500c86bSPablo Neira Ayuso 	u32			pedits;
2519c500c86bSPablo Neira Ayuso };
2520c500c86bSPablo Neira Ayuso 
2521d79b6df6SOr Gerlitz static int pedit_header_offsets[] = {
252273867881SPablo Neira Ayuso 	[FLOW_ACT_MANGLE_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
252373867881SPablo Neira Ayuso 	[FLOW_ACT_MANGLE_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
252473867881SPablo Neira Ayuso 	[FLOW_ACT_MANGLE_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
252573867881SPablo Neira Ayuso 	[FLOW_ACT_MANGLE_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
252673867881SPablo Neira Ayuso 	[FLOW_ACT_MANGLE_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
2527d79b6df6SOr Gerlitz };
2528d79b6df6SOr Gerlitz 
2529d79b6df6SOr Gerlitz #define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])
2530d79b6df6SOr Gerlitz 
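/* Record one 32-bit pedit write (value and mask at a byte offset inside the
 * given header type).  Acting twice on the same bits of the same location is
 * rejected with -EOPNOTSUPP.
 */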
2531d79b6df6SOr Gerlitz static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
2532c500c86bSPablo Neira Ayuso 			 struct pedit_headers_action *hdrs)
2533d79b6df6SOr Gerlitz {
2534d79b6df6SOr Gerlitz 	u32 *curr_pmask, *curr_pval;
2535d79b6df6SOr Gerlitz 
2536c500c86bSPablo Neira Ayuso 	curr_pmask = (u32 *)(pedit_header(&hdrs->masks, hdr_type) + offset);
2537c500c86bSPablo Neira Ayuso 	curr_pval  = (u32 *)(pedit_header(&hdrs->vals, hdr_type) + offset);
2538d79b6df6SOr Gerlitz 
2539d79b6df6SOr Gerlitz 	if (*curr_pmask & mask)  /* disallow acting twice on the same location */
2540d79b6df6SOr Gerlitz 		goto out_err;
2541d79b6df6SOr Gerlitz 
2542d79b6df6SOr Gerlitz 	*curr_pmask |= mask;
2543d79b6df6SOr Gerlitz 	*curr_pval  |= (val & mask);
2544d79b6df6SOr Gerlitz 
2545d79b6df6SOr Gerlitz 	return 0;
2546d79b6df6SOr Gerlitz 
2547d79b6df6SOr Gerlitz out_err:
2548d79b6df6SOr Gerlitz 	return -EOPNOTSUPP;
2549d79b6df6SOr Gerlitz }
2550d79b6df6SOr Gerlitz 
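/* One entry per header field the driver can rewrite: the firmware field id
 * (field), its width in bits (field_bsize), the mask of meaningful bits
 * (field_mask), its byte offset inside struct pedit_headers (offset) and the
 * offset of the corresponding bits in the flow match (match_offset), which
 * lets offload_pedit_fields() skip rewrites already guaranteed by the match.
 * The table itself is fields[] below.
 */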
2551d79b6df6SOr Gerlitz struct mlx5_fields {
2552d79b6df6SOr Gerlitz 	u8  field;
255388f30bbcSDmytro Linkin 	u8  field_bsize;
255488f30bbcSDmytro Linkin 	u32 field_mask;
2555d79b6df6SOr Gerlitz 	u32 offset;
255627c11b6bSEli Britstein 	u32 match_offset;
2557d79b6df6SOr Gerlitz };
2558d79b6df6SOr Gerlitz 
255988f30bbcSDmytro Linkin #define OFFLOAD(fw_field, field_bsize, field_mask, field, off, match_field) \
256088f30bbcSDmytro Linkin 		{MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, field_bsize, field_mask, \
256127c11b6bSEli Britstein 		 offsetof(struct pedit_headers, field) + (off), \
256227c11b6bSEli Britstein 		 MLX5_BYTE_OFF(fte_match_set_lyr_2_4, match_field)}
256327c11b6bSEli Britstein 
25642ef86872SEli Britstein /* true when the masked rewrite value equals the masked match value and the
25652ef86872SEli Britstein  * rewrite mask does not cover any bits outside the match mask.
25662ef86872SEli Britstein  */
25672ef86872SEli Britstein #define SAME_VAL_MASK(type, valp, maskp, matchvalp, matchmaskp) ({ \
25682ef86872SEli Britstein 	type matchmaskx = *(type *)(matchmaskp); \
25692ef86872SEli Britstein 	type matchvalx = *(type *)(matchvalp); \
25702ef86872SEli Britstein 	type maskx = *(type *)(maskp); \
25712ef86872SEli Britstein 	type valx = *(type *)(valp); \
25722ef86872SEli Britstein 	\
25732ef86872SEli Britstein 	(valx & maskx) == (matchvalx & matchmaskx) && !(maskx & (maskx ^ \
25742ef86872SEli Britstein 								 matchmaskx)); \
25752ef86872SEli Britstein })
25762ef86872SEli Britstein 
257727c11b6bSEli Britstein static bool cmp_val_mask(void *valp, void *maskp, void *matchvalp,
257888f30bbcSDmytro Linkin 			 void *matchmaskp, u8 bsize)
257927c11b6bSEli Britstein {
258027c11b6bSEli Britstein 	bool same = false;
258127c11b6bSEli Britstein 
258288f30bbcSDmytro Linkin 	switch (bsize) {
258388f30bbcSDmytro Linkin 	case 8:
25842ef86872SEli Britstein 		same = SAME_VAL_MASK(u8, valp, maskp, matchvalp, matchmaskp);
258527c11b6bSEli Britstein 		break;
258688f30bbcSDmytro Linkin 	case 16:
25872ef86872SEli Britstein 		same = SAME_VAL_MASK(u16, valp, maskp, matchvalp, matchmaskp);
258827c11b6bSEli Britstein 		break;
258988f30bbcSDmytro Linkin 	case 32:
25902ef86872SEli Britstein 		same = SAME_VAL_MASK(u32, valp, maskp, matchvalp, matchmaskp);
259127c11b6bSEli Britstein 		break;
259227c11b6bSEli Britstein 	}
259327c11b6bSEli Britstein 
259427c11b6bSEli Britstein 	return same;
259527c11b6bSEli Britstein }
2596a8e4f0c4SOr Gerlitz 
2597d79b6df6SOr Gerlitz static struct mlx5_fields fields[] = {
259888f30bbcSDmytro Linkin 	OFFLOAD(DMAC_47_16, 32, U32_MAX, eth.h_dest[0], 0, dmac_47_16),
259988f30bbcSDmytro Linkin 	OFFLOAD(DMAC_15_0,  16, U16_MAX, eth.h_dest[4], 0, dmac_15_0),
260088f30bbcSDmytro Linkin 	OFFLOAD(SMAC_47_16, 32, U32_MAX, eth.h_source[0], 0, smac_47_16),
260188f30bbcSDmytro Linkin 	OFFLOAD(SMAC_15_0,  16, U16_MAX, eth.h_source[4], 0, smac_15_0),
260288f30bbcSDmytro Linkin 	OFFLOAD(ETHERTYPE,  16, U16_MAX, eth.h_proto, 0, ethertype),
260388f30bbcSDmytro Linkin 	OFFLOAD(FIRST_VID,  16, U16_MAX, vlan.h_vlan_TCI, 0, first_vid),
2604d79b6df6SOr Gerlitz 
2605ab9341b5SDmytro Linkin 	OFFLOAD(IP_DSCP, 8,    0xfc, ip4.tos,   0, ip_dscp),
260688f30bbcSDmytro Linkin 	OFFLOAD(IP_TTL,  8,  U8_MAX, ip4.ttl,   0, ttl_hoplimit),
260788f30bbcSDmytro Linkin 	OFFLOAD(SIPV4,  32, U32_MAX, ip4.saddr, 0, src_ipv4_src_ipv6.ipv4_layout.ipv4),
260888f30bbcSDmytro Linkin 	OFFLOAD(DIPV4,  32, U32_MAX, ip4.daddr, 0, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2609d79b6df6SOr Gerlitz 
261088f30bbcSDmytro Linkin 	OFFLOAD(SIPV6_127_96, 32, U32_MAX, ip6.saddr.s6_addr32[0], 0,
261127c11b6bSEli Britstein 		src_ipv4_src_ipv6.ipv6_layout.ipv6[0]),
261288f30bbcSDmytro Linkin 	OFFLOAD(SIPV6_95_64,  32, U32_MAX, ip6.saddr.s6_addr32[1], 0,
261327c11b6bSEli Britstein 		src_ipv4_src_ipv6.ipv6_layout.ipv6[4]),
261488f30bbcSDmytro Linkin 	OFFLOAD(SIPV6_63_32,  32, U32_MAX, ip6.saddr.s6_addr32[2], 0,
261527c11b6bSEli Britstein 		src_ipv4_src_ipv6.ipv6_layout.ipv6[8]),
261688f30bbcSDmytro Linkin 	OFFLOAD(SIPV6_31_0,   32, U32_MAX, ip6.saddr.s6_addr32[3], 0,
261727c11b6bSEli Britstein 		src_ipv4_src_ipv6.ipv6_layout.ipv6[12]),
261888f30bbcSDmytro Linkin 	OFFLOAD(DIPV6_127_96, 32, U32_MAX, ip6.daddr.s6_addr32[0], 0,
261927c11b6bSEli Britstein 		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[0]),
262088f30bbcSDmytro Linkin 	OFFLOAD(DIPV6_95_64,  32, U32_MAX, ip6.daddr.s6_addr32[1], 0,
262127c11b6bSEli Britstein 		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[4]),
262288f30bbcSDmytro Linkin 	OFFLOAD(DIPV6_63_32,  32, U32_MAX, ip6.daddr.s6_addr32[2], 0,
262327c11b6bSEli Britstein 		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[8]),
262488f30bbcSDmytro Linkin 	OFFLOAD(DIPV6_31_0,   32, U32_MAX, ip6.daddr.s6_addr32[3], 0,
262527c11b6bSEli Britstein 		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]),
262688f30bbcSDmytro Linkin 	OFFLOAD(IPV6_HOPLIMIT, 8,  U8_MAX, ip6.hop_limit, 0, ttl_hoplimit),
2627d79b6df6SOr Gerlitz 
262888f30bbcSDmytro Linkin 	OFFLOAD(TCP_SPORT, 16, U16_MAX, tcp.source,  0, tcp_sport),
262988f30bbcSDmytro Linkin 	OFFLOAD(TCP_DPORT, 16, U16_MAX, tcp.dest,    0, tcp_dport),
263088f30bbcSDmytro Linkin 	/* in the linux tcphdr, tcp_flags is an 8 bit field */
263188f30bbcSDmytro Linkin 	OFFLOAD(TCP_FLAGS,  8,  U8_MAX, tcp.ack_seq, 5, tcp_flags),
2632d79b6df6SOr Gerlitz 
263388f30bbcSDmytro Linkin 	OFFLOAD(UDP_SPORT, 16, U16_MAX, udp.source, 0, udp_sport),
263488f30bbcSDmytro Linkin 	OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest,   0, udp_dport),
2635d79b6df6SOr Gerlitz };
2636d79b6df6SOr Gerlitz 
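/* Walk the fields[] table and turn the accumulated pedit set/add requests in
 * hdrs[] into firmware modify-header actions.  A "set" that only rewrites a
 * value the rule already matches on is skipped, an "add" of zero is skipped,
 * and a mask whose set bits are not contiguous is rejected, since a single
 * set action rewrites one contiguous bit range (offset + length).
 */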
26376ae4a6a5SPaul Blakey static int offload_pedit_fields(struct mlx5e_priv *priv,
26386ae4a6a5SPaul Blakey 				int namespace,
26396ae4a6a5SPaul Blakey 				struct pedit_headers_action *hdrs,
2640e98bedf5SEli Britstein 				struct mlx5e_tc_flow_parse_attr *parse_attr,
264127c11b6bSEli Britstein 				u32 *action_flags,
2642e98bedf5SEli Britstein 				struct netlink_ext_ack *extack)
2643d79b6df6SOr Gerlitz {
2644d79b6df6SOr Gerlitz 	struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
26456ae4a6a5SPaul Blakey 	int i, action_size, first, last, next_z;
264688f30bbcSDmytro Linkin 	void *headers_c, *headers_v, *action, *vals_p;
264788f30bbcSDmytro Linkin 	u32 *s_masks_p, *a_masks_p, s_mask, a_mask;
26486ae4a6a5SPaul Blakey 	struct mlx5e_tc_mod_hdr_acts *mod_acts;
2649d79b6df6SOr Gerlitz 	struct mlx5_fields *f;
2650d79b6df6SOr Gerlitz 	unsigned long mask;
26512b64bebaSOr Gerlitz 	__be32 mask_be32;
26522b64bebaSOr Gerlitz 	__be16 mask_be16;
26536ae4a6a5SPaul Blakey 	int err;
265488f30bbcSDmytro Linkin 	u8 cmd;
265588f30bbcSDmytro Linkin 
26566ae4a6a5SPaul Blakey 	mod_acts = &parse_attr->mod_hdr_acts;
265788f30bbcSDmytro Linkin 	headers_c = get_match_headers_criteria(*action_flags, &parse_attr->spec);
265888f30bbcSDmytro Linkin 	headers_v = get_match_headers_value(*action_flags, &parse_attr->spec);
2659d79b6df6SOr Gerlitz 
266073867881SPablo Neira Ayuso 	set_masks = &hdrs[0].masks;
266173867881SPablo Neira Ayuso 	add_masks = &hdrs[1].masks;
266273867881SPablo Neira Ayuso 	set_vals = &hdrs[0].vals;
266373867881SPablo Neira Ayuso 	add_vals = &hdrs[1].vals;
2664d79b6df6SOr Gerlitz 
2665d65dbedfSHuy Nguyen 	action_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto);
2666d79b6df6SOr Gerlitz 
2667d79b6df6SOr Gerlitz 	for (i = 0; i < ARRAY_SIZE(fields); i++) {
266827c11b6bSEli Britstein 		bool skip;
266927c11b6bSEli Britstein 
2670d79b6df6SOr Gerlitz 		f = &fields[i];
2671d79b6df6SOr Gerlitz 		/* avoid seeing bits set from previous iterations */
2672e3ca4e05SOr Gerlitz 		s_mask = 0;
2673e3ca4e05SOr Gerlitz 		a_mask = 0;
2674d79b6df6SOr Gerlitz 
2675d79b6df6SOr Gerlitz 		s_masks_p = (void *)set_masks + f->offset;
2676d79b6df6SOr Gerlitz 		a_masks_p = (void *)add_masks + f->offset;
2677d79b6df6SOr Gerlitz 
267888f30bbcSDmytro Linkin 		s_mask = *s_masks_p & f->field_mask;
267988f30bbcSDmytro Linkin 		a_mask = *a_masks_p & f->field_mask;
2680d79b6df6SOr Gerlitz 
2681d79b6df6SOr Gerlitz 		if (!s_mask && !a_mask) /* nothing to offload here */
2682d79b6df6SOr Gerlitz 			continue;
2683d79b6df6SOr Gerlitz 
2684d79b6df6SOr Gerlitz 		if (s_mask && a_mask) {
2685e98bedf5SEli Britstein 			NL_SET_ERR_MSG_MOD(extack,
2686e98bedf5SEli Britstein 					   "can't set and add to the same HW field");
2687d79b6df6SOr Gerlitz 			printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field);
2688d79b6df6SOr Gerlitz 			return -EOPNOTSUPP;
2689d79b6df6SOr Gerlitz 		}
2690d79b6df6SOr Gerlitz 
269127c11b6bSEli Britstein 		skip = false;
2692d79b6df6SOr Gerlitz 		if (s_mask) {
269327c11b6bSEli Britstein 			void *match_mask = headers_c + f->match_offset;
269427c11b6bSEli Britstein 			void *match_val = headers_v + f->match_offset;
269527c11b6bSEli Britstein 
2696d79b6df6SOr Gerlitz 			cmd  = MLX5_ACTION_TYPE_SET;
2697d79b6df6SOr Gerlitz 			mask = s_mask;
2698d79b6df6SOr Gerlitz 			vals_p = (void *)set_vals + f->offset;
269927c11b6bSEli Britstein 			/* don't rewrite if we have a match on the same value */
270027c11b6bSEli Britstein 			if (cmp_val_mask(vals_p, s_masks_p, match_val,
270188f30bbcSDmytro Linkin 					 match_mask, f->field_bsize))
270227c11b6bSEli Britstein 				skip = true;
2703d79b6df6SOr Gerlitz 			/* clear to denote we consumed this field */
270488f30bbcSDmytro Linkin 			*s_masks_p &= ~f->field_mask;
2705d79b6df6SOr Gerlitz 		} else {
2706d79b6df6SOr Gerlitz 			cmd  = MLX5_ACTION_TYPE_ADD;
2707d79b6df6SOr Gerlitz 			mask = a_mask;
2708d79b6df6SOr Gerlitz 			vals_p = (void *)add_vals + f->offset;
270927c11b6bSEli Britstein 			/* add 0 is no change */
271088f30bbcSDmytro Linkin 			if ((*(u32 *)vals_p & f->field_mask) == 0)
271127c11b6bSEli Britstein 				skip = true;
2712d79b6df6SOr Gerlitz 			/* clear to denote we consumed this field */
271388f30bbcSDmytro Linkin 			*a_masks_p &= ~f->field_mask;
2714d79b6df6SOr Gerlitz 		}
271527c11b6bSEli Britstein 		if (skip)
271627c11b6bSEli Britstein 			continue;
2717d79b6df6SOr Gerlitz 
271888f30bbcSDmytro Linkin 		if (f->field_bsize == 32) {
2719404402abSSebastian Hense 			mask_be32 = (__be32)mask;
27202b64bebaSOr Gerlitz 			mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
272188f30bbcSDmytro Linkin 		} else if (f->field_bsize == 16) {
2722404402abSSebastian Hense 			mask_be32 = (__be32)mask;
2723404402abSSebastian Hense 			mask_be16 = *(__be16 *)&mask_be32;
27242b64bebaSOr Gerlitz 			mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
27252b64bebaSOr Gerlitz 		}
27262b64bebaSOr Gerlitz 
272788f30bbcSDmytro Linkin 		first = find_first_bit(&mask, f->field_bsize);
272888f30bbcSDmytro Linkin 		next_z = find_next_zero_bit(&mask, f->field_bsize, first);
272988f30bbcSDmytro Linkin 		last  = find_last_bit(&mask, f->field_bsize);
27302b64bebaSOr Gerlitz 		if (first < next_z && next_z < last) {
2731e98bedf5SEli Britstein 			NL_SET_ERR_MSG_MOD(extack,
2732e98bedf5SEli Britstein 					   "rewrite of few sub-fields isn't supported");
27332b64bebaSOr Gerlitz 			printk(KERN_WARNING "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n",
2734d79b6df6SOr Gerlitz 			       mask);
2735d79b6df6SOr Gerlitz 			return -EOPNOTSUPP;
2736d79b6df6SOr Gerlitz 		}
2737d79b6df6SOr Gerlitz 
27386ae4a6a5SPaul Blakey 		err = alloc_mod_hdr_actions(priv->mdev, namespace, mod_acts);
27396ae4a6a5SPaul Blakey 		if (err) {
27406ae4a6a5SPaul Blakey 			NL_SET_ERR_MSG_MOD(extack,
27416ae4a6a5SPaul Blakey 					   "too many pedit actions, can't offload");
27426ae4a6a5SPaul Blakey 			mlx5_core_warn(priv->mdev,
27436ae4a6a5SPaul Blakey 				       "mlx5: parsed %d pedit actions, can't do more\n",
27446ae4a6a5SPaul Blakey 				       mod_acts->num_actions);
27456ae4a6a5SPaul Blakey 			return err;
27466ae4a6a5SPaul Blakey 		}
27476ae4a6a5SPaul Blakey 
27486ae4a6a5SPaul Blakey 		action = mod_acts->actions +
27496ae4a6a5SPaul Blakey 			 (mod_acts->num_actions * action_size);
2750d79b6df6SOr Gerlitz 		MLX5_SET(set_action_in, action, action_type, cmd);
2751d79b6df6SOr Gerlitz 		MLX5_SET(set_action_in, action, field, f->field);
2752d79b6df6SOr Gerlitz 
2753d79b6df6SOr Gerlitz 		if (cmd == MLX5_ACTION_TYPE_SET) {
275488f30bbcSDmytro Linkin 			int start;
275588f30bbcSDmytro Linkin 
275688f30bbcSDmytro Linkin 			/* the field may not start at bit 0 of its container (e.g. DSCP); adjust the offset */
275788f30bbcSDmytro Linkin 			start = find_first_bit((unsigned long *)&f->field_mask,
275888f30bbcSDmytro Linkin 					       f->field_bsize);
275988f30bbcSDmytro Linkin 
276088f30bbcSDmytro Linkin 			MLX5_SET(set_action_in, action, offset, first - start);
2761d79b6df6SOr Gerlitz 			/* length is num of bits to be written, zero means length of 32 */
27622b64bebaSOr Gerlitz 			MLX5_SET(set_action_in, action, length, (last - first + 1));
2763d79b6df6SOr Gerlitz 		}
2764d79b6df6SOr Gerlitz 
276588f30bbcSDmytro Linkin 		if (f->field_bsize == 32)
27662b64bebaSOr Gerlitz 			MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p) >> first);
276788f30bbcSDmytro Linkin 		else if (f->field_bsize == 16)
27682b64bebaSOr Gerlitz 			MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p) >> first);
276988f30bbcSDmytro Linkin 		else if (f->field_bsize == 8)
27702b64bebaSOr Gerlitz 			MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first);
2771d79b6df6SOr Gerlitz 
27726ae4a6a5SPaul Blakey 		++mod_acts->num_actions;
2773d79b6df6SOr Gerlitz 	}
2774d79b6df6SOr Gerlitz 
2775d79b6df6SOr Gerlitz 	return 0;
2776d79b6df6SOr Gerlitz }
2777d79b6df6SOr Gerlitz 
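/* Max number of modify-header actions the device supports for the given flow
 * namespace: FDB for eswitch offload, otherwise NIC RX.
 */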
27782cc1cb1dSTonghao Zhang static int mlx5e_flow_namespace_max_modify_action(struct mlx5_core_dev *mdev,
27792cc1cb1dSTonghao Zhang 						  int namespace)
27802cc1cb1dSTonghao Zhang {
27812cc1cb1dSTonghao Zhang 	if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
27822cc1cb1dSTonghao Zhang 		return MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, max_modify_header_actions);
27832cc1cb1dSTonghao Zhang 	else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
27842cc1cb1dSTonghao Zhang 		return MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_modify_header_actions);
27852cc1cb1dSTonghao Zhang }
27862cc1cb1dSTonghao Zhang 
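/* Ensure mod_hdr_acts has room for at least one more action, growing the
 * action array geometrically (1, 2, 4, ...) up to the device limit for the
 * namespace.  Returns -ENOSPC once that limit has been reached.
 */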
27876ae4a6a5SPaul Blakey int alloc_mod_hdr_actions(struct mlx5_core_dev *mdev,
2788c500c86bSPablo Neira Ayuso 			  int namespace,
27896ae4a6a5SPaul Blakey 			  struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
2790d79b6df6SOr Gerlitz {
27916ae4a6a5SPaul Blakey 	int action_size, new_num_actions, max_hw_actions;
27926ae4a6a5SPaul Blakey 	size_t new_sz, old_sz;
27936ae4a6a5SPaul Blakey 	void *ret;
2794d79b6df6SOr Gerlitz 
27956ae4a6a5SPaul Blakey 	if (mod_hdr_acts->num_actions < mod_hdr_acts->max_actions)
27966ae4a6a5SPaul Blakey 		return 0;
27976ae4a6a5SPaul Blakey 
2798d65dbedfSHuy Nguyen 	action_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto);
2799d79b6df6SOr Gerlitz 
28006ae4a6a5SPaul Blakey 	max_hw_actions = mlx5e_flow_namespace_max_modify_action(mdev,
28016ae4a6a5SPaul Blakey 								namespace);
28026ae4a6a5SPaul Blakey 	new_num_actions = min(max_hw_actions,
28036ae4a6a5SPaul Blakey 			      mod_hdr_acts->actions ?
28046ae4a6a5SPaul Blakey 			      mod_hdr_acts->max_actions * 2 : 1);
28056ae4a6a5SPaul Blakey 	if (mod_hdr_acts->max_actions == new_num_actions)
28066ae4a6a5SPaul Blakey 		return -ENOSPC;
2807d79b6df6SOr Gerlitz 
28086ae4a6a5SPaul Blakey 	new_sz = action_size * new_num_actions;
28096ae4a6a5SPaul Blakey 	old_sz = mod_hdr_acts->max_actions * action_size;
28106ae4a6a5SPaul Blakey 	ret = krealloc(mod_hdr_acts->actions, new_sz, GFP_KERNEL);
28116ae4a6a5SPaul Blakey 	if (!ret)
2812d79b6df6SOr Gerlitz 		return -ENOMEM;
2813d79b6df6SOr Gerlitz 
28146ae4a6a5SPaul Blakey 	memset(ret + old_sz, 0, new_sz - old_sz);
28156ae4a6a5SPaul Blakey 	mod_hdr_acts->actions = ret;
28166ae4a6a5SPaul Blakey 	mod_hdr_acts->max_actions = new_num_actions;
28176ae4a6a5SPaul Blakey 
2818d79b6df6SOr Gerlitz 	return 0;
2819d79b6df6SOr Gerlitz }
2820d79b6df6SOr Gerlitz 
28216ae4a6a5SPaul Blakey void dealloc_mod_hdr_actions(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
28226ae4a6a5SPaul Blakey {
28236ae4a6a5SPaul Blakey 	kfree(mod_hdr_acts->actions);
28246ae4a6a5SPaul Blakey 	mod_hdr_acts->actions = NULL;
28256ae4a6a5SPaul Blakey 	mod_hdr_acts->num_actions = 0;
28266ae4a6a5SPaul Blakey 	mod_hdr_acts->max_actions = 0;
28276ae4a6a5SPaul Blakey }
28286ae4a6a5SPaul Blakey 
2829d79b6df6SOr Gerlitz static const struct pedit_headers zero_masks = {};
2830d79b6df6SOr Gerlitz 
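/* Parse a single tc pedit action into hdrs[]: index 0 accumulates "set"
 * (mangle) requests and index 1 accumulates "add" requests.  Legacy,
 * non-extended pedit (FLOW_ACT_MANGLE_UNSPEC) is not offloaded.
 */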
2831d79b6df6SOr Gerlitz static int parse_tc_pedit_action(struct mlx5e_priv *priv,
283273867881SPablo Neira Ayuso 				 const struct flow_action_entry *act, int namespace,
2833c500c86bSPablo Neira Ayuso 				 struct pedit_headers_action *hdrs,
2834e98bedf5SEli Britstein 				 struct netlink_ext_ack *extack)
2835d79b6df6SOr Gerlitz {
283673867881SPablo Neira Ayuso 	u8 cmd = (act->id == FLOW_ACTION_MANGLE) ? 0 : 1;
283773867881SPablo Neira Ayuso 	int err = -EOPNOTSUPP;
2838d79b6df6SOr Gerlitz 	u32 mask, val, offset;
283973867881SPablo Neira Ayuso 	u8 htype;
2840d79b6df6SOr Gerlitz 
284173867881SPablo Neira Ayuso 	htype = act->mangle.htype;
2842d79b6df6SOr Gerlitz 	err = -EOPNOTSUPP; /* can't be all optimistic */
2843d79b6df6SOr Gerlitz 
284473867881SPablo Neira Ayuso 	if (htype == FLOW_ACT_MANGLE_UNSPEC) {
284573867881SPablo Neira Ayuso 		NL_SET_ERR_MSG_MOD(extack, "legacy pedit isn't offloaded");
2846d79b6df6SOr Gerlitz 		goto out_err;
2847d79b6df6SOr Gerlitz 	}
2848d79b6df6SOr Gerlitz 
28492cc1cb1dSTonghao Zhang 	if (!mlx5e_flow_namespace_max_modify_action(priv->mdev, namespace)) {
28502cc1cb1dSTonghao Zhang 		NL_SET_ERR_MSG_MOD(extack,
28512cc1cb1dSTonghao Zhang 				   "The pedit offload action is not supported");
28522cc1cb1dSTonghao Zhang 		goto out_err;
28532cc1cb1dSTonghao Zhang 	}
28542cc1cb1dSTonghao Zhang 
285573867881SPablo Neira Ayuso 	mask = act->mangle.mask;
285673867881SPablo Neira Ayuso 	val = act->mangle.val;
285773867881SPablo Neira Ayuso 	offset = act->mangle.offset;
2858d79b6df6SOr Gerlitz 
2859c500c86bSPablo Neira Ayuso 	err = set_pedit_val(htype, ~mask, val, offset, &hdrs[cmd]);
2860d79b6df6SOr Gerlitz 	if (err)
2861d79b6df6SOr Gerlitz 		goto out_err;
2862c500c86bSPablo Neira Ayuso 
2863c500c86bSPablo Neira Ayuso 	hdrs[cmd].pedits++;
2864d79b6df6SOr Gerlitz 
2865c500c86bSPablo Neira Ayuso 	return 0;
2866c500c86bSPablo Neira Ayuso out_err:
2867c500c86bSPablo Neira Ayuso 	return err;
2868c500c86bSPablo Neira Ayuso }
2869c500c86bSPablo Neira Ayuso 
2870c500c86bSPablo Neira Ayuso static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
2871c500c86bSPablo Neira Ayuso 				 struct mlx5e_tc_flow_parse_attr *parse_attr,
2872c500c86bSPablo Neira Ayuso 				 struct pedit_headers_action *hdrs,
287327c11b6bSEli Britstein 				 u32 *action_flags,
2874c500c86bSPablo Neira Ayuso 				 struct netlink_ext_ack *extack)
2875c500c86bSPablo Neira Ayuso {
2876c500c86bSPablo Neira Ayuso 	struct pedit_headers *cmd_masks;
2877c500c86bSPablo Neira Ayuso 	int err;
2878c500c86bSPablo Neira Ayuso 	u8 cmd;
2879c500c86bSPablo Neira Ayuso 
28806ae4a6a5SPaul Blakey 	err = offload_pedit_fields(priv, namespace, hdrs, parse_attr,
28816ae4a6a5SPaul Blakey 				   action_flags, extack);
2882d79b6df6SOr Gerlitz 	if (err < 0)
2883d79b6df6SOr Gerlitz 		goto out_dealloc_parsed_actions;
2884d79b6df6SOr Gerlitz 
2885d79b6df6SOr Gerlitz 	for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
2886c500c86bSPablo Neira Ayuso 		cmd_masks = &hdrs[cmd].masks;
2887d79b6df6SOr Gerlitz 		if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
2888e98bedf5SEli Britstein 			NL_SET_ERR_MSG_MOD(extack,
2889e98bedf5SEli Britstein 					   "attempt to offload an unsupported field");
2890b3a433deSOr Gerlitz 			netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd);
2891d79b6df6SOr Gerlitz 			print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
2892d79b6df6SOr Gerlitz 				       16, 1, cmd_masks, sizeof(zero_masks), true);
2893d79b6df6SOr Gerlitz 			err = -EOPNOTSUPP;
2894d79b6df6SOr Gerlitz 			goto out_dealloc_parsed_actions;
2895d79b6df6SOr Gerlitz 		}
2896d79b6df6SOr Gerlitz 	}
2897d79b6df6SOr Gerlitz 
2898d79b6df6SOr Gerlitz 	return 0;
2899d79b6df6SOr Gerlitz 
2900d79b6df6SOr Gerlitz out_dealloc_parsed_actions:
29016ae4a6a5SPaul Blakey 	dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
2902d79b6df6SOr Gerlitz 	return err;
2903d79b6df6SOr Gerlitz }
2904d79b6df6SOr Gerlitz 
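/* tc csum actions are offloaded only as a side effect of header rewrite:
 * the device recalculates checksums when it modifies headers, so a csum
 * action is accepted only together with a pedit and only for the
 * IPv4/TCP/UDP update flags.
 */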
2905e98bedf5SEli Britstein static bool csum_offload_supported(struct mlx5e_priv *priv,
2906e98bedf5SEli Britstein 				   u32 action,
2907e98bedf5SEli Britstein 				   u32 update_flags,
2908e98bedf5SEli Britstein 				   struct netlink_ext_ack *extack)
290926c02749SOr Gerlitz {
291026c02749SOr Gerlitz 	u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
291126c02749SOr Gerlitz 			 TCA_CSUM_UPDATE_FLAG_UDP;
291226c02749SOr Gerlitz 
291326c02749SOr Gerlitz 	/*  The HW recalcs checksums only if re-writing headers */
291426c02749SOr Gerlitz 	if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
2915e98bedf5SEli Britstein 		NL_SET_ERR_MSG_MOD(extack,
2916e98bedf5SEli Britstein 				   "TC csum action is only offloaded with pedit");
291726c02749SOr Gerlitz 		netdev_warn(priv->netdev,
291826c02749SOr Gerlitz 			    "TC csum action is only offloaded with pedit\n");
291926c02749SOr Gerlitz 		return false;
292026c02749SOr Gerlitz 	}
292126c02749SOr Gerlitz 
292226c02749SOr Gerlitz 	if (update_flags & ~prot_flags) {
2923e98bedf5SEli Britstein 		NL_SET_ERR_MSG_MOD(extack,
2924e98bedf5SEli Britstein 				   "can't offload TC csum action for some header/s");
292526c02749SOr Gerlitz 		netdev_warn(priv->netdev,
292626c02749SOr Gerlitz 			    "can't offload TC csum action for some header/s - flags %#x\n",
292726c02749SOr Gerlitz 			    update_flags);
292826c02749SOr Gerlitz 		return false;
292926c02749SOr Gerlitz 	}
293026c02749SOr Gerlitz 
293126c02749SOr Gerlitz 	return true;
293226c02749SOr Gerlitz }
293326c02749SOr Gerlitz 
29348998576bSDmytro Linkin struct ip_ttl_word {
29358998576bSDmytro Linkin 	__u8	ttl;
29368998576bSDmytro Linkin 	__u8	protocol;
29378998576bSDmytro Linkin 	__sum16	check;
29388998576bSDmytro Linkin };
29398998576bSDmytro Linkin 
29408998576bSDmytro Linkin struct ipv6_hoplimit_word {
29418998576bSDmytro Linkin 	__be16	payload_len;
29428998576bSDmytro Linkin 	__u8	nexthdr;
29438998576bSDmytro Linkin 	__u8	hop_limit;
29448998576bSDmytro Linkin };
29458998576bSDmytro Linkin 
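/* Inspect one mangle/add action: record whether it modifies IP header fields
 * other than ttl/hop_limit, and, for flows that also carry a ct action,
 * reject rewrites of IP addresses or L4 ports.
 */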
29464c3844d9SPaul Blakey static int is_action_keys_supported(const struct flow_action_entry *act,
29474c3844d9SPaul Blakey 				    bool ct_flow, bool *modify_ip_header,
29484c3844d9SPaul Blakey 				    struct netlink_ext_ack *extack)
29498998576bSDmytro Linkin {
29508998576bSDmytro Linkin 	u32 mask, offset;
29518998576bSDmytro Linkin 	u8 htype;
29528998576bSDmytro Linkin 
29538998576bSDmytro Linkin 	htype = act->mangle.htype;
29548998576bSDmytro Linkin 	offset = act->mangle.offset;
29558998576bSDmytro Linkin 	mask = ~act->mangle.mask;
29568998576bSDmytro Linkin 	/* For IPv4 and IPv6 headers, inspect the whole 4-byte word being
29578998576bSDmytro Linkin 	 * mangled to determine whether fields other than
29588998576bSDmytro Linkin 	 * ttl & hop_limit are being modified.
29598998576bSDmytro Linkin 	 */
29608998576bSDmytro Linkin 	if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4) {
29618998576bSDmytro Linkin 		struct ip_ttl_word *ttl_word =
29628998576bSDmytro Linkin 			(struct ip_ttl_word *)&mask;
29638998576bSDmytro Linkin 
29648998576bSDmytro Linkin 		if (offset != offsetof(struct iphdr, ttl) ||
29658998576bSDmytro Linkin 		    ttl_word->protocol ||
29668998576bSDmytro Linkin 		    ttl_word->check) {
29674c3844d9SPaul Blakey 			*modify_ip_header = true;
29684c3844d9SPaul Blakey 		}
29694c3844d9SPaul Blakey 
29704c3844d9SPaul Blakey 		if (ct_flow && offset >= offsetof(struct iphdr, saddr)) {
29714c3844d9SPaul Blakey 			NL_SET_ERR_MSG_MOD(extack,
29724c3844d9SPaul Blakey 					   "can't offload re-write of ipv4 address with action ct");
29734c3844d9SPaul Blakey 			return -EOPNOTSUPP;
29748998576bSDmytro Linkin 		}
29758998576bSDmytro Linkin 	} else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
29768998576bSDmytro Linkin 		struct ipv6_hoplimit_word *hoplimit_word =
29778998576bSDmytro Linkin 			(struct ipv6_hoplimit_word *)&mask;
29788998576bSDmytro Linkin 
29798998576bSDmytro Linkin 		if (offset != offsetof(struct ipv6hdr, payload_len) ||
29808998576bSDmytro Linkin 		    hoplimit_word->payload_len ||
29818998576bSDmytro Linkin 		    hoplimit_word->nexthdr) {
29824c3844d9SPaul Blakey 			*modify_ip_header = true;
29838998576bSDmytro Linkin 		}
29844c3844d9SPaul Blakey 
29854c3844d9SPaul Blakey 		if (ct_flow && offset >= offsetof(struct ipv6hdr, saddr)) {
29864c3844d9SPaul Blakey 			NL_SET_ERR_MSG_MOD(extack,
29874c3844d9SPaul Blakey 					   "can't offload re-write of ipv6 address with action ct");
29884c3844d9SPaul Blakey 			return -EOPNOTSUPP;
29898998576bSDmytro Linkin 		}
29904c3844d9SPaul Blakey 	} else if (ct_flow && (htype == FLOW_ACT_MANGLE_HDR_TYPE_TCP ||
29914c3844d9SPaul Blakey 			       htype == FLOW_ACT_MANGLE_HDR_TYPE_UDP)) {
29924c3844d9SPaul Blakey 		NL_SET_ERR_MSG_MOD(extack,
29934c3844d9SPaul Blakey 				   "can't offload re-write of transport header ports with action ct");
29944c3844d9SPaul Blakey 		return -EOPNOTSUPP;
29954c3844d9SPaul Blakey 	}
29964c3844d9SPaul Blakey 
29974c3844d9SPaul Blakey 	return 0;
29988998576bSDmytro Linkin }
29998998576bSDmytro Linkin 
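/* Validate the header-rewrite actions against the flow's match: per-action
 * checks are delegated to is_action_keys_supported(), and IP-field rewrites
 * are refused when the L4 protocol is not TCP, UDP or ICMP.  Non-IP traffic
 * (MAC-only rewrites) is always allowed.
 */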
3000bdd66ac0SOr Gerlitz static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
300173867881SPablo Neira Ayuso 					  struct flow_action *flow_action,
30024c3844d9SPaul Blakey 					  u32 actions, bool ct_flow,
3003e98bedf5SEli Britstein 					  struct netlink_ext_ack *extack)
3004bdd66ac0SOr Gerlitz {
300573867881SPablo Neira Ayuso 	const struct flow_action_entry *act;
3006bdd66ac0SOr Gerlitz 	bool modify_ip_header;
3007bdd66ac0SOr Gerlitz 	void *headers_v;
3008bdd66ac0SOr Gerlitz 	u16 ethertype;
30098998576bSDmytro Linkin 	u8 ip_proto;
30104c3844d9SPaul Blakey 	int i, err;
3011bdd66ac0SOr Gerlitz 
30128377629eSEli Britstein 	headers_v = get_match_headers_value(actions, spec);
3013bdd66ac0SOr Gerlitz 	ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
3014bdd66ac0SOr Gerlitz 
3015bdd66ac0SOr Gerlitz 	/* for non-IP we only re-write MACs, so we're okay */
3016bdd66ac0SOr Gerlitz 	if (ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)
3017bdd66ac0SOr Gerlitz 		goto out_ok;
3018bdd66ac0SOr Gerlitz 
3019bdd66ac0SOr Gerlitz 	modify_ip_header = false;
302073867881SPablo Neira Ayuso 	flow_action_for_each(i, act, flow_action) {
302173867881SPablo Neira Ayuso 		if (act->id != FLOW_ACTION_MANGLE &&
302273867881SPablo Neira Ayuso 		    act->id != FLOW_ACTION_ADD)
3023bdd66ac0SOr Gerlitz 			continue;
3024bdd66ac0SOr Gerlitz 
30254c3844d9SPaul Blakey 		err = is_action_keys_supported(act, ct_flow,
30264c3844d9SPaul Blakey 					       &modify_ip_header, extack);
30274c3844d9SPaul Blakey 		if (err)
30284c3844d9SPaul Blakey 			return false;
3029bdd66ac0SOr Gerlitz 	}
3030bdd66ac0SOr Gerlitz 
3031bdd66ac0SOr Gerlitz 	ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
30321ccef350SJianbo Liu 	if (modify_ip_header && ip_proto != IPPROTO_TCP &&
30331ccef350SJianbo Liu 	    ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
3034e98bedf5SEli Britstein 		NL_SET_ERR_MSG_MOD(extack,
3035e98bedf5SEli Britstein 				   "can't offload re-write of non TCP/UDP/ICMP");
3036bdd66ac0SOr Gerlitz 		pr_info("can't offload re-write of ip proto %d\n", ip_proto);
3037bdd66ac0SOr Gerlitz 		return false;
3038bdd66ac0SOr Gerlitz 	}
3039bdd66ac0SOr Gerlitz 
3040bdd66ac0SOr Gerlitz out_ok:
3041bdd66ac0SOr Gerlitz 	return true;
3042bdd66ac0SOr Gerlitz }
3043bdd66ac0SOr Gerlitz 
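/* Final sanity check of the parsed action list: mirroring (split rules)
 * cannot be combined with ct, and header-rewrite actions are validated
 * against the flow match via modify_header_match_supported().
 */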
3044bdd66ac0SOr Gerlitz static bool actions_match_supported(struct mlx5e_priv *priv,
304573867881SPablo Neira Ayuso 				    struct flow_action *flow_action,
3046bdd66ac0SOr Gerlitz 				    struct mlx5e_tc_flow_parse_attr *parse_attr,
3047e98bedf5SEli Britstein 				    struct mlx5e_tc_flow *flow,
3048e98bedf5SEli Britstein 				    struct netlink_ext_ack *extack)
3049bdd66ac0SOr Gerlitz {
3050d0645b37SRoi Dayan 	bool ct_flow;
3051bdd66ac0SOr Gerlitz 	u32 actions;
3052bdd66ac0SOr Gerlitz 
30534c3844d9SPaul Blakey 	ct_flow = flow_flag_test(flow, CT);
30544c3844d9SPaul Blakey 	if (mlx5e_is_eswitch_flow(flow)) {
3055bdd66ac0SOr Gerlitz 		actions = flow->esw_attr->action;
30564c3844d9SPaul Blakey 
30574c3844d9SPaul Blakey 		if (flow->esw_attr->split_count && ct_flow) {
30584c3844d9SPaul Blakey 			/* All registers used by ct are cleared when using
30594c3844d9SPaul Blakey 			 * split rules.
30604c3844d9SPaul Blakey 			 */
30614c3844d9SPaul Blakey 			NL_SET_ERR_MSG_MOD(extack,
30624c3844d9SPaul Blakey 					   "Can't offload mirroring with action ct");
306349397b80SDan Carpenter 			return false;
30644c3844d9SPaul Blakey 		}
30654c3844d9SPaul Blakey 	} else {
3066bdd66ac0SOr Gerlitz 		actions = flow->nic_attr->action;
30674c3844d9SPaul Blakey 	}
3068bdd66ac0SOr Gerlitz 
3069bdd66ac0SOr Gerlitz 	if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
307073867881SPablo Neira Ayuso 		return modify_header_match_supported(&parse_attr->spec,
3071a655fe9fSDavid S. Miller 						     flow_action, actions,
30724c3844d9SPaul Blakey 						     ct_flow, extack);
3073bdd66ac0SOr Gerlitz 
3074bdd66ac0SOr Gerlitz 	return true;
3075bdd66ac0SOr Gerlitz }
3076bdd66ac0SOr Gerlitz 
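/* Two mlx5e netdevs belong to the same hardware when their core devices
 * report the same NIC system image GUID; used to decide whether hairpin
 * forwarding between them can be offloaded.
 */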
30775c65c564SOr Gerlitz static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
30785c65c564SOr Gerlitz {
30795c65c564SOr Gerlitz 	struct mlx5_core_dev *fmdev, *pmdev;
3080816f6706SOr Gerlitz 	u64 fsystem_guid, psystem_guid;
30815c65c564SOr Gerlitz 
30825c65c564SOr Gerlitz 	fmdev = priv->mdev;
30835c65c564SOr Gerlitz 	pmdev = peer_priv->mdev;
30845c65c564SOr Gerlitz 
308559c9d35eSAlaa Hleihel 	fsystem_guid = mlx5_query_nic_system_image_guid(fmdev);
308659c9d35eSAlaa Hleihel 	psystem_guid = mlx5_query_nic_system_image_guid(pmdev);
30875c65c564SOr Gerlitz 
3088816f6706SOr Gerlitz 	return (fsystem_guid == psystem_guid);
30895c65c564SOr Gerlitz }
30905c65c564SOr Gerlitz 
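/* Implement FLOW_ACTION_VLAN_MANGLE (as generated by tc's "vlan modify"
 * action) as a pedit of the VLAN TCI.  The rule must match on the VLAN
 * protocol, and the requested priority must equal the one already matched
 * on, since only the VID bits of the TCI are rewritten.
 */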
3091bdc837eeSEli Britstein static int add_vlan_rewrite_action(struct mlx5e_priv *priv, int namespace,
3092bdc837eeSEli Britstein 				   const struct flow_action_entry *act,
3093bdc837eeSEli Britstein 				   struct mlx5e_tc_flow_parse_attr *parse_attr,
3094bdc837eeSEli Britstein 				   struct pedit_headers_action *hdrs,
3095bdc837eeSEli Britstein 				   u32 *action, struct netlink_ext_ack *extack)
3096bdc837eeSEli Britstein {
3097bdc837eeSEli Britstein 	u16 mask16 = VLAN_VID_MASK;
3098bdc837eeSEli Britstein 	u16 val16 = act->vlan.vid & VLAN_VID_MASK;
3099bdc837eeSEli Britstein 	const struct flow_action_entry pedit_act = {
3100bdc837eeSEli Britstein 		.id = FLOW_ACTION_MANGLE,
3101bdc837eeSEli Britstein 		.mangle.htype = FLOW_ACT_MANGLE_HDR_TYPE_ETH,
3102bdc837eeSEli Britstein 		.mangle.offset = offsetof(struct vlan_ethhdr, h_vlan_TCI),
3103bdc837eeSEli Britstein 		.mangle.mask = ~(u32)be16_to_cpu(*(__be16 *)&mask16),
3104bdc837eeSEli Britstein 		.mangle.val = (u32)be16_to_cpu(*(__be16 *)&val16),
3105bdc837eeSEli Britstein 	};
31066fca9d1eSEli Britstein 	u8 match_prio_mask, match_prio_val;
3107bf2f3bcaSEli Britstein 	void *headers_c, *headers_v;
3108bdc837eeSEli Britstein 	int err;
3109bdc837eeSEli Britstein 
3110bf2f3bcaSEli Britstein 	headers_c = get_match_headers_criteria(*action, &parse_attr->spec);
3111bf2f3bcaSEli Britstein 	headers_v = get_match_headers_value(*action, &parse_attr->spec);
3112bf2f3bcaSEli Britstein 
3113bf2f3bcaSEli Britstein 	if (!(MLX5_GET(fte_match_set_lyr_2_4, headers_c, cvlan_tag) &&
3114bf2f3bcaSEli Britstein 	      MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag))) {
3115bf2f3bcaSEli Britstein 		NL_SET_ERR_MSG_MOD(extack,
3116bf2f3bcaSEli Britstein 				   "VLAN rewrite action must have VLAN protocol match");
3117bf2f3bcaSEli Britstein 		return -EOPNOTSUPP;
3118bf2f3bcaSEli Britstein 	}
3119bf2f3bcaSEli Britstein 
31206fca9d1eSEli Britstein 	match_prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
31216fca9d1eSEli Britstein 	match_prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
31226fca9d1eSEli Britstein 	if (act->vlan.prio != (match_prio_val & match_prio_mask)) {
31236fca9d1eSEli Britstein 		NL_SET_ERR_MSG_MOD(extack,
31246fca9d1eSEli Britstein 				   "Changing VLAN prio is not supported");
3125bdc837eeSEli Britstein 		return -EOPNOTSUPP;
3126bdc837eeSEli Britstein 	}
3127bdc837eeSEli Britstein 
3128dec481c8SEli Cohen 	err = parse_tc_pedit_action(priv, &pedit_act, namespace, hdrs, NULL);
3129bdc837eeSEli Britstein 	*action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
3130bdc837eeSEli Britstein 
3131bdc837eeSEli Britstein 	return err;
3132bdc837eeSEli Britstein }
3133bdc837eeSEli Britstein 
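/* Rewrite the packet's VLAN VID to 0 while keeping the priority the rule
 * already matches on; built on top of add_vlan_rewrite_action() in the FDB
 * namespace (used when the device requires prio-tagged traffic).
 */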
31340bac1194SEli Britstein static int
31350bac1194SEli Britstein add_vlan_prio_tag_rewrite_action(struct mlx5e_priv *priv,
31360bac1194SEli Britstein 				 struct mlx5e_tc_flow_parse_attr *parse_attr,
31370bac1194SEli Britstein 				 struct pedit_headers_action *hdrs,
31380bac1194SEli Britstein 				 u32 *action, struct netlink_ext_ack *extack)
31390bac1194SEli Britstein {
31400bac1194SEli Britstein 	const struct flow_action_entry prio_tag_act = {
31410bac1194SEli Britstein 		.vlan.vid = 0,
31420bac1194SEli Britstein 		.vlan.prio =
31430bac1194SEli Britstein 			MLX5_GET(fte_match_set_lyr_2_4,
31440bac1194SEli Britstein 				 get_match_headers_value(*action,
31450bac1194SEli Britstein 							 &parse_attr->spec),
31460bac1194SEli Britstein 				 first_prio) &
31470bac1194SEli Britstein 			MLX5_GET(fte_match_set_lyr_2_4,
31480bac1194SEli Britstein 				 get_match_headers_criteria(*action,
31490bac1194SEli Britstein 							    &parse_attr->spec),
31500bac1194SEli Britstein 				 first_prio),
31510bac1194SEli Britstein 	};
31520bac1194SEli Britstein 
31530bac1194SEli Britstein 	return add_vlan_rewrite_action(priv, MLX5_FLOW_NAMESPACE_FDB,
31540bac1194SEli Britstein 				       &prio_tag_act, parse_attr, hdrs, action,
31550bac1194SEli Britstein 				       extack);
31560bac1194SEli Britstein }
31570bac1194SEli Britstein 
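/* Translate the tc action list of a NIC (non-eswitch) flow into mlx5
 * flow-context action flags and attributes: accept/drop, header rewrite
 * (pedit/csum), VLAN TCI rewrite, hairpin forwarding to another netdev on
 * the same hardware, and flow mark.
 */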
315873867881SPablo Neira Ayuso static int parse_tc_nic_actions(struct mlx5e_priv *priv,
315973867881SPablo Neira Ayuso 				struct flow_action *flow_action,
3160aa0cbbaeSOr Gerlitz 				struct mlx5e_tc_flow_parse_attr *parse_attr,
3161e98bedf5SEli Britstein 				struct mlx5e_tc_flow *flow,
3162e98bedf5SEli Britstein 				struct netlink_ext_ack *extack)
3163e3a2b7edSAmir Vadai {
3164aa0cbbaeSOr Gerlitz 	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
316573867881SPablo Neira Ayuso 	struct pedit_headers_action hdrs[2] = {};
316673867881SPablo Neira Ayuso 	const struct flow_action_entry *act;
31671cab1cd7SOr Gerlitz 	u32 action = 0;
3168244cd96aSCong Wang 	int err, i;
3169e3a2b7edSAmir Vadai 
317073867881SPablo Neira Ayuso 	if (!flow_action_has_entries(flow_action))
3171e3a2b7edSAmir Vadai 		return -EINVAL;
3172e3a2b7edSAmir Vadai 
317353eca1f3SJakub Kicinski 	if (!flow_action_hw_stats_check(flow_action, extack,
317453eca1f3SJakub Kicinski 					FLOW_ACTION_HW_STATS_DELAYED_BIT))
3175319a1d19SJiri Pirko 		return -EOPNOTSUPP;
3176319a1d19SJiri Pirko 
31773bc4b7bfSOr Gerlitz 	attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
3178e3a2b7edSAmir Vadai 
317973867881SPablo Neira Ayuso 	flow_action_for_each(i, act, flow_action) {
318073867881SPablo Neira Ayuso 		switch (act->id) {
318115fc92ecSTonghao Zhang 		case FLOW_ACTION_ACCEPT:
318215fc92ecSTonghao Zhang 			action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
318315fc92ecSTonghao Zhang 				  MLX5_FLOW_CONTEXT_ACTION_COUNT;
318415fc92ecSTonghao Zhang 			break;
318573867881SPablo Neira Ayuso 		case FLOW_ACTION_DROP:
31861cab1cd7SOr Gerlitz 			action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
3187aad7e08dSAmir Vadai 			if (MLX5_CAP_FLOWTABLE(priv->mdev,
3188aad7e08dSAmir Vadai 					       flow_table_properties_nic_receive.flow_counter))
31891cab1cd7SOr Gerlitz 				action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
319073867881SPablo Neira Ayuso 			break;
319173867881SPablo Neira Ayuso 		case FLOW_ACTION_MANGLE:
319273867881SPablo Neira Ayuso 		case FLOW_ACTION_ADD:
319373867881SPablo Neira Ayuso 			err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_KERNEL,
3194dec481c8SEli Cohen 						    hdrs, extack);
31952f4fe4caSOr Gerlitz 			if (err)
31962f4fe4caSOr Gerlitz 				return err;
31972f4fe4caSOr Gerlitz 
31981cab1cd7SOr Gerlitz 			action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
31992f4fe4caSOr Gerlitz 				  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
320073867881SPablo Neira Ayuso 			break;
3201bdc837eeSEli Britstein 		case FLOW_ACTION_VLAN_MANGLE:
3202bdc837eeSEli Britstein 			err = add_vlan_rewrite_action(priv,
3203bdc837eeSEli Britstein 						      MLX5_FLOW_NAMESPACE_KERNEL,
3204bdc837eeSEli Britstein 						      act, parse_attr, hdrs,
3205bdc837eeSEli Britstein 						      &action, extack);
3206bdc837eeSEli Britstein 			if (err)
3207bdc837eeSEli Britstein 				return err;
3208bdc837eeSEli Britstein 
3209bdc837eeSEli Britstein 			break;
321073867881SPablo Neira Ayuso 		case FLOW_ACTION_CSUM:
32111cab1cd7SOr Gerlitz 			if (csum_offload_supported(priv, action,
321273867881SPablo Neira Ayuso 						   act->csum_flags,
3213e98bedf5SEli Britstein 						   extack))
321473867881SPablo Neira Ayuso 				break;
321526c02749SOr Gerlitz 
321626c02749SOr Gerlitz 			return -EOPNOTSUPP;
321773867881SPablo Neira Ayuso 		case FLOW_ACTION_REDIRECT: {
321873867881SPablo Neira Ayuso 			struct net_device *peer_dev = act->dev;
32195c65c564SOr Gerlitz 
32205c65c564SOr Gerlitz 			if (priv->netdev->netdev_ops == peer_dev->netdev_ops &&
32215c65c564SOr Gerlitz 			    same_hw_devs(priv, netdev_priv(peer_dev))) {
322298b66cb1SEli Britstein 				parse_attr->mirred_ifindex[0] = peer_dev->ifindex;
3223226f2ca3SVlad Buslov 				flow_flag_set(flow, HAIRPIN);
32241cab1cd7SOr Gerlitz 				action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
32255c65c564SOr Gerlitz 					  MLX5_FLOW_CONTEXT_ACTION_COUNT;
32265c65c564SOr Gerlitz 			} else {
3227e98bedf5SEli Britstein 				NL_SET_ERR_MSG_MOD(extack,
3228e98bedf5SEli Britstein 						   "device is not on same HW, can't offload");
32295c65c564SOr Gerlitz 				netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
32305c65c564SOr Gerlitz 					    peer_dev->name);
32315c65c564SOr Gerlitz 				return -EINVAL;
32325c65c564SOr Gerlitz 			}
32335c65c564SOr Gerlitz 			}
323473867881SPablo Neira Ayuso 			break;
323573867881SPablo Neira Ayuso 		case FLOW_ACTION_MARK: {
323673867881SPablo Neira Ayuso 			u32 mark = act->mark;
3237e3a2b7edSAmir Vadai 
3238e3a2b7edSAmir Vadai 			if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
3239e98bedf5SEli Britstein 				NL_SET_ERR_MSG_MOD(extack,
3240e98bedf5SEli Britstein 						   "Bad flow mark - only 16 bits are supported");
3241e3a2b7edSAmir Vadai 				return -EINVAL;
3242e3a2b7edSAmir Vadai 			}
3243e3a2b7edSAmir Vadai 
32443bc4b7bfSOr Gerlitz 			attr->flow_tag = mark;
32451cab1cd7SOr Gerlitz 			action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
3246e3a2b7edSAmir Vadai 			}
324773867881SPablo Neira Ayuso 			break;
324873867881SPablo Neira Ayuso 		default:
32492cc1cb1dSTonghao Zhang 			NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
32502cc1cb1dSTonghao Zhang 			return -EOPNOTSUPP;
3251e3a2b7edSAmir Vadai 		}
325273867881SPablo Neira Ayuso 	}
3253e3a2b7edSAmir Vadai 
3254c500c86bSPablo Neira Ayuso 	if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
3255c500c86bSPablo Neira Ayuso 	    hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
3256c500c86bSPablo Neira Ayuso 		err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL,
325727c11b6bSEli Britstein 					    parse_attr, hdrs, &action, extack);
3258c500c86bSPablo Neira Ayuso 		if (err)
3259c500c86bSPablo Neira Ayuso 			return err;
326027c11b6bSEli Britstein 		/* in case all pedit actions are skipped, remove the MOD_HDR
326127c11b6bSEli Britstein 		 * flag.
326227c11b6bSEli Britstein 		 */
32636ae4a6a5SPaul Blakey 		if (parse_attr->mod_hdr_acts.num_actions == 0) {
326427c11b6bSEli Britstein 			action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
32656ae4a6a5SPaul Blakey 			dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
3266e7739a60SEli Britstein 		}
3267c500c86bSPablo Neira Ayuso 	}
3268c500c86bSPablo Neira Ayuso 
32691cab1cd7SOr Gerlitz 	attr->action = action;
327073867881SPablo Neira Ayuso 	if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
3271bdd66ac0SOr Gerlitz 		return -EOPNOTSUPP;
3272bdd66ac0SOr Gerlitz 
3273e3a2b7edSAmir Vadai 	return 0;
3274e3a2b7edSAmir Vadai }
3275e3a2b7edSAmir Vadai 
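/* Key used to look up encap entries in the eswitch encap table: the tunnel
 * key itself plus the tunnel type of the egress (mirred) device.
 */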
32767f1a546eSEli Britstein struct encap_key {
32771f6da306SYevgeny Kliteynik 	const struct ip_tunnel_key *ip_tun_key;
3278d386939aSYevgeny Kliteynik 	struct mlx5e_tc_tunnel *tc_tunnel;
32797f1a546eSEli Britstein };
32807f1a546eSEli Britstein 
32817f1a546eSEli Britstein static inline int cmp_encap_info(struct encap_key *a,
32827f1a546eSEli Britstein 				 struct encap_key *b)
3283a54e20b4SHadar Hen Zion {
32847f1a546eSEli Britstein 	return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) ||
3285d386939aSYevgeny Kliteynik 	       a->tc_tunnel->tunnel_type != b->tc_tunnel->tunnel_type;
3286a54e20b4SHadar Hen Zion }
3287a54e20b4SHadar Hen Zion 
32887f1a546eSEli Britstein static inline int hash_encap_info(struct encap_key *key)
3289a54e20b4SHadar Hen Zion {
32907f1a546eSEli Britstein 	return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key),
3291d386939aSYevgeny Kliteynik 		     key->tc_tunnel->tunnel_type);
3292a54e20b4SHadar Hen Zion }
3293a54e20b4SHadar Hen Zion 
3295b1d90e6bSRabie Loulou static bool is_merged_eswitch_dev(struct mlx5e_priv *priv,
3296b1d90e6bSRabie Loulou 				  struct net_device *peer_netdev)
3297b1d90e6bSRabie Loulou {
3298b1d90e6bSRabie Loulou 	struct mlx5e_priv *peer_priv;
3299b1d90e6bSRabie Loulou 
3300b1d90e6bSRabie Loulou 	peer_priv = netdev_priv(peer_netdev);
3301b1d90e6bSRabie Loulou 
3302b1d90e6bSRabie Loulou 	return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) &&
330368931c7dSRoi Dayan 		mlx5e_eswitch_rep(priv->netdev) &&
330468931c7dSRoi Dayan 		mlx5e_eswitch_rep(peer_netdev) &&
330568931c7dSRoi Dayan 		same_hw_devs(priv, peer_priv));
3306b1d90e6bSRabie Loulou }
3307b1d90e6bSRabie Loulou 
3308ce99f6b9SOr Gerlitz 
3310948993f2SVlad Buslov bool mlx5e_encap_take(struct mlx5e_encap_entry *e)
3311948993f2SVlad Buslov {
3312948993f2SVlad Buslov 	return refcount_inc_not_zero(&e->refcnt);
3313948993f2SVlad Buslov }
3314948993f2SVlad Buslov 
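/* Look up an encap entry matching @key in the eswitch encap table and take a
 * reference on it; return NULL if no live entry matches.
 */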
3315948993f2SVlad Buslov static struct mlx5e_encap_entry *
3316948993f2SVlad Buslov mlx5e_encap_get(struct mlx5e_priv *priv, struct encap_key *key,
3317948993f2SVlad Buslov 		uintptr_t hash_key)
3318948993f2SVlad Buslov {
3319948993f2SVlad Buslov 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3320948993f2SVlad Buslov 	struct mlx5e_encap_entry *e;
3321948993f2SVlad Buslov 	struct encap_key e_key;
3322948993f2SVlad Buslov 
3323948993f2SVlad Buslov 	hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
3324948993f2SVlad Buslov 				   encap_hlist, hash_key) {
3325948993f2SVlad Buslov 		e_key.ip_tun_key = &e->tun_info->key;
3326948993f2SVlad Buslov 		e_key.tc_tunnel = e->tunnel;
3327948993f2SVlad Buslov 		if (!cmp_encap_info(&e_key, key) &&
3328948993f2SVlad Buslov 		    mlx5e_encap_take(e))
3329948993f2SVlad Buslov 			return e;
3330948993f2SVlad Buslov 	}
3331948993f2SVlad Buslov 
3332948993f2SVlad Buslov 	return NULL;
3333948993f2SVlad Buslov }
3334948993f2SVlad Buslov 
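/* Duplicate tunnel info together with its variable-length options. */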
33352a4b6526SVlad Buslov static struct ip_tunnel_info *dup_tun_info(const struct ip_tunnel_info *tun_info)
33362a4b6526SVlad Buslov {
33372a4b6526SVlad Buslov 	size_t tun_size = sizeof(*tun_info) + tun_info->options_len;
33382a4b6526SVlad Buslov 
33392a4b6526SVlad Buslov 	return kmemdup(tun_info, tun_size, GFP_KERNEL);
33402a4b6526SVlad Buslov }
33412a4b6526SVlad Buslov 
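/* Return true if encap entry @e is already attached to @flow at a lower
 * out_index, i.e. the rule tries to encap twice towards the same tunnel.
 */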
3342554fe75cSDmytro Linkin static bool is_duplicated_encap_entry(struct mlx5e_priv *priv,
3343554fe75cSDmytro Linkin 				      struct mlx5e_tc_flow *flow,
3344554fe75cSDmytro Linkin 				      int out_index,
3345554fe75cSDmytro Linkin 				      struct mlx5e_encap_entry *e,
3346554fe75cSDmytro Linkin 				      struct netlink_ext_ack *extack)
3347554fe75cSDmytro Linkin {
3348554fe75cSDmytro Linkin 	int i;
3349554fe75cSDmytro Linkin 
3350554fe75cSDmytro Linkin 	for (i = 0; i < out_index; i++) {
3351554fe75cSDmytro Linkin 		if (flow->encaps[i].e != e)
3352554fe75cSDmytro Linkin 			continue;
3353554fe75cSDmytro Linkin 		NL_SET_ERR_MSG_MOD(extack, "can't duplicate encap action");
3354554fe75cSDmytro Linkin 		netdev_err(priv->netdev, "can't duplicate encap action\n");
3355554fe75cSDmytro Linkin 		return true;
3356554fe75cSDmytro Linkin 	}
3357554fe75cSDmytro Linkin 
3358554fe75cSDmytro Linkin 	return false;
3359554fe75cSDmytro Linkin }
3360554fe75cSDmytro Linkin 
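/* Find or create the encap entry for the tunnel of output @out_index and
 * attach the flow to it. New entries get their headers built via
 * mlx5e_tc_tun_create_header_ipv4/ipv6(); *encap_valid tells the caller
 * whether the entry is already resolved (MLX5_ENCAP_ENTRY_VALID).
 */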
3361a54e20b4SHadar Hen Zion static int mlx5e_attach_encap(struct mlx5e_priv *priv,
3362e98bedf5SEli Britstein 			      struct mlx5e_tc_flow *flow,
3363733d4f36SRoi Dayan 			      struct net_device *mirred_dev,
3364733d4f36SRoi Dayan 			      int out_index,
33658c4dc42bSEli Britstein 			      struct netlink_ext_ack *extack,
33660ad060eeSRoi Dayan 			      struct net_device **encap_dev,
33670ad060eeSRoi Dayan 			      bool *encap_valid)
336803a9d11eSOr Gerlitz {
3369a54e20b4SHadar Hen Zion 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
337045247bf2SOr Gerlitz 	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
3371733d4f36SRoi Dayan 	struct mlx5e_tc_flow_parse_attr *parse_attr;
33721f6da306SYevgeny Kliteynik 	const struct ip_tunnel_info *tun_info;
3373948993f2SVlad Buslov 	struct encap_key key;
3374c1ae1152SOr Gerlitz 	struct mlx5e_encap_entry *e;
3375733d4f36SRoi Dayan 	unsigned short family;
3376a54e20b4SHadar Hen Zion 	uintptr_t hash_key;
337754c177caSOz Shlomo 	int err = 0;
3378a54e20b4SHadar Hen Zion 
3379733d4f36SRoi Dayan 	parse_attr = attr->parse_attr;
33801f6da306SYevgeny Kliteynik 	tun_info = parse_attr->tun_info[out_index];
3381733d4f36SRoi Dayan 	family = ip_tunnel_info_af(tun_info);
33827f1a546eSEli Britstein 	key.ip_tun_key = &tun_info->key;
3383d386939aSYevgeny Kliteynik 	key.tc_tunnel = mlx5e_get_tc_tun(mirred_dev);
3384d71f895cSEli Cohen 	if (!key.tc_tunnel) {
3385d71f895cSEli Cohen 		NL_SET_ERR_MSG_MOD(extack, "Unsupported tunnel");
3386d71f895cSEli Cohen 		return -EOPNOTSUPP;
3387d71f895cSEli Cohen 	}
3388733d4f36SRoi Dayan 
33897f1a546eSEli Britstein 	hash_key = hash_encap_info(&key);
3390a54e20b4SHadar Hen Zion 
339161086f39SVlad Buslov 	mutex_lock(&esw->offloads.encap_tbl_lock);
3392948993f2SVlad Buslov 	e = mlx5e_encap_get(priv, &key, hash_key);
3393a54e20b4SHadar Hen Zion 
3394b2812089SVlad Buslov 	/* must verify if encap is valid or not */
3395d589e785SVlad Buslov 	if (e) {
3396554fe75cSDmytro Linkin 		/* Check that entry was not already attached to this flow */
3397554fe75cSDmytro Linkin 		if (is_duplicated_encap_entry(priv, flow, out_index, e, extack)) {
3398554fe75cSDmytro Linkin 			err = -EOPNOTSUPP;
3399554fe75cSDmytro Linkin 			goto out_err;
3400554fe75cSDmytro Linkin 		}
3401554fe75cSDmytro Linkin 
3402d589e785SVlad Buslov 		mutex_unlock(&esw->offloads.encap_tbl_lock);
3403d589e785SVlad Buslov 		wait_for_completion(&e->res_ready);
3404d589e785SVlad Buslov 
3405d589e785SVlad Buslov 		/* Protect against concurrent neigh update. */
3406d589e785SVlad Buslov 		mutex_lock(&esw->offloads.encap_tbl_lock);
34073c140dd5SVlad Buslov 		if (e->compl_result < 0) {
3408d589e785SVlad Buslov 			err = -EREMOTEIO;
3409d589e785SVlad Buslov 			goto out_err;
3410d589e785SVlad Buslov 		}
341145247bf2SOr Gerlitz 		goto attach_flow;
3412d589e785SVlad Buslov 	}
3413a54e20b4SHadar Hen Zion 
3414a54e20b4SHadar Hen Zion 	e = kzalloc(sizeof(*e), GFP_KERNEL);
341561086f39SVlad Buslov 	if (!e) {
341661086f39SVlad Buslov 		err = -ENOMEM;
341761086f39SVlad Buslov 		goto out_err;
341861086f39SVlad Buslov 	}
3419a54e20b4SHadar Hen Zion 
3420948993f2SVlad Buslov 	refcount_set(&e->refcnt, 1);
3421d589e785SVlad Buslov 	init_completion(&e->res_ready);
3422d589e785SVlad Buslov 
34232a4b6526SVlad Buslov 	tun_info = dup_tun_info(tun_info);
34242a4b6526SVlad Buslov 	if (!tun_info) {
34252a4b6526SVlad Buslov 		err = -ENOMEM;
34262a4b6526SVlad Buslov 		goto out_err_init;
34272a4b6526SVlad Buslov 	}
34281f6da306SYevgeny Kliteynik 	e->tun_info = tun_info;
3429101f4de9SOz Shlomo 	err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack);
34302a4b6526SVlad Buslov 	if (err)
34312a4b6526SVlad Buslov 		goto out_err_init;
343254c177caSOz Shlomo 
3433a54e20b4SHadar Hen Zion 	INIT_LIST_HEAD(&e->flows);
3434d589e785SVlad Buslov 	hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
3435d589e785SVlad Buslov 	mutex_unlock(&esw->offloads.encap_tbl_lock);
3436a54e20b4SHadar Hen Zion 
3437ce99f6b9SOr Gerlitz 	if (family == AF_INET)
3438101f4de9SOz Shlomo 		err = mlx5e_tc_tun_create_header_ipv4(priv, mirred_dev, e);
3439ce99f6b9SOr Gerlitz 	else if (family == AF_INET6)
3440101f4de9SOz Shlomo 		err = mlx5e_tc_tun_create_header_ipv6(priv, mirred_dev, e);
3441ce99f6b9SOr Gerlitz 
3442d589e785SVlad Buslov 	/* Protect against concurrent neigh update. */
3443d589e785SVlad Buslov 	mutex_lock(&esw->offloads.encap_tbl_lock);
3444d589e785SVlad Buslov 	complete_all(&e->res_ready);
3445d589e785SVlad Buslov 	if (err) {
3446d589e785SVlad Buslov 		e->compl_result = err;
3447a54e20b4SHadar Hen Zion 		goto out_err;
3448d589e785SVlad Buslov 	}
34493c140dd5SVlad Buslov 	e->compl_result = 1;
3450a54e20b4SHadar Hen Zion 
345145247bf2SOr Gerlitz attach_flow:
3452948993f2SVlad Buslov 	flow->encaps[out_index].e = e;
34538c4dc42bSEli Britstein 	list_add(&flow->encaps[out_index].list, &e->flows);
34548c4dc42bSEli Britstein 	flow->encaps[out_index].index = out_index;
345545247bf2SOr Gerlitz 	*encap_dev = e->out_dev;
34568c4dc42bSEli Britstein 	if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
34572b688ea5SMaor Gottlieb 		attr->dests[out_index].pkt_reformat = e->pkt_reformat;
34588c4dc42bSEli Britstein 		attr->dests[out_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
34590ad060eeSRoi Dayan 		*encap_valid = true;
34608c4dc42bSEli Britstein 	} else {
34610ad060eeSRoi Dayan 		*encap_valid = false;
34628c4dc42bSEli Britstein 	}
346361086f39SVlad Buslov 	mutex_unlock(&esw->offloads.encap_tbl_lock);
346445247bf2SOr Gerlitz 
3465232c0013SHadar Hen Zion 	return err;
3466a54e20b4SHadar Hen Zion 
3467a54e20b4SHadar Hen Zion out_err:
346861086f39SVlad Buslov 	mutex_unlock(&esw->offloads.encap_tbl_lock);
3469d589e785SVlad Buslov 	if (e)
3470d589e785SVlad Buslov 		mlx5e_encap_put(priv, e);
3471a54e20b4SHadar Hen Zion 	return err;
34722a4b6526SVlad Buslov 
34732a4b6526SVlad Buslov out_err_init:
34742a4b6526SVlad Buslov 	mutex_unlock(&esw->offloads.encap_tbl_lock);
34752a4b6526SVlad Buslov 	kfree(tun_info);
34762a4b6526SVlad Buslov 	kfree(e);
34772a4b6526SVlad Buslov 	return err;
3478a54e20b4SHadar Hen Zion }
3479a54e20b4SHadar Hen Zion 
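/* Convert a TC vlan push/pop action into the matching eswitch VLAN action
 * bits, honouring the two-level VLAN depth supported by the device.
 */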
34801482bd3dSJianbo Liu static int parse_tc_vlan_action(struct mlx5e_priv *priv,
348173867881SPablo Neira Ayuso 				const struct flow_action_entry *act,
34821482bd3dSJianbo Liu 				struct mlx5_esw_flow_attr *attr,
34831482bd3dSJianbo Liu 				u32 *action)
34841482bd3dSJianbo Liu {
3485cc495188SJianbo Liu 	u8 vlan_idx = attr->total_vlan;
3486cc495188SJianbo Liu 
3487cc495188SJianbo Liu 	if (vlan_idx >= MLX5_FS_VLAN_DEPTH)
34881482bd3dSJianbo Liu 		return -EOPNOTSUPP;
3489cc495188SJianbo Liu 
349073867881SPablo Neira Ayuso 	switch (act->id) {
349173867881SPablo Neira Ayuso 	case FLOW_ACTION_VLAN_POP:
3492cc495188SJianbo Liu 		if (vlan_idx) {
3493cc495188SJianbo Liu 			if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
3494cc495188SJianbo Liu 								 MLX5_FS_VLAN_DEPTH))
3495cc495188SJianbo Liu 				return -EOPNOTSUPP;
3496cc495188SJianbo Liu 
3497cc495188SJianbo Liu 			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2;
3498cc495188SJianbo Liu 		} else {
3499cc495188SJianbo Liu 			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
3500cc495188SJianbo Liu 		}
350173867881SPablo Neira Ayuso 		break;
350273867881SPablo Neira Ayuso 	case FLOW_ACTION_VLAN_PUSH:
350373867881SPablo Neira Ayuso 		attr->vlan_vid[vlan_idx] = act->vlan.vid;
350473867881SPablo Neira Ayuso 		attr->vlan_prio[vlan_idx] = act->vlan.prio;
350573867881SPablo Neira Ayuso 		attr->vlan_proto[vlan_idx] = act->vlan.proto;
3506cc495188SJianbo Liu 		if (!attr->vlan_proto[vlan_idx])
3507cc495188SJianbo Liu 			attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q);
3508cc495188SJianbo Liu 
3509cc495188SJianbo Liu 		if (vlan_idx) {
3510cc495188SJianbo Liu 			if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
3511cc495188SJianbo Liu 								 MLX5_FS_VLAN_DEPTH))
3512cc495188SJianbo Liu 				return -EOPNOTSUPP;
3513cc495188SJianbo Liu 
3514cc495188SJianbo Liu 			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
3515cc495188SJianbo Liu 		} else {
3516cc495188SJianbo Liu 			if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) &&
351773867881SPablo Neira Ayuso 			    (act->vlan.proto != htons(ETH_P_8021Q) ||
351873867881SPablo Neira Ayuso 			     act->vlan.prio))
3519cc495188SJianbo Liu 				return -EOPNOTSUPP;
3520cc495188SJianbo Liu 
3521cc495188SJianbo Liu 			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
35221482bd3dSJianbo Liu 		}
352373867881SPablo Neira Ayuso 		break;
352473867881SPablo Neira Ayuso 	default:
3525bdc837eeSEli Britstein 		return -EINVAL;
35261482bd3dSJianbo Liu 	}
35271482bd3dSJianbo Liu 
3528cc495188SJianbo Liu 	attr->total_vlan = vlan_idx + 1;
3529cc495188SJianbo Liu 
35301482bd3dSJianbo Liu 	return 0;
35311482bd3dSJianbo Liu }
35321482bd3dSJianbo Liu 
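/* When the output device is a VLAN upper, emulate it with VLAN push actions
 * and redirect to the real (lower) device, recursing over stacked VLANs.
 */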
3533278748a9SEli Britstein static int add_vlan_push_action(struct mlx5e_priv *priv,
3534278748a9SEli Britstein 				struct mlx5_esw_flow_attr *attr,
3535278748a9SEli Britstein 				struct net_device **out_dev,
3536278748a9SEli Britstein 				u32 *action)
3537278748a9SEli Britstein {
3538278748a9SEli Britstein 	struct net_device *vlan_dev = *out_dev;
3539278748a9SEli Britstein 	struct flow_action_entry vlan_act = {
3540278748a9SEli Britstein 		.id = FLOW_ACTION_VLAN_PUSH,
3541278748a9SEli Britstein 		.vlan.vid = vlan_dev_vlan_id(vlan_dev),
3542278748a9SEli Britstein 		.vlan.proto = vlan_dev_vlan_proto(vlan_dev),
3543278748a9SEli Britstein 		.vlan.prio = 0,
3544278748a9SEli Britstein 	};
3545278748a9SEli Britstein 	int err;
3546278748a9SEli Britstein 
3547278748a9SEli Britstein 	err = parse_tc_vlan_action(priv, &vlan_act, attr, action);
3548278748a9SEli Britstein 	if (err)
3549278748a9SEli Britstein 		return err;
3550278748a9SEli Britstein 
3551278748a9SEli Britstein 	*out_dev = dev_get_by_index_rcu(dev_net(vlan_dev),
3552278748a9SEli Britstein 					dev_get_iflink(vlan_dev));
3553278748a9SEli Britstein 	if (is_vlan_dev(*out_dev))
3554278748a9SEli Britstein 		err = add_vlan_push_action(priv, attr, out_dev, action);
3555278748a9SEli Britstein 
3556278748a9SEli Britstein 	return err;
3557278748a9SEli Britstein }
3558278748a9SEli Britstein 
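/* When the ingress filter device is a VLAN upper, add one VLAN pop per
 * nesting level between the filter device and this netdev.
 */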
355935a605dbSEli Britstein static int add_vlan_pop_action(struct mlx5e_priv *priv,
356035a605dbSEli Britstein 			       struct mlx5_esw_flow_attr *attr,
356135a605dbSEli Britstein 			       u32 *action)
356235a605dbSEli Britstein {
356335a605dbSEli Britstein 	struct flow_action_entry vlan_act = {
356435a605dbSEli Britstein 		.id = FLOW_ACTION_VLAN_POP,
356535a605dbSEli Britstein 	};
356670f478caSDmytro Linkin 	int nest_level, err = 0;
356735a605dbSEli Britstein 
356870f478caSDmytro Linkin 	nest_level = attr->parse_attr->filter_dev->lower_level -
356970f478caSDmytro Linkin 						priv->netdev->lower_level;
357035a605dbSEli Britstein 	while (nest_level--) {
357135a605dbSEli Britstein 		err = parse_tc_vlan_action(priv, &vlan_act, attr, action);
357235a605dbSEli Britstein 		if (err)
357335a605dbSEli Britstein 			return err;
357435a605dbSEli Britstein 	}
357535a605dbSEli Britstein 
357635a605dbSEli Britstein 	return err;
357735a605dbSEli Britstein }
357835a605dbSEli Britstein 
3579f6dc1264SPaul Blakey bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
3580f6dc1264SPaul Blakey 				    struct net_device *out_dev)
3581f6dc1264SPaul Blakey {
3582f6dc1264SPaul Blakey 	if (is_merged_eswitch_dev(priv, out_dev))
3583f6dc1264SPaul Blakey 		return true;
3584f6dc1264SPaul Blakey 
3585f6dc1264SPaul Blakey 	return mlx5e_eswitch_rep(out_dev) &&
3586f6dc1264SPaul Blakey 	       same_hw_devs(priv, netdev_priv(out_dev));
3587f6dc1264SPaul Blakey }
3588f6dc1264SPaul Blakey 
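/* Return true if the rule already outputs to @out_dev, i.e. it tries to
 * mirror/redirect twice to the same device.
 */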
3589554fe75cSDmytro Linkin static bool is_duplicated_output_device(struct net_device *dev,
3590554fe75cSDmytro Linkin 					struct net_device *out_dev,
3591554fe75cSDmytro Linkin 					int *ifindexes, int if_count,
3592554fe75cSDmytro Linkin 					struct netlink_ext_ack *extack)
3593554fe75cSDmytro Linkin {
3594554fe75cSDmytro Linkin 	int i;
3595554fe75cSDmytro Linkin 
3596554fe75cSDmytro Linkin 	for (i = 0; i < if_count; i++) {
3597554fe75cSDmytro Linkin 		if (ifindexes[i] == out_dev->ifindex) {
3598554fe75cSDmytro Linkin 			NL_SET_ERR_MSG_MOD(extack,
3599554fe75cSDmytro Linkin 					   "can't duplicate output to same device");
3600554fe75cSDmytro Linkin 			netdev_err(dev, "can't duplicate output to same device: %s\n",
3601554fe75cSDmytro Linkin 				   out_dev->name);
3602554fe75cSDmytro Linkin 			return true;
3603554fe75cSDmytro Linkin 		}
3604554fe75cSDmytro Linkin 	}
3605554fe75cSDmytro Linkin 
3606554fe75cSDmytro Linkin 	return false;
3607554fe75cSDmytro Linkin }
3608554fe75cSDmytro Linkin 
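/* Validate a TC goto action: goto is rejected for flow table (FT) rules, to a
 * lower (or same) numbered chain when backward goto is unsupported, beyond the
 * supported chain range, or combined with reformat/decap when the device lacks
 * reformat_and_fwd_to_table support.
 */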
36092fbbc30dSEli Cohen static int mlx5_validate_goto_chain(struct mlx5_eswitch *esw,
36102fbbc30dSEli Cohen 				    struct mlx5e_tc_flow *flow,
36112fbbc30dSEli Cohen 				    const struct flow_action_entry *act,
36122fbbc30dSEli Cohen 				    u32 actions,
36132fbbc30dSEli Cohen 				    struct netlink_ext_ack *extack)
36142fbbc30dSEli Cohen {
36152fbbc30dSEli Cohen 	u32 max_chain = mlx5_esw_chains_get_chain_range(esw);
36162fbbc30dSEli Cohen 	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
36172fbbc30dSEli Cohen 	bool ft_flow = mlx5e_is_ft_flow(flow);
36182fbbc30dSEli Cohen 	u32 dest_chain = act->chain_index;
36192fbbc30dSEli Cohen 
36202fbbc30dSEli Cohen 	if (ft_flow) {
36212fbbc30dSEli Cohen 		NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported");
36222fbbc30dSEli Cohen 		return -EOPNOTSUPP;
36232fbbc30dSEli Cohen 	}
36242fbbc30dSEli Cohen 
36252fbbc30dSEli Cohen 	if (!mlx5_esw_chains_backwards_supported(esw) &&
36262fbbc30dSEli Cohen 	    dest_chain <= attr->chain) {
36272fbbc30dSEli Cohen 		NL_SET_ERR_MSG_MOD(extack,
36282fbbc30dSEli Cohen 				   "Goto lower numbered chain isn't supported");
36292fbbc30dSEli Cohen 		return -EOPNOTSUPP;
36302fbbc30dSEli Cohen 	}
36312fbbc30dSEli Cohen 	if (dest_chain > max_chain) {
36322fbbc30dSEli Cohen 		NL_SET_ERR_MSG_MOD(extack,
36332fbbc30dSEli Cohen 				   "Requested destination chain is out of supported range");
36342fbbc30dSEli Cohen 		return -EOPNOTSUPP;
36352fbbc30dSEli Cohen 	}
36362fbbc30dSEli Cohen 
36372fbbc30dSEli Cohen 	if (actions & (MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
36382fbbc30dSEli Cohen 		       MLX5_FLOW_CONTEXT_ACTION_DECAP) &&
36392fbbc30dSEli Cohen 	    !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat_and_fwd_to_table)) {
36402fbbc30dSEli Cohen 		NL_SET_ERR_MSG_MOD(extack,
36412fbbc30dSEli Cohen 				   "Goto chain is not allowed if action has reformat or decap");
36422fbbc30dSEli Cohen 		return -EOPNOTSUPP;
36432fbbc30dSEli Cohen 	}
36442fbbc30dSEli Cohen 
36452fbbc30dSEli Cohen 	return 0;
36462fbbc30dSEli Cohen }
36472fbbc30dSEli Cohen 
3648613f53feSEli Cohen static int verify_uplink_forwarding(struct mlx5e_priv *priv,
3649613f53feSEli Cohen 				    struct mlx5e_tc_flow *flow,
3650613f53feSEli Cohen 				    struct net_device *out_dev,
3651613f53feSEli Cohen 				    struct netlink_ext_ack *extack)
3652613f53feSEli Cohen {
3653613f53feSEli Cohen 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3654613f53feSEli Cohen 	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
3655613f53feSEli Cohen 	struct mlx5e_rep_priv *rep_priv;
3656613f53feSEli Cohen 
3657613f53feSEli Cohen 	/* Forwarding non encapsulated traffic between
3658613f53feSEli Cohen 	 * uplink ports is allowed only if
3659613f53feSEli Cohen 	 * termination_table_raw_traffic cap is set.
3660613f53feSEli Cohen 	 *
3661613f53feSEli Cohen 	 * Input vport was stored in esw_attr->in_rep.
3662613f53feSEli Cohen 	 * In LAG case, *priv* is the private data of
3663613f53feSEli Cohen 	 * uplink which may be not the input vport.
3664613f53feSEli Cohen 	 */
3665613f53feSEli Cohen 	rep_priv = mlx5e_rep_to_rep_priv(attr->in_rep);
3666613f53feSEli Cohen 
3667613f53feSEli Cohen 	if (!(mlx5e_eswitch_uplink_rep(rep_priv->netdev) &&
3668613f53feSEli Cohen 	      mlx5e_eswitch_uplink_rep(out_dev)))
3669613f53feSEli Cohen 		return 0;
3670613f53feSEli Cohen 
3671613f53feSEli Cohen 	if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev,
3672613f53feSEli Cohen 					termination_table_raw_traffic)) {
3673613f53feSEli Cohen 		NL_SET_ERR_MSG_MOD(extack,
3674613f53feSEli Cohen 				   "devices are both uplink, can't offload forwarding");
3675613f53feSEli Cohen 		pr_err("devices %s %s are both uplink, can't offload forwarding\n",
3676613f53feSEli Cohen 		       priv->netdev->name, out_dev->name);
3677613f53feSEli Cohen 		return -EOPNOTSUPP;
3678613f53feSEli Cohen 	} else if (out_dev != rep_priv->netdev) {
3679613f53feSEli Cohen 		NL_SET_ERR_MSG_MOD(extack,
3680613f53feSEli Cohen 				   "devices are not the same uplink, can't offload forwarding");
3681613f53feSEli Cohen 		pr_err("devices %s %s are both uplink but not the same, can't offload forwarding\n",
3682613f53feSEli Cohen 		       priv->netdev->name, out_dev->name);
3683613f53feSEli Cohen 		return -EOPNOTSUPP;
3684613f53feSEli Cohen 	}
3685613f53feSEli Cohen 	return 0;
3686613f53feSEli Cohen }
3687613f53feSEli Cohen 
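/* Translate the TC flow_action list of an eswitch (FDB) rule into eswitch
 * flow attributes: forward/mirror destinations, VLAN push/pop/rewrite,
 * encap/decap, pedit/mod-hdr, goto chain and CT, then validate the resulting
 * combination.
 */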
368873867881SPablo Neira Ayuso static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
368973867881SPablo Neira Ayuso 				struct flow_action *flow_action,
3690e98bedf5SEli Britstein 				struct mlx5e_tc_flow *flow,
3691e98bedf5SEli Britstein 				struct netlink_ext_ack *extack)
3692a54e20b4SHadar Hen Zion {
369373867881SPablo Neira Ayuso 	struct pedit_headers_action hdrs[2] = {};
3694bf07aa73SPaul Blakey 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3695ecf5bb79SOr Gerlitz 	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
36966f9af8ffSTonghao Zhang 	struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
36971d447a39SSaeed Mahameed 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
369873867881SPablo Neira Ayuso 	const struct ip_tunnel_info *info = NULL;
3699554fe75cSDmytro Linkin 	int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS];
370084179981SPaul Blakey 	bool ft_flow = mlx5e_is_ft_flow(flow);
370173867881SPablo Neira Ayuso 	const struct flow_action_entry *act;
37020a7fcb78SPaul Blakey 	bool encap = false, decap = false;
37030a7fcb78SPaul Blakey 	u32 action = attr->action;
3704554fe75cSDmytro Linkin 	int err, i, if_count = 0;
3705f828ca6aSEli Cohen 	bool mpls_push = false;
370603a9d11eSOr Gerlitz 
370773867881SPablo Neira Ayuso 	if (!flow_action_has_entries(flow_action))
370803a9d11eSOr Gerlitz 		return -EINVAL;
370903a9d11eSOr Gerlitz 
371053eca1f3SJakub Kicinski 	if (!flow_action_hw_stats_check(flow_action, extack,
371153eca1f3SJakub Kicinski 					FLOW_ACTION_HW_STATS_DELAYED_BIT))
3712319a1d19SJiri Pirko 		return -EOPNOTSUPP;
3713319a1d19SJiri Pirko 
371473867881SPablo Neira Ayuso 	flow_action_for_each(i, act, flow_action) {
371573867881SPablo Neira Ayuso 		switch (act->id) {
371673867881SPablo Neira Ayuso 		case FLOW_ACTION_DROP:
37171cab1cd7SOr Gerlitz 			action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
371803a9d11eSOr Gerlitz 				  MLX5_FLOW_CONTEXT_ACTION_COUNT;
371973867881SPablo Neira Ayuso 			break;
3720f828ca6aSEli Cohen 		case FLOW_ACTION_MPLS_PUSH:
3721f828ca6aSEli Cohen 			if (!MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
3722f828ca6aSEli Cohen 							reformat_l2_to_l3_tunnel) ||
3723f828ca6aSEli Cohen 			    act->mpls_push.proto != htons(ETH_P_MPLS_UC)) {
3724f828ca6aSEli Cohen 				NL_SET_ERR_MSG_MOD(extack,
3725f828ca6aSEli Cohen 						   "mpls push is supported only for mpls_uc protocol");
3726f828ca6aSEli Cohen 				return -EOPNOTSUPP;
3727f828ca6aSEli Cohen 			}
3728f828ca6aSEli Cohen 			mpls_push = true;
3729f828ca6aSEli Cohen 			break;
373073867881SPablo Neira Ayuso 		case FLOW_ACTION_MANGLE:
373173867881SPablo Neira Ayuso 		case FLOW_ACTION_ADD:
373273867881SPablo Neira Ayuso 			err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_FDB,
3733dec481c8SEli Cohen 						    hdrs, extack);
3734d7e75a32SOr Gerlitz 			if (err)
3735d7e75a32SOr Gerlitz 				return err;
3736d7e75a32SOr Gerlitz 
37371cab1cd7SOr Gerlitz 			action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
3738e85e02baSEli Britstein 			attr->split_count = attr->out_count;
373973867881SPablo Neira Ayuso 			break;
374073867881SPablo Neira Ayuso 		case FLOW_ACTION_CSUM:
37411cab1cd7SOr Gerlitz 			if (csum_offload_supported(priv, action,
374273867881SPablo Neira Ayuso 						   act->csum_flags, extack))
374373867881SPablo Neira Ayuso 				break;
374426c02749SOr Gerlitz 
374526c02749SOr Gerlitz 			return -EOPNOTSUPP;
374673867881SPablo Neira Ayuso 		case FLOW_ACTION_REDIRECT:
374773867881SPablo Neira Ayuso 		case FLOW_ACTION_MIRRED: {
374803a9d11eSOr Gerlitz 			struct mlx5e_priv *out_priv;
3749592d3651SChris Mi 			struct net_device *out_dev;
375003a9d11eSOr Gerlitz 
375173867881SPablo Neira Ayuso 			out_dev = act->dev;
3752ef381359SOz Shlomo 			if (!out_dev) {
3753ef381359SOz Shlomo 				/* out_dev is NULL when filters with a
3754ef381359SOz Shlomo 				 * non-existing mirred device are replayed to
3755ef381359SOz Shlomo 				 * the driver.
3756ef381359SOz Shlomo 				 */
3757ef381359SOz Shlomo 				return -EINVAL;
3758ef381359SOz Shlomo 			}
375903a9d11eSOr Gerlitz 
3760f828ca6aSEli Cohen 			if (mpls_push && !netif_is_bareudp(out_dev)) {
3761f828ca6aSEli Cohen 				NL_SET_ERR_MSG_MOD(extack,
3762f828ca6aSEli Cohen 						   "mpls is supported only through a bareudp device");
3763f828ca6aSEli Cohen 				return -EOPNOTSUPP;
3764f828ca6aSEli Cohen 			}
3765f828ca6aSEli Cohen 
376684179981SPaul Blakey 			if (ft_flow && out_dev == priv->netdev) {
376784179981SPaul Blakey 				/* Ignore forward to self rules generated
376884179981SPaul Blakey 				 * by adding both mlx5 devs to the flow table
376984179981SPaul Blakey 				 * block on a normal nft offload setup.
377084179981SPaul Blakey 				 */
377184179981SPaul Blakey 				return -EOPNOTSUPP;
377284179981SPaul Blakey 			}
377384179981SPaul Blakey 
3774592d3651SChris Mi 			if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
3775e98bedf5SEli Britstein 				NL_SET_ERR_MSG_MOD(extack,
3776e98bedf5SEli Britstein 						   "can't support more output ports, can't offload forwarding");
37774ccd83f4SRoi Dayan 				netdev_warn(priv->netdev,
37784ccd83f4SRoi Dayan 					    "can't support more than %d output ports, can't offload forwarding\n",
3779592d3651SChris Mi 					    attr->out_count);
3780592d3651SChris Mi 				return -EOPNOTSUPP;
3781592d3651SChris Mi 			}
3782592d3651SChris Mi 
3783f493f155SEli Britstein 			action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
3784f493f155SEli Britstein 				  MLX5_FLOW_CONTEXT_ACTION_COUNT;
3785b6a4ac24SVlad Buslov 			if (encap) {
3786b6a4ac24SVlad Buslov 				parse_attr->mirred_ifindex[attr->out_count] =
3787b6a4ac24SVlad Buslov 					out_dev->ifindex;
3788b6a4ac24SVlad Buslov 				parse_attr->tun_info[attr->out_count] = dup_tun_info(info);
3789b6a4ac24SVlad Buslov 				if (!parse_attr->tun_info[attr->out_count])
3790b6a4ac24SVlad Buslov 					return -ENOMEM;
3791b6a4ac24SVlad Buslov 				encap = false;
3792b6a4ac24SVlad Buslov 				attr->dests[attr->out_count].flags |=
3793b6a4ac24SVlad Buslov 					MLX5_ESW_DEST_ENCAP;
3794b6a4ac24SVlad Buslov 				attr->out_count++;
3795b6a4ac24SVlad Buslov 				/* attr->dests[].rep is resolved when we
3796b6a4ac24SVlad Buslov 				 * handle encap
3797b6a4ac24SVlad Buslov 				 */
3798b6a4ac24SVlad Buslov 			} else if (netdev_port_same_parent_id(priv->netdev, out_dev)) {
37997ba58ba7SRabie Loulou 				struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
38007ba58ba7SRabie Loulou 				struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
3801fa833bd5SVlad Buslov 				struct net_device *uplink_upper;
38027ba58ba7SRabie Loulou 
3803554fe75cSDmytro Linkin 				if (is_duplicated_output_device(priv->netdev,
3804554fe75cSDmytro Linkin 								out_dev,
3805554fe75cSDmytro Linkin 								ifindexes,
3806554fe75cSDmytro Linkin 								if_count,
3807554fe75cSDmytro Linkin 								extack))
3808554fe75cSDmytro Linkin 					return -EOPNOTSUPP;
3809554fe75cSDmytro Linkin 
3810554fe75cSDmytro Linkin 				ifindexes[if_count] = out_dev->ifindex;
3811554fe75cSDmytro Linkin 				if_count++;
3812554fe75cSDmytro Linkin 
3813fa833bd5SVlad Buslov 				rcu_read_lock();
3814fa833bd5SVlad Buslov 				uplink_upper =
3815fa833bd5SVlad Buslov 					netdev_master_upper_dev_get_rcu(uplink_dev);
38167ba58ba7SRabie Loulou 				if (uplink_upper &&
38177ba58ba7SRabie Loulou 				    netif_is_lag_master(uplink_upper) &&
38187ba58ba7SRabie Loulou 				    uplink_upper == out_dev)
38197ba58ba7SRabie Loulou 					out_dev = uplink_dev;
3820fa833bd5SVlad Buslov 				rcu_read_unlock();
38217ba58ba7SRabie Loulou 
3822278748a9SEli Britstein 				if (is_vlan_dev(out_dev)) {
3823278748a9SEli Britstein 					err = add_vlan_push_action(priv, attr,
3824278748a9SEli Britstein 								   &out_dev,
3825278748a9SEli Britstein 								   &action);
3826278748a9SEli Britstein 					if (err)
3827278748a9SEli Britstein 						return err;
3828278748a9SEli Britstein 				}
3829f6dc1264SPaul Blakey 
383035a605dbSEli Britstein 				if (is_vlan_dev(parse_attr->filter_dev)) {
383135a605dbSEli Britstein 					err = add_vlan_pop_action(priv, attr,
383235a605dbSEli Britstein 								  &action);
383335a605dbSEli Britstein 					if (err)
383435a605dbSEli Britstein 						return err;
383535a605dbSEli Britstein 				}
3836278748a9SEli Britstein 
3837613f53feSEli Cohen 				err = verify_uplink_forwarding(priv, flow, out_dev, extack);
3838613f53feSEli Cohen 				if (err)
3839613f53feSEli Cohen 					return err;
3840ffec9702STonghao Zhang 
3841f6dc1264SPaul Blakey 				if (!mlx5e_is_valid_eswitch_fwd_dev(priv, out_dev)) {
3842f6dc1264SPaul Blakey 					NL_SET_ERR_MSG_MOD(extack,
3843f6dc1264SPaul Blakey 							   "devices are not on same switch HW, can't offload forwarding");
38444ccd83f4SRoi Dayan 					netdev_warn(priv->netdev,
38454ccd83f4SRoi Dayan 						    "devices %s %s not on same switch HW, can't offload forwarding\n",
38464ccd83f4SRoi Dayan 						    priv->netdev->name,
38474ccd83f4SRoi Dayan 						    out_dev->name);
3848a0646c88SEli Britstein 					return -EOPNOTSUPP;
3849f6dc1264SPaul Blakey 				}
3850a0646c88SEli Britstein 
385103a9d11eSOr Gerlitz 				out_priv = netdev_priv(out_dev);
38521d447a39SSaeed Mahameed 				rpriv = out_priv->ppriv;
3853df65a573SEli Britstein 				attr->dests[attr->out_count].rep = rpriv->rep;
3854df65a573SEli Britstein 				attr->dests[attr->out_count].mdev = out_priv->mdev;
3855df65a573SEli Britstein 				attr->out_count++;
3856ef381359SOz Shlomo 			} else if (parse_attr->filter_dev != priv->netdev) {
3857ef381359SOz Shlomo 				/* All mlx5 devices are called to configure
3858ef381359SOz Shlomo 				 * high level device filters. Therefore, the
3859ef381359SOz Shlomo 				 * *attempt* to install a filter on an invalid
3860ef381359SOz Shlomo 				 * eswitch should not trigger an explicit error.
3861ef381359SOz Shlomo 				 */
3862ef381359SOz Shlomo 				return -EINVAL;
3863a54e20b4SHadar Hen Zion 			} else {
3864e98bedf5SEli Britstein 				NL_SET_ERR_MSG_MOD(extack,
3865e98bedf5SEli Britstein 						   "devices are not on same switch HW, can't offload forwarding");
38664ccd83f4SRoi Dayan 				netdev_warn(priv->netdev,
38674ccd83f4SRoi Dayan 					    "devices %s %s not on same switch HW, can't offload forwarding\n",
38684ccd83f4SRoi Dayan 					    priv->netdev->name,
38694ccd83f4SRoi Dayan 					    out_dev->name);
3870a54e20b4SHadar Hen Zion 				return -EINVAL;
3871a54e20b4SHadar Hen Zion 			}
3872a54e20b4SHadar Hen Zion 			}
387373867881SPablo Neira Ayuso 			break;
387473867881SPablo Neira Ayuso 		case FLOW_ACTION_TUNNEL_ENCAP:
387573867881SPablo Neira Ayuso 			info = act->tunnel;
3876a54e20b4SHadar Hen Zion 			if (info)
3877a54e20b4SHadar Hen Zion 				encap = true;
3878a54e20b4SHadar Hen Zion 			else
3879a54e20b4SHadar Hen Zion 				return -EOPNOTSUPP;
388003a9d11eSOr Gerlitz 
388173867881SPablo Neira Ayuso 			break;
388273867881SPablo Neira Ayuso 		case FLOW_ACTION_VLAN_PUSH:
388373867881SPablo Neira Ayuso 		case FLOW_ACTION_VLAN_POP:
388476b496b1SEli Britstein 			if (act->id == FLOW_ACTION_VLAN_PUSH &&
388576b496b1SEli Britstein 			    (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP)) {
388676b496b1SEli Britstein 				/* Replace vlan pop+push with vlan modify */
388776b496b1SEli Britstein 				action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
388876b496b1SEli Britstein 				err = add_vlan_rewrite_action(priv,
388976b496b1SEli Britstein 							      MLX5_FLOW_NAMESPACE_FDB,
389076b496b1SEli Britstein 							      act, parse_attr, hdrs,
389176b496b1SEli Britstein 							      &action, extack);
389276b496b1SEli Britstein 			} else {
389373867881SPablo Neira Ayuso 				err = parse_tc_vlan_action(priv, act, attr, &action);
389476b496b1SEli Britstein 			}
38951482bd3dSJianbo Liu 			if (err)
38961482bd3dSJianbo Liu 				return err;
38971482bd3dSJianbo Liu 
3898e85e02baSEli Britstein 			attr->split_count = attr->out_count;
389973867881SPablo Neira Ayuso 			break;
3900bdc837eeSEli Britstein 		case FLOW_ACTION_VLAN_MANGLE:
3901bdc837eeSEli Britstein 			err = add_vlan_rewrite_action(priv,
3902bdc837eeSEli Britstein 						      MLX5_FLOW_NAMESPACE_FDB,
3903bdc837eeSEli Britstein 						      act, parse_attr, hdrs,
3904bdc837eeSEli Britstein 						      &action, extack);
3905bdc837eeSEli Britstein 			if (err)
3906bdc837eeSEli Britstein 				return err;
3907bdc837eeSEli Britstein 
3908bdc837eeSEli Britstein 			attr->split_count = attr->out_count;
3909bdc837eeSEli Britstein 			break;
391073867881SPablo Neira Ayuso 		case FLOW_ACTION_TUNNEL_DECAP:
39110a7fcb78SPaul Blakey 			decap = true;
391273867881SPablo Neira Ayuso 			break;
39132fbbc30dSEli Cohen 		case FLOW_ACTION_GOTO:
39142fbbc30dSEli Cohen 			err = mlx5_validate_goto_chain(esw, flow, act, action,
39152fbbc30dSEli Cohen 						       extack);
39162fbbc30dSEli Cohen 			if (err)
39172fbbc30dSEli Cohen 				return err;
3918bf07aa73SPaul Blakey 
3919e88afe75SOr Gerlitz 			action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
39202fbbc30dSEli Cohen 			attr->dest_chain = act->chain_index;
392173867881SPablo Neira Ayuso 			break;
39224c3844d9SPaul Blakey 		case FLOW_ACTION_CT:
39234c3844d9SPaul Blakey 			err = mlx5_tc_ct_parse_action(priv, attr, act, extack);
39244c3844d9SPaul Blakey 			if (err)
39254c3844d9SPaul Blakey 				return err;
39264c3844d9SPaul Blakey 
39274c3844d9SPaul Blakey 			flow_flag_set(flow, CT);
39284c3844d9SPaul Blakey 			break;
392973867881SPablo Neira Ayuso 		default:
39302cc1cb1dSTonghao Zhang 			NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
39312cc1cb1dSTonghao Zhang 			return -EOPNOTSUPP;
393203a9d11eSOr Gerlitz 		}
393373867881SPablo Neira Ayuso 	}
3934bdd66ac0SOr Gerlitz 
39350bac1194SEli Britstein 	if (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
39360bac1194SEli Britstein 	    action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
39370bac1194SEli Britstein 		/* For prio tag mode, replace vlan pop with a rewrite of the
39380bac1194SEli Britstein 		 * vlan prio tag.
39390bac1194SEli Britstein 		 */
39400bac1194SEli Britstein 		action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
39410bac1194SEli Britstein 		err = add_vlan_prio_tag_rewrite_action(priv, parse_attr, hdrs,
39420bac1194SEli Britstein 						       &action, extack);
39430bac1194SEli Britstein 		if (err)
39440bac1194SEli Britstein 			return err;
39450bac1194SEli Britstein 	}
39460bac1194SEli Britstein 
3947c500c86bSPablo Neira Ayuso 	if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
3948c500c86bSPablo Neira Ayuso 	    hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
394984be899fSTonghao Zhang 		err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_FDB,
395027c11b6bSEli Britstein 					    parse_attr, hdrs, &action, extack);
3951c500c86bSPablo Neira Ayuso 		if (err)
3952c500c86bSPablo Neira Ayuso 			return err;
395327c11b6bSEli Britstein 		/* In case all pedit actions are skipped, remove the MOD_HDR
395427c11b6bSEli Britstein 		 * flag. We might have set split_count either by pedit or
395527c11b6bSEli Britstein 		 * pop/push. If there is no pop/push either, reset it too.
395627c11b6bSEli Britstein 		 */
39576ae4a6a5SPaul Blakey 		if (parse_attr->mod_hdr_acts.num_actions == 0) {
395827c11b6bSEli Britstein 			action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
39596ae4a6a5SPaul Blakey 			dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
396027c11b6bSEli Britstein 			if (!((action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
396127c11b6bSEli Britstein 			      (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)))
396227c11b6bSEli Britstein 				attr->split_count = 0;
396327c11b6bSEli Britstein 		}
3964c500c86bSPablo Neira Ayuso 	}
3965c500c86bSPablo Neira Ayuso 
39661cab1cd7SOr Gerlitz 	attr->action = action;
396773867881SPablo Neira Ayuso 	if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
3968bdd66ac0SOr Gerlitz 		return -EOPNOTSUPP;
3969bdd66ac0SOr Gerlitz 
3970e88afe75SOr Gerlitz 	if (attr->dest_chain) {
39710a7fcb78SPaul Blakey 		if (decap) {
39720a7fcb78SPaul Blakey 			/* It can be supported if we create a mapping for
39730a7fcb78SPaul Blakey 			 * the tunnel device only (without tunnel), and set
39740a7fcb78SPaul Blakey 			 * this tunnel id with this decap flow.
39750a7fcb78SPaul Blakey 			 *
39760a7fcb78SPaul Blakey 			 * On restore (miss), we'll just set this saved tunnel
39770a7fcb78SPaul Blakey 			 * device.
39780a7fcb78SPaul Blakey 			 */
39790a7fcb78SPaul Blakey 
39800a7fcb78SPaul Blakey 			NL_SET_ERR_MSG(extack,
39810a7fcb78SPaul Blakey 				       "Decap with goto isn't supported");
39820a7fcb78SPaul Blakey 			netdev_warn(priv->netdev,
39830a7fcb78SPaul Blakey 				    "Decap with goto isn't supported");
39840a7fcb78SPaul Blakey 			return -EOPNOTSUPP;
39850a7fcb78SPaul Blakey 		}
39860a7fcb78SPaul Blakey 
3987e88afe75SOr Gerlitz 		if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
398861644c3dSRoi Dayan 			NL_SET_ERR_MSG_MOD(extack,
398961644c3dSRoi Dayan 					   "Mirroring goto chain rules isn't supported");
3990e88afe75SOr Gerlitz 			return -EOPNOTSUPP;
3991e88afe75SOr Gerlitz 		}
3992e88afe75SOr Gerlitz 		attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
3993e88afe75SOr Gerlitz 	}
3994e88afe75SOr Gerlitz 
3995ae2741e2SVlad Buslov 	if (!(attr->action &
3996ae2741e2SVlad Buslov 	      (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
399761644c3dSRoi Dayan 		NL_SET_ERR_MSG_MOD(extack,
399861644c3dSRoi Dayan 				   "Rule must have at least one forward/drop action");
3999ae2741e2SVlad Buslov 		return -EOPNOTSUPP;
4000ae2741e2SVlad Buslov 	}
4001ae2741e2SVlad Buslov 
4002e85e02baSEli Britstein 	if (attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
4003e98bedf5SEli Britstein 		NL_SET_ERR_MSG_MOD(extack,
4004e98bedf5SEli Britstein 				   "current firmware doesn't support split rule for port mirroring");
4005592d3651SChris Mi 		netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
4006592d3651SChris Mi 		return -EOPNOTSUPP;
4007592d3651SChris Mi 	}
4008592d3651SChris Mi 
400931c8eba5SOr Gerlitz 	return 0;
401003a9d11eSOr Gerlitz }
401103a9d11eSOr Gerlitz 
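/* Convert MLX5_TC_FLAG() offload flags into internal per-flow flag bits. */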
4012226f2ca3SVlad Buslov static void get_flags(int flags, unsigned long *flow_flags)
401360bd4af8SOr Gerlitz {
4014226f2ca3SVlad Buslov 	unsigned long __flow_flags = 0;
401560bd4af8SOr Gerlitz 
4016226f2ca3SVlad Buslov 	if (flags & MLX5_TC_FLAG(INGRESS))
4017226f2ca3SVlad Buslov 		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_INGRESS);
4018226f2ca3SVlad Buslov 	if (flags & MLX5_TC_FLAG(EGRESS))
4019226f2ca3SVlad Buslov 		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_EGRESS);
402060bd4af8SOr Gerlitz 
4021226f2ca3SVlad Buslov 	if (flags & MLX5_TC_FLAG(ESW_OFFLOAD))
4022226f2ca3SVlad Buslov 		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
4023226f2ca3SVlad Buslov 	if (flags & MLX5_TC_FLAG(NIC_OFFLOAD))
4024226f2ca3SVlad Buslov 		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
402584179981SPaul Blakey 	if (flags & MLX5_TC_FLAG(FT_OFFLOAD))
402684179981SPaul Blakey 		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_FT);
4027d9ee0491SOr Gerlitz 
402860bd4af8SOr Gerlitz 	*flow_flags = __flow_flags;
402960bd4af8SOr Gerlitz }
403060bd4af8SOr Gerlitz 
403105866c82SOr Gerlitz static const struct rhashtable_params tc_ht_params = {
403205866c82SOr Gerlitz 	.head_offset = offsetof(struct mlx5e_tc_flow, node),
403305866c82SOr Gerlitz 	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
403405866c82SOr Gerlitz 	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
403505866c82SOr Gerlitz 	.automatic_shrinking = true,
403605866c82SOr Gerlitz };
403705866c82SOr Gerlitz 
4038226f2ca3SVlad Buslov static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv,
4039226f2ca3SVlad Buslov 				    unsigned long flags)
404005866c82SOr Gerlitz {
4041655dc3d2SOr Gerlitz 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
4042655dc3d2SOr Gerlitz 	struct mlx5e_rep_priv *uplink_rpriv;
4043655dc3d2SOr Gerlitz 
4044226f2ca3SVlad Buslov 	if (flags & MLX5_TC_FLAG(ESW_OFFLOAD)) {
4045655dc3d2SOr Gerlitz 		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
4046ec1366c2SOz Shlomo 		return &uplink_rpriv->uplink_priv.tc_ht;
4047d9ee0491SOr Gerlitz 	} else /* NIC offload */
404805866c82SOr Gerlitz 		return &priv->fs.tc.ht;
404905866c82SOr Gerlitz }
405005866c82SOr Gerlitz 
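/* A duplicate rule on the peer eswitch is needed when the eswitches are
 * paired (SR-IOV LAG or multipath) and the rule either ingresses on a
 * non-uplink representor or performs packet reformat (encap).
 */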
405104de7ddaSRoi Dayan static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
405204de7ddaSRoi Dayan {
40531418ddd9SAviv Heller 	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
4054b05af6aaSBodong Wang 	bool is_rep_ingress = attr->in_rep->vport != MLX5_VPORT_UPLINK &&
4055226f2ca3SVlad Buslov 		flow_flag_test(flow, INGRESS);
40561418ddd9SAviv Heller 	bool act_is_encap = !!(attr->action &
40571418ddd9SAviv Heller 			       MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT);
40581418ddd9SAviv Heller 	bool esw_paired = mlx5_devcom_is_paired(attr->in_mdev->priv.devcom,
40591418ddd9SAviv Heller 						MLX5_DEVCOM_ESW_OFFLOADS);
40601418ddd9SAviv Heller 
406110fbb1cdSRoi Dayan 	if (!esw_paired)
406210fbb1cdSRoi Dayan 		return false;
406310fbb1cdSRoi Dayan 
406410fbb1cdSRoi Dayan 	if ((mlx5_lag_is_sriov(attr->in_mdev) ||
406510fbb1cdSRoi Dayan 	     mlx5_lag_is_multipath(attr->in_mdev)) &&
406610fbb1cdSRoi Dayan 	    (is_rep_ingress || act_is_encap))
406710fbb1cdSRoi Dayan 		return true;
406810fbb1cdSRoi Dayan 
406910fbb1cdSRoi Dayan 	return false;
407004de7ddaSRoi Dayan }
407104de7ddaSRoi Dayan 
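/* Allocate a tc flow and its parse attributes and initialize the common
 * fields (cookie, flags, lists, refcount, init completion).
 */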
4072a88780a9SRoi Dayan static int
4073a88780a9SRoi Dayan mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
4074226f2ca3SVlad Buslov 		 struct flow_cls_offload *f, unsigned long flow_flags,
4075a88780a9SRoi Dayan 		 struct mlx5e_tc_flow_parse_attr **__parse_attr,
4076a88780a9SRoi Dayan 		 struct mlx5e_tc_flow **__flow)
4077e3a2b7edSAmir Vadai {
407817091853SOr Gerlitz 	struct mlx5e_tc_flow_parse_attr *parse_attr;
40793bc4b7bfSOr Gerlitz 	struct mlx5e_tc_flow *flow;
40805a7e5bcbSVlad Buslov 	int out_index, err;
4081776b12b6SOr Gerlitz 
408265ba8fb7SOr Gerlitz 	flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
40831b9a07eeSLeon Romanovsky 	parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
408417091853SOr Gerlitz 	if (!parse_attr || !flow) {
4085e3a2b7edSAmir Vadai 		err = -ENOMEM;
4086e3a2b7edSAmir Vadai 		goto err_free;
4087e3a2b7edSAmir Vadai 	}
4088e3a2b7edSAmir Vadai 
4089e3a2b7edSAmir Vadai 	flow->cookie = f->cookie;
409065ba8fb7SOr Gerlitz 	flow->flags = flow_flags;
4091655dc3d2SOr Gerlitz 	flow->priv = priv;
40925a7e5bcbSVlad Buslov 	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
40935a7e5bcbSVlad Buslov 		INIT_LIST_HEAD(&flow->encaps[out_index].list);
40945a7e5bcbSVlad Buslov 	INIT_LIST_HEAD(&flow->mod_hdr);
40955a7e5bcbSVlad Buslov 	INIT_LIST_HEAD(&flow->hairpin);
40965a7e5bcbSVlad Buslov 	refcount_set(&flow->refcnt, 1);
409795435ad7SVlad Buslov 	init_completion(&flow->init_done);
4098e3a2b7edSAmir Vadai 
4099a88780a9SRoi Dayan 	*__flow = flow;
4100a88780a9SRoi Dayan 	*__parse_attr = parse_attr;
4101a88780a9SRoi Dayan 
4102a88780a9SRoi Dayan 	return 0;
4103a88780a9SRoi Dayan 
4104a88780a9SRoi Dayan err_free:
4105a88780a9SRoi Dayan 	kfree(flow);
4106a88780a9SRoi Dayan 	kvfree(parse_attr);
4107a88780a9SRoi Dayan 	return err;
4108adb4c123SOr Gerlitz }
4109adb4c123SOr Gerlitz 
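/* Fill the eswitch flow attributes known at parse time: chain/prio, input
 * rep and mdev, and the device that owns the flow counter.
 */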
4110988ab9c7STonghao Zhang static void
4111988ab9c7STonghao Zhang mlx5e_flow_esw_attr_init(struct mlx5_esw_flow_attr *esw_attr,
4112988ab9c7STonghao Zhang 			 struct mlx5e_priv *priv,
4113988ab9c7STonghao Zhang 			 struct mlx5e_tc_flow_parse_attr *parse_attr,
4114f9e30088SPablo Neira Ayuso 			 struct flow_cls_offload *f,
4115988ab9c7STonghao Zhang 			 struct mlx5_eswitch_rep *in_rep,
4116988ab9c7STonghao Zhang 			 struct mlx5_core_dev *in_mdev)
4117988ab9c7STonghao Zhang {
4118988ab9c7STonghao Zhang 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
4119988ab9c7STonghao Zhang 
4120988ab9c7STonghao Zhang 	esw_attr->parse_attr = parse_attr;
4121988ab9c7STonghao Zhang 	esw_attr->chain = f->common.chain_index;
4122ef01adaeSPablo Neira Ayuso 	esw_attr->prio = f->common.prio;
4123988ab9c7STonghao Zhang 
4124988ab9c7STonghao Zhang 	esw_attr->in_rep = in_rep;
4125988ab9c7STonghao Zhang 	esw_attr->in_mdev = in_mdev;
4126988ab9c7STonghao Zhang 
4127988ab9c7STonghao Zhang 	if (MLX5_CAP_ESW(esw->dev, counter_eswitch_affinity) ==
4128988ab9c7STonghao Zhang 	    MLX5_COUNTER_SOURCE_ESWITCH)
4129988ab9c7STonghao Zhang 		esw_attr->counter_dev = in_mdev;
4130988ab9c7STonghao Zhang 	else
4131988ab9c7STonghao Zhang 		esw_attr->counter_dev = priv->mdev;
4132988ab9c7STonghao Zhang }
4133988ab9c7STonghao Zhang 
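/* Parse and offload a single FDB rule on behalf of @in_rep/@in_mdev. Rules
 * that fail with -ENETUNREACH under multipath are kept on the unready list
 * to be offloaded later.
 */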
413471129676SJason Gunthorpe static struct mlx5e_tc_flow *
413504de7ddaSRoi Dayan __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
4136f9e30088SPablo Neira Ayuso 		     struct flow_cls_offload *f,
4137226f2ca3SVlad Buslov 		     unsigned long flow_flags,
4138d11afc26SOz Shlomo 		     struct net_device *filter_dev,
413904de7ddaSRoi Dayan 		     struct mlx5_eswitch_rep *in_rep,
414071129676SJason Gunthorpe 		     struct mlx5_core_dev *in_mdev)
4141a88780a9SRoi Dayan {
4142f9e30088SPablo Neira Ayuso 	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
4143a88780a9SRoi Dayan 	struct netlink_ext_ack *extack = f->common.extack;
4144a88780a9SRoi Dayan 	struct mlx5e_tc_flow_parse_attr *parse_attr;
4145a88780a9SRoi Dayan 	struct mlx5e_tc_flow *flow;
4146a88780a9SRoi Dayan 	int attr_size, err;
4147a88780a9SRoi Dayan 
4148226f2ca3SVlad Buslov 	flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
4149a88780a9SRoi Dayan 	attr_size  = sizeof(struct mlx5_esw_flow_attr);
4150a88780a9SRoi Dayan 	err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
4151a88780a9SRoi Dayan 			       &parse_attr, &flow);
4152a88780a9SRoi Dayan 	if (err)
4153a88780a9SRoi Dayan 		goto out;
4154988ab9c7STonghao Zhang 
4155d11afc26SOz Shlomo 	parse_attr->filter_dev = filter_dev;
4156988ab9c7STonghao Zhang 	mlx5e_flow_esw_attr_init(flow->esw_attr,
4157988ab9c7STonghao Zhang 				 priv, parse_attr,
4158988ab9c7STonghao Zhang 				 f, in_rep, in_mdev);
4159988ab9c7STonghao Zhang 
416054c177caSOz Shlomo 	err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
416154c177caSOz Shlomo 			       f, filter_dev);
4162d11afc26SOz Shlomo 	if (err)
4163d11afc26SOz Shlomo 		goto err_free;
4164a88780a9SRoi Dayan 
41656f9af8ffSTonghao Zhang 	err = parse_tc_fdb_actions(priv, &rule->action, flow, extack);
4166a88780a9SRoi Dayan 	if (err)
4167a88780a9SRoi Dayan 		goto err_free;
4168a88780a9SRoi Dayan 
41694c3844d9SPaul Blakey 	err = mlx5_tc_ct_parse_match(priv, &parse_attr->spec, f, extack);
41704c3844d9SPaul Blakey 	if (err)
41714c3844d9SPaul Blakey 		goto err_free;
41724c3844d9SPaul Blakey 
41737040632dSTonghao Zhang 	err = mlx5e_tc_add_fdb_flow(priv, flow, extack);
417495435ad7SVlad Buslov 	complete_all(&flow->init_done);
4175ef06c9eeSRoi Dayan 	if (err) {
4176ef06c9eeSRoi Dayan 		if (!(err == -ENETUNREACH && mlx5_lag_is_multipath(in_mdev)))
4177aa0cbbaeSOr Gerlitz 			goto err_free;
41785c40348cSOr Gerlitz 
4179b4a23329SRoi Dayan 		add_unready_flow(flow);
4180ef06c9eeSRoi Dayan 	}
4181ef06c9eeSRoi Dayan 
418271129676SJason Gunthorpe 	return flow;
4183e3a2b7edSAmir Vadai 
4184e3a2b7edSAmir Vadai err_free:
41855a7e5bcbSVlad Buslov 	mlx5e_flow_put(priv, flow);
4186a88780a9SRoi Dayan out:
418771129676SJason Gunthorpe 	return ERR_PTR(err);
4188a88780a9SRoi Dayan }
4189a88780a9SRoi Dayan 
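/* Duplicate an FDB rule on the peer eswitch of a paired setup, choosing
 * in_mdev according to where the packet originates.
 */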
4190f9e30088SPablo Neira Ayuso static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f,
419195dc1902SRoi Dayan 				      struct mlx5e_tc_flow *flow,
4192226f2ca3SVlad Buslov 				      unsigned long flow_flags)
419304de7ddaSRoi Dayan {
419404de7ddaSRoi Dayan 	struct mlx5e_priv *priv = flow->priv, *peer_priv;
419504de7ddaSRoi Dayan 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch, *peer_esw;
419604de7ddaSRoi Dayan 	struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
419704de7ddaSRoi Dayan 	struct mlx5e_tc_flow_parse_attr *parse_attr;
419804de7ddaSRoi Dayan 	struct mlx5e_rep_priv *peer_urpriv;
419904de7ddaSRoi Dayan 	struct mlx5e_tc_flow *peer_flow;
420004de7ddaSRoi Dayan 	struct mlx5_core_dev *in_mdev;
420104de7ddaSRoi Dayan 	int err = 0;
420204de7ddaSRoi Dayan 
420304de7ddaSRoi Dayan 	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
420404de7ddaSRoi Dayan 	if (!peer_esw)
420504de7ddaSRoi Dayan 		return -ENODEV;
420604de7ddaSRoi Dayan 
420704de7ddaSRoi Dayan 	peer_urpriv = mlx5_eswitch_get_uplink_priv(peer_esw, REP_ETH);
420804de7ddaSRoi Dayan 	peer_priv = netdev_priv(peer_urpriv->netdev);
420904de7ddaSRoi Dayan 
421004de7ddaSRoi Dayan 	/* in_mdev is assigned the mdev from which the packet originated.
421104de7ddaSRoi Dayan 	 * So packets redirected to the uplink use the same mdev as the
421204de7ddaSRoi Dayan 	 * original flow and packets redirected from the uplink use the
421304de7ddaSRoi Dayan 	 * peer mdev.
421404de7ddaSRoi Dayan 	 */
4215b05af6aaSBodong Wang 	if (flow->esw_attr->in_rep->vport == MLX5_VPORT_UPLINK)
421604de7ddaSRoi Dayan 		in_mdev = peer_priv->mdev;
421704de7ddaSRoi Dayan 	else
421804de7ddaSRoi Dayan 		in_mdev = priv->mdev;
421904de7ddaSRoi Dayan 
422004de7ddaSRoi Dayan 	parse_attr = flow->esw_attr->parse_attr;
422195dc1902SRoi Dayan 	peer_flow = __mlx5e_add_fdb_flow(peer_priv, f, flow_flags,
422204de7ddaSRoi Dayan 					 parse_attr->filter_dev,
422371129676SJason Gunthorpe 					 flow->esw_attr->in_rep, in_mdev);
422471129676SJason Gunthorpe 	if (IS_ERR(peer_flow)) {
422571129676SJason Gunthorpe 		err = PTR_ERR(peer_flow);
422604de7ddaSRoi Dayan 		goto out;
422771129676SJason Gunthorpe 	}
422804de7ddaSRoi Dayan 
422904de7ddaSRoi Dayan 	flow->peer_flow = peer_flow;
4230226f2ca3SVlad Buslov 	flow_flag_set(flow, DUP);
423104de7ddaSRoi Dayan 	mutex_lock(&esw->offloads.peer_mutex);
423204de7ddaSRoi Dayan 	list_add_tail(&flow->peer, &esw->offloads.peer_flows);
423304de7ddaSRoi Dayan 	mutex_unlock(&esw->offloads.peer_mutex);
423404de7ddaSRoi Dayan 
423504de7ddaSRoi Dayan out:
423604de7ddaSRoi Dayan 	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
423704de7ddaSRoi Dayan 	return err;
423804de7ddaSRoi Dayan }
423904de7ddaSRoi Dayan 
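/* Add an FDB rule for the local eswitch and, if required, duplicate it on
 * the peer eswitch.
 */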
424004de7ddaSRoi Dayan static int
424104de7ddaSRoi Dayan mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
4242f9e30088SPablo Neira Ayuso 		   struct flow_cls_offload *f,
4243226f2ca3SVlad Buslov 		   unsigned long flow_flags,
424404de7ddaSRoi Dayan 		   struct net_device *filter_dev,
424504de7ddaSRoi Dayan 		   struct mlx5e_tc_flow **__flow)
424604de7ddaSRoi Dayan {
424704de7ddaSRoi Dayan 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
424804de7ddaSRoi Dayan 	struct mlx5_eswitch_rep *in_rep = rpriv->rep;
424904de7ddaSRoi Dayan 	struct mlx5_core_dev *in_mdev = priv->mdev;
425004de7ddaSRoi Dayan 	struct mlx5e_tc_flow *flow;
425104de7ddaSRoi Dayan 	int err;
425204de7ddaSRoi Dayan 
425371129676SJason Gunthorpe 	flow = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep,
425471129676SJason Gunthorpe 				    in_mdev);
425571129676SJason Gunthorpe 	if (IS_ERR(flow))
425671129676SJason Gunthorpe 		return PTR_ERR(flow);
425704de7ddaSRoi Dayan 
425804de7ddaSRoi Dayan 	if (is_peer_flow_needed(flow)) {
425995dc1902SRoi Dayan 		err = mlx5e_tc_add_fdb_peer_flow(f, flow, flow_flags);
426004de7ddaSRoi Dayan 		if (err) {
426104de7ddaSRoi Dayan 			mlx5e_tc_del_fdb_flow(priv, flow);
426204de7ddaSRoi Dayan 			goto out;
426304de7ddaSRoi Dayan 		}
426404de7ddaSRoi Dayan 	}
426504de7ddaSRoi Dayan 
426604de7ddaSRoi Dayan 	*__flow = flow;
426704de7ddaSRoi Dayan 
426804de7ddaSRoi Dayan 	return 0;
426904de7ddaSRoi Dayan 
427004de7ddaSRoi Dayan out:
427104de7ddaSRoi Dayan 	return err;
427204de7ddaSRoi Dayan }
427304de7ddaSRoi Dayan 
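/* Install a NIC-mode flow: allocate the flow and its parse attributes, parse
 * the flower match and actions, and program the rule into the NIC flow
 * tables.
 */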
4274a88780a9SRoi Dayan static int
4275a88780a9SRoi Dayan mlx5e_add_nic_flow(struct mlx5e_priv *priv,
4276f9e30088SPablo Neira Ayuso 		   struct flow_cls_offload *f,
4277226f2ca3SVlad Buslov 		   unsigned long flow_flags,
4278d11afc26SOz Shlomo 		   struct net_device *filter_dev,
4279a88780a9SRoi Dayan 		   struct mlx5e_tc_flow **__flow)
4280a88780a9SRoi Dayan {
4281f9e30088SPablo Neira Ayuso 	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
4282a88780a9SRoi Dayan 	struct netlink_ext_ack *extack = f->common.extack;
4283a88780a9SRoi Dayan 	struct mlx5e_tc_flow_parse_attr *parse_attr;
4284a88780a9SRoi Dayan 	struct mlx5e_tc_flow *flow;
4285a88780a9SRoi Dayan 	int attr_size, err;
4286a88780a9SRoi Dayan 
4287bf07aa73SPaul Blakey 	/* multi-chain not supported for NIC rules */
4288bf07aa73SPaul Blakey 	if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
4289bf07aa73SPaul Blakey 		return -EOPNOTSUPP;
4290bf07aa73SPaul Blakey 
4291226f2ca3SVlad Buslov 	flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
4292a88780a9SRoi Dayan 	attr_size  = sizeof(struct mlx5_nic_flow_attr);
4293a88780a9SRoi Dayan 	err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
4294a88780a9SRoi Dayan 			       &parse_attr, &flow);
4295a88780a9SRoi Dayan 	if (err)
4296a88780a9SRoi Dayan 		goto out;
4297a88780a9SRoi Dayan 
4298d11afc26SOz Shlomo 	parse_attr->filter_dev = filter_dev;
429954c177caSOz Shlomo 	err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
430054c177caSOz Shlomo 			       f, filter_dev);
4301d11afc26SOz Shlomo 	if (err)
4302d11afc26SOz Shlomo 		goto err_free;
4303d11afc26SOz Shlomo 
430473867881SPablo Neira Ayuso 	err = parse_tc_nic_actions(priv, &rule->action, parse_attr, flow, extack);
4305a88780a9SRoi Dayan 	if (err)
4306a88780a9SRoi Dayan 		goto err_free;
4307a88780a9SRoi Dayan 
4308a88780a9SRoi Dayan 	err = mlx5e_tc_add_nic_flow(priv, parse_attr, flow, extack);
4309a88780a9SRoi Dayan 	if (err)
4310a88780a9SRoi Dayan 		goto err_free;
4311a88780a9SRoi Dayan 
4312226f2ca3SVlad Buslov 	flow_flag_set(flow, OFFLOADED);
4313a88780a9SRoi Dayan 	kvfree(parse_attr);
4314a88780a9SRoi Dayan 	*__flow = flow;
4315a88780a9SRoi Dayan 
4316a88780a9SRoi Dayan 	return 0;
4317a88780a9SRoi Dayan 
4318a88780a9SRoi Dayan err_free:
43195a7e5bcbSVlad Buslov 	mlx5e_flow_put(priv, flow);
4320a88780a9SRoi Dayan 	kvfree(parse_attr);
4321a88780a9SRoi Dayan out:
4322a88780a9SRoi Dayan 	return err;
4323a88780a9SRoi Dayan }
4324a88780a9SRoi Dayan 
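/* Dispatch helper: use the FDB path when the eswitch is in offloads mode,
 * otherwise fall back to the NIC path.
 */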
4325a88780a9SRoi Dayan static int
4326a88780a9SRoi Dayan mlx5e_tc_add_flow(struct mlx5e_priv *priv,
4327f9e30088SPablo Neira Ayuso 		  struct flow_cls_offload *f,
4328226f2ca3SVlad Buslov 		  unsigned long flags,
4329d11afc26SOz Shlomo 		  struct net_device *filter_dev,
4330a88780a9SRoi Dayan 		  struct mlx5e_tc_flow **flow)
4331a88780a9SRoi Dayan {
4332a88780a9SRoi Dayan 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
4333226f2ca3SVlad Buslov 	unsigned long flow_flags;
4334a88780a9SRoi Dayan 	int err;
4335a88780a9SRoi Dayan 
4336a88780a9SRoi Dayan 	get_flags(flags, &flow_flags);
4337a88780a9SRoi Dayan 
4338bf07aa73SPaul Blakey 	if (!tc_can_offload_extack(priv->netdev, f->common.extack))
4339bf07aa73SPaul Blakey 		return -EOPNOTSUPP;
4340bf07aa73SPaul Blakey 
4341f6455de0SBodong Wang 	if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
4342d11afc26SOz Shlomo 		err = mlx5e_add_fdb_flow(priv, f, flow_flags,
4343d11afc26SOz Shlomo 					 filter_dev, flow);
4344a88780a9SRoi Dayan 	else
4345d11afc26SOz Shlomo 		err = mlx5e_add_nic_flow(priv, f, flow_flags,
4346d11afc26SOz Shlomo 					 filter_dev, flow);
4347a88780a9SRoi Dayan 
4348a88780a9SRoi Dayan 	return err;
4349a88780a9SRoi Dayan }
4350a88780a9SRoi Dayan 
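/* Flower replace entry point: reject duplicate cookies, build the flow and
 * insert it into the rhashtable keyed by the tc cookie.
 */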
435171d82d2aSOz Shlomo int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
4352226f2ca3SVlad Buslov 			   struct flow_cls_offload *f, unsigned long flags)
4353a88780a9SRoi Dayan {
4354a88780a9SRoi Dayan 	struct netlink_ext_ack *extack = f->common.extack;
4355d9ee0491SOr Gerlitz 	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
4356a88780a9SRoi Dayan 	struct mlx5e_tc_flow *flow;
4357a88780a9SRoi Dayan 	int err = 0;
4358a88780a9SRoi Dayan 
4359c5d326b2SVlad Buslov 	rcu_read_lock();
4360c5d326b2SVlad Buslov 	flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
4361c5d326b2SVlad Buslov 	rcu_read_unlock();
4362a88780a9SRoi Dayan 	if (flow) {
4363a88780a9SRoi Dayan 		NL_SET_ERR_MSG_MOD(extack,
4364a88780a9SRoi Dayan 				   "flow cookie already exists, ignoring");
4365a88780a9SRoi Dayan 		netdev_warn_once(priv->netdev,
4366a88780a9SRoi Dayan 				 "flow cookie %lx already exists, ignoring\n",
4367a88780a9SRoi Dayan 				 f->cookie);
43680e1c1a2fSVlad Buslov 		err = -EEXIST;
4369a88780a9SRoi Dayan 		goto out;
4370a88780a9SRoi Dayan 	}
4371a88780a9SRoi Dayan 
43727a978759SDmytro Linkin 	trace_mlx5e_configure_flower(f);
4373d11afc26SOz Shlomo 	err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow);
4374a88780a9SRoi Dayan 	if (err)
4375a88780a9SRoi Dayan 		goto out;
4376a88780a9SRoi Dayan 
4377c5d326b2SVlad Buslov 	err = rhashtable_lookup_insert_fast(tc_ht, &flow->node, tc_ht_params);
4378a88780a9SRoi Dayan 	if (err)
4379a88780a9SRoi Dayan 		goto err_free;
4380a88780a9SRoi Dayan 
4381a88780a9SRoi Dayan 	return 0;
4382a88780a9SRoi Dayan 
4383a88780a9SRoi Dayan err_free:
43845a7e5bcbSVlad Buslov 	mlx5e_flow_put(priv, flow);
4385a88780a9SRoi Dayan out:
4386e3a2b7edSAmir Vadai 	return err;
4387e3a2b7edSAmir Vadai }
4388e3a2b7edSAmir Vadai 
43898f8ae895SOr Gerlitz static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags)
43908f8ae895SOr Gerlitz {
4391226f2ca3SVlad Buslov 	bool dir_ingress = !!(flags & MLX5_TC_FLAG(INGRESS));
4392226f2ca3SVlad Buslov 	bool dir_egress = !!(flags & MLX5_TC_FLAG(EGRESS));
43938f8ae895SOr Gerlitz 
4394226f2ca3SVlad Buslov 	return flow_flag_test(flow, INGRESS) == dir_ingress &&
4395226f2ca3SVlad Buslov 		flow_flag_test(flow, EGRESS) == dir_egress;
43968f8ae895SOr Gerlitz }
43978f8ae895SOr Gerlitz 
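/* Flower destroy entry point: look the flow up by cookie under RCU, mark it
 * DELETED so concurrent deleters back off, unhash it and drop the reference.
 */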
439871d82d2aSOz Shlomo int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
4399226f2ca3SVlad Buslov 			struct flow_cls_offload *f, unsigned long flags)
4400e3a2b7edSAmir Vadai {
4401d9ee0491SOr Gerlitz 	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
4402e3a2b7edSAmir Vadai 	struct mlx5e_tc_flow *flow;
4403c5d326b2SVlad Buslov 	int err;
4404e3a2b7edSAmir Vadai 
4405c5d326b2SVlad Buslov 	rcu_read_lock();
4406ab818362STaehee Yoo 	flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
4407c5d326b2SVlad Buslov 	if (!flow || !same_flow_direction(flow, flags)) {
4408c5d326b2SVlad Buslov 		err = -EINVAL;
4409c5d326b2SVlad Buslov 		goto errout;
4410c5d326b2SVlad Buslov 	}
4411e3a2b7edSAmir Vadai 
4412c5d326b2SVlad Buslov 	/* Only delete the flow if it doesn't already have the
4413c5d326b2SVlad Buslov 	 * MLX5E_TC_FLOW_DELETED flag set.
4414c5d326b2SVlad Buslov 	 */
4415c5d326b2SVlad Buslov 	if (flow_flag_test_and_set(flow, DELETED)) {
4416c5d326b2SVlad Buslov 		err = -EINVAL;
4417c5d326b2SVlad Buslov 		goto errout;
4418c5d326b2SVlad Buslov 	}
441905866c82SOr Gerlitz 	rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params);
4420c5d326b2SVlad Buslov 	rcu_read_unlock();
4421e3a2b7edSAmir Vadai 
44227a978759SDmytro Linkin 	trace_mlx5e_delete_flower(f);
44235a7e5bcbSVlad Buslov 	mlx5e_flow_put(priv, flow);
4424e3a2b7edSAmir Vadai 
4425e3a2b7edSAmir Vadai 	return 0;
4426c5d326b2SVlad Buslov 
4427c5d326b2SVlad Buslov errout:
4428c5d326b2SVlad Buslov 	rcu_read_unlock();
4429c5d326b2SVlad Buslov 	return err;
4430e3a2b7edSAmir Vadai }
4431e3a2b7edSAmir Vadai 
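/* Flower stats entry point: report the cached HW counter of the flow and,
 * when a peer (DUP) flow is offloaded as well, aggregate its counter too.
 */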
443271d82d2aSOz Shlomo int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
4433226f2ca3SVlad Buslov 		       struct flow_cls_offload *f, unsigned long flags)
4434aad7e08dSAmir Vadai {
443504de7ddaSRoi Dayan 	struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
4436d9ee0491SOr Gerlitz 	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
443704de7ddaSRoi Dayan 	struct mlx5_eswitch *peer_esw;
4438aad7e08dSAmir Vadai 	struct mlx5e_tc_flow *flow;
4439aad7e08dSAmir Vadai 	struct mlx5_fc *counter;
4440316d5f72SRoi Dayan 	u64 lastuse = 0;
4441316d5f72SRoi Dayan 	u64 packets = 0;
4442316d5f72SRoi Dayan 	u64 bytes = 0;
44435a7e5bcbSVlad Buslov 	int err = 0;
4444aad7e08dSAmir Vadai 
4445c5d326b2SVlad Buslov 	rcu_read_lock();
4446c5d326b2SVlad Buslov 	flow = mlx5e_flow_get(rhashtable_lookup(tc_ht, &f->cookie,
44475a7e5bcbSVlad Buslov 						tc_ht_params));
4448c5d326b2SVlad Buslov 	rcu_read_unlock();
44495a7e5bcbSVlad Buslov 	if (IS_ERR(flow))
44505a7e5bcbSVlad Buslov 		return PTR_ERR(flow);
44515a7e5bcbSVlad Buslov 
44525a7e5bcbSVlad Buslov 	if (!same_flow_direction(flow, flags)) {
44535a7e5bcbSVlad Buslov 		err = -EINVAL;
44545a7e5bcbSVlad Buslov 		goto errout;
44555a7e5bcbSVlad Buslov 	}
4456aad7e08dSAmir Vadai 
44574c3844d9SPaul Blakey 	if (mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, CT)) {
4458b8aee822SMark Bloch 		counter = mlx5e_tc_get_counter(flow);
4459aad7e08dSAmir Vadai 		if (!counter)
44605a7e5bcbSVlad Buslov 			goto errout;
4461aad7e08dSAmir Vadai 
4462aad7e08dSAmir Vadai 		mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
4463316d5f72SRoi Dayan 	}
4464aad7e08dSAmir Vadai 
4465316d5f72SRoi Dayan 	/* Under multipath it's possible for one rule to be currently
4466316d5f72SRoi Dayan 	 * un-offloaded while the other rule is offloaded.
4467316d5f72SRoi Dayan 	 */
446804de7ddaSRoi Dayan 	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
446904de7ddaSRoi Dayan 	if (!peer_esw)
447004de7ddaSRoi Dayan 		goto out;
447104de7ddaSRoi Dayan 
4472226f2ca3SVlad Buslov 	if (flow_flag_test(flow, DUP) &&
4473226f2ca3SVlad Buslov 	    flow_flag_test(flow->peer_flow, OFFLOADED)) {
447404de7ddaSRoi Dayan 		u64 bytes2;
447504de7ddaSRoi Dayan 		u64 packets2;
447604de7ddaSRoi Dayan 		u64 lastuse2;
447704de7ddaSRoi Dayan 
447804de7ddaSRoi Dayan 		counter = mlx5e_tc_get_counter(flow->peer_flow);
4479316d5f72SRoi Dayan 		if (!counter)
4480316d5f72SRoi Dayan 			goto no_peer_counter;
448104de7ddaSRoi Dayan 		mlx5_fc_query_cached(counter, &bytes2, &packets2, &lastuse2);
448204de7ddaSRoi Dayan 
448304de7ddaSRoi Dayan 		bytes += bytes2;
448404de7ddaSRoi Dayan 		packets += packets2;
448504de7ddaSRoi Dayan 		lastuse = max_t(u64, lastuse, lastuse2);
448604de7ddaSRoi Dayan 	}
448704de7ddaSRoi Dayan 
4488316d5f72SRoi Dayan no_peer_counter:
448904de7ddaSRoi Dayan 	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
449004de7ddaSRoi Dayan out:
449193a129ebSJiri Pirko 	flow_stats_update(&f->stats, bytes, packets, lastuse,
449293a129ebSJiri Pirko 			  FLOW_ACTION_HW_STATS_DELAYED);
44937a978759SDmytro Linkin 	trace_mlx5e_stats_flower(f);
44945a7e5bcbSVlad Buslov errout:
44955a7e5bcbSVlad Buslov 	mlx5e_flow_put(priv, flow);
44965a7e5bcbSVlad Buslov 	return err;
4497aad7e08dSAmir Vadai }
4498aad7e08dSAmir Vadai 
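/* Matchall policing is implemented by programming an ingress rate limit on
 * the eswitch vport behind the representor; a rate of 0 clears the limit
 * (see mlx5e_tc_delete_matchall() below).
 */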
4499fcb64c0fSEli Cohen static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
4500fcb64c0fSEli Cohen 			       struct netlink_ext_ack *extack)
4501fcb64c0fSEli Cohen {
4502fcb64c0fSEli Cohen 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
4503fcb64c0fSEli Cohen 	struct mlx5_eswitch *esw;
4504fcb64c0fSEli Cohen 	u16 vport_num;
4505fcb64c0fSEli Cohen 	u32 rate_mbps;
4506fcb64c0fSEli Cohen 	int err;
4507fcb64c0fSEli Cohen 
4508e401a184SEli Cohen 	vport_num = rpriv->rep->vport;
4509e401a184SEli Cohen 	if (vport_num >= MLX5_VPORT_ECPF) {
4510e401a184SEli Cohen 		NL_SET_ERR_MSG_MOD(extack,
4511e401a184SEli Cohen 				   "Ingress rate limit is supported only for Eswitch ports connected to VFs");
4512e401a184SEli Cohen 		return -EOPNOTSUPP;
4513e401a184SEli Cohen 	}
4514e401a184SEli Cohen 
4515fcb64c0fSEli Cohen 	esw = priv->mdev->priv.eswitch;
4516fcb64c0fSEli Cohen 	/* The rate is given in bytes/sec.
4517fcb64c0fSEli Cohen 	 * First convert it to bits/sec and then round to the nearest
4518fcb64c0fSEli Cohen 	 * Mbit/sec, where Mbit means one million bits.
4519fcb64c0fSEli Cohen 	 * Moreover, if the rate is non-zero, configure a minimum of
4520fcb64c0fSEli Cohen 	 * 1 Mbit/sec.
4521fcb64c0fSEli Cohen 	 */
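	/* For example, rate = 1062500 bytes/sec is 8500000 bits/sec, and
	 * (8500000 + 500000) / 1000000 = 9, i.e. 8.5 Mbit/sec rounds up to
	 * 9 Mbit/sec; adding 500000 before the integer division below is
	 * what implements the round-to-nearest.
	 */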
4522fcb64c0fSEli Cohen 	rate_mbps = rate ? max_t(u32, (rate * 8 + 500000) / 1000000, 1) : 0;
4523fcb64c0fSEli Cohen 	err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps);
4524fcb64c0fSEli Cohen 	if (err)
4525fcb64c0fSEli Cohen 		NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware");
4526fcb64c0fSEli Cohen 
4527fcb64c0fSEli Cohen 	return err;
4528fcb64c0fSEli Cohen }
4529fcb64c0fSEli Cohen 
4530fcb64c0fSEli Cohen static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
4531fcb64c0fSEli Cohen 					struct flow_action *flow_action,
4532fcb64c0fSEli Cohen 					struct netlink_ext_ack *extack)
4533fcb64c0fSEli Cohen {
4534fcb64c0fSEli Cohen 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
4535fcb64c0fSEli Cohen 	const struct flow_action_entry *act;
4536fcb64c0fSEli Cohen 	int err;
4537fcb64c0fSEli Cohen 	int i;
4538fcb64c0fSEli Cohen 
4539fcb64c0fSEli Cohen 	if (!flow_action_has_entries(flow_action)) {
4540fcb64c0fSEli Cohen 		NL_SET_ERR_MSG_MOD(extack, "matchall called with no action");
4541fcb64c0fSEli Cohen 		return -EINVAL;
4542fcb64c0fSEli Cohen 	}
4543fcb64c0fSEli Cohen 
4544fcb64c0fSEli Cohen 	if (!flow_offload_has_one_action(flow_action)) {
4545fcb64c0fSEli Cohen 		NL_SET_ERR_MSG_MOD(extack, "matchall policing supports only a single action");
4546fcb64c0fSEli Cohen 		return -EOPNOTSUPP;
4547fcb64c0fSEli Cohen 	}
4548fcb64c0fSEli Cohen 
454953eca1f3SJakub Kicinski 	if (!flow_action_basic_hw_stats_check(flow_action, extack))
4550319a1d19SJiri Pirko 		return -EOPNOTSUPP;
4551319a1d19SJiri Pirko 
4552fcb64c0fSEli Cohen 	flow_action_for_each(i, act, flow_action) {
4553fcb64c0fSEli Cohen 		switch (act->id) {
4554fcb64c0fSEli Cohen 		case FLOW_ACTION_POLICE:
4555fcb64c0fSEli Cohen 			err = apply_police_params(priv, act->police.rate_bytes_ps, extack);
4556fcb64c0fSEli Cohen 			if (err)
4557fcb64c0fSEli Cohen 				return err;
4558fcb64c0fSEli Cohen 
4559fcb64c0fSEli Cohen 			rpriv->prev_vf_vport_stats = priv->stats.vf_vport;
4560fcb64c0fSEli Cohen 			break;
4561fcb64c0fSEli Cohen 		default:
4562fcb64c0fSEli Cohen 			NL_SET_ERR_MSG_MOD(extack, "mlx5 supports only police action for matchall");
4563fcb64c0fSEli Cohen 			return -EOPNOTSUPP;
4564fcb64c0fSEli Cohen 		}
4565fcb64c0fSEli Cohen 	}
4566fcb64c0fSEli Cohen 
4567fcb64c0fSEli Cohen 	return 0;
4568fcb64c0fSEli Cohen }
4569fcb64c0fSEli Cohen 
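/* Illustrative only: a rule of roughly this shape is what typically reaches
 * the handler below from userspace (device name and rates are placeholders):
 *
 *   tc filter add dev $REP ingress prio 1 matchall \
 *           action police rate 100mbit burst 16k
 */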
4570fcb64c0fSEli Cohen int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
4571fcb64c0fSEli Cohen 				struct tc_cls_matchall_offload *ma)
4572fcb64c0fSEli Cohen {
4573b5f814ccSEli Cohen 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
4574fcb64c0fSEli Cohen 	struct netlink_ext_ack *extack = ma->common.extack;
4575fcb64c0fSEli Cohen 
4576b5f814ccSEli Cohen 	if (!mlx5_esw_qos_enabled(esw)) {
4577b5f814ccSEli Cohen 		NL_SET_ERR_MSG_MOD(extack, "QoS is not supported on this device");
4578b5f814ccSEli Cohen 		return -EOPNOTSUPP;
4579b5f814ccSEli Cohen 	}
4580b5f814ccSEli Cohen 
45817b83355fSEli Cohen 	if (ma->common.prio != 1) {
4582fcb64c0fSEli Cohen 		NL_SET_ERR_MSG_MOD(extack, "only priority 1 is supported");
4583fcb64c0fSEli Cohen 		return -EINVAL;
4584fcb64c0fSEli Cohen 	}
4585fcb64c0fSEli Cohen 
4586fcb64c0fSEli Cohen 	return scan_tc_matchall_fdb_actions(priv, &ma->rule->action, extack);
4587fcb64c0fSEli Cohen }
4588fcb64c0fSEli Cohen 
4589fcb64c0fSEli Cohen int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv,
4590fcb64c0fSEli Cohen 			     struct tc_cls_matchall_offload *ma)
4591fcb64c0fSEli Cohen {
4592fcb64c0fSEli Cohen 	struct netlink_ext_ack *extack = ma->common.extack;
4593fcb64c0fSEli Cohen 
4594fcb64c0fSEli Cohen 	return apply_police_params(priv, 0, extack);
4595fcb64c0fSEli Cohen }
4596fcb64c0fSEli Cohen 
4597fcb64c0fSEli Cohen void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
4598fcb64c0fSEli Cohen 			     struct tc_cls_matchall_offload *ma)
4599fcb64c0fSEli Cohen {
4600fcb64c0fSEli Cohen 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
4601fcb64c0fSEli Cohen 	struct rtnl_link_stats64 cur_stats;
4602fcb64c0fSEli Cohen 	u64 dbytes;
4603fcb64c0fSEli Cohen 	u64 dpkts;
4604fcb64c0fSEli Cohen 
4605fcb64c0fSEli Cohen 	cur_stats = priv->stats.vf_vport;
4606fcb64c0fSEli Cohen 	dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets;
4607fcb64c0fSEli Cohen 	dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes;
4608fcb64c0fSEli Cohen 	rpriv->prev_vf_vport_stats = cur_stats;
460993a129ebSJiri Pirko 	flow_stats_update(&ma->stats, dpkts, dbytes, jiffies,
461093a129ebSJiri Pirko 			  FLOW_ACTION_HW_STATS_DELAYED);
4611fcb64c0fSEli Cohen }
4612fcb64c0fSEli Cohen 
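/* A device backing hairpin pairs is going away: take a reference on every
 * hairpin entry, wait for it to finish initializing, and mark the pairs that
 * point at the departing vhca as peer_gone.
 */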
46134d8fcf21SAlaa Hleihel static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
46144d8fcf21SAlaa Hleihel 					      struct mlx5e_priv *peer_priv)
46154d8fcf21SAlaa Hleihel {
46164d8fcf21SAlaa Hleihel 	struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
4617db76ca24SVlad Buslov 	struct mlx5e_hairpin_entry *hpe, *tmp;
4618db76ca24SVlad Buslov 	LIST_HEAD(init_wait_list);
46194d8fcf21SAlaa Hleihel 	u16 peer_vhca_id;
46204d8fcf21SAlaa Hleihel 	int bkt;
46214d8fcf21SAlaa Hleihel 
46224d8fcf21SAlaa Hleihel 	if (!same_hw_devs(priv, peer_priv))
46234d8fcf21SAlaa Hleihel 		return;
46244d8fcf21SAlaa Hleihel 
46254d8fcf21SAlaa Hleihel 	peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
46264d8fcf21SAlaa Hleihel 
4627b32accdaSVlad Buslov 	mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
4628db76ca24SVlad Buslov 	hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist)
4629db76ca24SVlad Buslov 		if (refcount_inc_not_zero(&hpe->refcnt))
4630db76ca24SVlad Buslov 			list_add(&hpe->dead_peer_wait_list, &init_wait_list);
4631b32accdaSVlad Buslov 	mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
4632db76ca24SVlad Buslov 
4633db76ca24SVlad Buslov 	list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) {
4634db76ca24SVlad Buslov 		wait_for_completion(&hpe->res_ready);
4635db76ca24SVlad Buslov 		if (!IS_ERR_OR_NULL(hpe->hp) && hpe->peer_vhca_id == peer_vhca_id)
4636db76ca24SVlad Buslov 			hpe->hp->pair->peer_gone = true;
4637db76ca24SVlad Buslov 
4638db76ca24SVlad Buslov 		mlx5e_hairpin_put(priv, hpe);
4639db76ca24SVlad Buslov 	}
46404d8fcf21SAlaa Hleihel }
46414d8fcf21SAlaa Hleihel 
46424d8fcf21SAlaa Hleihel static int mlx5e_tc_netdev_event(struct notifier_block *this,
46434d8fcf21SAlaa Hleihel 				 unsigned long event, void *ptr)
46444d8fcf21SAlaa Hleihel {
46454d8fcf21SAlaa Hleihel 	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
46464d8fcf21SAlaa Hleihel 	struct mlx5e_flow_steering *fs;
46474d8fcf21SAlaa Hleihel 	struct mlx5e_priv *peer_priv;
46484d8fcf21SAlaa Hleihel 	struct mlx5e_tc_table *tc;
46494d8fcf21SAlaa Hleihel 	struct mlx5e_priv *priv;
46504d8fcf21SAlaa Hleihel 
46514d8fcf21SAlaa Hleihel 	if (ndev->netdev_ops != &mlx5e_netdev_ops ||
46524d8fcf21SAlaa Hleihel 	    event != NETDEV_UNREGISTER ||
46534d8fcf21SAlaa Hleihel 	    ndev->reg_state == NETREG_REGISTERED)
46544d8fcf21SAlaa Hleihel 		return NOTIFY_DONE;
46554d8fcf21SAlaa Hleihel 
46564d8fcf21SAlaa Hleihel 	tc = container_of(this, struct mlx5e_tc_table, netdevice_nb);
46574d8fcf21SAlaa Hleihel 	fs = container_of(tc, struct mlx5e_flow_steering, tc);
46584d8fcf21SAlaa Hleihel 	priv = container_of(fs, struct mlx5e_priv, fs);
46594d8fcf21SAlaa Hleihel 	peer_priv = netdev_priv(ndev);
46604d8fcf21SAlaa Hleihel 	if (priv == peer_priv ||
46614d8fcf21SAlaa Hleihel 	    !(priv->netdev->features & NETIF_F_HW_TC))
46624d8fcf21SAlaa Hleihel 		return NOTIFY_DONE;
46634d8fcf21SAlaa Hleihel 
46644d8fcf21SAlaa Hleihel 	mlx5e_tc_hairpin_update_dead_peer(priv, peer_priv);
46654d8fcf21SAlaa Hleihel 
46664d8fcf21SAlaa Hleihel 	return NOTIFY_DONE;
46674d8fcf21SAlaa Hleihel }
46684d8fcf21SAlaa Hleihel 
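/* NIC-side TC init: set up the mod_hdr and hairpin tables, the flow
 * rhashtable, and a netdevice notifier used to catch dying hairpin peers.
 */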
4669655dc3d2SOr Gerlitz int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
4670e8f887acSAmir Vadai {
4671acff797cSMaor Gottlieb 	struct mlx5e_tc_table *tc = &priv->fs.tc;
46724d8fcf21SAlaa Hleihel 	int err;
4673e8f887acSAmir Vadai 
4674b6fac0b4SVlad Buslov 	mutex_init(&tc->t_lock);
4675d2faae25SVlad Buslov 	mutex_init(&tc->mod_hdr.lock);
4676dd58edc3SVlad Buslov 	hash_init(tc->mod_hdr.hlist);
4677b32accdaSVlad Buslov 	mutex_init(&tc->hairpin_tbl_lock);
46785c65c564SOr Gerlitz 	hash_init(tc->hairpin_tbl);
467911c9c548SOr Gerlitz 
46804d8fcf21SAlaa Hleihel 	err = rhashtable_init(&tc->ht, &tc_ht_params);
46814d8fcf21SAlaa Hleihel 	if (err)
46824d8fcf21SAlaa Hleihel 		return err;
46834d8fcf21SAlaa Hleihel 
46844d8fcf21SAlaa Hleihel 	tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
4685d48834f9SJiri Pirko 	err = register_netdevice_notifier_dev_net(priv->netdev,
4686d48834f9SJiri Pirko 						  &tc->netdevice_nb,
4687d48834f9SJiri Pirko 						  &tc->netdevice_nn);
4688d48834f9SJiri Pirko 	if (err) {
46894d8fcf21SAlaa Hleihel 		tc->netdevice_nb.notifier_call = NULL;
46904d8fcf21SAlaa Hleihel 		mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
46914d8fcf21SAlaa Hleihel 	}
46924d8fcf21SAlaa Hleihel 
46934d8fcf21SAlaa Hleihel 	return err;
4694e8f887acSAmir Vadai }
4695e8f887acSAmir Vadai 
4696e8f887acSAmir Vadai static void _mlx5e_tc_del_flow(void *ptr, void *arg)
4697e8f887acSAmir Vadai {
4698e8f887acSAmir Vadai 	struct mlx5e_tc_flow *flow = ptr;
4699655dc3d2SOr Gerlitz 	struct mlx5e_priv *priv = flow->priv;
4700e8f887acSAmir Vadai 
4701961e8979SRoi Dayan 	mlx5e_tc_del_flow(priv, flow);
4702e8f887acSAmir Vadai 	kfree(flow);
4703e8f887acSAmir Vadai }
4704e8f887acSAmir Vadai 
4705655dc3d2SOr Gerlitz void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
4706e8f887acSAmir Vadai {
4707acff797cSMaor Gottlieb 	struct mlx5e_tc_table *tc = &priv->fs.tc;
4708e8f887acSAmir Vadai 
47094d8fcf21SAlaa Hleihel 	if (tc->netdevice_nb.notifier_call)
4710d48834f9SJiri Pirko 		unregister_netdevice_notifier_dev_net(priv->netdev,
4711d48834f9SJiri Pirko 						      &tc->netdevice_nb,
4712d48834f9SJiri Pirko 						      &tc->netdevice_nn);
47134d8fcf21SAlaa Hleihel 
4714d2faae25SVlad Buslov 	mutex_destroy(&tc->mod_hdr.lock);
4715b32accdaSVlad Buslov 	mutex_destroy(&tc->hairpin_tbl_lock);
4716b32accdaSVlad Buslov 
4717d9ee0491SOr Gerlitz 	rhashtable_destroy(&tc->ht);
4718e8f887acSAmir Vadai 
4719acff797cSMaor Gottlieb 	if (!IS_ERR_OR_NULL(tc->t)) {
4720acff797cSMaor Gottlieb 		mlx5_destroy_flow_table(tc->t);
4721acff797cSMaor Gottlieb 		tc->t = NULL;
4722e8f887acSAmir Vadai 	}
4723b6fac0b4SVlad Buslov 	mutex_destroy(&tc->t_lock);
4724e8f887acSAmir Vadai }
4725655dc3d2SOr Gerlitz 
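/* Eswitch-side TC init: bring up connection tracking, create the mapping
 * contexts for tunnel match keys and encap options, and initialize the
 * uplink representor's flow rhashtable.
 */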
4726655dc3d2SOr Gerlitz int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
4727655dc3d2SOr Gerlitz {
4728d7a42ad0SRoi Dayan 	const size_t sz_enc_opts = sizeof(struct tunnel_match_enc_opts);
47290a7fcb78SPaul Blakey 	struct mlx5_rep_uplink_priv *uplink_priv;
47300a7fcb78SPaul Blakey 	struct mlx5e_rep_priv *priv;
47310a7fcb78SPaul Blakey 	struct mapping_ctx *mapping;
47320a7fcb78SPaul Blakey 	int err;
47330a7fcb78SPaul Blakey 
47340a7fcb78SPaul Blakey 	uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht);
47350a7fcb78SPaul Blakey 	priv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
47360a7fcb78SPaul Blakey 
47374c3844d9SPaul Blakey 	err = mlx5_tc_ct_init(uplink_priv);
47384c3844d9SPaul Blakey 	if (err)
47394c3844d9SPaul Blakey 		goto err_ct;
47404c3844d9SPaul Blakey 
47410a7fcb78SPaul Blakey 	mapping = mapping_create(sizeof(struct tunnel_match_key),
47420a7fcb78SPaul Blakey 				 TUNNEL_INFO_BITS_MASK, true);
47430a7fcb78SPaul Blakey 	if (IS_ERR(mapping)) {
47440a7fcb78SPaul Blakey 		err = PTR_ERR(mapping);
47450a7fcb78SPaul Blakey 		goto err_tun_mapping;
47460a7fcb78SPaul Blakey 	}
47470a7fcb78SPaul Blakey 	uplink_priv->tunnel_mapping = mapping;
47480a7fcb78SPaul Blakey 
47490a7fcb78SPaul Blakey 	mapping = mapping_create(sz_enc_opts, ENC_OPTS_BITS_MASK, true);
47500a7fcb78SPaul Blakey 	if (IS_ERR(mapping)) {
47510a7fcb78SPaul Blakey 		err = PTR_ERR(mapping);
47520a7fcb78SPaul Blakey 		goto err_enc_opts_mapping;
47530a7fcb78SPaul Blakey 	}
47540a7fcb78SPaul Blakey 	uplink_priv->tunnel_enc_opts_mapping = mapping;
47550a7fcb78SPaul Blakey 
47560a7fcb78SPaul Blakey 	err = rhashtable_init(tc_ht, &tc_ht_params);
47570a7fcb78SPaul Blakey 	if (err)
47580a7fcb78SPaul Blakey 		goto err_ht_init;
47590a7fcb78SPaul Blakey 
47600a7fcb78SPaul Blakey 	return err;
47610a7fcb78SPaul Blakey 
47620a7fcb78SPaul Blakey err_ht_init:
47630a7fcb78SPaul Blakey 	mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
47640a7fcb78SPaul Blakey err_enc_opts_mapping:
47650a7fcb78SPaul Blakey 	mapping_destroy(uplink_priv->tunnel_mapping);
47660a7fcb78SPaul Blakey err_tun_mapping:
47674c3844d9SPaul Blakey 	mlx5_tc_ct_clean(uplink_priv);
47684c3844d9SPaul Blakey err_ct:
47690a7fcb78SPaul Blakey 	netdev_warn(priv->netdev,
47700a7fcb78SPaul Blakey 		    "Failed to initialize tc (eswitch), err: %d", err);
47710a7fcb78SPaul Blakey 	return err;
4772655dc3d2SOr Gerlitz }
4773655dc3d2SOr Gerlitz 
4774655dc3d2SOr Gerlitz void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
4775655dc3d2SOr Gerlitz {
47760a7fcb78SPaul Blakey 	struct mlx5_rep_uplink_priv *uplink_priv;
47770a7fcb78SPaul Blakey 
4778655dc3d2SOr Gerlitz 	rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
47790a7fcb78SPaul Blakey 
47800a7fcb78SPaul Blakey 	uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht);
47810a7fcb78SPaul Blakey 	mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
47820a7fcb78SPaul Blakey 	mapping_destroy(uplink_priv->tunnel_mapping);
47834c3844d9SPaul Blakey 
47844c3844d9SPaul Blakey 	mlx5_tc_ct_clean(uplink_priv);
4785655dc3d2SOr Gerlitz }
478601252a27SOr Gerlitz 
4787226f2ca3SVlad Buslov int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags)
478801252a27SOr Gerlitz {
4789d9ee0491SOr Gerlitz 	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
479001252a27SOr Gerlitz 
479101252a27SOr Gerlitz 	return atomic_read(&tc_ht->nelems);
479201252a27SOr Gerlitz }
479304de7ddaSRoi Dayan 
479404de7ddaSRoi Dayan void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw)
479504de7ddaSRoi Dayan {
479604de7ddaSRoi Dayan 	struct mlx5e_tc_flow *flow, *tmp;
479704de7ddaSRoi Dayan 
479804de7ddaSRoi Dayan 	list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows, peer)
479904de7ddaSRoi Dayan 		__mlx5e_tc_del_fdb_peer_flow(flow);
480004de7ddaSRoi Dayan }
4801b4a23329SRoi Dayan 
4802b4a23329SRoi Dayan void mlx5e_tc_reoffload_flows_work(struct work_struct *work)
4803b4a23329SRoi Dayan {
4804b4a23329SRoi Dayan 	struct mlx5_rep_uplink_priv *rpriv =
4805b4a23329SRoi Dayan 		container_of(work, struct mlx5_rep_uplink_priv,
4806b4a23329SRoi Dayan 			     reoffload_flows_work);
4807b4a23329SRoi Dayan 	struct mlx5e_tc_flow *flow, *tmp;
4808b4a23329SRoi Dayan 
4809ad86755bSVlad Buslov 	mutex_lock(&rpriv->unready_flows_lock);
4810b4a23329SRoi Dayan 	list_for_each_entry_safe(flow, tmp, &rpriv->unready_flows, unready) {
4811b4a23329SRoi Dayan 		if (!mlx5e_tc_add_fdb_flow(flow->priv, flow, NULL))
4812ad86755bSVlad Buslov 			unready_flow_del(flow);
4813b4a23329SRoi Dayan 	}
4814ad86755bSVlad Buslov 	mutex_unlock(&rpriv->unready_flows_lock);
4815b4a23329SRoi Dayan }
4816e2394a61SVlad Buslov 
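/* Block callback plumbing: translate FLOW_CLS_* commands from the TC core
 * into the configure/delete/stats handlers above.
 */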
4817e2394a61SVlad Buslov static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
4818e2394a61SVlad Buslov 				     struct flow_cls_offload *cls_flower,
4819e2394a61SVlad Buslov 				     unsigned long flags)
4820e2394a61SVlad Buslov {
4821e2394a61SVlad Buslov 	switch (cls_flower->command) {
4822e2394a61SVlad Buslov 	case FLOW_CLS_REPLACE:
4823e2394a61SVlad Buslov 		return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
4824e2394a61SVlad Buslov 					      flags);
4825e2394a61SVlad Buslov 	case FLOW_CLS_DESTROY:
4826e2394a61SVlad Buslov 		return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
4827e2394a61SVlad Buslov 					   flags);
4828e2394a61SVlad Buslov 	case FLOW_CLS_STATS:
4829e2394a61SVlad Buslov 		return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
4830e2394a61SVlad Buslov 					  flags);
4831e2394a61SVlad Buslov 	default:
4832e2394a61SVlad Buslov 		return -EOPNOTSUPP;
4833e2394a61SVlad Buslov 	}
4834e2394a61SVlad Buslov }
4835e2394a61SVlad Buslov 
4836e2394a61SVlad Buslov int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
4837e2394a61SVlad Buslov 			    void *cb_priv)
4838e2394a61SVlad Buslov {
4839e2394a61SVlad Buslov 	unsigned long flags = MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(NIC_OFFLOAD);
4840e2394a61SVlad Buslov 	struct mlx5e_priv *priv = cb_priv;
4841e2394a61SVlad Buslov 
4842e2394a61SVlad Buslov 	switch (type) {
4843e2394a61SVlad Buslov 	case TC_SETUP_CLSFLOWER:
4844e2394a61SVlad Buslov 		return mlx5e_setup_tc_cls_flower(priv, type_data, flags);
4845e2394a61SVlad Buslov 	default:
4846e2394a61SVlad Buslov 		return -EOPNOTSUPP;
4847e2394a61SVlad Buslov 	}
4848e2394a61SVlad Buslov }
4849