1e8f887acSAmir Vadai /*
2e8f887acSAmir Vadai  * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
3e8f887acSAmir Vadai  *
4e8f887acSAmir Vadai  * This software is available to you under a choice of one of two
5e8f887acSAmir Vadai  * licenses.  You may choose to be licensed under the terms of the GNU
6e8f887acSAmir Vadai  * General Public License (GPL) Version 2, available from the file
7e8f887acSAmir Vadai  * COPYING in the main directory of this source tree, or the
8e8f887acSAmir Vadai  * OpenIB.org BSD license below:
9e8f887acSAmir Vadai  *
10e8f887acSAmir Vadai  *     Redistribution and use in source and binary forms, with or
11e8f887acSAmir Vadai  *     without modification, are permitted provided that the following
12e8f887acSAmir Vadai  *     conditions are met:
13e8f887acSAmir Vadai  *
14e8f887acSAmir Vadai  *      - Redistributions of source code must retain the above
15e8f887acSAmir Vadai  *        copyright notice, this list of conditions and the following
16e8f887acSAmir Vadai  *        disclaimer.
17e8f887acSAmir Vadai  *
18e8f887acSAmir Vadai  *      - Redistributions in binary form must reproduce the above
19e8f887acSAmir Vadai  *        copyright notice, this list of conditions and the following
20e8f887acSAmir Vadai  *        disclaimer in the documentation and/or other materials
21e8f887acSAmir Vadai  *        provided with the distribution.
22e8f887acSAmir Vadai  *
23e8f887acSAmir Vadai  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24e8f887acSAmir Vadai  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25e8f887acSAmir Vadai  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26e8f887acSAmir Vadai  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27e8f887acSAmir Vadai  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28e8f887acSAmir Vadai  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29e8f887acSAmir Vadai  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30e8f887acSAmir Vadai  * SOFTWARE.
31e8f887acSAmir Vadai  */
32e8f887acSAmir Vadai 
33e3a2b7edSAmir Vadai #include <net/flow_dissector.h>
34e2394a61SVlad Buslov #include <net/flow_offload.h>
353f7d0eb4SOr Gerlitz #include <net/sch_generic.h>
36e3a2b7edSAmir Vadai #include <net/pkt_cls.h>
37e3a2b7edSAmir Vadai #include <net/tc_act/tc_gact.h>
3812185a9fSAmir Vadai #include <net/tc_act/tc_skbedit.h>
39e8f887acSAmir Vadai #include <linux/mlx5/fs.h>
40e8f887acSAmir Vadai #include <linux/mlx5/device.h>
41e8f887acSAmir Vadai #include <linux/rhashtable.h>
425a7e5bcbSVlad Buslov #include <linux/refcount.h>
43db76ca24SVlad Buslov #include <linux/completion.h>
4403a9d11eSOr Gerlitz #include <net/tc_act/tc_mirred.h>
45776b12b6SOr Gerlitz #include <net/tc_act/tc_vlan.h>
46bbd00f7eSHadar Hen Zion #include <net/tc_act/tc_tunnel_key.h>
47d79b6df6SOr Gerlitz #include <net/tc_act/tc_pedit.h>
4826c02749SOr Gerlitz #include <net/tc_act/tc_csum.h>
4914e6b038SEli Cohen #include <net/tc_act/tc_mpls.h>
50f6dfb4c3SHadar Hen Zion #include <net/arp.h>
513616d08bSDavid Ahern #include <net/ipv6_stubs.h>
52f828ca6aSEli Cohen #include <net/bareudp.h>
53e8f887acSAmir Vadai #include "en.h"
541d447a39SSaeed Mahameed #include "en_rep.h"
55768c3667SVlad Buslov #include "en/rep/tc.h"
56e2394a61SVlad Buslov #include "en/rep/neigh.h"
57232c0013SHadar Hen Zion #include "en_tc.h"
5803a9d11eSOr Gerlitz #include "eswitch.h"
5949964352SSaeed Mahameed #include "esw/chains.h"
603f6d08d1SOr Gerlitz #include "fs_core.h"
612c81bfd5SHuy Nguyen #include "en/port.h"
62101f4de9SOz Shlomo #include "en/tc_tun.h"
630a7fcb78SPaul Blakey #include "en/mapping.h"
644c3844d9SPaul Blakey #include "en/tc_ct.h"
6504de7ddaSRoi Dayan #include "lib/devcom.h"
669272e3dfSYevgeny Kliteynik #include "lib/geneve.h"
677a978759SDmytro Linkin #include "diag/en_tc_tracepoint.h"
68e8f887acSAmir Vadai 
/* Size in bytes of a single modify-header action entry (the union of the
 * set/add/copy action layouts).
 */
#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)

/* Attributes of a TC flow offloaded into the NIC (non-eswitch) steering
 * tables; lives in the attr union at the tail of struct mlx5e_tc_flow.
 */
struct mlx5_nic_flow_attr {
	u32 action;				/* MLX5_FLOW_CONTEXT_ACTION_* bits — TODO confirm */
	u32 flow_tag;				/* tag attached to the rule for this flow */
	struct mlx5_modify_hdr *modify_hdr;	/* header-rewrite object, set in mlx5e_attach_mod_hdr() */
	u32 hairpin_tirn;			/* TIR to forward to when hairpinning */
	u8 match_level;				/* depth of header match required — TODO confirm exact enum */
	struct mlx5_flow_table	*hairpin_ft;	/* hairpin RSS flow table, when RSS hairpin is used */
	struct mlx5_fc		*counter;	/* HW flow counter for stats */
};
803bc4b7bfSOr Gerlitz 
/* Driver-private flag bits start right after the last bit exported in the
 * MLX5E_TC_FLAG_* namespace, so both sets can share the same flags word.
 */
#define MLX5E_TC_FLOW_BASE (MLX5E_TC_FLAG_LAST_EXPORTED_BIT + 1)

enum {
	/* Bits mirrored one-to-one from the exported MLX5E_TC_FLAG_* values */
	MLX5E_TC_FLOW_FLAG_INGRESS	= MLX5E_TC_FLAG_INGRESS_BIT,
	MLX5E_TC_FLOW_FLAG_EGRESS	= MLX5E_TC_FLAG_EGRESS_BIT,
	MLX5E_TC_FLOW_FLAG_ESWITCH	= MLX5E_TC_FLAG_ESW_OFFLOAD_BIT,
	MLX5E_TC_FLOW_FLAG_FT		= MLX5E_TC_FLAG_FT_OFFLOAD_BIT,
	MLX5E_TC_FLOW_FLAG_NIC		= MLX5E_TC_FLAG_NIC_OFFLOAD_BIT,
	/* Internal lifecycle/state bits, allocated above the exported range */
	MLX5E_TC_FLOW_FLAG_OFFLOADED	= MLX5E_TC_FLOW_BASE,
	MLX5E_TC_FLOW_FLAG_HAIRPIN	= MLX5E_TC_FLOW_BASE + 1,
	MLX5E_TC_FLOW_FLAG_HAIRPIN_RSS	= MLX5E_TC_FLOW_BASE + 2,
	MLX5E_TC_FLOW_FLAG_SLOW		= MLX5E_TC_FLOW_BASE + 3,
	MLX5E_TC_FLOW_FLAG_DUP		= MLX5E_TC_FLOW_BASE + 4,
	MLX5E_TC_FLOW_FLAG_NOT_READY	= MLX5E_TC_FLOW_BASE + 5,
	MLX5E_TC_FLOW_FLAG_DELETED	= MLX5E_TC_FLOW_BASE + 6,
	MLX5E_TC_FLOW_FLAG_CT		= MLX5E_TC_FLOW_BASE + 7,
	MLX5E_TC_FLOW_FLAG_L3_TO_L2_DECAP = MLX5E_TC_FLOW_BASE + 8,
};

/* Max number of extra HW rules a single flow may be split into; sizes the
 * rule[] array in struct mlx5e_tc_flow.
 */
#define MLX5E_TC_MAX_SPLITS 1
101e4ad91f2SChris Mi 
/* Helper struct for accessing a struct containing list_head array.
 * Containing struct
 *   |- Helper array
 *      [0] Helper item 0
 *          |- list_head item 0
 *          |- index (0)
 *      [1] Helper item 1
 *          |- list_head item 1
 *          |- index (1)
 * To access the containing struct from one of the list_head items:
 * 1. Get the helper item from the list_head item using
 *    helper item =
 *        container_of(list_head item, helper struct type, list_head field)
 * 2. Get the contining struct from the helper item and its index in the array:
 *    containing struct =
 *        container_of(helper item, containing struct type, helper field[index])
 */
struct encap_flow_item {
	struct mlx5e_encap_entry *e; /* attached encap instance */
	struct list_head list;	/* entry on the encap instance's flow list */
	int index;		/* this item's index within flow->encaps[] */
};
12479baaec7SEli Britstein 
/* Driver representation of one offloaded TC flow (filter). Reference counted;
 * freed via RCU once the last reference is dropped (see mlx5e_flow_put()).
 */
struct mlx5e_tc_flow {
	struct rhash_head	node;	/* membership in the per-priv flow rhashtable */
	struct mlx5e_priv	*priv;	/* owning netdev private data */
	u64			cookie;	/* TC filter cookie identifying this flow */
	unsigned long		flags;	/* MLX5E_TC_FLOW_FLAG_* bits, atomic bitops */
	struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1]; /* HW rule(s); >1 when split */

	/* flows sharing the same reformat object - currently mpls decap */
	struct list_head l3_to_l2_reformat;
	struct mlx5e_decap_entry *decap_reformat;

	/* Flow can be associated with multiple encap IDs.
	 * The number of encaps is bounded by the number of supported
	 * destinations.
	 */
	struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS];
	struct mlx5e_tc_flow    *peer_flow;	/* mirror flow on the peer eswitch, if any */
	struct mlx5e_mod_hdr_entry *mh; /* attached mod header instance */
	struct list_head	mod_hdr; /* flows sharing the same mod hdr ID */
	struct mlx5e_hairpin_entry *hpe; /* attached hairpin instance */
	struct list_head	hairpin; /* flows sharing the same hairpin */
	struct list_head	peer;    /* flows with peer flow */
	struct list_head	unready; /* flows not ready to be offloaded (e.g due to missing route) */
	int			tmp_efi_index;	/* scratch encap index used during neigh update */
	struct list_head	tmp_list; /* temporary flow list used by neigh update */
	refcount_t		refcnt;		/* lifetime; see mlx5e_flow_get/put */
	struct rcu_head		rcu_head;	/* for kfree_rcu() on release */
	struct completion	init_done;	/* signalled when flow init finishes */
	int tunnel_id; /* the mapped tunnel id of this flow */

	/* exactly one of these is valid, selected by the ESWITCH flag */
	union {
		struct mlx5_esw_flow_attr esw_attr[0];
		struct mlx5_nic_flow_attr nic_attr[0];
	};
};
160e8f887acSAmir Vadai 
/* Scratch state accumulated while parsing a TC filter's match and actions,
 * before the flow is programmed into hardware.
 */
struct mlx5e_tc_flow_parse_attr {
	const struct ip_tunnel_info *tun_info[MLX5_MAX_FLOW_FWD_VPORTS]; /* per-destination tunnel metadata */
	struct net_device *filter_dev;	/* device the filter was installed on */
	struct mlx5_flow_spec spec;	/* accumulated match criteria/values */
	struct mlx5e_tc_mod_hdr_acts mod_hdr_acts; /* pending header-rewrite actions */
	int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS]; /* ifindex per mirred destination */
	struct ethhdr eth;		/* saved L2 header (used for mpls/l3 decap) — TODO confirm */
};

/* NIC TC flow table sizing */
#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(16)
172e8f887acSAmir Vadai 
/* Mapping from logical TC attributes to metadata registers:
 * mfield  - modify-header field id of the register to write,
 * moffset - byte offset within that register (used *8 as bit offset),
 * mlen    - length in bytes to write (used *8 as bit length),
 * soffset - byte offset of the register inside fte_match_param, for matching.
 */
struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
	[CHAIN_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
		.moffset = 0,
		.mlen = 2,
	},
	[TUNNEL_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1,
		.moffset = 3,
		.mlen = 1,
		.soffset = MLX5_BYTE_OFF(fte_match_param,
					 misc_parameters_2.metadata_reg_c_1),
	},
	/* Connection-tracking register mappings are defined by the CT module */
	[ZONE_TO_REG] = zone_to_reg_ct,
	[CTSTATE_TO_REG] = ctstate_to_reg_ct,
	[MARK_TO_REG] = mark_to_reg_ct,
	[LABELS_TO_REG] = labels_to_reg_ct,
	[FTEID_TO_REG] = fteid_to_reg_ct,
	[TUPLEID_TO_REG] = tupleid_to_reg_ct,
};

static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
1950a7fcb78SPaul Blakey 
/* Add a match on a metadata register to @spec.
 *
 * @spec: flow spec whose criteria/value buffers are patched in place
 * @type: which register mapping to use (soffset/mlen from the table above)
 * @data: value to match
 * @mask: mask for the match
 *
 * The cpu_to_be32()+shift converts the value to big-endian and right-aligns
 * it so that the first @match_len bytes of the resulting word are the
 * most-significant bytes of the value; those bytes are then memcpy'd into
 * the match buffers at the register's byte offset.
 * NOTE(review): the arithmetic shift on a __be32-typed value relies on this
 * little-endian byte layout — verify on big-endian builds.
 */
void
mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
			    enum mlx5e_tc_attr_to_reg type,
			    u32 data,
			    u32 mask)
{
	int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
	int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	void *headers_c = spec->match_criteria;
	void *headers_v = spec->match_value;
	void *fmask, *fval;

	fmask = headers_c + soffset;
	fval = headers_v + soffset;

	mask = cpu_to_be32(mask) >> (32 - (match_len * 8));
	data = cpu_to_be32(data) >> (32 - (match_len * 8));

	memcpy(fmask, &mask, match_len);
	memcpy(fval, &data, match_len);

	/* register matches live in the misc_parameters_2 criteria group */
	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
}
2190a7fcb78SPaul Blakey 
/* Append a modify-header SET action that writes @data into the metadata
 * register described by @type's mapping (mfield/moffset/mlen).
 *
 * @mdev:         device used for allocating action buffer space
 * @mod_hdr_acts: growing list of pending modify-header actions
 * @type:         register mapping index
 * @data:         value to write
 *
 * Returns 0 on success or a negative errno from action allocation.
 */
int
mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
			  struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
			  enum mlx5e_tc_attr_to_reg type,
			  u32 data)
{
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield;
	int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	char *modact;
	int err;

	/* ensure mod_hdr_acts has room for one more action — presumably
	 * grows the buffer on demand; verify against alloc_mod_hdr_actions()
	 */
	err = alloc_mod_hdr_actions(mdev, MLX5_FLOW_NAMESPACE_FDB,
				    mod_hdr_acts);
	if (err)
		return err;

	/* next free action slot */
	modact = mod_hdr_acts->actions +
		 (mod_hdr_acts->num_actions * MLX5_MH_ACT_SZ);

	/* Firmware has 5bit length field and 0 means 32bits */
	if (mlen == 4)
		mlen = 0;

	MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, modact, field, mfield);
	MLX5_SET(set_action_in, modact, offset, moffset * 8);
	MLX5_SET(set_action_in, modact, length, mlen * 8);
	MLX5_SET(set_action_in, modact, data, data);
	mod_hdr_acts->num_actions++;

	return 0;
}
2530a7fcb78SPaul Blakey 
/* Resources backing one hairpin channel pair between two functions */
struct mlx5e_hairpin {
	struct mlx5_hairpin *pair;	/* SQ/RQ pair implementing the hairpin */

	struct mlx5_core_dev *func_mdev;	/* device the TIR/TD are created on */
	struct mlx5e_priv *func_priv;		/* priv of that device */
	u32 tdn;	/* transport domain number */
	u32 tirn;	/* direct TIR pointing at the first hairpin RQ */

	/* RSS over hairpin RQs */
	int num_channels;
	struct mlx5e_rqt indir_rqt;
	u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
	struct mlx5e_ttc_table ttc;
};
26777ab67b7SOr Gerlitz 
/* Hash-table entry sharing one hairpin instance between flows that target
 * the same (peer_vhca_id, prio) pair. Reference counted; res_ready is
 * completed once hp is initialized (or init failed).
 */
struct mlx5e_hairpin_entry {
	/* a node of a hash table which keeps all the  hairpin entries */
	struct hlist_node hairpin_hlist;

	/* protects flows list */
	spinlock_t flows_lock;
	/* flows sharing the same hairpin */
	struct list_head flows;
	/* hpe's that were not fully initialized when dead peer update event
	 * function traversed them.
	 */
	struct list_head dead_peer_wait_list;

	u16 peer_vhca_id;	/* vhca id of the hairpin peer function */
	u8 prio;		/* matching priority this hairpin serves */
	struct mlx5e_hairpin *hp;	/* the shared hairpin resources */
	refcount_t refcnt;
	struct completion res_ready;
};
2875c65c564SOr Gerlitz 
/* Hash key for deduplicating identical modify-header action lists */
struct mod_hdr_key {
	int num_actions;	/* number of MLX5_MH_ACT_SZ-sized actions */
	void *actions;		/* the raw action array */
};

/* One shared modify-header object; flows with identical action lists attach
 * to the same entry. res_ready/compl_result implement the "first creator
 * allocates, others wait" protocol (see mlx5e_attach_mod_hdr()).
 */
struct mlx5e_mod_hdr_entry {
	/* a node of a hash table which keeps all the mod_hdr entries */
	struct hlist_node mod_hdr_hlist;

	/* protects flows list */
	spinlock_t flows_lock;
	/* flows sharing the same mod_hdr entry */
	struct list_head flows;

	struct mod_hdr_key key;

	struct mlx5_modify_hdr *modify_hdr;	/* FW object; valid iff compl_result > 0 */

	refcount_t refcnt;
	struct completion res_ready;
	int compl_result;	/* >0: modify_hdr valid, <0: allocation failed */
};
31011c9c548SOr Gerlitz 
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow);

/* Take a reference on @flow unless it is NULL or already being released.
 * Returns the flow, or ERR_PTR(-EINVAL) when no reference could be taken.
 */
static struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow)
{
	if (!flow || !refcount_inc_not_zero(&flow->refcnt))
		return ERR_PTR(-EINVAL);
	return flow;
}
3205a7e5bcbSVlad Buslov 
/* Drop a reference on @flow; on the last reference, tear down its HW state
 * and free the flow after an RCU grace period (readers may still hold
 * RCU-protected pointers to it).
 */
static void mlx5e_flow_put(struct mlx5e_priv *priv,
			   struct mlx5e_tc_flow *flow)
{
	if (refcount_dec_and_test(&flow->refcnt)) {
		mlx5e_tc_del_flow(priv, flow);
		kfree_rcu(flow, rcu_head);
	}
}
3295a7e5bcbSVlad Buslov 
/* Atomically set @flag in flow->flags with release-like ordering: the
 * barrier guarantees prior stores are visible before the bit appears set.
 */
static void __flow_flag_set(struct mlx5e_tc_flow *flow, unsigned long flag)
{
	/* Complete all memory stores before setting bit. */
	smp_mb__before_atomic();
	set_bit(flag, &flow->flags);
}

/* Convenience wrapper taking the short flag name (without the prefix) */
#define flow_flag_set(flow, flag) __flow_flag_set(flow, MLX5E_TC_FLOW_FLAG_##flag)
338226f2ca3SVlad Buslov 
/* Atomically set @flag and return its previous value. No extra barriers
 * needed: test_and_set_bit() is fully ordered on its own.
 */
static bool __flow_flag_test_and_set(struct mlx5e_tc_flow *flow,
				     unsigned long flag)
{
	/* test_and_set_bit() provides all necessary barriers */
	return test_and_set_bit(flag, &flow->flags);
}

/* Convenience wrapper taking the short flag name (without the prefix) */
#define flow_flag_test_and_set(flow, flag)			\
	__flow_flag_test_and_set(flow,				\
				 MLX5E_TC_FLOW_FLAG_##flag)
349c5d326b2SVlad Buslov 
/* Atomically clear @flag in flow->flags; the barrier orders prior stores
 * before the bit is observed cleared (mirror of __flow_flag_set()).
 */
static void __flow_flag_clear(struct mlx5e_tc_flow *flow, unsigned long flag)
{
	/* Complete all memory stores before clearing bit. */
	smp_mb__before_atomic();
	clear_bit(flag, &flow->flags);
}

/* Convenience wrapper taking the short flag name (without the prefix) */
#define flow_flag_clear(flow, flag) __flow_flag_clear(flow, \
						      MLX5E_TC_FLOW_FLAG_##flag)
359226f2ca3SVlad Buslov 
/* Test @flag with acquire-like ordering: the barrier after the read ensures
 * subsequent loads of flow fields happen after the flag was observed, pairing
 * with the store barriers in __flow_flag_set()/__flow_flag_clear().
 */
static bool __flow_flag_test(struct mlx5e_tc_flow *flow, unsigned long flag)
{
	bool ret = test_bit(flag, &flow->flags);

	/* Read fields of flow structure only after checking flags. */
	smp_mb__after_atomic();
	return ret;
}

/* Convenience wrapper taking the short flag name (without the prefix) */
#define flow_flag_test(flow, flag) __flow_flag_test(flow, \
						    MLX5E_TC_FLOW_FLAG_##flag)
371226f2ca3SVlad Buslov 
372226f2ca3SVlad Buslov static bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow)
373226f2ca3SVlad Buslov {
374226f2ca3SVlad Buslov 	return flow_flag_test(flow, ESWITCH);
375226f2ca3SVlad Buslov }
376226f2ca3SVlad Buslov 
37784179981SPaul Blakey static bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow)
37884179981SPaul Blakey {
37984179981SPaul Blakey 	return flow_flag_test(flow, FT);
38084179981SPaul Blakey }
38184179981SPaul Blakey 
382226f2ca3SVlad Buslov static bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow)
383226f2ca3SVlad Buslov {
384226f2ca3SVlad Buslov 	return flow_flag_test(flow, OFFLOADED);
385226f2ca3SVlad Buslov }
386226f2ca3SVlad Buslov 
38711c9c548SOr Gerlitz static inline u32 hash_mod_hdr_info(struct mod_hdr_key *key)
38811c9c548SOr Gerlitz {
38911c9c548SOr Gerlitz 	return jhash(key->actions,
39011c9c548SOr Gerlitz 		     key->num_actions * MLX5_MH_ACT_SZ, 0);
39111c9c548SOr Gerlitz }
39211c9c548SOr Gerlitz 
39311c9c548SOr Gerlitz static inline int cmp_mod_hdr_info(struct mod_hdr_key *a,
39411c9c548SOr Gerlitz 				   struct mod_hdr_key *b)
39511c9c548SOr Gerlitz {
39611c9c548SOr Gerlitz 	if (a->num_actions != b->num_actions)
39711c9c548SOr Gerlitz 		return 1;
39811c9c548SOr Gerlitz 
39911c9c548SOr Gerlitz 	return memcmp(a->actions, b->actions, a->num_actions * MLX5_MH_ACT_SZ);
40011c9c548SOr Gerlitz }
40111c9c548SOr Gerlitz 
402dd58edc3SVlad Buslov static struct mod_hdr_tbl *
403dd58edc3SVlad Buslov get_mod_hdr_table(struct mlx5e_priv *priv, int namespace)
404dd58edc3SVlad Buslov {
405dd58edc3SVlad Buslov 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
406dd58edc3SVlad Buslov 
407dd58edc3SVlad Buslov 	return namespace == MLX5_FLOW_NAMESPACE_FDB ? &esw->offloads.mod_hdr :
408dd58edc3SVlad Buslov 		&priv->fs.tc.mod_hdr;
409dd58edc3SVlad Buslov }
410dd58edc3SVlad Buslov 
411dd58edc3SVlad Buslov static struct mlx5e_mod_hdr_entry *
412dd58edc3SVlad Buslov mlx5e_mod_hdr_get(struct mod_hdr_tbl *tbl, struct mod_hdr_key *key, u32 hash_key)
413dd58edc3SVlad Buslov {
414dd58edc3SVlad Buslov 	struct mlx5e_mod_hdr_entry *mh, *found = NULL;
415dd58edc3SVlad Buslov 
416dd58edc3SVlad Buslov 	hash_for_each_possible(tbl->hlist, mh, mod_hdr_hlist, hash_key) {
417dd58edc3SVlad Buslov 		if (!cmp_mod_hdr_info(&mh->key, key)) {
418dd58edc3SVlad Buslov 			refcount_inc(&mh->refcnt);
419dd58edc3SVlad Buslov 			found = mh;
420dd58edc3SVlad Buslov 			break;
421dd58edc3SVlad Buslov 		}
422dd58edc3SVlad Buslov 	}
423dd58edc3SVlad Buslov 
424dd58edc3SVlad Buslov 	return found;
425dd58edc3SVlad Buslov }
426dd58edc3SVlad Buslov 
/* Release one reference on @mh. When the last reference drops,
 * refcount_dec_and_mutex_lock() returns with the table lock held; the entry
 * is then unhashed, its FW object freed (only if creation succeeded, i.e.
 * compl_result > 0) and the entry itself released.
 */
static void mlx5e_mod_hdr_put(struct mlx5e_priv *priv,
			      struct mlx5e_mod_hdr_entry *mh,
			      int namespace)
{
	struct mod_hdr_tbl *tbl = get_mod_hdr_table(priv, namespace);

	if (!refcount_dec_and_mutex_lock(&mh->refcnt, &tbl->lock))
		return;
	hash_del(&mh->mod_hdr_hlist);
	mutex_unlock(&tbl->lock);

	/* no flow may still be attached at this point */
	WARN_ON(!list_empty(&mh->flows));
	if (mh->compl_result > 0)
		mlx5_modify_header_dealloc(priv->mdev, mh->modify_hdr);

	kfree(mh);
}
444dd58edc3SVlad Buslov 
445d2faae25SVlad Buslov static int get_flow_name_space(struct mlx5e_tc_flow *flow)
446d2faae25SVlad Buslov {
447d2faae25SVlad Buslov 	return mlx5e_is_eswitch_flow(flow) ?
448d2faae25SVlad Buslov 		MLX5_FLOW_NAMESPACE_FDB : MLX5_FLOW_NAMESPACE_KERNEL;
449d2faae25SVlad Buslov }
/* Attach @flow to a (possibly shared) modify-header object built from the
 * actions accumulated in @parse_attr.
 *
 * Concurrency protocol: under tbl->lock we either find an existing entry
 * (take a ref, drop the lock, wait on res_ready, then check compl_result)
 * or insert a new, not-yet-initialized entry and drop the lock. The inserting
 * thread then allocates the FW object outside the lock and publishes the
 * outcome via compl_result + complete_all(), so concurrent waiters never
 * block the table lock on a FW command.
 *
 * Returns 0 on success; -ENOMEM, -EREMOTEIO (peer creation failed) or the
 * FW allocation error otherwise.
 */
static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
				struct mlx5e_tc_flow *flow,
				struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	int num_actions, actions_size, namespace, err;
	struct mlx5e_mod_hdr_entry *mh;
	struct mod_hdr_tbl *tbl;
	struct mod_hdr_key key;
	u32 hash_key;

	num_actions  = parse_attr->mod_hdr_acts.num_actions;
	actions_size = MLX5_MH_ACT_SZ * num_actions;

	key.actions = parse_attr->mod_hdr_acts.actions;
	key.num_actions = num_actions;

	hash_key = hash_mod_hdr_info(&key);

	namespace = get_flow_name_space(flow);
	tbl = get_mod_hdr_table(priv, namespace);

	mutex_lock(&tbl->lock);
	mh = mlx5e_mod_hdr_get(tbl, &key, hash_key);
	if (mh) {
		/* entry exists (ref already taken); wait for its creator */
		mutex_unlock(&tbl->lock);
		wait_for_completion(&mh->res_ready);

		if (mh->compl_result < 0) {
			err = -EREMOTEIO;
			goto attach_header_err;
		}
		goto attach_flow;
	}

	/* first user: allocate entry with the key's actions inlined after it */
	mh = kzalloc(sizeof(*mh) + actions_size, GFP_KERNEL);
	if (!mh) {
		mutex_unlock(&tbl->lock);
		return -ENOMEM;
	}

	mh->key.actions = (void *)mh + sizeof(*mh);
	memcpy(mh->key.actions, key.actions, actions_size);
	mh->key.num_actions = num_actions;
	spin_lock_init(&mh->flows_lock);
	INIT_LIST_HEAD(&mh->flows);
	refcount_set(&mh->refcnt, 1);
	init_completion(&mh->res_ready);

	/* publish the entry before the FW call; waiters block on res_ready */
	hash_add(tbl->hlist, &mh->mod_hdr_hlist, hash_key);
	mutex_unlock(&tbl->lock);

	mh->modify_hdr = mlx5_modify_header_alloc(priv->mdev, namespace,
						  mh->key.num_actions,
						  mh->key.actions);
	if (IS_ERR(mh->modify_hdr)) {
		err = PTR_ERR(mh->modify_hdr);
		mh->compl_result = err;
		goto alloc_header_err;
	}
	mh->compl_result = 1;
	complete_all(&mh->res_ready);

attach_flow:
	flow->mh = mh;
	spin_lock(&mh->flows_lock);
	list_add(&flow->mod_hdr, &mh->flows);
	spin_unlock(&mh->flows_lock);
	if (mlx5e_is_eswitch_flow(flow))
		flow->esw_attr->modify_hdr = mh->modify_hdr;
	else
		flow->nic_attr->modify_hdr = mh->modify_hdr;

	return 0;

alloc_header_err:
	/* wake waiters so they can observe the failed compl_result */
	complete_all(&mh->res_ready);
attach_header_err:
	/* drops our ref; frees the entry if we were the only user */
	mlx5e_mod_hdr_put(priv, mh, namespace);
	return err;
}
53011c9c548SOr Gerlitz 
/* Detach @flow from its shared modify-header entry and drop the entry
 * reference taken in mlx5e_attach_mod_hdr(). Safe to call when the flow
 * never attached (flow->mh == NULL).
 */
static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
				 struct mlx5e_tc_flow *flow)
{
	/* flow wasn't fully initialized */
	if (!flow->mh)
		return;

	spin_lock(&flow->mh->flows_lock);
	list_del(&flow->mod_hdr);
	spin_unlock(&flow->mh->flows_lock);

	mlx5e_mod_hdr_put(priv, flow->mh, get_flow_name_space(flow));
	flow->mh = NULL;
}
54511c9c548SOr Gerlitz 
54677ab67b7SOr Gerlitz static
54777ab67b7SOr Gerlitz struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
54877ab67b7SOr Gerlitz {
54977ab67b7SOr Gerlitz 	struct net_device *netdev;
55077ab67b7SOr Gerlitz 	struct mlx5e_priv *priv;
55177ab67b7SOr Gerlitz 
55277ab67b7SOr Gerlitz 	netdev = __dev_get_by_index(net, ifindex);
55377ab67b7SOr Gerlitz 	priv = netdev_priv(netdev);
55477ab67b7SOr Gerlitz 	return priv->mdev;
55577ab67b7SOr Gerlitz }
55677ab67b7SOr Gerlitz 
/* Create the transport objects for a hairpin: a transport domain and a
 * direct TIR pointing at the first hairpin RQ. On failure, already-created
 * objects are rolled back via the goto-cleanup chain.
 *
 * Returns 0 on success or a negative errno.
 */
static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
{
	u32 in[MLX5_ST_SZ_DW(create_tir_in)] = {};
	void *tirc;
	int err;

	err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn);
	if (err)
		goto alloc_tdn_err;

	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

	/* direct (non-RSS) TIR steering into the hairpin's first RQ */
	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
	MLX5_SET(tirc, tirc, inline_rqn, hp->pair->rqn[0]);
	MLX5_SET(tirc, tirc, transport_domain, hp->tdn);

	err = mlx5_core_create_tir(hp->func_mdev, in, &hp->tirn);
	if (err)
		goto create_tir_err;

	return 0;

create_tir_err:
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
alloc_tdn_err:
	return err;
}
58477ab67b7SOr Gerlitz 
/* Destroy the hairpin transport objects in reverse order of creation */
static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
{
	mlx5_core_destroy_tir(hp->func_mdev, hp->tirn);
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
}
59077ab67b7SOr Gerlitz 
/* Fill the RQT context's rq_num[] with hairpin RQ numbers, spreading
 * MLX5E_INDIR_RQT_SIZE slots across hp->num_channels using the default
 * indirection map. For XOR RSS hashing the index bits are inverted, matching
 * the driver's regular indirect-RQT construction.
 */
static void mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
{
	u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE], rqn;
	struct mlx5e_priv *priv = hp->func_priv;
	int i, ix, sz = MLX5E_INDIR_RQT_SIZE;

	mlx5e_build_default_indir_rqt(indirection_rqt, sz,
				      hp->num_channels);

	for (i = 0; i < sz; i++) {
		ix = i;
		if (priv->rss_params.hfunc == ETH_RSS_HASH_XOR)
			ix = mlx5e_bits_invert(i, ilog2(sz));
		ix = indirection_rqt[ix];
		rqn = hp->pair->rqn[ix];
		MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
	}
}
6093f6d08d1SOr Gerlitz 
/* Create the indirect RQT used for RSS over the hairpin RQs. The RQT context
 * is built in a temporarily allocated command buffer sized for sz entries.
 * On success, marks hp->indir_rqt as enabled.
 *
 * Returns 0 on success, -ENOMEM or a FW command error otherwise.
 */
static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
{
	int inlen, err, sz = MLX5E_INDIR_RQT_SIZE;
	struct mlx5e_priv *priv = hp->func_priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqtc;
	u32 *in;

	/* command layout: fixed header + sz 32-bit RQ numbers */
	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

	mlx5e_hairpin_fill_rqt_rqns(hp, rqtc);

	err = mlx5_core_create_rqt(mdev, in, inlen, &hp->indir_rqt.rqtn);
	if (!err)
		hp->indir_rqt.enabled = true;

	kvfree(in);
	return err;
}
6373f6d08d1SOr Gerlitz 
6383f6d08d1SOr Gerlitz static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
6393f6d08d1SOr Gerlitz {
6403f6d08d1SOr Gerlitz 	struct mlx5e_priv *priv = hp->func_priv;
6413f6d08d1SOr Gerlitz 	u32 in[MLX5_ST_SZ_DW(create_tir_in)];
6423f6d08d1SOr Gerlitz 	int tt, i, err;
6433f6d08d1SOr Gerlitz 	void *tirc;
6443f6d08d1SOr Gerlitz 
6453f6d08d1SOr Gerlitz 	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
646d930ac79SAya Levin 		struct mlx5e_tirc_config ttconfig = mlx5e_tirc_get_default_config(tt);
647d930ac79SAya Levin 
6483f6d08d1SOr Gerlitz 		memset(in, 0, MLX5_ST_SZ_BYTES(create_tir_in));
6493f6d08d1SOr Gerlitz 		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
6503f6d08d1SOr Gerlitz 
6513f6d08d1SOr Gerlitz 		MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
6523f6d08d1SOr Gerlitz 		MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
6533f6d08d1SOr Gerlitz 		MLX5_SET(tirc, tirc, indirect_table, hp->indir_rqt.rqtn);
654bbeb53b8SAya Levin 		mlx5e_build_indir_tir_ctx_hash(&priv->rss_params, &ttconfig, tirc, false);
655bbeb53b8SAya Levin 
6563f6d08d1SOr Gerlitz 		err = mlx5_core_create_tir(hp->func_mdev, in,
657e0b4b472SLeon Romanovsky 					   &hp->indir_tirn[tt]);
6583f6d08d1SOr Gerlitz 		if (err) {
6593f6d08d1SOr Gerlitz 			mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err);
6603f6d08d1SOr Gerlitz 			goto err_destroy_tirs;
6613f6d08d1SOr Gerlitz 		}
6623f6d08d1SOr Gerlitz 	}
6633f6d08d1SOr Gerlitz 	return 0;
6643f6d08d1SOr Gerlitz 
6653f6d08d1SOr Gerlitz err_destroy_tirs:
6663f6d08d1SOr Gerlitz 	for (i = 0; i < tt; i++)
6673f6d08d1SOr Gerlitz 		mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[i]);
6683f6d08d1SOr Gerlitz 	return err;
6693f6d08d1SOr Gerlitz }
6703f6d08d1SOr Gerlitz 
6713f6d08d1SOr Gerlitz static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp)
6723f6d08d1SOr Gerlitz {
6733f6d08d1SOr Gerlitz 	int tt;
6743f6d08d1SOr Gerlitz 
6753f6d08d1SOr Gerlitz 	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
6763f6d08d1SOr Gerlitz 		mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[tt]);
6773f6d08d1SOr Gerlitz }
6783f6d08d1SOr Gerlitz 
6793f6d08d1SOr Gerlitz static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
6803f6d08d1SOr Gerlitz 					 struct ttc_params *ttc_params)
6813f6d08d1SOr Gerlitz {
6823f6d08d1SOr Gerlitz 	struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
6833f6d08d1SOr Gerlitz 	int tt;
6843f6d08d1SOr Gerlitz 
6853f6d08d1SOr Gerlitz 	memset(ttc_params, 0, sizeof(*ttc_params));
6863f6d08d1SOr Gerlitz 
6873f6d08d1SOr Gerlitz 	ttc_params->any_tt_tirn = hp->tirn;
6883f6d08d1SOr Gerlitz 
6893f6d08d1SOr Gerlitz 	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
6903f6d08d1SOr Gerlitz 		ttc_params->indir_tirn[tt] = hp->indir_tirn[tt];
6913f6d08d1SOr Gerlitz 
6926412bb39SEli Cohen 	ft_attr->max_fte = MLX5E_TTC_TABLE_SIZE;
6933f6d08d1SOr Gerlitz 	ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
6943f6d08d1SOr Gerlitz 	ft_attr->prio = MLX5E_TC_PRIO;
6953f6d08d1SOr Gerlitz }
6963f6d08d1SOr Gerlitz 
6973f6d08d1SOr Gerlitz static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
6983f6d08d1SOr Gerlitz {
6993f6d08d1SOr Gerlitz 	struct mlx5e_priv *priv = hp->func_priv;
7003f6d08d1SOr Gerlitz 	struct ttc_params ttc_params;
7013f6d08d1SOr Gerlitz 	int err;
7023f6d08d1SOr Gerlitz 
7033f6d08d1SOr Gerlitz 	err = mlx5e_hairpin_create_indirect_rqt(hp);
7043f6d08d1SOr Gerlitz 	if (err)
7053f6d08d1SOr Gerlitz 		return err;
7063f6d08d1SOr Gerlitz 
7073f6d08d1SOr Gerlitz 	err = mlx5e_hairpin_create_indirect_tirs(hp);
7083f6d08d1SOr Gerlitz 	if (err)
7093f6d08d1SOr Gerlitz 		goto err_create_indirect_tirs;
7103f6d08d1SOr Gerlitz 
7113f6d08d1SOr Gerlitz 	mlx5e_hairpin_set_ttc_params(hp, &ttc_params);
7123f6d08d1SOr Gerlitz 	err = mlx5e_create_ttc_table(priv, &ttc_params, &hp->ttc);
7133f6d08d1SOr Gerlitz 	if (err)
7143f6d08d1SOr Gerlitz 		goto err_create_ttc_table;
7153f6d08d1SOr Gerlitz 
7163f6d08d1SOr Gerlitz 	netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
7173f6d08d1SOr Gerlitz 		   hp->num_channels, hp->ttc.ft.t->id);
7183f6d08d1SOr Gerlitz 
7193f6d08d1SOr Gerlitz 	return 0;
7203f6d08d1SOr Gerlitz 
7213f6d08d1SOr Gerlitz err_create_ttc_table:
7223f6d08d1SOr Gerlitz 	mlx5e_hairpin_destroy_indirect_tirs(hp);
7233f6d08d1SOr Gerlitz err_create_indirect_tirs:
7243f6d08d1SOr Gerlitz 	mlx5e_destroy_rqt(priv, &hp->indir_rqt);
7253f6d08d1SOr Gerlitz 
7263f6d08d1SOr Gerlitz 	return err;
7273f6d08d1SOr Gerlitz }
7283f6d08d1SOr Gerlitz 
7293f6d08d1SOr Gerlitz static void mlx5e_hairpin_rss_cleanup(struct mlx5e_hairpin *hp)
7303f6d08d1SOr Gerlitz {
7313f6d08d1SOr Gerlitz 	struct mlx5e_priv *priv = hp->func_priv;
7323f6d08d1SOr Gerlitz 
7333f6d08d1SOr Gerlitz 	mlx5e_destroy_ttc_table(priv, &hp->ttc);
7343f6d08d1SOr Gerlitz 	mlx5e_hairpin_destroy_indirect_tirs(hp);
7353f6d08d1SOr Gerlitz 	mlx5e_destroy_rqt(priv, &hp->indir_rqt);
7363f6d08d1SOr Gerlitz }
7373f6d08d1SOr Gerlitz 
73877ab67b7SOr Gerlitz static struct mlx5e_hairpin *
73977ab67b7SOr Gerlitz mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params,
74077ab67b7SOr Gerlitz 		     int peer_ifindex)
74177ab67b7SOr Gerlitz {
74277ab67b7SOr Gerlitz 	struct mlx5_core_dev *func_mdev, *peer_mdev;
74377ab67b7SOr Gerlitz 	struct mlx5e_hairpin *hp;
74477ab67b7SOr Gerlitz 	struct mlx5_hairpin *pair;
74577ab67b7SOr Gerlitz 	int err;
74677ab67b7SOr Gerlitz 
74777ab67b7SOr Gerlitz 	hp = kzalloc(sizeof(*hp), GFP_KERNEL);
74877ab67b7SOr Gerlitz 	if (!hp)
74977ab67b7SOr Gerlitz 		return ERR_PTR(-ENOMEM);
75077ab67b7SOr Gerlitz 
75177ab67b7SOr Gerlitz 	func_mdev = priv->mdev;
75277ab67b7SOr Gerlitz 	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
75377ab67b7SOr Gerlitz 
75477ab67b7SOr Gerlitz 	pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
75577ab67b7SOr Gerlitz 	if (IS_ERR(pair)) {
75677ab67b7SOr Gerlitz 		err = PTR_ERR(pair);
75777ab67b7SOr Gerlitz 		goto create_pair_err;
75877ab67b7SOr Gerlitz 	}
75977ab67b7SOr Gerlitz 	hp->pair = pair;
76077ab67b7SOr Gerlitz 	hp->func_mdev = func_mdev;
7613f6d08d1SOr Gerlitz 	hp->func_priv = priv;
7623f6d08d1SOr Gerlitz 	hp->num_channels = params->num_channels;
76377ab67b7SOr Gerlitz 
76477ab67b7SOr Gerlitz 	err = mlx5e_hairpin_create_transport(hp);
76577ab67b7SOr Gerlitz 	if (err)
76677ab67b7SOr Gerlitz 		goto create_transport_err;
76777ab67b7SOr Gerlitz 
7683f6d08d1SOr Gerlitz 	if (hp->num_channels > 1) {
7693f6d08d1SOr Gerlitz 		err = mlx5e_hairpin_rss_init(hp);
7703f6d08d1SOr Gerlitz 		if (err)
7713f6d08d1SOr Gerlitz 			goto rss_init_err;
7723f6d08d1SOr Gerlitz 	}
7733f6d08d1SOr Gerlitz 
77477ab67b7SOr Gerlitz 	return hp;
77577ab67b7SOr Gerlitz 
7763f6d08d1SOr Gerlitz rss_init_err:
7773f6d08d1SOr Gerlitz 	mlx5e_hairpin_destroy_transport(hp);
77877ab67b7SOr Gerlitz create_transport_err:
77977ab67b7SOr Gerlitz 	mlx5_core_hairpin_destroy(hp->pair);
78077ab67b7SOr Gerlitz create_pair_err:
78177ab67b7SOr Gerlitz 	kfree(hp);
78277ab67b7SOr Gerlitz 	return ERR_PTR(err);
78377ab67b7SOr Gerlitz }
78477ab67b7SOr Gerlitz 
78577ab67b7SOr Gerlitz static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp)
78677ab67b7SOr Gerlitz {
7873f6d08d1SOr Gerlitz 	if (hp->num_channels > 1)
7883f6d08d1SOr Gerlitz 		mlx5e_hairpin_rss_cleanup(hp);
78977ab67b7SOr Gerlitz 	mlx5e_hairpin_destroy_transport(hp);
79077ab67b7SOr Gerlitz 	mlx5_core_hairpin_destroy(hp->pair);
79177ab67b7SOr Gerlitz 	kvfree(hp);
79277ab67b7SOr Gerlitz }
79377ab67b7SOr Gerlitz 
794106be53bSOr Gerlitz static inline u32 hash_hairpin_info(u16 peer_vhca_id, u8 prio)
795106be53bSOr Gerlitz {
796106be53bSOr Gerlitz 	return (peer_vhca_id << 16 | prio);
797106be53bSOr Gerlitz }
798106be53bSOr Gerlitz 
7995c65c564SOr Gerlitz static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
800106be53bSOr Gerlitz 						     u16 peer_vhca_id, u8 prio)
8015c65c564SOr Gerlitz {
8025c65c564SOr Gerlitz 	struct mlx5e_hairpin_entry *hpe;
803106be53bSOr Gerlitz 	u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);
8045c65c564SOr Gerlitz 
8055c65c564SOr Gerlitz 	hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe,
806106be53bSOr Gerlitz 			       hairpin_hlist, hash_key) {
807e4f9abbdSVlad Buslov 		if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio) {
808e4f9abbdSVlad Buslov 			refcount_inc(&hpe->refcnt);
8095c65c564SOr Gerlitz 			return hpe;
8105c65c564SOr Gerlitz 		}
811e4f9abbdSVlad Buslov 	}
8125c65c564SOr Gerlitz 
8135c65c564SOr Gerlitz 	return NULL;
8145c65c564SOr Gerlitz }
8155c65c564SOr Gerlitz 
816e4f9abbdSVlad Buslov static void mlx5e_hairpin_put(struct mlx5e_priv *priv,
817e4f9abbdSVlad Buslov 			      struct mlx5e_hairpin_entry *hpe)
818e4f9abbdSVlad Buslov {
819e4f9abbdSVlad Buslov 	/* no more hairpin flows for us, release the hairpin pair */
820b32accdaSVlad Buslov 	if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &priv->fs.tc.hairpin_tbl_lock))
821e4f9abbdSVlad Buslov 		return;
822b32accdaSVlad Buslov 	hash_del(&hpe->hairpin_hlist);
823b32accdaSVlad Buslov 	mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
824e4f9abbdSVlad Buslov 
825db76ca24SVlad Buslov 	if (!IS_ERR_OR_NULL(hpe->hp)) {
826e4f9abbdSVlad Buslov 		netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
827e4f9abbdSVlad Buslov 			   dev_name(hpe->hp->pair->peer_mdev->device));
828e4f9abbdSVlad Buslov 
829e4f9abbdSVlad Buslov 		mlx5e_hairpin_destroy(hpe->hp);
830db76ca24SVlad Buslov 	}
831db76ca24SVlad Buslov 
832db76ca24SVlad Buslov 	WARN_ON(!list_empty(&hpe->flows));
833e4f9abbdSVlad Buslov 	kfree(hpe);
834e4f9abbdSVlad Buslov }
835e4f9abbdSVlad Buslov 
836106be53bSOr Gerlitz #define UNKNOWN_MATCH_PRIO 8
837106be53bSOr Gerlitz 
838106be53bSOr Gerlitz static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
839e98bedf5SEli Britstein 				  struct mlx5_flow_spec *spec, u8 *match_prio,
840e98bedf5SEli Britstein 				  struct netlink_ext_ack *extack)
841106be53bSOr Gerlitz {
842106be53bSOr Gerlitz 	void *headers_c, *headers_v;
843106be53bSOr Gerlitz 	u8 prio_val, prio_mask = 0;
844106be53bSOr Gerlitz 	bool vlan_present;
845106be53bSOr Gerlitz 
846106be53bSOr Gerlitz #ifdef CONFIG_MLX5_CORE_EN_DCB
847106be53bSOr Gerlitz 	if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) {
848e98bedf5SEli Britstein 		NL_SET_ERR_MSG_MOD(extack,
849e98bedf5SEli Britstein 				   "only PCP trust state supported for hairpin");
850106be53bSOr Gerlitz 		return -EOPNOTSUPP;
851106be53bSOr Gerlitz 	}
852106be53bSOr Gerlitz #endif
853106be53bSOr Gerlitz 	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
854106be53bSOr Gerlitz 	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
855106be53bSOr Gerlitz 
856106be53bSOr Gerlitz 	vlan_present = MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag);
857106be53bSOr Gerlitz 	if (vlan_present) {
858106be53bSOr Gerlitz 		prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
859106be53bSOr Gerlitz 		prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
860106be53bSOr Gerlitz 	}
861106be53bSOr Gerlitz 
862106be53bSOr Gerlitz 	if (!vlan_present || !prio_mask) {
863106be53bSOr Gerlitz 		prio_val = UNKNOWN_MATCH_PRIO;
864106be53bSOr Gerlitz 	} else if (prio_mask != 0x7) {
865e98bedf5SEli Britstein 		NL_SET_ERR_MSG_MOD(extack,
866e98bedf5SEli Britstein 				   "masked priority match not supported for hairpin");
867106be53bSOr Gerlitz 		return -EOPNOTSUPP;
868106be53bSOr Gerlitz 	}
869106be53bSOr Gerlitz 
870106be53bSOr Gerlitz 	*match_prio = prio_val;
871106be53bSOr Gerlitz 	return 0;
872106be53bSOr Gerlitz }
873106be53bSOr Gerlitz 
8745c65c564SOr Gerlitz static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
8755c65c564SOr Gerlitz 				  struct mlx5e_tc_flow *flow,
876e98bedf5SEli Britstein 				  struct mlx5e_tc_flow_parse_attr *parse_attr,
877e98bedf5SEli Britstein 				  struct netlink_ext_ack *extack)
8785c65c564SOr Gerlitz {
87998b66cb1SEli Britstein 	int peer_ifindex = parse_attr->mirred_ifindex[0];
8805c65c564SOr Gerlitz 	struct mlx5_hairpin_params params;
881d8822868SOr Gerlitz 	struct mlx5_core_dev *peer_mdev;
8825c65c564SOr Gerlitz 	struct mlx5e_hairpin_entry *hpe;
8835c65c564SOr Gerlitz 	struct mlx5e_hairpin *hp;
8843f6d08d1SOr Gerlitz 	u64 link_speed64;
8853f6d08d1SOr Gerlitz 	u32 link_speed;
886106be53bSOr Gerlitz 	u8 match_prio;
887d8822868SOr Gerlitz 	u16 peer_id;
8885c65c564SOr Gerlitz 	int err;
8895c65c564SOr Gerlitz 
890d8822868SOr Gerlitz 	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
891d8822868SOr Gerlitz 	if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
892e98bedf5SEli Britstein 		NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
8935c65c564SOr Gerlitz 		return -EOPNOTSUPP;
8945c65c564SOr Gerlitz 	}
8955c65c564SOr Gerlitz 
896d8822868SOr Gerlitz 	peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
897e98bedf5SEli Britstein 	err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio,
898e98bedf5SEli Britstein 				     extack);
899106be53bSOr Gerlitz 	if (err)
900106be53bSOr Gerlitz 		return err;
901b32accdaSVlad Buslov 
902b32accdaSVlad Buslov 	mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
903106be53bSOr Gerlitz 	hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
904db76ca24SVlad Buslov 	if (hpe) {
905db76ca24SVlad Buslov 		mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
906db76ca24SVlad Buslov 		wait_for_completion(&hpe->res_ready);
907db76ca24SVlad Buslov 
908db76ca24SVlad Buslov 		if (IS_ERR(hpe->hp)) {
909db76ca24SVlad Buslov 			err = -EREMOTEIO;
910db76ca24SVlad Buslov 			goto out_err;
911db76ca24SVlad Buslov 		}
9125c65c564SOr Gerlitz 		goto attach_flow;
913db76ca24SVlad Buslov 	}
9145c65c564SOr Gerlitz 
9155c65c564SOr Gerlitz 	hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
916b32accdaSVlad Buslov 	if (!hpe) {
917db76ca24SVlad Buslov 		mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
918db76ca24SVlad Buslov 		return -ENOMEM;
919b32accdaSVlad Buslov 	}
9205c65c564SOr Gerlitz 
92173edca73SVlad Buslov 	spin_lock_init(&hpe->flows_lock);
9225c65c564SOr Gerlitz 	INIT_LIST_HEAD(&hpe->flows);
923db76ca24SVlad Buslov 	INIT_LIST_HEAD(&hpe->dead_peer_wait_list);
924d8822868SOr Gerlitz 	hpe->peer_vhca_id = peer_id;
925106be53bSOr Gerlitz 	hpe->prio = match_prio;
926e4f9abbdSVlad Buslov 	refcount_set(&hpe->refcnt, 1);
927db76ca24SVlad Buslov 	init_completion(&hpe->res_ready);
928db76ca24SVlad Buslov 
929db76ca24SVlad Buslov 	hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist,
930db76ca24SVlad Buslov 		 hash_hairpin_info(peer_id, match_prio));
931db76ca24SVlad Buslov 	mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
9325c65c564SOr Gerlitz 
9335c65c564SOr Gerlitz 	params.log_data_size = 15;
9345c65c564SOr Gerlitz 	params.log_data_size = min_t(u8, params.log_data_size,
9355c65c564SOr Gerlitz 				     MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
9365c65c564SOr Gerlitz 	params.log_data_size = max_t(u8, params.log_data_size,
9375c65c564SOr Gerlitz 				     MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz));
9385c65c564SOr Gerlitz 
939eb9180f7SOr Gerlitz 	params.log_num_packets = params.log_data_size -
940eb9180f7SOr Gerlitz 				 MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev);
941eb9180f7SOr Gerlitz 	params.log_num_packets = min_t(u8, params.log_num_packets,
942eb9180f7SOr Gerlitz 				       MLX5_CAP_GEN(priv->mdev, log_max_hairpin_num_packets));
943eb9180f7SOr Gerlitz 
944eb9180f7SOr Gerlitz 	params.q_counter = priv->q_counter;
9453f6d08d1SOr Gerlitz 	/* set hairpin pair per each 50Gbs share of the link */
9462c81bfd5SHuy Nguyen 	mlx5e_port_max_linkspeed(priv->mdev, &link_speed);
9473f6d08d1SOr Gerlitz 	link_speed = max_t(u32, link_speed, 50000);
9483f6d08d1SOr Gerlitz 	link_speed64 = link_speed;
9493f6d08d1SOr Gerlitz 	do_div(link_speed64, 50000);
9503f6d08d1SOr Gerlitz 	params.num_channels = link_speed64;
9513f6d08d1SOr Gerlitz 
9525c65c564SOr Gerlitz 	hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
953db76ca24SVlad Buslov 	hpe->hp = hp;
954db76ca24SVlad Buslov 	complete_all(&hpe->res_ready);
9555c65c564SOr Gerlitz 	if (IS_ERR(hp)) {
9565c65c564SOr Gerlitz 		err = PTR_ERR(hp);
957db76ca24SVlad Buslov 		goto out_err;
9585c65c564SOr Gerlitz 	}
9595c65c564SOr Gerlitz 
960eb9180f7SOr Gerlitz 	netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
96127b942fbSParav Pandit 		   hp->tirn, hp->pair->rqn[0],
96227b942fbSParav Pandit 		   dev_name(hp->pair->peer_mdev->device),
963eb9180f7SOr Gerlitz 		   hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);
9645c65c564SOr Gerlitz 
9655c65c564SOr Gerlitz attach_flow:
9663f6d08d1SOr Gerlitz 	if (hpe->hp->num_channels > 1) {
967226f2ca3SVlad Buslov 		flow_flag_set(flow, HAIRPIN_RSS);
9683f6d08d1SOr Gerlitz 		flow->nic_attr->hairpin_ft = hpe->hp->ttc.ft.t;
9693f6d08d1SOr Gerlitz 	} else {
9705c65c564SOr Gerlitz 		flow->nic_attr->hairpin_tirn = hpe->hp->tirn;
9713f6d08d1SOr Gerlitz 	}
972b32accdaSVlad Buslov 
973e4f9abbdSVlad Buslov 	flow->hpe = hpe;
97473edca73SVlad Buslov 	spin_lock(&hpe->flows_lock);
9755c65c564SOr Gerlitz 	list_add(&flow->hairpin, &hpe->flows);
97673edca73SVlad Buslov 	spin_unlock(&hpe->flows_lock);
9773f6d08d1SOr Gerlitz 
9785c65c564SOr Gerlitz 	return 0;
9795c65c564SOr Gerlitz 
980db76ca24SVlad Buslov out_err:
981db76ca24SVlad Buslov 	mlx5e_hairpin_put(priv, hpe);
9825c65c564SOr Gerlitz 	return err;
9835c65c564SOr Gerlitz }
9845c65c564SOr Gerlitz 
9855c65c564SOr Gerlitz static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
9865c65c564SOr Gerlitz 				   struct mlx5e_tc_flow *flow)
9875c65c564SOr Gerlitz {
9885a7e5bcbSVlad Buslov 	/* flow wasn't fully initialized */
989e4f9abbdSVlad Buslov 	if (!flow->hpe)
9905a7e5bcbSVlad Buslov 		return;
9915a7e5bcbSVlad Buslov 
99273edca73SVlad Buslov 	spin_lock(&flow->hpe->flows_lock);
9935c65c564SOr Gerlitz 	list_del(&flow->hairpin);
99473edca73SVlad Buslov 	spin_unlock(&flow->hpe->flows_lock);
99573edca73SVlad Buslov 
996e4f9abbdSVlad Buslov 	mlx5e_hairpin_put(priv, flow->hpe);
997e4f9abbdSVlad Buslov 	flow->hpe = NULL;
9985c65c564SOr Gerlitz }
9995c65c564SOr Gerlitz 
1000c83954abSRabie Loulou static int
100174491de9SMark Bloch mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
100217091853SOr Gerlitz 		      struct mlx5e_tc_flow_parse_attr *parse_attr,
1003e98bedf5SEli Britstein 		      struct mlx5e_tc_flow *flow,
1004e98bedf5SEli Britstein 		      struct netlink_ext_ack *extack)
1005e8f887acSAmir Vadai {
1006bb0ee7dcSJianbo Liu 	struct mlx5_flow_context *flow_context = &parse_attr->spec.flow_context;
1007aa0cbbaeSOr Gerlitz 	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
1008aad7e08dSAmir Vadai 	struct mlx5_core_dev *dev = priv->mdev;
10095c65c564SOr Gerlitz 	struct mlx5_flow_destination dest[2] = {};
101066958ed9SHadar Hen Zion 	struct mlx5_flow_act flow_act = {
10113bc4b7bfSOr Gerlitz 		.action = attr->action,
1012bb0ee7dcSJianbo Liu 		.flags    = FLOW_ACT_NO_APPEND,
101366958ed9SHadar Hen Zion 	};
1014aad7e08dSAmir Vadai 	struct mlx5_fc *counter = NULL;
10155c65c564SOr Gerlitz 	int err, dest_ix = 0;
1016e8f887acSAmir Vadai 
1017bb0ee7dcSJianbo Liu 	flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
1018bb0ee7dcSJianbo Liu 	flow_context->flow_tag = attr->flow_tag;
1019bb0ee7dcSJianbo Liu 
1020226f2ca3SVlad Buslov 	if (flow_flag_test(flow, HAIRPIN)) {
1021e98bedf5SEli Britstein 		err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
10225a7e5bcbSVlad Buslov 		if (err)
10235a7e5bcbSVlad Buslov 			return err;
10245a7e5bcbSVlad Buslov 
1025226f2ca3SVlad Buslov 		if (flow_flag_test(flow, HAIRPIN_RSS)) {
10263f6d08d1SOr Gerlitz 			dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
10273f6d08d1SOr Gerlitz 			dest[dest_ix].ft = attr->hairpin_ft;
10283f6d08d1SOr Gerlitz 		} else {
10295c65c564SOr Gerlitz 			dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
10305c65c564SOr Gerlitz 			dest[dest_ix].tir_num = attr->hairpin_tirn;
10313f6d08d1SOr Gerlitz 		}
10323f6d08d1SOr Gerlitz 		dest_ix++;
10333f6d08d1SOr Gerlitz 	} else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
10345c65c564SOr Gerlitz 		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
10355c65c564SOr Gerlitz 		dest[dest_ix].ft = priv->fs.vlan.ft.t;
10365c65c564SOr Gerlitz 		dest_ix++;
10375c65c564SOr Gerlitz 	}
1038aad7e08dSAmir Vadai 
10395c65c564SOr Gerlitz 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
10405c65c564SOr Gerlitz 		counter = mlx5_fc_create(dev, true);
10415a7e5bcbSVlad Buslov 		if (IS_ERR(counter))
10425a7e5bcbSVlad Buslov 			return PTR_ERR(counter);
10435a7e5bcbSVlad Buslov 
10445c65c564SOr Gerlitz 		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
1045171c7625SMark Bloch 		dest[dest_ix].counter_id = mlx5_fc_id(counter);
10465c65c564SOr Gerlitz 		dest_ix++;
1047b8aee822SMark Bloch 		attr->counter = counter;
1048aad7e08dSAmir Vadai 	}
1049aad7e08dSAmir Vadai 
10502f4fe4caSOr Gerlitz 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
10513099eb5aSOr Gerlitz 		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
10522b688ea5SMaor Gottlieb 		flow_act.modify_hdr = attr->modify_hdr;
10536ae4a6a5SPaul Blakey 		dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
1054c83954abSRabie Loulou 		if (err)
10555a7e5bcbSVlad Buslov 			return err;
10562f4fe4caSOr Gerlitz 	}
10572f4fe4caSOr Gerlitz 
1058b6fac0b4SVlad Buslov 	mutex_lock(&priv->fs.tc.t_lock);
1059acff797cSMaor Gottlieb 	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
106061dc7b01SPaul Blakey 		struct mlx5_flow_table_attr ft_attr = {};
106161dc7b01SPaul Blakey 		int tc_grp_size, tc_tbl_size, tc_num_grps;
106221b9c144SOr Gerlitz 		u32 max_flow_counter;
106321b9c144SOr Gerlitz 
106421b9c144SOr Gerlitz 		max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
106521b9c144SOr Gerlitz 				    MLX5_CAP_GEN(dev, max_flow_counter_15_0);
106621b9c144SOr Gerlitz 
106721b9c144SOr Gerlitz 		tc_grp_size = min_t(int, max_flow_counter, MLX5E_TC_TABLE_MAX_GROUP_SIZE);
106821b9c144SOr Gerlitz 
106921b9c144SOr Gerlitz 		tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
107021b9c144SOr Gerlitz 				    BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));
107161dc7b01SPaul Blakey 		tc_num_grps = MLX5E_TC_TABLE_NUM_GROUPS;
107221b9c144SOr Gerlitz 
107361dc7b01SPaul Blakey 		ft_attr.prio = MLX5E_TC_PRIO;
107461dc7b01SPaul Blakey 		ft_attr.max_fte = tc_tbl_size;
107561dc7b01SPaul Blakey 		ft_attr.level = MLX5E_TC_FT_LEVEL;
107661dc7b01SPaul Blakey 		ft_attr.autogroup.max_num_groups = tc_num_grps;
1077acff797cSMaor Gottlieb 		priv->fs.tc.t =
1078acff797cSMaor Gottlieb 			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
107961dc7b01SPaul Blakey 							    &ft_attr);
1080acff797cSMaor Gottlieb 		if (IS_ERR(priv->fs.tc.t)) {
1081b6fac0b4SVlad Buslov 			mutex_unlock(&priv->fs.tc.t_lock);
1082e98bedf5SEli Britstein 			NL_SET_ERR_MSG_MOD(extack,
1083c75a33c8SJacob Keller 					   "Failed to create tc offload table");
1084e8f887acSAmir Vadai 			netdev_err(priv->netdev,
1085e8f887acSAmir Vadai 				   "Failed to create tc offload table\n");
10865a7e5bcbSVlad Buslov 			return PTR_ERR(priv->fs.tc.t);
1087e8f887acSAmir Vadai 		}
1088e8f887acSAmir Vadai 	}
1089e8f887acSAmir Vadai 
109038aa51c1SOr Gerlitz 	if (attr->match_level != MLX5_MATCH_NONE)
1091d4a18e16SYevgeny Kliteynik 		parse_attr->spec.match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
109238aa51c1SOr Gerlitz 
1093c83954abSRabie Loulou 	flow->rule[0] = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
10945c65c564SOr Gerlitz 					    &flow_act, dest, dest_ix);
1095b6fac0b4SVlad Buslov 	mutex_unlock(&priv->fs.tc.t_lock);
1096e8f887acSAmir Vadai 
1097a2b7189bSzhong jiang 	return PTR_ERR_OR_ZERO(flow->rule[0]);
1098e8f887acSAmir Vadai }
1099e8f887acSAmir Vadai 
1100d85cdccbSOr Gerlitz static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
1101d85cdccbSOr Gerlitz 				  struct mlx5e_tc_flow *flow)
1102d85cdccbSOr Gerlitz {
1103513f8f7fSOr Gerlitz 	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
1104d85cdccbSOr Gerlitz 	struct mlx5_fc *counter = NULL;
1105d85cdccbSOr Gerlitz 
1106b8aee822SMark Bloch 	counter = attr->counter;
11075a7e5bcbSVlad Buslov 	if (!IS_ERR_OR_NULL(flow->rule[0]))
1108e4ad91f2SChris Mi 		mlx5_del_flow_rules(flow->rule[0]);
1109d85cdccbSOr Gerlitz 	mlx5_fc_destroy(priv->mdev, counter);
1110d85cdccbSOr Gerlitz 
1111b6fac0b4SVlad Buslov 	mutex_lock(&priv->fs.tc.t_lock);
1112226f2ca3SVlad Buslov 	if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) && priv->fs.tc.t) {
1113d85cdccbSOr Gerlitz 		mlx5_destroy_flow_table(priv->fs.tc.t);
1114d85cdccbSOr Gerlitz 		priv->fs.tc.t = NULL;
1115d85cdccbSOr Gerlitz 	}
1116b6fac0b4SVlad Buslov 	mutex_unlock(&priv->fs.tc.t_lock);
11172f4fe4caSOr Gerlitz 
1118513f8f7fSOr Gerlitz 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
11193099eb5aSOr Gerlitz 		mlx5e_detach_mod_hdr(priv, flow);
11205c65c564SOr Gerlitz 
1121226f2ca3SVlad Buslov 	if (flow_flag_test(flow, HAIRPIN))
11225c65c564SOr Gerlitz 		mlx5e_hairpin_flow_del(priv, flow);
1123d85cdccbSOr Gerlitz }
1124d85cdccbSOr Gerlitz 
1125aa0cbbaeSOr Gerlitz static void mlx5e_detach_encap(struct mlx5e_priv *priv,
11268c4dc42bSEli Britstein 			       struct mlx5e_tc_flow *flow, int out_index);
1127aa0cbbaeSOr Gerlitz 
11283c37745eSOr Gerlitz static int mlx5e_attach_encap(struct mlx5e_priv *priv,
1129e98bedf5SEli Britstein 			      struct mlx5e_tc_flow *flow,
1130733d4f36SRoi Dayan 			      struct net_device *mirred_dev,
1131733d4f36SRoi Dayan 			      int out_index,
11328c4dc42bSEli Britstein 			      struct netlink_ext_ack *extack,
11330ad060eeSRoi Dayan 			      struct net_device **encap_dev,
11340ad060eeSRoi Dayan 			      bool *encap_valid);
113514e6b038SEli Cohen static int mlx5e_attach_decap(struct mlx5e_priv *priv,
113614e6b038SEli Cohen 			      struct mlx5e_tc_flow *flow,
113714e6b038SEli Cohen 			      struct netlink_ext_ack *extack);
113814e6b038SEli Cohen static void mlx5e_detach_decap(struct mlx5e_priv *priv,
113914e6b038SEli Cohen 			       struct mlx5e_tc_flow *flow);
11403c37745eSOr Gerlitz 
11416d2a3ed0SOr Gerlitz static struct mlx5_flow_handle *
11426d2a3ed0SOr Gerlitz mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
11436d2a3ed0SOr Gerlitz 			   struct mlx5e_tc_flow *flow,
11446d2a3ed0SOr Gerlitz 			   struct mlx5_flow_spec *spec,
11456d2a3ed0SOr Gerlitz 			   struct mlx5_esw_flow_attr *attr)
11466d2a3ed0SOr Gerlitz {
11471ef3018fSPaul Blakey 	struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
11486d2a3ed0SOr Gerlitz 	struct mlx5_flow_handle *rule;
11494c3844d9SPaul Blakey 
11501ef3018fSPaul Blakey 	if (flow_flag_test(flow, CT)) {
11511ef3018fSPaul Blakey 		mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
11521ef3018fSPaul Blakey 
11531ef3018fSPaul Blakey 		return mlx5_tc_ct_flow_offload(flow->priv, flow, spec, attr,
11541ef3018fSPaul Blakey 					       mod_hdr_acts);
11551ef3018fSPaul Blakey 	}
11566d2a3ed0SOr Gerlitz 
11576d2a3ed0SOr Gerlitz 	rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
11586d2a3ed0SOr Gerlitz 	if (IS_ERR(rule))
11596d2a3ed0SOr Gerlitz 		return rule;
11606d2a3ed0SOr Gerlitz 
1161e85e02baSEli Britstein 	if (attr->split_count) {
11626d2a3ed0SOr Gerlitz 		flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
11636d2a3ed0SOr Gerlitz 		if (IS_ERR(flow->rule[1])) {
11646d2a3ed0SOr Gerlitz 			mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
11656d2a3ed0SOr Gerlitz 			return flow->rule[1];
11666d2a3ed0SOr Gerlitz 		}
11676d2a3ed0SOr Gerlitz 	}
11686d2a3ed0SOr Gerlitz 
11696d2a3ed0SOr Gerlitz 	return rule;
11706d2a3ed0SOr Gerlitz }
11716d2a3ed0SOr Gerlitz 
11726d2a3ed0SOr Gerlitz static void
11736d2a3ed0SOr Gerlitz mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
11746d2a3ed0SOr Gerlitz 			     struct mlx5e_tc_flow *flow,
11756d2a3ed0SOr Gerlitz 			     struct mlx5_esw_flow_attr *attr)
11766d2a3ed0SOr Gerlitz {
1177226f2ca3SVlad Buslov 	flow_flag_clear(flow, OFFLOADED);
11786d2a3ed0SOr Gerlitz 
11794c3844d9SPaul Blakey 	if (flow_flag_test(flow, CT)) {
11804c3844d9SPaul Blakey 		mlx5_tc_ct_delete_flow(flow->priv, flow, attr);
11814c3844d9SPaul Blakey 		return;
11824c3844d9SPaul Blakey 	}
11834c3844d9SPaul Blakey 
1184e85e02baSEli Britstein 	if (attr->split_count)
11856d2a3ed0SOr Gerlitz 		mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);
11866d2a3ed0SOr Gerlitz 
11876d2a3ed0SOr Gerlitz 	mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
11886d2a3ed0SOr Gerlitz }
11896d2a3ed0SOr Gerlitz 
11905dbe906fSPaul Blakey static struct mlx5_flow_handle *
11915dbe906fSPaul Blakey mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
11925dbe906fSPaul Blakey 			      struct mlx5e_tc_flow *flow,
1193178f69b4SEli Cohen 			      struct mlx5_flow_spec *spec)
11945dbe906fSPaul Blakey {
1195178f69b4SEli Cohen 	struct mlx5_esw_flow_attr slow_attr;
11965dbe906fSPaul Blakey 	struct mlx5_flow_handle *rule;
11975dbe906fSPaul Blakey 
1198178f69b4SEli Cohen 	memcpy(&slow_attr, flow->esw_attr, sizeof(slow_attr));
1199178f69b4SEli Cohen 	slow_attr.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1200178f69b4SEli Cohen 	slow_attr.split_count = 0;
1201178f69b4SEli Cohen 	slow_attr.flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
12025dbe906fSPaul Blakey 
1203178f69b4SEli Cohen 	rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, &slow_attr);
12045dbe906fSPaul Blakey 	if (!IS_ERR(rule))
1205226f2ca3SVlad Buslov 		flow_flag_set(flow, SLOW);
12065dbe906fSPaul Blakey 
12075dbe906fSPaul Blakey 	return rule;
12085dbe906fSPaul Blakey }
12095dbe906fSPaul Blakey 
12105dbe906fSPaul Blakey static void
12115dbe906fSPaul Blakey mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
1212178f69b4SEli Cohen 				  struct mlx5e_tc_flow *flow)
12135dbe906fSPaul Blakey {
1214178f69b4SEli Cohen 	struct mlx5_esw_flow_attr slow_attr;
1215178f69b4SEli Cohen 
1216178f69b4SEli Cohen 	memcpy(&slow_attr, flow->esw_attr, sizeof(slow_attr));
1217178f69b4SEli Cohen 	slow_attr.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1218178f69b4SEli Cohen 	slow_attr.split_count = 0;
1219178f69b4SEli Cohen 	slow_attr.flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
1220178f69b4SEli Cohen 	mlx5e_tc_unoffload_fdb_rules(esw, flow, &slow_attr);
1221226f2ca3SVlad Buslov 	flow_flag_clear(flow, SLOW);
12225dbe906fSPaul Blakey }
12235dbe906fSPaul Blakey 
1224ad86755bSVlad Buslov /* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
1225ad86755bSVlad Buslov  * function.
1226ad86755bSVlad Buslov  */
1227ad86755bSVlad Buslov static void unready_flow_add(struct mlx5e_tc_flow *flow,
1228ad86755bSVlad Buslov 			     struct list_head *unready_flows)
1229ad86755bSVlad Buslov {
1230ad86755bSVlad Buslov 	flow_flag_set(flow, NOT_READY);
1231ad86755bSVlad Buslov 	list_add_tail(&flow->unready, unready_flows);
1232ad86755bSVlad Buslov }
1233ad86755bSVlad Buslov 
1234ad86755bSVlad Buslov /* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
1235ad86755bSVlad Buslov  * function.
1236ad86755bSVlad Buslov  */
1237ad86755bSVlad Buslov static void unready_flow_del(struct mlx5e_tc_flow *flow)
1238ad86755bSVlad Buslov {
1239ad86755bSVlad Buslov 	list_del(&flow->unready);
1240ad86755bSVlad Buslov 	flow_flag_clear(flow, NOT_READY);
1241ad86755bSVlad Buslov }
1242ad86755bSVlad Buslov 
1243b4a23329SRoi Dayan static void add_unready_flow(struct mlx5e_tc_flow *flow)
1244b4a23329SRoi Dayan {
1245b4a23329SRoi Dayan 	struct mlx5_rep_uplink_priv *uplink_priv;
1246b4a23329SRoi Dayan 	struct mlx5e_rep_priv *rpriv;
1247b4a23329SRoi Dayan 	struct mlx5_eswitch *esw;
1248b4a23329SRoi Dayan 
1249b4a23329SRoi Dayan 	esw = flow->priv->mdev->priv.eswitch;
1250b4a23329SRoi Dayan 	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
1251b4a23329SRoi Dayan 	uplink_priv = &rpriv->uplink_priv;
1252b4a23329SRoi Dayan 
1253ad86755bSVlad Buslov 	mutex_lock(&uplink_priv->unready_flows_lock);
1254ad86755bSVlad Buslov 	unready_flow_add(flow, &uplink_priv->unready_flows);
1255ad86755bSVlad Buslov 	mutex_unlock(&uplink_priv->unready_flows_lock);
1256b4a23329SRoi Dayan }
1257b4a23329SRoi Dayan 
1258b4a23329SRoi Dayan static void remove_unready_flow(struct mlx5e_tc_flow *flow)
1259b4a23329SRoi Dayan {
1260ad86755bSVlad Buslov 	struct mlx5_rep_uplink_priv *uplink_priv;
1261ad86755bSVlad Buslov 	struct mlx5e_rep_priv *rpriv;
1262ad86755bSVlad Buslov 	struct mlx5_eswitch *esw;
1263ad86755bSVlad Buslov 
1264ad86755bSVlad Buslov 	esw = flow->priv->mdev->priv.eswitch;
1265ad86755bSVlad Buslov 	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
1266ad86755bSVlad Buslov 	uplink_priv = &rpriv->uplink_priv;
1267ad86755bSVlad Buslov 
1268ad86755bSVlad Buslov 	mutex_lock(&uplink_priv->unready_flows_lock);
1269ad86755bSVlad Buslov 	unready_flow_del(flow);
1270ad86755bSVlad Buslov 	mutex_unlock(&uplink_priv->unready_flows_lock);
1271b4a23329SRoi Dayan }
1272b4a23329SRoi Dayan 
1273c83954abSRabie Loulou static int
127474491de9SMark Bloch mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
1275e98bedf5SEli Britstein 		      struct mlx5e_tc_flow *flow,
1276e98bedf5SEli Britstein 		      struct netlink_ext_ack *extack)
1277adb4c123SOr Gerlitz {
1278adb4c123SOr Gerlitz 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1279aa0cbbaeSOr Gerlitz 	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
12807040632dSTonghao Zhang 	struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
12813c37745eSOr Gerlitz 	struct net_device *out_dev, *encap_dev = NULL;
1282b8aee822SMark Bloch 	struct mlx5_fc *counter = NULL;
12833c37745eSOr Gerlitz 	struct mlx5e_rep_priv *rpriv;
12843c37745eSOr Gerlitz 	struct mlx5e_priv *out_priv;
12850ad060eeSRoi Dayan 	bool encap_valid = true;
128639ac237cSPaul Blakey 	u32 max_prio, max_chain;
12870ad060eeSRoi Dayan 	int err = 0;
1288f493f155SEli Britstein 	int out_index;
12898b32580dSOr Gerlitz 
129039ac237cSPaul Blakey 	if (!mlx5_esw_chains_prios_supported(esw) && attr->prio != 1) {
129161644c3dSRoi Dayan 		NL_SET_ERR_MSG_MOD(extack,
129261644c3dSRoi Dayan 				   "E-switch priorities unsupported, upgrade FW");
1293d14f6f2aSOr Gerlitz 		return -EOPNOTSUPP;
1294d14f6f2aSOr Gerlitz 	}
1295e52c2802SPaul Blakey 
129684179981SPaul Blakey 	/* We check chain range only for tc flows.
129784179981SPaul Blakey 	 * For ft flows, we checked attr->chain was originally 0 and set it to
129884179981SPaul Blakey 	 * FDB_FT_CHAIN which is outside tc range.
129984179981SPaul Blakey 	 * See mlx5e_rep_setup_ft_cb().
130084179981SPaul Blakey 	 */
130139ac237cSPaul Blakey 	max_chain = mlx5_esw_chains_get_chain_range(esw);
130284179981SPaul Blakey 	if (!mlx5e_is_ft_flow(flow) && attr->chain > max_chain) {
130361644c3dSRoi Dayan 		NL_SET_ERR_MSG_MOD(extack,
130461644c3dSRoi Dayan 				   "Requested chain is out of supported range");
13055a7e5bcbSVlad Buslov 		return -EOPNOTSUPP;
1306bf07aa73SPaul Blakey 	}
1307bf07aa73SPaul Blakey 
130839ac237cSPaul Blakey 	max_prio = mlx5_esw_chains_get_prio_range(esw);
1309bf07aa73SPaul Blakey 	if (attr->prio > max_prio) {
131061644c3dSRoi Dayan 		NL_SET_ERR_MSG_MOD(extack,
131161644c3dSRoi Dayan 				   "Requested priority is out of supported range");
13125a7e5bcbSVlad Buslov 		return -EOPNOTSUPP;
1313bf07aa73SPaul Blakey 	}
1314bf07aa73SPaul Blakey 
131514e6b038SEli Cohen 	if (flow_flag_test(flow, L3_TO_L2_DECAP)) {
131614e6b038SEli Cohen 		err = mlx5e_attach_decap(priv, flow, extack);
131714e6b038SEli Cohen 		if (err)
131814e6b038SEli Cohen 			return err;
131914e6b038SEli Cohen 	}
132014e6b038SEli Cohen 
1321f493f155SEli Britstein 	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
13228c4dc42bSEli Britstein 		int mirred_ifindex;
13238c4dc42bSEli Britstein 
1324f493f155SEli Britstein 		if (!(attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
1325f493f155SEli Britstein 			continue;
1326f493f155SEli Britstein 
13277040632dSTonghao Zhang 		mirred_ifindex = parse_attr->mirred_ifindex[out_index];
13283c37745eSOr Gerlitz 		out_dev = __dev_get_by_index(dev_net(priv->netdev),
13298c4dc42bSEli Britstein 					     mirred_ifindex);
1330733d4f36SRoi Dayan 		err = mlx5e_attach_encap(priv, flow, out_dev, out_index,
13310ad060eeSRoi Dayan 					 extack, &encap_dev, &encap_valid);
13320ad060eeSRoi Dayan 		if (err)
13335a7e5bcbSVlad Buslov 			return err;
13340ad060eeSRoi Dayan 
13353c37745eSOr Gerlitz 		out_priv = netdev_priv(encap_dev);
13363c37745eSOr Gerlitz 		rpriv = out_priv->ppriv;
13371cc26d74SEli Britstein 		attr->dests[out_index].rep = rpriv->rep;
13381cc26d74SEli Britstein 		attr->dests[out_index].mdev = out_priv->mdev;
13393c37745eSOr Gerlitz 	}
13403c37745eSOr Gerlitz 
13418b32580dSOr Gerlitz 	err = mlx5_eswitch_add_vlan_action(esw, attr);
1342c83954abSRabie Loulou 	if (err)
13435a7e5bcbSVlad Buslov 		return err;
1344adb4c123SOr Gerlitz 
1345d5a3c2b6SRoi Dayan 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
1346d5a3c2b6SRoi Dayan 	    !(attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR)) {
13471a9527bbSOr Gerlitz 		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
13486ae4a6a5SPaul Blakey 		dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
1349c83954abSRabie Loulou 		if (err)
13505a7e5bcbSVlad Buslov 			return err;
1351d7e75a32SOr Gerlitz 	}
1352d7e75a32SOr Gerlitz 
1353b8aee822SMark Bloch 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
1354f9392795SShahar Klein 		counter = mlx5_fc_create(attr->counter_dev, true);
13555a7e5bcbSVlad Buslov 		if (IS_ERR(counter))
13565a7e5bcbSVlad Buslov 			return PTR_ERR(counter);
1357b8aee822SMark Bloch 
1358b8aee822SMark Bloch 		attr->counter = counter;
1359b8aee822SMark Bloch 	}
1360b8aee822SMark Bloch 
13610ad060eeSRoi Dayan 	/* we get here if one of the following takes place:
13620ad060eeSRoi Dayan 	 * (1) there's no error
13630ad060eeSRoi Dayan 	 * (2) there's an encap action and we don't have valid neigh
13643c37745eSOr Gerlitz 	 */
1365bc1d75faSRoi Dayan 	if (!encap_valid)
1366178f69b4SEli Cohen 		flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec);
1367bc1d75faSRoi Dayan 	else
13686d2a3ed0SOr Gerlitz 		flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);
13695dbe906fSPaul Blakey 
13705a7e5bcbSVlad Buslov 	if (IS_ERR(flow->rule[0]))
13715a7e5bcbSVlad Buslov 		return PTR_ERR(flow->rule[0]);
1372226f2ca3SVlad Buslov 	else
1373226f2ca3SVlad Buslov 		flow_flag_set(flow, OFFLOADED);
1374c83954abSRabie Loulou 
13755dbe906fSPaul Blakey 	return 0;
1376aa0cbbaeSOr Gerlitz }
1377d85cdccbSOr Gerlitz 
13789272e3dfSYevgeny Kliteynik static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow)
13799272e3dfSYevgeny Kliteynik {
13809272e3dfSYevgeny Kliteynik 	struct mlx5_flow_spec *spec = &flow->esw_attr->parse_attr->spec;
13819272e3dfSYevgeny Kliteynik 	void *headers_v = MLX5_ADDR_OF(fte_match_param,
13829272e3dfSYevgeny Kliteynik 				       spec->match_value,
13839272e3dfSYevgeny Kliteynik 				       misc_parameters_3);
13849272e3dfSYevgeny Kliteynik 	u32 geneve_tlv_opt_0_data = MLX5_GET(fte_match_set_misc3,
13859272e3dfSYevgeny Kliteynik 					     headers_v,
13869272e3dfSYevgeny Kliteynik 					     geneve_tlv_option_0_data);
13879272e3dfSYevgeny Kliteynik 
13889272e3dfSYevgeny Kliteynik 	return !!geneve_tlv_opt_0_data;
13899272e3dfSYevgeny Kliteynik }
13909272e3dfSYevgeny Kliteynik 
1391d85cdccbSOr Gerlitz static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
1392d85cdccbSOr Gerlitz 				  struct mlx5e_tc_flow *flow)
1393d85cdccbSOr Gerlitz {
1394d85cdccbSOr Gerlitz 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1395d7e75a32SOr Gerlitz 	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
1396f493f155SEli Britstein 	int out_index;
1397d85cdccbSOr Gerlitz 
13980a7fcb78SPaul Blakey 	mlx5e_put_flow_tunnel_id(flow);
13990a7fcb78SPaul Blakey 
1400226f2ca3SVlad Buslov 	if (flow_flag_test(flow, NOT_READY)) {
1401b4a23329SRoi Dayan 		remove_unready_flow(flow);
1402ef06c9eeSRoi Dayan 		kvfree(attr->parse_attr);
1403ef06c9eeSRoi Dayan 		return;
1404ef06c9eeSRoi Dayan 	}
1405ef06c9eeSRoi Dayan 
1406226f2ca3SVlad Buslov 	if (mlx5e_is_offloaded_flow(flow)) {
1407226f2ca3SVlad Buslov 		if (flow_flag_test(flow, SLOW))
1408178f69b4SEli Cohen 			mlx5e_tc_unoffload_from_slow_path(esw, flow);
14095dbe906fSPaul Blakey 		else
14105dbe906fSPaul Blakey 			mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
14115dbe906fSPaul Blakey 	}
1412d85cdccbSOr Gerlitz 
14139272e3dfSYevgeny Kliteynik 	if (mlx5_flow_has_geneve_opt(flow))
14149272e3dfSYevgeny Kliteynik 		mlx5_geneve_tlv_option_del(priv->mdev->geneve);
14159272e3dfSYevgeny Kliteynik 
1416513f8f7fSOr Gerlitz 	mlx5_eswitch_del_vlan_action(esw, attr);
1417d85cdccbSOr Gerlitz 
1418f493f155SEli Britstein 	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
14192a4b6526SVlad Buslov 		if (attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP) {
14208c4dc42bSEli Britstein 			mlx5e_detach_encap(priv, flow, out_index);
14212a4b6526SVlad Buslov 			kfree(attr->parse_attr->tun_info[out_index]);
14222a4b6526SVlad Buslov 		}
1423f493f155SEli Britstein 	kvfree(attr->parse_attr);
1424d7e75a32SOr Gerlitz 
1425513f8f7fSOr Gerlitz 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
14261a9527bbSOr Gerlitz 		mlx5e_detach_mod_hdr(priv, flow);
1427b8aee822SMark Bloch 
1428b8aee822SMark Bloch 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
1429f9392795SShahar Klein 		mlx5_fc_destroy(attr->counter_dev, attr->counter);
143014e6b038SEli Cohen 
143114e6b038SEli Cohen 	if (flow_flag_test(flow, L3_TO_L2_DECAP))
143214e6b038SEli Cohen 		mlx5e_detach_decap(priv, flow);
1433d85cdccbSOr Gerlitz }
1434d85cdccbSOr Gerlitz 
1435232c0013SHadar Hen Zion void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
14362a1f1768SVlad Buslov 			      struct mlx5e_encap_entry *e,
14372a1f1768SVlad Buslov 			      struct list_head *flow_list)
1438232c0013SHadar Hen Zion {
14393c37745eSOr Gerlitz 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1440178f69b4SEli Cohen 	struct mlx5_esw_flow_attr *esw_attr;
14416d2a3ed0SOr Gerlitz 	struct mlx5_flow_handle *rule;
14426d2a3ed0SOr Gerlitz 	struct mlx5_flow_spec *spec;
1443232c0013SHadar Hen Zion 	struct mlx5e_tc_flow *flow;
1444232c0013SHadar Hen Zion 	int err;
1445232c0013SHadar Hen Zion 
14462b688ea5SMaor Gottlieb 	e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
144754c177caSOz Shlomo 						     e->reformat_type,
1448232c0013SHadar Hen Zion 						     e->encap_size, e->encap_header,
14492b688ea5SMaor Gottlieb 						     MLX5_FLOW_NAMESPACE_FDB);
14502b688ea5SMaor Gottlieb 	if (IS_ERR(e->pkt_reformat)) {
14512b688ea5SMaor Gottlieb 		mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %lu\n",
14522b688ea5SMaor Gottlieb 			       PTR_ERR(e->pkt_reformat));
1453232c0013SHadar Hen Zion 		return;
1454232c0013SHadar Hen Zion 	}
1455232c0013SHadar Hen Zion 	e->flags |= MLX5_ENCAP_ENTRY_VALID;
1456f6dfb4c3SHadar Hen Zion 	mlx5e_rep_queue_neigh_stats_work(priv);
1457232c0013SHadar Hen Zion 
14582a1f1768SVlad Buslov 	list_for_each_entry(flow, flow_list, tmp_list) {
14598c4dc42bSEli Britstein 		bool all_flow_encaps_valid = true;
14608c4dc42bSEli Britstein 		int i;
14618c4dc42bSEli Britstein 
146295435ad7SVlad Buslov 		if (!mlx5e_is_offloaded_flow(flow))
146395435ad7SVlad Buslov 			continue;
14643c37745eSOr Gerlitz 		esw_attr = flow->esw_attr;
14656d2a3ed0SOr Gerlitz 		spec = &esw_attr->parse_attr->spec;
14666d2a3ed0SOr Gerlitz 
14672b688ea5SMaor Gottlieb 		esw_attr->dests[flow->tmp_efi_index].pkt_reformat = e->pkt_reformat;
14682a1f1768SVlad Buslov 		esw_attr->dests[flow->tmp_efi_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
14698c4dc42bSEli Britstein 		/* Flow can be associated with multiple encap entries.
14708c4dc42bSEli Britstein 		 * Before offloading the flow verify that all of them have
14718c4dc42bSEli Britstein 		 * a valid neighbour.
14728c4dc42bSEli Britstein 		 */
14738c4dc42bSEli Britstein 		for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
14748c4dc42bSEli Britstein 			if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP))
14758c4dc42bSEli Britstein 				continue;
14768c4dc42bSEli Britstein 			if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP_VALID)) {
14778c4dc42bSEli Britstein 				all_flow_encaps_valid = false;
14788c4dc42bSEli Britstein 				break;
14798c4dc42bSEli Britstein 			}
14808c4dc42bSEli Britstein 		}
14818c4dc42bSEli Britstein 		/* Do not offload flows with unresolved neighbors */
14828c4dc42bSEli Britstein 		if (!all_flow_encaps_valid)
14832a1f1768SVlad Buslov 			continue;
14845dbe906fSPaul Blakey 		/* update from slow path rule to encap rule */
14856d2a3ed0SOr Gerlitz 		rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, esw_attr);
14866d2a3ed0SOr Gerlitz 		if (IS_ERR(rule)) {
14876d2a3ed0SOr Gerlitz 			err = PTR_ERR(rule);
1488232c0013SHadar Hen Zion 			mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
1489232c0013SHadar Hen Zion 				       err);
14902a1f1768SVlad Buslov 			continue;
1491232c0013SHadar Hen Zion 		}
14925dbe906fSPaul Blakey 
1493178f69b4SEli Cohen 		mlx5e_tc_unoffload_from_slow_path(esw, flow);
14946d2a3ed0SOr Gerlitz 		flow->rule[0] = rule;
1495226f2ca3SVlad Buslov 		/* was unset when slow path rule removed */
1496226f2ca3SVlad Buslov 		flow_flag_set(flow, OFFLOADED);
1497232c0013SHadar Hen Zion 	}
1498232c0013SHadar Hen Zion }
1499232c0013SHadar Hen Zion 
1500232c0013SHadar Hen Zion void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
15012a1f1768SVlad Buslov 			      struct mlx5e_encap_entry *e,
15022a1f1768SVlad Buslov 			      struct list_head *flow_list)
1503232c0013SHadar Hen Zion {
15043c37745eSOr Gerlitz 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
15055dbe906fSPaul Blakey 	struct mlx5_flow_handle *rule;
15065dbe906fSPaul Blakey 	struct mlx5_flow_spec *spec;
1507232c0013SHadar Hen Zion 	struct mlx5e_tc_flow *flow;
15085dbe906fSPaul Blakey 	int err;
1509232c0013SHadar Hen Zion 
15102a1f1768SVlad Buslov 	list_for_each_entry(flow, flow_list, tmp_list) {
151195435ad7SVlad Buslov 		if (!mlx5e_is_offloaded_flow(flow))
151295435ad7SVlad Buslov 			continue;
15135dbe906fSPaul Blakey 		spec = &flow->esw_attr->parse_attr->spec;
15145dbe906fSPaul Blakey 
15155dbe906fSPaul Blakey 		/* update from encap rule to slow path rule */
1516178f69b4SEli Cohen 		rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec);
15178c4dc42bSEli Britstein 		/* mark the flow's encap dest as non-valid */
15182a1f1768SVlad Buslov 		flow->esw_attr->dests[flow->tmp_efi_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;
15195dbe906fSPaul Blakey 
15205dbe906fSPaul Blakey 		if (IS_ERR(rule)) {
15215dbe906fSPaul Blakey 			err = PTR_ERR(rule);
15225dbe906fSPaul Blakey 			mlx5_core_warn(priv->mdev, "Failed to update slow path (encap) flow, %d\n",
15235dbe906fSPaul Blakey 				       err);
15242a1f1768SVlad Buslov 			continue;
15255dbe906fSPaul Blakey 		}
15265dbe906fSPaul Blakey 
15276d2a3ed0SOr Gerlitz 		mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->esw_attr);
15285dbe906fSPaul Blakey 		flow->rule[0] = rule;
1529226f2ca3SVlad Buslov 		/* was unset when fast path rule removed */
1530226f2ca3SVlad Buslov 		flow_flag_set(flow, OFFLOADED);
1531232c0013SHadar Hen Zion 	}
1532232c0013SHadar Hen Zion 
153361c806daSOr Gerlitz 	/* we know that the encap is valid */
1534232c0013SHadar Hen Zion 	e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
15352b688ea5SMaor Gottlieb 	mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
1536232c0013SHadar Hen Zion }
1537232c0013SHadar Hen Zion 
1538b8aee822SMark Bloch static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
1539b8aee822SMark Bloch {
1540226f2ca3SVlad Buslov 	if (mlx5e_is_eswitch_flow(flow))
1541b8aee822SMark Bloch 		return flow->esw_attr->counter;
1542b8aee822SMark Bloch 	else
1543b8aee822SMark Bloch 		return flow->nic_attr->counter;
1544b8aee822SMark Bloch }
1545b8aee822SMark Bloch 
15462a1f1768SVlad Buslov /* Takes reference to all flows attached to encap and adds the flows to
15472a1f1768SVlad Buslov  * flow_list using 'tmp_list' list_head in mlx5e_tc_flow.
15482a1f1768SVlad Buslov  */
15492a1f1768SVlad Buslov void mlx5e_take_all_encap_flows(struct mlx5e_encap_entry *e, struct list_head *flow_list)
15502a1f1768SVlad Buslov {
15512a1f1768SVlad Buslov 	struct encap_flow_item *efi;
15522a1f1768SVlad Buslov 	struct mlx5e_tc_flow *flow;
15532a1f1768SVlad Buslov 
15542a1f1768SVlad Buslov 	list_for_each_entry(efi, &e->flows, list) {
15552a1f1768SVlad Buslov 		flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
15562a1f1768SVlad Buslov 		if (IS_ERR(mlx5e_flow_get(flow)))
15572a1f1768SVlad Buslov 			continue;
155895435ad7SVlad Buslov 		wait_for_completion(&flow->init_done);
15592a1f1768SVlad Buslov 
15602a1f1768SVlad Buslov 		flow->tmp_efi_index = efi->index;
15612a1f1768SVlad Buslov 		list_add(&flow->tmp_list, flow_list);
15622a1f1768SVlad Buslov 	}
15632a1f1768SVlad Buslov }
15642a1f1768SVlad Buslov 
15656a06c2f7SVlad Buslov /* Iterate over tmp_list of flows attached to flow_list head. */
15662a1f1768SVlad Buslov void mlx5e_put_encap_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list)
15676a06c2f7SVlad Buslov {
15686a06c2f7SVlad Buslov 	struct mlx5e_tc_flow *flow, *tmp;
15696a06c2f7SVlad Buslov 
15706a06c2f7SVlad Buslov 	list_for_each_entry_safe(flow, tmp, flow_list, tmp_list)
15716a06c2f7SVlad Buslov 		mlx5e_flow_put(priv, flow);
15726a06c2f7SVlad Buslov }
15736a06c2f7SVlad Buslov 
1574ac0d9176SVlad Buslov static struct mlx5e_encap_entry *
1575ac0d9176SVlad Buslov mlx5e_get_next_valid_encap(struct mlx5e_neigh_hash_entry *nhe,
1576ac0d9176SVlad Buslov 			   struct mlx5e_encap_entry *e)
1577ac0d9176SVlad Buslov {
1578ac0d9176SVlad Buslov 	struct mlx5e_encap_entry *next = NULL;
1579ac0d9176SVlad Buslov 
1580ac0d9176SVlad Buslov retry:
1581ac0d9176SVlad Buslov 	rcu_read_lock();
1582ac0d9176SVlad Buslov 
1583ac0d9176SVlad Buslov 	/* find encap with non-zero reference counter value */
1584ac0d9176SVlad Buslov 	for (next = e ?
1585ac0d9176SVlad Buslov 		     list_next_or_null_rcu(&nhe->encap_list,
1586ac0d9176SVlad Buslov 					   &e->encap_list,
1587ac0d9176SVlad Buslov 					   struct mlx5e_encap_entry,
1588ac0d9176SVlad Buslov 					   encap_list) :
1589ac0d9176SVlad Buslov 		     list_first_or_null_rcu(&nhe->encap_list,
1590ac0d9176SVlad Buslov 					    struct mlx5e_encap_entry,
1591ac0d9176SVlad Buslov 					    encap_list);
1592ac0d9176SVlad Buslov 	     next;
1593ac0d9176SVlad Buslov 	     next = list_next_or_null_rcu(&nhe->encap_list,
1594ac0d9176SVlad Buslov 					  &next->encap_list,
1595ac0d9176SVlad Buslov 					  struct mlx5e_encap_entry,
1596ac0d9176SVlad Buslov 					  encap_list))
1597ac0d9176SVlad Buslov 		if (mlx5e_encap_take(next))
1598ac0d9176SVlad Buslov 			break;
1599ac0d9176SVlad Buslov 
1600ac0d9176SVlad Buslov 	rcu_read_unlock();
1601ac0d9176SVlad Buslov 
1602ac0d9176SVlad Buslov 	/* release starting encap */
1603ac0d9176SVlad Buslov 	if (e)
1604ac0d9176SVlad Buslov 		mlx5e_encap_put(netdev_priv(e->out_dev), e);
1605ac0d9176SVlad Buslov 	if (!next)
1606ac0d9176SVlad Buslov 		return next;
1607ac0d9176SVlad Buslov 
1608ac0d9176SVlad Buslov 	/* wait for encap to be fully initialized */
1609ac0d9176SVlad Buslov 	wait_for_completion(&next->res_ready);
1610ac0d9176SVlad Buslov 	/* continue searching if encap entry is not in valid state after completion */
1611ac0d9176SVlad Buslov 	if (!(next->flags & MLX5_ENCAP_ENTRY_VALID)) {
1612ac0d9176SVlad Buslov 		e = next;
1613ac0d9176SVlad Buslov 		goto retry;
1614ac0d9176SVlad Buslov 	}
1615ac0d9176SVlad Buslov 
1616ac0d9176SVlad Buslov 	return next;
1617ac0d9176SVlad Buslov }
1618ac0d9176SVlad Buslov 
1619f6dfb4c3SHadar Hen Zion void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
1620f6dfb4c3SHadar Hen Zion {
1621f6dfb4c3SHadar Hen Zion 	struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
1622ac0d9176SVlad Buslov 	struct mlx5e_encap_entry *e = NULL;
1623f6dfb4c3SHadar Hen Zion 	struct mlx5e_tc_flow *flow;
1624f6dfb4c3SHadar Hen Zion 	struct mlx5_fc *counter;
1625f6dfb4c3SHadar Hen Zion 	struct neigh_table *tbl;
1626f6dfb4c3SHadar Hen Zion 	bool neigh_used = false;
1627f6dfb4c3SHadar Hen Zion 	struct neighbour *n;
162890bb7692SAriel Levkovich 	u64 lastuse;
1629f6dfb4c3SHadar Hen Zion 
1630f6dfb4c3SHadar Hen Zion 	if (m_neigh->family == AF_INET)
1631f6dfb4c3SHadar Hen Zion 		tbl = &arp_tbl;
1632f6dfb4c3SHadar Hen Zion #if IS_ENABLED(CONFIG_IPV6)
1633f6dfb4c3SHadar Hen Zion 	else if (m_neigh->family == AF_INET6)
16345cc3a8c6SSaeed Mahameed 		tbl = ipv6_stub->nd_tbl;
1635f6dfb4c3SHadar Hen Zion #endif
1636f6dfb4c3SHadar Hen Zion 	else
1637f6dfb4c3SHadar Hen Zion 		return;
1638f6dfb4c3SHadar Hen Zion 
1639ac0d9176SVlad Buslov 	/* mlx5e_get_next_valid_encap() releases previous encap before returning
1640ac0d9176SVlad Buslov 	 * next one.
1641ac0d9176SVlad Buslov 	 */
1642ac0d9176SVlad Buslov 	while ((e = mlx5e_get_next_valid_encap(nhe, e)) != NULL) {
16436a06c2f7SVlad Buslov 		struct mlx5e_priv *priv = netdev_priv(e->out_dev);
16445a7e5bcbSVlad Buslov 		struct encap_flow_item *efi, *tmp;
16456a06c2f7SVlad Buslov 		struct mlx5_eswitch *esw;
16466a06c2f7SVlad Buslov 		LIST_HEAD(flow_list);
1647948993f2SVlad Buslov 
16486a06c2f7SVlad Buslov 		esw = priv->mdev->priv.eswitch;
16496a06c2f7SVlad Buslov 		mutex_lock(&esw->offloads.encap_tbl_lock);
16505a7e5bcbSVlad Buslov 		list_for_each_entry_safe(efi, tmp, &e->flows, list) {
165179baaec7SEli Britstein 			flow = container_of(efi, struct mlx5e_tc_flow,
165279baaec7SEli Britstein 					    encaps[efi->index]);
16535a7e5bcbSVlad Buslov 			if (IS_ERR(mlx5e_flow_get(flow)))
16545a7e5bcbSVlad Buslov 				continue;
16556a06c2f7SVlad Buslov 			list_add(&flow->tmp_list, &flow_list);
16565a7e5bcbSVlad Buslov 
1657226f2ca3SVlad Buslov 			if (mlx5e_is_offloaded_flow(flow)) {
1658b8aee822SMark Bloch 				counter = mlx5e_tc_get_counter(flow);
165990bb7692SAriel Levkovich 				lastuse = mlx5_fc_query_lastuse(counter);
1660f6dfb4c3SHadar Hen Zion 				if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
1661f6dfb4c3SHadar Hen Zion 					neigh_used = true;
1662f6dfb4c3SHadar Hen Zion 					break;
1663f6dfb4c3SHadar Hen Zion 				}
1664f6dfb4c3SHadar Hen Zion 			}
1665f6dfb4c3SHadar Hen Zion 		}
16666a06c2f7SVlad Buslov 		mutex_unlock(&esw->offloads.encap_tbl_lock);
1667948993f2SVlad Buslov 
16686a06c2f7SVlad Buslov 		mlx5e_put_encap_flow_list(priv, &flow_list);
1669ac0d9176SVlad Buslov 		if (neigh_used) {
1670ac0d9176SVlad Buslov 			/* release current encap before breaking the loop */
16716a06c2f7SVlad Buslov 			mlx5e_encap_put(priv, e);
1672e36d4810SRoi Dayan 			break;
1673f6dfb4c3SHadar Hen Zion 		}
1674ac0d9176SVlad Buslov 	}
1675f6dfb4c3SHadar Hen Zion 
1676c786fe59SVlad Buslov 	trace_mlx5e_tc_update_neigh_used_value(nhe, neigh_used);
1677c786fe59SVlad Buslov 
1678f6dfb4c3SHadar Hen Zion 	if (neigh_used) {
1679f6dfb4c3SHadar Hen Zion 		nhe->reported_lastuse = jiffies;
1680f6dfb4c3SHadar Hen Zion 
1681f6dfb4c3SHadar Hen Zion 		/* find the relevant neigh according to the cached device and
1682f6dfb4c3SHadar Hen Zion 		 * dst ip pair
1683f6dfb4c3SHadar Hen Zion 		 */
1684f6dfb4c3SHadar Hen Zion 		n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev);
1685c7f7ba8dSRoi Dayan 		if (!n)
1686f6dfb4c3SHadar Hen Zion 			return;
1687f6dfb4c3SHadar Hen Zion 
1688f6dfb4c3SHadar Hen Zion 		neigh_event_send(n, NULL);
1689f6dfb4c3SHadar Hen Zion 		neigh_release(n);
1690f6dfb4c3SHadar Hen Zion 	}
1691f6dfb4c3SHadar Hen Zion }
1692f6dfb4c3SHadar Hen Zion 
169361086f39SVlad Buslov static void mlx5e_encap_dealloc(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
1694d85cdccbSOr Gerlitz {
1695948993f2SVlad Buslov 	WARN_ON(!list_empty(&e->flows));
16963c140dd5SVlad Buslov 
16973c140dd5SVlad Buslov 	if (e->compl_result > 0) {
1698232c0013SHadar Hen Zion 		mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
1699232c0013SHadar Hen Zion 
1700232c0013SHadar Hen Zion 		if (e->flags & MLX5_ENCAP_ENTRY_VALID)
17012b688ea5SMaor Gottlieb 			mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
17023c140dd5SVlad Buslov 	}
1703232c0013SHadar Hen Zion 
17042a4b6526SVlad Buslov 	kfree(e->tun_info);
1705232c0013SHadar Hen Zion 	kfree(e->encap_header);
1706ac0d9176SVlad Buslov 	kfree_rcu(e, rcu);
17075067b602SRoi Dayan }
1708948993f2SVlad Buslov 
170914e6b038SEli Cohen static void mlx5e_decap_dealloc(struct mlx5e_priv *priv,
171014e6b038SEli Cohen 				struct mlx5e_decap_entry *d)
171114e6b038SEli Cohen {
171214e6b038SEli Cohen 	WARN_ON(!list_empty(&d->flows));
171314e6b038SEli Cohen 
171414e6b038SEli Cohen 	if (!d->compl_result)
171514e6b038SEli Cohen 		mlx5_packet_reformat_dealloc(priv->mdev, d->pkt_reformat);
171614e6b038SEli Cohen 
171714e6b038SEli Cohen 	kfree_rcu(d, rcu);
171814e6b038SEli Cohen }
171914e6b038SEli Cohen 
172061086f39SVlad Buslov void mlx5e_encap_put(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
172161086f39SVlad Buslov {
172261086f39SVlad Buslov 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
172361086f39SVlad Buslov 
172461086f39SVlad Buslov 	if (!refcount_dec_and_mutex_lock(&e->refcnt, &esw->offloads.encap_tbl_lock))
172561086f39SVlad Buslov 		return;
172661086f39SVlad Buslov 	hash_del_rcu(&e->encap_hlist);
172761086f39SVlad Buslov 	mutex_unlock(&esw->offloads.encap_tbl_lock);
172861086f39SVlad Buslov 
172961086f39SVlad Buslov 	mlx5e_encap_dealloc(priv, e);
173061086f39SVlad Buslov }
173161086f39SVlad Buslov 
173214e6b038SEli Cohen static void mlx5e_decap_put(struct mlx5e_priv *priv, struct mlx5e_decap_entry *d)
173314e6b038SEli Cohen {
173414e6b038SEli Cohen 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
173514e6b038SEli Cohen 
173614e6b038SEli Cohen 	if (!refcount_dec_and_mutex_lock(&d->refcnt, &esw->offloads.decap_tbl_lock))
173714e6b038SEli Cohen 		return;
173814e6b038SEli Cohen 	hash_del_rcu(&d->hlist);
173914e6b038SEli Cohen 	mutex_unlock(&esw->offloads.decap_tbl_lock);
174014e6b038SEli Cohen 
174114e6b038SEli Cohen 	mlx5e_decap_dealloc(priv, d);
174214e6b038SEli Cohen }
174314e6b038SEli Cohen 
1744948993f2SVlad Buslov static void mlx5e_detach_encap(struct mlx5e_priv *priv,
1745948993f2SVlad Buslov 			       struct mlx5e_tc_flow *flow, int out_index)
1746948993f2SVlad Buslov {
174761086f39SVlad Buslov 	struct mlx5e_encap_entry *e = flow->encaps[out_index].e;
174861086f39SVlad Buslov 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
174961086f39SVlad Buslov 
1750948993f2SVlad Buslov 	/* flow wasn't fully initialized */
175161086f39SVlad Buslov 	if (!e)
1752948993f2SVlad Buslov 		return;
1753948993f2SVlad Buslov 
175461086f39SVlad Buslov 	mutex_lock(&esw->offloads.encap_tbl_lock);
1755948993f2SVlad Buslov 	list_del(&flow->encaps[out_index].list);
1756948993f2SVlad Buslov 	flow->encaps[out_index].e = NULL;
175761086f39SVlad Buslov 	if (!refcount_dec_and_test(&e->refcnt)) {
175861086f39SVlad Buslov 		mutex_unlock(&esw->offloads.encap_tbl_lock);
175961086f39SVlad Buslov 		return;
176061086f39SVlad Buslov 	}
176161086f39SVlad Buslov 	hash_del_rcu(&e->encap_hlist);
176261086f39SVlad Buslov 	mutex_unlock(&esw->offloads.encap_tbl_lock);
176361086f39SVlad Buslov 
176461086f39SVlad Buslov 	mlx5e_encap_dealloc(priv, e);
17655067b602SRoi Dayan }
17665067b602SRoi Dayan 
176714e6b038SEli Cohen static void mlx5e_detach_decap(struct mlx5e_priv *priv,
176814e6b038SEli Cohen 			       struct mlx5e_tc_flow *flow)
176914e6b038SEli Cohen {
177014e6b038SEli Cohen 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
177114e6b038SEli Cohen 	struct mlx5e_decap_entry *d = flow->decap_reformat;
177214e6b038SEli Cohen 
177314e6b038SEli Cohen 	if (!d)
177414e6b038SEli Cohen 		return;
177514e6b038SEli Cohen 
177614e6b038SEli Cohen 	mutex_lock(&esw->offloads.decap_tbl_lock);
177714e6b038SEli Cohen 	list_del(&flow->l3_to_l2_reformat);
177814e6b038SEli Cohen 	flow->decap_reformat = NULL;
177914e6b038SEli Cohen 
178014e6b038SEli Cohen 	if (!refcount_dec_and_test(&d->refcnt)) {
178114e6b038SEli Cohen 		mutex_unlock(&esw->offloads.decap_tbl_lock);
178214e6b038SEli Cohen 		return;
178314e6b038SEli Cohen 	}
178414e6b038SEli Cohen 	hash_del_rcu(&d->hlist);
178514e6b038SEli Cohen 	mutex_unlock(&esw->offloads.decap_tbl_lock);
178614e6b038SEli Cohen 
178714e6b038SEli Cohen 	mlx5e_decap_dealloc(priv, d);
178814e6b038SEli Cohen }
178914e6b038SEli Cohen 
179004de7ddaSRoi Dayan static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
179104de7ddaSRoi Dayan {
179204de7ddaSRoi Dayan 	struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;
179304de7ddaSRoi Dayan 
1794226f2ca3SVlad Buslov 	if (!flow_flag_test(flow, ESWITCH) ||
1795226f2ca3SVlad Buslov 	    !flow_flag_test(flow, DUP))
179604de7ddaSRoi Dayan 		return;
179704de7ddaSRoi Dayan 
179804de7ddaSRoi Dayan 	mutex_lock(&esw->offloads.peer_mutex);
179904de7ddaSRoi Dayan 	list_del(&flow->peer);
180004de7ddaSRoi Dayan 	mutex_unlock(&esw->offloads.peer_mutex);
180104de7ddaSRoi Dayan 
1802226f2ca3SVlad Buslov 	flow_flag_clear(flow, DUP);
180304de7ddaSRoi Dayan 
1804eb252c3aSRoi Dayan 	if (refcount_dec_and_test(&flow->peer_flow->refcnt)) {
180504de7ddaSRoi Dayan 		mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow);
1806a23dae79SRoi Dayan 		kfree(flow->peer_flow);
1807eb252c3aSRoi Dayan 	}
1808eb252c3aSRoi Dayan 
180904de7ddaSRoi Dayan 	flow->peer_flow = NULL;
181004de7ddaSRoi Dayan }
181104de7ddaSRoi Dayan 
181204de7ddaSRoi Dayan static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
181304de7ddaSRoi Dayan {
181404de7ddaSRoi Dayan 	struct mlx5_core_dev *dev = flow->priv->mdev;
181504de7ddaSRoi Dayan 	struct mlx5_devcom *devcom = dev->priv.devcom;
181604de7ddaSRoi Dayan 	struct mlx5_eswitch *peer_esw;
181704de7ddaSRoi Dayan 
181804de7ddaSRoi Dayan 	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
181904de7ddaSRoi Dayan 	if (!peer_esw)
182004de7ddaSRoi Dayan 		return;
182104de7ddaSRoi Dayan 
182204de7ddaSRoi Dayan 	__mlx5e_tc_del_fdb_peer_flow(flow);
182304de7ddaSRoi Dayan 	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
182404de7ddaSRoi Dayan }
182504de7ddaSRoi Dayan 
1826e8f887acSAmir Vadai static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
1827961e8979SRoi Dayan 			      struct mlx5e_tc_flow *flow)
1828e8f887acSAmir Vadai {
1829226f2ca3SVlad Buslov 	if (mlx5e_is_eswitch_flow(flow)) {
183004de7ddaSRoi Dayan 		mlx5e_tc_del_fdb_peer_flow(flow);
1831d85cdccbSOr Gerlitz 		mlx5e_tc_del_fdb_flow(priv, flow);
183204de7ddaSRoi Dayan 	} else {
1833d85cdccbSOr Gerlitz 		mlx5e_tc_del_nic_flow(priv, flow);
1834e8f887acSAmir Vadai 	}
183504de7ddaSRoi Dayan }
1836e8f887acSAmir Vadai 
18370a7fcb78SPaul Blakey static int flow_has_tc_fwd_action(struct flow_cls_offload *f)
1838bbd00f7eSHadar Hen Zion {
1839f9e30088SPablo Neira Ayuso 	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
18400a7fcb78SPaul Blakey 	struct flow_action *flow_action = &rule->action;
18410a7fcb78SPaul Blakey 	const struct flow_action_entry *act;
18420a7fcb78SPaul Blakey 	int i;
1843bbd00f7eSHadar Hen Zion 
18440a7fcb78SPaul Blakey 	flow_action_for_each(i, act, flow_action) {
18450a7fcb78SPaul Blakey 		switch (act->id) {
18460a7fcb78SPaul Blakey 		case FLOW_ACTION_GOTO:
18470a7fcb78SPaul Blakey 			return true;
18480a7fcb78SPaul Blakey 		default:
18490a7fcb78SPaul Blakey 			continue;
1850fe1587a7SDmytro Linkin 		}
18512e72eb43SOr Gerlitz 	}
1852bbd00f7eSHadar Hen Zion 
18530a7fcb78SPaul Blakey 	return false;
18540a7fcb78SPaul Blakey }
1855bcef735cSOr Gerlitz 
18560a7fcb78SPaul Blakey static int
18570a7fcb78SPaul Blakey enc_opts_is_dont_care_or_full_match(struct mlx5e_priv *priv,
18580a7fcb78SPaul Blakey 				    struct flow_dissector_key_enc_opts *opts,
18590a7fcb78SPaul Blakey 				    struct netlink_ext_ack *extack,
18600a7fcb78SPaul Blakey 				    bool *dont_care)
18610a7fcb78SPaul Blakey {
18620a7fcb78SPaul Blakey 	struct geneve_opt *opt;
18630a7fcb78SPaul Blakey 	int off = 0;
1864bcef735cSOr Gerlitz 
18650a7fcb78SPaul Blakey 	*dont_care = true;
1866bcef735cSOr Gerlitz 
18670a7fcb78SPaul Blakey 	while (opts->len > off) {
18680a7fcb78SPaul Blakey 		opt = (struct geneve_opt *)&opts->data[off];
1869e98bedf5SEli Britstein 
18700a7fcb78SPaul Blakey 		if (!(*dont_care) || opt->opt_class || opt->type ||
18710a7fcb78SPaul Blakey 		    memchr_inv(opt->opt_data, 0, opt->length * 4)) {
18720a7fcb78SPaul Blakey 			*dont_care = false;
18730a7fcb78SPaul Blakey 
18740a7fcb78SPaul Blakey 			if (opt->opt_class != U16_MAX ||
1875d7a42ad0SRoi Dayan 			    opt->type != U8_MAX) {
18760a7fcb78SPaul Blakey 				NL_SET_ERR_MSG(extack,
18770a7fcb78SPaul Blakey 					       "Partial match of tunnel options in chain > 0 isn't supported");
18780a7fcb78SPaul Blakey 				netdev_warn(priv->netdev,
18790a7fcb78SPaul Blakey 					    "Partial match of tunnel options in chain > 0 isn't supported");
1880e98bedf5SEli Britstein 				return -EOPNOTSUPP;
1881e98bedf5SEli Britstein 			}
1882bcef735cSOr Gerlitz 		}
1883bcef735cSOr Gerlitz 
18840a7fcb78SPaul Blakey 		off += sizeof(struct geneve_opt) + opt->length * 4;
1885bbd00f7eSHadar Hen Zion 	}
1886bbd00f7eSHadar Hen Zion 
1887bbd00f7eSHadar Hen Zion 	return 0;
1888bbd00f7eSHadar Hen Zion }
1889bbd00f7eSHadar Hen Zion 
18900a7fcb78SPaul Blakey #define COPY_DISSECTOR(rule, diss_key, dst)\
18910a7fcb78SPaul Blakey ({ \
18920a7fcb78SPaul Blakey 	struct flow_rule *__rule = (rule);\
18930a7fcb78SPaul Blakey 	typeof(dst) __dst = dst;\
18940a7fcb78SPaul Blakey \
18950a7fcb78SPaul Blakey 	memcpy(__dst,\
18960a7fcb78SPaul Blakey 	       skb_flow_dissector_target(__rule->match.dissector,\
18970a7fcb78SPaul Blakey 					 diss_key,\
18980a7fcb78SPaul Blakey 					 __rule->match.key),\
18990a7fcb78SPaul Blakey 	       sizeof(*__dst));\
19000a7fcb78SPaul Blakey })
19010a7fcb78SPaul Blakey 
19020a7fcb78SPaul Blakey static int mlx5e_get_flow_tunnel_id(struct mlx5e_priv *priv,
19030a7fcb78SPaul Blakey 				    struct mlx5e_tc_flow *flow,
19040a7fcb78SPaul Blakey 				    struct flow_cls_offload *f,
19050a7fcb78SPaul Blakey 				    struct net_device *filter_dev)
19068377629eSEli Britstein {
19070a7fcb78SPaul Blakey 	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
19080a7fcb78SPaul Blakey 	struct netlink_ext_ack *extack = f->common.extack;
19090a7fcb78SPaul Blakey 	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
19100a7fcb78SPaul Blakey 	struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
19110a7fcb78SPaul Blakey 	struct flow_match_enc_opts enc_opts_match;
1912d7a42ad0SRoi Dayan 	struct tunnel_match_enc_opts tun_enc_opts;
19130a7fcb78SPaul Blakey 	struct mlx5_rep_uplink_priv *uplink_priv;
19140a7fcb78SPaul Blakey 	struct mlx5e_rep_priv *uplink_rpriv;
19150a7fcb78SPaul Blakey 	struct tunnel_match_key tunnel_key;
19160a7fcb78SPaul Blakey 	bool enc_opts_is_dont_care = true;
19170a7fcb78SPaul Blakey 	u32 tun_id, enc_opts_id = 0;
19180a7fcb78SPaul Blakey 	struct mlx5_eswitch *esw;
19190a7fcb78SPaul Blakey 	u32 value, mask;
19200a7fcb78SPaul Blakey 	int err;
19210a7fcb78SPaul Blakey 
19220a7fcb78SPaul Blakey 	esw = priv->mdev->priv.eswitch;
19230a7fcb78SPaul Blakey 	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
19240a7fcb78SPaul Blakey 	uplink_priv = &uplink_rpriv->uplink_priv;
19250a7fcb78SPaul Blakey 
19260a7fcb78SPaul Blakey 	memset(&tunnel_key, 0, sizeof(tunnel_key));
19270a7fcb78SPaul Blakey 	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL,
19280a7fcb78SPaul Blakey 		       &tunnel_key.enc_control);
19290a7fcb78SPaul Blakey 	if (tunnel_key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
19300a7fcb78SPaul Blakey 		COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
19310a7fcb78SPaul Blakey 			       &tunnel_key.enc_ipv4);
19320a7fcb78SPaul Blakey 	else
19330a7fcb78SPaul Blakey 		COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
19340a7fcb78SPaul Blakey 			       &tunnel_key.enc_ipv6);
19350a7fcb78SPaul Blakey 	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IP, &tunnel_key.enc_ip);
19360a7fcb78SPaul Blakey 	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_PORTS,
19370a7fcb78SPaul Blakey 		       &tunnel_key.enc_tp);
19380a7fcb78SPaul Blakey 	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_KEYID,
19390a7fcb78SPaul Blakey 		       &tunnel_key.enc_key_id);
19400a7fcb78SPaul Blakey 	tunnel_key.filter_ifindex = filter_dev->ifindex;
19410a7fcb78SPaul Blakey 
19420a7fcb78SPaul Blakey 	err = mapping_add(uplink_priv->tunnel_mapping, &tunnel_key, &tun_id);
19430a7fcb78SPaul Blakey 	if (err)
19440a7fcb78SPaul Blakey 		return err;
19450a7fcb78SPaul Blakey 
19460a7fcb78SPaul Blakey 	flow_rule_match_enc_opts(rule, &enc_opts_match);
19470a7fcb78SPaul Blakey 	err = enc_opts_is_dont_care_or_full_match(priv,
19480a7fcb78SPaul Blakey 						  enc_opts_match.mask,
19490a7fcb78SPaul Blakey 						  extack,
19500a7fcb78SPaul Blakey 						  &enc_opts_is_dont_care);
19510a7fcb78SPaul Blakey 	if (err)
19520a7fcb78SPaul Blakey 		goto err_enc_opts;
19530a7fcb78SPaul Blakey 
19540a7fcb78SPaul Blakey 	if (!enc_opts_is_dont_care) {
1955d7a42ad0SRoi Dayan 		memset(&tun_enc_opts, 0, sizeof(tun_enc_opts));
1956d7a42ad0SRoi Dayan 		memcpy(&tun_enc_opts.key, enc_opts_match.key,
1957d7a42ad0SRoi Dayan 		       sizeof(*enc_opts_match.key));
1958d7a42ad0SRoi Dayan 		memcpy(&tun_enc_opts.mask, enc_opts_match.mask,
1959d7a42ad0SRoi Dayan 		       sizeof(*enc_opts_match.mask));
1960d7a42ad0SRoi Dayan 
19610a7fcb78SPaul Blakey 		err = mapping_add(uplink_priv->tunnel_enc_opts_mapping,
1962d7a42ad0SRoi Dayan 				  &tun_enc_opts, &enc_opts_id);
19630a7fcb78SPaul Blakey 		if (err)
19640a7fcb78SPaul Blakey 			goto err_enc_opts;
19650a7fcb78SPaul Blakey 	}
19660a7fcb78SPaul Blakey 
19670a7fcb78SPaul Blakey 	value = tun_id << ENC_OPTS_BITS | enc_opts_id;
19680a7fcb78SPaul Blakey 	mask = enc_opts_id ? TUNNEL_ID_MASK :
19690a7fcb78SPaul Blakey 			     (TUNNEL_ID_MASK & ~ENC_OPTS_BITS_MASK);
19700a7fcb78SPaul Blakey 
19710a7fcb78SPaul Blakey 	if (attr->chain) {
19720a7fcb78SPaul Blakey 		mlx5e_tc_match_to_reg_match(&attr->parse_attr->spec,
19730a7fcb78SPaul Blakey 					    TUNNEL_TO_REG, value, mask);
19740a7fcb78SPaul Blakey 	} else {
19750a7fcb78SPaul Blakey 		mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
19760a7fcb78SPaul Blakey 		err = mlx5e_tc_match_to_reg_set(priv->mdev,
19770a7fcb78SPaul Blakey 						mod_hdr_acts,
19780a7fcb78SPaul Blakey 						TUNNEL_TO_REG, value);
19790a7fcb78SPaul Blakey 		if (err)
19800a7fcb78SPaul Blakey 			goto err_set;
19810a7fcb78SPaul Blakey 
19820a7fcb78SPaul Blakey 		attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
19830a7fcb78SPaul Blakey 	}
19840a7fcb78SPaul Blakey 
19850a7fcb78SPaul Blakey 	flow->tunnel_id = value;
19860a7fcb78SPaul Blakey 	return 0;
19870a7fcb78SPaul Blakey 
19880a7fcb78SPaul Blakey err_set:
19890a7fcb78SPaul Blakey 	if (enc_opts_id)
19900a7fcb78SPaul Blakey 		mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
19910a7fcb78SPaul Blakey 			       enc_opts_id);
19920a7fcb78SPaul Blakey err_enc_opts:
19930a7fcb78SPaul Blakey 	mapping_remove(uplink_priv->tunnel_mapping, tun_id);
19940a7fcb78SPaul Blakey 	return err;
19950a7fcb78SPaul Blakey }
19960a7fcb78SPaul Blakey 
19970a7fcb78SPaul Blakey static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow)
19980a7fcb78SPaul Blakey {
19990a7fcb78SPaul Blakey 	u32 enc_opts_id = flow->tunnel_id & ENC_OPTS_BITS_MASK;
20000a7fcb78SPaul Blakey 	u32 tun_id = flow->tunnel_id >> ENC_OPTS_BITS;
20010a7fcb78SPaul Blakey 	struct mlx5_rep_uplink_priv *uplink_priv;
20020a7fcb78SPaul Blakey 	struct mlx5e_rep_priv *uplink_rpriv;
20030a7fcb78SPaul Blakey 	struct mlx5_eswitch *esw;
20040a7fcb78SPaul Blakey 
20050a7fcb78SPaul Blakey 	esw = flow->priv->mdev->priv.eswitch;
20060a7fcb78SPaul Blakey 	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
20070a7fcb78SPaul Blakey 	uplink_priv = &uplink_rpriv->uplink_priv;
20080a7fcb78SPaul Blakey 
20090a7fcb78SPaul Blakey 	if (tun_id)
20100a7fcb78SPaul Blakey 		mapping_remove(uplink_priv->tunnel_mapping, tun_id);
20110a7fcb78SPaul Blakey 	if (enc_opts_id)
20120a7fcb78SPaul Blakey 		mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
20130a7fcb78SPaul Blakey 			       enc_opts_id);
20140a7fcb78SPaul Blakey }
20150a7fcb78SPaul Blakey 
20164c3844d9SPaul Blakey u32 mlx5e_tc_get_flow_tun_id(struct mlx5e_tc_flow *flow)
20174c3844d9SPaul Blakey {
20184c3844d9SPaul Blakey 	return flow->tunnel_id;
20194c3844d9SPaul Blakey }
20204c3844d9SPaul Blakey 
20210a7fcb78SPaul Blakey static int parse_tunnel_attr(struct mlx5e_priv *priv,
20220a7fcb78SPaul Blakey 			     struct mlx5e_tc_flow *flow,
20230a7fcb78SPaul Blakey 			     struct mlx5_flow_spec *spec,
20240a7fcb78SPaul Blakey 			     struct flow_cls_offload *f,
20250a7fcb78SPaul Blakey 			     struct net_device *filter_dev,
20260a7fcb78SPaul Blakey 			     u8 *match_level,
20270a7fcb78SPaul Blakey 			     bool *match_inner)
20280a7fcb78SPaul Blakey {
20290a7fcb78SPaul Blakey 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
20300a7fcb78SPaul Blakey 	struct netlink_ext_ack *extack = f->common.extack;
20310a7fcb78SPaul Blakey 	bool needs_mapping, sets_mapping;
20320a7fcb78SPaul Blakey 	int err;
20330a7fcb78SPaul Blakey 
20340a7fcb78SPaul Blakey 	if (!mlx5e_is_eswitch_flow(flow))
20350a7fcb78SPaul Blakey 		return -EOPNOTSUPP;
20360a7fcb78SPaul Blakey 
20370a7fcb78SPaul Blakey 	needs_mapping = !!flow->esw_attr->chain;
20380a7fcb78SPaul Blakey 	sets_mapping = !flow->esw_attr->chain && flow_has_tc_fwd_action(f);
20390a7fcb78SPaul Blakey 	*match_inner = !needs_mapping;
20400a7fcb78SPaul Blakey 
20410a7fcb78SPaul Blakey 	if ((needs_mapping || sets_mapping) &&
2042636bb968SPaul Blakey 	    !mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
20430a7fcb78SPaul Blakey 		NL_SET_ERR_MSG(extack,
2044636bb968SPaul Blakey 			       "Chains on tunnel devices isn't supported without register loopback support");
20450a7fcb78SPaul Blakey 		netdev_warn(priv->netdev,
2046636bb968SPaul Blakey 			    "Chains on tunnel devices isn't supported without register loopback support");
20470a7fcb78SPaul Blakey 		return -EOPNOTSUPP;
20480a7fcb78SPaul Blakey 	}
20490a7fcb78SPaul Blakey 
20500a7fcb78SPaul Blakey 	if (!flow->esw_attr->chain) {
20510a7fcb78SPaul Blakey 		err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
20520a7fcb78SPaul Blakey 					 match_level);
20530a7fcb78SPaul Blakey 		if (err) {
20540a7fcb78SPaul Blakey 			NL_SET_ERR_MSG_MOD(extack,
20550a7fcb78SPaul Blakey 					   "Failed to parse tunnel attributes");
20560a7fcb78SPaul Blakey 			netdev_warn(priv->netdev,
20570a7fcb78SPaul Blakey 				    "Failed to parse tunnel attributes");
20580a7fcb78SPaul Blakey 			return err;
20590a7fcb78SPaul Blakey 		}
20600a7fcb78SPaul Blakey 
206114e6b038SEli Cohen 		/* With mpls over udp we decapsulate using packet reformat
206214e6b038SEli Cohen 		 * object
206314e6b038SEli Cohen 		 */
206414e6b038SEli Cohen 		if (!netif_is_bareudp(filter_dev))
20650a7fcb78SPaul Blakey 			flow->esw_attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
20660a7fcb78SPaul Blakey 	}
20670a7fcb78SPaul Blakey 
20680a7fcb78SPaul Blakey 	if (!needs_mapping && !sets_mapping)
20690a7fcb78SPaul Blakey 		return 0;
20700a7fcb78SPaul Blakey 
20710a7fcb78SPaul Blakey 	return mlx5e_get_flow_tunnel_id(priv, flow, f, filter_dev);
20720a7fcb78SPaul Blakey }
20730a7fcb78SPaul Blakey 
20740a7fcb78SPaul Blakey static void *get_match_inner_headers_criteria(struct mlx5_flow_spec *spec)
20750a7fcb78SPaul Blakey {
20760a7fcb78SPaul Blakey 	return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
20770a7fcb78SPaul Blakey 			    inner_headers);
20780a7fcb78SPaul Blakey }
20790a7fcb78SPaul Blakey 
20800a7fcb78SPaul Blakey static void *get_match_inner_headers_value(struct mlx5_flow_spec *spec)
20810a7fcb78SPaul Blakey {
20820a7fcb78SPaul Blakey 	return MLX5_ADDR_OF(fte_match_param, spec->match_value,
20830a7fcb78SPaul Blakey 			    inner_headers);
20840a7fcb78SPaul Blakey }
20850a7fcb78SPaul Blakey 
20860a7fcb78SPaul Blakey static void *get_match_outer_headers_criteria(struct mlx5_flow_spec *spec)
20870a7fcb78SPaul Blakey {
20880a7fcb78SPaul Blakey 	return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
20890a7fcb78SPaul Blakey 			    outer_headers);
20900a7fcb78SPaul Blakey }
20910a7fcb78SPaul Blakey 
20920a7fcb78SPaul Blakey static void *get_match_outer_headers_value(struct mlx5_flow_spec *spec)
20930a7fcb78SPaul Blakey {
20940a7fcb78SPaul Blakey 	return MLX5_ADDR_OF(fte_match_param, spec->match_value,
20958377629eSEli Britstein 			    outer_headers);
20968377629eSEli Britstein }
20978377629eSEli Britstein 
20988377629eSEli Britstein static void *get_match_headers_value(u32 flags,
20998377629eSEli Britstein 				     struct mlx5_flow_spec *spec)
21008377629eSEli Britstein {
21018377629eSEli Britstein 	return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
21020a7fcb78SPaul Blakey 		get_match_inner_headers_value(spec) :
21030a7fcb78SPaul Blakey 		get_match_outer_headers_value(spec);
21040a7fcb78SPaul Blakey }
21050a7fcb78SPaul Blakey 
21060a7fcb78SPaul Blakey static void *get_match_headers_criteria(u32 flags,
21070a7fcb78SPaul Blakey 					struct mlx5_flow_spec *spec)
21080a7fcb78SPaul Blakey {
21090a7fcb78SPaul Blakey 	return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
21100a7fcb78SPaul Blakey 		get_match_inner_headers_criteria(spec) :
21110a7fcb78SPaul Blakey 		get_match_outer_headers_criteria(spec);
21128377629eSEli Britstein }
21138377629eSEli Britstein 
21146d65bc64Swenxu static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
21156d65bc64Swenxu 				   struct flow_cls_offload *f)
21166d65bc64Swenxu {
21176d65bc64Swenxu 	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
21186d65bc64Swenxu 	struct netlink_ext_ack *extack = f->common.extack;
21196d65bc64Swenxu 	struct net_device *ingress_dev;
21206d65bc64Swenxu 	struct flow_match_meta match;
21216d65bc64Swenxu 
21226d65bc64Swenxu 	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
21236d65bc64Swenxu 		return 0;
21246d65bc64Swenxu 
21256d65bc64Swenxu 	flow_rule_match_meta(rule, &match);
21266d65bc64Swenxu 	if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
21276d65bc64Swenxu 		NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask");
21286d65bc64Swenxu 		return -EINVAL;
21296d65bc64Swenxu 	}
21306d65bc64Swenxu 
21316d65bc64Swenxu 	ingress_dev = __dev_get_by_index(dev_net(filter_dev),
21326d65bc64Swenxu 					 match.key->ingress_ifindex);
21336d65bc64Swenxu 	if (!ingress_dev) {
21346d65bc64Swenxu 		NL_SET_ERR_MSG_MOD(extack,
21356d65bc64Swenxu 				   "Can't find the ingress port to match on");
21366d65bc64Swenxu 		return -EINVAL;
21376d65bc64Swenxu 	}
21386d65bc64Swenxu 
21396d65bc64Swenxu 	if (ingress_dev != filter_dev) {
21406d65bc64Swenxu 		NL_SET_ERR_MSG_MOD(extack,
21416d65bc64Swenxu 				   "Can't match on the ingress filter port");
21426d65bc64Swenxu 		return -EINVAL;
21436d65bc64Swenxu 	}
21446d65bc64Swenxu 
21456d65bc64Swenxu 	return 0;
21466d65bc64Swenxu }
21476d65bc64Swenxu 
214872046a91SEli Cohen static bool skip_key_basic(struct net_device *filter_dev,
214972046a91SEli Cohen 			   struct flow_cls_offload *f)
215072046a91SEli Cohen {
215172046a91SEli Cohen 	/* When doing mpls over udp decap, the user needs to provide
215272046a91SEli Cohen 	 * MPLS_UC as the protocol in order to be able to match on mpls
215372046a91SEli Cohen 	 * label fields.  However, the actual ethertype is IP so we want to
215472046a91SEli Cohen 	 * avoid matching on this, otherwise we'll fail the match.
215572046a91SEli Cohen 	 */
215672046a91SEli Cohen 	if (netif_is_bareudp(filter_dev) && f->common.chain_index == 0)
215772046a91SEli Cohen 		return true;
215872046a91SEli Cohen 
215972046a91SEli Cohen 	return false;
216072046a91SEli Cohen }
216172046a91SEli Cohen 
2162de0af0bfSRoi Dayan static int __parse_cls_flower(struct mlx5e_priv *priv,
21630a7fcb78SPaul Blakey 			      struct mlx5e_tc_flow *flow,
2164de0af0bfSRoi Dayan 			      struct mlx5_flow_spec *spec,
2165f9e30088SPablo Neira Ayuso 			      struct flow_cls_offload *f,
216654c177caSOz Shlomo 			      struct net_device *filter_dev,
216793b3586eSHuy Nguyen 			      u8 *inner_match_level, u8 *outer_match_level)
2168e3a2b7edSAmir Vadai {
2169e98bedf5SEli Britstein 	struct netlink_ext_ack *extack = f->common.extack;
2170c5bb1730SMaor Gottlieb 	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2171c5bb1730SMaor Gottlieb 				       outer_headers);
2172c5bb1730SMaor Gottlieb 	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
2173c5bb1730SMaor Gottlieb 				       outer_headers);
2174699e96ddSJianbo Liu 	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2175699e96ddSJianbo Liu 				    misc_parameters);
2176699e96ddSJianbo Liu 	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
2177699e96ddSJianbo Liu 				    misc_parameters);
2178f9e30088SPablo Neira Ayuso 	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
21798f256622SPablo Neira Ayuso 	struct flow_dissector *dissector = rule->match.dissector;
2180e3a2b7edSAmir Vadai 	u16 addr_type = 0;
2181e3a2b7edSAmir Vadai 	u8 ip_proto = 0;
218293b3586eSHuy Nguyen 	u8 *match_level;
21836d65bc64Swenxu 	int err;
2184e3a2b7edSAmir Vadai 
218593b3586eSHuy Nguyen 	match_level = outer_match_level;
2186de0af0bfSRoi Dayan 
21878f256622SPablo Neira Ayuso 	if (dissector->used_keys &
21883d144578SVlad Buslov 	    ~(BIT(FLOW_DISSECTOR_KEY_META) |
21893d144578SVlad Buslov 	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
2190e3a2b7edSAmir Vadai 	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
2191e3a2b7edSAmir Vadai 	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
2192095b6cfdSOr Gerlitz 	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
2193699e96ddSJianbo Liu 	      BIT(FLOW_DISSECTOR_KEY_CVLAN) |
2194e3a2b7edSAmir Vadai 	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
2195e3a2b7edSAmir Vadai 	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
2196bbd00f7eSHadar Hen Zion 	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
2197bbd00f7eSHadar Hen Zion 	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
2198bbd00f7eSHadar Hen Zion 	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
2199bbd00f7eSHadar Hen Zion 	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
2200bbd00f7eSHadar Hen Zion 	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS)	|
2201e77834ecSOr Gerlitz 	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
2202fd7da28bSOr Gerlitz 	      BIT(FLOW_DISSECTOR_KEY_TCP) |
2203bcef735cSOr Gerlitz 	      BIT(FLOW_DISSECTOR_KEY_IP)  |
22044c3844d9SPaul Blakey 	      BIT(FLOW_DISSECTOR_KEY_CT) |
22059272e3dfSYevgeny Kliteynik 	      BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
220672046a91SEli Cohen 	      BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
220772046a91SEli Cohen 	      BIT(FLOW_DISSECTOR_KEY_MPLS))) {
2208e98bedf5SEli Britstein 		NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
2209e3a2b7edSAmir Vadai 		netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
22108f256622SPablo Neira Ayuso 			    dissector->used_keys);
2211e3a2b7edSAmir Vadai 		return -EOPNOTSUPP;
2212e3a2b7edSAmir Vadai 	}
2213e3a2b7edSAmir Vadai 
2214075973c7SVlad Buslov 	if (mlx5e_get_tc_tun(filter_dev)) {
22150a7fcb78SPaul Blakey 		bool match_inner = false;
2216bbd00f7eSHadar Hen Zion 
22170a7fcb78SPaul Blakey 		err = parse_tunnel_attr(priv, flow, spec, f, filter_dev,
22180a7fcb78SPaul Blakey 					outer_match_level, &match_inner);
22190a7fcb78SPaul Blakey 		if (err)
22200a7fcb78SPaul Blakey 			return err;
22210a7fcb78SPaul Blakey 
22220a7fcb78SPaul Blakey 		if (match_inner) {
22230a7fcb78SPaul Blakey 			/* header pointers should point to the inner headers
22240a7fcb78SPaul Blakey 			 * if the packet was decapsulated already.
22250a7fcb78SPaul Blakey 			 * outer headers are set by parse_tunnel_attr.
2226bbd00f7eSHadar Hen Zion 			 */
222793b3586eSHuy Nguyen 			match_level = inner_match_level;
22280a7fcb78SPaul Blakey 			headers_c = get_match_inner_headers_criteria(spec);
22290a7fcb78SPaul Blakey 			headers_v = get_match_inner_headers_value(spec);
22300a7fcb78SPaul Blakey 		}
2231bbd00f7eSHadar Hen Zion 	}
2232bbd00f7eSHadar Hen Zion 
22336d65bc64Swenxu 	err = mlx5e_flower_parse_meta(filter_dev, f);
22346d65bc64Swenxu 	if (err)
22356d65bc64Swenxu 		return err;
22366d65bc64Swenxu 
223772046a91SEli Cohen 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC) &&
223872046a91SEli Cohen 	    !skip_key_basic(filter_dev, f)) {
22398f256622SPablo Neira Ayuso 		struct flow_match_basic match;
2240e3a2b7edSAmir Vadai 
22418f256622SPablo Neira Ayuso 		flow_rule_match_basic(rule, &match);
22428f256622SPablo Neira Ayuso 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
22438f256622SPablo Neira Ayuso 			 ntohs(match.mask->n_proto));
22448f256622SPablo Neira Ayuso 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
22458f256622SPablo Neira Ayuso 			 ntohs(match.key->n_proto));
22468f256622SPablo Neira Ayuso 
22478f256622SPablo Neira Ayuso 		if (match.mask->n_proto)
2248d708f902SOr Gerlitz 			*match_level = MLX5_MATCH_L2;
2249e3a2b7edSAmir Vadai 	}
225035a605dbSEli Britstein 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN) ||
225135a605dbSEli Britstein 	    is_vlan_dev(filter_dev)) {
225235a605dbSEli Britstein 		struct flow_dissector_key_vlan filter_dev_mask;
225335a605dbSEli Britstein 		struct flow_dissector_key_vlan filter_dev_key;
22548f256622SPablo Neira Ayuso 		struct flow_match_vlan match;
22558f256622SPablo Neira Ayuso 
225635a605dbSEli Britstein 		if (is_vlan_dev(filter_dev)) {
225735a605dbSEli Britstein 			match.key = &filter_dev_key;
225835a605dbSEli Britstein 			match.key->vlan_id = vlan_dev_vlan_id(filter_dev);
225935a605dbSEli Britstein 			match.key->vlan_tpid = vlan_dev_vlan_proto(filter_dev);
226035a605dbSEli Britstein 			match.key->vlan_priority = 0;
226135a605dbSEli Britstein 			match.mask = &filter_dev_mask;
226235a605dbSEli Britstein 			memset(match.mask, 0xff, sizeof(*match.mask));
226335a605dbSEli Britstein 			match.mask->vlan_priority = 0;
226435a605dbSEli Britstein 		} else {
22658f256622SPablo Neira Ayuso 			flow_rule_match_vlan(rule, &match);
226635a605dbSEli Britstein 		}
22678f256622SPablo Neira Ayuso 		if (match.mask->vlan_id ||
22688f256622SPablo Neira Ayuso 		    match.mask->vlan_priority ||
22698f256622SPablo Neira Ayuso 		    match.mask->vlan_tpid) {
22708f256622SPablo Neira Ayuso 			if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
2271699e96ddSJianbo Liu 				MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2272699e96ddSJianbo Liu 					 svlan_tag, 1);
2273699e96ddSJianbo Liu 				MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2274699e96ddSJianbo Liu 					 svlan_tag, 1);
2275699e96ddSJianbo Liu 			} else {
2276699e96ddSJianbo Liu 				MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2277699e96ddSJianbo Liu 					 cvlan_tag, 1);
2278699e96ddSJianbo Liu 				MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2279699e96ddSJianbo Liu 					 cvlan_tag, 1);
2280699e96ddSJianbo Liu 			}
2281095b6cfdSOr Gerlitz 
22828f256622SPablo Neira Ayuso 			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid,
22838f256622SPablo Neira Ayuso 				 match.mask->vlan_id);
22848f256622SPablo Neira Ayuso 			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid,
22858f256622SPablo Neira Ayuso 				 match.key->vlan_id);
2286358d79a4SOr Gerlitz 
22878f256622SPablo Neira Ayuso 			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio,
22888f256622SPablo Neira Ayuso 				 match.mask->vlan_priority);
22898f256622SPablo Neira Ayuso 			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio,
22908f256622SPablo Neira Ayuso 				 match.key->vlan_priority);
229154782900SOr Gerlitz 
2292d708f902SOr Gerlitz 			*match_level = MLX5_MATCH_L2;
2293095b6cfdSOr Gerlitz 		}
2294d3a80bb5SOr Gerlitz 	} else if (*match_level != MLX5_MATCH_NONE) {
2295fc603294SMark Bloch 		/* cvlan_tag enabled in match criteria and
2296fc603294SMark Bloch 		 * disabled in match value means both S & C tags
2297fc603294SMark Bloch 		 * don't exist (untagged of both)
2298fc603294SMark Bloch 		 */
2299cee26487SJianbo Liu 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
2300d3a80bb5SOr Gerlitz 		*match_level = MLX5_MATCH_L2;
2301095b6cfdSOr Gerlitz 	}
2302095b6cfdSOr Gerlitz 
23038f256622SPablo Neira Ayuso 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
23048f256622SPablo Neira Ayuso 		struct flow_match_vlan match;
23058f256622SPablo Neira Ayuso 
230612d5cbf8SJianbo Liu 		flow_rule_match_cvlan(rule, &match);
23078f256622SPablo Neira Ayuso 		if (match.mask->vlan_id ||
23088f256622SPablo Neira Ayuso 		    match.mask->vlan_priority ||
23098f256622SPablo Neira Ayuso 		    match.mask->vlan_tpid) {
23108f256622SPablo Neira Ayuso 			if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
2311699e96ddSJianbo Liu 				MLX5_SET(fte_match_set_misc, misc_c,
2312699e96ddSJianbo Liu 					 outer_second_svlan_tag, 1);
2313699e96ddSJianbo Liu 				MLX5_SET(fte_match_set_misc, misc_v,
2314699e96ddSJianbo Liu 					 outer_second_svlan_tag, 1);
2315699e96ddSJianbo Liu 			} else {
2316699e96ddSJianbo Liu 				MLX5_SET(fte_match_set_misc, misc_c,
2317699e96ddSJianbo Liu 					 outer_second_cvlan_tag, 1);
2318699e96ddSJianbo Liu 				MLX5_SET(fte_match_set_misc, misc_v,
2319699e96ddSJianbo Liu 					 outer_second_cvlan_tag, 1);
2320699e96ddSJianbo Liu 			}
2321699e96ddSJianbo Liu 
2322699e96ddSJianbo Liu 			MLX5_SET(fte_match_set_misc, misc_c, outer_second_vid,
23238f256622SPablo Neira Ayuso 				 match.mask->vlan_id);
2324699e96ddSJianbo Liu 			MLX5_SET(fte_match_set_misc, misc_v, outer_second_vid,
23258f256622SPablo Neira Ayuso 				 match.key->vlan_id);
2326699e96ddSJianbo Liu 			MLX5_SET(fte_match_set_misc, misc_c, outer_second_prio,
23278f256622SPablo Neira Ayuso 				 match.mask->vlan_priority);
2328699e96ddSJianbo Liu 			MLX5_SET(fte_match_set_misc, misc_v, outer_second_prio,
23298f256622SPablo Neira Ayuso 				 match.key->vlan_priority);
2330699e96ddSJianbo Liu 
2331699e96ddSJianbo Liu 			*match_level = MLX5_MATCH_L2;
2332699e96ddSJianbo Liu 		}
2333699e96ddSJianbo Liu 	}
2334699e96ddSJianbo Liu 
23358f256622SPablo Neira Ayuso 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
23368f256622SPablo Neira Ayuso 		struct flow_match_eth_addrs match;
233754782900SOr Gerlitz 
23388f256622SPablo Neira Ayuso 		flow_rule_match_eth_addrs(rule, &match);
2339d3a80bb5SOr Gerlitz 		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2340d3a80bb5SOr Gerlitz 					     dmac_47_16),
23418f256622SPablo Neira Ayuso 				match.mask->dst);
2342d3a80bb5SOr Gerlitz 		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2343d3a80bb5SOr Gerlitz 					     dmac_47_16),
23448f256622SPablo Neira Ayuso 				match.key->dst);
2345d3a80bb5SOr Gerlitz 
2346d3a80bb5SOr Gerlitz 		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2347d3a80bb5SOr Gerlitz 					     smac_47_16),
23488f256622SPablo Neira Ayuso 				match.mask->src);
2349d3a80bb5SOr Gerlitz 		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2350d3a80bb5SOr Gerlitz 					     smac_47_16),
23518f256622SPablo Neira Ayuso 				match.key->src);
2352d3a80bb5SOr Gerlitz 
23538f256622SPablo Neira Ayuso 		if (!is_zero_ether_addr(match.mask->src) ||
23548f256622SPablo Neira Ayuso 		    !is_zero_ether_addr(match.mask->dst))
2355d708f902SOr Gerlitz 			*match_level = MLX5_MATCH_L2;
235654782900SOr Gerlitz 	}
235754782900SOr Gerlitz 
23588f256622SPablo Neira Ayuso 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
23598f256622SPablo Neira Ayuso 		struct flow_match_control match;
236054782900SOr Gerlitz 
23618f256622SPablo Neira Ayuso 		flow_rule_match_control(rule, &match);
23628f256622SPablo Neira Ayuso 		addr_type = match.key->addr_type;
236354782900SOr Gerlitz 
236454782900SOr Gerlitz 		/* the HW doesn't support frag first/later */
23658f256622SPablo Neira Ayuso 		if (match.mask->flags & FLOW_DIS_FIRST_FRAG)
236654782900SOr Gerlitz 			return -EOPNOTSUPP;
236754782900SOr Gerlitz 
23688f256622SPablo Neira Ayuso 		if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
236954782900SOr Gerlitz 			MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
237054782900SOr Gerlitz 			MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
23718f256622SPablo Neira Ayuso 				 match.key->flags & FLOW_DIS_IS_FRAGMENT);
237254782900SOr Gerlitz 
237354782900SOr Gerlitz 			/* the HW doesn't need L3 inline to match on frag=no */
23748f256622SPablo Neira Ayuso 			if (!(match.key->flags & FLOW_DIS_IS_FRAGMENT))
237583621b7dSOr Gerlitz 				*match_level = MLX5_MATCH_L2;
237654782900SOr Gerlitz 	/* ***  L2 attributes parsing up to here *** */
237754782900SOr Gerlitz 			else
237883621b7dSOr Gerlitz 				*match_level = MLX5_MATCH_L3;
237954782900SOr Gerlitz 		}
238054782900SOr Gerlitz 	}
238154782900SOr Gerlitz 
23828f256622SPablo Neira Ayuso 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
23838f256622SPablo Neira Ayuso 		struct flow_match_basic match;
23848f256622SPablo Neira Ayuso 
23858f256622SPablo Neira Ayuso 		flow_rule_match_basic(rule, &match);
23868f256622SPablo Neira Ayuso 		ip_proto = match.key->ip_proto;
238754782900SOr Gerlitz 
238854782900SOr Gerlitz 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
23898f256622SPablo Neira Ayuso 			 match.mask->ip_proto);
239054782900SOr Gerlitz 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
23918f256622SPablo Neira Ayuso 			 match.key->ip_proto);
239254782900SOr Gerlitz 
23938f256622SPablo Neira Ayuso 		if (match.mask->ip_proto)
2394d708f902SOr Gerlitz 			*match_level = MLX5_MATCH_L3;
239554782900SOr Gerlitz 	}
239654782900SOr Gerlitz 
2397e3a2b7edSAmir Vadai 	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
23988f256622SPablo Neira Ayuso 		struct flow_match_ipv4_addrs match;
2399e3a2b7edSAmir Vadai 
24008f256622SPablo Neira Ayuso 		flow_rule_match_ipv4_addrs(rule, &match);
2401e3a2b7edSAmir Vadai 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2402e3a2b7edSAmir Vadai 				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
24038f256622SPablo Neira Ayuso 		       &match.mask->src, sizeof(match.mask->src));
2404e3a2b7edSAmir Vadai 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2405e3a2b7edSAmir Vadai 				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
24068f256622SPablo Neira Ayuso 		       &match.key->src, sizeof(match.key->src));
2407e3a2b7edSAmir Vadai 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2408e3a2b7edSAmir Vadai 				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
24098f256622SPablo Neira Ayuso 		       &match.mask->dst, sizeof(match.mask->dst));
2410e3a2b7edSAmir Vadai 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2411e3a2b7edSAmir Vadai 				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
24128f256622SPablo Neira Ayuso 		       &match.key->dst, sizeof(match.key->dst));
2413de0af0bfSRoi Dayan 
24148f256622SPablo Neira Ayuso 		if (match.mask->src || match.mask->dst)
2415d708f902SOr Gerlitz 			*match_level = MLX5_MATCH_L3;
2416e3a2b7edSAmir Vadai 	}
2417e3a2b7edSAmir Vadai 
2418e3a2b7edSAmir Vadai 	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
24198f256622SPablo Neira Ayuso 		struct flow_match_ipv6_addrs match;
2420e3a2b7edSAmir Vadai 
24218f256622SPablo Neira Ayuso 		flow_rule_match_ipv6_addrs(rule, &match);
2422e3a2b7edSAmir Vadai 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2423e3a2b7edSAmir Vadai 				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
24248f256622SPablo Neira Ayuso 		       &match.mask->src, sizeof(match.mask->src));
2425e3a2b7edSAmir Vadai 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2426e3a2b7edSAmir Vadai 				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
24278f256622SPablo Neira Ayuso 		       &match.key->src, sizeof(match.key->src));
2428e3a2b7edSAmir Vadai 
2429e3a2b7edSAmir Vadai 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2430e3a2b7edSAmir Vadai 				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
24318f256622SPablo Neira Ayuso 		       &match.mask->dst, sizeof(match.mask->dst));
2432e3a2b7edSAmir Vadai 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2433e3a2b7edSAmir Vadai 				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
24348f256622SPablo Neira Ayuso 		       &match.key->dst, sizeof(match.key->dst));
2435de0af0bfSRoi Dayan 
24368f256622SPablo Neira Ayuso 		if (ipv6_addr_type(&match.mask->src) != IPV6_ADDR_ANY ||
24378f256622SPablo Neira Ayuso 		    ipv6_addr_type(&match.mask->dst) != IPV6_ADDR_ANY)
2438d708f902SOr Gerlitz 			*match_level = MLX5_MATCH_L3;
2439e3a2b7edSAmir Vadai 	}
2440e3a2b7edSAmir Vadai 
24418f256622SPablo Neira Ayuso 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
24428f256622SPablo Neira Ayuso 		struct flow_match_ip match;
24431f97a526SOr Gerlitz 
24448f256622SPablo Neira Ayuso 		flow_rule_match_ip(rule, &match);
24458f256622SPablo Neira Ayuso 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn,
24468f256622SPablo Neira Ayuso 			 match.mask->tos & 0x3);
24478f256622SPablo Neira Ayuso 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn,
24488f256622SPablo Neira Ayuso 			 match.key->tos & 0x3);
24491f97a526SOr Gerlitz 
24508f256622SPablo Neira Ayuso 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp,
24518f256622SPablo Neira Ayuso 			 match.mask->tos >> 2);
24528f256622SPablo Neira Ayuso 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp,
24538f256622SPablo Neira Ayuso 			 match.key->tos  >> 2);
24541f97a526SOr Gerlitz 
24558f256622SPablo Neira Ayuso 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit,
24568f256622SPablo Neira Ayuso 			 match.mask->ttl);
24578f256622SPablo Neira Ayuso 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit,
24588f256622SPablo Neira Ayuso 			 match.key->ttl);
24591f97a526SOr Gerlitz 
24608f256622SPablo Neira Ayuso 		if (match.mask->ttl &&
2461a8ade55fSOr Gerlitz 		    !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
2462e98bedf5SEli Britstein 						ft_field_support.outer_ipv4_ttl)) {
2463e98bedf5SEli Britstein 			NL_SET_ERR_MSG_MOD(extack,
2464e98bedf5SEli Britstein 					   "Matching on TTL is not supported");
24651f97a526SOr Gerlitz 			return -EOPNOTSUPP;
2466e98bedf5SEli Britstein 		}
2467a8ade55fSOr Gerlitz 
24688f256622SPablo Neira Ayuso 		if (match.mask->tos || match.mask->ttl)
2469d708f902SOr Gerlitz 			*match_level = MLX5_MATCH_L3;
24701f97a526SOr Gerlitz 	}
24711f97a526SOr Gerlitz 
247254782900SOr Gerlitz 	/* ***  L3 attributes parsing up to here *** */
247354782900SOr Gerlitz 
24748f256622SPablo Neira Ayuso 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
24758f256622SPablo Neira Ayuso 		struct flow_match_ports match;
24768f256622SPablo Neira Ayuso 
24778f256622SPablo Neira Ayuso 		flow_rule_match_ports(rule, &match);
2478e3a2b7edSAmir Vadai 		switch (ip_proto) {
2479e3a2b7edSAmir Vadai 		case IPPROTO_TCP:
2480e3a2b7edSAmir Vadai 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
24818f256622SPablo Neira Ayuso 				 tcp_sport, ntohs(match.mask->src));
2482e3a2b7edSAmir Vadai 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
24838f256622SPablo Neira Ayuso 				 tcp_sport, ntohs(match.key->src));
2484e3a2b7edSAmir Vadai 
2485e3a2b7edSAmir Vadai 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
24868f256622SPablo Neira Ayuso 				 tcp_dport, ntohs(match.mask->dst));
2487e3a2b7edSAmir Vadai 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
24888f256622SPablo Neira Ayuso 				 tcp_dport, ntohs(match.key->dst));
2489e3a2b7edSAmir Vadai 			break;
2490e3a2b7edSAmir Vadai 
2491e3a2b7edSAmir Vadai 		case IPPROTO_UDP:
2492e3a2b7edSAmir Vadai 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
24938f256622SPablo Neira Ayuso 				 udp_sport, ntohs(match.mask->src));
2494e3a2b7edSAmir Vadai 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
24958f256622SPablo Neira Ayuso 				 udp_sport, ntohs(match.key->src));
2496e3a2b7edSAmir Vadai 
2497e3a2b7edSAmir Vadai 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
24988f256622SPablo Neira Ayuso 				 udp_dport, ntohs(match.mask->dst));
2499e3a2b7edSAmir Vadai 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
25008f256622SPablo Neira Ayuso 				 udp_dport, ntohs(match.key->dst));
2501e3a2b7edSAmir Vadai 			break;
2502e3a2b7edSAmir Vadai 		default:
2503e98bedf5SEli Britstein 			NL_SET_ERR_MSG_MOD(extack,
2504e98bedf5SEli Britstein 					   "Only UDP and TCP transports are supported for L4 matching");
2505e3a2b7edSAmir Vadai 			netdev_err(priv->netdev,
2506e3a2b7edSAmir Vadai 				   "Only UDP and TCP transport are supported\n");
2507e3a2b7edSAmir Vadai 			return -EINVAL;
2508e3a2b7edSAmir Vadai 		}
2509de0af0bfSRoi Dayan 
25108f256622SPablo Neira Ayuso 		if (match.mask->src || match.mask->dst)
2511d708f902SOr Gerlitz 			*match_level = MLX5_MATCH_L4;
2512e3a2b7edSAmir Vadai 	}
2513e3a2b7edSAmir Vadai 
25148f256622SPablo Neira Ayuso 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
25158f256622SPablo Neira Ayuso 		struct flow_match_tcp match;
2516e77834ecSOr Gerlitz 
25178f256622SPablo Neira Ayuso 		flow_rule_match_tcp(rule, &match);
2518e77834ecSOr Gerlitz 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
25198f256622SPablo Neira Ayuso 			 ntohs(match.mask->flags));
2520e77834ecSOr Gerlitz 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
25218f256622SPablo Neira Ayuso 			 ntohs(match.key->flags));
2522e77834ecSOr Gerlitz 
25238f256622SPablo Neira Ayuso 		if (match.mask->flags)
2524d708f902SOr Gerlitz 			*match_level = MLX5_MATCH_L4;
2525e77834ecSOr Gerlitz 	}
2526e77834ecSOr Gerlitz 
2527e3a2b7edSAmir Vadai 	return 0;
2528e3a2b7edSAmir Vadai }
2529e3a2b7edSAmir Vadai 
2530de0af0bfSRoi Dayan static int parse_cls_flower(struct mlx5e_priv *priv,
253165ba8fb7SOr Gerlitz 			    struct mlx5e_tc_flow *flow,
2532de0af0bfSRoi Dayan 			    struct mlx5_flow_spec *spec,
2533f9e30088SPablo Neira Ayuso 			    struct flow_cls_offload *f,
253454c177caSOz Shlomo 			    struct net_device *filter_dev)
2535de0af0bfSRoi Dayan {
253693b3586eSHuy Nguyen 	u8 inner_match_level, outer_match_level, non_tunnel_match_level;
2537e98bedf5SEli Britstein 	struct netlink_ext_ack *extack = f->common.extack;
2538de0af0bfSRoi Dayan 	struct mlx5_core_dev *dev = priv->mdev;
2539de0af0bfSRoi Dayan 	struct mlx5_eswitch *esw = dev->priv.eswitch;
25401d447a39SSaeed Mahameed 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
25411d447a39SSaeed Mahameed 	struct mlx5_eswitch_rep *rep;
2542226f2ca3SVlad Buslov 	bool is_eswitch_flow;
2543de0af0bfSRoi Dayan 	int err;
2544de0af0bfSRoi Dayan 
254593b3586eSHuy Nguyen 	inner_match_level = MLX5_MATCH_NONE;
254693b3586eSHuy Nguyen 	outer_match_level = MLX5_MATCH_NONE;
254793b3586eSHuy Nguyen 
25480a7fcb78SPaul Blakey 	err = __parse_cls_flower(priv, flow, spec, f, filter_dev,
25490a7fcb78SPaul Blakey 				 &inner_match_level, &outer_match_level);
255093b3586eSHuy Nguyen 	non_tunnel_match_level = (inner_match_level == MLX5_MATCH_NONE) ?
255193b3586eSHuy Nguyen 				 outer_match_level : inner_match_level;
2552de0af0bfSRoi Dayan 
2553226f2ca3SVlad Buslov 	is_eswitch_flow = mlx5e_is_eswitch_flow(flow);
2554226f2ca3SVlad Buslov 	if (!err && is_eswitch_flow) {
25551d447a39SSaeed Mahameed 		rep = rpriv->rep;
2556b05af6aaSBodong Wang 		if (rep->vport != MLX5_VPORT_UPLINK &&
25571d447a39SSaeed Mahameed 		    (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
255893b3586eSHuy Nguyen 		    esw->offloads.inline_mode < non_tunnel_match_level)) {
2559e98bedf5SEli Britstein 			NL_SET_ERR_MSG_MOD(extack,
2560e98bedf5SEli Britstein 					   "Flow is not offloaded due to min inline setting");
2561de0af0bfSRoi Dayan 			netdev_warn(priv->netdev,
2562de0af0bfSRoi Dayan 				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
256393b3586eSHuy Nguyen 				    non_tunnel_match_level, esw->offloads.inline_mode);
2564de0af0bfSRoi Dayan 			return -EOPNOTSUPP;
2565de0af0bfSRoi Dayan 		}
2566de0af0bfSRoi Dayan 	}
2567de0af0bfSRoi Dayan 
2568226f2ca3SVlad Buslov 	if (is_eswitch_flow) {
256993b3586eSHuy Nguyen 		flow->esw_attr->inner_match_level = inner_match_level;
257093b3586eSHuy Nguyen 		flow->esw_attr->outer_match_level = outer_match_level;
25716363651dSOr Gerlitz 	} else {
257293b3586eSHuy Nguyen 		flow->nic_attr->match_level = non_tunnel_match_level;
25736363651dSOr Gerlitz 	}
257438aa51c1SOr Gerlitz 
2575de0af0bfSRoi Dayan 	return err;
2576de0af0bfSRoi Dayan }
2577de0af0bfSRoi Dayan 
/* Accumulator for tc pedit (packet-mangle) values or masks: one
 * sub-struct per protocol header type a mangle action may rewrite.
 */
struct pedit_headers {
	struct ethhdr  eth;
	struct vlan_hdr vlan;
	struct iphdr   ip4;
	struct ipv6hdr ip6;
	struct tcphdr  tcp;
	struct udphdr  udp;
};
2586d79b6df6SOr Gerlitz 
/* Per-command (set vs. add) aggregation of parsed pedit actions:
 * the values to write, the masks marking which bits, and a running
 * count of pedit actions folded in.
 */
struct pedit_headers_action {
	struct pedit_headers	vals;
	struct pedit_headers	masks;
	u32			pedits;
};
2592c500c86bSPablo Neira Ayuso 
/* Byte offset of each mangle header type inside struct pedit_headers. */
static int pedit_header_offsets[] = {
	[FLOW_ACT_MANGLE_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
	[FLOW_ACT_MANGLE_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
	[FLOW_ACT_MANGLE_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
	[FLOW_ACT_MANGLE_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
	[FLOW_ACT_MANGLE_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
};

/* Pointer to the sub-header of type _htype inside pedit_headers _ph. */
#define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])
2602d79b6df6SOr Gerlitz 
2603d79b6df6SOr Gerlitz static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
2604c500c86bSPablo Neira Ayuso 			 struct pedit_headers_action *hdrs)
2605d79b6df6SOr Gerlitz {
2606d79b6df6SOr Gerlitz 	u32 *curr_pmask, *curr_pval;
2607d79b6df6SOr Gerlitz 
2608c500c86bSPablo Neira Ayuso 	curr_pmask = (u32 *)(pedit_header(&hdrs->masks, hdr_type) + offset);
2609c500c86bSPablo Neira Ayuso 	curr_pval  = (u32 *)(pedit_header(&hdrs->vals, hdr_type) + offset);
2610d79b6df6SOr Gerlitz 
2611d79b6df6SOr Gerlitz 	if (*curr_pmask & mask)  /* disallow acting twice on the same location */
2612d79b6df6SOr Gerlitz 		goto out_err;
2613d79b6df6SOr Gerlitz 
2614d79b6df6SOr Gerlitz 	*curr_pmask |= mask;
2615d79b6df6SOr Gerlitz 	*curr_pval  |= (val & mask);
2616d79b6df6SOr Gerlitz 
2617d79b6df6SOr Gerlitz 	return 0;
2618d79b6df6SOr Gerlitz 
2619d79b6df6SOr Gerlitz out_err:
2620d79b6df6SOr Gerlitz 	return -EOPNOTSUPP;
2621d79b6df6SOr Gerlitz }
2622d79b6df6SOr Gerlitz 
/* Describes one rewritable packet field: the HW modify-header field id,
 * its bit size and bit mask, its byte offset inside struct pedit_headers,
 * and the corresponding byte offset in the FTE lyr_2_4 match layout.
 */
struct mlx5_fields {
	u8  field;
	u8  field_bsize;
	u32 field_mask;
	u32 offset;
	u32 match_offset;
};
2630d79b6df6SOr Gerlitz 
/* Build one mlx5_fields entry, tying the HW modify-header field id to
 * its software location in struct pedit_headers and its byte offset in
 * the FTE match criteria/value layout.
 */
#define OFFLOAD(fw_field, field_bsize, field_mask, field, off, match_field) \
		{MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, field_bsize, field_mask, \
		 offsetof(struct pedit_headers, field) + (off), \
		 MLX5_BYTE_OFF(fte_match_set_lyr_2_4, match_field)}
263527c11b6bSEli Britstein 
/* masked values are the same and there are no rewrites that do not have a
 * match: the rewritten value equals the matched value on the masked bits,
 * and every bit the rewrite touches is also covered by the match mask.
 */
#define SAME_VAL_MASK(type, valp, maskp, matchvalp, matchmaskp) ({ \
	type matchmaskx = *(type *)(matchmaskp); \
	type matchvalx = *(type *)(matchvalp); \
	type maskx = *(type *)(maskp); \
	type valx = *(type *)(valp); \
	\
	(valx & maskx) == (matchvalx & matchmaskx) && !(maskx & (maskx ^ \
								 matchmaskx)); \
})
26482ef86872SEli Britstein 
264927c11b6bSEli Britstein static bool cmp_val_mask(void *valp, void *maskp, void *matchvalp,
265088f30bbcSDmytro Linkin 			 void *matchmaskp, u8 bsize)
265127c11b6bSEli Britstein {
265227c11b6bSEli Britstein 	bool same = false;
265327c11b6bSEli Britstein 
265488f30bbcSDmytro Linkin 	switch (bsize) {
265588f30bbcSDmytro Linkin 	case 8:
26562ef86872SEli Britstein 		same = SAME_VAL_MASK(u8, valp, maskp, matchvalp, matchmaskp);
265727c11b6bSEli Britstein 		break;
265888f30bbcSDmytro Linkin 	case 16:
26592ef86872SEli Britstein 		same = SAME_VAL_MASK(u16, valp, maskp, matchvalp, matchmaskp);
266027c11b6bSEli Britstein 		break;
266188f30bbcSDmytro Linkin 	case 32:
26622ef86872SEli Britstein 		same = SAME_VAL_MASK(u32, valp, maskp, matchvalp, matchmaskp);
266327c11b6bSEli Britstein 		break;
266427c11b6bSEli Britstein 	}
266527c11b6bSEli Britstein 
266627c11b6bSEli Britstein 	return same;
266727c11b6bSEli Britstein }
2668a8e4f0c4SOr Gerlitz 
/* All packet fields the driver can offload a pedit rewrite for,
 * grouped as L2, L3 (IPv4 then IPv6) and L4.
 */
static struct mlx5_fields fields[] = {
	OFFLOAD(DMAC_47_16, 32, U32_MAX, eth.h_dest[0], 0, dmac_47_16),
	OFFLOAD(DMAC_15_0,  16, U16_MAX, eth.h_dest[4], 0, dmac_15_0),
	OFFLOAD(SMAC_47_16, 32, U32_MAX, eth.h_source[0], 0, smac_47_16),
	OFFLOAD(SMAC_15_0,  16, U16_MAX, eth.h_source[4], 0, smac_15_0),
	OFFLOAD(ETHERTYPE,  16, U16_MAX, eth.h_proto, 0, ethertype),
	OFFLOAD(FIRST_VID,  16, U16_MAX, vlan.h_vlan_TCI, 0, first_vid),

	/* dscp is the high 6 bits of the IPv4 tos byte, hence mask 0xfc */
	OFFLOAD(IP_DSCP, 8,    0xfc, ip4.tos,   0, ip_dscp),
	OFFLOAD(IP_TTL,  8,  U8_MAX, ip4.ttl,   0, ttl_hoplimit),
	OFFLOAD(SIPV4,  32, U32_MAX, ip4.saddr, 0, src_ipv4_src_ipv6.ipv4_layout.ipv4),
	OFFLOAD(DIPV4,  32, U32_MAX, ip4.daddr, 0, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),

	/* IPv6 addresses are rewritten as four independent 32-bit words */
	OFFLOAD(SIPV6_127_96, 32, U32_MAX, ip6.saddr.s6_addr32[0], 0,
		src_ipv4_src_ipv6.ipv6_layout.ipv6[0]),
	OFFLOAD(SIPV6_95_64,  32, U32_MAX, ip6.saddr.s6_addr32[1], 0,
		src_ipv4_src_ipv6.ipv6_layout.ipv6[4]),
	OFFLOAD(SIPV6_63_32,  32, U32_MAX, ip6.saddr.s6_addr32[2], 0,
		src_ipv4_src_ipv6.ipv6_layout.ipv6[8]),
	OFFLOAD(SIPV6_31_0,   32, U32_MAX, ip6.saddr.s6_addr32[3], 0,
		src_ipv4_src_ipv6.ipv6_layout.ipv6[12]),
	OFFLOAD(DIPV6_127_96, 32, U32_MAX, ip6.daddr.s6_addr32[0], 0,
		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[0]),
	OFFLOAD(DIPV6_95_64,  32, U32_MAX, ip6.daddr.s6_addr32[1], 0,
		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[4]),
	OFFLOAD(DIPV6_63_32,  32, U32_MAX, ip6.daddr.s6_addr32[2], 0,
		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[8]),
	OFFLOAD(DIPV6_31_0,   32, U32_MAX, ip6.daddr.s6_addr32[3], 0,
		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]),
	OFFLOAD(IPV6_HOPLIMIT, 8,  U8_MAX, ip6.hop_limit, 0, ttl_hoplimit),

	OFFLOAD(TCP_SPORT, 16, U16_MAX, tcp.source,  0, tcp_sport),
	OFFLOAD(TCP_DPORT, 16, U16_MAX, tcp.dest,    0, tcp_dport),
	/* in linux iphdr tcp_flags is 8 bits long */
	OFFLOAD(TCP_FLAGS,  8,  U8_MAX, tcp.ack_seq, 5, tcp_flags),

	OFFLOAD(UDP_SPORT, 16, U16_MAX, udp.source, 0, udp_sport),
	OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest,   0, udp_dport),
};
2708d79b6df6SOr Gerlitz 
/* Translate the accumulated set/add pedit masks and values in @hdrs into
 * HW modify-header actions appended to parse_attr->mod_hdr_acts.
 *
 * For each offloadable field (see fields[]): reject a field that is both
 * set and added to; skip a set that only re-writes what the flow already
 * matches on, and an add of zero; require the rewritten bits to form one
 * contiguous run; then emit a single set/add action for it.  Consumed
 * mask bits are cleared from @hdrs so the caller can detect leftovers
 * (i.e. attempts to rewrite unsupported fields).
 *
 * Returns 0 on success or a negative errno (-EOPNOTSUPP for unsupported
 * rewrite shapes, or the error from growing the action buffer).
 */
static int offload_pedit_fields(struct mlx5e_priv *priv,
				int namespace,
				struct pedit_headers_action *hdrs,
				struct mlx5e_tc_flow_parse_attr *parse_attr,
				u32 *action_flags,
				struct netlink_ext_ack *extack)
{
	struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
	int i, action_size, first, last, next_z;
	void *headers_c, *headers_v, *action, *vals_p;
	u32 *s_masks_p, *a_masks_p, s_mask, a_mask;
	struct mlx5e_tc_mod_hdr_acts *mod_acts;
	struct mlx5_fields *f;
	unsigned long mask;
	__be32 mask_be32;
	__be16 mask_be16;
	int err;
	u8 cmd;

	mod_acts = &parse_attr->mod_hdr_acts;
	/* inner vs. outer headers depending on whether we decap first */
	headers_c = get_match_headers_criteria(*action_flags, &parse_attr->spec);
	headers_v = get_match_headers_value(*action_flags, &parse_attr->spec);

	/* hdrs[0] holds the "set" command, hdrs[1] the "add" command */
	set_masks = &hdrs[0].masks;
	add_masks = &hdrs[1].masks;
	set_vals = &hdrs[0].vals;
	add_vals = &hdrs[1].vals;

	action_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto);

	for (i = 0; i < ARRAY_SIZE(fields); i++) {
		bool skip;

		f = &fields[i];
		/* avoid seeing bits set from previous iterations */
		s_mask = 0;
		a_mask = 0;

		s_masks_p = (void *)set_masks + f->offset;
		a_masks_p = (void *)add_masks + f->offset;

		s_mask = *s_masks_p & f->field_mask;
		a_mask = *a_masks_p & f->field_mask;

		if (!s_mask && !a_mask) /* nothing to offload here */
			continue;

		if (s_mask && a_mask) {
			NL_SET_ERR_MSG_MOD(extack,
					   "can't set and add to the same HW field");
			printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field);
			return -EOPNOTSUPP;
		}

		skip = false;
		if (s_mask) {
			void *match_mask = headers_c + f->match_offset;
			void *match_val = headers_v + f->match_offset;

			cmd  = MLX5_ACTION_TYPE_SET;
			mask = s_mask;
			vals_p = (void *)set_vals + f->offset;
			/* don't rewrite if we have a match on the same value */
			if (cmp_val_mask(vals_p, s_masks_p, match_val,
					 match_mask, f->field_bsize))
				skip = true;
			/* clear to denote we consumed this field */
			*s_masks_p &= ~f->field_mask;
		} else {
			cmd  = MLX5_ACTION_TYPE_ADD;
			mask = a_mask;
			vals_p = (void *)add_vals + f->offset;
			/* add 0 is no change */
			if ((*(u32 *)vals_p & f->field_mask) == 0)
				skip = true;
			/* clear to denote we consumed this field */
			*a_masks_p &= ~f->field_mask;
		}
		if (skip)
			continue;

		/* byte-swap the network-order mask so the find_*_bit()
		 * helpers (which scan a host unsigned long) see it correctly
		 */
		if (f->field_bsize == 32) {
			mask_be32 = (__be32)mask;
			mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
		} else if (f->field_bsize == 16) {
			mask_be32 = (__be32)mask;
			mask_be16 = *(__be16 *)&mask_be32;
			mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
		}

		/* the rewritten bits must be one contiguous run: reject a
		 * mask that has a zero gap between its first and last set bit
		 */
		first = find_first_bit(&mask, f->field_bsize);
		next_z = find_next_zero_bit(&mask, f->field_bsize, first);
		last  = find_last_bit(&mask, f->field_bsize);
		if (first < next_z && next_z < last) {
			NL_SET_ERR_MSG_MOD(extack,
					   "rewrite of few sub-fields isn't supported");
			printk(KERN_WARNING "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n",
			       mask);
			return -EOPNOTSUPP;
		}

		err = alloc_mod_hdr_actions(priv->mdev, namespace, mod_acts);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "too many pedit actions, can't offload");
			mlx5_core_warn(priv->mdev,
				       "mlx5: parsed %d pedit actions, can't do more\n",
				       mod_acts->num_actions);
			return err;
		}

		action = mod_acts->actions +
			 (mod_acts->num_actions * action_size);
		MLX5_SET(set_action_in, action, action_type, cmd);
		MLX5_SET(set_action_in, action, field, f->field);

		if (cmd == MLX5_ACTION_TYPE_SET) {
			int start;

			/* if field is bit sized it can start not from first bit */
			start = find_first_bit((unsigned long *)&f->field_mask,
					       f->field_bsize);

			MLX5_SET(set_action_in, action, offset, first - start);
			/* length is num of bits to be written, zero means length of 32 */
			MLX5_SET(set_action_in, action, length, (last - first + 1));
		}

		/* shift the value down so the data field is right-aligned */
		if (f->field_bsize == 32)
			MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p) >> first);
		else if (f->field_bsize == 16)
			MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p) >> first);
		else if (f->field_bsize == 8)
			MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first);

		++mod_acts->num_actions;
	}

	return 0;
}
2849d79b6df6SOr Gerlitz 
28502cc1cb1dSTonghao Zhang static int mlx5e_flow_namespace_max_modify_action(struct mlx5_core_dev *mdev,
28512cc1cb1dSTonghao Zhang 						  int namespace)
28522cc1cb1dSTonghao Zhang {
28532cc1cb1dSTonghao Zhang 	if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
28542cc1cb1dSTonghao Zhang 		return MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, max_modify_header_actions);
28552cc1cb1dSTonghao Zhang 	else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
28562cc1cb1dSTonghao Zhang 		return MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_modify_header_actions);
28572cc1cb1dSTonghao Zhang }
28582cc1cb1dSTonghao Zhang 
/* Ensure @mod_hdr_acts has room for at least one more modify-header
 * action, growing the buffer geometrically (start at 1 slot, then
 * double) up to the HW limit for @namespace.
 *
 * Returns 0 when free space is available, -ENOSPC when already at the
 * HW maximum, or -ENOMEM on allocation failure (the old buffer stays
 * valid in that case).
 */
int alloc_mod_hdr_actions(struct mlx5_core_dev *mdev,
			  int namespace,
			  struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
{
	int action_size, new_num_actions, max_hw_actions;
	size_t new_sz, old_sz;
	void *ret;

	/* fast path: still room in the current buffer */
	if (mod_hdr_acts->num_actions < mod_hdr_acts->max_actions)
		return 0;

	action_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto);

	max_hw_actions = mlx5e_flow_namespace_max_modify_action(mdev,
								namespace);
	new_num_actions = min(max_hw_actions,
			      mod_hdr_acts->actions ?
			      mod_hdr_acts->max_actions * 2 : 1);
	/* doubling was capped at the HW limit and we're already there */
	if (mod_hdr_acts->max_actions == new_num_actions)
		return -ENOSPC;

	new_sz = action_size * new_num_actions;
	old_sz = mod_hdr_acts->max_actions * action_size;
	ret = krealloc(mod_hdr_acts->actions, new_sz, GFP_KERNEL);
	if (!ret)
		return -ENOMEM;

	/* krealloc() does not zero the grown tail; clear it explicitly */
	memset(ret + old_sz, 0, new_sz - old_sz);
	mod_hdr_acts->actions = ret;
	mod_hdr_acts->max_actions = new_num_actions;

	return 0;
}
2892d79b6df6SOr Gerlitz 
28936ae4a6a5SPaul Blakey void dealloc_mod_hdr_actions(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
28946ae4a6a5SPaul Blakey {
28956ae4a6a5SPaul Blakey 	kfree(mod_hdr_acts->actions);
28966ae4a6a5SPaul Blakey 	mod_hdr_acts->actions = NULL;
28976ae4a6a5SPaul Blakey 	mod_hdr_acts->num_actions = 0;
28986ae4a6a5SPaul Blakey 	mod_hdr_acts->max_actions = 0;
28996ae4a6a5SPaul Blakey }
29006ae4a6a5SPaul Blakey 
2901d79b6df6SOr Gerlitz static const struct pedit_headers zero_masks = {};
2902d79b6df6SOr Gerlitz 
2903d79b6df6SOr Gerlitz static int parse_tc_pedit_action(struct mlx5e_priv *priv,
290473867881SPablo Neira Ayuso 				 const struct flow_action_entry *act, int namespace,
2905c500c86bSPablo Neira Ayuso 				 struct pedit_headers_action *hdrs,
2906e98bedf5SEli Britstein 				 struct netlink_ext_ack *extack)
2907d79b6df6SOr Gerlitz {
290873867881SPablo Neira Ayuso 	u8 cmd = (act->id == FLOW_ACTION_MANGLE) ? 0 : 1;
290973867881SPablo Neira Ayuso 	int err = -EOPNOTSUPP;
2910d79b6df6SOr Gerlitz 	u32 mask, val, offset;
291173867881SPablo Neira Ayuso 	u8 htype;
2912d79b6df6SOr Gerlitz 
291373867881SPablo Neira Ayuso 	htype = act->mangle.htype;
2914d79b6df6SOr Gerlitz 	err = -EOPNOTSUPP; /* can't be all optimistic */
2915d79b6df6SOr Gerlitz 
291673867881SPablo Neira Ayuso 	if (htype == FLOW_ACT_MANGLE_UNSPEC) {
291773867881SPablo Neira Ayuso 		NL_SET_ERR_MSG_MOD(extack, "legacy pedit isn't offloaded");
2918d79b6df6SOr Gerlitz 		goto out_err;
2919d79b6df6SOr Gerlitz 	}
2920d79b6df6SOr Gerlitz 
29212cc1cb1dSTonghao Zhang 	if (!mlx5e_flow_namespace_max_modify_action(priv->mdev, namespace)) {
29222cc1cb1dSTonghao Zhang 		NL_SET_ERR_MSG_MOD(extack,
29232cc1cb1dSTonghao Zhang 				   "The pedit offload action is not supported");
29242cc1cb1dSTonghao Zhang 		goto out_err;
29252cc1cb1dSTonghao Zhang 	}
29262cc1cb1dSTonghao Zhang 
292773867881SPablo Neira Ayuso 	mask = act->mangle.mask;
292873867881SPablo Neira Ayuso 	val = act->mangle.val;
292973867881SPablo Neira Ayuso 	offset = act->mangle.offset;
2930d79b6df6SOr Gerlitz 
2931c500c86bSPablo Neira Ayuso 	err = set_pedit_val(htype, ~mask, val, offset, &hdrs[cmd]);
2932d79b6df6SOr Gerlitz 	if (err)
2933d79b6df6SOr Gerlitz 		goto out_err;
2934c500c86bSPablo Neira Ayuso 
2935c500c86bSPablo Neira Ayuso 	hdrs[cmd].pedits++;
2936d79b6df6SOr Gerlitz 
2937c500c86bSPablo Neira Ayuso 	return 0;
2938c500c86bSPablo Neira Ayuso out_err:
2939c500c86bSPablo Neira Ayuso 	return err;
2940c500c86bSPablo Neira Ayuso }
2941c500c86bSPablo Neira Ayuso 
/* Convert all parsed pedit commands in @hdrs into HW modify-header
 * actions, then verify every requested mask was consumed; any residue
 * means an unsupported field was targeted.  On any failure the partially
 * built modify-header action buffer is released.
 */
static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
				 struct mlx5e_tc_flow_parse_attr *parse_attr,
				 struct pedit_headers_action *hdrs,
				 u32 *action_flags,
				 struct netlink_ext_ack *extack)
{
	struct pedit_headers *cmd_masks;
	int err;
	u8 cmd;

	err = offload_pedit_fields(priv, namespace, hdrs, parse_attr,
				   action_flags, extack);
	if (err < 0)
		goto out_dealloc_parsed_actions;

	/* offload_pedit_fields() cleared the bits it consumed, so any
	 * non-zero mask left here is a field we cannot offload
	 */
	for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
		cmd_masks = &hdrs[cmd].masks;
		if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
			NL_SET_ERR_MSG_MOD(extack,
					   "attempt to offload an unsupported field");
			netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd);
			print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
				       16, 1, cmd_masks, sizeof(zero_masks), true);
			err = -EOPNOTSUPP;
			goto out_dealloc_parsed_actions;
		}
	}

	return 0;

out_dealloc_parsed_actions:
	dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
	return err;
}
2976d79b6df6SOr Gerlitz 
2977e98bedf5SEli Britstein static bool csum_offload_supported(struct mlx5e_priv *priv,
2978e98bedf5SEli Britstein 				   u32 action,
2979e98bedf5SEli Britstein 				   u32 update_flags,
2980e98bedf5SEli Britstein 				   struct netlink_ext_ack *extack)
298126c02749SOr Gerlitz {
298226c02749SOr Gerlitz 	u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
298326c02749SOr Gerlitz 			 TCA_CSUM_UPDATE_FLAG_UDP;
298426c02749SOr Gerlitz 
298526c02749SOr Gerlitz 	/*  The HW recalcs checksums only if re-writing headers */
298626c02749SOr Gerlitz 	if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
2987e98bedf5SEli Britstein 		NL_SET_ERR_MSG_MOD(extack,
2988e98bedf5SEli Britstein 				   "TC csum action is only offloaded with pedit");
298926c02749SOr Gerlitz 		netdev_warn(priv->netdev,
299026c02749SOr Gerlitz 			    "TC csum action is only offloaded with pedit\n");
299126c02749SOr Gerlitz 		return false;
299226c02749SOr Gerlitz 	}
299326c02749SOr Gerlitz 
299426c02749SOr Gerlitz 	if (update_flags & ~prot_flags) {
2995e98bedf5SEli Britstein 		NL_SET_ERR_MSG_MOD(extack,
2996e98bedf5SEli Britstein 				   "can't offload TC csum action for some header/s");
299726c02749SOr Gerlitz 		netdev_warn(priv->netdev,
299826c02749SOr Gerlitz 			    "can't offload TC csum action for some header/s - flags %#x\n",
299926c02749SOr Gerlitz 			    update_flags);
300026c02749SOr Gerlitz 		return false;
300126c02749SOr Gerlitz 	}
300226c02749SOr Gerlitz 
300326c02749SOr Gerlitz 	return true;
300426c02749SOr Gerlitz }
300526c02749SOr Gerlitz 
/* Layout of the 4-byte IPv4 header word starting at the ttl field.
 * Overlaid on an inverted pedit mask to tell a pure ttl re-write apart
 * from a modification that also touches protocol/check.
 */
struct ip_ttl_word {
	__u8	ttl;
	__u8	protocol;
	__sum16	check;
};
30118998576bSDmytro Linkin 
/* Layout of the 4-byte IPv6 header word starting at payload_len.
 * Overlaid on an inverted pedit mask to tell a pure hop_limit re-write
 * apart from a modification that also touches payload_len/nexthdr.
 */
struct ipv6_hoplimit_word {
	__be16	payload_len;
	__u8	nexthdr;
	__u8	hop_limit;
};
30178998576bSDmytro Linkin 
/* Validate one pedit mangle/add entry against offload restrictions.
 *
 * Sets *modify_ip_header when the entry modifies any IPv4/IPv6 header
 * field other than ttl/hop_limit, by overlaying the ip_ttl_word /
 * ipv6_hoplimit_word layouts on the inverted 32-bit mask.
 *
 * When a CT action accompanies the flow (@ct_flow), re-writes of
 * IPv4/IPv6 addresses or TCP/UDP ports are rejected with -EOPNOTSUPP;
 * otherwise returns 0.
 */
static int is_action_keys_supported(const struct flow_action_entry *act,
				    bool ct_flow, bool *modify_ip_header,
				    struct netlink_ext_ack *extack)
{
	u32 mask, offset;
	u8 htype;

	htype = act->mangle.htype;
	offset = act->mangle.offset;
	/* Invert: bits set in 'mask' are the bits the pedit modifies */
	mask = ~act->mangle.mask;
	/* For IPv4 & IPv6 header check 4 byte word,
	 * to determine that modified fields
	 * are NOT ttl & hop_limit only.
	 */
	if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4) {
		/* NOTE(review): overlays a struct on the host-order u32;
		 * assumes the pedit mask bytes are laid out in header
		 * (wire) order - confirm against pedit key packing.
		 */
		struct ip_ttl_word *ttl_word =
			(struct ip_ttl_word *)&mask;

		if (offset != offsetof(struct iphdr, ttl) ||
		    ttl_word->protocol ||
		    ttl_word->check) {
			*modify_ip_header = true;
		}

		/* saddr/daddr re-write clashes with CT register usage */
		if (ct_flow && offset >= offsetof(struct iphdr, saddr)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "can't offload re-write of ipv4 address with action ct");
			return -EOPNOTSUPP;
		}
	} else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
		struct ipv6_hoplimit_word *hoplimit_word =
			(struct ipv6_hoplimit_word *)&mask;

		if (offset != offsetof(struct ipv6hdr, payload_len) ||
		    hoplimit_word->payload_len ||
		    hoplimit_word->nexthdr) {
			*modify_ip_header = true;
		}

		if (ct_flow && offset >= offsetof(struct ipv6hdr, saddr)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "can't offload re-write of ipv6 address with action ct");
			return -EOPNOTSUPP;
		}
	} else if (ct_flow && (htype == FLOW_ACT_MANGLE_HDR_TYPE_TCP ||
			       htype == FLOW_ACT_MANGLE_HDR_TYPE_UDP)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "can't offload re-write of transport header ports with action ct");
		return -EOPNOTSUPP;
	}

	return 0;
}
30718998576bSDmytro Linkin 
3072bdd66ac0SOr Gerlitz static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
307373867881SPablo Neira Ayuso 					  struct flow_action *flow_action,
30744c3844d9SPaul Blakey 					  u32 actions, bool ct_flow,
3075e98bedf5SEli Britstein 					  struct netlink_ext_ack *extack)
3076bdd66ac0SOr Gerlitz {
307773867881SPablo Neira Ayuso 	const struct flow_action_entry *act;
3078bdd66ac0SOr Gerlitz 	bool modify_ip_header;
3079bdd66ac0SOr Gerlitz 	void *headers_v;
3080bdd66ac0SOr Gerlitz 	u16 ethertype;
30818998576bSDmytro Linkin 	u8 ip_proto;
30824c3844d9SPaul Blakey 	int i, err;
3083bdd66ac0SOr Gerlitz 
30848377629eSEli Britstein 	headers_v = get_match_headers_value(actions, spec);
3085bdd66ac0SOr Gerlitz 	ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
3086bdd66ac0SOr Gerlitz 
3087bdd66ac0SOr Gerlitz 	/* for non-IP we only re-write MACs, so we're okay */
3088bdd66ac0SOr Gerlitz 	if (ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)
3089bdd66ac0SOr Gerlitz 		goto out_ok;
3090bdd66ac0SOr Gerlitz 
3091bdd66ac0SOr Gerlitz 	modify_ip_header = false;
309273867881SPablo Neira Ayuso 	flow_action_for_each(i, act, flow_action) {
309373867881SPablo Neira Ayuso 		if (act->id != FLOW_ACTION_MANGLE &&
309473867881SPablo Neira Ayuso 		    act->id != FLOW_ACTION_ADD)
3095bdd66ac0SOr Gerlitz 			continue;
3096bdd66ac0SOr Gerlitz 
30974c3844d9SPaul Blakey 		err = is_action_keys_supported(act, ct_flow,
30984c3844d9SPaul Blakey 					       &modify_ip_header, extack);
30994c3844d9SPaul Blakey 		if (err)
31004c3844d9SPaul Blakey 			return err;
3101bdd66ac0SOr Gerlitz 	}
3102bdd66ac0SOr Gerlitz 
3103bdd66ac0SOr Gerlitz 	ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
31041ccef350SJianbo Liu 	if (modify_ip_header && ip_proto != IPPROTO_TCP &&
31051ccef350SJianbo Liu 	    ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
3106e98bedf5SEli Britstein 		NL_SET_ERR_MSG_MOD(extack,
3107e98bedf5SEli Britstein 				   "can't offload re-write of non TCP/UDP");
3108bdd66ac0SOr Gerlitz 		pr_info("can't offload re-write of ip proto %d\n", ip_proto);
3109bdd66ac0SOr Gerlitz 		return false;
3110bdd66ac0SOr Gerlitz 	}
3111bdd66ac0SOr Gerlitz 
3112bdd66ac0SOr Gerlitz out_ok:
3113bdd66ac0SOr Gerlitz 	return true;
3114bdd66ac0SOr Gerlitz }
3115bdd66ac0SOr Gerlitz 
3116bdd66ac0SOr Gerlitz static bool actions_match_supported(struct mlx5e_priv *priv,
311773867881SPablo Neira Ayuso 				    struct flow_action *flow_action,
3118bdd66ac0SOr Gerlitz 				    struct mlx5e_tc_flow_parse_attr *parse_attr,
3119e98bedf5SEli Britstein 				    struct mlx5e_tc_flow *flow,
3120e98bedf5SEli Britstein 				    struct netlink_ext_ack *extack)
3121bdd66ac0SOr Gerlitz {
3122d0645b37SRoi Dayan 	bool ct_flow;
3123bdd66ac0SOr Gerlitz 	u32 actions;
3124bdd66ac0SOr Gerlitz 
31254c3844d9SPaul Blakey 	ct_flow = flow_flag_test(flow, CT);
31264c3844d9SPaul Blakey 	if (mlx5e_is_eswitch_flow(flow)) {
3127bdd66ac0SOr Gerlitz 		actions = flow->esw_attr->action;
31284c3844d9SPaul Blakey 
31294c3844d9SPaul Blakey 		if (flow->esw_attr->split_count && ct_flow) {
31304c3844d9SPaul Blakey 			/* All registers used by ct are cleared when using
31314c3844d9SPaul Blakey 			 * split rules.
31324c3844d9SPaul Blakey 			 */
31334c3844d9SPaul Blakey 			NL_SET_ERR_MSG_MOD(extack,
31344c3844d9SPaul Blakey 					   "Can't offload mirroring with action ct");
313549397b80SDan Carpenter 			return false;
31364c3844d9SPaul Blakey 		}
31374c3844d9SPaul Blakey 	} else {
3138bdd66ac0SOr Gerlitz 		actions = flow->nic_attr->action;
31394c3844d9SPaul Blakey 	}
3140bdd66ac0SOr Gerlitz 
3141bdd66ac0SOr Gerlitz 	if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
314273867881SPablo Neira Ayuso 		return modify_header_match_supported(&parse_attr->spec,
3143a655fe9fSDavid S. Miller 						     flow_action, actions,
31444c3844d9SPaul Blakey 						     ct_flow, extack);
3145bdd66ac0SOr Gerlitz 
3146bdd66ac0SOr Gerlitz 	return true;
3147bdd66ac0SOr Gerlitz }
3148bdd66ac0SOr Gerlitz 
31495c65c564SOr Gerlitz static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
31505c65c564SOr Gerlitz {
31515c65c564SOr Gerlitz 	struct mlx5_core_dev *fmdev, *pmdev;
3152816f6706SOr Gerlitz 	u64 fsystem_guid, psystem_guid;
31535c65c564SOr Gerlitz 
31545c65c564SOr Gerlitz 	fmdev = priv->mdev;
31555c65c564SOr Gerlitz 	pmdev = peer_priv->mdev;
31565c65c564SOr Gerlitz 
315759c9d35eSAlaa Hleihel 	fsystem_guid = mlx5_query_nic_system_image_guid(fmdev);
315859c9d35eSAlaa Hleihel 	psystem_guid = mlx5_query_nic_system_image_guid(pmdev);
31595c65c564SOr Gerlitz 
3160816f6706SOr Gerlitz 	return (fsystem_guid == psystem_guid);
31615c65c564SOr Gerlitz }
31625c65c564SOr Gerlitz 
/* Translate a VLAN modify action into an equivalent pedit of the TCI
 * word in the VLAN ethernet header.
 *
 * Requires the flow to match on a VLAN tag (cvlan_tag) and the matched
 * prio to equal the requested one - only the VID bits are re-written.
 * On success appends the synthetic pedit to @hdrs and sets MOD_HDR in
 * *action; returns 0 or a negative errno.
 */
static int add_vlan_rewrite_action(struct mlx5e_priv *priv, int namespace,
				   const struct flow_action_entry *act,
				   struct mlx5e_tc_flow_parse_attr *parse_attr,
				   struct pedit_headers_action *hdrs,
				   u32 *action, struct netlink_ext_ack *extack)
{
	u16 mask16 = VLAN_VID_MASK;
	u16 val16 = act->vlan.vid & VLAN_VID_MASK;
	/* Synthetic pedit entry re-writing only the VID bits of the TCI.
	 * mangle.mask has modified bits CLEARED, hence the ~ on mask16.
	 */
	const struct flow_action_entry pedit_act = {
		.id = FLOW_ACTION_MANGLE,
		.mangle.htype = FLOW_ACT_MANGLE_HDR_TYPE_ETH,
		.mangle.offset = offsetof(struct vlan_ethhdr, h_vlan_TCI),
		.mangle.mask = ~(u32)be16_to_cpu(*(__be16 *)&mask16),
		.mangle.val = (u32)be16_to_cpu(*(__be16 *)&val16),
	};
	u8 match_prio_mask, match_prio_val;
	void *headers_c, *headers_v;
	int err;

	headers_c = get_match_headers_criteria(*action, &parse_attr->spec);
	headers_v = get_match_headers_value(*action, &parse_attr->spec);

	/* The pedit only makes sense when the packet is known to carry a
	 * VLAN tag, i.e. the flow matches on cvlan_tag.
	 */
	if (!(MLX5_GET(fte_match_set_lyr_2_4, headers_c, cvlan_tag) &&
	      MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag))) {
		NL_SET_ERR_MSG_MOD(extack,
				   "VLAN rewrite action must have VLAN protocol match");
		return -EOPNOTSUPP;
	}

	/* Only the VID is re-written; refuse if the action would change prio */
	match_prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
	match_prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
	if (act->vlan.prio != (match_prio_val & match_prio_mask)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Changing VLAN prio is not supported");
		return -EOPNOTSUPP;
	}

	err = parse_tc_pedit_action(priv, &pedit_act, namespace, hdrs, NULL);
	*action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;

	return err;
}
3205bdc837eeSEli Britstein 
32060bac1194SEli Britstein static int
32070bac1194SEli Britstein add_vlan_prio_tag_rewrite_action(struct mlx5e_priv *priv,
32080bac1194SEli Britstein 				 struct mlx5e_tc_flow_parse_attr *parse_attr,
32090bac1194SEli Britstein 				 struct pedit_headers_action *hdrs,
32100bac1194SEli Britstein 				 u32 *action, struct netlink_ext_ack *extack)
32110bac1194SEli Britstein {
32120bac1194SEli Britstein 	const struct flow_action_entry prio_tag_act = {
32130bac1194SEli Britstein 		.vlan.vid = 0,
32140bac1194SEli Britstein 		.vlan.prio =
32150bac1194SEli Britstein 			MLX5_GET(fte_match_set_lyr_2_4,
32160bac1194SEli Britstein 				 get_match_headers_value(*action,
32170bac1194SEli Britstein 							 &parse_attr->spec),
32180bac1194SEli Britstein 				 first_prio) &
32190bac1194SEli Britstein 			MLX5_GET(fte_match_set_lyr_2_4,
32200bac1194SEli Britstein 				 get_match_headers_criteria(*action,
32210bac1194SEli Britstein 							    &parse_attr->spec),
32220bac1194SEli Britstein 				 first_prio),
32230bac1194SEli Britstein 	};
32240bac1194SEli Britstein 
32250bac1194SEli Britstein 	return add_vlan_rewrite_action(priv, MLX5_FLOW_NAMESPACE_FDB,
32260bac1194SEli Britstein 				       &prio_tag_act, parse_attr, hdrs, action,
32270bac1194SEli Britstein 				       extack);
32280bac1194SEli Britstein }
32290bac1194SEli Britstein 
/* Parse the TC action list of a NIC (non-eswitch) flow into mlx5 flow
 * context action flags and attributes on @flow->nic_attr.
 *
 * Handles accept/drop/pedit/vlan-rewrite/csum/redirect(hairpin)/mark;
 * any other action id is rejected with -EOPNOTSUPP. Returns 0 on
 * success or a negative errno.
 */
static int parse_tc_nic_actions(struct mlx5e_priv *priv,
				struct flow_action *flow_action,
				struct mlx5e_tc_flow_parse_attr *parse_attr,
				struct mlx5e_tc_flow *flow,
				struct netlink_ext_ack *extack)
{
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	/* Accumulates SET/ADD pedit keys across the whole action list */
	struct pedit_headers_action hdrs[2] = {};
	const struct flow_action_entry *act;
	u32 action = 0;
	int err, i;

	if (!flow_action_has_entries(flow_action))
		return -EINVAL;

	if (!flow_action_hw_stats_check(flow_action, extack,
					FLOW_ACTION_HW_STATS_DELAYED_BIT))
		return -EOPNOTSUPP;

	attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_ACCEPT:
			action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
				  MLX5_FLOW_CONTEXT_ACTION_COUNT;
			break;
		case FLOW_ACTION_DROP:
			action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
			/* Count dropped packets only if HW supports it */
			if (MLX5_CAP_FLOWTABLE(priv->mdev,
					       flow_table_properties_nic_receive.flow_counter))
				action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
			break;
		case FLOW_ACTION_MANGLE:
		case FLOW_ACTION_ADD:
			err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_KERNEL,
						    hdrs, extack);
			if (err)
				return err;

			action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
				  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			break;
		case FLOW_ACTION_VLAN_MANGLE:
			/* Translated into a pedit of the VLAN TCI word */
			err = add_vlan_rewrite_action(priv,
						      MLX5_FLOW_NAMESPACE_KERNEL,
						      act, parse_attr, hdrs,
						      &action, extack);
			if (err)
				return err;

			break;
		case FLOW_ACTION_CSUM:
			/* csum is implied by header re-write; only validate */
			if (csum_offload_supported(priv, action,
						   act->csum_flags,
						   extack))
				break;

			return -EOPNOTSUPP;
		case FLOW_ACTION_REDIRECT: {
			struct net_device *peer_dev = act->dev;

			/* Redirect between devices on the same HW is
			 * offloaded as a hairpin flow.
			 */
			if (priv->netdev->netdev_ops == peer_dev->netdev_ops &&
			    same_hw_devs(priv, netdev_priv(peer_dev))) {
				parse_attr->mirred_ifindex[0] = peer_dev->ifindex;
				flow_flag_set(flow, HAIRPIN);
				action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
					  MLX5_FLOW_CONTEXT_ACTION_COUNT;
			} else {
				NL_SET_ERR_MSG_MOD(extack,
						   "device is not on same HW, can't offload");
				netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
					    peer_dev->name);
				return -EINVAL;
			}
			}
			break;
		case FLOW_ACTION_MARK: {
			u32 mark = act->mark;

			/* Flow tag delivered with the CQE is 16 bits wide */
			if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Bad flow mark - only 16 bit is supported");
				return -EINVAL;
			}

			attr->flow_tag = mark;
			action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			}
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
			return -EOPNOTSUPP;
		}
	}

	if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
	    hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
		err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL,
					    parse_attr, hdrs, &action, extack);
		if (err)
			return err;
		/* in case all pedit actions are skipped, remove the MOD_HDR
		 * flag.
		 */
		if (parse_attr->mod_hdr_acts.num_actions == 0) {
			action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
			dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
		}
	}

	attr->action = action;
	if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
		return -EOPNOTSUPP;

	return 0;
}
3347e3a2b7edSAmir Vadai 
/* Key identifying a shareable encap entry: the IP tunnel key plus the
 * tunnel implementation (type) of the egress device.
 */
struct encap_key {
	const struct ip_tunnel_key *ip_tun_key;
	struct mlx5e_tc_tunnel *tc_tunnel;
};
33527f1a546eSEli Britstein 
33537f1a546eSEli Britstein static inline int cmp_encap_info(struct encap_key *a,
33547f1a546eSEli Britstein 				 struct encap_key *b)
3355a54e20b4SHadar Hen Zion {
33567f1a546eSEli Britstein 	return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) ||
3357d386939aSYevgeny Kliteynik 	       a->tc_tunnel->tunnel_type != b->tc_tunnel->tunnel_type;
3358a54e20b4SHadar Hen Zion }
3359a54e20b4SHadar Hen Zion 
/* Non-zero when two decap keys differ (byte-wise comparison). */
static inline int cmp_decap_info(struct mlx5e_decap_key *a,
				 struct mlx5e_decap_key *b)
{
	return memcmp(&a->key, &b->key, sizeof(b->key));
}
336514e6b038SEli Cohen 
/* Hash the tunnel key, seeded with the tunnel type, for the encap
 * hash table lookup.
 */
static inline int hash_encap_info(struct encap_key *key)
{
	return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key),
		     key->tc_tunnel->tunnel_type);
}
3371a54e20b4SHadar Hen Zion 
/* Hash the decap key for the decap hash table lookup. */
static inline int hash_decap_info(struct mlx5e_decap_key *key)
{
	return jhash(&key->key, sizeof(key->key), 0);
}
3376a54e20b4SHadar Hen Zion 
3377b1d90e6bSRabie Loulou static bool is_merged_eswitch_dev(struct mlx5e_priv *priv,
3378b1d90e6bSRabie Loulou 				  struct net_device *peer_netdev)
3379b1d90e6bSRabie Loulou {
3380b1d90e6bSRabie Loulou 	struct mlx5e_priv *peer_priv;
3381b1d90e6bSRabie Loulou 
3382b1d90e6bSRabie Loulou 	peer_priv = netdev_priv(peer_netdev);
3383b1d90e6bSRabie Loulou 
3384b1d90e6bSRabie Loulou 	return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) &&
338568931c7dSRoi Dayan 		mlx5e_eswitch_rep(priv->netdev) &&
338668931c7dSRoi Dayan 		mlx5e_eswitch_rep(peer_netdev) &&
338768931c7dSRoi Dayan 		same_hw_devs(priv, peer_priv));
3388b1d90e6bSRabie Loulou }
3389b1d90e6bSRabie Loulou 
/* Take a reference on encap entry @e; returns false when the entry is
 * already on its way to being freed (refcount reached zero).
 */
bool mlx5e_encap_take(struct mlx5e_encap_entry *e)
{
	return refcount_inc_not_zero(&e->refcnt);
}
3394948993f2SVlad Buslov 
/* Take a reference on decap entry @e; returns false when the entry is
 * already on its way to being freed (refcount reached zero).
 */
static bool mlx5e_decap_take(struct mlx5e_decap_entry *e)
{
	return refcount_inc_not_zero(&e->refcnt);
}
339914e6b038SEli Cohen 
/* Look up an encap entry matching @key in the eswitch encap table under
 * bucket @hash_key and take a reference on it.
 *
 * An entry is returned only if its refcount could be raised
 * (mlx5e_encap_take), i.e. it is not concurrently being freed.
 * Returns NULL when no live matching entry exists.
 * NOTE(review): iterates an RCU hash table - callers appear to hold
 * encap_tbl_lock or an RCU read lock; confirm at call sites.
 */
static struct mlx5e_encap_entry *
mlx5e_encap_get(struct mlx5e_priv *priv, struct encap_key *key,
		uintptr_t hash_key)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_encap_entry *e;
	struct encap_key e_key;

	hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
				   encap_hlist, hash_key) {
		e_key.ip_tun_key = &e->tun_info->key;
		e_key.tc_tunnel = e->tunnel;
		if (!cmp_encap_info(&e_key, key) &&
		    mlx5e_encap_take(e))
			return e;
	}

	return NULL;
}
3419948993f2SVlad Buslov 
/* Look up a decap entry matching @key in the eswitch decap table under
 * bucket @hash_key and take a reference on it.
 *
 * An entry is returned only if its refcount could be raised
 * (mlx5e_decap_take). Returns NULL when no live matching entry exists.
 * NOTE(review): iterates an RCU hash table - callers appear to hold
 * decap_tbl_lock or an RCU read lock; confirm at call sites.
 */
static struct mlx5e_decap_entry *
mlx5e_decap_get(struct mlx5e_priv *priv, struct mlx5e_decap_key *key,
		uintptr_t hash_key)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_decap_key r_key;
	struct mlx5e_decap_entry *e;

	hash_for_each_possible_rcu(esw->offloads.decap_tbl, e,
				   hlist, hash_key) {
		r_key = e->key;
		if (!cmp_decap_info(&r_key, key) &&
		    mlx5e_decap_take(e))
			return e;
	}
	return NULL;
}
343714e6b038SEli Cohen 
34382a4b6526SVlad Buslov static struct ip_tunnel_info *dup_tun_info(const struct ip_tunnel_info *tun_info)
34392a4b6526SVlad Buslov {
34402a4b6526SVlad Buslov 	size_t tun_size = sizeof(*tun_info) + tun_info->options_len;
34412a4b6526SVlad Buslov 
34422a4b6526SVlad Buslov 	return kmemdup(tun_info, tun_size, GFP_KERNEL);
34432a4b6526SVlad Buslov }
34442a4b6526SVlad Buslov 
3445554fe75cSDmytro Linkin static bool is_duplicated_encap_entry(struct mlx5e_priv *priv,
3446554fe75cSDmytro Linkin 				      struct mlx5e_tc_flow *flow,
3447554fe75cSDmytro Linkin 				      int out_index,
3448554fe75cSDmytro Linkin 				      struct mlx5e_encap_entry *e,
3449554fe75cSDmytro Linkin 				      struct netlink_ext_ack *extack)
3450554fe75cSDmytro Linkin {
3451554fe75cSDmytro Linkin 	int i;
3452554fe75cSDmytro Linkin 
3453554fe75cSDmytro Linkin 	for (i = 0; i < out_index; i++) {
3454554fe75cSDmytro Linkin 		if (flow->encaps[i].e != e)
3455554fe75cSDmytro Linkin 			continue;
3456554fe75cSDmytro Linkin 		NL_SET_ERR_MSG_MOD(extack, "can't duplicate encap action");
3457554fe75cSDmytro Linkin 		netdev_err(priv->netdev, "can't duplicate encap action\n");
3458554fe75cSDmytro Linkin 		return true;
3459554fe75cSDmytro Linkin 	}
3460554fe75cSDmytro Linkin 
3461554fe75cSDmytro Linkin 	return false;
3462554fe75cSDmytro Linkin }
3463554fe75cSDmytro Linkin 
/* Attach the @out_index'th encap destination of @flow to a (possibly
 * shared) encap entry, creating the entry and building its tunnel
 * headers when no matching one exists yet.
 *
 * On success, *encap_dev is set to the entry's egress device and
 * *encap_valid reports whether the entry already carries a valid
 * packet reformat (MLX5_ENCAP_ENTRY_VALID); when false, offload is
 * completed later by the neigh update path.
 *
 * Concurrency: a new entry is published in the hash table *before* its
 * headers are resolved; res_ready/compl_result let concurrent flows
 * wait for that initialization, and encap_tbl_lock serializes against
 * concurrent neigh updates. Returns 0 or a negative errno.
 */
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow,
			      struct net_device *mirred_dev,
			      int out_index,
			      struct netlink_ext_ack *extack,
			      struct net_device **encap_dev,
			      bool *encap_valid)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	const struct ip_tunnel_info *tun_info;
	struct encap_key key;
	struct mlx5e_encap_entry *e;
	unsigned short family;
	uintptr_t hash_key;
	int err = 0;

	parse_attr = attr->parse_attr;
	tun_info = parse_attr->tun_info[out_index];
	family = ip_tunnel_info_af(tun_info);
	key.ip_tun_key = &tun_info->key;
	key.tc_tunnel = mlx5e_get_tc_tun(mirred_dev);
	if (!key.tc_tunnel) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported tunnel");
		return -EOPNOTSUPP;
	}

	hash_key = hash_encap_info(&key);

	mutex_lock(&esw->offloads.encap_tbl_lock);
	e = mlx5e_encap_get(priv, &key, hash_key);

	/* must verify if encap is valid or not */
	if (e) {
		/* Check that entry was not already attached to this flow */
		if (is_duplicated_encap_entry(priv, flow, out_index, e, extack)) {
			err = -EOPNOTSUPP;
			goto out_err;
		}

		/* Drop the lock while waiting so the initializing flow can
		 * take it to complete the entry.
		 */
		mutex_unlock(&esw->offloads.encap_tbl_lock);
		wait_for_completion(&e->res_ready);

		/* Protect against concurrent neigh update. */
		mutex_lock(&esw->offloads.encap_tbl_lock);
		if (e->compl_result < 0) {
			err = -EREMOTEIO;
			goto out_err;
		}
		goto attach_flow;
	}

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e) {
		err = -ENOMEM;
		goto out_err;
	}

	refcount_set(&e->refcnt, 1);
	init_completion(&e->res_ready);

	/* Own a private copy; parse_attr's tun_info lifetime is separate */
	tun_info = dup_tun_info(tun_info);
	if (!tun_info) {
		err = -ENOMEM;
		goto out_err_init;
	}
	e->tun_info = tun_info;
	err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack);
	if (err)
		goto out_err_init;

	INIT_LIST_HEAD(&e->flows);
	/* Publish before header resolution; waiters block on res_ready */
	hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
	mutex_unlock(&esw->offloads.encap_tbl_lock);

	if (family == AF_INET)
		err = mlx5e_tc_tun_create_header_ipv4(priv, mirred_dev, e);
	else if (family == AF_INET6)
		err = mlx5e_tc_tun_create_header_ipv6(priv, mirred_dev, e);

	/* Protect against concurrent neigh update. */
	mutex_lock(&esw->offloads.encap_tbl_lock);
	complete_all(&e->res_ready);
	if (err) {
		e->compl_result = err;
		goto out_err;
	}
	e->compl_result = 1;

attach_flow:
	flow->encaps[out_index].e = e;
	list_add(&flow->encaps[out_index].list, &e->flows);
	flow->encaps[out_index].index = out_index;
	*encap_dev = e->out_dev;
	if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
		attr->dests[out_index].pkt_reformat = e->pkt_reformat;
		attr->dests[out_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
		*encap_valid = true;
	} else {
		*encap_valid = false;
	}
	mutex_unlock(&esw->offloads.encap_tbl_lock);

	return err;

out_err:
	mutex_unlock(&esw->offloads.encap_tbl_lock);
	if (e)
		mlx5e_encap_put(priv, e);
	return err;

out_err_init:
	/* Entry not yet published in the hash table; free it directly */
	mutex_unlock(&esw->offloads.encap_tbl_lock);
	kfree(tun_info);
	kfree(e);
	return err;
}
3582a54e20b4SHadar Hen Zion 
358314e6b038SEli Cohen static int mlx5e_attach_decap(struct mlx5e_priv *priv,
358414e6b038SEli Cohen 			      struct mlx5e_tc_flow *flow,
358514e6b038SEli Cohen 			      struct netlink_ext_ack *extack)
358614e6b038SEli Cohen {
358714e6b038SEli Cohen 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
358814e6b038SEli Cohen 	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
358914e6b038SEli Cohen 	struct mlx5e_tc_flow_parse_attr *parse_attr;
359014e6b038SEli Cohen 	struct mlx5e_decap_entry *d;
359114e6b038SEli Cohen 	struct mlx5e_decap_key key;
359214e6b038SEli Cohen 	uintptr_t hash_key;
359314e6b038SEli Cohen 	int err;
359414e6b038SEli Cohen 
359514e6b038SEli Cohen 	parse_attr = attr->parse_attr;
359614e6b038SEli Cohen 	if (sizeof(parse_attr->eth) > MLX5_CAP_ESW(priv->mdev, max_encap_header_size)) {
359714e6b038SEli Cohen 		NL_SET_ERR_MSG_MOD(extack,
359814e6b038SEli Cohen 				   "encap header larger than max supported");
359914e6b038SEli Cohen 		return -EOPNOTSUPP;
360014e6b038SEli Cohen 	}
360114e6b038SEli Cohen 
360214e6b038SEli Cohen 	key.key = parse_attr->eth;
360314e6b038SEli Cohen 	hash_key = hash_decap_info(&key);
360414e6b038SEli Cohen 	mutex_lock(&esw->offloads.decap_tbl_lock);
360514e6b038SEli Cohen 	d = mlx5e_decap_get(priv, &key, hash_key);
360614e6b038SEli Cohen 	if (d) {
360714e6b038SEli Cohen 		mutex_unlock(&esw->offloads.decap_tbl_lock);
360814e6b038SEli Cohen 		wait_for_completion(&d->res_ready);
360914e6b038SEli Cohen 		mutex_lock(&esw->offloads.decap_tbl_lock);
361014e6b038SEli Cohen 		if (d->compl_result) {
361114e6b038SEli Cohen 			err = -EREMOTEIO;
361214e6b038SEli Cohen 			goto out_free;
361314e6b038SEli Cohen 		}
361414e6b038SEli Cohen 		goto found;
361514e6b038SEli Cohen 	}
361614e6b038SEli Cohen 
361714e6b038SEli Cohen 	d = kzalloc(sizeof(*d), GFP_KERNEL);
361814e6b038SEli Cohen 	if (!d) {
361914e6b038SEli Cohen 		err = -ENOMEM;
362014e6b038SEli Cohen 		goto out_err;
362114e6b038SEli Cohen 	}
362214e6b038SEli Cohen 
362314e6b038SEli Cohen 	d->key = key;
362414e6b038SEli Cohen 	refcount_set(&d->refcnt, 1);
362514e6b038SEli Cohen 	init_completion(&d->res_ready);
362614e6b038SEli Cohen 	INIT_LIST_HEAD(&d->flows);
362714e6b038SEli Cohen 	hash_add_rcu(esw->offloads.decap_tbl, &d->hlist, hash_key);
362814e6b038SEli Cohen 	mutex_unlock(&esw->offloads.decap_tbl_lock);
362914e6b038SEli Cohen 
363014e6b038SEli Cohen 	d->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
363114e6b038SEli Cohen 						     MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2,
363214e6b038SEli Cohen 						     sizeof(parse_attr->eth),
363314e6b038SEli Cohen 						     &parse_attr->eth,
363414e6b038SEli Cohen 						     MLX5_FLOW_NAMESPACE_FDB);
363514e6b038SEli Cohen 	if (IS_ERR(d->pkt_reformat)) {
363614e6b038SEli Cohen 		err = PTR_ERR(d->pkt_reformat);
363714e6b038SEli Cohen 		d->compl_result = err;
363814e6b038SEli Cohen 	}
363914e6b038SEli Cohen 	mutex_lock(&esw->offloads.decap_tbl_lock);
364014e6b038SEli Cohen 	complete_all(&d->res_ready);
364114e6b038SEli Cohen 	if (err)
364214e6b038SEli Cohen 		goto out_free;
364314e6b038SEli Cohen 
364414e6b038SEli Cohen found:
364514e6b038SEli Cohen 	flow->decap_reformat = d;
364614e6b038SEli Cohen 	attr->decap_pkt_reformat = d->pkt_reformat;
364714e6b038SEli Cohen 	list_add(&flow->l3_to_l2_reformat, &d->flows);
364814e6b038SEli Cohen 	mutex_unlock(&esw->offloads.decap_tbl_lock);
364914e6b038SEli Cohen 	return 0;
365014e6b038SEli Cohen 
365114e6b038SEli Cohen out_free:
365214e6b038SEli Cohen 	mutex_unlock(&esw->offloads.decap_tbl_lock);
365314e6b038SEli Cohen 	mlx5e_decap_put(priv, d);
365414e6b038SEli Cohen 	return err;
365514e6b038SEli Cohen 
365614e6b038SEli Cohen out_err:
365714e6b038SEli Cohen 	mutex_unlock(&esw->offloads.decap_tbl_lock);
365814e6b038SEli Cohen 	return err;
365914e6b038SEli Cohen }
366014e6b038SEli Cohen 
36611482bd3dSJianbo Liu static int parse_tc_vlan_action(struct mlx5e_priv *priv,
366273867881SPablo Neira Ayuso 				const struct flow_action_entry *act,
36631482bd3dSJianbo Liu 				struct mlx5_esw_flow_attr *attr,
36641482bd3dSJianbo Liu 				u32 *action)
36651482bd3dSJianbo Liu {
3666cc495188SJianbo Liu 	u8 vlan_idx = attr->total_vlan;
3667cc495188SJianbo Liu 
3668cc495188SJianbo Liu 	if (vlan_idx >= MLX5_FS_VLAN_DEPTH)
36691482bd3dSJianbo Liu 		return -EOPNOTSUPP;
3670cc495188SJianbo Liu 
367173867881SPablo Neira Ayuso 	switch (act->id) {
367273867881SPablo Neira Ayuso 	case FLOW_ACTION_VLAN_POP:
3673cc495188SJianbo Liu 		if (vlan_idx) {
3674cc495188SJianbo Liu 			if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
3675cc495188SJianbo Liu 								 MLX5_FS_VLAN_DEPTH))
3676cc495188SJianbo Liu 				return -EOPNOTSUPP;
3677cc495188SJianbo Liu 
3678cc495188SJianbo Liu 			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2;
3679cc495188SJianbo Liu 		} else {
3680cc495188SJianbo Liu 			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
3681cc495188SJianbo Liu 		}
368273867881SPablo Neira Ayuso 		break;
368373867881SPablo Neira Ayuso 	case FLOW_ACTION_VLAN_PUSH:
368473867881SPablo Neira Ayuso 		attr->vlan_vid[vlan_idx] = act->vlan.vid;
368573867881SPablo Neira Ayuso 		attr->vlan_prio[vlan_idx] = act->vlan.prio;
368673867881SPablo Neira Ayuso 		attr->vlan_proto[vlan_idx] = act->vlan.proto;
3687cc495188SJianbo Liu 		if (!attr->vlan_proto[vlan_idx])
3688cc495188SJianbo Liu 			attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q);
3689cc495188SJianbo Liu 
3690cc495188SJianbo Liu 		if (vlan_idx) {
3691cc495188SJianbo Liu 			if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
3692cc495188SJianbo Liu 								 MLX5_FS_VLAN_DEPTH))
3693cc495188SJianbo Liu 				return -EOPNOTSUPP;
3694cc495188SJianbo Liu 
3695cc495188SJianbo Liu 			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
3696cc495188SJianbo Liu 		} else {
3697cc495188SJianbo Liu 			if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) &&
369873867881SPablo Neira Ayuso 			    (act->vlan.proto != htons(ETH_P_8021Q) ||
369973867881SPablo Neira Ayuso 			     act->vlan.prio))
3700cc495188SJianbo Liu 				return -EOPNOTSUPP;
3701cc495188SJianbo Liu 
3702cc495188SJianbo Liu 			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
37031482bd3dSJianbo Liu 		}
370473867881SPablo Neira Ayuso 		break;
370573867881SPablo Neira Ayuso 	default:
3706bdc837eeSEli Britstein 		return -EINVAL;
37071482bd3dSJianbo Liu 	}
37081482bd3dSJianbo Liu 
3709cc495188SJianbo Liu 	attr->total_vlan = vlan_idx + 1;
3710cc495188SJianbo Liu 
37111482bd3dSJianbo Liu 	return 0;
37121482bd3dSJianbo Liu }
37131482bd3dSJianbo Liu 
3714278748a9SEli Britstein static int add_vlan_push_action(struct mlx5e_priv *priv,
3715278748a9SEli Britstein 				struct mlx5_esw_flow_attr *attr,
3716278748a9SEli Britstein 				struct net_device **out_dev,
3717278748a9SEli Britstein 				u32 *action)
3718278748a9SEli Britstein {
3719278748a9SEli Britstein 	struct net_device *vlan_dev = *out_dev;
3720278748a9SEli Britstein 	struct flow_action_entry vlan_act = {
3721278748a9SEli Britstein 		.id = FLOW_ACTION_VLAN_PUSH,
3722278748a9SEli Britstein 		.vlan.vid = vlan_dev_vlan_id(vlan_dev),
3723278748a9SEli Britstein 		.vlan.proto = vlan_dev_vlan_proto(vlan_dev),
3724278748a9SEli Britstein 		.vlan.prio = 0,
3725278748a9SEli Britstein 	};
3726278748a9SEli Britstein 	int err;
3727278748a9SEli Britstein 
3728278748a9SEli Britstein 	err = parse_tc_vlan_action(priv, &vlan_act, attr, action);
3729278748a9SEli Britstein 	if (err)
3730278748a9SEli Britstein 		return err;
3731278748a9SEli Britstein 
3732278748a9SEli Britstein 	*out_dev = dev_get_by_index_rcu(dev_net(vlan_dev),
3733278748a9SEli Britstein 					dev_get_iflink(vlan_dev));
3734278748a9SEli Britstein 	if (is_vlan_dev(*out_dev))
3735278748a9SEli Britstein 		err = add_vlan_push_action(priv, attr, out_dev, action);
3736278748a9SEli Britstein 
3737278748a9SEli Britstein 	return err;
3738278748a9SEli Britstein }
3739278748a9SEli Britstein 
374035a605dbSEli Britstein static int add_vlan_pop_action(struct mlx5e_priv *priv,
374135a605dbSEli Britstein 			       struct mlx5_esw_flow_attr *attr,
374235a605dbSEli Britstein 			       u32 *action)
374335a605dbSEli Britstein {
374435a605dbSEli Britstein 	struct flow_action_entry vlan_act = {
374535a605dbSEli Britstein 		.id = FLOW_ACTION_VLAN_POP,
374635a605dbSEli Britstein 	};
374770f478caSDmytro Linkin 	int nest_level, err = 0;
374835a605dbSEli Britstein 
374970f478caSDmytro Linkin 	nest_level = attr->parse_attr->filter_dev->lower_level -
375070f478caSDmytro Linkin 						priv->netdev->lower_level;
375135a605dbSEli Britstein 	while (nest_level--) {
375235a605dbSEli Britstein 		err = parse_tc_vlan_action(priv, &vlan_act, attr, action);
375335a605dbSEli Britstein 		if (err)
375435a605dbSEli Britstein 			return err;
375535a605dbSEli Britstein 	}
375635a605dbSEli Britstein 
375735a605dbSEli Britstein 	return err;
375835a605dbSEli Britstein }
375935a605dbSEli Britstein 
3760f6dc1264SPaul Blakey bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
3761f6dc1264SPaul Blakey 				    struct net_device *out_dev)
3762f6dc1264SPaul Blakey {
3763f6dc1264SPaul Blakey 	if (is_merged_eswitch_dev(priv, out_dev))
3764f6dc1264SPaul Blakey 		return true;
3765f6dc1264SPaul Blakey 
3766f6dc1264SPaul Blakey 	return mlx5e_eswitch_rep(out_dev) &&
3767f6dc1264SPaul Blakey 	       same_hw_devs(priv, netdev_priv(out_dev));
3768f6dc1264SPaul Blakey }
3769f6dc1264SPaul Blakey 
3770554fe75cSDmytro Linkin static bool is_duplicated_output_device(struct net_device *dev,
3771554fe75cSDmytro Linkin 					struct net_device *out_dev,
3772554fe75cSDmytro Linkin 					int *ifindexes, int if_count,
3773554fe75cSDmytro Linkin 					struct netlink_ext_ack *extack)
3774554fe75cSDmytro Linkin {
3775554fe75cSDmytro Linkin 	int i;
3776554fe75cSDmytro Linkin 
3777554fe75cSDmytro Linkin 	for (i = 0; i < if_count; i++) {
3778554fe75cSDmytro Linkin 		if (ifindexes[i] == out_dev->ifindex) {
3779554fe75cSDmytro Linkin 			NL_SET_ERR_MSG_MOD(extack,
3780554fe75cSDmytro Linkin 					   "can't duplicate output to same device");
3781554fe75cSDmytro Linkin 			netdev_err(dev, "can't duplicate output to same device: %s\n",
3782554fe75cSDmytro Linkin 				   out_dev->name);
3783554fe75cSDmytro Linkin 			return true;
3784554fe75cSDmytro Linkin 		}
3785554fe75cSDmytro Linkin 	}
3786554fe75cSDmytro Linkin 
3787554fe75cSDmytro Linkin 	return false;
3788554fe75cSDmytro Linkin }
3789554fe75cSDmytro Linkin 
37902fbbc30dSEli Cohen static int mlx5_validate_goto_chain(struct mlx5_eswitch *esw,
37912fbbc30dSEli Cohen 				    struct mlx5e_tc_flow *flow,
37922fbbc30dSEli Cohen 				    const struct flow_action_entry *act,
37932fbbc30dSEli Cohen 				    u32 actions,
37942fbbc30dSEli Cohen 				    struct netlink_ext_ack *extack)
37952fbbc30dSEli Cohen {
37962fbbc30dSEli Cohen 	u32 max_chain = mlx5_esw_chains_get_chain_range(esw);
37972fbbc30dSEli Cohen 	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
37982fbbc30dSEli Cohen 	bool ft_flow = mlx5e_is_ft_flow(flow);
37992fbbc30dSEli Cohen 	u32 dest_chain = act->chain_index;
38002fbbc30dSEli Cohen 
38012fbbc30dSEli Cohen 	if (ft_flow) {
38022fbbc30dSEli Cohen 		NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported");
38032fbbc30dSEli Cohen 		return -EOPNOTSUPP;
38042fbbc30dSEli Cohen 	}
38052fbbc30dSEli Cohen 
38062fbbc30dSEli Cohen 	if (!mlx5_esw_chains_backwards_supported(esw) &&
38072fbbc30dSEli Cohen 	    dest_chain <= attr->chain) {
38082fbbc30dSEli Cohen 		NL_SET_ERR_MSG_MOD(extack,
38092fbbc30dSEli Cohen 				   "Goto lower numbered chain isn't supported");
38102fbbc30dSEli Cohen 		return -EOPNOTSUPP;
38112fbbc30dSEli Cohen 	}
38122fbbc30dSEli Cohen 	if (dest_chain > max_chain) {
38132fbbc30dSEli Cohen 		NL_SET_ERR_MSG_MOD(extack,
38142fbbc30dSEli Cohen 				   "Requested destination chain is out of supported range");
38152fbbc30dSEli Cohen 		return -EOPNOTSUPP;
38162fbbc30dSEli Cohen 	}
38172fbbc30dSEli Cohen 
38182fbbc30dSEli Cohen 	if (actions & (MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
38192fbbc30dSEli Cohen 		       MLX5_FLOW_CONTEXT_ACTION_DECAP) &&
38202fbbc30dSEli Cohen 	    !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat_and_fwd_to_table)) {
38212fbbc30dSEli Cohen 		NL_SET_ERR_MSG_MOD(extack,
38222fbbc30dSEli Cohen 				   "Goto chain is not allowed if action has reformat or decap");
38232fbbc30dSEli Cohen 		return -EOPNOTSUPP;
38242fbbc30dSEli Cohen 	}
38252fbbc30dSEli Cohen 
38262fbbc30dSEli Cohen 	return 0;
38272fbbc30dSEli Cohen }
38282fbbc30dSEli Cohen 
3829613f53feSEli Cohen static int verify_uplink_forwarding(struct mlx5e_priv *priv,
3830613f53feSEli Cohen 				    struct mlx5e_tc_flow *flow,
3831613f53feSEli Cohen 				    struct net_device *out_dev,
3832613f53feSEli Cohen 				    struct netlink_ext_ack *extack)
3833613f53feSEli Cohen {
3834613f53feSEli Cohen 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3835613f53feSEli Cohen 	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
3836613f53feSEli Cohen 	struct mlx5e_rep_priv *rep_priv;
3837613f53feSEli Cohen 
3838613f53feSEli Cohen 	/* Forwarding non encapsulated traffic between
3839613f53feSEli Cohen 	 * uplink ports is allowed only if
3840613f53feSEli Cohen 	 * termination_table_raw_traffic cap is set.
3841613f53feSEli Cohen 	 *
3842613f53feSEli Cohen 	 * Input vport was stored esw_attr->in_rep.
3843613f53feSEli Cohen 	 * In LAG case, *priv* is the private data of
3844613f53feSEli Cohen 	 * uplink which may be not the input vport.
3845613f53feSEli Cohen 	 */
3846613f53feSEli Cohen 	rep_priv = mlx5e_rep_to_rep_priv(attr->in_rep);
3847613f53feSEli Cohen 
3848613f53feSEli Cohen 	if (!(mlx5e_eswitch_uplink_rep(rep_priv->netdev) &&
3849613f53feSEli Cohen 	      mlx5e_eswitch_uplink_rep(out_dev)))
3850613f53feSEli Cohen 		return 0;
3851613f53feSEli Cohen 
3852613f53feSEli Cohen 	if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev,
3853613f53feSEli Cohen 					termination_table_raw_traffic)) {
3854613f53feSEli Cohen 		NL_SET_ERR_MSG_MOD(extack,
3855613f53feSEli Cohen 				   "devices are both uplink, can't offload forwarding");
3856613f53feSEli Cohen 			pr_err("devices %s %s are both uplink, can't offload forwarding\n",
3857613f53feSEli Cohen 			       priv->netdev->name, out_dev->name);
3858613f53feSEli Cohen 			return -EOPNOTSUPP;
3859613f53feSEli Cohen 	} else if (out_dev != rep_priv->netdev) {
3860613f53feSEli Cohen 		NL_SET_ERR_MSG_MOD(extack,
3861613f53feSEli Cohen 				   "devices are not the same uplink, can't offload forwarding");
3862613f53feSEli Cohen 		pr_err("devices %s %s are both uplink but not the same, can't offload forwarding\n",
3863613f53feSEli Cohen 		       priv->netdev->name, out_dev->name);
3864613f53feSEli Cohen 		return -EOPNOTSUPP;
3865613f53feSEli Cohen 	}
3866613f53feSEli Cohen 	return 0;
3867613f53feSEli Cohen }
3868613f53feSEli Cohen 
/* Parse a TC flow_action list into FDB (eswitch) offload attributes.
 *
 * Walks every action entry, accumulating the MLX5_FLOW_CONTEXT_ACTION_*
 * bits in @action and filling flow->esw_attr (output ports, vlan
 * push/pop slots, pedit headers, dest chain, ...).  After the loop,
 * cross-action constraints are validated (prio-tag mode, pedit
 * no-op elision, goto-chain restrictions, fwd/drop presence, split
 * rule FW support).
 *
 * Loop-carried state: @encap/@info are set by TUNNEL_ENCAP and consumed
 * by the next REDIRECT/MIRRED entry; @mpls_push gates the mirred target
 * to bareudp; @ifindexes/@if_count track already-used output devices.
 *
 * Returns 0 on success, -EINVAL/-EOPNOTSUPP/-ENOMEM (or an error from a
 * helper) on failure.
 */
static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
				struct flow_action *flow_action,
				struct mlx5e_tc_flow *flow,
				struct netlink_ext_ack *extack,
				struct net_device *filter_dev)
{
	struct pedit_headers_action hdrs[2] = {};
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	const struct ip_tunnel_info *info = NULL;
	int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS];
	bool ft_flow = mlx5e_is_ft_flow(flow);
	const struct flow_action_entry *act;
	bool encap = false, decap = false;
	u32 action = attr->action;
	int err, i, if_count = 0;
	bool mpls_push = false;

	if (!flow_action_has_entries(flow_action))
		return -EINVAL;

	if (!flow_action_hw_stats_check(flow_action, extack,
					FLOW_ACTION_HW_STATS_DELAYED_BIT))
		return -EOPNOTSUPP;

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_DROP:
			action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
				  MLX5_FLOW_CONTEXT_ACTION_COUNT;
			break;
		case FLOW_ACTION_MPLS_PUSH:
			if (!MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
							reformat_l2_to_l3_tunnel) ||
			    act->mpls_push.proto != htons(ETH_P_MPLS_UC)) {
				NL_SET_ERR_MSG_MOD(extack,
						   "mpls push is supported only for mpls_uc protocol");
				return -EOPNOTSUPP;
			}
			mpls_push = true;
			break;
		case FLOW_ACTION_MPLS_POP:
			/* we only support mpls pop if it is the first action
			 * and the filter net device is bareudp. Subsequent
			 * actions can be pedit and the last can be mirred
			 * egress redirect.
			 */
			if (i) {
				NL_SET_ERR_MSG_MOD(extack,
						   "mpls pop supported only as first action");
				return -EOPNOTSUPP;
			}
			if (!netif_is_bareudp(filter_dev)) {
				NL_SET_ERR_MSG_MOD(extack,
						   "mpls pop supported only on bareudp devices");
				return -EOPNOTSUPP;
			}

			parse_attr->eth.h_proto = act->mpls_pop.proto;
			action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
			flow_flag_set(flow, L3_TO_L2_DECAP);
			break;
		case FLOW_ACTION_MANGLE:
		case FLOW_ACTION_ADD:
			/* pedit cannot be combined with an L3-to-L2 decap. */
			if (flow_flag_test(flow, L3_TO_L2_DECAP))
				return -EOPNOTSUPP;

			err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_FDB,
						    hdrs, extack);
			if (err)
				return err;

			action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
			attr->split_count = attr->out_count;
			break;
		case FLOW_ACTION_CSUM:
			/* csum is implicitly handled by preceding mod-hdr;
			 * only accept it when that combination is valid.
			 */
			if (csum_offload_supported(priv, action,
						   act->csum_flags, extack))
				break;

			return -EOPNOTSUPP;
		case FLOW_ACTION_REDIRECT:
		case FLOW_ACTION_MIRRED: {
			struct mlx5e_priv *out_priv;
			struct net_device *out_dev;

			out_dev = act->dev;
			if (!out_dev) {
				/* out_dev is NULL when filters with
				 * non-existing mirred device are replayed to
				 * the driver.
				 */
				return -EINVAL;
			}

			if (mpls_push && !netif_is_bareudp(out_dev)) {
				NL_SET_ERR_MSG_MOD(extack,
						   "mpls is supported only through a bareudp device");
				return -EOPNOTSUPP;
			}

			if (ft_flow && out_dev == priv->netdev) {
				/* Ignore forward to self rules generated
				 * by adding both mlx5 devs to the flow table
				 * block on a normal nft offload setup.
				 */
				return -EOPNOTSUPP;
			}

			if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
				NL_SET_ERR_MSG_MOD(extack,
						   "can't support more output ports, can't offload forwarding");
				netdev_warn(priv->netdev,
					    "can't support more than %d output ports, can't offload forwarding\n",
					    attr->out_count);
				return -EOPNOTSUPP;
			}

			action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
				  MLX5_FLOW_CONTEXT_ACTION_COUNT;
			if (encap) {
				/* Pending TUNNEL_ENCAP: record the tunnel
				 * info for this output; the rep is resolved
				 * later during encap handling.
				 */
				parse_attr->mirred_ifindex[attr->out_count] =
					out_dev->ifindex;
				parse_attr->tun_info[attr->out_count] = dup_tun_info(info);
				if (!parse_attr->tun_info[attr->out_count])
					return -ENOMEM;
				encap = false;
				attr->dests[attr->out_count].flags |=
					MLX5_ESW_DEST_ENCAP;
				attr->out_count++;
				/* attr->dests[].rep is resolved when we
				 * handle encap
				 */
			} else if (netdev_port_same_parent_id(priv->netdev, out_dev)) {
				struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
				struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
				struct net_device *uplink_upper;

				if (is_duplicated_output_device(priv->netdev,
								out_dev,
								ifindexes,
								if_count,
								extack))
					return -EOPNOTSUPP;

				ifindexes[if_count] = out_dev->ifindex;
				if_count++;

				/* If the target is the LAG master over our
				 * uplink, redirect to the uplink itself.
				 */
				rcu_read_lock();
				uplink_upper =
					netdev_master_upper_dev_get_rcu(uplink_dev);
				if (uplink_upper &&
				    netif_is_lag_master(uplink_upper) &&
				    uplink_upper == out_dev)
					out_dev = uplink_dev;
				rcu_read_unlock();

				if (is_vlan_dev(out_dev)) {
					err = add_vlan_push_action(priv, attr,
								   &out_dev,
								   &action);
					if (err)
						return err;
				}

				if (is_vlan_dev(parse_attr->filter_dev)) {
					err = add_vlan_pop_action(priv, attr,
								  &action);
					if (err)
						return err;
				}

				err = verify_uplink_forwarding(priv, flow, out_dev, extack);
				if (err)
					return err;

				if (!mlx5e_is_valid_eswitch_fwd_dev(priv, out_dev)) {
					NL_SET_ERR_MSG_MOD(extack,
							   "devices are not on same switch HW, can't offload forwarding");
					netdev_warn(priv->netdev,
						    "devices %s %s not on same switch HW, can't offload forwarding\n",
						    priv->netdev->name,
						    out_dev->name);
					return -EOPNOTSUPP;
				}

				out_priv = netdev_priv(out_dev);
				rpriv = out_priv->ppriv;
				attr->dests[attr->out_count].rep = rpriv->rep;
				attr->dests[attr->out_count].mdev = out_priv->mdev;
				attr->out_count++;
			} else if (parse_attr->filter_dev != priv->netdev) {
				/* All mlx5 devices are called to configure
				 * high level device filters. Therefore, the
				 * *attempt* to  install a filter on invalid
				 * eswitch should not trigger an explicit error
				 */
				return -EINVAL;
			} else {
				NL_SET_ERR_MSG_MOD(extack,
						   "devices are not on same switch HW, can't offload forwarding");
				netdev_warn(priv->netdev,
					    "devices %s %s not on same switch HW, can't offload forwarding\n",
					    priv->netdev->name,
					    out_dev->name);
				return -EINVAL;
			}
			}
			break;
		case FLOW_ACTION_TUNNEL_ENCAP:
			/* Remember the tunnel; consumed by the following
			 * REDIRECT/MIRRED entry (see "if (encap)" above).
			 */
			info = act->tunnel;
			if (info)
				encap = true;
			else
				return -EOPNOTSUPP;

			break;
		case FLOW_ACTION_VLAN_PUSH:
		case FLOW_ACTION_VLAN_POP:
			if (act->id == FLOW_ACTION_VLAN_PUSH &&
			    (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP)) {
				/* Replace vlan pop+push with vlan modify */
				action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
				err = add_vlan_rewrite_action(priv,
							      MLX5_FLOW_NAMESPACE_FDB,
							      act, parse_attr, hdrs,
							      &action, extack);
			} else {
				err = parse_tc_vlan_action(priv, act, attr, &action);
			}
			if (err)
				return err;

			attr->split_count = attr->out_count;
			break;
		case FLOW_ACTION_VLAN_MANGLE:
			err = add_vlan_rewrite_action(priv,
						      MLX5_FLOW_NAMESPACE_FDB,
						      act, parse_attr, hdrs,
						      &action, extack);
			if (err)
				return err;

			attr->split_count = attr->out_count;
			break;
		case FLOW_ACTION_TUNNEL_DECAP:
			decap = true;
			break;
		case FLOW_ACTION_GOTO:
			err = mlx5_validate_goto_chain(esw, flow, act, action,
						       extack);
			if (err)
				return err;

			action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
			attr->dest_chain = act->chain_index;
			break;
		case FLOW_ACTION_CT:
			err = mlx5_tc_ct_parse_action(priv, attr, act, extack);
			if (err)
				return err;

			flow_flag_set(flow, CT);
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
			return -EOPNOTSUPP;
		}
	}

	if (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
	    action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
		/* For prio tag mode, replace vlan pop with rewrite vlan prio
		 * tag rewrite.
		 */
		action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
		err = add_vlan_prio_tag_rewrite_action(priv, parse_attr, hdrs,
						       &action, extack);
		if (err)
			return err;
	}

	if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
	    hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
		err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_FDB,
					    parse_attr, hdrs, &action, extack);
		if (err)
			return err;
		/* in case all pedit actions are skipped, remove the MOD_HDR
		 * flag. we might have set split_count either by pedit or
		 * pop/push. if there is no pop/push either, reset it too.
		 */
		if (parse_attr->mod_hdr_acts.num_actions == 0) {
			action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
			dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
			if (!((action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
			      (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)))
				attr->split_count = 0;
		}
	}

	attr->action = action;
	if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
		return -EOPNOTSUPP;

	if (attr->dest_chain) {
		if (decap) {
			/* It can be supported if we'll create a mapping for
			 * the tunnel device only (without tunnel), and set
			 * this tunnel id with this decap flow.
			 *
			 * On restore (miss), we'll just set this saved tunnel
			 * device.
			 */

			NL_SET_ERR_MSG(extack,
				       "Decap with goto isn't supported");
			netdev_warn(priv->netdev,
				    "Decap with goto isn't supported");
			return -EOPNOTSUPP;
		}

		if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Mirroring goto chain rules isn't supported");
			return -EOPNOTSUPP;
		}
		attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	}

	if (!(attr->action &
	      (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Rule must have at least one forward/drop action");
		return -EOPNOTSUPP;
	}

	if (attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "current firmware doesn't support split rule for port mirroring");
		netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
		return -EOPNOTSUPP;
	}

	return 0;
}
421703a9d11eSOr Gerlitz 
4218226f2ca3SVlad Buslov static void get_flags(int flags, unsigned long *flow_flags)
421960bd4af8SOr Gerlitz {
4220226f2ca3SVlad Buslov 	unsigned long __flow_flags = 0;
422160bd4af8SOr Gerlitz 
4222226f2ca3SVlad Buslov 	if (flags & MLX5_TC_FLAG(INGRESS))
4223226f2ca3SVlad Buslov 		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_INGRESS);
4224226f2ca3SVlad Buslov 	if (flags & MLX5_TC_FLAG(EGRESS))
4225226f2ca3SVlad Buslov 		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_EGRESS);
422660bd4af8SOr Gerlitz 
4227226f2ca3SVlad Buslov 	if (flags & MLX5_TC_FLAG(ESW_OFFLOAD))
4228226f2ca3SVlad Buslov 		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
4229226f2ca3SVlad Buslov 	if (flags & MLX5_TC_FLAG(NIC_OFFLOAD))
4230226f2ca3SVlad Buslov 		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
423184179981SPaul Blakey 	if (flags & MLX5_TC_FLAG(FT_OFFLOAD))
423284179981SPaul Blakey 		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_FT);
4233d9ee0491SOr Gerlitz 
423460bd4af8SOr Gerlitz 	*flow_flags = __flow_flags;
423560bd4af8SOr Gerlitz }
423660bd4af8SOr Gerlitz 
/* Hashtable parameters for the TC flow tables: flows are keyed by the
 * tc filter cookie embedded in struct mlx5e_tc_flow.
 */
static const struct rhashtable_params tc_ht_params = {
	.head_offset = offsetof(struct mlx5e_tc_flow, node),
	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};
424305866c82SOr Gerlitz 
4244226f2ca3SVlad Buslov static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv,
4245226f2ca3SVlad Buslov 				    unsigned long flags)
424605866c82SOr Gerlitz {
4247655dc3d2SOr Gerlitz 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
4248655dc3d2SOr Gerlitz 	struct mlx5e_rep_priv *uplink_rpriv;
4249655dc3d2SOr Gerlitz 
4250226f2ca3SVlad Buslov 	if (flags & MLX5_TC_FLAG(ESW_OFFLOAD)) {
4251655dc3d2SOr Gerlitz 		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
4252ec1366c2SOz Shlomo 		return &uplink_rpriv->uplink_priv.tc_ht;
4253d9ee0491SOr Gerlitz 	} else /* NIC offload */
425405866c82SOr Gerlitz 		return &priv->fs.tc.ht;
425505866c82SOr Gerlitz }
425605866c82SOr Gerlitz 
425704de7ddaSRoi Dayan static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
425804de7ddaSRoi Dayan {
42591418ddd9SAviv Heller 	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
4260b05af6aaSBodong Wang 	bool is_rep_ingress = attr->in_rep->vport != MLX5_VPORT_UPLINK &&
4261226f2ca3SVlad Buslov 		flow_flag_test(flow, INGRESS);
42621418ddd9SAviv Heller 	bool act_is_encap = !!(attr->action &
42631418ddd9SAviv Heller 			       MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT);
42641418ddd9SAviv Heller 	bool esw_paired = mlx5_devcom_is_paired(attr->in_mdev->priv.devcom,
42651418ddd9SAviv Heller 						MLX5_DEVCOM_ESW_OFFLOADS);
42661418ddd9SAviv Heller 
426710fbb1cdSRoi Dayan 	if (!esw_paired)
426810fbb1cdSRoi Dayan 		return false;
426910fbb1cdSRoi Dayan 
427010fbb1cdSRoi Dayan 	if ((mlx5_lag_is_sriov(attr->in_mdev) ||
427110fbb1cdSRoi Dayan 	     mlx5_lag_is_multipath(attr->in_mdev)) &&
427210fbb1cdSRoi Dayan 	    (is_rep_ingress || act_is_encap))
427310fbb1cdSRoi Dayan 		return true;
427410fbb1cdSRoi Dayan 
427510fbb1cdSRoi Dayan 	return false;
427604de7ddaSRoi Dayan }
427704de7ddaSRoi Dayan 
4278a88780a9SRoi Dayan static int
4279a88780a9SRoi Dayan mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
4280226f2ca3SVlad Buslov 		 struct flow_cls_offload *f, unsigned long flow_flags,
4281a88780a9SRoi Dayan 		 struct mlx5e_tc_flow_parse_attr **__parse_attr,
4282a88780a9SRoi Dayan 		 struct mlx5e_tc_flow **__flow)
4283e3a2b7edSAmir Vadai {
428417091853SOr Gerlitz 	struct mlx5e_tc_flow_parse_attr *parse_attr;
42853bc4b7bfSOr Gerlitz 	struct mlx5e_tc_flow *flow;
42865a7e5bcbSVlad Buslov 	int out_index, err;
4287776b12b6SOr Gerlitz 
428865ba8fb7SOr Gerlitz 	flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
42891b9a07eeSLeon Romanovsky 	parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
429017091853SOr Gerlitz 	if (!parse_attr || !flow) {
4291e3a2b7edSAmir Vadai 		err = -ENOMEM;
4292e3a2b7edSAmir Vadai 		goto err_free;
4293e3a2b7edSAmir Vadai 	}
4294e3a2b7edSAmir Vadai 
4295e3a2b7edSAmir Vadai 	flow->cookie = f->cookie;
429665ba8fb7SOr Gerlitz 	flow->flags = flow_flags;
4297655dc3d2SOr Gerlitz 	flow->priv = priv;
42985a7e5bcbSVlad Buslov 	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
42995a7e5bcbSVlad Buslov 		INIT_LIST_HEAD(&flow->encaps[out_index].list);
43005a7e5bcbSVlad Buslov 	INIT_LIST_HEAD(&flow->mod_hdr);
43015a7e5bcbSVlad Buslov 	INIT_LIST_HEAD(&flow->hairpin);
430214e6b038SEli Cohen 	INIT_LIST_HEAD(&flow->l3_to_l2_reformat);
43035a7e5bcbSVlad Buslov 	refcount_set(&flow->refcnt, 1);
430495435ad7SVlad Buslov 	init_completion(&flow->init_done);
4305e3a2b7edSAmir Vadai 
4306a88780a9SRoi Dayan 	*__flow = flow;
4307a88780a9SRoi Dayan 	*__parse_attr = parse_attr;
4308a88780a9SRoi Dayan 
4309a88780a9SRoi Dayan 	return 0;
4310a88780a9SRoi Dayan 
4311a88780a9SRoi Dayan err_free:
4312a88780a9SRoi Dayan 	kfree(flow);
4313a88780a9SRoi Dayan 	kvfree(parse_attr);
4314a88780a9SRoi Dayan 	return err;
4315adb4c123SOr Gerlitz }
4316adb4c123SOr Gerlitz 
4317988ab9c7STonghao Zhang static void
4318988ab9c7STonghao Zhang mlx5e_flow_esw_attr_init(struct mlx5_esw_flow_attr *esw_attr,
4319988ab9c7STonghao Zhang 			 struct mlx5e_priv *priv,
4320988ab9c7STonghao Zhang 			 struct mlx5e_tc_flow_parse_attr *parse_attr,
4321f9e30088SPablo Neira Ayuso 			 struct flow_cls_offload *f,
4322988ab9c7STonghao Zhang 			 struct mlx5_eswitch_rep *in_rep,
4323988ab9c7STonghao Zhang 			 struct mlx5_core_dev *in_mdev)
4324988ab9c7STonghao Zhang {
4325988ab9c7STonghao Zhang 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
4326988ab9c7STonghao Zhang 
4327988ab9c7STonghao Zhang 	esw_attr->parse_attr = parse_attr;
4328988ab9c7STonghao Zhang 	esw_attr->chain = f->common.chain_index;
4329ef01adaeSPablo Neira Ayuso 	esw_attr->prio = f->common.prio;
4330988ab9c7STonghao Zhang 
4331988ab9c7STonghao Zhang 	esw_attr->in_rep = in_rep;
4332988ab9c7STonghao Zhang 	esw_attr->in_mdev = in_mdev;
4333988ab9c7STonghao Zhang 
4334988ab9c7STonghao Zhang 	if (MLX5_CAP_ESW(esw->dev, counter_eswitch_affinity) ==
4335988ab9c7STonghao Zhang 	    MLX5_COUNTER_SOURCE_ESWITCH)
4336988ab9c7STonghao Zhang 		esw_attr->counter_dev = in_mdev;
4337988ab9c7STonghao Zhang 	else
4338988ab9c7STonghao Zhang 		esw_attr->counter_dev = priv->mdev;
4339988ab9c7STonghao Zhang }
4340988ab9c7STonghao Zhang 
/* Allocate and offload an FDB (eswitch) flow for the given classifier.
 *
 * Parses the flower match, the FDB actions and the connection-tracking
 * match, then installs the rule. A rule that fails only with
 * -ENETUNREACH under multipath is kept on the unready list (and the
 * flow is still returned) instead of failing the add.
 *
 * Returns the new flow, holding a reference, or an ERR_PTR.
 */
static struct mlx5e_tc_flow *
__mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
		     struct flow_cls_offload *f,
		     unsigned long flow_flags,
		     struct net_device *filter_dev,
		     struct mlx5_eswitch_rep *in_rep,
		     struct mlx5_core_dev *in_mdev)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_flow *flow;
	int attr_size, err;

	flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
	attr_size  = sizeof(struct mlx5_esw_flow_attr);
	err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
			       &parse_attr, &flow);
	if (err)
		goto out;

	parse_attr->filter_dev = filter_dev;
	mlx5e_flow_esw_attr_init(flow->esw_attr,
				 priv, parse_attr,
				 f, in_rep, in_mdev);

	err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
			       f, filter_dev);
	if (err)
		goto err_free;

	err = parse_tc_fdb_actions(priv, &rule->action, flow, extack, filter_dev);
	if (err)
		goto err_free;

	err = mlx5_tc_ct_parse_match(priv, &parse_attr->spec, f, extack);
	if (err)
		goto err_free;

	err = mlx5e_tc_add_fdb_flow(priv, flow, extack);
	/* Wake anyone waiting on init_done whether install worked or not. */
	complete_all(&flow->init_done);
	if (err) {
		if (!(err == -ENETUNREACH && mlx5_lag_is_multipath(in_mdev)))
			goto err_free;

		/* Tunnel route unreachable under multipath: park the flow
		 * so offload can be retried later (see add_unready_flow).
		 */
		add_unready_flow(flow);
	}

	return flow;

err_free:
	mlx5e_flow_put(priv, flow);
out:
	return ERR_PTR(err);
}
4396a88780a9SRoi Dayan 
/* Mirror an FDB flow onto the paired (peer) eswitch so traffic taking
 * either LAG/multipath leg hits an offloaded rule.
 *
 * On success the peer rule is stored in flow->peer_flow, the flow is
 * marked DUP and linked on the local eswitch's peer_flows list.
 * Returns -ENODEV when the peer eswitch is not (or no longer) paired.
 */
static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f,
				      struct mlx5e_tc_flow *flow,
				      unsigned long flow_flags)
{
	struct mlx5e_priv *priv = flow->priv, *peer_priv;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch, *peer_esw;
	struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_rep_priv *peer_urpriv;
	struct mlx5e_tc_flow *peer_flow;
	struct mlx5_core_dev *in_mdev;
	int err = 0;

	/* Pins the peer eswitch data; released at the end of the function. */
	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer_esw)
		return -ENODEV;

	peer_urpriv = mlx5_eswitch_get_uplink_priv(peer_esw, REP_ETH);
	peer_priv = netdev_priv(peer_urpriv->netdev);

	/* in_mdev is assigned of which the packet originated from.
	 * So packets redirected to uplink use the same mdev of the
	 * original flow and packets redirected from uplink use the
	 * peer mdev.
	 */
	if (flow->esw_attr->in_rep->vport == MLX5_VPORT_UPLINK)
		in_mdev = peer_priv->mdev;
	else
		in_mdev = priv->mdev;

	/* Reuse the original flow's parsed match/filter_dev for the mirror. */
	parse_attr = flow->esw_attr->parse_attr;
	peer_flow = __mlx5e_add_fdb_flow(peer_priv, f, flow_flags,
					 parse_attr->filter_dev,
					 flow->esw_attr->in_rep, in_mdev);
	if (IS_ERR(peer_flow)) {
		err = PTR_ERR(peer_flow);
		goto out;
	}

	flow->peer_flow = peer_flow;
	flow_flag_set(flow, DUP);
	mutex_lock(&esw->offloads.peer_mutex);
	list_add_tail(&flow->peer, &esw->offloads.peer_flows);
	mutex_unlock(&esw->offloads.peer_mutex);

out:
	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	return err;
}
444604de7ddaSRoi Dayan 
444704de7ddaSRoi Dayan static int
444804de7ddaSRoi Dayan mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
4449f9e30088SPablo Neira Ayuso 		   struct flow_cls_offload *f,
4450226f2ca3SVlad Buslov 		   unsigned long flow_flags,
445104de7ddaSRoi Dayan 		   struct net_device *filter_dev,
445204de7ddaSRoi Dayan 		   struct mlx5e_tc_flow **__flow)
445304de7ddaSRoi Dayan {
445404de7ddaSRoi Dayan 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
445504de7ddaSRoi Dayan 	struct mlx5_eswitch_rep *in_rep = rpriv->rep;
445604de7ddaSRoi Dayan 	struct mlx5_core_dev *in_mdev = priv->mdev;
445704de7ddaSRoi Dayan 	struct mlx5e_tc_flow *flow;
445804de7ddaSRoi Dayan 	int err;
445904de7ddaSRoi Dayan 
446071129676SJason Gunthorpe 	flow = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep,
446171129676SJason Gunthorpe 				    in_mdev);
446271129676SJason Gunthorpe 	if (IS_ERR(flow))
446371129676SJason Gunthorpe 		return PTR_ERR(flow);
446404de7ddaSRoi Dayan 
446504de7ddaSRoi Dayan 	if (is_peer_flow_needed(flow)) {
446695dc1902SRoi Dayan 		err = mlx5e_tc_add_fdb_peer_flow(f, flow, flow_flags);
446704de7ddaSRoi Dayan 		if (err) {
446804de7ddaSRoi Dayan 			mlx5e_tc_del_fdb_flow(priv, flow);
446904de7ddaSRoi Dayan 			goto out;
447004de7ddaSRoi Dayan 		}
447104de7ddaSRoi Dayan 	}
447204de7ddaSRoi Dayan 
447304de7ddaSRoi Dayan 	*__flow = flow;
447404de7ddaSRoi Dayan 
447504de7ddaSRoi Dayan 	return 0;
447604de7ddaSRoi Dayan 
447704de7ddaSRoi Dayan out:
447804de7ddaSRoi Dayan 	return err;
447904de7ddaSRoi Dayan }
448004de7ddaSRoi Dayan 
/* Allocate and offload a NIC (non-eswitch) flow for the given classifier.
 *
 * On success *__flow holds the new flow marked OFFLOADED; parse_attr is
 * freed on every exit path since the NIC path does not keep it around
 * after install (unlike the FDB path).
 */
static int
mlx5e_add_nic_flow(struct mlx5e_priv *priv,
		   struct flow_cls_offload *f,
		   unsigned long flow_flags,
		   struct net_device *filter_dev,
		   struct mlx5e_tc_flow **__flow)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_flow *flow;
	int attr_size, err;

	/* multi-chain not supported for NIC rules */
	if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
		return -EOPNOTSUPP;

	flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
	attr_size  = sizeof(struct mlx5_nic_flow_attr);
	err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
			       &parse_attr, &flow);
	if (err)
		goto out;

	parse_attr->filter_dev = filter_dev;
	err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
			       f, filter_dev);
	if (err)
		goto err_free;

	err = parse_tc_nic_actions(priv, &rule->action, parse_attr, flow, extack);
	if (err)
		goto err_free;

	err = mlx5e_tc_add_nic_flow(priv, parse_attr, flow, extack);
	if (err)
		goto err_free;

	flow_flag_set(flow, OFFLOADED);
	kvfree(parse_attr);
	*__flow = flow;

	return 0;

err_free:
	mlx5e_flow_put(priv, flow);
	kvfree(parse_attr);
out:
	return err;
}
4531a88780a9SRoi Dayan 
4532a88780a9SRoi Dayan static int
4533a88780a9SRoi Dayan mlx5e_tc_add_flow(struct mlx5e_priv *priv,
4534f9e30088SPablo Neira Ayuso 		  struct flow_cls_offload *f,
4535226f2ca3SVlad Buslov 		  unsigned long flags,
4536d11afc26SOz Shlomo 		  struct net_device *filter_dev,
4537a88780a9SRoi Dayan 		  struct mlx5e_tc_flow **flow)
4538a88780a9SRoi Dayan {
4539a88780a9SRoi Dayan 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
4540226f2ca3SVlad Buslov 	unsigned long flow_flags;
4541a88780a9SRoi Dayan 	int err;
4542a88780a9SRoi Dayan 
4543a88780a9SRoi Dayan 	get_flags(flags, &flow_flags);
4544a88780a9SRoi Dayan 
4545bf07aa73SPaul Blakey 	if (!tc_can_offload_extack(priv->netdev, f->common.extack))
4546bf07aa73SPaul Blakey 		return -EOPNOTSUPP;
4547bf07aa73SPaul Blakey 
4548f6455de0SBodong Wang 	if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
4549d11afc26SOz Shlomo 		err = mlx5e_add_fdb_flow(priv, f, flow_flags,
4550d11afc26SOz Shlomo 					 filter_dev, flow);
4551a88780a9SRoi Dayan 	else
4552d11afc26SOz Shlomo 		err = mlx5e_add_nic_flow(priv, f, flow_flags,
4553d11afc26SOz Shlomo 					 filter_dev, flow);
4554a88780a9SRoi Dayan 
4555a88780a9SRoi Dayan 	return err;
4556a88780a9SRoi Dayan }
4557a88780a9SRoi Dayan 
/* tc flower "replace/add filter" entry point.
 *
 * Rejects cookies that are already offloaded, builds and installs the
 * flow, then publishes it in the hashtable. Returns -EEXIST on a
 * duplicate cookie.
 */
int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
			   struct flow_cls_offload *f, unsigned long flags)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5e_tc_flow *flow;
	int err = 0;

	/* Fast duplicate check under RCU; a concurrent add racing past
	 * this check is still caught by the insert_fast() below.
	 */
	rcu_read_lock();
	flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
	rcu_read_unlock();
	if (flow) {
		NL_SET_ERR_MSG_MOD(extack,
				   "flow cookie already exists, ignoring");
		netdev_warn_once(priv->netdev,
				 "flow cookie %lx already exists, ignoring\n",
				 f->cookie);
		err = -EEXIST;
		goto out;
	}

	trace_mlx5e_configure_flower(f);
	err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow);
	if (err)
		goto out;

	err = rhashtable_lookup_insert_fast(tc_ht, &flow->node, tc_ht_params);
	if (err)
		goto err_free;

	return 0;

err_free:
	mlx5e_flow_put(priv, flow);
out:
	return err;
}
4595e3a2b7edSAmir Vadai 
45968f8ae895SOr Gerlitz static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags)
45978f8ae895SOr Gerlitz {
4598226f2ca3SVlad Buslov 	bool dir_ingress = !!(flags & MLX5_TC_FLAG(INGRESS));
4599226f2ca3SVlad Buslov 	bool dir_egress = !!(flags & MLX5_TC_FLAG(EGRESS));
46008f8ae895SOr Gerlitz 
4601226f2ca3SVlad Buslov 	return flow_flag_test(flow, INGRESS) == dir_ingress &&
4602226f2ca3SVlad Buslov 		flow_flag_test(flow, EGRESS) == dir_egress;
46038f8ae895SOr Gerlitz }
46048f8ae895SOr Gerlitz 
/* tc flower "delete filter" entry point.
 *
 * Looks the flow up under RCU, claims it by atomically setting the
 * DELETED flag so concurrent deleters back off, removes it from the
 * hashtable and drops the table's reference.
 */
int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
			struct flow_cls_offload *f, unsigned long flags)
{
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5e_tc_flow *flow;
	int err;

	rcu_read_lock();
	flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
	if (!flow || !same_flow_direction(flow, flags)) {
		err = -EINVAL;
		goto errout;
	}

	/* Only delete the flow if it doesn't have MLX5E_TC_FLOW_DELETED flag
	 * set.
	 */
	if (flow_flag_test_and_set(flow, DELETED)) {
		err = -EINVAL;
		goto errout;
	}
	rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params);
	rcu_read_unlock();

	trace_mlx5e_delete_flower(f);
	/* Drop the hashtable's reference; the flow is freed once all
	 * remaining references go away.
	 */
	mlx5e_flow_put(priv, flow);

	return 0;

errout:
	rcu_read_unlock();
	return err;
}
4638e3a2b7edSAmir Vadai 
/* tc flower "get stats" entry point.
 *
 * Reads the flow's cached HW counter and, when the rule is duplicated
 * on a paired eswitch (LAG/multipath), aggregates the peer rule's
 * counter too, then reports the totals to TC.
 */
int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
		       struct flow_cls_offload *f, unsigned long flags)
{
	struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5_eswitch *peer_esw;
	struct mlx5e_tc_flow *flow;
	struct mlx5_fc *counter;
	u64 lastuse = 0;
	u64 packets = 0;
	u64 bytes = 0;
	int err = 0;

	/* Take a reference on the flow while still under RCU. */
	rcu_read_lock();
	flow = mlx5e_flow_get(rhashtable_lookup(tc_ht, &f->cookie,
						tc_ht_params));
	rcu_read_unlock();
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	if (!same_flow_direction(flow, flags)) {
		err = -EINVAL;
		goto errout;
	}

	if (mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, CT)) {
		counter = mlx5e_tc_get_counter(flow);
		if (!counter)
			goto errout;

		mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
	}

	/* Under multipath it's possible for one rule to be currently
	 * un-offloaded while the other rule is offloaded.
	 */
	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer_esw)
		goto out;

	if (flow_flag_test(flow, DUP) &&
	    flow_flag_test(flow->peer_flow, OFFLOADED)) {
		u64 bytes2;
		u64 packets2;
		u64 lastuse2;

		counter = mlx5e_tc_get_counter(flow->peer_flow);
		if (!counter)
			goto no_peer_counter;
		mlx5_fc_query_cached(counter, &bytes2, &packets2, &lastuse2);

		/* Sum the two legs; lastuse is whichever leg saw traffic last. */
		bytes += bytes2;
		packets += packets2;
		lastuse = max_t(u64, lastuse, lastuse2);
	}

no_peer_counter:
	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
out:
	flow_stats_update(&f->stats, bytes, packets, lastuse,
			  FLOW_ACTION_HW_STATS_DELAYED);
	trace_mlx5e_stats_flower(f);
errout:
	mlx5e_flow_put(priv, flow);
	return err;
}
4705aad7e08dSAmir Vadai 
4706fcb64c0fSEli Cohen static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
4707fcb64c0fSEli Cohen 			       struct netlink_ext_ack *extack)
4708fcb64c0fSEli Cohen {
4709fcb64c0fSEli Cohen 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
4710fcb64c0fSEli Cohen 	struct mlx5_eswitch *esw;
4711fcb64c0fSEli Cohen 	u16 vport_num;
4712fcb64c0fSEli Cohen 	u32 rate_mbps;
4713fcb64c0fSEli Cohen 	int err;
4714fcb64c0fSEli Cohen 
4715e401a184SEli Cohen 	vport_num = rpriv->rep->vport;
4716e401a184SEli Cohen 	if (vport_num >= MLX5_VPORT_ECPF) {
4717e401a184SEli Cohen 		NL_SET_ERR_MSG_MOD(extack,
4718e401a184SEli Cohen 				   "Ingress rate limit is supported only for Eswitch ports connected to VFs");
4719e401a184SEli Cohen 		return -EOPNOTSUPP;
4720e401a184SEli Cohen 	}
4721e401a184SEli Cohen 
4722fcb64c0fSEli Cohen 	esw = priv->mdev->priv.eswitch;
4723fcb64c0fSEli Cohen 	/* rate is given in bytes/sec.
4724fcb64c0fSEli Cohen 	 * First convert to bits/sec and then round to the nearest mbit/secs.
4725fcb64c0fSEli Cohen 	 * mbit means million bits.
4726fcb64c0fSEli Cohen 	 * Moreover, if rate is non zero we choose to configure to a minimum of
4727fcb64c0fSEli Cohen 	 * 1 mbit/sec.
4728fcb64c0fSEli Cohen 	 */
4729fcb64c0fSEli Cohen 	rate_mbps = rate ? max_t(u32, (rate * 8 + 500000) / 1000000, 1) : 0;
4730fcb64c0fSEli Cohen 	err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps);
4731fcb64c0fSEli Cohen 	if (err)
4732fcb64c0fSEli Cohen 		NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware");
4733fcb64c0fSEli Cohen 
4734fcb64c0fSEli Cohen 	return err;
4735fcb64c0fSEli Cohen }
4736fcb64c0fSEli Cohen 
4737fcb64c0fSEli Cohen static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
4738fcb64c0fSEli Cohen 					struct flow_action *flow_action,
4739fcb64c0fSEli Cohen 					struct netlink_ext_ack *extack)
4740fcb64c0fSEli Cohen {
4741fcb64c0fSEli Cohen 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
4742fcb64c0fSEli Cohen 	const struct flow_action_entry *act;
4743fcb64c0fSEli Cohen 	int err;
4744fcb64c0fSEli Cohen 	int i;
4745fcb64c0fSEli Cohen 
4746fcb64c0fSEli Cohen 	if (!flow_action_has_entries(flow_action)) {
4747fcb64c0fSEli Cohen 		NL_SET_ERR_MSG_MOD(extack, "matchall called with no action");
4748fcb64c0fSEli Cohen 		return -EINVAL;
4749fcb64c0fSEli Cohen 	}
4750fcb64c0fSEli Cohen 
4751fcb64c0fSEli Cohen 	if (!flow_offload_has_one_action(flow_action)) {
4752fcb64c0fSEli Cohen 		NL_SET_ERR_MSG_MOD(extack, "matchall policing support only a single action");
4753fcb64c0fSEli Cohen 		return -EOPNOTSUPP;
4754fcb64c0fSEli Cohen 	}
4755fcb64c0fSEli Cohen 
475653eca1f3SJakub Kicinski 	if (!flow_action_basic_hw_stats_check(flow_action, extack))
4757319a1d19SJiri Pirko 		return -EOPNOTSUPP;
4758319a1d19SJiri Pirko 
4759fcb64c0fSEli Cohen 	flow_action_for_each(i, act, flow_action) {
4760fcb64c0fSEli Cohen 		switch (act->id) {
4761fcb64c0fSEli Cohen 		case FLOW_ACTION_POLICE:
4762fcb64c0fSEli Cohen 			err = apply_police_params(priv, act->police.rate_bytes_ps, extack);
4763fcb64c0fSEli Cohen 			if (err)
4764fcb64c0fSEli Cohen 				return err;
4765fcb64c0fSEli Cohen 
4766fcb64c0fSEli Cohen 			rpriv->prev_vf_vport_stats = priv->stats.vf_vport;
4767fcb64c0fSEli Cohen 			break;
4768fcb64c0fSEli Cohen 		default:
4769fcb64c0fSEli Cohen 			NL_SET_ERR_MSG_MOD(extack, "mlx5 supports only police action for matchall");
4770fcb64c0fSEli Cohen 			return -EOPNOTSUPP;
4771fcb64c0fSEli Cohen 		}
4772fcb64c0fSEli Cohen 	}
4773fcb64c0fSEli Cohen 
4774fcb64c0fSEli Cohen 	return 0;
4775fcb64c0fSEli Cohen }
4776fcb64c0fSEli Cohen 
/* tc matchall "add" entry point: validate that eswitch QoS is available
 * and the filter priority is 1, then program the policing actions.
 */
int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
				struct tc_cls_matchall_offload *ma)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct netlink_ext_ack *extack = ma->common.extack;

	if (!mlx5_esw_qos_enabled(esw)) {
		NL_SET_ERR_MSG_MOD(extack, "QoS is not supported on this device");
		return -EOPNOTSUPP;
	}

	/* Only priority 1 is accepted so the matchall rule cannot shadow
	 * or be shadowed by other offloaded rules.
	 */
	if (ma->common.prio != 1) {
		NL_SET_ERR_MSG_MOD(extack, "only priority 1 is supported");
		return -EINVAL;
	}

	return scan_tc_matchall_fdb_actions(priv, &ma->rule->action, extack);
}
4795fcb64c0fSEli Cohen 
/* tc matchall "delete" entry point: remove the vport rate limit by
 * programming a rate of 0.
 */
int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv,
			     struct tc_cls_matchall_offload *ma)
{
	struct netlink_ext_ack *extack = ma->common.extack;

	return apply_police_params(priv, 0, extack);
}
4803fcb64c0fSEli Cohen 
4804fcb64c0fSEli Cohen void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
4805fcb64c0fSEli Cohen 			     struct tc_cls_matchall_offload *ma)
4806fcb64c0fSEli Cohen {
4807fcb64c0fSEli Cohen 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
4808fcb64c0fSEli Cohen 	struct rtnl_link_stats64 cur_stats;
4809fcb64c0fSEli Cohen 	u64 dbytes;
4810fcb64c0fSEli Cohen 	u64 dpkts;
4811fcb64c0fSEli Cohen 
4812fcb64c0fSEli Cohen 	cur_stats = priv->stats.vf_vport;
4813fcb64c0fSEli Cohen 	dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets;
4814fcb64c0fSEli Cohen 	dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes;
4815fcb64c0fSEli Cohen 	rpriv->prev_vf_vport_stats = cur_stats;
481693a129ebSJiri Pirko 	flow_stats_update(&ma->stats, dpkts, dbytes, jiffies,
481793a129ebSJiri Pirko 			  FLOW_ACTION_HW_STATS_DELAYED);
4818fcb64c0fSEli Cohen }
4819fcb64c0fSEli Cohen 
/* Mark hairpin pairs towards a dying peer device as gone.
 *
 * Phase 1: under the hairpin table lock, take a reference on every
 * live entry so it cannot be freed while being inspected.
 * Phase 2: outside the lock, wait for each entry's setup to complete
 * (res_ready) and set peer_gone on pairs targeting the dead vhca.
 */
static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
					      struct mlx5e_priv *peer_priv)
{
	struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
	struct mlx5e_hairpin_entry *hpe, *tmp;
	LIST_HEAD(init_wait_list);
	u16 peer_vhca_id;
	int bkt;

	/* Hairpin only exists between functions of the same HW device. */
	if (!same_hw_devs(priv, peer_priv))
		return;

	peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);

	mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
	hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist)
		if (refcount_inc_not_zero(&hpe->refcnt))
			list_add(&hpe->dead_peer_wait_list, &init_wait_list);
	mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);

	list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) {
		wait_for_completion(&hpe->res_ready);
		/* hpe->hp may be an ERR_PTR/NULL if setup failed. */
		if (!IS_ERR_OR_NULL(hpe->hp) && hpe->peer_vhca_id == peer_vhca_id)
			hpe->hp->pair->peer_gone = true;

		mlx5e_hairpin_put(priv, hpe);
	}
}
48484d8fcf21SAlaa Hleihel 
48494d8fcf21SAlaa Hleihel static int mlx5e_tc_netdev_event(struct notifier_block *this,
48504d8fcf21SAlaa Hleihel 				 unsigned long event, void *ptr)
48514d8fcf21SAlaa Hleihel {
48524d8fcf21SAlaa Hleihel 	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
48534d8fcf21SAlaa Hleihel 	struct mlx5e_flow_steering *fs;
48544d8fcf21SAlaa Hleihel 	struct mlx5e_priv *peer_priv;
48554d8fcf21SAlaa Hleihel 	struct mlx5e_tc_table *tc;
48564d8fcf21SAlaa Hleihel 	struct mlx5e_priv *priv;
48574d8fcf21SAlaa Hleihel 
48584d8fcf21SAlaa Hleihel 	if (ndev->netdev_ops != &mlx5e_netdev_ops ||
48594d8fcf21SAlaa Hleihel 	    event != NETDEV_UNREGISTER ||
48604d8fcf21SAlaa Hleihel 	    ndev->reg_state == NETREG_REGISTERED)
48614d8fcf21SAlaa Hleihel 		return NOTIFY_DONE;
48624d8fcf21SAlaa Hleihel 
48634d8fcf21SAlaa Hleihel 	tc = container_of(this, struct mlx5e_tc_table, netdevice_nb);
48644d8fcf21SAlaa Hleihel 	fs = container_of(tc, struct mlx5e_flow_steering, tc);
48654d8fcf21SAlaa Hleihel 	priv = container_of(fs, struct mlx5e_priv, fs);
48664d8fcf21SAlaa Hleihel 	peer_priv = netdev_priv(ndev);
48674d8fcf21SAlaa Hleihel 	if (priv == peer_priv ||
48684d8fcf21SAlaa Hleihel 	    !(priv->netdev->features & NETIF_F_HW_TC))
48694d8fcf21SAlaa Hleihel 		return NOTIFY_DONE;
48704d8fcf21SAlaa Hleihel 
48714d8fcf21SAlaa Hleihel 	mlx5e_tc_hairpin_update_dead_peer(priv, peer_priv);
48724d8fcf21SAlaa Hleihel 
48734d8fcf21SAlaa Hleihel 	return NOTIFY_DONE;
48744d8fcf21SAlaa Hleihel }
48754d8fcf21SAlaa Hleihel 
4876655dc3d2SOr Gerlitz int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
4877e8f887acSAmir Vadai {
4878acff797cSMaor Gottlieb 	struct mlx5e_tc_table *tc = &priv->fs.tc;
48794d8fcf21SAlaa Hleihel 	int err;
4880e8f887acSAmir Vadai 
4881b6fac0b4SVlad Buslov 	mutex_init(&tc->t_lock);
4882d2faae25SVlad Buslov 	mutex_init(&tc->mod_hdr.lock);
4883dd58edc3SVlad Buslov 	hash_init(tc->mod_hdr.hlist);
4884b32accdaSVlad Buslov 	mutex_init(&tc->hairpin_tbl_lock);
48855c65c564SOr Gerlitz 	hash_init(tc->hairpin_tbl);
488611c9c548SOr Gerlitz 
48874d8fcf21SAlaa Hleihel 	err = rhashtable_init(&tc->ht, &tc_ht_params);
48884d8fcf21SAlaa Hleihel 	if (err)
48894d8fcf21SAlaa Hleihel 		return err;
48904d8fcf21SAlaa Hleihel 
48914d8fcf21SAlaa Hleihel 	tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
4892d48834f9SJiri Pirko 	err = register_netdevice_notifier_dev_net(priv->netdev,
4893d48834f9SJiri Pirko 						  &tc->netdevice_nb,
4894d48834f9SJiri Pirko 						  &tc->netdevice_nn);
4895d48834f9SJiri Pirko 	if (err) {
48964d8fcf21SAlaa Hleihel 		tc->netdevice_nb.notifier_call = NULL;
48974d8fcf21SAlaa Hleihel 		mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
48984d8fcf21SAlaa Hleihel 	}
48994d8fcf21SAlaa Hleihel 
49004d8fcf21SAlaa Hleihel 	return err;
4901e8f887acSAmir Vadai }
4902e8f887acSAmir Vadai 
4903e8f887acSAmir Vadai static void _mlx5e_tc_del_flow(void *ptr, void *arg)
4904e8f887acSAmir Vadai {
4905e8f887acSAmir Vadai 	struct mlx5e_tc_flow *flow = ptr;
4906655dc3d2SOr Gerlitz 	struct mlx5e_priv *priv = flow->priv;
4907e8f887acSAmir Vadai 
4908961e8979SRoi Dayan 	mlx5e_tc_del_flow(priv, flow);
4909e8f887acSAmir Vadai 	kfree(flow);
4910e8f887acSAmir Vadai }
4911e8f887acSAmir Vadai 
/* Tear down NIC-side tc offload state; inverse of mlx5e_tc_nic_init(). */
void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	/* notifier_call is non-NULL only if registration succeeded in init. */
	if (tc->netdevice_nb.notifier_call)
		unregister_netdevice_notifier_dev_net(priv->netdev,
						      &tc->netdevice_nb,
						      &tc->netdevice_nn);

	mutex_destroy(&tc->mod_hdr.lock);
	mutex_destroy(&tc->hairpin_tbl_lock);

	rhashtable_destroy(&tc->ht);

	/* tc->t may be NULL or an error pointer if the flow table was
	 * never created (or creation failed); only destroy a real table.
	 */
	if (!IS_ERR_OR_NULL(tc->t)) {
		mlx5_destroy_flow_table(tc->t);
		tc->t = NULL;
	}
	mutex_destroy(&tc->t_lock);
}
4932655dc3d2SOr Gerlitz 
/* Initialize eswitch (switchdev mode) tc offload state.
 *
 * @tc_ht: flow hashtable embedded in the uplink rep private data; used
 *         to recover the enclosing uplink_priv and rep priv structures.
 *
 * Sets up connection-tracking offload, the tunnel match-key and tunnel
 * encap-options mapping contexts, and the flow hashtable. On failure,
 * everything already initialized is unwound in reverse order via the
 * goto chain.
 *
 * Return: 0 on success, negative errno otherwise.
 */
int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
{
	const size_t sz_enc_opts = sizeof(struct tunnel_match_enc_opts);
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *priv;
	struct mapping_ctx *mapping;
	int err;

	uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht);
	priv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);

	err = mlx5_tc_ct_init(uplink_priv);
	if (err)
		goto err_ct;

	/* Mapping of full tunnel match keys to compact IDs carried in
	 * packet metadata ("true" requests deletion of unused entries).
	 */
	mapping = mapping_create(sizeof(struct tunnel_match_key),
				 TUNNEL_INFO_BITS_MASK, true);
	if (IS_ERR(mapping)) {
		err = PTR_ERR(mapping);
		goto err_tun_mapping;
	}
	uplink_priv->tunnel_mapping = mapping;

	/* Separate mapping for tunnel encapsulation options. */
	mapping = mapping_create(sz_enc_opts, ENC_OPTS_BITS_MASK, true);
	if (IS_ERR(mapping)) {
		err = PTR_ERR(mapping);
		goto err_enc_opts_mapping;
	}
	uplink_priv->tunnel_enc_opts_mapping = mapping;

	err = rhashtable_init(tc_ht, &tc_ht_params);
	if (err)
		goto err_ht_init;

	return err;

err_ht_init:
	mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
err_enc_opts_mapping:
	mapping_destroy(uplink_priv->tunnel_mapping);
err_tun_mapping:
	mlx5_tc_ct_clean(uplink_priv);
err_ct:
	netdev_warn(priv->netdev,
		    "Failed to initialize tc (eswitch), err: %d", err);
	return err;
}
4980655dc3d2SOr Gerlitz 
/* Tear down eswitch tc offload state; inverse of mlx5e_tc_esw_init().
 *
 * The hashtable is destroyed first so remaining flows are released (via
 * _mlx5e_tc_del_flow) before the mapping contexts and CT state they may
 * still reference are freed.
 */
void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
{
	struct mlx5_rep_uplink_priv *uplink_priv;

	rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);

	uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht);
	mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
	mapping_destroy(uplink_priv->tunnel_mapping);

	mlx5_tc_ct_clean(uplink_priv);
}
499301252a27SOr Gerlitz 
4994226f2ca3SVlad Buslov int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags)
499501252a27SOr Gerlitz {
4996d9ee0491SOr Gerlitz 	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
499701252a27SOr Gerlitz 
499801252a27SOr Gerlitz 	return atomic_read(&tc_ht->nelems);
499901252a27SOr Gerlitz }
500004de7ddaSRoi Dayan 
/* Remove all flows duplicated onto @esw's peer eswitch.
 *
 * The _safe iterator is used because __mlx5e_tc_del_fdb_peer_flow()
 * presumably unlinks the flow from the peer_flows list — NOTE(review):
 * confirm against that helper's definition.
 */
void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw)
{
	struct mlx5e_tc_flow *flow, *tmp;

	list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows, peer)
		__mlx5e_tc_del_fdb_peer_flow(flow);
}
5008b4a23329SRoi Dayan 
/* Work item: retry offloading FDB flows that previously could not be
 * offloaded ("unready" flows).
 *
 * Under unready_flows_lock, attempt mlx5e_tc_add_fdb_flow() for each
 * queued flow; flows that now offload successfully (zero return) are
 * removed from the unready list, failures stay queued for a later retry.
 */
void mlx5e_tc_reoffload_flows_work(struct work_struct *work)
{
	struct mlx5_rep_uplink_priv *rpriv =
		container_of(work, struct mlx5_rep_uplink_priv,
			     reoffload_flows_work);
	struct mlx5e_tc_flow *flow, *tmp;

	mutex_lock(&rpriv->unready_flows_lock);
	list_for_each_entry_safe(flow, tmp, &rpriv->unready_flows, unready) {
		if (!mlx5e_tc_add_fdb_flow(flow->priv, flow, NULL))
			unready_flow_del(flow);
	}
	mutex_unlock(&rpriv->unready_flows_lock);
}
5023e2394a61SVlad Buslov 
5024e2394a61SVlad Buslov static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
5025e2394a61SVlad Buslov 				     struct flow_cls_offload *cls_flower,
5026e2394a61SVlad Buslov 				     unsigned long flags)
5027e2394a61SVlad Buslov {
5028e2394a61SVlad Buslov 	switch (cls_flower->command) {
5029e2394a61SVlad Buslov 	case FLOW_CLS_REPLACE:
5030e2394a61SVlad Buslov 		return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
5031e2394a61SVlad Buslov 					      flags);
5032e2394a61SVlad Buslov 	case FLOW_CLS_DESTROY:
5033e2394a61SVlad Buslov 		return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
5034e2394a61SVlad Buslov 					   flags);
5035e2394a61SVlad Buslov 	case FLOW_CLS_STATS:
5036e2394a61SVlad Buslov 		return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
5037e2394a61SVlad Buslov 					  flags);
5038e2394a61SVlad Buslov 	default:
5039e2394a61SVlad Buslov 		return -EOPNOTSUPP;
5040e2394a61SVlad Buslov 	}
5041e2394a61SVlad Buslov }
5042e2394a61SVlad Buslov 
5043e2394a61SVlad Buslov int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
5044e2394a61SVlad Buslov 			    void *cb_priv)
5045e2394a61SVlad Buslov {
5046e2394a61SVlad Buslov 	unsigned long flags = MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(NIC_OFFLOAD);
5047e2394a61SVlad Buslov 	struct mlx5e_priv *priv = cb_priv;
5048e2394a61SVlad Buslov 
5049e2394a61SVlad Buslov 	switch (type) {
5050e2394a61SVlad Buslov 	case TC_SETUP_CLSFLOWER:
5051e2394a61SVlad Buslov 		return mlx5e_setup_tc_cls_flower(priv, type_data, flags);
5052e2394a61SVlad Buslov 	default:
5053e2394a61SVlad Buslov 		return -EOPNOTSUPP;
5054e2394a61SVlad Buslov 	}
5055e2394a61SVlad Buslov }
5056