1e8f887acSAmir Vadai /*
2e8f887acSAmir Vadai  * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
3e8f887acSAmir Vadai  *
4e8f887acSAmir Vadai  * This software is available to you under a choice of one of two
5e8f887acSAmir Vadai  * licenses.  You may choose to be licensed under the terms of the GNU
6e8f887acSAmir Vadai  * General Public License (GPL) Version 2, available from the file
7e8f887acSAmir Vadai  * COPYING in the main directory of this source tree, or the
8e8f887acSAmir Vadai  * OpenIB.org BSD license below:
9e8f887acSAmir Vadai  *
10e8f887acSAmir Vadai  *     Redistribution and use in source and binary forms, with or
11e8f887acSAmir Vadai  *     without modification, are permitted provided that the following
12e8f887acSAmir Vadai  *     conditions are met:
13e8f887acSAmir Vadai  *
14e8f887acSAmir Vadai  *      - Redistributions of source code must retain the above
15e8f887acSAmir Vadai  *        copyright notice, this list of conditions and the following
16e8f887acSAmir Vadai  *        disclaimer.
17e8f887acSAmir Vadai  *
18e8f887acSAmir Vadai  *      - Redistributions in binary form must reproduce the above
19e8f887acSAmir Vadai  *        copyright notice, this list of conditions and the following
20e8f887acSAmir Vadai  *        disclaimer in the documentation and/or other materials
21e8f887acSAmir Vadai  *        provided with the distribution.
22e8f887acSAmir Vadai  *
23e8f887acSAmir Vadai  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24e8f887acSAmir Vadai  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25e8f887acSAmir Vadai  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26e8f887acSAmir Vadai  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27e8f887acSAmir Vadai  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28e8f887acSAmir Vadai  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29e8f887acSAmir Vadai  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30e8f887acSAmir Vadai  * SOFTWARE.
31e8f887acSAmir Vadai  */
32e8f887acSAmir Vadai 
33e3a2b7edSAmir Vadai #include <net/flow_dissector.h>
343f7d0eb4SOr Gerlitz #include <net/sch_generic.h>
35e3a2b7edSAmir Vadai #include <net/pkt_cls.h>
36e3a2b7edSAmir Vadai #include <net/tc_act/tc_gact.h>
3712185a9fSAmir Vadai #include <net/tc_act/tc_skbedit.h>
38e8f887acSAmir Vadai #include <linux/mlx5/fs.h>
39e8f887acSAmir Vadai #include <linux/mlx5/device.h>
40e8f887acSAmir Vadai #include <linux/rhashtable.h>
415a7e5bcbSVlad Buslov #include <linux/refcount.h>
42db76ca24SVlad Buslov #include <linux/completion.h>
4303a9d11eSOr Gerlitz #include <net/tc_act/tc_mirred.h>
44776b12b6SOr Gerlitz #include <net/tc_act/tc_vlan.h>
45bbd00f7eSHadar Hen Zion #include <net/tc_act/tc_tunnel_key.h>
46d79b6df6SOr Gerlitz #include <net/tc_act/tc_pedit.h>
4726c02749SOr Gerlitz #include <net/tc_act/tc_csum.h>
48f6dfb4c3SHadar Hen Zion #include <net/arp.h>
493616d08bSDavid Ahern #include <net/ipv6_stubs.h>
50e8f887acSAmir Vadai #include "en.h"
511d447a39SSaeed Mahameed #include "en_rep.h"
52232c0013SHadar Hen Zion #include "en_tc.h"
5303a9d11eSOr Gerlitz #include "eswitch.h"
5449964352SSaeed Mahameed #include "esw/chains.h"
553f6d08d1SOr Gerlitz #include "fs_core.h"
562c81bfd5SHuy Nguyen #include "en/port.h"
57101f4de9SOz Shlomo #include "en/tc_tun.h"
580a7fcb78SPaul Blakey #include "en/mapping.h"
594c3844d9SPaul Blakey #include "en/tc_ct.h"
6004de7ddaSRoi Dayan #include "lib/devcom.h"
619272e3dfSYevgeny Kliteynik #include "lib/geneve.h"
627a978759SDmytro Linkin #include "diag/en_tc_tracepoint.h"
63e8f887acSAmir Vadai 
640a7fcb78SPaul Blakey #define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)
650a7fcb78SPaul Blakey 
/* Attributes of a NIC (non-eswitch) offloaded flow. */
struct mlx5_nic_flow_attr {
	u32 action;	/* MLX5_FLOW_CONTEXT_ACTION_* bitmask */
	u32 flow_tag;
	struct mlx5_modify_hdr *modify_hdr; /* shared header-rewrite object, set by mlx5e_attach_mod_hdr() */
	u32 hairpin_tirn;	/* TIR to forward to when the flow hairpins */
	u8 match_level;
	struct mlx5_flow_table	*hairpin_ft;
	struct mlx5_fc		*counter;
};
753bc4b7bfSOr Gerlitz 
76226f2ca3SVlad Buslov #define MLX5E_TC_FLOW_BASE (MLX5E_TC_FLAG_LAST_EXPORTED_BIT + 1)
7760bd4af8SOr Gerlitz 
enum {
	/* Flags shared with the exported MLX5E_TC_FLAG_* bit positions. */
	MLX5E_TC_FLOW_FLAG_INGRESS	= MLX5E_TC_FLAG_INGRESS_BIT,
	MLX5E_TC_FLOW_FLAG_EGRESS	= MLX5E_TC_FLAG_EGRESS_BIT,
	MLX5E_TC_FLOW_FLAG_ESWITCH	= MLX5E_TC_FLAG_ESW_OFFLOAD_BIT,
	MLX5E_TC_FLOW_FLAG_FT		= MLX5E_TC_FLAG_FT_OFFLOAD_BIT,
	MLX5E_TC_FLOW_FLAG_NIC		= MLX5E_TC_FLAG_NIC_OFFLOAD_BIT,
	/* Driver-internal flags start at MLX5E_TC_FLOW_BASE, above the
	 * exported range.
	 */
	MLX5E_TC_FLOW_FLAG_OFFLOADED	= MLX5E_TC_FLOW_BASE,
	MLX5E_TC_FLOW_FLAG_HAIRPIN	= MLX5E_TC_FLOW_BASE + 1,
	MLX5E_TC_FLOW_FLAG_HAIRPIN_RSS	= MLX5E_TC_FLOW_BASE + 2,
	MLX5E_TC_FLOW_FLAG_SLOW		= MLX5E_TC_FLOW_BASE + 3,
	MLX5E_TC_FLOW_FLAG_DUP		= MLX5E_TC_FLOW_BASE + 4,
	MLX5E_TC_FLOW_FLAG_NOT_READY	= MLX5E_TC_FLOW_BASE + 5,
	MLX5E_TC_FLOW_FLAG_DELETED	= MLX5E_TC_FLOW_BASE + 6,
	MLX5E_TC_FLOW_FLAG_CT		= MLX5E_TC_FLOW_BASE + 7,
};
9365ba8fb7SOr Gerlitz 
94e4ad91f2SChris Mi #define MLX5E_TC_MAX_SPLITS 1
95e4ad91f2SChris Mi 
9679baaec7SEli Britstein /* Helper struct for accessing a struct containing list_head array.
9779baaec7SEli Britstein  * Containing struct
9879baaec7SEli Britstein  *   |- Helper array
9979baaec7SEli Britstein  *      [0] Helper item 0
10079baaec7SEli Britstein  *          |- list_head item 0
10179baaec7SEli Britstein  *          |- index (0)
10279baaec7SEli Britstein  *      [1] Helper item 1
10379baaec7SEli Britstein  *          |- list_head item 1
10479baaec7SEli Britstein  *          |- index (1)
10579baaec7SEli Britstein  * To access the containing struct from one of the list_head items:
10679baaec7SEli Britstein  * 1. Get the helper item from the list_head item using
10779baaec7SEli Britstein  *    helper item =
10879baaec7SEli Britstein  *        container_of(list_head item, helper struct type, list_head field)
10979baaec7SEli Britstein  * 2. Get the contining struct from the helper item and its index in the array:
11079baaec7SEli Britstein  *    containing struct =
11179baaec7SEli Britstein  *        container_of(helper item, containing struct type, helper field[index])
11279baaec7SEli Britstein  */
/* One element of mlx5e_tc_flow::encaps; see the list_head-array helper
 * scheme documented above.
 */
struct encap_flow_item {
	struct mlx5e_encap_entry *e; /* attached encap instance */
	struct list_head list;	/* entry in the encap's flow list */
	int index;		/* position within the encaps[] array */
};
11879baaec7SEli Britstein 
/* Driver state for one offloaded TC flow (filter). */
struct mlx5e_tc_flow {
	struct rhash_head	node;	/* rhashtable linkage */
	struct mlx5e_priv	*priv;
	u64			cookie;	/* TC filter cookie */
	unsigned long		flags;	/* MLX5E_TC_FLOW_FLAG_* bits */
	struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1];
	/* Flow can be associated with multiple encap IDs.
	 * The number of encaps is bounded by the number of supported
	 * destinations.
	 */
	struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS];
	struct mlx5e_tc_flow    *peer_flow;
	struct mlx5e_mod_hdr_entry *mh; /* attached mod header instance */
	struct list_head	mod_hdr; /* flows sharing the same mod hdr ID */
	struct mlx5e_hairpin_entry *hpe; /* attached hairpin instance */
	struct list_head	hairpin; /* flows sharing the same hairpin */
	struct list_head	peer;    /* flows with peer flow */
	struct list_head	unready; /* flows not ready to be offloaded (e.g due to missing route) */
	int			tmp_efi_index;
	struct list_head	tmp_list; /* temporary flow list used by neigh update */
	refcount_t		refcnt;	/* dropped via mlx5e_flow_put() */
	struct rcu_head		rcu_head; /* for kfree_rcu() on last put */
	struct completion	init_done;
	int tunnel_id; /* the mapped tunnel id of this flow */

	/* type-dependent attribute tail; must remain the last member */
	union {
		struct mlx5_esw_flow_attr esw_attr[0];
		struct mlx5_nic_flow_attr nic_attr[0];
	};
};
149e8f887acSAmir Vadai 
/* Intermediate parse results gathered while translating a TC rule,
 * before it is turned into a hardware flow.
 */
struct mlx5e_tc_flow_parse_attr {
	const struct ip_tunnel_info *tun_info[MLX5_MAX_FLOW_FWD_VPORTS];
	struct net_device *filter_dev;	/* device the filter was installed on */
	struct mlx5_flow_spec spec;	/* match criteria/value */
	struct mlx5e_tc_mod_hdr_acts mod_hdr_acts; /* accumulated pedit actions */
	int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS];
};
15717091853SOr Gerlitz 
158acff797cSMaor Gottlieb #define MLX5E_TC_TABLE_NUM_GROUPS 4
159b3a433deSOr Gerlitz #define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(16)
160e8f887acSAmir Vadai 
/* Aggregate of the tunnel (enc_*) dissector keys plus the ingress
 * device, used as a mapping key for tunnel ids.
 */
struct tunnel_match_key {
	struct flow_dissector_key_control enc_control;
	struct flow_dissector_key_keyid enc_key_id;
	struct flow_dissector_key_ports enc_tp;
	struct flow_dissector_key_ip enc_ip;
	union {
		struct flow_dissector_key_ipv4_addrs enc_ipv4;
		struct flow_dissector_key_ipv6_addrs enc_ipv6;
	};

	int filter_ifindex;
};
1730a7fcb78SPaul Blakey 
1740a7fcb78SPaul Blakey /* Tunnel_id mapping is TUNNEL_INFO_BITS + ENC_OPTS_BITS.
1750a7fcb78SPaul Blakey  * Upper TUNNEL_INFO_BITS for general tunnel info.
1760a7fcb78SPaul Blakey  * Lower ENC_OPTS_BITS bits for enc_opts.
1770a7fcb78SPaul Blakey  */
1780a7fcb78SPaul Blakey #define TUNNEL_INFO_BITS 6
1790a7fcb78SPaul Blakey #define TUNNEL_INFO_BITS_MASK GENMASK(TUNNEL_INFO_BITS - 1, 0)
1800a7fcb78SPaul Blakey #define ENC_OPTS_BITS 2
1810a7fcb78SPaul Blakey #define ENC_OPTS_BITS_MASK GENMASK(ENC_OPTS_BITS - 1, 0)
1820a7fcb78SPaul Blakey #define TUNNEL_ID_BITS (TUNNEL_INFO_BITS + ENC_OPTS_BITS)
1830a7fcb78SPaul Blakey #define TUNNEL_ID_MASK GENMASK(TUNNEL_ID_BITS - 1, 0)
1840a7fcb78SPaul Blakey 
/* Mapping from logical TC-offload attributes to the HW metadata registers
 * that carry them.  moffset/mlen are byte offset/length within the 32-bit
 * register (see mlx5e_tc_match_to_reg_set()); soffset is the byte offset
 * of the backing field inside fte_match_param, used when matching.
 */
struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
	[CHAIN_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
		.moffset = 0,
		.mlen = 2,
	},
	[TUNNEL_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1,
		.moffset = 3,
		.mlen = 1,
		.soffset = MLX5_BYTE_OFF(fte_match_param,
					 misc_parameters_2.metadata_reg_c_1),
	},
	/* connection-tracking mappings are defined by the CT offload code */
	[ZONE_TO_REG] = zone_to_reg_ct,
	[CTSTATE_TO_REG] = ctstate_to_reg_ct,
	[MARK_TO_REG] = mark_to_reg_ct,
	[LABELS_TO_REG] = labels_to_reg_ct,
	[FTEID_TO_REG] = fteid_to_reg_ct,
	[TUPLEID_TO_REG] = tupleid_to_reg_ct,
};
2058f1e0b97SPaul Blakey 
2060a7fcb78SPaul Blakey static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
2070a7fcb78SPaul Blakey 
/* Add a match on metadata register @type to @spec: match @data under
 * @mask at the register's fte_match_param offset, and enable the
 * misc_parameters_2 criteria group.
 */
void
mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
			    enum mlx5e_tc_attr_to_reg type,
			    u32 data,
			    u32 mask)
{
	int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
	int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	void *headers_c = spec->match_criteria;
	void *headers_v = spec->match_value;
	void *fmask, *fval;

	fmask = headers_c + soffset;
	fval = headers_v + soffset;

	/* Convert to big endian and right-align so that only the low
	 * match_len bytes are significant, then copy exactly that many
	 * bytes into the match param.
	 */
	mask = cpu_to_be32(mask) >> (32 - (match_len * 8));
	data = cpu_to_be32(data) >> (32 - (match_len * 8));

	memcpy(fmask, &mask, match_len);
	memcpy(fval, &data, match_len);

	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
}
2310a7fcb78SPaul Blakey 
/* Append a SET action writing @data into metadata register @type to
 * @mod_hdr_acts, growing the action buffer if needed.
 *
 * NOTE(review): the action buffer is always allocated for the FDB
 * namespace here, even though some register types may be used for NIC
 * flows too — confirm all callers are FDB-only.
 *
 * Returns 0 on success or a negative errno from the buffer allocation.
 */
int
mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
			  struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
			  enum mlx5e_tc_attr_to_reg type,
			  u32 data)
{
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield;
	int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	char *modact;
	int err;

	err = alloc_mod_hdr_actions(mdev, MLX5_FLOW_NAMESPACE_FDB,
				    mod_hdr_acts);
	if (err)
		return err;

	/* point at the next free action slot */
	modact = mod_hdr_acts->actions +
		 (mod_hdr_acts->num_actions * MLX5_MH_ACT_SZ);

	/* Firmware has 5bit length field and 0 means 32bits */
	if (mlen == 4)
		mlen = 0;

	MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, modact, field, mfield);
	MLX5_SET(set_action_in, modact, offset, moffset * 8);	/* bits */
	MLX5_SET(set_action_in, modact, length, mlen * 8);	/* bits */
	MLX5_SET(set_action_in, modact, data, data);
	mod_hdr_acts->num_actions++;

	return 0;
}
2650a7fcb78SPaul Blakey 
/* Resources backing one hairpin instance between two functions. */
struct mlx5e_hairpin {
	struct mlx5_hairpin *pair;	/* SQ/RQ pair shared by both sides */

	struct mlx5_core_dev *func_mdev;
	struct mlx5e_priv *func_priv;
	u32 tdn;	/* transport domain number */
	u32 tirn;	/* direct TIR over the pair's first RQ */

	/* RSS support: indirection table + one TIR per traffic type */
	int num_channels;
	struct mlx5e_rqt indir_rqt;
	u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
	struct mlx5e_ttc_table ttc;
};
27977ab67b7SOr Gerlitz 
/* Shared, refcounted hairpin entry; flows with matching peer_vhca_id and
 * prio reuse the same instance.
 */
struct mlx5e_hairpin_entry {
	/* a node of a hash table which keeps all the  hairpin entries */
	struct hlist_node hairpin_hlist;

	/* protects flows list */
	spinlock_t flows_lock;
	/* flows sharing the same hairpin */
	struct list_head flows;
	/* hpe's that were not fully initialized when dead peer update event
	 * function traversed them.
	 */
	struct list_head dead_peer_wait_list;

	u16 peer_vhca_id;
	u8 prio;
	struct mlx5e_hairpin *hp;	/* actual resources; NULL until res_ready fires */
	refcount_t refcnt;
	struct completion res_ready;	/* signalled once creation finished */
};
2995c65c564SOr Gerlitz 
/* Hash/compare key for shared mod_hdr entries: the raw firmware action
 * list, MLX5_MH_ACT_SZ bytes per action.
 */
struct mod_hdr_key {
	int num_actions;
	void *actions;
};
30411c9c548SOr Gerlitz 
/* Refcounted, shared modify-header context; flows with identical action
 * lists reuse one firmware object.
 */
struct mlx5e_mod_hdr_entry {
	/* a node of a hash table which keeps all the mod_hdr entries */
	struct hlist_node mod_hdr_hlist;

	/* protects flows list */
	spinlock_t flows_lock;
	/* flows sharing the same mod_hdr entry */
	struct list_head flows;

	struct mod_hdr_key key;	/* key.actions points just past this struct */

	struct mlx5_modify_hdr *modify_hdr;	/* firmware object */

	refcount_t refcnt;
	struct completion res_ready;	/* fired once firmware alloc finished */
	int compl_result;	/* 1 on success, negative errno on failure */
};
32211c9c548SOr Gerlitz 
3235a7e5bcbSVlad Buslov static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
3245a7e5bcbSVlad Buslov 			      struct mlx5e_tc_flow *flow);
3255a7e5bcbSVlad Buslov 
3265a7e5bcbSVlad Buslov static struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow)
3275a7e5bcbSVlad Buslov {
3285a7e5bcbSVlad Buslov 	if (!flow || !refcount_inc_not_zero(&flow->refcnt))
3295a7e5bcbSVlad Buslov 		return ERR_PTR(-EINVAL);
3305a7e5bcbSVlad Buslov 	return flow;
3315a7e5bcbSVlad Buslov }
3325a7e5bcbSVlad Buslov 
3335a7e5bcbSVlad Buslov static void mlx5e_flow_put(struct mlx5e_priv *priv,
3345a7e5bcbSVlad Buslov 			   struct mlx5e_tc_flow *flow)
3355a7e5bcbSVlad Buslov {
3365a7e5bcbSVlad Buslov 	if (refcount_dec_and_test(&flow->refcnt)) {
3375a7e5bcbSVlad Buslov 		mlx5e_tc_del_flow(priv, flow);
338c5d326b2SVlad Buslov 		kfree_rcu(flow, rcu_head);
3395a7e5bcbSVlad Buslov 	}
3405a7e5bcbSVlad Buslov }
3415a7e5bcbSVlad Buslov 
/* Set @flag on @flow, ordering it after all prior stores so that readers
 * who observe the flag (with __flow_flag_test()) also see the flow fields
 * written before it.
 */
static void __flow_flag_set(struct mlx5e_tc_flow *flow, unsigned long flag)
{
	/* Complete all memory stores before setting bit. */
	smp_mb__before_atomic();
	set_bit(flag, &flow->flags);
}
348226f2ca3SVlad Buslov 
349226f2ca3SVlad Buslov #define flow_flag_set(flow, flag) __flow_flag_set(flow, MLX5E_TC_FLOW_FLAG_##flag)
350226f2ca3SVlad Buslov 
/* Atomically set @flag and return its previous value. */
static bool __flow_flag_test_and_set(struct mlx5e_tc_flow *flow,
				     unsigned long flag)
{
	/* test_and_set_bit() provides all necessary barriers */
	return test_and_set_bit(flag, &flow->flags);
}
357c5d326b2SVlad Buslov 
358c5d326b2SVlad Buslov #define flow_flag_test_and_set(flow, flag)			\
359c5d326b2SVlad Buslov 	__flow_flag_test_and_set(flow,				\
360c5d326b2SVlad Buslov 				 MLX5E_TC_FLOW_FLAG_##flag)
361c5d326b2SVlad Buslov 
/* Clear @flag on @flow, ordered after all prior stores (mirror of
 * __flow_flag_set()).
 */
static void __flow_flag_clear(struct mlx5e_tc_flow *flow, unsigned long flag)
{
	/* Complete all memory stores before clearing bit. */
	smp_mb__before_atomic();
	clear_bit(flag, &flow->flags);
}
368226f2ca3SVlad Buslov 
369226f2ca3SVlad Buslov #define flow_flag_clear(flow, flag) __flow_flag_clear(flow, \
370226f2ca3SVlad Buslov 						      MLX5E_TC_FLOW_FLAG_##flag)
371226f2ca3SVlad Buslov 
/* Test @flag on @flow; the barrier pairs with __flow_flag_set() so fields
 * published before the flag was set are visible after a true result.
 */
static bool __flow_flag_test(struct mlx5e_tc_flow *flow, unsigned long flag)
{
	bool ret = test_bit(flag, &flow->flags);

	/* Read fields of flow structure only after checking flags. */
	smp_mb__after_atomic();
	return ret;
}
380226f2ca3SVlad Buslov 
381226f2ca3SVlad Buslov #define flow_flag_test(flow, flag) __flow_flag_test(flow, \
382226f2ca3SVlad Buslov 						    MLX5E_TC_FLOW_FLAG_##flag)
383226f2ca3SVlad Buslov 
/* True when the flow is offloaded through the eswitch (FDB). */
static bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, ESWITCH);
}
388226f2ca3SVlad Buslov 
/* True when the flow was added via the flow-table offload path. */
static bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, FT);
}
39384179981SPaul Blakey 
/* True once the flow's rule has been installed in hardware. */
static bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, OFFLOADED);
}
398226f2ca3SVlad Buslov 
39911c9c548SOr Gerlitz static inline u32 hash_mod_hdr_info(struct mod_hdr_key *key)
40011c9c548SOr Gerlitz {
40111c9c548SOr Gerlitz 	return jhash(key->actions,
40211c9c548SOr Gerlitz 		     key->num_actions * MLX5_MH_ACT_SZ, 0);
40311c9c548SOr Gerlitz }
40411c9c548SOr Gerlitz 
40511c9c548SOr Gerlitz static inline int cmp_mod_hdr_info(struct mod_hdr_key *a,
40611c9c548SOr Gerlitz 				   struct mod_hdr_key *b)
40711c9c548SOr Gerlitz {
40811c9c548SOr Gerlitz 	if (a->num_actions != b->num_actions)
40911c9c548SOr Gerlitz 		return 1;
41011c9c548SOr Gerlitz 
41111c9c548SOr Gerlitz 	return memcmp(a->actions, b->actions, a->num_actions * MLX5_MH_ACT_SZ);
41211c9c548SOr Gerlitz }
41311c9c548SOr Gerlitz 
414dd58edc3SVlad Buslov static struct mod_hdr_tbl *
415dd58edc3SVlad Buslov get_mod_hdr_table(struct mlx5e_priv *priv, int namespace)
416dd58edc3SVlad Buslov {
417dd58edc3SVlad Buslov 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
418dd58edc3SVlad Buslov 
419dd58edc3SVlad Buslov 	return namespace == MLX5_FLOW_NAMESPACE_FDB ? &esw->offloads.mod_hdr :
420dd58edc3SVlad Buslov 		&priv->fs.tc.mod_hdr;
421dd58edc3SVlad Buslov }
422dd58edc3SVlad Buslov 
423dd58edc3SVlad Buslov static struct mlx5e_mod_hdr_entry *
424dd58edc3SVlad Buslov mlx5e_mod_hdr_get(struct mod_hdr_tbl *tbl, struct mod_hdr_key *key, u32 hash_key)
425dd58edc3SVlad Buslov {
426dd58edc3SVlad Buslov 	struct mlx5e_mod_hdr_entry *mh, *found = NULL;
427dd58edc3SVlad Buslov 
428dd58edc3SVlad Buslov 	hash_for_each_possible(tbl->hlist, mh, mod_hdr_hlist, hash_key) {
429dd58edc3SVlad Buslov 		if (!cmp_mod_hdr_info(&mh->key, key)) {
430dd58edc3SVlad Buslov 			refcount_inc(&mh->refcnt);
431dd58edc3SVlad Buslov 			found = mh;
432dd58edc3SVlad Buslov 			break;
433dd58edc3SVlad Buslov 		}
434dd58edc3SVlad Buslov 	}
435dd58edc3SVlad Buslov 
436dd58edc3SVlad Buslov 	return found;
437dd58edc3SVlad Buslov }
438dd58edc3SVlad Buslov 
/* Drop a reference on @mh.  On the last one, atomically unhash it under
 * the table lock (so lookups can't revive it), release the firmware
 * object if it was successfully allocated (compl_result > 0), and free
 * the entry.
 */
static void mlx5e_mod_hdr_put(struct mlx5e_priv *priv,
			      struct mlx5e_mod_hdr_entry *mh,
			      int namespace)
{
	struct mod_hdr_tbl *tbl = get_mod_hdr_table(priv, namespace);

	/* only takes the lock when the count actually drops to zero */
	if (!refcount_dec_and_mutex_lock(&mh->refcnt, &tbl->lock))
		return;
	hash_del(&mh->mod_hdr_hlist);
	mutex_unlock(&tbl->lock);

	WARN_ON(!list_empty(&mh->flows));
	if (mh->compl_result > 0)
		mlx5_modify_header_dealloc(priv->mdev, mh->modify_hdr);

	kfree(mh);
}
456dd58edc3SVlad Buslov 
457d2faae25SVlad Buslov static int get_flow_name_space(struct mlx5e_tc_flow *flow)
458d2faae25SVlad Buslov {
459d2faae25SVlad Buslov 	return mlx5e_is_eswitch_flow(flow) ?
460d2faae25SVlad Buslov 		MLX5_FLOW_NAMESPACE_FDB : MLX5_FLOW_NAMESPACE_KERNEL;
461d2faae25SVlad Buslov }
/* Find or create a modify-header context matching the pedit actions in
 * @parse_attr, attach @flow to it, and store the resulting
 * mlx5_modify_hdr in the flow's esw/nic attributes.
 *
 * Entries are shared: flows with identical action lists reuse one
 * firmware object, tracked by refcount.  The table mutex protects only
 * the hash table; the (slow) firmware allocation runs outside the lock,
 * and concurrent users of a freshly-published entry wait on
 * mh->res_ready until compl_result is set.
 *
 * Returns 0 on success or a negative errno.
 */
static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
				struct mlx5e_tc_flow *flow,
				struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	int num_actions, actions_size, namespace, err;
	struct mlx5e_mod_hdr_entry *mh;
	struct mod_hdr_tbl *tbl;
	struct mod_hdr_key key;
	u32 hash_key;

	num_actions  = parse_attr->mod_hdr_acts.num_actions;
	actions_size = MLX5_MH_ACT_SZ * num_actions;

	key.actions = parse_attr->mod_hdr_acts.actions;
	key.num_actions = num_actions;

	hash_key = hash_mod_hdr_info(&key);

	namespace = get_flow_name_space(flow);
	tbl = get_mod_hdr_table(priv, namespace);

	mutex_lock(&tbl->lock);
	mh = mlx5e_mod_hdr_get(tbl, &key, hash_key);
	if (mh) {
		/* found (reference already taken); wait until the creating
		 * context finished (or failed) the firmware allocation
		 */
		mutex_unlock(&tbl->lock);
		wait_for_completion(&mh->res_ready);

		if (mh->compl_result < 0) {
			err = -EREMOTEIO;
			goto attach_header_err;
		}
		goto attach_flow;
	}

	mh = kzalloc(sizeof(*mh) + actions_size, GFP_KERNEL);
	if (!mh) {
		mutex_unlock(&tbl->lock);
		return -ENOMEM;
	}

	/* action list is stored inline, right after the entry itself */
	mh->key.actions = (void *)mh + sizeof(*mh);
	memcpy(mh->key.actions, key.actions, actions_size);
	mh->key.num_actions = num_actions;
	spin_lock_init(&mh->flows_lock);
	INIT_LIST_HEAD(&mh->flows);
	refcount_set(&mh->refcnt, 1);
	init_completion(&mh->res_ready);

	/* publish before the firmware call so concurrent attachers find
	 * this entry and wait on res_ready instead of duplicating it
	 */
	hash_add(tbl->hlist, &mh->mod_hdr_hlist, hash_key);
	mutex_unlock(&tbl->lock);

	mh->modify_hdr = mlx5_modify_header_alloc(priv->mdev, namespace,
						  mh->key.num_actions,
						  mh->key.actions);
	if (IS_ERR(mh->modify_hdr)) {
		err = PTR_ERR(mh->modify_hdr);
		mh->compl_result = err;
		goto alloc_header_err;
	}
	mh->compl_result = 1;
	complete_all(&mh->res_ready);

attach_flow:
	flow->mh = mh;
	spin_lock(&mh->flows_lock);
	list_add(&flow->mod_hdr, &mh->flows);
	spin_unlock(&mh->flows_lock);
	if (mlx5e_is_eswitch_flow(flow))
		flow->esw_attr->modify_hdr = mh->modify_hdr;
	else
		flow->nic_attr->modify_hdr = mh->modify_hdr;

	return 0;

alloc_header_err:
	/* wake waiters so they can observe the negative compl_result */
	complete_all(&mh->res_ready);
attach_header_err:
	mlx5e_mod_hdr_put(priv, mh, namespace);
	return err;
}
54211c9c548SOr Gerlitz 
54311c9c548SOr Gerlitz static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
54411c9c548SOr Gerlitz 				 struct mlx5e_tc_flow *flow)
54511c9c548SOr Gerlitz {
5465a7e5bcbSVlad Buslov 	/* flow wasn't fully initialized */
547dd58edc3SVlad Buslov 	if (!flow->mh)
5485a7e5bcbSVlad Buslov 		return;
5495a7e5bcbSVlad Buslov 
55083a52f0dSVlad Buslov 	spin_lock(&flow->mh->flows_lock);
55111c9c548SOr Gerlitz 	list_del(&flow->mod_hdr);
55283a52f0dSVlad Buslov 	spin_unlock(&flow->mh->flows_lock);
55311c9c548SOr Gerlitz 
554d2faae25SVlad Buslov 	mlx5e_mod_hdr_put(priv, flow->mh, get_flow_name_space(flow));
555dd58edc3SVlad Buslov 	flow->mh = NULL;
55611c9c548SOr Gerlitz }
55711c9c548SOr Gerlitz 
/* Resolve the mlx5 core device behind netdev @ifindex in @net.
 *
 * NOTE(review): __dev_get_by_index() returns NULL for a stale ifindex,
 * in which case netdev_priv(netdev) dereferences NULL — confirm callers
 * guarantee a live ifindex or add a NULL check.  Also assumes the device
 * is an mlx5e netdev (netdev_priv() cast is only valid then) — TODO
 * confirm.
 */
static
struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
{
	struct net_device *netdev;
	struct mlx5e_priv *priv;

	netdev = __dev_get_by_index(net, ifindex);
	priv = netdev_priv(netdev);
	return priv->mdev;
}
56877ab67b7SOr Gerlitz 
/* Allocate the transport objects for a hairpin: a transport domain and a
 * direct (non-RSS) TIR pointing at the pair's first RQ.  Fills hp->tdn
 * and hp->tirn.  Returns 0 or a negative errno, unwinding on failure.
 */
static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
{
	u32 in[MLX5_ST_SZ_DW(create_tir_in)] = {0};
	void *tirc;
	int err;

	err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn);
	if (err)
		goto alloc_tdn_err;

	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
	MLX5_SET(tirc, tirc, inline_rqn, hp->pair->rqn[0]);
	MLX5_SET(tirc, tirc, transport_domain, hp->tdn);

	err = mlx5_core_create_tir(hp->func_mdev, in, MLX5_ST_SZ_BYTES(create_tir_in), &hp->tirn);
	if (err)
		goto create_tir_err;

	return 0;

create_tir_err:
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
alloc_tdn_err:
	return err;
}
59677ab67b7SOr Gerlitz 
/* Release the TIR and transport domain created by
 * mlx5e_hairpin_create_transport(), in reverse order.
 */
static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
{
	mlx5_core_destroy_tir(hp->func_mdev, hp->tirn);
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
}
60277ab67b7SOr Gerlitz 
/* Populate the RQT context @rqtc with the hairpin pair's RQ numbers,
 * spread across all MLX5E_INDIR_RQT_SIZE slots using the driver's default
 * indirection over hp->num_channels (with bit-inverted slot order for the
 * XOR hash function, matching the regular RX path).
 */
static void mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
{
	u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE], rqn;
	struct mlx5e_priv *priv = hp->func_priv;
	int i, ix, sz = MLX5E_INDIR_RQT_SIZE;

	mlx5e_build_default_indir_rqt(indirection_rqt, sz,
				      hp->num_channels);

	for (i = 0; i < sz; i++) {
		ix = i;
		if (priv->rss_params.hfunc == ETH_RSS_HASH_XOR)
			ix = mlx5e_bits_invert(i, ilog2(sz));
		ix = indirection_rqt[ix];
		rqn = hp->pair->rqn[ix];
		MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
	}
}
6213f6d08d1SOr Gerlitz 
6223f6d08d1SOr Gerlitz static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
6233f6d08d1SOr Gerlitz {
6243f6d08d1SOr Gerlitz 	int inlen, err, sz = MLX5E_INDIR_RQT_SIZE;
6253f6d08d1SOr Gerlitz 	struct mlx5e_priv *priv = hp->func_priv;
6263f6d08d1SOr Gerlitz 	struct mlx5_core_dev *mdev = priv->mdev;
6273f6d08d1SOr Gerlitz 	void *rqtc;
6283f6d08d1SOr Gerlitz 	u32 *in;
6293f6d08d1SOr Gerlitz 
6303f6d08d1SOr Gerlitz 	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
6313f6d08d1SOr Gerlitz 	in = kvzalloc(inlen, GFP_KERNEL);
6323f6d08d1SOr Gerlitz 	if (!in)
6333f6d08d1SOr Gerlitz 		return -ENOMEM;
6343f6d08d1SOr Gerlitz 
6353f6d08d1SOr Gerlitz 	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
6363f6d08d1SOr Gerlitz 
6373f6d08d1SOr Gerlitz 	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
6383f6d08d1SOr Gerlitz 	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
6393f6d08d1SOr Gerlitz 
6403f6d08d1SOr Gerlitz 	mlx5e_hairpin_fill_rqt_rqns(hp, rqtc);
6413f6d08d1SOr Gerlitz 
6423f6d08d1SOr Gerlitz 	err = mlx5_core_create_rqt(mdev, in, inlen, &hp->indir_rqt.rqtn);
6433f6d08d1SOr Gerlitz 	if (!err)
6443f6d08d1SOr Gerlitz 		hp->indir_rqt.enabled = true;
6453f6d08d1SOr Gerlitz 
6463f6d08d1SOr Gerlitz 	kvfree(in);
6473f6d08d1SOr Gerlitz 	return err;
6483f6d08d1SOr Gerlitz }
6493f6d08d1SOr Gerlitz 
/* Create one indirect (RSS) TIR per traffic type, all pointing at the
 * hairpin indirection RQT and hashed with the default per-traffic-type
 * configuration and the function's current RSS parameters.
 *
 * On failure, only the TIRs created so far (indices [0, tt)) are
 * destroyed; the RQT itself is owned by the caller.
 */
static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	u32 in[MLX5_ST_SZ_DW(create_tir_in)];
	int tt, i, err;
	void *tirc;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		struct mlx5e_tirc_config ttconfig = mlx5e_tirc_get_default_config(tt);

		/* the command buffer is reused across iterations */
		memset(in, 0, MLX5_ST_SZ_BYTES(create_tir_in));
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

		MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
		MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
		MLX5_SET(tirc, tirc, indirect_table, hp->indir_rqt.rqtn);
		mlx5e_build_indir_tir_ctx_hash(&priv->rss_params, &ttconfig, tirc, false);

		err = mlx5_core_create_tir(hp->func_mdev, in,
					   MLX5_ST_SZ_BYTES(create_tir_in), &hp->indir_tirn[tt]);
		if (err) {
			mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err);
			goto err_destroy_tirs;
		}
	}
	return 0;

err_destroy_tirs:
	for (i = 0; i < tt; i++)
		mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[i]);
	return err;
}
6823f6d08d1SOr Gerlitz 
6833f6d08d1SOr Gerlitz static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp)
6843f6d08d1SOr Gerlitz {
6853f6d08d1SOr Gerlitz 	int tt;
6863f6d08d1SOr Gerlitz 
6873f6d08d1SOr Gerlitz 	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
6883f6d08d1SOr Gerlitz 		mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[tt]);
6893f6d08d1SOr Gerlitz }
6903f6d08d1SOr Gerlitz 
6913f6d08d1SOr Gerlitz static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
6923f6d08d1SOr Gerlitz 					 struct ttc_params *ttc_params)
6933f6d08d1SOr Gerlitz {
6943f6d08d1SOr Gerlitz 	struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
6953f6d08d1SOr Gerlitz 	int tt;
6963f6d08d1SOr Gerlitz 
6973f6d08d1SOr Gerlitz 	memset(ttc_params, 0, sizeof(*ttc_params));
6983f6d08d1SOr Gerlitz 
6993f6d08d1SOr Gerlitz 	ttc_params->any_tt_tirn = hp->tirn;
7003f6d08d1SOr Gerlitz 
7013f6d08d1SOr Gerlitz 	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
7023f6d08d1SOr Gerlitz 		ttc_params->indir_tirn[tt] = hp->indir_tirn[tt];
7033f6d08d1SOr Gerlitz 
7046412bb39SEli Cohen 	ft_attr->max_fte = MLX5E_TTC_TABLE_SIZE;
7053f6d08d1SOr Gerlitz 	ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
7063f6d08d1SOr Gerlitz 	ft_attr->prio = MLX5E_TC_PRIO;
7073f6d08d1SOr Gerlitz }
7083f6d08d1SOr Gerlitz 
/* Set up RSS for a multi-channel hairpin: indirection RQT ->
 * per-traffic-type indirect TIRs -> a dedicated TTC steering table.
 * Unwinds in reverse creation order on failure.
 */
static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	struct ttc_params ttc_params;
	int err;

	err = mlx5e_hairpin_create_indirect_rqt(hp);
	if (err)
		return err;

	err = mlx5e_hairpin_create_indirect_tirs(hp);
	if (err)
		goto err_create_indirect_tirs;

	mlx5e_hairpin_set_ttc_params(hp, &ttc_params);
	err = mlx5e_create_ttc_table(priv, &ttc_params, &hp->ttc);
	if (err)
		goto err_create_ttc_table;

	netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
		   hp->num_channels, hp->ttc.ft.t->id);

	return 0;

err_create_ttc_table:
	mlx5e_hairpin_destroy_indirect_tirs(hp);
err_create_indirect_tirs:
	mlx5e_destroy_rqt(priv, &hp->indir_rqt);

	return err;
}
7403f6d08d1SOr Gerlitz 
7413f6d08d1SOr Gerlitz static void mlx5e_hairpin_rss_cleanup(struct mlx5e_hairpin *hp)
7423f6d08d1SOr Gerlitz {
7433f6d08d1SOr Gerlitz 	struct mlx5e_priv *priv = hp->func_priv;
7443f6d08d1SOr Gerlitz 
7453f6d08d1SOr Gerlitz 	mlx5e_destroy_ttc_table(priv, &hp->ttc);
7463f6d08d1SOr Gerlitz 	mlx5e_hairpin_destroy_indirect_tirs(hp);
7473f6d08d1SOr Gerlitz 	mlx5e_destroy_rqt(priv, &hp->indir_rqt);
7483f6d08d1SOr Gerlitz }
7493f6d08d1SOr Gerlitz 
/* Allocate a hairpin object: create the hairpin RQ/SQ pair between this
 * function's mdev and the peer's mdev, then the transport objects
 * (TD/TIR) and, for multi-channel hairpins, the RSS objects.
 *
 * Returns the new hairpin on success, ERR_PTR() on failure.
 */
static struct mlx5e_hairpin *
mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params,
		     int peer_ifindex)
{
	struct mlx5_core_dev *func_mdev, *peer_mdev;
	struct mlx5e_hairpin *hp;
	struct mlx5_hairpin *pair;
	int err;

	hp = kzalloc(sizeof(*hp), GFP_KERNEL);
	if (!hp)
		return ERR_PTR(-ENOMEM);

	func_mdev = priv->mdev;
	/* NOTE(review): peer_mdev is used unchecked below; confirm
	 * mlx5e_hairpin_get_mdev() cannot fail for this peer_ifindex
	 * (callers have already passed the hairpin capability check).
	 */
	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);

	pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
	if (IS_ERR(pair)) {
		err = PTR_ERR(pair);
		goto create_pair_err;
	}
	hp->pair = pair;
	hp->func_mdev = func_mdev;
	hp->func_priv = priv;
	hp->num_channels = params->num_channels;

	err = mlx5e_hairpin_create_transport(hp);
	if (err)
		goto create_transport_err;

	/* RSS objects are only needed when traffic spreads over >1 RQ */
	if (hp->num_channels > 1) {
		err = mlx5e_hairpin_rss_init(hp);
		if (err)
			goto rss_init_err;
	}

	return hp;

rss_init_err:
	mlx5e_hairpin_destroy_transport(hp);
create_transport_err:
	mlx5_core_hairpin_destroy(hp->pair);
create_pair_err:
	kfree(hp);
	return ERR_PTR(err);
}
79677ab67b7SOr Gerlitz 
79777ab67b7SOr Gerlitz static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp)
79877ab67b7SOr Gerlitz {
7993f6d08d1SOr Gerlitz 	if (hp->num_channels > 1)
8003f6d08d1SOr Gerlitz 		mlx5e_hairpin_rss_cleanup(hp);
80177ab67b7SOr Gerlitz 	mlx5e_hairpin_destroy_transport(hp);
80277ab67b7SOr Gerlitz 	mlx5_core_hairpin_destroy(hp->pair);
80377ab67b7SOr Gerlitz 	kvfree(hp);
80477ab67b7SOr Gerlitz }
80577ab67b7SOr Gerlitz 
806106be53bSOr Gerlitz static inline u32 hash_hairpin_info(u16 peer_vhca_id, u8 prio)
807106be53bSOr Gerlitz {
808106be53bSOr Gerlitz 	return (peer_vhca_id << 16 | prio);
809106be53bSOr Gerlitz }
810106be53bSOr Gerlitz 
/* Look up a hairpin entry by peer vhca_id and match priority, taking a
 * reference on it.  Returns NULL when no matching entry exists.
 *
 * Caller must hold priv->fs.tc.hairpin_tbl_lock (as done in
 * mlx5e_hairpin_flow_add()).
 */
static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
						     u16 peer_vhca_id, u8 prio)
{
	struct mlx5e_hairpin_entry *hpe;
	u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);

	hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe,
			       hairpin_hlist, hash_key) {
		/* hash collisions are possible; verify the actual key */
		if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio) {
			refcount_inc(&hpe->refcnt);
			return hpe;
		}
	}

	return NULL;
}
8275c65c564SOr Gerlitz 
/* Drop one reference on a hairpin entry.  When the last reference goes,
 * the entry is unhashed under hairpin_tbl_lock, the underlying hairpin
 * (if it was successfully created) is destroyed, and the entry is freed.
 */
static void mlx5e_hairpin_put(struct mlx5e_priv *priv,
			      struct mlx5e_hairpin_entry *hpe)
{
	/* no more hairpin flows for us, release the hairpin pair */
	if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &priv->fs.tc.hairpin_tbl_lock))
		return;
	hash_del(&hpe->hairpin_hlist);
	mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);

	/* hpe->hp holds an ERR_PTR when hairpin creation failed */
	if (!IS_ERR_OR_NULL(hpe->hp)) {
		netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
			   dev_name(hpe->hp->pair->peer_mdev->device));

		mlx5e_hairpin_destroy(hpe->hp);
	}

	/* all flows must have detached themselves before the last put */
	WARN_ON(!list_empty(&hpe->flows));
	kfree(hpe);
}
847e4f9abbdSVlad Buslov 
/* Priority bucket used when the flow spec does not pin down a specific
 * VLAN PCP value (no VLAN match, or no priority mask).
 */
#define UNKNOWN_MATCH_PRIO 8

/* Derive the hairpin match priority (*match_prio) from the flow spec's
 * VLAN PCP match.
 *
 * Only a full-mask (0x7) PCP match is supported; a partial mask — or,
 * when DCB is compiled in, a port trust state other than PCP — is
 * rejected with -EOPNOTSUPP.  Flows without a PCP match map to
 * UNKNOWN_MATCH_PRIO.  Returns 0 on success.
 */
static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
				  struct mlx5_flow_spec *spec, u8 *match_prio,
				  struct netlink_ext_ack *extack)
{
	void *headers_c, *headers_v;
	u8 prio_val, prio_mask = 0;
	bool vlan_present;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "only PCP trust state supported for hairpin");
		return -EOPNOTSUPP;
	}
#endif
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);

	vlan_present = MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag);
	if (vlan_present) {
		prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
		prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
	}

	if (!vlan_present || !prio_mask) {
		prio_val = UNKNOWN_MATCH_PRIO;
	} else if (prio_mask != 0x7) {
		NL_SET_ERR_MSG_MOD(extack,
				   "masked priority match not supported for hairpin");
		return -EOPNOTSUPP;
	}

	*match_prio = prio_val;
	return 0;
}
885106be53bSOr Gerlitz 
/* Attach a NIC flow to a hairpin entry towards the mirred peer device,
 * creating the entry and the underlying hairpin pair on first use.
 *
 * Concurrency: the hairpin table is protected by hairpin_tbl_lock.  A
 * new entry is hashed *before* the (slow) hairpin creation so the lock
 * is not held across firmware commands; concurrent users that find the
 * entry wait on hpe->res_ready outside the lock, then check whether
 * hpe->hp holds an ERR_PTR (creation failed).
 *
 * Returns 0 on success; on error the reference taken on hpe is dropped.
 */
static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5e_tc_flow_parse_attr *parse_attr,
				  struct netlink_ext_ack *extack)
{
	int peer_ifindex = parse_attr->mirred_ifindex[0];
	struct mlx5_hairpin_params params;
	struct mlx5_core_dev *peer_mdev;
	struct mlx5e_hairpin_entry *hpe;
	struct mlx5e_hairpin *hp;
	u64 link_speed64;
	u32 link_speed;
	u8 match_prio;
	u16 peer_id;
	int err;

	/* both sides of the hairpin must support it */
	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
	if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
		NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
		return -EOPNOTSUPP;
	}

	peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
	err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio,
				     extack);
	if (err)
		return err;

	mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
	hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
	if (hpe) {
		/* entry exists (ref taken); wait for its creator to
		 * finish setting up the hairpin before using it
		 */
		mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
		wait_for_completion(&hpe->res_ready);

		if (IS_ERR(hpe->hp)) {
			err = -EREMOTEIO;
			goto out_err;
		}
		goto attach_flow;
	}

	hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
	if (!hpe) {
		mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
		return -ENOMEM;
	}

	spin_lock_init(&hpe->flows_lock);
	INIT_LIST_HEAD(&hpe->flows);
	INIT_LIST_HEAD(&hpe->dead_peer_wait_list);
	hpe->peer_vhca_id = peer_id;
	hpe->prio = match_prio;
	refcount_set(&hpe->refcnt, 1);
	init_completion(&hpe->res_ready);

	/* publish the entry before creating the hairpin; waiters block
	 * on res_ready until hpe->hp is set below
	 */
	hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist,
		 hash_hairpin_info(peer_id, match_prio));
	mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);

	/* clamp the buffer size between the device min/max caps */
	params.log_data_size = 15;
	params.log_data_size = min_t(u8, params.log_data_size,
				     MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
	params.log_data_size = max_t(u8, params.log_data_size,
				     MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz));

	params.log_num_packets = params.log_data_size -
				 MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev);
	params.log_num_packets = min_t(u8, params.log_num_packets,
				       MLX5_CAP_GEN(priv->mdev, log_max_hairpin_num_packets));

	params.q_counter = priv->q_counter;
	/* set hairpin pair per each 50Gbs share of the link */
	mlx5e_port_max_linkspeed(priv->mdev, &link_speed);
	link_speed = max_t(u32, link_speed, 50000);
	link_speed64 = link_speed;
	do_div(link_speed64, 50000);
	params.num_channels = link_speed64;

	hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
	hpe->hp = hp;
	/* wake any waiters even on failure; they see the ERR_PTR */
	complete_all(&hpe->res_ready);
	if (IS_ERR(hp)) {
		err = PTR_ERR(hp);
		goto out_err;
	}

	netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
		   hp->tirn, hp->pair->rqn[0],
		   dev_name(hp->pair->peer_mdev->device),
		   hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);

attach_flow:
	/* multi-channel hairpins steer via the RSS TTC table, single
	 * channel ones directly via the TIR
	 */
	if (hpe->hp->num_channels > 1) {
		flow_flag_set(flow, HAIRPIN_RSS);
		flow->nic_attr->hairpin_ft = hpe->hp->ttc.ft.t;
	} else {
		flow->nic_attr->hairpin_tirn = hpe->hp->tirn;
	}

	flow->hpe = hpe;
	spin_lock(&hpe->flows_lock);
	list_add(&flow->hairpin, &hpe->flows);
	spin_unlock(&hpe->flows_lock);

	return 0;

out_err:
	mlx5e_hairpin_put(priv, hpe);
	return err;
}
9965c65c564SOr Gerlitz 
9975c65c564SOr Gerlitz static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
9985c65c564SOr Gerlitz 				   struct mlx5e_tc_flow *flow)
9995c65c564SOr Gerlitz {
10005a7e5bcbSVlad Buslov 	/* flow wasn't fully initialized */
1001e4f9abbdSVlad Buslov 	if (!flow->hpe)
10025a7e5bcbSVlad Buslov 		return;
10035a7e5bcbSVlad Buslov 
100473edca73SVlad Buslov 	spin_lock(&flow->hpe->flows_lock);
10055c65c564SOr Gerlitz 	list_del(&flow->hairpin);
100673edca73SVlad Buslov 	spin_unlock(&flow->hpe->flows_lock);
100773edca73SVlad Buslov 
1008e4f9abbdSVlad Buslov 	mlx5e_hairpin_put(priv, flow->hpe);
1009e4f9abbdSVlad Buslov 	flow->hpe = NULL;
10105c65c564SOr Gerlitz }
10115c65c564SOr Gerlitz 
/* Offload a flow to the NIC (non-eswitch) TC steering table.
 *
 * Builds the destination list — hairpin TIR or hairpin RSS table, the
 * vlan table for plain forwarding, and an optional flow counter —
 * attaches a modify-header if requested, lazily creates the TC flow
 * table on first use (under t_lock), and inserts the rule into it.
 *
 * NOTE(review): error paths here return without releasing the counter,
 * mod_hdr, or hairpin attachment — presumably mlx5e_tc_del_nic_flow()
 * is invoked by the caller on failure; confirm against the call site.
 */
static int
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow_parse_attr *parse_attr,
		      struct mlx5e_tc_flow *flow,
		      struct netlink_ext_ack *extack)
{
	struct mlx5_flow_context *flow_context = &parse_attr->spec.flow_context;
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_flow_act flow_act = {
		.action = attr->action,
		.flags    = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_fc *counter = NULL;
	int err, dest_ix = 0;

	flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
	flow_context->flow_tag = attr->flow_tag;

	if (flow_flag_test(flow, HAIRPIN)) {
		err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
		if (err)
			return err;

		/* RSS hairpins steer to the TTC table, plain hairpins
		 * directly to the TIR (set up by hairpin_flow_add above)
		 */
		if (flow_flag_test(flow, HAIRPIN_RSS)) {
			dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[dest_ix].ft = attr->hairpin_ft;
		} else {
			dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
			dest[dest_ix].tir_num = attr->hairpin_tirn;
		}
		dest_ix++;
	} else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest[dest_ix].ft = priv->fs.vlan.ft.t;
		dest_ix++;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(dev, true);
		if (IS_ERR(counter))
			return PTR_ERR(counter);

		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[dest_ix].counter_id = mlx5_fc_id(counter);
		dest_ix++;
		attr->counter = counter;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
		flow_act.modify_hdr = attr->modify_hdr;
		/* the parsed actions are consumed (or useless on error)
		 * once translated into attr->modify_hdr — free them
		 * before checking err
		 */
		dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
		if (err)
			return err;
	}

	/* lazily create the shared TC table on first offloaded flow */
	mutex_lock(&priv->fs.tc.t_lock);
	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
		struct mlx5_flow_table_attr ft_attr = {};
		int tc_grp_size, tc_tbl_size, tc_num_grps;
		u32 max_flow_counter;

		max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
				    MLX5_CAP_GEN(dev, max_flow_counter_15_0);

		/* size the table by counter and flow-table capabilities */
		tc_grp_size = min_t(int, max_flow_counter, MLX5E_TC_TABLE_MAX_GROUP_SIZE);

		tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
				    BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));
		tc_num_grps = MLX5E_TC_TABLE_NUM_GROUPS;

		ft_attr.prio = MLX5E_TC_PRIO;
		ft_attr.max_fte = tc_tbl_size;
		ft_attr.level = MLX5E_TC_FT_LEVEL;
		ft_attr.autogroup.max_num_groups = tc_num_grps;
		priv->fs.tc.t =
			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
							    &ft_attr);
		if (IS_ERR(priv->fs.tc.t)) {
			mutex_unlock(&priv->fs.tc.t_lock);
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to create tc offload table\n");
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			return PTR_ERR(priv->fs.tc.t);
		}
	}

	if (attr->match_level != MLX5_MATCH_NONE)
		parse_attr->spec.match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	flow->rule[0] = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
					    &flow_act, dest, dest_ix);
	mutex_unlock(&priv->fs.tc.t_lock);

	return PTR_ERR_OR_ZERO(flow->rule[0]);
}
1111e8f887acSAmir Vadai 
/* Release everything set up by mlx5e_tc_add_nic_flow(): the steering
 * rule, the flow counter, the shared TC table (when this was the last
 * NIC-offloaded filter), the modify-header, and the hairpin attachment.
 */
static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	struct mlx5_fc *counter = NULL;

	counter = attr->counter;
	/* rule[0] may hold an ERR_PTR/NULL if offload failed mid-way */
	if (!IS_ERR_OR_NULL(flow->rule[0]))
		mlx5_del_flow_rules(flow->rule[0]);
	mlx5_fc_destroy(priv->mdev, counter);

	/* drop the shared TC table once no NIC-offloaded filters remain */
	mutex_lock(&priv->fs.tc.t_lock);
	if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) && priv->fs.tc.t) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
	mutex_unlock(&priv->fs.tc.t_lock);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);

	if (flow_flag_test(flow, HAIRPIN))
		mlx5e_hairpin_flow_del(priv, flow);
}
1136d85cdccbSOr Gerlitz 
/* Forward declarations: the encap attach/detach helpers are static and
 * therefore defined later in this file; they are used by the FDB
 * offload paths below.
 */
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow, int out_index);

static int mlx5e_attach_encap(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow,
			      struct net_device *mirred_dev,
			      int out_index,
			      struct netlink_ext_ack *extack,
			      struct net_device **encap_dev,
			      bool *encap_valid);
11473c37745eSOr Gerlitz 
/* Install the offloaded FDB rule(s) for a flow.
 *
 * Connection-tracking flows are delegated entirely to the CT offload
 * code.  Otherwise a primary offloaded rule is added and, when the
 * action list is split across tables (attr->split_count != 0), a second
 * forward rule as well (stored in flow->rule[1]; the primary rule is
 * rolled back if the second one fails).
 *
 * Returns the primary rule, or an ERR_PTR on failure.
 */
static struct mlx5_flow_handle *
mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
			   struct mlx5e_tc_flow *flow,
			   struct mlx5_flow_spec *spec,
			   struct mlx5_esw_flow_attr *attr)
{
	struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
	struct mlx5_flow_handle *rule;

	if (flow_flag_test(flow, CT)) {
		mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;

		return mlx5_tc_ct_flow_offload(flow->priv, flow, spec, attr,
					       mod_hdr_acts);
	}

	rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
	if (IS_ERR(rule))
		return rule;

	if (attr->split_count) {
		flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
		if (IS_ERR(flow->rule[1])) {
			mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
			return flow->rule[1];
		}
	}

	return rule;
}
11786d2a3ed0SOr Gerlitz 
/* Remove the rule(s) installed by mlx5e_tc_offload_fdb_rules(),
 * clearing the OFFLOADED flag.  CT flows are delegated to the CT
 * offload code; otherwise the optional forward rule (rule[1]) is
 * removed before the primary rule (rule[0]).
 */
static void
mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
			     struct mlx5e_tc_flow *flow,
			     struct mlx5_esw_flow_attr *attr)
{
	flow_flag_clear(flow, OFFLOADED);

	if (flow_flag_test(flow, CT)) {
		mlx5_tc_ct_delete_flow(flow->priv, flow, attr);
		return;
	}

	if (attr->split_count)
		mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);

	mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
}
11966d2a3ed0SOr Gerlitz 
11975dbe906fSPaul Blakey static struct mlx5_flow_handle *
11985dbe906fSPaul Blakey mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
11995dbe906fSPaul Blakey 			      struct mlx5e_tc_flow *flow,
1200178f69b4SEli Cohen 			      struct mlx5_flow_spec *spec)
12015dbe906fSPaul Blakey {
1202178f69b4SEli Cohen 	struct mlx5_esw_flow_attr slow_attr;
12035dbe906fSPaul Blakey 	struct mlx5_flow_handle *rule;
12045dbe906fSPaul Blakey 
1205178f69b4SEli Cohen 	memcpy(&slow_attr, flow->esw_attr, sizeof(slow_attr));
1206178f69b4SEli Cohen 	slow_attr.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1207178f69b4SEli Cohen 	slow_attr.split_count = 0;
1208178f69b4SEli Cohen 	slow_attr.flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
12095dbe906fSPaul Blakey 
1210178f69b4SEli Cohen 	rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, &slow_attr);
12115dbe906fSPaul Blakey 	if (!IS_ERR(rule))
1212226f2ca3SVlad Buslov 		flow_flag_set(flow, SLOW);
12135dbe906fSPaul Blakey 
12145dbe906fSPaul Blakey 	return rule;
12155dbe906fSPaul Blakey }
12165dbe906fSPaul Blakey 
12175dbe906fSPaul Blakey static void
12185dbe906fSPaul Blakey mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
1219178f69b4SEli Cohen 				  struct mlx5e_tc_flow *flow)
12205dbe906fSPaul Blakey {
1221178f69b4SEli Cohen 	struct mlx5_esw_flow_attr slow_attr;
1222178f69b4SEli Cohen 
1223178f69b4SEli Cohen 	memcpy(&slow_attr, flow->esw_attr, sizeof(slow_attr));
1224178f69b4SEli Cohen 	slow_attr.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1225178f69b4SEli Cohen 	slow_attr.split_count = 0;
1226178f69b4SEli Cohen 	slow_attr.flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
1227178f69b4SEli Cohen 	mlx5e_tc_unoffload_fdb_rules(esw, flow, &slow_attr);
1228226f2ca3SVlad Buslov 	flow_flag_clear(flow, SLOW);
12295dbe906fSPaul Blakey }
12305dbe906fSPaul Blakey 
/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
 * function.
 */
static void unready_flow_add(struct mlx5e_tc_flow *flow,
			     struct list_head *unready_flows)
{
	/* mark the flow as not (yet) offloaded and queue it for a
	 * later attempt
	 */
	flow_flag_set(flow, NOT_READY);
	list_add_tail(&flow->unready, unready_flows);
}
1240ad86755bSVlad Buslov 
/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
 * function.
 */
static void unready_flow_del(struct mlx5e_tc_flow *flow)
{
	/* inverse of unready_flow_add(): dequeue and clear the flag */
	list_del(&flow->unready);
	flow_flag_clear(flow, NOT_READY);
}
1249ad86755bSVlad Buslov 
1250b4a23329SRoi Dayan static void add_unready_flow(struct mlx5e_tc_flow *flow)
1251b4a23329SRoi Dayan {
1252b4a23329SRoi Dayan 	struct mlx5_rep_uplink_priv *uplink_priv;
1253b4a23329SRoi Dayan 	struct mlx5e_rep_priv *rpriv;
1254b4a23329SRoi Dayan 	struct mlx5_eswitch *esw;
1255b4a23329SRoi Dayan 
1256b4a23329SRoi Dayan 	esw = flow->priv->mdev->priv.eswitch;
1257b4a23329SRoi Dayan 	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
1258b4a23329SRoi Dayan 	uplink_priv = &rpriv->uplink_priv;
1259b4a23329SRoi Dayan 
1260ad86755bSVlad Buslov 	mutex_lock(&uplink_priv->unready_flows_lock);
1261ad86755bSVlad Buslov 	unready_flow_add(flow, &uplink_priv->unready_flows);
1262ad86755bSVlad Buslov 	mutex_unlock(&uplink_priv->unready_flows_lock);
1263b4a23329SRoi Dayan }
1264b4a23329SRoi Dayan 
1265b4a23329SRoi Dayan static void remove_unready_flow(struct mlx5e_tc_flow *flow)
1266b4a23329SRoi Dayan {
1267ad86755bSVlad Buslov 	struct mlx5_rep_uplink_priv *uplink_priv;
1268ad86755bSVlad Buslov 	struct mlx5e_rep_priv *rpriv;
1269ad86755bSVlad Buslov 	struct mlx5_eswitch *esw;
1270ad86755bSVlad Buslov 
1271ad86755bSVlad Buslov 	esw = flow->priv->mdev->priv.eswitch;
1272ad86755bSVlad Buslov 	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
1273ad86755bSVlad Buslov 	uplink_priv = &rpriv->uplink_priv;
1274ad86755bSVlad Buslov 
1275ad86755bSVlad Buslov 	mutex_lock(&uplink_priv->unready_flows_lock);
1276ad86755bSVlad Buslov 	unready_flow_del(flow);
1277ad86755bSVlad Buslov 	mutex_unlock(&uplink_priv->unready_flows_lock);
1278b4a23329SRoi Dayan }
1279b4a23329SRoi Dayan 
1280c83954abSRabie Loulou static int
128174491de9SMark Bloch mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
1282e98bedf5SEli Britstein 		      struct mlx5e_tc_flow *flow,
1283e98bedf5SEli Britstein 		      struct netlink_ext_ack *extack)
1284adb4c123SOr Gerlitz {
1285adb4c123SOr Gerlitz 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1286aa0cbbaeSOr Gerlitz 	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
12877040632dSTonghao Zhang 	struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
12883c37745eSOr Gerlitz 	struct net_device *out_dev, *encap_dev = NULL;
1289b8aee822SMark Bloch 	struct mlx5_fc *counter = NULL;
12903c37745eSOr Gerlitz 	struct mlx5e_rep_priv *rpriv;
12913c37745eSOr Gerlitz 	struct mlx5e_priv *out_priv;
12920ad060eeSRoi Dayan 	bool encap_valid = true;
129339ac237cSPaul Blakey 	u32 max_prio, max_chain;
12940ad060eeSRoi Dayan 	int err = 0;
1295f493f155SEli Britstein 	int out_index;
12968b32580dSOr Gerlitz 
129739ac237cSPaul Blakey 	if (!mlx5_esw_chains_prios_supported(esw) && attr->prio != 1) {
129861644c3dSRoi Dayan 		NL_SET_ERR_MSG_MOD(extack,
129961644c3dSRoi Dayan 				   "E-switch priorities unsupported, upgrade FW");
1300d14f6f2aSOr Gerlitz 		return -EOPNOTSUPP;
1301d14f6f2aSOr Gerlitz 	}
1302e52c2802SPaul Blakey 
130384179981SPaul Blakey 	/* We check chain range only for tc flows.
130484179981SPaul Blakey 	 * For ft flows, we checked attr->chain was originally 0 and set it to
130584179981SPaul Blakey 	 * FDB_FT_CHAIN which is outside tc range.
130684179981SPaul Blakey 	 * See mlx5e_rep_setup_ft_cb().
130784179981SPaul Blakey 	 */
130839ac237cSPaul Blakey 	max_chain = mlx5_esw_chains_get_chain_range(esw);
130984179981SPaul Blakey 	if (!mlx5e_is_ft_flow(flow) && attr->chain > max_chain) {
131061644c3dSRoi Dayan 		NL_SET_ERR_MSG_MOD(extack,
131161644c3dSRoi Dayan 				   "Requested chain is out of supported range");
13125a7e5bcbSVlad Buslov 		return -EOPNOTSUPP;
1313bf07aa73SPaul Blakey 	}
1314bf07aa73SPaul Blakey 
131539ac237cSPaul Blakey 	max_prio = mlx5_esw_chains_get_prio_range(esw);
1316bf07aa73SPaul Blakey 	if (attr->prio > max_prio) {
131761644c3dSRoi Dayan 		NL_SET_ERR_MSG_MOD(extack,
131861644c3dSRoi Dayan 				   "Requested priority is out of supported range");
13195a7e5bcbSVlad Buslov 		return -EOPNOTSUPP;
1320bf07aa73SPaul Blakey 	}
1321bf07aa73SPaul Blakey 
1322f493f155SEli Britstein 	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
13238c4dc42bSEli Britstein 		int mirred_ifindex;
13248c4dc42bSEli Britstein 
1325f493f155SEli Britstein 		if (!(attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
1326f493f155SEli Britstein 			continue;
1327f493f155SEli Britstein 
13287040632dSTonghao Zhang 		mirred_ifindex = parse_attr->mirred_ifindex[out_index];
13293c37745eSOr Gerlitz 		out_dev = __dev_get_by_index(dev_net(priv->netdev),
13308c4dc42bSEli Britstein 					     mirred_ifindex);
1331733d4f36SRoi Dayan 		err = mlx5e_attach_encap(priv, flow, out_dev, out_index,
13320ad060eeSRoi Dayan 					 extack, &encap_dev, &encap_valid);
13330ad060eeSRoi Dayan 		if (err)
13345a7e5bcbSVlad Buslov 			return err;
13350ad060eeSRoi Dayan 
13363c37745eSOr Gerlitz 		out_priv = netdev_priv(encap_dev);
13373c37745eSOr Gerlitz 		rpriv = out_priv->ppriv;
13381cc26d74SEli Britstein 		attr->dests[out_index].rep = rpriv->rep;
13391cc26d74SEli Britstein 		attr->dests[out_index].mdev = out_priv->mdev;
13403c37745eSOr Gerlitz 	}
13413c37745eSOr Gerlitz 
13428b32580dSOr Gerlitz 	err = mlx5_eswitch_add_vlan_action(esw, attr);
1343c83954abSRabie Loulou 	if (err)
13445a7e5bcbSVlad Buslov 		return err;
1345adb4c123SOr Gerlitz 
1346d7e75a32SOr Gerlitz 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
13471a9527bbSOr Gerlitz 		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
13486ae4a6a5SPaul Blakey 		dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
1349c83954abSRabie Loulou 		if (err)
13505a7e5bcbSVlad Buslov 			return err;
1351d7e75a32SOr Gerlitz 	}
1352d7e75a32SOr Gerlitz 
1353b8aee822SMark Bloch 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
1354f9392795SShahar Klein 		counter = mlx5_fc_create(attr->counter_dev, true);
13555a7e5bcbSVlad Buslov 		if (IS_ERR(counter))
13565a7e5bcbSVlad Buslov 			return PTR_ERR(counter);
1357b8aee822SMark Bloch 
1358b8aee822SMark Bloch 		attr->counter = counter;
1359b8aee822SMark Bloch 	}
1360b8aee822SMark Bloch 
13610ad060eeSRoi Dayan 	/* we get here if one of the following takes place:
13620ad060eeSRoi Dayan 	 * (1) there's no error
13630ad060eeSRoi Dayan 	 * (2) there's an encap action and we don't have valid neigh
13643c37745eSOr Gerlitz 	 */
1365bc1d75faSRoi Dayan 	if (!encap_valid)
1366178f69b4SEli Cohen 		flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec);
1367bc1d75faSRoi Dayan 	else
13686d2a3ed0SOr Gerlitz 		flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);
13695dbe906fSPaul Blakey 
13705a7e5bcbSVlad Buslov 	if (IS_ERR(flow->rule[0]))
13715a7e5bcbSVlad Buslov 		return PTR_ERR(flow->rule[0]);
1372226f2ca3SVlad Buslov 	else
1373226f2ca3SVlad Buslov 		flow_flag_set(flow, OFFLOADED);
1374c83954abSRabie Loulou 
13755dbe906fSPaul Blakey 	return 0;
1376aa0cbbaeSOr Gerlitz }
1377d85cdccbSOr Gerlitz 
13789272e3dfSYevgeny Kliteynik static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow)
13799272e3dfSYevgeny Kliteynik {
13809272e3dfSYevgeny Kliteynik 	struct mlx5_flow_spec *spec = &flow->esw_attr->parse_attr->spec;
13819272e3dfSYevgeny Kliteynik 	void *headers_v = MLX5_ADDR_OF(fte_match_param,
13829272e3dfSYevgeny Kliteynik 				       spec->match_value,
13839272e3dfSYevgeny Kliteynik 				       misc_parameters_3);
13849272e3dfSYevgeny Kliteynik 	u32 geneve_tlv_opt_0_data = MLX5_GET(fte_match_set_misc3,
13859272e3dfSYevgeny Kliteynik 					     headers_v,
13869272e3dfSYevgeny Kliteynik 					     geneve_tlv_option_0_data);
13879272e3dfSYevgeny Kliteynik 
13889272e3dfSYevgeny Kliteynik 	return !!geneve_tlv_opt_0_data;
13899272e3dfSYevgeny Kliteynik }
13909272e3dfSYevgeny Kliteynik 
1391d85cdccbSOr Gerlitz static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
1392d85cdccbSOr Gerlitz 				  struct mlx5e_tc_flow *flow)
1393d85cdccbSOr Gerlitz {
1394d85cdccbSOr Gerlitz 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1395d7e75a32SOr Gerlitz 	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
1396f493f155SEli Britstein 	int out_index;
1397d85cdccbSOr Gerlitz 
13980a7fcb78SPaul Blakey 	mlx5e_put_flow_tunnel_id(flow);
13990a7fcb78SPaul Blakey 
1400226f2ca3SVlad Buslov 	if (flow_flag_test(flow, NOT_READY)) {
1401b4a23329SRoi Dayan 		remove_unready_flow(flow);
1402ef06c9eeSRoi Dayan 		kvfree(attr->parse_attr);
1403ef06c9eeSRoi Dayan 		return;
1404ef06c9eeSRoi Dayan 	}
1405ef06c9eeSRoi Dayan 
1406226f2ca3SVlad Buslov 	if (mlx5e_is_offloaded_flow(flow)) {
1407226f2ca3SVlad Buslov 		if (flow_flag_test(flow, SLOW))
1408178f69b4SEli Cohen 			mlx5e_tc_unoffload_from_slow_path(esw, flow);
14095dbe906fSPaul Blakey 		else
14105dbe906fSPaul Blakey 			mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
14115dbe906fSPaul Blakey 	}
1412d85cdccbSOr Gerlitz 
14139272e3dfSYevgeny Kliteynik 	if (mlx5_flow_has_geneve_opt(flow))
14149272e3dfSYevgeny Kliteynik 		mlx5_geneve_tlv_option_del(priv->mdev->geneve);
14159272e3dfSYevgeny Kliteynik 
1416513f8f7fSOr Gerlitz 	mlx5_eswitch_del_vlan_action(esw, attr);
1417d85cdccbSOr Gerlitz 
1418f493f155SEli Britstein 	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
14192a4b6526SVlad Buslov 		if (attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP) {
14208c4dc42bSEli Britstein 			mlx5e_detach_encap(priv, flow, out_index);
14212a4b6526SVlad Buslov 			kfree(attr->parse_attr->tun_info[out_index]);
14222a4b6526SVlad Buslov 		}
1423f493f155SEli Britstein 	kvfree(attr->parse_attr);
1424d7e75a32SOr Gerlitz 
1425513f8f7fSOr Gerlitz 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
14261a9527bbSOr Gerlitz 		mlx5e_detach_mod_hdr(priv, flow);
1427b8aee822SMark Bloch 
1428b8aee822SMark Bloch 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
1429f9392795SShahar Klein 		mlx5_fc_destroy(attr->counter_dev, attr->counter);
1430d85cdccbSOr Gerlitz }
1431d85cdccbSOr Gerlitz 
1432232c0013SHadar Hen Zion void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
14332a1f1768SVlad Buslov 			      struct mlx5e_encap_entry *e,
14342a1f1768SVlad Buslov 			      struct list_head *flow_list)
1435232c0013SHadar Hen Zion {
14363c37745eSOr Gerlitz 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1437178f69b4SEli Cohen 	struct mlx5_esw_flow_attr *esw_attr;
14386d2a3ed0SOr Gerlitz 	struct mlx5_flow_handle *rule;
14396d2a3ed0SOr Gerlitz 	struct mlx5_flow_spec *spec;
1440232c0013SHadar Hen Zion 	struct mlx5e_tc_flow *flow;
1441232c0013SHadar Hen Zion 	int err;
1442232c0013SHadar Hen Zion 
14432b688ea5SMaor Gottlieb 	e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
144454c177caSOz Shlomo 						     e->reformat_type,
1445232c0013SHadar Hen Zion 						     e->encap_size, e->encap_header,
14462b688ea5SMaor Gottlieb 						     MLX5_FLOW_NAMESPACE_FDB);
14472b688ea5SMaor Gottlieb 	if (IS_ERR(e->pkt_reformat)) {
14482b688ea5SMaor Gottlieb 		mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %lu\n",
14492b688ea5SMaor Gottlieb 			       PTR_ERR(e->pkt_reformat));
1450232c0013SHadar Hen Zion 		return;
1451232c0013SHadar Hen Zion 	}
1452232c0013SHadar Hen Zion 	e->flags |= MLX5_ENCAP_ENTRY_VALID;
1453f6dfb4c3SHadar Hen Zion 	mlx5e_rep_queue_neigh_stats_work(priv);
1454232c0013SHadar Hen Zion 
14552a1f1768SVlad Buslov 	list_for_each_entry(flow, flow_list, tmp_list) {
14568c4dc42bSEli Britstein 		bool all_flow_encaps_valid = true;
14578c4dc42bSEli Britstein 		int i;
14588c4dc42bSEli Britstein 
145995435ad7SVlad Buslov 		if (!mlx5e_is_offloaded_flow(flow))
146095435ad7SVlad Buslov 			continue;
14613c37745eSOr Gerlitz 		esw_attr = flow->esw_attr;
14626d2a3ed0SOr Gerlitz 		spec = &esw_attr->parse_attr->spec;
14636d2a3ed0SOr Gerlitz 
14642b688ea5SMaor Gottlieb 		esw_attr->dests[flow->tmp_efi_index].pkt_reformat = e->pkt_reformat;
14652a1f1768SVlad Buslov 		esw_attr->dests[flow->tmp_efi_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
14668c4dc42bSEli Britstein 		/* Flow can be associated with multiple encap entries.
14678c4dc42bSEli Britstein 		 * Before offloading the flow verify that all of them have
14688c4dc42bSEli Britstein 		 * a valid neighbour.
14698c4dc42bSEli Britstein 		 */
14708c4dc42bSEli Britstein 		for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
14718c4dc42bSEli Britstein 			if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP))
14728c4dc42bSEli Britstein 				continue;
14738c4dc42bSEli Britstein 			if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP_VALID)) {
14748c4dc42bSEli Britstein 				all_flow_encaps_valid = false;
14758c4dc42bSEli Britstein 				break;
14768c4dc42bSEli Britstein 			}
14778c4dc42bSEli Britstein 		}
14788c4dc42bSEli Britstein 		/* Do not offload flows with unresolved neighbors */
14798c4dc42bSEli Britstein 		if (!all_flow_encaps_valid)
14802a1f1768SVlad Buslov 			continue;
14815dbe906fSPaul Blakey 		/* update from slow path rule to encap rule */
14826d2a3ed0SOr Gerlitz 		rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, esw_attr);
14836d2a3ed0SOr Gerlitz 		if (IS_ERR(rule)) {
14846d2a3ed0SOr Gerlitz 			err = PTR_ERR(rule);
1485232c0013SHadar Hen Zion 			mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
1486232c0013SHadar Hen Zion 				       err);
14872a1f1768SVlad Buslov 			continue;
1488232c0013SHadar Hen Zion 		}
14895dbe906fSPaul Blakey 
1490178f69b4SEli Cohen 		mlx5e_tc_unoffload_from_slow_path(esw, flow);
14916d2a3ed0SOr Gerlitz 		flow->rule[0] = rule;
1492226f2ca3SVlad Buslov 		/* was unset when slow path rule removed */
1493226f2ca3SVlad Buslov 		flow_flag_set(flow, OFFLOADED);
1494232c0013SHadar Hen Zion 	}
1495232c0013SHadar Hen Zion }
1496232c0013SHadar Hen Zion 
1497232c0013SHadar Hen Zion void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
14982a1f1768SVlad Buslov 			      struct mlx5e_encap_entry *e,
14992a1f1768SVlad Buslov 			      struct list_head *flow_list)
1500232c0013SHadar Hen Zion {
15013c37745eSOr Gerlitz 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
15025dbe906fSPaul Blakey 	struct mlx5_flow_handle *rule;
15035dbe906fSPaul Blakey 	struct mlx5_flow_spec *spec;
1504232c0013SHadar Hen Zion 	struct mlx5e_tc_flow *flow;
15055dbe906fSPaul Blakey 	int err;
1506232c0013SHadar Hen Zion 
15072a1f1768SVlad Buslov 	list_for_each_entry(flow, flow_list, tmp_list) {
150895435ad7SVlad Buslov 		if (!mlx5e_is_offloaded_flow(flow))
150995435ad7SVlad Buslov 			continue;
15105dbe906fSPaul Blakey 		spec = &flow->esw_attr->parse_attr->spec;
15115dbe906fSPaul Blakey 
15125dbe906fSPaul Blakey 		/* update from encap rule to slow path rule */
1513178f69b4SEli Cohen 		rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec);
15148c4dc42bSEli Britstein 		/* mark the flow's encap dest as non-valid */
15152a1f1768SVlad Buslov 		flow->esw_attr->dests[flow->tmp_efi_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;
15165dbe906fSPaul Blakey 
15175dbe906fSPaul Blakey 		if (IS_ERR(rule)) {
15185dbe906fSPaul Blakey 			err = PTR_ERR(rule);
15195dbe906fSPaul Blakey 			mlx5_core_warn(priv->mdev, "Failed to update slow path (encap) flow, %d\n",
15205dbe906fSPaul Blakey 				       err);
15212a1f1768SVlad Buslov 			continue;
15225dbe906fSPaul Blakey 		}
15235dbe906fSPaul Blakey 
15246d2a3ed0SOr Gerlitz 		mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->esw_attr);
15255dbe906fSPaul Blakey 		flow->rule[0] = rule;
1526226f2ca3SVlad Buslov 		/* was unset when fast path rule removed */
1527226f2ca3SVlad Buslov 		flow_flag_set(flow, OFFLOADED);
1528232c0013SHadar Hen Zion 	}
1529232c0013SHadar Hen Zion 
153061c806daSOr Gerlitz 	/* we know that the encap is valid */
1531232c0013SHadar Hen Zion 	e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
15322b688ea5SMaor Gottlieb 	mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
1533232c0013SHadar Hen Zion }
1534232c0013SHadar Hen Zion 
1535b8aee822SMark Bloch static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
1536b8aee822SMark Bloch {
1537226f2ca3SVlad Buslov 	if (mlx5e_is_eswitch_flow(flow))
1538b8aee822SMark Bloch 		return flow->esw_attr->counter;
1539b8aee822SMark Bloch 	else
1540b8aee822SMark Bloch 		return flow->nic_attr->counter;
1541b8aee822SMark Bloch }
1542b8aee822SMark Bloch 
15432a1f1768SVlad Buslov /* Takes reference to all flows attached to encap and adds the flows to
15442a1f1768SVlad Buslov  * flow_list using 'tmp_list' list_head in mlx5e_tc_flow.
15452a1f1768SVlad Buslov  */
15462a1f1768SVlad Buslov void mlx5e_take_all_encap_flows(struct mlx5e_encap_entry *e, struct list_head *flow_list)
15472a1f1768SVlad Buslov {
15482a1f1768SVlad Buslov 	struct encap_flow_item *efi;
15492a1f1768SVlad Buslov 	struct mlx5e_tc_flow *flow;
15502a1f1768SVlad Buslov 
15512a1f1768SVlad Buslov 	list_for_each_entry(efi, &e->flows, list) {
15522a1f1768SVlad Buslov 		flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
15532a1f1768SVlad Buslov 		if (IS_ERR(mlx5e_flow_get(flow)))
15542a1f1768SVlad Buslov 			continue;
155595435ad7SVlad Buslov 		wait_for_completion(&flow->init_done);
15562a1f1768SVlad Buslov 
15572a1f1768SVlad Buslov 		flow->tmp_efi_index = efi->index;
15582a1f1768SVlad Buslov 		list_add(&flow->tmp_list, flow_list);
15592a1f1768SVlad Buslov 	}
15602a1f1768SVlad Buslov }
15612a1f1768SVlad Buslov 
15626a06c2f7SVlad Buslov /* Iterate over tmp_list of flows attached to flow_list head. */
15632a1f1768SVlad Buslov void mlx5e_put_encap_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list)
15646a06c2f7SVlad Buslov {
15656a06c2f7SVlad Buslov 	struct mlx5e_tc_flow *flow, *tmp;
15666a06c2f7SVlad Buslov 
15676a06c2f7SVlad Buslov 	list_for_each_entry_safe(flow, tmp, flow_list, tmp_list)
15686a06c2f7SVlad Buslov 		mlx5e_flow_put(priv, flow);
15696a06c2f7SVlad Buslov }
15706a06c2f7SVlad Buslov 
1571ac0d9176SVlad Buslov static struct mlx5e_encap_entry *
1572ac0d9176SVlad Buslov mlx5e_get_next_valid_encap(struct mlx5e_neigh_hash_entry *nhe,
1573ac0d9176SVlad Buslov 			   struct mlx5e_encap_entry *e)
1574ac0d9176SVlad Buslov {
1575ac0d9176SVlad Buslov 	struct mlx5e_encap_entry *next = NULL;
1576ac0d9176SVlad Buslov 
1577ac0d9176SVlad Buslov retry:
1578ac0d9176SVlad Buslov 	rcu_read_lock();
1579ac0d9176SVlad Buslov 
1580ac0d9176SVlad Buslov 	/* find encap with non-zero reference counter value */
1581ac0d9176SVlad Buslov 	for (next = e ?
1582ac0d9176SVlad Buslov 		     list_next_or_null_rcu(&nhe->encap_list,
1583ac0d9176SVlad Buslov 					   &e->encap_list,
1584ac0d9176SVlad Buslov 					   struct mlx5e_encap_entry,
1585ac0d9176SVlad Buslov 					   encap_list) :
1586ac0d9176SVlad Buslov 		     list_first_or_null_rcu(&nhe->encap_list,
1587ac0d9176SVlad Buslov 					    struct mlx5e_encap_entry,
1588ac0d9176SVlad Buslov 					    encap_list);
1589ac0d9176SVlad Buslov 	     next;
1590ac0d9176SVlad Buslov 	     next = list_next_or_null_rcu(&nhe->encap_list,
1591ac0d9176SVlad Buslov 					  &next->encap_list,
1592ac0d9176SVlad Buslov 					  struct mlx5e_encap_entry,
1593ac0d9176SVlad Buslov 					  encap_list))
1594ac0d9176SVlad Buslov 		if (mlx5e_encap_take(next))
1595ac0d9176SVlad Buslov 			break;
1596ac0d9176SVlad Buslov 
1597ac0d9176SVlad Buslov 	rcu_read_unlock();
1598ac0d9176SVlad Buslov 
1599ac0d9176SVlad Buslov 	/* release starting encap */
1600ac0d9176SVlad Buslov 	if (e)
1601ac0d9176SVlad Buslov 		mlx5e_encap_put(netdev_priv(e->out_dev), e);
1602ac0d9176SVlad Buslov 	if (!next)
1603ac0d9176SVlad Buslov 		return next;
1604ac0d9176SVlad Buslov 
1605ac0d9176SVlad Buslov 	/* wait for encap to be fully initialized */
1606ac0d9176SVlad Buslov 	wait_for_completion(&next->res_ready);
1607ac0d9176SVlad Buslov 	/* continue searching if encap entry is not in valid state after completion */
1608ac0d9176SVlad Buslov 	if (!(next->flags & MLX5_ENCAP_ENTRY_VALID)) {
1609ac0d9176SVlad Buslov 		e = next;
1610ac0d9176SVlad Buslov 		goto retry;
1611ac0d9176SVlad Buslov 	}
1612ac0d9176SVlad Buslov 
1613ac0d9176SVlad Buslov 	return next;
1614ac0d9176SVlad Buslov }
1615ac0d9176SVlad Buslov 
1616f6dfb4c3SHadar Hen Zion void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
1617f6dfb4c3SHadar Hen Zion {
1618f6dfb4c3SHadar Hen Zion 	struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
1619ac0d9176SVlad Buslov 	struct mlx5e_encap_entry *e = NULL;
1620f6dfb4c3SHadar Hen Zion 	struct mlx5e_tc_flow *flow;
1621f6dfb4c3SHadar Hen Zion 	struct mlx5_fc *counter;
1622f6dfb4c3SHadar Hen Zion 	struct neigh_table *tbl;
1623f6dfb4c3SHadar Hen Zion 	bool neigh_used = false;
1624f6dfb4c3SHadar Hen Zion 	struct neighbour *n;
162590bb7692SAriel Levkovich 	u64 lastuse;
1626f6dfb4c3SHadar Hen Zion 
1627f6dfb4c3SHadar Hen Zion 	if (m_neigh->family == AF_INET)
1628f6dfb4c3SHadar Hen Zion 		tbl = &arp_tbl;
1629f6dfb4c3SHadar Hen Zion #if IS_ENABLED(CONFIG_IPV6)
1630f6dfb4c3SHadar Hen Zion 	else if (m_neigh->family == AF_INET6)
16315cc3a8c6SSaeed Mahameed 		tbl = ipv6_stub->nd_tbl;
1632f6dfb4c3SHadar Hen Zion #endif
1633f6dfb4c3SHadar Hen Zion 	else
1634f6dfb4c3SHadar Hen Zion 		return;
1635f6dfb4c3SHadar Hen Zion 
1636ac0d9176SVlad Buslov 	/* mlx5e_get_next_valid_encap() releases previous encap before returning
1637ac0d9176SVlad Buslov 	 * next one.
1638ac0d9176SVlad Buslov 	 */
1639ac0d9176SVlad Buslov 	while ((e = mlx5e_get_next_valid_encap(nhe, e)) != NULL) {
16406a06c2f7SVlad Buslov 		struct mlx5e_priv *priv = netdev_priv(e->out_dev);
16415a7e5bcbSVlad Buslov 		struct encap_flow_item *efi, *tmp;
16426a06c2f7SVlad Buslov 		struct mlx5_eswitch *esw;
16436a06c2f7SVlad Buslov 		LIST_HEAD(flow_list);
1644948993f2SVlad Buslov 
16456a06c2f7SVlad Buslov 		esw = priv->mdev->priv.eswitch;
16466a06c2f7SVlad Buslov 		mutex_lock(&esw->offloads.encap_tbl_lock);
16475a7e5bcbSVlad Buslov 		list_for_each_entry_safe(efi, tmp, &e->flows, list) {
164879baaec7SEli Britstein 			flow = container_of(efi, struct mlx5e_tc_flow,
164979baaec7SEli Britstein 					    encaps[efi->index]);
16505a7e5bcbSVlad Buslov 			if (IS_ERR(mlx5e_flow_get(flow)))
16515a7e5bcbSVlad Buslov 				continue;
16526a06c2f7SVlad Buslov 			list_add(&flow->tmp_list, &flow_list);
16535a7e5bcbSVlad Buslov 
1654226f2ca3SVlad Buslov 			if (mlx5e_is_offloaded_flow(flow)) {
1655b8aee822SMark Bloch 				counter = mlx5e_tc_get_counter(flow);
165690bb7692SAriel Levkovich 				lastuse = mlx5_fc_query_lastuse(counter);
1657f6dfb4c3SHadar Hen Zion 				if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
1658f6dfb4c3SHadar Hen Zion 					neigh_used = true;
1659f6dfb4c3SHadar Hen Zion 					break;
1660f6dfb4c3SHadar Hen Zion 				}
1661f6dfb4c3SHadar Hen Zion 			}
1662f6dfb4c3SHadar Hen Zion 		}
16636a06c2f7SVlad Buslov 		mutex_unlock(&esw->offloads.encap_tbl_lock);
1664948993f2SVlad Buslov 
16656a06c2f7SVlad Buslov 		mlx5e_put_encap_flow_list(priv, &flow_list);
1666ac0d9176SVlad Buslov 		if (neigh_used) {
1667ac0d9176SVlad Buslov 			/* release current encap before breaking the loop */
16686a06c2f7SVlad Buslov 			mlx5e_encap_put(priv, e);
1669e36d4810SRoi Dayan 			break;
1670f6dfb4c3SHadar Hen Zion 		}
1671ac0d9176SVlad Buslov 	}
1672f6dfb4c3SHadar Hen Zion 
1673c786fe59SVlad Buslov 	trace_mlx5e_tc_update_neigh_used_value(nhe, neigh_used);
1674c786fe59SVlad Buslov 
1675f6dfb4c3SHadar Hen Zion 	if (neigh_used) {
1676f6dfb4c3SHadar Hen Zion 		nhe->reported_lastuse = jiffies;
1677f6dfb4c3SHadar Hen Zion 
1678f6dfb4c3SHadar Hen Zion 		/* find the relevant neigh according to the cached device and
1679f6dfb4c3SHadar Hen Zion 		 * dst ip pair
1680f6dfb4c3SHadar Hen Zion 		 */
1681f6dfb4c3SHadar Hen Zion 		n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev);
1682c7f7ba8dSRoi Dayan 		if (!n)
1683f6dfb4c3SHadar Hen Zion 			return;
1684f6dfb4c3SHadar Hen Zion 
1685f6dfb4c3SHadar Hen Zion 		neigh_event_send(n, NULL);
1686f6dfb4c3SHadar Hen Zion 		neigh_release(n);
1687f6dfb4c3SHadar Hen Zion 	}
1688f6dfb4c3SHadar Hen Zion }
1689f6dfb4c3SHadar Hen Zion 
169061086f39SVlad Buslov static void mlx5e_encap_dealloc(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
1691d85cdccbSOr Gerlitz {
1692948993f2SVlad Buslov 	WARN_ON(!list_empty(&e->flows));
16933c140dd5SVlad Buslov 
16943c140dd5SVlad Buslov 	if (e->compl_result > 0) {
1695232c0013SHadar Hen Zion 		mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
1696232c0013SHadar Hen Zion 
1697232c0013SHadar Hen Zion 		if (e->flags & MLX5_ENCAP_ENTRY_VALID)
16982b688ea5SMaor Gottlieb 			mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
16993c140dd5SVlad Buslov 	}
1700232c0013SHadar Hen Zion 
17012a4b6526SVlad Buslov 	kfree(e->tun_info);
1702232c0013SHadar Hen Zion 	kfree(e->encap_header);
1703ac0d9176SVlad Buslov 	kfree_rcu(e, rcu);
17045067b602SRoi Dayan }
1705948993f2SVlad Buslov 
170661086f39SVlad Buslov void mlx5e_encap_put(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
170761086f39SVlad Buslov {
170861086f39SVlad Buslov 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
170961086f39SVlad Buslov 
171061086f39SVlad Buslov 	if (!refcount_dec_and_mutex_lock(&e->refcnt, &esw->offloads.encap_tbl_lock))
171161086f39SVlad Buslov 		return;
171261086f39SVlad Buslov 	hash_del_rcu(&e->encap_hlist);
171361086f39SVlad Buslov 	mutex_unlock(&esw->offloads.encap_tbl_lock);
171461086f39SVlad Buslov 
171561086f39SVlad Buslov 	mlx5e_encap_dealloc(priv, e);
171661086f39SVlad Buslov }
171761086f39SVlad Buslov 
1718948993f2SVlad Buslov static void mlx5e_detach_encap(struct mlx5e_priv *priv,
1719948993f2SVlad Buslov 			       struct mlx5e_tc_flow *flow, int out_index)
1720948993f2SVlad Buslov {
172161086f39SVlad Buslov 	struct mlx5e_encap_entry *e = flow->encaps[out_index].e;
172261086f39SVlad Buslov 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
172361086f39SVlad Buslov 
1724948993f2SVlad Buslov 	/* flow wasn't fully initialized */
172561086f39SVlad Buslov 	if (!e)
1726948993f2SVlad Buslov 		return;
1727948993f2SVlad Buslov 
172861086f39SVlad Buslov 	mutex_lock(&esw->offloads.encap_tbl_lock);
1729948993f2SVlad Buslov 	list_del(&flow->encaps[out_index].list);
1730948993f2SVlad Buslov 	flow->encaps[out_index].e = NULL;
173161086f39SVlad Buslov 	if (!refcount_dec_and_test(&e->refcnt)) {
173261086f39SVlad Buslov 		mutex_unlock(&esw->offloads.encap_tbl_lock);
173361086f39SVlad Buslov 		return;
173461086f39SVlad Buslov 	}
173561086f39SVlad Buslov 	hash_del_rcu(&e->encap_hlist);
173661086f39SVlad Buslov 	mutex_unlock(&esw->offloads.encap_tbl_lock);
173761086f39SVlad Buslov 
173861086f39SVlad Buslov 	mlx5e_encap_dealloc(priv, e);
17395067b602SRoi Dayan }
17405067b602SRoi Dayan 
174104de7ddaSRoi Dayan static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
174204de7ddaSRoi Dayan {
174304de7ddaSRoi Dayan 	struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;
174404de7ddaSRoi Dayan 
1745226f2ca3SVlad Buslov 	if (!flow_flag_test(flow, ESWITCH) ||
1746226f2ca3SVlad Buslov 	    !flow_flag_test(flow, DUP))
174704de7ddaSRoi Dayan 		return;
174804de7ddaSRoi Dayan 
174904de7ddaSRoi Dayan 	mutex_lock(&esw->offloads.peer_mutex);
175004de7ddaSRoi Dayan 	list_del(&flow->peer);
175104de7ddaSRoi Dayan 	mutex_unlock(&esw->offloads.peer_mutex);
175204de7ddaSRoi Dayan 
1753226f2ca3SVlad Buslov 	flow_flag_clear(flow, DUP);
175404de7ddaSRoi Dayan 
1755eb252c3aSRoi Dayan 	if (refcount_dec_and_test(&flow->peer_flow->refcnt)) {
175604de7ddaSRoi Dayan 		mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow);
1757a23dae79SRoi Dayan 		kfree(flow->peer_flow);
1758eb252c3aSRoi Dayan 	}
1759eb252c3aSRoi Dayan 
176004de7ddaSRoi Dayan 	flow->peer_flow = NULL;
176104de7ddaSRoi Dayan }
176204de7ddaSRoi Dayan 
176304de7ddaSRoi Dayan static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
176404de7ddaSRoi Dayan {
176504de7ddaSRoi Dayan 	struct mlx5_core_dev *dev = flow->priv->mdev;
176604de7ddaSRoi Dayan 	struct mlx5_devcom *devcom = dev->priv.devcom;
176704de7ddaSRoi Dayan 	struct mlx5_eswitch *peer_esw;
176804de7ddaSRoi Dayan 
176904de7ddaSRoi Dayan 	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
177004de7ddaSRoi Dayan 	if (!peer_esw)
177104de7ddaSRoi Dayan 		return;
177204de7ddaSRoi Dayan 
177304de7ddaSRoi Dayan 	__mlx5e_tc_del_fdb_peer_flow(flow);
177404de7ddaSRoi Dayan 	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
177504de7ddaSRoi Dayan }
177604de7ddaSRoi Dayan 
1777e8f887acSAmir Vadai static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
1778961e8979SRoi Dayan 			      struct mlx5e_tc_flow *flow)
1779e8f887acSAmir Vadai {
1780226f2ca3SVlad Buslov 	if (mlx5e_is_eswitch_flow(flow)) {
178104de7ddaSRoi Dayan 		mlx5e_tc_del_fdb_peer_flow(flow);
1782d85cdccbSOr Gerlitz 		mlx5e_tc_del_fdb_flow(priv, flow);
178304de7ddaSRoi Dayan 	} else {
1784d85cdccbSOr Gerlitz 		mlx5e_tc_del_nic_flow(priv, flow);
1785e8f887acSAmir Vadai 	}
178604de7ddaSRoi Dayan }
1787e8f887acSAmir Vadai 
17880a7fcb78SPaul Blakey static int flow_has_tc_fwd_action(struct flow_cls_offload *f)
1789bbd00f7eSHadar Hen Zion {
1790f9e30088SPablo Neira Ayuso 	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
17910a7fcb78SPaul Blakey 	struct flow_action *flow_action = &rule->action;
17920a7fcb78SPaul Blakey 	const struct flow_action_entry *act;
17930a7fcb78SPaul Blakey 	int i;
1794bbd00f7eSHadar Hen Zion 
17950a7fcb78SPaul Blakey 	flow_action_for_each(i, act, flow_action) {
17960a7fcb78SPaul Blakey 		switch (act->id) {
17970a7fcb78SPaul Blakey 		case FLOW_ACTION_GOTO:
17980a7fcb78SPaul Blakey 			return true;
17990a7fcb78SPaul Blakey 		default:
18000a7fcb78SPaul Blakey 			continue;
1801fe1587a7SDmytro Linkin 		}
18022e72eb43SOr Gerlitz 	}
1803bbd00f7eSHadar Hen Zion 
18040a7fcb78SPaul Blakey 	return false;
18050a7fcb78SPaul Blakey }
1806bcef735cSOr Gerlitz 
18070a7fcb78SPaul Blakey static int
18080a7fcb78SPaul Blakey enc_opts_is_dont_care_or_full_match(struct mlx5e_priv *priv,
18090a7fcb78SPaul Blakey 				    struct flow_dissector_key_enc_opts *opts,
18100a7fcb78SPaul Blakey 				    struct netlink_ext_ack *extack,
18110a7fcb78SPaul Blakey 				    bool *dont_care)
18120a7fcb78SPaul Blakey {
18130a7fcb78SPaul Blakey 	struct geneve_opt *opt;
18140a7fcb78SPaul Blakey 	int off = 0;
1815bcef735cSOr Gerlitz 
18160a7fcb78SPaul Blakey 	*dont_care = true;
1817bcef735cSOr Gerlitz 
18180a7fcb78SPaul Blakey 	while (opts->len > off) {
18190a7fcb78SPaul Blakey 		opt = (struct geneve_opt *)&opts->data[off];
1820e98bedf5SEli Britstein 
18210a7fcb78SPaul Blakey 		if (!(*dont_care) || opt->opt_class || opt->type ||
18220a7fcb78SPaul Blakey 		    memchr_inv(opt->opt_data, 0, opt->length * 4)) {
18230a7fcb78SPaul Blakey 			*dont_care = false;
18240a7fcb78SPaul Blakey 
18250a7fcb78SPaul Blakey 			if (opt->opt_class != U16_MAX ||
18260a7fcb78SPaul Blakey 			    opt->type != U8_MAX ||
18270a7fcb78SPaul Blakey 			    memchr_inv(opt->opt_data, 0xFF,
18280a7fcb78SPaul Blakey 				       opt->length * 4)) {
18290a7fcb78SPaul Blakey 				NL_SET_ERR_MSG(extack,
18300a7fcb78SPaul Blakey 					       "Partial match of tunnel options in chain > 0 isn't supported");
18310a7fcb78SPaul Blakey 				netdev_warn(priv->netdev,
18320a7fcb78SPaul Blakey 					    "Partial match of tunnel options in chain > 0 isn't supported");
1833e98bedf5SEli Britstein 				return -EOPNOTSUPP;
1834e98bedf5SEli Britstein 			}
1835bcef735cSOr Gerlitz 		}
1836bcef735cSOr Gerlitz 
18370a7fcb78SPaul Blakey 		off += sizeof(struct geneve_opt) + opt->length * 4;
1838bbd00f7eSHadar Hen Zion 	}
1839bbd00f7eSHadar Hen Zion 
1840bbd00f7eSHadar Hen Zion 	return 0;
1841bbd00f7eSHadar Hen Zion }
1842bbd00f7eSHadar Hen Zion 
18430a7fcb78SPaul Blakey #define COPY_DISSECTOR(rule, diss_key, dst)\
18440a7fcb78SPaul Blakey ({ \
18450a7fcb78SPaul Blakey 	struct flow_rule *__rule = (rule);\
18460a7fcb78SPaul Blakey 	typeof(dst) __dst = dst;\
18470a7fcb78SPaul Blakey \
18480a7fcb78SPaul Blakey 	memcpy(__dst,\
18490a7fcb78SPaul Blakey 	       skb_flow_dissector_target(__rule->match.dissector,\
18500a7fcb78SPaul Blakey 					 diss_key,\
18510a7fcb78SPaul Blakey 					 __rule->match.key),\
18520a7fcb78SPaul Blakey 	       sizeof(*__dst));\
18530a7fcb78SPaul Blakey })
18540a7fcb78SPaul Blakey 
/* mlx5e_get_flow_tunnel_id - allocate a compact tunnel id for @flow and
 * install it as a register match or register-set action.
 *
 * The full tunnel match key (control, IPv4/IPv6 addresses, IP, ports,
 * key id, plus the filter netdev ifindex) is mapped to a small integer
 * via the uplink's tunnel_mapping. If the enc-opts mask is not a pure
 * don't-care, the enc-opts key is mapped separately through
 * tunnel_enc_opts_mapping. Both ids are packed into one value:
 * tun_id in the high bits, enc_opts_id in the low ENC_OPTS_BITS.
 *
 * For chain > 0 flows the packed value is matched against TUNNEL_TO_REG
 * (the id was written on the chain-0 hop); for chain-0 flows a mod-header
 * action is appended to write the value into the register instead.
 *
 * Returns 0 on success; on error the ids taken from the mappings are
 * released again (reverse order of acquisition).
 */
static int mlx5e_get_flow_tunnel_id(struct mlx5e_priv *priv,
				    struct mlx5e_tc_flow *flow,
				    struct flow_cls_offload *f,
				    struct net_device *filter_dev)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
	struct flow_match_enc_opts enc_opts_match;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct tunnel_match_key tunnel_key;
	bool enc_opts_is_dont_care = true;
	u32 tun_id, enc_opts_id = 0;
	struct mlx5_eswitch *esw;
	u32 value, mask;
	int err;

	esw = priv->mdev->priv.eswitch;
	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &uplink_rpriv->uplink_priv;

	/* Zero first: COPY_DISSECTOR fills only the keys actually present,
	 * and the struct is used as a hash/mapping key below.
	 */
	memset(&tunnel_key, 0, sizeof(tunnel_key));
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL,
		       &tunnel_key.enc_control);
	if (tunnel_key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
		COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
			       &tunnel_key.enc_ipv4);
	else
		COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
			       &tunnel_key.enc_ipv6);
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IP, &tunnel_key.enc_ip);
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_PORTS,
		       &tunnel_key.enc_tp);
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_KEYID,
		       &tunnel_key.enc_key_id);
	tunnel_key.filter_ifindex = filter_dev->ifindex;

	err = mapping_add(uplink_priv->tunnel_mapping, &tunnel_key, &tun_id);
	if (err)
		return err;

	flow_rule_match_enc_opts(rule, &enc_opts_match);
	err = enc_opts_is_dont_care_or_full_match(priv,
						  enc_opts_match.mask,
						  extack,
						  &enc_opts_is_dont_care);
	if (err)
		goto err_enc_opts;

	/* Only a full-match enc-opts key consumes a mapping slot; a
	 * don't-care keeps enc_opts_id == 0 (reserved "no options" id).
	 */
	if (!enc_opts_is_dont_care) {
		err = mapping_add(uplink_priv->tunnel_enc_opts_mapping,
				  enc_opts_match.key, &enc_opts_id);
		if (err)
			goto err_enc_opts;
	}

	value = tun_id << ENC_OPTS_BITS | enc_opts_id;
	mask = enc_opts_id ? TUNNEL_ID_MASK :
			     (TUNNEL_ID_MASK & ~ENC_OPTS_BITS_MASK);

	if (attr->chain) {
		/* Chain > 0: the id was set on an earlier hop; match it. */
		mlx5e_tc_match_to_reg_match(&attr->parse_attr->spec,
					    TUNNEL_TO_REG, value, mask);
	} else {
		/* Chain 0: write the id into the register for later chains. */
		mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
		err = mlx5e_tc_match_to_reg_set(priv->mdev,
						mod_hdr_acts,
						TUNNEL_TO_REG, value);
		if (err)
			goto err_set;

		attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	}

	flow->tunnel_id = value;
	return 0;

err_set:
	if (enc_opts_id)
		mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
			       enc_opts_id);
err_enc_opts:
	mapping_remove(uplink_priv->tunnel_mapping, tun_id);
	return err;
}
19420a7fcb78SPaul Blakey 
19430a7fcb78SPaul Blakey static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow)
19440a7fcb78SPaul Blakey {
19450a7fcb78SPaul Blakey 	u32 enc_opts_id = flow->tunnel_id & ENC_OPTS_BITS_MASK;
19460a7fcb78SPaul Blakey 	u32 tun_id = flow->tunnel_id >> ENC_OPTS_BITS;
19470a7fcb78SPaul Blakey 	struct mlx5_rep_uplink_priv *uplink_priv;
19480a7fcb78SPaul Blakey 	struct mlx5e_rep_priv *uplink_rpriv;
19490a7fcb78SPaul Blakey 	struct mlx5_eswitch *esw;
19500a7fcb78SPaul Blakey 
19510a7fcb78SPaul Blakey 	esw = flow->priv->mdev->priv.eswitch;
19520a7fcb78SPaul Blakey 	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
19530a7fcb78SPaul Blakey 	uplink_priv = &uplink_rpriv->uplink_priv;
19540a7fcb78SPaul Blakey 
19550a7fcb78SPaul Blakey 	if (tun_id)
19560a7fcb78SPaul Blakey 		mapping_remove(uplink_priv->tunnel_mapping, tun_id);
19570a7fcb78SPaul Blakey 	if (enc_opts_id)
19580a7fcb78SPaul Blakey 		mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
19590a7fcb78SPaul Blakey 			       enc_opts_id);
19600a7fcb78SPaul Blakey }
19610a7fcb78SPaul Blakey 
/* mlx5e_tc_get_flow_tun_id - return the packed tunnel id of @flow
 * (tun_id << ENC_OPTS_BITS | enc_opts_id); 0 when no id was allocated.
 */
u32 mlx5e_tc_get_flow_tun_id(struct mlx5e_tc_flow *flow)
{
	return flow->tunnel_id;
}
19664c3844d9SPaul Blakey 
/* parse_tunnel_attr - parse the tunnel part of a classifier on a tunnel
 * device and decide how the flow interacts with the tunnel-id register.
 *
 * Only eswitch flows are supported. For chain-0 flows the tunnel headers
 * are parsed into @spec and a DECAP action is added; *match_inner is set
 * so the caller matches inner headers afterwards. Flows on chain > 0
 * need the tunnel-id mapping (needs_mapping); chain-0 flows that forward
 * to another TC device set it for later chains (sets_mapping). Either
 * case requires reg_c1 loopback support and ends with allocating the
 * flow tunnel id.
 *
 * Returns 0 on success or a negative errno (-EOPNOTSUPP for unsupported
 * configurations).
 */
static int parse_tunnel_attr(struct mlx5e_priv *priv,
			     struct mlx5e_tc_flow *flow,
			     struct mlx5_flow_spec *spec,
			     struct flow_cls_offload *f,
			     struct net_device *filter_dev,
			     u8 *match_level,
			     bool *match_inner)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct netlink_ext_ack *extack = f->common.extack;
	bool needs_mapping, sets_mapping;
	int err;

	if (!mlx5e_is_eswitch_flow(flow))
		return -EOPNOTSUPP;

	needs_mapping = !!flow->esw_attr->chain;
	sets_mapping = !flow->esw_attr->chain && flow_has_tc_fwd_action(f);
	*match_inner = !needs_mapping;

	if ((needs_mapping || sets_mapping) &&
	    !mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
		NL_SET_ERR_MSG(extack,
			       "Chains on tunnel devices isn't supported without register loopback support");
		netdev_warn(priv->netdev,
			    "Chains on tunnel devices isn't supported without register loopback support");
		return -EOPNOTSUPP;
	}

	if (!flow->esw_attr->chain) {
		err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
					 match_level);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to parse tunnel attributes");
			netdev_warn(priv->netdev,
				    "Failed to parse tunnel attributes");
			return err;
		}

		/* Tunnel decap happens on chain 0 only. */
		flow->esw_attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
	}

	if (!needs_mapping && !sets_mapping)
		return 0;

	return mlx5e_get_flow_tunnel_id(priv, flow, f, filter_dev);
}
20150a7fcb78SPaul Blakey 
/* Return a pointer to the inner-headers mask inside @spec's match criteria. */
static void *get_match_inner_headers_criteria(struct mlx5_flow_spec *spec)
{
	return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    inner_headers);
}
20210a7fcb78SPaul Blakey 
/* Return a pointer to the inner-headers value inside @spec's match value. */
static void *get_match_inner_headers_value(struct mlx5_flow_spec *spec)
{
	return MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    inner_headers);
}
20270a7fcb78SPaul Blakey 
/* Return a pointer to the outer-headers mask inside @spec's match criteria. */
static void *get_match_outer_headers_criteria(struct mlx5_flow_spec *spec)
{
	return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    outer_headers);
}
20330a7fcb78SPaul Blakey 
/* Return a pointer to the outer-headers value inside @spec's match value. */
static void *get_match_outer_headers_value(struct mlx5_flow_spec *spec)
{
	return MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    outer_headers);
}
20398377629eSEli Britstein 
20408377629eSEli Britstein static void *get_match_headers_value(u32 flags,
20418377629eSEli Britstein 				     struct mlx5_flow_spec *spec)
20428377629eSEli Britstein {
20438377629eSEli Britstein 	return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
20440a7fcb78SPaul Blakey 		get_match_inner_headers_value(spec) :
20450a7fcb78SPaul Blakey 		get_match_outer_headers_value(spec);
20460a7fcb78SPaul Blakey }
20470a7fcb78SPaul Blakey 
20480a7fcb78SPaul Blakey static void *get_match_headers_criteria(u32 flags,
20490a7fcb78SPaul Blakey 					struct mlx5_flow_spec *spec)
20500a7fcb78SPaul Blakey {
20510a7fcb78SPaul Blakey 	return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
20520a7fcb78SPaul Blakey 		get_match_inner_headers_criteria(spec) :
20530a7fcb78SPaul Blakey 		get_match_outer_headers_criteria(spec);
20548377629eSEli Britstein }
20558377629eSEli Britstein 
20566d65bc64Swenxu static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
20576d65bc64Swenxu 				   struct flow_cls_offload *f)
20586d65bc64Swenxu {
20596d65bc64Swenxu 	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
20606d65bc64Swenxu 	struct netlink_ext_ack *extack = f->common.extack;
20616d65bc64Swenxu 	struct net_device *ingress_dev;
20626d65bc64Swenxu 	struct flow_match_meta match;
20636d65bc64Swenxu 
20646d65bc64Swenxu 	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
20656d65bc64Swenxu 		return 0;
20666d65bc64Swenxu 
20676d65bc64Swenxu 	flow_rule_match_meta(rule, &match);
20686d65bc64Swenxu 	if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
20696d65bc64Swenxu 		NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask");
20706d65bc64Swenxu 		return -EINVAL;
20716d65bc64Swenxu 	}
20726d65bc64Swenxu 
20736d65bc64Swenxu 	ingress_dev = __dev_get_by_index(dev_net(filter_dev),
20746d65bc64Swenxu 					 match.key->ingress_ifindex);
20756d65bc64Swenxu 	if (!ingress_dev) {
20766d65bc64Swenxu 		NL_SET_ERR_MSG_MOD(extack,
20776d65bc64Swenxu 				   "Can't find the ingress port to match on");
20786d65bc64Swenxu 		return -EINVAL;
20796d65bc64Swenxu 	}
20806d65bc64Swenxu 
20816d65bc64Swenxu 	if (ingress_dev != filter_dev) {
20826d65bc64Swenxu 		NL_SET_ERR_MSG_MOD(extack,
20836d65bc64Swenxu 				   "Can't match on the ingress filter port");
20846d65bc64Swenxu 		return -EINVAL;
20856d65bc64Swenxu 	}
20866d65bc64Swenxu 
20876d65bc64Swenxu 	return 0;
20886d65bc64Swenxu }
20896d65bc64Swenxu 
/* __parse_cls_flower - translate a flower classifier into a mlx5 flow spec.
 *
 * Walks the dissector keys of @f and fills @spec's match criteria/value
 * (outer headers by default; inner headers once a tunnel match caused a
 * decap). Tracks the deepest layer matched via *inner_match_level /
 * *outer_match_level (MLX5_MATCH_NONE/L2/L3/L4) so callers can pick a
 * suitable matching mode.
 *
 * Returns 0 on success, -EOPNOTSUPP for keys/fields the hardware cannot
 * match, or another negative errno from the sub-parsers.
 */
static int __parse_cls_flower(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow,
			      struct mlx5_flow_spec *spec,
			      struct flow_cls_offload *f,
			      struct net_device *filter_dev,
			      u8 *inner_match_level, u8 *outer_match_level)
{
	struct netlink_ext_ack *extack = f->common.extack;
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_dissector *dissector = rule->match.dissector;
	u16 addr_type = 0;
	u8 ip_proto = 0;
	u8 *match_level;
	int err;

	/* Until a tunnel decap redirects us to the inner headers, all
	 * matches below update the outer match level.
	 */
	match_level = outer_match_level;

	/* Reject filters that use any dissector key we cannot offload. */
	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_META) |
	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_CVLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS)	|
	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_TCP) |
	      BIT(FLOW_DISSECTOR_KEY_IP)  |
	      BIT(FLOW_DISSECTOR_KEY_CT) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_OPTS))) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
		netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
			    dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (mlx5e_get_tc_tun(filter_dev)) {
		bool match_inner = false;

		err = parse_tunnel_attr(priv, flow, spec, f, filter_dev,
					outer_match_level, &match_inner);
		if (err)
			return err;

		if (match_inner) {
			/* header pointers should point to the inner headers
			 * if the packet was decapsulated already.
			 * outer headers are set by parse_tunnel_attr.
			 */
			match_level = inner_match_level;
			headers_c = get_match_inner_headers_criteria(spec);
			headers_v = get_match_inner_headers_value(spec);
		}
	}

	err = mlx5e_flower_parse_meta(filter_dev, f);
	if (err)
		return err;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(match.mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(match.key->n_proto));

		if (match.mask->n_proto)
			*match_level = MLX5_MATCH_L2;
	}
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN) ||
	    is_vlan_dev(filter_dev)) {
		struct flow_dissector_key_vlan filter_dev_mask;
		struct flow_dissector_key_vlan filter_dev_key;
		struct flow_match_vlan match;

		/* On a VLAN upper device the VLAN id/proto come from the
		 * device itself, not from the filter (priority is wildcarded).
		 */
		if (is_vlan_dev(filter_dev)) {
			match.key = &filter_dev_key;
			match.key->vlan_id = vlan_dev_vlan_id(filter_dev);
			match.key->vlan_tpid = vlan_dev_vlan_proto(filter_dev);
			match.key->vlan_priority = 0;
			match.mask = &filter_dev_mask;
			memset(match.mask, 0xff, sizeof(*match.mask));
			match.mask->vlan_priority = 0;
		} else {
			flow_rule_match_vlan(rule, &match);
		}
		if (match.mask->vlan_id ||
		    match.mask->vlan_priority ||
		    match.mask->vlan_tpid) {
			if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
				MLX5_SET(fte_match_set_lyr_2_4, headers_c,
					 svlan_tag, 1);
				MLX5_SET(fte_match_set_lyr_2_4, headers_v,
					 svlan_tag, 1);
			} else {
				MLX5_SET(fte_match_set_lyr_2_4, headers_c,
					 cvlan_tag, 1);
				MLX5_SET(fte_match_set_lyr_2_4, headers_v,
					 cvlan_tag, 1);
			}

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid,
				 match.mask->vlan_id);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid,
				 match.key->vlan_id);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio,
				 match.mask->vlan_priority);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio,
				 match.key->vlan_priority);

			*match_level = MLX5_MATCH_L2;
		}
	} else if (*match_level != MLX5_MATCH_NONE) {
		/* cvlan_tag enabled in match criteria and
		 * disabled in match value means both S & C tags
		 * don't exist (untagged of both)
		 */
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
		*match_level = MLX5_MATCH_L2;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_cvlan(rule, &match);
		if (match.mask->vlan_id ||
		    match.mask->vlan_priority ||
		    match.mask->vlan_tpid) {
			if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
				MLX5_SET(fte_match_set_misc, misc_c,
					 outer_second_svlan_tag, 1);
				MLX5_SET(fte_match_set_misc, misc_v,
					 outer_second_svlan_tag, 1);
			} else {
				MLX5_SET(fte_match_set_misc, misc_c,
					 outer_second_cvlan_tag, 1);
				MLX5_SET(fte_match_set_misc, misc_v,
					 outer_second_cvlan_tag, 1);
			}

			MLX5_SET(fte_match_set_misc, misc_c, outer_second_vid,
				 match.mask->vlan_id);
			MLX5_SET(fte_match_set_misc, misc_v, outer_second_vid,
				 match.key->vlan_id);
			MLX5_SET(fte_match_set_misc, misc_c, outer_second_prio,
				 match.mask->vlan_priority);
			MLX5_SET(fte_match_set_misc, misc_v, outer_second_prio,
				 match.key->vlan_priority);

			*match_level = MLX5_MATCH_L2;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     dmac_47_16),
				match.mask->dst);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     dmac_47_16),
				match.key->dst);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     smac_47_16),
				match.mask->src);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     smac_47_16),
				match.key->src);

		if (!is_zero_ether_addr(match.mask->src) ||
		    !is_zero_ether_addr(match.mask->dst))
			*match_level = MLX5_MATCH_L2;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;

		/* the HW doesn't support frag first/later */
		if (match.mask->flags & FLOW_DIS_FIRST_FRAG)
			return -EOPNOTSUPP;

		if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
				 match.key->flags & FLOW_DIS_IS_FRAGMENT);

			/* the HW doesn't need L3 inline to match on frag=no */
			if (!(match.key->flags & FLOW_DIS_IS_FRAGMENT))
				*match_level = MLX5_MATCH_L2;
	/* ***  L2 attributes parsing up to here *** */
			else
				*match_level = MLX5_MATCH_L3;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		ip_proto = match.key->ip_proto;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 match.mask->ip_proto);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 match.key->ip_proto);

		if (match.mask->ip_proto)
			*match_level = MLX5_MATCH_L3;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &match.mask->src, sizeof(match.mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &match.key->src, sizeof(match.key->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &match.mask->dst, sizeof(match.mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &match.key->dst, sizeof(match.key->dst));

		if (match.mask->src || match.mask->dst)
			*match_level = MLX5_MATCH_L3;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &match.mask->src, sizeof(match.mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &match.key->src, sizeof(match.key->src));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &match.mask->dst, sizeof(match.mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &match.key->dst, sizeof(match.key->dst));

		if (ipv6_addr_type(&match.mask->src) != IPV6_ADDR_ANY ||
		    ipv6_addr_type(&match.mask->dst) != IPV6_ADDR_ANY)
			*match_level = MLX5_MATCH_L3;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_match_ip match;

		/* tos is split into the 2-bit ECN and 6-bit DSCP fields. */
		flow_rule_match_ip(rule, &match);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn,
			 match.mask->tos & 0x3);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn,
			 match.key->tos & 0x3);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp,
			 match.mask->tos >> 2);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp,
			 match.key->tos  >> 2);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit,
			 match.mask->ttl);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit,
			 match.key->ttl);

		if (match.mask->ttl &&
		    !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
						ft_field_support.outer_ipv4_ttl)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matching on TTL is not supported");
			return -EOPNOTSUPP;
		}

		if (match.mask->tos || match.mask->ttl)
			*match_level = MLX5_MATCH_L3;
	}

	/* ***  L3 attributes parsing up to here *** */

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);
		switch (ip_proto) {
		case IPPROTO_TCP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_sport, ntohs(match.mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_sport, ntohs(match.key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_dport, ntohs(match.mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_dport, ntohs(match.key->dst));
			break;

		case IPPROTO_UDP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_sport, ntohs(match.mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_sport, ntohs(match.key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_dport, ntohs(match.mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_dport, ntohs(match.key->dst));
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack,
					   "Only UDP and TCP transports are supported for L4 matching");
			netdev_err(priv->netdev,
				   "Only UDP and TCP transport are supported\n");
			return -EINVAL;
		}

		if (match.mask->src || match.mask->dst)
			*match_level = MLX5_MATCH_L4;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
		struct flow_match_tcp match;

		flow_rule_match_tcp(rule, &match);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
			 ntohs(match.mask->flags));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
			 ntohs(match.key->flags));

		if (match.mask->flags)
			*match_level = MLX5_MATCH_L4;
	}

	return 0;
}
2455e3a2b7edSAmir Vadai 
2456de0af0bfSRoi Dayan static int parse_cls_flower(struct mlx5e_priv *priv,
245765ba8fb7SOr Gerlitz 			    struct mlx5e_tc_flow *flow,
2458de0af0bfSRoi Dayan 			    struct mlx5_flow_spec *spec,
2459f9e30088SPablo Neira Ayuso 			    struct flow_cls_offload *f,
246054c177caSOz Shlomo 			    struct net_device *filter_dev)
2461de0af0bfSRoi Dayan {
246293b3586eSHuy Nguyen 	u8 inner_match_level, outer_match_level, non_tunnel_match_level;
2463e98bedf5SEli Britstein 	struct netlink_ext_ack *extack = f->common.extack;
2464de0af0bfSRoi Dayan 	struct mlx5_core_dev *dev = priv->mdev;
2465de0af0bfSRoi Dayan 	struct mlx5_eswitch *esw = dev->priv.eswitch;
24661d447a39SSaeed Mahameed 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
24671d447a39SSaeed Mahameed 	struct mlx5_eswitch_rep *rep;
2468226f2ca3SVlad Buslov 	bool is_eswitch_flow;
2469de0af0bfSRoi Dayan 	int err;
2470de0af0bfSRoi Dayan 
247193b3586eSHuy Nguyen 	inner_match_level = MLX5_MATCH_NONE;
247293b3586eSHuy Nguyen 	outer_match_level = MLX5_MATCH_NONE;
247393b3586eSHuy Nguyen 
24740a7fcb78SPaul Blakey 	err = __parse_cls_flower(priv, flow, spec, f, filter_dev,
24750a7fcb78SPaul Blakey 				 &inner_match_level, &outer_match_level);
247693b3586eSHuy Nguyen 	non_tunnel_match_level = (inner_match_level == MLX5_MATCH_NONE) ?
247793b3586eSHuy Nguyen 				 outer_match_level : inner_match_level;
2478de0af0bfSRoi Dayan 
2479226f2ca3SVlad Buslov 	is_eswitch_flow = mlx5e_is_eswitch_flow(flow);
2480226f2ca3SVlad Buslov 	if (!err && is_eswitch_flow) {
24811d447a39SSaeed Mahameed 		rep = rpriv->rep;
2482b05af6aaSBodong Wang 		if (rep->vport != MLX5_VPORT_UPLINK &&
24831d447a39SSaeed Mahameed 		    (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
248493b3586eSHuy Nguyen 		    esw->offloads.inline_mode < non_tunnel_match_level)) {
2485e98bedf5SEli Britstein 			NL_SET_ERR_MSG_MOD(extack,
2486e98bedf5SEli Britstein 					   "Flow is not offloaded due to min inline setting");
2487de0af0bfSRoi Dayan 			netdev_warn(priv->netdev,
2488de0af0bfSRoi Dayan 				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
248993b3586eSHuy Nguyen 				    non_tunnel_match_level, esw->offloads.inline_mode);
2490de0af0bfSRoi Dayan 			return -EOPNOTSUPP;
2491de0af0bfSRoi Dayan 		}
2492de0af0bfSRoi Dayan 	}
2493de0af0bfSRoi Dayan 
2494226f2ca3SVlad Buslov 	if (is_eswitch_flow) {
249593b3586eSHuy Nguyen 		flow->esw_attr->inner_match_level = inner_match_level;
249693b3586eSHuy Nguyen 		flow->esw_attr->outer_match_level = outer_match_level;
24976363651dSOr Gerlitz 	} else {
249893b3586eSHuy Nguyen 		flow->nic_attr->match_level = non_tunnel_match_level;
24996363651dSOr Gerlitz 	}
250038aa51c1SOr Gerlitz 
2501de0af0bfSRoi Dayan 	return err;
2502de0af0bfSRoi Dayan }
2503de0af0bfSRoi Dayan 
/* Scratch layout holding every packet header a pedit action may rewrite.
 * The per-header-type byte offsets into this struct are listed in
 * pedit_header_offsets[] and resolved via the pedit_header() macro.
 */
struct pedit_headers {
	struct ethhdr  eth;
	struct vlan_hdr vlan;
	struct iphdr   ip4;
	struct ipv6hdr ip6;
	struct tcphdr  tcp;
	struct udphdr  udp;
};
2512d79b6df6SOr Gerlitz 
/* Accumulated pedit state for one command kind (set or add):
 * @vals:   the values to be written,
 * @masks:  which bits of @vals are affected,
 * @pedits: how many pedit actions were folded into this accumulator.
 */
struct pedit_headers_action {
	struct pedit_headers	vals;
	struct pedit_headers	masks;
	u32			pedits;
};
2518c500c86bSPablo Neira Ayuso 
/* Byte offset of each mangle header type inside struct pedit_headers */
static int pedit_header_offsets[] = {
	[FLOW_ACT_MANGLE_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
	[FLOW_ACT_MANGLE_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
	[FLOW_ACT_MANGLE_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
	[FLOW_ACT_MANGLE_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
	[FLOW_ACT_MANGLE_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
};

/* Pointer to the sub-header of type _htype within a struct pedit_headers */
#define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])
2528d79b6df6SOr Gerlitz 
2529d79b6df6SOr Gerlitz static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
2530c500c86bSPablo Neira Ayuso 			 struct pedit_headers_action *hdrs)
2531d79b6df6SOr Gerlitz {
2532d79b6df6SOr Gerlitz 	u32 *curr_pmask, *curr_pval;
2533d79b6df6SOr Gerlitz 
2534c500c86bSPablo Neira Ayuso 	curr_pmask = (u32 *)(pedit_header(&hdrs->masks, hdr_type) + offset);
2535c500c86bSPablo Neira Ayuso 	curr_pval  = (u32 *)(pedit_header(&hdrs->vals, hdr_type) + offset);
2536d79b6df6SOr Gerlitz 
2537d79b6df6SOr Gerlitz 	if (*curr_pmask & mask)  /* disallow acting twice on the same location */
2538d79b6df6SOr Gerlitz 		goto out_err;
2539d79b6df6SOr Gerlitz 
2540d79b6df6SOr Gerlitz 	*curr_pmask |= mask;
2541d79b6df6SOr Gerlitz 	*curr_pval  |= (val & mask);
2542d79b6df6SOr Gerlitz 
2543d79b6df6SOr Gerlitz 	return 0;
2544d79b6df6SOr Gerlitz 
2545d79b6df6SOr Gerlitz out_err:
2546d79b6df6SOr Gerlitz 	return -EOPNOTSUPP;
2547d79b6df6SOr Gerlitz }
2548d79b6df6SOr Gerlitz 
/* Descriptor of one rewritable packet field:
 * @field:        HW modify-header field id (MLX5_ACTION_IN_FIELD_OUT_*),
 * @field_bsize:  field width in bits,
 * @field_mask:   mask of the field bits within its 32-bit container,
 * @offset:       byte offset of the value inside struct pedit_headers,
 * @match_offset: byte offset of the same field in the flow match headers.
 */
struct mlx5_fields {
	u8  field;
	u8  field_bsize;
	u32 field_mask;
	u32 offset;
	u32 match_offset;
};

/* Build one struct mlx5_fields entry for the fields[] table below */
#define OFFLOAD(fw_field, field_bsize, field_mask, field, off, match_field) \
		{MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, field_bsize, field_mask, \
		 offsetof(struct pedit_headers, field) + (off), \
		 MLX5_BYTE_OFF(fte_match_set_lyr_2_4, match_field)}
256127c11b6bSEli Britstein 
/* True when the rewritten value equals the matched value and every bit
 * being rewritten is also covered by the match mask, i.e. there are no
 * rewrites of bits that do not have a match.
 */
#define SAME_VAL_MASK(type, valp, maskp, matchvalp, matchmaskp) ({ \
	type matchmaskx = *(type *)(matchmaskp); \
	type matchvalx = *(type *)(matchvalp); \
	type maskx = *(type *)(maskp); \
	type valx = *(type *)(valp); \
	\
	(valx & maskx) == (matchvalx & matchmaskx) && !(maskx & (maskx ^ \
								 matchmaskx)); \
})
25742ef86872SEli Britstein 
257527c11b6bSEli Britstein static bool cmp_val_mask(void *valp, void *maskp, void *matchvalp,
257688f30bbcSDmytro Linkin 			 void *matchmaskp, u8 bsize)
257727c11b6bSEli Britstein {
257827c11b6bSEli Britstein 	bool same = false;
257927c11b6bSEli Britstein 
258088f30bbcSDmytro Linkin 	switch (bsize) {
258188f30bbcSDmytro Linkin 	case 8:
25822ef86872SEli Britstein 		same = SAME_VAL_MASK(u8, valp, maskp, matchvalp, matchmaskp);
258327c11b6bSEli Britstein 		break;
258488f30bbcSDmytro Linkin 	case 16:
25852ef86872SEli Britstein 		same = SAME_VAL_MASK(u16, valp, maskp, matchvalp, matchmaskp);
258627c11b6bSEli Britstein 		break;
258788f30bbcSDmytro Linkin 	case 32:
25882ef86872SEli Britstein 		same = SAME_VAL_MASK(u32, valp, maskp, matchvalp, matchmaskp);
258927c11b6bSEli Britstein 		break;
259027c11b6bSEli Britstein 	}
259127c11b6bSEli Britstein 
259227c11b6bSEli Britstein 	return same;
259327c11b6bSEli Britstein }
2594a8e4f0c4SOr Gerlitz 
/* Every header field that can be rewritten with a HW modify-header action,
 * walked in order by offload_pedit_fields().
 */
static struct mlx5_fields fields[] = {
	OFFLOAD(DMAC_47_16, 32, U32_MAX, eth.h_dest[0], 0, dmac_47_16),
	OFFLOAD(DMAC_15_0,  16, U16_MAX, eth.h_dest[4], 0, dmac_15_0),
	OFFLOAD(SMAC_47_16, 32, U32_MAX, eth.h_source[0], 0, smac_47_16),
	OFFLOAD(SMAC_15_0,  16, U16_MAX, eth.h_source[4], 0, smac_15_0),
	OFFLOAD(ETHERTYPE,  16, U16_MAX, eth.h_proto, 0, ethertype),
	OFFLOAD(FIRST_VID,  16, U16_MAX, vlan.h_vlan_TCI, 0, first_vid),

	OFFLOAD(IP_DSCP, 8,    0xfc, ip4.tos,   0, ip_dscp),
	OFFLOAD(IP_TTL,  8,  U8_MAX, ip4.ttl,   0, ttl_hoplimit),
	OFFLOAD(SIPV4,  32, U32_MAX, ip4.saddr, 0, src_ipv4_src_ipv6.ipv4_layout.ipv4),
	OFFLOAD(DIPV4,  32, U32_MAX, ip4.daddr, 0, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),

	OFFLOAD(SIPV6_127_96, 32, U32_MAX, ip6.saddr.s6_addr32[0], 0,
		src_ipv4_src_ipv6.ipv6_layout.ipv6[0]),
	OFFLOAD(SIPV6_95_64,  32, U32_MAX, ip6.saddr.s6_addr32[1], 0,
		src_ipv4_src_ipv6.ipv6_layout.ipv6[4]),
	OFFLOAD(SIPV6_63_32,  32, U32_MAX, ip6.saddr.s6_addr32[2], 0,
		src_ipv4_src_ipv6.ipv6_layout.ipv6[8]),
	OFFLOAD(SIPV6_31_0,   32, U32_MAX, ip6.saddr.s6_addr32[3], 0,
		src_ipv4_src_ipv6.ipv6_layout.ipv6[12]),
	OFFLOAD(DIPV6_127_96, 32, U32_MAX, ip6.daddr.s6_addr32[0], 0,
		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[0]),
	OFFLOAD(DIPV6_95_64,  32, U32_MAX, ip6.daddr.s6_addr32[1], 0,
		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[4]),
	OFFLOAD(DIPV6_63_32,  32, U32_MAX, ip6.daddr.s6_addr32[2], 0,
		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[8]),
	OFFLOAD(DIPV6_31_0,   32, U32_MAX, ip6.daddr.s6_addr32[3], 0,
		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]),
	OFFLOAD(IPV6_HOPLIMIT, 8,  U8_MAX, ip6.hop_limit, 0, ttl_hoplimit),

	OFFLOAD(TCP_SPORT, 16, U16_MAX, tcp.source,  0, tcp_sport),
	OFFLOAD(TCP_DPORT, 16, U16_MAX, tcp.dest,    0, tcp_dport),
	/* in linux iphdr tcp_flags is 8 bits long */
	OFFLOAD(TCP_FLAGS,  8,  U8_MAX, tcp.ack_seq, 5, tcp_flags),

	OFFLOAD(UDP_SPORT, 16, U16_MAX, udp.source, 0, udp_sport),
	OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest,   0, udp_dport),
};
2634d79b6df6SOr Gerlitz 
/* Translate the accumulated pedit set/add masks into HW modify-header
 * actions, appended to parse_attr->mod_hdr_acts.
 *
 * A field is skipped when the flow already matches on the exact value
 * being set, or when the add value is zero.  Fails with -EOPNOTSUPP when
 * the same HW field is both set and added to, or when the mask selects
 * disjoint sub-fields (HW writes a single contiguous bit range).
 */
static int offload_pedit_fields(struct mlx5e_priv *priv,
				int namespace,
				struct pedit_headers_action *hdrs,
				struct mlx5e_tc_flow_parse_attr *parse_attr,
				u32 *action_flags,
				struct netlink_ext_ack *extack)
{
	struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
	int i, action_size, first, last, next_z;
	void *headers_c, *headers_v, *action, *vals_p;
	u32 *s_masks_p, *a_masks_p, s_mask, a_mask;
	struct mlx5e_tc_mod_hdr_acts *mod_acts;
	struct mlx5_fields *f;
	unsigned long mask;
	__be32 mask_be32;
	__be16 mask_be16;
	int err;
	u8 cmd;

	mod_acts = &parse_attr->mod_hdr_acts;
	headers_c = get_match_headers_criteria(*action_flags, &parse_attr->spec);
	headers_v = get_match_headers_value(*action_flags, &parse_attr->spec);

	/* hdrs[0] accumulates SET pedits, hdrs[1] accumulates ADD pedits */
	set_masks = &hdrs[0].masks;
	add_masks = &hdrs[1].masks;
	set_vals = &hdrs[0].vals;
	add_vals = &hdrs[1].vals;

	action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);

	for (i = 0; i < ARRAY_SIZE(fields); i++) {
		bool skip;

		f = &fields[i];
		/* avoid seeing bits set from previous iterations */
		s_mask = 0;
		a_mask = 0;

		s_masks_p = (void *)set_masks + f->offset;
		a_masks_p = (void *)add_masks + f->offset;

		s_mask = *s_masks_p & f->field_mask;
		a_mask = *a_masks_p & f->field_mask;

		if (!s_mask && !a_mask) /* nothing to offload here */
			continue;

		if (s_mask && a_mask) {
			NL_SET_ERR_MSG_MOD(extack,
					   "can't set and add to the same HW field");
			printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field);
			return -EOPNOTSUPP;
		}

		skip = false;
		if (s_mask) {
			void *match_mask = headers_c + f->match_offset;
			void *match_val = headers_v + f->match_offset;

			cmd  = MLX5_ACTION_TYPE_SET;
			mask = s_mask;
			vals_p = (void *)set_vals + f->offset;
			/* don't rewrite if we have a match on the same value */
			if (cmp_val_mask(vals_p, s_masks_p, match_val,
					 match_mask, f->field_bsize))
				skip = true;
			/* clear to denote we consumed this field */
			*s_masks_p &= ~f->field_mask;
		} else {
			cmd  = MLX5_ACTION_TYPE_ADD;
			mask = a_mask;
			vals_p = (void *)add_vals + f->offset;
			/* add 0 is no change */
			if ((*(u32 *)vals_p & f->field_mask) == 0)
				skip = true;
			/* clear to denote we consumed this field */
			*a_masks_p &= ~f->field_mask;
		}
		if (skip)
			continue;

		/* byte-swap the mask to little endian so the bit scans
		 * below see one contiguous run of set bits
		 */
		if (f->field_bsize == 32) {
			mask_be32 = (__be32)mask;
			mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
		} else if (f->field_bsize == 16) {
			mask_be32 = (__be32)mask;
			mask_be16 = *(__be16 *)&mask_be32;
			mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
		}

		first = find_first_bit(&mask, f->field_bsize);
		next_z = find_next_zero_bit(&mask, f->field_bsize, first);
		last  = find_last_bit(&mask, f->field_bsize);
		if (first < next_z && next_z < last) {
			/* a zero inside the set-bit run: non-contiguous mask */
			NL_SET_ERR_MSG_MOD(extack,
					   "rewrite of few sub-fields isn't supported");
			printk(KERN_WARNING "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n",
			       mask);
			return -EOPNOTSUPP;
		}

		/* make sure there is room for one more modify-header action */
		err = alloc_mod_hdr_actions(priv->mdev, namespace, mod_acts);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "too many pedit actions, can't offload");
			mlx5_core_warn(priv->mdev,
				       "mlx5: parsed %d pedit actions, can't do more\n",
				       mod_acts->num_actions);
			return err;
		}

		action = mod_acts->actions +
			 (mod_acts->num_actions * action_size);
		MLX5_SET(set_action_in, action, action_type, cmd);
		MLX5_SET(set_action_in, action, field, f->field);

		if (cmd == MLX5_ACTION_TYPE_SET) {
			int start;

			/* if field is bit sized it can start not from first bit */
			start = find_first_bit((unsigned long *)&f->field_mask,
					       f->field_bsize);

			MLX5_SET(set_action_in, action, offset, first - start);
			/* length is num of bits to be written, zero means length of 32 */
			MLX5_SET(set_action_in, action, length, (last - first + 1));
		}

		if (f->field_bsize == 32)
			MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p) >> first);
		else if (f->field_bsize == 16)
			MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p) >> first);
		else if (f->field_bsize == 8)
			MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first);

		++mod_acts->num_actions;
	}

	return 0;
}
2775d79b6df6SOr Gerlitz 
27762cc1cb1dSTonghao Zhang static int mlx5e_flow_namespace_max_modify_action(struct mlx5_core_dev *mdev,
27772cc1cb1dSTonghao Zhang 						  int namespace)
27782cc1cb1dSTonghao Zhang {
27792cc1cb1dSTonghao Zhang 	if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
27802cc1cb1dSTonghao Zhang 		return MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, max_modify_header_actions);
27812cc1cb1dSTonghao Zhang 	else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
27822cc1cb1dSTonghao Zhang 		return MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_modify_header_actions);
27832cc1cb1dSTonghao Zhang }
27842cc1cb1dSTonghao Zhang 
27856ae4a6a5SPaul Blakey int alloc_mod_hdr_actions(struct mlx5_core_dev *mdev,
2786c500c86bSPablo Neira Ayuso 			  int namespace,
27876ae4a6a5SPaul Blakey 			  struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
2788d79b6df6SOr Gerlitz {
27896ae4a6a5SPaul Blakey 	int action_size, new_num_actions, max_hw_actions;
27906ae4a6a5SPaul Blakey 	size_t new_sz, old_sz;
27916ae4a6a5SPaul Blakey 	void *ret;
2792d79b6df6SOr Gerlitz 
27936ae4a6a5SPaul Blakey 	if (mod_hdr_acts->num_actions < mod_hdr_acts->max_actions)
27946ae4a6a5SPaul Blakey 		return 0;
27956ae4a6a5SPaul Blakey 
2796d79b6df6SOr Gerlitz 	action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
2797d79b6df6SOr Gerlitz 
27986ae4a6a5SPaul Blakey 	max_hw_actions = mlx5e_flow_namespace_max_modify_action(mdev,
27996ae4a6a5SPaul Blakey 								namespace);
28006ae4a6a5SPaul Blakey 	new_num_actions = min(max_hw_actions,
28016ae4a6a5SPaul Blakey 			      mod_hdr_acts->actions ?
28026ae4a6a5SPaul Blakey 			      mod_hdr_acts->max_actions * 2 : 1);
28036ae4a6a5SPaul Blakey 	if (mod_hdr_acts->max_actions == new_num_actions)
28046ae4a6a5SPaul Blakey 		return -ENOSPC;
2805d79b6df6SOr Gerlitz 
28066ae4a6a5SPaul Blakey 	new_sz = action_size * new_num_actions;
28076ae4a6a5SPaul Blakey 	old_sz = mod_hdr_acts->max_actions * action_size;
28086ae4a6a5SPaul Blakey 	ret = krealloc(mod_hdr_acts->actions, new_sz, GFP_KERNEL);
28096ae4a6a5SPaul Blakey 	if (!ret)
2810d79b6df6SOr Gerlitz 		return -ENOMEM;
2811d79b6df6SOr Gerlitz 
28126ae4a6a5SPaul Blakey 	memset(ret + old_sz, 0, new_sz - old_sz);
28136ae4a6a5SPaul Blakey 	mod_hdr_acts->actions = ret;
28146ae4a6a5SPaul Blakey 	mod_hdr_acts->max_actions = new_num_actions;
28156ae4a6a5SPaul Blakey 
2816d79b6df6SOr Gerlitz 	return 0;
2817d79b6df6SOr Gerlitz }
2818d79b6df6SOr Gerlitz 
28196ae4a6a5SPaul Blakey void dealloc_mod_hdr_actions(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
28206ae4a6a5SPaul Blakey {
28216ae4a6a5SPaul Blakey 	kfree(mod_hdr_acts->actions);
28226ae4a6a5SPaul Blakey 	mod_hdr_acts->actions = NULL;
28236ae4a6a5SPaul Blakey 	mod_hdr_acts->num_actions = 0;
28246ae4a6a5SPaul Blakey 	mod_hdr_acts->max_actions = 0;
28256ae4a6a5SPaul Blakey }
28266ae4a6a5SPaul Blakey 
/* All-zero reference masks; any non-zero leftover after
 * offload_pedit_fields() marks a field that could not be offloaded.
 */
static const struct pedit_headers zero_masks = {};
2828d79b6df6SOr Gerlitz 
2829d79b6df6SOr Gerlitz static int parse_tc_pedit_action(struct mlx5e_priv *priv,
283073867881SPablo Neira Ayuso 				 const struct flow_action_entry *act, int namespace,
2831c500c86bSPablo Neira Ayuso 				 struct pedit_headers_action *hdrs,
2832e98bedf5SEli Britstein 				 struct netlink_ext_ack *extack)
2833d79b6df6SOr Gerlitz {
283473867881SPablo Neira Ayuso 	u8 cmd = (act->id == FLOW_ACTION_MANGLE) ? 0 : 1;
283573867881SPablo Neira Ayuso 	int err = -EOPNOTSUPP;
2836d79b6df6SOr Gerlitz 	u32 mask, val, offset;
283773867881SPablo Neira Ayuso 	u8 htype;
2838d79b6df6SOr Gerlitz 
283973867881SPablo Neira Ayuso 	htype = act->mangle.htype;
2840d79b6df6SOr Gerlitz 	err = -EOPNOTSUPP; /* can't be all optimistic */
2841d79b6df6SOr Gerlitz 
284273867881SPablo Neira Ayuso 	if (htype == FLOW_ACT_MANGLE_UNSPEC) {
284373867881SPablo Neira Ayuso 		NL_SET_ERR_MSG_MOD(extack, "legacy pedit isn't offloaded");
2844d79b6df6SOr Gerlitz 		goto out_err;
2845d79b6df6SOr Gerlitz 	}
2846d79b6df6SOr Gerlitz 
28472cc1cb1dSTonghao Zhang 	if (!mlx5e_flow_namespace_max_modify_action(priv->mdev, namespace)) {
28482cc1cb1dSTonghao Zhang 		NL_SET_ERR_MSG_MOD(extack,
28492cc1cb1dSTonghao Zhang 				   "The pedit offload action is not supported");
28502cc1cb1dSTonghao Zhang 		goto out_err;
28512cc1cb1dSTonghao Zhang 	}
28522cc1cb1dSTonghao Zhang 
285373867881SPablo Neira Ayuso 	mask = act->mangle.mask;
285473867881SPablo Neira Ayuso 	val = act->mangle.val;
285573867881SPablo Neira Ayuso 	offset = act->mangle.offset;
2856d79b6df6SOr Gerlitz 
2857c500c86bSPablo Neira Ayuso 	err = set_pedit_val(htype, ~mask, val, offset, &hdrs[cmd]);
2858d79b6df6SOr Gerlitz 	if (err)
2859d79b6df6SOr Gerlitz 		goto out_err;
2860c500c86bSPablo Neira Ayuso 
2861c500c86bSPablo Neira Ayuso 	hdrs[cmd].pedits++;
2862d79b6df6SOr Gerlitz 
2863c500c86bSPablo Neira Ayuso 	return 0;
2864c500c86bSPablo Neira Ayuso out_err:
2865c500c86bSPablo Neira Ayuso 	return err;
2866c500c86bSPablo Neira Ayuso }
2867c500c86bSPablo Neira Ayuso 
2868c500c86bSPablo Neira Ayuso static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
2869c500c86bSPablo Neira Ayuso 				 struct mlx5e_tc_flow_parse_attr *parse_attr,
2870c500c86bSPablo Neira Ayuso 				 struct pedit_headers_action *hdrs,
287127c11b6bSEli Britstein 				 u32 *action_flags,
2872c500c86bSPablo Neira Ayuso 				 struct netlink_ext_ack *extack)
2873c500c86bSPablo Neira Ayuso {
2874c500c86bSPablo Neira Ayuso 	struct pedit_headers *cmd_masks;
2875c500c86bSPablo Neira Ayuso 	int err;
2876c500c86bSPablo Neira Ayuso 	u8 cmd;
2877c500c86bSPablo Neira Ayuso 
28786ae4a6a5SPaul Blakey 	err = offload_pedit_fields(priv, namespace, hdrs, parse_attr,
28796ae4a6a5SPaul Blakey 				   action_flags, extack);
2880d79b6df6SOr Gerlitz 	if (err < 0)
2881d79b6df6SOr Gerlitz 		goto out_dealloc_parsed_actions;
2882d79b6df6SOr Gerlitz 
2883d79b6df6SOr Gerlitz 	for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
2884c500c86bSPablo Neira Ayuso 		cmd_masks = &hdrs[cmd].masks;
2885d79b6df6SOr Gerlitz 		if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
2886e98bedf5SEli Britstein 			NL_SET_ERR_MSG_MOD(extack,
2887e98bedf5SEli Britstein 					   "attempt to offload an unsupported field");
2888b3a433deSOr Gerlitz 			netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd);
2889d79b6df6SOr Gerlitz 			print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
2890d79b6df6SOr Gerlitz 				       16, 1, cmd_masks, sizeof(zero_masks), true);
2891d79b6df6SOr Gerlitz 			err = -EOPNOTSUPP;
2892d79b6df6SOr Gerlitz 			goto out_dealloc_parsed_actions;
2893d79b6df6SOr Gerlitz 		}
2894d79b6df6SOr Gerlitz 	}
2895d79b6df6SOr Gerlitz 
2896d79b6df6SOr Gerlitz 	return 0;
2897d79b6df6SOr Gerlitz 
2898d79b6df6SOr Gerlitz out_dealloc_parsed_actions:
28996ae4a6a5SPaul Blakey 	dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
2900d79b6df6SOr Gerlitz 	return err;
2901d79b6df6SOr Gerlitz }
2902d79b6df6SOr Gerlitz 
2903e98bedf5SEli Britstein static bool csum_offload_supported(struct mlx5e_priv *priv,
2904e98bedf5SEli Britstein 				   u32 action,
2905e98bedf5SEli Britstein 				   u32 update_flags,
2906e98bedf5SEli Britstein 				   struct netlink_ext_ack *extack)
290726c02749SOr Gerlitz {
290826c02749SOr Gerlitz 	u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
290926c02749SOr Gerlitz 			 TCA_CSUM_UPDATE_FLAG_UDP;
291026c02749SOr Gerlitz 
291126c02749SOr Gerlitz 	/*  The HW recalcs checksums only if re-writing headers */
291226c02749SOr Gerlitz 	if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
2913e98bedf5SEli Britstein 		NL_SET_ERR_MSG_MOD(extack,
2914e98bedf5SEli Britstein 				   "TC csum action is only offloaded with pedit");
291526c02749SOr Gerlitz 		netdev_warn(priv->netdev,
291626c02749SOr Gerlitz 			    "TC csum action is only offloaded with pedit\n");
291726c02749SOr Gerlitz 		return false;
291826c02749SOr Gerlitz 	}
291926c02749SOr Gerlitz 
292026c02749SOr Gerlitz 	if (update_flags & ~prot_flags) {
2921e98bedf5SEli Britstein 		NL_SET_ERR_MSG_MOD(extack,
2922e98bedf5SEli Britstein 				   "can't offload TC csum action for some header/s");
292326c02749SOr Gerlitz 		netdev_warn(priv->netdev,
292426c02749SOr Gerlitz 			    "can't offload TC csum action for some header/s - flags %#x\n",
292526c02749SOr Gerlitz 			    update_flags);
292626c02749SOr Gerlitz 		return false;
292726c02749SOr Gerlitz 	}
292826c02749SOr Gerlitz 
292926c02749SOr Gerlitz 	return true;
293026c02749SOr Gerlitz }
293126c02749SOr Gerlitz 
/* Overlay of the 32-bit IPv4 header word holding ttl, protocol and check;
 * used to decide whether a pedit touches only the ttl byte.
 */
struct ip_ttl_word {
	__u8	ttl;
	__u8	protocol;
	__sum16	check;
};
29378998576bSDmytro Linkin 
/* Overlay of the 32-bit IPv6 header word holding payload_len, nexthdr and
 * hop_limit; used to decide whether a pedit touches only hop_limit.
 */
struct ipv6_hoplimit_word {
	__be16	payload_len;
	__u8	nexthdr;
	__u8	hop_limit;
};
29438998576bSDmytro Linkin 
/* Classify one pedit action's rewrite target.
 *
 * Sets *modify_ip_header when the mangled 32-bit word alters anything in
 * an IPv4/IPv6 header beyond the ttl / hop_limit byte.  When @ct_flow is
 * set, returns -EOPNOTSUPP for rewrites of IP addresses or of L4 (TCP/UDP)
 * headers, which are refused in combination with a ct action.
 */
static int is_action_keys_supported(const struct flow_action_entry *act,
				    bool ct_flow, bool *modify_ip_header,
				    struct netlink_ext_ack *extack)
{
	u32 mask, offset;
	u8 htype;

	htype = act->mangle.htype;
	offset = act->mangle.offset;
	/* invert: pedit masks have zero bits where bytes are rewritten */
	mask = ~act->mangle.mask;
	/* For IPv4 & IPv6 header check 4 byte word,
	 * to determine that modified fields
	 * are NOT ttl & hop_limit only.
	 */
	if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4) {
		struct ip_ttl_word *ttl_word =
			(struct ip_ttl_word *)&mask;

		if (offset != offsetof(struct iphdr, ttl) ||
		    ttl_word->protocol ||
		    ttl_word->check) {
			*modify_ip_header = true;
		}

		if (ct_flow && offset >= offsetof(struct iphdr, saddr)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "can't offload re-write of ipv4 address with action ct");
			return -EOPNOTSUPP;
		}
	} else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
		struct ipv6_hoplimit_word *hoplimit_word =
			(struct ipv6_hoplimit_word *)&mask;

		if (offset != offsetof(struct ipv6hdr, payload_len) ||
		    hoplimit_word->payload_len ||
		    hoplimit_word->nexthdr) {
			*modify_ip_header = true;
		}

		if (ct_flow && offset >= offsetof(struct ipv6hdr, saddr)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "can't offload re-write of ipv6 address with action ct");
			return -EOPNOTSUPP;
		}
	} else if (ct_flow && (htype == FLOW_ACT_MANGLE_HDR_TYPE_TCP ||
			       htype == FLOW_ACT_MANGLE_HDR_TYPE_UDP)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "can't offload re-write of transport header ports with action ct");
		return -EOPNOTSUPP;
	}

	return 0;
}
29978998576bSDmytro Linkin 
2998bdd66ac0SOr Gerlitz static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
299973867881SPablo Neira Ayuso 					  struct flow_action *flow_action,
30004c3844d9SPaul Blakey 					  u32 actions, bool ct_flow,
3001e98bedf5SEli Britstein 					  struct netlink_ext_ack *extack)
3002bdd66ac0SOr Gerlitz {
300373867881SPablo Neira Ayuso 	const struct flow_action_entry *act;
3004bdd66ac0SOr Gerlitz 	bool modify_ip_header;
3005bdd66ac0SOr Gerlitz 	void *headers_v;
3006bdd66ac0SOr Gerlitz 	u16 ethertype;
30078998576bSDmytro Linkin 	u8 ip_proto;
30084c3844d9SPaul Blakey 	int i, err;
3009bdd66ac0SOr Gerlitz 
30108377629eSEli Britstein 	headers_v = get_match_headers_value(actions, spec);
3011bdd66ac0SOr Gerlitz 	ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
3012bdd66ac0SOr Gerlitz 
3013bdd66ac0SOr Gerlitz 	/* for non-IP we only re-write MACs, so we're okay */
3014bdd66ac0SOr Gerlitz 	if (ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)
3015bdd66ac0SOr Gerlitz 		goto out_ok;
3016bdd66ac0SOr Gerlitz 
3017bdd66ac0SOr Gerlitz 	modify_ip_header = false;
301873867881SPablo Neira Ayuso 	flow_action_for_each(i, act, flow_action) {
301973867881SPablo Neira Ayuso 		if (act->id != FLOW_ACTION_MANGLE &&
302073867881SPablo Neira Ayuso 		    act->id != FLOW_ACTION_ADD)
3021bdd66ac0SOr Gerlitz 			continue;
3022bdd66ac0SOr Gerlitz 
30234c3844d9SPaul Blakey 		err = is_action_keys_supported(act, ct_flow,
30244c3844d9SPaul Blakey 					       &modify_ip_header, extack);
30254c3844d9SPaul Blakey 		if (err)
30264c3844d9SPaul Blakey 			return err;
3027bdd66ac0SOr Gerlitz 	}
3028bdd66ac0SOr Gerlitz 
3029bdd66ac0SOr Gerlitz 	ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
30301ccef350SJianbo Liu 	if (modify_ip_header && ip_proto != IPPROTO_TCP &&
30311ccef350SJianbo Liu 	    ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
3032e98bedf5SEli Britstein 		NL_SET_ERR_MSG_MOD(extack,
3033e98bedf5SEli Britstein 				   "can't offload re-write of non TCP/UDP");
3034bdd66ac0SOr Gerlitz 		pr_info("can't offload re-write of ip proto %d\n", ip_proto);
3035bdd66ac0SOr Gerlitz 		return false;
3036bdd66ac0SOr Gerlitz 	}
3037bdd66ac0SOr Gerlitz 
3038bdd66ac0SOr Gerlitz out_ok:
3039bdd66ac0SOr Gerlitz 	return true;
3040bdd66ac0SOr Gerlitz }
3041bdd66ac0SOr Gerlitz 
3042bdd66ac0SOr Gerlitz static bool actions_match_supported(struct mlx5e_priv *priv,
304373867881SPablo Neira Ayuso 				    struct flow_action *flow_action,
3044bdd66ac0SOr Gerlitz 				    struct mlx5e_tc_flow_parse_attr *parse_attr,
3045e98bedf5SEli Britstein 				    struct mlx5e_tc_flow *flow,
3046e98bedf5SEli Britstein 				    struct netlink_ext_ack *extack)
3047bdd66ac0SOr Gerlitz {
3048d0645b37SRoi Dayan 	bool ct_flow;
3049bdd66ac0SOr Gerlitz 	u32 actions;
3050bdd66ac0SOr Gerlitz 
30514c3844d9SPaul Blakey 	ct_flow = flow_flag_test(flow, CT);
30524c3844d9SPaul Blakey 	if (mlx5e_is_eswitch_flow(flow)) {
3053bdd66ac0SOr Gerlitz 		actions = flow->esw_attr->action;
30544c3844d9SPaul Blakey 
30554c3844d9SPaul Blakey 		if (flow->esw_attr->split_count && ct_flow) {
30564c3844d9SPaul Blakey 			/* All registers used by ct are cleared when using
30574c3844d9SPaul Blakey 			 * split rules.
30584c3844d9SPaul Blakey 			 */
30594c3844d9SPaul Blakey 			NL_SET_ERR_MSG_MOD(extack,
30604c3844d9SPaul Blakey 					   "Can't offload mirroring with action ct");
306149397b80SDan Carpenter 			return false;
30624c3844d9SPaul Blakey 		}
30634c3844d9SPaul Blakey 	} else {
3064bdd66ac0SOr Gerlitz 		actions = flow->nic_attr->action;
30654c3844d9SPaul Blakey 	}
3066bdd66ac0SOr Gerlitz 
3067bdd66ac0SOr Gerlitz 	if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
306873867881SPablo Neira Ayuso 		return modify_header_match_supported(&parse_attr->spec,
3069a655fe9fSDavid S. Miller 						     flow_action, actions,
30704c3844d9SPaul Blakey 						     ct_flow, extack);
3071bdd66ac0SOr Gerlitz 
3072bdd66ac0SOr Gerlitz 	return true;
3073bdd66ac0SOr Gerlitz }
3074bdd66ac0SOr Gerlitz 
30755c65c564SOr Gerlitz static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
30765c65c564SOr Gerlitz {
30775c65c564SOr Gerlitz 	struct mlx5_core_dev *fmdev, *pmdev;
3078816f6706SOr Gerlitz 	u64 fsystem_guid, psystem_guid;
30795c65c564SOr Gerlitz 
30805c65c564SOr Gerlitz 	fmdev = priv->mdev;
30815c65c564SOr Gerlitz 	pmdev = peer_priv->mdev;
30825c65c564SOr Gerlitz 
308359c9d35eSAlaa Hleihel 	fsystem_guid = mlx5_query_nic_system_image_guid(fmdev);
308459c9d35eSAlaa Hleihel 	psystem_guid = mlx5_query_nic_system_image_guid(pmdev);
30855c65c564SOr Gerlitz 
3086816f6706SOr Gerlitz 	return (fsystem_guid == psystem_guid);
30875c65c564SOr Gerlitz }
30885c65c564SOr Gerlitz 
3089bdc837eeSEli Britstein static int add_vlan_rewrite_action(struct mlx5e_priv *priv, int namespace,
3090bdc837eeSEli Britstein 				   const struct flow_action_entry *act,
3091bdc837eeSEli Britstein 				   struct mlx5e_tc_flow_parse_attr *parse_attr,
3092bdc837eeSEli Britstein 				   struct pedit_headers_action *hdrs,
3093bdc837eeSEli Britstein 				   u32 *action, struct netlink_ext_ack *extack)
3094bdc837eeSEli Britstein {
3095bdc837eeSEli Britstein 	u16 mask16 = VLAN_VID_MASK;
3096bdc837eeSEli Britstein 	u16 val16 = act->vlan.vid & VLAN_VID_MASK;
3097bdc837eeSEli Britstein 	const struct flow_action_entry pedit_act = {
3098bdc837eeSEli Britstein 		.id = FLOW_ACTION_MANGLE,
3099bdc837eeSEli Britstein 		.mangle.htype = FLOW_ACT_MANGLE_HDR_TYPE_ETH,
3100bdc837eeSEli Britstein 		.mangle.offset = offsetof(struct vlan_ethhdr, h_vlan_TCI),
3101bdc837eeSEli Britstein 		.mangle.mask = ~(u32)be16_to_cpu(*(__be16 *)&mask16),
3102bdc837eeSEli Britstein 		.mangle.val = (u32)be16_to_cpu(*(__be16 *)&val16),
3103bdc837eeSEli Britstein 	};
31046fca9d1eSEli Britstein 	u8 match_prio_mask, match_prio_val;
3105bf2f3bcaSEli Britstein 	void *headers_c, *headers_v;
3106bdc837eeSEli Britstein 	int err;
3107bdc837eeSEli Britstein 
3108bf2f3bcaSEli Britstein 	headers_c = get_match_headers_criteria(*action, &parse_attr->spec);
3109bf2f3bcaSEli Britstein 	headers_v = get_match_headers_value(*action, &parse_attr->spec);
3110bf2f3bcaSEli Britstein 
3111bf2f3bcaSEli Britstein 	if (!(MLX5_GET(fte_match_set_lyr_2_4, headers_c, cvlan_tag) &&
3112bf2f3bcaSEli Britstein 	      MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag))) {
3113bf2f3bcaSEli Britstein 		NL_SET_ERR_MSG_MOD(extack,
3114bf2f3bcaSEli Britstein 				   "VLAN rewrite action must have VLAN protocol match");
3115bf2f3bcaSEli Britstein 		return -EOPNOTSUPP;
3116bf2f3bcaSEli Britstein 	}
3117bf2f3bcaSEli Britstein 
31186fca9d1eSEli Britstein 	match_prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
31196fca9d1eSEli Britstein 	match_prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
31206fca9d1eSEli Britstein 	if (act->vlan.prio != (match_prio_val & match_prio_mask)) {
31216fca9d1eSEli Britstein 		NL_SET_ERR_MSG_MOD(extack,
31226fca9d1eSEli Britstein 				   "Changing VLAN prio is not supported");
3123bdc837eeSEli Britstein 		return -EOPNOTSUPP;
3124bdc837eeSEli Britstein 	}
3125bdc837eeSEli Britstein 
3126dec481c8SEli Cohen 	err = parse_tc_pedit_action(priv, &pedit_act, namespace, hdrs, NULL);
3127bdc837eeSEli Britstein 	*action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
3128bdc837eeSEli Britstein 
3129bdc837eeSEli Britstein 	return err;
3130bdc837eeSEli Britstein }
3131bdc837eeSEli Britstein 
31320bac1194SEli Britstein static int
31330bac1194SEli Britstein add_vlan_prio_tag_rewrite_action(struct mlx5e_priv *priv,
31340bac1194SEli Britstein 				 struct mlx5e_tc_flow_parse_attr *parse_attr,
31350bac1194SEli Britstein 				 struct pedit_headers_action *hdrs,
31360bac1194SEli Britstein 				 u32 *action, struct netlink_ext_ack *extack)
31370bac1194SEli Britstein {
31380bac1194SEli Britstein 	const struct flow_action_entry prio_tag_act = {
31390bac1194SEli Britstein 		.vlan.vid = 0,
31400bac1194SEli Britstein 		.vlan.prio =
31410bac1194SEli Britstein 			MLX5_GET(fte_match_set_lyr_2_4,
31420bac1194SEli Britstein 				 get_match_headers_value(*action,
31430bac1194SEli Britstein 							 &parse_attr->spec),
31440bac1194SEli Britstein 				 first_prio) &
31450bac1194SEli Britstein 			MLX5_GET(fte_match_set_lyr_2_4,
31460bac1194SEli Britstein 				 get_match_headers_criteria(*action,
31470bac1194SEli Britstein 							    &parse_attr->spec),
31480bac1194SEli Britstein 				 first_prio),
31490bac1194SEli Britstein 	};
31500bac1194SEli Britstein 
31510bac1194SEli Britstein 	return add_vlan_rewrite_action(priv, MLX5_FLOW_NAMESPACE_FDB,
31520bac1194SEli Britstein 				       &prio_tag_act, parse_attr, hdrs, action,
31530bac1194SEli Britstein 				       extack);
31540bac1194SEli Britstein }
31550bac1194SEli Britstein 
/* Parse the TC action list of a NIC (non-eswitch) flow into the mlx5
 * flow-context action bits and the flow's nic_attr.
 *
 * Accumulates action flags locally in 'action' and commits them to
 * attr->action only after all entries parsed; pedit/vlan-rewrite actions
 * are gathered in 'hdrs' and allocated at the end.
 * Returns 0 on success or a negative errno.
 */
static int parse_tc_nic_actions(struct mlx5e_priv *priv,
				struct flow_action *flow_action,
				struct mlx5e_tc_flow_parse_attr *parse_attr,
				struct mlx5e_tc_flow *flow,
				struct netlink_ext_ack *extack)
{
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	struct pedit_headers_action hdrs[2] = {};
	const struct flow_action_entry *act;
	u32 action = 0;
	int err, i;

	if (!flow_action_has_entries(flow_action))
		return -EINVAL;

	/* Only delayed HW stats are supported for this path. */
	if (!flow_action_hw_stats_check(flow_action, extack,
					FLOW_ACTION_HW_STATS_DELAYED_BIT))
		return -EOPNOTSUPP;

	attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_ACCEPT:
			action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
				  MLX5_FLOW_CONTEXT_ACTION_COUNT;
			break;
		case FLOW_ACTION_DROP:
			action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
			/* Count dropped packets only if the NIC RX table
			 * supports flow counters.
			 */
			if (MLX5_CAP_FLOWTABLE(priv->mdev,
					       flow_table_properties_nic_receive.flow_counter))
				action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
			break;
		case FLOW_ACTION_MANGLE:
		case FLOW_ACTION_ADD:
			err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_KERNEL,
						    hdrs, extack);
			if (err)
				return err;

			action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
				  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			break;
		case FLOW_ACTION_VLAN_MANGLE:
			/* VLAN rewrite is implemented as a pedit of the TCI. */
			err = add_vlan_rewrite_action(priv,
						      MLX5_FLOW_NAMESPACE_KERNEL,
						      act, parse_attr, hdrs,
						      &action, extack);
			if (err)
				return err;

			break;
		case FLOW_ACTION_CSUM:
			if (csum_offload_supported(priv, action,
						   act->csum_flags,
						   extack))
				break;

			return -EOPNOTSUPP;
		case FLOW_ACTION_REDIRECT: {
			struct net_device *peer_dev = act->dev;

			/* Redirect on the NIC path is only offloadable as
			 * hairpin between functions of the same HW device.
			 */
			if (priv->netdev->netdev_ops == peer_dev->netdev_ops &&
			    same_hw_devs(priv, netdev_priv(peer_dev))) {
				parse_attr->mirred_ifindex[0] = peer_dev->ifindex;
				flow_flag_set(flow, HAIRPIN);
				action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
					  MLX5_FLOW_CONTEXT_ACTION_COUNT;
			} else {
				NL_SET_ERR_MSG_MOD(extack,
						   "device is not on same HW, can't offload");
				netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
					    peer_dev->name);
				return -EINVAL;
			}
			}
			break;
		case FLOW_ACTION_MARK: {
			u32 mark = act->mark;

			/* HW flow tag is limited to 16 bits. */
			if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Bad flow mark - only 16 bit is supported");
				return -EINVAL;
			}

			attr->flow_tag = mark;
			action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			}
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
			return -EOPNOTSUPP;
		}
	}

	if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
	    hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
		err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL,
					    parse_attr, hdrs, &action, extack);
		if (err)
			return err;
		/* in case all pedit actions are skipped, remove the MOD_HDR
		 * flag.
		 */
		if (parse_attr->mod_hdr_acts.num_actions == 0) {
			action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
			dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
		}
	}

	attr->action = action;
	if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
		return -EOPNOTSUPP;

	return 0;
}
3273e3a2b7edSAmir Vadai 
/* Lookup key for entries in the eswitch encap hash table; compared with
 * cmp_encap_info() and hashed with hash_encap_info().
 */
struct encap_key {
	const struct ip_tunnel_key *ip_tun_key;	/* compared by memcmp */
	struct mlx5e_tc_tunnel *tc_tunnel;	/* only tunnel_type is compared */
};
32787f1a546eSEli Britstein 
32797f1a546eSEli Britstein static inline int cmp_encap_info(struct encap_key *a,
32807f1a546eSEli Britstein 				 struct encap_key *b)
3281a54e20b4SHadar Hen Zion {
32827f1a546eSEli Britstein 	return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) ||
3283d386939aSYevgeny Kliteynik 	       a->tc_tunnel->tunnel_type != b->tc_tunnel->tunnel_type;
3284a54e20b4SHadar Hen Zion }
3285a54e20b4SHadar Hen Zion 
32867f1a546eSEli Britstein static inline int hash_encap_info(struct encap_key *key)
3287a54e20b4SHadar Hen Zion {
32887f1a546eSEli Britstein 	return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key),
3289d386939aSYevgeny Kliteynik 		     key->tc_tunnel->tunnel_type);
3290a54e20b4SHadar Hen Zion }
3291a54e20b4SHadar Hen Zion 
3292a54e20b4SHadar Hen Zion 
3293b1d90e6bSRabie Loulou static bool is_merged_eswitch_dev(struct mlx5e_priv *priv,
3294b1d90e6bSRabie Loulou 				  struct net_device *peer_netdev)
3295b1d90e6bSRabie Loulou {
3296b1d90e6bSRabie Loulou 	struct mlx5e_priv *peer_priv;
3297b1d90e6bSRabie Loulou 
3298b1d90e6bSRabie Loulou 	peer_priv = netdev_priv(peer_netdev);
3299b1d90e6bSRabie Loulou 
3300b1d90e6bSRabie Loulou 	return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) &&
330168931c7dSRoi Dayan 		mlx5e_eswitch_rep(priv->netdev) &&
330268931c7dSRoi Dayan 		mlx5e_eswitch_rep(peer_netdev) &&
330368931c7dSRoi Dayan 		same_hw_devs(priv, peer_priv));
3304b1d90e6bSRabie Loulou }
3305b1d90e6bSRabie Loulou 
3306ce99f6b9SOr Gerlitz 
330754c177caSOz Shlomo 
/* Try to take a reference on an encap entry; returns false when the
 * refcount already dropped to zero (entry is being destroyed) and the
 * entry must not be used.
 */
bool mlx5e_encap_take(struct mlx5e_encap_entry *e)
{
	return refcount_inc_not_zero(&e->refcnt);
}
3312948993f2SVlad Buslov 
3313948993f2SVlad Buslov static struct mlx5e_encap_entry *
3314948993f2SVlad Buslov mlx5e_encap_get(struct mlx5e_priv *priv, struct encap_key *key,
3315948993f2SVlad Buslov 		uintptr_t hash_key)
3316948993f2SVlad Buslov {
3317948993f2SVlad Buslov 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3318948993f2SVlad Buslov 	struct mlx5e_encap_entry *e;
3319948993f2SVlad Buslov 	struct encap_key e_key;
3320948993f2SVlad Buslov 
3321948993f2SVlad Buslov 	hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
3322948993f2SVlad Buslov 				   encap_hlist, hash_key) {
3323948993f2SVlad Buslov 		e_key.ip_tun_key = &e->tun_info->key;
3324948993f2SVlad Buslov 		e_key.tc_tunnel = e->tunnel;
3325948993f2SVlad Buslov 		if (!cmp_encap_info(&e_key, key) &&
3326948993f2SVlad Buslov 		    mlx5e_encap_take(e))
3327948993f2SVlad Buslov 			return e;
3328948993f2SVlad Buslov 	}
3329948993f2SVlad Buslov 
3330948993f2SVlad Buslov 	return NULL;
3331948993f2SVlad Buslov }
3332948993f2SVlad Buslov 
33332a4b6526SVlad Buslov static struct ip_tunnel_info *dup_tun_info(const struct ip_tunnel_info *tun_info)
33342a4b6526SVlad Buslov {
33352a4b6526SVlad Buslov 	size_t tun_size = sizeof(*tun_info) + tun_info->options_len;
33362a4b6526SVlad Buslov 
33372a4b6526SVlad Buslov 	return kmemdup(tun_info, tun_size, GFP_KERNEL);
33382a4b6526SVlad Buslov }
33392a4b6526SVlad Buslov 
3340554fe75cSDmytro Linkin static bool is_duplicated_encap_entry(struct mlx5e_priv *priv,
3341554fe75cSDmytro Linkin 				      struct mlx5e_tc_flow *flow,
3342554fe75cSDmytro Linkin 				      int out_index,
3343554fe75cSDmytro Linkin 				      struct mlx5e_encap_entry *e,
3344554fe75cSDmytro Linkin 				      struct netlink_ext_ack *extack)
3345554fe75cSDmytro Linkin {
3346554fe75cSDmytro Linkin 	int i;
3347554fe75cSDmytro Linkin 
3348554fe75cSDmytro Linkin 	for (i = 0; i < out_index; i++) {
3349554fe75cSDmytro Linkin 		if (flow->encaps[i].e != e)
3350554fe75cSDmytro Linkin 			continue;
3351554fe75cSDmytro Linkin 		NL_SET_ERR_MSG_MOD(extack, "can't duplicate encap action");
3352554fe75cSDmytro Linkin 		netdev_err(priv->netdev, "can't duplicate encap action\n");
3353554fe75cSDmytro Linkin 		return true;
3354554fe75cSDmytro Linkin 	}
3355554fe75cSDmytro Linkin 
3356554fe75cSDmytro Linkin 	return false;
3357554fe75cSDmytro Linkin }
3358554fe75cSDmytro Linkin 
/* Attach an encap (tunnel header rewrite) entry to output @out_index of an
 * eswitch flow, sharing entries across flows via the eswitch encap hash
 * table.  Creates the entry and resolves its neighbour/header if no
 * matching entry exists yet.
 *
 * On success *encap_dev is the egress device and *encap_valid tells
 * whether the encap header is resolved (neigh valid) yet.
 * Returns 0 or a negative errno.
 */
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow,
			      struct net_device *mirred_dev,
			      int out_index,
			      struct netlink_ext_ack *extack,
			      struct net_device **encap_dev,
			      bool *encap_valid)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	const struct ip_tunnel_info *tun_info;
	struct encap_key key;
	struct mlx5e_encap_entry *e;
	unsigned short family;
	uintptr_t hash_key;
	int err = 0;

	parse_attr = attr->parse_attr;
	tun_info = parse_attr->tun_info[out_index];
	family = ip_tunnel_info_af(tun_info);
	key.ip_tun_key = &tun_info->key;
	key.tc_tunnel = mlx5e_get_tc_tun(mirred_dev);
	if (!key.tc_tunnel) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported tunnel");
		return -EOPNOTSUPP;
	}

	hash_key = hash_encap_info(&key);

	/* encap_tbl_lock serializes table lookup/insert with teardown. */
	mutex_lock(&esw->offloads.encap_tbl_lock);
	e = mlx5e_encap_get(priv, &key, hash_key);

	/* must verify if encap is valid or not */
	if (e) {
		/* Check that entry was not already attached to this flow */
		if (is_duplicated_encap_entry(priv, flow, out_index, e, extack)) {
			err = -EOPNOTSUPP;
			goto out_err;
		}

		/* Drop the lock while waiting for the entry's header
		 * resolution to complete (done by the creator below).
		 */
		mutex_unlock(&esw->offloads.encap_tbl_lock);
		wait_for_completion(&e->res_ready);

		/* Protect against concurrent neigh update. */
		mutex_lock(&esw->offloads.encap_tbl_lock);
		if (e->compl_result < 0) {
			err = -EREMOTEIO;
			goto out_err;
		}
		goto attach_flow;
	}

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e) {
		err = -ENOMEM;
		goto out_err;
	}

	refcount_set(&e->refcnt, 1);
	init_completion(&e->res_ready);

	/* The entry keeps its own copy of the tunnel info. */
	tun_info = dup_tun_info(tun_info);
	if (!tun_info) {
		err = -ENOMEM;
		goto out_err_init;
	}
	e->tun_info = tun_info;
	err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack);
	if (err)
		goto out_err_init;

	INIT_LIST_HEAD(&e->flows);
	/* Publish the entry before resolving its header so concurrent
	 * lookups can find it and wait on res_ready.
	 */
	hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
	mutex_unlock(&esw->offloads.encap_tbl_lock);

	if (family == AF_INET)
		err = mlx5e_tc_tun_create_header_ipv4(priv, mirred_dev, e);
	else if (family == AF_INET6)
		err = mlx5e_tc_tun_create_header_ipv6(priv, mirred_dev, e);

	/* Protect against concurrent neigh update. */
	mutex_lock(&esw->offloads.encap_tbl_lock);
	complete_all(&e->res_ready);
	if (err) {
		e->compl_result = err;
		goto out_err;
	}
	e->compl_result = 1;

attach_flow:
	flow->encaps[out_index].e = e;
	list_add(&flow->encaps[out_index].list, &e->flows);
	flow->encaps[out_index].index = out_index;
	*encap_dev = e->out_dev;
	if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
		attr->dests[out_index].pkt_reformat = e->pkt_reformat;
		attr->dests[out_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
		*encap_valid = true;
	} else {
		/* Neigh not resolved yet; the flow is offloaded later when
		 * the encap entry becomes valid.
		 */
		*encap_valid = false;
	}
	mutex_unlock(&esw->offloads.encap_tbl_lock);

	return err;

out_err:
	mutex_unlock(&esw->offloads.encap_tbl_lock);
	if (e)
		mlx5e_encap_put(priv, e);
	return err;

out_err_init:
	/* Entry was never published; free it directly. */
	mutex_unlock(&esw->offloads.encap_tbl_lock);
	kfree(tun_info);
	kfree(e);
	return err;
}
3477a54e20b4SHadar Hen Zion 
34781482bd3dSJianbo Liu static int parse_tc_vlan_action(struct mlx5e_priv *priv,
347973867881SPablo Neira Ayuso 				const struct flow_action_entry *act,
34801482bd3dSJianbo Liu 				struct mlx5_esw_flow_attr *attr,
34811482bd3dSJianbo Liu 				u32 *action)
34821482bd3dSJianbo Liu {
3483cc495188SJianbo Liu 	u8 vlan_idx = attr->total_vlan;
3484cc495188SJianbo Liu 
3485cc495188SJianbo Liu 	if (vlan_idx >= MLX5_FS_VLAN_DEPTH)
34861482bd3dSJianbo Liu 		return -EOPNOTSUPP;
3487cc495188SJianbo Liu 
348873867881SPablo Neira Ayuso 	switch (act->id) {
348973867881SPablo Neira Ayuso 	case FLOW_ACTION_VLAN_POP:
3490cc495188SJianbo Liu 		if (vlan_idx) {
3491cc495188SJianbo Liu 			if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
3492cc495188SJianbo Liu 								 MLX5_FS_VLAN_DEPTH))
3493cc495188SJianbo Liu 				return -EOPNOTSUPP;
3494cc495188SJianbo Liu 
3495cc495188SJianbo Liu 			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2;
3496cc495188SJianbo Liu 		} else {
3497cc495188SJianbo Liu 			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
3498cc495188SJianbo Liu 		}
349973867881SPablo Neira Ayuso 		break;
350073867881SPablo Neira Ayuso 	case FLOW_ACTION_VLAN_PUSH:
350173867881SPablo Neira Ayuso 		attr->vlan_vid[vlan_idx] = act->vlan.vid;
350273867881SPablo Neira Ayuso 		attr->vlan_prio[vlan_idx] = act->vlan.prio;
350373867881SPablo Neira Ayuso 		attr->vlan_proto[vlan_idx] = act->vlan.proto;
3504cc495188SJianbo Liu 		if (!attr->vlan_proto[vlan_idx])
3505cc495188SJianbo Liu 			attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q);
3506cc495188SJianbo Liu 
3507cc495188SJianbo Liu 		if (vlan_idx) {
3508cc495188SJianbo Liu 			if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
3509cc495188SJianbo Liu 								 MLX5_FS_VLAN_DEPTH))
3510cc495188SJianbo Liu 				return -EOPNOTSUPP;
3511cc495188SJianbo Liu 
3512cc495188SJianbo Liu 			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
3513cc495188SJianbo Liu 		} else {
3514cc495188SJianbo Liu 			if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) &&
351573867881SPablo Neira Ayuso 			    (act->vlan.proto != htons(ETH_P_8021Q) ||
351673867881SPablo Neira Ayuso 			     act->vlan.prio))
3517cc495188SJianbo Liu 				return -EOPNOTSUPP;
3518cc495188SJianbo Liu 
3519cc495188SJianbo Liu 			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
35201482bd3dSJianbo Liu 		}
352173867881SPablo Neira Ayuso 		break;
352273867881SPablo Neira Ayuso 	default:
3523bdc837eeSEli Britstein 		return -EINVAL;
35241482bd3dSJianbo Liu 	}
35251482bd3dSJianbo Liu 
3526cc495188SJianbo Liu 	attr->total_vlan = vlan_idx + 1;
3527cc495188SJianbo Liu 
35281482bd3dSJianbo Liu 	return 0;
35291482bd3dSJianbo Liu }
35301482bd3dSJianbo Liu 
3531278748a9SEli Britstein static int add_vlan_push_action(struct mlx5e_priv *priv,
3532278748a9SEli Britstein 				struct mlx5_esw_flow_attr *attr,
3533278748a9SEli Britstein 				struct net_device **out_dev,
3534278748a9SEli Britstein 				u32 *action)
3535278748a9SEli Britstein {
3536278748a9SEli Britstein 	struct net_device *vlan_dev = *out_dev;
3537278748a9SEli Britstein 	struct flow_action_entry vlan_act = {
3538278748a9SEli Britstein 		.id = FLOW_ACTION_VLAN_PUSH,
3539278748a9SEli Britstein 		.vlan.vid = vlan_dev_vlan_id(vlan_dev),
3540278748a9SEli Britstein 		.vlan.proto = vlan_dev_vlan_proto(vlan_dev),
3541278748a9SEli Britstein 		.vlan.prio = 0,
3542278748a9SEli Britstein 	};
3543278748a9SEli Britstein 	int err;
3544278748a9SEli Britstein 
3545278748a9SEli Britstein 	err = parse_tc_vlan_action(priv, &vlan_act, attr, action);
3546278748a9SEli Britstein 	if (err)
3547278748a9SEli Britstein 		return err;
3548278748a9SEli Britstein 
3549278748a9SEli Britstein 	*out_dev = dev_get_by_index_rcu(dev_net(vlan_dev),
3550278748a9SEli Britstein 					dev_get_iflink(vlan_dev));
3551278748a9SEli Britstein 	if (is_vlan_dev(*out_dev))
3552278748a9SEli Britstein 		err = add_vlan_push_action(priv, attr, out_dev, action);
3553278748a9SEli Britstein 
3554278748a9SEli Britstein 	return err;
3555278748a9SEli Britstein }
3556278748a9SEli Britstein 
355735a605dbSEli Britstein static int add_vlan_pop_action(struct mlx5e_priv *priv,
355835a605dbSEli Britstein 			       struct mlx5_esw_flow_attr *attr,
355935a605dbSEli Britstein 			       u32 *action)
356035a605dbSEli Britstein {
3561f3b0a18bSTaehee Yoo 	int nest_level = attr->parse_attr->filter_dev->lower_level;
356235a605dbSEli Britstein 	struct flow_action_entry vlan_act = {
356335a605dbSEli Britstein 		.id = FLOW_ACTION_VLAN_POP,
356435a605dbSEli Britstein 	};
356535a605dbSEli Britstein 	int err = 0;
356635a605dbSEli Britstein 
356735a605dbSEli Britstein 	while (nest_level--) {
356835a605dbSEli Britstein 		err = parse_tc_vlan_action(priv, &vlan_act, attr, action);
356935a605dbSEli Britstein 		if (err)
357035a605dbSEli Britstein 			return err;
357135a605dbSEli Britstein 	}
357235a605dbSEli Britstein 
357335a605dbSEli Britstein 	return err;
357435a605dbSEli Britstein }
357535a605dbSEli Britstein 
3576f6dc1264SPaul Blakey bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
3577f6dc1264SPaul Blakey 				    struct net_device *out_dev)
3578f6dc1264SPaul Blakey {
3579f6dc1264SPaul Blakey 	if (is_merged_eswitch_dev(priv, out_dev))
3580f6dc1264SPaul Blakey 		return true;
3581f6dc1264SPaul Blakey 
3582f6dc1264SPaul Blakey 	return mlx5e_eswitch_rep(out_dev) &&
3583f6dc1264SPaul Blakey 	       same_hw_devs(priv, netdev_priv(out_dev));
3584f6dc1264SPaul Blakey }
3585f6dc1264SPaul Blakey 
3586554fe75cSDmytro Linkin static bool is_duplicated_output_device(struct net_device *dev,
3587554fe75cSDmytro Linkin 					struct net_device *out_dev,
3588554fe75cSDmytro Linkin 					int *ifindexes, int if_count,
3589554fe75cSDmytro Linkin 					struct netlink_ext_ack *extack)
3590554fe75cSDmytro Linkin {
3591554fe75cSDmytro Linkin 	int i;
3592554fe75cSDmytro Linkin 
3593554fe75cSDmytro Linkin 	for (i = 0; i < if_count; i++) {
3594554fe75cSDmytro Linkin 		if (ifindexes[i] == out_dev->ifindex) {
3595554fe75cSDmytro Linkin 			NL_SET_ERR_MSG_MOD(extack,
3596554fe75cSDmytro Linkin 					   "can't duplicate output to same device");
3597554fe75cSDmytro Linkin 			netdev_err(dev, "can't duplicate output to same device: %s\n",
3598554fe75cSDmytro Linkin 				   out_dev->name);
3599554fe75cSDmytro Linkin 			return true;
3600554fe75cSDmytro Linkin 		}
3601554fe75cSDmytro Linkin 	}
3602554fe75cSDmytro Linkin 
3603554fe75cSDmytro Linkin 	return false;
3604554fe75cSDmytro Linkin }
3605554fe75cSDmytro Linkin 
36062fbbc30dSEli Cohen static int mlx5_validate_goto_chain(struct mlx5_eswitch *esw,
36072fbbc30dSEli Cohen 				    struct mlx5e_tc_flow *flow,
36082fbbc30dSEli Cohen 				    const struct flow_action_entry *act,
36092fbbc30dSEli Cohen 				    u32 actions,
36102fbbc30dSEli Cohen 				    struct netlink_ext_ack *extack)
36112fbbc30dSEli Cohen {
36122fbbc30dSEli Cohen 	u32 max_chain = mlx5_esw_chains_get_chain_range(esw);
36132fbbc30dSEli Cohen 	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
36142fbbc30dSEli Cohen 	bool ft_flow = mlx5e_is_ft_flow(flow);
36152fbbc30dSEli Cohen 	u32 dest_chain = act->chain_index;
36162fbbc30dSEli Cohen 
36172fbbc30dSEli Cohen 	if (ft_flow) {
36182fbbc30dSEli Cohen 		NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported");
36192fbbc30dSEli Cohen 		return -EOPNOTSUPP;
36202fbbc30dSEli Cohen 	}
36212fbbc30dSEli Cohen 
36222fbbc30dSEli Cohen 	if (!mlx5_esw_chains_backwards_supported(esw) &&
36232fbbc30dSEli Cohen 	    dest_chain <= attr->chain) {
36242fbbc30dSEli Cohen 		NL_SET_ERR_MSG_MOD(extack,
36252fbbc30dSEli Cohen 				   "Goto lower numbered chain isn't supported");
36262fbbc30dSEli Cohen 		return -EOPNOTSUPP;
36272fbbc30dSEli Cohen 	}
36282fbbc30dSEli Cohen 	if (dest_chain > max_chain) {
36292fbbc30dSEli Cohen 		NL_SET_ERR_MSG_MOD(extack,
36302fbbc30dSEli Cohen 				   "Requested destination chain is out of supported range");
36312fbbc30dSEli Cohen 		return -EOPNOTSUPP;
36322fbbc30dSEli Cohen 	}
36332fbbc30dSEli Cohen 
36342fbbc30dSEli Cohen 	if (actions & (MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
36352fbbc30dSEli Cohen 		       MLX5_FLOW_CONTEXT_ACTION_DECAP) &&
36362fbbc30dSEli Cohen 	    !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat_and_fwd_to_table)) {
36372fbbc30dSEli Cohen 		NL_SET_ERR_MSG_MOD(extack,
36382fbbc30dSEli Cohen 				   "Goto chain is not allowed if action has reformat or decap");
36392fbbc30dSEli Cohen 		return -EOPNOTSUPP;
36402fbbc30dSEli Cohen 	}
36412fbbc30dSEli Cohen 
36422fbbc30dSEli Cohen 	return 0;
36432fbbc30dSEli Cohen }
36442fbbc30dSEli Cohen 
3645613f53feSEli Cohen static int verify_uplink_forwarding(struct mlx5e_priv *priv,
3646613f53feSEli Cohen 				    struct mlx5e_tc_flow *flow,
3647613f53feSEli Cohen 				    struct net_device *out_dev,
3648613f53feSEli Cohen 				    struct netlink_ext_ack *extack)
3649613f53feSEli Cohen {
3650613f53feSEli Cohen 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3651613f53feSEli Cohen 	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
3652613f53feSEli Cohen 	struct mlx5e_rep_priv *rep_priv;
3653613f53feSEli Cohen 
3654613f53feSEli Cohen 	/* Forwarding non encapsulated traffic between
3655613f53feSEli Cohen 	 * uplink ports is allowed only if
3656613f53feSEli Cohen 	 * termination_table_raw_traffic cap is set.
3657613f53feSEli Cohen 	 *
3658613f53feSEli Cohen 	 * Input vport was stored esw_attr->in_rep.
3659613f53feSEli Cohen 	 * In LAG case, *priv* is the private data of
3660613f53feSEli Cohen 	 * uplink which may be not the input vport.
3661613f53feSEli Cohen 	 */
3662613f53feSEli Cohen 	rep_priv = mlx5e_rep_to_rep_priv(attr->in_rep);
3663613f53feSEli Cohen 
3664613f53feSEli Cohen 	if (!(mlx5e_eswitch_uplink_rep(rep_priv->netdev) &&
3665613f53feSEli Cohen 	      mlx5e_eswitch_uplink_rep(out_dev)))
3666613f53feSEli Cohen 		return 0;
3667613f53feSEli Cohen 
3668613f53feSEli Cohen 	if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev,
3669613f53feSEli Cohen 					termination_table_raw_traffic)) {
3670613f53feSEli Cohen 		NL_SET_ERR_MSG_MOD(extack,
3671613f53feSEli Cohen 				   "devices are both uplink, can't offload forwarding");
3672613f53feSEli Cohen 			pr_err("devices %s %s are both uplink, can't offload forwarding\n",
3673613f53feSEli Cohen 			       priv->netdev->name, out_dev->name);
3674613f53feSEli Cohen 			return -EOPNOTSUPP;
3675613f53feSEli Cohen 	} else if (out_dev != rep_priv->netdev) {
3676613f53feSEli Cohen 		NL_SET_ERR_MSG_MOD(extack,
3677613f53feSEli Cohen 				   "devices are not the same uplink, can't offload forwarding");
3678613f53feSEli Cohen 		pr_err("devices %s %s are both uplink but not the same, can't offload forwarding\n",
3679613f53feSEli Cohen 		       priv->netdev->name, out_dev->name);
3680613f53feSEli Cohen 		return -EOPNOTSUPP;
3681613f53feSEli Cohen 	}
3682613f53feSEli Cohen 	return 0;
3683613f53feSEli Cohen }
3684613f53feSEli Cohen 
368573867881SPablo Neira Ayuso static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
368673867881SPablo Neira Ayuso 				struct flow_action *flow_action,
3687e98bedf5SEli Britstein 				struct mlx5e_tc_flow *flow,
3688e98bedf5SEli Britstein 				struct netlink_ext_ack *extack)
3689a54e20b4SHadar Hen Zion {
369073867881SPablo Neira Ayuso 	struct pedit_headers_action hdrs[2] = {};
3691bf07aa73SPaul Blakey 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3692ecf5bb79SOr Gerlitz 	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
36936f9af8ffSTonghao Zhang 	struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
36941d447a39SSaeed Mahameed 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
369573867881SPablo Neira Ayuso 	const struct ip_tunnel_info *info = NULL;
3696554fe75cSDmytro Linkin 	int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS];
369784179981SPaul Blakey 	bool ft_flow = mlx5e_is_ft_flow(flow);
369873867881SPablo Neira Ayuso 	const struct flow_action_entry *act;
36990a7fcb78SPaul Blakey 	bool encap = false, decap = false;
37000a7fcb78SPaul Blakey 	u32 action = attr->action;
3701554fe75cSDmytro Linkin 	int err, i, if_count = 0;
370203a9d11eSOr Gerlitz 
370373867881SPablo Neira Ayuso 	if (!flow_action_has_entries(flow_action))
370403a9d11eSOr Gerlitz 		return -EINVAL;
370503a9d11eSOr Gerlitz 
370653eca1f3SJakub Kicinski 	if (!flow_action_hw_stats_check(flow_action, extack,
370753eca1f3SJakub Kicinski 					FLOW_ACTION_HW_STATS_DELAYED_BIT))
3708319a1d19SJiri Pirko 		return -EOPNOTSUPP;
3709319a1d19SJiri Pirko 
371073867881SPablo Neira Ayuso 	flow_action_for_each(i, act, flow_action) {
371173867881SPablo Neira Ayuso 		switch (act->id) {
371273867881SPablo Neira Ayuso 		case FLOW_ACTION_DROP:
37131cab1cd7SOr Gerlitz 			action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
371403a9d11eSOr Gerlitz 				  MLX5_FLOW_CONTEXT_ACTION_COUNT;
371573867881SPablo Neira Ayuso 			break;
371673867881SPablo Neira Ayuso 		case FLOW_ACTION_MANGLE:
371773867881SPablo Neira Ayuso 		case FLOW_ACTION_ADD:
371873867881SPablo Neira Ayuso 			err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_FDB,
3719dec481c8SEli Cohen 						    hdrs, extack);
3720d7e75a32SOr Gerlitz 			if (err)
3721d7e75a32SOr Gerlitz 				return err;
3722d7e75a32SOr Gerlitz 
37231cab1cd7SOr Gerlitz 			action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
3724e85e02baSEli Britstein 			attr->split_count = attr->out_count;
372573867881SPablo Neira Ayuso 			break;
372673867881SPablo Neira Ayuso 		case FLOW_ACTION_CSUM:
37271cab1cd7SOr Gerlitz 			if (csum_offload_supported(priv, action,
372873867881SPablo Neira Ayuso 						   act->csum_flags, extack))
372973867881SPablo Neira Ayuso 				break;
373026c02749SOr Gerlitz 
373126c02749SOr Gerlitz 			return -EOPNOTSUPP;
373273867881SPablo Neira Ayuso 		case FLOW_ACTION_REDIRECT:
373373867881SPablo Neira Ayuso 		case FLOW_ACTION_MIRRED: {
373403a9d11eSOr Gerlitz 			struct mlx5e_priv *out_priv;
3735592d3651SChris Mi 			struct net_device *out_dev;
373603a9d11eSOr Gerlitz 
373773867881SPablo Neira Ayuso 			out_dev = act->dev;
3738ef381359SOz Shlomo 			if (!out_dev) {
3739ef381359SOz Shlomo 				/* out_dev is NULL when filters with
3740ef381359SOz Shlomo 				 * non-existing mirred device are replayed to
3741ef381359SOz Shlomo 				 * the driver.
3742ef381359SOz Shlomo 				 */
3743ef381359SOz Shlomo 				return -EINVAL;
3744ef381359SOz Shlomo 			}
374503a9d11eSOr Gerlitz 
374684179981SPaul Blakey 			if (ft_flow && out_dev == priv->netdev) {
374784179981SPaul Blakey 				/* Ignore forward to self rules generated
374884179981SPaul Blakey 				 * by adding both mlx5 devs to the flow table
374984179981SPaul Blakey 				 * block on a normal nft offload setup.
375084179981SPaul Blakey 				 */
375184179981SPaul Blakey 				return -EOPNOTSUPP;
375284179981SPaul Blakey 			}
375384179981SPaul Blakey 
3754592d3651SChris Mi 			if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
3755e98bedf5SEli Britstein 				NL_SET_ERR_MSG_MOD(extack,
3756e98bedf5SEli Britstein 						   "can't support more output ports, can't offload forwarding");
37574ccd83f4SRoi Dayan 				netdev_warn(priv->netdev,
37584ccd83f4SRoi Dayan 					    "can't support more than %d output ports, can't offload forwarding\n",
3759592d3651SChris Mi 					    attr->out_count);
3760592d3651SChris Mi 				return -EOPNOTSUPP;
3761592d3651SChris Mi 			}
3762592d3651SChris Mi 
3763f493f155SEli Britstein 			action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
3764f493f155SEli Britstein 				  MLX5_FLOW_CONTEXT_ACTION_COUNT;
3765b6a4ac24SVlad Buslov 			if (encap) {
3766b6a4ac24SVlad Buslov 				parse_attr->mirred_ifindex[attr->out_count] =
3767b6a4ac24SVlad Buslov 					out_dev->ifindex;
3768b6a4ac24SVlad Buslov 				parse_attr->tun_info[attr->out_count] = dup_tun_info(info);
3769b6a4ac24SVlad Buslov 				if (!parse_attr->tun_info[attr->out_count])
3770b6a4ac24SVlad Buslov 					return -ENOMEM;
3771b6a4ac24SVlad Buslov 				encap = false;
3772b6a4ac24SVlad Buslov 				attr->dests[attr->out_count].flags |=
3773b6a4ac24SVlad Buslov 					MLX5_ESW_DEST_ENCAP;
3774b6a4ac24SVlad Buslov 				attr->out_count++;
3775b6a4ac24SVlad Buslov 				/* attr->dests[].rep is resolved when we
3776b6a4ac24SVlad Buslov 				 * handle encap
3777b6a4ac24SVlad Buslov 				 */
3778b6a4ac24SVlad Buslov 			} else if (netdev_port_same_parent_id(priv->netdev, out_dev)) {
37797ba58ba7SRabie Loulou 				struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
37807ba58ba7SRabie Loulou 				struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
3781fa833bd5SVlad Buslov 				struct net_device *uplink_upper;
37827ba58ba7SRabie Loulou 
3783554fe75cSDmytro Linkin 				if (is_duplicated_output_device(priv->netdev,
3784554fe75cSDmytro Linkin 								out_dev,
3785554fe75cSDmytro Linkin 								ifindexes,
3786554fe75cSDmytro Linkin 								if_count,
3787554fe75cSDmytro Linkin 								extack))
3788554fe75cSDmytro Linkin 					return -EOPNOTSUPP;
3789554fe75cSDmytro Linkin 
3790554fe75cSDmytro Linkin 				ifindexes[if_count] = out_dev->ifindex;
3791554fe75cSDmytro Linkin 				if_count++;
3792554fe75cSDmytro Linkin 
3793fa833bd5SVlad Buslov 				rcu_read_lock();
3794fa833bd5SVlad Buslov 				uplink_upper =
3795fa833bd5SVlad Buslov 					netdev_master_upper_dev_get_rcu(uplink_dev);
37967ba58ba7SRabie Loulou 				if (uplink_upper &&
37977ba58ba7SRabie Loulou 				    netif_is_lag_master(uplink_upper) &&
37987ba58ba7SRabie Loulou 				    uplink_upper == out_dev)
37997ba58ba7SRabie Loulou 					out_dev = uplink_dev;
3800fa833bd5SVlad Buslov 				rcu_read_unlock();
38017ba58ba7SRabie Loulou 
3802278748a9SEli Britstein 				if (is_vlan_dev(out_dev)) {
3803278748a9SEli Britstein 					err = add_vlan_push_action(priv, attr,
3804278748a9SEli Britstein 								   &out_dev,
3805278748a9SEli Britstein 								   &action);
3806278748a9SEli Britstein 					if (err)
3807278748a9SEli Britstein 						return err;
3808278748a9SEli Britstein 				}
3809f6dc1264SPaul Blakey 
381035a605dbSEli Britstein 				if (is_vlan_dev(parse_attr->filter_dev)) {
381135a605dbSEli Britstein 					err = add_vlan_pop_action(priv, attr,
381235a605dbSEli Britstein 								  &action);
381335a605dbSEli Britstein 					if (err)
381435a605dbSEli Britstein 						return err;
381535a605dbSEli Britstein 				}
3816278748a9SEli Britstein 
3817613f53feSEli Cohen 				err = verify_uplink_forwarding(priv, flow, out_dev, extack);
3818613f53feSEli Cohen 				if (err)
3819613f53feSEli Cohen 					return err;
3820ffec9702STonghao Zhang 
3821f6dc1264SPaul Blakey 				if (!mlx5e_is_valid_eswitch_fwd_dev(priv, out_dev)) {
3822f6dc1264SPaul Blakey 					NL_SET_ERR_MSG_MOD(extack,
3823f6dc1264SPaul Blakey 							   "devices are not on same switch HW, can't offload forwarding");
38244ccd83f4SRoi Dayan 					netdev_warn(priv->netdev,
38254ccd83f4SRoi Dayan 						    "devices %s %s not on same switch HW, can't offload forwarding\n",
38264ccd83f4SRoi Dayan 						    priv->netdev->name,
38274ccd83f4SRoi Dayan 						    out_dev->name);
3828a0646c88SEli Britstein 					return -EOPNOTSUPP;
3829f6dc1264SPaul Blakey 				}
3830a0646c88SEli Britstein 
383103a9d11eSOr Gerlitz 				out_priv = netdev_priv(out_dev);
38321d447a39SSaeed Mahameed 				rpriv = out_priv->ppriv;
3833df65a573SEli Britstein 				attr->dests[attr->out_count].rep = rpriv->rep;
3834df65a573SEli Britstein 				attr->dests[attr->out_count].mdev = out_priv->mdev;
3835df65a573SEli Britstein 				attr->out_count++;
3836ef381359SOz Shlomo 			} else if (parse_attr->filter_dev != priv->netdev) {
3837ef381359SOz Shlomo 				/* All mlx5 devices are called to configure
3838ef381359SOz Shlomo 				 * high level device filters. Therefore, the
3839ef381359SOz Shlomo 				 * *attempt* to  install a filter on invalid
3840ef381359SOz Shlomo 				 * eswitch should not trigger an explicit error
3841ef381359SOz Shlomo 				 */
3842ef381359SOz Shlomo 				return -EINVAL;
3843a54e20b4SHadar Hen Zion 			} else {
3844e98bedf5SEli Britstein 				NL_SET_ERR_MSG_MOD(extack,
3845e98bedf5SEli Britstein 						   "devices are not on same switch HW, can't offload forwarding");
38464ccd83f4SRoi Dayan 				netdev_warn(priv->netdev,
38474ccd83f4SRoi Dayan 					    "devices %s %s not on same switch HW, can't offload forwarding\n",
38484ccd83f4SRoi Dayan 					    priv->netdev->name,
38494ccd83f4SRoi Dayan 					    out_dev->name);
3850a54e20b4SHadar Hen Zion 				return -EINVAL;
3851a54e20b4SHadar Hen Zion 			}
3852a54e20b4SHadar Hen Zion 			}
385373867881SPablo Neira Ayuso 			break;
385473867881SPablo Neira Ayuso 		case FLOW_ACTION_TUNNEL_ENCAP:
385573867881SPablo Neira Ayuso 			info = act->tunnel;
3856a54e20b4SHadar Hen Zion 			if (info)
3857a54e20b4SHadar Hen Zion 				encap = true;
3858a54e20b4SHadar Hen Zion 			else
3859a54e20b4SHadar Hen Zion 				return -EOPNOTSUPP;
386003a9d11eSOr Gerlitz 
386173867881SPablo Neira Ayuso 			break;
386273867881SPablo Neira Ayuso 		case FLOW_ACTION_VLAN_PUSH:
386373867881SPablo Neira Ayuso 		case FLOW_ACTION_VLAN_POP:
386476b496b1SEli Britstein 			if (act->id == FLOW_ACTION_VLAN_PUSH &&
386576b496b1SEli Britstein 			    (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP)) {
386676b496b1SEli Britstein 				/* Replace vlan pop+push with vlan modify */
386776b496b1SEli Britstein 				action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
386876b496b1SEli Britstein 				err = add_vlan_rewrite_action(priv,
386976b496b1SEli Britstein 							      MLX5_FLOW_NAMESPACE_FDB,
387076b496b1SEli Britstein 							      act, parse_attr, hdrs,
387176b496b1SEli Britstein 							      &action, extack);
387276b496b1SEli Britstein 			} else {
387373867881SPablo Neira Ayuso 				err = parse_tc_vlan_action(priv, act, attr, &action);
387476b496b1SEli Britstein 			}
38751482bd3dSJianbo Liu 			if (err)
38761482bd3dSJianbo Liu 				return err;
38771482bd3dSJianbo Liu 
3878e85e02baSEli Britstein 			attr->split_count = attr->out_count;
387973867881SPablo Neira Ayuso 			break;
3880bdc837eeSEli Britstein 		case FLOW_ACTION_VLAN_MANGLE:
3881bdc837eeSEli Britstein 			err = add_vlan_rewrite_action(priv,
3882bdc837eeSEli Britstein 						      MLX5_FLOW_NAMESPACE_FDB,
3883bdc837eeSEli Britstein 						      act, parse_attr, hdrs,
3884bdc837eeSEli Britstein 						      &action, extack);
3885bdc837eeSEli Britstein 			if (err)
3886bdc837eeSEli Britstein 				return err;
3887bdc837eeSEli Britstein 
3888bdc837eeSEli Britstein 			attr->split_count = attr->out_count;
3889bdc837eeSEli Britstein 			break;
389073867881SPablo Neira Ayuso 		case FLOW_ACTION_TUNNEL_DECAP:
38910a7fcb78SPaul Blakey 			decap = true;
389273867881SPablo Neira Ayuso 			break;
38932fbbc30dSEli Cohen 		case FLOW_ACTION_GOTO:
38942fbbc30dSEli Cohen 			err = mlx5_validate_goto_chain(esw, flow, act, action,
38952fbbc30dSEli Cohen 						       extack);
38962fbbc30dSEli Cohen 			if (err)
38972fbbc30dSEli Cohen 				return err;
3898bf07aa73SPaul Blakey 
3899e88afe75SOr Gerlitz 			action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
39002fbbc30dSEli Cohen 			attr->dest_chain = act->chain_index;
390173867881SPablo Neira Ayuso 			break;
39024c3844d9SPaul Blakey 		case FLOW_ACTION_CT:
39034c3844d9SPaul Blakey 			err = mlx5_tc_ct_parse_action(priv, attr, act, extack);
39044c3844d9SPaul Blakey 			if (err)
39054c3844d9SPaul Blakey 				return err;
39064c3844d9SPaul Blakey 
39074c3844d9SPaul Blakey 			flow_flag_set(flow, CT);
39084c3844d9SPaul Blakey 			break;
390973867881SPablo Neira Ayuso 		default:
39102cc1cb1dSTonghao Zhang 			NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
39112cc1cb1dSTonghao Zhang 			return -EOPNOTSUPP;
391203a9d11eSOr Gerlitz 		}
391373867881SPablo Neira Ayuso 	}
3914bdd66ac0SOr Gerlitz 
39150bac1194SEli Britstein 	if (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
39160bac1194SEli Britstein 	    action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
39170bac1194SEli Britstein 		/* For prio tag mode, replace vlan pop with rewrite vlan prio
39180bac1194SEli Britstein 		 * tag rewrite.
39190bac1194SEli Britstein 		 */
39200bac1194SEli Britstein 		action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
39210bac1194SEli Britstein 		err = add_vlan_prio_tag_rewrite_action(priv, parse_attr, hdrs,
39220bac1194SEli Britstein 						       &action, extack);
39230bac1194SEli Britstein 		if (err)
39240bac1194SEli Britstein 			return err;
39250bac1194SEli Britstein 	}
39260bac1194SEli Britstein 
3927c500c86bSPablo Neira Ayuso 	if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
3928c500c86bSPablo Neira Ayuso 	    hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
392984be899fSTonghao Zhang 		err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_FDB,
393027c11b6bSEli Britstein 					    parse_attr, hdrs, &action, extack);
3931c500c86bSPablo Neira Ayuso 		if (err)
3932c500c86bSPablo Neira Ayuso 			return err;
393327c11b6bSEli Britstein 		/* in case all pedit actions are skipped, remove the MOD_HDR
393427c11b6bSEli Britstein 		 * flag. we might have set split_count either by pedit or
393527c11b6bSEli Britstein 		 * pop/push. if there is no pop/push either, reset it too.
393627c11b6bSEli Britstein 		 */
39376ae4a6a5SPaul Blakey 		if (parse_attr->mod_hdr_acts.num_actions == 0) {
393827c11b6bSEli Britstein 			action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
39396ae4a6a5SPaul Blakey 			dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
394027c11b6bSEli Britstein 			if (!((action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
394127c11b6bSEli Britstein 			      (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)))
394227c11b6bSEli Britstein 				attr->split_count = 0;
394327c11b6bSEli Britstein 		}
3944c500c86bSPablo Neira Ayuso 	}
3945c500c86bSPablo Neira Ayuso 
39461cab1cd7SOr Gerlitz 	attr->action = action;
394773867881SPablo Neira Ayuso 	if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
3948bdd66ac0SOr Gerlitz 		return -EOPNOTSUPP;
3949bdd66ac0SOr Gerlitz 
3950e88afe75SOr Gerlitz 	if (attr->dest_chain) {
39510a7fcb78SPaul Blakey 		if (decap) {
39520a7fcb78SPaul Blakey 			/* It can be supported if we'll create a mapping for
39530a7fcb78SPaul Blakey 			 * the tunnel device only (without tunnel), and set
39540a7fcb78SPaul Blakey 			 * this tunnel id with this decap flow.
39550a7fcb78SPaul Blakey 			 *
39560a7fcb78SPaul Blakey 			 * On restore (miss), we'll just set this saved tunnel
39570a7fcb78SPaul Blakey 			 * device.
39580a7fcb78SPaul Blakey 			 */
39590a7fcb78SPaul Blakey 
39600a7fcb78SPaul Blakey 			NL_SET_ERR_MSG(extack,
39610a7fcb78SPaul Blakey 				       "Decap with goto isn't supported");
39620a7fcb78SPaul Blakey 			netdev_warn(priv->netdev,
39630a7fcb78SPaul Blakey 				    "Decap with goto isn't supported");
39640a7fcb78SPaul Blakey 			return -EOPNOTSUPP;
39650a7fcb78SPaul Blakey 		}
39660a7fcb78SPaul Blakey 
3967e88afe75SOr Gerlitz 		if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
396861644c3dSRoi Dayan 			NL_SET_ERR_MSG_MOD(extack,
396961644c3dSRoi Dayan 					   "Mirroring goto chain rules isn't supported");
3970e88afe75SOr Gerlitz 			return -EOPNOTSUPP;
3971e88afe75SOr Gerlitz 		}
3972e88afe75SOr Gerlitz 		attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
3973e88afe75SOr Gerlitz 	}
3974e88afe75SOr Gerlitz 
3975ae2741e2SVlad Buslov 	if (!(attr->action &
3976ae2741e2SVlad Buslov 	      (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
397761644c3dSRoi Dayan 		NL_SET_ERR_MSG_MOD(extack,
397861644c3dSRoi Dayan 				   "Rule must have at least one forward/drop action");
3979ae2741e2SVlad Buslov 		return -EOPNOTSUPP;
3980ae2741e2SVlad Buslov 	}
3981ae2741e2SVlad Buslov 
3982e85e02baSEli Britstein 	if (attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
3983e98bedf5SEli Britstein 		NL_SET_ERR_MSG_MOD(extack,
3984e98bedf5SEli Britstein 				   "current firmware doesn't support split rule for port mirroring");
3985592d3651SChris Mi 		netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
3986592d3651SChris Mi 		return -EOPNOTSUPP;
3987592d3651SChris Mi 	}
3988592d3651SChris Mi 
398931c8eba5SOr Gerlitz 	return 0;
399003a9d11eSOr Gerlitz }
399103a9d11eSOr Gerlitz 
3992226f2ca3SVlad Buslov static void get_flags(int flags, unsigned long *flow_flags)
399360bd4af8SOr Gerlitz {
3994226f2ca3SVlad Buslov 	unsigned long __flow_flags = 0;
399560bd4af8SOr Gerlitz 
3996226f2ca3SVlad Buslov 	if (flags & MLX5_TC_FLAG(INGRESS))
3997226f2ca3SVlad Buslov 		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_INGRESS);
3998226f2ca3SVlad Buslov 	if (flags & MLX5_TC_FLAG(EGRESS))
3999226f2ca3SVlad Buslov 		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_EGRESS);
400060bd4af8SOr Gerlitz 
4001226f2ca3SVlad Buslov 	if (flags & MLX5_TC_FLAG(ESW_OFFLOAD))
4002226f2ca3SVlad Buslov 		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
4003226f2ca3SVlad Buslov 	if (flags & MLX5_TC_FLAG(NIC_OFFLOAD))
4004226f2ca3SVlad Buslov 		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
400584179981SPaul Blakey 	if (flags & MLX5_TC_FLAG(FT_OFFLOAD))
400684179981SPaul Blakey 		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_FT);
4007d9ee0491SOr Gerlitz 
400860bd4af8SOr Gerlitz 	*flow_flags = __flow_flags;
400960bd4af8SOr Gerlitz }
401060bd4af8SOr Gerlitz 
401105866c82SOr Gerlitz static const struct rhashtable_params tc_ht_params = {
401205866c82SOr Gerlitz 	.head_offset = offsetof(struct mlx5e_tc_flow, node),
401305866c82SOr Gerlitz 	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
401405866c82SOr Gerlitz 	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
401505866c82SOr Gerlitz 	.automatic_shrinking = true,
401605866c82SOr Gerlitz };
401705866c82SOr Gerlitz 
4018226f2ca3SVlad Buslov static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv,
4019226f2ca3SVlad Buslov 				    unsigned long flags)
402005866c82SOr Gerlitz {
4021655dc3d2SOr Gerlitz 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
4022655dc3d2SOr Gerlitz 	struct mlx5e_rep_priv *uplink_rpriv;
4023655dc3d2SOr Gerlitz 
4024226f2ca3SVlad Buslov 	if (flags & MLX5_TC_FLAG(ESW_OFFLOAD)) {
4025655dc3d2SOr Gerlitz 		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
4026ec1366c2SOz Shlomo 		return &uplink_rpriv->uplink_priv.tc_ht;
4027d9ee0491SOr Gerlitz 	} else /* NIC offload */
402805866c82SOr Gerlitz 		return &priv->fs.tc.ht;
402905866c82SOr Gerlitz }
403005866c82SOr Gerlitz 
403104de7ddaSRoi Dayan static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
403204de7ddaSRoi Dayan {
40331418ddd9SAviv Heller 	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
4034b05af6aaSBodong Wang 	bool is_rep_ingress = attr->in_rep->vport != MLX5_VPORT_UPLINK &&
4035226f2ca3SVlad Buslov 		flow_flag_test(flow, INGRESS);
40361418ddd9SAviv Heller 	bool act_is_encap = !!(attr->action &
40371418ddd9SAviv Heller 			       MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT);
40381418ddd9SAviv Heller 	bool esw_paired = mlx5_devcom_is_paired(attr->in_mdev->priv.devcom,
40391418ddd9SAviv Heller 						MLX5_DEVCOM_ESW_OFFLOADS);
40401418ddd9SAviv Heller 
404110fbb1cdSRoi Dayan 	if (!esw_paired)
404210fbb1cdSRoi Dayan 		return false;
404310fbb1cdSRoi Dayan 
404410fbb1cdSRoi Dayan 	if ((mlx5_lag_is_sriov(attr->in_mdev) ||
404510fbb1cdSRoi Dayan 	     mlx5_lag_is_multipath(attr->in_mdev)) &&
404610fbb1cdSRoi Dayan 	    (is_rep_ingress || act_is_encap))
404710fbb1cdSRoi Dayan 		return true;
404810fbb1cdSRoi Dayan 
404910fbb1cdSRoi Dayan 	return false;
405004de7ddaSRoi Dayan }
405104de7ddaSRoi Dayan 
4052a88780a9SRoi Dayan static int
4053a88780a9SRoi Dayan mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
4054226f2ca3SVlad Buslov 		 struct flow_cls_offload *f, unsigned long flow_flags,
4055a88780a9SRoi Dayan 		 struct mlx5e_tc_flow_parse_attr **__parse_attr,
4056a88780a9SRoi Dayan 		 struct mlx5e_tc_flow **__flow)
4057e3a2b7edSAmir Vadai {
405817091853SOr Gerlitz 	struct mlx5e_tc_flow_parse_attr *parse_attr;
40593bc4b7bfSOr Gerlitz 	struct mlx5e_tc_flow *flow;
40605a7e5bcbSVlad Buslov 	int out_index, err;
4061776b12b6SOr Gerlitz 
406265ba8fb7SOr Gerlitz 	flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
40631b9a07eeSLeon Romanovsky 	parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
406417091853SOr Gerlitz 	if (!parse_attr || !flow) {
4065e3a2b7edSAmir Vadai 		err = -ENOMEM;
4066e3a2b7edSAmir Vadai 		goto err_free;
4067e3a2b7edSAmir Vadai 	}
4068e3a2b7edSAmir Vadai 
4069e3a2b7edSAmir Vadai 	flow->cookie = f->cookie;
407065ba8fb7SOr Gerlitz 	flow->flags = flow_flags;
4071655dc3d2SOr Gerlitz 	flow->priv = priv;
40725a7e5bcbSVlad Buslov 	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
40735a7e5bcbSVlad Buslov 		INIT_LIST_HEAD(&flow->encaps[out_index].list);
40745a7e5bcbSVlad Buslov 	INIT_LIST_HEAD(&flow->mod_hdr);
40755a7e5bcbSVlad Buslov 	INIT_LIST_HEAD(&flow->hairpin);
40765a7e5bcbSVlad Buslov 	refcount_set(&flow->refcnt, 1);
407795435ad7SVlad Buslov 	init_completion(&flow->init_done);
4078e3a2b7edSAmir Vadai 
4079a88780a9SRoi Dayan 	*__flow = flow;
4080a88780a9SRoi Dayan 	*__parse_attr = parse_attr;
4081a88780a9SRoi Dayan 
4082a88780a9SRoi Dayan 	return 0;
4083a88780a9SRoi Dayan 
4084a88780a9SRoi Dayan err_free:
4085a88780a9SRoi Dayan 	kfree(flow);
4086a88780a9SRoi Dayan 	kvfree(parse_attr);
4087a88780a9SRoi Dayan 	return err;
4088adb4c123SOr Gerlitz }
4089adb4c123SOr Gerlitz 
4090988ab9c7STonghao Zhang static void
4091988ab9c7STonghao Zhang mlx5e_flow_esw_attr_init(struct mlx5_esw_flow_attr *esw_attr,
4092988ab9c7STonghao Zhang 			 struct mlx5e_priv *priv,
4093988ab9c7STonghao Zhang 			 struct mlx5e_tc_flow_parse_attr *parse_attr,
4094f9e30088SPablo Neira Ayuso 			 struct flow_cls_offload *f,
4095988ab9c7STonghao Zhang 			 struct mlx5_eswitch_rep *in_rep,
4096988ab9c7STonghao Zhang 			 struct mlx5_core_dev *in_mdev)
4097988ab9c7STonghao Zhang {
4098988ab9c7STonghao Zhang 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
4099988ab9c7STonghao Zhang 
4100988ab9c7STonghao Zhang 	esw_attr->parse_attr = parse_attr;
4101988ab9c7STonghao Zhang 	esw_attr->chain = f->common.chain_index;
4102ef01adaeSPablo Neira Ayuso 	esw_attr->prio = f->common.prio;
4103988ab9c7STonghao Zhang 
4104988ab9c7STonghao Zhang 	esw_attr->in_rep = in_rep;
4105988ab9c7STonghao Zhang 	esw_attr->in_mdev = in_mdev;
4106988ab9c7STonghao Zhang 
4107988ab9c7STonghao Zhang 	if (MLX5_CAP_ESW(esw->dev, counter_eswitch_affinity) ==
4108988ab9c7STonghao Zhang 	    MLX5_COUNTER_SOURCE_ESWITCH)
4109988ab9c7STonghao Zhang 		esw_attr->counter_dev = in_mdev;
4110988ab9c7STonghao Zhang 	else
4111988ab9c7STonghao Zhang 		esw_attr->counter_dev = priv->mdev;
4112988ab9c7STonghao Zhang }
4113988ab9c7STonghao Zhang 
411471129676SJason Gunthorpe static struct mlx5e_tc_flow *
411504de7ddaSRoi Dayan __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
4116f9e30088SPablo Neira Ayuso 		     struct flow_cls_offload *f,
4117226f2ca3SVlad Buslov 		     unsigned long flow_flags,
4118d11afc26SOz Shlomo 		     struct net_device *filter_dev,
411904de7ddaSRoi Dayan 		     struct mlx5_eswitch_rep *in_rep,
412071129676SJason Gunthorpe 		     struct mlx5_core_dev *in_mdev)
4121a88780a9SRoi Dayan {
4122f9e30088SPablo Neira Ayuso 	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
4123a88780a9SRoi Dayan 	struct netlink_ext_ack *extack = f->common.extack;
4124a88780a9SRoi Dayan 	struct mlx5e_tc_flow_parse_attr *parse_attr;
4125a88780a9SRoi Dayan 	struct mlx5e_tc_flow *flow;
4126a88780a9SRoi Dayan 	int attr_size, err;
4127a88780a9SRoi Dayan 
4128226f2ca3SVlad Buslov 	flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
4129a88780a9SRoi Dayan 	attr_size  = sizeof(struct mlx5_esw_flow_attr);
4130a88780a9SRoi Dayan 	err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
4131a88780a9SRoi Dayan 			       &parse_attr, &flow);
4132a88780a9SRoi Dayan 	if (err)
4133a88780a9SRoi Dayan 		goto out;
4134988ab9c7STonghao Zhang 
4135d11afc26SOz Shlomo 	parse_attr->filter_dev = filter_dev;
4136988ab9c7STonghao Zhang 	mlx5e_flow_esw_attr_init(flow->esw_attr,
4137988ab9c7STonghao Zhang 				 priv, parse_attr,
4138988ab9c7STonghao Zhang 				 f, in_rep, in_mdev);
4139988ab9c7STonghao Zhang 
414054c177caSOz Shlomo 	err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
414154c177caSOz Shlomo 			       f, filter_dev);
4142d11afc26SOz Shlomo 	if (err)
4143d11afc26SOz Shlomo 		goto err_free;
4144a88780a9SRoi Dayan 
41456f9af8ffSTonghao Zhang 	err = parse_tc_fdb_actions(priv, &rule->action, flow, extack);
4146a88780a9SRoi Dayan 	if (err)
4147a88780a9SRoi Dayan 		goto err_free;
4148a88780a9SRoi Dayan 
41494c3844d9SPaul Blakey 	err = mlx5_tc_ct_parse_match(priv, &parse_attr->spec, f, extack);
41504c3844d9SPaul Blakey 	if (err)
41514c3844d9SPaul Blakey 		goto err_free;
41524c3844d9SPaul Blakey 
41537040632dSTonghao Zhang 	err = mlx5e_tc_add_fdb_flow(priv, flow, extack);
415495435ad7SVlad Buslov 	complete_all(&flow->init_done);
4155ef06c9eeSRoi Dayan 	if (err) {
4156ef06c9eeSRoi Dayan 		if (!(err == -ENETUNREACH && mlx5_lag_is_multipath(in_mdev)))
4157aa0cbbaeSOr Gerlitz 			goto err_free;
41585c40348cSOr Gerlitz 
4159b4a23329SRoi Dayan 		add_unready_flow(flow);
4160ef06c9eeSRoi Dayan 	}
4161ef06c9eeSRoi Dayan 
416271129676SJason Gunthorpe 	return flow;
4163e3a2b7edSAmir Vadai 
4164e3a2b7edSAmir Vadai err_free:
41655a7e5bcbSVlad Buslov 	mlx5e_flow_put(priv, flow);
4166a88780a9SRoi Dayan out:
416771129676SJason Gunthorpe 	return ERR_PTR(err);
4168a88780a9SRoi Dayan }
4169a88780a9SRoi Dayan 
4170f9e30088SPablo Neira Ayuso static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f,
417195dc1902SRoi Dayan 				      struct mlx5e_tc_flow *flow,
4172226f2ca3SVlad Buslov 				      unsigned long flow_flags)
417304de7ddaSRoi Dayan {
417404de7ddaSRoi Dayan 	struct mlx5e_priv *priv = flow->priv, *peer_priv;
417504de7ddaSRoi Dayan 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch, *peer_esw;
417604de7ddaSRoi Dayan 	struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
417704de7ddaSRoi Dayan 	struct mlx5e_tc_flow_parse_attr *parse_attr;
417804de7ddaSRoi Dayan 	struct mlx5e_rep_priv *peer_urpriv;
417904de7ddaSRoi Dayan 	struct mlx5e_tc_flow *peer_flow;
418004de7ddaSRoi Dayan 	struct mlx5_core_dev *in_mdev;
418104de7ddaSRoi Dayan 	int err = 0;
418204de7ddaSRoi Dayan 
418304de7ddaSRoi Dayan 	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
418404de7ddaSRoi Dayan 	if (!peer_esw)
418504de7ddaSRoi Dayan 		return -ENODEV;
418604de7ddaSRoi Dayan 
418704de7ddaSRoi Dayan 	peer_urpriv = mlx5_eswitch_get_uplink_priv(peer_esw, REP_ETH);
418804de7ddaSRoi Dayan 	peer_priv = netdev_priv(peer_urpriv->netdev);
418904de7ddaSRoi Dayan 
419004de7ddaSRoi Dayan 	/* in_mdev is assigned of which the packet originated from.
419104de7ddaSRoi Dayan 	 * So packets redirected to uplink use the same mdev of the
419204de7ddaSRoi Dayan 	 * original flow and packets redirected from uplink use the
419304de7ddaSRoi Dayan 	 * peer mdev.
419404de7ddaSRoi Dayan 	 */
4195b05af6aaSBodong Wang 	if (flow->esw_attr->in_rep->vport == MLX5_VPORT_UPLINK)
419604de7ddaSRoi Dayan 		in_mdev = peer_priv->mdev;
419704de7ddaSRoi Dayan 	else
419804de7ddaSRoi Dayan 		in_mdev = priv->mdev;
419904de7ddaSRoi Dayan 
420004de7ddaSRoi Dayan 	parse_attr = flow->esw_attr->parse_attr;
420195dc1902SRoi Dayan 	peer_flow = __mlx5e_add_fdb_flow(peer_priv, f, flow_flags,
420204de7ddaSRoi Dayan 					 parse_attr->filter_dev,
420371129676SJason Gunthorpe 					 flow->esw_attr->in_rep, in_mdev);
420471129676SJason Gunthorpe 	if (IS_ERR(peer_flow)) {
420571129676SJason Gunthorpe 		err = PTR_ERR(peer_flow);
420604de7ddaSRoi Dayan 		goto out;
420771129676SJason Gunthorpe 	}
420804de7ddaSRoi Dayan 
420904de7ddaSRoi Dayan 	flow->peer_flow = peer_flow;
4210226f2ca3SVlad Buslov 	flow_flag_set(flow, DUP);
421104de7ddaSRoi Dayan 	mutex_lock(&esw->offloads.peer_mutex);
421204de7ddaSRoi Dayan 	list_add_tail(&flow->peer, &esw->offloads.peer_flows);
421304de7ddaSRoi Dayan 	mutex_unlock(&esw->offloads.peer_mutex);
421404de7ddaSRoi Dayan 
421504de7ddaSRoi Dayan out:
421604de7ddaSRoi Dayan 	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
421704de7ddaSRoi Dayan 	return err;
421804de7ddaSRoi Dayan }
421904de7ddaSRoi Dayan 
422004de7ddaSRoi Dayan static int
422104de7ddaSRoi Dayan mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
4222f9e30088SPablo Neira Ayuso 		   struct flow_cls_offload *f,
4223226f2ca3SVlad Buslov 		   unsigned long flow_flags,
422404de7ddaSRoi Dayan 		   struct net_device *filter_dev,
422504de7ddaSRoi Dayan 		   struct mlx5e_tc_flow **__flow)
422604de7ddaSRoi Dayan {
422704de7ddaSRoi Dayan 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
422804de7ddaSRoi Dayan 	struct mlx5_eswitch_rep *in_rep = rpriv->rep;
422904de7ddaSRoi Dayan 	struct mlx5_core_dev *in_mdev = priv->mdev;
423004de7ddaSRoi Dayan 	struct mlx5e_tc_flow *flow;
423104de7ddaSRoi Dayan 	int err;
423204de7ddaSRoi Dayan 
423371129676SJason Gunthorpe 	flow = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep,
423471129676SJason Gunthorpe 				    in_mdev);
423571129676SJason Gunthorpe 	if (IS_ERR(flow))
423671129676SJason Gunthorpe 		return PTR_ERR(flow);
423704de7ddaSRoi Dayan 
423804de7ddaSRoi Dayan 	if (is_peer_flow_needed(flow)) {
423995dc1902SRoi Dayan 		err = mlx5e_tc_add_fdb_peer_flow(f, flow, flow_flags);
424004de7ddaSRoi Dayan 		if (err) {
424104de7ddaSRoi Dayan 			mlx5e_tc_del_fdb_flow(priv, flow);
424204de7ddaSRoi Dayan 			goto out;
424304de7ddaSRoi Dayan 		}
424404de7ddaSRoi Dayan 	}
424504de7ddaSRoi Dayan 
424604de7ddaSRoi Dayan 	*__flow = flow;
424704de7ddaSRoi Dayan 
424804de7ddaSRoi Dayan 	return 0;
424904de7ddaSRoi Dayan 
425004de7ddaSRoi Dayan out:
425104de7ddaSRoi Dayan 	return err;
425204de7ddaSRoi Dayan }
425304de7ddaSRoi Dayan 
/* Build and offload a TC flower rule into the NIC (non-eswitch) tables.
 *
 * On success the new flow, marked OFFLOADED, is returned through @__flow
 * and the temporary parse attributes are freed.
 *
 * Returns 0 or a negative errno; nothing is left allocated on failure.
 */
static int
mlx5e_add_nic_flow(struct mlx5e_priv *priv,
		   struct flow_cls_offload *f,
		   unsigned long flow_flags,
		   struct net_device *filter_dev,
		   struct mlx5e_tc_flow **__flow)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_flow *flow;
	int attr_size, err;

	/* multi-chain not supported for NIC rules */
	if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
		return -EOPNOTSUPP;

	flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
	attr_size  = sizeof(struct mlx5_nic_flow_attr);
	err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
			       &parse_attr, &flow);
	if (err)
		goto out;

	parse_attr->filter_dev = filter_dev;
	/* Translate the flower match into a mlx5 flow spec. */
	err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
			       f, filter_dev);
	if (err)
		goto err_free;

	err = parse_tc_nic_actions(priv, &rule->action, parse_attr, flow, extack);
	if (err)
		goto err_free;

	err = mlx5e_tc_add_nic_flow(priv, parse_attr, flow, extack);
	if (err)
		goto err_free;

	flow_flag_set(flow, OFFLOADED);
	/* parse_attr is only needed while building the rule. */
	kvfree(parse_attr);
	*__flow = flow;

	return 0;

err_free:
	mlx5e_flow_put(priv, flow);
	kvfree(parse_attr);
out:
	return err;
}
4304a88780a9SRoi Dayan 
4305a88780a9SRoi Dayan static int
4306a88780a9SRoi Dayan mlx5e_tc_add_flow(struct mlx5e_priv *priv,
4307f9e30088SPablo Neira Ayuso 		  struct flow_cls_offload *f,
4308226f2ca3SVlad Buslov 		  unsigned long flags,
4309d11afc26SOz Shlomo 		  struct net_device *filter_dev,
4310a88780a9SRoi Dayan 		  struct mlx5e_tc_flow **flow)
4311a88780a9SRoi Dayan {
4312a88780a9SRoi Dayan 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
4313226f2ca3SVlad Buslov 	unsigned long flow_flags;
4314a88780a9SRoi Dayan 	int err;
4315a88780a9SRoi Dayan 
4316a88780a9SRoi Dayan 	get_flags(flags, &flow_flags);
4317a88780a9SRoi Dayan 
4318bf07aa73SPaul Blakey 	if (!tc_can_offload_extack(priv->netdev, f->common.extack))
4319bf07aa73SPaul Blakey 		return -EOPNOTSUPP;
4320bf07aa73SPaul Blakey 
4321f6455de0SBodong Wang 	if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
4322d11afc26SOz Shlomo 		err = mlx5e_add_fdb_flow(priv, f, flow_flags,
4323d11afc26SOz Shlomo 					 filter_dev, flow);
4324a88780a9SRoi Dayan 	else
4325d11afc26SOz Shlomo 		err = mlx5e_add_nic_flow(priv, f, flow_flags,
4326d11afc26SOz Shlomo 					 filter_dev, flow);
4327a88780a9SRoi Dayan 
4328a88780a9SRoi Dayan 	return err;
4329a88780a9SRoi Dayan }
4330a88780a9SRoi Dayan 
/* TC flower "add rule" entry point (ndo_setup_tc path).
 *
 * Rejects duplicate cookies, builds the HW rule and publishes it in the
 * per-direction flow hashtable.
 *
 * Returns 0 on success, -EEXIST for a duplicate cookie, or the error from
 * rule creation / table insertion.
 */
int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
			   struct flow_cls_offload *f, unsigned long flags)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5e_tc_flow *flow;
	int err = 0;

	/* Best-effort duplicate check; rhashtable_lookup_insert_fast()
	 * below resolves any race with a concurrent add of the same cookie.
	 */
	rcu_read_lock();
	flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
	rcu_read_unlock();
	if (flow) {
		NL_SET_ERR_MSG_MOD(extack,
				   "flow cookie already exists, ignoring");
		netdev_warn_once(priv->netdev,
				 "flow cookie %lx already exists, ignoring\n",
				 f->cookie);
		err = -EEXIST;
		goto out;
	}

	trace_mlx5e_configure_flower(f);
	err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow);
	if (err)
		goto out;

	err = rhashtable_lookup_insert_fast(tc_ht, &flow->node, tc_ht_params);
	if (err)
		goto err_free;

	return 0;

err_free:
	mlx5e_flow_put(priv, flow);
out:
	return err;
}
4368e3a2b7edSAmir Vadai 
43698f8ae895SOr Gerlitz static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags)
43708f8ae895SOr Gerlitz {
4371226f2ca3SVlad Buslov 	bool dir_ingress = !!(flags & MLX5_TC_FLAG(INGRESS));
4372226f2ca3SVlad Buslov 	bool dir_egress = !!(flags & MLX5_TC_FLAG(EGRESS));
43738f8ae895SOr Gerlitz 
4374226f2ca3SVlad Buslov 	return flow_flag_test(flow, INGRESS) == dir_ingress &&
4375226f2ca3SVlad Buslov 		flow_flag_test(flow, EGRESS) == dir_egress;
43768f8ae895SOr Gerlitz }
43778f8ae895SOr Gerlitz 
/* TC flower "delete rule" entry point.
 *
 * Looks the flow up under RCU, claims it for deletion by atomically
 * setting the DELETED flag (a concurrent delete loses the race), then
 * unhashes it and drops the hashtable's reference.
 *
 * Returns 0 on success or -EINVAL when the flow is unknown, belongs to
 * the other direction, or is already being deleted.
 */
int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
			struct flow_cls_offload *f, unsigned long flags)
{
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5e_tc_flow *flow;
	int err;

	/* RCU keeps the flow object alive between lookup and the
	 * test_and_set below.
	 */
	rcu_read_lock();
	flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
	if (!flow || !same_flow_direction(flow, flags)) {
		err = -EINVAL;
		goto errout;
	}

	/* Only delete the flow if it doesn't have MLX5E_TC_FLOW_DELETED flag
	 * set.
	 */
	if (flow_flag_test_and_set(flow, DELETED)) {
		err = -EINVAL;
		goto errout;
	}
	rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params);
	rcu_read_unlock();

	trace_mlx5e_delete_flower(f);
	/* Drop the table's reference; frees the flow once all users go. */
	mlx5e_flow_put(priv, flow);

	return 0;

errout:
	rcu_read_unlock();
	return err;
}
4411e3a2b7edSAmir Vadai 
/* TC flower "get stats" entry point.
 *
 * Reads the cached flow counter and, when the rule is duplicated on a
 * paired eswitch (multipath), folds in the peer rule's counter as well,
 * then reports the totals back through @f->stats.
 */
int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
		       struct flow_cls_offload *f, unsigned long flags)
{
	struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5_eswitch *peer_esw;
	struct mlx5e_tc_flow *flow;
	struct mlx5_fc *counter;
	u64 lastuse = 0;
	u64 packets = 0;
	u64 bytes = 0;
	int err = 0;

	/* Take a reference so the flow can't be freed while we read it. */
	rcu_read_lock();
	flow = mlx5e_flow_get(rhashtable_lookup(tc_ht, &f->cookie,
						tc_ht_params));
	rcu_read_unlock();
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	if (!same_flow_direction(flow, flags)) {
		err = -EINVAL;
		goto errout;
	}

	if (mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, CT)) {
		counter = mlx5e_tc_get_counter(flow);
		if (!counter)
			goto errout;

		mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
	}

	/* Under multipath it's possible for one rule to be currently
	 * un-offloaded while the other rule is offloaded.
	 */
	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer_esw)
		goto out;

	if (flow_flag_test(flow, DUP) &&
	    flow_flag_test(flow->peer_flow, OFFLOADED)) {
		u64 bytes2;
		u64 packets2;
		u64 lastuse2;

		counter = mlx5e_tc_get_counter(flow->peer_flow);
		if (!counter)
			goto no_peer_counter;
		mlx5_fc_query_cached(counter, &bytes2, &packets2, &lastuse2);

		/* Aggregate both legs of the multipath pair. */
		bytes += bytes2;
		packets += packets2;
		lastuse = max_t(u64, lastuse, lastuse2);
	}

no_peer_counter:
	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
out:
	flow_stats_update(&f->stats, bytes, packets, lastuse,
			  FLOW_ACTION_HW_STATS_DELAYED);
	trace_mlx5e_stats_flower(f);
errout:
	mlx5e_flow_put(priv, flow);
	return err;
}
4478aad7e08dSAmir Vadai 
4479fcb64c0fSEli Cohen static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
4480fcb64c0fSEli Cohen 			       struct netlink_ext_ack *extack)
4481fcb64c0fSEli Cohen {
4482fcb64c0fSEli Cohen 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
4483fcb64c0fSEli Cohen 	struct mlx5_eswitch *esw;
4484fcb64c0fSEli Cohen 	u16 vport_num;
4485fcb64c0fSEli Cohen 	u32 rate_mbps;
4486fcb64c0fSEli Cohen 	int err;
4487fcb64c0fSEli Cohen 
4488e401a184SEli Cohen 	vport_num = rpriv->rep->vport;
4489e401a184SEli Cohen 	if (vport_num >= MLX5_VPORT_ECPF) {
4490e401a184SEli Cohen 		NL_SET_ERR_MSG_MOD(extack,
4491e401a184SEli Cohen 				   "Ingress rate limit is supported only for Eswitch ports connected to VFs");
4492e401a184SEli Cohen 		return -EOPNOTSUPP;
4493e401a184SEli Cohen 	}
4494e401a184SEli Cohen 
4495fcb64c0fSEli Cohen 	esw = priv->mdev->priv.eswitch;
4496fcb64c0fSEli Cohen 	/* rate is given in bytes/sec.
4497fcb64c0fSEli Cohen 	 * First convert to bits/sec and then round to the nearest mbit/secs.
4498fcb64c0fSEli Cohen 	 * mbit means million bits.
4499fcb64c0fSEli Cohen 	 * Moreover, if rate is non zero we choose to configure to a minimum of
4500fcb64c0fSEli Cohen 	 * 1 mbit/sec.
4501fcb64c0fSEli Cohen 	 */
4502fcb64c0fSEli Cohen 	rate_mbps = rate ? max_t(u32, (rate * 8 + 500000) / 1000000, 1) : 0;
4503fcb64c0fSEli Cohen 	err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps);
4504fcb64c0fSEli Cohen 	if (err)
4505fcb64c0fSEli Cohen 		NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware");
4506fcb64c0fSEli Cohen 
4507fcb64c0fSEli Cohen 	return err;
4508fcb64c0fSEli Cohen }
4509fcb64c0fSEli Cohen 
4510fcb64c0fSEli Cohen static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
4511fcb64c0fSEli Cohen 					struct flow_action *flow_action,
4512fcb64c0fSEli Cohen 					struct netlink_ext_ack *extack)
4513fcb64c0fSEli Cohen {
4514fcb64c0fSEli Cohen 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
4515fcb64c0fSEli Cohen 	const struct flow_action_entry *act;
4516fcb64c0fSEli Cohen 	int err;
4517fcb64c0fSEli Cohen 	int i;
4518fcb64c0fSEli Cohen 
4519fcb64c0fSEli Cohen 	if (!flow_action_has_entries(flow_action)) {
4520fcb64c0fSEli Cohen 		NL_SET_ERR_MSG_MOD(extack, "matchall called with no action");
4521fcb64c0fSEli Cohen 		return -EINVAL;
4522fcb64c0fSEli Cohen 	}
4523fcb64c0fSEli Cohen 
4524fcb64c0fSEli Cohen 	if (!flow_offload_has_one_action(flow_action)) {
4525fcb64c0fSEli Cohen 		NL_SET_ERR_MSG_MOD(extack, "matchall policing support only a single action");
4526fcb64c0fSEli Cohen 		return -EOPNOTSUPP;
4527fcb64c0fSEli Cohen 	}
4528fcb64c0fSEli Cohen 
452953eca1f3SJakub Kicinski 	if (!flow_action_basic_hw_stats_check(flow_action, extack))
4530319a1d19SJiri Pirko 		return -EOPNOTSUPP;
4531319a1d19SJiri Pirko 
4532fcb64c0fSEli Cohen 	flow_action_for_each(i, act, flow_action) {
4533fcb64c0fSEli Cohen 		switch (act->id) {
4534fcb64c0fSEli Cohen 		case FLOW_ACTION_POLICE:
4535fcb64c0fSEli Cohen 			err = apply_police_params(priv, act->police.rate_bytes_ps, extack);
4536fcb64c0fSEli Cohen 			if (err)
4537fcb64c0fSEli Cohen 				return err;
4538fcb64c0fSEli Cohen 
4539fcb64c0fSEli Cohen 			rpriv->prev_vf_vport_stats = priv->stats.vf_vport;
4540fcb64c0fSEli Cohen 			break;
4541fcb64c0fSEli Cohen 		default:
4542fcb64c0fSEli Cohen 			NL_SET_ERR_MSG_MOD(extack, "mlx5 supports only police action for matchall");
4543fcb64c0fSEli Cohen 			return -EOPNOTSUPP;
4544fcb64c0fSEli Cohen 		}
4545fcb64c0fSEli Cohen 	}
4546fcb64c0fSEli Cohen 
4547fcb64c0fSEli Cohen 	return 0;
4548fcb64c0fSEli Cohen }
4549fcb64c0fSEli Cohen 
4550fcb64c0fSEli Cohen int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
4551fcb64c0fSEli Cohen 				struct tc_cls_matchall_offload *ma)
4552fcb64c0fSEli Cohen {
4553b5f814ccSEli Cohen 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
4554fcb64c0fSEli Cohen 	struct netlink_ext_ack *extack = ma->common.extack;
4555fcb64c0fSEli Cohen 
4556b5f814ccSEli Cohen 	if (!mlx5_esw_qos_enabled(esw)) {
4557b5f814ccSEli Cohen 		NL_SET_ERR_MSG_MOD(extack, "QoS is not supported on this device");
4558b5f814ccSEli Cohen 		return -EOPNOTSUPP;
4559b5f814ccSEli Cohen 	}
4560b5f814ccSEli Cohen 
45617b83355fSEli Cohen 	if (ma->common.prio != 1) {
4562fcb64c0fSEli Cohen 		NL_SET_ERR_MSG_MOD(extack, "only priority 1 is supported");
4563fcb64c0fSEli Cohen 		return -EINVAL;
4564fcb64c0fSEli Cohen 	}
4565fcb64c0fSEli Cohen 
4566fcb64c0fSEli Cohen 	return scan_tc_matchall_fdb_actions(priv, &ma->rule->action, extack);
4567fcb64c0fSEli Cohen }
4568fcb64c0fSEli Cohen 
4569fcb64c0fSEli Cohen int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv,
4570fcb64c0fSEli Cohen 			     struct tc_cls_matchall_offload *ma)
4571fcb64c0fSEli Cohen {
4572fcb64c0fSEli Cohen 	struct netlink_ext_ack *extack = ma->common.extack;
4573fcb64c0fSEli Cohen 
4574fcb64c0fSEli Cohen 	return apply_police_params(priv, 0, extack);
4575fcb64c0fSEli Cohen }
4576fcb64c0fSEli Cohen 
4577fcb64c0fSEli Cohen void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
4578fcb64c0fSEli Cohen 			     struct tc_cls_matchall_offload *ma)
4579fcb64c0fSEli Cohen {
4580fcb64c0fSEli Cohen 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
4581fcb64c0fSEli Cohen 	struct rtnl_link_stats64 cur_stats;
4582fcb64c0fSEli Cohen 	u64 dbytes;
4583fcb64c0fSEli Cohen 	u64 dpkts;
4584fcb64c0fSEli Cohen 
4585fcb64c0fSEli Cohen 	cur_stats = priv->stats.vf_vport;
4586fcb64c0fSEli Cohen 	dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets;
4587fcb64c0fSEli Cohen 	dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes;
4588fcb64c0fSEli Cohen 	rpriv->prev_vf_vport_stats = cur_stats;
458993a129ebSJiri Pirko 	flow_stats_update(&ma->stats, dpkts, dbytes, jiffies,
459093a129ebSJiri Pirko 			  FLOW_ACTION_HW_STATS_DELAYED);
4591fcb64c0fSEli Cohen }
4592fcb64c0fSEli Cohen 
/* A potential hairpin peer device is going away: mark every hairpin pair
 * created against @peer_priv as dead so the datapath stops using it.
 *
 * References on all live hairpin entries are taken under the table lock;
 * each entry is then inspected outside the lock, after waiting for its
 * initialization to complete (res_ready), and released.
 */
static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
					      struct mlx5e_priv *peer_priv)
{
	struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
	struct mlx5e_hairpin_entry *hpe, *tmp;
	LIST_HEAD(init_wait_list);
	u16 peer_vhca_id;
	int bkt;

	/* Hairpin only pairs functions of the same HW device. */
	if (!same_hw_devs(priv, peer_priv))
		return;

	peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);

	mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
	hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist)
		if (refcount_inc_not_zero(&hpe->refcnt))
			list_add(&hpe->dead_peer_wait_list, &init_wait_list);
	mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);

	list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) {
		/* Wait until hpe->hp is either valid or marked failed. */
		wait_for_completion(&hpe->res_ready);
		if (!IS_ERR_OR_NULL(hpe->hp) && hpe->peer_vhca_id == peer_vhca_id)
			hpe->hp->pair->peer_gone = true;

		mlx5e_hairpin_put(priv, hpe);
	}
}
46214d8fcf21SAlaa Hleihel 
/* Netdev notifier callback: watch for another mlx5e device being
 * unregistered so hairpin pairs that point at it can be marked dead.
 */
static int mlx5e_tc_netdev_event(struct notifier_block *this,
				 unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct mlx5e_flow_steering *fs;
	struct mlx5e_priv *peer_priv;
	struct mlx5e_tc_table *tc;
	struct mlx5e_priv *priv;

	/* Only other mlx5e netdevs actually going away are of interest;
	 * NOTE(review): the reg_state check presumably filters UNREGISTER
	 * notifications replayed for still-registered devices — confirm.
	 */
	if (ndev->netdev_ops != &mlx5e_netdev_ops ||
	    event != NETDEV_UNREGISTER ||
	    ndev->reg_state == NETREG_REGISTERED)
		return NOTIFY_DONE;

	/* Recover our own priv from the embedded notifier block. */
	tc = container_of(this, struct mlx5e_tc_table, netdevice_nb);
	fs = container_of(tc, struct mlx5e_flow_steering, tc);
	priv = container_of(fs, struct mlx5e_priv, fs);
	peer_priv = netdev_priv(ndev);
	if (priv == peer_priv ||
	    !(priv->netdev->features & NETIF_F_HW_TC))
		return NOTIFY_DONE;

	mlx5e_tc_hairpin_update_dead_peer(priv, peer_priv);

	return NOTIFY_DONE;
}
46484d8fcf21SAlaa Hleihel 
/* Initialize NIC-mode TC offload state: mod_hdr and hairpin tables, the
 * flow hashtable, and the netdev notifier used for hairpin peer cleanup.
 *
 * Returns 0 or the rhashtable/notifier registration error.
 * NOTE(review): on notifier registration failure tc->ht is not destroyed
 * here — verify the caller's error path covers it.
 */
int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	int err;

	mutex_init(&tc->t_lock);
	mutex_init(&tc->mod_hdr.lock);
	hash_init(tc->mod_hdr.hlist);
	mutex_init(&tc->hairpin_tbl_lock);
	hash_init(tc->hairpin_tbl);

	err = rhashtable_init(&tc->ht, &tc_ht_params);
	if (err)
		return err;

	tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
	err = register_netdevice_notifier_dev_net(priv->netdev,
						  &tc->netdevice_nb,
						  &tc->netdevice_nn);
	if (err) {
		/* NULL notifier_call tells cleanup not to unregister. */
		tc->netdevice_nb.notifier_call = NULL;
		mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
	}

	return err;
}
4675e8f887acSAmir Vadai 
4676e8f887acSAmir Vadai static void _mlx5e_tc_del_flow(void *ptr, void *arg)
4677e8f887acSAmir Vadai {
4678e8f887acSAmir Vadai 	struct mlx5e_tc_flow *flow = ptr;
4679655dc3d2SOr Gerlitz 	struct mlx5e_priv *priv = flow->priv;
4680e8f887acSAmir Vadai 
4681961e8979SRoi Dayan 	mlx5e_tc_del_flow(priv, flow);
4682e8f887acSAmir Vadai 	kfree(flow);
4683e8f887acSAmir Vadai }
4684e8f887acSAmir Vadai 
/* Tear down NIC-mode TC state in reverse order of mlx5e_tc_nic_init():
 * notifier, locks, flow hashtable (freeing any remaining flows via
 * _mlx5e_tc_del_flow) and finally the NIC TC flow table itself.
 */
void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	/* notifier_call is NULL when registration failed in init. */
	if (tc->netdevice_nb.notifier_call)
		unregister_netdevice_notifier_dev_net(priv->netdev,
						      &tc->netdevice_nb,
						      &tc->netdevice_nn);

	mutex_destroy(&tc->mod_hdr.lock);
	mutex_destroy(&tc->hairpin_tbl_lock);

	rhashtable_destroy(&tc->ht);

	if (!IS_ERR_OR_NULL(tc->t)) {
		mlx5_destroy_flow_table(tc->t);
		tc->t = NULL;
	}
	mutex_destroy(&tc->t_lock);
}
4705655dc3d2SOr Gerlitz 
/* Initialize eswitch-mode (uplink representor) TC offload state:
 * connection tracking, the tunnel / tunnel-options mapping contexts used
 * to restore tunnel metadata on HW miss, and the flow hashtable.
 *
 * Returns 0 or a negative errno; on failure everything already set up is
 * unwound in reverse order.
 */
int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
{
	const size_t sz_enc_opts = sizeof(struct flow_dissector_key_enc_opts);
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *priv;
	struct mapping_ctx *mapping;
	int err;

	uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht);
	priv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);

	err = mlx5_tc_ct_init(uplink_priv);
	if (err)
		goto err_ct;

	/* Maps full tunnel match keys to compact IDs the HW can carry. */
	mapping = mapping_create(sizeof(struct tunnel_match_key),
				 TUNNEL_INFO_BITS_MASK, true);
	if (IS_ERR(mapping)) {
		err = PTR_ERR(mapping);
		goto err_tun_mapping;
	}
	uplink_priv->tunnel_mapping = mapping;

	/* Separate mapping for tunnel encap options. */
	mapping = mapping_create(sz_enc_opts, ENC_OPTS_BITS_MASK, true);
	if (IS_ERR(mapping)) {
		err = PTR_ERR(mapping);
		goto err_enc_opts_mapping;
	}
	uplink_priv->tunnel_enc_opts_mapping = mapping;

	err = rhashtable_init(tc_ht, &tc_ht_params);
	if (err)
		goto err_ht_init;

	return err;

err_ht_init:
	mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
err_enc_opts_mapping:
	mapping_destroy(uplink_priv->tunnel_mapping);
err_tun_mapping:
	mlx5_tc_ct_clean(uplink_priv);
err_ct:
	netdev_warn(priv->netdev,
		    "Failed to initialize tc (eswitch), err: %d", err);
	return err;
}
4753655dc3d2SOr Gerlitz 
4754655dc3d2SOr Gerlitz void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
4755655dc3d2SOr Gerlitz {
47560a7fcb78SPaul Blakey 	struct mlx5_rep_uplink_priv *uplink_priv;
47570a7fcb78SPaul Blakey 
4758655dc3d2SOr Gerlitz 	rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
47590a7fcb78SPaul Blakey 
47600a7fcb78SPaul Blakey 	uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht);
47610a7fcb78SPaul Blakey 	mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
47620a7fcb78SPaul Blakey 	mapping_destroy(uplink_priv->tunnel_mapping);
47634c3844d9SPaul Blakey 
47644c3844d9SPaul Blakey 	mlx5_tc_ct_clean(uplink_priv);
4765655dc3d2SOr Gerlitz }
476601252a27SOr Gerlitz 
4767226f2ca3SVlad Buslov int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags)
476801252a27SOr Gerlitz {
4769d9ee0491SOr Gerlitz 	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
477001252a27SOr Gerlitz 
477101252a27SOr Gerlitz 	return atomic_read(&tc_ht->nelems);
477201252a27SOr Gerlitz }
477304de7ddaSRoi Dayan 
477404de7ddaSRoi Dayan void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw)
477504de7ddaSRoi Dayan {
477604de7ddaSRoi Dayan 	struct mlx5e_tc_flow *flow, *tmp;
477704de7ddaSRoi Dayan 
477804de7ddaSRoi Dayan 	list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows, peer)
477904de7ddaSRoi Dayan 		__mlx5e_tc_del_fdb_peer_flow(flow);
478004de7ddaSRoi Dayan }
4781b4a23329SRoi Dayan 
4782b4a23329SRoi Dayan void mlx5e_tc_reoffload_flows_work(struct work_struct *work)
4783b4a23329SRoi Dayan {
4784b4a23329SRoi Dayan 	struct mlx5_rep_uplink_priv *rpriv =
4785b4a23329SRoi Dayan 		container_of(work, struct mlx5_rep_uplink_priv,
4786b4a23329SRoi Dayan 			     reoffload_flows_work);
4787b4a23329SRoi Dayan 	struct mlx5e_tc_flow *flow, *tmp;
4788b4a23329SRoi Dayan 
4789ad86755bSVlad Buslov 	mutex_lock(&rpriv->unready_flows_lock);
4790b4a23329SRoi Dayan 	list_for_each_entry_safe(flow, tmp, &rpriv->unready_flows, unready) {
4791b4a23329SRoi Dayan 		if (!mlx5e_tc_add_fdb_flow(flow->priv, flow, NULL))
4792ad86755bSVlad Buslov 			unready_flow_del(flow);
4793b4a23329SRoi Dayan 	}
4794ad86755bSVlad Buslov 	mutex_unlock(&rpriv->unready_flows_lock);
4795b4a23329SRoi Dayan }
4796d6d27782SPaul Blakey 
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
/* Rebuild tunnel metadata on a packet that missed in HW after decap.
 *
 * @tunnel_id packs the tunnel-key mapping ID (upper bits) and the
 * encap-options mapping ID (lower ENC_OPTS_BITS); both were allocated at
 * offload time via the uplink mapping contexts. The recovered key is
 * attached to @skb as a metadata dst and the skb is retargeted to the
 * tunnel device so software TC can resume classification.
 *
 * Returns true when the skb is ready to continue in SW (including the
 * "not a tunnel packet" case), false when restore fails.
 */
static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb,
				 struct mlx5e_tc_update_priv *tc_priv,
				 u32 tunnel_id)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct flow_dissector_key_enc_opts enc_opts = {};
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct metadata_dst *tun_dst;
	struct tunnel_match_key key;
	u32 tun_id, enc_opts_id;
	struct net_device *dev;
	int err;

	enc_opts_id = tunnel_id & ENC_OPTS_BITS_MASK;
	tun_id = tunnel_id >> ENC_OPTS_BITS;

	/* ID 0 means the packet wasn't decapped from a tunnel. */
	if (!tun_id)
		return true;

	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &uplink_rpriv->uplink_priv;

	err = mapping_find(uplink_priv->tunnel_mapping, tun_id, &key);
	if (err) {
		/* HW reported an ID we never allocated — driver bug. */
		WARN_ON_ONCE(true);
		netdev_dbg(priv->netdev,
			   "Couldn't find tunnel for tun_id: %d, err: %d\n",
			   tun_id, err);
		return false;
	}

	if (enc_opts_id) {
		err = mapping_find(uplink_priv->tunnel_enc_opts_mapping,
				   enc_opts_id, &enc_opts);
		if (err) {
			netdev_dbg(priv->netdev,
				   "Couldn't find tunnel (opts) for tun_id: %d, err: %d\n",
				   enc_opts_id, err);
			return false;
		}
	}

	tun_dst = tun_rx_dst(enc_opts.len);
	if (!tun_dst) {
		WARN_ON_ONCE(true);
		return false;
	}

	/* NOTE(review): only IPv4 outer headers are reconstructed here. */
	ip_tunnel_key_init(&tun_dst->u.tun_info.key,
			   key.enc_ipv4.src, key.enc_ipv4.dst,
			   key.enc_ip.tos, key.enc_ip.ttl,
			   0, /* label */
			   key.enc_tp.src, key.enc_tp.dst,
			   key32_to_tunnel_id(key.enc_key_id.keyid),
			   TUNNEL_KEY);

	if (enc_opts.len)
		ip_tunnel_info_opts_set(&tun_dst->u.tun_info, enc_opts.data,
					enc_opts.len, enc_opts.dst_opt_type);

	/* tun_dst ownership moves to the skb (freed with it). */
	skb_dst_set(skb, (struct dst_entry *)tun_dst);
	dev = dev_get_by_index(&init_net, key.filter_ifindex);
	if (!dev) {
		netdev_dbg(priv->netdev,
			   "Couldn't find tunnel device with ifindex: %d\n",
			   key.filter_ifindex);
		return false;
	}

	/* Set tun_dev so we do dev_put() after datapath */
	tc_priv->tun_dev = dev;

	skb->dev = dev;

	return true;
}
#endif /* CONFIG_NET_TC_SKB_EXT */
4876b8ce9037SPaul Blakey 
4877d6d27782SPaul Blakey bool mlx5e_tc_rep_update_skb(struct mlx5_cqe64 *cqe,
4878b8ce9037SPaul Blakey 			     struct sk_buff *skb,
4879b8ce9037SPaul Blakey 			     struct mlx5e_tc_update_priv *tc_priv)
4880d6d27782SPaul Blakey {
4881d6d27782SPaul Blakey #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
48825c6b9460SPaul Blakey 	u32 chain = 0, reg_c0, reg_c1, tunnel_id, tuple_id;
48835c6b9460SPaul Blakey 	struct mlx5_rep_uplink_priv *uplink_priv;
48845c6b9460SPaul Blakey 	struct mlx5e_rep_priv *uplink_rpriv;
4885d6d27782SPaul Blakey 	struct tc_skb_ext *tc_skb_ext;
4886d6d27782SPaul Blakey 	struct mlx5_eswitch *esw;
4887d6d27782SPaul Blakey 	struct mlx5e_priv *priv;
4888b8ce9037SPaul Blakey 	int tunnel_moffset;
4889d6d27782SPaul Blakey 	int err;
4890d6d27782SPaul Blakey 
4891d6d27782SPaul Blakey 	reg_c0 = (be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK);
4892d6d27782SPaul Blakey 	if (reg_c0 == MLX5_FS_DEFAULT_FLOW_TAG)
4893d6d27782SPaul Blakey 		reg_c0 = 0;
4894b8ce9037SPaul Blakey 	reg_c1 = be32_to_cpu(cqe->imm_inval_pkey);
4895d6d27782SPaul Blakey 
4896d6d27782SPaul Blakey 	if (!reg_c0)
4897d6d27782SPaul Blakey 		return true;
4898d6d27782SPaul Blakey 
4899d6d27782SPaul Blakey 	priv = netdev_priv(skb->dev);
4900d6d27782SPaul Blakey 	esw = priv->mdev->priv.eswitch;
4901d6d27782SPaul Blakey 
4902d6d27782SPaul Blakey 	err = mlx5_eswitch_get_chain_for_tag(esw, reg_c0, &chain);
4903d6d27782SPaul Blakey 	if (err) {
4904d6d27782SPaul Blakey 		netdev_dbg(priv->netdev,
4905d6d27782SPaul Blakey 			   "Couldn't find chain for chain tag: %d, err: %d\n",
4906d6d27782SPaul Blakey 			   reg_c0, err);
4907d6d27782SPaul Blakey 		return false;
4908d6d27782SPaul Blakey 	}
4909d6d27782SPaul Blakey 
4910b8ce9037SPaul Blakey 	if (chain) {
4911d6d27782SPaul Blakey 		tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);
4912d6d27782SPaul Blakey 		if (!tc_skb_ext) {
4913b8ce9037SPaul Blakey 			WARN_ON(1);
4914d6d27782SPaul Blakey 			return false;
4915d6d27782SPaul Blakey 		}
4916d6d27782SPaul Blakey 
4917d6d27782SPaul Blakey 		tc_skb_ext->chain = chain;
49185c6b9460SPaul Blakey 
49195c6b9460SPaul Blakey 		tuple_id = reg_c1 & TUPLE_ID_MAX;
49205c6b9460SPaul Blakey 
49215c6b9460SPaul Blakey 		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
49225c6b9460SPaul Blakey 		uplink_priv = &uplink_rpriv->uplink_priv;
49235c6b9460SPaul Blakey 		if (!mlx5e_tc_ct_restore_flow(uplink_priv, skb, tuple_id))
49245c6b9460SPaul Blakey 			return false;
4925b8ce9037SPaul Blakey 	}
4926b8ce9037SPaul Blakey 
4927b8ce9037SPaul Blakey 	tunnel_moffset = mlx5e_tc_attr_to_reg_mappings[TUNNEL_TO_REG].moffset;
4928b8ce9037SPaul Blakey 	tunnel_id = reg_c1 >> (8 * tunnel_moffset);
4929b8ce9037SPaul Blakey 	return mlx5e_restore_tunnel(priv, skb, tc_priv, tunnel_id);
4930d6d27782SPaul Blakey #endif /* CONFIG_NET_TC_SKB_EXT */
4931d6d27782SPaul Blakey 
4932d6d27782SPaul Blakey 	return true;
4933d6d27782SPaul Blakey }
4934b8ce9037SPaul Blakey 
4935b8ce9037SPaul Blakey void mlx5_tc_rep_post_napi_receive(struct mlx5e_tc_update_priv *tc_priv)
4936b8ce9037SPaul Blakey {
4937b8ce9037SPaul Blakey 	if (tc_priv->tun_dev)
4938b8ce9037SPaul Blakey 		dev_put(tc_priv->tun_dev);
4939b8ce9037SPaul Blakey }
4940