1e8f887acSAmir Vadai /*
2e8f887acSAmir Vadai  * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
3e8f887acSAmir Vadai  *
4e8f887acSAmir Vadai  * This software is available to you under a choice of one of two
5e8f887acSAmir Vadai  * licenses.  You may choose to be licensed under the terms of the GNU
6e8f887acSAmir Vadai  * General Public License (GPL) Version 2, available from the file
7e8f887acSAmir Vadai  * COPYING in the main directory of this source tree, or the
8e8f887acSAmir Vadai  * OpenIB.org BSD license below:
9e8f887acSAmir Vadai  *
10e8f887acSAmir Vadai  *     Redistribution and use in source and binary forms, with or
11e8f887acSAmir Vadai  *     without modification, are permitted provided that the following
12e8f887acSAmir Vadai  *     conditions are met:
13e8f887acSAmir Vadai  *
14e8f887acSAmir Vadai  *      - Redistributions of source code must retain the above
15e8f887acSAmir Vadai  *        copyright notice, this list of conditions and the following
16e8f887acSAmir Vadai  *        disclaimer.
17e8f887acSAmir Vadai  *
18e8f887acSAmir Vadai  *      - Redistributions in binary form must reproduce the above
19e8f887acSAmir Vadai  *        copyright notice, this list of conditions and the following
20e8f887acSAmir Vadai  *        disclaimer in the documentation and/or other materials
21e8f887acSAmir Vadai  *        provided with the distribution.
22e8f887acSAmir Vadai  *
23e8f887acSAmir Vadai  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24e8f887acSAmir Vadai  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25e8f887acSAmir Vadai  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26e8f887acSAmir Vadai  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27e8f887acSAmir Vadai  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28e8f887acSAmir Vadai  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29e8f887acSAmir Vadai  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30e8f887acSAmir Vadai  * SOFTWARE.
31e8f887acSAmir Vadai  */
32e8f887acSAmir Vadai 
33e3a2b7edSAmir Vadai #include <net/flow_dissector.h>
343f7d0eb4SOr Gerlitz #include <net/sch_generic.h>
35e3a2b7edSAmir Vadai #include <net/pkt_cls.h>
36e3a2b7edSAmir Vadai #include <net/tc_act/tc_gact.h>
3712185a9fSAmir Vadai #include <net/tc_act/tc_skbedit.h>
38e8f887acSAmir Vadai #include <linux/mlx5/fs.h>
39e8f887acSAmir Vadai #include <linux/mlx5/device.h>
40e8f887acSAmir Vadai #include <linux/rhashtable.h>
415a7e5bcbSVlad Buslov #include <linux/refcount.h>
42db76ca24SVlad Buslov #include <linux/completion.h>
4303a9d11eSOr Gerlitz #include <net/tc_act/tc_mirred.h>
44776b12b6SOr Gerlitz #include <net/tc_act/tc_vlan.h>
45bbd00f7eSHadar Hen Zion #include <net/tc_act/tc_tunnel_key.h>
46d79b6df6SOr Gerlitz #include <net/tc_act/tc_pedit.h>
4726c02749SOr Gerlitz #include <net/tc_act/tc_csum.h>
48f6dfb4c3SHadar Hen Zion #include <net/arp.h>
493616d08bSDavid Ahern #include <net/ipv6_stubs.h>
50e8f887acSAmir Vadai #include "en.h"
511d447a39SSaeed Mahameed #include "en_rep.h"
52232c0013SHadar Hen Zion #include "en_tc.h"
5303a9d11eSOr Gerlitz #include "eswitch.h"
5439ac237cSPaul Blakey #include "eswitch_offloads_chains.h"
553f6d08d1SOr Gerlitz #include "fs_core.h"
562c81bfd5SHuy Nguyen #include "en/port.h"
57101f4de9SOz Shlomo #include "en/tc_tun.h"
580a7fcb78SPaul Blakey #include "en/mapping.h"
5904de7ddaSRoi Dayan #include "lib/devcom.h"
609272e3dfSYevgeny Kliteynik #include "lib/geneve.h"
617a978759SDmytro Linkin #include "diag/en_tc_tracepoint.h"
62e8f887acSAmir Vadai 
630a7fcb78SPaul Blakey #define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)
640a7fcb78SPaul Blakey 
/* Attributes of a flow offloaded to the NIC RX steering tables (the
 * non-eswitch counterpart of mlx5_esw_flow_attr, see the union at the
 * tail of struct mlx5e_tc_flow).
 */
struct mlx5_nic_flow_attr {
	u32 action;	/* MLX5_FLOW_CONTEXT_ACTION_* bits */
	u32 flow_tag;
	struct mlx5_modify_hdr *modify_hdr; /* shared FW modify-header object */
	u32 hairpin_tirn;
	u8 match_level;
	struct mlx5_flow_table	*hairpin_ft;
	struct mlx5_fc		*counter;   /* flow counter for stats */
};
743bc4b7bfSOr Gerlitz 
/* Internal per-flow state bits start right after the last flag exported
 * in en_tc.h (MLX5E_TC_FLAG_*).
 */
#define MLX5E_TC_FLOW_BASE (MLX5E_TC_FLAG_LAST_EXPORTED_BIT + 1)

enum {
	/* mirrors of the exported MLX5E_TC_FLAG_* bits */
	MLX5E_TC_FLOW_FLAG_INGRESS	= MLX5E_TC_FLAG_INGRESS_BIT,
	MLX5E_TC_FLOW_FLAG_EGRESS	= MLX5E_TC_FLAG_EGRESS_BIT,
	MLX5E_TC_FLOW_FLAG_ESWITCH	= MLX5E_TC_FLAG_ESW_OFFLOAD_BIT,
	MLX5E_TC_FLOW_FLAG_FT		= MLX5E_TC_FLAG_FT_OFFLOAD_BIT,
	MLX5E_TC_FLOW_FLAG_NIC		= MLX5E_TC_FLAG_NIC_OFFLOAD_BIT,
	/* driver-internal state bits, not exported to userspace */
	MLX5E_TC_FLOW_FLAG_OFFLOADED	= MLX5E_TC_FLOW_BASE,
	MLX5E_TC_FLOW_FLAG_HAIRPIN	= MLX5E_TC_FLOW_BASE + 1,
	MLX5E_TC_FLOW_FLAG_HAIRPIN_RSS	= MLX5E_TC_FLOW_BASE + 2,
	MLX5E_TC_FLOW_FLAG_SLOW		= MLX5E_TC_FLOW_BASE + 3,
	MLX5E_TC_FLOW_FLAG_DUP		= MLX5E_TC_FLOW_BASE + 4,
	MLX5E_TC_FLOW_FLAG_NOT_READY	= MLX5E_TC_FLOW_BASE + 5,
	MLX5E_TC_FLOW_FLAG_DELETED	= MLX5E_TC_FLOW_BASE + 6,
};

/* max number of extra HW rules ("splits") a single flow may need */
#define MLX5E_TC_MAX_SPLITS 1
93e4ad91f2SChris Mi 
9479baaec7SEli Britstein /* Helper struct for accessing a struct containing list_head array.
9579baaec7SEli Britstein  * Containing struct
9679baaec7SEli Britstein  *   |- Helper array
9779baaec7SEli Britstein  *      [0] Helper item 0
9879baaec7SEli Britstein  *          |- list_head item 0
9979baaec7SEli Britstein  *          |- index (0)
10079baaec7SEli Britstein  *      [1] Helper item 1
10179baaec7SEli Britstein  *          |- list_head item 1
10279baaec7SEli Britstein  *          |- index (1)
10379baaec7SEli Britstein  * To access the containing struct from one of the list_head items:
10479baaec7SEli Britstein  * 1. Get the helper item from the list_head item using
10579baaec7SEli Britstein  *    helper item =
10679baaec7SEli Britstein  *        container_of(list_head item, helper struct type, list_head field)
10779baaec7SEli Britstein  * 2. Get the contining struct from the helper item and its index in the array:
10879baaec7SEli Britstein  *    containing struct =
10979baaec7SEli Britstein  *        container_of(helper item, containing struct type, helper field[index])
11079baaec7SEli Britstein  */
struct encap_flow_item {
	struct mlx5e_encap_entry *e; /* attached encap instance */
	struct list_head list;	     /* list_head item (see diagram above) */
	int index;		     /* position in flow->encaps[] */
};
11679baaec7SEli Britstein 
/* Driver representation of one offloaded TC flow.  Looked up by cookie
 * in a rhashtable; lifetime is reference counted (mlx5e_flow_get/
 * mlx5e_flow_put) and the struct is freed via RCU.
 */
struct mlx5e_tc_flow {
	struct rhash_head	node;	/* rhashtable linkage */
	struct mlx5e_priv	*priv;
	u64			cookie;	/* TC filter cookie, hash key */
	unsigned long		flags;	/* MLX5E_TC_FLOW_FLAG_* bits */
	struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1];
	/* Flow can be associated with multiple encap IDs.
	 * The number of encaps is bounded by the number of supported
	 * destinations.
	 */
	struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS];
	struct mlx5e_tc_flow    *peer_flow;
	struct mlx5e_mod_hdr_entry *mh; /* attached mod header instance */
	struct list_head	mod_hdr; /* flows sharing the same mod hdr ID */
	struct mlx5e_hairpin_entry *hpe; /* attached hairpin instance */
	struct list_head	hairpin; /* flows sharing the same hairpin */
	struct list_head	peer;    /* flows with peer flow */
	struct list_head	unready; /* flows not ready to be offloaded (e.g due to missing route) */
	int			tmp_efi_index;
	struct list_head	tmp_list; /* temporary flow list used by neigh update */
	refcount_t		refcnt;
	struct rcu_head		rcu_head;  /* deferred free, see mlx5e_flow_put() */
	struct completion	init_done; /* signalled once offload attempt finished */
	int tunnel_id; /* the mapped tunnel id of this flow */

	/* type-specific attributes; flexible tail, must stay last */
	union {
		struct mlx5_esw_flow_attr esw_attr[0];
		struct mlx5_nic_flow_attr nic_attr[0];
	};
};
147e8f887acSAmir Vadai 
/* Attributes gathered while parsing a TC filter, before the flow is
 * actually installed in HW.
 */
struct mlx5e_tc_flow_parse_attr {
	const struct ip_tunnel_info *tun_info[MLX5_MAX_FLOW_FWD_VPORTS];
	struct net_device *filter_dev; /* device the filter was installed on */
	struct mlx5_flow_spec spec;    /* match criteria/value buffers */
	struct mlx5e_tc_mod_hdr_acts mod_hdr_acts; /* accumulated pedit/set actions */
	int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS];
};
15517091853SOr Gerlitz 
156acff797cSMaor Gottlieb #define MLX5E_TC_TABLE_NUM_GROUPS 4
157b3a433deSOr Gerlitz #define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(16)
158e8f887acSAmir Vadai 
/* Tunnel match fields used as the key when mapping a flow's tunnel match
 * to a compact tunnel id (see the TUNNEL_TO_REG mapping below).
 */
struct tunnel_match_key {
	struct flow_dissector_key_control enc_control;
	struct flow_dissector_key_keyid enc_key_id;
	struct flow_dissector_key_ports enc_tp;
	struct flow_dissector_key_ip enc_ip;
	/* v4/v6 addresses, selected by enc_control.addr_type */
	union {
		struct flow_dissector_key_ipv4_addrs enc_ipv4;
		struct flow_dissector_key_ipv6_addrs enc_ipv6;
	};

	int filter_ifindex;
};
1710a7fcb78SPaul Blakey 
1720a7fcb78SPaul Blakey /* Tunnel_id mapping is TUNNEL_INFO_BITS + ENC_OPTS_BITS.
1730a7fcb78SPaul Blakey  * Upper TUNNEL_INFO_BITS for general tunnel info.
1740a7fcb78SPaul Blakey  * Lower ENC_OPTS_BITS bits for enc_opts.
1750a7fcb78SPaul Blakey  */
1760a7fcb78SPaul Blakey #define TUNNEL_INFO_BITS 6
1770a7fcb78SPaul Blakey #define TUNNEL_INFO_BITS_MASK GENMASK(TUNNEL_INFO_BITS - 1, 0)
1780a7fcb78SPaul Blakey #define ENC_OPTS_BITS 2
1790a7fcb78SPaul Blakey #define ENC_OPTS_BITS_MASK GENMASK(ENC_OPTS_BITS - 1, 0)
1800a7fcb78SPaul Blakey #define TUNNEL_ID_BITS (TUNNEL_INFO_BITS + ENC_OPTS_BITS)
1810a7fcb78SPaul Blakey #define TUNNEL_ID_MASK GENMASK(TUNNEL_ID_BITS - 1, 0)
1820a7fcb78SPaul Blakey 
/* Per attribute type: which metadata register carries it (mfield), the
 * byte offset and length within that register (moffset/mlen), and - for
 * types that are also matched on - the byte offset of the register inside
 * fte_match_param (soffset).
 */
struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
	[CHAIN_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
		.moffset = 0,
		.mlen = 2,
	},
	[TUNNEL_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1,
		.moffset = 3,
		.mlen = 1,
		.soffset = MLX5_BYTE_OFF(fte_match_param,
					 misc_parameters_2.metadata_reg_c_1),
	},
};
1978f1e0b97SPaul Blakey 
1980a7fcb78SPaul Blakey static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
1990a7fcb78SPaul Blakey 
/* Program @spec to match @data under @mask in the metadata register
 * described by mlx5e_tc_attr_to_reg_mappings[type], and enable the
 * misc_parameters_2 criteria section.
 *
 * @data/@mask are host order; they are converted to big endian and
 * right-shifted so the mlen significant bytes sit at the start of the
 * temporary, matching the byte layout of the fte_match_param buffer.
 *
 * NOTE(review): cpu_to_be32() results are stored back into plain u32 and
 * then shifted; the shift amount assumes mlen < 4 (a 4-byte mapping would
 * shift by 0 bits only if handled, shift-by-32 is UB) - current mappings
 * use mlen 1 or 2.  Confirm before adding a 4-byte mapping.
 */
void
mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
			    enum mlx5e_tc_attr_to_reg type,
			    u32 data,
			    u32 mask)
{
	int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
	int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	void *headers_c = spec->match_criteria;
	void *headers_v = spec->match_value;
	void *fmask, *fval;

	/* locations of the register field in the criteria/value buffers */
	fmask = headers_c + soffset;
	fval = headers_v + soffset;

	mask = cpu_to_be32(mask) >> (32 - (match_len * 8));
	data = cpu_to_be32(data) >> (32 - (match_len * 8));

	memcpy(fmask, &mask, match_len);
	memcpy(fval, &data, match_len);

	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
}
2230a7fcb78SPaul Blakey 
/* Append a "set metadata register" modify-header action writing @data to
 * the register described by mlx5e_tc_attr_to_reg_mappings[type].
 * Returns 0 on success or a negative errno from growing the action array.
 */
int
mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
			  struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
			  enum mlx5e_tc_attr_to_reg type,
			  u32 data)
{
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield;
	int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	char *modact;
	int err;

	/* make sure there is room for one more action */
	err = alloc_mod_hdr_actions(mdev, MLX5_FLOW_NAMESPACE_FDB,
				    mod_hdr_acts);
	if (err)
		return err;

	/* next free slot in the action array */
	modact = mod_hdr_acts->actions +
		 (mod_hdr_acts->num_actions * MLX5_MH_ACT_SZ);

	/* Firmware has 5bit length field and 0 means 32bits */
	if (mlen == 4)
		mlen = 0;

	MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, modact, field, mfield);
	MLX5_SET(set_action_in, modact, offset, moffset * 8); /* bytes -> bits */
	MLX5_SET(set_action_in, modact, length, mlen * 8);
	MLX5_SET(set_action_in, modact, data, data);
	mod_hdr_acts->num_actions++;

	return 0;
}
2570a7fcb78SPaul Blakey 
/* Resources backing one hairpin pipe between two functions: the RQ/SQ
 * pair itself plus the steering objects created on the target function
 * (transport domain, direct TIR, and optional RSS RQT/TIRs/TTC).
 */
struct mlx5e_hairpin {
	struct mlx5_hairpin *pair;

	struct mlx5_core_dev *func_mdev;
	struct mlx5e_priv *func_priv;
	u32 tdn;  /* transport domain */
	u32 tirn; /* direct TIR, see mlx5e_hairpin_create_transport() */

	/* RSS resources, used when hairpinning over multiple channels */
	int num_channels;
	struct mlx5e_rqt indir_rqt;
	u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
	struct mlx5e_ttc_table ttc;
};
27177ab67b7SOr Gerlitz 
/* Hash table entry sharing one mlx5e_hairpin instance between all flows
 * hairpinning to the same peer (keyed by peer_vhca_id and prio).
 */
struct mlx5e_hairpin_entry {
	/* a node of a hash table which keeps all the  hairpin entries */
	struct hlist_node hairpin_hlist;

	/* protects flows list */
	spinlock_t flows_lock;
	/* flows sharing the same hairpin */
	struct list_head flows;
	/* hpe's that were not fully initialized when dead peer update event
	 * function traversed them.
	 */
	struct list_head dead_peer_wait_list;

	u16 peer_vhca_id;
	u8 prio;
	struct mlx5e_hairpin *hp;
	refcount_t refcnt;
	/* completed once hp creation finished (successfully or not) */
	struct completion res_ready;
};
2915c65c564SOr Gerlitz 
/* Hash/compare key used to deduplicate modify-header action arrays */
struct mod_hdr_key {
	int num_actions;
	void *actions; /* num_actions * MLX5_MH_ACT_SZ raw bytes */
};
29611c9c548SOr Gerlitz 
/* Refcounted, hashtable-managed wrapper around one FW modify-header
 * object, shared by all flows carrying an identical action array.
 */
struct mlx5e_mod_hdr_entry {
	/* a node of a hash table which keeps all the mod_hdr entries */
	struct hlist_node mod_hdr_hlist;

	/* protects flows list */
	spinlock_t flows_lock;
	/* flows sharing the same mod_hdr entry */
	struct list_head flows;

	struct mod_hdr_key key;

	struct mlx5_modify_hdr *modify_hdr;

	refcount_t refcnt;
	/* Concurrent attach protocol: waiters block on res_ready, then
	 * check compl_result (>0 means the FW object exists, <0 is the
	 * allocation error).  See mlx5e_attach_mod_hdr().
	 */
	struct completion res_ready;
	int compl_result;
};
31411c9c548SOr Gerlitz 
3155a7e5bcbSVlad Buslov static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
3165a7e5bcbSVlad Buslov 			      struct mlx5e_tc_flow *flow);
3175a7e5bcbSVlad Buslov 
3185a7e5bcbSVlad Buslov static struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow)
3195a7e5bcbSVlad Buslov {
3205a7e5bcbSVlad Buslov 	if (!flow || !refcount_inc_not_zero(&flow->refcnt))
3215a7e5bcbSVlad Buslov 		return ERR_PTR(-EINVAL);
3225a7e5bcbSVlad Buslov 	return flow;
3235a7e5bcbSVlad Buslov }
3245a7e5bcbSVlad Buslov 
/* Drop a reference on @flow.  The last put tears down the HW offload and
 * frees the flow after an RCU grace period, so lockless readers holding a
 * pointer under rcu_read_lock() stay safe.
 */
static void mlx5e_flow_put(struct mlx5e_priv *priv,
			   struct mlx5e_tc_flow *flow)
{
	if (refcount_dec_and_test(&flow->refcnt)) {
		mlx5e_tc_del_flow(priv, flow);
		kfree_rcu(flow, rcu_head);
	}
}
3335a7e5bcbSVlad Buslov 
/* Set a flow state bit, ordered after all prior stores so that a reader
 * observing the flag also sees the flow fields published before it
 * (pairs with the barrier in __flow_flag_test()).
 */
static void __flow_flag_set(struct mlx5e_tc_flow *flow, unsigned long flag)
{
	/* Complete all memory stores before setting bit. */
	smp_mb__before_atomic();
	set_bit(flag, &flow->flags);
}

/* convenience wrapper taking the short MLX5E_TC_FLOW_FLAG_ suffix */
#define flow_flag_set(flow, flag) __flow_flag_set(flow, MLX5E_TC_FLOW_FLAG_##flag)
342226f2ca3SVlad Buslov 
/* Atomically set a flow state bit and return its previous value. */
static bool __flow_flag_test_and_set(struct mlx5e_tc_flow *flow,
				     unsigned long flag)
{
	/* test_and_set_bit() provides all necessary barriers */
	return test_and_set_bit(flag, &flow->flags);
}

/* convenience wrapper taking the short MLX5E_TC_FLOW_FLAG_ suffix */
#define flow_flag_test_and_set(flow, flag)			\
	__flow_flag_test_and_set(flow,				\
				 MLX5E_TC_FLOW_FLAG_##flag)
353c5d326b2SVlad Buslov 
/* Clear a flow state bit, ordered after all prior stores (mirror of
 * __flow_flag_set()).
 */
static void __flow_flag_clear(struct mlx5e_tc_flow *flow, unsigned long flag)
{
	/* Complete all memory stores before clearing bit. */
	smp_mb__before_atomic();
	clear_bit(flag, &flow->flags);
}

/* convenience wrapper taking the short MLX5E_TC_FLOW_FLAG_ suffix */
#define flow_flag_clear(flow, flag) __flow_flag_clear(flow, \
						      MLX5E_TC_FLOW_FLAG_##flag)
363226f2ca3SVlad Buslov 
/* Test a flow state bit, with a barrier ensuring subsequent reads of flow
 * fields happen after the flag check (pairs with __flow_flag_set()).
 */
static bool __flow_flag_test(struct mlx5e_tc_flow *flow, unsigned long flag)
{
	bool ret = test_bit(flag, &flow->flags);

	/* Read fields of flow structure only after checking flags. */
	smp_mb__after_atomic();
	return ret;
}

/* convenience wrapper taking the short MLX5E_TC_FLOW_FLAG_ suffix */
#define flow_flag_test(flow, flag) __flow_flag_test(flow, \
						    MLX5E_TC_FLOW_FLAG_##flag)
375226f2ca3SVlad Buslov 
/* True if the flow is offloaded through the eswitch (FDB) tables */
static bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, ESWITCH);
}
380226f2ca3SVlad Buslov 
/* True if the flow came through the FT offload path (FT flag set) */
static bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, FT);
}
38584179981SPaul Blakey 
/* True once the flow's rules have been installed in HW (OFFLOADED set) */
static bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, OFFLOADED);
}
390226f2ca3SVlad Buslov 
/* Hash the raw modify-header action array with jhash (seed 0) */
static inline u32 hash_mod_hdr_info(struct mod_hdr_key *key)
{
	return jhash(key->actions,
		     key->num_actions * MLX5_MH_ACT_SZ, 0);
}
39611c9c548SOr Gerlitz 
39711c9c548SOr Gerlitz static inline int cmp_mod_hdr_info(struct mod_hdr_key *a,
39811c9c548SOr Gerlitz 				   struct mod_hdr_key *b)
39911c9c548SOr Gerlitz {
40011c9c548SOr Gerlitz 	if (a->num_actions != b->num_actions)
40111c9c548SOr Gerlitz 		return 1;
40211c9c548SOr Gerlitz 
40311c9c548SOr Gerlitz 	return memcmp(a->actions, b->actions, a->num_actions * MLX5_MH_ACT_SZ);
40411c9c548SOr Gerlitz }
40511c9c548SOr Gerlitz 
406dd58edc3SVlad Buslov static struct mod_hdr_tbl *
407dd58edc3SVlad Buslov get_mod_hdr_table(struct mlx5e_priv *priv, int namespace)
408dd58edc3SVlad Buslov {
409dd58edc3SVlad Buslov 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
410dd58edc3SVlad Buslov 
411dd58edc3SVlad Buslov 	return namespace == MLX5_FLOW_NAMESPACE_FDB ? &esw->offloads.mod_hdr :
412dd58edc3SVlad Buslov 		&priv->fs.tc.mod_hdr;
413dd58edc3SVlad Buslov }
414dd58edc3SVlad Buslov 
415dd58edc3SVlad Buslov static struct mlx5e_mod_hdr_entry *
416dd58edc3SVlad Buslov mlx5e_mod_hdr_get(struct mod_hdr_tbl *tbl, struct mod_hdr_key *key, u32 hash_key)
417dd58edc3SVlad Buslov {
418dd58edc3SVlad Buslov 	struct mlx5e_mod_hdr_entry *mh, *found = NULL;
419dd58edc3SVlad Buslov 
420dd58edc3SVlad Buslov 	hash_for_each_possible(tbl->hlist, mh, mod_hdr_hlist, hash_key) {
421dd58edc3SVlad Buslov 		if (!cmp_mod_hdr_info(&mh->key, key)) {
422dd58edc3SVlad Buslov 			refcount_inc(&mh->refcnt);
423dd58edc3SVlad Buslov 			found = mh;
424dd58edc3SVlad Buslov 			break;
425dd58edc3SVlad Buslov 		}
426dd58edc3SVlad Buslov 	}
427dd58edc3SVlad Buslov 
428dd58edc3SVlad Buslov 	return found;
429dd58edc3SVlad Buslov }
430dd58edc3SVlad Buslov 
/* Drop a reference on @mh.  On the last put the entry is unhashed, the FW
 * modify-header object is freed (only when it was successfully allocated,
 * i.e. compl_result > 0) and the entry itself is released.
 */
static void mlx5e_mod_hdr_put(struct mlx5e_priv *priv,
			      struct mlx5e_mod_hdr_entry *mh,
			      int namespace)
{
	struct mod_hdr_tbl *tbl = get_mod_hdr_table(priv, namespace);

	/* takes tbl->lock only when the refcount actually hits zero */
	if (!refcount_dec_and_mutex_lock(&mh->refcnt, &tbl->lock))
		return;
	hash_del(&mh->mod_hdr_hlist);
	mutex_unlock(&tbl->lock);

	WARN_ON(!list_empty(&mh->flows));
	if (mh->compl_result > 0)
		mlx5_modify_header_dealloc(priv->mdev, mh->modify_hdr);

	kfree(mh);
}
448dd58edc3SVlad Buslov 
449d2faae25SVlad Buslov static int get_flow_name_space(struct mlx5e_tc_flow *flow)
450d2faae25SVlad Buslov {
451d2faae25SVlad Buslov 	return mlx5e_is_eswitch_flow(flow) ?
452d2faae25SVlad Buslov 		MLX5_FLOW_NAMESPACE_FDB : MLX5_FLOW_NAMESPACE_KERNEL;
453d2faae25SVlad Buslov }
/* Attach @flow to a (possibly shared) FW modify-header object built from
 * the actions accumulated in @parse_attr->mod_hdr_acts.
 *
 * Entries are deduplicated in a per-namespace hash table keyed by the raw
 * action array.  Insertion and FW allocation are decoupled: a new entry
 * is published under tbl->lock with res_ready not yet completed, so
 * concurrent requesters of the same key wait on res_ready and then check
 * compl_result (>0 success, <0 allocation failed).
 *
 * Returns 0 on success or a negative errno.
 */
static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
				struct mlx5e_tc_flow *flow,
				struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	int num_actions, actions_size, namespace, err;
	struct mlx5e_mod_hdr_entry *mh;
	struct mod_hdr_tbl *tbl;
	struct mod_hdr_key key;
	u32 hash_key;

	num_actions  = parse_attr->mod_hdr_acts.num_actions;
	actions_size = MLX5_MH_ACT_SZ * num_actions;

	key.actions = parse_attr->mod_hdr_acts.actions;
	key.num_actions = num_actions;

	hash_key = hash_mod_hdr_info(&key);

	namespace = get_flow_name_space(flow);
	tbl = get_mod_hdr_table(priv, namespace);

	mutex_lock(&tbl->lock);
	mh = mlx5e_mod_hdr_get(tbl, &key, hash_key);
	if (mh) {
		/* Existing entry found (reference already taken by
		 * mlx5e_mod_hdr_get).  Drop the lock and wait for its
		 * initializer to finish the FW allocation.
		 */
		mutex_unlock(&tbl->lock);
		wait_for_completion(&mh->res_ready);

		if (mh->compl_result < 0) {
			err = -EREMOTEIO;
			goto attach_header_err;
		}
		goto attach_flow;
	}

	/* No match - create a new entry; the action array is stored inline
	 * right after the struct.
	 */
	mh = kzalloc(sizeof(*mh) + actions_size, GFP_KERNEL);
	if (!mh) {
		mutex_unlock(&tbl->lock);
		return -ENOMEM;
	}

	mh->key.actions = (void *)mh + sizeof(*mh);
	memcpy(mh->key.actions, key.actions, actions_size);
	mh->key.num_actions = num_actions;
	spin_lock_init(&mh->flows_lock);
	INIT_LIST_HEAD(&mh->flows);
	refcount_set(&mh->refcnt, 1);
	init_completion(&mh->res_ready);

	/* Publish before the FW call so concurrent requests for the same
	 * key find this entry and wait on res_ready instead of racing.
	 */
	hash_add(tbl->hlist, &mh->mod_hdr_hlist, hash_key);
	mutex_unlock(&tbl->lock);

	mh->modify_hdr = mlx5_modify_header_alloc(priv->mdev, namespace,
						  mh->key.num_actions,
						  mh->key.actions);
	if (IS_ERR(mh->modify_hdr)) {
		err = PTR_ERR(mh->modify_hdr);
		mh->compl_result = err;
		goto alloc_header_err;
	}
	mh->compl_result = 1;
	complete_all(&mh->res_ready);

attach_flow:
	flow->mh = mh;
	spin_lock(&mh->flows_lock);
	list_add(&flow->mod_hdr, &mh->flows);
	spin_unlock(&mh->flows_lock);
	if (mlx5e_is_eswitch_flow(flow))
		flow->esw_attr->modify_hdr = mh->modify_hdr;
	else
		flow->nic_attr->modify_hdr = mh->modify_hdr;

	return 0;

alloc_header_err:
	/* wake waiters so they can observe the negative compl_result */
	complete_all(&mh->res_ready);
attach_header_err:
	mlx5e_mod_hdr_put(priv, mh, namespace);
	return err;
}
53411c9c548SOr Gerlitz 
/* Detach @flow from its modify-header entry and release the reference
 * taken by mlx5e_attach_mod_hdr().  Safe to call on a flow that never
 * attached (flow->mh == NULL).
 */
static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
				 struct mlx5e_tc_flow *flow)
{
	/* flow wasn't fully initialized */
	if (!flow->mh)
		return;

	spin_lock(&flow->mh->flows_lock);
	list_del(&flow->mod_hdr);
	spin_unlock(&flow->mh->flows_lock);

	mlx5e_mod_hdr_put(priv, flow->mh, get_flow_name_space(flow));
	flow->mh = NULL;
}
54911c9c548SOr Gerlitz 
55077ab67b7SOr Gerlitz static
55177ab67b7SOr Gerlitz struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
55277ab67b7SOr Gerlitz {
55377ab67b7SOr Gerlitz 	struct net_device *netdev;
55477ab67b7SOr Gerlitz 	struct mlx5e_priv *priv;
55577ab67b7SOr Gerlitz 
55677ab67b7SOr Gerlitz 	netdev = __dev_get_by_index(net, ifindex);
55777ab67b7SOr Gerlitz 	priv = netdev_priv(netdev);
55877ab67b7SOr Gerlitz 	return priv->mdev;
55977ab67b7SOr Gerlitz }
56077ab67b7SOr Gerlitz 
/* Allocate the transport domain and a direct-dispatch TIR pointing at the
 * hairpin pair's first RQ.  Undone by mlx5e_hairpin_destroy_transport().
 * Returns 0 on success or a negative errno (partial state is rolled back).
 */
static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
{
	u32 in[MLX5_ST_SZ_DW(create_tir_in)] = {0};
	void *tirc;
	int err;

	err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn);
	if (err)
		goto alloc_tdn_err;

	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

	/* direct (non-RSS) TIR targeting the first RQ of the pair */
	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
	MLX5_SET(tirc, tirc, inline_rqn, hp->pair->rqn[0]);
	MLX5_SET(tirc, tirc, transport_domain, hp->tdn);

	err = mlx5_core_create_tir(hp->func_mdev, in, MLX5_ST_SZ_BYTES(create_tir_in), &hp->tirn);
	if (err)
		goto create_tir_err;

	return 0;

create_tir_err:
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
alloc_tdn_err:
	return err;
}
58877ab67b7SOr Gerlitz 
/* Teardown counterpart of mlx5e_hairpin_create_transport(): destroy the
 * TIR first, then release the transport domain it belongs to.
 */
static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
{
	mlx5_core_destroy_tir(hp->func_mdev, hp->tirn);
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
}
59477ab67b7SOr Gerlitz 
5953f6d08d1SOr Gerlitz static void mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
5963f6d08d1SOr Gerlitz {
5973f6d08d1SOr Gerlitz 	u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE], rqn;
5983f6d08d1SOr Gerlitz 	struct mlx5e_priv *priv = hp->func_priv;
5993f6d08d1SOr Gerlitz 	int i, ix, sz = MLX5E_INDIR_RQT_SIZE;
6003f6d08d1SOr Gerlitz 
6013f6d08d1SOr Gerlitz 	mlx5e_build_default_indir_rqt(indirection_rqt, sz,
6023f6d08d1SOr Gerlitz 				      hp->num_channels);
6033f6d08d1SOr Gerlitz 
6043f6d08d1SOr Gerlitz 	for (i = 0; i < sz; i++) {
6053f6d08d1SOr Gerlitz 		ix = i;
606bbeb53b8SAya Levin 		if (priv->rss_params.hfunc == ETH_RSS_HASH_XOR)
6073f6d08d1SOr Gerlitz 			ix = mlx5e_bits_invert(i, ilog2(sz));
6083f6d08d1SOr Gerlitz 		ix = indirection_rqt[ix];
6093f6d08d1SOr Gerlitz 		rqn = hp->pair->rqn[ix];
6103f6d08d1SOr Gerlitz 		MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
6113f6d08d1SOr Gerlitz 	}
6123f6d08d1SOr Gerlitz }
6133f6d08d1SOr Gerlitz 
/* Create the indirection RQT spreading hairpin traffic over the pair's
 * RQs for RSS.  On success marks hp->indir_rqt enabled; returns 0 or a
 * negative errno.
 */
static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
{
	int inlen, err, sz = MLX5E_INDIR_RQT_SIZE;
	struct mlx5e_priv *priv = hp->func_priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqtc;
	u32 *in;

	/* command buffer: create_rqt_in header followed by sz RQ numbers */
	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

	mlx5e_hairpin_fill_rqt_rqns(hp, rqtc);

	err = mlx5_core_create_rqt(mdev, in, inlen, &hp->indir_rqt.rqtn);
	if (!err)
		hp->indir_rqt.enabled = true;

	kvfree(in);
	return err;
}
6413f6d08d1SOr Gerlitz 
/* Create one indirect (RSS) TIR per traffic type, each pointing at the
 * hairpin indirection RQT and hashed with the function's RSS params.
 * On failure, unwinds only the TIRs created so far.
 */
static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	u32 in[MLX5_ST_SZ_DW(create_tir_in)];
	int tt, i, err;
	void *tirc;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		struct mlx5e_tirc_config ttconfig = mlx5e_tirc_get_default_config(tt);

		/* the command buffer is reused across iterations; clear it */
		memset(in, 0, MLX5_ST_SZ_BYTES(create_tir_in));
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

		MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
		MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
		MLX5_SET(tirc, tirc, indirect_table, hp->indir_rqt.rqtn);
		mlx5e_build_indir_tir_ctx_hash(&priv->rss_params, &ttconfig, tirc, false);

		err = mlx5_core_create_tir(hp->func_mdev, in,
					   MLX5_ST_SZ_BYTES(create_tir_in), &hp->indir_tirn[tt]);
		if (err) {
			mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err);
			goto err_destroy_tirs;
		}
	}
	return 0;

err_destroy_tirs:
	/* destroy only the TIRs successfully created before the failure */
	for (i = 0; i < tt; i++)
		mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[i]);
	return err;
}
6743f6d08d1SOr Gerlitz 
/* Destroy all MLX5E_NUM_INDIR_TIRS indirect TIRs created by
 * mlx5e_hairpin_create_indirect_tirs().
 */
static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp)
{
	int tt;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[tt]);
}
6823f6d08d1SOr Gerlitz 
6833f6d08d1SOr Gerlitz static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
6843f6d08d1SOr Gerlitz 					 struct ttc_params *ttc_params)
6853f6d08d1SOr Gerlitz {
6863f6d08d1SOr Gerlitz 	struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
6873f6d08d1SOr Gerlitz 	int tt;
6883f6d08d1SOr Gerlitz 
6893f6d08d1SOr Gerlitz 	memset(ttc_params, 0, sizeof(*ttc_params));
6903f6d08d1SOr Gerlitz 
6913f6d08d1SOr Gerlitz 	ttc_params->any_tt_tirn = hp->tirn;
6923f6d08d1SOr Gerlitz 
6933f6d08d1SOr Gerlitz 	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
6943f6d08d1SOr Gerlitz 		ttc_params->indir_tirn[tt] = hp->indir_tirn[tt];
6953f6d08d1SOr Gerlitz 
6966412bb39SEli Cohen 	ft_attr->max_fte = MLX5E_TTC_TABLE_SIZE;
6973f6d08d1SOr Gerlitz 	ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
6983f6d08d1SOr Gerlitz 	ft_attr->prio = MLX5E_TC_PRIO;
6993f6d08d1SOr Gerlitz }
7003f6d08d1SOr Gerlitz 
/* Set up the hairpin RSS datapath: indirection RQT -> indirect TIRs ->
 * TTC steering table.  Each step is unwound in reverse order on failure.
 */
static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	struct ttc_params ttc_params;
	int err;

	err = mlx5e_hairpin_create_indirect_rqt(hp);
	if (err)
		return err;

	err = mlx5e_hairpin_create_indirect_tirs(hp);
	if (err)
		goto err_create_indirect_tirs;

	mlx5e_hairpin_set_ttc_params(hp, &ttc_params);
	err = mlx5e_create_ttc_table(priv, &ttc_params, &hp->ttc);
	if (err)
		goto err_create_ttc_table;

	netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
		   hp->num_channels, hp->ttc.ft.t->id);

	return 0;

err_create_ttc_table:
	mlx5e_hairpin_destroy_indirect_tirs(hp);
err_create_indirect_tirs:
	mlx5e_destroy_rqt(priv, &hp->indir_rqt);

	return err;
}
7323f6d08d1SOr Gerlitz 
/* Undo mlx5e_hairpin_rss_init() in reverse order: TTC table first,
 * then the indirect TIRs, then the indirection RQT.
 */
static void mlx5e_hairpin_rss_cleanup(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;

	mlx5e_destroy_ttc_table(priv, &hp->ttc);
	mlx5e_hairpin_destroy_indirect_tirs(hp);
	mlx5e_destroy_rqt(priv, &hp->indir_rqt);
}
7413f6d08d1SOr Gerlitz 
74277ab67b7SOr Gerlitz static struct mlx5e_hairpin *
74377ab67b7SOr Gerlitz mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params,
74477ab67b7SOr Gerlitz 		     int peer_ifindex)
74577ab67b7SOr Gerlitz {
74677ab67b7SOr Gerlitz 	struct mlx5_core_dev *func_mdev, *peer_mdev;
74777ab67b7SOr Gerlitz 	struct mlx5e_hairpin *hp;
74877ab67b7SOr Gerlitz 	struct mlx5_hairpin *pair;
74977ab67b7SOr Gerlitz 	int err;
75077ab67b7SOr Gerlitz 
75177ab67b7SOr Gerlitz 	hp = kzalloc(sizeof(*hp), GFP_KERNEL);
75277ab67b7SOr Gerlitz 	if (!hp)
75377ab67b7SOr Gerlitz 		return ERR_PTR(-ENOMEM);
75477ab67b7SOr Gerlitz 
75577ab67b7SOr Gerlitz 	func_mdev = priv->mdev;
75677ab67b7SOr Gerlitz 	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
75777ab67b7SOr Gerlitz 
75877ab67b7SOr Gerlitz 	pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
75977ab67b7SOr Gerlitz 	if (IS_ERR(pair)) {
76077ab67b7SOr Gerlitz 		err = PTR_ERR(pair);
76177ab67b7SOr Gerlitz 		goto create_pair_err;
76277ab67b7SOr Gerlitz 	}
76377ab67b7SOr Gerlitz 	hp->pair = pair;
76477ab67b7SOr Gerlitz 	hp->func_mdev = func_mdev;
7653f6d08d1SOr Gerlitz 	hp->func_priv = priv;
7663f6d08d1SOr Gerlitz 	hp->num_channels = params->num_channels;
76777ab67b7SOr Gerlitz 
76877ab67b7SOr Gerlitz 	err = mlx5e_hairpin_create_transport(hp);
76977ab67b7SOr Gerlitz 	if (err)
77077ab67b7SOr Gerlitz 		goto create_transport_err;
77177ab67b7SOr Gerlitz 
7723f6d08d1SOr Gerlitz 	if (hp->num_channels > 1) {
7733f6d08d1SOr Gerlitz 		err = mlx5e_hairpin_rss_init(hp);
7743f6d08d1SOr Gerlitz 		if (err)
7753f6d08d1SOr Gerlitz 			goto rss_init_err;
7763f6d08d1SOr Gerlitz 	}
7773f6d08d1SOr Gerlitz 
77877ab67b7SOr Gerlitz 	return hp;
77977ab67b7SOr Gerlitz 
7803f6d08d1SOr Gerlitz rss_init_err:
7813f6d08d1SOr Gerlitz 	mlx5e_hairpin_destroy_transport(hp);
78277ab67b7SOr Gerlitz create_transport_err:
78377ab67b7SOr Gerlitz 	mlx5_core_hairpin_destroy(hp->pair);
78477ab67b7SOr Gerlitz create_pair_err:
78577ab67b7SOr Gerlitz 	kfree(hp);
78677ab67b7SOr Gerlitz 	return ERR_PTR(err);
78777ab67b7SOr Gerlitz }
78877ab67b7SOr Gerlitz 
78977ab67b7SOr Gerlitz static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp)
79077ab67b7SOr Gerlitz {
7913f6d08d1SOr Gerlitz 	if (hp->num_channels > 1)
7923f6d08d1SOr Gerlitz 		mlx5e_hairpin_rss_cleanup(hp);
79377ab67b7SOr Gerlitz 	mlx5e_hairpin_destroy_transport(hp);
79477ab67b7SOr Gerlitz 	mlx5_core_hairpin_destroy(hp->pair);
79577ab67b7SOr Gerlitz 	kvfree(hp);
79677ab67b7SOr Gerlitz }
79777ab67b7SOr Gerlitz 
798106be53bSOr Gerlitz static inline u32 hash_hairpin_info(u16 peer_vhca_id, u8 prio)
799106be53bSOr Gerlitz {
800106be53bSOr Gerlitz 	return (peer_vhca_id << 16 | prio);
801106be53bSOr Gerlitz }
802106be53bSOr Gerlitz 
8035c65c564SOr Gerlitz static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
804106be53bSOr Gerlitz 						     u16 peer_vhca_id, u8 prio)
8055c65c564SOr Gerlitz {
8065c65c564SOr Gerlitz 	struct mlx5e_hairpin_entry *hpe;
807106be53bSOr Gerlitz 	u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);
8085c65c564SOr Gerlitz 
8095c65c564SOr Gerlitz 	hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe,
810106be53bSOr Gerlitz 			       hairpin_hlist, hash_key) {
811e4f9abbdSVlad Buslov 		if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio) {
812e4f9abbdSVlad Buslov 			refcount_inc(&hpe->refcnt);
8135c65c564SOr Gerlitz 			return hpe;
8145c65c564SOr Gerlitz 		}
815e4f9abbdSVlad Buslov 	}
8165c65c564SOr Gerlitz 
8175c65c564SOr Gerlitz 	return NULL;
8185c65c564SOr Gerlitz }
8195c65c564SOr Gerlitz 
/* Drop one reference on a hairpin entry; the final reference removes it
 * from the hairpin hash table and destroys the underlying hairpin.
 *
 * refcount_dec_and_mutex_lock() returns true with hairpin_tbl_lock held
 * only when the count reached zero, so hash_del() cannot race with a
 * concurrent mlx5e_hairpin_get().
 */
static void mlx5e_hairpin_put(struct mlx5e_priv *priv,
			      struct mlx5e_hairpin_entry *hpe)
{
	/* no more hairpin flows for us, release the hairpin pair */
	if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &priv->fs.tc.hairpin_tbl_lock))
		return;
	hash_del(&hpe->hairpin_hlist);
	mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);

	/* hpe->hp may be an ERR_PTR when hairpin creation failed, or
	 * presumably NULL if creation never completed — guard both
	 */
	if (!IS_ERR_OR_NULL(hpe->hp)) {
		netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
			   dev_name(hpe->hp->pair->peer_mdev->device));

		mlx5e_hairpin_destroy(hpe->hp);
	}

	WARN_ON(!list_empty(&hpe->flows));
	kfree(hpe);
}
839e4f9abbdSVlad Buslov 
840106be53bSOr Gerlitz #define UNKNOWN_MATCH_PRIO 8
841106be53bSOr Gerlitz 
842106be53bSOr Gerlitz static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
843e98bedf5SEli Britstein 				  struct mlx5_flow_spec *spec, u8 *match_prio,
844e98bedf5SEli Britstein 				  struct netlink_ext_ack *extack)
845106be53bSOr Gerlitz {
846106be53bSOr Gerlitz 	void *headers_c, *headers_v;
847106be53bSOr Gerlitz 	u8 prio_val, prio_mask = 0;
848106be53bSOr Gerlitz 	bool vlan_present;
849106be53bSOr Gerlitz 
850106be53bSOr Gerlitz #ifdef CONFIG_MLX5_CORE_EN_DCB
851106be53bSOr Gerlitz 	if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) {
852e98bedf5SEli Britstein 		NL_SET_ERR_MSG_MOD(extack,
853e98bedf5SEli Britstein 				   "only PCP trust state supported for hairpin");
854106be53bSOr Gerlitz 		return -EOPNOTSUPP;
855106be53bSOr Gerlitz 	}
856106be53bSOr Gerlitz #endif
857106be53bSOr Gerlitz 	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
858106be53bSOr Gerlitz 	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
859106be53bSOr Gerlitz 
860106be53bSOr Gerlitz 	vlan_present = MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag);
861106be53bSOr Gerlitz 	if (vlan_present) {
862106be53bSOr Gerlitz 		prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
863106be53bSOr Gerlitz 		prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
864106be53bSOr Gerlitz 	}
865106be53bSOr Gerlitz 
866106be53bSOr Gerlitz 	if (!vlan_present || !prio_mask) {
867106be53bSOr Gerlitz 		prio_val = UNKNOWN_MATCH_PRIO;
868106be53bSOr Gerlitz 	} else if (prio_mask != 0x7) {
869e98bedf5SEli Britstein 		NL_SET_ERR_MSG_MOD(extack,
870e98bedf5SEli Britstein 				   "masked priority match not supported for hairpin");
871106be53bSOr Gerlitz 		return -EOPNOTSUPP;
872106be53bSOr Gerlitz 	}
873106be53bSOr Gerlitz 
874106be53bSOr Gerlitz 	*match_prio = prio_val;
875106be53bSOr Gerlitz 	return 0;
876106be53bSOr Gerlitz }
877106be53bSOr Gerlitz 
8785c65c564SOr Gerlitz static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
8795c65c564SOr Gerlitz 				  struct mlx5e_tc_flow *flow,
880e98bedf5SEli Britstein 				  struct mlx5e_tc_flow_parse_attr *parse_attr,
881e98bedf5SEli Britstein 				  struct netlink_ext_ack *extack)
8825c65c564SOr Gerlitz {
88398b66cb1SEli Britstein 	int peer_ifindex = parse_attr->mirred_ifindex[0];
8845c65c564SOr Gerlitz 	struct mlx5_hairpin_params params;
885d8822868SOr Gerlitz 	struct mlx5_core_dev *peer_mdev;
8865c65c564SOr Gerlitz 	struct mlx5e_hairpin_entry *hpe;
8875c65c564SOr Gerlitz 	struct mlx5e_hairpin *hp;
8883f6d08d1SOr Gerlitz 	u64 link_speed64;
8893f6d08d1SOr Gerlitz 	u32 link_speed;
890106be53bSOr Gerlitz 	u8 match_prio;
891d8822868SOr Gerlitz 	u16 peer_id;
8925c65c564SOr Gerlitz 	int err;
8935c65c564SOr Gerlitz 
894d8822868SOr Gerlitz 	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
895d8822868SOr Gerlitz 	if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
896e98bedf5SEli Britstein 		NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
8975c65c564SOr Gerlitz 		return -EOPNOTSUPP;
8985c65c564SOr Gerlitz 	}
8995c65c564SOr Gerlitz 
900d8822868SOr Gerlitz 	peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
901e98bedf5SEli Britstein 	err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio,
902e98bedf5SEli Britstein 				     extack);
903106be53bSOr Gerlitz 	if (err)
904106be53bSOr Gerlitz 		return err;
905b32accdaSVlad Buslov 
906b32accdaSVlad Buslov 	mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
907106be53bSOr Gerlitz 	hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
908db76ca24SVlad Buslov 	if (hpe) {
909db76ca24SVlad Buslov 		mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
910db76ca24SVlad Buslov 		wait_for_completion(&hpe->res_ready);
911db76ca24SVlad Buslov 
912db76ca24SVlad Buslov 		if (IS_ERR(hpe->hp)) {
913db76ca24SVlad Buslov 			err = -EREMOTEIO;
914db76ca24SVlad Buslov 			goto out_err;
915db76ca24SVlad Buslov 		}
9165c65c564SOr Gerlitz 		goto attach_flow;
917db76ca24SVlad Buslov 	}
9185c65c564SOr Gerlitz 
9195c65c564SOr Gerlitz 	hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
920b32accdaSVlad Buslov 	if (!hpe) {
921db76ca24SVlad Buslov 		mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
922db76ca24SVlad Buslov 		return -ENOMEM;
923b32accdaSVlad Buslov 	}
9245c65c564SOr Gerlitz 
92573edca73SVlad Buslov 	spin_lock_init(&hpe->flows_lock);
9265c65c564SOr Gerlitz 	INIT_LIST_HEAD(&hpe->flows);
927db76ca24SVlad Buslov 	INIT_LIST_HEAD(&hpe->dead_peer_wait_list);
928d8822868SOr Gerlitz 	hpe->peer_vhca_id = peer_id;
929106be53bSOr Gerlitz 	hpe->prio = match_prio;
930e4f9abbdSVlad Buslov 	refcount_set(&hpe->refcnt, 1);
931db76ca24SVlad Buslov 	init_completion(&hpe->res_ready);
932db76ca24SVlad Buslov 
933db76ca24SVlad Buslov 	hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist,
934db76ca24SVlad Buslov 		 hash_hairpin_info(peer_id, match_prio));
935db76ca24SVlad Buslov 	mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
9365c65c564SOr Gerlitz 
9375c65c564SOr Gerlitz 	params.log_data_size = 15;
9385c65c564SOr Gerlitz 	params.log_data_size = min_t(u8, params.log_data_size,
9395c65c564SOr Gerlitz 				     MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
9405c65c564SOr Gerlitz 	params.log_data_size = max_t(u8, params.log_data_size,
9415c65c564SOr Gerlitz 				     MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz));
9425c65c564SOr Gerlitz 
943eb9180f7SOr Gerlitz 	params.log_num_packets = params.log_data_size -
944eb9180f7SOr Gerlitz 				 MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev);
945eb9180f7SOr Gerlitz 	params.log_num_packets = min_t(u8, params.log_num_packets,
946eb9180f7SOr Gerlitz 				       MLX5_CAP_GEN(priv->mdev, log_max_hairpin_num_packets));
947eb9180f7SOr Gerlitz 
948eb9180f7SOr Gerlitz 	params.q_counter = priv->q_counter;
9493f6d08d1SOr Gerlitz 	/* set hairpin pair per each 50Gbs share of the link */
9502c81bfd5SHuy Nguyen 	mlx5e_port_max_linkspeed(priv->mdev, &link_speed);
9513f6d08d1SOr Gerlitz 	link_speed = max_t(u32, link_speed, 50000);
9523f6d08d1SOr Gerlitz 	link_speed64 = link_speed;
9533f6d08d1SOr Gerlitz 	do_div(link_speed64, 50000);
9543f6d08d1SOr Gerlitz 	params.num_channels = link_speed64;
9553f6d08d1SOr Gerlitz 
9565c65c564SOr Gerlitz 	hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
957db76ca24SVlad Buslov 	hpe->hp = hp;
958db76ca24SVlad Buslov 	complete_all(&hpe->res_ready);
9595c65c564SOr Gerlitz 	if (IS_ERR(hp)) {
9605c65c564SOr Gerlitz 		err = PTR_ERR(hp);
961db76ca24SVlad Buslov 		goto out_err;
9625c65c564SOr Gerlitz 	}
9635c65c564SOr Gerlitz 
964eb9180f7SOr Gerlitz 	netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
96527b942fbSParav Pandit 		   hp->tirn, hp->pair->rqn[0],
96627b942fbSParav Pandit 		   dev_name(hp->pair->peer_mdev->device),
967eb9180f7SOr Gerlitz 		   hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);
9685c65c564SOr Gerlitz 
9695c65c564SOr Gerlitz attach_flow:
9703f6d08d1SOr Gerlitz 	if (hpe->hp->num_channels > 1) {
971226f2ca3SVlad Buslov 		flow_flag_set(flow, HAIRPIN_RSS);
9723f6d08d1SOr Gerlitz 		flow->nic_attr->hairpin_ft = hpe->hp->ttc.ft.t;
9733f6d08d1SOr Gerlitz 	} else {
9745c65c564SOr Gerlitz 		flow->nic_attr->hairpin_tirn = hpe->hp->tirn;
9753f6d08d1SOr Gerlitz 	}
976b32accdaSVlad Buslov 
977e4f9abbdSVlad Buslov 	flow->hpe = hpe;
97873edca73SVlad Buslov 	spin_lock(&hpe->flows_lock);
9795c65c564SOr Gerlitz 	list_add(&flow->hairpin, &hpe->flows);
98073edca73SVlad Buslov 	spin_unlock(&hpe->flows_lock);
9813f6d08d1SOr Gerlitz 
9825c65c564SOr Gerlitz 	return 0;
9835c65c564SOr Gerlitz 
984db76ca24SVlad Buslov out_err:
985db76ca24SVlad Buslov 	mlx5e_hairpin_put(priv, hpe);
9865c65c564SOr Gerlitz 	return err;
9875c65c564SOr Gerlitz }
9885c65c564SOr Gerlitz 
9895c65c564SOr Gerlitz static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
9905c65c564SOr Gerlitz 				   struct mlx5e_tc_flow *flow)
9915c65c564SOr Gerlitz {
9925a7e5bcbSVlad Buslov 	/* flow wasn't fully initialized */
993e4f9abbdSVlad Buslov 	if (!flow->hpe)
9945a7e5bcbSVlad Buslov 		return;
9955a7e5bcbSVlad Buslov 
99673edca73SVlad Buslov 	spin_lock(&flow->hpe->flows_lock);
9975c65c564SOr Gerlitz 	list_del(&flow->hairpin);
99873edca73SVlad Buslov 	spin_unlock(&flow->hpe->flows_lock);
99973edca73SVlad Buslov 
1000e4f9abbdSVlad Buslov 	mlx5e_hairpin_put(priv, flow->hpe);
1001e4f9abbdSVlad Buslov 	flow->hpe = NULL;
10025c65c564SOr Gerlitz }
10035c65c564SOr Gerlitz 
/* Install a NIC (non-eswitch) TC flow: resolve the destinations
 * (hairpin table/TIR, vlan table and/or flow counter), attach a modify
 * header when requested, lazily create the TC flow table under t_lock,
 * and add the rule.  Returns 0 or a negative errno.
 */
static int
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow_parse_attr *parse_attr,
		      struct mlx5e_tc_flow *flow,
		      struct netlink_ext_ack *extack)
{
	struct mlx5_flow_context *flow_context = &parse_attr->spec.flow_context;
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_flow_act flow_act = {
		.action = attr->action,
		.flags    = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_fc *counter = NULL;
	int err, dest_ix = 0;

	flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
	flow_context->flow_tag = attr->flow_tag;

	/* hairpin flows forward either to the hairpin RSS TTC table or
	 * directly to the hairpin TIR; otherwise FWD goes to the vlan table
	 */
	if (flow_flag_test(flow, HAIRPIN)) {
		err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
		if (err)
			return err;

		if (flow_flag_test(flow, HAIRPIN_RSS)) {
			dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[dest_ix].ft = attr->hairpin_ft;
		} else {
			dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
			dest[dest_ix].tir_num = attr->hairpin_tirn;
		}
		dest_ix++;
	} else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest[dest_ix].ft = priv->fs.vlan.ft.t;
		dest_ix++;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(dev, true);
		if (IS_ERR(counter))
			return PTR_ERR(counter);

		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[dest_ix].counter_id = mlx5_fc_id(counter);
		dest_ix++;
		attr->counter = counter;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
		/* the parsed action buffer is consumed/freed either way;
		 * only err decides whether the attach succeeded
		 */
		flow_act.modify_hdr = attr->modify_hdr;
		dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
		if (err)
			return err;
	}

	/* first offloaded filter creates the shared TC flow table */
	mutex_lock(&priv->fs.tc.t_lock);
	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
		struct mlx5_flow_table_attr ft_attr = {};
		int tc_grp_size, tc_tbl_size, tc_num_grps;
		u32 max_flow_counter;

		max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
				    MLX5_CAP_GEN(dev, max_flow_counter_15_0);

		/* size the table by counter capacity, capped by the device's
		 * max flow table size
		 */
		tc_grp_size = min_t(int, max_flow_counter, MLX5E_TC_TABLE_MAX_GROUP_SIZE);

		tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
				    BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));
		tc_num_grps = MLX5E_TC_TABLE_NUM_GROUPS;

		ft_attr.prio = MLX5E_TC_PRIO;
		ft_attr.max_fte = tc_tbl_size;
		ft_attr.level = MLX5E_TC_FT_LEVEL;
		ft_attr.autogroup.max_num_groups = tc_num_grps;
		priv->fs.tc.t =
			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
							    &ft_attr);
		if (IS_ERR(priv->fs.tc.t)) {
			mutex_unlock(&priv->fs.tc.t_lock);
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to create tc offload table\n");
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			return PTR_ERR(priv->fs.tc.t);
		}
	}

	if (attr->match_level != MLX5_MATCH_NONE)
		parse_attr->spec.match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	flow->rule[0] = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
					    &flow_act, dest, dest_ix);
	mutex_unlock(&priv->fs.tc.t_lock);

	return PTR_ERR_OR_ZERO(flow->rule[0]);
}
1103e8f887acSAmir Vadai 
/* Tear down a NIC TC flow: delete the rule and its counter, destroy the
 * TC flow table when the last offloaded filter is gone, and release any
 * modify-header / hairpin resources the flow held.
 */
static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	struct mlx5_fc *counter = NULL;

	counter = attr->counter;
	/* rule[0] may hold an error pointer if the offload failed mid-way */
	if (!IS_ERR_OR_NULL(flow->rule[0]))
		mlx5_del_flow_rules(flow->rule[0]);
	mlx5_fc_destroy(priv->mdev, counter);

	mutex_lock(&priv->fs.tc.t_lock);
	if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) && priv->fs.tc.t) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
	mutex_unlock(&priv->fs.tc.t_lock);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);

	if (flow_flag_test(flow, HAIRPIN))
		mlx5e_hairpin_flow_del(priv, flow);
}
1128d85cdccbSOr Gerlitz 
1129aa0cbbaeSOr Gerlitz static void mlx5e_detach_encap(struct mlx5e_priv *priv,
11308c4dc42bSEli Britstein 			       struct mlx5e_tc_flow *flow, int out_index);
1131aa0cbbaeSOr Gerlitz 
11323c37745eSOr Gerlitz static int mlx5e_attach_encap(struct mlx5e_priv *priv,
1133e98bedf5SEli Britstein 			      struct mlx5e_tc_flow *flow,
1134733d4f36SRoi Dayan 			      struct net_device *mirred_dev,
1135733d4f36SRoi Dayan 			      int out_index,
11368c4dc42bSEli Britstein 			      struct netlink_ext_ack *extack,
11370ad060eeSRoi Dayan 			      struct net_device **encap_dev,
11380ad060eeSRoi Dayan 			      bool *encap_valid);
11393c37745eSOr Gerlitz 
11406d2a3ed0SOr Gerlitz static struct mlx5_flow_handle *
11416d2a3ed0SOr Gerlitz mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
11426d2a3ed0SOr Gerlitz 			   struct mlx5e_tc_flow *flow,
11436d2a3ed0SOr Gerlitz 			   struct mlx5_flow_spec *spec,
11446d2a3ed0SOr Gerlitz 			   struct mlx5_esw_flow_attr *attr)
11456d2a3ed0SOr Gerlitz {
11466d2a3ed0SOr Gerlitz 	struct mlx5_flow_handle *rule;
11476d2a3ed0SOr Gerlitz 
11486d2a3ed0SOr Gerlitz 	rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
11496d2a3ed0SOr Gerlitz 	if (IS_ERR(rule))
11506d2a3ed0SOr Gerlitz 		return rule;
11516d2a3ed0SOr Gerlitz 
1152e85e02baSEli Britstein 	if (attr->split_count) {
11536d2a3ed0SOr Gerlitz 		flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
11546d2a3ed0SOr Gerlitz 		if (IS_ERR(flow->rule[1])) {
11556d2a3ed0SOr Gerlitz 			mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
11566d2a3ed0SOr Gerlitz 			return flow->rule[1];
11576d2a3ed0SOr Gerlitz 		}
11586d2a3ed0SOr Gerlitz 	}
11596d2a3ed0SOr Gerlitz 
11606d2a3ed0SOr Gerlitz 	return rule;
11616d2a3ed0SOr Gerlitz }
11626d2a3ed0SOr Gerlitz 
11636d2a3ed0SOr Gerlitz static void
11646d2a3ed0SOr Gerlitz mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
11656d2a3ed0SOr Gerlitz 			     struct mlx5e_tc_flow *flow,
11666d2a3ed0SOr Gerlitz 			   struct mlx5_esw_flow_attr *attr)
11676d2a3ed0SOr Gerlitz {
1168226f2ca3SVlad Buslov 	flow_flag_clear(flow, OFFLOADED);
11696d2a3ed0SOr Gerlitz 
1170e85e02baSEli Britstein 	if (attr->split_count)
11716d2a3ed0SOr Gerlitz 		mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);
11726d2a3ed0SOr Gerlitz 
11736d2a3ed0SOr Gerlitz 	mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
11746d2a3ed0SOr Gerlitz }
11756d2a3ed0SOr Gerlitz 
11765dbe906fSPaul Blakey static struct mlx5_flow_handle *
11775dbe906fSPaul Blakey mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
11785dbe906fSPaul Blakey 			      struct mlx5e_tc_flow *flow,
11795dbe906fSPaul Blakey 			      struct mlx5_flow_spec *spec,
11805dbe906fSPaul Blakey 			      struct mlx5_esw_flow_attr *slow_attr)
11815dbe906fSPaul Blakey {
11825dbe906fSPaul Blakey 	struct mlx5_flow_handle *rule;
11835dbe906fSPaul Blakey 
11845dbe906fSPaul Blakey 	memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
1185154e62abSOr Gerlitz 	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
11862be09de7SDavid S. Miller 	slow_attr->split_count = 0;
118739ac237cSPaul Blakey 	slow_attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
11885dbe906fSPaul Blakey 
11895dbe906fSPaul Blakey 	rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
11905dbe906fSPaul Blakey 	if (!IS_ERR(rule))
1191226f2ca3SVlad Buslov 		flow_flag_set(flow, SLOW);
11925dbe906fSPaul Blakey 
11935dbe906fSPaul Blakey 	return rule;
11945dbe906fSPaul Blakey }
11955dbe906fSPaul Blakey 
/* Remove a flow from the slow path table, rebuilding the same
 * downgraded attrs that mlx5e_tc_offload_to_slow_path() used, and
 * clear the SLOW flag.
 */
static void
mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5_esw_flow_attr *slow_attr)
{
	memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	slow_attr->split_count = 0;
	slow_attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
	mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
	flow_flag_clear(flow, SLOW);
}
12085dbe906fSPaul Blakey 
/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
 * function.
 *
 * Queue a flow whose offload prerequisites aren't met yet: mark it
 * NOT_READY and append it to the given unready list for later retry.
 */
static void unready_flow_add(struct mlx5e_tc_flow *flow,
			     struct list_head *unready_flows)
{
	flow_flag_set(flow, NOT_READY);
	list_add_tail(&flow->unready, unready_flows);
}
1218ad86755bSVlad Buslov 
/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
 * function.
 *
 * Remove a flow from its unready list and clear its NOT_READY flag.
 */
static void unready_flow_del(struct mlx5e_tc_flow *flow)
{
	list_del(&flow->unready);
	flow_flag_clear(flow, NOT_READY);
}
1227ad86755bSVlad Buslov 
1228b4a23329SRoi Dayan static void add_unready_flow(struct mlx5e_tc_flow *flow)
1229b4a23329SRoi Dayan {
1230b4a23329SRoi Dayan 	struct mlx5_rep_uplink_priv *uplink_priv;
1231b4a23329SRoi Dayan 	struct mlx5e_rep_priv *rpriv;
1232b4a23329SRoi Dayan 	struct mlx5_eswitch *esw;
1233b4a23329SRoi Dayan 
1234b4a23329SRoi Dayan 	esw = flow->priv->mdev->priv.eswitch;
1235b4a23329SRoi Dayan 	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
1236b4a23329SRoi Dayan 	uplink_priv = &rpriv->uplink_priv;
1237b4a23329SRoi Dayan 
1238ad86755bSVlad Buslov 	mutex_lock(&uplink_priv->unready_flows_lock);
1239ad86755bSVlad Buslov 	unready_flow_add(flow, &uplink_priv->unready_flows);
1240ad86755bSVlad Buslov 	mutex_unlock(&uplink_priv->unready_flows_lock);
1241b4a23329SRoi Dayan }
1242b4a23329SRoi Dayan 
/* Locked counterpart of add_unready_flow(): removes the flow from the
 * uplink representor's unready list under unready_flows_lock.
 */
1243b4a23329SRoi Dayan static void remove_unready_flow(struct mlx5e_tc_flow *flow)
1244b4a23329SRoi Dayan {
1245ad86755bSVlad Buslov 	struct mlx5_rep_uplink_priv *uplink_priv;
1246ad86755bSVlad Buslov 	struct mlx5e_rep_priv *rpriv;
1247ad86755bSVlad Buslov 	struct mlx5_eswitch *esw;
1248ad86755bSVlad Buslov 
1249ad86755bSVlad Buslov 	esw = flow->priv->mdev->priv.eswitch;
1250ad86755bSVlad Buslov 	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
1251ad86755bSVlad Buslov 	uplink_priv = &rpriv->uplink_priv;
1252ad86755bSVlad Buslov 
1253ad86755bSVlad Buslov 	mutex_lock(&uplink_priv->unready_flows_lock);
1254ad86755bSVlad Buslov 	unready_flow_del(flow);
1255ad86755bSVlad Buslov 	mutex_unlock(&uplink_priv->unready_flows_lock);
1256b4a23329SRoi Dayan }
1257b4a23329SRoi Dayan 
/* Offload a TC flow into the eswitch FDB.
 *
 * Validates chain/prio against device limits, attaches encap entries,
 * VLAN actions, header-modify actions and an optional flow counter, then
 * installs either the fast-path FDB rule or, when some encap destination
 * has no valid neighbour yet, a "slow path" rule instead.
 *
 * Returns 0 on success or a negative errno. Note: intermediate resources
 * attached before a failure are not rolled back here; presumably the
 * caller releases them via the flow teardown path — TODO confirm.
 */
1258c83954abSRabie Loulou static int
125974491de9SMark Bloch mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
1260e98bedf5SEli Britstein 		      struct mlx5e_tc_flow *flow,
1261e98bedf5SEli Britstein 		      struct netlink_ext_ack *extack)
1262adb4c123SOr Gerlitz {
1263adb4c123SOr Gerlitz 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1264aa0cbbaeSOr Gerlitz 	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
12657040632dSTonghao Zhang 	struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
12663c37745eSOr Gerlitz 	struct net_device *out_dev, *encap_dev = NULL;
1267b8aee822SMark Bloch 	struct mlx5_fc *counter = NULL;
12683c37745eSOr Gerlitz 	struct mlx5e_rep_priv *rpriv;
12693c37745eSOr Gerlitz 	struct mlx5e_priv *out_priv;
12700ad060eeSRoi Dayan 	bool encap_valid = true;
127139ac237cSPaul Blakey 	u32 max_prio, max_chain;
12720ad060eeSRoi Dayan 	int err = 0;
1273f493f155SEli Britstein 	int out_index;
12748b32580dSOr Gerlitz 
	/* FW without prio support only accepts the default priority (1). */
127539ac237cSPaul Blakey 	if (!mlx5_esw_chains_prios_supported(esw) && attr->prio != 1) {
1276d14f6f2aSOr Gerlitz 		NL_SET_ERR_MSG(extack, "E-switch priorities unsupported, upgrade FW");
1277d14f6f2aSOr Gerlitz 		return -EOPNOTSUPP;
1278d14f6f2aSOr Gerlitz 	}
1279e52c2802SPaul Blakey 
128084179981SPaul Blakey 	/* We check chain range only for tc flows.
128184179981SPaul Blakey 	 * For ft flows, we checked attr->chain was originally 0 and set it to
128284179981SPaul Blakey 	 * FDB_FT_CHAIN which is outside tc range.
128384179981SPaul Blakey 	 * See mlx5e_rep_setup_ft_cb().
128484179981SPaul Blakey 	 */
128539ac237cSPaul Blakey 	max_chain = mlx5_esw_chains_get_chain_range(esw);
128684179981SPaul Blakey 	if (!mlx5e_is_ft_flow(flow) && attr->chain > max_chain) {
1287bf07aa73SPaul Blakey 		NL_SET_ERR_MSG(extack, "Requested chain is out of supported range");
12885a7e5bcbSVlad Buslov 		return -EOPNOTSUPP;
1289bf07aa73SPaul Blakey 	}
1290bf07aa73SPaul Blakey 
129139ac237cSPaul Blakey 	max_prio = mlx5_esw_chains_get_prio_range(esw);
1292bf07aa73SPaul Blakey 	if (attr->prio > max_prio) {
1293bf07aa73SPaul Blakey 		NL_SET_ERR_MSG(extack, "Requested priority is out of supported range");
12945a7e5bcbSVlad Buslov 		return -EOPNOTSUPP;
1295bf07aa73SPaul Blakey 	}
1296bf07aa73SPaul Blakey 
	/* Attach an encap entry for every forward destination that needs
	 * encapsulation; mlx5e_attach_encap() clears encap_valid when the
	 * destination's neighbour is not resolved yet.
	 */
1297f493f155SEli Britstein 	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
12988c4dc42bSEli Britstein 		int mirred_ifindex;
12998c4dc42bSEli Britstein 
1300f493f155SEli Britstein 		if (!(attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
1301f493f155SEli Britstein 			continue;
1302f493f155SEli Britstein 
13037040632dSTonghao Zhang 		mirred_ifindex = parse_attr->mirred_ifindex[out_index];
13043c37745eSOr Gerlitz 		out_dev = __dev_get_by_index(dev_net(priv->netdev),
13058c4dc42bSEli Britstein 					     mirred_ifindex);
1306733d4f36SRoi Dayan 		err = mlx5e_attach_encap(priv, flow, out_dev, out_index,
13070ad060eeSRoi Dayan 					 extack, &encap_dev, &encap_valid);
13080ad060eeSRoi Dayan 		if (err)
13095a7e5bcbSVlad Buslov 			return err;
13100ad060eeSRoi Dayan 
13113c37745eSOr Gerlitz 		out_priv = netdev_priv(encap_dev);
13123c37745eSOr Gerlitz 		rpriv = out_priv->ppriv;
13131cc26d74SEli Britstein 		attr->dests[out_index].rep = rpriv->rep;
13141cc26d74SEli Britstein 		attr->dests[out_index].mdev = out_priv->mdev;
13153c37745eSOr Gerlitz 	}
13163c37745eSOr Gerlitz 
13178b32580dSOr Gerlitz 	err = mlx5_eswitch_add_vlan_action(esw, attr);
1318c83954abSRabie Loulou 	if (err)
13195a7e5bcbSVlad Buslov 		return err;
1320adb4c123SOr Gerlitz 
	/* The parsed mod-hdr actions buffer is only needed to build the HW
	 * context, so it is freed right after the attach attempt.
	 */
1321d7e75a32SOr Gerlitz 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
13221a9527bbSOr Gerlitz 		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
13236ae4a6a5SPaul Blakey 		dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
1324c83954abSRabie Loulou 		if (err)
13255a7e5bcbSVlad Buslov 			return err;
1326d7e75a32SOr Gerlitz 	}
1327d7e75a32SOr Gerlitz 
	/* Allocate a HW flow counter when the COUNT action was requested. */
1328b8aee822SMark Bloch 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
1329f9392795SShahar Klein 		counter = mlx5_fc_create(attr->counter_dev, true);
13305a7e5bcbSVlad Buslov 		if (IS_ERR(counter))
13315a7e5bcbSVlad Buslov 			return PTR_ERR(counter);
1332b8aee822SMark Bloch 
1333b8aee822SMark Bloch 		attr->counter = counter;
1334b8aee822SMark Bloch 	}
1335b8aee822SMark Bloch 
13360ad060eeSRoi Dayan 	/* we get here if one of the following takes place:
13370ad060eeSRoi Dayan 	 * (1) there's no error
13380ad060eeSRoi Dayan 	 * (2) there's an encap action and we don't have valid neigh
13393c37745eSOr Gerlitz 	 */
13400ad060eeSRoi Dayan 	if (!encap_valid) {
13415dbe906fSPaul Blakey 		/* continue with goto slow path rule instead */
13425dbe906fSPaul Blakey 		struct mlx5_esw_flow_attr slow_attr;
13435dbe906fSPaul Blakey 
13445dbe906fSPaul Blakey 		flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec, &slow_attr);
13455dbe906fSPaul Blakey 	} else {
13466d2a3ed0SOr Gerlitz 		flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);
13475dbe906fSPaul Blakey 	}
13485dbe906fSPaul Blakey 
13495a7e5bcbSVlad Buslov 	if (IS_ERR(flow->rule[0]))
13505a7e5bcbSVlad Buslov 		return PTR_ERR(flow->rule[0]);
1351226f2ca3SVlad Buslov 	else
1352226f2ca3SVlad Buslov 		flow_flag_set(flow, OFFLOADED);
1353c83954abSRabie Loulou 
13545dbe906fSPaul Blakey 	return 0;
1355aa0cbbaeSOr Gerlitz }
1356d85cdccbSOr Gerlitz 
/* Return true if the flow's match value carries non-zero data in GENEVE
 * TLV option 0, i.e. the flow matches on a tunnel option and the teardown
 * path must release the corresponding TLV option object.
 */
13579272e3dfSYevgeny Kliteynik static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow)
13589272e3dfSYevgeny Kliteynik {
13599272e3dfSYevgeny Kliteynik 	struct mlx5_flow_spec *spec = &flow->esw_attr->parse_attr->spec;
13609272e3dfSYevgeny Kliteynik 	void *headers_v = MLX5_ADDR_OF(fte_match_param,
13619272e3dfSYevgeny Kliteynik 				       spec->match_value,
13629272e3dfSYevgeny Kliteynik 				       misc_parameters_3);
13639272e3dfSYevgeny Kliteynik 	u32 geneve_tlv_opt_0_data = MLX5_GET(fte_match_set_misc3,
13649272e3dfSYevgeny Kliteynik 					     headers_v,
13659272e3dfSYevgeny Kliteynik 					     geneve_tlv_option_0_data);
13669272e3dfSYevgeny Kliteynik 
13679272e3dfSYevgeny Kliteynik 	return !!geneve_tlv_opt_0_data;
13689272e3dfSYevgeny Kliteynik }
13699272e3dfSYevgeny Kliteynik 
/* Tear down an FDB-offloaded flow: unoffload the HW rule (slow or fast
 * path), then release VLAN actions, encap entries, parse attributes,
 * mod-hdr context and the flow counter, roughly in reverse order of
 * mlx5e_tc_add_fdb_flow().
 */
1370d85cdccbSOr Gerlitz static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
1371d85cdccbSOr Gerlitz 				  struct mlx5e_tc_flow *flow)
1372d85cdccbSOr Gerlitz {
1373d85cdccbSOr Gerlitz 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1374d7e75a32SOr Gerlitz 	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
13755dbe906fSPaul Blakey 	struct mlx5_esw_flow_attr slow_attr;
1376f493f155SEli Britstein 	int out_index;
1377d85cdccbSOr Gerlitz 
13780a7fcb78SPaul Blakey 	mlx5e_put_flow_tunnel_id(flow);
13790a7fcb78SPaul Blakey 
	/* A flow still on the unready list was never offloaded to HW;
	 * just dequeue it and free the parse attributes.
	 */
1380226f2ca3SVlad Buslov 	if (flow_flag_test(flow, NOT_READY)) {
1381b4a23329SRoi Dayan 		remove_unready_flow(flow);
1382ef06c9eeSRoi Dayan 		kvfree(attr->parse_attr);
1383ef06c9eeSRoi Dayan 		return;
1384ef06c9eeSRoi Dayan 	}
1385ef06c9eeSRoi Dayan 
1386226f2ca3SVlad Buslov 	if (mlx5e_is_offloaded_flow(flow)) {
1387226f2ca3SVlad Buslov 		if (flow_flag_test(flow, SLOW))
13885dbe906fSPaul Blakey 			mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr);
13895dbe906fSPaul Blakey 		else
13905dbe906fSPaul Blakey 			mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
13915dbe906fSPaul Blakey 	}
1392d85cdccbSOr Gerlitz 
13939272e3dfSYevgeny Kliteynik 	if (mlx5_flow_has_geneve_opt(flow))
13949272e3dfSYevgeny Kliteynik 		mlx5_geneve_tlv_option_del(priv->mdev->geneve);
13959272e3dfSYevgeny Kliteynik 
1396513f8f7fSOr Gerlitz 	mlx5_eswitch_del_vlan_action(esw, attr);
1397d85cdccbSOr Gerlitz 
	/* Release each encap destination and its copied tunnel info. */
1398f493f155SEli Britstein 	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
13992a4b6526SVlad Buslov 		if (attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP) {
14008c4dc42bSEli Britstein 			mlx5e_detach_encap(priv, flow, out_index);
14012a4b6526SVlad Buslov 			kfree(attr->parse_attr->tun_info[out_index]);
14022a4b6526SVlad Buslov 		}
1403f493f155SEli Britstein 	kvfree(attr->parse_attr);
1404d7e75a32SOr Gerlitz 
1405513f8f7fSOr Gerlitz 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
14061a9527bbSOr Gerlitz 		mlx5e_detach_mod_hdr(priv, flow);
1407b8aee822SMark Bloch 
1408b8aee822SMark Bloch 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
1409f9392795SShahar Klein 		mlx5_fc_destroy(attr->counter_dev, attr->counter);
1410d85cdccbSOr Gerlitz }
1411d85cdccbSOr Gerlitz 
/* Called when the encap entry's neighbour becomes valid: allocate the HW
 * packet-reformat context for @e, then for each offloaded flow on
 * @flow_list whose encap destinations are all valid, replace its slow
 * path rule with the fast path (encap) FDB rule.
 */
1412232c0013SHadar Hen Zion void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
14132a1f1768SVlad Buslov 			      struct mlx5e_encap_entry *e,
14142a1f1768SVlad Buslov 			      struct list_head *flow_list)
1415232c0013SHadar Hen Zion {
14163c37745eSOr Gerlitz 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
14175dbe906fSPaul Blakey 	struct mlx5_esw_flow_attr slow_attr, *esw_attr;
14186d2a3ed0SOr Gerlitz 	struct mlx5_flow_handle *rule;
14196d2a3ed0SOr Gerlitz 	struct mlx5_flow_spec *spec;
1420232c0013SHadar Hen Zion 	struct mlx5e_tc_flow *flow;
1421232c0013SHadar Hen Zion 	int err;
1422232c0013SHadar Hen Zion 
14232b688ea5SMaor Gottlieb 	e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
142454c177caSOz Shlomo 						     e->reformat_type,
1425232c0013SHadar Hen Zion 						     e->encap_size, e->encap_header,
14262b688ea5SMaor Gottlieb 						     MLX5_FLOW_NAMESPACE_FDB);
14272b688ea5SMaor Gottlieb 	if (IS_ERR(e->pkt_reformat)) {
14282b688ea5SMaor Gottlieb 		mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %lu\n",
14292b688ea5SMaor Gottlieb 			       PTR_ERR(e->pkt_reformat));
1430232c0013SHadar Hen Zion 		return;
1431232c0013SHadar Hen Zion 	}
1432232c0013SHadar Hen Zion 	e->flags |= MLX5_ENCAP_ENTRY_VALID;
1433f6dfb4c3SHadar Hen Zion 	mlx5e_rep_queue_neigh_stats_work(priv);
1434232c0013SHadar Hen Zion 
14352a1f1768SVlad Buslov 	list_for_each_entry(flow, flow_list, tmp_list) {
14368c4dc42bSEli Britstein 		bool all_flow_encaps_valid = true;
14378c4dc42bSEli Britstein 		int i;
14388c4dc42bSEli Britstein 
143995435ad7SVlad Buslov 		if (!mlx5e_is_offloaded_flow(flow))
144095435ad7SVlad Buslov 			continue;
14413c37745eSOr Gerlitz 		esw_attr = flow->esw_attr;
14426d2a3ed0SOr Gerlitz 		spec = &esw_attr->parse_attr->spec;
14436d2a3ed0SOr Gerlitz 
		/* tmp_efi_index identifies this flow's destination that uses
		 * encap entry @e; mark it valid with the new reformat context.
		 */
14442b688ea5SMaor Gottlieb 		esw_attr->dests[flow->tmp_efi_index].pkt_reformat = e->pkt_reformat;
14452a1f1768SVlad Buslov 		esw_attr->dests[flow->tmp_efi_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
14468c4dc42bSEli Britstein 		/* Flow can be associated with multiple encap entries.
14478c4dc42bSEli Britstein 		 * Before offloading the flow verify that all of them have
14488c4dc42bSEli Britstein 		 * a valid neighbour.
14498c4dc42bSEli Britstein 		 */
14508c4dc42bSEli Britstein 		for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
14518c4dc42bSEli Britstein 			if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP))
14528c4dc42bSEli Britstein 				continue;
14538c4dc42bSEli Britstein 			if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP_VALID)) {
14548c4dc42bSEli Britstein 				all_flow_encaps_valid = false;
14558c4dc42bSEli Britstein 				break;
14568c4dc42bSEli Britstein 			}
14578c4dc42bSEli Britstein 		}
14588c4dc42bSEli Britstein 		/* Do not offload flows with unresolved neighbors */
14598c4dc42bSEli Britstein 		if (!all_flow_encaps_valid)
14602a1f1768SVlad Buslov 			continue;
14615dbe906fSPaul Blakey 		/* update from slow path rule to encap rule */
14626d2a3ed0SOr Gerlitz 		rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, esw_attr);
14636d2a3ed0SOr Gerlitz 		if (IS_ERR(rule)) {
14646d2a3ed0SOr Gerlitz 			err = PTR_ERR(rule);
1465232c0013SHadar Hen Zion 			mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
1466232c0013SHadar Hen Zion 				       err);
14672a1f1768SVlad Buslov 			continue;
1468232c0013SHadar Hen Zion 		}
14695dbe906fSPaul Blakey 
		/* Fast path rule is in place; only now remove the slow one. */
14705dbe906fSPaul Blakey 		mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr);
14716d2a3ed0SOr Gerlitz 		flow->rule[0] = rule;
1472226f2ca3SVlad Buslov 		/* was unset when slow path rule removed */
1473226f2ca3SVlad Buslov 		flow_flag_set(flow, OFFLOADED);
1474232c0013SHadar Hen Zion 	}
1475232c0013SHadar Hen Zion }
1476232c0013SHadar Hen Zion 
/* Inverse of mlx5e_tc_encap_flows_add(): the neighbour is no longer
 * valid, so move each offloaded flow on @flow_list back from its encap
 * (fast path) rule to a slow path rule, then release the encap entry's
 * packet-reformat context.
 */
1477232c0013SHadar Hen Zion void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
14782a1f1768SVlad Buslov 			      struct mlx5e_encap_entry *e,
14792a1f1768SVlad Buslov 			      struct list_head *flow_list)
1480232c0013SHadar Hen Zion {
14813c37745eSOr Gerlitz 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
14825dbe906fSPaul Blakey 	struct mlx5_esw_flow_attr slow_attr;
14835dbe906fSPaul Blakey 	struct mlx5_flow_handle *rule;
14845dbe906fSPaul Blakey 	struct mlx5_flow_spec *spec;
1485232c0013SHadar Hen Zion 	struct mlx5e_tc_flow *flow;
14865dbe906fSPaul Blakey 	int err;
1487232c0013SHadar Hen Zion 
14882a1f1768SVlad Buslov 	list_for_each_entry(flow, flow_list, tmp_list) {
148995435ad7SVlad Buslov 		if (!mlx5e_is_offloaded_flow(flow))
149095435ad7SVlad Buslov 			continue;
14915dbe906fSPaul Blakey 		spec = &flow->esw_attr->parse_attr->spec;
14925dbe906fSPaul Blakey 
14935dbe906fSPaul Blakey 		/* update from encap rule to slow path rule */
14945dbe906fSPaul Blakey 		rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec, &slow_attr);
14958c4dc42bSEli Britstein 		/* mark the flow's encap dest as non-valid */
14962a1f1768SVlad Buslov 		flow->esw_attr->dests[flow->tmp_efi_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;
14975dbe906fSPaul Blakey 
14985dbe906fSPaul Blakey 		if (IS_ERR(rule)) {
14995dbe906fSPaul Blakey 			err = PTR_ERR(rule);
15005dbe906fSPaul Blakey 			mlx5_core_warn(priv->mdev, "Failed to update slow path (encap) flow, %d\n",
15015dbe906fSPaul Blakey 				       err);
15022a1f1768SVlad Buslov 			continue;
15035dbe906fSPaul Blakey 		}
15045dbe906fSPaul Blakey 
		/* Slow path rule is installed; remove the fast path rule. */
15056d2a3ed0SOr Gerlitz 		mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->esw_attr);
15065dbe906fSPaul Blakey 		flow->rule[0] = rule;
1507226f2ca3SVlad Buslov 		/* was unset when fast path rule removed */
1508226f2ca3SVlad Buslov 		flow_flag_set(flow, OFFLOADED);
1509232c0013SHadar Hen Zion 	}
1510232c0013SHadar Hen Zion 
151161c806daSOr Gerlitz 	/* we know that the encap is valid */
1512232c0013SHadar Hen Zion 	e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
15132b688ea5SMaor Gottlieb 	mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
1514232c0013SHadar Hen Zion }
1515232c0013SHadar Hen Zion 
/* Return the flow's HW counter from the eswitch or NIC attributes,
 * depending on which offload path the flow belongs to.
 */
1516b8aee822SMark Bloch static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
1517b8aee822SMark Bloch {
1518226f2ca3SVlad Buslov 	if (mlx5e_is_eswitch_flow(flow))
1519b8aee822SMark Bloch 		return flow->esw_attr->counter;
1520b8aee822SMark Bloch 	else
1521b8aee822SMark Bloch 		return flow->nic_attr->counter;
1522b8aee822SMark Bloch }
1523b8aee822SMark Bloch 
15242a1f1768SVlad Buslov /* Takes reference to all flows attached to encap and adds the flows to
15252a1f1768SVlad Buslov  * flow_list using 'tmp_list' list_head in mlx5e_tc_flow.
15262a1f1768SVlad Buslov  */
15272a1f1768SVlad Buslov void mlx5e_take_all_encap_flows(struct mlx5e_encap_entry *e, struct list_head *flow_list)
15282a1f1768SVlad Buslov {
15292a1f1768SVlad Buslov 	struct encap_flow_item *efi;
15302a1f1768SVlad Buslov 	struct mlx5e_tc_flow *flow;
15312a1f1768SVlad Buslov 
15322a1f1768SVlad Buslov 	list_for_each_entry(efi, &e->flows, list) {
15332a1f1768SVlad Buslov 		flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
		/* Skip flows whose reference count already dropped to zero. */
15342a1f1768SVlad Buslov 		if (IS_ERR(mlx5e_flow_get(flow)))
15352a1f1768SVlad Buslov 			continue;
		/* Make sure flow initialization has finished before use. */
153695435ad7SVlad Buslov 		wait_for_completion(&flow->init_done);
15372a1f1768SVlad Buslov 
		/* Remember which of the flow's encaps points at @e. */
15382a1f1768SVlad Buslov 		flow->tmp_efi_index = efi->index;
15392a1f1768SVlad Buslov 		list_add(&flow->tmp_list, flow_list);
15402a1f1768SVlad Buslov 	}
15412a1f1768SVlad Buslov }
15422a1f1768SVlad Buslov 
15436a06c2f7SVlad Buslov /* Iterate over tmp_list of flows attached to flow_list head. */
/* Drops the reference taken on each flow (pairs with
 * mlx5e_take_all_encap_flows()); _safe variant because the put may free
 * the flow and unlink it from the list.
 */
15442a1f1768SVlad Buslov void mlx5e_put_encap_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list)
15456a06c2f7SVlad Buslov {
15466a06c2f7SVlad Buslov 	struct mlx5e_tc_flow *flow, *tmp;
15476a06c2f7SVlad Buslov 
15486a06c2f7SVlad Buslov 	list_for_each_entry_safe(flow, tmp, flow_list, tmp_list)
15496a06c2f7SVlad Buslov 		mlx5e_flow_put(priv, flow);
15506a06c2f7SVlad Buslov }
15516a06c2f7SVlad Buslov 
/* Return the next encap entry on nhe->encap_list after @e (or the first
 * one when @e is NULL) that is fully initialized and VALID, with its
 * reference count elevated. The reference on @e is always released.
 * Returns NULL when no such entry exists.
 */
1552ac0d9176SVlad Buslov static struct mlx5e_encap_entry *
1553ac0d9176SVlad Buslov mlx5e_get_next_valid_encap(struct mlx5e_neigh_hash_entry *nhe,
1554ac0d9176SVlad Buslov 			   struct mlx5e_encap_entry *e)
1555ac0d9176SVlad Buslov {
1556ac0d9176SVlad Buslov 	struct mlx5e_encap_entry *next = NULL;
1557ac0d9176SVlad Buslov 
1558ac0d9176SVlad Buslov retry:
1559ac0d9176SVlad Buslov 	rcu_read_lock();
1560ac0d9176SVlad Buslov 
1561ac0d9176SVlad Buslov 	/* find encap with non-zero reference counter value */
1562ac0d9176SVlad Buslov 	for (next = e ?
1563ac0d9176SVlad Buslov 		     list_next_or_null_rcu(&nhe->encap_list,
1564ac0d9176SVlad Buslov 					   &e->encap_list,
1565ac0d9176SVlad Buslov 					   struct mlx5e_encap_entry,
1566ac0d9176SVlad Buslov 					   encap_list) :
1567ac0d9176SVlad Buslov 		     list_first_or_null_rcu(&nhe->encap_list,
1568ac0d9176SVlad Buslov 					    struct mlx5e_encap_entry,
1569ac0d9176SVlad Buslov 					    encap_list);
1570ac0d9176SVlad Buslov 	     next;
1571ac0d9176SVlad Buslov 	     next = list_next_or_null_rcu(&nhe->encap_list,
1572ac0d9176SVlad Buslov 					  &next->encap_list,
1573ac0d9176SVlad Buslov 					  struct mlx5e_encap_entry,
1574ac0d9176SVlad Buslov 					  encap_list))
		/* mlx5e_encap_take() fails when refcount is already zero. */
1575ac0d9176SVlad Buslov 		if (mlx5e_encap_take(next))
1576ac0d9176SVlad Buslov 			break;
1577ac0d9176SVlad Buslov 
1578ac0d9176SVlad Buslov 	rcu_read_unlock();
1579ac0d9176SVlad Buslov 
1580ac0d9176SVlad Buslov 	/* release starting encap */
1581ac0d9176SVlad Buslov 	if (e)
1582ac0d9176SVlad Buslov 		mlx5e_encap_put(netdev_priv(e->out_dev), e);
1583ac0d9176SVlad Buslov 	if (!next)
1584ac0d9176SVlad Buslov 		return next;
1585ac0d9176SVlad Buslov 
1586ac0d9176SVlad Buslov 	/* wait for encap to be fully initialized */
1587ac0d9176SVlad Buslov 	wait_for_completion(&next->res_ready);
1588ac0d9176SVlad Buslov 	/* continue searching if encap entry is not in valid state after completion */
1589ac0d9176SVlad Buslov 	if (!(next->flags & MLX5_ENCAP_ENTRY_VALID)) {
1590ac0d9176SVlad Buslov 		e = next;
1591ac0d9176SVlad Buslov 		goto retry;
1592ac0d9176SVlad Buslov 	}
1593ac0d9176SVlad Buslov 
1594ac0d9176SVlad Buslov 	return next;
1595ac0d9176SVlad Buslov }
1596ac0d9176SVlad Buslov 
/* Periodic neigh liveness check: walk all valid encap entries of @nhe and
 * their offloaded flows; if any flow's HW counter was used since the last
 * report, look up the neighbour and send it an event to keep it from
 * being garbage collected.
 */
1597f6dfb4c3SHadar Hen Zion void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
1598f6dfb4c3SHadar Hen Zion {
1599f6dfb4c3SHadar Hen Zion 	struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
1600ac0d9176SVlad Buslov 	struct mlx5e_encap_entry *e = NULL;
1601f6dfb4c3SHadar Hen Zion 	struct mlx5e_tc_flow *flow;
1602f6dfb4c3SHadar Hen Zion 	struct mlx5_fc *counter;
1603f6dfb4c3SHadar Hen Zion 	struct neigh_table *tbl;
1604f6dfb4c3SHadar Hen Zion 	bool neigh_used = false;
1605f6dfb4c3SHadar Hen Zion 	struct neighbour *n;
160690bb7692SAriel Levkovich 	u64 lastuse;
1607f6dfb4c3SHadar Hen Zion 
	/* Pick the neighbour table for the address family; bail out for
	 * anything else (incl. AF_INET6 when IPv6 is compiled out).
	 */
1608f6dfb4c3SHadar Hen Zion 	if (m_neigh->family == AF_INET)
1609f6dfb4c3SHadar Hen Zion 		tbl = &arp_tbl;
1610f6dfb4c3SHadar Hen Zion #if IS_ENABLED(CONFIG_IPV6)
1611f6dfb4c3SHadar Hen Zion 	else if (m_neigh->family == AF_INET6)
16125cc3a8c6SSaeed Mahameed 		tbl = ipv6_stub->nd_tbl;
1613f6dfb4c3SHadar Hen Zion #endif
1614f6dfb4c3SHadar Hen Zion 	else
1615f6dfb4c3SHadar Hen Zion 		return;
1616f6dfb4c3SHadar Hen Zion 
1617ac0d9176SVlad Buslov 	/* mlx5e_get_next_valid_encap() releases previous encap before returning
1618ac0d9176SVlad Buslov 	 * next one.
1619ac0d9176SVlad Buslov 	 */
1620ac0d9176SVlad Buslov 	while ((e = mlx5e_get_next_valid_encap(nhe, e)) != NULL) {
16216a06c2f7SVlad Buslov 		struct mlx5e_priv *priv = netdev_priv(e->out_dev);
16225a7e5bcbSVlad Buslov 		struct encap_flow_item *efi, *tmp;
16236a06c2f7SVlad Buslov 		struct mlx5_eswitch *esw;
16246a06c2f7SVlad Buslov 		LIST_HEAD(flow_list);
1625948993f2SVlad Buslov 
		/* encap_tbl_lock protects e->flows; the taken flows are
		 * collected on flow_list and released after the walk.
		 */
16266a06c2f7SVlad Buslov 		esw = priv->mdev->priv.eswitch;
16276a06c2f7SVlad Buslov 		mutex_lock(&esw->offloads.encap_tbl_lock);
16285a7e5bcbSVlad Buslov 		list_for_each_entry_safe(efi, tmp, &e->flows, list) {
162979baaec7SEli Britstein 			flow = container_of(efi, struct mlx5e_tc_flow,
163079baaec7SEli Britstein 					    encaps[efi->index]);
16315a7e5bcbSVlad Buslov 			if (IS_ERR(mlx5e_flow_get(flow)))
16325a7e5bcbSVlad Buslov 				continue;
16336a06c2f7SVlad Buslov 			list_add(&flow->tmp_list, &flow_list);
16345a7e5bcbSVlad Buslov 
1635226f2ca3SVlad Buslov 			if (mlx5e_is_offloaded_flow(flow)) {
1636b8aee822SMark Bloch 				counter = mlx5e_tc_get_counter(flow);
163790bb7692SAriel Levkovich 				lastuse = mlx5_fc_query_lastuse(counter);
1638f6dfb4c3SHadar Hen Zion 				if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
1639f6dfb4c3SHadar Hen Zion 					neigh_used = true;
1640f6dfb4c3SHadar Hen Zion 					break;
1641f6dfb4c3SHadar Hen Zion 				}
1642f6dfb4c3SHadar Hen Zion 			}
1643f6dfb4c3SHadar Hen Zion 		}
16446a06c2f7SVlad Buslov 		mutex_unlock(&esw->offloads.encap_tbl_lock);
1645948993f2SVlad Buslov 
16466a06c2f7SVlad Buslov 		mlx5e_put_encap_flow_list(priv, &flow_list);
1647ac0d9176SVlad Buslov 		if (neigh_used) {
1648ac0d9176SVlad Buslov 			/* release current encap before breaking the loop */
16496a06c2f7SVlad Buslov 			mlx5e_encap_put(priv, e);
1650e36d4810SRoi Dayan 			break;
1651f6dfb4c3SHadar Hen Zion 		}
1652ac0d9176SVlad Buslov 	}
1653f6dfb4c3SHadar Hen Zion 
1654c786fe59SVlad Buslov 	trace_mlx5e_tc_update_neigh_used_value(nhe, neigh_used);
1655c786fe59SVlad Buslov 
1656f6dfb4c3SHadar Hen Zion 	if (neigh_used) {
1657f6dfb4c3SHadar Hen Zion 		nhe->reported_lastuse = jiffies;
1658f6dfb4c3SHadar Hen Zion 
1659f6dfb4c3SHadar Hen Zion 		/* find the relevant neigh according to the cached device and
1660f6dfb4c3SHadar Hen Zion 		 * dst ip pair
1661f6dfb4c3SHadar Hen Zion 		 */
1662f6dfb4c3SHadar Hen Zion 		n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev);
1663c7f7ba8dSRoi Dayan 		if (!n)
1664f6dfb4c3SHadar Hen Zion 			return;
1665f6dfb4c3SHadar Hen Zion 
1666f6dfb4c3SHadar Hen Zion 		neigh_event_send(n, NULL);
1667f6dfb4c3SHadar Hen Zion 		neigh_release(n);
1668f6dfb4c3SHadar Hen Zion 	}
1669f6dfb4c3SHadar Hen Zion }
1670f6dfb4c3SHadar Hen Zion 
/* Final teardown of an encap entry whose refcount reached zero: detach
 * from the representor and release the HW reformat context (only when
 * the entry completed initialization), then free its memory. The struct
 * itself is freed via kfree_rcu because readers may still traverse the
 * encap list under RCU.
 */
167161086f39SVlad Buslov static void mlx5e_encap_dealloc(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
1672d85cdccbSOr Gerlitz {
1673948993f2SVlad Buslov 	WARN_ON(!list_empty(&e->flows));
16743c140dd5SVlad Buslov 
	/* compl_result > 0 indicates the entry was successfully attached —
	 * NOTE(review): inferred from the detach call below; confirm against
	 * the encap attach path.
	 */
16753c140dd5SVlad Buslov 	if (e->compl_result > 0) {
1676232c0013SHadar Hen Zion 		mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
1677232c0013SHadar Hen Zion 
1678232c0013SHadar Hen Zion 		if (e->flags & MLX5_ENCAP_ENTRY_VALID)
16792b688ea5SMaor Gottlieb 			mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
16803c140dd5SVlad Buslov 	}
1681232c0013SHadar Hen Zion 
16822a4b6526SVlad Buslov 	kfree(e->tun_info);
1683232c0013SHadar Hen Zion 	kfree(e->encap_header);
1684ac0d9176SVlad Buslov 	kfree_rcu(e, rcu);
16855067b602SRoi Dayan }
1686948993f2SVlad Buslov 
/* Drop a reference to an encap entry. When the last reference is
 * released, the entry is removed from the hash table while holding
 * encap_tbl_lock (atomically with the refcount drop) and then freed.
 */
168761086f39SVlad Buslov void mlx5e_encap_put(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
168861086f39SVlad Buslov {
168961086f39SVlad Buslov 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
169061086f39SVlad Buslov 
169161086f39SVlad Buslov 	if (!refcount_dec_and_mutex_lock(&e->refcnt, &esw->offloads.encap_tbl_lock))
169261086f39SVlad Buslov 		return;
169361086f39SVlad Buslov 	hash_del_rcu(&e->encap_hlist);
169461086f39SVlad Buslov 	mutex_unlock(&esw->offloads.encap_tbl_lock);
169561086f39SVlad Buslov 
169661086f39SVlad Buslov 	mlx5e_encap_dealloc(priv, e);
169761086f39SVlad Buslov }
169861086f39SVlad Buslov 
/* Detach the flow's encap destination at @out_index from its encap entry
 * and drop the flow's reference to it. Open-codes the put so that the
 * flow can be unlinked from e->flows under the same encap_tbl_lock
 * critical section as the refcount drop.
 */
1699948993f2SVlad Buslov static void mlx5e_detach_encap(struct mlx5e_priv *priv,
1700948993f2SVlad Buslov 			       struct mlx5e_tc_flow *flow, int out_index)
1701948993f2SVlad Buslov {
170261086f39SVlad Buslov 	struct mlx5e_encap_entry *e = flow->encaps[out_index].e;
170361086f39SVlad Buslov 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
170461086f39SVlad Buslov 
1705948993f2SVlad Buslov 	/* flow wasn't fully initialized */
170661086f39SVlad Buslov 	if (!e)
1707948993f2SVlad Buslov 		return;
1708948993f2SVlad Buslov 
170961086f39SVlad Buslov 	mutex_lock(&esw->offloads.encap_tbl_lock);
1710948993f2SVlad Buslov 	list_del(&flow->encaps[out_index].list);
1711948993f2SVlad Buslov 	flow->encaps[out_index].e = NULL;
171261086f39SVlad Buslov 	if (!refcount_dec_and_test(&e->refcnt)) {
171361086f39SVlad Buslov 		mutex_unlock(&esw->offloads.encap_tbl_lock);
171461086f39SVlad Buslov 		return;
171561086f39SVlad Buslov 	}
	/* Last reference: unpublish the entry and free it. */
171661086f39SVlad Buslov 	hash_del_rcu(&e->encap_hlist);
171761086f39SVlad Buslov 	mutex_unlock(&esw->offloads.encap_tbl_lock);
171861086f39SVlad Buslov 
171961086f39SVlad Buslov 	mlx5e_encap_dealloc(priv, e);
17205067b602SRoi Dayan }
17215067b602SRoi Dayan 
/* Delete the peer (duplicated) flow of an eswitch flow, if one exists.
 * Unlinks it from the eswitch peer list, clears the DUP flag and, once
 * the peer flow's refcount drops to zero, tears it down and frees it.
 */
172204de7ddaSRoi Dayan static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
172304de7ddaSRoi Dayan {
172404de7ddaSRoi Dayan 	struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;
172504de7ddaSRoi Dayan 
1726226f2ca3SVlad Buslov 	if (!flow_flag_test(flow, ESWITCH) ||
1727226f2ca3SVlad Buslov 	    !flow_flag_test(flow, DUP))
172804de7ddaSRoi Dayan 		return;
172904de7ddaSRoi Dayan 
173004de7ddaSRoi Dayan 	mutex_lock(&esw->offloads.peer_mutex);
173104de7ddaSRoi Dayan 	list_del(&flow->peer);
173204de7ddaSRoi Dayan 	mutex_unlock(&esw->offloads.peer_mutex);
173304de7ddaSRoi Dayan 
1734226f2ca3SVlad Buslov 	flow_flag_clear(flow, DUP);
173504de7ddaSRoi Dayan 
1736eb252c3aSRoi Dayan 	if (refcount_dec_and_test(&flow->peer_flow->refcnt)) {
173704de7ddaSRoi Dayan 		mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow);
1738a23dae79SRoi Dayan 		kfree(flow->peer_flow);
1739eb252c3aSRoi Dayan 	}
1740eb252c3aSRoi Dayan 
174104de7ddaSRoi Dayan 	flow->peer_flow = NULL;
174204de7ddaSRoi Dayan }
174304de7ddaSRoi Dayan 
/* Wrapper around __mlx5e_tc_del_fdb_peer_flow() that pins the peer
 * eswitch via devcom for the duration of the deletion; a NULL peer
 * (no paired device) makes this a no-op.
 */
174404de7ddaSRoi Dayan static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
174504de7ddaSRoi Dayan {
174604de7ddaSRoi Dayan 	struct mlx5_core_dev *dev = flow->priv->mdev;
174704de7ddaSRoi Dayan 	struct mlx5_devcom *devcom = dev->priv.devcom;
174804de7ddaSRoi Dayan 	struct mlx5_eswitch *peer_esw;
174904de7ddaSRoi Dayan 
175004de7ddaSRoi Dayan 	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
175104de7ddaSRoi Dayan 	if (!peer_esw)
175204de7ddaSRoi Dayan 		return;
175304de7ddaSRoi Dayan 
175404de7ddaSRoi Dayan 	__mlx5e_tc_del_fdb_peer_flow(flow);
175504de7ddaSRoi Dayan 	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
175604de7ddaSRoi Dayan }
175704de7ddaSRoi Dayan 
/* Dispatch flow teardown to the eswitch (FDB) or NIC path; eswitch flows
 * also delete their peer flow first.
 */
1758e8f887acSAmir Vadai static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
1759961e8979SRoi Dayan 			      struct mlx5e_tc_flow *flow)
1760e8f887acSAmir Vadai {
1761226f2ca3SVlad Buslov 	if (mlx5e_is_eswitch_flow(flow)) {
176204de7ddaSRoi Dayan 		mlx5e_tc_del_fdb_peer_flow(flow);
1763d85cdccbSOr Gerlitz 		mlx5e_tc_del_fdb_flow(priv, flow);
176404de7ddaSRoi Dayan 	} else {
1765d85cdccbSOr Gerlitz 		mlx5e_tc_del_nic_flow(priv, flow);
1766e8f887acSAmir Vadai 	}
176704de7ddaSRoi Dayan }
1768e8f887acSAmir Vadai 
/* Return non-zero (true) if the TC rule contains a goto action.
 * NOTE(review): declared int but returns bool literals and is used as a
 * boolean; a bool return type would be clearer.
 */
17690a7fcb78SPaul Blakey static int flow_has_tc_fwd_action(struct flow_cls_offload *f)
1770bbd00f7eSHadar Hen Zion {
17710a7fcb78SPaul Blakey 	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
17720a7fcb78SPaul Blakey 	struct flow_action *flow_action = &rule->action;
17730a7fcb78SPaul Blakey 	const struct flow_action_entry *act;
17740a7fcb78SPaul Blakey 	int i;
1775bbd00f7eSHadar Hen Zion 
17760a7fcb78SPaul Blakey 	flow_action_for_each(i, act, flow_action) {
17770a7fcb78SPaul Blakey 		switch (act->id) {
17780a7fcb78SPaul Blakey 		case FLOW_ACTION_GOTO:
17790a7fcb78SPaul Blakey 			return true;
17800a7fcb78SPaul Blakey 		default:
17810a7fcb78SPaul Blakey 			continue;
17820a7fcb78SPaul Blakey 		}
17830a7fcb78SPaul Blakey 	}
17840a7fcb78SPaul Blakey 
17850a7fcb78SPaul Blakey 	return false;
17860a7fcb78SPaul Blakey }
17870a7fcb78SPaul Blakey 
/* Validate tunnel (GENEVE) option match. Sets *dont_care to true when the
 * options mask is all-zero (no matching on options); otherwise requires a
 * full match (class/type/data masks all-ones) for every option. Returns 0
 * on success or -EOPNOTSUPP for a partial match, which HW cannot express
 * for chains > 0.
 */
17880a7fcb78SPaul Blakey static int
17890a7fcb78SPaul Blakey enc_opts_is_dont_care_or_full_match(struct mlx5e_priv *priv,
17900a7fcb78SPaul Blakey 				    struct flow_dissector_key_enc_opts *opts,
17910a7fcb78SPaul Blakey 				    struct netlink_ext_ack *extack,
17920a7fcb78SPaul Blakey 				    bool *dont_care)
17930a7fcb78SPaul Blakey {
17940a7fcb78SPaul Blakey 	struct geneve_opt *opt;
17950a7fcb78SPaul Blakey 	int off = 0;
17960a7fcb78SPaul Blakey 
17970a7fcb78SPaul Blakey 	*dont_care = true;
17980a7fcb78SPaul Blakey 
	/* Walk the packed TLV options; opt->length is in 4-byte units. */
17990a7fcb78SPaul Blakey 	while (opts->len > off) {
18000a7fcb78SPaul Blakey 		opt = (struct geneve_opt *)&opts->data[off];
18010a7fcb78SPaul Blakey 
18020a7fcb78SPaul Blakey 		if (!(*dont_care) || opt->opt_class || opt->type ||
18030a7fcb78SPaul Blakey 		    memchr_inv(opt->opt_data, 0, opt->length * 4)) {
18040a7fcb78SPaul Blakey 			*dont_care = false;
18050a7fcb78SPaul Blakey 
			/* Anything other than an all-ones mask is a partial
			 * match, which is rejected.
			 */
18060a7fcb78SPaul Blakey 			if (opt->opt_class != U16_MAX ||
18070a7fcb78SPaul Blakey 			    opt->type != U8_MAX ||
18080a7fcb78SPaul Blakey 			    memchr_inv(opt->opt_data, 0xFF,
18090a7fcb78SPaul Blakey 				       opt->length * 4)) {
18100a7fcb78SPaul Blakey 				NL_SET_ERR_MSG(extack,
18110a7fcb78SPaul Blakey 					       "Partial match of tunnel options in chain > 0 isn't supported");
18120a7fcb78SPaul Blakey 				netdev_warn(priv->netdev,
18130a7fcb78SPaul Blakey 					    "Partial match of tunnel options in chain > 0 isn't supported");
18140a7fcb78SPaul Blakey 				return -EOPNOTSUPP;
18150a7fcb78SPaul Blakey 			}
18160a7fcb78SPaul Blakey 		}
18170a7fcb78SPaul Blakey 
18180a7fcb78SPaul Blakey 		off += sizeof(struct geneve_opt) + opt->length * 4;
1819bbd00f7eSHadar Hen Zion 	}
1820bbd00f7eSHadar Hen Zion 
1821bbd00f7eSHadar Hen Zion 	return 0;
1822bbd00f7eSHadar Hen Zion }
1823bbd00f7eSHadar Hen Zion 
/* Copy the dissector key identified by diss_key from the rule's match
 * key into *dst. Statement-expression macro; arguments are evaluated
 * once via the local __rule/__dst copies.
 */
18240a7fcb78SPaul Blakey #define COPY_DISSECTOR(rule, diss_key, dst)\
18250a7fcb78SPaul Blakey ({ \
18260a7fcb78SPaul Blakey 	struct flow_rule *__rule = (rule);\
18270a7fcb78SPaul Blakey 	typeof(dst) __dst = dst;\
18280a7fcb78SPaul Blakey \
18290a7fcb78SPaul Blakey 	memcpy(__dst,\
18300a7fcb78SPaul Blakey 	       skb_flow_dissector_target(__rule->match.dissector,\
18310a7fcb78SPaul Blakey 					 diss_key,\
18320a7fcb78SPaul Blakey 					 __rule->match.key),\
18330a7fcb78SPaul Blakey 	       sizeof(*__dst));\
18340a7fcb78SPaul Blakey })
18350a7fcb78SPaul Blakey 
18360a7fcb78SPaul Blakey static int mlx5e_get_flow_tunnel_id(struct mlx5e_priv *priv,
18370a7fcb78SPaul Blakey 				    struct mlx5e_tc_flow *flow,
18380a7fcb78SPaul Blakey 				    struct flow_cls_offload *f,
18390a7fcb78SPaul Blakey 				    struct net_device *filter_dev)
18408377629eSEli Britstein {
18410a7fcb78SPaul Blakey 	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
18420a7fcb78SPaul Blakey 	struct netlink_ext_ack *extack = f->common.extack;
18430a7fcb78SPaul Blakey 	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
18440a7fcb78SPaul Blakey 	struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
18450a7fcb78SPaul Blakey 	struct flow_match_enc_opts enc_opts_match;
18460a7fcb78SPaul Blakey 	struct mlx5_rep_uplink_priv *uplink_priv;
18470a7fcb78SPaul Blakey 	struct mlx5e_rep_priv *uplink_rpriv;
18480a7fcb78SPaul Blakey 	struct tunnel_match_key tunnel_key;
18490a7fcb78SPaul Blakey 	bool enc_opts_is_dont_care = true;
18500a7fcb78SPaul Blakey 	u32 tun_id, enc_opts_id = 0;
18510a7fcb78SPaul Blakey 	struct mlx5_eswitch *esw;
18520a7fcb78SPaul Blakey 	u32 value, mask;
18530a7fcb78SPaul Blakey 	int err;
18540a7fcb78SPaul Blakey 
18550a7fcb78SPaul Blakey 	esw = priv->mdev->priv.eswitch;
18560a7fcb78SPaul Blakey 	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
18570a7fcb78SPaul Blakey 	uplink_priv = &uplink_rpriv->uplink_priv;
18580a7fcb78SPaul Blakey 
18590a7fcb78SPaul Blakey 	memset(&tunnel_key, 0, sizeof(tunnel_key));
18600a7fcb78SPaul Blakey 	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL,
18610a7fcb78SPaul Blakey 		       &tunnel_key.enc_control);
18620a7fcb78SPaul Blakey 	if (tunnel_key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
18630a7fcb78SPaul Blakey 		COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
18640a7fcb78SPaul Blakey 			       &tunnel_key.enc_ipv4);
18650a7fcb78SPaul Blakey 	else
18660a7fcb78SPaul Blakey 		COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
18670a7fcb78SPaul Blakey 			       &tunnel_key.enc_ipv6);
18680a7fcb78SPaul Blakey 	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IP, &tunnel_key.enc_ip);
18690a7fcb78SPaul Blakey 	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_PORTS,
18700a7fcb78SPaul Blakey 		       &tunnel_key.enc_tp);
18710a7fcb78SPaul Blakey 	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_KEYID,
18720a7fcb78SPaul Blakey 		       &tunnel_key.enc_key_id);
18730a7fcb78SPaul Blakey 	tunnel_key.filter_ifindex = filter_dev->ifindex;
18740a7fcb78SPaul Blakey 
18750a7fcb78SPaul Blakey 	err = mapping_add(uplink_priv->tunnel_mapping, &tunnel_key, &tun_id);
18760a7fcb78SPaul Blakey 	if (err)
18770a7fcb78SPaul Blakey 		return err;
18780a7fcb78SPaul Blakey 
18790a7fcb78SPaul Blakey 	flow_rule_match_enc_opts(rule, &enc_opts_match);
18800a7fcb78SPaul Blakey 	err = enc_opts_is_dont_care_or_full_match(priv,
18810a7fcb78SPaul Blakey 						  enc_opts_match.mask,
18820a7fcb78SPaul Blakey 						  extack,
18830a7fcb78SPaul Blakey 						  &enc_opts_is_dont_care);
18840a7fcb78SPaul Blakey 	if (err)
18850a7fcb78SPaul Blakey 		goto err_enc_opts;
18860a7fcb78SPaul Blakey 
18870a7fcb78SPaul Blakey 	if (!enc_opts_is_dont_care) {
18880a7fcb78SPaul Blakey 		err = mapping_add(uplink_priv->tunnel_enc_opts_mapping,
18890a7fcb78SPaul Blakey 				  enc_opts_match.key, &enc_opts_id);
18900a7fcb78SPaul Blakey 		if (err)
18910a7fcb78SPaul Blakey 			goto err_enc_opts;
18920a7fcb78SPaul Blakey 	}
18930a7fcb78SPaul Blakey 
18940a7fcb78SPaul Blakey 	value = tun_id << ENC_OPTS_BITS | enc_opts_id;
18950a7fcb78SPaul Blakey 	mask = enc_opts_id ? TUNNEL_ID_MASK :
18960a7fcb78SPaul Blakey 			     (TUNNEL_ID_MASK & ~ENC_OPTS_BITS_MASK);
18970a7fcb78SPaul Blakey 
18980a7fcb78SPaul Blakey 	if (attr->chain) {
18990a7fcb78SPaul Blakey 		mlx5e_tc_match_to_reg_match(&attr->parse_attr->spec,
19000a7fcb78SPaul Blakey 					    TUNNEL_TO_REG, value, mask);
19010a7fcb78SPaul Blakey 	} else {
19020a7fcb78SPaul Blakey 		mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
19030a7fcb78SPaul Blakey 		err = mlx5e_tc_match_to_reg_set(priv->mdev,
19040a7fcb78SPaul Blakey 						mod_hdr_acts,
19050a7fcb78SPaul Blakey 						TUNNEL_TO_REG, value);
19060a7fcb78SPaul Blakey 		if (err)
19070a7fcb78SPaul Blakey 			goto err_set;
19080a7fcb78SPaul Blakey 
19090a7fcb78SPaul Blakey 		attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
19100a7fcb78SPaul Blakey 	}
19110a7fcb78SPaul Blakey 
19120a7fcb78SPaul Blakey 	flow->tunnel_id = value;
19130a7fcb78SPaul Blakey 	return 0;
19140a7fcb78SPaul Blakey 
19150a7fcb78SPaul Blakey err_set:
19160a7fcb78SPaul Blakey 	if (enc_opts_id)
19170a7fcb78SPaul Blakey 		mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
19180a7fcb78SPaul Blakey 			       enc_opts_id);
19190a7fcb78SPaul Blakey err_enc_opts:
19200a7fcb78SPaul Blakey 	mapping_remove(uplink_priv->tunnel_mapping, tun_id);
19210a7fcb78SPaul Blakey 	return err;
19220a7fcb78SPaul Blakey }
19230a7fcb78SPaul Blakey 
19240a7fcb78SPaul Blakey static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow)
19250a7fcb78SPaul Blakey {
19260a7fcb78SPaul Blakey 	u32 enc_opts_id = flow->tunnel_id & ENC_OPTS_BITS_MASK;
19270a7fcb78SPaul Blakey 	u32 tun_id = flow->tunnel_id >> ENC_OPTS_BITS;
19280a7fcb78SPaul Blakey 	struct mlx5_rep_uplink_priv *uplink_priv;
19290a7fcb78SPaul Blakey 	struct mlx5e_rep_priv *uplink_rpriv;
19300a7fcb78SPaul Blakey 	struct mlx5_eswitch *esw;
19310a7fcb78SPaul Blakey 
19320a7fcb78SPaul Blakey 	esw = flow->priv->mdev->priv.eswitch;
19330a7fcb78SPaul Blakey 	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
19340a7fcb78SPaul Blakey 	uplink_priv = &uplink_rpriv->uplink_priv;
19350a7fcb78SPaul Blakey 
19360a7fcb78SPaul Blakey 	if (tun_id)
19370a7fcb78SPaul Blakey 		mapping_remove(uplink_priv->tunnel_mapping, tun_id);
19380a7fcb78SPaul Blakey 	if (enc_opts_id)
19390a7fcb78SPaul Blakey 		mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
19400a7fcb78SPaul Blakey 			       enc_opts_id);
19410a7fcb78SPaul Blakey }
19420a7fcb78SPaul Blakey 
19430a7fcb78SPaul Blakey static int parse_tunnel_attr(struct mlx5e_priv *priv,
19440a7fcb78SPaul Blakey 			     struct mlx5e_tc_flow *flow,
19450a7fcb78SPaul Blakey 			     struct mlx5_flow_spec *spec,
19460a7fcb78SPaul Blakey 			     struct flow_cls_offload *f,
19470a7fcb78SPaul Blakey 			     struct net_device *filter_dev,
19480a7fcb78SPaul Blakey 			     u8 *match_level,
19490a7fcb78SPaul Blakey 			     bool *match_inner)
19500a7fcb78SPaul Blakey {
19510a7fcb78SPaul Blakey 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
19520a7fcb78SPaul Blakey 	struct netlink_ext_ack *extack = f->common.extack;
19530a7fcb78SPaul Blakey 	bool needs_mapping, sets_mapping;
19540a7fcb78SPaul Blakey 	int err;
19550a7fcb78SPaul Blakey 
19560a7fcb78SPaul Blakey 	if (!mlx5e_is_eswitch_flow(flow))
19570a7fcb78SPaul Blakey 		return -EOPNOTSUPP;
19580a7fcb78SPaul Blakey 
19590a7fcb78SPaul Blakey 	needs_mapping = !!flow->esw_attr->chain;
19600a7fcb78SPaul Blakey 	sets_mapping = !flow->esw_attr->chain && flow_has_tc_fwd_action(f);
19610a7fcb78SPaul Blakey 	*match_inner = !needs_mapping;
19620a7fcb78SPaul Blakey 
19630a7fcb78SPaul Blakey 	if ((needs_mapping || sets_mapping) &&
19640a7fcb78SPaul Blakey 	    !mlx5_eswitch_vport_match_metadata_enabled(esw)) {
19650a7fcb78SPaul Blakey 		NL_SET_ERR_MSG(extack,
19660a7fcb78SPaul Blakey 			       "Chains on tunnel devices isn't supported without register metadata support");
19670a7fcb78SPaul Blakey 		netdev_warn(priv->netdev,
19680a7fcb78SPaul Blakey 			    "Chains on tunnel devices isn't supported without register metadata support");
19690a7fcb78SPaul Blakey 		return -EOPNOTSUPP;
19700a7fcb78SPaul Blakey 	}
19710a7fcb78SPaul Blakey 
19720a7fcb78SPaul Blakey 	if (!flow->esw_attr->chain) {
19730a7fcb78SPaul Blakey 		err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
19740a7fcb78SPaul Blakey 					 match_level);
19750a7fcb78SPaul Blakey 		if (err) {
19760a7fcb78SPaul Blakey 			NL_SET_ERR_MSG_MOD(extack,
19770a7fcb78SPaul Blakey 					   "Failed to parse tunnel attributes");
19780a7fcb78SPaul Blakey 			netdev_warn(priv->netdev,
19790a7fcb78SPaul Blakey 				    "Failed to parse tunnel attributes");
19800a7fcb78SPaul Blakey 			return err;
19810a7fcb78SPaul Blakey 		}
19820a7fcb78SPaul Blakey 
19830a7fcb78SPaul Blakey 		flow->esw_attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
19840a7fcb78SPaul Blakey 	}
19850a7fcb78SPaul Blakey 
19860a7fcb78SPaul Blakey 	if (!needs_mapping && !sets_mapping)
19870a7fcb78SPaul Blakey 		return 0;
19880a7fcb78SPaul Blakey 
19890a7fcb78SPaul Blakey 	return mlx5e_get_flow_tunnel_id(priv, flow, f, filter_dev);
19900a7fcb78SPaul Blakey }
19910a7fcb78SPaul Blakey 
19920a7fcb78SPaul Blakey static void *get_match_inner_headers_criteria(struct mlx5_flow_spec *spec)
19930a7fcb78SPaul Blakey {
19940a7fcb78SPaul Blakey 	return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
19950a7fcb78SPaul Blakey 			    inner_headers);
19960a7fcb78SPaul Blakey }
19970a7fcb78SPaul Blakey 
19980a7fcb78SPaul Blakey static void *get_match_inner_headers_value(struct mlx5_flow_spec *spec)
19990a7fcb78SPaul Blakey {
20000a7fcb78SPaul Blakey 	return MLX5_ADDR_OF(fte_match_param, spec->match_value,
20010a7fcb78SPaul Blakey 			    inner_headers);
20020a7fcb78SPaul Blakey }
20030a7fcb78SPaul Blakey 
20040a7fcb78SPaul Blakey static void *get_match_outer_headers_criteria(struct mlx5_flow_spec *spec)
20050a7fcb78SPaul Blakey {
20060a7fcb78SPaul Blakey 	return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
20070a7fcb78SPaul Blakey 			    outer_headers);
20080a7fcb78SPaul Blakey }
20090a7fcb78SPaul Blakey 
20100a7fcb78SPaul Blakey static void *get_match_outer_headers_value(struct mlx5_flow_spec *spec)
20110a7fcb78SPaul Blakey {
20120a7fcb78SPaul Blakey 	return MLX5_ADDR_OF(fte_match_param, spec->match_value,
20138377629eSEli Britstein 			    outer_headers);
20148377629eSEli Britstein }
20158377629eSEli Britstein 
20168377629eSEli Britstein static void *get_match_headers_value(u32 flags,
20178377629eSEli Britstein 				     struct mlx5_flow_spec *spec)
20188377629eSEli Britstein {
20198377629eSEli Britstein 	return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
20200a7fcb78SPaul Blakey 		get_match_inner_headers_value(spec) :
20210a7fcb78SPaul Blakey 		get_match_outer_headers_value(spec);
20220a7fcb78SPaul Blakey }
20230a7fcb78SPaul Blakey 
20240a7fcb78SPaul Blakey static void *get_match_headers_criteria(u32 flags,
20250a7fcb78SPaul Blakey 					struct mlx5_flow_spec *spec)
20260a7fcb78SPaul Blakey {
20270a7fcb78SPaul Blakey 	return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
20280a7fcb78SPaul Blakey 		get_match_inner_headers_criteria(spec) :
20290a7fcb78SPaul Blakey 		get_match_outer_headers_criteria(spec);
20308377629eSEli Britstein }
20318377629eSEli Britstein 
20326d65bc64Swenxu static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
20336d65bc64Swenxu 				   struct flow_cls_offload *f)
20346d65bc64Swenxu {
20356d65bc64Swenxu 	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
20366d65bc64Swenxu 	struct netlink_ext_ack *extack = f->common.extack;
20376d65bc64Swenxu 	struct net_device *ingress_dev;
20386d65bc64Swenxu 	struct flow_match_meta match;
20396d65bc64Swenxu 
20406d65bc64Swenxu 	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
20416d65bc64Swenxu 		return 0;
20426d65bc64Swenxu 
20436d65bc64Swenxu 	flow_rule_match_meta(rule, &match);
20446d65bc64Swenxu 	if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
20456d65bc64Swenxu 		NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask");
20466d65bc64Swenxu 		return -EINVAL;
20476d65bc64Swenxu 	}
20486d65bc64Swenxu 
20496d65bc64Swenxu 	ingress_dev = __dev_get_by_index(dev_net(filter_dev),
20506d65bc64Swenxu 					 match.key->ingress_ifindex);
20516d65bc64Swenxu 	if (!ingress_dev) {
20526d65bc64Swenxu 		NL_SET_ERR_MSG_MOD(extack,
20536d65bc64Swenxu 				   "Can't find the ingress port to match on");
20546d65bc64Swenxu 		return -EINVAL;
20556d65bc64Swenxu 	}
20566d65bc64Swenxu 
20576d65bc64Swenxu 	if (ingress_dev != filter_dev) {
20586d65bc64Swenxu 		NL_SET_ERR_MSG_MOD(extack,
20596d65bc64Swenxu 				   "Can't match on the ingress filter port");
20606d65bc64Swenxu 		return -EINVAL;
20616d65bc64Swenxu 	}
20626d65bc64Swenxu 
20636d65bc64Swenxu 	return 0;
20646d65bc64Swenxu }
20656d65bc64Swenxu 
2066de0af0bfSRoi Dayan static int __parse_cls_flower(struct mlx5e_priv *priv,
20670a7fcb78SPaul Blakey 			      struct mlx5e_tc_flow *flow,
2068de0af0bfSRoi Dayan 			      struct mlx5_flow_spec *spec,
2069f9e30088SPablo Neira Ayuso 			      struct flow_cls_offload *f,
207054c177caSOz Shlomo 			      struct net_device *filter_dev,
207193b3586eSHuy Nguyen 			      u8 *inner_match_level, u8 *outer_match_level)
2072e3a2b7edSAmir Vadai {
2073e98bedf5SEli Britstein 	struct netlink_ext_ack *extack = f->common.extack;
2074c5bb1730SMaor Gottlieb 	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2075c5bb1730SMaor Gottlieb 				       outer_headers);
2076c5bb1730SMaor Gottlieb 	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
2077c5bb1730SMaor Gottlieb 				       outer_headers);
2078699e96ddSJianbo Liu 	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2079699e96ddSJianbo Liu 				    misc_parameters);
2080699e96ddSJianbo Liu 	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
2081699e96ddSJianbo Liu 				    misc_parameters);
2082f9e30088SPablo Neira Ayuso 	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
20838f256622SPablo Neira Ayuso 	struct flow_dissector *dissector = rule->match.dissector;
2084e3a2b7edSAmir Vadai 	u16 addr_type = 0;
2085e3a2b7edSAmir Vadai 	u8 ip_proto = 0;
208693b3586eSHuy Nguyen 	u8 *match_level;
20876d65bc64Swenxu 	int err;
2088e3a2b7edSAmir Vadai 
208993b3586eSHuy Nguyen 	match_level = outer_match_level;
2090de0af0bfSRoi Dayan 
20918f256622SPablo Neira Ayuso 	if (dissector->used_keys &
20923d144578SVlad Buslov 	    ~(BIT(FLOW_DISSECTOR_KEY_META) |
20933d144578SVlad Buslov 	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
2094e3a2b7edSAmir Vadai 	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
2095e3a2b7edSAmir Vadai 	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
2096095b6cfdSOr Gerlitz 	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
2097699e96ddSJianbo Liu 	      BIT(FLOW_DISSECTOR_KEY_CVLAN) |
2098e3a2b7edSAmir Vadai 	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
2099e3a2b7edSAmir Vadai 	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
2100bbd00f7eSHadar Hen Zion 	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
2101bbd00f7eSHadar Hen Zion 	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
2102bbd00f7eSHadar Hen Zion 	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
2103bbd00f7eSHadar Hen Zion 	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
2104bbd00f7eSHadar Hen Zion 	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS)	|
2105e77834ecSOr Gerlitz 	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
2106fd7da28bSOr Gerlitz 	      BIT(FLOW_DISSECTOR_KEY_TCP) |
2107bcef735cSOr Gerlitz 	      BIT(FLOW_DISSECTOR_KEY_IP)  |
21089272e3dfSYevgeny Kliteynik 	      BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
21099272e3dfSYevgeny Kliteynik 	      BIT(FLOW_DISSECTOR_KEY_ENC_OPTS))) {
2110e98bedf5SEli Britstein 		NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
2111e3a2b7edSAmir Vadai 		netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
21128f256622SPablo Neira Ayuso 			    dissector->used_keys);
2113e3a2b7edSAmir Vadai 		return -EOPNOTSUPP;
2114e3a2b7edSAmir Vadai 	}
2115e3a2b7edSAmir Vadai 
2116075973c7SVlad Buslov 	if (mlx5e_get_tc_tun(filter_dev)) {
21170a7fcb78SPaul Blakey 		bool match_inner = false;
2118bbd00f7eSHadar Hen Zion 
21190a7fcb78SPaul Blakey 		err = parse_tunnel_attr(priv, flow, spec, f, filter_dev,
21200a7fcb78SPaul Blakey 					outer_match_level, &match_inner);
21210a7fcb78SPaul Blakey 		if (err)
21220a7fcb78SPaul Blakey 			return err;
21230a7fcb78SPaul Blakey 
21240a7fcb78SPaul Blakey 		if (match_inner) {
21250a7fcb78SPaul Blakey 			/* header pointers should point to the inner headers
21260a7fcb78SPaul Blakey 			 * if the packet was decapsulated already.
21270a7fcb78SPaul Blakey 			 * outer headers are set by parse_tunnel_attr.
2128bbd00f7eSHadar Hen Zion 			 */
212993b3586eSHuy Nguyen 			match_level = inner_match_level;
21300a7fcb78SPaul Blakey 			headers_c = get_match_inner_headers_criteria(spec);
21310a7fcb78SPaul Blakey 			headers_v = get_match_inner_headers_value(spec);
21320a7fcb78SPaul Blakey 		}
2133bbd00f7eSHadar Hen Zion 	}
2134bbd00f7eSHadar Hen Zion 
21356d65bc64Swenxu 	err = mlx5e_flower_parse_meta(filter_dev, f);
21366d65bc64Swenxu 	if (err)
21376d65bc64Swenxu 		return err;
21386d65bc64Swenxu 
21398f256622SPablo Neira Ayuso 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
21408f256622SPablo Neira Ayuso 		struct flow_match_basic match;
2141e3a2b7edSAmir Vadai 
21428f256622SPablo Neira Ayuso 		flow_rule_match_basic(rule, &match);
21438f256622SPablo Neira Ayuso 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
21448f256622SPablo Neira Ayuso 			 ntohs(match.mask->n_proto));
21458f256622SPablo Neira Ayuso 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
21468f256622SPablo Neira Ayuso 			 ntohs(match.key->n_proto));
21478f256622SPablo Neira Ayuso 
21488f256622SPablo Neira Ayuso 		if (match.mask->n_proto)
2149d708f902SOr Gerlitz 			*match_level = MLX5_MATCH_L2;
2150e3a2b7edSAmir Vadai 	}
215135a605dbSEli Britstein 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN) ||
215235a605dbSEli Britstein 	    is_vlan_dev(filter_dev)) {
215335a605dbSEli Britstein 		struct flow_dissector_key_vlan filter_dev_mask;
215435a605dbSEli Britstein 		struct flow_dissector_key_vlan filter_dev_key;
21558f256622SPablo Neira Ayuso 		struct flow_match_vlan match;
21568f256622SPablo Neira Ayuso 
215735a605dbSEli Britstein 		if (is_vlan_dev(filter_dev)) {
215835a605dbSEli Britstein 			match.key = &filter_dev_key;
215935a605dbSEli Britstein 			match.key->vlan_id = vlan_dev_vlan_id(filter_dev);
216035a605dbSEli Britstein 			match.key->vlan_tpid = vlan_dev_vlan_proto(filter_dev);
216135a605dbSEli Britstein 			match.key->vlan_priority = 0;
216235a605dbSEli Britstein 			match.mask = &filter_dev_mask;
216335a605dbSEli Britstein 			memset(match.mask, 0xff, sizeof(*match.mask));
216435a605dbSEli Britstein 			match.mask->vlan_priority = 0;
216535a605dbSEli Britstein 		} else {
21668f256622SPablo Neira Ayuso 			flow_rule_match_vlan(rule, &match);
216735a605dbSEli Britstein 		}
21688f256622SPablo Neira Ayuso 		if (match.mask->vlan_id ||
21698f256622SPablo Neira Ayuso 		    match.mask->vlan_priority ||
21708f256622SPablo Neira Ayuso 		    match.mask->vlan_tpid) {
21718f256622SPablo Neira Ayuso 			if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
2172699e96ddSJianbo Liu 				MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2173699e96ddSJianbo Liu 					 svlan_tag, 1);
2174699e96ddSJianbo Liu 				MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2175699e96ddSJianbo Liu 					 svlan_tag, 1);
2176699e96ddSJianbo Liu 			} else {
2177699e96ddSJianbo Liu 				MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2178699e96ddSJianbo Liu 					 cvlan_tag, 1);
2179699e96ddSJianbo Liu 				MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2180699e96ddSJianbo Liu 					 cvlan_tag, 1);
2181699e96ddSJianbo Liu 			}
2182095b6cfdSOr Gerlitz 
21838f256622SPablo Neira Ayuso 			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid,
21848f256622SPablo Neira Ayuso 				 match.mask->vlan_id);
21858f256622SPablo Neira Ayuso 			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid,
21868f256622SPablo Neira Ayuso 				 match.key->vlan_id);
2187358d79a4SOr Gerlitz 
21888f256622SPablo Neira Ayuso 			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio,
21898f256622SPablo Neira Ayuso 				 match.mask->vlan_priority);
21908f256622SPablo Neira Ayuso 			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio,
21918f256622SPablo Neira Ayuso 				 match.key->vlan_priority);
219254782900SOr Gerlitz 
2193d708f902SOr Gerlitz 			*match_level = MLX5_MATCH_L2;
2194095b6cfdSOr Gerlitz 		}
2195d3a80bb5SOr Gerlitz 	} else if (*match_level != MLX5_MATCH_NONE) {
2196fc603294SMark Bloch 		/* cvlan_tag enabled in match criteria and
2197fc603294SMark Bloch 		 * disabled in match value means both S & C tags
2198fc603294SMark Bloch 		 * don't exist (untagged of both)
2199fc603294SMark Bloch 		 */
2200cee26487SJianbo Liu 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
2201d3a80bb5SOr Gerlitz 		*match_level = MLX5_MATCH_L2;
2202095b6cfdSOr Gerlitz 	}
2203095b6cfdSOr Gerlitz 
22048f256622SPablo Neira Ayuso 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
22058f256622SPablo Neira Ayuso 		struct flow_match_vlan match;
22068f256622SPablo Neira Ayuso 
220712d5cbf8SJianbo Liu 		flow_rule_match_cvlan(rule, &match);
22088f256622SPablo Neira Ayuso 		if (match.mask->vlan_id ||
22098f256622SPablo Neira Ayuso 		    match.mask->vlan_priority ||
22108f256622SPablo Neira Ayuso 		    match.mask->vlan_tpid) {
22118f256622SPablo Neira Ayuso 			if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
2212699e96ddSJianbo Liu 				MLX5_SET(fte_match_set_misc, misc_c,
2213699e96ddSJianbo Liu 					 outer_second_svlan_tag, 1);
2214699e96ddSJianbo Liu 				MLX5_SET(fte_match_set_misc, misc_v,
2215699e96ddSJianbo Liu 					 outer_second_svlan_tag, 1);
2216699e96ddSJianbo Liu 			} else {
2217699e96ddSJianbo Liu 				MLX5_SET(fte_match_set_misc, misc_c,
2218699e96ddSJianbo Liu 					 outer_second_cvlan_tag, 1);
2219699e96ddSJianbo Liu 				MLX5_SET(fte_match_set_misc, misc_v,
2220699e96ddSJianbo Liu 					 outer_second_cvlan_tag, 1);
2221699e96ddSJianbo Liu 			}
2222699e96ddSJianbo Liu 
2223699e96ddSJianbo Liu 			MLX5_SET(fte_match_set_misc, misc_c, outer_second_vid,
22248f256622SPablo Neira Ayuso 				 match.mask->vlan_id);
2225699e96ddSJianbo Liu 			MLX5_SET(fte_match_set_misc, misc_v, outer_second_vid,
22268f256622SPablo Neira Ayuso 				 match.key->vlan_id);
2227699e96ddSJianbo Liu 			MLX5_SET(fte_match_set_misc, misc_c, outer_second_prio,
22288f256622SPablo Neira Ayuso 				 match.mask->vlan_priority);
2229699e96ddSJianbo Liu 			MLX5_SET(fte_match_set_misc, misc_v, outer_second_prio,
22308f256622SPablo Neira Ayuso 				 match.key->vlan_priority);
2231699e96ddSJianbo Liu 
2232699e96ddSJianbo Liu 			*match_level = MLX5_MATCH_L2;
2233699e96ddSJianbo Liu 		}
2234699e96ddSJianbo Liu 	}
2235699e96ddSJianbo Liu 
22368f256622SPablo Neira Ayuso 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
22378f256622SPablo Neira Ayuso 		struct flow_match_eth_addrs match;
223854782900SOr Gerlitz 
22398f256622SPablo Neira Ayuso 		flow_rule_match_eth_addrs(rule, &match);
2240d3a80bb5SOr Gerlitz 		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2241d3a80bb5SOr Gerlitz 					     dmac_47_16),
22428f256622SPablo Neira Ayuso 				match.mask->dst);
2243d3a80bb5SOr Gerlitz 		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2244d3a80bb5SOr Gerlitz 					     dmac_47_16),
22458f256622SPablo Neira Ayuso 				match.key->dst);
2246d3a80bb5SOr Gerlitz 
2247d3a80bb5SOr Gerlitz 		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2248d3a80bb5SOr Gerlitz 					     smac_47_16),
22498f256622SPablo Neira Ayuso 				match.mask->src);
2250d3a80bb5SOr Gerlitz 		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2251d3a80bb5SOr Gerlitz 					     smac_47_16),
22528f256622SPablo Neira Ayuso 				match.key->src);
2253d3a80bb5SOr Gerlitz 
22548f256622SPablo Neira Ayuso 		if (!is_zero_ether_addr(match.mask->src) ||
22558f256622SPablo Neira Ayuso 		    !is_zero_ether_addr(match.mask->dst))
2256d708f902SOr Gerlitz 			*match_level = MLX5_MATCH_L2;
225754782900SOr Gerlitz 	}
225854782900SOr Gerlitz 
22598f256622SPablo Neira Ayuso 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
22608f256622SPablo Neira Ayuso 		struct flow_match_control match;
226154782900SOr Gerlitz 
22628f256622SPablo Neira Ayuso 		flow_rule_match_control(rule, &match);
22638f256622SPablo Neira Ayuso 		addr_type = match.key->addr_type;
226454782900SOr Gerlitz 
226554782900SOr Gerlitz 		/* the HW doesn't support frag first/later */
22668f256622SPablo Neira Ayuso 		if (match.mask->flags & FLOW_DIS_FIRST_FRAG)
226754782900SOr Gerlitz 			return -EOPNOTSUPP;
226854782900SOr Gerlitz 
22698f256622SPablo Neira Ayuso 		if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
227054782900SOr Gerlitz 			MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
227154782900SOr Gerlitz 			MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
22728f256622SPablo Neira Ayuso 				 match.key->flags & FLOW_DIS_IS_FRAGMENT);
227354782900SOr Gerlitz 
227454782900SOr Gerlitz 			/* the HW doesn't need L3 inline to match on frag=no */
22758f256622SPablo Neira Ayuso 			if (!(match.key->flags & FLOW_DIS_IS_FRAGMENT))
227683621b7dSOr Gerlitz 				*match_level = MLX5_MATCH_L2;
227754782900SOr Gerlitz 	/* ***  L2 attributes parsing up to here *** */
227854782900SOr Gerlitz 			else
227983621b7dSOr Gerlitz 				*match_level = MLX5_MATCH_L3;
228054782900SOr Gerlitz 		}
228154782900SOr Gerlitz 	}
228254782900SOr Gerlitz 
22838f256622SPablo Neira Ayuso 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
22848f256622SPablo Neira Ayuso 		struct flow_match_basic match;
22858f256622SPablo Neira Ayuso 
22868f256622SPablo Neira Ayuso 		flow_rule_match_basic(rule, &match);
22878f256622SPablo Neira Ayuso 		ip_proto = match.key->ip_proto;
228854782900SOr Gerlitz 
228954782900SOr Gerlitz 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
22908f256622SPablo Neira Ayuso 			 match.mask->ip_proto);
229154782900SOr Gerlitz 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
22928f256622SPablo Neira Ayuso 			 match.key->ip_proto);
229354782900SOr Gerlitz 
22948f256622SPablo Neira Ayuso 		if (match.mask->ip_proto)
2295d708f902SOr Gerlitz 			*match_level = MLX5_MATCH_L3;
229654782900SOr Gerlitz 	}
229754782900SOr Gerlitz 
2298e3a2b7edSAmir Vadai 	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
22998f256622SPablo Neira Ayuso 		struct flow_match_ipv4_addrs match;
2300e3a2b7edSAmir Vadai 
23018f256622SPablo Neira Ayuso 		flow_rule_match_ipv4_addrs(rule, &match);
2302e3a2b7edSAmir Vadai 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2303e3a2b7edSAmir Vadai 				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
23048f256622SPablo Neira Ayuso 		       &match.mask->src, sizeof(match.mask->src));
2305e3a2b7edSAmir Vadai 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2306e3a2b7edSAmir Vadai 				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
23078f256622SPablo Neira Ayuso 		       &match.key->src, sizeof(match.key->src));
2308e3a2b7edSAmir Vadai 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2309e3a2b7edSAmir Vadai 				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
23108f256622SPablo Neira Ayuso 		       &match.mask->dst, sizeof(match.mask->dst));
2311e3a2b7edSAmir Vadai 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2312e3a2b7edSAmir Vadai 				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
23138f256622SPablo Neira Ayuso 		       &match.key->dst, sizeof(match.key->dst));
2314de0af0bfSRoi Dayan 
23158f256622SPablo Neira Ayuso 		if (match.mask->src || match.mask->dst)
2316d708f902SOr Gerlitz 			*match_level = MLX5_MATCH_L3;
2317e3a2b7edSAmir Vadai 	}
2318e3a2b7edSAmir Vadai 
2319e3a2b7edSAmir Vadai 	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
23208f256622SPablo Neira Ayuso 		struct flow_match_ipv6_addrs match;
2321e3a2b7edSAmir Vadai 
23228f256622SPablo Neira Ayuso 		flow_rule_match_ipv6_addrs(rule, &match);
2323e3a2b7edSAmir Vadai 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2324e3a2b7edSAmir Vadai 				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
23258f256622SPablo Neira Ayuso 		       &match.mask->src, sizeof(match.mask->src));
2326e3a2b7edSAmir Vadai 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2327e3a2b7edSAmir Vadai 				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
23288f256622SPablo Neira Ayuso 		       &match.key->src, sizeof(match.key->src));
2329e3a2b7edSAmir Vadai 
2330e3a2b7edSAmir Vadai 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2331e3a2b7edSAmir Vadai 				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
23328f256622SPablo Neira Ayuso 		       &match.mask->dst, sizeof(match.mask->dst));
2333e3a2b7edSAmir Vadai 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2334e3a2b7edSAmir Vadai 				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
23358f256622SPablo Neira Ayuso 		       &match.key->dst, sizeof(match.key->dst));
2336de0af0bfSRoi Dayan 
23378f256622SPablo Neira Ayuso 		if (ipv6_addr_type(&match.mask->src) != IPV6_ADDR_ANY ||
23388f256622SPablo Neira Ayuso 		    ipv6_addr_type(&match.mask->dst) != IPV6_ADDR_ANY)
2339d708f902SOr Gerlitz 			*match_level = MLX5_MATCH_L3;
2340e3a2b7edSAmir Vadai 	}
2341e3a2b7edSAmir Vadai 
23428f256622SPablo Neira Ayuso 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
23438f256622SPablo Neira Ayuso 		struct flow_match_ip match;
23441f97a526SOr Gerlitz 
23458f256622SPablo Neira Ayuso 		flow_rule_match_ip(rule, &match);
23468f256622SPablo Neira Ayuso 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn,
23478f256622SPablo Neira Ayuso 			 match.mask->tos & 0x3);
23488f256622SPablo Neira Ayuso 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn,
23498f256622SPablo Neira Ayuso 			 match.key->tos & 0x3);
23501f97a526SOr Gerlitz 
23518f256622SPablo Neira Ayuso 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp,
23528f256622SPablo Neira Ayuso 			 match.mask->tos >> 2);
23538f256622SPablo Neira Ayuso 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp,
23548f256622SPablo Neira Ayuso 			 match.key->tos  >> 2);
23551f97a526SOr Gerlitz 
23568f256622SPablo Neira Ayuso 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit,
23578f256622SPablo Neira Ayuso 			 match.mask->ttl);
23588f256622SPablo Neira Ayuso 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit,
23598f256622SPablo Neira Ayuso 			 match.key->ttl);
23601f97a526SOr Gerlitz 
23618f256622SPablo Neira Ayuso 		if (match.mask->ttl &&
2362a8ade55fSOr Gerlitz 		    !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
2363e98bedf5SEli Britstein 						ft_field_support.outer_ipv4_ttl)) {
2364e98bedf5SEli Britstein 			NL_SET_ERR_MSG_MOD(extack,
2365e98bedf5SEli Britstein 					   "Matching on TTL is not supported");
23661f97a526SOr Gerlitz 			return -EOPNOTSUPP;
2367e98bedf5SEli Britstein 		}
2368a8ade55fSOr Gerlitz 
23698f256622SPablo Neira Ayuso 		if (match.mask->tos || match.mask->ttl)
2370d708f902SOr Gerlitz 			*match_level = MLX5_MATCH_L3;
23711f97a526SOr Gerlitz 	}
23721f97a526SOr Gerlitz 
237354782900SOr Gerlitz 	/* ***  L3 attributes parsing up to here *** */
237454782900SOr Gerlitz 
23758f256622SPablo Neira Ayuso 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
23768f256622SPablo Neira Ayuso 		struct flow_match_ports match;
23778f256622SPablo Neira Ayuso 
23788f256622SPablo Neira Ayuso 		flow_rule_match_ports(rule, &match);
2379e3a2b7edSAmir Vadai 		switch (ip_proto) {
2380e3a2b7edSAmir Vadai 		case IPPROTO_TCP:
2381e3a2b7edSAmir Vadai 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
23828f256622SPablo Neira Ayuso 				 tcp_sport, ntohs(match.mask->src));
2383e3a2b7edSAmir Vadai 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
23848f256622SPablo Neira Ayuso 				 tcp_sport, ntohs(match.key->src));
2385e3a2b7edSAmir Vadai 
2386e3a2b7edSAmir Vadai 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
23878f256622SPablo Neira Ayuso 				 tcp_dport, ntohs(match.mask->dst));
2388e3a2b7edSAmir Vadai 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
23898f256622SPablo Neira Ayuso 				 tcp_dport, ntohs(match.key->dst));
2390e3a2b7edSAmir Vadai 			break;
2391e3a2b7edSAmir Vadai 
2392e3a2b7edSAmir Vadai 		case IPPROTO_UDP:
2393e3a2b7edSAmir Vadai 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
23948f256622SPablo Neira Ayuso 				 udp_sport, ntohs(match.mask->src));
2395e3a2b7edSAmir Vadai 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
23968f256622SPablo Neira Ayuso 				 udp_sport, ntohs(match.key->src));
2397e3a2b7edSAmir Vadai 
2398e3a2b7edSAmir Vadai 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
23998f256622SPablo Neira Ayuso 				 udp_dport, ntohs(match.mask->dst));
2400e3a2b7edSAmir Vadai 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
24018f256622SPablo Neira Ayuso 				 udp_dport, ntohs(match.key->dst));
2402e3a2b7edSAmir Vadai 			break;
2403e3a2b7edSAmir Vadai 		default:
2404e98bedf5SEli Britstein 			NL_SET_ERR_MSG_MOD(extack,
2405e98bedf5SEli Britstein 					   "Only UDP and TCP transports are supported for L4 matching");
2406e3a2b7edSAmir Vadai 			netdev_err(priv->netdev,
2407e3a2b7edSAmir Vadai 				   "Only UDP and TCP transport are supported\n");
2408e3a2b7edSAmir Vadai 			return -EINVAL;
2409e3a2b7edSAmir Vadai 		}
2410de0af0bfSRoi Dayan 
24118f256622SPablo Neira Ayuso 		if (match.mask->src || match.mask->dst)
2412d708f902SOr Gerlitz 			*match_level = MLX5_MATCH_L4;
2413e3a2b7edSAmir Vadai 	}
2414e3a2b7edSAmir Vadai 
24158f256622SPablo Neira Ayuso 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
24168f256622SPablo Neira Ayuso 		struct flow_match_tcp match;
2417e77834ecSOr Gerlitz 
24188f256622SPablo Neira Ayuso 		flow_rule_match_tcp(rule, &match);
2419e77834ecSOr Gerlitz 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
24208f256622SPablo Neira Ayuso 			 ntohs(match.mask->flags));
2421e77834ecSOr Gerlitz 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
24228f256622SPablo Neira Ayuso 			 ntohs(match.key->flags));
2423e77834ecSOr Gerlitz 
24248f256622SPablo Neira Ayuso 		if (match.mask->flags)
2425d708f902SOr Gerlitz 			*match_level = MLX5_MATCH_L4;
2426e77834ecSOr Gerlitz 	}
2427e77834ecSOr Gerlitz 
2428e3a2b7edSAmir Vadai 	return 0;
2429e3a2b7edSAmir Vadai }
2430e3a2b7edSAmir Vadai 
2431de0af0bfSRoi Dayan static int parse_cls_flower(struct mlx5e_priv *priv,
243265ba8fb7SOr Gerlitz 			    struct mlx5e_tc_flow *flow,
2433de0af0bfSRoi Dayan 			    struct mlx5_flow_spec *spec,
2434f9e30088SPablo Neira Ayuso 			    struct flow_cls_offload *f,
243554c177caSOz Shlomo 			    struct net_device *filter_dev)
2436de0af0bfSRoi Dayan {
243793b3586eSHuy Nguyen 	u8 inner_match_level, outer_match_level, non_tunnel_match_level;
2438e98bedf5SEli Britstein 	struct netlink_ext_ack *extack = f->common.extack;
2439de0af0bfSRoi Dayan 	struct mlx5_core_dev *dev = priv->mdev;
2440de0af0bfSRoi Dayan 	struct mlx5_eswitch *esw = dev->priv.eswitch;
24411d447a39SSaeed Mahameed 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
24421d447a39SSaeed Mahameed 	struct mlx5_eswitch_rep *rep;
2443226f2ca3SVlad Buslov 	bool is_eswitch_flow;
2444de0af0bfSRoi Dayan 	int err;
2445de0af0bfSRoi Dayan 
244693b3586eSHuy Nguyen 	inner_match_level = MLX5_MATCH_NONE;
244793b3586eSHuy Nguyen 	outer_match_level = MLX5_MATCH_NONE;
244893b3586eSHuy Nguyen 
24490a7fcb78SPaul Blakey 	err = __parse_cls_flower(priv, flow, spec, f, filter_dev,
24500a7fcb78SPaul Blakey 				 &inner_match_level, &outer_match_level);
245193b3586eSHuy Nguyen 	non_tunnel_match_level = (inner_match_level == MLX5_MATCH_NONE) ?
245293b3586eSHuy Nguyen 				 outer_match_level : inner_match_level;
2453de0af0bfSRoi Dayan 
2454226f2ca3SVlad Buslov 	is_eswitch_flow = mlx5e_is_eswitch_flow(flow);
2455226f2ca3SVlad Buslov 	if (!err && is_eswitch_flow) {
24561d447a39SSaeed Mahameed 		rep = rpriv->rep;
2457b05af6aaSBodong Wang 		if (rep->vport != MLX5_VPORT_UPLINK &&
24581d447a39SSaeed Mahameed 		    (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
245993b3586eSHuy Nguyen 		    esw->offloads.inline_mode < non_tunnel_match_level)) {
2460e98bedf5SEli Britstein 			NL_SET_ERR_MSG_MOD(extack,
2461e98bedf5SEli Britstein 					   "Flow is not offloaded due to min inline setting");
2462de0af0bfSRoi Dayan 			netdev_warn(priv->netdev,
2463de0af0bfSRoi Dayan 				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
246493b3586eSHuy Nguyen 				    non_tunnel_match_level, esw->offloads.inline_mode);
2465de0af0bfSRoi Dayan 			return -EOPNOTSUPP;
2466de0af0bfSRoi Dayan 		}
2467de0af0bfSRoi Dayan 	}
2468de0af0bfSRoi Dayan 
2469226f2ca3SVlad Buslov 	if (is_eswitch_flow) {
247093b3586eSHuy Nguyen 		flow->esw_attr->inner_match_level = inner_match_level;
247193b3586eSHuy Nguyen 		flow->esw_attr->outer_match_level = outer_match_level;
24726363651dSOr Gerlitz 	} else {
247393b3586eSHuy Nguyen 		flow->nic_attr->match_level = non_tunnel_match_level;
24746363651dSOr Gerlitz 	}
247538aa51c1SOr Gerlitz 
2476de0af0bfSRoi Dayan 	return err;
2477de0af0bfSRoi Dayan }
2478de0af0bfSRoi Dayan 
2479d79b6df6SOr Gerlitz struct pedit_headers {
2480d79b6df6SOr Gerlitz 	struct ethhdr  eth;
24810eb69bb9SEli Britstein 	struct vlan_hdr vlan;
2482d79b6df6SOr Gerlitz 	struct iphdr   ip4;
2483d79b6df6SOr Gerlitz 	struct ipv6hdr ip6;
2484d79b6df6SOr Gerlitz 	struct tcphdr  tcp;
2485d79b6df6SOr Gerlitz 	struct udphdr  udp;
2486d79b6df6SOr Gerlitz };
2487d79b6df6SOr Gerlitz 
2488c500c86bSPablo Neira Ayuso struct pedit_headers_action {
2489c500c86bSPablo Neira Ayuso 	struct pedit_headers	vals;
2490c500c86bSPablo Neira Ayuso 	struct pedit_headers	masks;
2491c500c86bSPablo Neira Ayuso 	u32			pedits;
2492c500c86bSPablo Neira Ayuso };
2493c500c86bSPablo Neira Ayuso 
2494d79b6df6SOr Gerlitz static int pedit_header_offsets[] = {
249573867881SPablo Neira Ayuso 	[FLOW_ACT_MANGLE_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
249673867881SPablo Neira Ayuso 	[FLOW_ACT_MANGLE_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
249773867881SPablo Neira Ayuso 	[FLOW_ACT_MANGLE_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
249873867881SPablo Neira Ayuso 	[FLOW_ACT_MANGLE_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
249973867881SPablo Neira Ayuso 	[FLOW_ACT_MANGLE_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
2500d79b6df6SOr Gerlitz };
2501d79b6df6SOr Gerlitz 
2502d79b6df6SOr Gerlitz #define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])
2503d79b6df6SOr Gerlitz 
2504d79b6df6SOr Gerlitz static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
2505c500c86bSPablo Neira Ayuso 			 struct pedit_headers_action *hdrs)
2506d79b6df6SOr Gerlitz {
2507d79b6df6SOr Gerlitz 	u32 *curr_pmask, *curr_pval;
2508d79b6df6SOr Gerlitz 
2509c500c86bSPablo Neira Ayuso 	curr_pmask = (u32 *)(pedit_header(&hdrs->masks, hdr_type) + offset);
2510c500c86bSPablo Neira Ayuso 	curr_pval  = (u32 *)(pedit_header(&hdrs->vals, hdr_type) + offset);
2511d79b6df6SOr Gerlitz 
2512d79b6df6SOr Gerlitz 	if (*curr_pmask & mask)  /* disallow acting twice on the same location */
2513d79b6df6SOr Gerlitz 		goto out_err;
2514d79b6df6SOr Gerlitz 
2515d79b6df6SOr Gerlitz 	*curr_pmask |= mask;
2516d79b6df6SOr Gerlitz 	*curr_pval  |= (val & mask);
2517d79b6df6SOr Gerlitz 
2518d79b6df6SOr Gerlitz 	return 0;
2519d79b6df6SOr Gerlitz 
2520d79b6df6SOr Gerlitz out_err:
2521d79b6df6SOr Gerlitz 	return -EOPNOTSUPP;
2522d79b6df6SOr Gerlitz }
2523d79b6df6SOr Gerlitz 
2524d79b6df6SOr Gerlitz struct mlx5_fields {
2525d79b6df6SOr Gerlitz 	u8  field;
252688f30bbcSDmytro Linkin 	u8  field_bsize;
252788f30bbcSDmytro Linkin 	u32 field_mask;
2528d79b6df6SOr Gerlitz 	u32 offset;
252927c11b6bSEli Britstein 	u32 match_offset;
2530d79b6df6SOr Gerlitz };
2531d79b6df6SOr Gerlitz 
253288f30bbcSDmytro Linkin #define OFFLOAD(fw_field, field_bsize, field_mask, field, off, match_field) \
253388f30bbcSDmytro Linkin 		{MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, field_bsize, field_mask, \
253427c11b6bSEli Britstein 		 offsetof(struct pedit_headers, field) + (off), \
253527c11b6bSEli Britstein 		 MLX5_BYTE_OFF(fte_match_set_lyr_2_4, match_field)}
253627c11b6bSEli Britstein 
25372ef86872SEli Britstein /* masked values are the same and there are no rewrites that do not have a
25382ef86872SEli Britstein  * match.
25392ef86872SEli Britstein  */
25402ef86872SEli Britstein #define SAME_VAL_MASK(type, valp, maskp, matchvalp, matchmaskp) ({ \
25412ef86872SEli Britstein 	type matchmaskx = *(type *)(matchmaskp); \
25422ef86872SEli Britstein 	type matchvalx = *(type *)(matchvalp); \
25432ef86872SEli Britstein 	type maskx = *(type *)(maskp); \
25442ef86872SEli Britstein 	type valx = *(type *)(valp); \
25452ef86872SEli Britstein 	\
25462ef86872SEli Britstein 	(valx & maskx) == (matchvalx & matchmaskx) && !(maskx & (maskx ^ \
25472ef86872SEli Britstein 								 matchmaskx)); \
25482ef86872SEli Britstein })
25492ef86872SEli Britstein 
255027c11b6bSEli Britstein static bool cmp_val_mask(void *valp, void *maskp, void *matchvalp,
255188f30bbcSDmytro Linkin 			 void *matchmaskp, u8 bsize)
255227c11b6bSEli Britstein {
255327c11b6bSEli Britstein 	bool same = false;
255427c11b6bSEli Britstein 
255588f30bbcSDmytro Linkin 	switch (bsize) {
255688f30bbcSDmytro Linkin 	case 8:
25572ef86872SEli Britstein 		same = SAME_VAL_MASK(u8, valp, maskp, matchvalp, matchmaskp);
255827c11b6bSEli Britstein 		break;
255988f30bbcSDmytro Linkin 	case 16:
25602ef86872SEli Britstein 		same = SAME_VAL_MASK(u16, valp, maskp, matchvalp, matchmaskp);
256127c11b6bSEli Britstein 		break;
256288f30bbcSDmytro Linkin 	case 32:
25632ef86872SEli Britstein 		same = SAME_VAL_MASK(u32, valp, maskp, matchvalp, matchmaskp);
256427c11b6bSEli Britstein 		break;
256527c11b6bSEli Britstein 	}
256627c11b6bSEli Britstein 
256727c11b6bSEli Britstein 	return same;
256827c11b6bSEli Britstein }
2569a8e4f0c4SOr Gerlitz 
2570d79b6df6SOr Gerlitz static struct mlx5_fields fields[] = {
257188f30bbcSDmytro Linkin 	OFFLOAD(DMAC_47_16, 32, U32_MAX, eth.h_dest[0], 0, dmac_47_16),
257288f30bbcSDmytro Linkin 	OFFLOAD(DMAC_15_0,  16, U16_MAX, eth.h_dest[4], 0, dmac_15_0),
257388f30bbcSDmytro Linkin 	OFFLOAD(SMAC_47_16, 32, U32_MAX, eth.h_source[0], 0, smac_47_16),
257488f30bbcSDmytro Linkin 	OFFLOAD(SMAC_15_0,  16, U16_MAX, eth.h_source[4], 0, smac_15_0),
257588f30bbcSDmytro Linkin 	OFFLOAD(ETHERTYPE,  16, U16_MAX, eth.h_proto, 0, ethertype),
257688f30bbcSDmytro Linkin 	OFFLOAD(FIRST_VID,  16, U16_MAX, vlan.h_vlan_TCI, 0, first_vid),
2577d79b6df6SOr Gerlitz 
2578ab9341b5SDmytro Linkin 	OFFLOAD(IP_DSCP, 8,    0xfc, ip4.tos,   0, ip_dscp),
257988f30bbcSDmytro Linkin 	OFFLOAD(IP_TTL,  8,  U8_MAX, ip4.ttl,   0, ttl_hoplimit),
258088f30bbcSDmytro Linkin 	OFFLOAD(SIPV4,  32, U32_MAX, ip4.saddr, 0, src_ipv4_src_ipv6.ipv4_layout.ipv4),
258188f30bbcSDmytro Linkin 	OFFLOAD(DIPV4,  32, U32_MAX, ip4.daddr, 0, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2582d79b6df6SOr Gerlitz 
258388f30bbcSDmytro Linkin 	OFFLOAD(SIPV6_127_96, 32, U32_MAX, ip6.saddr.s6_addr32[0], 0,
258427c11b6bSEli Britstein 		src_ipv4_src_ipv6.ipv6_layout.ipv6[0]),
258588f30bbcSDmytro Linkin 	OFFLOAD(SIPV6_95_64,  32, U32_MAX, ip6.saddr.s6_addr32[1], 0,
258627c11b6bSEli Britstein 		src_ipv4_src_ipv6.ipv6_layout.ipv6[4]),
258788f30bbcSDmytro Linkin 	OFFLOAD(SIPV6_63_32,  32, U32_MAX, ip6.saddr.s6_addr32[2], 0,
258827c11b6bSEli Britstein 		src_ipv4_src_ipv6.ipv6_layout.ipv6[8]),
258988f30bbcSDmytro Linkin 	OFFLOAD(SIPV6_31_0,   32, U32_MAX, ip6.saddr.s6_addr32[3], 0,
259027c11b6bSEli Britstein 		src_ipv4_src_ipv6.ipv6_layout.ipv6[12]),
259188f30bbcSDmytro Linkin 	OFFLOAD(DIPV6_127_96, 32, U32_MAX, ip6.daddr.s6_addr32[0], 0,
259227c11b6bSEli Britstein 		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[0]),
259388f30bbcSDmytro Linkin 	OFFLOAD(DIPV6_95_64,  32, U32_MAX, ip6.daddr.s6_addr32[1], 0,
259427c11b6bSEli Britstein 		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[4]),
259588f30bbcSDmytro Linkin 	OFFLOAD(DIPV6_63_32,  32, U32_MAX, ip6.daddr.s6_addr32[2], 0,
259627c11b6bSEli Britstein 		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[8]),
259788f30bbcSDmytro Linkin 	OFFLOAD(DIPV6_31_0,   32, U32_MAX, ip6.daddr.s6_addr32[3], 0,
259827c11b6bSEli Britstein 		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]),
259988f30bbcSDmytro Linkin 	OFFLOAD(IPV6_HOPLIMIT, 8,  U8_MAX, ip6.hop_limit, 0, ttl_hoplimit),
2600d79b6df6SOr Gerlitz 
260188f30bbcSDmytro Linkin 	OFFLOAD(TCP_SPORT, 16, U16_MAX, tcp.source,  0, tcp_sport),
260288f30bbcSDmytro Linkin 	OFFLOAD(TCP_DPORT, 16, U16_MAX, tcp.dest,    0, tcp_dport),
260388f30bbcSDmytro Linkin 	/* in linux iphdr tcp_flags is 8 bits long */
260488f30bbcSDmytro Linkin 	OFFLOAD(TCP_FLAGS,  8,  U8_MAX, tcp.ack_seq, 5, tcp_flags),
2605d79b6df6SOr Gerlitz 
260688f30bbcSDmytro Linkin 	OFFLOAD(UDP_SPORT, 16, U16_MAX, udp.source, 0, udp_sport),
260788f30bbcSDmytro Linkin 	OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest,   0, udp_dport),
2608d79b6df6SOr Gerlitz };
2609d79b6df6SOr Gerlitz 
26106ae4a6a5SPaul Blakey static int offload_pedit_fields(struct mlx5e_priv *priv,
26116ae4a6a5SPaul Blakey 				int namespace,
26126ae4a6a5SPaul Blakey 				struct pedit_headers_action *hdrs,
2613e98bedf5SEli Britstein 				struct mlx5e_tc_flow_parse_attr *parse_attr,
261427c11b6bSEli Britstein 				u32 *action_flags,
2615e98bedf5SEli Britstein 				struct netlink_ext_ack *extack)
2616d79b6df6SOr Gerlitz {
2617d79b6df6SOr Gerlitz 	struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
26186ae4a6a5SPaul Blakey 	int i, action_size, first, last, next_z;
261988f30bbcSDmytro Linkin 	void *headers_c, *headers_v, *action, *vals_p;
262088f30bbcSDmytro Linkin 	u32 *s_masks_p, *a_masks_p, s_mask, a_mask;
26216ae4a6a5SPaul Blakey 	struct mlx5e_tc_mod_hdr_acts *mod_acts;
2622d79b6df6SOr Gerlitz 	struct mlx5_fields *f;
2623d79b6df6SOr Gerlitz 	unsigned long mask;
26242b64bebaSOr Gerlitz 	__be32 mask_be32;
26252b64bebaSOr Gerlitz 	__be16 mask_be16;
26266ae4a6a5SPaul Blakey 	int err;
262788f30bbcSDmytro Linkin 	u8 cmd;
262888f30bbcSDmytro Linkin 
26296ae4a6a5SPaul Blakey 	mod_acts = &parse_attr->mod_hdr_acts;
263088f30bbcSDmytro Linkin 	headers_c = get_match_headers_criteria(*action_flags, &parse_attr->spec);
263188f30bbcSDmytro Linkin 	headers_v = get_match_headers_value(*action_flags, &parse_attr->spec);
2632d79b6df6SOr Gerlitz 
263373867881SPablo Neira Ayuso 	set_masks = &hdrs[0].masks;
263473867881SPablo Neira Ayuso 	add_masks = &hdrs[1].masks;
263573867881SPablo Neira Ayuso 	set_vals = &hdrs[0].vals;
263673867881SPablo Neira Ayuso 	add_vals = &hdrs[1].vals;
2637d79b6df6SOr Gerlitz 
2638d79b6df6SOr Gerlitz 	action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
2639d79b6df6SOr Gerlitz 
2640d79b6df6SOr Gerlitz 	for (i = 0; i < ARRAY_SIZE(fields); i++) {
264127c11b6bSEli Britstein 		bool skip;
264227c11b6bSEli Britstein 
2643d79b6df6SOr Gerlitz 		f = &fields[i];
2644d79b6df6SOr Gerlitz 		/* avoid seeing bits set from previous iterations */
2645e3ca4e05SOr Gerlitz 		s_mask = 0;
2646e3ca4e05SOr Gerlitz 		a_mask = 0;
2647d79b6df6SOr Gerlitz 
2648d79b6df6SOr Gerlitz 		s_masks_p = (void *)set_masks + f->offset;
2649d79b6df6SOr Gerlitz 		a_masks_p = (void *)add_masks + f->offset;
2650d79b6df6SOr Gerlitz 
265188f30bbcSDmytro Linkin 		s_mask = *s_masks_p & f->field_mask;
265288f30bbcSDmytro Linkin 		a_mask = *a_masks_p & f->field_mask;
2653d79b6df6SOr Gerlitz 
2654d79b6df6SOr Gerlitz 		if (!s_mask && !a_mask) /* nothing to offload here */
2655d79b6df6SOr Gerlitz 			continue;
2656d79b6df6SOr Gerlitz 
2657d79b6df6SOr Gerlitz 		if (s_mask && a_mask) {
2658e98bedf5SEli Britstein 			NL_SET_ERR_MSG_MOD(extack,
2659e98bedf5SEli Britstein 					   "can't set and add to the same HW field");
2660d79b6df6SOr Gerlitz 			printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field);
2661d79b6df6SOr Gerlitz 			return -EOPNOTSUPP;
2662d79b6df6SOr Gerlitz 		}
2663d79b6df6SOr Gerlitz 
266427c11b6bSEli Britstein 		skip = false;
2665d79b6df6SOr Gerlitz 		if (s_mask) {
266627c11b6bSEli Britstein 			void *match_mask = headers_c + f->match_offset;
266727c11b6bSEli Britstein 			void *match_val = headers_v + f->match_offset;
266827c11b6bSEli Britstein 
2669d79b6df6SOr Gerlitz 			cmd  = MLX5_ACTION_TYPE_SET;
2670d79b6df6SOr Gerlitz 			mask = s_mask;
2671d79b6df6SOr Gerlitz 			vals_p = (void *)set_vals + f->offset;
267227c11b6bSEli Britstein 			/* don't rewrite if we have a match on the same value */
267327c11b6bSEli Britstein 			if (cmp_val_mask(vals_p, s_masks_p, match_val,
267488f30bbcSDmytro Linkin 					 match_mask, f->field_bsize))
267527c11b6bSEli Britstein 				skip = true;
2676d79b6df6SOr Gerlitz 			/* clear to denote we consumed this field */
267788f30bbcSDmytro Linkin 			*s_masks_p &= ~f->field_mask;
2678d79b6df6SOr Gerlitz 		} else {
2679d79b6df6SOr Gerlitz 			cmd  = MLX5_ACTION_TYPE_ADD;
2680d79b6df6SOr Gerlitz 			mask = a_mask;
2681d79b6df6SOr Gerlitz 			vals_p = (void *)add_vals + f->offset;
268227c11b6bSEli Britstein 			/* add 0 is no change */
268388f30bbcSDmytro Linkin 			if ((*(u32 *)vals_p & f->field_mask) == 0)
268427c11b6bSEli Britstein 				skip = true;
2685d79b6df6SOr Gerlitz 			/* clear to denote we consumed this field */
268688f30bbcSDmytro Linkin 			*a_masks_p &= ~f->field_mask;
2687d79b6df6SOr Gerlitz 		}
268827c11b6bSEli Britstein 		if (skip)
268927c11b6bSEli Britstein 			continue;
2690d79b6df6SOr Gerlitz 
269188f30bbcSDmytro Linkin 		if (f->field_bsize == 32) {
26922b64bebaSOr Gerlitz 			mask_be32 = *(__be32 *)&mask;
26932b64bebaSOr Gerlitz 			mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
269488f30bbcSDmytro Linkin 		} else if (f->field_bsize == 16) {
26952b64bebaSOr Gerlitz 			mask_be16 = *(__be16 *)&mask;
26962b64bebaSOr Gerlitz 			mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
26972b64bebaSOr Gerlitz 		}
26982b64bebaSOr Gerlitz 
269988f30bbcSDmytro Linkin 		first = find_first_bit(&mask, f->field_bsize);
270088f30bbcSDmytro Linkin 		next_z = find_next_zero_bit(&mask, f->field_bsize, first);
270188f30bbcSDmytro Linkin 		last  = find_last_bit(&mask, f->field_bsize);
27022b64bebaSOr Gerlitz 		if (first < next_z && next_z < last) {
2703e98bedf5SEli Britstein 			NL_SET_ERR_MSG_MOD(extack,
2704e98bedf5SEli Britstein 					   "rewrite of few sub-fields isn't supported");
27052b64bebaSOr Gerlitz 			printk(KERN_WARNING "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n",
2706d79b6df6SOr Gerlitz 			       mask);
2707d79b6df6SOr Gerlitz 			return -EOPNOTSUPP;
2708d79b6df6SOr Gerlitz 		}
2709d79b6df6SOr Gerlitz 
27106ae4a6a5SPaul Blakey 		err = alloc_mod_hdr_actions(priv->mdev, namespace, mod_acts);
27116ae4a6a5SPaul Blakey 		if (err) {
27126ae4a6a5SPaul Blakey 			NL_SET_ERR_MSG_MOD(extack,
27136ae4a6a5SPaul Blakey 					   "too many pedit actions, can't offload");
27146ae4a6a5SPaul Blakey 			mlx5_core_warn(priv->mdev,
27156ae4a6a5SPaul Blakey 				       "mlx5: parsed %d pedit actions, can't do more\n",
27166ae4a6a5SPaul Blakey 				       mod_acts->num_actions);
27176ae4a6a5SPaul Blakey 			return err;
27186ae4a6a5SPaul Blakey 		}
27196ae4a6a5SPaul Blakey 
27206ae4a6a5SPaul Blakey 		action = mod_acts->actions +
27216ae4a6a5SPaul Blakey 			 (mod_acts->num_actions * action_size);
2722d79b6df6SOr Gerlitz 		MLX5_SET(set_action_in, action, action_type, cmd);
2723d79b6df6SOr Gerlitz 		MLX5_SET(set_action_in, action, field, f->field);
2724d79b6df6SOr Gerlitz 
2725d79b6df6SOr Gerlitz 		if (cmd == MLX5_ACTION_TYPE_SET) {
272688f30bbcSDmytro Linkin 			int start;
272788f30bbcSDmytro Linkin 
272888f30bbcSDmytro Linkin 			/* if field is bit sized it can start not from first bit */
272988f30bbcSDmytro Linkin 			start = find_first_bit((unsigned long *)&f->field_mask,
273088f30bbcSDmytro Linkin 					       f->field_bsize);
273188f30bbcSDmytro Linkin 
273288f30bbcSDmytro Linkin 			MLX5_SET(set_action_in, action, offset, first - start);
2733d79b6df6SOr Gerlitz 			/* length is num of bits to be written, zero means length of 32 */
27342b64bebaSOr Gerlitz 			MLX5_SET(set_action_in, action, length, (last - first + 1));
2735d79b6df6SOr Gerlitz 		}
2736d79b6df6SOr Gerlitz 
273788f30bbcSDmytro Linkin 		if (f->field_bsize == 32)
27382b64bebaSOr Gerlitz 			MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p) >> first);
273988f30bbcSDmytro Linkin 		else if (f->field_bsize == 16)
27402b64bebaSOr Gerlitz 			MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p) >> first);
274188f30bbcSDmytro Linkin 		else if (f->field_bsize == 8)
27422b64bebaSOr Gerlitz 			MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first);
2743d79b6df6SOr Gerlitz 
27446ae4a6a5SPaul Blakey 		++mod_acts->num_actions;
2745d79b6df6SOr Gerlitz 	}
2746d79b6df6SOr Gerlitz 
2747d79b6df6SOr Gerlitz 	return 0;
2748d79b6df6SOr Gerlitz }
2749d79b6df6SOr Gerlitz 
27502cc1cb1dSTonghao Zhang static int mlx5e_flow_namespace_max_modify_action(struct mlx5_core_dev *mdev,
27512cc1cb1dSTonghao Zhang 						  int namespace)
27522cc1cb1dSTonghao Zhang {
27532cc1cb1dSTonghao Zhang 	if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
27542cc1cb1dSTonghao Zhang 		return MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, max_modify_header_actions);
27552cc1cb1dSTonghao Zhang 	else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
27562cc1cb1dSTonghao Zhang 		return MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_modify_header_actions);
27572cc1cb1dSTonghao Zhang }
27582cc1cb1dSTonghao Zhang 
27596ae4a6a5SPaul Blakey int alloc_mod_hdr_actions(struct mlx5_core_dev *mdev,
2760c500c86bSPablo Neira Ayuso 			  int namespace,
27616ae4a6a5SPaul Blakey 			  struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
2762d79b6df6SOr Gerlitz {
27636ae4a6a5SPaul Blakey 	int action_size, new_num_actions, max_hw_actions;
27646ae4a6a5SPaul Blakey 	size_t new_sz, old_sz;
27656ae4a6a5SPaul Blakey 	void *ret;
2766d79b6df6SOr Gerlitz 
27676ae4a6a5SPaul Blakey 	if (mod_hdr_acts->num_actions < mod_hdr_acts->max_actions)
27686ae4a6a5SPaul Blakey 		return 0;
27696ae4a6a5SPaul Blakey 
2770d79b6df6SOr Gerlitz 	action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
2771d79b6df6SOr Gerlitz 
27726ae4a6a5SPaul Blakey 	max_hw_actions = mlx5e_flow_namespace_max_modify_action(mdev,
27736ae4a6a5SPaul Blakey 								namespace);
27746ae4a6a5SPaul Blakey 	new_num_actions = min(max_hw_actions,
27756ae4a6a5SPaul Blakey 			      mod_hdr_acts->actions ?
27766ae4a6a5SPaul Blakey 			      mod_hdr_acts->max_actions * 2 : 1);
27776ae4a6a5SPaul Blakey 	if (mod_hdr_acts->max_actions == new_num_actions)
27786ae4a6a5SPaul Blakey 		return -ENOSPC;
2779d79b6df6SOr Gerlitz 
27806ae4a6a5SPaul Blakey 	new_sz = action_size * new_num_actions;
27816ae4a6a5SPaul Blakey 	old_sz = mod_hdr_acts->max_actions * action_size;
27826ae4a6a5SPaul Blakey 	ret = krealloc(mod_hdr_acts->actions, new_sz, GFP_KERNEL);
27836ae4a6a5SPaul Blakey 	if (!ret)
2784d79b6df6SOr Gerlitz 		return -ENOMEM;
2785d79b6df6SOr Gerlitz 
27866ae4a6a5SPaul Blakey 	memset(ret + old_sz, 0, new_sz - old_sz);
27876ae4a6a5SPaul Blakey 	mod_hdr_acts->actions = ret;
27886ae4a6a5SPaul Blakey 	mod_hdr_acts->max_actions = new_num_actions;
27896ae4a6a5SPaul Blakey 
2790d79b6df6SOr Gerlitz 	return 0;
2791d79b6df6SOr Gerlitz }
2792d79b6df6SOr Gerlitz 
27936ae4a6a5SPaul Blakey void dealloc_mod_hdr_actions(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
27946ae4a6a5SPaul Blakey {
27956ae4a6a5SPaul Blakey 	kfree(mod_hdr_acts->actions);
27966ae4a6a5SPaul Blakey 	mod_hdr_acts->actions = NULL;
27976ae4a6a5SPaul Blakey 	mod_hdr_acts->num_actions = 0;
27986ae4a6a5SPaul Blakey 	mod_hdr_acts->max_actions = 0;
27996ae4a6a5SPaul Blakey }
28006ae4a6a5SPaul Blakey 
2801d79b6df6SOr Gerlitz static const struct pedit_headers zero_masks = {};
2802d79b6df6SOr Gerlitz 
2803d79b6df6SOr Gerlitz static int parse_tc_pedit_action(struct mlx5e_priv *priv,
280473867881SPablo Neira Ayuso 				 const struct flow_action_entry *act, int namespace,
2805e98bedf5SEli Britstein 				 struct mlx5e_tc_flow_parse_attr *parse_attr,
2806c500c86bSPablo Neira Ayuso 				 struct pedit_headers_action *hdrs,
2807e98bedf5SEli Britstein 				 struct netlink_ext_ack *extack)
2808d79b6df6SOr Gerlitz {
280973867881SPablo Neira Ayuso 	u8 cmd = (act->id == FLOW_ACTION_MANGLE) ? 0 : 1;
281073867881SPablo Neira Ayuso 	int err = -EOPNOTSUPP;
2811d79b6df6SOr Gerlitz 	u32 mask, val, offset;
281273867881SPablo Neira Ayuso 	u8 htype;
2813d79b6df6SOr Gerlitz 
281473867881SPablo Neira Ayuso 	htype = act->mangle.htype;
2815d79b6df6SOr Gerlitz 	err = -EOPNOTSUPP; /* can't be all optimistic */
2816d79b6df6SOr Gerlitz 
281773867881SPablo Neira Ayuso 	if (htype == FLOW_ACT_MANGLE_UNSPEC) {
281873867881SPablo Neira Ayuso 		NL_SET_ERR_MSG_MOD(extack, "legacy pedit isn't offloaded");
2819d79b6df6SOr Gerlitz 		goto out_err;
2820d79b6df6SOr Gerlitz 	}
2821d79b6df6SOr Gerlitz 
28222cc1cb1dSTonghao Zhang 	if (!mlx5e_flow_namespace_max_modify_action(priv->mdev, namespace)) {
28232cc1cb1dSTonghao Zhang 		NL_SET_ERR_MSG_MOD(extack,
28242cc1cb1dSTonghao Zhang 				   "The pedit offload action is not supported");
28252cc1cb1dSTonghao Zhang 		goto out_err;
28262cc1cb1dSTonghao Zhang 	}
28272cc1cb1dSTonghao Zhang 
282873867881SPablo Neira Ayuso 	mask = act->mangle.mask;
282973867881SPablo Neira Ayuso 	val = act->mangle.val;
283073867881SPablo Neira Ayuso 	offset = act->mangle.offset;
2831d79b6df6SOr Gerlitz 
2832c500c86bSPablo Neira Ayuso 	err = set_pedit_val(htype, ~mask, val, offset, &hdrs[cmd]);
2833d79b6df6SOr Gerlitz 	if (err)
2834d79b6df6SOr Gerlitz 		goto out_err;
2835c500c86bSPablo Neira Ayuso 
2836c500c86bSPablo Neira Ayuso 	hdrs[cmd].pedits++;
2837d79b6df6SOr Gerlitz 
2838c500c86bSPablo Neira Ayuso 	return 0;
2839c500c86bSPablo Neira Ayuso out_err:
2840c500c86bSPablo Neira Ayuso 	return err;
2841c500c86bSPablo Neira Ayuso }
2842c500c86bSPablo Neira Ayuso 
2843c500c86bSPablo Neira Ayuso static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
2844c500c86bSPablo Neira Ayuso 				 struct mlx5e_tc_flow_parse_attr *parse_attr,
2845c500c86bSPablo Neira Ayuso 				 struct pedit_headers_action *hdrs,
284627c11b6bSEli Britstein 				 u32 *action_flags,
2847c500c86bSPablo Neira Ayuso 				 struct netlink_ext_ack *extack)
2848c500c86bSPablo Neira Ayuso {
2849c500c86bSPablo Neira Ayuso 	struct pedit_headers *cmd_masks;
2850c500c86bSPablo Neira Ayuso 	int err;
2851c500c86bSPablo Neira Ayuso 	u8 cmd;
2852c500c86bSPablo Neira Ayuso 
28536ae4a6a5SPaul Blakey 	err = offload_pedit_fields(priv, namespace, hdrs, parse_attr,
28546ae4a6a5SPaul Blakey 				   action_flags, extack);
2855d79b6df6SOr Gerlitz 	if (err < 0)
2856d79b6df6SOr Gerlitz 		goto out_dealloc_parsed_actions;
2857d79b6df6SOr Gerlitz 
2858d79b6df6SOr Gerlitz 	for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
2859c500c86bSPablo Neira Ayuso 		cmd_masks = &hdrs[cmd].masks;
2860d79b6df6SOr Gerlitz 		if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
2861e98bedf5SEli Britstein 			NL_SET_ERR_MSG_MOD(extack,
2862e98bedf5SEli Britstein 					   "attempt to offload an unsupported field");
2863b3a433deSOr Gerlitz 			netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd);
2864d79b6df6SOr Gerlitz 			print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
2865d79b6df6SOr Gerlitz 				       16, 1, cmd_masks, sizeof(zero_masks), true);
2866d79b6df6SOr Gerlitz 			err = -EOPNOTSUPP;
2867d79b6df6SOr Gerlitz 			goto out_dealloc_parsed_actions;
2868d79b6df6SOr Gerlitz 		}
2869d79b6df6SOr Gerlitz 	}
2870d79b6df6SOr Gerlitz 
2871d79b6df6SOr Gerlitz 	return 0;
2872d79b6df6SOr Gerlitz 
2873d79b6df6SOr Gerlitz out_dealloc_parsed_actions:
28746ae4a6a5SPaul Blakey 	dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
2875d79b6df6SOr Gerlitz 	return err;
2876d79b6df6SOr Gerlitz }
2877d79b6df6SOr Gerlitz 
2878e98bedf5SEli Britstein static bool csum_offload_supported(struct mlx5e_priv *priv,
2879e98bedf5SEli Britstein 				   u32 action,
2880e98bedf5SEli Britstein 				   u32 update_flags,
2881e98bedf5SEli Britstein 				   struct netlink_ext_ack *extack)
288226c02749SOr Gerlitz {
288326c02749SOr Gerlitz 	u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
288426c02749SOr Gerlitz 			 TCA_CSUM_UPDATE_FLAG_UDP;
288526c02749SOr Gerlitz 
288626c02749SOr Gerlitz 	/*  The HW recalcs checksums only if re-writing headers */
288726c02749SOr Gerlitz 	if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
2888e98bedf5SEli Britstein 		NL_SET_ERR_MSG_MOD(extack,
2889e98bedf5SEli Britstein 				   "TC csum action is only offloaded with pedit");
289026c02749SOr Gerlitz 		netdev_warn(priv->netdev,
289126c02749SOr Gerlitz 			    "TC csum action is only offloaded with pedit\n");
289226c02749SOr Gerlitz 		return false;
289326c02749SOr Gerlitz 	}
289426c02749SOr Gerlitz 
289526c02749SOr Gerlitz 	if (update_flags & ~prot_flags) {
2896e98bedf5SEli Britstein 		NL_SET_ERR_MSG_MOD(extack,
2897e98bedf5SEli Britstein 				   "can't offload TC csum action for some header/s");
289826c02749SOr Gerlitz 		netdev_warn(priv->netdev,
289926c02749SOr Gerlitz 			    "can't offload TC csum action for some header/s - flags %#x\n",
290026c02749SOr Gerlitz 			    update_flags);
290126c02749SOr Gerlitz 		return false;
290226c02749SOr Gerlitz 	}
290326c02749SOr Gerlitz 
290426c02749SOr Gerlitz 	return true;
290526c02749SOr Gerlitz }
290626c02749SOr Gerlitz 
29078998576bSDmytro Linkin struct ip_ttl_word {
29088998576bSDmytro Linkin 	__u8	ttl;
29098998576bSDmytro Linkin 	__u8	protocol;
29108998576bSDmytro Linkin 	__sum16	check;
29118998576bSDmytro Linkin };
29128998576bSDmytro Linkin 
29138998576bSDmytro Linkin struct ipv6_hoplimit_word {
29148998576bSDmytro Linkin 	__be16	payload_len;
29158998576bSDmytro Linkin 	__u8	nexthdr;
29168998576bSDmytro Linkin 	__u8	hop_limit;
29178998576bSDmytro Linkin };
29188998576bSDmytro Linkin 
29198998576bSDmytro Linkin static bool is_action_keys_supported(const struct flow_action_entry *act)
29208998576bSDmytro Linkin {
29218998576bSDmytro Linkin 	u32 mask, offset;
29228998576bSDmytro Linkin 	u8 htype;
29238998576bSDmytro Linkin 
29248998576bSDmytro Linkin 	htype = act->mangle.htype;
29258998576bSDmytro Linkin 	offset = act->mangle.offset;
29268998576bSDmytro Linkin 	mask = ~act->mangle.mask;
29278998576bSDmytro Linkin 	/* For IPv4 & IPv6 header check 4 byte word,
29288998576bSDmytro Linkin 	 * to determine that modified fields
29298998576bSDmytro Linkin 	 * are NOT ttl & hop_limit only.
29308998576bSDmytro Linkin 	 */
29318998576bSDmytro Linkin 	if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4) {
29328998576bSDmytro Linkin 		struct ip_ttl_word *ttl_word =
29338998576bSDmytro Linkin 			(struct ip_ttl_word *)&mask;
29348998576bSDmytro Linkin 
29358998576bSDmytro Linkin 		if (offset != offsetof(struct iphdr, ttl) ||
29368998576bSDmytro Linkin 		    ttl_word->protocol ||
29378998576bSDmytro Linkin 		    ttl_word->check) {
29388998576bSDmytro Linkin 			return true;
29398998576bSDmytro Linkin 		}
29408998576bSDmytro Linkin 	} else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
29418998576bSDmytro Linkin 		struct ipv6_hoplimit_word *hoplimit_word =
29428998576bSDmytro Linkin 			(struct ipv6_hoplimit_word *)&mask;
29438998576bSDmytro Linkin 
29448998576bSDmytro Linkin 		if (offset != offsetof(struct ipv6hdr, payload_len) ||
29458998576bSDmytro Linkin 		    hoplimit_word->payload_len ||
29468998576bSDmytro Linkin 		    hoplimit_word->nexthdr) {
29478998576bSDmytro Linkin 			return true;
29488998576bSDmytro Linkin 		}
29498998576bSDmytro Linkin 	}
29508998576bSDmytro Linkin 	return false;
29518998576bSDmytro Linkin }
29528998576bSDmytro Linkin 
2953bdd66ac0SOr Gerlitz static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
295473867881SPablo Neira Ayuso 					  struct flow_action *flow_action,
29551651925dSGuy Shattah 					  u32 actions,
2956e98bedf5SEli Britstein 					  struct netlink_ext_ack *extack)
2957bdd66ac0SOr Gerlitz {
295873867881SPablo Neira Ayuso 	const struct flow_action_entry *act;
2959bdd66ac0SOr Gerlitz 	bool modify_ip_header;
2960bdd66ac0SOr Gerlitz 	void *headers_v;
2961bdd66ac0SOr Gerlitz 	u16 ethertype;
29628998576bSDmytro Linkin 	u8 ip_proto;
296373867881SPablo Neira Ayuso 	int i;
2964bdd66ac0SOr Gerlitz 
29658377629eSEli Britstein 	headers_v = get_match_headers_value(actions, spec);
2966bdd66ac0SOr Gerlitz 	ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
2967bdd66ac0SOr Gerlitz 
2968bdd66ac0SOr Gerlitz 	/* for non-IP we only re-write MACs, so we're okay */
2969bdd66ac0SOr Gerlitz 	if (ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)
2970bdd66ac0SOr Gerlitz 		goto out_ok;
2971bdd66ac0SOr Gerlitz 
2972bdd66ac0SOr Gerlitz 	modify_ip_header = false;
297373867881SPablo Neira Ayuso 	flow_action_for_each(i, act, flow_action) {
297473867881SPablo Neira Ayuso 		if (act->id != FLOW_ACTION_MANGLE &&
297573867881SPablo Neira Ayuso 		    act->id != FLOW_ACTION_ADD)
2976bdd66ac0SOr Gerlitz 			continue;
2977bdd66ac0SOr Gerlitz 
29788998576bSDmytro Linkin 		if (is_action_keys_supported(act)) {
2979bdd66ac0SOr Gerlitz 			modify_ip_header = true;
2980bdd66ac0SOr Gerlitz 			break;
2981bdd66ac0SOr Gerlitz 		}
2982bdd66ac0SOr Gerlitz 	}
2983bdd66ac0SOr Gerlitz 
2984bdd66ac0SOr Gerlitz 	ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
29851ccef350SJianbo Liu 	if (modify_ip_header && ip_proto != IPPROTO_TCP &&
29861ccef350SJianbo Liu 	    ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
2987e98bedf5SEli Britstein 		NL_SET_ERR_MSG_MOD(extack,
2988e98bedf5SEli Britstein 				   "can't offload re-write of non TCP/UDP");
2989bdd66ac0SOr Gerlitz 		pr_info("can't offload re-write of ip proto %d\n", ip_proto);
2990bdd66ac0SOr Gerlitz 		return false;
2991bdd66ac0SOr Gerlitz 	}
2992bdd66ac0SOr Gerlitz 
2993bdd66ac0SOr Gerlitz out_ok:
2994bdd66ac0SOr Gerlitz 	return true;
2995bdd66ac0SOr Gerlitz }
2996bdd66ac0SOr Gerlitz 
2997bdd66ac0SOr Gerlitz static bool actions_match_supported(struct mlx5e_priv *priv,
299873867881SPablo Neira Ayuso 				    struct flow_action *flow_action,
2999bdd66ac0SOr Gerlitz 				    struct mlx5e_tc_flow_parse_attr *parse_attr,
3000e98bedf5SEli Britstein 				    struct mlx5e_tc_flow *flow,
3001e98bedf5SEli Britstein 				    struct netlink_ext_ack *extack)
3002bdd66ac0SOr Gerlitz {
30037f2fd0a5SPaul Blakey 	struct net_device *filter_dev = parse_attr->filter_dev;
30040a7fcb78SPaul Blakey 	bool drop_action, pop_action;
3005bdd66ac0SOr Gerlitz 	u32 actions;
3006bdd66ac0SOr Gerlitz 
3007226f2ca3SVlad Buslov 	if (mlx5e_is_eswitch_flow(flow))
3008bdd66ac0SOr Gerlitz 		actions = flow->esw_attr->action;
3009bdd66ac0SOr Gerlitz 	else
3010bdd66ac0SOr Gerlitz 		actions = flow->nic_attr->action;
3011bdd66ac0SOr Gerlitz 
30127f2fd0a5SPaul Blakey 	drop_action = actions & MLX5_FLOW_CONTEXT_ACTION_DROP;
30137f2fd0a5SPaul Blakey 	pop_action = actions & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
30147f2fd0a5SPaul Blakey 
30157f2fd0a5SPaul Blakey 	if (flow_flag_test(flow, EGRESS) && !drop_action) {
30160a7fcb78SPaul Blakey 		/* We only support filters on tunnel device, or on vlan
30170a7fcb78SPaul Blakey 		 * devices if they have pop/drop action
30180a7fcb78SPaul Blakey 		 */
30190a7fcb78SPaul Blakey 		if (!mlx5e_get_tc_tun(filter_dev) ||
30200a7fcb78SPaul Blakey 		    (is_vlan_dev(filter_dev) && !pop_action))
30217e29392eSRoi Dayan 			return false;
30227f2fd0a5SPaul Blakey 	}
30237e29392eSRoi Dayan 
3024bdd66ac0SOr Gerlitz 	if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
302573867881SPablo Neira Ayuso 		return modify_header_match_supported(&parse_attr->spec,
3026a655fe9fSDavid S. Miller 						     flow_action, actions,
3027e98bedf5SEli Britstein 						     extack);
3028bdd66ac0SOr Gerlitz 
3029bdd66ac0SOr Gerlitz 	return true;
3030bdd66ac0SOr Gerlitz }
3031bdd66ac0SOr Gerlitz 
30325c65c564SOr Gerlitz static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
30335c65c564SOr Gerlitz {
30345c65c564SOr Gerlitz 	struct mlx5_core_dev *fmdev, *pmdev;
3035816f6706SOr Gerlitz 	u64 fsystem_guid, psystem_guid;
30365c65c564SOr Gerlitz 
30375c65c564SOr Gerlitz 	fmdev = priv->mdev;
30385c65c564SOr Gerlitz 	pmdev = peer_priv->mdev;
30395c65c564SOr Gerlitz 
304059c9d35eSAlaa Hleihel 	fsystem_guid = mlx5_query_nic_system_image_guid(fmdev);
304159c9d35eSAlaa Hleihel 	psystem_guid = mlx5_query_nic_system_image_guid(pmdev);
30425c65c564SOr Gerlitz 
3043816f6706SOr Gerlitz 	return (fsystem_guid == psystem_guid);
30445c65c564SOr Gerlitz }
30455c65c564SOr Gerlitz 
3046bdc837eeSEli Britstein static int add_vlan_rewrite_action(struct mlx5e_priv *priv, int namespace,
3047bdc837eeSEli Britstein 				   const struct flow_action_entry *act,
3048bdc837eeSEli Britstein 				   struct mlx5e_tc_flow_parse_attr *parse_attr,
3049bdc837eeSEli Britstein 				   struct pedit_headers_action *hdrs,
3050bdc837eeSEli Britstein 				   u32 *action, struct netlink_ext_ack *extack)
3051bdc837eeSEli Britstein {
3052bdc837eeSEli Britstein 	u16 mask16 = VLAN_VID_MASK;
3053bdc837eeSEli Britstein 	u16 val16 = act->vlan.vid & VLAN_VID_MASK;
3054bdc837eeSEli Britstein 	const struct flow_action_entry pedit_act = {
3055bdc837eeSEli Britstein 		.id = FLOW_ACTION_MANGLE,
3056bdc837eeSEli Britstein 		.mangle.htype = FLOW_ACT_MANGLE_HDR_TYPE_ETH,
3057bdc837eeSEli Britstein 		.mangle.offset = offsetof(struct vlan_ethhdr, h_vlan_TCI),
3058bdc837eeSEli Britstein 		.mangle.mask = ~(u32)be16_to_cpu(*(__be16 *)&mask16),
3059bdc837eeSEli Britstein 		.mangle.val = (u32)be16_to_cpu(*(__be16 *)&val16),
3060bdc837eeSEli Britstein 	};
30616fca9d1eSEli Britstein 	u8 match_prio_mask, match_prio_val;
3062bf2f3bcaSEli Britstein 	void *headers_c, *headers_v;
3063bdc837eeSEli Britstein 	int err;
3064bdc837eeSEli Britstein 
3065bf2f3bcaSEli Britstein 	headers_c = get_match_headers_criteria(*action, &parse_attr->spec);
3066bf2f3bcaSEli Britstein 	headers_v = get_match_headers_value(*action, &parse_attr->spec);
3067bf2f3bcaSEli Britstein 
3068bf2f3bcaSEli Britstein 	if (!(MLX5_GET(fte_match_set_lyr_2_4, headers_c, cvlan_tag) &&
3069bf2f3bcaSEli Britstein 	      MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag))) {
3070bf2f3bcaSEli Britstein 		NL_SET_ERR_MSG_MOD(extack,
3071bf2f3bcaSEli Britstein 				   "VLAN rewrite action must have VLAN protocol match");
3072bf2f3bcaSEli Britstein 		return -EOPNOTSUPP;
3073bf2f3bcaSEli Britstein 	}
3074bf2f3bcaSEli Britstein 
30756fca9d1eSEli Britstein 	match_prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
30766fca9d1eSEli Britstein 	match_prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
30776fca9d1eSEli Britstein 	if (act->vlan.prio != (match_prio_val & match_prio_mask)) {
30786fca9d1eSEli Britstein 		NL_SET_ERR_MSG_MOD(extack,
30796fca9d1eSEli Britstein 				   "Changing VLAN prio is not supported");
3080bdc837eeSEli Britstein 		return -EOPNOTSUPP;
3081bdc837eeSEli Britstein 	}
3082bdc837eeSEli Britstein 
3083bdc837eeSEli Britstein 	err = parse_tc_pedit_action(priv, &pedit_act, namespace, parse_attr,
3084bdc837eeSEli Britstein 				    hdrs, NULL);
3085bdc837eeSEli Britstein 	*action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
3086bdc837eeSEli Britstein 
3087bdc837eeSEli Britstein 	return err;
3088bdc837eeSEli Britstein }
3089bdc837eeSEli Britstein 
30900bac1194SEli Britstein static int
30910bac1194SEli Britstein add_vlan_prio_tag_rewrite_action(struct mlx5e_priv *priv,
30920bac1194SEli Britstein 				 struct mlx5e_tc_flow_parse_attr *parse_attr,
30930bac1194SEli Britstein 				 struct pedit_headers_action *hdrs,
30940bac1194SEli Britstein 				 u32 *action, struct netlink_ext_ack *extack)
30950bac1194SEli Britstein {
30960bac1194SEli Britstein 	const struct flow_action_entry prio_tag_act = {
30970bac1194SEli Britstein 		.vlan.vid = 0,
30980bac1194SEli Britstein 		.vlan.prio =
30990bac1194SEli Britstein 			MLX5_GET(fte_match_set_lyr_2_4,
31000bac1194SEli Britstein 				 get_match_headers_value(*action,
31010bac1194SEli Britstein 							 &parse_attr->spec),
31020bac1194SEli Britstein 				 first_prio) &
31030bac1194SEli Britstein 			MLX5_GET(fte_match_set_lyr_2_4,
31040bac1194SEli Britstein 				 get_match_headers_criteria(*action,
31050bac1194SEli Britstein 							    &parse_attr->spec),
31060bac1194SEli Britstein 				 first_prio),
31070bac1194SEli Britstein 	};
31080bac1194SEli Britstein 
31090bac1194SEli Britstein 	return add_vlan_rewrite_action(priv, MLX5_FLOW_NAMESPACE_FDB,
31100bac1194SEli Britstein 				       &prio_tag_act, parse_attr, hdrs, action,
31110bac1194SEli Britstein 				       extack);
31120bac1194SEli Britstein }
31130bac1194SEli Britstein 
311473867881SPablo Neira Ayuso static int parse_tc_nic_actions(struct mlx5e_priv *priv,
311573867881SPablo Neira Ayuso 				struct flow_action *flow_action,
3116aa0cbbaeSOr Gerlitz 				struct mlx5e_tc_flow_parse_attr *parse_attr,
3117e98bedf5SEli Britstein 				struct mlx5e_tc_flow *flow,
3118e98bedf5SEli Britstein 				struct netlink_ext_ack *extack)
3119e3a2b7edSAmir Vadai {
3120aa0cbbaeSOr Gerlitz 	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
312173867881SPablo Neira Ayuso 	struct pedit_headers_action hdrs[2] = {};
312273867881SPablo Neira Ayuso 	const struct flow_action_entry *act;
31231cab1cd7SOr Gerlitz 	u32 action = 0;
3124244cd96aSCong Wang 	int err, i;
3125e3a2b7edSAmir Vadai 
312673867881SPablo Neira Ayuso 	if (!flow_action_has_entries(flow_action))
3127e3a2b7edSAmir Vadai 		return -EINVAL;
3128e3a2b7edSAmir Vadai 
31293bc4b7bfSOr Gerlitz 	attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
3130e3a2b7edSAmir Vadai 
313173867881SPablo Neira Ayuso 	flow_action_for_each(i, act, flow_action) {
313273867881SPablo Neira Ayuso 		switch (act->id) {
313315fc92ecSTonghao Zhang 		case FLOW_ACTION_ACCEPT:
313415fc92ecSTonghao Zhang 			action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
313515fc92ecSTonghao Zhang 				  MLX5_FLOW_CONTEXT_ACTION_COUNT;
313615fc92ecSTonghao Zhang 			break;
313773867881SPablo Neira Ayuso 		case FLOW_ACTION_DROP:
31381cab1cd7SOr Gerlitz 			action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
3139aad7e08dSAmir Vadai 			if (MLX5_CAP_FLOWTABLE(priv->mdev,
3140aad7e08dSAmir Vadai 					       flow_table_properties_nic_receive.flow_counter))
31411cab1cd7SOr Gerlitz 				action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
314273867881SPablo Neira Ayuso 			break;
314373867881SPablo Neira Ayuso 		case FLOW_ACTION_MANGLE:
314473867881SPablo Neira Ayuso 		case FLOW_ACTION_ADD:
314573867881SPablo Neira Ayuso 			err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_KERNEL,
3146c500c86bSPablo Neira Ayuso 						    parse_attr, hdrs, extack);
31472f4fe4caSOr Gerlitz 			if (err)
31482f4fe4caSOr Gerlitz 				return err;
31492f4fe4caSOr Gerlitz 
31501cab1cd7SOr Gerlitz 			action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
31512f4fe4caSOr Gerlitz 				  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
315273867881SPablo Neira Ayuso 			break;
3153bdc837eeSEli Britstein 		case FLOW_ACTION_VLAN_MANGLE:
3154bdc837eeSEli Britstein 			err = add_vlan_rewrite_action(priv,
3155bdc837eeSEli Britstein 						      MLX5_FLOW_NAMESPACE_KERNEL,
3156bdc837eeSEli Britstein 						      act, parse_attr, hdrs,
3157bdc837eeSEli Britstein 						      &action, extack);
3158bdc837eeSEli Britstein 			if (err)
3159bdc837eeSEli Britstein 				return err;
3160bdc837eeSEli Britstein 
3161bdc837eeSEli Britstein 			break;
316273867881SPablo Neira Ayuso 		case FLOW_ACTION_CSUM:
31631cab1cd7SOr Gerlitz 			if (csum_offload_supported(priv, action,
316473867881SPablo Neira Ayuso 						   act->csum_flags,
3165e98bedf5SEli Britstein 						   extack))
316673867881SPablo Neira Ayuso 				break;
316726c02749SOr Gerlitz 
316826c02749SOr Gerlitz 			return -EOPNOTSUPP;
316973867881SPablo Neira Ayuso 		case FLOW_ACTION_REDIRECT: {
317073867881SPablo Neira Ayuso 			struct net_device *peer_dev = act->dev;
31715c65c564SOr Gerlitz 
31725c65c564SOr Gerlitz 			if (priv->netdev->netdev_ops == peer_dev->netdev_ops &&
31735c65c564SOr Gerlitz 			    same_hw_devs(priv, netdev_priv(peer_dev))) {
317498b66cb1SEli Britstein 				parse_attr->mirred_ifindex[0] = peer_dev->ifindex;
3175226f2ca3SVlad Buslov 				flow_flag_set(flow, HAIRPIN);
31761cab1cd7SOr Gerlitz 				action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
31775c65c564SOr Gerlitz 					  MLX5_FLOW_CONTEXT_ACTION_COUNT;
31785c65c564SOr Gerlitz 			} else {
3179e98bedf5SEli Britstein 				NL_SET_ERR_MSG_MOD(extack,
3180e98bedf5SEli Britstein 						   "device is not on same HW, can't offload");
31815c65c564SOr Gerlitz 				netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
31825c65c564SOr Gerlitz 					    peer_dev->name);
31835c65c564SOr Gerlitz 				return -EINVAL;
31845c65c564SOr Gerlitz 			}
31855c65c564SOr Gerlitz 			}
318673867881SPablo Neira Ayuso 			break;
318773867881SPablo Neira Ayuso 		case FLOW_ACTION_MARK: {
318873867881SPablo Neira Ayuso 			u32 mark = act->mark;
3189e3a2b7edSAmir Vadai 
3190e3a2b7edSAmir Vadai 			if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
3191e98bedf5SEli Britstein 				NL_SET_ERR_MSG_MOD(extack,
3192e98bedf5SEli Britstein 						   "Bad flow mark - only 16 bit is supported");
3193e3a2b7edSAmir Vadai 				return -EINVAL;
3194e3a2b7edSAmir Vadai 			}
3195e3a2b7edSAmir Vadai 
31963bc4b7bfSOr Gerlitz 			attr->flow_tag = mark;
31971cab1cd7SOr Gerlitz 			action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
3198e3a2b7edSAmir Vadai 			}
319973867881SPablo Neira Ayuso 			break;
320073867881SPablo Neira Ayuso 		default:
32012cc1cb1dSTonghao Zhang 			NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
32022cc1cb1dSTonghao Zhang 			return -EOPNOTSUPP;
3203e3a2b7edSAmir Vadai 		}
320473867881SPablo Neira Ayuso 	}
3205e3a2b7edSAmir Vadai 
3206c500c86bSPablo Neira Ayuso 	if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
3207c500c86bSPablo Neira Ayuso 	    hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
3208c500c86bSPablo Neira Ayuso 		err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL,
320927c11b6bSEli Britstein 					    parse_attr, hdrs, &action, extack);
3210c500c86bSPablo Neira Ayuso 		if (err)
3211c500c86bSPablo Neira Ayuso 			return err;
321227c11b6bSEli Britstein 		/* in case all pedit actions are skipped, remove the MOD_HDR
321327c11b6bSEli Britstein 		 * flag.
321427c11b6bSEli Britstein 		 */
32156ae4a6a5SPaul Blakey 		if (parse_attr->mod_hdr_acts.num_actions == 0) {
321627c11b6bSEli Britstein 			action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
32176ae4a6a5SPaul Blakey 			dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
3218e7739a60SEli Britstein 		}
3219c500c86bSPablo Neira Ayuso 	}
3220c500c86bSPablo Neira Ayuso 
32211cab1cd7SOr Gerlitz 	attr->action = action;
322273867881SPablo Neira Ayuso 	if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
3223bdd66ac0SOr Gerlitz 		return -EOPNOTSUPP;
3224bdd66ac0SOr Gerlitz 
3225e3a2b7edSAmir Vadai 	return 0;
3226e3a2b7edSAmir Vadai }
3227e3a2b7edSAmir Vadai 
32287f1a546eSEli Britstein struct encap_key {
32291f6da306SYevgeny Kliteynik 	const struct ip_tunnel_key *ip_tun_key;
3230d386939aSYevgeny Kliteynik 	struct mlx5e_tc_tunnel *tc_tunnel;
32317f1a546eSEli Britstein };
32327f1a546eSEli Britstein 
32337f1a546eSEli Britstein static inline int cmp_encap_info(struct encap_key *a,
32347f1a546eSEli Britstein 				 struct encap_key *b)
3235a54e20b4SHadar Hen Zion {
32367f1a546eSEli Britstein 	return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) ||
3237d386939aSYevgeny Kliteynik 	       a->tc_tunnel->tunnel_type != b->tc_tunnel->tunnel_type;
3238a54e20b4SHadar Hen Zion }
3239a54e20b4SHadar Hen Zion 
32407f1a546eSEli Britstein static inline int hash_encap_info(struct encap_key *key)
3241a54e20b4SHadar Hen Zion {
32427f1a546eSEli Britstein 	return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key),
3243d386939aSYevgeny Kliteynik 		     key->tc_tunnel->tunnel_type);
3244a54e20b4SHadar Hen Zion }
3245a54e20b4SHadar Hen Zion 
3246a54e20b4SHadar Hen Zion 
3247b1d90e6bSRabie Loulou static bool is_merged_eswitch_dev(struct mlx5e_priv *priv,
3248b1d90e6bSRabie Loulou 				  struct net_device *peer_netdev)
3249b1d90e6bSRabie Loulou {
3250b1d90e6bSRabie Loulou 	struct mlx5e_priv *peer_priv;
3251b1d90e6bSRabie Loulou 
3252b1d90e6bSRabie Loulou 	peer_priv = netdev_priv(peer_netdev);
3253b1d90e6bSRabie Loulou 
3254b1d90e6bSRabie Loulou 	return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) &&
325568931c7dSRoi Dayan 		mlx5e_eswitch_rep(priv->netdev) &&
325668931c7dSRoi Dayan 		mlx5e_eswitch_rep(peer_netdev) &&
325768931c7dSRoi Dayan 		same_hw_devs(priv, peer_priv));
3258b1d90e6bSRabie Loulou }
3259b1d90e6bSRabie Loulou 
3260ce99f6b9SOr Gerlitz 
326154c177caSOz Shlomo 
3262948993f2SVlad Buslov bool mlx5e_encap_take(struct mlx5e_encap_entry *e)
3263948993f2SVlad Buslov {
3264948993f2SVlad Buslov 	return refcount_inc_not_zero(&e->refcnt);
3265948993f2SVlad Buslov }
3266948993f2SVlad Buslov 
3267948993f2SVlad Buslov static struct mlx5e_encap_entry *
3268948993f2SVlad Buslov mlx5e_encap_get(struct mlx5e_priv *priv, struct encap_key *key,
3269948993f2SVlad Buslov 		uintptr_t hash_key)
3270948993f2SVlad Buslov {
3271948993f2SVlad Buslov 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3272948993f2SVlad Buslov 	struct mlx5e_encap_entry *e;
3273948993f2SVlad Buslov 	struct encap_key e_key;
3274948993f2SVlad Buslov 
3275948993f2SVlad Buslov 	hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
3276948993f2SVlad Buslov 				   encap_hlist, hash_key) {
3277948993f2SVlad Buslov 		e_key.ip_tun_key = &e->tun_info->key;
3278948993f2SVlad Buslov 		e_key.tc_tunnel = e->tunnel;
3279948993f2SVlad Buslov 		if (!cmp_encap_info(&e_key, key) &&
3280948993f2SVlad Buslov 		    mlx5e_encap_take(e))
3281948993f2SVlad Buslov 			return e;
3282948993f2SVlad Buslov 	}
3283948993f2SVlad Buslov 
3284948993f2SVlad Buslov 	return NULL;
3285948993f2SVlad Buslov }
3286948993f2SVlad Buslov 
32872a4b6526SVlad Buslov static struct ip_tunnel_info *dup_tun_info(const struct ip_tunnel_info *tun_info)
32882a4b6526SVlad Buslov {
32892a4b6526SVlad Buslov 	size_t tun_size = sizeof(*tun_info) + tun_info->options_len;
32902a4b6526SVlad Buslov 
32912a4b6526SVlad Buslov 	return kmemdup(tun_info, tun_size, GFP_KERNEL);
32922a4b6526SVlad Buslov }
32932a4b6526SVlad Buslov 
3294554fe75cSDmytro Linkin static bool is_duplicated_encap_entry(struct mlx5e_priv *priv,
3295554fe75cSDmytro Linkin 				      struct mlx5e_tc_flow *flow,
3296554fe75cSDmytro Linkin 				      int out_index,
3297554fe75cSDmytro Linkin 				      struct mlx5e_encap_entry *e,
3298554fe75cSDmytro Linkin 				      struct netlink_ext_ack *extack)
3299554fe75cSDmytro Linkin {
3300554fe75cSDmytro Linkin 	int i;
3301554fe75cSDmytro Linkin 
3302554fe75cSDmytro Linkin 	for (i = 0; i < out_index; i++) {
3303554fe75cSDmytro Linkin 		if (flow->encaps[i].e != e)
3304554fe75cSDmytro Linkin 			continue;
3305554fe75cSDmytro Linkin 		NL_SET_ERR_MSG_MOD(extack, "can't duplicate encap action");
3306554fe75cSDmytro Linkin 		netdev_err(priv->netdev, "can't duplicate encap action\n");
3307554fe75cSDmytro Linkin 		return true;
3308554fe75cSDmytro Linkin 	}
3309554fe75cSDmytro Linkin 
3310554fe75cSDmytro Linkin 	return false;
3311554fe75cSDmytro Linkin }
3312554fe75cSDmytro Linkin 
3313a54e20b4SHadar Hen Zion static int mlx5e_attach_encap(struct mlx5e_priv *priv,
3314e98bedf5SEli Britstein 			      struct mlx5e_tc_flow *flow,
3315733d4f36SRoi Dayan 			      struct net_device *mirred_dev,
3316733d4f36SRoi Dayan 			      int out_index,
33178c4dc42bSEli Britstein 			      struct netlink_ext_ack *extack,
33180ad060eeSRoi Dayan 			      struct net_device **encap_dev,
33190ad060eeSRoi Dayan 			      bool *encap_valid)
332003a9d11eSOr Gerlitz {
3321a54e20b4SHadar Hen Zion 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
332245247bf2SOr Gerlitz 	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
3323733d4f36SRoi Dayan 	struct mlx5e_tc_flow_parse_attr *parse_attr;
33241f6da306SYevgeny Kliteynik 	const struct ip_tunnel_info *tun_info;
3325948993f2SVlad Buslov 	struct encap_key key;
3326c1ae1152SOr Gerlitz 	struct mlx5e_encap_entry *e;
3327733d4f36SRoi Dayan 	unsigned short family;
3328a54e20b4SHadar Hen Zion 	uintptr_t hash_key;
332954c177caSOz Shlomo 	int err = 0;
3330a54e20b4SHadar Hen Zion 
3331733d4f36SRoi Dayan 	parse_attr = attr->parse_attr;
33321f6da306SYevgeny Kliteynik 	tun_info = parse_attr->tun_info[out_index];
3333733d4f36SRoi Dayan 	family = ip_tunnel_info_af(tun_info);
33347f1a546eSEli Britstein 	key.ip_tun_key = &tun_info->key;
3335d386939aSYevgeny Kliteynik 	key.tc_tunnel = mlx5e_get_tc_tun(mirred_dev);
3336d71f895cSEli Cohen 	if (!key.tc_tunnel) {
3337d71f895cSEli Cohen 		NL_SET_ERR_MSG_MOD(extack, "Unsupported tunnel");
3338d71f895cSEli Cohen 		return -EOPNOTSUPP;
3339d71f895cSEli Cohen 	}
3340733d4f36SRoi Dayan 
33417f1a546eSEli Britstein 	hash_key = hash_encap_info(&key);
3342a54e20b4SHadar Hen Zion 
334361086f39SVlad Buslov 	mutex_lock(&esw->offloads.encap_tbl_lock);
3344948993f2SVlad Buslov 	e = mlx5e_encap_get(priv, &key, hash_key);
3345a54e20b4SHadar Hen Zion 
3346b2812089SVlad Buslov 	/* must verify if encap is valid or not */
3347d589e785SVlad Buslov 	if (e) {
3348554fe75cSDmytro Linkin 		/* Check that entry was not already attached to this flow */
3349554fe75cSDmytro Linkin 		if (is_duplicated_encap_entry(priv, flow, out_index, e, extack)) {
3350554fe75cSDmytro Linkin 			err = -EOPNOTSUPP;
3351554fe75cSDmytro Linkin 			goto out_err;
3352554fe75cSDmytro Linkin 		}
3353554fe75cSDmytro Linkin 
3354d589e785SVlad Buslov 		mutex_unlock(&esw->offloads.encap_tbl_lock);
3355d589e785SVlad Buslov 		wait_for_completion(&e->res_ready);
3356d589e785SVlad Buslov 
3357d589e785SVlad Buslov 		/* Protect against concurrent neigh update. */
3358d589e785SVlad Buslov 		mutex_lock(&esw->offloads.encap_tbl_lock);
33593c140dd5SVlad Buslov 		if (e->compl_result < 0) {
3360d589e785SVlad Buslov 			err = -EREMOTEIO;
3361d589e785SVlad Buslov 			goto out_err;
3362d589e785SVlad Buslov 		}
336345247bf2SOr Gerlitz 		goto attach_flow;
3364d589e785SVlad Buslov 	}
3365a54e20b4SHadar Hen Zion 
3366a54e20b4SHadar Hen Zion 	e = kzalloc(sizeof(*e), GFP_KERNEL);
336761086f39SVlad Buslov 	if (!e) {
336861086f39SVlad Buslov 		err = -ENOMEM;
336961086f39SVlad Buslov 		goto out_err;
337061086f39SVlad Buslov 	}
3371a54e20b4SHadar Hen Zion 
3372948993f2SVlad Buslov 	refcount_set(&e->refcnt, 1);
3373d589e785SVlad Buslov 	init_completion(&e->res_ready);
3374d589e785SVlad Buslov 
33752a4b6526SVlad Buslov 	tun_info = dup_tun_info(tun_info);
33762a4b6526SVlad Buslov 	if (!tun_info) {
33772a4b6526SVlad Buslov 		err = -ENOMEM;
33782a4b6526SVlad Buslov 		goto out_err_init;
33792a4b6526SVlad Buslov 	}
33801f6da306SYevgeny Kliteynik 	e->tun_info = tun_info;
3381101f4de9SOz Shlomo 	err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack);
33822a4b6526SVlad Buslov 	if (err)
33832a4b6526SVlad Buslov 		goto out_err_init;
338454c177caSOz Shlomo 
3385a54e20b4SHadar Hen Zion 	INIT_LIST_HEAD(&e->flows);
3386d589e785SVlad Buslov 	hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
3387d589e785SVlad Buslov 	mutex_unlock(&esw->offloads.encap_tbl_lock);
3388a54e20b4SHadar Hen Zion 
3389ce99f6b9SOr Gerlitz 	if (family == AF_INET)
3390101f4de9SOz Shlomo 		err = mlx5e_tc_tun_create_header_ipv4(priv, mirred_dev, e);
3391ce99f6b9SOr Gerlitz 	else if (family == AF_INET6)
3392101f4de9SOz Shlomo 		err = mlx5e_tc_tun_create_header_ipv6(priv, mirred_dev, e);
3393ce99f6b9SOr Gerlitz 
3394d589e785SVlad Buslov 	/* Protect against concurrent neigh update. */
3395d589e785SVlad Buslov 	mutex_lock(&esw->offloads.encap_tbl_lock);
3396d589e785SVlad Buslov 	complete_all(&e->res_ready);
3397d589e785SVlad Buslov 	if (err) {
3398d589e785SVlad Buslov 		e->compl_result = err;
3399a54e20b4SHadar Hen Zion 		goto out_err;
3400d589e785SVlad Buslov 	}
34013c140dd5SVlad Buslov 	e->compl_result = 1;
3402a54e20b4SHadar Hen Zion 
340345247bf2SOr Gerlitz attach_flow:
3404948993f2SVlad Buslov 	flow->encaps[out_index].e = e;
34058c4dc42bSEli Britstein 	list_add(&flow->encaps[out_index].list, &e->flows);
34068c4dc42bSEli Britstein 	flow->encaps[out_index].index = out_index;
340745247bf2SOr Gerlitz 	*encap_dev = e->out_dev;
34088c4dc42bSEli Britstein 	if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
34092b688ea5SMaor Gottlieb 		attr->dests[out_index].pkt_reformat = e->pkt_reformat;
34108c4dc42bSEli Britstein 		attr->dests[out_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
34110ad060eeSRoi Dayan 		*encap_valid = true;
34128c4dc42bSEli Britstein 	} else {
34130ad060eeSRoi Dayan 		*encap_valid = false;
34148c4dc42bSEli Britstein 	}
341561086f39SVlad Buslov 	mutex_unlock(&esw->offloads.encap_tbl_lock);
341645247bf2SOr Gerlitz 
3417232c0013SHadar Hen Zion 	return err;
3418a54e20b4SHadar Hen Zion 
3419a54e20b4SHadar Hen Zion out_err:
342061086f39SVlad Buslov 	mutex_unlock(&esw->offloads.encap_tbl_lock);
3421d589e785SVlad Buslov 	if (e)
3422d589e785SVlad Buslov 		mlx5e_encap_put(priv, e);
3423a54e20b4SHadar Hen Zion 	return err;
34242a4b6526SVlad Buslov 
34252a4b6526SVlad Buslov out_err_init:
34262a4b6526SVlad Buslov 	mutex_unlock(&esw->offloads.encap_tbl_lock);
34272a4b6526SVlad Buslov 	kfree(tun_info);
34282a4b6526SVlad Buslov 	kfree(e);
34292a4b6526SVlad Buslov 	return err;
3430a54e20b4SHadar Hen Zion }
3431a54e20b4SHadar Hen Zion 
34321482bd3dSJianbo Liu static int parse_tc_vlan_action(struct mlx5e_priv *priv,
343373867881SPablo Neira Ayuso 				const struct flow_action_entry *act,
34341482bd3dSJianbo Liu 				struct mlx5_esw_flow_attr *attr,
34351482bd3dSJianbo Liu 				u32 *action)
34361482bd3dSJianbo Liu {
3437cc495188SJianbo Liu 	u8 vlan_idx = attr->total_vlan;
3438cc495188SJianbo Liu 
3439cc495188SJianbo Liu 	if (vlan_idx >= MLX5_FS_VLAN_DEPTH)
34401482bd3dSJianbo Liu 		return -EOPNOTSUPP;
3441cc495188SJianbo Liu 
344273867881SPablo Neira Ayuso 	switch (act->id) {
344373867881SPablo Neira Ayuso 	case FLOW_ACTION_VLAN_POP:
3444cc495188SJianbo Liu 		if (vlan_idx) {
3445cc495188SJianbo Liu 			if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
3446cc495188SJianbo Liu 								 MLX5_FS_VLAN_DEPTH))
3447cc495188SJianbo Liu 				return -EOPNOTSUPP;
3448cc495188SJianbo Liu 
3449cc495188SJianbo Liu 			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2;
3450cc495188SJianbo Liu 		} else {
3451cc495188SJianbo Liu 			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
3452cc495188SJianbo Liu 		}
345373867881SPablo Neira Ayuso 		break;
345473867881SPablo Neira Ayuso 	case FLOW_ACTION_VLAN_PUSH:
345573867881SPablo Neira Ayuso 		attr->vlan_vid[vlan_idx] = act->vlan.vid;
345673867881SPablo Neira Ayuso 		attr->vlan_prio[vlan_idx] = act->vlan.prio;
345773867881SPablo Neira Ayuso 		attr->vlan_proto[vlan_idx] = act->vlan.proto;
3458cc495188SJianbo Liu 		if (!attr->vlan_proto[vlan_idx])
3459cc495188SJianbo Liu 			attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q);
3460cc495188SJianbo Liu 
3461cc495188SJianbo Liu 		if (vlan_idx) {
3462cc495188SJianbo Liu 			if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
3463cc495188SJianbo Liu 								 MLX5_FS_VLAN_DEPTH))
3464cc495188SJianbo Liu 				return -EOPNOTSUPP;
3465cc495188SJianbo Liu 
3466cc495188SJianbo Liu 			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
3467cc495188SJianbo Liu 		} else {
3468cc495188SJianbo Liu 			if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) &&
346973867881SPablo Neira Ayuso 			    (act->vlan.proto != htons(ETH_P_8021Q) ||
347073867881SPablo Neira Ayuso 			     act->vlan.prio))
3471cc495188SJianbo Liu 				return -EOPNOTSUPP;
3472cc495188SJianbo Liu 
3473cc495188SJianbo Liu 			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
34741482bd3dSJianbo Liu 		}
347573867881SPablo Neira Ayuso 		break;
347673867881SPablo Neira Ayuso 	default:
3477bdc837eeSEli Britstein 		return -EINVAL;
34781482bd3dSJianbo Liu 	}
34791482bd3dSJianbo Liu 
3480cc495188SJianbo Liu 	attr->total_vlan = vlan_idx + 1;
3481cc495188SJianbo Liu 
34821482bd3dSJianbo Liu 	return 0;
34831482bd3dSJianbo Liu }
34841482bd3dSJianbo Liu 
3485278748a9SEli Britstein static int add_vlan_push_action(struct mlx5e_priv *priv,
3486278748a9SEli Britstein 				struct mlx5_esw_flow_attr *attr,
3487278748a9SEli Britstein 				struct net_device **out_dev,
3488278748a9SEli Britstein 				u32 *action)
3489278748a9SEli Britstein {
3490278748a9SEli Britstein 	struct net_device *vlan_dev = *out_dev;
3491278748a9SEli Britstein 	struct flow_action_entry vlan_act = {
3492278748a9SEli Britstein 		.id = FLOW_ACTION_VLAN_PUSH,
3493278748a9SEli Britstein 		.vlan.vid = vlan_dev_vlan_id(vlan_dev),
3494278748a9SEli Britstein 		.vlan.proto = vlan_dev_vlan_proto(vlan_dev),
3495278748a9SEli Britstein 		.vlan.prio = 0,
3496278748a9SEli Britstein 	};
3497278748a9SEli Britstein 	int err;
3498278748a9SEli Britstein 
3499278748a9SEli Britstein 	err = parse_tc_vlan_action(priv, &vlan_act, attr, action);
3500278748a9SEli Britstein 	if (err)
3501278748a9SEli Britstein 		return err;
3502278748a9SEli Britstein 
3503278748a9SEli Britstein 	*out_dev = dev_get_by_index_rcu(dev_net(vlan_dev),
3504278748a9SEli Britstein 					dev_get_iflink(vlan_dev));
3505278748a9SEli Britstein 	if (is_vlan_dev(*out_dev))
3506278748a9SEli Britstein 		err = add_vlan_push_action(priv, attr, out_dev, action);
3507278748a9SEli Britstein 
3508278748a9SEli Britstein 	return err;
3509278748a9SEli Britstein }
3510278748a9SEli Britstein 
351135a605dbSEli Britstein static int add_vlan_pop_action(struct mlx5e_priv *priv,
351235a605dbSEli Britstein 			       struct mlx5_esw_flow_attr *attr,
351335a605dbSEli Britstein 			       u32 *action)
351435a605dbSEli Britstein {
3515f3b0a18bSTaehee Yoo 	int nest_level = attr->parse_attr->filter_dev->lower_level;
351635a605dbSEli Britstein 	struct flow_action_entry vlan_act = {
351735a605dbSEli Britstein 		.id = FLOW_ACTION_VLAN_POP,
351835a605dbSEli Britstein 	};
351935a605dbSEli Britstein 	int err = 0;
352035a605dbSEli Britstein 
352135a605dbSEli Britstein 	while (nest_level--) {
352235a605dbSEli Britstein 		err = parse_tc_vlan_action(priv, &vlan_act, attr, action);
352335a605dbSEli Britstein 		if (err)
352435a605dbSEli Britstein 			return err;
352535a605dbSEli Britstein 	}
352635a605dbSEli Britstein 
352735a605dbSEli Britstein 	return err;
352835a605dbSEli Britstein }
352935a605dbSEli Britstein 
3530f6dc1264SPaul Blakey bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
3531f6dc1264SPaul Blakey 				    struct net_device *out_dev)
3532f6dc1264SPaul Blakey {
3533f6dc1264SPaul Blakey 	if (is_merged_eswitch_dev(priv, out_dev))
3534f6dc1264SPaul Blakey 		return true;
3535f6dc1264SPaul Blakey 
3536f6dc1264SPaul Blakey 	return mlx5e_eswitch_rep(out_dev) &&
3537f6dc1264SPaul Blakey 	       same_hw_devs(priv, netdev_priv(out_dev));
3538f6dc1264SPaul Blakey }
3539f6dc1264SPaul Blakey 
3540554fe75cSDmytro Linkin static bool is_duplicated_output_device(struct net_device *dev,
3541554fe75cSDmytro Linkin 					struct net_device *out_dev,
3542554fe75cSDmytro Linkin 					int *ifindexes, int if_count,
3543554fe75cSDmytro Linkin 					struct netlink_ext_ack *extack)
3544554fe75cSDmytro Linkin {
3545554fe75cSDmytro Linkin 	int i;
3546554fe75cSDmytro Linkin 
3547554fe75cSDmytro Linkin 	for (i = 0; i < if_count; i++) {
3548554fe75cSDmytro Linkin 		if (ifindexes[i] == out_dev->ifindex) {
3549554fe75cSDmytro Linkin 			NL_SET_ERR_MSG_MOD(extack,
3550554fe75cSDmytro Linkin 					   "can't duplicate output to same device");
3551554fe75cSDmytro Linkin 			netdev_err(dev, "can't duplicate output to same device: %s\n",
3552554fe75cSDmytro Linkin 				   out_dev->name);
3553554fe75cSDmytro Linkin 			return true;
3554554fe75cSDmytro Linkin 		}
3555554fe75cSDmytro Linkin 	}
3556554fe75cSDmytro Linkin 
3557554fe75cSDmytro Linkin 	return false;
3558554fe75cSDmytro Linkin }
3559554fe75cSDmytro Linkin 
356073867881SPablo Neira Ayuso static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
356173867881SPablo Neira Ayuso 				struct flow_action *flow_action,
3562e98bedf5SEli Britstein 				struct mlx5e_tc_flow *flow,
3563e98bedf5SEli Britstein 				struct netlink_ext_ack *extack)
3564a54e20b4SHadar Hen Zion {
356573867881SPablo Neira Ayuso 	struct pedit_headers_action hdrs[2] = {};
3566bf07aa73SPaul Blakey 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3567ecf5bb79SOr Gerlitz 	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
35686f9af8ffSTonghao Zhang 	struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
35691d447a39SSaeed Mahameed 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
357073867881SPablo Neira Ayuso 	const struct ip_tunnel_info *info = NULL;
3571554fe75cSDmytro Linkin 	int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS];
357284179981SPaul Blakey 	bool ft_flow = mlx5e_is_ft_flow(flow);
357373867881SPablo Neira Ayuso 	const struct flow_action_entry *act;
35740a7fcb78SPaul Blakey 	bool encap = false, decap = false;
35750a7fcb78SPaul Blakey 	u32 action = attr->action;
3576554fe75cSDmytro Linkin 	int err, i, if_count = 0;
357703a9d11eSOr Gerlitz 
357873867881SPablo Neira Ayuso 	if (!flow_action_has_entries(flow_action))
357903a9d11eSOr Gerlitz 		return -EINVAL;
358003a9d11eSOr Gerlitz 
358173867881SPablo Neira Ayuso 	flow_action_for_each(i, act, flow_action) {
358273867881SPablo Neira Ayuso 		switch (act->id) {
358373867881SPablo Neira Ayuso 		case FLOW_ACTION_DROP:
35841cab1cd7SOr Gerlitz 			action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
358503a9d11eSOr Gerlitz 				  MLX5_FLOW_CONTEXT_ACTION_COUNT;
358673867881SPablo Neira Ayuso 			break;
358773867881SPablo Neira Ayuso 		case FLOW_ACTION_MANGLE:
358873867881SPablo Neira Ayuso 		case FLOW_ACTION_ADD:
358973867881SPablo Neira Ayuso 			err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_FDB,
3590c500c86bSPablo Neira Ayuso 						    parse_attr, hdrs, extack);
3591d7e75a32SOr Gerlitz 			if (err)
3592d7e75a32SOr Gerlitz 				return err;
3593d7e75a32SOr Gerlitz 
35941cab1cd7SOr Gerlitz 			action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
3595e85e02baSEli Britstein 			attr->split_count = attr->out_count;
359673867881SPablo Neira Ayuso 			break;
359773867881SPablo Neira Ayuso 		case FLOW_ACTION_CSUM:
35981cab1cd7SOr Gerlitz 			if (csum_offload_supported(priv, action,
359973867881SPablo Neira Ayuso 						   act->csum_flags, extack))
360073867881SPablo Neira Ayuso 				break;
360126c02749SOr Gerlitz 
360226c02749SOr Gerlitz 			return -EOPNOTSUPP;
360373867881SPablo Neira Ayuso 		case FLOW_ACTION_REDIRECT:
360473867881SPablo Neira Ayuso 		case FLOW_ACTION_MIRRED: {
360503a9d11eSOr Gerlitz 			struct mlx5e_priv *out_priv;
3606592d3651SChris Mi 			struct net_device *out_dev;
360703a9d11eSOr Gerlitz 
360873867881SPablo Neira Ayuso 			out_dev = act->dev;
3609ef381359SOz Shlomo 			if (!out_dev) {
3610ef381359SOz Shlomo 				/* out_dev is NULL when filters with
3611ef381359SOz Shlomo 				 * non-existing mirred device are replayed to
3612ef381359SOz Shlomo 				 * the driver.
3613ef381359SOz Shlomo 				 */
3614ef381359SOz Shlomo 				return -EINVAL;
3615ef381359SOz Shlomo 			}
361603a9d11eSOr Gerlitz 
361784179981SPaul Blakey 			if (ft_flow && out_dev == priv->netdev) {
361884179981SPaul Blakey 				/* Ignore forward to self rules generated
361984179981SPaul Blakey 				 * by adding both mlx5 devs to the flow table
362084179981SPaul Blakey 				 * block on a normal nft offload setup.
362184179981SPaul Blakey 				 */
362284179981SPaul Blakey 				return -EOPNOTSUPP;
362384179981SPaul Blakey 			}
362484179981SPaul Blakey 
3625592d3651SChris Mi 			if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
3626e98bedf5SEli Britstein 				NL_SET_ERR_MSG_MOD(extack,
3627e98bedf5SEli Britstein 						   "can't support more output ports, can't offload forwarding");
3628592d3651SChris Mi 				pr_err("can't support more than %d output ports, can't offload forwarding\n",
3629592d3651SChris Mi 				       attr->out_count);
3630592d3651SChris Mi 				return -EOPNOTSUPP;
3631592d3651SChris Mi 			}
3632592d3651SChris Mi 
3633f493f155SEli Britstein 			action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
3634f493f155SEli Britstein 				  MLX5_FLOW_CONTEXT_ACTION_COUNT;
3635b6a4ac24SVlad Buslov 			if (encap) {
3636b6a4ac24SVlad Buslov 				parse_attr->mirred_ifindex[attr->out_count] =
3637b6a4ac24SVlad Buslov 					out_dev->ifindex;
3638b6a4ac24SVlad Buslov 				parse_attr->tun_info[attr->out_count] = dup_tun_info(info);
3639b6a4ac24SVlad Buslov 				if (!parse_attr->tun_info[attr->out_count])
3640b6a4ac24SVlad Buslov 					return -ENOMEM;
3641b6a4ac24SVlad Buslov 				encap = false;
3642b6a4ac24SVlad Buslov 				attr->dests[attr->out_count].flags |=
3643b6a4ac24SVlad Buslov 					MLX5_ESW_DEST_ENCAP;
3644b6a4ac24SVlad Buslov 				attr->out_count++;
3645b6a4ac24SVlad Buslov 				/* attr->dests[].rep is resolved when we
3646b6a4ac24SVlad Buslov 				 * handle encap
3647b6a4ac24SVlad Buslov 				 */
3648b6a4ac24SVlad Buslov 			} else if (netdev_port_same_parent_id(priv->netdev, out_dev)) {
36497ba58ba7SRabie Loulou 				struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
36507ba58ba7SRabie Loulou 				struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
3651fa833bd5SVlad Buslov 				struct net_device *uplink_upper;
36527ba58ba7SRabie Loulou 
3653554fe75cSDmytro Linkin 				if (is_duplicated_output_device(priv->netdev,
3654554fe75cSDmytro Linkin 								out_dev,
3655554fe75cSDmytro Linkin 								ifindexes,
3656554fe75cSDmytro Linkin 								if_count,
3657554fe75cSDmytro Linkin 								extack))
3658554fe75cSDmytro Linkin 					return -EOPNOTSUPP;
3659554fe75cSDmytro Linkin 
3660554fe75cSDmytro Linkin 				ifindexes[if_count] = out_dev->ifindex;
3661554fe75cSDmytro Linkin 				if_count++;
3662554fe75cSDmytro Linkin 
3663fa833bd5SVlad Buslov 				rcu_read_lock();
3664fa833bd5SVlad Buslov 				uplink_upper =
3665fa833bd5SVlad Buslov 					netdev_master_upper_dev_get_rcu(uplink_dev);
36667ba58ba7SRabie Loulou 				if (uplink_upper &&
36677ba58ba7SRabie Loulou 				    netif_is_lag_master(uplink_upper) &&
36687ba58ba7SRabie Loulou 				    uplink_upper == out_dev)
36697ba58ba7SRabie Loulou 					out_dev = uplink_dev;
3670fa833bd5SVlad Buslov 				rcu_read_unlock();
36717ba58ba7SRabie Loulou 
3672278748a9SEli Britstein 				if (is_vlan_dev(out_dev)) {
3673278748a9SEli Britstein 					err = add_vlan_push_action(priv, attr,
3674278748a9SEli Britstein 								   &out_dev,
3675278748a9SEli Britstein 								   &action);
3676278748a9SEli Britstein 					if (err)
3677278748a9SEli Britstein 						return err;
3678278748a9SEli Britstein 				}
3679f6dc1264SPaul Blakey 
368035a605dbSEli Britstein 				if (is_vlan_dev(parse_attr->filter_dev)) {
368135a605dbSEli Britstein 					err = add_vlan_pop_action(priv, attr,
368235a605dbSEli Britstein 								  &action);
368335a605dbSEli Britstein 					if (err)
368435a605dbSEli Britstein 						return err;
368535a605dbSEli Britstein 				}
3686278748a9SEli Britstein 
3687f6dc1264SPaul Blakey 				if (!mlx5e_is_valid_eswitch_fwd_dev(priv, out_dev)) {
3688f6dc1264SPaul Blakey 					NL_SET_ERR_MSG_MOD(extack,
3689f6dc1264SPaul Blakey 							   "devices are not on same switch HW, can't offload forwarding");
3690f6dc1264SPaul Blakey 					pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
3691f6dc1264SPaul Blakey 					       priv->netdev->name, out_dev->name);
3692a0646c88SEli Britstein 					return -EOPNOTSUPP;
3693f6dc1264SPaul Blakey 				}
3694a0646c88SEli Britstein 
369503a9d11eSOr Gerlitz 				out_priv = netdev_priv(out_dev);
36961d447a39SSaeed Mahameed 				rpriv = out_priv->ppriv;
3697df65a573SEli Britstein 				attr->dests[attr->out_count].rep = rpriv->rep;
3698df65a573SEli Britstein 				attr->dests[attr->out_count].mdev = out_priv->mdev;
3699df65a573SEli Britstein 				attr->out_count++;
3700ef381359SOz Shlomo 			} else if (parse_attr->filter_dev != priv->netdev) {
3701ef381359SOz Shlomo 				/* All mlx5 devices are called to configure
3702ef381359SOz Shlomo 				 * high level device filters. Therefore, the
3703ef381359SOz Shlomo 				 * *attempt* to  install a filter on invalid
3704ef381359SOz Shlomo 				 * eswitch should not trigger an explicit error
3705ef381359SOz Shlomo 				 */
3706ef381359SOz Shlomo 				return -EINVAL;
3707a54e20b4SHadar Hen Zion 			} else {
3708e98bedf5SEli Britstein 				NL_SET_ERR_MSG_MOD(extack,
3709e98bedf5SEli Britstein 						   "devices are not on same switch HW, can't offload forwarding");
3710a54e20b4SHadar Hen Zion 				pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
3711a54e20b4SHadar Hen Zion 				       priv->netdev->name, out_dev->name);
3712a54e20b4SHadar Hen Zion 				return -EINVAL;
3713a54e20b4SHadar Hen Zion 			}
3714a54e20b4SHadar Hen Zion 			}
371573867881SPablo Neira Ayuso 			break;
371673867881SPablo Neira Ayuso 		case FLOW_ACTION_TUNNEL_ENCAP:
371773867881SPablo Neira Ayuso 			info = act->tunnel;
3718a54e20b4SHadar Hen Zion 			if (info)
3719a54e20b4SHadar Hen Zion 				encap = true;
3720a54e20b4SHadar Hen Zion 			else
3721a54e20b4SHadar Hen Zion 				return -EOPNOTSUPP;
372203a9d11eSOr Gerlitz 
372373867881SPablo Neira Ayuso 			break;
372473867881SPablo Neira Ayuso 		case FLOW_ACTION_VLAN_PUSH:
372573867881SPablo Neira Ayuso 		case FLOW_ACTION_VLAN_POP:
372676b496b1SEli Britstein 			if (act->id == FLOW_ACTION_VLAN_PUSH &&
372776b496b1SEli Britstein 			    (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP)) {
372876b496b1SEli Britstein 				/* Replace vlan pop+push with vlan modify */
372976b496b1SEli Britstein 				action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
373076b496b1SEli Britstein 				err = add_vlan_rewrite_action(priv,
373176b496b1SEli Britstein 							      MLX5_FLOW_NAMESPACE_FDB,
373276b496b1SEli Britstein 							      act, parse_attr, hdrs,
373376b496b1SEli Britstein 							      &action, extack);
373476b496b1SEli Britstein 			} else {
373573867881SPablo Neira Ayuso 				err = parse_tc_vlan_action(priv, act, attr, &action);
373676b496b1SEli Britstein 			}
37371482bd3dSJianbo Liu 			if (err)
37381482bd3dSJianbo Liu 				return err;
37391482bd3dSJianbo Liu 
3740e85e02baSEli Britstein 			attr->split_count = attr->out_count;
374173867881SPablo Neira Ayuso 			break;
3742bdc837eeSEli Britstein 		case FLOW_ACTION_VLAN_MANGLE:
3743bdc837eeSEli Britstein 			err = add_vlan_rewrite_action(priv,
3744bdc837eeSEli Britstein 						      MLX5_FLOW_NAMESPACE_FDB,
3745bdc837eeSEli Britstein 						      act, parse_attr, hdrs,
3746bdc837eeSEli Britstein 						      &action, extack);
3747bdc837eeSEli Britstein 			if (err)
3748bdc837eeSEli Britstein 				return err;
3749bdc837eeSEli Britstein 
3750bdc837eeSEli Britstein 			attr->split_count = attr->out_count;
3751bdc837eeSEli Britstein 			break;
375273867881SPablo Neira Ayuso 		case FLOW_ACTION_TUNNEL_DECAP:
37530a7fcb78SPaul Blakey 			decap = true;
375473867881SPablo Neira Ayuso 			break;
375573867881SPablo Neira Ayuso 		case FLOW_ACTION_GOTO: {
375673867881SPablo Neira Ayuso 			u32 dest_chain = act->chain_index;
375739ac237cSPaul Blakey 			u32 max_chain = mlx5_esw_chains_get_chain_range(esw);
3758bf07aa73SPaul Blakey 
375984179981SPaul Blakey 			if (ft_flow) {
376084179981SPaul Blakey 				NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported");
376184179981SPaul Blakey 				return -EOPNOTSUPP;
376284179981SPaul Blakey 			}
3763bf07aa73SPaul Blakey 			if (dest_chain <= attr->chain) {
3764bf07aa73SPaul Blakey 				NL_SET_ERR_MSG(extack, "Goto earlier chain isn't supported");
3765bf07aa73SPaul Blakey 				return -EOPNOTSUPP;
3766bf07aa73SPaul Blakey 			}
3767bf07aa73SPaul Blakey 			if (dest_chain > max_chain) {
3768bf07aa73SPaul Blakey 				NL_SET_ERR_MSG(extack, "Requested destination chain is out of supported range");
3769bf07aa73SPaul Blakey 				return -EOPNOTSUPP;
3770bf07aa73SPaul Blakey 			}
3771e88afe75SOr Gerlitz 			action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
3772bf07aa73SPaul Blakey 			attr->dest_chain = dest_chain;
377373867881SPablo Neira Ayuso 			break;
3774bf07aa73SPaul Blakey 			}
377573867881SPablo Neira Ayuso 		default:
37762cc1cb1dSTonghao Zhang 			NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
37772cc1cb1dSTonghao Zhang 			return -EOPNOTSUPP;
377803a9d11eSOr Gerlitz 		}
377973867881SPablo Neira Ayuso 	}
3780bdd66ac0SOr Gerlitz 
37810bac1194SEli Britstein 	if (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
37820bac1194SEli Britstein 	    action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
37830bac1194SEli Britstein 		/* For prio tag mode, replace vlan pop with rewrite vlan prio
37840bac1194SEli Britstein 		 * tag rewrite.
37850bac1194SEli Britstein 		 */
37860bac1194SEli Britstein 		action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
37870bac1194SEli Britstein 		err = add_vlan_prio_tag_rewrite_action(priv, parse_attr, hdrs,
37880bac1194SEli Britstein 						       &action, extack);
37890bac1194SEli Britstein 		if (err)
37900bac1194SEli Britstein 			return err;
37910bac1194SEli Britstein 	}
37920bac1194SEli Britstein 
3793c500c86bSPablo Neira Ayuso 	if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
3794c500c86bSPablo Neira Ayuso 	    hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
379584be899fSTonghao Zhang 		err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_FDB,
379627c11b6bSEli Britstein 					    parse_attr, hdrs, &action, extack);
3797c500c86bSPablo Neira Ayuso 		if (err)
3798c500c86bSPablo Neira Ayuso 			return err;
379927c11b6bSEli Britstein 		/* in case all pedit actions are skipped, remove the MOD_HDR
380027c11b6bSEli Britstein 		 * flag. we might have set split_count either by pedit or
380127c11b6bSEli Britstein 		 * pop/push. if there is no pop/push either, reset it too.
380227c11b6bSEli Britstein 		 */
38036ae4a6a5SPaul Blakey 		if (parse_attr->mod_hdr_acts.num_actions == 0) {
380427c11b6bSEli Britstein 			action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
38056ae4a6a5SPaul Blakey 			dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
380627c11b6bSEli Britstein 			if (!((action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
380727c11b6bSEli Britstein 			      (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)))
380827c11b6bSEli Britstein 				attr->split_count = 0;
380927c11b6bSEli Britstein 		}
3810c500c86bSPablo Neira Ayuso 	}
3811c500c86bSPablo Neira Ayuso 
38121cab1cd7SOr Gerlitz 	attr->action = action;
381373867881SPablo Neira Ayuso 	if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
3814bdd66ac0SOr Gerlitz 		return -EOPNOTSUPP;
3815bdd66ac0SOr Gerlitz 
3816e88afe75SOr Gerlitz 	if (attr->dest_chain) {
38170a7fcb78SPaul Blakey 		if (decap) {
38180a7fcb78SPaul Blakey 			/* It can be supported if we'll create a mapping for
38190a7fcb78SPaul Blakey 			 * the tunnel device only (without tunnel), and set
38200a7fcb78SPaul Blakey 			 * this tunnel id with this decap flow.
38210a7fcb78SPaul Blakey 			 *
38220a7fcb78SPaul Blakey 			 * On restore (miss), we'll just set this saved tunnel
38230a7fcb78SPaul Blakey 			 * device.
38240a7fcb78SPaul Blakey 			 */
38250a7fcb78SPaul Blakey 
38260a7fcb78SPaul Blakey 			NL_SET_ERR_MSG(extack,
38270a7fcb78SPaul Blakey 				       "Decap with goto isn't supported");
38280a7fcb78SPaul Blakey 			netdev_warn(priv->netdev,
38290a7fcb78SPaul Blakey 				    "Decap with goto isn't supported");
38300a7fcb78SPaul Blakey 			return -EOPNOTSUPP;
38310a7fcb78SPaul Blakey 		}
38320a7fcb78SPaul Blakey 
3833e88afe75SOr Gerlitz 		if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
3834e88afe75SOr Gerlitz 			NL_SET_ERR_MSG(extack, "Mirroring goto chain rules isn't supported");
3835e88afe75SOr Gerlitz 			return -EOPNOTSUPP;
3836e88afe75SOr Gerlitz 		}
3837e88afe75SOr Gerlitz 		attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
3838e88afe75SOr Gerlitz 	}
3839e88afe75SOr Gerlitz 
3840ae2741e2SVlad Buslov 	if (!(attr->action &
3841ae2741e2SVlad Buslov 	      (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
3842ae2741e2SVlad Buslov 		NL_SET_ERR_MSG(extack, "Rule must have at least one forward/drop action");
3843ae2741e2SVlad Buslov 		return -EOPNOTSUPP;
3844ae2741e2SVlad Buslov 	}
3845ae2741e2SVlad Buslov 
3846e85e02baSEli Britstein 	if (attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
3847e98bedf5SEli Britstein 		NL_SET_ERR_MSG_MOD(extack,
3848e98bedf5SEli Britstein 				   "current firmware doesn't support split rule for port mirroring");
3849592d3651SChris Mi 		netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
3850592d3651SChris Mi 		return -EOPNOTSUPP;
3851592d3651SChris Mi 	}
3852592d3651SChris Mi 
385331c8eba5SOr Gerlitz 	return 0;
385403a9d11eSOr Gerlitz }
385503a9d11eSOr Gerlitz 
3856226f2ca3SVlad Buslov static void get_flags(int flags, unsigned long *flow_flags)
385760bd4af8SOr Gerlitz {
3858226f2ca3SVlad Buslov 	unsigned long __flow_flags = 0;
385960bd4af8SOr Gerlitz 
3860226f2ca3SVlad Buslov 	if (flags & MLX5_TC_FLAG(INGRESS))
3861226f2ca3SVlad Buslov 		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_INGRESS);
3862226f2ca3SVlad Buslov 	if (flags & MLX5_TC_FLAG(EGRESS))
3863226f2ca3SVlad Buslov 		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_EGRESS);
386460bd4af8SOr Gerlitz 
3865226f2ca3SVlad Buslov 	if (flags & MLX5_TC_FLAG(ESW_OFFLOAD))
3866226f2ca3SVlad Buslov 		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
3867226f2ca3SVlad Buslov 	if (flags & MLX5_TC_FLAG(NIC_OFFLOAD))
3868226f2ca3SVlad Buslov 		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
386984179981SPaul Blakey 	if (flags & MLX5_TC_FLAG(FT_OFFLOAD))
387084179981SPaul Blakey 		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_FT);
3871d9ee0491SOr Gerlitz 
387260bd4af8SOr Gerlitz 	*flow_flags = __flow_flags;
387360bd4af8SOr Gerlitz }
387460bd4af8SOr Gerlitz 
/* rhashtable parameters for the per-namespace table of offloaded TC flows.
 * Flows are keyed by the flower cookie (see mlx5e_configure_flower below),
 * linked through mlx5e_tc_flow::node.
 */
static const struct rhashtable_params tc_ht_params = {
	.head_offset = offsetof(struct mlx5e_tc_flow, node),
	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};
388105866c82SOr Gerlitz 
3882226f2ca3SVlad Buslov static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv,
3883226f2ca3SVlad Buslov 				    unsigned long flags)
388405866c82SOr Gerlitz {
3885655dc3d2SOr Gerlitz 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3886655dc3d2SOr Gerlitz 	struct mlx5e_rep_priv *uplink_rpriv;
3887655dc3d2SOr Gerlitz 
3888226f2ca3SVlad Buslov 	if (flags & MLX5_TC_FLAG(ESW_OFFLOAD)) {
3889655dc3d2SOr Gerlitz 		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
3890ec1366c2SOz Shlomo 		return &uplink_rpriv->uplink_priv.tc_ht;
3891d9ee0491SOr Gerlitz 	} else /* NIC offload */
389205866c82SOr Gerlitz 		return &priv->fs.tc.ht;
389305866c82SOr Gerlitz }
389405866c82SOr Gerlitz 
389504de7ddaSRoi Dayan static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
389604de7ddaSRoi Dayan {
38971418ddd9SAviv Heller 	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
3898b05af6aaSBodong Wang 	bool is_rep_ingress = attr->in_rep->vport != MLX5_VPORT_UPLINK &&
3899226f2ca3SVlad Buslov 		flow_flag_test(flow, INGRESS);
39001418ddd9SAviv Heller 	bool act_is_encap = !!(attr->action &
39011418ddd9SAviv Heller 			       MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT);
39021418ddd9SAviv Heller 	bool esw_paired = mlx5_devcom_is_paired(attr->in_mdev->priv.devcom,
39031418ddd9SAviv Heller 						MLX5_DEVCOM_ESW_OFFLOADS);
39041418ddd9SAviv Heller 
390510fbb1cdSRoi Dayan 	if (!esw_paired)
390610fbb1cdSRoi Dayan 		return false;
390710fbb1cdSRoi Dayan 
390810fbb1cdSRoi Dayan 	if ((mlx5_lag_is_sriov(attr->in_mdev) ||
390910fbb1cdSRoi Dayan 	     mlx5_lag_is_multipath(attr->in_mdev)) &&
391010fbb1cdSRoi Dayan 	    (is_rep_ingress || act_is_encap))
391110fbb1cdSRoi Dayan 		return true;
391210fbb1cdSRoi Dayan 
391310fbb1cdSRoi Dayan 	return false;
391404de7ddaSRoi Dayan }
391504de7ddaSRoi Dayan 
3916a88780a9SRoi Dayan static int
3917a88780a9SRoi Dayan mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
3918226f2ca3SVlad Buslov 		 struct flow_cls_offload *f, unsigned long flow_flags,
3919a88780a9SRoi Dayan 		 struct mlx5e_tc_flow_parse_attr **__parse_attr,
3920a88780a9SRoi Dayan 		 struct mlx5e_tc_flow **__flow)
3921e3a2b7edSAmir Vadai {
392217091853SOr Gerlitz 	struct mlx5e_tc_flow_parse_attr *parse_attr;
39233bc4b7bfSOr Gerlitz 	struct mlx5e_tc_flow *flow;
39245a7e5bcbSVlad Buslov 	int out_index, err;
3925776b12b6SOr Gerlitz 
392665ba8fb7SOr Gerlitz 	flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
39271b9a07eeSLeon Romanovsky 	parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
392817091853SOr Gerlitz 	if (!parse_attr || !flow) {
3929e3a2b7edSAmir Vadai 		err = -ENOMEM;
3930e3a2b7edSAmir Vadai 		goto err_free;
3931e3a2b7edSAmir Vadai 	}
3932e3a2b7edSAmir Vadai 
3933e3a2b7edSAmir Vadai 	flow->cookie = f->cookie;
393465ba8fb7SOr Gerlitz 	flow->flags = flow_flags;
3935655dc3d2SOr Gerlitz 	flow->priv = priv;
39365a7e5bcbSVlad Buslov 	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
39375a7e5bcbSVlad Buslov 		INIT_LIST_HEAD(&flow->encaps[out_index].list);
39385a7e5bcbSVlad Buslov 	INIT_LIST_HEAD(&flow->mod_hdr);
39395a7e5bcbSVlad Buslov 	INIT_LIST_HEAD(&flow->hairpin);
39405a7e5bcbSVlad Buslov 	refcount_set(&flow->refcnt, 1);
394195435ad7SVlad Buslov 	init_completion(&flow->init_done);
3942e3a2b7edSAmir Vadai 
3943a88780a9SRoi Dayan 	*__flow = flow;
3944a88780a9SRoi Dayan 	*__parse_attr = parse_attr;
3945a88780a9SRoi Dayan 
3946a88780a9SRoi Dayan 	return 0;
3947a88780a9SRoi Dayan 
3948a88780a9SRoi Dayan err_free:
3949a88780a9SRoi Dayan 	kfree(flow);
3950a88780a9SRoi Dayan 	kvfree(parse_attr);
3951a88780a9SRoi Dayan 	return err;
3952adb4c123SOr Gerlitz }
3953adb4c123SOr Gerlitz 
3954988ab9c7STonghao Zhang static void
3955988ab9c7STonghao Zhang mlx5e_flow_esw_attr_init(struct mlx5_esw_flow_attr *esw_attr,
3956988ab9c7STonghao Zhang 			 struct mlx5e_priv *priv,
3957988ab9c7STonghao Zhang 			 struct mlx5e_tc_flow_parse_attr *parse_attr,
3958f9e30088SPablo Neira Ayuso 			 struct flow_cls_offload *f,
3959988ab9c7STonghao Zhang 			 struct mlx5_eswitch_rep *in_rep,
3960988ab9c7STonghao Zhang 			 struct mlx5_core_dev *in_mdev)
3961988ab9c7STonghao Zhang {
3962988ab9c7STonghao Zhang 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3963988ab9c7STonghao Zhang 
3964988ab9c7STonghao Zhang 	esw_attr->parse_attr = parse_attr;
3965988ab9c7STonghao Zhang 	esw_attr->chain = f->common.chain_index;
3966ef01adaeSPablo Neira Ayuso 	esw_attr->prio = f->common.prio;
3967988ab9c7STonghao Zhang 
3968988ab9c7STonghao Zhang 	esw_attr->in_rep = in_rep;
3969988ab9c7STonghao Zhang 	esw_attr->in_mdev = in_mdev;
3970988ab9c7STonghao Zhang 
3971988ab9c7STonghao Zhang 	if (MLX5_CAP_ESW(esw->dev, counter_eswitch_affinity) ==
3972988ab9c7STonghao Zhang 	    MLX5_COUNTER_SOURCE_ESWITCH)
3973988ab9c7STonghao Zhang 		esw_attr->counter_dev = in_mdev;
3974988ab9c7STonghao Zhang 	else
3975988ab9c7STonghao Zhang 		esw_attr->counter_dev = priv->mdev;
3976988ab9c7STonghao Zhang }
3977988ab9c7STonghao Zhang 
/* Allocate, parse and offload one FDB (eswitch) flow for classifier @f.
 *
 * @filter_dev: the device the filter was installed on (may be a tunnel or
 *              vlan device, not necessarily priv->netdev).
 * @in_rep/@in_mdev: the eswitch rep and mdev the packet originates from;
 *              passed through to the eswitch attribute.
 *
 * Returns the new flow on success or ERR_PTR(err). A flow that failed HW
 * offload only because the route is unreachable in multipath mode
 * (-ENETUNREACH) is kept and queued as "unready" to be retried later.
 */
static struct mlx5e_tc_flow *
__mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
		     struct flow_cls_offload *f,
		     unsigned long flow_flags,
		     struct net_device *filter_dev,
		     struct mlx5_eswitch_rep *in_rep,
		     struct mlx5_core_dev *in_mdev)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_flow *flow;
	int attr_size, err;

	flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
	attr_size  = sizeof(struct mlx5_esw_flow_attr);
	err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
			       &parse_attr, &flow);
	if (err)
		goto out;

	parse_attr->filter_dev = filter_dev;
	mlx5e_flow_esw_attr_init(flow->esw_attr,
				 priv, parse_attr,
				 f, in_rep, in_mdev);

	err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
			       f, filter_dev);
	if (err)
		goto err_free;

	err = parse_tc_fdb_actions(priv, &rule->action, flow, extack);
	if (err)
		goto err_free;

	err = mlx5e_tc_add_fdb_flow(priv, flow, extack);
	/* Wake waiters regardless of the outcome; init is over either way. */
	complete_all(&flow->init_done);
	if (err) {
		/* -ENETUNREACH under multipath is not fatal: keep the flow
		 * around and retry the offload when routes change.
		 */
		if (!(err == -ENETUNREACH && mlx5_lag_is_multipath(in_mdev)))
			goto err_free;

		add_unready_flow(flow);
	}

	return flow;

err_free:
	mlx5e_flow_put(priv, flow);
out:
	return ERR_PTR(err);
}
4029a88780a9SRoi Dayan 
/* Duplicate an already-offloaded FDB flow onto the paired (peer) eswitch.
 *
 * Takes a devcom reference on the peer eswitch for the duration of the
 * setup and releases it on every exit path. On success the new flow is
 * stored in flow->peer_flow, the flow is marked DUP, and it is linked on
 * the local eswitch's peer_flows list under peer_mutex.
 *
 * Returns 0 on success, -ENODEV when no peer is paired, or the error from
 * creating the peer flow.
 */
static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f,
				      struct mlx5e_tc_flow *flow,
				      unsigned long flow_flags)
{
	struct mlx5e_priv *priv = flow->priv, *peer_priv;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch, *peer_esw;
	struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_rep_priv *peer_urpriv;
	struct mlx5e_tc_flow *peer_flow;
	struct mlx5_core_dev *in_mdev;
	int err = 0;

	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer_esw)
		return -ENODEV;

	peer_urpriv = mlx5_eswitch_get_uplink_priv(peer_esw, REP_ETH);
	peer_priv = netdev_priv(peer_urpriv->netdev);

	/* in_mdev is assigned of which the packet originated from.
	 * So packets redirected to uplink use the same mdev of the
	 * original flow and packets redirected from uplink use the
	 * peer mdev.
	 */
	if (flow->esw_attr->in_rep->vport == MLX5_VPORT_UPLINK)
		in_mdev = peer_priv->mdev;
	else
		in_mdev = priv->mdev;

	parse_attr = flow->esw_attr->parse_attr;
	peer_flow = __mlx5e_add_fdb_flow(peer_priv, f, flow_flags,
					 parse_attr->filter_dev,
					 flow->esw_attr->in_rep, in_mdev);
	if (IS_ERR(peer_flow)) {
		err = PTR_ERR(peer_flow);
		goto out;
	}

	flow->peer_flow = peer_flow;
	flow_flag_set(flow, DUP);
	mutex_lock(&esw->offloads.peer_mutex);
	list_add_tail(&flow->peer, &esw->offloads.peer_flows);
	mutex_unlock(&esw->offloads.peer_mutex);

out:
	/* Balance the devcom reference taken above on all paths. */
	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	return err;
}
407904de7ddaSRoi Dayan 
408004de7ddaSRoi Dayan static int
408104de7ddaSRoi Dayan mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
4082f9e30088SPablo Neira Ayuso 		   struct flow_cls_offload *f,
4083226f2ca3SVlad Buslov 		   unsigned long flow_flags,
408404de7ddaSRoi Dayan 		   struct net_device *filter_dev,
408504de7ddaSRoi Dayan 		   struct mlx5e_tc_flow **__flow)
408604de7ddaSRoi Dayan {
408704de7ddaSRoi Dayan 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
408804de7ddaSRoi Dayan 	struct mlx5_eswitch_rep *in_rep = rpriv->rep;
408904de7ddaSRoi Dayan 	struct mlx5_core_dev *in_mdev = priv->mdev;
409004de7ddaSRoi Dayan 	struct mlx5e_tc_flow *flow;
409104de7ddaSRoi Dayan 	int err;
409204de7ddaSRoi Dayan 
409371129676SJason Gunthorpe 	flow = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep,
409471129676SJason Gunthorpe 				    in_mdev);
409571129676SJason Gunthorpe 	if (IS_ERR(flow))
409671129676SJason Gunthorpe 		return PTR_ERR(flow);
409704de7ddaSRoi Dayan 
409804de7ddaSRoi Dayan 	if (is_peer_flow_needed(flow)) {
409995dc1902SRoi Dayan 		err = mlx5e_tc_add_fdb_peer_flow(f, flow, flow_flags);
410004de7ddaSRoi Dayan 		if (err) {
410104de7ddaSRoi Dayan 			mlx5e_tc_del_fdb_flow(priv, flow);
410204de7ddaSRoi Dayan 			goto out;
410304de7ddaSRoi Dayan 		}
410404de7ddaSRoi Dayan 	}
410504de7ddaSRoi Dayan 
410604de7ddaSRoi Dayan 	*__flow = flow;
410704de7ddaSRoi Dayan 
410804de7ddaSRoi Dayan 	return 0;
410904de7ddaSRoi Dayan 
411004de7ddaSRoi Dayan out:
411104de7ddaSRoi Dayan 	return err;
411204de7ddaSRoi Dayan }
411304de7ddaSRoi Dayan 
4114a88780a9SRoi Dayan static int
4115a88780a9SRoi Dayan mlx5e_add_nic_flow(struct mlx5e_priv *priv,
4116f9e30088SPablo Neira Ayuso 		   struct flow_cls_offload *f,
4117226f2ca3SVlad Buslov 		   unsigned long flow_flags,
4118d11afc26SOz Shlomo 		   struct net_device *filter_dev,
4119a88780a9SRoi Dayan 		   struct mlx5e_tc_flow **__flow)
4120a88780a9SRoi Dayan {
4121f9e30088SPablo Neira Ayuso 	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
4122a88780a9SRoi Dayan 	struct netlink_ext_ack *extack = f->common.extack;
4123a88780a9SRoi Dayan 	struct mlx5e_tc_flow_parse_attr *parse_attr;
4124a88780a9SRoi Dayan 	struct mlx5e_tc_flow *flow;
4125a88780a9SRoi Dayan 	int attr_size, err;
4126a88780a9SRoi Dayan 
4127bf07aa73SPaul Blakey 	/* multi-chain not supported for NIC rules */
4128bf07aa73SPaul Blakey 	if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
4129bf07aa73SPaul Blakey 		return -EOPNOTSUPP;
4130bf07aa73SPaul Blakey 
4131226f2ca3SVlad Buslov 	flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
4132a88780a9SRoi Dayan 	attr_size  = sizeof(struct mlx5_nic_flow_attr);
4133a88780a9SRoi Dayan 	err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
4134a88780a9SRoi Dayan 			       &parse_attr, &flow);
4135a88780a9SRoi Dayan 	if (err)
4136a88780a9SRoi Dayan 		goto out;
4137a88780a9SRoi Dayan 
4138d11afc26SOz Shlomo 	parse_attr->filter_dev = filter_dev;
413954c177caSOz Shlomo 	err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
414054c177caSOz Shlomo 			       f, filter_dev);
4141d11afc26SOz Shlomo 	if (err)
4142d11afc26SOz Shlomo 		goto err_free;
4143d11afc26SOz Shlomo 
414473867881SPablo Neira Ayuso 	err = parse_tc_nic_actions(priv, &rule->action, parse_attr, flow, extack);
4145a88780a9SRoi Dayan 	if (err)
4146a88780a9SRoi Dayan 		goto err_free;
4147a88780a9SRoi Dayan 
4148a88780a9SRoi Dayan 	err = mlx5e_tc_add_nic_flow(priv, parse_attr, flow, extack);
4149a88780a9SRoi Dayan 	if (err)
4150a88780a9SRoi Dayan 		goto err_free;
4151a88780a9SRoi Dayan 
4152226f2ca3SVlad Buslov 	flow_flag_set(flow, OFFLOADED);
4153a88780a9SRoi Dayan 	kvfree(parse_attr);
4154a88780a9SRoi Dayan 	*__flow = flow;
4155a88780a9SRoi Dayan 
4156a88780a9SRoi Dayan 	return 0;
4157a88780a9SRoi Dayan 
4158a88780a9SRoi Dayan err_free:
41595a7e5bcbSVlad Buslov 	mlx5e_flow_put(priv, flow);
4160a88780a9SRoi Dayan 	kvfree(parse_attr);
4161a88780a9SRoi Dayan out:
4162a88780a9SRoi Dayan 	return err;
4163a88780a9SRoi Dayan }
4164a88780a9SRoi Dayan 
4165a88780a9SRoi Dayan static int
4166a88780a9SRoi Dayan mlx5e_tc_add_flow(struct mlx5e_priv *priv,
4167f9e30088SPablo Neira Ayuso 		  struct flow_cls_offload *f,
4168226f2ca3SVlad Buslov 		  unsigned long flags,
4169d11afc26SOz Shlomo 		  struct net_device *filter_dev,
4170a88780a9SRoi Dayan 		  struct mlx5e_tc_flow **flow)
4171a88780a9SRoi Dayan {
4172a88780a9SRoi Dayan 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
4173226f2ca3SVlad Buslov 	unsigned long flow_flags;
4174a88780a9SRoi Dayan 	int err;
4175a88780a9SRoi Dayan 
4176a88780a9SRoi Dayan 	get_flags(flags, &flow_flags);
4177a88780a9SRoi Dayan 
4178bf07aa73SPaul Blakey 	if (!tc_can_offload_extack(priv->netdev, f->common.extack))
4179bf07aa73SPaul Blakey 		return -EOPNOTSUPP;
4180bf07aa73SPaul Blakey 
4181f6455de0SBodong Wang 	if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
4182d11afc26SOz Shlomo 		err = mlx5e_add_fdb_flow(priv, f, flow_flags,
4183d11afc26SOz Shlomo 					 filter_dev, flow);
4184a88780a9SRoi Dayan 	else
4185d11afc26SOz Shlomo 		err = mlx5e_add_nic_flow(priv, f, flow_flags,
4186d11afc26SOz Shlomo 					 filter_dev, flow);
4187a88780a9SRoi Dayan 
4188a88780a9SRoi Dayan 	return err;
4189a88780a9SRoi Dayan }
4190a88780a9SRoi Dayan 
/* TC flower "replace" entry point: install an offloaded flow for @f.
 *
 * The cookie is first looked up under RCU to reject duplicates early;
 * insertion still uses rhashtable_lookup_insert_fast, so a concurrent
 * racing add is also caught at insert time.
 *
 * Returns 0 on success, -EEXIST for a duplicate cookie, or the error from
 * flow creation/insertion.
 */
int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
			   struct flow_cls_offload *f, unsigned long flags)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5e_tc_flow *flow;
	int err = 0;

	rcu_read_lock();
	flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
	rcu_read_unlock();
	if (flow) {
		NL_SET_ERR_MSG_MOD(extack,
				   "flow cookie already exists, ignoring");
		netdev_warn_once(priv->netdev,
				 "flow cookie %lx already exists, ignoring\n",
				 f->cookie);
		err = -EEXIST;
		goto out;
	}

	trace_mlx5e_configure_flower(f);
	err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow);
	if (err)
		goto out;

	err = rhashtable_lookup_insert_fast(tc_ht, &flow->node, tc_ht_params);
	if (err)
		goto err_free;

	return 0;

err_free:
	mlx5e_flow_put(priv, flow);
out:
	return err;
}
4228e3a2b7edSAmir Vadai 
42298f8ae895SOr Gerlitz static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags)
42308f8ae895SOr Gerlitz {
4231226f2ca3SVlad Buslov 	bool dir_ingress = !!(flags & MLX5_TC_FLAG(INGRESS));
4232226f2ca3SVlad Buslov 	bool dir_egress = !!(flags & MLX5_TC_FLAG(EGRESS));
42338f8ae895SOr Gerlitz 
4234226f2ca3SVlad Buslov 	return flow_flag_test(flow, INGRESS) == dir_ingress &&
4235226f2ca3SVlad Buslov 		flow_flag_test(flow, EGRESS) == dir_egress;
42368f8ae895SOr Gerlitz }
42378f8ae895SOr Gerlitz 
/* Remove an offloaded flower rule identified by its cookie.  The DELETED
 * flag acts as a single-winner marker so a concurrent deleter releases
 * the flow exactly once.
 */
int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
			struct flow_cls_offload *f, unsigned long flags)
{
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5e_tc_flow *flow;
	int err;

	rcu_read_lock();
	flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
	/* Reject a match whose direction flags don't correspond to the
	 * requested ingress/egress bits.
	 */
	if (!flow || !same_flow_direction(flow, flags)) {
		err = -EINVAL;
		goto errout;
	}

	/* Only delete the flow if it doesn't have MLX5E_TC_FLOW_DELETED flag
	 * set.
	 */
	if (flow_flag_test_and_set(flow, DELETED)) {
		err = -EINVAL;
		goto errout;
	}
	rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params);
	/* Flow is unhashed; leave the RCU read side before the release
	 * below.
	 */
	rcu_read_unlock();

	trace_mlx5e_delete_flower(f);
	mlx5e_flow_put(priv, flow);

	return 0;

errout:
	rcu_read_unlock();
	return err;
}
4271e3a2b7edSAmir Vadai 
/* Report HW byte/packet counters for an offloaded flower rule back to TC.
 * If the flow is duplicated on the peer eswitch (DUP), that flow's
 * counter is accumulated as well.
 */
int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
		       struct flow_cls_offload *f, unsigned long flags)
{
	struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5_eswitch *peer_esw;
	struct mlx5e_tc_flow *flow;
	struct mlx5_fc *counter;
	u64 lastuse = 0;
	u64 packets = 0;
	u64 bytes = 0;
	int err = 0;

	/* Pin the flow across the counter queries; the matching
	 * mlx5e_flow_put() is at the end of this function.
	 */
	rcu_read_lock();
	flow = mlx5e_flow_get(rhashtable_lookup(tc_ht, &f->cookie,
						tc_ht_params));
	rcu_read_unlock();
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	if (!same_flow_direction(flow, flags)) {
		err = -EINVAL;
		goto errout;
	}

	if (mlx5e_is_offloaded_flow(flow)) {
		counter = mlx5e_tc_get_counter(flow);
		if (!counter)	/* nothing to report; err stays 0 */
			goto errout;

		mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
	}

	/* Under multipath it's possible for one rule to be currently
	 * un-offloaded while the other rule is offloaded.
	 */
	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer_esw)
		goto out;

	/* Fold in the counter of the offloaded duplicate on the peer. */
	if (flow_flag_test(flow, DUP) &&
	    flow_flag_test(flow->peer_flow, OFFLOADED)) {
		u64 bytes2;
		u64 packets2;
		u64 lastuse2;

		counter = mlx5e_tc_get_counter(flow->peer_flow);
		if (!counter)
			goto no_peer_counter;
		mlx5_fc_query_cached(counter, &bytes2, &packets2, &lastuse2);

		bytes += bytes2;
		packets += packets2;
		lastuse = max_t(u64, lastuse, lastuse2);
	}

no_peer_counter:
	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
out:
	flow_stats_update(&f->stats, bytes, packets, lastuse);
	trace_mlx5e_stats_flower(f);
errout:
	mlx5e_flow_put(priv, flow);
	return err;
}
4337aad7e08dSAmir Vadai 
4338fcb64c0fSEli Cohen static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
4339fcb64c0fSEli Cohen 			       struct netlink_ext_ack *extack)
4340fcb64c0fSEli Cohen {
4341fcb64c0fSEli Cohen 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
4342fcb64c0fSEli Cohen 	struct mlx5_eswitch *esw;
4343fcb64c0fSEli Cohen 	u16 vport_num;
4344fcb64c0fSEli Cohen 	u32 rate_mbps;
4345fcb64c0fSEli Cohen 	int err;
4346fcb64c0fSEli Cohen 
4347e401a184SEli Cohen 	vport_num = rpriv->rep->vport;
4348e401a184SEli Cohen 	if (vport_num >= MLX5_VPORT_ECPF) {
4349e401a184SEli Cohen 		NL_SET_ERR_MSG_MOD(extack,
4350e401a184SEli Cohen 				   "Ingress rate limit is supported only for Eswitch ports connected to VFs");
4351e401a184SEli Cohen 		return -EOPNOTSUPP;
4352e401a184SEli Cohen 	}
4353e401a184SEli Cohen 
4354fcb64c0fSEli Cohen 	esw = priv->mdev->priv.eswitch;
4355fcb64c0fSEli Cohen 	/* rate is given in bytes/sec.
4356fcb64c0fSEli Cohen 	 * First convert to bits/sec and then round to the nearest mbit/secs.
4357fcb64c0fSEli Cohen 	 * mbit means million bits.
4358fcb64c0fSEli Cohen 	 * Moreover, if rate is non zero we choose to configure to a minimum of
4359fcb64c0fSEli Cohen 	 * 1 mbit/sec.
4360fcb64c0fSEli Cohen 	 */
4361fcb64c0fSEli Cohen 	rate_mbps = rate ? max_t(u32, (rate * 8 + 500000) / 1000000, 1) : 0;
4362fcb64c0fSEli Cohen 	err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps);
4363fcb64c0fSEli Cohen 	if (err)
4364fcb64c0fSEli Cohen 		NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware");
4365fcb64c0fSEli Cohen 
4366fcb64c0fSEli Cohen 	return err;
4367fcb64c0fSEli Cohen }
4368fcb64c0fSEli Cohen 
4369fcb64c0fSEli Cohen static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
4370fcb64c0fSEli Cohen 					struct flow_action *flow_action,
4371fcb64c0fSEli Cohen 					struct netlink_ext_ack *extack)
4372fcb64c0fSEli Cohen {
4373fcb64c0fSEli Cohen 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
4374fcb64c0fSEli Cohen 	const struct flow_action_entry *act;
4375fcb64c0fSEli Cohen 	int err;
4376fcb64c0fSEli Cohen 	int i;
4377fcb64c0fSEli Cohen 
4378fcb64c0fSEli Cohen 	if (!flow_action_has_entries(flow_action)) {
4379fcb64c0fSEli Cohen 		NL_SET_ERR_MSG_MOD(extack, "matchall called with no action");
4380fcb64c0fSEli Cohen 		return -EINVAL;
4381fcb64c0fSEli Cohen 	}
4382fcb64c0fSEli Cohen 
4383fcb64c0fSEli Cohen 	if (!flow_offload_has_one_action(flow_action)) {
4384fcb64c0fSEli Cohen 		NL_SET_ERR_MSG_MOD(extack, "matchall policing support only a single action");
4385fcb64c0fSEli Cohen 		return -EOPNOTSUPP;
4386fcb64c0fSEli Cohen 	}
4387fcb64c0fSEli Cohen 
4388fcb64c0fSEli Cohen 	flow_action_for_each(i, act, flow_action) {
4389fcb64c0fSEli Cohen 		switch (act->id) {
4390fcb64c0fSEli Cohen 		case FLOW_ACTION_POLICE:
4391fcb64c0fSEli Cohen 			err = apply_police_params(priv, act->police.rate_bytes_ps, extack);
4392fcb64c0fSEli Cohen 			if (err)
4393fcb64c0fSEli Cohen 				return err;
4394fcb64c0fSEli Cohen 
4395fcb64c0fSEli Cohen 			rpriv->prev_vf_vport_stats = priv->stats.vf_vport;
4396fcb64c0fSEli Cohen 			break;
4397fcb64c0fSEli Cohen 		default:
4398fcb64c0fSEli Cohen 			NL_SET_ERR_MSG_MOD(extack, "mlx5 supports only police action for matchall");
4399fcb64c0fSEli Cohen 			return -EOPNOTSUPP;
4400fcb64c0fSEli Cohen 		}
4401fcb64c0fSEli Cohen 	}
4402fcb64c0fSEli Cohen 
4403fcb64c0fSEli Cohen 	return 0;
4404fcb64c0fSEli Cohen }
4405fcb64c0fSEli Cohen 
4406fcb64c0fSEli Cohen int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
4407fcb64c0fSEli Cohen 				struct tc_cls_matchall_offload *ma)
4408fcb64c0fSEli Cohen {
4409fcb64c0fSEli Cohen 	struct netlink_ext_ack *extack = ma->common.extack;
4410fcb64c0fSEli Cohen 
44117b83355fSEli Cohen 	if (ma->common.prio != 1) {
4412fcb64c0fSEli Cohen 		NL_SET_ERR_MSG_MOD(extack, "only priority 1 is supported");
4413fcb64c0fSEli Cohen 		return -EINVAL;
4414fcb64c0fSEli Cohen 	}
4415fcb64c0fSEli Cohen 
4416fcb64c0fSEli Cohen 	return scan_tc_matchall_fdb_actions(priv, &ma->rule->action, extack);
4417fcb64c0fSEli Cohen }
4418fcb64c0fSEli Cohen 
4419fcb64c0fSEli Cohen int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv,
4420fcb64c0fSEli Cohen 			     struct tc_cls_matchall_offload *ma)
4421fcb64c0fSEli Cohen {
4422fcb64c0fSEli Cohen 	struct netlink_ext_ack *extack = ma->common.extack;
4423fcb64c0fSEli Cohen 
4424fcb64c0fSEli Cohen 	return apply_police_params(priv, 0, extack);
4425fcb64c0fSEli Cohen }
4426fcb64c0fSEli Cohen 
4427fcb64c0fSEli Cohen void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
4428fcb64c0fSEli Cohen 			     struct tc_cls_matchall_offload *ma)
4429fcb64c0fSEli Cohen {
4430fcb64c0fSEli Cohen 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
4431fcb64c0fSEli Cohen 	struct rtnl_link_stats64 cur_stats;
4432fcb64c0fSEli Cohen 	u64 dbytes;
4433fcb64c0fSEli Cohen 	u64 dpkts;
4434fcb64c0fSEli Cohen 
4435fcb64c0fSEli Cohen 	cur_stats = priv->stats.vf_vport;
4436fcb64c0fSEli Cohen 	dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets;
4437fcb64c0fSEli Cohen 	dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes;
4438fcb64c0fSEli Cohen 	rpriv->prev_vf_vport_stats = cur_stats;
4439fcb64c0fSEli Cohen 	flow_stats_update(&ma->stats, dpkts, dbytes, jiffies);
4440fcb64c0fSEli Cohen }
4441fcb64c0fSEli Cohen 
/* A peer mlx5e device is going away: mark every hairpin pair that was
 * built against its vhca id so the datapath sees peer_gone.
 */
static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
					      struct mlx5e_priv *peer_priv)
{
	struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
	struct mlx5e_hairpin_entry *hpe, *tmp;
	LIST_HEAD(init_wait_list);
	u16 peer_vhca_id;
	int bkt;

	/* Nothing to do unless both devices belong to the same HW */
	if (!same_hw_devs(priv, peer_priv))
		return;

	peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);

	/* Under the table lock, take a reference on every live entry and
	 * collect it on a private list; the res_ready wait below is done
	 * after the lock is dropped (wait_for_completion may sleep).
	 */
	mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
	hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist)
		if (refcount_inc_not_zero(&hpe->refcnt))
			list_add(&hpe->dead_peer_wait_list, &init_wait_list);
	mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);

	list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) {
		/* Wait until the entry finished (or failed) initialization */
		wait_for_completion(&hpe->res_ready);
		if (!IS_ERR_OR_NULL(hpe->hp) && hpe->peer_vhca_id == peer_vhca_id)
			hpe->hp->pair->peer_gone = true;

		mlx5e_hairpin_put(priv, hpe);
	}
}
44704d8fcf21SAlaa Hleihel 
/* Netdev notifier: when another mlx5e netdev on the same HW unregisters,
 * flag the hairpin pairs that used it as dead.
 */
static int mlx5e_tc_netdev_event(struct notifier_block *this,
				 unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct mlx5e_flow_steering *fs;
	struct mlx5e_priv *peer_priv;
	struct mlx5e_tc_table *tc;
	struct mlx5e_priv *priv;

	/* Only mlx5e devices that are actually unregistering are of
	 * interest; a device still in NETREG_REGISTERED state is skipped.
	 */
	if (ndev->netdev_ops != &mlx5e_netdev_ops ||
	    event != NETDEV_UNREGISTER ||
	    ndev->reg_state == NETREG_REGISTERED)
		return NOTIFY_DONE;

	/* Recover our own priv from the notifier block embedded in it */
	tc = container_of(this, struct mlx5e_tc_table, netdevice_nb);
	fs = container_of(tc, struct mlx5e_flow_steering, tc);
	priv = container_of(fs, struct mlx5e_priv, fs);
	peer_priv = netdev_priv(ndev);
	/* Ignore our own unregister and devices without TC offload enabled */
	if (priv == peer_priv ||
	    !(priv->netdev->features & NETIF_F_HW_TC))
		return NOTIFY_DONE;

	mlx5e_tc_hairpin_update_dead_peer(priv, peer_priv);

	return NOTIFY_DONE;
}
44974d8fcf21SAlaa Hleihel 
4498655dc3d2SOr Gerlitz int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
4499e8f887acSAmir Vadai {
4500acff797cSMaor Gottlieb 	struct mlx5e_tc_table *tc = &priv->fs.tc;
45014d8fcf21SAlaa Hleihel 	int err;
4502e8f887acSAmir Vadai 
4503b6fac0b4SVlad Buslov 	mutex_init(&tc->t_lock);
4504d2faae25SVlad Buslov 	mutex_init(&tc->mod_hdr.lock);
4505dd58edc3SVlad Buslov 	hash_init(tc->mod_hdr.hlist);
4506b32accdaSVlad Buslov 	mutex_init(&tc->hairpin_tbl_lock);
45075c65c564SOr Gerlitz 	hash_init(tc->hairpin_tbl);
450811c9c548SOr Gerlitz 
45094d8fcf21SAlaa Hleihel 	err = rhashtable_init(&tc->ht, &tc_ht_params);
45104d8fcf21SAlaa Hleihel 	if (err)
45114d8fcf21SAlaa Hleihel 		return err;
45124d8fcf21SAlaa Hleihel 
45134d8fcf21SAlaa Hleihel 	tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
4514d48834f9SJiri Pirko 	err = register_netdevice_notifier_dev_net(priv->netdev,
4515d48834f9SJiri Pirko 						  &tc->netdevice_nb,
4516d48834f9SJiri Pirko 						  &tc->netdevice_nn);
4517d48834f9SJiri Pirko 	if (err) {
45184d8fcf21SAlaa Hleihel 		tc->netdevice_nb.notifier_call = NULL;
45194d8fcf21SAlaa Hleihel 		mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
45204d8fcf21SAlaa Hleihel 	}
45214d8fcf21SAlaa Hleihel 
45224d8fcf21SAlaa Hleihel 	return err;
4523e8f887acSAmir Vadai }
4524e8f887acSAmir Vadai 
4525e8f887acSAmir Vadai static void _mlx5e_tc_del_flow(void *ptr, void *arg)
4526e8f887acSAmir Vadai {
4527e8f887acSAmir Vadai 	struct mlx5e_tc_flow *flow = ptr;
4528655dc3d2SOr Gerlitz 	struct mlx5e_priv *priv = flow->priv;
4529e8f887acSAmir Vadai 
4530961e8979SRoi Dayan 	mlx5e_tc_del_flow(priv, flow);
4531e8f887acSAmir Vadai 	kfree(flow);
4532e8f887acSAmir Vadai }
4533e8f887acSAmir Vadai 
/* Tear down NIC-mode TC state created by mlx5e_tc_nic_init(). */
void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	/* notifier_call is NULL when registration failed during init */
	if (tc->netdevice_nb.notifier_call)
		unregister_netdevice_notifier_dev_net(priv->netdev,
						      &tc->netdevice_nb,
						      &tc->netdevice_nn);

	mutex_destroy(&tc->mod_hdr.lock);
	mutex_destroy(&tc->hairpin_tbl_lock);

	rhashtable_destroy(&tc->ht);

	/* Drop the TC flow table if one was ever created */
	if (!IS_ERR_OR_NULL(tc->t)) {
		mlx5_destroy_flow_table(tc->t);
		tc->t = NULL;
	}
	mutex_destroy(&tc->t_lock);
}
4554655dc3d2SOr Gerlitz 
4555655dc3d2SOr Gerlitz int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
4556655dc3d2SOr Gerlitz {
45570a7fcb78SPaul Blakey 	const size_t sz_enc_opts = sizeof(struct flow_dissector_key_enc_opts);
45580a7fcb78SPaul Blakey 	struct mlx5_rep_uplink_priv *uplink_priv;
45590a7fcb78SPaul Blakey 	struct mlx5e_rep_priv *priv;
45600a7fcb78SPaul Blakey 	struct mapping_ctx *mapping;
45610a7fcb78SPaul Blakey 	int err;
45620a7fcb78SPaul Blakey 
45630a7fcb78SPaul Blakey 	uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht);
45640a7fcb78SPaul Blakey 	priv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
45650a7fcb78SPaul Blakey 
45660a7fcb78SPaul Blakey 	mapping = mapping_create(sizeof(struct tunnel_match_key),
45670a7fcb78SPaul Blakey 				 TUNNEL_INFO_BITS_MASK, true);
45680a7fcb78SPaul Blakey 	if (IS_ERR(mapping)) {
45690a7fcb78SPaul Blakey 		err = PTR_ERR(mapping);
45700a7fcb78SPaul Blakey 		goto err_tun_mapping;
45710a7fcb78SPaul Blakey 	}
45720a7fcb78SPaul Blakey 	uplink_priv->tunnel_mapping = mapping;
45730a7fcb78SPaul Blakey 
45740a7fcb78SPaul Blakey 	mapping = mapping_create(sz_enc_opts, ENC_OPTS_BITS_MASK, true);
45750a7fcb78SPaul Blakey 	if (IS_ERR(mapping)) {
45760a7fcb78SPaul Blakey 		err = PTR_ERR(mapping);
45770a7fcb78SPaul Blakey 		goto err_enc_opts_mapping;
45780a7fcb78SPaul Blakey 	}
45790a7fcb78SPaul Blakey 	uplink_priv->tunnel_enc_opts_mapping = mapping;
45800a7fcb78SPaul Blakey 
45810a7fcb78SPaul Blakey 	err = rhashtable_init(tc_ht, &tc_ht_params);
45820a7fcb78SPaul Blakey 	if (err)
45830a7fcb78SPaul Blakey 		goto err_ht_init;
45840a7fcb78SPaul Blakey 
45850a7fcb78SPaul Blakey 	return err;
45860a7fcb78SPaul Blakey 
45870a7fcb78SPaul Blakey err_ht_init:
45880a7fcb78SPaul Blakey 	mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
45890a7fcb78SPaul Blakey err_enc_opts_mapping:
45900a7fcb78SPaul Blakey 	mapping_destroy(uplink_priv->tunnel_mapping);
45910a7fcb78SPaul Blakey err_tun_mapping:
45920a7fcb78SPaul Blakey 	netdev_warn(priv->netdev,
45930a7fcb78SPaul Blakey 		    "Failed to initialize tc (eswitch), err: %d", err);
45940a7fcb78SPaul Blakey 	return err;
4595655dc3d2SOr Gerlitz }
4596655dc3d2SOr Gerlitz 
4597655dc3d2SOr Gerlitz void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
4598655dc3d2SOr Gerlitz {
45990a7fcb78SPaul Blakey 	struct mlx5_rep_uplink_priv *uplink_priv;
46000a7fcb78SPaul Blakey 
4601655dc3d2SOr Gerlitz 	rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
46020a7fcb78SPaul Blakey 
46030a7fcb78SPaul Blakey 	uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht);
46040a7fcb78SPaul Blakey 	mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
46050a7fcb78SPaul Blakey 	mapping_destroy(uplink_priv->tunnel_mapping);
4606655dc3d2SOr Gerlitz }
460701252a27SOr Gerlitz 
4608226f2ca3SVlad Buslov int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags)
460901252a27SOr Gerlitz {
4610d9ee0491SOr Gerlitz 	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
461101252a27SOr Gerlitz 
461201252a27SOr Gerlitz 	return atomic_read(&tc_ht->nelems);
461301252a27SOr Gerlitz }
461404de7ddaSRoi Dayan 
461504de7ddaSRoi Dayan void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw)
461604de7ddaSRoi Dayan {
461704de7ddaSRoi Dayan 	struct mlx5e_tc_flow *flow, *tmp;
461804de7ddaSRoi Dayan 
461904de7ddaSRoi Dayan 	list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows, peer)
462004de7ddaSRoi Dayan 		__mlx5e_tc_del_fdb_peer_flow(flow);
462104de7ddaSRoi Dayan }
4622b4a23329SRoi Dayan 
4623b4a23329SRoi Dayan void mlx5e_tc_reoffload_flows_work(struct work_struct *work)
4624b4a23329SRoi Dayan {
4625b4a23329SRoi Dayan 	struct mlx5_rep_uplink_priv *rpriv =
4626b4a23329SRoi Dayan 		container_of(work, struct mlx5_rep_uplink_priv,
4627b4a23329SRoi Dayan 			     reoffload_flows_work);
4628b4a23329SRoi Dayan 	struct mlx5e_tc_flow *flow, *tmp;
4629b4a23329SRoi Dayan 
4630ad86755bSVlad Buslov 	mutex_lock(&rpriv->unready_flows_lock);
4631b4a23329SRoi Dayan 	list_for_each_entry_safe(flow, tmp, &rpriv->unready_flows, unready) {
4632b4a23329SRoi Dayan 		if (!mlx5e_tc_add_fdb_flow(flow->priv, flow, NULL))
4633ad86755bSVlad Buslov 			unready_flow_del(flow);
4634b4a23329SRoi Dayan 	}
4635ad86755bSVlad Buslov 	mutex_unlock(&rpriv->unready_flows_lock);
4636b4a23329SRoi Dayan }
4637d6d27782SPaul Blakey 
4638b8ce9037SPaul Blakey #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
/* Rebuild tunnel metadata for a packet using the restore id recovered
 * from HW: look up the tunnel match key and encap options in the
 * mappings created by mlx5e_tc_esw_init(), attach a metadata dst and
 * retarget the skb to the tunnel device.  Returns false when the id
 * cannot be resolved (the packet cannot be restored).
 */
static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb,
				 struct mlx5e_tc_update_priv *tc_priv,
				 u32 tunnel_id)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct flow_dissector_key_enc_opts enc_opts = {};
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct metadata_dst *tun_dst;
	struct tunnel_match_key key;
	u32 tun_id, enc_opts_id;
	struct net_device *dev;
	int err;

	/* tunnel_id packs the enc-opts mapping id in the low ENC_OPTS_BITS
	 * and the tunnel-key mapping id above them.
	 */
	enc_opts_id = tunnel_id & ENC_OPTS_BITS_MASK;
	tun_id = tunnel_id >> ENC_OPTS_BITS;

	/* id 0: no tunnel info recorded for this packet, nothing to do */
	if (!tun_id)
		return true;

	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &uplink_rpriv->uplink_priv;

	err = mapping_find(uplink_priv->tunnel_mapping, tun_id, &key);
	if (err) {
		WARN_ON_ONCE(true);
		netdev_dbg(priv->netdev,
			   "Couldn't find tunnel for tun_id: %d, err: %d\n",
			   tun_id, err);
		return false;
	}

	if (enc_opts_id) {
		err = mapping_find(uplink_priv->tunnel_enc_opts_mapping,
				   enc_opts_id, &enc_opts);
		if (err) {
			netdev_dbg(priv->netdev,
				   "Couldn't find tunnel (opts) for tun_id: %d, err: %d\n",
				   enc_opts_id, err);
			return false;
		}
	}

	tun_dst = tun_rx_dst(enc_opts.len);
	if (!tun_dst) {
		WARN_ON_ONCE(true);
		return false;
	}

	/* Populate the metadata dst from the recovered match key
	 * (IPv4 addresses, tos/ttl, L4 ports and VNI/key id).
	 */
	ip_tunnel_key_init(&tun_dst->u.tun_info.key,
			   key.enc_ipv4.src, key.enc_ipv4.dst,
			   key.enc_ip.tos, key.enc_ip.ttl,
			   0, /* label */
			   key.enc_tp.src, key.enc_tp.dst,
			   key32_to_tunnel_id(key.enc_key_id.keyid),
			   TUNNEL_KEY);

	if (enc_opts.len)
		ip_tunnel_info_opts_set(&tun_dst->u.tun_info, enc_opts.data,
					enc_opts.len, enc_opts.dst_opt_type);

	skb_dst_set(skb, (struct dst_entry *)tun_dst);
	dev = dev_get_by_index(&init_net, key.filter_ifindex);
	if (!dev) {
		netdev_dbg(priv->netdev,
			   "Couldn't find tunnel device with ifindex: %d\n",
			   key.filter_ifindex);
		return false;
	}

	/* Set tun_dev so we do dev_put() after datapath */
	tc_priv->tun_dev = dev;

	skb->dev = dev;

	return true;
}
4716b8ce9037SPaul Blakey #endif /* CONFIG_NET_TC_SKB_EXT */
4717b8ce9037SPaul Blakey 
/* Restore TC metadata for a packet received on a representor: reg_c0 in
 * the CQE carries the chain tag (mapped back to a chain id and exposed
 * via the TC skb extension), reg_c1 carries the tunnel restore id
 * consumed by mlx5e_restore_tunnel().  Returns false when restore fails.
 */
bool mlx5e_tc_rep_update_skb(struct mlx5_cqe64 *cqe,
			     struct sk_buff *skb,
			     struct mlx5e_tc_update_priv *tc_priv)
{
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	u32 chain = 0, reg_c0, reg_c1, tunnel_id;
	struct tc_skb_ext *tc_skb_ext;
	struct mlx5_eswitch *esw;
	struct mlx5e_priv *priv;
	int tunnel_moffset;
	int err;

	reg_c0 = (be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK);
	/* The default flow tag means no chain metadata was written */
	if (reg_c0 == MLX5_FS_DEFAULT_FLOW_TAG)
		reg_c0 = 0;
	reg_c1 = be32_to_cpu(cqe->imm_inval_pkey);

	if (!reg_c0)
		return true;

	priv = netdev_priv(skb->dev);
	esw = priv->mdev->priv.eswitch;

	err = mlx5_eswitch_get_chain_for_tag(esw, reg_c0, &chain);
	if (err) {
		netdev_dbg(priv->netdev,
			   "Couldn't find chain for chain tag: %d, err: %d\n",
			   reg_c0, err);
		return false;
	}

	if (chain) {
		/* Hand the chain to the TC software datapath via skb ext */
		tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);
		if (!tc_skb_ext) {
			WARN_ON(1);
			return false;
		}

		tc_skb_ext->chain = chain;
	}

	/* Extract the tunnel restore id from its byte offset within reg_c1 */
	tunnel_moffset = mlx5e_tc_attr_to_reg_mappings[TUNNEL_TO_REG].moffset;
	tunnel_id = reg_c1 >> (8 * tunnel_moffset);
	return mlx5e_restore_tunnel(priv, skb, tc_priv, tunnel_id);
#endif /* CONFIG_NET_TC_SKB_EXT */

	return true;
}
4766b8ce9037SPaul Blakey 
4767b8ce9037SPaul Blakey void mlx5_tc_rep_post_napi_receive(struct mlx5e_tc_update_priv *tc_priv)
4768b8ce9037SPaul Blakey {
4769b8ce9037SPaul Blakey 	if (tc_priv->tun_dev)
4770b8ce9037SPaul Blakey 		dev_put(tc_priv->tun_dev);
4771b8ce9037SPaul Blakey }
4772