1e8f887acSAmir Vadai /*
2e8f887acSAmir Vadai  * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
3e8f887acSAmir Vadai  *
4e8f887acSAmir Vadai  * This software is available to you under a choice of one of two
5e8f887acSAmir Vadai  * licenses.  You may choose to be licensed under the terms of the GNU
6e8f887acSAmir Vadai  * General Public License (GPL) Version 2, available from the file
7e8f887acSAmir Vadai  * COPYING in the main directory of this source tree, or the
8e8f887acSAmir Vadai  * OpenIB.org BSD license below:
9e8f887acSAmir Vadai  *
10e8f887acSAmir Vadai  *     Redistribution and use in source and binary forms, with or
11e8f887acSAmir Vadai  *     without modification, are permitted provided that the following
12e8f887acSAmir Vadai  *     conditions are met:
13e8f887acSAmir Vadai  *
14e8f887acSAmir Vadai  *      - Redistributions of source code must retain the above
15e8f887acSAmir Vadai  *        copyright notice, this list of conditions and the following
16e8f887acSAmir Vadai  *        disclaimer.
17e8f887acSAmir Vadai  *
18e8f887acSAmir Vadai  *      - Redistributions in binary form must reproduce the above
19e8f887acSAmir Vadai  *        copyright notice, this list of conditions and the following
20e8f887acSAmir Vadai  *        disclaimer in the documentation and/or other materials
21e8f887acSAmir Vadai  *        provided with the distribution.
22e8f887acSAmir Vadai  *
23e8f887acSAmir Vadai  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24e8f887acSAmir Vadai  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25e8f887acSAmir Vadai  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26e8f887acSAmir Vadai  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27e8f887acSAmir Vadai  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28e8f887acSAmir Vadai  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29e8f887acSAmir Vadai  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30e8f887acSAmir Vadai  * SOFTWARE.
31e8f887acSAmir Vadai  */
32e8f887acSAmir Vadai 
33e3a2b7edSAmir Vadai #include <net/flow_dissector.h>
343f7d0eb4SOr Gerlitz #include <net/sch_generic.h>
35e3a2b7edSAmir Vadai #include <net/pkt_cls.h>
36e3a2b7edSAmir Vadai #include <net/tc_act/tc_gact.h>
3712185a9fSAmir Vadai #include <net/tc_act/tc_skbedit.h>
38e8f887acSAmir Vadai #include <linux/mlx5/fs.h>
39e8f887acSAmir Vadai #include <linux/mlx5/device.h>
40e8f887acSAmir Vadai #include <linux/rhashtable.h>
4103a9d11eSOr Gerlitz #include <net/switchdev.h>
4203a9d11eSOr Gerlitz #include <net/tc_act/tc_mirred.h>
43776b12b6SOr Gerlitz #include <net/tc_act/tc_vlan.h>
44bbd00f7eSHadar Hen Zion #include <net/tc_act/tc_tunnel_key.h>
45d79b6df6SOr Gerlitz #include <net/tc_act/tc_pedit.h>
4626c02749SOr Gerlitz #include <net/tc_act/tc_csum.h>
47a54e20b4SHadar Hen Zion #include <net/vxlan.h>
48f6dfb4c3SHadar Hen Zion #include <net/arp.h>
49e8f887acSAmir Vadai #include "en.h"
501d447a39SSaeed Mahameed #include "en_rep.h"
51232c0013SHadar Hen Zion #include "en_tc.h"
5203a9d11eSOr Gerlitz #include "eswitch.h"
53358aa5ceSSaeed Mahameed #include "lib/vxlan.h"
543f6d08d1SOr Gerlitz #include "fs_core.h"
552c81bfd5SHuy Nguyen #include "en/port.h"
56e8f887acSAmir Vadai 
573bc4b7bfSOr Gerlitz struct mlx5_nic_flow_attr {
583bc4b7bfSOr Gerlitz 	u32 action;
593bc4b7bfSOr Gerlitz 	u32 flow_tag;
602f4fe4caSOr Gerlitz 	u32 mod_hdr_id;
615c65c564SOr Gerlitz 	u32 hairpin_tirn;
6238aa51c1SOr Gerlitz 	u8 match_level;
633f6d08d1SOr Gerlitz 	struct mlx5_flow_table	*hairpin_ft;
64b8aee822SMark Bloch 	struct mlx5_fc		*counter;
653bc4b7bfSOr Gerlitz };
663bc4b7bfSOr Gerlitz 
6760bd4af8SOr Gerlitz #define MLX5E_TC_FLOW_BASE (MLX5E_TC_LAST_EXPORTED_BIT + 1)
6860bd4af8SOr Gerlitz 
6965ba8fb7SOr Gerlitz enum {
7060bd4af8SOr Gerlitz 	MLX5E_TC_FLOW_INGRESS	= MLX5E_TC_INGRESS,
7160bd4af8SOr Gerlitz 	MLX5E_TC_FLOW_EGRESS	= MLX5E_TC_EGRESS,
7260bd4af8SOr Gerlitz 	MLX5E_TC_FLOW_ESWITCH	= BIT(MLX5E_TC_FLOW_BASE),
7360bd4af8SOr Gerlitz 	MLX5E_TC_FLOW_NIC	= BIT(MLX5E_TC_FLOW_BASE + 1),
7460bd4af8SOr Gerlitz 	MLX5E_TC_FLOW_OFFLOADED	= BIT(MLX5E_TC_FLOW_BASE + 2),
7560bd4af8SOr Gerlitz 	MLX5E_TC_FLOW_HAIRPIN	= BIT(MLX5E_TC_FLOW_BASE + 3),
7660bd4af8SOr Gerlitz 	MLX5E_TC_FLOW_HAIRPIN_RSS = BIT(MLX5E_TC_FLOW_BASE + 4),
775dbe906fSPaul Blakey 	MLX5E_TC_FLOW_SLOW	  = BIT(MLX5E_TC_FLOW_BASE + 5),
7865ba8fb7SOr Gerlitz };
7965ba8fb7SOr Gerlitz 
80e4ad91f2SChris Mi #define MLX5E_TC_MAX_SPLITS 1
81e4ad91f2SChris Mi 
8254c177caSOz Shlomo enum {
8354c177caSOz Shlomo 	MLX5E_TC_TUNNEL_TYPE_UNKNOWN,
8454c177caSOz Shlomo 	MLX5E_TC_TUNNEL_TYPE_VXLAN
8554c177caSOz Shlomo };
8654c177caSOz Shlomo 
8754c177caSOz Shlomo static int mlx5e_get_tunnel_type(struct net_device *tunnel_dev);
8854c177caSOz Shlomo 
89e8f887acSAmir Vadai struct mlx5e_tc_flow {
90e8f887acSAmir Vadai 	struct rhash_head	node;
91655dc3d2SOr Gerlitz 	struct mlx5e_priv	*priv;
92e8f887acSAmir Vadai 	u64			cookie;
935dbe906fSPaul Blakey 	u16			flags;
94e4ad91f2SChris Mi 	struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1];
9511c9c548SOr Gerlitz 	struct list_head	encap;   /* flows sharing the same encap ID */
9611c9c548SOr Gerlitz 	struct list_head	mod_hdr; /* flows sharing the same mod hdr ID */
975c65c564SOr Gerlitz 	struct list_head	hairpin; /* flows sharing the same hairpin */
983bc4b7bfSOr Gerlitz 	union {
99ecf5bb79SOr Gerlitz 		struct mlx5_esw_flow_attr esw_attr[0];
1003bc4b7bfSOr Gerlitz 		struct mlx5_nic_flow_attr nic_attr[0];
1013bc4b7bfSOr Gerlitz 	};
102e8f887acSAmir Vadai };
103e8f887acSAmir Vadai 
10417091853SOr Gerlitz struct mlx5e_tc_flow_parse_attr {
1053c37745eSOr Gerlitz 	struct ip_tunnel_info tun_info;
106d11afc26SOz Shlomo 	struct net_device *filter_dev;
10717091853SOr Gerlitz 	struct mlx5_flow_spec spec;
108d79b6df6SOr Gerlitz 	int num_mod_hdr_actions;
109d79b6df6SOr Gerlitz 	void *mod_hdr_actions;
1103c37745eSOr Gerlitz 	int mirred_ifindex;
11117091853SOr Gerlitz };
11217091853SOr Gerlitz 
113acff797cSMaor Gottlieb #define MLX5E_TC_TABLE_NUM_GROUPS 4
114b3a433deSOr Gerlitz #define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(16)
115e8f887acSAmir Vadai 
11677ab67b7SOr Gerlitz struct mlx5e_hairpin {
11777ab67b7SOr Gerlitz 	struct mlx5_hairpin *pair;
11877ab67b7SOr Gerlitz 
11977ab67b7SOr Gerlitz 	struct mlx5_core_dev *func_mdev;
1203f6d08d1SOr Gerlitz 	struct mlx5e_priv *func_priv;
12177ab67b7SOr Gerlitz 	u32 tdn;
12277ab67b7SOr Gerlitz 	u32 tirn;
1233f6d08d1SOr Gerlitz 
1243f6d08d1SOr Gerlitz 	int num_channels;
1253f6d08d1SOr Gerlitz 	struct mlx5e_rqt indir_rqt;
1263f6d08d1SOr Gerlitz 	u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
1273f6d08d1SOr Gerlitz 	struct mlx5e_ttc_table ttc;
12877ab67b7SOr Gerlitz };
12977ab67b7SOr Gerlitz 
1305c65c564SOr Gerlitz struct mlx5e_hairpin_entry {
1315c65c564SOr Gerlitz 	/* a node of a hash table which keeps all the hairpin entries */
1325c65c564SOr Gerlitz 	struct hlist_node hairpin_hlist;
1335c65c564SOr Gerlitz 
1345c65c564SOr Gerlitz 	/* flows sharing the same hairpin */
1355c65c564SOr Gerlitz 	struct list_head flows;
1365c65c564SOr Gerlitz 
137d8822868SOr Gerlitz 	u16 peer_vhca_id;
138106be53bSOr Gerlitz 	u8 prio;
1395c65c564SOr Gerlitz 	struct mlx5e_hairpin *hp;
1405c65c564SOr Gerlitz };
1415c65c564SOr Gerlitz 
14211c9c548SOr Gerlitz struct mod_hdr_key {
14311c9c548SOr Gerlitz 	int num_actions;
14411c9c548SOr Gerlitz 	void *actions;
14511c9c548SOr Gerlitz };
14611c9c548SOr Gerlitz 
14711c9c548SOr Gerlitz struct mlx5e_mod_hdr_entry {
14811c9c548SOr Gerlitz 	/* a node of a hash table which keeps all the mod_hdr entries */
14911c9c548SOr Gerlitz 	struct hlist_node mod_hdr_hlist;
15011c9c548SOr Gerlitz 
15111c9c548SOr Gerlitz 	/* flows sharing the same mod_hdr entry */
15211c9c548SOr Gerlitz 	struct list_head flows;
15311c9c548SOr Gerlitz 
15411c9c548SOr Gerlitz 	struct mod_hdr_key key;
15511c9c548SOr Gerlitz 
15611c9c548SOr Gerlitz 	u32 mod_hdr_id;
15711c9c548SOr Gerlitz };
15811c9c548SOr Gerlitz 
15911c9c548SOr Gerlitz #define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)
16011c9c548SOr Gerlitz 
16111c9c548SOr Gerlitz static inline u32 hash_mod_hdr_info(struct mod_hdr_key *key)
16211c9c548SOr Gerlitz {
16311c9c548SOr Gerlitz 	return jhash(key->actions,
16411c9c548SOr Gerlitz 		     key->num_actions * MLX5_MH_ACT_SZ, 0);
16511c9c548SOr Gerlitz }
16611c9c548SOr Gerlitz 
16711c9c548SOr Gerlitz static inline int cmp_mod_hdr_info(struct mod_hdr_key *a,
16811c9c548SOr Gerlitz 				   struct mod_hdr_key *b)
16911c9c548SOr Gerlitz {
17011c9c548SOr Gerlitz 	if (a->num_actions != b->num_actions)
17111c9c548SOr Gerlitz 		return 1;
17211c9c548SOr Gerlitz 
17311c9c548SOr Gerlitz 	return memcmp(a->actions, b->actions, a->num_actions * MLX5_MH_ACT_SZ);
17411c9c548SOr Gerlitz }
17511c9c548SOr Gerlitz 
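/* Attach the flow to a modify-header context: reuse an existing mod_hdr
 * entry whose action list matches (hashed lookup in the eswitch or NIC
 * table), or allocate a new one in HW, and store the resulting
 * mod_hdr_id in the flow's attributes.
 */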
17611c9c548SOr Gerlitz static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
17711c9c548SOr Gerlitz 				struct mlx5e_tc_flow *flow,
17811c9c548SOr Gerlitz 				struct mlx5e_tc_flow_parse_attr *parse_attr)
17911c9c548SOr Gerlitz {
18011c9c548SOr Gerlitz 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
18111c9c548SOr Gerlitz 	int num_actions, actions_size, namespace, err;
18211c9c548SOr Gerlitz 	struct mlx5e_mod_hdr_entry *mh;
18311c9c548SOr Gerlitz 	struct mod_hdr_key key;
18411c9c548SOr Gerlitz 	bool found = false;
18511c9c548SOr Gerlitz 	u32 hash_key;
18611c9c548SOr Gerlitz 
18711c9c548SOr Gerlitz 	num_actions  = parse_attr->num_mod_hdr_actions;
18811c9c548SOr Gerlitz 	actions_size = MLX5_MH_ACT_SZ * num_actions;
18911c9c548SOr Gerlitz 
19011c9c548SOr Gerlitz 	key.actions = parse_attr->mod_hdr_actions;
19111c9c548SOr Gerlitz 	key.num_actions = num_actions;
19211c9c548SOr Gerlitz 
19311c9c548SOr Gerlitz 	hash_key = hash_mod_hdr_info(&key);
19411c9c548SOr Gerlitz 
19511c9c548SOr Gerlitz 	if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
19611c9c548SOr Gerlitz 		namespace = MLX5_FLOW_NAMESPACE_FDB;
19711c9c548SOr Gerlitz 		hash_for_each_possible(esw->offloads.mod_hdr_tbl, mh,
19811c9c548SOr Gerlitz 				       mod_hdr_hlist, hash_key) {
19911c9c548SOr Gerlitz 			if (!cmp_mod_hdr_info(&mh->key, &key)) {
20011c9c548SOr Gerlitz 				found = true;
20111c9c548SOr Gerlitz 				break;
20211c9c548SOr Gerlitz 			}
20311c9c548SOr Gerlitz 		}
20411c9c548SOr Gerlitz 	} else {
20511c9c548SOr Gerlitz 		namespace = MLX5_FLOW_NAMESPACE_KERNEL;
20611c9c548SOr Gerlitz 		hash_for_each_possible(priv->fs.tc.mod_hdr_tbl, mh,
20711c9c548SOr Gerlitz 				       mod_hdr_hlist, hash_key) {
20811c9c548SOr Gerlitz 			if (!cmp_mod_hdr_info(&mh->key, &key)) {
20911c9c548SOr Gerlitz 				found = true;
21011c9c548SOr Gerlitz 				break;
21111c9c548SOr Gerlitz 			}
21211c9c548SOr Gerlitz 		}
21311c9c548SOr Gerlitz 	}
21411c9c548SOr Gerlitz 
21511c9c548SOr Gerlitz 	if (found)
21611c9c548SOr Gerlitz 		goto attach_flow;
21711c9c548SOr Gerlitz 
21811c9c548SOr Gerlitz 	mh = kzalloc(sizeof(*mh) + actions_size, GFP_KERNEL);
21911c9c548SOr Gerlitz 	if (!mh)
22011c9c548SOr Gerlitz 		return -ENOMEM;
22111c9c548SOr Gerlitz 
22211c9c548SOr Gerlitz 	mh->key.actions = (void *)mh + sizeof(*mh);
22311c9c548SOr Gerlitz 	memcpy(mh->key.actions, key.actions, actions_size);
22411c9c548SOr Gerlitz 	mh->key.num_actions = num_actions;
22511c9c548SOr Gerlitz 	INIT_LIST_HEAD(&mh->flows);
22611c9c548SOr Gerlitz 
22711c9c548SOr Gerlitz 	err = mlx5_modify_header_alloc(priv->mdev, namespace,
22811c9c548SOr Gerlitz 				       mh->key.num_actions,
22911c9c548SOr Gerlitz 				       mh->key.actions,
23011c9c548SOr Gerlitz 				       &mh->mod_hdr_id);
23111c9c548SOr Gerlitz 	if (err)
23211c9c548SOr Gerlitz 		goto out_err;
23311c9c548SOr Gerlitz 
23411c9c548SOr Gerlitz 	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
23511c9c548SOr Gerlitz 		hash_add(esw->offloads.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key);
23611c9c548SOr Gerlitz 	else
23711c9c548SOr Gerlitz 		hash_add(priv->fs.tc.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key);
23811c9c548SOr Gerlitz 
23911c9c548SOr Gerlitz attach_flow:
24011c9c548SOr Gerlitz 	list_add(&flow->mod_hdr, &mh->flows);
24111c9c548SOr Gerlitz 	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
24211c9c548SOr Gerlitz 		flow->esw_attr->mod_hdr_id = mh->mod_hdr_id;
24311c9c548SOr Gerlitz 	else
24411c9c548SOr Gerlitz 		flow->nic_attr->mod_hdr_id = mh->mod_hdr_id;
24511c9c548SOr Gerlitz 
24611c9c548SOr Gerlitz 	return 0;
24711c9c548SOr Gerlitz 
24811c9c548SOr Gerlitz out_err:
24911c9c548SOr Gerlitz 	kfree(mh);
25011c9c548SOr Gerlitz 	return err;
25111c9c548SOr Gerlitz }
25211c9c548SOr Gerlitz 
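/* Drop the flow's reference to its modify-header entry; if this was the
 * last flow using it, free the HW modify-header context as well.
 */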
25311c9c548SOr Gerlitz static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
25411c9c548SOr Gerlitz 				 struct mlx5e_tc_flow *flow)
25511c9c548SOr Gerlitz {
25611c9c548SOr Gerlitz 	struct list_head *next = flow->mod_hdr.next;
25711c9c548SOr Gerlitz 
25811c9c548SOr Gerlitz 	list_del(&flow->mod_hdr);
25911c9c548SOr Gerlitz 
26011c9c548SOr Gerlitz 	if (list_empty(next)) {
26111c9c548SOr Gerlitz 		struct mlx5e_mod_hdr_entry *mh;
26211c9c548SOr Gerlitz 
26311c9c548SOr Gerlitz 		mh = list_entry(next, struct mlx5e_mod_hdr_entry, flows);
26411c9c548SOr Gerlitz 
26511c9c548SOr Gerlitz 		mlx5_modify_header_dealloc(priv->mdev, mh->mod_hdr_id);
26611c9c548SOr Gerlitz 		hash_del(&mh->mod_hdr_hlist);
26711c9c548SOr Gerlitz 		kfree(mh);
26811c9c548SOr Gerlitz 	}
26911c9c548SOr Gerlitz }
27011c9c548SOr Gerlitz 
27177ab67b7SOr Gerlitz static
27277ab67b7SOr Gerlitz struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
27377ab67b7SOr Gerlitz {
27477ab67b7SOr Gerlitz 	struct net_device *netdev;
27577ab67b7SOr Gerlitz 	struct mlx5e_priv *priv;
27677ab67b7SOr Gerlitz 
27777ab67b7SOr Gerlitz 	netdev = __dev_get_by_index(net, ifindex);
27877ab67b7SOr Gerlitz 	priv = netdev_priv(netdev);
27977ab67b7SOr Gerlitz 	return priv->mdev;
28077ab67b7SOr Gerlitz }
28177ab67b7SOr Gerlitz 
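/* Allocate a transport domain and a direct TIR that steers into the
 * hairpin pair's first RQ.
 */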
28277ab67b7SOr Gerlitz static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
28377ab67b7SOr Gerlitz {
28477ab67b7SOr Gerlitz 	u32 in[MLX5_ST_SZ_DW(create_tir_in)] = {0};
28577ab67b7SOr Gerlitz 	void *tirc;
28677ab67b7SOr Gerlitz 	int err;
28777ab67b7SOr Gerlitz 
28877ab67b7SOr Gerlitz 	err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn);
28977ab67b7SOr Gerlitz 	if (err)
29077ab67b7SOr Gerlitz 		goto alloc_tdn_err;
29177ab67b7SOr Gerlitz 
29277ab67b7SOr Gerlitz 	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
29377ab67b7SOr Gerlitz 
29477ab67b7SOr Gerlitz 	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
295ddae74acSOr Gerlitz 	MLX5_SET(tirc, tirc, inline_rqn, hp->pair->rqn[0]);
29677ab67b7SOr Gerlitz 	MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
29777ab67b7SOr Gerlitz 
29877ab67b7SOr Gerlitz 	err = mlx5_core_create_tir(hp->func_mdev, in, MLX5_ST_SZ_BYTES(create_tir_in), &hp->tirn);
29977ab67b7SOr Gerlitz 	if (err)
30077ab67b7SOr Gerlitz 		goto create_tir_err;
30177ab67b7SOr Gerlitz 
30277ab67b7SOr Gerlitz 	return 0;
30377ab67b7SOr Gerlitz 
30477ab67b7SOr Gerlitz create_tir_err:
30577ab67b7SOr Gerlitz 	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
30677ab67b7SOr Gerlitz alloc_tdn_err:
30777ab67b7SOr Gerlitz 	return err;
30877ab67b7SOr Gerlitz }
30977ab67b7SOr Gerlitz 
31077ab67b7SOr Gerlitz static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
31177ab67b7SOr Gerlitz {
31277ab67b7SOr Gerlitz 	mlx5_core_destroy_tir(hp->func_mdev, hp->tirn);
31377ab67b7SOr Gerlitz 	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
31477ab67b7SOr Gerlitz }
31577ab67b7SOr Gerlitz 
3163f6d08d1SOr Gerlitz static void mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
3173f6d08d1SOr Gerlitz {
3183f6d08d1SOr Gerlitz 	u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE], rqn;
3193f6d08d1SOr Gerlitz 	struct mlx5e_priv *priv = hp->func_priv;
3203f6d08d1SOr Gerlitz 	int i, ix, sz = MLX5E_INDIR_RQT_SIZE;
3213f6d08d1SOr Gerlitz 
3223f6d08d1SOr Gerlitz 	mlx5e_build_default_indir_rqt(indirection_rqt, sz,
3233f6d08d1SOr Gerlitz 				      hp->num_channels);
3243f6d08d1SOr Gerlitz 
3253f6d08d1SOr Gerlitz 	for (i = 0; i < sz; i++) {
3263f6d08d1SOr Gerlitz 		ix = i;
327bbeb53b8SAya Levin 		if (priv->rss_params.hfunc == ETH_RSS_HASH_XOR)
3283f6d08d1SOr Gerlitz 			ix = mlx5e_bits_invert(i, ilog2(sz));
3293f6d08d1SOr Gerlitz 		ix = indirection_rqt[ix];
3303f6d08d1SOr Gerlitz 		rqn = hp->pair->rqn[ix];
3313f6d08d1SOr Gerlitz 		MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
3323f6d08d1SOr Gerlitz 	}
3333f6d08d1SOr Gerlitz }
3343f6d08d1SOr Gerlitz 
3353f6d08d1SOr Gerlitz static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
3363f6d08d1SOr Gerlitz {
3373f6d08d1SOr Gerlitz 	int inlen, err, sz = MLX5E_INDIR_RQT_SIZE;
3383f6d08d1SOr Gerlitz 	struct mlx5e_priv *priv = hp->func_priv;
3393f6d08d1SOr Gerlitz 	struct mlx5_core_dev *mdev = priv->mdev;
3403f6d08d1SOr Gerlitz 	void *rqtc;
3413f6d08d1SOr Gerlitz 	u32 *in;
3423f6d08d1SOr Gerlitz 
3433f6d08d1SOr Gerlitz 	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
3443f6d08d1SOr Gerlitz 	in = kvzalloc(inlen, GFP_KERNEL);
3453f6d08d1SOr Gerlitz 	if (!in)
3463f6d08d1SOr Gerlitz 		return -ENOMEM;
3473f6d08d1SOr Gerlitz 
3483f6d08d1SOr Gerlitz 	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
3493f6d08d1SOr Gerlitz 
3503f6d08d1SOr Gerlitz 	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
3513f6d08d1SOr Gerlitz 	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
3523f6d08d1SOr Gerlitz 
3533f6d08d1SOr Gerlitz 	mlx5e_hairpin_fill_rqt_rqns(hp, rqtc);
3543f6d08d1SOr Gerlitz 
3553f6d08d1SOr Gerlitz 	err = mlx5_core_create_rqt(mdev, in, inlen, &hp->indir_rqt.rqtn);
3563f6d08d1SOr Gerlitz 	if (!err)
3573f6d08d1SOr Gerlitz 		hp->indir_rqt.enabled = true;
3583f6d08d1SOr Gerlitz 
3593f6d08d1SOr Gerlitz 	kvfree(in);
3603f6d08d1SOr Gerlitz 	return err;
3613f6d08d1SOr Gerlitz }
3623f6d08d1SOr Gerlitz 
3633f6d08d1SOr Gerlitz static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
3643f6d08d1SOr Gerlitz {
3653f6d08d1SOr Gerlitz 	struct mlx5e_priv *priv = hp->func_priv;
3663f6d08d1SOr Gerlitz 	u32 in[MLX5_ST_SZ_DW(create_tir_in)];
3673f6d08d1SOr Gerlitz 	int tt, i, err;
3683f6d08d1SOr Gerlitz 	void *tirc;
3693f6d08d1SOr Gerlitz 
3703f6d08d1SOr Gerlitz 	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
371d930ac79SAya Levin 		struct mlx5e_tirc_config ttconfig = mlx5e_tirc_get_default_config(tt);
372d930ac79SAya Levin 
3733f6d08d1SOr Gerlitz 		memset(in, 0, MLX5_ST_SZ_BYTES(create_tir_in));
3743f6d08d1SOr Gerlitz 		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
3753f6d08d1SOr Gerlitz 
3763f6d08d1SOr Gerlitz 		MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
3773f6d08d1SOr Gerlitz 		MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
3783f6d08d1SOr Gerlitz 		MLX5_SET(tirc, tirc, indirect_table, hp->indir_rqt.rqtn);
379bbeb53b8SAya Levin 		mlx5e_build_indir_tir_ctx_hash(&priv->rss_params, &ttconfig, tirc, false);
380bbeb53b8SAya Levin 
3813f6d08d1SOr Gerlitz 		err = mlx5_core_create_tir(hp->func_mdev, in,
3823f6d08d1SOr Gerlitz 					   MLX5_ST_SZ_BYTES(create_tir_in), &hp->indir_tirn[tt]);
3833f6d08d1SOr Gerlitz 		if (err) {
3843f6d08d1SOr Gerlitz 			mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err);
3853f6d08d1SOr Gerlitz 			goto err_destroy_tirs;
3863f6d08d1SOr Gerlitz 		}
3873f6d08d1SOr Gerlitz 	}
3883f6d08d1SOr Gerlitz 	return 0;
3893f6d08d1SOr Gerlitz 
3903f6d08d1SOr Gerlitz err_destroy_tirs:
3913f6d08d1SOr Gerlitz 	for (i = 0; i < tt; i++)
3923f6d08d1SOr Gerlitz 		mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[i]);
3933f6d08d1SOr Gerlitz 	return err;
3943f6d08d1SOr Gerlitz }
3953f6d08d1SOr Gerlitz 
3963f6d08d1SOr Gerlitz static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp)
3973f6d08d1SOr Gerlitz {
3983f6d08d1SOr Gerlitz 	int tt;
3993f6d08d1SOr Gerlitz 
4003f6d08d1SOr Gerlitz 	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
4013f6d08d1SOr Gerlitz 		mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[tt]);
4023f6d08d1SOr Gerlitz }
4033f6d08d1SOr Gerlitz 
4043f6d08d1SOr Gerlitz static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
4053f6d08d1SOr Gerlitz 					 struct ttc_params *ttc_params)
4063f6d08d1SOr Gerlitz {
4073f6d08d1SOr Gerlitz 	struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
4083f6d08d1SOr Gerlitz 	int tt;
4093f6d08d1SOr Gerlitz 
4103f6d08d1SOr Gerlitz 	memset(ttc_params, 0, sizeof(*ttc_params));
4113f6d08d1SOr Gerlitz 
4123f6d08d1SOr Gerlitz 	ttc_params->any_tt_tirn = hp->tirn;
4133f6d08d1SOr Gerlitz 
4143f6d08d1SOr Gerlitz 	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
4153f6d08d1SOr Gerlitz 		ttc_params->indir_tirn[tt] = hp->indir_tirn[tt];
4163f6d08d1SOr Gerlitz 
4173f6d08d1SOr Gerlitz 	ft_attr->max_fte = MLX5E_NUM_TT;
4183f6d08d1SOr Gerlitz 	ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
4193f6d08d1SOr Gerlitz 	ft_attr->prio = MLX5E_TC_PRIO;
4203f6d08d1SOr Gerlitz }
4213f6d08d1SOr Gerlitz 
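/* Set up RSS for a multi-channel hairpin: create an indirection RQT over
 * the hairpin RQs, per-traffic-type indirect TIRs, and a TTC steering
 * table that spreads hairpinned traffic across them.
 */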
4223f6d08d1SOr Gerlitz static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
4233f6d08d1SOr Gerlitz {
4243f6d08d1SOr Gerlitz 	struct mlx5e_priv *priv = hp->func_priv;
4253f6d08d1SOr Gerlitz 	struct ttc_params ttc_params;
4263f6d08d1SOr Gerlitz 	int err;
4273f6d08d1SOr Gerlitz 
4283f6d08d1SOr Gerlitz 	err = mlx5e_hairpin_create_indirect_rqt(hp);
4293f6d08d1SOr Gerlitz 	if (err)
4303f6d08d1SOr Gerlitz 		return err;
4313f6d08d1SOr Gerlitz 
4323f6d08d1SOr Gerlitz 	err = mlx5e_hairpin_create_indirect_tirs(hp);
4333f6d08d1SOr Gerlitz 	if (err)
4343f6d08d1SOr Gerlitz 		goto err_create_indirect_tirs;
4353f6d08d1SOr Gerlitz 
4363f6d08d1SOr Gerlitz 	mlx5e_hairpin_set_ttc_params(hp, &ttc_params);
4373f6d08d1SOr Gerlitz 	err = mlx5e_create_ttc_table(priv, &ttc_params, &hp->ttc);
4383f6d08d1SOr Gerlitz 	if (err)
4393f6d08d1SOr Gerlitz 		goto err_create_ttc_table;
4403f6d08d1SOr Gerlitz 
4413f6d08d1SOr Gerlitz 	netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
4423f6d08d1SOr Gerlitz 		   hp->num_channels, hp->ttc.ft.t->id);
4433f6d08d1SOr Gerlitz 
4443f6d08d1SOr Gerlitz 	return 0;
4453f6d08d1SOr Gerlitz 
4463f6d08d1SOr Gerlitz err_create_ttc_table:
4473f6d08d1SOr Gerlitz 	mlx5e_hairpin_destroy_indirect_tirs(hp);
4483f6d08d1SOr Gerlitz err_create_indirect_tirs:
4493f6d08d1SOr Gerlitz 	mlx5e_destroy_rqt(priv, &hp->indir_rqt);
4503f6d08d1SOr Gerlitz 
4513f6d08d1SOr Gerlitz 	return err;
4523f6d08d1SOr Gerlitz }
4533f6d08d1SOr Gerlitz 
4543f6d08d1SOr Gerlitz static void mlx5e_hairpin_rss_cleanup(struct mlx5e_hairpin *hp)
4553f6d08d1SOr Gerlitz {
4563f6d08d1SOr Gerlitz 	struct mlx5e_priv *priv = hp->func_priv;
4573f6d08d1SOr Gerlitz 
4583f6d08d1SOr Gerlitz 	mlx5e_destroy_ttc_table(priv, &hp->ttc);
4593f6d08d1SOr Gerlitz 	mlx5e_hairpin_destroy_indirect_tirs(hp);
4603f6d08d1SOr Gerlitz 	mlx5e_destroy_rqt(priv, &hp->indir_rqt);
4613f6d08d1SOr Gerlitz }
4623f6d08d1SOr Gerlitz 
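/* Create a hairpin instance towards the peer device: the core RQ/SQ pair,
 * the transport objects (TDN/TIR) and, for multi-channel hairpins, the
 * RSS objects.
 */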
46377ab67b7SOr Gerlitz static struct mlx5e_hairpin *
46477ab67b7SOr Gerlitz mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params,
46577ab67b7SOr Gerlitz 		     int peer_ifindex)
46677ab67b7SOr Gerlitz {
46777ab67b7SOr Gerlitz 	struct mlx5_core_dev *func_mdev, *peer_mdev;
46877ab67b7SOr Gerlitz 	struct mlx5e_hairpin *hp;
46977ab67b7SOr Gerlitz 	struct mlx5_hairpin *pair;
47077ab67b7SOr Gerlitz 	int err;
47177ab67b7SOr Gerlitz 
47277ab67b7SOr Gerlitz 	hp = kzalloc(sizeof(*hp), GFP_KERNEL);
47377ab67b7SOr Gerlitz 	if (!hp)
47477ab67b7SOr Gerlitz 		return ERR_PTR(-ENOMEM);
47577ab67b7SOr Gerlitz 
47677ab67b7SOr Gerlitz 	func_mdev = priv->mdev;
47777ab67b7SOr Gerlitz 	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
47877ab67b7SOr Gerlitz 
47977ab67b7SOr Gerlitz 	pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
48077ab67b7SOr Gerlitz 	if (IS_ERR(pair)) {
48177ab67b7SOr Gerlitz 		err = PTR_ERR(pair);
48277ab67b7SOr Gerlitz 		goto create_pair_err;
48377ab67b7SOr Gerlitz 	}
48477ab67b7SOr Gerlitz 	hp->pair = pair;
48577ab67b7SOr Gerlitz 	hp->func_mdev = func_mdev;
4863f6d08d1SOr Gerlitz 	hp->func_priv = priv;
4873f6d08d1SOr Gerlitz 	hp->num_channels = params->num_channels;
48877ab67b7SOr Gerlitz 
48977ab67b7SOr Gerlitz 	err = mlx5e_hairpin_create_transport(hp);
49077ab67b7SOr Gerlitz 	if (err)
49177ab67b7SOr Gerlitz 		goto create_transport_err;
49277ab67b7SOr Gerlitz 
4933f6d08d1SOr Gerlitz 	if (hp->num_channels > 1) {
4943f6d08d1SOr Gerlitz 		err = mlx5e_hairpin_rss_init(hp);
4953f6d08d1SOr Gerlitz 		if (err)
4963f6d08d1SOr Gerlitz 			goto rss_init_err;
4973f6d08d1SOr Gerlitz 	}
4983f6d08d1SOr Gerlitz 
49977ab67b7SOr Gerlitz 	return hp;
50077ab67b7SOr Gerlitz 
5013f6d08d1SOr Gerlitz rss_init_err:
5023f6d08d1SOr Gerlitz 	mlx5e_hairpin_destroy_transport(hp);
50377ab67b7SOr Gerlitz create_transport_err:
50477ab67b7SOr Gerlitz 	mlx5_core_hairpin_destroy(hp->pair);
50577ab67b7SOr Gerlitz create_pair_err:
50677ab67b7SOr Gerlitz 	kfree(hp);
50777ab67b7SOr Gerlitz 	return ERR_PTR(err);
50877ab67b7SOr Gerlitz }
50977ab67b7SOr Gerlitz 
51077ab67b7SOr Gerlitz static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp)
51177ab67b7SOr Gerlitz {
5123f6d08d1SOr Gerlitz 	if (hp->num_channels > 1)
5133f6d08d1SOr Gerlitz 		mlx5e_hairpin_rss_cleanup(hp);
51477ab67b7SOr Gerlitz 	mlx5e_hairpin_destroy_transport(hp);
51577ab67b7SOr Gerlitz 	mlx5_core_hairpin_destroy(hp->pair);
51677ab67b7SOr Gerlitz 	kvfree(hp);
51777ab67b7SOr Gerlitz }
51877ab67b7SOr Gerlitz 
519106be53bSOr Gerlitz static inline u32 hash_hairpin_info(u16 peer_vhca_id, u8 prio)
520106be53bSOr Gerlitz {
521106be53bSOr Gerlitz 	return (peer_vhca_id << 16 | prio);
522106be53bSOr Gerlitz }
523106be53bSOr Gerlitz 
5245c65c564SOr Gerlitz static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
525106be53bSOr Gerlitz 						     u16 peer_vhca_id, u8 prio)
5265c65c564SOr Gerlitz {
5275c65c564SOr Gerlitz 	struct mlx5e_hairpin_entry *hpe;
528106be53bSOr Gerlitz 	u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);
5295c65c564SOr Gerlitz 
5305c65c564SOr Gerlitz 	hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe,
531106be53bSOr Gerlitz 			       hairpin_hlist, hash_key) {
532106be53bSOr Gerlitz 		if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio)
5335c65c564SOr Gerlitz 			return hpe;
5345c65c564SOr Gerlitz 	}
5355c65c564SOr Gerlitz 
5365c65c564SOr Gerlitz 	return NULL;
5375c65c564SOr Gerlitz }
5385c65c564SOr Gerlitz 
539106be53bSOr Gerlitz #define UNKNOWN_MATCH_PRIO 8
540106be53bSOr Gerlitz 
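/* Derive the PCP priority this flow matches on, so that flows of different
 * priorities use separate hairpin pairs; flows without an exact VLAN
 * priority match fall into the UNKNOWN_MATCH_PRIO bucket.
 */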
541106be53bSOr Gerlitz static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
542e98bedf5SEli Britstein 				  struct mlx5_flow_spec *spec, u8 *match_prio,
543e98bedf5SEli Britstein 				  struct netlink_ext_ack *extack)
544106be53bSOr Gerlitz {
545106be53bSOr Gerlitz 	void *headers_c, *headers_v;
546106be53bSOr Gerlitz 	u8 prio_val, prio_mask = 0;
547106be53bSOr Gerlitz 	bool vlan_present;
548106be53bSOr Gerlitz 
549106be53bSOr Gerlitz #ifdef CONFIG_MLX5_CORE_EN_DCB
550106be53bSOr Gerlitz 	if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) {
551e98bedf5SEli Britstein 		NL_SET_ERR_MSG_MOD(extack,
552e98bedf5SEli Britstein 				   "only PCP trust state supported for hairpin");
553106be53bSOr Gerlitz 		return -EOPNOTSUPP;
554106be53bSOr Gerlitz 	}
555106be53bSOr Gerlitz #endif
556106be53bSOr Gerlitz 	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
557106be53bSOr Gerlitz 	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
558106be53bSOr Gerlitz 
559106be53bSOr Gerlitz 	vlan_present = MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag);
560106be53bSOr Gerlitz 	if (vlan_present) {
561106be53bSOr Gerlitz 		prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
562106be53bSOr Gerlitz 		prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
563106be53bSOr Gerlitz 	}
564106be53bSOr Gerlitz 
565106be53bSOr Gerlitz 	if (!vlan_present || !prio_mask) {
566106be53bSOr Gerlitz 		prio_val = UNKNOWN_MATCH_PRIO;
567106be53bSOr Gerlitz 	} else if (prio_mask != 0x7) {
568e98bedf5SEli Britstein 		NL_SET_ERR_MSG_MOD(extack,
569e98bedf5SEli Britstein 				   "masked priority match not supported for hairpin");
570106be53bSOr Gerlitz 		return -EOPNOTSUPP;
571106be53bSOr Gerlitz 	}
572106be53bSOr Gerlitz 
573106be53bSOr Gerlitz 	*match_prio = prio_val;
574106be53bSOr Gerlitz 	return 0;
575106be53bSOr Gerlitz }
576106be53bSOr Gerlitz 
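/* Attach a NIC flow to a hairpin pair towards the peer device, creating
 * the pair on first use. The number of hairpin channels scales with the
 * port link speed, one channel per 50Gbps.
 */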
5775c65c564SOr Gerlitz static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
5785c65c564SOr Gerlitz 				  struct mlx5e_tc_flow *flow,
579e98bedf5SEli Britstein 				  struct mlx5e_tc_flow_parse_attr *parse_attr,
580e98bedf5SEli Britstein 				  struct netlink_ext_ack *extack)
5815c65c564SOr Gerlitz {
5825c65c564SOr Gerlitz 	int peer_ifindex = parse_attr->mirred_ifindex;
5835c65c564SOr Gerlitz 	struct mlx5_hairpin_params params;
584d8822868SOr Gerlitz 	struct mlx5_core_dev *peer_mdev;
5855c65c564SOr Gerlitz 	struct mlx5e_hairpin_entry *hpe;
5865c65c564SOr Gerlitz 	struct mlx5e_hairpin *hp;
5873f6d08d1SOr Gerlitz 	u64 link_speed64;
5883f6d08d1SOr Gerlitz 	u32 link_speed;
589106be53bSOr Gerlitz 	u8 match_prio;
590d8822868SOr Gerlitz 	u16 peer_id;
5915c65c564SOr Gerlitz 	int err;
5925c65c564SOr Gerlitz 
593d8822868SOr Gerlitz 	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
594d8822868SOr Gerlitz 	if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
595e98bedf5SEli Britstein 		NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
5965c65c564SOr Gerlitz 		return -EOPNOTSUPP;
5975c65c564SOr Gerlitz 	}
5985c65c564SOr Gerlitz 
599d8822868SOr Gerlitz 	peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
600e98bedf5SEli Britstein 	err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio,
601e98bedf5SEli Britstein 				     extack);
602106be53bSOr Gerlitz 	if (err)
603106be53bSOr Gerlitz 		return err;
604106be53bSOr Gerlitz 	hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
6055c65c564SOr Gerlitz 	if (hpe)
6065c65c564SOr Gerlitz 		goto attach_flow;
6075c65c564SOr Gerlitz 
6085c65c564SOr Gerlitz 	hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
6095c65c564SOr Gerlitz 	if (!hpe)
6105c65c564SOr Gerlitz 		return -ENOMEM;
6115c65c564SOr Gerlitz 
6125c65c564SOr Gerlitz 	INIT_LIST_HEAD(&hpe->flows);
613d8822868SOr Gerlitz 	hpe->peer_vhca_id = peer_id;
614106be53bSOr Gerlitz 	hpe->prio = match_prio;
6155c65c564SOr Gerlitz 
6165c65c564SOr Gerlitz 	params.log_data_size = 15;
6175c65c564SOr Gerlitz 	params.log_data_size = min_t(u8, params.log_data_size,
6185c65c564SOr Gerlitz 				     MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
6195c65c564SOr Gerlitz 	params.log_data_size = max_t(u8, params.log_data_size,
6205c65c564SOr Gerlitz 				     MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz));
6215c65c564SOr Gerlitz 
622eb9180f7SOr Gerlitz 	params.log_num_packets = params.log_data_size -
623eb9180f7SOr Gerlitz 				 MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev);
624eb9180f7SOr Gerlitz 	params.log_num_packets = min_t(u8, params.log_num_packets,
625eb9180f7SOr Gerlitz 				       MLX5_CAP_GEN(priv->mdev, log_max_hairpin_num_packets));
626eb9180f7SOr Gerlitz 
627eb9180f7SOr Gerlitz 	params.q_counter = priv->q_counter;
6283f6d08d1SOr Gerlitz 	/* allocate one hairpin channel per each 50Gbps share of the link speed */
6292c81bfd5SHuy Nguyen 	mlx5e_port_max_linkspeed(priv->mdev, &link_speed);
6303f6d08d1SOr Gerlitz 	link_speed = max_t(u32, link_speed, 50000);
6313f6d08d1SOr Gerlitz 	link_speed64 = link_speed;
6323f6d08d1SOr Gerlitz 	do_div(link_speed64, 50000);
6333f6d08d1SOr Gerlitz 	params.num_channels = link_speed64;
6343f6d08d1SOr Gerlitz 
6355c65c564SOr Gerlitz 	hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
6365c65c564SOr Gerlitz 	if (IS_ERR(hp)) {
6375c65c564SOr Gerlitz 		err = PTR_ERR(hp);
6385c65c564SOr Gerlitz 		goto create_hairpin_err;
6395c65c564SOr Gerlitz 	}
6405c65c564SOr Gerlitz 
641eb9180f7SOr Gerlitz 	netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
642ddae74acSOr Gerlitz 		   hp->tirn, hp->pair->rqn[0], hp->pair->peer_mdev->priv.name,
643eb9180f7SOr Gerlitz 		   hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);
6445c65c564SOr Gerlitz 
6455c65c564SOr Gerlitz 	hpe->hp = hp;
646106be53bSOr Gerlitz 	hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist,
647106be53bSOr Gerlitz 		 hash_hairpin_info(peer_id, match_prio));
6485c65c564SOr Gerlitz 
6495c65c564SOr Gerlitz attach_flow:
6503f6d08d1SOr Gerlitz 	if (hpe->hp->num_channels > 1) {
6513f6d08d1SOr Gerlitz 		flow->flags |= MLX5E_TC_FLOW_HAIRPIN_RSS;
6523f6d08d1SOr Gerlitz 		flow->nic_attr->hairpin_ft = hpe->hp->ttc.ft.t;
6533f6d08d1SOr Gerlitz 	} else {
6545c65c564SOr Gerlitz 		flow->nic_attr->hairpin_tirn = hpe->hp->tirn;
6553f6d08d1SOr Gerlitz 	}
6565c65c564SOr Gerlitz 	list_add(&flow->hairpin, &hpe->flows);
6573f6d08d1SOr Gerlitz 
6585c65c564SOr Gerlitz 	return 0;
6595c65c564SOr Gerlitz 
6605c65c564SOr Gerlitz create_hairpin_err:
6615c65c564SOr Gerlitz 	kfree(hpe);
6625c65c564SOr Gerlitz 	return err;
6635c65c564SOr Gerlitz }
6645c65c564SOr Gerlitz 
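/* Detach a flow from its hairpin pair and destroy the pair when the last
 * flow using it is removed.
 */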
6655c65c564SOr Gerlitz static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
6665c65c564SOr Gerlitz 				   struct mlx5e_tc_flow *flow)
6675c65c564SOr Gerlitz {
6685c65c564SOr Gerlitz 	struct list_head *next = flow->hairpin.next;
6695c65c564SOr Gerlitz 
6705c65c564SOr Gerlitz 	list_del(&flow->hairpin);
6715c65c564SOr Gerlitz 
6725c65c564SOr Gerlitz 	/* no more hairpin flows for us, release the hairpin pair */
6735c65c564SOr Gerlitz 	if (list_empty(next)) {
6745c65c564SOr Gerlitz 		struct mlx5e_hairpin_entry *hpe;
6755c65c564SOr Gerlitz 
6765c65c564SOr Gerlitz 		hpe = list_entry(next, struct mlx5e_hairpin_entry, flows);
6775c65c564SOr Gerlitz 
6785c65c564SOr Gerlitz 		netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
6795c65c564SOr Gerlitz 			   hpe->hp->pair->peer_mdev->priv.name);
6805c65c564SOr Gerlitz 
6815c65c564SOr Gerlitz 		mlx5e_hairpin_destroy(hpe->hp);
6825c65c564SOr Gerlitz 		hash_del(&hpe->hairpin_hlist);
6835c65c564SOr Gerlitz 		kfree(hpe);
6845c65c564SOr Gerlitz 	}
6855c65c564SOr Gerlitz }
6865c65c564SOr Gerlitz 
68754c177caSOz Shlomo static const char *mlx5e_netdev_kind(struct net_device *dev)
68854c177caSOz Shlomo {
68954c177caSOz Shlomo 	if (dev->rtnl_link_ops)
69054c177caSOz Shlomo 		return dev->rtnl_link_ops->kind;
69154c177caSOz Shlomo 	else
69254c177caSOz Shlomo 		return "";
69354c177caSOz Shlomo }
69454c177caSOz Shlomo 
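/* Offload a NIC (non-eswitch) TC flow: resolve the destinations (hairpin
 * TIR/table, the vlan table and/or a flow counter), attach an optional
 * modify-header, create the tc steering table on first use and add the
 * flow rule.
 */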
695c83954abSRabie Loulou static int
69674491de9SMark Bloch mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
69717091853SOr Gerlitz 		      struct mlx5e_tc_flow_parse_attr *parse_attr,
698e98bedf5SEli Britstein 		      struct mlx5e_tc_flow *flow,
699e98bedf5SEli Britstein 		      struct netlink_ext_ack *extack)
700e8f887acSAmir Vadai {
701aa0cbbaeSOr Gerlitz 	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
702aad7e08dSAmir Vadai 	struct mlx5_core_dev *dev = priv->mdev;
7035c65c564SOr Gerlitz 	struct mlx5_flow_destination dest[2] = {};
70466958ed9SHadar Hen Zion 	struct mlx5_flow_act flow_act = {
7053bc4b7bfSOr Gerlitz 		.action = attr->action,
7063bc4b7bfSOr Gerlitz 		.flow_tag = attr->flow_tag,
70760786f09SMark Bloch 		.reformat_id = 0,
70842f7ad67SPaul Blakey 		.flags    = FLOW_ACT_HAS_TAG | FLOW_ACT_NO_APPEND,
70966958ed9SHadar Hen Zion 	};
710aad7e08dSAmir Vadai 	struct mlx5_fc *counter = NULL;
711e8f887acSAmir Vadai 	bool table_created = false;
7125c65c564SOr Gerlitz 	int err, dest_ix = 0;
713e8f887acSAmir Vadai 
7145c65c564SOr Gerlitz 	if (flow->flags & MLX5E_TC_FLOW_HAIRPIN) {
715e98bedf5SEli Britstein 		err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
7165c65c564SOr Gerlitz 		if (err)
7175c65c564SOr Gerlitz 			goto err_add_hairpin_flow;
7193f6d08d1SOr Gerlitz 		if (flow->flags & MLX5E_TC_FLOW_HAIRPIN_RSS) {
7203f6d08d1SOr Gerlitz 			dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
7213f6d08d1SOr Gerlitz 			dest[dest_ix].ft = attr->hairpin_ft;
7223f6d08d1SOr Gerlitz 		} else {
7235c65c564SOr Gerlitz 			dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
7245c65c564SOr Gerlitz 			dest[dest_ix].tir_num = attr->hairpin_tirn;
7253f6d08d1SOr Gerlitz 		}
7263f6d08d1SOr Gerlitz 		dest_ix++;
7273f6d08d1SOr Gerlitz 	} else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
7285c65c564SOr Gerlitz 		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
7295c65c564SOr Gerlitz 		dest[dest_ix].ft = priv->fs.vlan.ft.t;
7305c65c564SOr Gerlitz 		dest_ix++;
7315c65c564SOr Gerlitz 	}
732aad7e08dSAmir Vadai 
7335c65c564SOr Gerlitz 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
7345c65c564SOr Gerlitz 		counter = mlx5_fc_create(dev, true);
7355c65c564SOr Gerlitz 		if (IS_ERR(counter)) {
736c83954abSRabie Loulou 			err = PTR_ERR(counter);
7375c65c564SOr Gerlitz 			goto err_fc_create;
7385c65c564SOr Gerlitz 		}
7395c65c564SOr Gerlitz 		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
740171c7625SMark Bloch 		dest[dest_ix].counter_id = mlx5_fc_id(counter);
7415c65c564SOr Gerlitz 		dest_ix++;
742b8aee822SMark Bloch 		attr->counter = counter;
743aad7e08dSAmir Vadai 	}
744aad7e08dSAmir Vadai 
7452f4fe4caSOr Gerlitz 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
7463099eb5aSOr Gerlitz 		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
747d7e75a32SOr Gerlitz 		flow_act.modify_id = attr->mod_hdr_id;
7482f4fe4caSOr Gerlitz 		kfree(parse_attr->mod_hdr_actions);
749c83954abSRabie Loulou 		if (err)
7502f4fe4caSOr Gerlitz 			goto err_create_mod_hdr_id;
7512f4fe4caSOr Gerlitz 	}
7522f4fe4caSOr Gerlitz 
753acff797cSMaor Gottlieb 	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
75421b9c144SOr Gerlitz 		int tc_grp_size, tc_tbl_size;
75521b9c144SOr Gerlitz 		u32 max_flow_counter;
75621b9c144SOr Gerlitz 
75721b9c144SOr Gerlitz 		max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
75821b9c144SOr Gerlitz 				    MLX5_CAP_GEN(dev, max_flow_counter_15_0);
75921b9c144SOr Gerlitz 
76021b9c144SOr Gerlitz 		tc_grp_size = min_t(int, max_flow_counter, MLX5E_TC_TABLE_MAX_GROUP_SIZE);
76121b9c144SOr Gerlitz 
76221b9c144SOr Gerlitz 		tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
76321b9c144SOr Gerlitz 				    BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));
76421b9c144SOr Gerlitz 
765acff797cSMaor Gottlieb 		priv->fs.tc.t =
766acff797cSMaor Gottlieb 			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
767acff797cSMaor Gottlieb 							    MLX5E_TC_PRIO,
76821b9c144SOr Gerlitz 							    tc_tbl_size,
769acff797cSMaor Gottlieb 							    MLX5E_TC_TABLE_NUM_GROUPS,
7703f6d08d1SOr Gerlitz 							    MLX5E_TC_FT_LEVEL, 0);
771acff797cSMaor Gottlieb 		if (IS_ERR(priv->fs.tc.t)) {
772e98bedf5SEli Britstein 			NL_SET_ERR_MSG_MOD(extack,
773e98bedf5SEli Britstein 					   "Failed to create tc offload table");
774e8f887acSAmir Vadai 			netdev_err(priv->netdev,
775e8f887acSAmir Vadai 				   "Failed to create tc offload table\n");
776c83954abSRabie Loulou 			err = PTR_ERR(priv->fs.tc.t);
777aad7e08dSAmir Vadai 			goto err_create_ft;
778e8f887acSAmir Vadai 		}
779e8f887acSAmir Vadai 
780e8f887acSAmir Vadai 		table_created = true;
781e8f887acSAmir Vadai 	}
782e8f887acSAmir Vadai 
78338aa51c1SOr Gerlitz 	if (attr->match_level != MLX5_MATCH_NONE)
78417091853SOr Gerlitz 		parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
78538aa51c1SOr Gerlitz 
786c83954abSRabie Loulou 	flow->rule[0] = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
7875c65c564SOr Gerlitz 					    &flow_act, dest, dest_ix);
788e8f887acSAmir Vadai 
789c83954abSRabie Loulou 	if (IS_ERR(flow->rule[0])) {
790c83954abSRabie Loulou 		err = PTR_ERR(flow->rule[0]);
791aad7e08dSAmir Vadai 		goto err_add_rule;
792c83954abSRabie Loulou 	}
793aad7e08dSAmir Vadai 
794c83954abSRabie Loulou 	return 0;
795aad7e08dSAmir Vadai 
796aad7e08dSAmir Vadai err_add_rule:
797aad7e08dSAmir Vadai 	if (table_created) {
798acff797cSMaor Gottlieb 		mlx5_destroy_flow_table(priv->fs.tc.t);
799acff797cSMaor Gottlieb 		priv->fs.tc.t = NULL;
800e8f887acSAmir Vadai 	}
801aad7e08dSAmir Vadai err_create_ft:
8022f4fe4caSOr Gerlitz 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
8033099eb5aSOr Gerlitz 		mlx5e_detach_mod_hdr(priv, flow);
8042f4fe4caSOr Gerlitz err_create_mod_hdr_id:
805aad7e08dSAmir Vadai 	mlx5_fc_destroy(dev, counter);
8065c65c564SOr Gerlitz err_fc_create:
8075c65c564SOr Gerlitz 	if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
8085c65c564SOr Gerlitz 		mlx5e_hairpin_flow_del(priv, flow);
8095c65c564SOr Gerlitz err_add_hairpin_flow:
810c83954abSRabie Loulou 	return err;
811e8f887acSAmir Vadai }
812e8f887acSAmir Vadai 
813d85cdccbSOr Gerlitz static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
814d85cdccbSOr Gerlitz 				  struct mlx5e_tc_flow *flow)
815d85cdccbSOr Gerlitz {
816513f8f7fSOr Gerlitz 	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
817d85cdccbSOr Gerlitz 	struct mlx5_fc *counter = NULL;
818d85cdccbSOr Gerlitz 
819b8aee822SMark Bloch 	counter = attr->counter;
820e4ad91f2SChris Mi 	mlx5_del_flow_rules(flow->rule[0]);
821d85cdccbSOr Gerlitz 	mlx5_fc_destroy(priv->mdev, counter);
822d85cdccbSOr Gerlitz 
823b3a433deSOr Gerlitz 	if (!mlx5e_tc_num_filters(priv) && priv->fs.tc.t) {
824d85cdccbSOr Gerlitz 		mlx5_destroy_flow_table(priv->fs.tc.t);
825d85cdccbSOr Gerlitz 		priv->fs.tc.t = NULL;
826d85cdccbSOr Gerlitz 	}
8272f4fe4caSOr Gerlitz 
828513f8f7fSOr Gerlitz 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
8293099eb5aSOr Gerlitz 		mlx5e_detach_mod_hdr(priv, flow);
8305c65c564SOr Gerlitz 
8315c65c564SOr Gerlitz 	if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
8325c65c564SOr Gerlitz 		mlx5e_hairpin_flow_del(priv, flow);
833d85cdccbSOr Gerlitz }
834d85cdccbSOr Gerlitz 
835aa0cbbaeSOr Gerlitz static void mlx5e_detach_encap(struct mlx5e_priv *priv,
836aa0cbbaeSOr Gerlitz 			       struct mlx5e_tc_flow *flow);
837aa0cbbaeSOr Gerlitz 
8383c37745eSOr Gerlitz static int mlx5e_attach_encap(struct mlx5e_priv *priv,
8393c37745eSOr Gerlitz 			      struct ip_tunnel_info *tun_info,
8403c37745eSOr Gerlitz 			      struct net_device *mirred_dev,
8413c37745eSOr Gerlitz 			      struct net_device **encap_dev,
842e98bedf5SEli Britstein 			      struct mlx5e_tc_flow *flow,
843e98bedf5SEli Britstein 			      struct netlink_ext_ack *extack);
8443c37745eSOr Gerlitz 
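/* Add the flow's FDB rule(s): the offloaded rule itself and, when the
 * flow also mirrors, the extra forward rule. Marks the flow as OFFLOADED.
 */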
8456d2a3ed0SOr Gerlitz static struct mlx5_flow_handle *
8466d2a3ed0SOr Gerlitz mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
8476d2a3ed0SOr Gerlitz 			   struct mlx5e_tc_flow *flow,
8486d2a3ed0SOr Gerlitz 			   struct mlx5_flow_spec *spec,
8496d2a3ed0SOr Gerlitz 			   struct mlx5_esw_flow_attr *attr)
8506d2a3ed0SOr Gerlitz {
8516d2a3ed0SOr Gerlitz 	struct mlx5_flow_handle *rule;
8526d2a3ed0SOr Gerlitz 
8536d2a3ed0SOr Gerlitz 	rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
8546d2a3ed0SOr Gerlitz 	if (IS_ERR(rule))
8556d2a3ed0SOr Gerlitz 		return rule;
8566d2a3ed0SOr Gerlitz 
8576d2a3ed0SOr Gerlitz 	if (attr->mirror_count) {
8586d2a3ed0SOr Gerlitz 		flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
8596d2a3ed0SOr Gerlitz 		if (IS_ERR(flow->rule[1])) {
8606d2a3ed0SOr Gerlitz 			mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
8616d2a3ed0SOr Gerlitz 			return flow->rule[1];
8626d2a3ed0SOr Gerlitz 		}
8636d2a3ed0SOr Gerlitz 	}
8646d2a3ed0SOr Gerlitz 
8656d2a3ed0SOr Gerlitz 	flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
8666d2a3ed0SOr Gerlitz 	return rule;
8676d2a3ed0SOr Gerlitz }
8686d2a3ed0SOr Gerlitz 
8696d2a3ed0SOr Gerlitz static void
8706d2a3ed0SOr Gerlitz mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
8716d2a3ed0SOr Gerlitz 			     struct mlx5e_tc_flow *flow,
8726d2a3ed0SOr Gerlitz 			     struct mlx5_esw_flow_attr *attr)
8736d2a3ed0SOr Gerlitz {
8746d2a3ed0SOr Gerlitz 	flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
8756d2a3ed0SOr Gerlitz 
8766d2a3ed0SOr Gerlitz 	if (attr->mirror_count)
8776d2a3ed0SOr Gerlitz 		mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);
8786d2a3ed0SOr Gerlitz 
8796d2a3ed0SOr Gerlitz 	mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
8806d2a3ed0SOr Gerlitz }
8816d2a3ed0SOr Gerlitz 
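/* Offload the flow with a reduced action set that just forwards into the
 * slow path FDB chain, used e.g. while the encap neighbour is not valid
 * yet. Marks the flow as SLOW.
 */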
8825dbe906fSPaul Blakey static struct mlx5_flow_handle *
8835dbe906fSPaul Blakey mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
8845dbe906fSPaul Blakey 			      struct mlx5e_tc_flow *flow,
8855dbe906fSPaul Blakey 			      struct mlx5_flow_spec *spec,
8865dbe906fSPaul Blakey 			      struct mlx5_esw_flow_attr *slow_attr)
8875dbe906fSPaul Blakey {
8885dbe906fSPaul Blakey 	struct mlx5_flow_handle *rule;
8895dbe906fSPaul Blakey 
8905dbe906fSPaul Blakey 	memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
8915dbe906fSPaul Blakey 	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
8925dbe906fSPaul Blakey 	slow_attr->mirror_count = 0;
8935dbe906fSPaul Blakey 	slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN;
8945dbe906fSPaul Blakey 
8955dbe906fSPaul Blakey 	rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
8965dbe906fSPaul Blakey 	if (!IS_ERR(rule))
8975dbe906fSPaul Blakey 		flow->flags |= MLX5E_TC_FLOW_SLOW;
8985dbe906fSPaul Blakey 
8995dbe906fSPaul Blakey 	return rule;
9005dbe906fSPaul Blakey }
9015dbe906fSPaul Blakey 
9025dbe906fSPaul Blakey static void
9035dbe906fSPaul Blakey mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
9045dbe906fSPaul Blakey 				  struct mlx5e_tc_flow *flow,
9055dbe906fSPaul Blakey 				  struct mlx5_esw_flow_attr *slow_attr)
9065dbe906fSPaul Blakey {
9075dbe906fSPaul Blakey 	memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
9085dbe906fSPaul Blakey 	mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
9095dbe906fSPaul Blakey 	flow->flags &= ~MLX5E_TC_FLOW_SLOW;
9105dbe906fSPaul Blakey }
9115dbe906fSPaul Blakey 
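/* Offload an eswitch (FDB) TC flow: validate chain/prio, attach encap,
 * vlan, modify-header and counter resources as needed, then install
 * either the full rule or a slow path rule when the encap neighbour is
 * not resolved yet (-EAGAIN).
 */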
912c83954abSRabie Loulou static int
91374491de9SMark Bloch mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
91417091853SOr Gerlitz 		      struct mlx5e_tc_flow_parse_attr *parse_attr,
915e98bedf5SEli Britstein 		      struct mlx5e_tc_flow *flow,
916e98bedf5SEli Britstein 		      struct netlink_ext_ack *extack)
917adb4c123SOr Gerlitz {
918adb4c123SOr Gerlitz 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
919bf07aa73SPaul Blakey 	u32 max_chain = mlx5_eswitch_get_chain_range(esw);
920aa0cbbaeSOr Gerlitz 	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
921bf07aa73SPaul Blakey 	u16 max_prio = mlx5_eswitch_get_prio_range(esw);
9223c37745eSOr Gerlitz 	struct net_device *out_dev, *encap_dev = NULL;
923b8aee822SMark Bloch 	struct mlx5_fc *counter = NULL;
9243c37745eSOr Gerlitz 	struct mlx5e_rep_priv *rpriv;
9253c37745eSOr Gerlitz 	struct mlx5e_priv *out_priv;
926c83954abSRabie Loulou 	int err = 0, encap_err = 0;
9278b32580dSOr Gerlitz 
928bf07aa73SPaul Blakey 	/* if prios are not supported, keep the old behaviour of using same prio
929bf07aa73SPaul Blakey 	 * for all offloaded rules.
930bf07aa73SPaul Blakey 	 */
931bf07aa73SPaul Blakey 	if (!mlx5_eswitch_prios_supported(esw))
932e52c2802SPaul Blakey 		attr->prio = 1;
933e52c2802SPaul Blakey 
934bf07aa73SPaul Blakey 	if (attr->chain > max_chain) {
935bf07aa73SPaul Blakey 		NL_SET_ERR_MSG(extack, "Requested chain is out of supported range");
936bf07aa73SPaul Blakey 		err = -EOPNOTSUPP;
937bf07aa73SPaul Blakey 		goto err_max_prio_chain;
938bf07aa73SPaul Blakey 	}
939bf07aa73SPaul Blakey 
940bf07aa73SPaul Blakey 	if (attr->prio > max_prio) {
941bf07aa73SPaul Blakey 		NL_SET_ERR_MSG(extack, "Requested priority is out of supported range");
942bf07aa73SPaul Blakey 		err = -EOPNOTSUPP;
943bf07aa73SPaul Blakey 		goto err_max_prio_chain;
944bf07aa73SPaul Blakey 	}
945bf07aa73SPaul Blakey 
94660786f09SMark Bloch 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
9473c37745eSOr Gerlitz 		out_dev = __dev_get_by_index(dev_net(priv->netdev),
9483c37745eSOr Gerlitz 					     attr->parse_attr->mirred_ifindex);
949c83954abSRabie Loulou 		encap_err = mlx5e_attach_encap(priv, &parse_attr->tun_info,
950c83954abSRabie Loulou 					       out_dev, &encap_dev, flow,
951c83954abSRabie Loulou 					       extack);
952c83954abSRabie Loulou 		if (encap_err && encap_err != -EAGAIN) {
953c83954abSRabie Loulou 			err = encap_err;
9543c37745eSOr Gerlitz 			goto err_attach_encap;
9553c37745eSOr Gerlitz 		}
9563c37745eSOr Gerlitz 		out_priv = netdev_priv(encap_dev);
9573c37745eSOr Gerlitz 		rpriv = out_priv->ppriv;
958592d3651SChris Mi 		attr->out_rep[attr->out_count] = rpriv->rep;
959592d3651SChris Mi 		attr->out_mdev[attr->out_count++] = out_priv->mdev;
9603c37745eSOr Gerlitz 	}
9613c37745eSOr Gerlitz 
9628b32580dSOr Gerlitz 	err = mlx5_eswitch_add_vlan_action(esw, attr);
963c83954abSRabie Loulou 	if (err)
964aa0cbbaeSOr Gerlitz 		goto err_add_vlan;
965adb4c123SOr Gerlitz 
966d7e75a32SOr Gerlitz 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
9671a9527bbSOr Gerlitz 		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
968d7e75a32SOr Gerlitz 		kfree(parse_attr->mod_hdr_actions);
969c83954abSRabie Loulou 		if (err)
970d7e75a32SOr Gerlitz 			goto err_mod_hdr;
971d7e75a32SOr Gerlitz 	}
972d7e75a32SOr Gerlitz 
973b8aee822SMark Bloch 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
974b8aee822SMark Bloch 		counter = mlx5_fc_create(esw->dev, true);
975b8aee822SMark Bloch 		if (IS_ERR(counter)) {
976c83954abSRabie Loulou 			err = PTR_ERR(counter);
977b8aee822SMark Bloch 			goto err_create_counter;
978b8aee822SMark Bloch 		}
979b8aee822SMark Bloch 
980b8aee822SMark Bloch 		attr->counter = counter;
981b8aee822SMark Bloch 	}
982b8aee822SMark Bloch 
983c83954abSRabie Loulou 	/* we get here if (1) there's no error or
9843c37745eSOr Gerlitz 	 * (2) there's an encap action and we got -EAGAIN (no valid neigh yet)
9853c37745eSOr Gerlitz 	 */
9865dbe906fSPaul Blakey 	if (encap_err == -EAGAIN) {
9875dbe906fSPaul Blakey 		/* continue with goto slow path rule instead */
9885dbe906fSPaul Blakey 		struct mlx5_esw_flow_attr slow_attr;
9895dbe906fSPaul Blakey 
9905dbe906fSPaul Blakey 		flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec, &slow_attr);
9915dbe906fSPaul Blakey 	} else {
9926d2a3ed0SOr Gerlitz 		flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);
9935dbe906fSPaul Blakey 	}
9945dbe906fSPaul Blakey 
995c83954abSRabie Loulou 	if (IS_ERR(flow->rule[0])) {
996c83954abSRabie Loulou 		err = PTR_ERR(flow->rule[0]);
997aa0cbbaeSOr Gerlitz 		goto err_add_rule;
998c83954abSRabie Loulou 	}
999c83954abSRabie Loulou 
10005dbe906fSPaul Blakey 	return 0;
1001aa0cbbaeSOr Gerlitz 
1002aa0cbbaeSOr Gerlitz err_add_rule:
1003b8aee822SMark Bloch 	mlx5_fc_destroy(esw->dev, counter);
1004b8aee822SMark Bloch err_create_counter:
1005513f8f7fSOr Gerlitz 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
10061a9527bbSOr Gerlitz 		mlx5e_detach_mod_hdr(priv, flow);
1007d7e75a32SOr Gerlitz err_mod_hdr:
1008aa0cbbaeSOr Gerlitz 	mlx5_eswitch_del_vlan_action(esw, attr);
1009aa0cbbaeSOr Gerlitz err_add_vlan:
101060786f09SMark Bloch 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT)
1011aa0cbbaeSOr Gerlitz 		mlx5e_detach_encap(priv, flow);
10123c37745eSOr Gerlitz err_attach_encap:
1013bf07aa73SPaul Blakey err_max_prio_chain:
1014c83954abSRabie Loulou 	return err;
1015aa0cbbaeSOr Gerlitz }
1016d85cdccbSOr Gerlitz 
1017d85cdccbSOr Gerlitz static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
1018d85cdccbSOr Gerlitz 				  struct mlx5e_tc_flow *flow)
1019d85cdccbSOr Gerlitz {
1020d85cdccbSOr Gerlitz 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1021d7e75a32SOr Gerlitz 	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
10225dbe906fSPaul Blakey 	struct mlx5_esw_flow_attr slow_attr;
1023d85cdccbSOr Gerlitz 
10245dbe906fSPaul Blakey 	if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
10255dbe906fSPaul Blakey 		if (flow->flags & MLX5E_TC_FLOW_SLOW)
10265dbe906fSPaul Blakey 			mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr);
10275dbe906fSPaul Blakey 		else
10285dbe906fSPaul Blakey 			mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
10295dbe906fSPaul Blakey 	}
1030d85cdccbSOr Gerlitz 
1031513f8f7fSOr Gerlitz 	mlx5_eswitch_del_vlan_action(esw, attr);
1032d85cdccbSOr Gerlitz 
103360786f09SMark Bloch 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
1034d85cdccbSOr Gerlitz 		mlx5e_detach_encap(priv, flow);
1035513f8f7fSOr Gerlitz 		kvfree(attr->parse_attr);
1036232c0013SHadar Hen Zion 	}
1037d7e75a32SOr Gerlitz 
1038513f8f7fSOr Gerlitz 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
10391a9527bbSOr Gerlitz 		mlx5e_detach_mod_hdr(priv, flow);
1040b8aee822SMark Bloch 
1041b8aee822SMark Bloch 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
1042b8aee822SMark Bloch 		mlx5_fc_destroy(esw->dev, attr->counter);
1043d85cdccbSOr Gerlitz }
1044d85cdccbSOr Gerlitz 
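/* Called when the encap neighbour becomes valid: program the encap header
 * into HW and move every flow using this encap entry from its slow path
 * rule to the full encap rule.
 */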
1045232c0013SHadar Hen Zion void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
1046232c0013SHadar Hen Zion 			      struct mlx5e_encap_entry *e)
1047232c0013SHadar Hen Zion {
10483c37745eSOr Gerlitz 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
10495dbe906fSPaul Blakey 	struct mlx5_esw_flow_attr slow_attr, *esw_attr;
10506d2a3ed0SOr Gerlitz 	struct mlx5_flow_handle *rule;
10516d2a3ed0SOr Gerlitz 	struct mlx5_flow_spec *spec;
1052232c0013SHadar Hen Zion 	struct mlx5e_tc_flow *flow;
1053232c0013SHadar Hen Zion 	int err;
1054232c0013SHadar Hen Zion 
105554c177caSOz Shlomo 	err = mlx5_packet_reformat_alloc(priv->mdev,
105654c177caSOz Shlomo 					 e->reformat_type,
1057232c0013SHadar Hen Zion 					 e->encap_size, e->encap_header,
105831ca3648SMark Bloch 					 MLX5_FLOW_NAMESPACE_FDB,
1059232c0013SHadar Hen Zion 					 &e->encap_id);
1060232c0013SHadar Hen Zion 	if (err) {
1061232c0013SHadar Hen Zion 		mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %d\n",
1062232c0013SHadar Hen Zion 			       err);
1063232c0013SHadar Hen Zion 		return;
1064232c0013SHadar Hen Zion 	}
1065232c0013SHadar Hen Zion 	e->flags |= MLX5_ENCAP_ENTRY_VALID;
1066f6dfb4c3SHadar Hen Zion 	mlx5e_rep_queue_neigh_stats_work(priv);
1067232c0013SHadar Hen Zion 
1068232c0013SHadar Hen Zion 	list_for_each_entry(flow, &e->flows, encap) {
10693c37745eSOr Gerlitz 		esw_attr = flow->esw_attr;
10703c37745eSOr Gerlitz 		esw_attr->encap_id = e->encap_id;
10716d2a3ed0SOr Gerlitz 		spec = &esw_attr->parse_attr->spec;
10726d2a3ed0SOr Gerlitz 
10735dbe906fSPaul Blakey 		/* update from slow path rule to encap rule */
10746d2a3ed0SOr Gerlitz 		rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, esw_attr);
10756d2a3ed0SOr Gerlitz 		if (IS_ERR(rule)) {
10766d2a3ed0SOr Gerlitz 			err = PTR_ERR(rule);
1077232c0013SHadar Hen Zion 			mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
1078232c0013SHadar Hen Zion 				       err);
1079232c0013SHadar Hen Zion 			continue;
1080232c0013SHadar Hen Zion 		}
10815dbe906fSPaul Blakey 
10825dbe906fSPaul Blakey 		mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr);
10835dbe906fSPaul Blakey 		flow->flags |= MLX5E_TC_FLOW_OFFLOADED; /* was unset when slow path rule removed */
10846d2a3ed0SOr Gerlitz 		flow->rule[0] = rule;
1085232c0013SHadar Hen Zion 	}
1086232c0013SHadar Hen Zion }
1087232c0013SHadar Hen Zion 
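/* Called when the encap neighbour becomes invalid: move every flow using
 * this encap entry back to a slow path rule and release the HW encap
 * header.
 */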
1088232c0013SHadar Hen Zion void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
1089232c0013SHadar Hen Zion 			      struct mlx5e_encap_entry *e)
1090232c0013SHadar Hen Zion {
10913c37745eSOr Gerlitz 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
10925dbe906fSPaul Blakey 	struct mlx5_esw_flow_attr slow_attr;
10935dbe906fSPaul Blakey 	struct mlx5_flow_handle *rule;
10945dbe906fSPaul Blakey 	struct mlx5_flow_spec *spec;
1095232c0013SHadar Hen Zion 	struct mlx5e_tc_flow *flow;
10965dbe906fSPaul Blakey 	int err;
1097232c0013SHadar Hen Zion 
1098232c0013SHadar Hen Zion 	list_for_each_entry(flow, &e->flows, encap) {
10995dbe906fSPaul Blakey 		spec = &flow->esw_attr->parse_attr->spec;
11005dbe906fSPaul Blakey 
11015dbe906fSPaul Blakey 		/* update from encap rule to slow path rule */
11025dbe906fSPaul Blakey 		rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec, &slow_attr);
11035dbe906fSPaul Blakey 
11045dbe906fSPaul Blakey 		if (IS_ERR(rule)) {
11055dbe906fSPaul Blakey 			err = PTR_ERR(rule);
11065dbe906fSPaul Blakey 			mlx5_core_warn(priv->mdev, "Failed to update slow path (encap) flow, %d\n",
11075dbe906fSPaul Blakey 				       err);
11085dbe906fSPaul Blakey 			continue;
11095dbe906fSPaul Blakey 		}
11105dbe906fSPaul Blakey 
11116d2a3ed0SOr Gerlitz 		mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->esw_attr);
11125dbe906fSPaul Blakey 		flow->flags |= MLX5E_TC_FLOW_OFFLOADED; /* was unset when fast path rule removed */
11135dbe906fSPaul Blakey 		flow->rule[0] = rule;
1114232c0013SHadar Hen Zion 	}
1115232c0013SHadar Hen Zion 
1116232c0013SHadar Hen Zion 	if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
1117232c0013SHadar Hen Zion 		e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
111860786f09SMark Bloch 		mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id);
1119232c0013SHadar Hen Zion 	}
1120232c0013SHadar Hen Zion }
1121232c0013SHadar Hen Zion 
1122b8aee822SMark Bloch static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
1123b8aee822SMark Bloch {
1124b8aee822SMark Bloch 	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
1125b8aee822SMark Bloch 		return flow->esw_attr->counter;
1126b8aee822SMark Bloch 	else
1127b8aee822SMark Bloch 		return flow->nic_attr->counter;
1128b8aee822SMark Bloch }
1129b8aee822SMark Bloch 
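/* Walk all encap entries hanging off this neighbour hash entry and, for every
 * offloaded flow, query the cached HW counter. If any flow saw traffic since
 * the last report, record the new timestamp and kick the neighbour state
 * machine (neigh_event_send) so the entry is treated as recently used.
 */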
1130f6dfb4c3SHadar Hen Zion void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
1131f6dfb4c3SHadar Hen Zion {
1132f6dfb4c3SHadar Hen Zion 	struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
1133f6dfb4c3SHadar Hen Zion 	u64 bytes, packets, lastuse = 0;
1134f6dfb4c3SHadar Hen Zion 	struct mlx5e_tc_flow *flow;
1135f6dfb4c3SHadar Hen Zion 	struct mlx5e_encap_entry *e;
1136f6dfb4c3SHadar Hen Zion 	struct mlx5_fc *counter;
1137f6dfb4c3SHadar Hen Zion 	struct neigh_table *tbl;
1138f6dfb4c3SHadar Hen Zion 	bool neigh_used = false;
1139f6dfb4c3SHadar Hen Zion 	struct neighbour *n;
1140f6dfb4c3SHadar Hen Zion 
1141f6dfb4c3SHadar Hen Zion 	if (m_neigh->family == AF_INET)
1142f6dfb4c3SHadar Hen Zion 		tbl = &arp_tbl;
1143f6dfb4c3SHadar Hen Zion #if IS_ENABLED(CONFIG_IPV6)
1144f6dfb4c3SHadar Hen Zion 	else if (m_neigh->family == AF_INET6)
1145423c9db2SOr Gerlitz 		tbl = &nd_tbl;
1146f6dfb4c3SHadar Hen Zion #endif
1147f6dfb4c3SHadar Hen Zion 	else
1148f6dfb4c3SHadar Hen Zion 		return;
1149f6dfb4c3SHadar Hen Zion 
1150f6dfb4c3SHadar Hen Zion 	list_for_each_entry(e, &nhe->encap_list, encap_list) {
1151f6dfb4c3SHadar Hen Zion 		if (!(e->flags & MLX5_ENCAP_ENTRY_VALID))
1152f6dfb4c3SHadar Hen Zion 			continue;
1153f6dfb4c3SHadar Hen Zion 		list_for_each_entry(flow, &e->flows, encap) {
1154f6dfb4c3SHadar Hen Zion 			if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
1155b8aee822SMark Bloch 				counter = mlx5e_tc_get_counter(flow);
1156f6dfb4c3SHadar Hen Zion 				mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
1157f6dfb4c3SHadar Hen Zion 				if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
1158f6dfb4c3SHadar Hen Zion 					neigh_used = true;
1159f6dfb4c3SHadar Hen Zion 					break;
1160f6dfb4c3SHadar Hen Zion 				}
1161f6dfb4c3SHadar Hen Zion 			}
1162f6dfb4c3SHadar Hen Zion 		}
1163e36d4810SRoi Dayan 		if (neigh_used)
1164e36d4810SRoi Dayan 			break;
1165f6dfb4c3SHadar Hen Zion 	}
1166f6dfb4c3SHadar Hen Zion 
1167f6dfb4c3SHadar Hen Zion 	if (neigh_used) {
1168f6dfb4c3SHadar Hen Zion 		nhe->reported_lastuse = jiffies;
1169f6dfb4c3SHadar Hen Zion 
1170f6dfb4c3SHadar Hen Zion 		/* find the relevant neigh according to the cached device and
1171f6dfb4c3SHadar Hen Zion 		 * dst ip pair
1172f6dfb4c3SHadar Hen Zion 		 */
1173f6dfb4c3SHadar Hen Zion 		n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev);
1174c7f7ba8dSRoi Dayan 		if (!n)
1175f6dfb4c3SHadar Hen Zion 			return;
1176f6dfb4c3SHadar Hen Zion 
1177f6dfb4c3SHadar Hen Zion 		neigh_event_send(n, NULL);
1178f6dfb4c3SHadar Hen Zion 		neigh_release(n);
1179f6dfb4c3SHadar Hen Zion 	}
1180f6dfb4c3SHadar Hen Zion }
1181f6dfb4c3SHadar Hen Zion 
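/* Unlink the flow from its encap entry. If this was the last flow using the
 * entry, detach it from the representor's neigh update machinery, free the HW
 * packet reformat context (if one was allocated) and release the entry itself.
 */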
1182d85cdccbSOr Gerlitz static void mlx5e_detach_encap(struct mlx5e_priv *priv,
1183d85cdccbSOr Gerlitz 			       struct mlx5e_tc_flow *flow)
1184d85cdccbSOr Gerlitz {
11855067b602SRoi Dayan 	struct list_head *next = flow->encap.next;
11865067b602SRoi Dayan 
11875067b602SRoi Dayan 	list_del(&flow->encap);
11885067b602SRoi Dayan 	if (list_empty(next)) {
1189c1ae1152SOr Gerlitz 		struct mlx5e_encap_entry *e;
11905067b602SRoi Dayan 
1191c1ae1152SOr Gerlitz 		e = list_entry(next, struct mlx5e_encap_entry, flows);
1192232c0013SHadar Hen Zion 		mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
1193232c0013SHadar Hen Zion 
1194232c0013SHadar Hen Zion 		if (e->flags & MLX5_ENCAP_ENTRY_VALID)
119560786f09SMark Bloch 			mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id);
1196232c0013SHadar Hen Zion 
1197cdc5a7f3SOr Gerlitz 		hash_del_rcu(&e->encap_hlist);
1198232c0013SHadar Hen Zion 		kfree(e->encap_header);
11995067b602SRoi Dayan 		kfree(e);
12005067b602SRoi Dayan 	}
12015067b602SRoi Dayan }
12025067b602SRoi Dayan 
1203e8f887acSAmir Vadai static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
1204961e8979SRoi Dayan 			      struct mlx5e_tc_flow *flow)
1205e8f887acSAmir Vadai {
1206d85cdccbSOr Gerlitz 	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
1207d85cdccbSOr Gerlitz 		mlx5e_tc_del_fdb_flow(priv, flow);
1208d85cdccbSOr Gerlitz 	else
1209d85cdccbSOr Gerlitz 		mlx5e_tc_del_nic_flow(priv, flow);
1210e8f887acSAmir Vadai }
1211e8f887acSAmir Vadai 
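/* Translate the flower tunnel (enc_*) keys of a VXLAN decap filter into the
 * flow spec: require a full match on the UDP destination port, verify that
 * port is registered as a VXLAN port, and match on the UDP ports and the VNI.
 */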
12124d70564dSOz Shlomo static int parse_tunnel_vxlan_attr(struct mlx5e_priv *priv,
12134d70564dSOz Shlomo 				   struct mlx5_flow_spec *spec,
12144d70564dSOz Shlomo 				   struct tc_cls_flower_offload *f,
12154d70564dSOz Shlomo 				   void *headers_c,
12164d70564dSOz Shlomo 				   void *headers_v)
1217bbd00f7eSHadar Hen Zion {
12184d70564dSOz Shlomo 	struct netlink_ext_ack *extack = f->common.extack;
12194d70564dSOz Shlomo 	struct flow_dissector_key_ports *key =
12204d70564dSOz Shlomo 		skb_flow_dissector_target(f->dissector,
12214d70564dSOz Shlomo 					  FLOW_DISSECTOR_KEY_ENC_PORTS,
12224d70564dSOz Shlomo 					  f->key);
12234d70564dSOz Shlomo 	struct flow_dissector_key_ports *mask =
12244d70564dSOz Shlomo 		skb_flow_dissector_target(f->dissector,
12254d70564dSOz Shlomo 					  FLOW_DISSECTOR_KEY_ENC_PORTS,
12264d70564dSOz Shlomo 					  f->mask);
12274d70564dSOz Shlomo 	void *misc_c = MLX5_ADDR_OF(fte_match_param,
12284d70564dSOz Shlomo 				    spec->match_criteria,
1229bbd00f7eSHadar Hen Zion 				    misc_parameters);
12304d70564dSOz Shlomo 	void *misc_v = MLX5_ADDR_OF(fte_match_param,
12314d70564dSOz Shlomo 				    spec->match_value,
1232bbd00f7eSHadar Hen Zion 				    misc_parameters);
1233bbd00f7eSHadar Hen Zion 
12344d70564dSOz Shlomo 	/* Full udp dst port must be given */
12354d70564dSOz Shlomo 	if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS) ||
12364d70564dSOz Shlomo 	    memchr_inv(&mask->dst, 0xff, sizeof(mask->dst))) {
12374d70564dSOz Shlomo 		NL_SET_ERR_MSG_MOD(extack,
12384d70564dSOz Shlomo 				   "VXLAN decap filter must include enc_dst_port condition");
12394d70564dSOz Shlomo 		netdev_warn(priv->netdev,
12404d70564dSOz Shlomo 			    "VXLAN decap filter must include enc_dst_port condition\n");
12414d70564dSOz Shlomo 		return -EOPNOTSUPP;
12424d70564dSOz Shlomo 	}
12434d70564dSOz Shlomo 
12444d70564dSOz Shlomo 	/* udp dst port must be known as a VXLAN port */
12454d70564dSOz Shlomo 	if (!mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(key->dst))) {
12464d70564dSOz Shlomo 		NL_SET_ERR_MSG_MOD(extack,
12474d70564dSOz Shlomo 				   "Matched UDP port is not registered as a VXLAN port");
12484d70564dSOz Shlomo 		netdev_warn(priv->netdev,
12494d70564dSOz Shlomo 			    "UDP port %d is not registered as a VXLAN port\n",
12504d70564dSOz Shlomo 			    be16_to_cpu(key->dst));
12514d70564dSOz Shlomo 		return -EOPNOTSUPP;
12524d70564dSOz Shlomo 	}
12534d70564dSOz Shlomo 
12544d70564dSOz Shlomo 	/* dst UDP port is valid here */
1255bbd00f7eSHadar Hen Zion 	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
1256bbd00f7eSHadar Hen Zion 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
1257bbd00f7eSHadar Hen Zion 
12584d70564dSOz Shlomo 	MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport, ntohs(mask->dst));
12594d70564dSOz Shlomo 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, ntohs(key->dst));
12604d70564dSOz Shlomo 
12614d70564dSOz Shlomo 	MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport, ntohs(mask->src));
12624d70564dSOz Shlomo 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport, ntohs(key->src));
12634d70564dSOz Shlomo 
12644d70564dSOz Shlomo 	/* match on VNI */
1265bbd00f7eSHadar Hen Zion 	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
1266bbd00f7eSHadar Hen Zion 		struct flow_dissector_key_keyid *key =
1267bbd00f7eSHadar Hen Zion 			skb_flow_dissector_target(f->dissector,
1268bbd00f7eSHadar Hen Zion 						  FLOW_DISSECTOR_KEY_ENC_KEYID,
1269bbd00f7eSHadar Hen Zion 						  f->key);
1270bbd00f7eSHadar Hen Zion 		struct flow_dissector_key_keyid *mask =
1271bbd00f7eSHadar Hen Zion 			skb_flow_dissector_target(f->dissector,
1272bbd00f7eSHadar Hen Zion 						  FLOW_DISSECTOR_KEY_ENC_KEYID,
1273bbd00f7eSHadar Hen Zion 						  f->mask);
1274bbd00f7eSHadar Hen Zion 		MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
1275bbd00f7eSHadar Hen Zion 			 be32_to_cpu(mask->keyid));
1276bbd00f7eSHadar Hen Zion 		MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
1277bbd00f7eSHadar Hen Zion 			 be32_to_cpu(key->keyid));
1278bbd00f7eSHadar Hen Zion 	}
12794d70564dSOz Shlomo 	return 0;
1280bbd00f7eSHadar Hen Zion }
1281bbd00f7eSHadar Hen Zion 
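/* Parse the tunnel part of a decap filter. The tunnel-type specific helper
 * (currently only VXLAN) fills in the L4/VNI match; here we add the common
 * outer matches: tunnel IPv4/IPv6 addresses, optional tunnel IP tos/ttl, a
 * forced match on our DMAC (needed for flow counters) and "not a fragment".
 */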
1282bbd00f7eSHadar Hen Zion static int parse_tunnel_attr(struct mlx5e_priv *priv,
1283bbd00f7eSHadar Hen Zion 			     struct mlx5_flow_spec *spec,
128454c177caSOz Shlomo 			     struct tc_cls_flower_offload *f,
128554c177caSOz Shlomo 			     struct net_device *filter_dev)
1286bbd00f7eSHadar Hen Zion {
1287e98bedf5SEli Britstein 	struct netlink_ext_ack *extack = f->common.extack;
1288bbd00f7eSHadar Hen Zion 	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
1289bbd00f7eSHadar Hen Zion 				       outer_headers);
1290bbd00f7eSHadar Hen Zion 	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
1291bbd00f7eSHadar Hen Zion 				       outer_headers);
1292bbd00f7eSHadar Hen Zion 
12932e72eb43SOr Gerlitz 	struct flow_dissector_key_control *enc_control =
12942e72eb43SOr Gerlitz 		skb_flow_dissector_target(f->dissector,
12952e72eb43SOr Gerlitz 					  FLOW_DISSECTOR_KEY_ENC_CONTROL,
12962e72eb43SOr Gerlitz 					  f->key);
129754c177caSOz Shlomo 	int tunnel_type;
12984d70564dSOz Shlomo 	int err = 0;
1299bbd00f7eSHadar Hen Zion 
130054c177caSOz Shlomo 	tunnel_type = mlx5e_get_tunnel_type(filter_dev);
130154c177caSOz Shlomo 	if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
13024d70564dSOz Shlomo 		err = parse_tunnel_vxlan_attr(priv, spec, f,
13034d70564dSOz Shlomo 					      headers_c, headers_v);
130454c177caSOz Shlomo 	} else {
1305e98bedf5SEli Britstein 		NL_SET_ERR_MSG_MOD(extack,
130654c177caSOz Shlomo 				   "decapsulation offload is not supported");
13072fcd82e9SOr Gerlitz 		netdev_warn(priv->netdev,
130854c177caSOz Shlomo 			    "decapsulation offload is not supported for %s net device (%d)\n",
130954c177caSOz Shlomo 			    mlx5e_netdev_kind(filter_dev), tunnel_type);
131054c177caSOz Shlomo 		return -EOPNOTSUPP;
131154c177caSOz Shlomo 	}
131254c177caSOz Shlomo 
131354c177caSOz Shlomo 	if (err) {
131454c177caSOz Shlomo 		NL_SET_ERR_MSG_MOD(extack,
131554c177caSOz Shlomo 				   "failed to parse tunnel attributes");
131654c177caSOz Shlomo 		netdev_warn(priv->netdev,
131754c177caSOz Shlomo 			    "failed to parse %s tunnel attributes (%d)\n",
131854c177caSOz Shlomo 			    mlx5e_netdev_kind(filter_dev), tunnel_type);
1319bbd00f7eSHadar Hen Zion 		return -EOPNOTSUPP;
1320bbd00f7eSHadar Hen Zion 	}
1321bbd00f7eSHadar Hen Zion 
13222e72eb43SOr Gerlitz 	if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
1323bbd00f7eSHadar Hen Zion 		struct flow_dissector_key_ipv4_addrs *key =
1324bbd00f7eSHadar Hen Zion 			skb_flow_dissector_target(f->dissector,
1325bbd00f7eSHadar Hen Zion 						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
1326bbd00f7eSHadar Hen Zion 						  f->key);
1327bbd00f7eSHadar Hen Zion 		struct flow_dissector_key_ipv4_addrs *mask =
1328bbd00f7eSHadar Hen Zion 			skb_flow_dissector_target(f->dissector,
1329bbd00f7eSHadar Hen Zion 						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
1330bbd00f7eSHadar Hen Zion 						  f->mask);
1331bbd00f7eSHadar Hen Zion 		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
1332bbd00f7eSHadar Hen Zion 			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
1333bbd00f7eSHadar Hen Zion 			 ntohl(mask->src));
1334bbd00f7eSHadar Hen Zion 		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
1335bbd00f7eSHadar Hen Zion 			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
1336bbd00f7eSHadar Hen Zion 			 ntohl(key->src));
1337bbd00f7eSHadar Hen Zion 
1338bbd00f7eSHadar Hen Zion 		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
1339bbd00f7eSHadar Hen Zion 			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
1340bbd00f7eSHadar Hen Zion 			 ntohl(mask->dst));
1341bbd00f7eSHadar Hen Zion 		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
1342bbd00f7eSHadar Hen Zion 			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
1343bbd00f7eSHadar Hen Zion 			 ntohl(key->dst));
1344bbd00f7eSHadar Hen Zion 
1345bbd00f7eSHadar Hen Zion 		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
1346bbd00f7eSHadar Hen Zion 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
134719f44401SOr Gerlitz 	} else if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
134819f44401SOr Gerlitz 		struct flow_dissector_key_ipv6_addrs *key =
134919f44401SOr Gerlitz 			skb_flow_dissector_target(f->dissector,
135019f44401SOr Gerlitz 						  FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
135119f44401SOr Gerlitz 						  f->key);
135219f44401SOr Gerlitz 		struct flow_dissector_key_ipv6_addrs *mask =
135319f44401SOr Gerlitz 			skb_flow_dissector_target(f->dissector,
135419f44401SOr Gerlitz 						  FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
135519f44401SOr Gerlitz 						  f->mask);
135619f44401SOr Gerlitz 
135719f44401SOr Gerlitz 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
135819f44401SOr Gerlitz 				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
135919f44401SOr Gerlitz 		       &mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
136019f44401SOr Gerlitz 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
136119f44401SOr Gerlitz 				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
136219f44401SOr Gerlitz 		       &key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
136319f44401SOr Gerlitz 
136419f44401SOr Gerlitz 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
136519f44401SOr Gerlitz 				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
136619f44401SOr Gerlitz 		       &mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
136719f44401SOr Gerlitz 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
136819f44401SOr Gerlitz 				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
136919f44401SOr Gerlitz 		       &key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
137019f44401SOr Gerlitz 
137119f44401SOr Gerlitz 		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
137219f44401SOr Gerlitz 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6);
13732e72eb43SOr Gerlitz 	}
1374bbd00f7eSHadar Hen Zion 
1375bcef735cSOr Gerlitz 	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_IP)) {
1376bcef735cSOr Gerlitz 		struct flow_dissector_key_ip *key =
1377bcef735cSOr Gerlitz 			skb_flow_dissector_target(f->dissector,
1378bcef735cSOr Gerlitz 						  FLOW_DISSECTOR_KEY_ENC_IP,
1379bcef735cSOr Gerlitz 						  f->key);
1380bcef735cSOr Gerlitz 		struct flow_dissector_key_ip *mask =
1381bcef735cSOr Gerlitz 			skb_flow_dissector_target(f->dissector,
1382bcef735cSOr Gerlitz 						  FLOW_DISSECTOR_KEY_ENC_IP,
1383bcef735cSOr Gerlitz 						  f->mask);
1384bcef735cSOr Gerlitz 
1385bcef735cSOr Gerlitz 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, mask->tos & 0x3);
1386bcef735cSOr Gerlitz 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, key->tos & 0x3);
1387bcef735cSOr Gerlitz 
1388bcef735cSOr Gerlitz 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, mask->tos >> 2);
1389bcef735cSOr Gerlitz 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, key->tos  >> 2);
1390bcef735cSOr Gerlitz 
1391bcef735cSOr Gerlitz 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, mask->ttl);
1392bcef735cSOr Gerlitz 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, key->ttl);
1393e98bedf5SEli Britstein 
1394e98bedf5SEli Britstein 		if (mask->ttl &&
1395e98bedf5SEli Britstein 		    !MLX5_CAP_ESW_FLOWTABLE_FDB
1396e98bedf5SEli Britstein 			(priv->mdev,
1397e98bedf5SEli Britstein 			 ft_field_support.outer_ipv4_ttl)) {
1398e98bedf5SEli Britstein 			NL_SET_ERR_MSG_MOD(extack,
1399e98bedf5SEli Britstein 					   "Matching on TTL is not supported");
1400e98bedf5SEli Britstein 			return -EOPNOTSUPP;
1401e98bedf5SEli Britstein 		}
1402e98bedf5SEli Britstein 
1403bcef735cSOr Gerlitz 	}
1404bcef735cSOr Gerlitz 
1405bbd00f7eSHadar Hen Zion 	/* Enforce DMAC when offloading incoming tunneled flows.
1406bbd00f7eSHadar Hen Zion 	 * Flow counters require a match on the DMAC.
1407bbd00f7eSHadar Hen Zion 	 */
1408bbd00f7eSHadar Hen Zion 	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
1409bbd00f7eSHadar Hen Zion 	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
1410bbd00f7eSHadar Hen Zion 	ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1411bbd00f7eSHadar Hen Zion 				     dmac_47_16), priv->netdev->dev_addr);
1412bbd00f7eSHadar Hen Zion 
1413bbd00f7eSHadar Hen Zion 	/* let software handle IP fragments */
1414bbd00f7eSHadar Hen Zion 	MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
1415bbd00f7eSHadar Hen Zion 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);
1416bbd00f7eSHadar Hen Zion 
1417bbd00f7eSHadar Hen Zion 	return 0;
1418bbd00f7eSHadar Hen Zion }
1419bbd00f7eSHadar Hen Zion 
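/* Build the HW match (criteria + value) from the flower dissector keys.
 * *match_level records the deepest header layer that ended up being matched
 * on (none/L2/L3/L4); the caller uses it to validate the eswitch inline mode.
 */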
1420de0af0bfSRoi Dayan static int __parse_cls_flower(struct mlx5e_priv *priv,
1421de0af0bfSRoi Dayan 			      struct mlx5_flow_spec *spec,
1422de0af0bfSRoi Dayan 			      struct tc_cls_flower_offload *f,
142354c177caSOz Shlomo 			      struct net_device *filter_dev,
1424d708f902SOr Gerlitz 			      u8 *match_level)
1425e3a2b7edSAmir Vadai {
1426e98bedf5SEli Britstein 	struct netlink_ext_ack *extack = f->common.extack;
1427c5bb1730SMaor Gottlieb 	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
1428c5bb1730SMaor Gottlieb 				       outer_headers);
1429c5bb1730SMaor Gottlieb 	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
1430c5bb1730SMaor Gottlieb 				       outer_headers);
1431699e96ddSJianbo Liu 	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
1432699e96ddSJianbo Liu 				    misc_parameters);
1433699e96ddSJianbo Liu 	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
1434699e96ddSJianbo Liu 				    misc_parameters);
1435e3a2b7edSAmir Vadai 	u16 addr_type = 0;
1436e3a2b7edSAmir Vadai 	u8 ip_proto = 0;
1437e3a2b7edSAmir Vadai 
1438d708f902SOr Gerlitz 	*match_level = MLX5_MATCH_NONE;
1439de0af0bfSRoi Dayan 
1440e3a2b7edSAmir Vadai 	if (f->dissector->used_keys &
1441e3a2b7edSAmir Vadai 	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
1442e3a2b7edSAmir Vadai 	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
1443e3a2b7edSAmir Vadai 	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
1444095b6cfdSOr Gerlitz 	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
1445699e96ddSJianbo Liu 	      BIT(FLOW_DISSECTOR_KEY_CVLAN) |
1446e3a2b7edSAmir Vadai 	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
1447e3a2b7edSAmir Vadai 	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
1448bbd00f7eSHadar Hen Zion 	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
1449bbd00f7eSHadar Hen Zion 	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
1450bbd00f7eSHadar Hen Zion 	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
1451bbd00f7eSHadar Hen Zion 	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
1452bbd00f7eSHadar Hen Zion 	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS)	|
1453e77834ecSOr Gerlitz 	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
1454fd7da28bSOr Gerlitz 	      BIT(FLOW_DISSECTOR_KEY_TCP) |
1455bcef735cSOr Gerlitz 	      BIT(FLOW_DISSECTOR_KEY_IP)  |
1456bcef735cSOr Gerlitz 	      BIT(FLOW_DISSECTOR_KEY_ENC_IP))) {
1457e98bedf5SEli Britstein 		NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
1458e3a2b7edSAmir Vadai 		netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
1459e3a2b7edSAmir Vadai 			    f->dissector->used_keys);
1460e3a2b7edSAmir Vadai 		return -EOPNOTSUPP;
1461e3a2b7edSAmir Vadai 	}
1462e3a2b7edSAmir Vadai 
1463bbd00f7eSHadar Hen Zion 	if ((dissector_uses_key(f->dissector,
1464bbd00f7eSHadar Hen Zion 				FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
1465bbd00f7eSHadar Hen Zion 	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
1466bbd00f7eSHadar Hen Zion 	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
1467bbd00f7eSHadar Hen Zion 	    dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
1468bbd00f7eSHadar Hen Zion 		struct flow_dissector_key_control *key =
1469bbd00f7eSHadar Hen Zion 			skb_flow_dissector_target(f->dissector,
1470bbd00f7eSHadar Hen Zion 						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
1471bbd00f7eSHadar Hen Zion 						  f->key);
1472bbd00f7eSHadar Hen Zion 		switch (key->addr_type) {
1473bbd00f7eSHadar Hen Zion 		case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
147419f44401SOr Gerlitz 		case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
147554c177caSOz Shlomo 			if (parse_tunnel_attr(priv, spec, f, filter_dev))
1476bbd00f7eSHadar Hen Zion 				return -EOPNOTSUPP;
1477bbd00f7eSHadar Hen Zion 			break;
1478bbd00f7eSHadar Hen Zion 		default:
1479bbd00f7eSHadar Hen Zion 			return -EOPNOTSUPP;
1480bbd00f7eSHadar Hen Zion 		}
1481bbd00f7eSHadar Hen Zion 
1482bbd00f7eSHadar Hen Zion 		/* In decap flows, the header pointers should point to the inner
1483bbd00f7eSHadar Hen Zion 		 * headers; the outer headers were already set by parse_tunnel_attr
1484bbd00f7eSHadar Hen Zion 		 */
1485bbd00f7eSHadar Hen Zion 		headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
1486bbd00f7eSHadar Hen Zion 					 inner_headers);
1487bbd00f7eSHadar Hen Zion 		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
1488bbd00f7eSHadar Hen Zion 					 inner_headers);
1489bbd00f7eSHadar Hen Zion 	}
1490bbd00f7eSHadar Hen Zion 
1491d3a80bb5SOr Gerlitz 	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
1492d3a80bb5SOr Gerlitz 		struct flow_dissector_key_basic *key =
1493e3a2b7edSAmir Vadai 			skb_flow_dissector_target(f->dissector,
1494d3a80bb5SOr Gerlitz 						  FLOW_DISSECTOR_KEY_BASIC,
1495e3a2b7edSAmir Vadai 						  f->key);
1496d3a80bb5SOr Gerlitz 		struct flow_dissector_key_basic *mask =
1497e3a2b7edSAmir Vadai 			skb_flow_dissector_target(f->dissector,
1498d3a80bb5SOr Gerlitz 						  FLOW_DISSECTOR_KEY_BASIC,
1499e3a2b7edSAmir Vadai 						  f->mask);
1500d3a80bb5SOr Gerlitz 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
1501d3a80bb5SOr Gerlitz 			 ntohs(mask->n_proto));
1502d3a80bb5SOr Gerlitz 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
1503d3a80bb5SOr Gerlitz 			 ntohs(key->n_proto));
1504e3a2b7edSAmir Vadai 
1505d3a80bb5SOr Gerlitz 		if (mask->n_proto)
1506d708f902SOr Gerlitz 			*match_level = MLX5_MATCH_L2;
1507e3a2b7edSAmir Vadai 	}
1508e3a2b7edSAmir Vadai 
1509095b6cfdSOr Gerlitz 	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
1510095b6cfdSOr Gerlitz 		struct flow_dissector_key_vlan *key =
1511095b6cfdSOr Gerlitz 			skb_flow_dissector_target(f->dissector,
1512095b6cfdSOr Gerlitz 						  FLOW_DISSECTOR_KEY_VLAN,
1513095b6cfdSOr Gerlitz 						  f->key);
1514095b6cfdSOr Gerlitz 		struct flow_dissector_key_vlan *mask =
1515095b6cfdSOr Gerlitz 			skb_flow_dissector_target(f->dissector,
1516095b6cfdSOr Gerlitz 						  FLOW_DISSECTOR_KEY_VLAN,
1517095b6cfdSOr Gerlitz 						  f->mask);
1518699e96ddSJianbo Liu 		if (mask->vlan_id || mask->vlan_priority || mask->vlan_tpid) {
1519699e96ddSJianbo Liu 			if (key->vlan_tpid == htons(ETH_P_8021AD)) {
1520699e96ddSJianbo Liu 				MLX5_SET(fte_match_set_lyr_2_4, headers_c,
1521699e96ddSJianbo Liu 					 svlan_tag, 1);
1522699e96ddSJianbo Liu 				MLX5_SET(fte_match_set_lyr_2_4, headers_v,
1523699e96ddSJianbo Liu 					 svlan_tag, 1);
1524699e96ddSJianbo Liu 			} else {
1525699e96ddSJianbo Liu 				MLX5_SET(fte_match_set_lyr_2_4, headers_c,
1526699e96ddSJianbo Liu 					 cvlan_tag, 1);
1527699e96ddSJianbo Liu 				MLX5_SET(fte_match_set_lyr_2_4, headers_v,
1528699e96ddSJianbo Liu 					 cvlan_tag, 1);
1529699e96ddSJianbo Liu 			}
1530095b6cfdSOr Gerlitz 
1531095b6cfdSOr Gerlitz 			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
1532095b6cfdSOr Gerlitz 			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);
1533358d79a4SOr Gerlitz 
1534358d79a4SOr Gerlitz 			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
1535358d79a4SOr Gerlitz 			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);
153654782900SOr Gerlitz 
1537d708f902SOr Gerlitz 			*match_level = MLX5_MATCH_L2;
1538095b6cfdSOr Gerlitz 		}
1539d3a80bb5SOr Gerlitz 	} else if (*match_level != MLX5_MATCH_NONE) {
1540cee26487SJianbo Liu 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, svlan_tag, 1);
1541cee26487SJianbo Liu 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
1542d3a80bb5SOr Gerlitz 		*match_level = MLX5_MATCH_L2;
1543095b6cfdSOr Gerlitz 	}
1544095b6cfdSOr Gerlitz 
1545699e96ddSJianbo Liu 	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CVLAN)) {
1546699e96ddSJianbo Liu 		struct flow_dissector_key_vlan *key =
1547699e96ddSJianbo Liu 			skb_flow_dissector_target(f->dissector,
1548699e96ddSJianbo Liu 						  FLOW_DISSECTOR_KEY_CVLAN,
1549699e96ddSJianbo Liu 						  f->key);
1550699e96ddSJianbo Liu 		struct flow_dissector_key_vlan *mask =
1551699e96ddSJianbo Liu 			skb_flow_dissector_target(f->dissector,
1552699e96ddSJianbo Liu 						  FLOW_DISSECTOR_KEY_CVLAN,
1553699e96ddSJianbo Liu 						  f->mask);
1554699e96ddSJianbo Liu 		if (mask->vlan_id || mask->vlan_priority || mask->vlan_tpid) {
1555699e96ddSJianbo Liu 			if (key->vlan_tpid == htons(ETH_P_8021AD)) {
1556699e96ddSJianbo Liu 				MLX5_SET(fte_match_set_misc, misc_c,
1557699e96ddSJianbo Liu 					 outer_second_svlan_tag, 1);
1558699e96ddSJianbo Liu 				MLX5_SET(fte_match_set_misc, misc_v,
1559699e96ddSJianbo Liu 					 outer_second_svlan_tag, 1);
1560699e96ddSJianbo Liu 			} else {
1561699e96ddSJianbo Liu 				MLX5_SET(fte_match_set_misc, misc_c,
1562699e96ddSJianbo Liu 					 outer_second_cvlan_tag, 1);
1563699e96ddSJianbo Liu 				MLX5_SET(fte_match_set_misc, misc_v,
1564699e96ddSJianbo Liu 					 outer_second_cvlan_tag, 1);
1565699e96ddSJianbo Liu 			}
1566699e96ddSJianbo Liu 
1567699e96ddSJianbo Liu 			MLX5_SET(fte_match_set_misc, misc_c, outer_second_vid,
1568699e96ddSJianbo Liu 				 mask->vlan_id);
1569699e96ddSJianbo Liu 			MLX5_SET(fte_match_set_misc, misc_v, outer_second_vid,
1570699e96ddSJianbo Liu 				 key->vlan_id);
1571699e96ddSJianbo Liu 			MLX5_SET(fte_match_set_misc, misc_c, outer_second_prio,
1572699e96ddSJianbo Liu 				 mask->vlan_priority);
1573699e96ddSJianbo Liu 			MLX5_SET(fte_match_set_misc, misc_v, outer_second_prio,
1574699e96ddSJianbo Liu 				 key->vlan_priority);
1575699e96ddSJianbo Liu 
1576699e96ddSJianbo Liu 			*match_level = MLX5_MATCH_L2;
1577699e96ddSJianbo Liu 		}
1578699e96ddSJianbo Liu 	}
1579699e96ddSJianbo Liu 
1580d3a80bb5SOr Gerlitz 	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
1581d3a80bb5SOr Gerlitz 		struct flow_dissector_key_eth_addrs *key =
158254782900SOr Gerlitz 			skb_flow_dissector_target(f->dissector,
1583d3a80bb5SOr Gerlitz 						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
158454782900SOr Gerlitz 						  f->key);
1585d3a80bb5SOr Gerlitz 		struct flow_dissector_key_eth_addrs *mask =
158654782900SOr Gerlitz 			skb_flow_dissector_target(f->dissector,
1587d3a80bb5SOr Gerlitz 						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
158854782900SOr Gerlitz 						  f->mask);
158954782900SOr Gerlitz 
1590d3a80bb5SOr Gerlitz 		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
1591d3a80bb5SOr Gerlitz 					     dmac_47_16),
1592d3a80bb5SOr Gerlitz 				mask->dst);
1593d3a80bb5SOr Gerlitz 		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1594d3a80bb5SOr Gerlitz 					     dmac_47_16),
1595d3a80bb5SOr Gerlitz 				key->dst);
1596d3a80bb5SOr Gerlitz 
1597d3a80bb5SOr Gerlitz 		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
1598d3a80bb5SOr Gerlitz 					     smac_47_16),
1599d3a80bb5SOr Gerlitz 				mask->src);
1600d3a80bb5SOr Gerlitz 		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1601d3a80bb5SOr Gerlitz 					     smac_47_16),
1602d3a80bb5SOr Gerlitz 				key->src);
1603d3a80bb5SOr Gerlitz 
1604d3a80bb5SOr Gerlitz 		if (!is_zero_ether_addr(mask->src) || !is_zero_ether_addr(mask->dst))
1605d708f902SOr Gerlitz 			*match_level = MLX5_MATCH_L2;
160654782900SOr Gerlitz 	}
160754782900SOr Gerlitz 
160854782900SOr Gerlitz 	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
160954782900SOr Gerlitz 		struct flow_dissector_key_control *key =
161054782900SOr Gerlitz 			skb_flow_dissector_target(f->dissector,
161154782900SOr Gerlitz 						  FLOW_DISSECTOR_KEY_CONTROL,
161254782900SOr Gerlitz 						  f->key);
161354782900SOr Gerlitz 
161454782900SOr Gerlitz 		struct flow_dissector_key_control *mask =
161554782900SOr Gerlitz 			skb_flow_dissector_target(f->dissector,
161654782900SOr Gerlitz 						  FLOW_DISSECTOR_KEY_CONTROL,
161754782900SOr Gerlitz 						  f->mask);
161854782900SOr Gerlitz 		addr_type = key->addr_type;
161954782900SOr Gerlitz 
162054782900SOr Gerlitz 		/* the HW doesn't support frag first/later */
162154782900SOr Gerlitz 		if (mask->flags & FLOW_DIS_FIRST_FRAG)
162254782900SOr Gerlitz 			return -EOPNOTSUPP;
162354782900SOr Gerlitz 
162454782900SOr Gerlitz 		if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
162554782900SOr Gerlitz 			MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
162654782900SOr Gerlitz 			MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
162754782900SOr Gerlitz 				 key->flags & FLOW_DIS_IS_FRAGMENT);
162854782900SOr Gerlitz 
162954782900SOr Gerlitz 			/* the HW doesn't need L3 inline to match on frag=no */
163054782900SOr Gerlitz 			if (!(key->flags & FLOW_DIS_IS_FRAGMENT))
163183621b7dSOr Gerlitz 				*match_level = MLX5_MATCH_L2;
163254782900SOr Gerlitz 	/* ***  L2 attributes parsing up to here *** */
163354782900SOr Gerlitz 			else
163483621b7dSOr Gerlitz 				*match_level = MLX5_MATCH_L3;
163554782900SOr Gerlitz 		}
163654782900SOr Gerlitz 	}
163754782900SOr Gerlitz 
163854782900SOr Gerlitz 	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
163954782900SOr Gerlitz 		struct flow_dissector_key_basic *key =
164054782900SOr Gerlitz 			skb_flow_dissector_target(f->dissector,
164154782900SOr Gerlitz 						  FLOW_DISSECTOR_KEY_BASIC,
164254782900SOr Gerlitz 						  f->key);
164354782900SOr Gerlitz 		struct flow_dissector_key_basic *mask =
164454782900SOr Gerlitz 			skb_flow_dissector_target(f->dissector,
164554782900SOr Gerlitz 						  FLOW_DISSECTOR_KEY_BASIC,
164654782900SOr Gerlitz 						  f->mask);
164754782900SOr Gerlitz 		ip_proto = key->ip_proto;
164854782900SOr Gerlitz 
164954782900SOr Gerlitz 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
165054782900SOr Gerlitz 			 mask->ip_proto);
165154782900SOr Gerlitz 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
165254782900SOr Gerlitz 			 key->ip_proto);
165354782900SOr Gerlitz 
165454782900SOr Gerlitz 		if (mask->ip_proto)
1655d708f902SOr Gerlitz 			*match_level = MLX5_MATCH_L3;
165654782900SOr Gerlitz 	}
165754782900SOr Gerlitz 
1658e3a2b7edSAmir Vadai 	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
1659e3a2b7edSAmir Vadai 		struct flow_dissector_key_ipv4_addrs *key =
1660e3a2b7edSAmir Vadai 			skb_flow_dissector_target(f->dissector,
1661e3a2b7edSAmir Vadai 						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
1662e3a2b7edSAmir Vadai 						  f->key);
1663e3a2b7edSAmir Vadai 		struct flow_dissector_key_ipv4_addrs *mask =
1664e3a2b7edSAmir Vadai 			skb_flow_dissector_target(f->dissector,
1665e3a2b7edSAmir Vadai 						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
1666e3a2b7edSAmir Vadai 						  f->mask);
1667e3a2b7edSAmir Vadai 
1668e3a2b7edSAmir Vadai 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
1669e3a2b7edSAmir Vadai 				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
1670e3a2b7edSAmir Vadai 		       &mask->src, sizeof(mask->src));
1671e3a2b7edSAmir Vadai 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1672e3a2b7edSAmir Vadai 				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
1673e3a2b7edSAmir Vadai 		       &key->src, sizeof(key->src));
1674e3a2b7edSAmir Vadai 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
1675e3a2b7edSAmir Vadai 				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
1676e3a2b7edSAmir Vadai 		       &mask->dst, sizeof(mask->dst));
1677e3a2b7edSAmir Vadai 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1678e3a2b7edSAmir Vadai 				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
1679e3a2b7edSAmir Vadai 		       &key->dst, sizeof(key->dst));
1680de0af0bfSRoi Dayan 
1681de0af0bfSRoi Dayan 		if (mask->src || mask->dst)
1682d708f902SOr Gerlitz 			*match_level = MLX5_MATCH_L3;
1683e3a2b7edSAmir Vadai 	}
1684e3a2b7edSAmir Vadai 
1685e3a2b7edSAmir Vadai 	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
1686e3a2b7edSAmir Vadai 		struct flow_dissector_key_ipv6_addrs *key =
1687e3a2b7edSAmir Vadai 			skb_flow_dissector_target(f->dissector,
1688e3a2b7edSAmir Vadai 						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
1689e3a2b7edSAmir Vadai 						  f->key);
1690e3a2b7edSAmir Vadai 		struct flow_dissector_key_ipv6_addrs *mask =
1691e3a2b7edSAmir Vadai 			skb_flow_dissector_target(f->dissector,
1692e3a2b7edSAmir Vadai 						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
1693e3a2b7edSAmir Vadai 						  f->mask);
1694e3a2b7edSAmir Vadai 
1695e3a2b7edSAmir Vadai 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
1696e3a2b7edSAmir Vadai 				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
1697e3a2b7edSAmir Vadai 		       &mask->src, sizeof(mask->src));
1698e3a2b7edSAmir Vadai 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1699e3a2b7edSAmir Vadai 				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
1700e3a2b7edSAmir Vadai 		       &key->src, sizeof(key->src));
1701e3a2b7edSAmir Vadai 
1702e3a2b7edSAmir Vadai 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
1703e3a2b7edSAmir Vadai 				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
1704e3a2b7edSAmir Vadai 		       &mask->dst, sizeof(mask->dst));
1705e3a2b7edSAmir Vadai 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1706e3a2b7edSAmir Vadai 				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
1707e3a2b7edSAmir Vadai 		       &key->dst, sizeof(key->dst));
1708de0af0bfSRoi Dayan 
1709de0af0bfSRoi Dayan 		if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY ||
1710de0af0bfSRoi Dayan 		    ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY)
1711d708f902SOr Gerlitz 			*match_level = MLX5_MATCH_L3;
1712e3a2b7edSAmir Vadai 	}
1713e3a2b7edSAmir Vadai 
17141f97a526SOr Gerlitz 	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IP)) {
17151f97a526SOr Gerlitz 		struct flow_dissector_key_ip *key =
17161f97a526SOr Gerlitz 			skb_flow_dissector_target(f->dissector,
17171f97a526SOr Gerlitz 						  FLOW_DISSECTOR_KEY_IP,
17181f97a526SOr Gerlitz 						  f->key);
17191f97a526SOr Gerlitz 		struct flow_dissector_key_ip *mask =
17201f97a526SOr Gerlitz 			skb_flow_dissector_target(f->dissector,
17211f97a526SOr Gerlitz 						  FLOW_DISSECTOR_KEY_IP,
17221f97a526SOr Gerlitz 						  f->mask);
17231f97a526SOr Gerlitz 
17241f97a526SOr Gerlitz 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, mask->tos & 0x3);
17251f97a526SOr Gerlitz 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, key->tos & 0x3);
17261f97a526SOr Gerlitz 
17271f97a526SOr Gerlitz 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, mask->tos >> 2);
17281f97a526SOr Gerlitz 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, key->tos  >> 2);
17291f97a526SOr Gerlitz 
1730a8ade55fSOr Gerlitz 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, mask->ttl);
1731a8ade55fSOr Gerlitz 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, key->ttl);
17321f97a526SOr Gerlitz 
1733a8ade55fSOr Gerlitz 		if (mask->ttl &&
1734a8ade55fSOr Gerlitz 		    !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
1735e98bedf5SEli Britstein 						ft_field_support.outer_ipv4_ttl)) {
1736e98bedf5SEli Britstein 			NL_SET_ERR_MSG_MOD(extack,
1737e98bedf5SEli Britstein 					   "Matching on TTL is not supported");
17381f97a526SOr Gerlitz 			return -EOPNOTSUPP;
1739e98bedf5SEli Britstein 		}
1740a8ade55fSOr Gerlitz 
1741a8ade55fSOr Gerlitz 		if (mask->tos || mask->ttl)
1742d708f902SOr Gerlitz 			*match_level = MLX5_MATCH_L3;
17431f97a526SOr Gerlitz 	}
17441f97a526SOr Gerlitz 
174554782900SOr Gerlitz 	/* ***  L3 attributes parsing up to here *** */
174654782900SOr Gerlitz 
1747e3a2b7edSAmir Vadai 	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
1748e3a2b7edSAmir Vadai 		struct flow_dissector_key_ports *key =
1749e3a2b7edSAmir Vadai 			skb_flow_dissector_target(f->dissector,
1750e3a2b7edSAmir Vadai 						  FLOW_DISSECTOR_KEY_PORTS,
1751e3a2b7edSAmir Vadai 						  f->key);
1752e3a2b7edSAmir Vadai 		struct flow_dissector_key_ports *mask =
1753e3a2b7edSAmir Vadai 			skb_flow_dissector_target(f->dissector,
1754e3a2b7edSAmir Vadai 						  FLOW_DISSECTOR_KEY_PORTS,
1755e3a2b7edSAmir Vadai 						  f->mask);
1756e3a2b7edSAmir Vadai 		switch (ip_proto) {
1757e3a2b7edSAmir Vadai 		case IPPROTO_TCP:
1758e3a2b7edSAmir Vadai 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
1759e3a2b7edSAmir Vadai 				 tcp_sport, ntohs(mask->src));
1760e3a2b7edSAmir Vadai 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
1761e3a2b7edSAmir Vadai 				 tcp_sport, ntohs(key->src));
1762e3a2b7edSAmir Vadai 
1763e3a2b7edSAmir Vadai 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
1764e3a2b7edSAmir Vadai 				 tcp_dport, ntohs(mask->dst));
1765e3a2b7edSAmir Vadai 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
1766e3a2b7edSAmir Vadai 				 tcp_dport, ntohs(key->dst));
1767e3a2b7edSAmir Vadai 			break;
1768e3a2b7edSAmir Vadai 
1769e3a2b7edSAmir Vadai 		case IPPROTO_UDP:
1770e3a2b7edSAmir Vadai 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
1771e3a2b7edSAmir Vadai 				 udp_sport, ntohs(mask->src));
1772e3a2b7edSAmir Vadai 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
1773e3a2b7edSAmir Vadai 				 udp_sport, ntohs(key->src));
1774e3a2b7edSAmir Vadai 
1775e3a2b7edSAmir Vadai 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
1776e3a2b7edSAmir Vadai 				 udp_dport, ntohs(mask->dst));
1777e3a2b7edSAmir Vadai 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
1778e3a2b7edSAmir Vadai 				 udp_dport, ntohs(key->dst));
1779e3a2b7edSAmir Vadai 			break;
1780e3a2b7edSAmir Vadai 		default:
1781e98bedf5SEli Britstein 			NL_SET_ERR_MSG_MOD(extack,
1782e98bedf5SEli Britstein 					   "Only UDP and TCP transports are supported for L4 matching");
1783e3a2b7edSAmir Vadai 			netdev_err(priv->netdev,
1784e3a2b7edSAmir Vadai 				   "Only UDP and TCP transports are supported\n");
1785e3a2b7edSAmir Vadai 			return -EINVAL;
1786e3a2b7edSAmir Vadai 		}
1787de0af0bfSRoi Dayan 
1788de0af0bfSRoi Dayan 		if (mask->src || mask->dst)
1789d708f902SOr Gerlitz 			*match_level = MLX5_MATCH_L4;
1790e3a2b7edSAmir Vadai 	}
1791e3a2b7edSAmir Vadai 
1792e77834ecSOr Gerlitz 	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_TCP)) {
1793e77834ecSOr Gerlitz 		struct flow_dissector_key_tcp *key =
1794e77834ecSOr Gerlitz 			skb_flow_dissector_target(f->dissector,
1795e77834ecSOr Gerlitz 						  FLOW_DISSECTOR_KEY_TCP,
1796e77834ecSOr Gerlitz 						  f->key);
1797e77834ecSOr Gerlitz 		struct flow_dissector_key_tcp *mask =
1798e77834ecSOr Gerlitz 			skb_flow_dissector_target(f->dissector,
1799e77834ecSOr Gerlitz 						  FLOW_DISSECTOR_KEY_TCP,
1800e77834ecSOr Gerlitz 						  f->mask);
1801e77834ecSOr Gerlitz 
1802e77834ecSOr Gerlitz 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
1803e77834ecSOr Gerlitz 			 ntohs(mask->flags));
1804e77834ecSOr Gerlitz 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
1805e77834ecSOr Gerlitz 			 ntohs(key->flags));
1806e77834ecSOr Gerlitz 
1807e77834ecSOr Gerlitz 		if (mask->flags)
1808d708f902SOr Gerlitz 			*match_level = MLX5_MATCH_L4;
1809e77834ecSOr Gerlitz 	}
1810e77834ecSOr Gerlitz 
1811e3a2b7edSAmir Vadai 	return 0;
1812e3a2b7edSAmir Vadai }
1813e3a2b7edSAmir Vadai 
1814de0af0bfSRoi Dayan static int parse_cls_flower(struct mlx5e_priv *priv,
181565ba8fb7SOr Gerlitz 			    struct mlx5e_tc_flow *flow,
1816de0af0bfSRoi Dayan 			    struct mlx5_flow_spec *spec,
181754c177caSOz Shlomo 			    struct tc_cls_flower_offload *f,
181854c177caSOz Shlomo 			    struct net_device *filter_dev)
1819de0af0bfSRoi Dayan {
1820e98bedf5SEli Britstein 	struct netlink_ext_ack *extack = f->common.extack;
1821de0af0bfSRoi Dayan 	struct mlx5_core_dev *dev = priv->mdev;
1822de0af0bfSRoi Dayan 	struct mlx5_eswitch *esw = dev->priv.eswitch;
18231d447a39SSaeed Mahameed 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
18241d447a39SSaeed Mahameed 	struct mlx5_eswitch_rep *rep;
1825d708f902SOr Gerlitz 	u8 match_level;
1826de0af0bfSRoi Dayan 	int err;
1827de0af0bfSRoi Dayan 
182854c177caSOz Shlomo 	err = __parse_cls_flower(priv, spec, f, filter_dev, &match_level);
1829de0af0bfSRoi Dayan 
18301d447a39SSaeed Mahameed 	if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) {
18311d447a39SSaeed Mahameed 		rep = rpriv->rep;
18321d447a39SSaeed Mahameed 		if (rep->vport != FDB_UPLINK_VPORT &&
18331d447a39SSaeed Mahameed 		    (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
1834d708f902SOr Gerlitz 		    esw->offloads.inline_mode < match_level)) {
1835e98bedf5SEli Britstein 			NL_SET_ERR_MSG_MOD(extack,
1836e98bedf5SEli Britstein 					   "Flow is not offloaded due to min inline setting");
1837de0af0bfSRoi Dayan 			netdev_warn(priv->netdev,
1838de0af0bfSRoi Dayan 				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
1839d708f902SOr Gerlitz 				    match_level, esw->offloads.inline_mode);
1840de0af0bfSRoi Dayan 			return -EOPNOTSUPP;
1841de0af0bfSRoi Dayan 		}
1842de0af0bfSRoi Dayan 	}
1843de0af0bfSRoi Dayan 
184438aa51c1SOr Gerlitz 	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
184538aa51c1SOr Gerlitz 		flow->esw_attr->match_level = match_level;
184638aa51c1SOr Gerlitz 	else
184738aa51c1SOr Gerlitz 		flow->nic_attr->match_level = match_level;
184838aa51c1SOr Gerlitz 
1849de0af0bfSRoi Dayan 	return err;
1850de0af0bfSRoi Dayan }
1851de0af0bfSRoi Dayan 
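/* Scratch copy of every header pedit can touch. TC pedit keys address a
 * (header type, offset) pair, so the masks/values of the SET and ADD commands
 * are first accumulated into these shadow headers and only then translated
 * into HW modify-header actions.
 */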
1852d79b6df6SOr Gerlitz struct pedit_headers {
1853d79b6df6SOr Gerlitz 	struct ethhdr  eth;
1854d79b6df6SOr Gerlitz 	struct iphdr   ip4;
1855d79b6df6SOr Gerlitz 	struct ipv6hdr ip6;
1856d79b6df6SOr Gerlitz 	struct tcphdr  tcp;
1857d79b6df6SOr Gerlitz 	struct udphdr  udp;
1858d79b6df6SOr Gerlitz };
1859d79b6df6SOr Gerlitz 
1860d79b6df6SOr Gerlitz static int pedit_header_offsets[] = {
1861d79b6df6SOr Gerlitz 	[TCA_PEDIT_KEY_EX_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
1862d79b6df6SOr Gerlitz 	[TCA_PEDIT_KEY_EX_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
1863d79b6df6SOr Gerlitz 	[TCA_PEDIT_KEY_EX_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
1864d79b6df6SOr Gerlitz 	[TCA_PEDIT_KEY_EX_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
1865d79b6df6SOr Gerlitz 	[TCA_PEDIT_KEY_EX_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
1866d79b6df6SOr Gerlitz };
1867d79b6df6SOr Gerlitz 
1868d79b6df6SOr Gerlitz #define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])
1869d79b6df6SOr Gerlitz 
1870d79b6df6SOr Gerlitz static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
1871d79b6df6SOr Gerlitz 			 struct pedit_headers *masks,
1872d79b6df6SOr Gerlitz 			 struct pedit_headers *vals)
1873d79b6df6SOr Gerlitz {
1874d79b6df6SOr Gerlitz 	u32 *curr_pmask, *curr_pval;
1875d79b6df6SOr Gerlitz 
1876d79b6df6SOr Gerlitz 	if (hdr_type >= __PEDIT_HDR_TYPE_MAX)
1877d79b6df6SOr Gerlitz 		goto out_err;
1878d79b6df6SOr Gerlitz 
1879d79b6df6SOr Gerlitz 	curr_pmask = (u32 *)(pedit_header(masks, hdr_type) + offset);
1880d79b6df6SOr Gerlitz 	curr_pval  = (u32 *)(pedit_header(vals, hdr_type) + offset);
1881d79b6df6SOr Gerlitz 
1882d79b6df6SOr Gerlitz 	if (*curr_pmask & mask)  /* disallow acting twice on the same location */
1883d79b6df6SOr Gerlitz 		goto out_err;
1884d79b6df6SOr Gerlitz 
1885d79b6df6SOr Gerlitz 	*curr_pmask |= mask;
1886d79b6df6SOr Gerlitz 	*curr_pval  |= (val & mask);
1887d79b6df6SOr Gerlitz 
1888d79b6df6SOr Gerlitz 	return 0;
1889d79b6df6SOr Gerlitz 
1890d79b6df6SOr Gerlitz out_err:
1891d79b6df6SOr Gerlitz 	return -EOPNOTSUPP;
1892d79b6df6SOr Gerlitz }
1893d79b6df6SOr Gerlitz 
1894d79b6df6SOr Gerlitz struct mlx5_fields {
1895d79b6df6SOr Gerlitz 	u8  field;
1896d79b6df6SOr Gerlitz 	u8  size;
1897d79b6df6SOr Gerlitz 	u32 offset;
1898d79b6df6SOr Gerlitz };
1899d79b6df6SOr Gerlitz 
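/* Map between the fields the HW modify-header action can rewrite and their
 * location inside struct pedit_headers: FW field id, size in bytes and byte
 * offset. E.g. OFFLOAD(SIPV4, 4, ip4.saddr, 0) lets a pedit write to the IPv4
 * source address be offloaded as a SIPV4 rewrite.
 */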
1900a8e4f0c4SOr Gerlitz #define OFFLOAD(fw_field, size, field, off) \
1901a8e4f0c4SOr Gerlitz 		{MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, size, offsetof(struct pedit_headers, field) + (off)}
1902a8e4f0c4SOr Gerlitz 
1903d79b6df6SOr Gerlitz static struct mlx5_fields fields[] = {
1904a8e4f0c4SOr Gerlitz 	OFFLOAD(DMAC_47_16, 4, eth.h_dest[0], 0),
1905a8e4f0c4SOr Gerlitz 	OFFLOAD(DMAC_15_0,  2, eth.h_dest[4], 0),
1906a8e4f0c4SOr Gerlitz 	OFFLOAD(SMAC_47_16, 4, eth.h_source[0], 0),
1907a8e4f0c4SOr Gerlitz 	OFFLOAD(SMAC_15_0,  2, eth.h_source[4], 0),
1908a8e4f0c4SOr Gerlitz 	OFFLOAD(ETHERTYPE,  2, eth.h_proto, 0),
1909d79b6df6SOr Gerlitz 
1910a8e4f0c4SOr Gerlitz 	OFFLOAD(IP_TTL, 1, ip4.ttl,   0),
1911a8e4f0c4SOr Gerlitz 	OFFLOAD(SIPV4,  4, ip4.saddr, 0),
1912a8e4f0c4SOr Gerlitz 	OFFLOAD(DIPV4,  4, ip4.daddr, 0),
1913d79b6df6SOr Gerlitz 
1914a8e4f0c4SOr Gerlitz 	OFFLOAD(SIPV6_127_96, 4, ip6.saddr.s6_addr32[0], 0),
1915a8e4f0c4SOr Gerlitz 	OFFLOAD(SIPV6_95_64,  4, ip6.saddr.s6_addr32[1], 0),
1916a8e4f0c4SOr Gerlitz 	OFFLOAD(SIPV6_63_32,  4, ip6.saddr.s6_addr32[2], 0),
1917a8e4f0c4SOr Gerlitz 	OFFLOAD(SIPV6_31_0,   4, ip6.saddr.s6_addr32[3], 0),
1918a8e4f0c4SOr Gerlitz 	OFFLOAD(DIPV6_127_96, 4, ip6.daddr.s6_addr32[0], 0),
1919a8e4f0c4SOr Gerlitz 	OFFLOAD(DIPV6_95_64,  4, ip6.daddr.s6_addr32[1], 0),
1920a8e4f0c4SOr Gerlitz 	OFFLOAD(DIPV6_63_32,  4, ip6.daddr.s6_addr32[2], 0),
1921a8e4f0c4SOr Gerlitz 	OFFLOAD(DIPV6_31_0,   4, ip6.daddr.s6_addr32[3], 0),
19220c0316f5SOr Gerlitz 	OFFLOAD(IPV6_HOPLIMIT, 1, ip6.hop_limit, 0),
1923d79b6df6SOr Gerlitz 
1924a8e4f0c4SOr Gerlitz 	OFFLOAD(TCP_SPORT, 2, tcp.source,  0),
1925a8e4f0c4SOr Gerlitz 	OFFLOAD(TCP_DPORT, 2, tcp.dest,    0),
1926a8e4f0c4SOr Gerlitz 	OFFLOAD(TCP_FLAGS, 1, tcp.ack_seq, 5),
1927d79b6df6SOr Gerlitz 
1928a8e4f0c4SOr Gerlitz 	OFFLOAD(UDP_SPORT, 2, udp.source, 0),
1929a8e4f0c4SOr Gerlitz 	OFFLOAD(UDP_DPORT, 2, udp.dest,   0),
1930d79b6df6SOr Gerlitz };
1931d79b6df6SOr Gerlitz 
1932d79b6df6SOr Gerlitz /* On input, parse_attr->num_mod_hdr_actions tells how many HW actions can at
1933d79b6df6SOr Gerlitz  * most be parsed from the SW pedit action. On success, it holds the number of
1934d79b6df6SOr Gerlitz  * HW actions that were actually parsed.
1935d79b6df6SOr Gerlitz  */
1936d79b6df6SOr Gerlitz static int offload_pedit_fields(struct pedit_headers *masks,
1937d79b6df6SOr Gerlitz 				struct pedit_headers *vals,
1938e98bedf5SEli Britstein 				struct mlx5e_tc_flow_parse_attr *parse_attr,
1939e98bedf5SEli Britstein 				struct netlink_ext_ack *extack)
1940d79b6df6SOr Gerlitz {
1941d79b6df6SOr Gerlitz 	struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
19422b64bebaSOr Gerlitz 	int i, action_size, nactions, max_actions, first, last, next_z;
1943d79b6df6SOr Gerlitz 	void *s_masks_p, *a_masks_p, *vals_p;
1944d79b6df6SOr Gerlitz 	struct mlx5_fields *f;
1945d79b6df6SOr Gerlitz 	u8 cmd, field_bsize;
1946e3ca4e05SOr Gerlitz 	u32 s_mask, a_mask;
1947d79b6df6SOr Gerlitz 	unsigned long mask;
19482b64bebaSOr Gerlitz 	__be32 mask_be32;
19492b64bebaSOr Gerlitz 	__be16 mask_be16;
1950d79b6df6SOr Gerlitz 	void *action;
1951d79b6df6SOr Gerlitz 
1952d79b6df6SOr Gerlitz 	set_masks = &masks[TCA_PEDIT_KEY_EX_CMD_SET];
1953d79b6df6SOr Gerlitz 	add_masks = &masks[TCA_PEDIT_KEY_EX_CMD_ADD];
1954d79b6df6SOr Gerlitz 	set_vals = &vals[TCA_PEDIT_KEY_EX_CMD_SET];
1955d79b6df6SOr Gerlitz 	add_vals = &vals[TCA_PEDIT_KEY_EX_CMD_ADD];
1956d79b6df6SOr Gerlitz 
1957d79b6df6SOr Gerlitz 	action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
1958d79b6df6SOr Gerlitz 	action = parse_attr->mod_hdr_actions;
1959d79b6df6SOr Gerlitz 	max_actions = parse_attr->num_mod_hdr_actions;
1960d79b6df6SOr Gerlitz 	nactions = 0;
1961d79b6df6SOr Gerlitz 
1962d79b6df6SOr Gerlitz 	for (i = 0; i < ARRAY_SIZE(fields); i++) {
1963d79b6df6SOr Gerlitz 		f = &fields[i];
1964d79b6df6SOr Gerlitz 		/* avoid seeing bits set from previous iterations */
1965e3ca4e05SOr Gerlitz 		s_mask = 0;
1966e3ca4e05SOr Gerlitz 		a_mask = 0;
1967d79b6df6SOr Gerlitz 
1968d79b6df6SOr Gerlitz 		s_masks_p = (void *)set_masks + f->offset;
1969d79b6df6SOr Gerlitz 		a_masks_p = (void *)add_masks + f->offset;
1970d79b6df6SOr Gerlitz 
1971d79b6df6SOr Gerlitz 		memcpy(&s_mask, s_masks_p, f->size);
1972d79b6df6SOr Gerlitz 		memcpy(&a_mask, a_masks_p, f->size);
1973d79b6df6SOr Gerlitz 
1974d79b6df6SOr Gerlitz 		if (!s_mask && !a_mask) /* nothing to offload here */
1975d79b6df6SOr Gerlitz 			continue;
1976d79b6df6SOr Gerlitz 
1977d79b6df6SOr Gerlitz 		if (s_mask && a_mask) {
1978e98bedf5SEli Britstein 			NL_SET_ERR_MSG_MOD(extack,
1979e98bedf5SEli Britstein 					   "can't set and add to the same HW field");
1980d79b6df6SOr Gerlitz 			printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field);
1981d79b6df6SOr Gerlitz 			return -EOPNOTSUPP;
1982d79b6df6SOr Gerlitz 		}
1983d79b6df6SOr Gerlitz 
1984d79b6df6SOr Gerlitz 		if (nactions == max_actions) {
1985e98bedf5SEli Britstein 			NL_SET_ERR_MSG_MOD(extack,
1986e98bedf5SEli Britstein 					   "too many pedit actions, can't offload");
1987d79b6df6SOr Gerlitz 			printk(KERN_WARNING "mlx5: parsed %d pedit actions, can't do more\n", nactions);
1988d79b6df6SOr Gerlitz 			return -EOPNOTSUPP;
1989d79b6df6SOr Gerlitz 		}
1990d79b6df6SOr Gerlitz 
1991d79b6df6SOr Gerlitz 		if (s_mask) {
1992d79b6df6SOr Gerlitz 			cmd  = MLX5_ACTION_TYPE_SET;
1993d79b6df6SOr Gerlitz 			mask = s_mask;
1994d79b6df6SOr Gerlitz 			vals_p = (void *)set_vals + f->offset;
1995d79b6df6SOr Gerlitz 			/* clear to denote we consumed this field */
1996d79b6df6SOr Gerlitz 			memset(s_masks_p, 0, f->size);
1997d79b6df6SOr Gerlitz 		} else {
1998d79b6df6SOr Gerlitz 			cmd  = MLX5_ACTION_TYPE_ADD;
1999d79b6df6SOr Gerlitz 			mask = a_mask;
2000d79b6df6SOr Gerlitz 			vals_p = (void *)add_vals + f->offset;
2001d79b6df6SOr Gerlitz 			/* clear to denote we consumed this field */
2002d79b6df6SOr Gerlitz 			memset(a_masks_p, 0, f->size);
2003d79b6df6SOr Gerlitz 		}
2004d79b6df6SOr Gerlitz 
2005d79b6df6SOr Gerlitz 		field_bsize = f->size * BITS_PER_BYTE;
2006e3ca4e05SOr Gerlitz 
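		/* the masks were copied byte-wise from the (network byte order)
		 * shadow headers; byte swap 16/32 bit fields so the bit scans
		 * below compute the sub-field offset and length correctly
		 */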
20072b64bebaSOr Gerlitz 		if (field_bsize == 32) {
20082b64bebaSOr Gerlitz 			mask_be32 = *(__be32 *)&mask;
20092b64bebaSOr Gerlitz 			mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
20102b64bebaSOr Gerlitz 		} else if (field_bsize == 16) {
20112b64bebaSOr Gerlitz 			mask_be16 = *(__be16 *)&mask;
20122b64bebaSOr Gerlitz 			mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
20132b64bebaSOr Gerlitz 		}
20142b64bebaSOr Gerlitz 
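		/* a single HW action rewrites one contiguous run of bits
		 * (offset + length); a mask such as 0x0ff0 (one run) is fine,
		 * while one with a hole such as 0xff0f (two runs) cannot be
		 * expressed by a single action and is rejected below
		 */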
2015d79b6df6SOr Gerlitz 		first = find_first_bit(&mask, field_bsize);
20162b64bebaSOr Gerlitz 		next_z = find_next_zero_bit(&mask, field_bsize, first);
2017d79b6df6SOr Gerlitz 		last  = find_last_bit(&mask, field_bsize);
20182b64bebaSOr Gerlitz 		if (first < next_z && next_z < last) {
2019e98bedf5SEli Britstein 			NL_SET_ERR_MSG_MOD(extack,
2020e98bedf5SEli Britstein 					   "rewrite of a few sub-fields isn't supported");
20212b64bebaSOr Gerlitz 			printk(KERN_WARNING "mlx5: rewrite of a few sub-fields (mask %lx) isn't offloaded\n",
2022d79b6df6SOr Gerlitz 			       mask);
2023d79b6df6SOr Gerlitz 			return -EOPNOTSUPP;
2024d79b6df6SOr Gerlitz 		}
2025d79b6df6SOr Gerlitz 
2026d79b6df6SOr Gerlitz 		MLX5_SET(set_action_in, action, action_type, cmd);
2027d79b6df6SOr Gerlitz 		MLX5_SET(set_action_in, action, field, f->field);
2028d79b6df6SOr Gerlitz 
2029d79b6df6SOr Gerlitz 		if (cmd == MLX5_ACTION_TYPE_SET) {
20302b64bebaSOr Gerlitz 			MLX5_SET(set_action_in, action, offset, first);
2031d79b6df6SOr Gerlitz 			/* length is num of bits to be written, zero means length of 32 */
20322b64bebaSOr Gerlitz 			MLX5_SET(set_action_in, action, length, (last - first + 1));
2033d79b6df6SOr Gerlitz 		}
2034d79b6df6SOr Gerlitz 
2035d79b6df6SOr Gerlitz 		if (field_bsize == 32)
20362b64bebaSOr Gerlitz 			MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p) >> first);
2037d79b6df6SOr Gerlitz 		else if (field_bsize == 16)
20382b64bebaSOr Gerlitz 			MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p) >> first);
2039d79b6df6SOr Gerlitz 		else if (field_bsize == 8)
20402b64bebaSOr Gerlitz 			MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first);
2041d79b6df6SOr Gerlitz 
2042d79b6df6SOr Gerlitz 		action += action_size;
2043d79b6df6SOr Gerlitz 		nactions++;
2044d79b6df6SOr Gerlitz 	}
2045d79b6df6SOr Gerlitz 
2046d79b6df6SOr Gerlitz 	parse_attr->num_mod_hdr_actions = nactions;
2047d79b6df6SOr Gerlitz 	return 0;
2048d79b6df6SOr Gerlitz }
2049d79b6df6SOr Gerlitz 
2050d79b6df6SOr Gerlitz static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
2051d79b6df6SOr Gerlitz 				 const struct tc_action *a, int namespace,
2052d79b6df6SOr Gerlitz 				 struct mlx5e_tc_flow_parse_attr *parse_attr)
2053d79b6df6SOr Gerlitz {
2054d79b6df6SOr Gerlitz 	int nkeys, action_size, max_actions;
2055d79b6df6SOr Gerlitz 
2056d79b6df6SOr Gerlitz 	nkeys = tcf_pedit_nkeys(a);
2057d79b6df6SOr Gerlitz 	action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
2058d79b6df6SOr Gerlitz 
2059d79b6df6SOr Gerlitz 	if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
2060d79b6df6SOr Gerlitz 		max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, max_modify_header_actions);
2061d79b6df6SOr Gerlitz 	else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
2062d79b6df6SOr Gerlitz 		max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, max_modify_header_actions);
2063d79b6df6SOr Gerlitz 
2064d79b6df6SOr Gerlitz 	/* a single 32 bit pedit SW key can yield up to (a crazy) 16 HW actions */
2065d79b6df6SOr Gerlitz 	max_actions = min(max_actions, nkeys * 16);
2066d79b6df6SOr Gerlitz 
2067d79b6df6SOr Gerlitz 	parse_attr->mod_hdr_actions = kcalloc(max_actions, action_size, GFP_KERNEL);
2068d79b6df6SOr Gerlitz 	if (!parse_attr->mod_hdr_actions)
2069d79b6df6SOr Gerlitz 		return -ENOMEM;
2070d79b6df6SOr Gerlitz 
2071d79b6df6SOr Gerlitz 	parse_attr->num_mod_hdr_actions = max_actions;
2072d79b6df6SOr Gerlitz 	return 0;
2073d79b6df6SOr Gerlitz }
2074d79b6df6SOr Gerlitz 
2075d79b6df6SOr Gerlitz static const struct pedit_headers zero_masks = {};
2076d79b6df6SOr Gerlitz 
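/* Parse a TC pedit action in two steps: first accumulate every pedit key into
 * the per-command (SET/ADD) shadow headers, then translate them into HW
 * modify-header actions. Any mask bits left over after the translation belong
 * to a field the HW cannot rewrite, and the action is rejected.
 */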
2077d79b6df6SOr Gerlitz static int parse_tc_pedit_action(struct mlx5e_priv *priv,
2078d79b6df6SOr Gerlitz 				 const struct tc_action *a, int namespace,
2079e98bedf5SEli Britstein 				 struct mlx5e_tc_flow_parse_attr *parse_attr,
2080e98bedf5SEli Britstein 				 struct netlink_ext_ack *extack)
2081d79b6df6SOr Gerlitz {
2082d79b6df6SOr Gerlitz 	struct pedit_headers masks[__PEDIT_CMD_MAX], vals[__PEDIT_CMD_MAX], *cmd_masks;
2083d79b6df6SOr Gerlitz 	int nkeys, i, err = -EOPNOTSUPP;
2084d79b6df6SOr Gerlitz 	u32 mask, val, offset;
2085d79b6df6SOr Gerlitz 	u8 cmd, htype;
2086d79b6df6SOr Gerlitz 
2087d79b6df6SOr Gerlitz 	nkeys = tcf_pedit_nkeys(a);
2088d79b6df6SOr Gerlitz 
2089d79b6df6SOr Gerlitz 	memset(masks, 0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);
2090d79b6df6SOr Gerlitz 	memset(vals,  0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);
2091d79b6df6SOr Gerlitz 
2092d79b6df6SOr Gerlitz 	for (i = 0; i < nkeys; i++) {
2093d79b6df6SOr Gerlitz 		htype = tcf_pedit_htype(a, i);
2094d79b6df6SOr Gerlitz 		cmd = tcf_pedit_cmd(a, i);
2095d79b6df6SOr Gerlitz 		err = -EOPNOTSUPP; /* can't be all optimistic */
2096d79b6df6SOr Gerlitz 
2097d79b6df6SOr Gerlitz 		if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK) {
2098e98bedf5SEli Britstein 			NL_SET_ERR_MSG_MOD(extack,
2099e98bedf5SEli Britstein 					   "legacy pedit isn't offloaded");
2100d79b6df6SOr Gerlitz 			goto out_err;
2101d79b6df6SOr Gerlitz 		}
2102d79b6df6SOr Gerlitz 
2103d79b6df6SOr Gerlitz 		if (cmd != TCA_PEDIT_KEY_EX_CMD_SET && cmd != TCA_PEDIT_KEY_EX_CMD_ADD) {
2104e98bedf5SEli Britstein 			NL_SET_ERR_MSG_MOD(extack, "pedit cmd isn't offloaded");
2105d79b6df6SOr Gerlitz 			goto out_err;
2106d79b6df6SOr Gerlitz 		}
2107d79b6df6SOr Gerlitz 
2108d79b6df6SOr Gerlitz 		mask = tcf_pedit_mask(a, i);
2109d79b6df6SOr Gerlitz 		val = tcf_pedit_val(a, i);
2110d79b6df6SOr Gerlitz 		offset = tcf_pedit_offset(a, i);
2111d79b6df6SOr Gerlitz 
2112d79b6df6SOr Gerlitz 		err = set_pedit_val(htype, ~mask, val, offset, &masks[cmd], &vals[cmd]);
2113d79b6df6SOr Gerlitz 		if (err)
2114d79b6df6SOr Gerlitz 			goto out_err;
2115d79b6df6SOr Gerlitz 	}
2116d79b6df6SOr Gerlitz 
2117d79b6df6SOr Gerlitz 	err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr);
2118d79b6df6SOr Gerlitz 	if (err)
2119d79b6df6SOr Gerlitz 		goto out_err;
2120d79b6df6SOr Gerlitz 
2121e98bedf5SEli Britstein 	err = offload_pedit_fields(masks, vals, parse_attr, extack);
2122d79b6df6SOr Gerlitz 	if (err < 0)
2123d79b6df6SOr Gerlitz 		goto out_dealloc_parsed_actions;
2124d79b6df6SOr Gerlitz 
2125d79b6df6SOr Gerlitz 	for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
2126d79b6df6SOr Gerlitz 		cmd_masks = &masks[cmd];
2127d79b6df6SOr Gerlitz 		if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
2128e98bedf5SEli Britstein 			NL_SET_ERR_MSG_MOD(extack,
2129e98bedf5SEli Britstein 					   "attempt to offload an unsupported field");
2130b3a433deSOr Gerlitz 			netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd);
2131d79b6df6SOr Gerlitz 			print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
2132d79b6df6SOr Gerlitz 				       16, 1, cmd_masks, sizeof(zero_masks), true);
2133d79b6df6SOr Gerlitz 			err = -EOPNOTSUPP;
2134d79b6df6SOr Gerlitz 			goto out_dealloc_parsed_actions;
2135d79b6df6SOr Gerlitz 		}
2136d79b6df6SOr Gerlitz 	}
2137d79b6df6SOr Gerlitz 
2138d79b6df6SOr Gerlitz 	return 0;
2139d79b6df6SOr Gerlitz 
2140d79b6df6SOr Gerlitz out_dealloc_parsed_actions:
2141d79b6df6SOr Gerlitz 	kfree(parse_attr->mod_hdr_actions);
2142d79b6df6SOr Gerlitz out_err:
2143d79b6df6SOr Gerlitz 	return err;
2144d79b6df6SOr Gerlitz }
2145d79b6df6SOr Gerlitz 
2146e98bedf5SEli Britstein static bool csum_offload_supported(struct mlx5e_priv *priv,
2147e98bedf5SEli Britstein 				   u32 action,
2148e98bedf5SEli Britstein 				   u32 update_flags,
2149e98bedf5SEli Britstein 				   struct netlink_ext_ack *extack)
215026c02749SOr Gerlitz {
215126c02749SOr Gerlitz 	u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
215226c02749SOr Gerlitz 			 TCA_CSUM_UPDATE_FLAG_UDP;
215326c02749SOr Gerlitz 
215426c02749SOr Gerlitz 	/* The HW recalculates checksums only when re-writing headers */
215526c02749SOr Gerlitz 	if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
2156e98bedf5SEli Britstein 		NL_SET_ERR_MSG_MOD(extack,
2157e98bedf5SEli Britstein 				   "TC csum action is only offloaded with pedit");
215826c02749SOr Gerlitz 		netdev_warn(priv->netdev,
215926c02749SOr Gerlitz 			    "TC csum action is only offloaded with pedit\n");
216026c02749SOr Gerlitz 		return false;
216126c02749SOr Gerlitz 	}
216226c02749SOr Gerlitz 
216326c02749SOr Gerlitz 	if (update_flags & ~prot_flags) {
2164e98bedf5SEli Britstein 		NL_SET_ERR_MSG_MOD(extack,
2165e98bedf5SEli Britstein 				   "can't offload TC csum action for some header/s");
216626c02749SOr Gerlitz 		netdev_warn(priv->netdev,
216726c02749SOr Gerlitz 			    "can't offload TC csum action for some header/s - flags %#x\n",
216826c02749SOr Gerlitz 			    update_flags);
216926c02749SOr Gerlitz 		return false;
217026c02749SOr Gerlitz 	}
217126c02749SOr Gerlitz 
217226c02749SOr Gerlitz 	return true;
217326c02749SOr Gerlitz }
217426c02749SOr Gerlitz 
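/* Header re-write is offloaded for this match only when checksums can stay
 * correct: non-IP traffic (MAC re-write only) is always fine, while IPv4/IPv6
 * header modifications are allowed only for TCP, UDP and ICMP flows.
 */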
2175bdd66ac0SOr Gerlitz static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
2176e98bedf5SEli Britstein 					  struct tcf_exts *exts,
2177e98bedf5SEli Britstein 					  struct netlink_ext_ack *extack)
2178bdd66ac0SOr Gerlitz {
2179bdd66ac0SOr Gerlitz 	const struct tc_action *a;
2180bdd66ac0SOr Gerlitz 	bool modify_ip_header;
2181bdd66ac0SOr Gerlitz 	LIST_HEAD(actions);
2182bdd66ac0SOr Gerlitz 	u8 htype, ip_proto;
2183bdd66ac0SOr Gerlitz 	void *headers_v;
2184bdd66ac0SOr Gerlitz 	u16 ethertype;
2185bdd66ac0SOr Gerlitz 	int nkeys, i;
2186bdd66ac0SOr Gerlitz 
2187bdd66ac0SOr Gerlitz 	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
2188bdd66ac0SOr Gerlitz 	ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
2189bdd66ac0SOr Gerlitz 
2190bdd66ac0SOr Gerlitz 	/* for non-IP we only re-write MACs, so we're okay */
2191bdd66ac0SOr Gerlitz 	if (ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)
2192bdd66ac0SOr Gerlitz 		goto out_ok;
2193bdd66ac0SOr Gerlitz 
2194bdd66ac0SOr Gerlitz 	modify_ip_header = false;
2195244cd96aSCong Wang 	tcf_exts_for_each_action(i, a, exts) {
2196244cd96aSCong Wang 		int k;
2197244cd96aSCong Wang 
2198bdd66ac0SOr Gerlitz 		if (!is_tcf_pedit(a))
2199bdd66ac0SOr Gerlitz 			continue;
2200bdd66ac0SOr Gerlitz 
2201bdd66ac0SOr Gerlitz 		nkeys = tcf_pedit_nkeys(a);
2202244cd96aSCong Wang 		for (k = 0; k < nkeys; k++) {
2203244cd96aSCong Wang 			htype = tcf_pedit_htype(a, k);
2204bdd66ac0SOr Gerlitz 			if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 ||
2205bdd66ac0SOr Gerlitz 			    htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP6) {
2206bdd66ac0SOr Gerlitz 				modify_ip_header = true;
2207bdd66ac0SOr Gerlitz 				break;
2208bdd66ac0SOr Gerlitz 			}
2209bdd66ac0SOr Gerlitz 		}
2210bdd66ac0SOr Gerlitz 	}
2211bdd66ac0SOr Gerlitz 
2212bdd66ac0SOr Gerlitz 	ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
22131ccef350SJianbo Liu 	if (modify_ip_header && ip_proto != IPPROTO_TCP &&
22141ccef350SJianbo Liu 	    ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
2215e98bedf5SEli Britstein 		NL_SET_ERR_MSG_MOD(extack,
2216e98bedf5SEli Britstein 				   "can't offload re-write of non TCP/UDP");
2217bdd66ac0SOr Gerlitz 		pr_info("can't offload re-write of ip proto %d\n", ip_proto);
2218bdd66ac0SOr Gerlitz 		return false;
2219bdd66ac0SOr Gerlitz 	}
2220bdd66ac0SOr Gerlitz 
2221bdd66ac0SOr Gerlitz out_ok:
2222bdd66ac0SOr Gerlitz 	return true;
2223bdd66ac0SOr Gerlitz }
2224bdd66ac0SOr Gerlitz 
2225bdd66ac0SOr Gerlitz static bool actions_match_supported(struct mlx5e_priv *priv,
2226bdd66ac0SOr Gerlitz 				    struct tcf_exts *exts,
2227bdd66ac0SOr Gerlitz 				    struct mlx5e_tc_flow_parse_attr *parse_attr,
2228e98bedf5SEli Britstein 				    struct mlx5e_tc_flow *flow,
2229e98bedf5SEli Britstein 				    struct netlink_ext_ack *extack)
2230bdd66ac0SOr Gerlitz {
2231bdd66ac0SOr Gerlitz 	u32 actions;
2232bdd66ac0SOr Gerlitz 
2233bdd66ac0SOr Gerlitz 	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
2234bdd66ac0SOr Gerlitz 		actions = flow->esw_attr->action;
2235bdd66ac0SOr Gerlitz 	else
2236bdd66ac0SOr Gerlitz 		actions = flow->nic_attr->action;
2237bdd66ac0SOr Gerlitz 
22387e29392eSRoi Dayan 	if (flow->flags & MLX5E_TC_FLOW_EGRESS &&
22397e29392eSRoi Dayan 	    !(actions & MLX5_FLOW_CONTEXT_ACTION_DECAP))
22407e29392eSRoi Dayan 		return false;
22417e29392eSRoi Dayan 
2242bdd66ac0SOr Gerlitz 	if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
2243e98bedf5SEli Britstein 		return modify_header_match_supported(&parse_attr->spec, exts,
2244e98bedf5SEli Britstein 						     extack);
2245bdd66ac0SOr Gerlitz 
2246bdd66ac0SOr Gerlitz 	return true;
2247bdd66ac0SOr Gerlitz }
2248bdd66ac0SOr Gerlitz 
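/* Two mlx5 netdevs are considered to be on the same HW when their NIC system
 * image GUIDs match; used by the hairpin and merged-eswitch checks below.
 */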
22495c65c564SOr Gerlitz static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
22505c65c564SOr Gerlitz {
22515c65c564SOr Gerlitz 	struct mlx5_core_dev *fmdev, *pmdev;
2252816f6706SOr Gerlitz 	u64 fsystem_guid, psystem_guid;
22535c65c564SOr Gerlitz 
22545c65c564SOr Gerlitz 	fmdev = priv->mdev;
22555c65c564SOr Gerlitz 	pmdev = peer_priv->mdev;
22565c65c564SOr Gerlitz 
225759c9d35eSAlaa Hleihel 	fsystem_guid = mlx5_query_nic_system_image_guid(fmdev);
225859c9d35eSAlaa Hleihel 	psystem_guid = mlx5_query_nic_system_image_guid(pmdev);
22595c65c564SOr Gerlitz 
2260816f6706SOr Gerlitz 	return (fsystem_guid == psystem_guid);
22615c65c564SOr Gerlitz }
22625c65c564SOr Gerlitz 
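/* Translate the TC actions of a NIC (non-eswitch) flow into mlx5 flow context
 * actions: drop, header re-write (pedit), checksum, hairpin forwarding to a
 * peer device on the same HW and skbedit mark.
 */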
22635c40348cSOr Gerlitz static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
2264aa0cbbaeSOr Gerlitz 				struct mlx5e_tc_flow_parse_attr *parse_attr,
2265e98bedf5SEli Britstein 				struct mlx5e_tc_flow *flow,
2266e98bedf5SEli Britstein 				struct netlink_ext_ack *extack)
2267e3a2b7edSAmir Vadai {
2268aa0cbbaeSOr Gerlitz 	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
2269e3a2b7edSAmir Vadai 	const struct tc_action *a;
227022dc13c8SWANG Cong 	LIST_HEAD(actions);
22711cab1cd7SOr Gerlitz 	u32 action = 0;
2272244cd96aSCong Wang 	int err, i;
2273e3a2b7edSAmir Vadai 
22743bcc0cecSJiri Pirko 	if (!tcf_exts_has_actions(exts))
2275e3a2b7edSAmir Vadai 		return -EINVAL;
2276e3a2b7edSAmir Vadai 
22773bc4b7bfSOr Gerlitz 	attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
2278e3a2b7edSAmir Vadai 
2279244cd96aSCong Wang 	tcf_exts_for_each_action(i, a, exts) {
2280e3a2b7edSAmir Vadai 		if (is_tcf_gact_shot(a)) {
22811cab1cd7SOr Gerlitz 			action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
2282aad7e08dSAmir Vadai 			if (MLX5_CAP_FLOWTABLE(priv->mdev,
2283aad7e08dSAmir Vadai 					       flow_table_properties_nic_receive.flow_counter))
22841cab1cd7SOr Gerlitz 				action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
2285e3a2b7edSAmir Vadai 			continue;
2286e3a2b7edSAmir Vadai 		}
2287e3a2b7edSAmir Vadai 
22882f4fe4caSOr Gerlitz 		if (is_tcf_pedit(a)) {
22892f4fe4caSOr Gerlitz 			err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_KERNEL,
2290e98bedf5SEli Britstein 						    parse_attr, extack);
22912f4fe4caSOr Gerlitz 			if (err)
22922f4fe4caSOr Gerlitz 				return err;
22932f4fe4caSOr Gerlitz 
22941cab1cd7SOr Gerlitz 			action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
22952f4fe4caSOr Gerlitz 				  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
22962f4fe4caSOr Gerlitz 			continue;
22972f4fe4caSOr Gerlitz 		}
22982f4fe4caSOr Gerlitz 
229926c02749SOr Gerlitz 		if (is_tcf_csum(a)) {
23001cab1cd7SOr Gerlitz 			if (csum_offload_supported(priv, action,
2301e98bedf5SEli Britstein 						   tcf_csum_update_flags(a),
2302e98bedf5SEli Britstein 						   extack))
230326c02749SOr Gerlitz 				continue;
230426c02749SOr Gerlitz 
230526c02749SOr Gerlitz 			return -EOPNOTSUPP;
230626c02749SOr Gerlitz 		}
230726c02749SOr Gerlitz 
23085c65c564SOr Gerlitz 		if (is_tcf_mirred_egress_redirect(a)) {
23095c65c564SOr Gerlitz 			struct net_device *peer_dev = tcf_mirred_dev(a);
23105c65c564SOr Gerlitz 
23115c65c564SOr Gerlitz 			if (priv->netdev->netdev_ops == peer_dev->netdev_ops &&
23125c65c564SOr Gerlitz 			    same_hw_devs(priv, netdev_priv(peer_dev))) {
23135c65c564SOr Gerlitz 				parse_attr->mirred_ifindex = peer_dev->ifindex;
23145c65c564SOr Gerlitz 				flow->flags |= MLX5E_TC_FLOW_HAIRPIN;
23151cab1cd7SOr Gerlitz 				action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
23165c65c564SOr Gerlitz 					  MLX5_FLOW_CONTEXT_ACTION_COUNT;
23175c65c564SOr Gerlitz 			} else {
2318e98bedf5SEli Britstein 				NL_SET_ERR_MSG_MOD(extack,
2319e98bedf5SEli Britstein 						   "device is not on same HW, can't offload");
23205c65c564SOr Gerlitz 				netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
23215c65c564SOr Gerlitz 					    peer_dev->name);
23225c65c564SOr Gerlitz 				return -EINVAL;
23235c65c564SOr Gerlitz 			}
23245c65c564SOr Gerlitz 			continue;
23255c65c564SOr Gerlitz 		}
23265c65c564SOr Gerlitz 
2327e3a2b7edSAmir Vadai 		if (is_tcf_skbedit_mark(a)) {
2328e3a2b7edSAmir Vadai 			u32 mark = tcf_skbedit_mark(a);
2329e3a2b7edSAmir Vadai 
2330e3a2b7edSAmir Vadai 			if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
2331e98bedf5SEli Britstein 				NL_SET_ERR_MSG_MOD(extack,
2332e98bedf5SEli Britstein 						   "Bad flow mark - only 16 bit is supported");
2333e3a2b7edSAmir Vadai 				return -EINVAL;
2334e3a2b7edSAmir Vadai 			}
2335e3a2b7edSAmir Vadai 
23363bc4b7bfSOr Gerlitz 			attr->flow_tag = mark;
23371cab1cd7SOr Gerlitz 			action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
2338e3a2b7edSAmir Vadai 			continue;
2339e3a2b7edSAmir Vadai 		}
2340e3a2b7edSAmir Vadai 
2341e3a2b7edSAmir Vadai 		return -EINVAL;
2342e3a2b7edSAmir Vadai 	}
2343e3a2b7edSAmir Vadai 
23441cab1cd7SOr Gerlitz 	attr->action = action;
2345e98bedf5SEli Britstein 	if (!actions_match_supported(priv, exts, parse_attr, flow, extack))
2346bdd66ac0SOr Gerlitz 		return -EOPNOTSUPP;
2347bdd66ac0SOr Gerlitz 
2348e3a2b7edSAmir Vadai 	return 0;
2349e3a2b7edSAmir Vadai }
2350e3a2b7edSAmir Vadai 
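/* Encap entries are shared between flows with identical tunnel parameters;
 * the full ip_tunnel_key serves both as the hash key and for equality.
 */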
235176f7444dSOr Gerlitz static inline int cmp_encap_info(struct ip_tunnel_key *a,
235276f7444dSOr Gerlitz 				 struct ip_tunnel_key *b)
2353a54e20b4SHadar Hen Zion {
2354a54e20b4SHadar Hen Zion 	return memcmp(a, b, sizeof(*a));
2355a54e20b4SHadar Hen Zion }
2356a54e20b4SHadar Hen Zion 
235776f7444dSOr Gerlitz static inline int hash_encap_info(struct ip_tunnel_key *key)
2358a54e20b4SHadar Hen Zion {
235976f7444dSOr Gerlitz 	return jhash(key, sizeof(*key), 0);
2360a54e20b4SHadar Hen Zion }
2361a54e20b4SHadar Hen Zion 
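/* Resolve the route towards the tunnel destination: the egress netdev
 * (falling back to the uplink representor when the route leaves this
 * eswitch), the TTL when not set by the user, and the next-hop neighbour.
 */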
2362a54e20b4SHadar Hen Zion static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
2363a54e20b4SHadar Hen Zion 				   struct net_device *mirred_dev,
2364a54e20b4SHadar Hen Zion 				   struct net_device **out_dev,
2365a54e20b4SHadar Hen Zion 				   struct flowi4 *fl4,
2366a54e20b4SHadar Hen Zion 				   struct neighbour **out_n,
23676360cd62SOr Gerlitz 				   u8 *out_ttl)
2368a54e20b4SHadar Hen Zion {
23693e621b19SHadar Hen Zion 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
23705ed99fb4SMark Bloch 	struct mlx5e_rep_priv *uplink_rpriv;
2371a54e20b4SHadar Hen Zion 	struct rtable *rt;
2372a54e20b4SHadar Hen Zion 	struct neighbour *n = NULL;
2373a54e20b4SHadar Hen Zion 
2374a54e20b4SHadar Hen Zion #if IS_ENABLED(CONFIG_INET)
2375abeffce9SArnd Bergmann 	int ret;
2376abeffce9SArnd Bergmann 
2377a54e20b4SHadar Hen Zion 	rt = ip_route_output_key(dev_net(mirred_dev), fl4);
2378abeffce9SArnd Bergmann 	ret = PTR_ERR_OR_ZERO(rt);
2379abeffce9SArnd Bergmann 	if (ret)
2380abeffce9SArnd Bergmann 		return ret;
2381a54e20b4SHadar Hen Zion #else
2382a54e20b4SHadar Hen Zion 	return -EOPNOTSUPP;
2383a54e20b4SHadar Hen Zion #endif
2384a4b97ab4SMark Bloch 	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
23853e621b19SHadar Hen Zion 	/* if the egress device isn't on the same HW e-switch, we use the uplink */
23863e621b19SHadar Hen Zion 	if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
23875ed99fb4SMark Bloch 		*out_dev = uplink_rpriv->netdev;
23883e621b19SHadar Hen Zion 	else
23893e621b19SHadar Hen Zion 		*out_dev = rt->dst.dev;
2390a54e20b4SHadar Hen Zion 
23916360cd62SOr Gerlitz 	if (!(*out_ttl))
239275c33da8SOr Gerlitz 		*out_ttl = ip4_dst_hoplimit(&rt->dst);
2393a54e20b4SHadar Hen Zion 	n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
2394a54e20b4SHadar Hen Zion 	ip_rt_put(rt);
2395a54e20b4SHadar Hen Zion 	if (!n)
2396a54e20b4SHadar Hen Zion 		return -ENOMEM;
2397a54e20b4SHadar Hen Zion 
2398a54e20b4SHadar Hen Zion 	*out_n = n;
2399a54e20b4SHadar Hen Zion 	return 0;
2400a54e20b4SHadar Hen Zion }
2401a54e20b4SHadar Hen Zion 
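/* In the merged eswitch case, traffic can be forwarded between representors
 * of two PFs on the same HW, provided the peer is in switchdev (SRIOV
 * offloads) mode and the device advertises the merged_eswitch capability.
 */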
2402b1d90e6bSRabie Loulou static bool is_merged_eswitch_dev(struct mlx5e_priv *priv,
2403b1d90e6bSRabie Loulou 				  struct net_device *peer_netdev)
2404b1d90e6bSRabie Loulou {
2405b1d90e6bSRabie Loulou 	struct mlx5e_priv *peer_priv;
2406b1d90e6bSRabie Loulou 
2407b1d90e6bSRabie Loulou 	peer_priv = netdev_priv(peer_netdev);
2408b1d90e6bSRabie Loulou 
2409b1d90e6bSRabie Loulou 	return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) &&
2410b1d90e6bSRabie Loulou 		(priv->netdev->netdev_ops == peer_netdev->netdev_ops) &&
2411b1d90e6bSRabie Loulou 		same_hw_devs(priv, peer_priv) &&
2412b1d90e6bSRabie Loulou 		MLX5_VPORT_MANAGER(peer_priv->mdev) &&
2413b1d90e6bSRabie Loulou 		(peer_priv->mdev->priv.eswitch->mode == SRIOV_OFFLOADS));
2414b1d90e6bSRabie Loulou }
2415b1d90e6bSRabie Loulou 
2416ce99f6b9SOr Gerlitz static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
2417ce99f6b9SOr Gerlitz 				   struct net_device *mirred_dev,
2418ce99f6b9SOr Gerlitz 				   struct net_device **out_dev,
2419ce99f6b9SOr Gerlitz 				   struct flowi6 *fl6,
2420ce99f6b9SOr Gerlitz 				   struct neighbour **out_n,
24216360cd62SOr Gerlitz 				   u8 *out_ttl)
2422ce99f6b9SOr Gerlitz {
2423ce99f6b9SOr Gerlitz 	struct neighbour *n = NULL;
2424ce99f6b9SOr Gerlitz 	struct dst_entry *dst;
2425ce99f6b9SOr Gerlitz 
2426ce99f6b9SOr Gerlitz #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
242774bd5d56SArnd Bergmann 	struct mlx5e_rep_priv *uplink_rpriv;
2428ce99f6b9SOr Gerlitz 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
2429ce99f6b9SOr Gerlitz 	int ret;
2430ce99f6b9SOr Gerlitz 
243108820528SPaul Blakey 	ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst,
243208820528SPaul Blakey 					 fl6);
243308820528SPaul Blakey 	if (ret < 0)
2434ce99f6b9SOr Gerlitz 		return ret;
2435ce99f6b9SOr Gerlitz 
24366360cd62SOr Gerlitz 	if (!(*out_ttl))
2437ce99f6b9SOr Gerlitz 		*out_ttl = ip6_dst_hoplimit(dst);
2438ce99f6b9SOr Gerlitz 
2439a4b97ab4SMark Bloch 	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
2440ce99f6b9SOr Gerlitz 	/* if the egress device isn't on the same HW e-switch, we use the uplink */
2441ce99f6b9SOr Gerlitz 	if (!switchdev_port_same_parent_id(priv->netdev, dst->dev))
24425ed99fb4SMark Bloch 		*out_dev = uplink_rpriv->netdev;
2443ce99f6b9SOr Gerlitz 	else
2444ce99f6b9SOr Gerlitz 		*out_dev = dst->dev;
2445ce99f6b9SOr Gerlitz #else
2446ce99f6b9SOr Gerlitz 	return -EOPNOTSUPP;
2447ce99f6b9SOr Gerlitz #endif
2448ce99f6b9SOr Gerlitz 
2449ce99f6b9SOr Gerlitz 	n = dst_neigh_lookup(dst, &fl6->daddr);
2450ce99f6b9SOr Gerlitz 	dst_release(dst);
2451ce99f6b9SOr Gerlitz 	if (!n)
2452ce99f6b9SOr Gerlitz 		return -ENOMEM;
2453ce99f6b9SOr Gerlitz 
2454ce99f6b9SOr Gerlitz 	*out_n = n;
2455ce99f6b9SOr Gerlitz 	return 0;
2456ce99f6b9SOr Gerlitz }
2457ce99f6b9SOr Gerlitz 
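/* Write the UDP destination port and the VXLAN header (flags + VNI) of the
 * encapsulation into the pre-built headers buffer.
 */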
2458ea7162acSOz Shlomo static int mlx5e_gen_vxlan_header(char buf[], struct ip_tunnel_key *tun_key)
2459a54e20b4SHadar Hen Zion {
2460ea7162acSOz Shlomo 	__be32 tun_id = tunnel_id_to_key32(tun_key->tun_id);
2461ea7162acSOz Shlomo 	struct udphdr *udp = (struct udphdr *)(buf);
2462ea7162acSOz Shlomo 	struct vxlanhdr *vxh = (struct vxlanhdr *)
2463ea7162acSOz Shlomo 			       ((char *)udp + sizeof(struct udphdr));
2464a54e20b4SHadar Hen Zion 
2465ea7162acSOz Shlomo 	udp->dest = tun_key->tp_dst;
2466a54e20b4SHadar Hen Zion 	vxh->vx_flags = VXLAN_HF_VNI;
2467ea7162acSOz Shlomo 	vxh->vx_vni = vxlan_vni_field(tun_id);
2468ea7162acSOz Shlomo 
2469ea7162acSOz Shlomo 	return 0;
2470a54e20b4SHadar Hen Zion }
2471a54e20b4SHadar Hen Zion 
2472ea7162acSOz Shlomo static int mlx5e_gen_ip_tunnel_header(char buf[], __u8 *ip_proto,
247354c177caSOz Shlomo 				      struct mlx5e_encap_entry *e)
2474ce99f6b9SOr Gerlitz {
247554c177caSOz Shlomo 	int err = 0;
247654c177caSOz Shlomo 	struct ip_tunnel_key *key = &e->tun_info.key;
247754c177caSOz Shlomo 
247854c177caSOz Shlomo 	if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
2479ea7162acSOz Shlomo 		*ip_proto = IPPROTO_UDP;
248054c177caSOz Shlomo 		err = mlx5e_gen_vxlan_header(buf, key);
248154c177caSOz Shlomo 	} else {
248254c177caSOz Shlomo 		pr_warn("mlx5: Cannot generate tunnel header for tunnel type (%d)\n",
248354c177caSOz Shlomo 			e->tunnel_type);
248454c177caSOz Shlomo 		err = -EOPNOTSUPP;
248554c177caSOz Shlomo 	}
248654c177caSOz Shlomo 
248754c177caSOz Shlomo 	return err;
2488ce99f6b9SOr Gerlitz }
2489ce99f6b9SOr Gerlitz 
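/* Build the complete IPv4 encapsulation header (Ethernet + IPv4 + tunnel),
 * attach the encap entry to the representor's neigh update machinery and,
 * once the neighbour is valid, allocate the packet reformat context in FW.
 * Returns -EAGAIN while the neighbour is still unresolved.
 */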
2490a54e20b4SHadar Hen Zion static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
2491a54e20b4SHadar Hen Zion 					  struct net_device *mirred_dev,
24921a8552bdSHadar Hen Zion 					  struct mlx5e_encap_entry *e)
2493a54e20b4SHadar Hen Zion {
2494a54e20b4SHadar Hen Zion 	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
2495ea7162acSOz Shlomo 	int ipv4_encap_size = ETH_HLEN +
2496ea7162acSOz Shlomo 			      sizeof(struct iphdr) +
249754c177caSOz Shlomo 			      e->tunnel_hlen;
249876f7444dSOr Gerlitz 	struct ip_tunnel_key *tun_key = &e->tun_info.key;
24991a8552bdSHadar Hen Zion 	struct net_device *out_dev;
2500a42485ebSOr Gerlitz 	struct neighbour *n = NULL;
2501a54e20b4SHadar Hen Zion 	struct flowi4 fl4 = {};
2502a54e20b4SHadar Hen Zion 	char *encap_header;
2503ea7162acSOz Shlomo 	struct ethhdr *eth;
2504ea7162acSOz Shlomo 	u8 nud_state, ttl;
2505ea7162acSOz Shlomo 	struct iphdr *ip;
25066360cd62SOr Gerlitz 	int err;
2507a54e20b4SHadar Hen Zion 
250832f3671fSOr Gerlitz 	if (max_encap_size < ipv4_encap_size) {
250932f3671fSOr Gerlitz 		mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
251032f3671fSOr Gerlitz 			       ipv4_encap_size, max_encap_size);
251132f3671fSOr Gerlitz 		return -EOPNOTSUPP;
251232f3671fSOr Gerlitz 	}
251332f3671fSOr Gerlitz 
251432f3671fSOr Gerlitz 	encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL);
2515a54e20b4SHadar Hen Zion 	if (!encap_header)
2516a54e20b4SHadar Hen Zion 		return -ENOMEM;
2517a54e20b4SHadar Hen Zion 
2518ea7162acSOz Shlomo 	/* add the IP fields */
25199a941117SOr Gerlitz 	fl4.flowi4_tos = tun_key->tos;
252076f7444dSOr Gerlitz 	fl4.daddr = tun_key->u.ipv4.dst;
25219a941117SOr Gerlitz 	fl4.saddr = tun_key->u.ipv4.src;
2522ea7162acSOz Shlomo 	ttl = tun_key->ttl;
2523a54e20b4SHadar Hen Zion 
25241a8552bdSHadar Hen Zion 	err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev,
25259a941117SOr Gerlitz 				      &fl4, &n, &ttl);
2526a54e20b4SHadar Hen Zion 	if (err)
2527ace74321SPaul Blakey 		goto free_encap;
2528a54e20b4SHadar Hen Zion 
2529232c0013SHadar Hen Zion 	/* used by mlx5e_detach_encap to look up the neigh hash table
2530232c0013SHadar Hen Zion 	 * entry when a user deletes a rule
2531232c0013SHadar Hen Zion 	 */
2532232c0013SHadar Hen Zion 	e->m_neigh.dev = n->dev;
2533f6dfb4c3SHadar Hen Zion 	e->m_neigh.family = n->ops->family;
2534232c0013SHadar Hen Zion 	memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
2535232c0013SHadar Hen Zion 	e->out_dev = out_dev;
2536232c0013SHadar Hen Zion 
2537ea7162acSOz Shlomo 	/* It's important to add the neigh to the hash table before checking
2538232c0013SHadar Hen Zion 	 * the neigh validity state, so that if we get a notification when the
2539232c0013SHadar Hen Zion 	 * neigh changes its validity state, we can find the relevant neigh
2540232c0013SHadar Hen Zion 	 * in the hash.
2541232c0013SHadar Hen Zion 	 */
2542232c0013SHadar Hen Zion 	err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
2543232c0013SHadar Hen Zion 	if (err)
2544ace74321SPaul Blakey 		goto free_encap;
2545232c0013SHadar Hen Zion 
2546033354d5SHadar Hen Zion 	read_lock_bh(&n->lock);
2547033354d5SHadar Hen Zion 	nud_state = n->nud_state;
2548033354d5SHadar Hen Zion 	ether_addr_copy(e->h_dest, n->ha);
2549033354d5SHadar Hen Zion 	read_unlock_bh(&n->lock);
2550033354d5SHadar Hen Zion 
2551ea7162acSOz Shlomo 	/* add ethernet header */
2552ea7162acSOz Shlomo 	eth = (struct ethhdr *)encap_header;
2553ea7162acSOz Shlomo 	ether_addr_copy(eth->h_dest, e->h_dest);
2554ea7162acSOz Shlomo 	ether_addr_copy(eth->h_source, out_dev->dev_addr);
2555ea7162acSOz Shlomo 	eth->h_proto = htons(ETH_P_IP);
2556ea7162acSOz Shlomo 
2557ea7162acSOz Shlomo 	/* add ip header */
2558ea7162acSOz Shlomo 	ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr));
2559ea7162acSOz Shlomo 	ip->tos = tun_key->tos;
2560ea7162acSOz Shlomo 	ip->version = 0x4;
2561ea7162acSOz Shlomo 	ip->ihl = 0x5;
2562ea7162acSOz Shlomo 	ip->ttl = ttl;
2563ea7162acSOz Shlomo 	ip->daddr = fl4.daddr;
2564ea7162acSOz Shlomo 	ip->saddr = fl4.saddr;
2565ea7162acSOz Shlomo 
2566ea7162acSOz Shlomo 	/* add tunneling protocol header */
2567ea7162acSOz Shlomo 	err = mlx5e_gen_ip_tunnel_header((char *)ip + sizeof(struct iphdr),
256854c177caSOz Shlomo 					  &ip->protocol, e);
2569ea7162acSOz Shlomo 	if (err)
2570232c0013SHadar Hen Zion 		goto destroy_neigh_entry;
2571ea7162acSOz Shlomo 
2572232c0013SHadar Hen Zion 	e->encap_size = ipv4_encap_size;
2573232c0013SHadar Hen Zion 	e->encap_header = encap_header;
2574232c0013SHadar Hen Zion 
2575232c0013SHadar Hen Zion 	if (!(nud_state & NUD_VALID)) {
2576232c0013SHadar Hen Zion 		neigh_event_send(n, NULL);
257727902f08SWei Yongjun 		err = -EAGAIN;
257827902f08SWei Yongjun 		goto out;
2579a54e20b4SHadar Hen Zion 	}
2580a54e20b4SHadar Hen Zion 
258154c177caSOz Shlomo 	err = mlx5_packet_reformat_alloc(priv->mdev,
258254c177caSOz Shlomo 					 e->reformat_type,
258360786f09SMark Bloch 					 ipv4_encap_size, encap_header,
258431ca3648SMark Bloch 					 MLX5_FLOW_NAMESPACE_FDB,
258560786f09SMark Bloch 					 &e->encap_id);
2586232c0013SHadar Hen Zion 	if (err)
2587232c0013SHadar Hen Zion 		goto destroy_neigh_entry;
2588232c0013SHadar Hen Zion 
2589232c0013SHadar Hen Zion 	e->flags |= MLX5_ENCAP_ENTRY_VALID;
2590f6dfb4c3SHadar Hen Zion 	mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
2591a42485ebSOr Gerlitz 	neigh_release(n);
2592232c0013SHadar Hen Zion 	return err;
2593232c0013SHadar Hen Zion 
2594232c0013SHadar Hen Zion destroy_neigh_entry:
2595232c0013SHadar Hen Zion 	mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
2596ace74321SPaul Blakey free_encap:
2597a54e20b4SHadar Hen Zion 	kfree(encap_header);
2598ace74321SPaul Blakey out:
2599232c0013SHadar Hen Zion 	if (n)
2600232c0013SHadar Hen Zion 		neigh_release(n);
2601a54e20b4SHadar Hen Zion 	return err;
2602a54e20b4SHadar Hen Zion }
2603a54e20b4SHadar Hen Zion 
2604ce99f6b9SOr Gerlitz static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
2605ce99f6b9SOr Gerlitz 					  struct net_device *mirred_dev,
26061a8552bdSHadar Hen Zion 					  struct mlx5e_encap_entry *e)
2607ce99f6b9SOr Gerlitz {
2608ce99f6b9SOr Gerlitz 	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
2609ea7162acSOz Shlomo 	int ipv6_encap_size = ETH_HLEN +
2610ea7162acSOz Shlomo 			      sizeof(struct ipv6hdr) +
261154c177caSOz Shlomo 			      e->tunnel_hlen;
2612ce99f6b9SOr Gerlitz 	struct ip_tunnel_key *tun_key = &e->tun_info.key;
26131a8552bdSHadar Hen Zion 	struct net_device *out_dev;
2614ce99f6b9SOr Gerlitz 	struct neighbour *n = NULL;
2615ce99f6b9SOr Gerlitz 	struct flowi6 fl6 = {};
2616ea7162acSOz Shlomo 	struct ipv6hdr *ip6h;
2617ce99f6b9SOr Gerlitz 	char *encap_header;
2618ea7162acSOz Shlomo 	struct ethhdr *eth;
2619ea7162acSOz Shlomo 	u8 nud_state, ttl;
26206360cd62SOr Gerlitz 	int err;
2621ce99f6b9SOr Gerlitz 
2622225aabafSOr Gerlitz 	if (max_encap_size < ipv6_encap_size) {
2623225aabafSOr Gerlitz 		mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
2624225aabafSOr Gerlitz 			       ipv6_encap_size, max_encap_size);
2625225aabafSOr Gerlitz 		return -EOPNOTSUPP;
2626225aabafSOr Gerlitz 	}
2627225aabafSOr Gerlitz 
2628225aabafSOr Gerlitz 	encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL);
2629ce99f6b9SOr Gerlitz 	if (!encap_header)
2630ce99f6b9SOr Gerlitz 		return -ENOMEM;
2631ce99f6b9SOr Gerlitz 
2632f35f800dSOr Gerlitz 	ttl = tun_key->ttl;
26336360cd62SOr Gerlitz 
2634ce99f6b9SOr Gerlitz 	fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
2635ce99f6b9SOr Gerlitz 	fl6.daddr = tun_key->u.ipv6.dst;
2636ce99f6b9SOr Gerlitz 	fl6.saddr = tun_key->u.ipv6.src;
2637ce99f6b9SOr Gerlitz 
26381a8552bdSHadar Hen Zion 	err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev,
2639ce99f6b9SOr Gerlitz 				      &fl6, &n, &ttl);
2640ce99f6b9SOr Gerlitz 	if (err)
2641ace74321SPaul Blakey 		goto free_encap;
2642ce99f6b9SOr Gerlitz 
2643232c0013SHadar Hen Zion 	/* used by mlx5e_detach_encap to look up the neigh hash table
2644232c0013SHadar Hen Zion 	 * entry when a user deletes a rule
2645232c0013SHadar Hen Zion 	 */
2646232c0013SHadar Hen Zion 	e->m_neigh.dev = n->dev;
2647f6dfb4c3SHadar Hen Zion 	e->m_neigh.family = n->ops->family;
2648232c0013SHadar Hen Zion 	memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
2649232c0013SHadar Hen Zion 	e->out_dev = out_dev;
2650232c0013SHadar Hen Zion 
2651232c0013SHadar Hen Zion 	/* It's important to add the neigh to the hash table before checking
2652232c0013SHadar Hen Zion 	 * the neigh validity state, so that if we get a notification when the
2653232c0013SHadar Hen Zion 	 * neigh changes its validity state, we can find the relevant neigh
2654232c0013SHadar Hen Zion 	 * in the hash.
2655232c0013SHadar Hen Zion 	 */
2656232c0013SHadar Hen Zion 	err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
2657232c0013SHadar Hen Zion 	if (err)
2658ace74321SPaul Blakey 		goto free_encap;
2659232c0013SHadar Hen Zion 
2660033354d5SHadar Hen Zion 	read_lock_bh(&n->lock);
2661033354d5SHadar Hen Zion 	nud_state = n->nud_state;
2662033354d5SHadar Hen Zion 	ether_addr_copy(e->h_dest, n->ha);
2663033354d5SHadar Hen Zion 	read_unlock_bh(&n->lock);
2664033354d5SHadar Hen Zion 
2665ea7162acSOz Shlomo 	/* add ethernet header */
2666ea7162acSOz Shlomo 	eth = (struct ethhdr *)encap_header;
2667ea7162acSOz Shlomo 	ether_addr_copy(eth->h_dest, e->h_dest);
2668ea7162acSOz Shlomo 	ether_addr_copy(eth->h_source, out_dev->dev_addr);
2669ea7162acSOz Shlomo 	eth->h_proto = htons(ETH_P_IPV6);
2670ea7162acSOz Shlomo 
2671ea7162acSOz Shlomo 	/* add ip header */
2672ea7162acSOz Shlomo 	ip6h = (struct ipv6hdr *)((char *)eth + sizeof(struct ethhdr));
2673ea7162acSOz Shlomo 	ip6_flow_hdr(ip6h, tun_key->tos, 0);
2674ea7162acSOz Shlomo 	/* the HW fills in the IPv6 payload length */
2675ea7162acSOz Shlomo 	ip6h->hop_limit   = ttl;
2676ea7162acSOz Shlomo 	ip6h->daddr	  = fl6.daddr;
2677ea7162acSOz Shlomo 	ip6h->saddr	  = fl6.saddr;
2678ea7162acSOz Shlomo 
2679ea7162acSOz Shlomo 	/* add tunneling protocol header */
2680ea7162acSOz Shlomo 	err = mlx5e_gen_ip_tunnel_header((char *)ip6h + sizeof(struct ipv6hdr),
268154c177caSOz Shlomo 					 &ip6h->nexthdr, e);
2682ea7162acSOz Shlomo 	if (err)
2683232c0013SHadar Hen Zion 		goto destroy_neigh_entry;
2684232c0013SHadar Hen Zion 
2685232c0013SHadar Hen Zion 	e->encap_size = ipv6_encap_size;
2686232c0013SHadar Hen Zion 	e->encap_header = encap_header;
2687232c0013SHadar Hen Zion 
2688232c0013SHadar Hen Zion 	if (!(nud_state & NUD_VALID)) {
2689232c0013SHadar Hen Zion 		neigh_event_send(n, NULL);
269027902f08SWei Yongjun 		err = -EAGAIN;
269127902f08SWei Yongjun 		goto out;
2692ce99f6b9SOr Gerlitz 	}
2693ce99f6b9SOr Gerlitz 
269454c177caSOz Shlomo 	err = mlx5_packet_reformat_alloc(priv->mdev,
269554c177caSOz Shlomo 					 e->reformat_type,
269660786f09SMark Bloch 					 ipv6_encap_size, encap_header,
269731ca3648SMark Bloch 					 MLX5_FLOW_NAMESPACE_FDB,
269860786f09SMark Bloch 					 &e->encap_id);
2699232c0013SHadar Hen Zion 	if (err)
2700232c0013SHadar Hen Zion 		goto destroy_neigh_entry;
2701232c0013SHadar Hen Zion 
2702232c0013SHadar Hen Zion 	e->flags |= MLX5_ENCAP_ENTRY_VALID;
2703f6dfb4c3SHadar Hen Zion 	mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
2704ce99f6b9SOr Gerlitz 	neigh_release(n);
2705232c0013SHadar Hen Zion 	return err;
2706232c0013SHadar Hen Zion 
2707232c0013SHadar Hen Zion destroy_neigh_entry:
2708232c0013SHadar Hen Zion 	mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
2709ace74321SPaul Blakey free_encap:
2710ce99f6b9SOr Gerlitz 	kfree(encap_header);
2711ace74321SPaul Blakey out:
2712232c0013SHadar Hen Zion 	if (n)
2713232c0013SHadar Hen Zion 		neigh_release(n);
2714ce99f6b9SOr Gerlitz 	return err;
2715ce99f6b9SOr Gerlitz }
2716ce99f6b9SOr Gerlitz 
271754c177caSOz Shlomo static int mlx5e_get_tunnel_type(struct net_device *tunnel_dev)
271854c177caSOz Shlomo {
271954c177caSOz Shlomo 	if (netif_is_vxlan(tunnel_dev))
272054c177caSOz Shlomo 		return MLX5E_TC_TUNNEL_TYPE_VXLAN;
272154c177caSOz Shlomo 	else
272254c177caSOz Shlomo 		return MLX5E_TC_TUNNEL_TYPE_UNKNOWN;
272354c177caSOz Shlomo }
272454c177caSOz Shlomo 
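/* A tunnel device can be offloaded only when its tunnel type is recognized
 * (currently VXLAN) and the eswitch supports encap/decap for that type.
 */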
2725f5bc2c5dSOz Shlomo bool mlx5e_tc_tun_device_to_offload(struct mlx5e_priv *priv,
2726f5bc2c5dSOz Shlomo 				    struct net_device *netdev)
2727f5bc2c5dSOz Shlomo {
272854c177caSOz Shlomo 	int tunnel_type = mlx5e_get_tunnel_type(netdev);
272954c177caSOz Shlomo 
273054c177caSOz Shlomo 	if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN &&
2731f5bc2c5dSOz Shlomo 	    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
2732f5bc2c5dSOz Shlomo 		return true;
273354c177caSOz Shlomo 	else
2734f5bc2c5dSOz Shlomo 		return false;
2735f5bc2c5dSOz Shlomo }
2736f5bc2c5dSOz Shlomo 
273754c177caSOz Shlomo static int mlx5e_init_tunnel_attr(struct net_device *tunnel_dev,
273854c177caSOz Shlomo 				  struct mlx5e_priv *priv,
273954c177caSOz Shlomo 				  struct mlx5e_encap_entry *e,
274054c177caSOz Shlomo 				  struct netlink_ext_ack *extack)
274154c177caSOz Shlomo {
274254c177caSOz Shlomo 	e->tunnel_type = mlx5e_get_tunnel_type(tunnel_dev);
274354c177caSOz Shlomo 
274454c177caSOz Shlomo 	if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
274554c177caSOz Shlomo 		int dst_port = be16_to_cpu(e->tun_info.key.tp_dst);
274654c177caSOz Shlomo 
274754c177caSOz Shlomo 		if (!mlx5_vxlan_lookup_port(priv->mdev->vxlan, dst_port)) {
274854c177caSOz Shlomo 			NL_SET_ERR_MSG_MOD(extack,
274954c177caSOz Shlomo 					   "vxlan udp dport was not registered with the HW");
275054c177caSOz Shlomo 			netdev_warn(priv->netdev,
275154c177caSOz Shlomo 				    "%d isn't an offloaded vxlan udp dport\n",
275254c177caSOz Shlomo 				    dst_port);
275354c177caSOz Shlomo 			return -EOPNOTSUPP;
275454c177caSOz Shlomo 		}
275554c177caSOz Shlomo 		e->reformat_type = MLX5_REFORMAT_TYPE_L2_TO_VXLAN;
275654c177caSOz Shlomo 		e->tunnel_hlen = VXLAN_HLEN;
275754c177caSOz Shlomo 	} else {
275854c177caSOz Shlomo 		e->reformat_type = -1;
275954c177caSOz Shlomo 		e->tunnel_hlen = -1;
276054c177caSOz Shlomo 		return -EOPNOTSUPP;
276154c177caSOz Shlomo 	}
276254c177caSOz Shlomo 	return 0;
276354c177caSOz Shlomo }
276454c177caSOz Shlomo 
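/* Look up (or create) the encap entry matching the flow's tunnel key in the
 * eswitch encap table and link the flow to it; -EAGAIN means the entry's
 * neighbour is not resolved yet, so offloading the flow is deferred.
 */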
2765a54e20b4SHadar Hen Zion static int mlx5e_attach_encap(struct mlx5e_priv *priv,
2766a54e20b4SHadar Hen Zion 			      struct ip_tunnel_info *tun_info,
2767a54e20b4SHadar Hen Zion 			      struct net_device *mirred_dev,
276845247bf2SOr Gerlitz 			      struct net_device **encap_dev,
2769e98bedf5SEli Britstein 			      struct mlx5e_tc_flow *flow,
2770e98bedf5SEli Britstein 			      struct netlink_ext_ack *extack)
277103a9d11eSOr Gerlitz {
2772a54e20b4SHadar Hen Zion 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
2773a54e20b4SHadar Hen Zion 	unsigned short family = ip_tunnel_info_af(tun_info);
277445247bf2SOr Gerlitz 	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
2775a54e20b4SHadar Hen Zion 	struct ip_tunnel_key *key = &tun_info->key;
2776c1ae1152SOr Gerlitz 	struct mlx5e_encap_entry *e;
2777a54e20b4SHadar Hen Zion 	uintptr_t hash_key;
2778a54e20b4SHadar Hen Zion 	bool found = false;
277954c177caSOz Shlomo 	int err = 0;
2780a54e20b4SHadar Hen Zion 
278176f7444dSOr Gerlitz 	hash_key = hash_encap_info(key);
2782a54e20b4SHadar Hen Zion 
2783a54e20b4SHadar Hen Zion 	hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
2784a54e20b4SHadar Hen Zion 				   encap_hlist, hash_key) {
278576f7444dSOr Gerlitz 		if (!cmp_encap_info(&e->tun_info.key, key)) {
2786a54e20b4SHadar Hen Zion 			found = true;
2787a54e20b4SHadar Hen Zion 			break;
2788a54e20b4SHadar Hen Zion 		}
2789a54e20b4SHadar Hen Zion 	}
2790a54e20b4SHadar Hen Zion 
2791b2812089SVlad Buslov 	/* must verify if encap is valid or not */
279245247bf2SOr Gerlitz 	if (found)
279345247bf2SOr Gerlitz 		goto attach_flow;
2794a54e20b4SHadar Hen Zion 
2795a54e20b4SHadar Hen Zion 	e = kzalloc(sizeof(*e), GFP_KERNEL);
2796a54e20b4SHadar Hen Zion 	if (!e)
2797a54e20b4SHadar Hen Zion 		return -ENOMEM;
2798a54e20b4SHadar Hen Zion 
279976f7444dSOr Gerlitz 	e->tun_info = *tun_info;
280054c177caSOz Shlomo 	err = mlx5e_init_tunnel_attr(mirred_dev, priv, e, extack);
280154c177caSOz Shlomo 	if (err)
280254c177caSOz Shlomo 		goto out_err;
280354c177caSOz Shlomo 
2804a54e20b4SHadar Hen Zion 	INIT_LIST_HEAD(&e->flows);
2805a54e20b4SHadar Hen Zion 
2806ce99f6b9SOr Gerlitz 	if (family == AF_INET)
28071a8552bdSHadar Hen Zion 		err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e);
2808ce99f6b9SOr Gerlitz 	else if (family == AF_INET6)
28091a8552bdSHadar Hen Zion 		err = mlx5e_create_encap_header_ipv6(priv, mirred_dev, e);
2810ce99f6b9SOr Gerlitz 
2811232c0013SHadar Hen Zion 	if (err && err != -EAGAIN)
2812a54e20b4SHadar Hen Zion 		goto out_err;
2813a54e20b4SHadar Hen Zion 
2814a54e20b4SHadar Hen Zion 	hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
2815a54e20b4SHadar Hen Zion 
281645247bf2SOr Gerlitz attach_flow:
281745247bf2SOr Gerlitz 	list_add(&flow->encap, &e->flows);
281845247bf2SOr Gerlitz 	*encap_dev = e->out_dev;
2819232c0013SHadar Hen Zion 	if (e->flags & MLX5_ENCAP_ENTRY_VALID)
282045247bf2SOr Gerlitz 		attr->encap_id = e->encap_id;
2821b2812089SVlad Buslov 	else
2822b2812089SVlad Buslov 		err = -EAGAIN;
282345247bf2SOr Gerlitz 
2824232c0013SHadar Hen Zion 	return err;
2825a54e20b4SHadar Hen Zion 
2826a54e20b4SHadar Hen Zion out_err:
2827a54e20b4SHadar Hen Zion 	kfree(e);
2828a54e20b4SHadar Hen Zion 	return err;
2829a54e20b4SHadar Hen Zion }
2830a54e20b4SHadar Hen Zion 
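/* Translate a TC vlan push/pop action into the eswitch flow attribute,
 * supporting up to MLX5_FS_VLAN_DEPTH VLAN headers when the device does;
 * TCA_VLAN_ACT_MODIFY is not offloaded.
 */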
28311482bd3dSJianbo Liu static int parse_tc_vlan_action(struct mlx5e_priv *priv,
28321482bd3dSJianbo Liu 				const struct tc_action *a,
28331482bd3dSJianbo Liu 				struct mlx5_esw_flow_attr *attr,
28341482bd3dSJianbo Liu 				u32 *action)
28351482bd3dSJianbo Liu {
2836cc495188SJianbo Liu 	u8 vlan_idx = attr->total_vlan;
2837cc495188SJianbo Liu 
2838cc495188SJianbo Liu 	if (vlan_idx >= MLX5_FS_VLAN_DEPTH)
28391482bd3dSJianbo Liu 		return -EOPNOTSUPP;
2840cc495188SJianbo Liu 
2841cc495188SJianbo Liu 	if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
2842cc495188SJianbo Liu 		if (vlan_idx) {
2843cc495188SJianbo Liu 			if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
2844cc495188SJianbo Liu 								 MLX5_FS_VLAN_DEPTH))
2845cc495188SJianbo Liu 				return -EOPNOTSUPP;
2846cc495188SJianbo Liu 
2847cc495188SJianbo Liu 			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2;
2848cc495188SJianbo Liu 		} else {
2849cc495188SJianbo Liu 			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
2850cc495188SJianbo Liu 		}
2851cc495188SJianbo Liu 	} else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
2852cc495188SJianbo Liu 		attr->vlan_vid[vlan_idx] = tcf_vlan_push_vid(a);
2853cc495188SJianbo Liu 		attr->vlan_prio[vlan_idx] = tcf_vlan_push_prio(a);
2854cc495188SJianbo Liu 		attr->vlan_proto[vlan_idx] = tcf_vlan_push_proto(a);
2855cc495188SJianbo Liu 		if (!attr->vlan_proto[vlan_idx])
2856cc495188SJianbo Liu 			attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q);
2857cc495188SJianbo Liu 
2858cc495188SJianbo Liu 		if (vlan_idx) {
2859cc495188SJianbo Liu 			if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
2860cc495188SJianbo Liu 								 MLX5_FS_VLAN_DEPTH))
2861cc495188SJianbo Liu 				return -EOPNOTSUPP;
2862cc495188SJianbo Liu 
2863cc495188SJianbo Liu 			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
2864cc495188SJianbo Liu 		} else {
2865cc495188SJianbo Liu 			if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) &&
2866cc495188SJianbo Liu 			    (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q) ||
2867cc495188SJianbo Liu 			     tcf_vlan_push_prio(a)))
2868cc495188SJianbo Liu 				return -EOPNOTSUPP;
2869cc495188SJianbo Liu 
2870cc495188SJianbo Liu 			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
28711482bd3dSJianbo Liu 		}
28721482bd3dSJianbo Liu 	} else { /* action is TCA_VLAN_ACT_MODIFY */
28731482bd3dSJianbo Liu 		return -EOPNOTSUPP;
28741482bd3dSJianbo Liu 	}
28751482bd3dSJianbo Liu 
2876cc495188SJianbo Liu 	attr->total_vlan = vlan_idx + 1;
2877cc495188SJianbo Liu 
28781482bd3dSJianbo Liu 	return 0;
28791482bd3dSJianbo Liu }
28801482bd3dSJianbo Liu 
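/* Translate the TC actions of an eswitch (FDB) flow: drop, header re-write,
 * checksum, mirred redirect/mirror to another representor or via tunnel
 * encap, tunnel decap, VLAN push/pop and goto chain.
 */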
2881a54e20b4SHadar Hen Zion static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
2882d7e75a32SOr Gerlitz 				struct mlx5e_tc_flow_parse_attr *parse_attr,
2883e98bedf5SEli Britstein 				struct mlx5e_tc_flow *flow,
2884e98bedf5SEli Britstein 				struct netlink_ext_ack *extack)
2885a54e20b4SHadar Hen Zion {
2886bf07aa73SPaul Blakey 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
2887ecf5bb79SOr Gerlitz 	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
28881d447a39SSaeed Mahameed 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
2889a54e20b4SHadar Hen Zion 	struct ip_tunnel_info *info = NULL;
289003a9d11eSOr Gerlitz 	const struct tc_action *a;
289122dc13c8SWANG Cong 	LIST_HEAD(actions);
2892a54e20b4SHadar Hen Zion 	bool encap = false;
28931cab1cd7SOr Gerlitz 	u32 action = 0;
2894244cd96aSCong Wang 	int err, i;
289503a9d11eSOr Gerlitz 
28963bcc0cecSJiri Pirko 	if (!tcf_exts_has_actions(exts))
289703a9d11eSOr Gerlitz 		return -EINVAL;
289803a9d11eSOr Gerlitz 
28991d447a39SSaeed Mahameed 	attr->in_rep = rpriv->rep;
290010ff5359SShahar Klein 	attr->in_mdev = priv->mdev;
290103a9d11eSOr Gerlitz 
2902244cd96aSCong Wang 	tcf_exts_for_each_action(i, a, exts) {
290303a9d11eSOr Gerlitz 		if (is_tcf_gact_shot(a)) {
29041cab1cd7SOr Gerlitz 			action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
290503a9d11eSOr Gerlitz 				  MLX5_FLOW_CONTEXT_ACTION_COUNT;
290603a9d11eSOr Gerlitz 			continue;
290703a9d11eSOr Gerlitz 		}
290803a9d11eSOr Gerlitz 
2909d7e75a32SOr Gerlitz 		if (is_tcf_pedit(a)) {
2910d7e75a32SOr Gerlitz 			err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_FDB,
2911e98bedf5SEli Britstein 						    parse_attr, extack);
2912d7e75a32SOr Gerlitz 			if (err)
2913d7e75a32SOr Gerlitz 				return err;
2914d7e75a32SOr Gerlitz 
29151cab1cd7SOr Gerlitz 			action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
2916592d3651SChris Mi 			attr->mirror_count = attr->out_count;
2917d7e75a32SOr Gerlitz 			continue;
2918d7e75a32SOr Gerlitz 		}
2919d7e75a32SOr Gerlitz 
292026c02749SOr Gerlitz 		if (is_tcf_csum(a)) {
29211cab1cd7SOr Gerlitz 			if (csum_offload_supported(priv, action,
2922e98bedf5SEli Britstein 						   tcf_csum_update_flags(a),
2923e98bedf5SEli Britstein 						   extack))
292426c02749SOr Gerlitz 				continue;
292526c02749SOr Gerlitz 
292626c02749SOr Gerlitz 			return -EOPNOTSUPP;
292726c02749SOr Gerlitz 		}
292826c02749SOr Gerlitz 
2929592d3651SChris Mi 		if (is_tcf_mirred_egress_redirect(a) || is_tcf_mirred_egress_mirror(a)) {
293003a9d11eSOr Gerlitz 			struct mlx5e_priv *out_priv;
2931592d3651SChris Mi 			struct net_device *out_dev;
293203a9d11eSOr Gerlitz 
29339f8a739eSCong Wang 			out_dev = tcf_mirred_dev(a);
2934ef381359SOz Shlomo 			if (!out_dev) {
2935ef381359SOz Shlomo 				/* out_dev is NULL when filters with
2936ef381359SOz Shlomo 				 * non-existing mirred device are replayed to
2937ef381359SOz Shlomo 				 * the driver.
2938ef381359SOz Shlomo 				 */
2939ef381359SOz Shlomo 				return -EINVAL;
2940ef381359SOz Shlomo 			}
294103a9d11eSOr Gerlitz 
2942592d3651SChris Mi 			if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
2943e98bedf5SEli Britstein 				NL_SET_ERR_MSG_MOD(extack,
2944e98bedf5SEli Britstein 						   "can't support more output ports, can't offload forwarding");
2945592d3651SChris Mi 				pr_err("can't support more than %d output ports, can't offload forwarding\n",
2946592d3651SChris Mi 				       attr->out_count);
2947592d3651SChris Mi 				return -EOPNOTSUPP;
2948592d3651SChris Mi 			}
2949592d3651SChris Mi 
2950a54e20b4SHadar Hen Zion 			if (switchdev_port_same_parent_id(priv->netdev,
2951b1d90e6bSRabie Loulou 							  out_dev) ||
2952b1d90e6bSRabie Loulou 			    is_merged_eswitch_dev(priv, out_dev)) {
29531cab1cd7SOr Gerlitz 				action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
2954e37a79e5SMark Bloch 					  MLX5_FLOW_CONTEXT_ACTION_COUNT;
295503a9d11eSOr Gerlitz 				out_priv = netdev_priv(out_dev);
29561d447a39SSaeed Mahameed 				rpriv = out_priv->ppriv;
2957592d3651SChris Mi 				attr->out_rep[attr->out_count] = rpriv->rep;
2958592d3651SChris Mi 				attr->out_mdev[attr->out_count++] = out_priv->mdev;
2959a54e20b4SHadar Hen Zion 			} else if (encap) {
29609f8a739eSCong Wang 				parse_attr->mirred_ifindex = out_dev->ifindex;
29613c37745eSOr Gerlitz 				parse_attr->tun_info = *info;
29623c37745eSOr Gerlitz 				attr->parse_attr = parse_attr;
296360786f09SMark Bloch 				action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
2964a54e20b4SHadar Hen Zion 					  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
2965a54e20b4SHadar Hen Zion 					  MLX5_FLOW_CONTEXT_ACTION_COUNT;
29663c37745eSOr Gerlitz 				/* attr->out_rep is resolved when we handle encap */
2967ef381359SOz Shlomo 			} else if (parse_attr->filter_dev != priv->netdev) {
2968ef381359SOz Shlomo 				/* All mlx5 devices are called to configure
2969ef381359SOz Shlomo 				 * high level device filters. Therefore, the
2970ef381359SOz Shlomo 				 * *attempt* to install a filter on an invalid
2971ef381359SOz Shlomo 				 * eswitch should not trigger an explicit error
2972ef381359SOz Shlomo 				 */
2973ef381359SOz Shlomo 				return -EINVAL;
2974a54e20b4SHadar Hen Zion 			} else {
2975e98bedf5SEli Britstein 				NL_SET_ERR_MSG_MOD(extack,
2976e98bedf5SEli Britstein 						   "devices are not on same switch HW, can't offload forwarding");
2977a54e20b4SHadar Hen Zion 				pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
2978a54e20b4SHadar Hen Zion 				       priv->netdev->name, out_dev->name);
2979a54e20b4SHadar Hen Zion 				return -EINVAL;
2980a54e20b4SHadar Hen Zion 			}
2981a54e20b4SHadar Hen Zion 			continue;
2982a54e20b4SHadar Hen Zion 		}
2983a54e20b4SHadar Hen Zion 
2984a54e20b4SHadar Hen Zion 		if (is_tcf_tunnel_set(a)) {
2985a54e20b4SHadar Hen Zion 			info = tcf_tunnel_info(a);
2986a54e20b4SHadar Hen Zion 			if (info)
2987a54e20b4SHadar Hen Zion 				encap = true;
2988a54e20b4SHadar Hen Zion 			else
2989a54e20b4SHadar Hen Zion 				return -EOPNOTSUPP;
2990592d3651SChris Mi 			attr->mirror_count = attr->out_count;
299103a9d11eSOr Gerlitz 			continue;
299203a9d11eSOr Gerlitz 		}
299303a9d11eSOr Gerlitz 
29948b32580dSOr Gerlitz 		if (is_tcf_vlan(a)) {
29951482bd3dSJianbo Liu 			err = parse_tc_vlan_action(priv, a, attr, &action);
29961482bd3dSJianbo Liu 
29971482bd3dSJianbo Liu 			if (err)
29981482bd3dSJianbo Liu 				return err;
29991482bd3dSJianbo Liu 
3000592d3651SChris Mi 			attr->mirror_count = attr->out_count;
30018b32580dSOr Gerlitz 			continue;
30028b32580dSOr Gerlitz 		}
30038b32580dSOr Gerlitz 
3004bbd00f7eSHadar Hen Zion 		if (is_tcf_tunnel_release(a)) {
30051cab1cd7SOr Gerlitz 			action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
3006bbd00f7eSHadar Hen Zion 			continue;
3007bbd00f7eSHadar Hen Zion 		}
3008bbd00f7eSHadar Hen Zion 
3009bf07aa73SPaul Blakey 		if (is_tcf_gact_goto_chain(a)) {
3010bf07aa73SPaul Blakey 			u32 dest_chain = tcf_gact_goto_chain_index(a);
3011bf07aa73SPaul Blakey 			u32 max_chain = mlx5_eswitch_get_chain_range(esw);
3012bf07aa73SPaul Blakey 
3013bf07aa73SPaul Blakey 			if (dest_chain <= attr->chain) {
3014bf07aa73SPaul Blakey 				NL_SET_ERR_MSG(extack, "Goto earlier chain isn't supported");
3015bf07aa73SPaul Blakey 				return -EOPNOTSUPP;
3016bf07aa73SPaul Blakey 			}
3017bf07aa73SPaul Blakey 			if (dest_chain > max_chain) {
3018bf07aa73SPaul Blakey 				NL_SET_ERR_MSG(extack, "Requested destination chain is out of supported range");
3019bf07aa73SPaul Blakey 				return -EOPNOTSUPP;
3020bf07aa73SPaul Blakey 			}
3021bf07aa73SPaul Blakey 			action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
3022bf07aa73SPaul Blakey 				  MLX5_FLOW_CONTEXT_ACTION_COUNT;
3023bf07aa73SPaul Blakey 			attr->dest_chain = dest_chain;
3024bf07aa73SPaul Blakey 
3025bf07aa73SPaul Blakey 			continue;
3026bf07aa73SPaul Blakey 		}
3027bf07aa73SPaul Blakey 
302803a9d11eSOr Gerlitz 		return -EINVAL;
302903a9d11eSOr Gerlitz 	}
3030bdd66ac0SOr Gerlitz 
30311cab1cd7SOr Gerlitz 	attr->action = action;
3032e98bedf5SEli Britstein 	if (!actions_match_supported(priv, exts, parse_attr, flow, extack))
3033bdd66ac0SOr Gerlitz 		return -EOPNOTSUPP;
3034bdd66ac0SOr Gerlitz 
30351392f44bSRoi Dayan 	if (attr->mirror_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
3036e98bedf5SEli Britstein 		NL_SET_ERR_MSG_MOD(extack,
3037e98bedf5SEli Britstein 				   "current firmware doesn't support split rule for port mirroring");
3038592d3651SChris Mi 		netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
3039592d3651SChris Mi 		return -EOPNOTSUPP;
3040592d3651SChris Mi 	}
3041592d3651SChris Mi 
304231c8eba5SOr Gerlitz 	return 0;
304303a9d11eSOr Gerlitz }
304403a9d11eSOr Gerlitz 
30455dbe906fSPaul Blakey static void get_flags(int flags, u16 *flow_flags)
304660bd4af8SOr Gerlitz {
30475dbe906fSPaul Blakey 	u16 __flow_flags = 0;
304860bd4af8SOr Gerlitz 
304960bd4af8SOr Gerlitz 	if (flags & MLX5E_TC_INGRESS)
305060bd4af8SOr Gerlitz 		__flow_flags |= MLX5E_TC_FLOW_INGRESS;
305160bd4af8SOr Gerlitz 	if (flags & MLX5E_TC_EGRESS)
305260bd4af8SOr Gerlitz 		__flow_flags |= MLX5E_TC_FLOW_EGRESS;
305360bd4af8SOr Gerlitz 
305460bd4af8SOr Gerlitz 	*flow_flags = __flow_flags;
305560bd4af8SOr Gerlitz }
305660bd4af8SOr Gerlitz 
305705866c82SOr Gerlitz static const struct rhashtable_params tc_ht_params = {
305805866c82SOr Gerlitz 	.head_offset = offsetof(struct mlx5e_tc_flow, node),
305905866c82SOr Gerlitz 	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
306005866c82SOr Gerlitz 	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
306105866c82SOr Gerlitz 	.automatic_shrinking = true,
306205866c82SOr Gerlitz };
306305866c82SOr Gerlitz 
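/* In switchdev mode offloaded flows are kept in the uplink representor's
 * hashtable, otherwise in the netdev's own TC table.
 */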
306405866c82SOr Gerlitz static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv)
306505866c82SOr Gerlitz {
3066655dc3d2SOr Gerlitz 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3067655dc3d2SOr Gerlitz 	struct mlx5e_rep_priv *uplink_rpriv;
3068655dc3d2SOr Gerlitz 
3069655dc3d2SOr Gerlitz 	if (MLX5_VPORT_MANAGER(priv->mdev) && esw->mode == SRIOV_OFFLOADS) {
3070655dc3d2SOr Gerlitz 		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
3071ec1366c2SOz Shlomo 		return &uplink_rpriv->uplink_priv.tc_ht;
3072655dc3d2SOr Gerlitz 	} else
307305866c82SOr Gerlitz 		return &priv->fs.tc.ht;
307405866c82SOr Gerlitz }
307505866c82SOr Gerlitz 
3076a88780a9SRoi Dayan static int
3077a88780a9SRoi Dayan mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
30785dbe906fSPaul Blakey 		 struct tc_cls_flower_offload *f, u16 flow_flags,
3079a88780a9SRoi Dayan 		 struct mlx5e_tc_flow_parse_attr **__parse_attr,
3080a88780a9SRoi Dayan 		 struct mlx5e_tc_flow **__flow)
3081e3a2b7edSAmir Vadai {
308217091853SOr Gerlitz 	struct mlx5e_tc_flow_parse_attr *parse_attr;
30833bc4b7bfSOr Gerlitz 	struct mlx5e_tc_flow *flow;
3084a88780a9SRoi Dayan 	int err;
3085776b12b6SOr Gerlitz 
308665ba8fb7SOr Gerlitz 	flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
30871b9a07eeSLeon Romanovsky 	parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
308817091853SOr Gerlitz 	if (!parse_attr || !flow) {
3089e3a2b7edSAmir Vadai 		err = -ENOMEM;
3090e3a2b7edSAmir Vadai 		goto err_free;
3091e3a2b7edSAmir Vadai 	}
3092e3a2b7edSAmir Vadai 
3093e3a2b7edSAmir Vadai 	flow->cookie = f->cookie;
309465ba8fb7SOr Gerlitz 	flow->flags = flow_flags;
3095655dc3d2SOr Gerlitz 	flow->priv = priv;
3096e3a2b7edSAmir Vadai 
3097a88780a9SRoi Dayan 	*__flow = flow;
3098a88780a9SRoi Dayan 	*__parse_attr = parse_attr;
3099a88780a9SRoi Dayan 
3100a88780a9SRoi Dayan 	return 0;
3101a88780a9SRoi Dayan 
3102a88780a9SRoi Dayan err_free:
3103a88780a9SRoi Dayan 	kfree(flow);
3104a88780a9SRoi Dayan 	kvfree(parse_attr);
3105a88780a9SRoi Dayan 	return err;
3106adb4c123SOr Gerlitz }
3107adb4c123SOr Gerlitz 
3108a88780a9SRoi Dayan static int
3109a88780a9SRoi Dayan mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
3110a88780a9SRoi Dayan 		   struct tc_cls_flower_offload *f,
31115dbe906fSPaul Blakey 		   u16 flow_flags,
3112d11afc26SOz Shlomo 		   struct net_device *filter_dev,
3113a88780a9SRoi Dayan 		   struct mlx5e_tc_flow **__flow)
3114a88780a9SRoi Dayan {
3115a88780a9SRoi Dayan 	struct netlink_ext_ack *extack = f->common.extack;
3116a88780a9SRoi Dayan 	struct mlx5e_tc_flow_parse_attr *parse_attr;
3117a88780a9SRoi Dayan 	struct mlx5e_tc_flow *flow;
3118a88780a9SRoi Dayan 	int attr_size, err;
3119a88780a9SRoi Dayan 
3120a88780a9SRoi Dayan 	flow_flags |= MLX5E_TC_FLOW_ESWITCH;
3121a88780a9SRoi Dayan 	attr_size  = sizeof(struct mlx5_esw_flow_attr);
3122a88780a9SRoi Dayan 	err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
3123a88780a9SRoi Dayan 			       &parse_attr, &flow);
3124a88780a9SRoi Dayan 	if (err)
3125a88780a9SRoi Dayan 		goto out;
3126d11afc26SOz Shlomo 	parse_attr->filter_dev = filter_dev;
3127d11afc26SOz Shlomo 	flow->esw_attr->parse_attr = parse_attr;
312854c177caSOz Shlomo 	err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
312954c177caSOz Shlomo 			       f, filter_dev);
3130d11afc26SOz Shlomo 	if (err)
3131d11afc26SOz Shlomo 		goto err_free;
3132a88780a9SRoi Dayan 
3133bf07aa73SPaul Blakey 	flow->esw_attr->chain = f->common.chain_index;
3134bf07aa73SPaul Blakey 	flow->esw_attr->prio = TC_H_MAJ(f->common.prio) >> 16;
3135a88780a9SRoi Dayan 	err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow, extack);
3136a88780a9SRoi Dayan 	if (err)
3137a88780a9SRoi Dayan 		goto err_free;
3138a88780a9SRoi Dayan 
3139a88780a9SRoi Dayan 	err = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow, extack);
31405dbe906fSPaul Blakey 	if (err)
3141aa0cbbaeSOr Gerlitz 		goto err_free;
31425c40348cSOr Gerlitz 
3143a88780a9SRoi Dayan 	if (!(flow->esw_attr->action &
314460786f09SMark Bloch 	      MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT))
3145232c0013SHadar Hen Zion 		kvfree(parse_attr);
3146e3a2b7edSAmir Vadai 
3147a88780a9SRoi Dayan 	*__flow = flow;
3148af1607c3SJianbo Liu 
3149a88780a9SRoi Dayan 	return 0;
3150e3a2b7edSAmir Vadai 
3151e3a2b7edSAmir Vadai err_free:
3152232c0013SHadar Hen Zion 	kfree(flow);
3153a88780a9SRoi Dayan 	kvfree(parse_attr);
3154a88780a9SRoi Dayan out:
3155a88780a9SRoi Dayan 	return err;
3156a88780a9SRoi Dayan }
3157a88780a9SRoi Dayan 
3158a88780a9SRoi Dayan static int
3159a88780a9SRoi Dayan mlx5e_add_nic_flow(struct mlx5e_priv *priv,
3160a88780a9SRoi Dayan 		   struct tc_cls_flower_offload *f,
31615dbe906fSPaul Blakey 		   u16 flow_flags,
3162d11afc26SOz Shlomo 		   struct net_device *filter_dev,
3163a88780a9SRoi Dayan 		   struct mlx5e_tc_flow **__flow)
3164a88780a9SRoi Dayan {
3165a88780a9SRoi Dayan 	struct netlink_ext_ack *extack = f->common.extack;
3166a88780a9SRoi Dayan 	struct mlx5e_tc_flow_parse_attr *parse_attr;
3167a88780a9SRoi Dayan 	struct mlx5e_tc_flow *flow;
3168a88780a9SRoi Dayan 	int attr_size, err;
3169a88780a9SRoi Dayan 
3170bf07aa73SPaul Blakey 	/* multi-chain not supported for NIC rules */
3171bf07aa73SPaul Blakey 	if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
3172bf07aa73SPaul Blakey 		return -EOPNOTSUPP;
3173bf07aa73SPaul Blakey 
3174a88780a9SRoi Dayan 	flow_flags |= MLX5E_TC_FLOW_NIC;
3175a88780a9SRoi Dayan 	attr_size  = sizeof(struct mlx5_nic_flow_attr);
3176a88780a9SRoi Dayan 	err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
3177a88780a9SRoi Dayan 			       &parse_attr, &flow);
3178a88780a9SRoi Dayan 	if (err)
3179a88780a9SRoi Dayan 		goto out;
3180a88780a9SRoi Dayan 
3181d11afc26SOz Shlomo 	parse_attr->filter_dev = filter_dev;
318254c177caSOz Shlomo 	err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
318354c177caSOz Shlomo 			       f, filter_dev);
3184d11afc26SOz Shlomo 	if (err)
3185d11afc26SOz Shlomo 		goto err_free;
3186d11afc26SOz Shlomo 
3187a88780a9SRoi Dayan 	err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow, extack);
3188a88780a9SRoi Dayan 	if (err)
3189a88780a9SRoi Dayan 		goto err_free;
3190a88780a9SRoi Dayan 
3191a88780a9SRoi Dayan 	err = mlx5e_tc_add_nic_flow(priv, parse_attr, flow, extack);
3192a88780a9SRoi Dayan 	if (err)
3193a88780a9SRoi Dayan 		goto err_free;
3194a88780a9SRoi Dayan 
3195a88780a9SRoi Dayan 	flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
3196a88780a9SRoi Dayan 	kvfree(parse_attr);
3197a88780a9SRoi Dayan 	*__flow = flow;
3198a88780a9SRoi Dayan 
3199a88780a9SRoi Dayan 	return 0;
3200a88780a9SRoi Dayan 
3201a88780a9SRoi Dayan err_free:
3202a88780a9SRoi Dayan 	kfree(flow);
3203a88780a9SRoi Dayan 	kvfree(parse_attr);
3204a88780a9SRoi Dayan out:
3205a88780a9SRoi Dayan 	return err;
3206a88780a9SRoi Dayan }
3207a88780a9SRoi Dayan 
3208a88780a9SRoi Dayan static int
3209a88780a9SRoi Dayan mlx5e_tc_add_flow(struct mlx5e_priv *priv,
3210a88780a9SRoi Dayan 		  struct tc_cls_flower_offload *f,
3211a88780a9SRoi Dayan 		  int flags,
3212d11afc26SOz Shlomo 		  struct net_device *filter_dev,
3213a88780a9SRoi Dayan 		  struct mlx5e_tc_flow **flow)
3214a88780a9SRoi Dayan {
3215a88780a9SRoi Dayan 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
32165dbe906fSPaul Blakey 	u16 flow_flags;
3217a88780a9SRoi Dayan 	int err;
3218a88780a9SRoi Dayan 
3219a88780a9SRoi Dayan 	get_flags(flags, &flow_flags);
3220a88780a9SRoi Dayan 
3221bf07aa73SPaul Blakey 	if (!tc_can_offload_extack(priv->netdev, f->common.extack))
3222bf07aa73SPaul Blakey 		return -EOPNOTSUPP;
3223bf07aa73SPaul Blakey 
3224a88780a9SRoi Dayan 	if (esw && esw->mode == SRIOV_OFFLOADS)
3225d11afc26SOz Shlomo 		err = mlx5e_add_fdb_flow(priv, f, flow_flags,
3226d11afc26SOz Shlomo 					 filter_dev, flow);
3227a88780a9SRoi Dayan 	else
3228d11afc26SOz Shlomo 		err = mlx5e_add_nic_flow(priv, f, flow_flags,
3229d11afc26SOz Shlomo 					 filter_dev, flow);
3230a88780a9SRoi Dayan 
3231a88780a9SRoi Dayan 	return err;
3232a88780a9SRoi Dayan }
3233a88780a9SRoi Dayan 
323471d82d2aSOz Shlomo int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
3235a88780a9SRoi Dayan 			   struct tc_cls_flower_offload *f, int flags)
3236a88780a9SRoi Dayan {
3237a88780a9SRoi Dayan 	struct netlink_ext_ack *extack = f->common.extack;
3238a88780a9SRoi Dayan 	struct rhashtable *tc_ht = get_tc_ht(priv);
3239a88780a9SRoi Dayan 	struct mlx5e_tc_flow *flow;
3240a88780a9SRoi Dayan 	int err = 0;
3241a88780a9SRoi Dayan 
3242a88780a9SRoi Dayan 	flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
3243a88780a9SRoi Dayan 	if (flow) {
3244a88780a9SRoi Dayan 		NL_SET_ERR_MSG_MOD(extack,
3245a88780a9SRoi Dayan 				   "flow cookie already exists, ignoring");
3246a88780a9SRoi Dayan 		netdev_warn_once(priv->netdev,
3247a88780a9SRoi Dayan 				 "flow cookie %lx already exists, ignoring\n",
3248a88780a9SRoi Dayan 				 f->cookie);
3249a88780a9SRoi Dayan 		goto out;
3250a88780a9SRoi Dayan 	}
3251a88780a9SRoi Dayan 
3252d11afc26SOz Shlomo 	err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow);
3253a88780a9SRoi Dayan 	if (err)
3254a88780a9SRoi Dayan 		goto out;
3255a88780a9SRoi Dayan 
3256a88780a9SRoi Dayan 	err = rhashtable_insert_fast(tc_ht, &flow->node, tc_ht_params);
3257a88780a9SRoi Dayan 	if (err)
3258a88780a9SRoi Dayan 		goto err_free;
3259a88780a9SRoi Dayan 
3260a88780a9SRoi Dayan 	return 0;
3261a88780a9SRoi Dayan 
3262a88780a9SRoi Dayan err_free:
3263a88780a9SRoi Dayan 	mlx5e_tc_del_flow(priv, flow);
3264a88780a9SRoi Dayan 	kfree(flow);
3265a88780a9SRoi Dayan out:
3266e3a2b7edSAmir Vadai 	return err;
3267e3a2b7edSAmir Vadai }
3268e3a2b7edSAmir Vadai 
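/* Delete/stats requests must carry the same ingress/egress direction bits
 * the flow was added with; the helper below compares the direction recorded
 * in flow->flags against the direction of the current request (the two flag
 * sets use matching bit positions).
 */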
32698f8ae895SOr Gerlitz #define DIRECTION_MASK (MLX5E_TC_INGRESS | MLX5E_TC_EGRESS)
32708f8ae895SOr Gerlitz #define FLOW_DIRECTION_MASK (MLX5E_TC_FLOW_INGRESS | MLX5E_TC_FLOW_EGRESS)
32718f8ae895SOr Gerlitz 
32728f8ae895SOr Gerlitz static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags)
32738f8ae895SOr Gerlitz {
32748f8ae895SOr Gerlitz 	if ((flow->flags & FLOW_DIRECTION_MASK) == (flags & DIRECTION_MASK))
32758f8ae895SOr Gerlitz 		return true;
32768f8ae895SOr Gerlitz 
32778f8ae895SOr Gerlitz 	return false;
32788f8ae895SOr Gerlitz }
32798f8ae895SOr Gerlitz 
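/* Remove an offloaded flower rule: look the flow up by cookie, verify the
 * request comes from the same direction the rule was added on, then unhash
 * it and tear down the hardware state.
 */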
328071d82d2aSOz Shlomo int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
328160bd4af8SOr Gerlitz 			struct tc_cls_flower_offload *f, int flags)
3282e3a2b7edSAmir Vadai {
328305866c82SOr Gerlitz 	struct rhashtable *tc_ht = get_tc_ht(priv);
3284e3a2b7edSAmir Vadai 	struct mlx5e_tc_flow *flow;
3285e3a2b7edSAmir Vadai 
328605866c82SOr Gerlitz 	flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
32878f8ae895SOr Gerlitz 	if (!flow || !same_flow_direction(flow, flags))
3288e3a2b7edSAmir Vadai 		return -EINVAL;
3289e3a2b7edSAmir Vadai 
329005866c82SOr Gerlitz 	rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params);
3291e3a2b7edSAmir Vadai 
3292961e8979SRoi Dayan 	mlx5e_tc_del_flow(priv, flow);
3293e3a2b7edSAmir Vadai 
3294e3a2b7edSAmir Vadai 	kfree(flow);
3295e3a2b7edSAmir Vadai 
3296e3a2b7edSAmir Vadai 	return 0;
3297e3a2b7edSAmir Vadai }
3298e3a2b7edSAmir Vadai 
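/* Report hardware statistics for an offloaded flower rule. The counter is
 * read from the driver's cached value and fed back to TC so that software
 * dumps reflect packets handled entirely in hardware.
 */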
329971d82d2aSOz Shlomo int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
330060bd4af8SOr Gerlitz 		       struct tc_cls_flower_offload *f, int flags)
3301aad7e08dSAmir Vadai {
330205866c82SOr Gerlitz 	struct rhashtable *tc_ht = get_tc_ht(priv);
3303aad7e08dSAmir Vadai 	struct mlx5e_tc_flow *flow;
3304aad7e08dSAmir Vadai 	struct mlx5_fc *counter;
3305aad7e08dSAmir Vadai 	u64 bytes;
3306aad7e08dSAmir Vadai 	u64 packets;
3307aad7e08dSAmir Vadai 	u64 lastuse;
3308aad7e08dSAmir Vadai 
330905866c82SOr Gerlitz 	flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
33108f8ae895SOr Gerlitz 	if (!flow || !same_flow_direction(flow, flags))
3311aad7e08dSAmir Vadai 		return -EINVAL;
3312aad7e08dSAmir Vadai 
33130b67a38fSHadar Hen Zion 	if (!(flow->flags & MLX5E_TC_FLOW_OFFLOADED))
33140b67a38fSHadar Hen Zion 		return 0;
33150b67a38fSHadar Hen Zion 
3316b8aee822SMark Bloch 	counter = mlx5e_tc_get_counter(flow);
3317aad7e08dSAmir Vadai 	if (!counter)
3318aad7e08dSAmir Vadai 		return 0;
3319aad7e08dSAmir Vadai 
3320aad7e08dSAmir Vadai 	mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
3321aad7e08dSAmir Vadai 
3322d897a638SJakub Kicinski 	tcf_exts_stats_update(f->exts, bytes, packets, lastuse);
3323fed06ee8SOr Gerlitz 
3324aad7e08dSAmir Vadai 	return 0;
3325aad7e08dSAmir Vadai }
3326aad7e08dSAmir Vadai 
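/* Mark every hairpin entry whose peer is the departing device, so later use
 * of the pair knows the peer's queues are gone. Only functions of the same
 * physical device can be hairpin peers, hence the same_hw_devs() check.
 */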
33274d8fcf21SAlaa Hleihel static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
33284d8fcf21SAlaa Hleihel 					      struct mlx5e_priv *peer_priv)
33294d8fcf21SAlaa Hleihel {
33304d8fcf21SAlaa Hleihel 	struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
33314d8fcf21SAlaa Hleihel 	struct mlx5e_hairpin_entry *hpe;
33324d8fcf21SAlaa Hleihel 	u16 peer_vhca_id;
33334d8fcf21SAlaa Hleihel 	int bkt;
33344d8fcf21SAlaa Hleihel 
33354d8fcf21SAlaa Hleihel 	if (!same_hw_devs(priv, peer_priv))
33364d8fcf21SAlaa Hleihel 		return;
33374d8fcf21SAlaa Hleihel 
33384d8fcf21SAlaa Hleihel 	peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
33394d8fcf21SAlaa Hleihel 
33404d8fcf21SAlaa Hleihel 	hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist) {
33414d8fcf21SAlaa Hleihel 		if (hpe->peer_vhca_id == peer_vhca_id)
33424d8fcf21SAlaa Hleihel 			hpe->hp->pair->peer_gone = true;
33434d8fcf21SAlaa Hleihel 	}
33444d8fcf21SAlaa Hleihel }
33454d8fcf21SAlaa Hleihel 
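/* Netdevice notifier: when another mlx5e netdev on the same HW is being
 * unregistered, invalidate any hairpin pairs that used it as their peer.
 */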
33464d8fcf21SAlaa Hleihel static int mlx5e_tc_netdev_event(struct notifier_block *this,
33474d8fcf21SAlaa Hleihel 				 unsigned long event, void *ptr)
33484d8fcf21SAlaa Hleihel {
33494d8fcf21SAlaa Hleihel 	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
33504d8fcf21SAlaa Hleihel 	struct mlx5e_flow_steering *fs;
33514d8fcf21SAlaa Hleihel 	struct mlx5e_priv *peer_priv;
33524d8fcf21SAlaa Hleihel 	struct mlx5e_tc_table *tc;
33534d8fcf21SAlaa Hleihel 	struct mlx5e_priv *priv;
33544d8fcf21SAlaa Hleihel 
33554d8fcf21SAlaa Hleihel 	if (ndev->netdev_ops != &mlx5e_netdev_ops ||
33564d8fcf21SAlaa Hleihel 	    event != NETDEV_UNREGISTER ||
33574d8fcf21SAlaa Hleihel 	    ndev->reg_state == NETREG_REGISTERED)
33584d8fcf21SAlaa Hleihel 		return NOTIFY_DONE;
33594d8fcf21SAlaa Hleihel 
33604d8fcf21SAlaa Hleihel 	tc = container_of(this, struct mlx5e_tc_table, netdevice_nb);
33614d8fcf21SAlaa Hleihel 	fs = container_of(tc, struct mlx5e_flow_steering, tc);
33624d8fcf21SAlaa Hleihel 	priv = container_of(fs, struct mlx5e_priv, fs);
33634d8fcf21SAlaa Hleihel 	peer_priv = netdev_priv(ndev);
33644d8fcf21SAlaa Hleihel 	if (priv == peer_priv ||
33654d8fcf21SAlaa Hleihel 	    !(priv->netdev->features & NETIF_F_HW_TC))
33664d8fcf21SAlaa Hleihel 		return NOTIFY_DONE;
33674d8fcf21SAlaa Hleihel 
33684d8fcf21SAlaa Hleihel 	mlx5e_tc_hairpin_update_dead_peer(priv, peer_priv);
33694d8fcf21SAlaa Hleihel 
33704d8fcf21SAlaa Hleihel 	return NOTIFY_DONE;
33714d8fcf21SAlaa Hleihel }
33724d8fcf21SAlaa Hleihel 
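/* Initialize NIC-mode TC state: the mod_hdr and hairpin hash tables, the
 * flow rhashtable, and a netdevice notifier used for hairpin peer cleanup.
 * Failing to register the notifier is not fatal; offloads still work, only
 * dead-peer hairpin cleanup is lost.
 */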
3373655dc3d2SOr Gerlitz int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
3374e8f887acSAmir Vadai {
3375acff797cSMaor Gottlieb 	struct mlx5e_tc_table *tc = &priv->fs.tc;
33764d8fcf21SAlaa Hleihel 	int err;
3377e8f887acSAmir Vadai 
337811c9c548SOr Gerlitz 	hash_init(tc->mod_hdr_tbl);
33795c65c564SOr Gerlitz 	hash_init(tc->hairpin_tbl);
338011c9c548SOr Gerlitz 
33814d8fcf21SAlaa Hleihel 	err = rhashtable_init(&tc->ht, &tc_ht_params);
33824d8fcf21SAlaa Hleihel 	if (err)
33834d8fcf21SAlaa Hleihel 		return err;
33844d8fcf21SAlaa Hleihel 
33854d8fcf21SAlaa Hleihel 	tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
33864d8fcf21SAlaa Hleihel 	if (register_netdevice_notifier(&tc->netdevice_nb)) {
33874d8fcf21SAlaa Hleihel 		tc->netdevice_nb.notifier_call = NULL;
33884d8fcf21SAlaa Hleihel 		mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
33894d8fcf21SAlaa Hleihel 	}
33904d8fcf21SAlaa Hleihel 
33914d8fcf21SAlaa Hleihel 	return err;
3392e8f887acSAmir Vadai }
3393e8f887acSAmir Vadai 
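/* rhashtable_free_and_destroy() callback: release one leftover flow. */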
3394e8f887acSAmir Vadai static void _mlx5e_tc_del_flow(void *ptr, void *arg)
3395e8f887acSAmir Vadai {
3396e8f887acSAmir Vadai 	struct mlx5e_tc_flow *flow = ptr;
3397655dc3d2SOr Gerlitz 	struct mlx5e_priv *priv = flow->priv;
3398e8f887acSAmir Vadai 
3399961e8979SRoi Dayan 	mlx5e_tc_del_flow(priv, flow);
3400e8f887acSAmir Vadai 	kfree(flow);
3401e8f887acSAmir Vadai }
3402e8f887acSAmir Vadai 
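/* Tear down NIC-mode TC state: the netdevice notifier (if it was registered),
 * any remaining flows, and the NIC TC flow table itself.
 */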
3403655dc3d2SOr Gerlitz void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
3404e8f887acSAmir Vadai {
3405acff797cSMaor Gottlieb 	struct mlx5e_tc_table *tc = &priv->fs.tc;
3406e8f887acSAmir Vadai 
34074d8fcf21SAlaa Hleihel 	if (tc->netdevice_nb.notifier_call)
34084d8fcf21SAlaa Hleihel 		unregister_netdevice_notifier(&tc->netdevice_nb);
34094d8fcf21SAlaa Hleihel 
3410655dc3d2SOr Gerlitz 	rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, NULL);
3411e8f887acSAmir Vadai 
3412acff797cSMaor Gottlieb 	if (!IS_ERR_OR_NULL(tc->t)) {
3413acff797cSMaor Gottlieb 		mlx5_destroy_flow_table(tc->t);
3414acff797cSMaor Gottlieb 		tc->t = NULL;
3415e8f887acSAmir Vadai 	}
3416e8f887acSAmir Vadai }
3417655dc3d2SOr Gerlitz 
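/* The eswitch (FDB) path keeps its flows in a separate rhashtable supplied
 * by the caller (the representor code); only init/destroy is needed here.
 */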
3418655dc3d2SOr Gerlitz int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
3419655dc3d2SOr Gerlitz {
3420655dc3d2SOr Gerlitz 	return rhashtable_init(tc_ht, &tc_ht_params);
3421655dc3d2SOr Gerlitz }
3422655dc3d2SOr Gerlitz 
3423655dc3d2SOr Gerlitz void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
3424655dc3d2SOr Gerlitz {
3425655dc3d2SOr Gerlitz 	rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
3426655dc3d2SOr Gerlitz }
342701252a27SOr Gerlitz 
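/* Number of filters currently offloaded, taken from the rhashtable element
 * count.
 */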
342801252a27SOr Gerlitz int mlx5e_tc_num_filters(struct mlx5e_priv *priv)
342901252a27SOr Gerlitz {
343001252a27SOr Gerlitz 	struct rhashtable *tc_ht = get_tc_ht(priv);
343101252a27SOr Gerlitz 
343201252a27SOr Gerlitz 	return atomic_read(&tc_ht->nelems);
343301252a27SOr Gerlitz }