1e8f887acSAmir Vadai /*
2e8f887acSAmir Vadai  * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
3e8f887acSAmir Vadai  *
4e8f887acSAmir Vadai  * This software is available to you under a choice of one of two
5e8f887acSAmir Vadai  * licenses.  You may choose to be licensed under the terms of the GNU
6e8f887acSAmir Vadai  * General Public License (GPL) Version 2, available from the file
7e8f887acSAmir Vadai  * COPYING in the main directory of this source tree, or the
8e8f887acSAmir Vadai  * OpenIB.org BSD license below:
9e8f887acSAmir Vadai  *
10e8f887acSAmir Vadai  *     Redistribution and use in source and binary forms, with or
11e8f887acSAmir Vadai  *     without modification, are permitted provided that the following
12e8f887acSAmir Vadai  *     conditions are met:
13e8f887acSAmir Vadai  *
14e8f887acSAmir Vadai  *      - Redistributions of source code must retain the above
15e8f887acSAmir Vadai  *        copyright notice, this list of conditions and the following
16e8f887acSAmir Vadai  *        disclaimer.
17e8f887acSAmir Vadai  *
18e8f887acSAmir Vadai  *      - Redistributions in binary form must reproduce the above
19e8f887acSAmir Vadai  *        copyright notice, this list of conditions and the following
20e8f887acSAmir Vadai  *        disclaimer in the documentation and/or other materials
21e8f887acSAmir Vadai  *        provided with the distribution.
22e8f887acSAmir Vadai  *
23e8f887acSAmir Vadai  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24e8f887acSAmir Vadai  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25e8f887acSAmir Vadai  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26e8f887acSAmir Vadai  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27e8f887acSAmir Vadai  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28e8f887acSAmir Vadai  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29e8f887acSAmir Vadai  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30e8f887acSAmir Vadai  * SOFTWARE.
31e8f887acSAmir Vadai  */
32e8f887acSAmir Vadai 
33e3a2b7edSAmir Vadai #include <net/flow_dissector.h>
343f7d0eb4SOr Gerlitz #include <net/sch_generic.h>
35e3a2b7edSAmir Vadai #include <net/pkt_cls.h>
36e3a2b7edSAmir Vadai #include <net/tc_act/tc_gact.h>
3712185a9fSAmir Vadai #include <net/tc_act/tc_skbedit.h>
38e8f887acSAmir Vadai #include <linux/mlx5/fs.h>
39e8f887acSAmir Vadai #include <linux/mlx5/device.h>
40e8f887acSAmir Vadai #include <linux/rhashtable.h>
4103a9d11eSOr Gerlitz #include <net/switchdev.h>
4203a9d11eSOr Gerlitz #include <net/tc_act/tc_mirred.h>
43776b12b6SOr Gerlitz #include <net/tc_act/tc_vlan.h>
44bbd00f7eSHadar Hen Zion #include <net/tc_act/tc_tunnel_key.h>
45a54e20b4SHadar Hen Zion #include <net/vxlan.h>
46e8f887acSAmir Vadai #include "en.h"
47e8f887acSAmir Vadai #include "en_tc.h"
4803a9d11eSOr Gerlitz #include "eswitch.h"
49bbd00f7eSHadar Hen Zion #include "vxlan.h"
50e8f887acSAmir Vadai 
/* Per-offloaded-TC-filter state.  Looked up again on stats/delete
 * requests; the u64 cookie presumably serves as the rhashtable key —
 * confirm against the table params.
 */
struct mlx5e_tc_flow {
	struct rhash_head	node;	/* membership in the driver's TC flow table */
	u64			cookie;	/* TC filter cookie identifying this flow */
	struct mlx5_flow_handle *rule;	/* HW steering rule; may hold an ERR_PTR (see mlx5e_tc_del_flow) */
	struct list_head	encap; /* flows sharing the same encap */
	struct mlx5_esw_flow_attr *attr;	/* eswitch action attrs, used in SRIOV offloads mode */
};
58e8f887acSAmir Vadai 
/* Encap header type passed to the device when building an encap entry. */
enum {
	MLX5_HEADER_TYPE_VXLAN = 0x0,
	MLX5_HEADER_TYPE_NVGRE = 0x1,
};

/* Sizing of the lazily-created, auto-grouped NIC TC offload flow table. */
#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
#define MLX5E_TC_TABLE_NUM_GROUPS 4
66e8f887acSAmir Vadai 
/* Install a NIC-mode (non-eswitch) TC flow rule.
 *
 * @spec:     match criteria/values (outer headers enabled below)
 * @action:   MLX5_FLOW_CONTEXT_ACTION_* bits
 * @flow_tag: tag reported back on matching packets
 *
 * The TC flow table is created lazily on the first offloaded flow and
 * torn down again if that first rule fails to install.  Returns the new
 * rule handle or an ERR_PTR on failure.
 */
static struct mlx5_flow_handle *
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5_flow_spec *spec,
		      u32 action, u32 flow_tag)
{
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_flow_destination dest = { 0 };
	struct mlx5_flow_act flow_act = {
		.action = action,
		.flow_tag = flow_tag,
		.encap_id = 0,
	};
	struct mlx5_fc *counter = NULL;
	struct mlx5_flow_handle *rule;
	bool table_created = false;

	if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		/* Forward: re-inject into the vlan table (start of NIC RX chain). */
		dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest.ft = priv->fs.vlan.ft.t;
	} else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		/* Count-only (no FWD_DEST): allocate a flow counter as the
		 * rule destination.  Note FWD+COUNT combined is not handled
		 * here - the forward branch wins.
		 */
		counter = mlx5_fc_create(dev, true);
		if (IS_ERR(counter))
			return ERR_CAST(counter);

		dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest.counter = counter;
	}

	/* Lazily create the auto-grouped TC table on first use. */
	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
		priv->fs.tc.t =
			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
							    MLX5E_TC_PRIO,
							    MLX5E_TC_TABLE_NUM_ENTRIES,
							    MLX5E_TC_TABLE_NUM_GROUPS,
							    0, 0);
		if (IS_ERR(priv->fs.tc.t)) {
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			rule = ERR_CAST(priv->fs.tc.t);
			goto err_create_ft;
		}

		table_created = true;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	rule = mlx5_add_flow_rules(priv->fs.tc.t, spec, &flow_act, &dest, 1);

	if (IS_ERR(rule))
		goto err_add_rule;

	return rule;

err_add_rule:
	/* Only drop the table if this failed rule was the one that created it. */
	if (table_created) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
err_create_ft:
	/* counter may be NULL on the FWD_DEST path; assumes mlx5_fc_destroy()
	 * tolerates NULL - confirm in fs_counters.
	 */
	mlx5_fc_destroy(dev, counter);

	return rule;
}
130e8f887acSAmir Vadai 
13174491de9SMark Bloch static struct mlx5_flow_handle *
13274491de9SMark Bloch mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
133adb4c123SOr Gerlitz 		      struct mlx5_flow_spec *spec,
134776b12b6SOr Gerlitz 		      struct mlx5_esw_flow_attr *attr)
135adb4c123SOr Gerlitz {
136adb4c123SOr Gerlitz 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1378b32580dSOr Gerlitz 	int err;
1388b32580dSOr Gerlitz 
1398b32580dSOr Gerlitz 	err = mlx5_eswitch_add_vlan_action(esw, attr);
1408b32580dSOr Gerlitz 	if (err)
1418b32580dSOr Gerlitz 		return ERR_PTR(err);
142adb4c123SOr Gerlitz 
143776b12b6SOr Gerlitz 	return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
144adb4c123SOr Gerlitz }
145adb4c123SOr Gerlitz 
/* Drop @flow's reference on its shared encap entry; once the last flow is
 * gone, release the entry, its FW encap_id and the neighbour reference.
 */
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow) {
	/* Sample ->next BEFORE list_del() poisons the node.  When this was
	 * the last flow, @next points back at the list head embedded in the
	 * encap entry (e->flows), so list_empty(next) detects "entry unused".
	 */
	struct list_head *next = flow->encap.next;

	list_del(&flow->encap);
	if (list_empty(next)) {
		struct mlx5_encap_entry *e;

		e = list_entry(next, struct mlx5_encap_entry, flows);
		/* e->n non-NULL presumably means the encap was resolved and
		 * encap_id allocated - confirm against the attach path.
		 */
		if (e->n) {
			mlx5_encap_dealloc(priv->mdev, e->encap_id);
			neigh_release(e->n);
		}
		hlist_del_rcu(&e->encap_hlist);
		kfree(e);
	}
}
1635067b602SRoi Dayan 
/* we get here also when setting rule to the FW failed, etc. It means that the
 * flow rule itself might not exist, but some offloading related to the actions
 * should be cleaned.
 */
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_fc *counter = NULL;

	/* flow->rule may hold an ERR_PTR when FW rule setup failed. */
	if (!IS_ERR(flow->rule)) {
		counter = mlx5_flow_rule_counter(flow->rule);
		mlx5_del_flow_rules(flow->rule);
		mlx5_fc_destroy(priv->mdev, counter);
	}

	/* In switchdev mode, also undo the vlan/encap action offloads that
	 * may have been programmed even if the rule itself was never added.
	 */
	if (esw && esw->mode == SRIOV_OFFLOADS) {
		mlx5_eswitch_del_vlan_action(esw, flow->attr);
		if (flow->attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
			mlx5e_detach_encap(priv, flow);
	}

	/* Tear down the lazily-created TC table once the last filter is gone. */
	if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
}
191e8f887acSAmir Vadai 
/* Fill VXLAN-specific match fields into @spec: force ip_protocol to UDP
 * and, when the filter matches on the tunnel key, set the VNI in the
 * misc parameters (criteria from @f->mask, value from @f->key).
 */
static void parse_vxlan_attr(struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_dissector_key_keyid *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->key);
		struct flow_dissector_key_keyid *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->mask);
		MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
			 be32_to_cpu(mask->keyid));
		MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
			 be32_to_cpu(key->keyid));
	}
}
222bbd00f7eSHadar Hen Zion 
/* Parse the tunnel (decap) portion of a flower filter into the OUTER
 * header match of @spec.
 *
 * Only vxlan decap is supported: the filter must match on the full UDP
 * dst port, and that port must be a known offloaded vxlan port with
 * device encap/decap capability.  Returns 0 or -EOPNOTSUPP.
 */
static int parse_tunnel_attr(struct mlx5e_priv *priv,
			     struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);

	struct flow_dissector_key_control *enc_control =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_ENC_CONTROL,
					  f->key);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->mask);

		/* Full udp dst port must be given */
		if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
			goto vxlan_match_offload_err;

		if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->dst)) &&
		    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
			parse_vxlan_attr(spec, f);
		else {
			netdev_warn(priv->netdev,
				    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->dst));
			return -EOPNOTSUPP;
		}

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_dport, ntohs(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_dport, ntohs(key->dst));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_sport, ntohs(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_sport, ntohs(key->src));
	} else { /* udp dst port must be given */
		/* NOTE: label lives inside the else so the partial-mask case
		 * above can share this warning/return.
		 */
vxlan_match_offload_err:
		netdev_warn(priv->netdev,
			    "IP tunnel decap offload supported only for vxlan, must set UDP dport\n");
		return -EOPNOTSUPP;
	}

	/* Outer IP addresses: v4 or v6 depending on the enc_control key. */
	if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->mask);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(key->src));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(key->dst));

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
	} else if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6);
	}

	/* Enforce DMAC when offloading incoming tunneled flows.
	 * Flow counters require a match on the DMAC.
	 */
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
	ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				     dmac_47_16), priv->netdev->dev_addr);

	/* let software handle IP fragments */
	MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);

	return 0;
}
343bbd00f7eSHadar Hen Zion 
/* Translate a flower filter @f into a mlx5 match @spec.
 *
 * Also reports via @min_inline the minimum eswitch inline mode the match
 * requires (L2 by default, raised to IP/TCP_UDP as deeper header fields
 * are matched).  For tunneled (decap) filters the outer headers are
 * filled by parse_tunnel_attr() and headers_c/v are repointed at the
 * inner headers for the remaining keys.  Returns 0, -EOPNOTSUPP for
 * unsupported keys, or -EINVAL for non-TCP/UDP port matches.
 */
static int __parse_cls_flower(struct mlx5e_priv *priv,
			      struct mlx5_flow_spec *spec,
			      struct tc_cls_flower_offload *f,
			      u8 *min_inline)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	u16 addr_type = 0;
	u8 ip_proto = 0;

	*min_inline = MLX5_INLINE_MODE_L2;

	/* Reject filters using any dissector key we do not handle below. */
	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS)	|
	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL))) {
		netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
			    f->dissector->used_keys);
		return -EOPNOTSUPP;
	}

	/* NOTE(review): ENC_IPV6_ADDRS is not part of this OR; an IPv6-only
	 * enc match appears to rely on ENC_KEYID/ENC_PORTS also being set -
	 * verify this is intended.
	 */
	if ((dissector_uses_key(f->dissector,
				FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
	    dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  f->key);
		switch (key->addr_type) {
		case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
			if (parse_tunnel_attr(priv, spec, f))
				return -EOPNOTSUPP;
			break;
		default:
			return -EOPNOTSUPP;
		}

		/* In decap flow, header pointers should point to the inner
		 * headers, outer header were already set by parse_tunnel_attr
		 */
		headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
					 inner_headers);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);

		struct flow_dissector_key_control *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->mask);
		addr_type = key->addr_type;

		if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
				 key->flags & FLOW_DIS_IS_FRAGMENT);

			/* the HW doesn't need L3 inline to match on frag=no */
			if (key->flags & FLOW_DIS_IS_FRAGMENT)
				*min_inline = MLX5_INLINE_MODE_IP;
		}
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		ip_proto = key->ip_proto;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(key->n_proto));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 mask->ip_proto);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 key->ip_proto);

		/* Matching on L4 protocol requires L3 headers inline. */
		if (mask->ip_proto)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);
		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     dmac_47_16),
				mask->dst);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     dmac_47_16),
				key->dst);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     smac_47_16),
				mask->src);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     smac_47_16),
				key->src);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->mask);
		/* Only match on the vlan tag when vid/prio are actually masked. */
		if (mask->vlan_id || mask->vlan_priority) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);
		}
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &key->src, sizeof(key->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &key->dst, sizeof(key->dst));

		if (mask->src || mask->dst)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, sizeof(key->src));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, sizeof(key->dst));

		if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY ||
		    ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->mask);
		/* ip_proto comes from the BASIC key parsed above; ports are
		 * only offloadable for TCP/UDP.
		 */
		switch (ip_proto) {
		case IPPROTO_TCP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_dport, ntohs(key->dst));
			break;

		case IPPROTO_UDP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_dport, ntohs(key->dst));
			break;
		default:
			netdev_err(priv->netdev,
				   "Only UDP and TCP transport are supported\n");
			return -EINVAL;
		}

		/* L4 port matching needs the TCP/UDP headers inline. */
		if (mask->src || mask->dst)
			*min_inline = MLX5_INLINE_MODE_TCP_UDP;
	}

	return 0;
}
599e3a2b7edSAmir Vadai 
600de0af0bfSRoi Dayan static int parse_cls_flower(struct mlx5e_priv *priv,
601de0af0bfSRoi Dayan 			    struct mlx5_flow_spec *spec,
602de0af0bfSRoi Dayan 			    struct tc_cls_flower_offload *f)
603de0af0bfSRoi Dayan {
604de0af0bfSRoi Dayan 	struct mlx5_core_dev *dev = priv->mdev;
605de0af0bfSRoi Dayan 	struct mlx5_eswitch *esw = dev->priv.eswitch;
606de0af0bfSRoi Dayan 	struct mlx5_eswitch_rep *rep = priv->ppriv;
607de0af0bfSRoi Dayan 	u8 min_inline;
608de0af0bfSRoi Dayan 	int err;
609de0af0bfSRoi Dayan 
610de0af0bfSRoi Dayan 	err = __parse_cls_flower(priv, spec, f, &min_inline);
611de0af0bfSRoi Dayan 
612de0af0bfSRoi Dayan 	if (!err && esw->mode == SRIOV_OFFLOADS &&
613de0af0bfSRoi Dayan 	    rep->vport != FDB_UPLINK_VPORT) {
614de0af0bfSRoi Dayan 		if (min_inline > esw->offloads.inline_mode) {
615de0af0bfSRoi Dayan 			netdev_warn(priv->netdev,
616de0af0bfSRoi Dayan 				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
617de0af0bfSRoi Dayan 				    min_inline, esw->offloads.inline_mode);
618de0af0bfSRoi Dayan 			return -EOPNOTSUPP;
619de0af0bfSRoi Dayan 		}
620de0af0bfSRoi Dayan 	}
621de0af0bfSRoi Dayan 
622de0af0bfSRoi Dayan 	return err;
623de0af0bfSRoi Dayan }
624de0af0bfSRoi Dayan 
6255c40348cSOr Gerlitz static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
626e3a2b7edSAmir Vadai 				u32 *action, u32 *flow_tag)
627e3a2b7edSAmir Vadai {
628e3a2b7edSAmir Vadai 	const struct tc_action *a;
62922dc13c8SWANG Cong 	LIST_HEAD(actions);
630e3a2b7edSAmir Vadai 
631e3a2b7edSAmir Vadai 	if (tc_no_actions(exts))
632e3a2b7edSAmir Vadai 		return -EINVAL;
633e3a2b7edSAmir Vadai 
634e3a2b7edSAmir Vadai 	*flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
635e3a2b7edSAmir Vadai 	*action = 0;
636e3a2b7edSAmir Vadai 
63722dc13c8SWANG Cong 	tcf_exts_to_list(exts, &actions);
63822dc13c8SWANG Cong 	list_for_each_entry(a, &actions, list) {
639e3a2b7edSAmir Vadai 		/* Only support a single action per rule */
640e3a2b7edSAmir Vadai 		if (*action)
641e3a2b7edSAmir Vadai 			return -EINVAL;
642e3a2b7edSAmir Vadai 
643e3a2b7edSAmir Vadai 		if (is_tcf_gact_shot(a)) {
644e3a2b7edSAmir Vadai 			*action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
645aad7e08dSAmir Vadai 			if (MLX5_CAP_FLOWTABLE(priv->mdev,
646aad7e08dSAmir Vadai 					       flow_table_properties_nic_receive.flow_counter))
647aad7e08dSAmir Vadai 				*action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
648e3a2b7edSAmir Vadai 			continue;
649e3a2b7edSAmir Vadai 		}
650e3a2b7edSAmir Vadai 
651e3a2b7edSAmir Vadai 		if (is_tcf_skbedit_mark(a)) {
652e3a2b7edSAmir Vadai 			u32 mark = tcf_skbedit_mark(a);
653e3a2b7edSAmir Vadai 
654e3a2b7edSAmir Vadai 			if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
655e3a2b7edSAmir Vadai 				netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n",
656e3a2b7edSAmir Vadai 					    mark);
657e3a2b7edSAmir Vadai 				return -EINVAL;
658e3a2b7edSAmir Vadai 			}
659e3a2b7edSAmir Vadai 
660e3a2b7edSAmir Vadai 			*flow_tag = mark;
661e3a2b7edSAmir Vadai 			*action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
662e3a2b7edSAmir Vadai 			continue;
663e3a2b7edSAmir Vadai 		}
664e3a2b7edSAmir Vadai 
665e3a2b7edSAmir Vadai 		return -EINVAL;
666e3a2b7edSAmir Vadai 	}
667e3a2b7edSAmir Vadai 
668e3a2b7edSAmir Vadai 	return 0;
669e3a2b7edSAmir Vadai }
670e3a2b7edSAmir Vadai 
/* Compare two encap keys byte-wise; returns 0 when identical (memcmp
 * semantics).  NOTE(review): memcmp also compares any struct padding -
 * assumes mlx5_encap_info has no padding or entries are always
 * zero-initialized before the key fields are filled; TODO confirm.
 */
static inline int cmp_encap_info(struct mlx5_encap_info *a,
				 struct mlx5_encap_info *b)
{
	return memcmp(a, b, sizeof(*a));
}
676a54e20b4SHadar Hen Zion 
/* Hash the whole encap key for the encap_tbl lookup (jhash over the raw
 * struct bytes, seed 0).  Must stay in sync with cmp_encap_info(), which
 * also treats the struct as an opaque byte blob.
 */
static inline int hash_encap_info(struct mlx5_encap_info *info)
{
	return jhash(info, sizeof(*info), 0);
}
681a54e20b4SHadar Hen Zion 
682a54e20b4SHadar Hen Zion static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
683a54e20b4SHadar Hen Zion 				   struct net_device *mirred_dev,
684a54e20b4SHadar Hen Zion 				   struct net_device **out_dev,
685a54e20b4SHadar Hen Zion 				   struct flowi4 *fl4,
686a54e20b4SHadar Hen Zion 				   struct neighbour **out_n,
687a54e20b4SHadar Hen Zion 				   __be32 *saddr,
688a54e20b4SHadar Hen Zion 				   int *out_ttl)
689a54e20b4SHadar Hen Zion {
690a54e20b4SHadar Hen Zion 	struct rtable *rt;
691a54e20b4SHadar Hen Zion 	struct neighbour *n = NULL;
692a54e20b4SHadar Hen Zion 	int ttl;
693a54e20b4SHadar Hen Zion 
694a54e20b4SHadar Hen Zion #if IS_ENABLED(CONFIG_INET)
695abeffce9SArnd Bergmann 	int ret;
696abeffce9SArnd Bergmann 
697a54e20b4SHadar Hen Zion 	rt = ip_route_output_key(dev_net(mirred_dev), fl4);
698abeffce9SArnd Bergmann 	ret = PTR_ERR_OR_ZERO(rt);
699abeffce9SArnd Bergmann 	if (ret)
700abeffce9SArnd Bergmann 		return ret;
701a54e20b4SHadar Hen Zion #else
702a54e20b4SHadar Hen Zion 	return -EOPNOTSUPP;
703a54e20b4SHadar Hen Zion #endif
704a54e20b4SHadar Hen Zion 
705a54e20b4SHadar Hen Zion 	if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev)) {
706a42485ebSOr Gerlitz 		pr_warn("%s: can't offload, devices not on same HW e-switch\n", __func__);
707a54e20b4SHadar Hen Zion 		ip_rt_put(rt);
708a54e20b4SHadar Hen Zion 		return -EOPNOTSUPP;
709a54e20b4SHadar Hen Zion 	}
710a54e20b4SHadar Hen Zion 
711a54e20b4SHadar Hen Zion 	ttl = ip4_dst_hoplimit(&rt->dst);
712a54e20b4SHadar Hen Zion 	n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
713a54e20b4SHadar Hen Zion 	ip_rt_put(rt);
714a54e20b4SHadar Hen Zion 	if (!n)
715a54e20b4SHadar Hen Zion 		return -ENOMEM;
716a54e20b4SHadar Hen Zion 
717a54e20b4SHadar Hen Zion 	*out_n = n;
718a54e20b4SHadar Hen Zion 	*saddr = fl4->saddr;
719a54e20b4SHadar Hen Zion 	*out_ttl = ttl;
720a54e20b4SHadar Hen Zion 	*out_dev = rt->dst.dev;
721a54e20b4SHadar Hen Zion 
722a54e20b4SHadar Hen Zion 	return 0;
723a54e20b4SHadar Hen Zion }
724a54e20b4SHadar Hen Zion 
725a54e20b4SHadar Hen Zion static int gen_vxlan_header_ipv4(struct net_device *out_dev,
726a54e20b4SHadar Hen Zion 				 char buf[],
727a54e20b4SHadar Hen Zion 				 unsigned char h_dest[ETH_ALEN],
728a54e20b4SHadar Hen Zion 				 int ttl,
729a54e20b4SHadar Hen Zion 				 __be32 daddr,
730a54e20b4SHadar Hen Zion 				 __be32 saddr,
731a54e20b4SHadar Hen Zion 				 __be16 udp_dst_port,
732a54e20b4SHadar Hen Zion 				 __be32 vx_vni)
733a54e20b4SHadar Hen Zion {
734a54e20b4SHadar Hen Zion 	int encap_size = VXLAN_HLEN + sizeof(struct iphdr) + ETH_HLEN;
735a54e20b4SHadar Hen Zion 	struct ethhdr *eth = (struct ethhdr *)buf;
736a54e20b4SHadar Hen Zion 	struct iphdr  *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr));
737a54e20b4SHadar Hen Zion 	struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr));
738a54e20b4SHadar Hen Zion 	struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));
739a54e20b4SHadar Hen Zion 
740a54e20b4SHadar Hen Zion 	memset(buf, 0, encap_size);
741a54e20b4SHadar Hen Zion 
742a54e20b4SHadar Hen Zion 	ether_addr_copy(eth->h_dest, h_dest);
743a54e20b4SHadar Hen Zion 	ether_addr_copy(eth->h_source, out_dev->dev_addr);
744a54e20b4SHadar Hen Zion 	eth->h_proto = htons(ETH_P_IP);
745a54e20b4SHadar Hen Zion 
746a54e20b4SHadar Hen Zion 	ip->daddr = daddr;
747a54e20b4SHadar Hen Zion 	ip->saddr = saddr;
748a54e20b4SHadar Hen Zion 
749a54e20b4SHadar Hen Zion 	ip->ttl = ttl;
750a54e20b4SHadar Hen Zion 	ip->protocol = IPPROTO_UDP;
751a54e20b4SHadar Hen Zion 	ip->version = 0x4;
752a54e20b4SHadar Hen Zion 	ip->ihl = 0x5;
753a54e20b4SHadar Hen Zion 
754a54e20b4SHadar Hen Zion 	udp->dest = udp_dst_port;
755a54e20b4SHadar Hen Zion 	vxh->vx_flags = VXLAN_HF_VNI;
756a54e20b4SHadar Hen Zion 	vxh->vx_vni = vxlan_vni_field(vx_vni);
757a54e20b4SHadar Hen Zion 
758a54e20b4SHadar Hen Zion 	return encap_size;
759a54e20b4SHadar Hen Zion }
760a54e20b4SHadar Hen Zion 
/* Build and program an IPv4 encapsulation header for encap entry @e.
 *
 * Resolves the route/neighbour toward the tunnel destination, snapshots
 * the neighbour's MAC, formats the raw header into a temporary buffer
 * and allocates a FW encap id via mlx5_encap_alloc().  On success the
 * neighbour reference obtained from the route lookup is kept in e->n.
 */
static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
					  struct net_device *mirred_dev,
					  struct mlx5_encap_entry *e,
					  struct net_device **out_dev)
{
	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
	struct neighbour *n = NULL;
	struct flowi4 fl4 = {};
	char *encap_header;
	int encap_size;
	__be32 saddr;
	int ttl;
	int err;

	encap_header = kzalloc(max_encap_size, GFP_KERNEL);
	if (!encap_header)
		return -ENOMEM;

	/* Fill the flow-key fields that depend on the tunnel type */
	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		fl4.flowi4_proto = IPPROTO_UDP;
		fl4.fl4_dport = e->tun_info.tp_dst;
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
	}
	fl4.daddr = e->tun_info.daddr;

	err = mlx5e_route_lookup_ipv4(priv, mirred_dev, out_dev,
				      &fl4, &n, &saddr, &ttl);
	if (err)
		goto out;

	e->n = n;
	e->out_dev = *out_dev;

	/* A resolved L2 address is required to build the header */
	if (!(n->nud_state & NUD_VALID)) {
		pr_warn("%s: can't offload, neighbour to %pI4 invalid\n", __func__, &fl4.daddr);
		err = -EOPNOTSUPP;
		goto out;
	}

	neigh_ha_snapshot(e->h_dest, n, *out_dev);

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		encap_size = gen_vxlan_header_ipv4(*out_dev, encap_header,
						   e->h_dest, ttl,
						   e->tun_info.daddr,
						   saddr, e->tun_info.tp_dst,
						   e->tun_info.tun_id);
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
	}

	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
			       encap_size, encap_header, &e->encap_id);
out:
	/* NOTE(review): on error the neigh reference is dropped here but
	 * e->n may still point at it; assumes the caller frees @e without
	 * using e->n on failure - TODO confirm.
	 */
	if (err && n)
		neigh_release(n);
	kfree(encap_header);
	return err;
}
827a54e20b4SHadar Hen Zion 
/* Find or create an encap entry matching @tun_info and attach it to
 * @attr->encap.  Entries are cached in esw->offloads.encap_tbl, hashed
 * over the whole mlx5_encap_info key, so flows sharing a tunnel reuse
 * one HW encap id.
 *
 * Returns 0 on success, -EOPNOTSUPP for unsupported tunnel parameters
 * (missing udp dst port, set udp src port, unregistered vxlan dport,
 * non-IPv4 address family), -ENOMEM on allocation failure, or the error
 * from header creation.
 */
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
			      struct ip_tunnel_info *tun_info,
			      struct net_device *mirred_dev,
			      struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	unsigned short family = ip_tunnel_info_af(tun_info);
	struct ip_tunnel_key *key = &tun_info->key;
	struct mlx5_encap_info info;
	struct mlx5_encap_entry *e;
	struct net_device *out_dev;
	uintptr_t hash_key;
	bool found = false;
	int tunnel_type;
	int err;

	/* udp dst port must be set */
	if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst)))
		goto vxlan_encap_offload_err;

	/* setting udp src port isn't supported */
	if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) {
vxlan_encap_offload_err:
		/* both misconfigurations funnel into this one warning */
		netdev_warn(priv->netdev,
			    "must set udp dst port and not set udp src port\n");
		return -EOPNOTSUPP;
	}

	/* Only a vxlan udp dport known to the driver, on HW with
	 * encap/decap capability, can be offloaded.
	 */
	if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->tp_dst)) &&
	    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
		info.tp_dst = key->tp_dst;
		info.tun_id = tunnel_id_to_key32(key->tun_id);
		tunnel_type = MLX5_HEADER_TYPE_VXLAN;
	} else {
		netdev_warn(priv->netdev,
			    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst));
		return -EOPNOTSUPP;
	}

	switch (family) {
	case AF_INET:
		info.daddr = key->u.ipv4.dst;
		break;
	case AF_INET6:
		netdev_warn(priv->netdev,
			    "IPv6 tunnel encap offload isn't supported\n");
		/* fall through - IPv6 is rejected via the default case */
	default:
		return -EOPNOTSUPP;
	}

	hash_key = hash_encap_info(&info);

	/* Reuse a cached encap entry with the same tunnel key, if any */
	hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
				   encap_hlist, hash_key) {
		if (!cmp_encap_info(&e->tun_info, &info)) {
			found = true;
			break;
		}
	}

	if (found) {
		attr->encap = e;
		return 0;
	}

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->tun_info = info;
	e->tunnel_type = tunnel_type;
	INIT_LIST_HEAD(&e->flows);

	err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e, &out_dev);
	if (err)
		goto out_err;

	attr->encap = e;
	hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);

	return err;

out_err:
	kfree(e);
	return err;
}
914a54e20b4SHadar Hen Zion 
915a54e20b4SHadar Hen Zion static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
916a54e20b4SHadar Hen Zion 				struct mlx5e_tc_flow *flow)
917a54e20b4SHadar Hen Zion {
918a54e20b4SHadar Hen Zion 	struct mlx5_esw_flow_attr *attr = flow->attr;
919a54e20b4SHadar Hen Zion 	struct ip_tunnel_info *info = NULL;
92003a9d11eSOr Gerlitz 	const struct tc_action *a;
92122dc13c8SWANG Cong 	LIST_HEAD(actions);
922a54e20b4SHadar Hen Zion 	bool encap = false;
923a54e20b4SHadar Hen Zion 	int err;
92403a9d11eSOr Gerlitz 
92503a9d11eSOr Gerlitz 	if (tc_no_actions(exts))
92603a9d11eSOr Gerlitz 		return -EINVAL;
92703a9d11eSOr Gerlitz 
928776b12b6SOr Gerlitz 	memset(attr, 0, sizeof(*attr));
929776b12b6SOr Gerlitz 	attr->in_rep = priv->ppriv;
93003a9d11eSOr Gerlitz 
93122dc13c8SWANG Cong 	tcf_exts_to_list(exts, &actions);
93222dc13c8SWANG Cong 	list_for_each_entry(a, &actions, list) {
93303a9d11eSOr Gerlitz 		if (is_tcf_gact_shot(a)) {
9348b32580dSOr Gerlitz 			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
93503a9d11eSOr Gerlitz 					MLX5_FLOW_CONTEXT_ACTION_COUNT;
93603a9d11eSOr Gerlitz 			continue;
93703a9d11eSOr Gerlitz 		}
93803a9d11eSOr Gerlitz 
9395724b8b5SShmulik Ladkani 		if (is_tcf_mirred_egress_redirect(a)) {
94003a9d11eSOr Gerlitz 			int ifindex = tcf_mirred_ifindex(a);
94103a9d11eSOr Gerlitz 			struct net_device *out_dev;
94203a9d11eSOr Gerlitz 			struct mlx5e_priv *out_priv;
94303a9d11eSOr Gerlitz 
94403a9d11eSOr Gerlitz 			out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);
94503a9d11eSOr Gerlitz 
946a54e20b4SHadar Hen Zion 			if (switchdev_port_same_parent_id(priv->netdev,
947a54e20b4SHadar Hen Zion 							  out_dev)) {
948e37a79e5SMark Bloch 				attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
949e37a79e5SMark Bloch 					MLX5_FLOW_CONTEXT_ACTION_COUNT;
95003a9d11eSOr Gerlitz 				out_priv = netdev_priv(out_dev);
951776b12b6SOr Gerlitz 				attr->out_rep = out_priv->ppriv;
952a54e20b4SHadar Hen Zion 			} else if (encap) {
953a54e20b4SHadar Hen Zion 				err = mlx5e_attach_encap(priv, info,
954a54e20b4SHadar Hen Zion 							 out_dev, attr);
955a54e20b4SHadar Hen Zion 				if (err)
956a54e20b4SHadar Hen Zion 					return err;
957a54e20b4SHadar Hen Zion 				list_add(&flow->encap, &attr->encap->flows);
958a54e20b4SHadar Hen Zion 				attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
959a54e20b4SHadar Hen Zion 					MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
960a54e20b4SHadar Hen Zion 					MLX5_FLOW_CONTEXT_ACTION_COUNT;
961a54e20b4SHadar Hen Zion 				out_priv = netdev_priv(attr->encap->out_dev);
962a54e20b4SHadar Hen Zion 				attr->out_rep = out_priv->ppriv;
963a54e20b4SHadar Hen Zion 			} else {
964a54e20b4SHadar Hen Zion 				pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
965a54e20b4SHadar Hen Zion 				       priv->netdev->name, out_dev->name);
966a54e20b4SHadar Hen Zion 				return -EINVAL;
967a54e20b4SHadar Hen Zion 			}
968a54e20b4SHadar Hen Zion 			continue;
969a54e20b4SHadar Hen Zion 		}
970a54e20b4SHadar Hen Zion 
971a54e20b4SHadar Hen Zion 		if (is_tcf_tunnel_set(a)) {
972a54e20b4SHadar Hen Zion 			info = tcf_tunnel_info(a);
973a54e20b4SHadar Hen Zion 			if (info)
974a54e20b4SHadar Hen Zion 				encap = true;
975a54e20b4SHadar Hen Zion 			else
976a54e20b4SHadar Hen Zion 				return -EOPNOTSUPP;
97703a9d11eSOr Gerlitz 			continue;
97803a9d11eSOr Gerlitz 		}
97903a9d11eSOr Gerlitz 
9808b32580dSOr Gerlitz 		if (is_tcf_vlan(a)) {
9818b32580dSOr Gerlitz 			if (tcf_vlan_action(a) == VLAN_F_POP) {
9828b32580dSOr Gerlitz 				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
9838b32580dSOr Gerlitz 			} else if (tcf_vlan_action(a) == VLAN_F_PUSH) {
9848b32580dSOr Gerlitz 				if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
9858b32580dSOr Gerlitz 					return -EOPNOTSUPP;
9868b32580dSOr Gerlitz 
9878b32580dSOr Gerlitz 				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
9888b32580dSOr Gerlitz 				attr->vlan = tcf_vlan_push_vid(a);
9898b32580dSOr Gerlitz 			}
9908b32580dSOr Gerlitz 			continue;
9918b32580dSOr Gerlitz 		}
9928b32580dSOr Gerlitz 
993bbd00f7eSHadar Hen Zion 		if (is_tcf_tunnel_release(a)) {
994bbd00f7eSHadar Hen Zion 			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
995bbd00f7eSHadar Hen Zion 			continue;
996bbd00f7eSHadar Hen Zion 		}
997bbd00f7eSHadar Hen Zion 
99803a9d11eSOr Gerlitz 		return -EINVAL;
99903a9d11eSOr Gerlitz 	}
100003a9d11eSOr Gerlitz 	return 0;
100103a9d11eSOr Gerlitz }
100203a9d11eSOr Gerlitz 
/* Offload a new flower classifier rule.
 *
 * In SRIOV offloads (switchdev) mode the rule is installed in the
 * e-switch FDB, otherwise in the NIC TC table.  On success the flow is
 * inserted into the per-priv rhashtable keyed by the TC cookie.
 * Returns 0 on success or a negative errno.
 */
int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
			   struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	int err = 0;
	bool fdb_flow = false;
	u32 flow_tag, action;
	struct mlx5e_tc_flow *flow;
	struct mlx5_flow_spec *spec;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (esw && esw->mode == SRIOV_OFFLOADS)
		fdb_flow = true;

	/* FDB flows carry a mlx5_esw_flow_attr right after the flow struct */
	if (fdb_flow)
		flow = kzalloc(sizeof(*flow) +
			       sizeof(struct mlx5_esw_flow_attr),
			       GFP_KERNEL);
	else
		flow = kzalloc(sizeof(*flow), GFP_KERNEL);

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec || !flow) {
		err = -ENOMEM;
		goto err_free;
	}

	flow->cookie = f->cookie;

	err = parse_cls_flower(priv, spec, f);
	if (err < 0)
		goto err_free;

	if (fdb_flow) {
		flow->attr  = (struct mlx5_esw_flow_attr *)(flow + 1);
		err = parse_tc_fdb_actions(priv, f->exts, flow);
		if (err < 0)
			goto err_free;
		flow->rule = mlx5e_tc_add_fdb_flow(priv, spec, flow->attr);
	} else {
		err = parse_tc_nic_actions(priv, f->exts, &action, &flow_tag);
		if (err < 0)
			goto err_free;
		flow->rule = mlx5e_tc_add_nic_flow(priv, spec, action, flow_tag);
	}

	/* NOTE(review): when rule creation fails we still jump to
	 * err_del_rule with flow->rule holding an ERR_PTR; assumes
	 * mlx5e_tc_del_flow() copes with an invalid rule - TODO confirm.
	 */
	if (IS_ERR(flow->rule)) {
		err = PTR_ERR(flow->rule);
		goto err_del_rule;
	}

	err = rhashtable_insert_fast(&tc->ht, &flow->node,
				     tc->ht_params);
	if (err)
		goto err_del_rule;

	goto out;

err_del_rule:
	mlx5e_tc_del_flow(priv, flow);

err_free:
	kfree(flow);
out:
	/* spec is only needed for rule creation; freed on every path */
	kvfree(spec);
	return err;
}
1070e3a2b7edSAmir Vadai 
1071e3a2b7edSAmir Vadai int mlx5e_delete_flower(struct mlx5e_priv *priv,
1072e3a2b7edSAmir Vadai 			struct tc_cls_flower_offload *f)
1073e3a2b7edSAmir Vadai {
1074e3a2b7edSAmir Vadai 	struct mlx5e_tc_flow *flow;
1075acff797cSMaor Gottlieb 	struct mlx5e_tc_table *tc = &priv->fs.tc;
1076e3a2b7edSAmir Vadai 
1077e3a2b7edSAmir Vadai 	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
1078e3a2b7edSAmir Vadai 				      tc->ht_params);
1079e3a2b7edSAmir Vadai 	if (!flow)
1080e3a2b7edSAmir Vadai 		return -EINVAL;
1081e3a2b7edSAmir Vadai 
1082e3a2b7edSAmir Vadai 	rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);
1083e3a2b7edSAmir Vadai 
1084961e8979SRoi Dayan 	mlx5e_tc_del_flow(priv, flow);
1085e3a2b7edSAmir Vadai 
1086a54e20b4SHadar Hen Zion 
1087e3a2b7edSAmir Vadai 	kfree(flow);
1088e3a2b7edSAmir Vadai 
1089e3a2b7edSAmir Vadai 	return 0;
1090e3a2b7edSAmir Vadai }
1091e3a2b7edSAmir Vadai 
1092aad7e08dSAmir Vadai int mlx5e_stats_flower(struct mlx5e_priv *priv,
1093aad7e08dSAmir Vadai 		       struct tc_cls_flower_offload *f)
1094aad7e08dSAmir Vadai {
1095aad7e08dSAmir Vadai 	struct mlx5e_tc_table *tc = &priv->fs.tc;
1096aad7e08dSAmir Vadai 	struct mlx5e_tc_flow *flow;
1097aad7e08dSAmir Vadai 	struct tc_action *a;
1098aad7e08dSAmir Vadai 	struct mlx5_fc *counter;
109922dc13c8SWANG Cong 	LIST_HEAD(actions);
1100aad7e08dSAmir Vadai 	u64 bytes;
1101aad7e08dSAmir Vadai 	u64 packets;
1102aad7e08dSAmir Vadai 	u64 lastuse;
1103aad7e08dSAmir Vadai 
1104aad7e08dSAmir Vadai 	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
1105aad7e08dSAmir Vadai 				      tc->ht_params);
1106aad7e08dSAmir Vadai 	if (!flow)
1107aad7e08dSAmir Vadai 		return -EINVAL;
1108aad7e08dSAmir Vadai 
1109aad7e08dSAmir Vadai 	counter = mlx5_flow_rule_counter(flow->rule);
1110aad7e08dSAmir Vadai 	if (!counter)
1111aad7e08dSAmir Vadai 		return 0;
1112aad7e08dSAmir Vadai 
1113aad7e08dSAmir Vadai 	mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
1114aad7e08dSAmir Vadai 
111522dc13c8SWANG Cong 	tcf_exts_to_list(f->exts, &actions);
111622dc13c8SWANG Cong 	list_for_each_entry(a, &actions, list)
1117aad7e08dSAmir Vadai 		tcf_action_stats_update(a, bytes, packets, lastuse);
1118aad7e08dSAmir Vadai 
1119aad7e08dSAmir Vadai 	return 0;
1120aad7e08dSAmir Vadai }
1121aad7e08dSAmir Vadai 
/* rhashtable layout for the per-priv TC flow table: entries are
 * mlx5e_tc_flow structs keyed by the TC cookie and linked via .node.
 */
static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
	.head_offset = offsetof(struct mlx5e_tc_flow, node),
	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};
1128e8f887acSAmir Vadai 
1129e8f887acSAmir Vadai int mlx5e_tc_init(struct mlx5e_priv *priv)
1130e8f887acSAmir Vadai {
1131acff797cSMaor Gottlieb 	struct mlx5e_tc_table *tc = &priv->fs.tc;
1132e8f887acSAmir Vadai 
1133e8f887acSAmir Vadai 	tc->ht_params = mlx5e_tc_flow_ht_params;
1134e8f887acSAmir Vadai 	return rhashtable_init(&tc->ht, &tc->ht_params);
1135e8f887acSAmir Vadai }
1136e8f887acSAmir Vadai 
/* rhashtable_free_and_destroy() callback: remove one flow from HW and
 * free it.  @ptr is the mlx5e_tc_flow entry, @arg the owning priv.
 */
static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
	struct mlx5e_priv *priv = arg;
	struct mlx5e_tc_flow *flow = ptr;

	mlx5e_tc_del_flow(priv, flow);
	kfree(flow);
}
1145e8f887acSAmir Vadai 
1146e8f887acSAmir Vadai void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
1147e8f887acSAmir Vadai {
1148acff797cSMaor Gottlieb 	struct mlx5e_tc_table *tc = &priv->fs.tc;
1149e8f887acSAmir Vadai 
1150e8f887acSAmir Vadai 	rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);
1151e8f887acSAmir Vadai 
1152acff797cSMaor Gottlieb 	if (!IS_ERR_OR_NULL(tc->t)) {
1153acff797cSMaor Gottlieb 		mlx5_destroy_flow_table(tc->t);
1154acff797cSMaor Gottlieb 		tc->t = NULL;
1155e8f887acSAmir Vadai 	}
1156e8f887acSAmir Vadai }
1157