1e8f887acSAmir Vadai /*
2e8f887acSAmir Vadai  * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
3e8f887acSAmir Vadai  *
4e8f887acSAmir Vadai  * This software is available to you under a choice of one of two
5e8f887acSAmir Vadai  * licenses.  You may choose to be licensed under the terms of the GNU
6e8f887acSAmir Vadai  * General Public License (GPL) Version 2, available from the file
7e8f887acSAmir Vadai  * COPYING in the main directory of this source tree, or the
8e8f887acSAmir Vadai  * OpenIB.org BSD license below:
9e8f887acSAmir Vadai  *
10e8f887acSAmir Vadai  *     Redistribution and use in source and binary forms, with or
11e8f887acSAmir Vadai  *     without modification, are permitted provided that the following
12e8f887acSAmir Vadai  *     conditions are met:
13e8f887acSAmir Vadai  *
14e8f887acSAmir Vadai  *      - Redistributions of source code must retain the above
15e8f887acSAmir Vadai  *        copyright notice, this list of conditions and the following
16e8f887acSAmir Vadai  *        disclaimer.
17e8f887acSAmir Vadai  *
18e8f887acSAmir Vadai  *      - Redistributions in binary form must reproduce the above
19e8f887acSAmir Vadai  *        copyright notice, this list of conditions and the following
20e8f887acSAmir Vadai  *        disclaimer in the documentation and/or other materials
21e8f887acSAmir Vadai  *        provided with the distribution.
22e8f887acSAmir Vadai  *
23e8f887acSAmir Vadai  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24e8f887acSAmir Vadai  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25e8f887acSAmir Vadai  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26e8f887acSAmir Vadai  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27e8f887acSAmir Vadai  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28e8f887acSAmir Vadai  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29e8f887acSAmir Vadai  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30e8f887acSAmir Vadai  * SOFTWARE.
31e8f887acSAmir Vadai  */
32e8f887acSAmir Vadai 
33e3a2b7edSAmir Vadai #include <net/flow_dissector.h>
343f7d0eb4SOr Gerlitz #include <net/sch_generic.h>
35e3a2b7edSAmir Vadai #include <net/pkt_cls.h>
36e3a2b7edSAmir Vadai #include <net/tc_act/tc_gact.h>
3712185a9fSAmir Vadai #include <net/tc_act/tc_skbedit.h>
38e8f887acSAmir Vadai #include <linux/mlx5/fs.h>
39e8f887acSAmir Vadai #include <linux/mlx5/device.h>
40e8f887acSAmir Vadai #include <linux/rhashtable.h>
4103a9d11eSOr Gerlitz #include <net/switchdev.h>
4203a9d11eSOr Gerlitz #include <net/tc_act/tc_mirred.h>
43776b12b6SOr Gerlitz #include <net/tc_act/tc_vlan.h>
44bbd00f7eSHadar Hen Zion #include <net/tc_act/tc_tunnel_key.h>
45a54e20b4SHadar Hen Zion #include <net/vxlan.h>
46e8f887acSAmir Vadai #include "en.h"
47e8f887acSAmir Vadai #include "en_tc.h"
4803a9d11eSOr Gerlitz #include "eswitch.h"
49bbd00f7eSHadar Hen Zion #include "vxlan.h"
50e8f887acSAmir Vadai 
/* Per-offloaded-TC-filter state.  Presumably keyed by the TC cookie in a
 * rhashtable (rhash_head member) — confirm against the table definition.
 */
struct mlx5e_tc_flow {
	struct rhash_head	node;	/* hash-table linkage */
	u64			cookie;	/* TC filter cookie identifying the flow */
	struct mlx5_flow_handle *rule;	/* HW rule; may hold an ERR_PTR when
					 * rule creation failed (see
					 * mlx5e_tc_del_flow)
					 */
	struct list_head	encap; /* flows sharing the same encap */
	struct mlx5_esw_flow_attr *attr; /* eswitch attrs; used for FDB flows */
};
58e8f887acSAmir Vadai 
/* Tunnel header types for encap offload.
 * NOTE(review): presumably the id passed when allocating an encap entry —
 * confirm against the encap allocation path.
 */
enum {
	MLX5_HEADER_TYPE_VXLAN = 0x0,
	MLX5_HEADER_TYPE_NVGRE = 0x1,
};

/* Sizing of the auto-grouped NIC TC offload flow table (see
 * mlx5e_tc_add_nic_flow)
 */
#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
#define MLX5E_TC_TABLE_NUM_GROUPS 4
66e8f887acSAmir Vadai 
/* Install a TC flow into the NIC (non-eswitch) steering path.
 *
 * Lazily creates the per-device TC flow table on first use, optionally
 * allocates a flow counter when only counting is requested, and adds a
 * rule matching on outer headers.
 *
 * Returns the new flow handle or an ERR_PTR.  On failure, a table that
 * was created here is destroyed again and the counter (if any) released.
 */
static struct mlx5_flow_handle *
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5_flow_spec *spec,
		      u32 action, u32 flow_tag)
{
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_flow_destination dest = { 0 };
	struct mlx5_flow_act flow_act = {
		.action = action,
		.flow_tag = flow_tag,
		.encap_id = 0,
	};
	struct mlx5_fc *counter = NULL;
	struct mlx5_flow_handle *rule;
	bool table_created = false;

	if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		/* accepted traffic continues to the vlan table */
		dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest.ft = priv->fs.vlan.ft.t;
	} else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		/* count-only action: the counter itself is the destination */
		counter = mlx5_fc_create(dev, true);
		if (IS_ERR(counter))
			return ERR_CAST(counter);

		dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest.counter = counter;
	}

	/* first TC filter: create the offload table on demand */
	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
		priv->fs.tc.t =
			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
							    MLX5E_TC_PRIO,
							    MLX5E_TC_TABLE_NUM_ENTRIES,
							    MLX5E_TC_TABLE_NUM_GROUPS,
							    0, 0);
		if (IS_ERR(priv->fs.tc.t)) {
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			rule = ERR_CAST(priv->fs.tc.t);
			goto err_create_ft;
		}

		table_created = true;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	rule = mlx5_add_flow_rules(priv->fs.tc.t, spec, &flow_act, &dest, 1);

	if (IS_ERR(rule))
		goto err_add_rule;

	return rule;

err_add_rule:
	/* only destroy the table if this call created it */
	if (table_created) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
err_create_ft:
	/* counter may be NULL here (FWD_DEST case); assumes mlx5_fc_destroy
	 * tolerates NULL — confirm in fs_counters
	 */
	mlx5_fc_destroy(dev, counter);

	return rule;
}
130e8f887acSAmir Vadai 
13174491de9SMark Bloch static struct mlx5_flow_handle *
13274491de9SMark Bloch mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
133adb4c123SOr Gerlitz 		      struct mlx5_flow_spec *spec,
134776b12b6SOr Gerlitz 		      struct mlx5_esw_flow_attr *attr)
135adb4c123SOr Gerlitz {
136adb4c123SOr Gerlitz 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1378b32580dSOr Gerlitz 	int err;
1388b32580dSOr Gerlitz 
1398b32580dSOr Gerlitz 	err = mlx5_eswitch_add_vlan_action(esw, attr);
1408b32580dSOr Gerlitz 	if (err)
1418b32580dSOr Gerlitz 		return ERR_PTR(err);
142adb4c123SOr Gerlitz 
143776b12b6SOr Gerlitz 	return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
144adb4c123SOr Gerlitz }
145adb4c123SOr Gerlitz 
/* Drop this flow's reference on its shared encap entry.
 *
 * The flow is unlinked from the entry's 'flows' list; if it was the
 * last user, the entry itself is torn down: HW encap id freed and the
 * neighbour reference dropped (only when e->n is set — presumably set
 * once neighbour resolution succeeded; confirm at the attach path),
 * then the entry is removed from its hash and freed.
 */
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow) {
	struct list_head *next = flow->encap.next;

	list_del(&flow->encap);
	if (list_empty(next)) {
		struct mlx5_encap_entry *e;

		/* the list is now empty, so 'next' is the list head embedded
		 * in the encap entry; recover the entry from it
		 */
		e = list_entry(next, struct mlx5_encap_entry, flows);
		if (e->n) {
			mlx5_encap_dealloc(priv->mdev, e->encap_id);
			neigh_release(e->n);
		}
		hlist_del_rcu(&e->encap_hlist);
		kfree(e);
	}
}
1635067b602SRoi Dayan 
/* we get here also when setting rule to the FW failed, etc. It means that the
 * flow rule itself might not exist, but some offloading related to the actions
 * should be cleaned.
 */
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_fc *counter = NULL;

	/* touch the HW rule and its counter only if rule creation succeeded */
	if (!IS_ERR(flow->rule)) {
		counter = mlx5_flow_rule_counter(flow->rule);
		mlx5_del_flow_rules(flow->rule);
		mlx5_fc_destroy(priv->mdev, counter);
	}

	/* eswitch (FDB) flows: undo the vlan action and, for encap flows,
	 * release the encap entry reference
	 */
	if (esw && esw->mode == SRIOV_OFFLOADS) {
		mlx5_eswitch_del_vlan_action(esw, flow->attr);
		if (flow->attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
			mlx5e_detach_encap(priv, flow);
	}

	/* tear down the NIC TC table once the last filter is removed */
	if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
}
191e8f887acSAmir Vadai 
192bbd00f7eSHadar Hen Zion static void parse_vxlan_attr(struct mlx5_flow_spec *spec,
193bbd00f7eSHadar Hen Zion 			     struct tc_cls_flower_offload *f)
194bbd00f7eSHadar Hen Zion {
195bbd00f7eSHadar Hen Zion 	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
196bbd00f7eSHadar Hen Zion 				       outer_headers);
197bbd00f7eSHadar Hen Zion 	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
198bbd00f7eSHadar Hen Zion 				       outer_headers);
199bbd00f7eSHadar Hen Zion 	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
200bbd00f7eSHadar Hen Zion 				    misc_parameters);
201bbd00f7eSHadar Hen Zion 	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
202bbd00f7eSHadar Hen Zion 				    misc_parameters);
203bbd00f7eSHadar Hen Zion 
204bbd00f7eSHadar Hen Zion 	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
205bbd00f7eSHadar Hen Zion 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
206bbd00f7eSHadar Hen Zion 
207bbd00f7eSHadar Hen Zion 	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
208bbd00f7eSHadar Hen Zion 		struct flow_dissector_key_keyid *key =
209bbd00f7eSHadar Hen Zion 			skb_flow_dissector_target(f->dissector,
210bbd00f7eSHadar Hen Zion 						  FLOW_DISSECTOR_KEY_ENC_KEYID,
211bbd00f7eSHadar Hen Zion 						  f->key);
212bbd00f7eSHadar Hen Zion 		struct flow_dissector_key_keyid *mask =
213bbd00f7eSHadar Hen Zion 			skb_flow_dissector_target(f->dissector,
214bbd00f7eSHadar Hen Zion 						  FLOW_DISSECTOR_KEY_ENC_KEYID,
215bbd00f7eSHadar Hen Zion 						  f->mask);
216bbd00f7eSHadar Hen Zion 		MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
217bbd00f7eSHadar Hen Zion 			 be32_to_cpu(mask->keyid));
218bbd00f7eSHadar Hen Zion 		MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
219bbd00f7eSHadar Hen Zion 			 be32_to_cpu(key->keyid));
220bbd00f7eSHadar Hen Zion 	}
221bbd00f7eSHadar Hen Zion }
222bbd00f7eSHadar Hen Zion 
/* Build the outer-header match for a tunnel (decap) flow.
 *
 * Only VXLAN is supported: the filter must match on a full UDP dst port
 * that is a known offloaded VXLAN port, and the device must support
 * vxlan encap/decap.  Also matches the outer IPv4 addresses when given,
 * and unconditionally enforces a DMAC match (needed by flow counters)
 * plus frag==0 so fragments stay in software.
 *
 * Returns 0 or -EOPNOTSUPP for matches that cannot be offloaded.
 */
static int parse_tunnel_attr(struct mlx5e_priv *priv,
			     struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);

	struct flow_dissector_key_control *enc_control =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_ENC_CONTROL,
					  f->key);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->mask);

		/* Full udp dst port must be given */
		if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
			goto vxlan_match_offload_err;

		/* dst port must be a port the driver knows as VXLAN and the
		 * HW must be capable of vxlan decap
		 */
		if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->dst)) &&
		    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
			parse_vxlan_attr(spec, f);
		else {
			netdev_warn(priv->netdev,
				    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->dst));
			return -EOPNOTSUPP;
		}

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_dport, ntohs(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_dport, ntohs(key->dst));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_sport, ntohs(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_sport, ntohs(key->src));
	} else { /* udp dst port must be given */
vxlan_match_offload_err:
		netdev_warn(priv->netdev,
			    "IP tunnel decap offload supported only for vxlan, must set UDP dport\n");
		return -EOPNOTSUPP;
	}

	if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->mask);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(key->src));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(key->dst));

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
	}

	/* Enforce DMAC when offloading incoming tunneled flows.
	 * Flow counters require a match on the DMAC.
	 */
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
	ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				     dmac_47_16), priv->netdev->dev_addr);

	/* let software handle IP fragments */
	MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);

	return 0;
}
317bbd00f7eSHadar Hen Zion 
/* Translate a flower classifier match into the mlx5 flow spec match
 * criteria/value.
 *
 * For tunnel (decap) flows the outer headers are filled by
 * parse_tunnel_attr() and the header pointers are then redirected to the
 * inner headers.  On success *min_inline is set to the minimal eswitch
 * inline mode the match requires (L2 / IP / TCP_UDP), based on which
 * fields are actually masked.
 *
 * Returns 0, -EOPNOTSUPP for unsupported match keys, or -EINVAL for an
 * unsupported L4 protocol with a ports match.
 */
static int __parse_cls_flower(struct mlx5e_priv *priv,
			      struct mlx5_flow_spec *spec,
			      struct tc_cls_flower_offload *f,
			      u8 *min_inline)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	u16 addr_type = 0;
	u8 ip_proto = 0;

	*min_inline = MLX5_INLINE_MODE_L2;

	/* reject filters using any match key we cannot offload */
	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS)	|
	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL))) {
		netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
			    f->dissector->used_keys);
		return -EOPNOTSUPP;
	}

	/* tunnel match: parse the outer headers, then switch the header
	 * pointers to the inner headers for the rest of the parsing
	 */
	if ((dissector_uses_key(f->dissector,
				FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
	    dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  f->key);
		switch (key->addr_type) {
		case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
			if (parse_tunnel_attr(priv, spec, f))
				return -EOPNOTSUPP;
			break;
		case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
			netdev_warn(priv->netdev,
				    "IPv6 tunnel decap offload isn't supported\n");
			/* fall through */
		default:
			return -EOPNOTSUPP;
		}

		/* In decap flow, header pointers should point to the inner
		 * headers, outer header were already set by parse_tunnel_attr
		 */
		headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
					 inner_headers);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);

		struct flow_dissector_key_control *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->mask);
		addr_type = key->addr_type;

		if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
				 key->flags & FLOW_DIS_IS_FRAGMENT);

			/* the HW doesn't need L3 inline to match on frag=no */
			if (key->flags & FLOW_DIS_IS_FRAGMENT)
				*min_inline = MLX5_INLINE_MODE_IP;
		}
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		ip_proto = key->ip_proto;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(key->n_proto));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 mask->ip_proto);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 key->ip_proto);

		/* matching on L4 protocol needs at least IP headers inline */
		if (mask->ip_proto)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);
		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     dmac_47_16),
				mask->dst);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     dmac_47_16),
				key->dst);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     smac_47_16),
				mask->src);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     smac_47_16),
				key->src);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->mask);
		/* only match the tag when an id or priority is actually
		 * being matched on
		 */
		if (mask->vlan_id || mask->vlan_priority) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, vlan_tag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, vlan_tag, 1);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);
		}
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &key->src, sizeof(key->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &key->dst, sizeof(key->dst));

		if (mask->src || mask->dst)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, sizeof(key->src));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, sizeof(key->dst));

		if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY ||
		    ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->mask);
		switch (ip_proto) {
		case IPPROTO_TCP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_dport, ntohs(key->dst));
			break;

		case IPPROTO_UDP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_dport, ntohs(key->dst));
			break;
		default:
			netdev_err(priv->netdev,
				   "Only UDP and TCP transport are supported\n");
			return -EINVAL;
		}

		/* matching on L4 ports needs TCP/UDP headers inline */
		if (mask->src || mask->dst)
			*min_inline = MLX5_INLINE_MODE_TCP_UDP;
	}

	return 0;
}
575e3a2b7edSAmir Vadai 
576de0af0bfSRoi Dayan static int parse_cls_flower(struct mlx5e_priv *priv,
577de0af0bfSRoi Dayan 			    struct mlx5_flow_spec *spec,
578de0af0bfSRoi Dayan 			    struct tc_cls_flower_offload *f)
579de0af0bfSRoi Dayan {
580de0af0bfSRoi Dayan 	struct mlx5_core_dev *dev = priv->mdev;
581de0af0bfSRoi Dayan 	struct mlx5_eswitch *esw = dev->priv.eswitch;
582de0af0bfSRoi Dayan 	struct mlx5_eswitch_rep *rep = priv->ppriv;
583de0af0bfSRoi Dayan 	u8 min_inline;
584de0af0bfSRoi Dayan 	int err;
585de0af0bfSRoi Dayan 
586de0af0bfSRoi Dayan 	err = __parse_cls_flower(priv, spec, f, &min_inline);
587de0af0bfSRoi Dayan 
588de0af0bfSRoi Dayan 	if (!err && esw->mode == SRIOV_OFFLOADS &&
589de0af0bfSRoi Dayan 	    rep->vport != FDB_UPLINK_VPORT) {
590de0af0bfSRoi Dayan 		if (min_inline > esw->offloads.inline_mode) {
591de0af0bfSRoi Dayan 			netdev_warn(priv->netdev,
592de0af0bfSRoi Dayan 				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
593de0af0bfSRoi Dayan 				    min_inline, esw->offloads.inline_mode);
594de0af0bfSRoi Dayan 			return -EOPNOTSUPP;
595de0af0bfSRoi Dayan 		}
596de0af0bfSRoi Dayan 	}
597de0af0bfSRoi Dayan 
598de0af0bfSRoi Dayan 	return err;
599de0af0bfSRoi Dayan }
600de0af0bfSRoi Dayan 
6015c40348cSOr Gerlitz static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
602e3a2b7edSAmir Vadai 				u32 *action, u32 *flow_tag)
603e3a2b7edSAmir Vadai {
604e3a2b7edSAmir Vadai 	const struct tc_action *a;
60522dc13c8SWANG Cong 	LIST_HEAD(actions);
606e3a2b7edSAmir Vadai 
607e3a2b7edSAmir Vadai 	if (tc_no_actions(exts))
608e3a2b7edSAmir Vadai 		return -EINVAL;
609e3a2b7edSAmir Vadai 
610e3a2b7edSAmir Vadai 	*flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
611e3a2b7edSAmir Vadai 	*action = 0;
612e3a2b7edSAmir Vadai 
61322dc13c8SWANG Cong 	tcf_exts_to_list(exts, &actions);
61422dc13c8SWANG Cong 	list_for_each_entry(a, &actions, list) {
615e3a2b7edSAmir Vadai 		/* Only support a single action per rule */
616e3a2b7edSAmir Vadai 		if (*action)
617e3a2b7edSAmir Vadai 			return -EINVAL;
618e3a2b7edSAmir Vadai 
619e3a2b7edSAmir Vadai 		if (is_tcf_gact_shot(a)) {
620e3a2b7edSAmir Vadai 			*action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
621aad7e08dSAmir Vadai 			if (MLX5_CAP_FLOWTABLE(priv->mdev,
622aad7e08dSAmir Vadai 					       flow_table_properties_nic_receive.flow_counter))
623aad7e08dSAmir Vadai 				*action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
624e3a2b7edSAmir Vadai 			continue;
625e3a2b7edSAmir Vadai 		}
626e3a2b7edSAmir Vadai 
627e3a2b7edSAmir Vadai 		if (is_tcf_skbedit_mark(a)) {
628e3a2b7edSAmir Vadai 			u32 mark = tcf_skbedit_mark(a);
629e3a2b7edSAmir Vadai 
630e3a2b7edSAmir Vadai 			if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
631e3a2b7edSAmir Vadai 				netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n",
632e3a2b7edSAmir Vadai 					    mark);
633e3a2b7edSAmir Vadai 				return -EINVAL;
634e3a2b7edSAmir Vadai 			}
635e3a2b7edSAmir Vadai 
636e3a2b7edSAmir Vadai 			*flow_tag = mark;
637e3a2b7edSAmir Vadai 			*action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
638e3a2b7edSAmir Vadai 			continue;
639e3a2b7edSAmir Vadai 		}
640e3a2b7edSAmir Vadai 
641e3a2b7edSAmir Vadai 		return -EINVAL;
642e3a2b7edSAmir Vadai 	}
643e3a2b7edSAmir Vadai 
644e3a2b7edSAmir Vadai 	return 0;
645e3a2b7edSAmir Vadai }
646e3a2b7edSAmir Vadai 
/* Byte-wise comparison of two encap tunnel descriptors; returns 0 on
 * a match.  NOTE: memcmp() also compares any struct padding, so both
 * operands must be fully zero-initialized before fields are filled in.
 */
static inline int cmp_encap_info(struct mlx5_encap_info *a,
				 struct mlx5_encap_info *b)
{
	return memcmp(a, b, sizeof(*a));
}
652a54e20b4SHadar Hen Zion 
/* Hash the raw bytes of a tunnel descriptor for the encap hashtable.
 * Same caveat as cmp_encap_info(): any struct padding is hashed too,
 * so the descriptor must be zero-initialized to hash reproducibly.
 */
static inline int hash_encap_info(struct mlx5_encap_info *info)
{
	return jhash(info, sizeof(*info), 0);
}
657a54e20b4SHadar Hen Zion 
658a54e20b4SHadar Hen Zion static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
659a54e20b4SHadar Hen Zion 				   struct net_device *mirred_dev,
660a54e20b4SHadar Hen Zion 				   struct net_device **out_dev,
661a54e20b4SHadar Hen Zion 				   struct flowi4 *fl4,
662a54e20b4SHadar Hen Zion 				   struct neighbour **out_n,
663a54e20b4SHadar Hen Zion 				   __be32 *saddr,
664a54e20b4SHadar Hen Zion 				   int *out_ttl)
665a54e20b4SHadar Hen Zion {
666a54e20b4SHadar Hen Zion 	struct rtable *rt;
667a54e20b4SHadar Hen Zion 	struct neighbour *n = NULL;
668a54e20b4SHadar Hen Zion 	int ttl;
669a54e20b4SHadar Hen Zion 
670a54e20b4SHadar Hen Zion #if IS_ENABLED(CONFIG_INET)
671abeffce9SArnd Bergmann 	int ret;
672abeffce9SArnd Bergmann 
673a54e20b4SHadar Hen Zion 	rt = ip_route_output_key(dev_net(mirred_dev), fl4);
674abeffce9SArnd Bergmann 	ret = PTR_ERR_OR_ZERO(rt);
675abeffce9SArnd Bergmann 	if (ret)
676abeffce9SArnd Bergmann 		return ret;
677a54e20b4SHadar Hen Zion #else
678a54e20b4SHadar Hen Zion 	return -EOPNOTSUPP;
679a54e20b4SHadar Hen Zion #endif
680a54e20b4SHadar Hen Zion 
681a54e20b4SHadar Hen Zion 	if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev)) {
682a42485ebSOr Gerlitz 		pr_warn("%s: can't offload, devices not on same HW e-switch\n", __func__);
683a54e20b4SHadar Hen Zion 		ip_rt_put(rt);
684a54e20b4SHadar Hen Zion 		return -EOPNOTSUPP;
685a54e20b4SHadar Hen Zion 	}
686a54e20b4SHadar Hen Zion 
687a54e20b4SHadar Hen Zion 	ttl = ip4_dst_hoplimit(&rt->dst);
688a54e20b4SHadar Hen Zion 	n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
689a54e20b4SHadar Hen Zion 	ip_rt_put(rt);
690a54e20b4SHadar Hen Zion 	if (!n)
691a54e20b4SHadar Hen Zion 		return -ENOMEM;
692a54e20b4SHadar Hen Zion 
693a54e20b4SHadar Hen Zion 	*out_n = n;
694a54e20b4SHadar Hen Zion 	*saddr = fl4->saddr;
695a54e20b4SHadar Hen Zion 	*out_ttl = ttl;
696a54e20b4SHadar Hen Zion 	*out_dev = rt->dst.dev;
697a54e20b4SHadar Hen Zion 
698a54e20b4SHadar Hen Zion 	return 0;
699a54e20b4SHadar Hen Zion }
700a54e20b4SHadar Hen Zion 
701a54e20b4SHadar Hen Zion static int gen_vxlan_header_ipv4(struct net_device *out_dev,
702a54e20b4SHadar Hen Zion 				 char buf[],
703a54e20b4SHadar Hen Zion 				 unsigned char h_dest[ETH_ALEN],
704a54e20b4SHadar Hen Zion 				 int ttl,
705a54e20b4SHadar Hen Zion 				 __be32 daddr,
706a54e20b4SHadar Hen Zion 				 __be32 saddr,
707a54e20b4SHadar Hen Zion 				 __be16 udp_dst_port,
708a54e20b4SHadar Hen Zion 				 __be32 vx_vni)
709a54e20b4SHadar Hen Zion {
710a54e20b4SHadar Hen Zion 	int encap_size = VXLAN_HLEN + sizeof(struct iphdr) + ETH_HLEN;
711a54e20b4SHadar Hen Zion 	struct ethhdr *eth = (struct ethhdr *)buf;
712a54e20b4SHadar Hen Zion 	struct iphdr  *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr));
713a54e20b4SHadar Hen Zion 	struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr));
714a54e20b4SHadar Hen Zion 	struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));
715a54e20b4SHadar Hen Zion 
716a54e20b4SHadar Hen Zion 	memset(buf, 0, encap_size);
717a54e20b4SHadar Hen Zion 
718a54e20b4SHadar Hen Zion 	ether_addr_copy(eth->h_dest, h_dest);
719a54e20b4SHadar Hen Zion 	ether_addr_copy(eth->h_source, out_dev->dev_addr);
720a54e20b4SHadar Hen Zion 	eth->h_proto = htons(ETH_P_IP);
721a54e20b4SHadar Hen Zion 
722a54e20b4SHadar Hen Zion 	ip->daddr = daddr;
723a54e20b4SHadar Hen Zion 	ip->saddr = saddr;
724a54e20b4SHadar Hen Zion 
725a54e20b4SHadar Hen Zion 	ip->ttl = ttl;
726a54e20b4SHadar Hen Zion 	ip->protocol = IPPROTO_UDP;
727a54e20b4SHadar Hen Zion 	ip->version = 0x4;
728a54e20b4SHadar Hen Zion 	ip->ihl = 0x5;
729a54e20b4SHadar Hen Zion 
730a54e20b4SHadar Hen Zion 	udp->dest = udp_dst_port;
731a54e20b4SHadar Hen Zion 	vxh->vx_flags = VXLAN_HF_VNI;
732a54e20b4SHadar Hen Zion 	vxh->vx_vni = vxlan_vni_field(vx_vni);
733a54e20b4SHadar Hen Zion 
734a54e20b4SHadar Hen Zion 	return encap_size;
735a54e20b4SHadar Hen Zion }
736a54e20b4SHadar Hen Zion 
/* Resolve the route/neighbour for encap entry @e, build the tunnel
 * header and allocate an encap ID in FW.  On success @e->n holds a
 * neighbour reference and @e->encap_id is valid; on failure the
 * neighbour reference (if taken) is dropped here.
 * Returns 0 or a negative errno (-EOPNOTSUPP for unsupported tunnel
 * types or a neighbour that is not yet NUD_VALID).
 */
static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
					  struct net_device *mirred_dev,
					  struct mlx5_encap_entry *e,
					  struct net_device **out_dev)
{
	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
	struct neighbour *n = NULL;
	struct flowi4 fl4 = {};
	char *encap_header;
	int encap_size;
	__be32 saddr;
	int ttl;
	int err;

	/* Scratch buffer sized to the largest header the HW accepts. */
	encap_header = kzalloc(max_encap_size, GFP_KERNEL);
	if (!encap_header)
		return -ENOMEM;

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		fl4.flowi4_proto = IPPROTO_UDP;
		fl4.fl4_dport = e->tun_info.tp_dst;
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
	}
	fl4.daddr = e->tun_info.daddr;

	err = mlx5e_route_lookup_ipv4(priv, mirred_dev, out_dev,
				      &fl4, &n, &saddr, &ttl);
	if (err)
		goto out;

	/* Ownership of the neighbour ref passes to @e on success;
	 * the error path below releases it when err != 0.
	 */
	e->n = n;
	e->out_dev = *out_dev;

	if (!(n->nud_state & NUD_VALID)) {
		pr_warn("%s: can't offload, neighbour to %pI4 invalid\n", __func__, &fl4.daddr);
		err = -EOPNOTSUPP;
		goto out;
	}

	neigh_ha_snapshot(e->h_dest, n, *out_dev);

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		encap_size = gen_vxlan_header_ipv4(*out_dev, encap_header,
						   e->h_dest, ttl,
						   e->tun_info.daddr,
						   saddr, e->tun_info.tp_dst,
						   e->tun_info.tun_id);
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
	}

	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
			       encap_size, encap_header, &e->encap_id);
out:
	/* Drop the neighbour ref only on failure; success keeps it in e->n. */
	if (err && n)
		neigh_release(n);
	kfree(encap_header);
	return err;
}
803a54e20b4SHadar Hen Zion 
804a54e20b4SHadar Hen Zion static int mlx5e_attach_encap(struct mlx5e_priv *priv,
805a54e20b4SHadar Hen Zion 			      struct ip_tunnel_info *tun_info,
806a54e20b4SHadar Hen Zion 			      struct net_device *mirred_dev,
807776b12b6SOr Gerlitz 			      struct mlx5_esw_flow_attr *attr)
80803a9d11eSOr Gerlitz {
809a54e20b4SHadar Hen Zion 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
810a54e20b4SHadar Hen Zion 	unsigned short family = ip_tunnel_info_af(tun_info);
811a54e20b4SHadar Hen Zion 	struct ip_tunnel_key *key = &tun_info->key;
812a54e20b4SHadar Hen Zion 	struct mlx5_encap_info info;
813a54e20b4SHadar Hen Zion 	struct mlx5_encap_entry *e;
814a54e20b4SHadar Hen Zion 	struct net_device *out_dev;
815a54e20b4SHadar Hen Zion 	uintptr_t hash_key;
816a54e20b4SHadar Hen Zion 	bool found = false;
817a54e20b4SHadar Hen Zion 	int tunnel_type;
818a54e20b4SHadar Hen Zion 	int err;
819a54e20b4SHadar Hen Zion 
8202fcd82e9SOr Gerlitz 	/* udp dst port must be set */
821a54e20b4SHadar Hen Zion 	if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst)))
8222fcd82e9SOr Gerlitz 		goto vxlan_encap_offload_err;
823a54e20b4SHadar Hen Zion 
824cd377663SOr Gerlitz 	/* setting udp src port isn't supported */
8252fcd82e9SOr Gerlitz 	if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) {
8262fcd82e9SOr Gerlitz vxlan_encap_offload_err:
8272fcd82e9SOr Gerlitz 		netdev_warn(priv->netdev,
8282fcd82e9SOr Gerlitz 			    "must set udp dst port and not set udp src port\n");
829cd377663SOr Gerlitz 		return -EOPNOTSUPP;
8302fcd82e9SOr Gerlitz 	}
831cd377663SOr Gerlitz 
832a54e20b4SHadar Hen Zion 	if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->tp_dst)) &&
833a54e20b4SHadar Hen Zion 	    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
834a54e20b4SHadar Hen Zion 		info.tp_dst = key->tp_dst;
835a54e20b4SHadar Hen Zion 		info.tun_id = tunnel_id_to_key32(key->tun_id);
836a54e20b4SHadar Hen Zion 		tunnel_type = MLX5_HEADER_TYPE_VXLAN;
837a54e20b4SHadar Hen Zion 	} else {
8382fcd82e9SOr Gerlitz 		netdev_warn(priv->netdev,
8392fcd82e9SOr Gerlitz 			    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst));
840a54e20b4SHadar Hen Zion 		return -EOPNOTSUPP;
841a54e20b4SHadar Hen Zion 	}
842a54e20b4SHadar Hen Zion 
843a54e20b4SHadar Hen Zion 	switch (family) {
844a54e20b4SHadar Hen Zion 	case AF_INET:
845a54e20b4SHadar Hen Zion 		info.daddr = key->u.ipv4.dst;
846a54e20b4SHadar Hen Zion 		break;
8472fcd82e9SOr Gerlitz 	case AF_INET6:
8482fcd82e9SOr Gerlitz 		netdev_warn(priv->netdev,
8492fcd82e9SOr Gerlitz 			    "IPv6 tunnel encap offload isn't supported\n");
850a54e20b4SHadar Hen Zion 	default:
851a54e20b4SHadar Hen Zion 		return -EOPNOTSUPP;
852a54e20b4SHadar Hen Zion 	}
853a54e20b4SHadar Hen Zion 
854a54e20b4SHadar Hen Zion 	hash_key = hash_encap_info(&info);
855a54e20b4SHadar Hen Zion 
856a54e20b4SHadar Hen Zion 	hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
857a54e20b4SHadar Hen Zion 				   encap_hlist, hash_key) {
858a54e20b4SHadar Hen Zion 		if (!cmp_encap_info(&e->tun_info, &info)) {
859a54e20b4SHadar Hen Zion 			found = true;
860a54e20b4SHadar Hen Zion 			break;
861a54e20b4SHadar Hen Zion 		}
862a54e20b4SHadar Hen Zion 	}
863a54e20b4SHadar Hen Zion 
864a54e20b4SHadar Hen Zion 	if (found) {
865a54e20b4SHadar Hen Zion 		attr->encap = e;
866a54e20b4SHadar Hen Zion 		return 0;
867a54e20b4SHadar Hen Zion 	}
868a54e20b4SHadar Hen Zion 
869a54e20b4SHadar Hen Zion 	e = kzalloc(sizeof(*e), GFP_KERNEL);
870a54e20b4SHadar Hen Zion 	if (!e)
871a54e20b4SHadar Hen Zion 		return -ENOMEM;
872a54e20b4SHadar Hen Zion 
873a54e20b4SHadar Hen Zion 	e->tun_info = info;
874a54e20b4SHadar Hen Zion 	e->tunnel_type = tunnel_type;
875a54e20b4SHadar Hen Zion 	INIT_LIST_HEAD(&e->flows);
876a54e20b4SHadar Hen Zion 
877a54e20b4SHadar Hen Zion 	err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e, &out_dev);
878a54e20b4SHadar Hen Zion 	if (err)
879a54e20b4SHadar Hen Zion 		goto out_err;
880a54e20b4SHadar Hen Zion 
881a54e20b4SHadar Hen Zion 	attr->encap = e;
882a54e20b4SHadar Hen Zion 	hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
883a54e20b4SHadar Hen Zion 
884a54e20b4SHadar Hen Zion 	return err;
885a54e20b4SHadar Hen Zion 
886a54e20b4SHadar Hen Zion out_err:
887a54e20b4SHadar Hen Zion 	kfree(e);
888a54e20b4SHadar Hen Zion 	return err;
889a54e20b4SHadar Hen Zion }
890a54e20b4SHadar Hen Zion 
891a54e20b4SHadar Hen Zion static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
892a54e20b4SHadar Hen Zion 				struct mlx5e_tc_flow *flow)
893a54e20b4SHadar Hen Zion {
894a54e20b4SHadar Hen Zion 	struct mlx5_esw_flow_attr *attr = flow->attr;
895a54e20b4SHadar Hen Zion 	struct ip_tunnel_info *info = NULL;
89603a9d11eSOr Gerlitz 	const struct tc_action *a;
89722dc13c8SWANG Cong 	LIST_HEAD(actions);
898a54e20b4SHadar Hen Zion 	bool encap = false;
899a54e20b4SHadar Hen Zion 	int err;
90003a9d11eSOr Gerlitz 
90103a9d11eSOr Gerlitz 	if (tc_no_actions(exts))
90203a9d11eSOr Gerlitz 		return -EINVAL;
90303a9d11eSOr Gerlitz 
904776b12b6SOr Gerlitz 	memset(attr, 0, sizeof(*attr));
905776b12b6SOr Gerlitz 	attr->in_rep = priv->ppriv;
90603a9d11eSOr Gerlitz 
90722dc13c8SWANG Cong 	tcf_exts_to_list(exts, &actions);
90822dc13c8SWANG Cong 	list_for_each_entry(a, &actions, list) {
90903a9d11eSOr Gerlitz 		if (is_tcf_gact_shot(a)) {
9108b32580dSOr Gerlitz 			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
91103a9d11eSOr Gerlitz 					MLX5_FLOW_CONTEXT_ACTION_COUNT;
91203a9d11eSOr Gerlitz 			continue;
91303a9d11eSOr Gerlitz 		}
91403a9d11eSOr Gerlitz 
9155724b8b5SShmulik Ladkani 		if (is_tcf_mirred_egress_redirect(a)) {
91603a9d11eSOr Gerlitz 			int ifindex = tcf_mirred_ifindex(a);
91703a9d11eSOr Gerlitz 			struct net_device *out_dev;
91803a9d11eSOr Gerlitz 			struct mlx5e_priv *out_priv;
91903a9d11eSOr Gerlitz 
92003a9d11eSOr Gerlitz 			out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);
92103a9d11eSOr Gerlitz 
922a54e20b4SHadar Hen Zion 			if (switchdev_port_same_parent_id(priv->netdev,
923a54e20b4SHadar Hen Zion 							  out_dev)) {
924e37a79e5SMark Bloch 				attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
925e37a79e5SMark Bloch 					MLX5_FLOW_CONTEXT_ACTION_COUNT;
92603a9d11eSOr Gerlitz 				out_priv = netdev_priv(out_dev);
927776b12b6SOr Gerlitz 				attr->out_rep = out_priv->ppriv;
928a54e20b4SHadar Hen Zion 			} else if (encap) {
929a54e20b4SHadar Hen Zion 				err = mlx5e_attach_encap(priv, info,
930a54e20b4SHadar Hen Zion 							 out_dev, attr);
931a54e20b4SHadar Hen Zion 				if (err)
932a54e20b4SHadar Hen Zion 					return err;
933a54e20b4SHadar Hen Zion 				list_add(&flow->encap, &attr->encap->flows);
934a54e20b4SHadar Hen Zion 				attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
935a54e20b4SHadar Hen Zion 					MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
936a54e20b4SHadar Hen Zion 					MLX5_FLOW_CONTEXT_ACTION_COUNT;
937a54e20b4SHadar Hen Zion 				out_priv = netdev_priv(attr->encap->out_dev);
938a54e20b4SHadar Hen Zion 				attr->out_rep = out_priv->ppriv;
939a54e20b4SHadar Hen Zion 			} else {
940a54e20b4SHadar Hen Zion 				pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
941a54e20b4SHadar Hen Zion 				       priv->netdev->name, out_dev->name);
942a54e20b4SHadar Hen Zion 				return -EINVAL;
943a54e20b4SHadar Hen Zion 			}
944a54e20b4SHadar Hen Zion 			continue;
945a54e20b4SHadar Hen Zion 		}
946a54e20b4SHadar Hen Zion 
947a54e20b4SHadar Hen Zion 		if (is_tcf_tunnel_set(a)) {
948a54e20b4SHadar Hen Zion 			info = tcf_tunnel_info(a);
949a54e20b4SHadar Hen Zion 			if (info)
950a54e20b4SHadar Hen Zion 				encap = true;
951a54e20b4SHadar Hen Zion 			else
952a54e20b4SHadar Hen Zion 				return -EOPNOTSUPP;
95303a9d11eSOr Gerlitz 			continue;
95403a9d11eSOr Gerlitz 		}
95503a9d11eSOr Gerlitz 
9568b32580dSOr Gerlitz 		if (is_tcf_vlan(a)) {
9578b32580dSOr Gerlitz 			if (tcf_vlan_action(a) == VLAN_F_POP) {
9588b32580dSOr Gerlitz 				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
9598b32580dSOr Gerlitz 			} else if (tcf_vlan_action(a) == VLAN_F_PUSH) {
9608b32580dSOr Gerlitz 				if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
9618b32580dSOr Gerlitz 					return -EOPNOTSUPP;
9628b32580dSOr Gerlitz 
9638b32580dSOr Gerlitz 				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
9648b32580dSOr Gerlitz 				attr->vlan = tcf_vlan_push_vid(a);
9658b32580dSOr Gerlitz 			}
9668b32580dSOr Gerlitz 			continue;
9678b32580dSOr Gerlitz 		}
9688b32580dSOr Gerlitz 
969bbd00f7eSHadar Hen Zion 		if (is_tcf_tunnel_release(a)) {
970bbd00f7eSHadar Hen Zion 			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
971bbd00f7eSHadar Hen Zion 			continue;
972bbd00f7eSHadar Hen Zion 		}
973bbd00f7eSHadar Hen Zion 
97403a9d11eSOr Gerlitz 		return -EINVAL;
97503a9d11eSOr Gerlitz 	}
97603a9d11eSOr Gerlitz 	return 0;
97703a9d11eSOr Gerlitz }
97803a9d11eSOr Gerlitz 
/* Offload a flower classifier: parse the match and actions, add the
 * rule either to the eswitch FDB (SRIOV offloads mode) or to the NIC
 * TC tables, and track the flow in the per-device hashtable keyed by
 * the TC filter cookie.  Returns 0 or a negative errno.
 */
int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
			   struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	int err = 0;
	bool fdb_flow = false;
	u32 flow_tag, action;
	struct mlx5e_tc_flow *flow;
	struct mlx5_flow_spec *spec;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (esw && esw->mode == SRIOV_OFFLOADS)
		fdb_flow = true;

	/* FDB flows carry a trailing mlx5_esw_flow_attr in the same
	 * allocation (see the flow->attr assignment below).
	 */
	if (fdb_flow)
		flow = kzalloc(sizeof(*flow) +
			       sizeof(struct mlx5_esw_flow_attr),
			       GFP_KERNEL);
	else
		flow = kzalloc(sizeof(*flow), GFP_KERNEL);

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec || !flow) {
		err = -ENOMEM;
		goto err_free;
	}

	flow->cookie = f->cookie;

	err = parse_cls_flower(priv, spec, f);
	if (err < 0)
		goto err_free;

	if (fdb_flow) {
		flow->attr  = (struct mlx5_esw_flow_attr *)(flow + 1);
		err = parse_tc_fdb_actions(priv, f->exts, flow);
		if (err < 0)
			goto err_free;
		flow->rule = mlx5e_tc_add_fdb_flow(priv, spec, flow->attr);
	} else {
		err = parse_tc_nic_actions(priv, f->exts, &action, &flow_tag);
		if (err < 0)
			goto err_free;
		flow->rule = mlx5e_tc_add_nic_flow(priv, spec, action, flow_tag);
	}

	if (IS_ERR(flow->rule)) {
		err = PTR_ERR(flow->rule);
		/* NOTE(review): this path reaches mlx5e_tc_del_flow() with
		 * flow->rule holding an ERR_PTR - confirm the teardown
		 * helper tolerates that (can't tell from here).
		 */
		goto err_del_rule;
	}

	err = rhashtable_insert_fast(&tc->ht, &flow->node,
				     tc->ht_params);
	if (err)
		goto err_del_rule;

	goto out;

err_del_rule:
	mlx5e_tc_del_flow(priv, flow);

err_free:
	/* kfree(NULL) is a no-op, so the !flow case is safe here. */
	kfree(flow);
out:
	kvfree(spec);
	return err;
}
1046e3a2b7edSAmir Vadai 
1047e3a2b7edSAmir Vadai int mlx5e_delete_flower(struct mlx5e_priv *priv,
1048e3a2b7edSAmir Vadai 			struct tc_cls_flower_offload *f)
1049e3a2b7edSAmir Vadai {
1050e3a2b7edSAmir Vadai 	struct mlx5e_tc_flow *flow;
1051acff797cSMaor Gottlieb 	struct mlx5e_tc_table *tc = &priv->fs.tc;
1052e3a2b7edSAmir Vadai 
1053e3a2b7edSAmir Vadai 	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
1054e3a2b7edSAmir Vadai 				      tc->ht_params);
1055e3a2b7edSAmir Vadai 	if (!flow)
1056e3a2b7edSAmir Vadai 		return -EINVAL;
1057e3a2b7edSAmir Vadai 
1058e3a2b7edSAmir Vadai 	rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);
1059e3a2b7edSAmir Vadai 
1060961e8979SRoi Dayan 	mlx5e_tc_del_flow(priv, flow);
1061e3a2b7edSAmir Vadai 
1062a54e20b4SHadar Hen Zion 
1063e3a2b7edSAmir Vadai 	kfree(flow);
1064e3a2b7edSAmir Vadai 
1065e3a2b7edSAmir Vadai 	return 0;
1066e3a2b7edSAmir Vadai }
1067e3a2b7edSAmir Vadai 
1068aad7e08dSAmir Vadai int mlx5e_stats_flower(struct mlx5e_priv *priv,
1069aad7e08dSAmir Vadai 		       struct tc_cls_flower_offload *f)
1070aad7e08dSAmir Vadai {
1071aad7e08dSAmir Vadai 	struct mlx5e_tc_table *tc = &priv->fs.tc;
1072aad7e08dSAmir Vadai 	struct mlx5e_tc_flow *flow;
1073aad7e08dSAmir Vadai 	struct tc_action *a;
1074aad7e08dSAmir Vadai 	struct mlx5_fc *counter;
107522dc13c8SWANG Cong 	LIST_HEAD(actions);
1076aad7e08dSAmir Vadai 	u64 bytes;
1077aad7e08dSAmir Vadai 	u64 packets;
1078aad7e08dSAmir Vadai 	u64 lastuse;
1079aad7e08dSAmir Vadai 
1080aad7e08dSAmir Vadai 	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
1081aad7e08dSAmir Vadai 				      tc->ht_params);
1082aad7e08dSAmir Vadai 	if (!flow)
1083aad7e08dSAmir Vadai 		return -EINVAL;
1084aad7e08dSAmir Vadai 
1085aad7e08dSAmir Vadai 	counter = mlx5_flow_rule_counter(flow->rule);
1086aad7e08dSAmir Vadai 	if (!counter)
1087aad7e08dSAmir Vadai 		return 0;
1088aad7e08dSAmir Vadai 
1089aad7e08dSAmir Vadai 	mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
1090aad7e08dSAmir Vadai 
109122dc13c8SWANG Cong 	tcf_exts_to_list(f->exts, &actions);
109222dc13c8SWANG Cong 	list_for_each_entry(a, &actions, list)
1093aad7e08dSAmir Vadai 		tcf_action_stats_update(a, bytes, packets, lastuse);
1094aad7e08dSAmir Vadai 
1095aad7e08dSAmir Vadai 	return 0;
1096aad7e08dSAmir Vadai }
1097aad7e08dSAmir Vadai 
/* Hashtable parameters for the per-device TC flow table: entries are
 * mlx5e_tc_flow structs keyed by their TC filter cookie.
 */
static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
	.head_offset = offsetof(struct mlx5e_tc_flow, node),
	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};
1104e8f887acSAmir Vadai 
1105e8f887acSAmir Vadai int mlx5e_tc_init(struct mlx5e_priv *priv)
1106e8f887acSAmir Vadai {
1107acff797cSMaor Gottlieb 	struct mlx5e_tc_table *tc = &priv->fs.tc;
1108e8f887acSAmir Vadai 
1109e8f887acSAmir Vadai 	tc->ht_params = mlx5e_tc_flow_ht_params;
1110e8f887acSAmir Vadai 	return rhashtable_init(&tc->ht, &tc->ht_params);
1111e8f887acSAmir Vadai }
1112e8f887acSAmir Vadai 
/* rhashtable_free_and_destroy() callback: delete one flow's HW rule
 * and free its tracking entry.  @arg is the owning mlx5e_priv.
 */
static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
	struct mlx5e_priv *priv = arg;
	struct mlx5e_tc_flow *flow = ptr;

	mlx5e_tc_del_flow(priv, flow);
	kfree(flow);
}
1121e8f887acSAmir Vadai 
1122e8f887acSAmir Vadai void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
1123e8f887acSAmir Vadai {
1124acff797cSMaor Gottlieb 	struct mlx5e_tc_table *tc = &priv->fs.tc;
1125e8f887acSAmir Vadai 
1126e8f887acSAmir Vadai 	rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);
1127e8f887acSAmir Vadai 
1128acff797cSMaor Gottlieb 	if (!IS_ERR_OR_NULL(tc->t)) {
1129acff797cSMaor Gottlieb 		mlx5_destroy_flow_table(tc->t);
1130acff797cSMaor Gottlieb 		tc->t = NULL;
1131e8f887acSAmir Vadai 	}
1132e8f887acSAmir Vadai }
1133