1e8f887acSAmir Vadai /*
2e8f887acSAmir Vadai  * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
3e8f887acSAmir Vadai  *
4e8f887acSAmir Vadai  * This software is available to you under a choice of one of two
5e8f887acSAmir Vadai  * licenses.  You may choose to be licensed under the terms of the GNU
6e8f887acSAmir Vadai  * General Public License (GPL) Version 2, available from the file
7e8f887acSAmir Vadai  * COPYING in the main directory of this source tree, or the
8e8f887acSAmir Vadai  * OpenIB.org BSD license below:
9e8f887acSAmir Vadai  *
10e8f887acSAmir Vadai  *     Redistribution and use in source and binary forms, with or
11e8f887acSAmir Vadai  *     without modification, are permitted provided that the following
12e8f887acSAmir Vadai  *     conditions are met:
13e8f887acSAmir Vadai  *
14e8f887acSAmir Vadai  *      - Redistributions of source code must retain the above
15e8f887acSAmir Vadai  *        copyright notice, this list of conditions and the following
16e8f887acSAmir Vadai  *        disclaimer.
17e8f887acSAmir Vadai  *
18e8f887acSAmir Vadai  *      - Redistributions in binary form must reproduce the above
19e8f887acSAmir Vadai  *        copyright notice, this list of conditions and the following
20e8f887acSAmir Vadai  *        disclaimer in the documentation and/or other materials
21e8f887acSAmir Vadai  *        provided with the distribution.
22e8f887acSAmir Vadai  *
23e8f887acSAmir Vadai  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24e8f887acSAmir Vadai  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25e8f887acSAmir Vadai  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26e8f887acSAmir Vadai  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27e8f887acSAmir Vadai  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28e8f887acSAmir Vadai  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29e8f887acSAmir Vadai  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30e8f887acSAmir Vadai  * SOFTWARE.
31e8f887acSAmir Vadai  */
32e8f887acSAmir Vadai 
33e3a2b7edSAmir Vadai #include <net/flow_dissector.h>
34e3a2b7edSAmir Vadai #include <net/pkt_cls.h>
35e3a2b7edSAmir Vadai #include <net/tc_act/tc_gact.h>
3612185a9fSAmir Vadai #include <net/tc_act/tc_skbedit.h>
37e8f887acSAmir Vadai #include <linux/mlx5/fs.h>
38e8f887acSAmir Vadai #include <linux/mlx5/device.h>
39e8f887acSAmir Vadai #include <linux/rhashtable.h>
4003a9d11eSOr Gerlitz #include <net/switchdev.h>
4103a9d11eSOr Gerlitz #include <net/tc_act/tc_mirred.h>
42776b12b6SOr Gerlitz #include <net/tc_act/tc_vlan.h>
43bbd00f7eSHadar Hen Zion #include <net/tc_act/tc_tunnel_key.h>
44a54e20b4SHadar Hen Zion #include <net/vxlan.h>
45e8f887acSAmir Vadai #include "en.h"
46e8f887acSAmir Vadai #include "en_tc.h"
4703a9d11eSOr Gerlitz #include "eswitch.h"
48bbd00f7eSHadar Hen Zion #include "vxlan.h"
49e8f887acSAmir Vadai 
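/* A single offloaded TC flower rule, hashed in priv->fs.tc.ht by the flower
 * cookie. @rule is the installed steering entry; @attr is only used for
 * e-switch (FDB) flows.
 */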
50e8f887acSAmir Vadai struct mlx5e_tc_flow {
51e8f887acSAmir Vadai 	struct rhash_head	node;
52e8f887acSAmir Vadai 	u64			cookie;
5374491de9SMark Bloch 	struct mlx5_flow_handle *rule;
54a54e20b4SHadar Hen Zion 	struct list_head	encap; /* flows sharing the same encap */
55776b12b6SOr Gerlitz 	struct mlx5_esw_flow_attr *attr;
56e8f887acSAmir Vadai };
57e8f887acSAmir Vadai 
58a54e20b4SHadar Hen Zion enum {
59a54e20b4SHadar Hen Zion 	MLX5_HEADER_TYPE_VXLAN = 0x0,
60a54e20b4SHadar Hen Zion 	MLX5_HEADER_TYPE_NVGRE = 0x1,
61a54e20b4SHadar Hen Zion };
62a54e20b4SHadar Hen Zion 
63acff797cSMaor Gottlieb #define MLX5E_TC_TABLE_NUM_ENTRIES 1024
64acff797cSMaor Gottlieb #define MLX5E_TC_TABLE_NUM_GROUPS 4
65e8f887acSAmir Vadai 
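/* Add a rule to the NIC TC flow table. The table is created lazily on first
 * use. For forward rules the destination is the vlan flow table; for
 * drop+count rules a flow counter is created and used as the destination.
 * On failure, a table created here is destroyed again.
 */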
6674491de9SMark Bloch static struct mlx5_flow_handle *
6774491de9SMark Bloch mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
68c5bb1730SMaor Gottlieb 		      struct mlx5_flow_spec *spec,
69e8f887acSAmir Vadai 		      u32 action, u32 flow_tag)
70e8f887acSAmir Vadai {
71aad7e08dSAmir Vadai 	struct mlx5_core_dev *dev = priv->mdev;
72aad7e08dSAmir Vadai 	struct mlx5_flow_destination dest = { 0 };
7366958ed9SHadar Hen Zion 	struct mlx5_flow_act flow_act = {
7466958ed9SHadar Hen Zion 		.action = action,
7566958ed9SHadar Hen Zion 		.flow_tag = flow_tag,
7666958ed9SHadar Hen Zion 		.encap_id = 0,
7766958ed9SHadar Hen Zion 	};
78aad7e08dSAmir Vadai 	struct mlx5_fc *counter = NULL;
7974491de9SMark Bloch 	struct mlx5_flow_handle *rule;
80e8f887acSAmir Vadai 	bool table_created = false;
81e8f887acSAmir Vadai 
82aad7e08dSAmir Vadai 	if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
83aad7e08dSAmir Vadai 		dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
84aad7e08dSAmir Vadai 		dest.ft = priv->fs.vlan.ft.t;
8555130287SOr Gerlitz 	} else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
86aad7e08dSAmir Vadai 		counter = mlx5_fc_create(dev, true);
87aad7e08dSAmir Vadai 		if (IS_ERR(counter))
88aad7e08dSAmir Vadai 			return ERR_CAST(counter);
89aad7e08dSAmir Vadai 
90aad7e08dSAmir Vadai 		dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
91aad7e08dSAmir Vadai 		dest.counter = counter;
92aad7e08dSAmir Vadai 	}
93aad7e08dSAmir Vadai 
94acff797cSMaor Gottlieb 	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
95acff797cSMaor Gottlieb 		priv->fs.tc.t =
96acff797cSMaor Gottlieb 			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
97acff797cSMaor Gottlieb 							    MLX5E_TC_PRIO,
98acff797cSMaor Gottlieb 							    MLX5E_TC_TABLE_NUM_ENTRIES,
99acff797cSMaor Gottlieb 							    MLX5E_TC_TABLE_NUM_GROUPS,
100c9f1b073SHadar Hen Zion 							    0, 0);
101acff797cSMaor Gottlieb 		if (IS_ERR(priv->fs.tc.t)) {
102e8f887acSAmir Vadai 			netdev_err(priv->netdev,
103e8f887acSAmir Vadai 				   "Failed to create tc offload table\n");
104aad7e08dSAmir Vadai 			rule = ERR_CAST(priv->fs.tc.t);
105aad7e08dSAmir Vadai 			goto err_create_ft;
106e8f887acSAmir Vadai 		}
107e8f887acSAmir Vadai 
108e8f887acSAmir Vadai 		table_created = true;
109e8f887acSAmir Vadai 	}
110e8f887acSAmir Vadai 
111c5bb1730SMaor Gottlieb 	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
11266958ed9SHadar Hen Zion 	rule = mlx5_add_flow_rules(priv->fs.tc.t, spec, &flow_act, &dest, 1);
113e8f887acSAmir Vadai 
114aad7e08dSAmir Vadai 	if (IS_ERR(rule))
115aad7e08dSAmir Vadai 		goto err_add_rule;
116aad7e08dSAmir Vadai 
117aad7e08dSAmir Vadai 	return rule;
118aad7e08dSAmir Vadai 
119aad7e08dSAmir Vadai err_add_rule:
120aad7e08dSAmir Vadai 	if (table_created) {
121acff797cSMaor Gottlieb 		mlx5_destroy_flow_table(priv->fs.tc.t);
122acff797cSMaor Gottlieb 		priv->fs.tc.t = NULL;
123e8f887acSAmir Vadai 	}
124aad7e08dSAmir Vadai err_create_ft:
125aad7e08dSAmir Vadai 	mlx5_fc_destroy(dev, counter);
126e8f887acSAmir Vadai 
127e8f887acSAmir Vadai 	return rule;
128e8f887acSAmir Vadai }
129e8f887acSAmir Vadai 
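/* Add a rule to the e-switch FDB offloads table: apply the vlan push/pop
 * action (if any) first, then install the offloaded rule.
 */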
13074491de9SMark Bloch static struct mlx5_flow_handle *
13174491de9SMark Bloch mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
132adb4c123SOr Gerlitz 		      struct mlx5_flow_spec *spec,
133776b12b6SOr Gerlitz 		      struct mlx5_esw_flow_attr *attr)
134adb4c123SOr Gerlitz {
135adb4c123SOr Gerlitz 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1368b32580dSOr Gerlitz 	int err;
1378b32580dSOr Gerlitz 
1388b32580dSOr Gerlitz 	err = mlx5_eswitch_add_vlan_action(esw, attr);
1398b32580dSOr Gerlitz 	if (err)
1408b32580dSOr Gerlitz 		return ERR_PTR(err);
141adb4c123SOr Gerlitz 
142776b12b6SOr Gerlitz 	return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
143adb4c123SOr Gerlitz }
144adb4c123SOr Gerlitz 
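/* Delete an offloaded flow: remove the steering rule, undo e-switch vlan
 * actions, free the flow counter and, when the last NIC TC filter is gone,
 * destroy the TC flow table.
 */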
145e8f887acSAmir Vadai static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
14674491de9SMark Bloch 			      struct mlx5_flow_handle *rule,
1478b32580dSOr Gerlitz 			      struct mlx5_esw_flow_attr *attr)
148e8f887acSAmir Vadai {
1498b32580dSOr Gerlitz 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
150aad7e08dSAmir Vadai 	struct mlx5_fc *counter = NULL;
151aad7e08dSAmir Vadai 
152aad7e08dSAmir Vadai 	counter = mlx5_flow_rule_counter(rule);
153aad7e08dSAmir Vadai 
15486a33ae1SRoi Dayan 	mlx5_del_flow_rules(rule);
15586a33ae1SRoi Dayan 
1568b32580dSOr Gerlitz 	if (esw && esw->mode == SRIOV_OFFLOADS)
1578b32580dSOr Gerlitz 		mlx5_eswitch_del_vlan_action(esw, attr);
1588b32580dSOr Gerlitz 
159aad7e08dSAmir Vadai 	mlx5_fc_destroy(priv->mdev, counter);
160aad7e08dSAmir Vadai 
1615c40348cSOr Gerlitz 	if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
162acff797cSMaor Gottlieb 		mlx5_destroy_flow_table(priv->fs.tc.t);
163acff797cSMaor Gottlieb 		priv->fs.tc.t = NULL;
164e8f887acSAmir Vadai 	}
165e8f887acSAmir Vadai }
166e8f887acSAmir Vadai 
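/* Set a VXLAN match in the flow spec: UDP as the IP protocol and, when the
 * filter matches on the tunnel key id, the VXLAN VNI.
 */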
167bbd00f7eSHadar Hen Zion static void parse_vxlan_attr(struct mlx5_flow_spec *spec,
168bbd00f7eSHadar Hen Zion 			     struct tc_cls_flower_offload *f)
169bbd00f7eSHadar Hen Zion {
170bbd00f7eSHadar Hen Zion 	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
171bbd00f7eSHadar Hen Zion 				       outer_headers);
172bbd00f7eSHadar Hen Zion 	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
173bbd00f7eSHadar Hen Zion 				       outer_headers);
174bbd00f7eSHadar Hen Zion 	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
175bbd00f7eSHadar Hen Zion 				    misc_parameters);
176bbd00f7eSHadar Hen Zion 	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
177bbd00f7eSHadar Hen Zion 				    misc_parameters);
178bbd00f7eSHadar Hen Zion 
179bbd00f7eSHadar Hen Zion 	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
180bbd00f7eSHadar Hen Zion 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
181bbd00f7eSHadar Hen Zion 
182bbd00f7eSHadar Hen Zion 	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
183bbd00f7eSHadar Hen Zion 		struct flow_dissector_key_keyid *key =
184bbd00f7eSHadar Hen Zion 			skb_flow_dissector_target(f->dissector,
185bbd00f7eSHadar Hen Zion 						  FLOW_DISSECTOR_KEY_ENC_KEYID,
186bbd00f7eSHadar Hen Zion 						  f->key);
187bbd00f7eSHadar Hen Zion 		struct flow_dissector_key_keyid *mask =
188bbd00f7eSHadar Hen Zion 			skb_flow_dissector_target(f->dissector,
189bbd00f7eSHadar Hen Zion 						  FLOW_DISSECTOR_KEY_ENC_KEYID,
190bbd00f7eSHadar Hen Zion 						  f->mask);
191bbd00f7eSHadar Hen Zion 		MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
192bbd00f7eSHadar Hen Zion 			 be32_to_cpu(mask->keyid));
193bbd00f7eSHadar Hen Zion 		MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
194bbd00f7eSHadar Hen Zion 			 be32_to_cpu(key->keyid));
195bbd00f7eSHadar Hen Zion 	}
196bbd00f7eSHadar Hen Zion }
197bbd00f7eSHadar Hen Zion 
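/* Parse the outer (tunnel) part of a flower match: UDP destination port,
 * outer IPv4 addresses and VXLAN attributes. Only VXLAN with a fully masked
 * destination port is supported; IP fragments are left to software.
 */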
198bbd00f7eSHadar Hen Zion static int parse_tunnel_attr(struct mlx5e_priv *priv,
199bbd00f7eSHadar Hen Zion 			     struct mlx5_flow_spec *spec,
200bbd00f7eSHadar Hen Zion 			     struct tc_cls_flower_offload *f)
201bbd00f7eSHadar Hen Zion {
202bbd00f7eSHadar Hen Zion 	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
203bbd00f7eSHadar Hen Zion 				       outer_headers);
204bbd00f7eSHadar Hen Zion 	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
205bbd00f7eSHadar Hen Zion 				       outer_headers);
206bbd00f7eSHadar Hen Zion 
207bbd00f7eSHadar Hen Zion 	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
208bbd00f7eSHadar Hen Zion 		struct flow_dissector_key_ports *key =
209bbd00f7eSHadar Hen Zion 			skb_flow_dissector_target(f->dissector,
210bbd00f7eSHadar Hen Zion 						  FLOW_DISSECTOR_KEY_ENC_PORTS,
211bbd00f7eSHadar Hen Zion 						  f->key);
212bbd00f7eSHadar Hen Zion 		struct flow_dissector_key_ports *mask =
213bbd00f7eSHadar Hen Zion 			skb_flow_dissector_target(f->dissector,
214bbd00f7eSHadar Hen Zion 						  FLOW_DISSECTOR_KEY_ENC_PORTS,
215bbd00f7eSHadar Hen Zion 						  f->mask);
216bbd00f7eSHadar Hen Zion 
217bbd00f7eSHadar Hen Zion 		/* Full udp dst port must be given */
218bbd00f7eSHadar Hen Zion 		if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
219bbd00f7eSHadar Hen Zion 			return -EOPNOTSUPP;
220bbd00f7eSHadar Hen Zion 
221bbd00f7eSHadar Hen Zion 		/* udp src port isn't supported */
222bbd00f7eSHadar Hen Zion 		if (memchr_inv(&mask->src, 0, sizeof(mask->src)))
223bbd00f7eSHadar Hen Zion 			return -EOPNOTSUPP;
224bbd00f7eSHadar Hen Zion 
225bbd00f7eSHadar Hen Zion 		if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->dst)) &&
226bbd00f7eSHadar Hen Zion 		    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
227bbd00f7eSHadar Hen Zion 			parse_vxlan_attr(spec, f);
228bbd00f7eSHadar Hen Zion 		else
229bbd00f7eSHadar Hen Zion 			return -EOPNOTSUPP;
230bbd00f7eSHadar Hen Zion 
231bbd00f7eSHadar Hen Zion 		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
232bbd00f7eSHadar Hen Zion 			 udp_dport, ntohs(mask->dst));
233bbd00f7eSHadar Hen Zion 		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
234bbd00f7eSHadar Hen Zion 			 udp_dport, ntohs(key->dst));
235bbd00f7eSHadar Hen Zion 
236bbd00f7eSHadar Hen Zion 	} else { /* udp dst port must be given */
237bbd00f7eSHadar Hen Zion 		return -EOPNOTSUPP;
238bbd00f7eSHadar Hen Zion 	}
239bbd00f7eSHadar Hen Zion 
240bbd00f7eSHadar Hen Zion 	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
241bbd00f7eSHadar Hen Zion 		struct flow_dissector_key_ipv4_addrs *key =
242bbd00f7eSHadar Hen Zion 			skb_flow_dissector_target(f->dissector,
243bbd00f7eSHadar Hen Zion 						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
244bbd00f7eSHadar Hen Zion 						  f->key);
245bbd00f7eSHadar Hen Zion 		struct flow_dissector_key_ipv4_addrs *mask =
246bbd00f7eSHadar Hen Zion 			skb_flow_dissector_target(f->dissector,
247bbd00f7eSHadar Hen Zion 						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
248bbd00f7eSHadar Hen Zion 						  f->mask);
249bbd00f7eSHadar Hen Zion 		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
250bbd00f7eSHadar Hen Zion 			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
251bbd00f7eSHadar Hen Zion 			 ntohl(mask->src));
252bbd00f7eSHadar Hen Zion 		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
253bbd00f7eSHadar Hen Zion 			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
254bbd00f7eSHadar Hen Zion 			 ntohl(key->src));
255bbd00f7eSHadar Hen Zion 
256bbd00f7eSHadar Hen Zion 		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
257bbd00f7eSHadar Hen Zion 			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
258bbd00f7eSHadar Hen Zion 			 ntohl(mask->dst));
259bbd00f7eSHadar Hen Zion 		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
260bbd00f7eSHadar Hen Zion 			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
261bbd00f7eSHadar Hen Zion 			 ntohl(key->dst));
262bbd00f7eSHadar Hen Zion 	}
263bbd00f7eSHadar Hen Zion 
264bbd00f7eSHadar Hen Zion 	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
265bbd00f7eSHadar Hen Zion 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
266bbd00f7eSHadar Hen Zion 
267bbd00f7eSHadar Hen Zion 	/* Enforce DMAC when offloading incoming tunneled flows.
268bbd00f7eSHadar Hen Zion 	 * Flow counters require a match on the DMAC.
269bbd00f7eSHadar Hen Zion 	 */
270bbd00f7eSHadar Hen Zion 	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
271bbd00f7eSHadar Hen Zion 	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
272bbd00f7eSHadar Hen Zion 	ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
273bbd00f7eSHadar Hen Zion 				     dmac_47_16), priv->netdev->dev_addr);
274bbd00f7eSHadar Hen Zion 
275bbd00f7eSHadar Hen Zion 	/* let software handle IP fragments */
276bbd00f7eSHadar Hen Zion 	MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
277bbd00f7eSHadar Hen Zion 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);
278bbd00f7eSHadar Hen Zion 
279bbd00f7eSHadar Hen Zion 	return 0;
280bbd00f7eSHadar Hen Zion }
281bbd00f7eSHadar Hen Zion 
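/* Translate the flower match keys into an mlx5 flow spec and report the
 * minimal inline mode (L2/IP/TCP_UDP) the match requires from the e-switch.
 */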
282de0af0bfSRoi Dayan static int __parse_cls_flower(struct mlx5e_priv *priv,
283de0af0bfSRoi Dayan 			      struct mlx5_flow_spec *spec,
284de0af0bfSRoi Dayan 			      struct tc_cls_flower_offload *f,
285de0af0bfSRoi Dayan 			      u8 *min_inline)
286e3a2b7edSAmir Vadai {
287c5bb1730SMaor Gottlieb 	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
288c5bb1730SMaor Gottlieb 				       outer_headers);
289c5bb1730SMaor Gottlieb 	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
290c5bb1730SMaor Gottlieb 				       outer_headers);
291e3a2b7edSAmir Vadai 	u16 addr_type = 0;
292e3a2b7edSAmir Vadai 	u8 ip_proto = 0;
293e3a2b7edSAmir Vadai 
294de0af0bfSRoi Dayan 	*min_inline = MLX5_INLINE_MODE_L2;
295de0af0bfSRoi Dayan 
296e3a2b7edSAmir Vadai 	if (f->dissector->used_keys &
297e3a2b7edSAmir Vadai 	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
298e3a2b7edSAmir Vadai 	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
299e3a2b7edSAmir Vadai 	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
300095b6cfdSOr Gerlitz 	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
301e3a2b7edSAmir Vadai 	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
302e3a2b7edSAmir Vadai 	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
303bbd00f7eSHadar Hen Zion 	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
304bbd00f7eSHadar Hen Zion 	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
305bbd00f7eSHadar Hen Zion 	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
306bbd00f7eSHadar Hen Zion 	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
307bbd00f7eSHadar Hen Zion 	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS)	|
308bbd00f7eSHadar Hen Zion 	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL))) {
309e3a2b7edSAmir Vadai 		netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
310e3a2b7edSAmir Vadai 			    f->dissector->used_keys);
311e3a2b7edSAmir Vadai 		return -EOPNOTSUPP;
312e3a2b7edSAmir Vadai 	}
313e3a2b7edSAmir Vadai 
314bbd00f7eSHadar Hen Zion 	if ((dissector_uses_key(f->dissector,
315bbd00f7eSHadar Hen Zion 				FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
316bbd00f7eSHadar Hen Zion 	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
317bbd00f7eSHadar Hen Zion 	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
318bbd00f7eSHadar Hen Zion 	    dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
319bbd00f7eSHadar Hen Zion 		struct flow_dissector_key_control *key =
320bbd00f7eSHadar Hen Zion 			skb_flow_dissector_target(f->dissector,
321bbd00f7eSHadar Hen Zion 						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
322bbd00f7eSHadar Hen Zion 						  f->key);
323bbd00f7eSHadar Hen Zion 		switch (key->addr_type) {
324bbd00f7eSHadar Hen Zion 		case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
325bbd00f7eSHadar Hen Zion 			if (parse_tunnel_attr(priv, spec, f))
326bbd00f7eSHadar Hen Zion 				return -EOPNOTSUPP;
327bbd00f7eSHadar Hen Zion 			break;
328bbd00f7eSHadar Hen Zion 		default:
329bbd00f7eSHadar Hen Zion 			return -EOPNOTSUPP;
330bbd00f7eSHadar Hen Zion 		}
331bbd00f7eSHadar Hen Zion 
332bbd00f7eSHadar Hen Zion 		/* In decap flows, the header pointers should point to the inner
333bbd00f7eSHadar Hen Zion 		 * headers; the outer headers were already set by parse_tunnel_attr().
334bbd00f7eSHadar Hen Zion 		 */
335bbd00f7eSHadar Hen Zion 		headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
336bbd00f7eSHadar Hen Zion 					 inner_headers);
337bbd00f7eSHadar Hen Zion 		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
338bbd00f7eSHadar Hen Zion 					 inner_headers);
339bbd00f7eSHadar Hen Zion 	}
340bbd00f7eSHadar Hen Zion 
341e3a2b7edSAmir Vadai 	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
342e3a2b7edSAmir Vadai 		struct flow_dissector_key_control *key =
343e3a2b7edSAmir Vadai 			skb_flow_dissector_target(f->dissector,
3441dbd0d37SHadar Hen Zion 						  FLOW_DISSECTOR_KEY_CONTROL,
345e3a2b7edSAmir Vadai 						  f->key);
346e3a2b7edSAmir Vadai 		addr_type = key->addr_type;
347e3a2b7edSAmir Vadai 	}
348e3a2b7edSAmir Vadai 
349e3a2b7edSAmir Vadai 	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
350e3a2b7edSAmir Vadai 		struct flow_dissector_key_basic *key =
351e3a2b7edSAmir Vadai 			skb_flow_dissector_target(f->dissector,
352e3a2b7edSAmir Vadai 						  FLOW_DISSECTOR_KEY_BASIC,
353e3a2b7edSAmir Vadai 						  f->key);
354e3a2b7edSAmir Vadai 		struct flow_dissector_key_basic *mask =
355e3a2b7edSAmir Vadai 			skb_flow_dissector_target(f->dissector,
356e3a2b7edSAmir Vadai 						  FLOW_DISSECTOR_KEY_BASIC,
357e3a2b7edSAmir Vadai 						  f->mask);
358e3a2b7edSAmir Vadai 		ip_proto = key->ip_proto;
359e3a2b7edSAmir Vadai 
360e3a2b7edSAmir Vadai 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
361e3a2b7edSAmir Vadai 			 ntohs(mask->n_proto));
362e3a2b7edSAmir Vadai 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
363e3a2b7edSAmir Vadai 			 ntohs(key->n_proto));
364e3a2b7edSAmir Vadai 
365e3a2b7edSAmir Vadai 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
366e3a2b7edSAmir Vadai 			 mask->ip_proto);
367e3a2b7edSAmir Vadai 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
368e3a2b7edSAmir Vadai 			 key->ip_proto);
369de0af0bfSRoi Dayan 
370de0af0bfSRoi Dayan 		if (mask->ip_proto)
371de0af0bfSRoi Dayan 			*min_inline = MLX5_INLINE_MODE_IP;
372e3a2b7edSAmir Vadai 	}
373e3a2b7edSAmir Vadai 
374e3a2b7edSAmir Vadai 	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
375e3a2b7edSAmir Vadai 		struct flow_dissector_key_eth_addrs *key =
376e3a2b7edSAmir Vadai 			skb_flow_dissector_target(f->dissector,
377e3a2b7edSAmir Vadai 						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
378e3a2b7edSAmir Vadai 						  f->key);
379e3a2b7edSAmir Vadai 		struct flow_dissector_key_eth_addrs *mask =
380e3a2b7edSAmir Vadai 			skb_flow_dissector_target(f->dissector,
381e3a2b7edSAmir Vadai 						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
382e3a2b7edSAmir Vadai 						  f->mask);
383e3a2b7edSAmir Vadai 
384e3a2b7edSAmir Vadai 		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
385e3a2b7edSAmir Vadai 					     dmac_47_16),
386e3a2b7edSAmir Vadai 				mask->dst);
387e3a2b7edSAmir Vadai 		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
388e3a2b7edSAmir Vadai 					     dmac_47_16),
389e3a2b7edSAmir Vadai 				key->dst);
390e3a2b7edSAmir Vadai 
391e3a2b7edSAmir Vadai 		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
392e3a2b7edSAmir Vadai 					     smac_47_16),
393e3a2b7edSAmir Vadai 				mask->src);
394e3a2b7edSAmir Vadai 		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
395e3a2b7edSAmir Vadai 					     smac_47_16),
396e3a2b7edSAmir Vadai 				key->src);
397e3a2b7edSAmir Vadai 	}
398e3a2b7edSAmir Vadai 
399095b6cfdSOr Gerlitz 	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
400095b6cfdSOr Gerlitz 		struct flow_dissector_key_vlan *key =
401095b6cfdSOr Gerlitz 			skb_flow_dissector_target(f->dissector,
402095b6cfdSOr Gerlitz 						  FLOW_DISSECTOR_KEY_VLAN,
403095b6cfdSOr Gerlitz 						  f->key);
404095b6cfdSOr Gerlitz 		struct flow_dissector_key_vlan *mask =
405095b6cfdSOr Gerlitz 			skb_flow_dissector_target(f->dissector,
406095b6cfdSOr Gerlitz 						  FLOW_DISSECTOR_KEY_VLAN,
407095b6cfdSOr Gerlitz 						  f->mask);
408358d79a4SOr Gerlitz 		if (mask->vlan_id || mask->vlan_priority) {
409095b6cfdSOr Gerlitz 			MLX5_SET(fte_match_set_lyr_2_4, headers_c, vlan_tag, 1);
410095b6cfdSOr Gerlitz 			MLX5_SET(fte_match_set_lyr_2_4, headers_v, vlan_tag, 1);
411095b6cfdSOr Gerlitz 
412095b6cfdSOr Gerlitz 			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
413095b6cfdSOr Gerlitz 			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);
414358d79a4SOr Gerlitz 
415358d79a4SOr Gerlitz 			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
416358d79a4SOr Gerlitz 			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);
417095b6cfdSOr Gerlitz 		}
418095b6cfdSOr Gerlitz 	}
419095b6cfdSOr Gerlitz 
420e3a2b7edSAmir Vadai 	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
421e3a2b7edSAmir Vadai 		struct flow_dissector_key_ipv4_addrs *key =
422e3a2b7edSAmir Vadai 			skb_flow_dissector_target(f->dissector,
423e3a2b7edSAmir Vadai 						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
424e3a2b7edSAmir Vadai 						  f->key);
425e3a2b7edSAmir Vadai 		struct flow_dissector_key_ipv4_addrs *mask =
426e3a2b7edSAmir Vadai 			skb_flow_dissector_target(f->dissector,
427e3a2b7edSAmir Vadai 						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
428e3a2b7edSAmir Vadai 						  f->mask);
429e3a2b7edSAmir Vadai 
430e3a2b7edSAmir Vadai 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
431e3a2b7edSAmir Vadai 				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
432e3a2b7edSAmir Vadai 		       &mask->src, sizeof(mask->src));
433e3a2b7edSAmir Vadai 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
434e3a2b7edSAmir Vadai 				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
435e3a2b7edSAmir Vadai 		       &key->src, sizeof(key->src));
436e3a2b7edSAmir Vadai 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
437e3a2b7edSAmir Vadai 				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
438e3a2b7edSAmir Vadai 		       &mask->dst, sizeof(mask->dst));
439e3a2b7edSAmir Vadai 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
440e3a2b7edSAmir Vadai 				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
441e3a2b7edSAmir Vadai 		       &key->dst, sizeof(key->dst));
442de0af0bfSRoi Dayan 
443de0af0bfSRoi Dayan 		if (mask->src || mask->dst)
444de0af0bfSRoi Dayan 			*min_inline = MLX5_INLINE_MODE_IP;
445e3a2b7edSAmir Vadai 	}
446e3a2b7edSAmir Vadai 
447e3a2b7edSAmir Vadai 	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
448e3a2b7edSAmir Vadai 		struct flow_dissector_key_ipv6_addrs *key =
449e3a2b7edSAmir Vadai 			skb_flow_dissector_target(f->dissector,
450e3a2b7edSAmir Vadai 						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
451e3a2b7edSAmir Vadai 						  f->key);
452e3a2b7edSAmir Vadai 		struct flow_dissector_key_ipv6_addrs *mask =
453e3a2b7edSAmir Vadai 			skb_flow_dissector_target(f->dissector,
454e3a2b7edSAmir Vadai 						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
455e3a2b7edSAmir Vadai 						  f->mask);
456e3a2b7edSAmir Vadai 
457e3a2b7edSAmir Vadai 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
458e3a2b7edSAmir Vadai 				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
459e3a2b7edSAmir Vadai 		       &mask->src, sizeof(mask->src));
460e3a2b7edSAmir Vadai 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
461e3a2b7edSAmir Vadai 				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
462e3a2b7edSAmir Vadai 		       &key->src, sizeof(key->src));
463e3a2b7edSAmir Vadai 
464e3a2b7edSAmir Vadai 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
465e3a2b7edSAmir Vadai 				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
466e3a2b7edSAmir Vadai 		       &mask->dst, sizeof(mask->dst));
467e3a2b7edSAmir Vadai 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
468e3a2b7edSAmir Vadai 				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
469e3a2b7edSAmir Vadai 		       &key->dst, sizeof(key->dst));
470de0af0bfSRoi Dayan 
471de0af0bfSRoi Dayan 		if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY ||
472de0af0bfSRoi Dayan 		    ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY)
473de0af0bfSRoi Dayan 			*min_inline = MLX5_INLINE_MODE_IP;
474e3a2b7edSAmir Vadai 	}
475e3a2b7edSAmir Vadai 
476e3a2b7edSAmir Vadai 	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
477e3a2b7edSAmir Vadai 		struct flow_dissector_key_ports *key =
478e3a2b7edSAmir Vadai 			skb_flow_dissector_target(f->dissector,
479e3a2b7edSAmir Vadai 						  FLOW_DISSECTOR_KEY_PORTS,
480e3a2b7edSAmir Vadai 						  f->key);
481e3a2b7edSAmir Vadai 		struct flow_dissector_key_ports *mask =
482e3a2b7edSAmir Vadai 			skb_flow_dissector_target(f->dissector,
483e3a2b7edSAmir Vadai 						  FLOW_DISSECTOR_KEY_PORTS,
484e3a2b7edSAmir Vadai 						  f->mask);
485e3a2b7edSAmir Vadai 		switch (ip_proto) {
486e3a2b7edSAmir Vadai 		case IPPROTO_TCP:
487e3a2b7edSAmir Vadai 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
488e3a2b7edSAmir Vadai 				 tcp_sport, ntohs(mask->src));
489e3a2b7edSAmir Vadai 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
490e3a2b7edSAmir Vadai 				 tcp_sport, ntohs(key->src));
491e3a2b7edSAmir Vadai 
492e3a2b7edSAmir Vadai 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
493e3a2b7edSAmir Vadai 				 tcp_dport, ntohs(mask->dst));
494e3a2b7edSAmir Vadai 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
495e3a2b7edSAmir Vadai 				 tcp_dport, ntohs(key->dst));
496e3a2b7edSAmir Vadai 			break;
497e3a2b7edSAmir Vadai 
498e3a2b7edSAmir Vadai 		case IPPROTO_UDP:
499e3a2b7edSAmir Vadai 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
500e3a2b7edSAmir Vadai 				 udp_sport, ntohs(mask->src));
501e3a2b7edSAmir Vadai 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
502e3a2b7edSAmir Vadai 				 udp_sport, ntohs(key->src));
503e3a2b7edSAmir Vadai 
504e3a2b7edSAmir Vadai 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
505e3a2b7edSAmir Vadai 				 udp_dport, ntohs(mask->dst));
506e3a2b7edSAmir Vadai 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
507e3a2b7edSAmir Vadai 				 udp_dport, ntohs(key->dst));
508e3a2b7edSAmir Vadai 			break;
509e3a2b7edSAmir Vadai 		default:
510e3a2b7edSAmir Vadai 			netdev_err(priv->netdev,
511e3a2b7edSAmir Vadai 				   "Only UDP and TCP transports are supported\n");
512e3a2b7edSAmir Vadai 			return -EINVAL;
513e3a2b7edSAmir Vadai 		}
514de0af0bfSRoi Dayan 
515de0af0bfSRoi Dayan 		if (mask->src || mask->dst)
516de0af0bfSRoi Dayan 			*min_inline = MLX5_INLINE_MODE_TCP_UDP;
517e3a2b7edSAmir Vadai 	}
518e3a2b7edSAmir Vadai 
519e3a2b7edSAmir Vadai 	return 0;
520e3a2b7edSAmir Vadai }
521e3a2b7edSAmir Vadai 
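/* Parse the flower match and, for VF representor flows in switchdev mode,
 * reject the offload if it needs a higher inline mode than the e-switch is
 * currently configured for.
 */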
522de0af0bfSRoi Dayan static int parse_cls_flower(struct mlx5e_priv *priv,
523de0af0bfSRoi Dayan 			    struct mlx5_flow_spec *spec,
524de0af0bfSRoi Dayan 			    struct tc_cls_flower_offload *f)
525de0af0bfSRoi Dayan {
526de0af0bfSRoi Dayan 	struct mlx5_core_dev *dev = priv->mdev;
527de0af0bfSRoi Dayan 	struct mlx5_eswitch *esw = dev->priv.eswitch;
528de0af0bfSRoi Dayan 	struct mlx5_eswitch_rep *rep = priv->ppriv;
529de0af0bfSRoi Dayan 	u8 min_inline;
530de0af0bfSRoi Dayan 	int err;
531de0af0bfSRoi Dayan 
532de0af0bfSRoi Dayan 	err = __parse_cls_flower(priv, spec, f, &min_inline);
533de0af0bfSRoi Dayan 
534de0af0bfSRoi Dayan 	if (!err && esw->mode == SRIOV_OFFLOADS &&
535de0af0bfSRoi Dayan 	    rep->vport != FDB_UPLINK_VPORT) {
536de0af0bfSRoi Dayan 		if (min_inline > esw->offloads.inline_mode) {
537de0af0bfSRoi Dayan 			netdev_warn(priv->netdev,
538de0af0bfSRoi Dayan 				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
539de0af0bfSRoi Dayan 				    min_inline, esw->offloads.inline_mode);
540de0af0bfSRoi Dayan 			return -EOPNOTSUPP;
541de0af0bfSRoi Dayan 		}
542de0af0bfSRoi Dayan 	}
543de0af0bfSRoi Dayan 
544de0af0bfSRoi Dayan 	return err;
545de0af0bfSRoi Dayan }
546de0af0bfSRoi Dayan 
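/* Parse the TC actions of a NIC flow: gact drop (with a HW counter when the
 * device supports flow counters) or skbedit mark, which is carried as the
 * rule's flow tag.
 */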
5475c40348cSOr Gerlitz static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
548e3a2b7edSAmir Vadai 				u32 *action, u32 *flow_tag)
549e3a2b7edSAmir Vadai {
550e3a2b7edSAmir Vadai 	const struct tc_action *a;
55122dc13c8SWANG Cong 	LIST_HEAD(actions);
552e3a2b7edSAmir Vadai 
553e3a2b7edSAmir Vadai 	if (tc_no_actions(exts))
554e3a2b7edSAmir Vadai 		return -EINVAL;
555e3a2b7edSAmir Vadai 
556e3a2b7edSAmir Vadai 	*flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
557e3a2b7edSAmir Vadai 	*action = 0;
558e3a2b7edSAmir Vadai 
55922dc13c8SWANG Cong 	tcf_exts_to_list(exts, &actions);
56022dc13c8SWANG Cong 	list_for_each_entry(a, &actions, list) {
561e3a2b7edSAmir Vadai 		/* Only support a single action per rule */
562e3a2b7edSAmir Vadai 		if (*action)
563e3a2b7edSAmir Vadai 			return -EINVAL;
564e3a2b7edSAmir Vadai 
565e3a2b7edSAmir Vadai 		if (is_tcf_gact_shot(a)) {
566e3a2b7edSAmir Vadai 			*action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
567aad7e08dSAmir Vadai 			if (MLX5_CAP_FLOWTABLE(priv->mdev,
568aad7e08dSAmir Vadai 					       flow_table_properties_nic_receive.flow_counter))
569aad7e08dSAmir Vadai 				*action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
570e3a2b7edSAmir Vadai 			continue;
571e3a2b7edSAmir Vadai 		}
572e3a2b7edSAmir Vadai 
573e3a2b7edSAmir Vadai 		if (is_tcf_skbedit_mark(a)) {
574e3a2b7edSAmir Vadai 			u32 mark = tcf_skbedit_mark(a);
575e3a2b7edSAmir Vadai 
576e3a2b7edSAmir Vadai 			if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
577e3a2b7edSAmir Vadai 				netdev_warn(priv->netdev, "Bad flow mark - only 16 bits are supported: 0x%x\n",
578e3a2b7edSAmir Vadai 					    mark);
579e3a2b7edSAmir Vadai 				return -EINVAL;
580e3a2b7edSAmir Vadai 			}
581e3a2b7edSAmir Vadai 
582e3a2b7edSAmir Vadai 			*flow_tag = mark;
583e3a2b7edSAmir Vadai 			*action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
584e3a2b7edSAmir Vadai 			continue;
585e3a2b7edSAmir Vadai 		}
586e3a2b7edSAmir Vadai 
587e3a2b7edSAmir Vadai 		return -EINVAL;
588e3a2b7edSAmir Vadai 	}
589e3a2b7edSAmir Vadai 
590e3a2b7edSAmir Vadai 	return 0;
591e3a2b7edSAmir Vadai }
592e3a2b7edSAmir Vadai 
593a54e20b4SHadar Hen Zion static inline int cmp_encap_info(struct mlx5_encap_info *a,
594a54e20b4SHadar Hen Zion 				 struct mlx5_encap_info *b)
595a54e20b4SHadar Hen Zion {
596a54e20b4SHadar Hen Zion 	return memcmp(a, b, sizeof(*a));
597a54e20b4SHadar Hen Zion }
598a54e20b4SHadar Hen Zion 
599a54e20b4SHadar Hen Zion static inline int hash_encap_info(struct mlx5_encap_info *info)
600a54e20b4SHadar Hen Zion {
601a54e20b4SHadar Hen Zion 	return jhash(info, sizeof(*info), 0);
602a54e20b4SHadar Hen Zion }
603a54e20b4SHadar Hen Zion 
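/* Resolve the IPv4 route and neighbour towards the tunnel destination and
 * make sure the egress netdevice is on the same HW e-switch as this one.
 */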
604a54e20b4SHadar Hen Zion static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
605a54e20b4SHadar Hen Zion 				   struct net_device *mirred_dev,
606a54e20b4SHadar Hen Zion 				   struct net_device **out_dev,
607a54e20b4SHadar Hen Zion 				   struct flowi4 *fl4,
608a54e20b4SHadar Hen Zion 				   struct neighbour **out_n,
609a54e20b4SHadar Hen Zion 				   __be32 *saddr,
610a54e20b4SHadar Hen Zion 				   int *out_ttl)
611a54e20b4SHadar Hen Zion {
612a54e20b4SHadar Hen Zion 	struct rtable *rt;
613a54e20b4SHadar Hen Zion 	struct neighbour *n = NULL;
614a54e20b4SHadar Hen Zion 	int ttl;
615a54e20b4SHadar Hen Zion 
616a54e20b4SHadar Hen Zion #if IS_ENABLED(CONFIG_INET)
617a54e20b4SHadar Hen Zion 	rt = ip_route_output_key(dev_net(mirred_dev), fl4);
618a54e20b4SHadar Hen Zion 	if (IS_ERR(rt)) {
619a54e20b4SHadar Hen Zion 		pr_warn("%s: no route to %pI4\n", __func__, &fl4->daddr);
620a54e20b4SHadar Hen Zion 		return -EOPNOTSUPP;
621a54e20b4SHadar Hen Zion 	}
622a54e20b4SHadar Hen Zion #else
623a54e20b4SHadar Hen Zion 	return -EOPNOTSUPP;
624a54e20b4SHadar Hen Zion #endif
625a54e20b4SHadar Hen Zion 
626a54e20b4SHadar Hen Zion 	if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev)) {
627a54e20b4SHadar Hen Zion 		pr_warn("%s: Can't offload the flow, netdevices aren't on the same HW e-switch\n",
628a54e20b4SHadar Hen Zion 			__func__);
629a54e20b4SHadar Hen Zion 		ip_rt_put(rt);
630a54e20b4SHadar Hen Zion 		return -EOPNOTSUPP;
631a54e20b4SHadar Hen Zion 	}
632a54e20b4SHadar Hen Zion 
633a54e20b4SHadar Hen Zion 	ttl = ip4_dst_hoplimit(&rt->dst);
634a54e20b4SHadar Hen Zion 	n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
635a54e20b4SHadar Hen Zion 	ip_rt_put(rt);
636a54e20b4SHadar Hen Zion 	if (!n)
637a54e20b4SHadar Hen Zion 		return -ENOMEM;
638a54e20b4SHadar Hen Zion 
639a54e20b4SHadar Hen Zion 	*out_n = n;
640a54e20b4SHadar Hen Zion 	*saddr = fl4->saddr;
641a54e20b4SHadar Hen Zion 	*out_ttl = ttl;
642a54e20b4SHadar Hen Zion 	*out_dev = rt->dst.dev;
643a54e20b4SHadar Hen Zion 
644a54e20b4SHadar Hen Zion 	return 0;
645a54e20b4SHadar Hen Zion }
646a54e20b4SHadar Hen Zion 
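/* Build a static VXLAN over IPv4 encapsulation header (Ethernet + IPv4 +
 * UDP + VXLAN) into buf and return its size.
 */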
647a54e20b4SHadar Hen Zion static int gen_vxlan_header_ipv4(struct net_device *out_dev,
648a54e20b4SHadar Hen Zion 				 char buf[],
649a54e20b4SHadar Hen Zion 				 unsigned char h_dest[ETH_ALEN],
650a54e20b4SHadar Hen Zion 				 int ttl,
651a54e20b4SHadar Hen Zion 				 __be32 daddr,
652a54e20b4SHadar Hen Zion 				 __be32 saddr,
653a54e20b4SHadar Hen Zion 				 __be16 udp_dst_port,
654a54e20b4SHadar Hen Zion 				 __be32 vx_vni)
655a54e20b4SHadar Hen Zion {
656a54e20b4SHadar Hen Zion 	int encap_size = VXLAN_HLEN + sizeof(struct iphdr) + ETH_HLEN;
657a54e20b4SHadar Hen Zion 	struct ethhdr *eth = (struct ethhdr *)buf;
658a54e20b4SHadar Hen Zion 	struct iphdr  *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr));
659a54e20b4SHadar Hen Zion 	struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr));
660a54e20b4SHadar Hen Zion 	struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));
661a54e20b4SHadar Hen Zion 
662a54e20b4SHadar Hen Zion 	memset(buf, 0, encap_size);
663a54e20b4SHadar Hen Zion 
664a54e20b4SHadar Hen Zion 	ether_addr_copy(eth->h_dest, h_dest);
665a54e20b4SHadar Hen Zion 	ether_addr_copy(eth->h_source, out_dev->dev_addr);
666a54e20b4SHadar Hen Zion 	eth->h_proto = htons(ETH_P_IP);
667a54e20b4SHadar Hen Zion 
668a54e20b4SHadar Hen Zion 	ip->daddr = daddr;
669a54e20b4SHadar Hen Zion 	ip->saddr = saddr;
670a54e20b4SHadar Hen Zion 
671a54e20b4SHadar Hen Zion 	ip->ttl = ttl;
672a54e20b4SHadar Hen Zion 	ip->protocol = IPPROTO_UDP;
673a54e20b4SHadar Hen Zion 	ip->version = 0x4;
674a54e20b4SHadar Hen Zion 	ip->ihl = 0x5;
675a54e20b4SHadar Hen Zion 
676a54e20b4SHadar Hen Zion 	udp->dest = udp_dst_port;
677a54e20b4SHadar Hen Zion 	vxh->vx_flags = VXLAN_HF_VNI;
678a54e20b4SHadar Hen Zion 	vxh->vx_vni = vxlan_vni_field(vx_vni);
679a54e20b4SHadar Hen Zion 
680a54e20b4SHadar Hen Zion 	return encap_size;
681a54e20b4SHadar Hen Zion }
682a54e20b4SHadar Hen Zion 
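/* Resolve the route and neighbour for an encap entry, generate the VXLAN
 * headers and allocate the encap object in the device. The neighbour must
 * already be valid (resolved).
 */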
683a54e20b4SHadar Hen Zion static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
684a54e20b4SHadar Hen Zion 					  struct net_device *mirred_dev,
685a54e20b4SHadar Hen Zion 					  struct mlx5_encap_entry *e,
686a54e20b4SHadar Hen Zion 					  struct net_device **out_dev)
687a54e20b4SHadar Hen Zion {
688a54e20b4SHadar Hen Zion 	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
689a54e20b4SHadar Hen Zion 	struct flowi4 fl4 = {};
690a54e20b4SHadar Hen Zion 	struct neighbour *n;
691a54e20b4SHadar Hen Zion 	char *encap_header;
692a54e20b4SHadar Hen Zion 	int encap_size;
693a54e20b4SHadar Hen Zion 	__be32 saddr;
694a54e20b4SHadar Hen Zion 	int ttl;
695a54e20b4SHadar Hen Zion 	int err;
696a54e20b4SHadar Hen Zion 
697a54e20b4SHadar Hen Zion 	encap_header = kzalloc(max_encap_size, GFP_KERNEL);
698a54e20b4SHadar Hen Zion 	if (!encap_header)
699a54e20b4SHadar Hen Zion 		return -ENOMEM;
700a54e20b4SHadar Hen Zion 
701a54e20b4SHadar Hen Zion 	switch (e->tunnel_type) {
702a54e20b4SHadar Hen Zion 	case MLX5_HEADER_TYPE_VXLAN:
703a54e20b4SHadar Hen Zion 		fl4.flowi4_proto = IPPROTO_UDP;
704a54e20b4SHadar Hen Zion 		fl4.fl4_dport = e->tun_info.tp_dst;
705a54e20b4SHadar Hen Zion 		break;
706a54e20b4SHadar Hen Zion 	default:
707a54e20b4SHadar Hen Zion 		err = -EOPNOTSUPP;
708a54e20b4SHadar Hen Zion 		goto out;
709a54e20b4SHadar Hen Zion 	}
710a54e20b4SHadar Hen Zion 	fl4.daddr = e->tun_info.daddr;
711a54e20b4SHadar Hen Zion 
712a54e20b4SHadar Hen Zion 	err = mlx5e_route_lookup_ipv4(priv, mirred_dev, out_dev,
713a54e20b4SHadar Hen Zion 				      &fl4, &n, &saddr, &ttl);
714a54e20b4SHadar Hen Zion 	if (err)
715a54e20b4SHadar Hen Zion 		goto out;
716a54e20b4SHadar Hen Zion 
717a54e20b4SHadar Hen Zion 	e->n = n;
718a54e20b4SHadar Hen Zion 	e->out_dev = *out_dev;
719a54e20b4SHadar Hen Zion 
720a54e20b4SHadar Hen Zion 	if (!(n->nud_state & NUD_VALID)) {
721a54e20b4SHadar Hen Zion 		err = -EOPNOTSUPP;
722a54e20b4SHadar Hen Zion 		goto out;
723a54e20b4SHadar Hen Zion 	}
724a54e20b4SHadar Hen Zion 
725a54e20b4SHadar Hen Zion 	neigh_ha_snapshot(e->h_dest, n, *out_dev);
726a54e20b4SHadar Hen Zion 
727a54e20b4SHadar Hen Zion 	switch (e->tunnel_type) {
728a54e20b4SHadar Hen Zion 	case MLX5_HEADER_TYPE_VXLAN:
729a54e20b4SHadar Hen Zion 		encap_size = gen_vxlan_header_ipv4(*out_dev, encap_header,
730a54e20b4SHadar Hen Zion 						   e->h_dest, ttl,
731a54e20b4SHadar Hen Zion 						   e->tun_info.daddr,
732a54e20b4SHadar Hen Zion 						   saddr, e->tun_info.tp_dst,
733a54e20b4SHadar Hen Zion 						   e->tun_info.tun_id);
734a54e20b4SHadar Hen Zion 		break;
735a54e20b4SHadar Hen Zion 	default:
736a54e20b4SHadar Hen Zion 		err = -EOPNOTSUPP;
737a54e20b4SHadar Hen Zion 		goto out;
738a54e20b4SHadar Hen Zion 	}
739a54e20b4SHadar Hen Zion 
740a54e20b4SHadar Hen Zion 	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
741a54e20b4SHadar Hen Zion 			       encap_size, encap_header, &e->encap_id);
742a54e20b4SHadar Hen Zion out:
743a54e20b4SHadar Hen Zion 	kfree(encap_header);
744a54e20b4SHadar Hen Zion 	return err;
745a54e20b4SHadar Hen Zion }
746a54e20b4SHadar Hen Zion 
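/* Look up or create the encap entry described by the tunnel_key set info.
 * Entries live in the e-switch encap hash table and are shared by all flows
 * that use the same tunnel headers.
 */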
747a54e20b4SHadar Hen Zion static int mlx5e_attach_encap(struct mlx5e_priv *priv,
748a54e20b4SHadar Hen Zion 			      struct ip_tunnel_info *tun_info,
749a54e20b4SHadar Hen Zion 			      struct net_device *mirred_dev,
750776b12b6SOr Gerlitz 			      struct mlx5_esw_flow_attr *attr)
75103a9d11eSOr Gerlitz {
752a54e20b4SHadar Hen Zion 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
753a54e20b4SHadar Hen Zion 	unsigned short family = ip_tunnel_info_af(tun_info);
754a54e20b4SHadar Hen Zion 	struct ip_tunnel_key *key = &tun_info->key;
755a54e20b4SHadar Hen Zion 	struct mlx5_encap_info info;
756a54e20b4SHadar Hen Zion 	struct mlx5_encap_entry *e;
757a54e20b4SHadar Hen Zion 	struct net_device *out_dev;
758a54e20b4SHadar Hen Zion 	uintptr_t hash_key;
759a54e20b4SHadar Hen Zion 	bool found = false;
760a54e20b4SHadar Hen Zion 	int tunnel_type;
761a54e20b4SHadar Hen Zion 	int err;
762a54e20b4SHadar Hen Zion 
763a54e20b4SHadar Hen Zion 	/* udp dst port must be given */
764a54e20b4SHadar Hen Zion 	if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst)))
765a54e20b4SHadar Hen Zion 		return -EOPNOTSUPP;
766a54e20b4SHadar Hen Zion 
767a54e20b4SHadar Hen Zion 	if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->tp_dst)) &&
768a54e20b4SHadar Hen Zion 	    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
769a54e20b4SHadar Hen Zion 		info.tp_dst = key->tp_dst;
770a54e20b4SHadar Hen Zion 		info.tun_id = tunnel_id_to_key32(key->tun_id);
771a54e20b4SHadar Hen Zion 		tunnel_type = MLX5_HEADER_TYPE_VXLAN;
772a54e20b4SHadar Hen Zion 	} else {
773a54e20b4SHadar Hen Zion 		return -EOPNOTSUPP;
774a54e20b4SHadar Hen Zion 	}
775a54e20b4SHadar Hen Zion 
776a54e20b4SHadar Hen Zion 	switch (family) {
777a54e20b4SHadar Hen Zion 	case AF_INET:
778a54e20b4SHadar Hen Zion 		info.daddr = key->u.ipv4.dst;
779a54e20b4SHadar Hen Zion 		break;
780a54e20b4SHadar Hen Zion 	default:
781a54e20b4SHadar Hen Zion 		return -EOPNOTSUPP;
782a54e20b4SHadar Hen Zion 	}
783a54e20b4SHadar Hen Zion 
784a54e20b4SHadar Hen Zion 	hash_key = hash_encap_info(&info);
785a54e20b4SHadar Hen Zion 
786a54e20b4SHadar Hen Zion 	hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
787a54e20b4SHadar Hen Zion 				   encap_hlist, hash_key) {
788a54e20b4SHadar Hen Zion 		if (!cmp_encap_info(&e->tun_info, &info)) {
789a54e20b4SHadar Hen Zion 			found = true;
790a54e20b4SHadar Hen Zion 			break;
791a54e20b4SHadar Hen Zion 		}
792a54e20b4SHadar Hen Zion 	}
793a54e20b4SHadar Hen Zion 
794a54e20b4SHadar Hen Zion 	if (found) {
795a54e20b4SHadar Hen Zion 		attr->encap = e;
796a54e20b4SHadar Hen Zion 		return 0;
797a54e20b4SHadar Hen Zion 	}
798a54e20b4SHadar Hen Zion 
799a54e20b4SHadar Hen Zion 	e = kzalloc(sizeof(*e), GFP_KERNEL);
800a54e20b4SHadar Hen Zion 	if (!e)
801a54e20b4SHadar Hen Zion 		return -ENOMEM;
802a54e20b4SHadar Hen Zion 
803a54e20b4SHadar Hen Zion 	e->tun_info = info;
804a54e20b4SHadar Hen Zion 	e->tunnel_type = tunnel_type;
805a54e20b4SHadar Hen Zion 	INIT_LIST_HEAD(&e->flows);
806a54e20b4SHadar Hen Zion 
807a54e20b4SHadar Hen Zion 	err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e, &out_dev);
808a54e20b4SHadar Hen Zion 	if (err)
809a54e20b4SHadar Hen Zion 		goto out_err;
810a54e20b4SHadar Hen Zion 
811a54e20b4SHadar Hen Zion 	attr->encap = e;
812a54e20b4SHadar Hen Zion 	hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
813a54e20b4SHadar Hen Zion 
814a54e20b4SHadar Hen Zion 	return err;
815a54e20b4SHadar Hen Zion 
816a54e20b4SHadar Hen Zion out_err:
817a54e20b4SHadar Hen Zion 	kfree(e);
818a54e20b4SHadar Hen Zion 	return err;
819a54e20b4SHadar Hen Zion }
820a54e20b4SHadar Hen Zion 
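/* Parse the TC actions of an e-switch (FDB) flow: drop, redirect to a port
 * on the same e-switch, vlan push/pop and tunnel encap/decap. An illustrative
 * encap rule of the kind handled here (device names are examples only):
 *
 *   tc filter add dev <vf_rep> protocol ip parent ffff: flower skip_sw \
 *	action tunnel_key set id 100 src_ip 1.1.1.1 dst_ip 1.1.1.2 dst_port 4789 \
 *	action mirred egress redirect dev vxlan0
 */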
821a54e20b4SHadar Hen Zion static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
822a54e20b4SHadar Hen Zion 				struct mlx5e_tc_flow *flow)
823a54e20b4SHadar Hen Zion {
824a54e20b4SHadar Hen Zion 	struct mlx5_esw_flow_attr *attr = flow->attr;
825a54e20b4SHadar Hen Zion 	struct ip_tunnel_info *info = NULL;
82603a9d11eSOr Gerlitz 	const struct tc_action *a;
82722dc13c8SWANG Cong 	LIST_HEAD(actions);
828a54e20b4SHadar Hen Zion 	bool encap = false;
829a54e20b4SHadar Hen Zion 	int err;
83003a9d11eSOr Gerlitz 
83103a9d11eSOr Gerlitz 	if (tc_no_actions(exts))
83203a9d11eSOr Gerlitz 		return -EINVAL;
83303a9d11eSOr Gerlitz 
834776b12b6SOr Gerlitz 	memset(attr, 0, sizeof(*attr));
835776b12b6SOr Gerlitz 	attr->in_rep = priv->ppriv;
83603a9d11eSOr Gerlitz 
83722dc13c8SWANG Cong 	tcf_exts_to_list(exts, &actions);
83822dc13c8SWANG Cong 	list_for_each_entry(a, &actions, list) {
83903a9d11eSOr Gerlitz 		if (is_tcf_gact_shot(a)) {
8408b32580dSOr Gerlitz 			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
84103a9d11eSOr Gerlitz 					MLX5_FLOW_CONTEXT_ACTION_COUNT;
84203a9d11eSOr Gerlitz 			continue;
84303a9d11eSOr Gerlitz 		}
84403a9d11eSOr Gerlitz 
8455724b8b5SShmulik Ladkani 		if (is_tcf_mirred_egress_redirect(a)) {
84603a9d11eSOr Gerlitz 			int ifindex = tcf_mirred_ifindex(a);
84703a9d11eSOr Gerlitz 			struct net_device *out_dev;
84803a9d11eSOr Gerlitz 			struct mlx5e_priv *out_priv;
84903a9d11eSOr Gerlitz 
85003a9d11eSOr Gerlitz 			out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);
			if (!out_dev) {
				pr_err("Error: device with ifindex %d requested for mirred redirect doesn't exist\n",
				       ifindex);
				return -EINVAL;
			}
85103a9d11eSOr Gerlitz 
852a54e20b4SHadar Hen Zion 			if (switchdev_port_same_parent_id(priv->netdev,
853a54e20b4SHadar Hen Zion 							  out_dev)) {
854e37a79e5SMark Bloch 				attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
855e37a79e5SMark Bloch 					MLX5_FLOW_CONTEXT_ACTION_COUNT;
85603a9d11eSOr Gerlitz 				out_priv = netdev_priv(out_dev);
857776b12b6SOr Gerlitz 				attr->out_rep = out_priv->ppriv;
858a54e20b4SHadar Hen Zion 			} else if (encap) {
859a54e20b4SHadar Hen Zion 				err = mlx5e_attach_encap(priv, info,
860a54e20b4SHadar Hen Zion 							 out_dev, attr);
861a54e20b4SHadar Hen Zion 				if (err)
862a54e20b4SHadar Hen Zion 					return err;
863a54e20b4SHadar Hen Zion 				list_add(&flow->encap, &attr->encap->flows);
864a54e20b4SHadar Hen Zion 				attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
865a54e20b4SHadar Hen Zion 					MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
866a54e20b4SHadar Hen Zion 					MLX5_FLOW_CONTEXT_ACTION_COUNT;
867a54e20b4SHadar Hen Zion 				out_priv = netdev_priv(attr->encap->out_dev);
868a54e20b4SHadar Hen Zion 				attr->out_rep = out_priv->ppriv;
869a54e20b4SHadar Hen Zion 			} else {
870a54e20b4SHadar Hen Zion 				pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
871a54e20b4SHadar Hen Zion 				       priv->netdev->name, out_dev->name);
872a54e20b4SHadar Hen Zion 				return -EINVAL;
873a54e20b4SHadar Hen Zion 			}
874a54e20b4SHadar Hen Zion 			continue;
875a54e20b4SHadar Hen Zion 		}
876a54e20b4SHadar Hen Zion 
877a54e20b4SHadar Hen Zion 		if (is_tcf_tunnel_set(a)) {
878a54e20b4SHadar Hen Zion 			info = tcf_tunnel_info(a);
879a54e20b4SHadar Hen Zion 			if (info)
880a54e20b4SHadar Hen Zion 				encap = true;
881a54e20b4SHadar Hen Zion 			else
882a54e20b4SHadar Hen Zion 				return -EOPNOTSUPP;
88303a9d11eSOr Gerlitz 			continue;
88403a9d11eSOr Gerlitz 		}
88503a9d11eSOr Gerlitz 
8868b32580dSOr Gerlitz 		if (is_tcf_vlan(a)) {
8878b32580dSOr Gerlitz 			if (tcf_vlan_action(a) == VLAN_F_POP) {
8888b32580dSOr Gerlitz 				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
8898b32580dSOr Gerlitz 			} else if (tcf_vlan_action(a) == VLAN_F_PUSH) {
8908b32580dSOr Gerlitz 				if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
8918b32580dSOr Gerlitz 					return -EOPNOTSUPP;
8928b32580dSOr Gerlitz 
8938b32580dSOr Gerlitz 				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
8948b32580dSOr Gerlitz 				attr->vlan = tcf_vlan_push_vid(a);
8958b32580dSOr Gerlitz 			}
8968b32580dSOr Gerlitz 			continue;
8978b32580dSOr Gerlitz 		}
8988b32580dSOr Gerlitz 
899bbd00f7eSHadar Hen Zion 		if (is_tcf_tunnel_release(a)) {
900bbd00f7eSHadar Hen Zion 			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
901bbd00f7eSHadar Hen Zion 			continue;
902bbd00f7eSHadar Hen Zion 		}
903bbd00f7eSHadar Hen Zion 
90403a9d11eSOr Gerlitz 		return -EINVAL;
90503a9d11eSOr Gerlitz 	}
90603a9d11eSOr Gerlitz 	return 0;
90703a9d11eSOr Gerlitz }
90803a9d11eSOr Gerlitz 
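/* Add a flower classifier, called from the driver's ndo_setup_tc handler.
 * Depending on the e-switch mode the rule is offloaded to the NIC TC table
 * or to the FDB. An illustrative NIC rule (names are examples only):
 *
 *   tc filter add dev <netdev> protocol ip parent ffff: flower skip_sw \
 *	ip_proto tcp dst_port 80 action drop
 */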
909e3a2b7edSAmir Vadai int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
910e3a2b7edSAmir Vadai 			   struct tc_cls_flower_offload *f)
911e3a2b7edSAmir Vadai {
912acff797cSMaor Gottlieb 	struct mlx5e_tc_table *tc = &priv->fs.tc;
913e3a2b7edSAmir Vadai 	int err = 0;
914776b12b6SOr Gerlitz 	bool fdb_flow = false;
915776b12b6SOr Gerlitz 	u32 flow_tag, action;
916e3a2b7edSAmir Vadai 	struct mlx5e_tc_flow *flow;
917c5bb1730SMaor Gottlieb 	struct mlx5_flow_spec *spec;
918adb4c123SOr Gerlitz 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
919e3a2b7edSAmir Vadai 
920776b12b6SOr Gerlitz 	if (esw && esw->mode == SRIOV_OFFLOADS)
921776b12b6SOr Gerlitz 		fdb_flow = true;
922776b12b6SOr Gerlitz 
923776b12b6SOr Gerlitz 	if (fdb_flow)
92453636068SRoi Dayan 		flow = kzalloc(sizeof(*flow) +
92553636068SRoi Dayan 			       sizeof(struct mlx5_esw_flow_attr),
926776b12b6SOr Gerlitz 			       GFP_KERNEL);
927e3a2b7edSAmir Vadai 	else
928e3a2b7edSAmir Vadai 		flow = kzalloc(sizeof(*flow), GFP_KERNEL);
929e3a2b7edSAmir Vadai 
930c5bb1730SMaor Gottlieb 	spec = mlx5_vzalloc(sizeof(*spec));
931c5bb1730SMaor Gottlieb 	if (!spec || !flow) {
932e3a2b7edSAmir Vadai 		err = -ENOMEM;
933e3a2b7edSAmir Vadai 		goto err_free;
934e3a2b7edSAmir Vadai 	}
935e3a2b7edSAmir Vadai 
936e3a2b7edSAmir Vadai 	flow->cookie = f->cookie;
937e3a2b7edSAmir Vadai 
938c5bb1730SMaor Gottlieb 	err = parse_cls_flower(priv, spec, f);
939e3a2b7edSAmir Vadai 	if (err < 0)
940e3a2b7edSAmir Vadai 		goto err_free;
941e3a2b7edSAmir Vadai 
942776b12b6SOr Gerlitz 	if (fdb_flow) {
943776b12b6SOr Gerlitz 		flow->attr  = (struct mlx5_esw_flow_attr *)(flow + 1);
944a54e20b4SHadar Hen Zion 		err = parse_tc_fdb_actions(priv, f->exts, flow);
945adb4c123SOr Gerlitz 		if (err < 0)
946adb4c123SOr Gerlitz 			goto err_free;
947776b12b6SOr Gerlitz 		flow->rule = mlx5e_tc_add_fdb_flow(priv, spec, flow->attr);
948adb4c123SOr Gerlitz 	} else {
9495c40348cSOr Gerlitz 		err = parse_tc_nic_actions(priv, f->exts, &action, &flow_tag);
950e3a2b7edSAmir Vadai 		if (err < 0)
951e3a2b7edSAmir Vadai 			goto err_free;
9525c40348cSOr Gerlitz 		flow->rule = mlx5e_tc_add_nic_flow(priv, spec, action, flow_tag);
953adb4c123SOr Gerlitz 	}
954adb4c123SOr Gerlitz 
9555c40348cSOr Gerlitz 	if (IS_ERR(flow->rule)) {
9565c40348cSOr Gerlitz 		err = PTR_ERR(flow->rule);
9575c40348cSOr Gerlitz 		goto err_free;
9585c40348cSOr Gerlitz 	}
9595c40348cSOr Gerlitz 
960e3a2b7edSAmir Vadai 	err = rhashtable_insert_fast(&tc->ht, &flow->node,
961e3a2b7edSAmir Vadai 				     tc->ht_params);
962e3a2b7edSAmir Vadai 	if (err)
9635c40348cSOr Gerlitz 		goto err_del_rule;
964e3a2b7edSAmir Vadai 
965e3a2b7edSAmir Vadai 	goto out;
966e3a2b7edSAmir Vadai 
9675c40348cSOr Gerlitz err_del_rule:
96874491de9SMark Bloch 	mlx5_del_flow_rules(flow->rule);
969e3a2b7edSAmir Vadai 
970e3a2b7edSAmir Vadai err_free:
971e3a2b7edSAmir Vadai 	kfree(flow);
972e3a2b7edSAmir Vadai out:
973c5bb1730SMaor Gottlieb 	kvfree(spec);
974e3a2b7edSAmir Vadai 	return err;
975e3a2b7edSAmir Vadai }
976e3a2b7edSAmir Vadai 
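/* Drop the flow's reference on its encap entry and release the entry once no
 * flows use it anymore.
 */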
977a54e20b4SHadar Hen Zion static void mlx5e_detach_encap(struct mlx5e_priv *priv,
978a54e20b4SHadar Hen Zion 			       struct mlx5e_tc_flow *flow)
{
979a54e20b4SHadar Hen Zion 	struct list_head *next = flow->encap.next;
980a54e20b4SHadar Hen Zion 
981a54e20b4SHadar Hen Zion 	list_del(&flow->encap);
982a54e20b4SHadar Hen Zion 	if (list_empty(next)) {
983a54e20b4SHadar Hen Zion 		struct mlx5_encap_entry *e;
984a54e20b4SHadar Hen Zion 
985a54e20b4SHadar Hen Zion 		e = list_entry(next, struct mlx5_encap_entry, flows);
986a54e20b4SHadar Hen Zion 		if (e->n) {
987a54e20b4SHadar Hen Zion 			mlx5_encap_dealloc(priv->mdev, e->encap_id);
988a54e20b4SHadar Hen Zion 			neigh_release(e->n);
989a54e20b4SHadar Hen Zion 		}
990a54e20b4SHadar Hen Zion 		hlist_del_rcu(&e->encap_hlist);
991a54e20b4SHadar Hen Zion 		kfree(e);
992a54e20b4SHadar Hen Zion 	}
993a54e20b4SHadar Hen Zion }
994a54e20b4SHadar Hen Zion 
995e3a2b7edSAmir Vadai int mlx5e_delete_flower(struct mlx5e_priv *priv,
996e3a2b7edSAmir Vadai 			struct tc_cls_flower_offload *f)
997e3a2b7edSAmir Vadai {
998e3a2b7edSAmir Vadai 	struct mlx5e_tc_flow *flow;
999acff797cSMaor Gottlieb 	struct mlx5e_tc_table *tc = &priv->fs.tc;
1000e3a2b7edSAmir Vadai 
1001e3a2b7edSAmir Vadai 	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
1002e3a2b7edSAmir Vadai 				      tc->ht_params);
1003e3a2b7edSAmir Vadai 	if (!flow)
1004e3a2b7edSAmir Vadai 		return -EINVAL;
1005e3a2b7edSAmir Vadai 
1006e3a2b7edSAmir Vadai 	rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);
1007e3a2b7edSAmir Vadai 
10088b32580dSOr Gerlitz 	mlx5e_tc_del_flow(priv, flow->rule, flow->attr);
1009e3a2b7edSAmir Vadai 
1010a54e20b4SHadar Hen Zion 	if (flow->attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
1011a54e20b4SHadar Hen Zion 		mlx5e_detach_encap(priv, flow);
1012a54e20b4SHadar Hen Zion 
1013e3a2b7edSAmir Vadai 	kfree(flow);
1014e3a2b7edSAmir Vadai 
1015e3a2b7edSAmir Vadai 	return 0;
1016e3a2b7edSAmir Vadai }
1017e3a2b7edSAmir Vadai 
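/* Feed the HW counter of an offloaded flow back into the TC action stats. */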
1018aad7e08dSAmir Vadai int mlx5e_stats_flower(struct mlx5e_priv *priv,
1019aad7e08dSAmir Vadai 		       struct tc_cls_flower_offload *f)
1020aad7e08dSAmir Vadai {
1021aad7e08dSAmir Vadai 	struct mlx5e_tc_table *tc = &priv->fs.tc;
1022aad7e08dSAmir Vadai 	struct mlx5e_tc_flow *flow;
1023aad7e08dSAmir Vadai 	struct tc_action *a;
1024aad7e08dSAmir Vadai 	struct mlx5_fc *counter;
102522dc13c8SWANG Cong 	LIST_HEAD(actions);
1026aad7e08dSAmir Vadai 	u64 bytes;
1027aad7e08dSAmir Vadai 	u64 packets;
1028aad7e08dSAmir Vadai 	u64 lastuse;
1029aad7e08dSAmir Vadai 
1030aad7e08dSAmir Vadai 	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
1031aad7e08dSAmir Vadai 				      tc->ht_params);
1032aad7e08dSAmir Vadai 	if (!flow)
1033aad7e08dSAmir Vadai 		return -EINVAL;
1034aad7e08dSAmir Vadai 
1035aad7e08dSAmir Vadai 	counter = mlx5_flow_rule_counter(flow->rule);
1036aad7e08dSAmir Vadai 	if (!counter)
1037aad7e08dSAmir Vadai 		return 0;
1038aad7e08dSAmir Vadai 
1039aad7e08dSAmir Vadai 	mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
1040aad7e08dSAmir Vadai 
104122dc13c8SWANG Cong 	tcf_exts_to_list(f->exts, &actions);
104222dc13c8SWANG Cong 	list_for_each_entry(a, &actions, list)
1043aad7e08dSAmir Vadai 		tcf_action_stats_update(a, bytes, packets, lastuse);
1044aad7e08dSAmir Vadai 
1045aad7e08dSAmir Vadai 	return 0;
1046aad7e08dSAmir Vadai }
1047aad7e08dSAmir Vadai 
1048e8f887acSAmir Vadai static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
1049e8f887acSAmir Vadai 	.head_offset = offsetof(struct mlx5e_tc_flow, node),
1050e8f887acSAmir Vadai 	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
1051e8f887acSAmir Vadai 	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
1052e8f887acSAmir Vadai 	.automatic_shrinking = true,
1053e8f887acSAmir Vadai };
1054e8f887acSAmir Vadai 
1055e8f887acSAmir Vadai int mlx5e_tc_init(struct mlx5e_priv *priv)
1056e8f887acSAmir Vadai {
1057acff797cSMaor Gottlieb 	struct mlx5e_tc_table *tc = &priv->fs.tc;
1058e8f887acSAmir Vadai 
1059e8f887acSAmir Vadai 	tc->ht_params = mlx5e_tc_flow_ht_params;
1060e8f887acSAmir Vadai 	return rhashtable_init(&tc->ht, &tc->ht_params);
1061e8f887acSAmir Vadai }
1062e8f887acSAmir Vadai 
1063e8f887acSAmir Vadai static void _mlx5e_tc_del_flow(void *ptr, void *arg)
1064e8f887acSAmir Vadai {
1065e8f887acSAmir Vadai 	struct mlx5e_tc_flow *flow = ptr;
1066e8f887acSAmir Vadai 	struct mlx5e_priv *priv = arg;
1067e8f887acSAmir Vadai 
10688b32580dSOr Gerlitz 	mlx5e_tc_del_flow(priv, flow->rule, flow->attr);
1069e8f887acSAmir Vadai 	kfree(flow);
1070e8f887acSAmir Vadai }
1071e8f887acSAmir Vadai 
1072e8f887acSAmir Vadai void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
1073e8f887acSAmir Vadai {
1074acff797cSMaor Gottlieb 	struct mlx5e_tc_table *tc = &priv->fs.tc;
1075e8f887acSAmir Vadai 
1076e8f887acSAmir Vadai 	rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);
1077e8f887acSAmir Vadai 
1078acff797cSMaor Gottlieb 	if (!IS_ERR_OR_NULL(tc->t)) {
1079acff797cSMaor Gottlieb 		mlx5_destroy_flow_table(tc->t);
1080acff797cSMaor Gottlieb 		tc->t = NULL;
1081e8f887acSAmir Vadai 	}
1082e8f887acSAmir Vadai }
1083