/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_mirred.h>
#include "en.h"
#include "en_tc.h"
#include "eswitch.h"

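/* A single offloaded TC flow: hashed by the TC filter cookie and
 * holding the hardware flow rule installed for it.
 */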
struct mlx5e_tc_flow {
	struct rhash_head	node;
	u64			cookie;
	struct mlx5_flow_rule	*rule;
};

#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
#define MLX5E_TC_TABLE_NUM_GROUPS 4

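/* Install a NIC (non-eswitch) offloaded flow. Forward actions use the
 * vlan flow table as destination; otherwise, if counting was requested,
 * a flow counter is created and used as the destination. The TC flow
 * table itself is created lazily on first use and destroyed again on
 * failure if it was created here.
 */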
static struct mlx5_flow_rule *mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
						    struct mlx5_flow_spec *spec,
						    u32 action, u32 flow_tag)
{
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_flow_destination dest = { 0 };
	struct mlx5_fc *counter = NULL;
	struct mlx5_flow_rule *rule;
	bool table_created = false;

	if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest.ft = priv->fs.vlan.ft.t;
	} else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(dev, true);
		if (IS_ERR(counter))
			return ERR_CAST(counter);

		dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest.counter = counter;
	}

	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
		priv->fs.tc.t =
			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
							    MLX5E_TC_PRIO,
							    MLX5E_TC_TABLE_NUM_ENTRIES,
							    MLX5E_TC_TABLE_NUM_GROUPS,
							    0);
		if (IS_ERR(priv->fs.tc.t)) {
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			rule = ERR_CAST(priv->fs.tc.t);
			goto err_create_ft;
		}

		table_created = true;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	rule = mlx5_add_flow_rule(priv->fs.tc.t, spec,
				  action, flow_tag,
				  &dest);

	if (IS_ERR(rule))
		goto err_add_rule;

	return rule;

err_add_rule:
	if (table_created) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
err_create_ft:
	mlx5_fc_destroy(dev, counter);

	return rule;
}

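/* Install an offloaded flow into the eswitch FDB. The source vport is
 * taken from the representor this netdev belongs to; vport 0 maps to
 * the uplink vport.
 */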
static struct mlx5_flow_rule *mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
						    struct mlx5_flow_spec *spec,
						    u32 action, u32 dst_vport)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_eswitch_rep *rep = priv->ppriv;
	u32 src_vport;

	if (rep->vport) /* set source vport for the flow */
		src_vport = rep->vport;
	else
		src_vport = FDB_UPLINK_VPORT;

	return mlx5_eswitch_add_offloaded_rule(esw, spec, action, src_vport, dst_vport);
}

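/* Remove an offloaded flow: delete the hardware rule, free its counter
 * (if any), and destroy the TC flow table once no filters remain.
 */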
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5_flow_rule *rule)
{
	struct mlx5_fc *counter = NULL;

	counter = mlx5_flow_rule_counter(rule);

	mlx5_del_flow_rule(rule);

	mlx5_fc_destroy(priv->mdev, counter);

	if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
}

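/* Translate a flower classifier match into an mlx5 flow spec. Only outer
 * header fields are supported: ethertype, IP protocol, MAC addresses,
 * IPv4/IPv6 addresses and TCP/UDP ports.
 */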
static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec,
			    struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	u16 addr_type = 0;
	u8 ip_proto = 0;

	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS))) {
		netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
			    f->dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);
		addr_type = key->addr_type;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		ip_proto = key->ip_proto;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(key->n_proto));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 mask->ip_proto);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 key->ip_proto);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);
		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     dmac_47_16),
				mask->dst);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     dmac_47_16),
				key->dst);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     smac_47_16),
				mask->src);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     smac_47_16),
				key->src);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &key->src, sizeof(key->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &key->dst, sizeof(key->dst));
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, sizeof(key->src));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, sizeof(key->dst));
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->mask);
		switch (ip_proto) {
		case IPPROTO_TCP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_dport, ntohs(key->dst));
			break;

		case IPPROTO_UDP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_dport, ntohs(key->dst));
			break;
		default:
			netdev_err(priv->netdev,
				   "Only UDP and TCP transport are supported\n");
			return -EINVAL;
		}
	}

	return 0;
}

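/* Parse TC actions for the NIC offload path. A single action per rule
 * is supported: drop (with a counter when the device supports flow
 * counters) or skbedit mark, which is mapped to the flow tag and
 * implies forwarding to the regular receive path.
 */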
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				u32 *action, u32 *flow_tag)
{
	const struct tc_action *a;
	LIST_HEAD(actions);

	if (tc_no_actions(exts))
		return -EINVAL;

	*flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
	*action = 0;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		/* Only support a single action per rule */
		if (*action)
			return -EINVAL;

		if (is_tcf_gact_shot(a)) {
			*action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
			if (MLX5_CAP_FLOWTABLE(priv->mdev,
					       flow_table_properties_nic_receive.flow_counter))
				*action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_skbedit_mark(a)) {
			u32 mark = tcf_skbedit_mark(a);

			if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
				netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n",
					    mark);
				return -EINVAL;
			}

			*flow_tag = mark;
			*action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			continue;
		}

		return -EINVAL;
	}

	return 0;
}

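/* Parse TC actions for the eswitch (FDB) offload path. A single action
 * per rule is supported: drop with counting, or mirred redirect to a
 * representor on the same switch, which resolves to a destination vport.
 */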
static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				u32 *action, u32 *dest_vport)
{
	const struct tc_action *a;
	LIST_HEAD(actions);

	if (tc_no_actions(exts))
		return -EINVAL;

	*action = 0;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		/* Only support a single action per rule */
		if (*action)
			return -EINVAL;

		if (is_tcf_gact_shot(a)) {
			*action = MLX5_FLOW_CONTEXT_ACTION_DROP |
				  MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_mirred_redirect(a)) {
			int ifindex = tcf_mirred_ifindex(a);
			struct net_device *out_dev;
			struct mlx5e_priv *out_priv;
			struct mlx5_eswitch_rep *out_rep;

			out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);

			if (!switchdev_port_same_parent_id(priv->netdev, out_dev)) {
				pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
				       priv->netdev->name, out_dev->name);
				return -EINVAL;
			}

			out_priv = netdev_priv(out_dev);
			out_rep  = out_priv->ppriv;
			if (out_rep->vport == 0)
				*dest_vport = FDB_UPLINK_VPORT;
			else
				*dest_vport = out_rep->vport;
			*action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			continue;
		}

		return -EINVAL;
	}
	return 0;
}

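/* Add or replace a flower filter. The match is parsed into a flow spec
 * and the actions are offloaded either to the eswitch FDB (in SRIOV
 * offloads mode) or to the NIC TC table. On success the flow is inserted
 * into the cookie hash table and any previous rule with the same cookie
 * is removed.
 */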
int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
			   struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	int err = 0;
	u32 flow_tag, action, dest_vport = 0;
	struct mlx5e_tc_flow *flow;
	struct mlx5_flow_spec *spec;
	struct mlx5_flow_rule *old = NULL;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (flow)
		old = flow->rule;
	else
		flow = kzalloc(sizeof(*flow), GFP_KERNEL);

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec || !flow) {
		err = -ENOMEM;
		goto err_free;
	}

	flow->cookie = f->cookie;

	err = parse_cls_flower(priv, spec, f);
	if (err < 0)
		goto err_free;

	if (esw && esw->mode == SRIOV_OFFLOADS) {
		err = parse_tc_fdb_actions(priv, f->exts, &action, &dest_vport);
		if (err < 0)
			goto err_free;
		flow->rule = mlx5e_tc_add_fdb_flow(priv, spec, action, dest_vport);
	} else {
		err = parse_tc_nic_actions(priv, f->exts, &action, &flow_tag);
		if (err < 0)
			goto err_free;
		flow->rule = mlx5e_tc_add_nic_flow(priv, spec, action, flow_tag);
	}

	if (IS_ERR(flow->rule)) {
		err = PTR_ERR(flow->rule);
		goto err_free;
	}

	err = rhashtable_insert_fast(&tc->ht, &flow->node,
				     tc->ht_params);
	if (err)
		goto err_del_rule;

	if (old)
		mlx5e_tc_del_flow(priv, old);

	goto out;

err_del_rule:
	mlx5_del_flow_rule(flow->rule);

err_free:
	if (!old)
		kfree(flow);
out:
	kvfree(spec);
	return err;
}

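/* Delete a flower filter by cookie: remove it from the hash table and
 * tear down its hardware rule.
 */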
int mlx5e_delete_flower(struct mlx5e_priv *priv,
			struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_flow *flow;
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);

	mlx5e_tc_del_flow(priv, flow->rule);

	kfree(flow);

	return 0;
}

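/* Report hardware statistics for a flower filter: read the cached
 * counter values and propagate them to the filter's TC actions.
 */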
int mlx5e_stats_flower(struct mlx5e_priv *priv,
		       struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5e_tc_flow *flow;
	struct tc_action *a;
	struct mlx5_fc *counter;
	LIST_HEAD(actions);
	u64 bytes;
	u64 packets;
	u64 lastuse;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	counter = mlx5_flow_rule_counter(flow->rule);
	if (!counter)
		return 0;

	mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);

	tcf_exts_to_list(f->exts, &actions);
	list_for_each_entry(a, &actions, list)
		tcf_action_stats_update(a, bytes, packets, lastuse);

	return 0;
}

static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
	.head_offset = offsetof(struct mlx5e_tc_flow, node),
	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};

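/* Initialize the per-priv TC state: the hash table mapping filter
 * cookies to offloaded flows.
 */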
int mlx5e_tc_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	tc->ht_params = mlx5e_tc_flow_ht_params;
	return rhashtable_init(&tc->ht, &tc->ht_params);
}

static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
	struct mlx5e_tc_flow *flow = ptr;
	struct mlx5e_priv *priv = arg;

	mlx5e_tc_del_flow(priv, flow->rule);
	kfree(flow);
}

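/* Tear down the TC state: remove any remaining offloaded flows and
 * destroy the TC flow table if it still exists.
 */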
void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);

	if (!IS_ERR_OR_NULL(tc->t)) {
		mlx5_destroy_flow_table(tc->t);
		tc->t = NULL;
	}
}