/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

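/*
 * Ethtool receive flow steering for mlx5e: ethtool_rx_flow_spec rules
 * (ETHTOOL_SRXCLSRLINS and friends) are translated into mlx5 flow-table
 * entries that either drop matching packets or forward them to a TIR
 * (a single RQ or an RSS context).
 *
 * As an illustrative example (interface name and queue number are
 * arbitrary), a rule inserted with
 *
 *	ethtool -N eth0 flow-type tcp4 dst-port 80 action 3
 *
 * arrives here as a TCP_V4_FLOW spec with a pdst mask/value pair and
 * ring_cookie 3, and becomes a single FTE forwarding to queue 3's TIR.
 */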
#include <linux/mlx5/fs.h>
#include "en.h"
#include "en/params.h"
#include "en/xsk/pool.h"

static int flow_type_to_traffic_type(u32 flow_type);

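/* Strip the ethtool modifier flags, leaving only the base flow type. */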
static u32 flow_type_mask(u32 flow_type)
{
	return flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
}

struct mlx5e_ethtool_rule {
	struct list_head             list;
	struct ethtool_rx_flow_spec  flow_spec;
	struct mlx5_flow_handle      *rule;
	struct mlx5e_ethtool_table   *eth_ft;
	struct mlx5e_rss             *rss;
};

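/*
 * Flow tables are shared by all rules with the same tuple count;
 * num_rules acts as a reference count, and the table is destroyed when
 * the last rule pointing at it is removed.
 */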
static void put_flow_table(struct mlx5e_ethtool_table *eth_ft)
{
	if (!--eth_ft->num_rules) {
		mlx5_destroy_flow_table(eth_ft->ft);
		eth_ft->ft = NULL;
	}
}

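/*
 * Priority layout in the ETHTOOL namespace: a rule that matches on more
 * tuples goes into a table with a lower prio value, which is evaluated
 * first. For example, a 4-tuple rule lands at prio
 * ETHTOOL_NUM_L3_L4_FTS - 4, ahead of a 1-tuple rule at prio
 * ETHTOOL_NUM_L3_L4_FTS - 1. L2 (ETHER_FLOW) tables follow all L3/L4
 * tables.
 */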
#define MLX5E_ETHTOOL_L3_L4_PRIO 0
#define MLX5E_ETHTOOL_L2_PRIO (MLX5E_ETHTOOL_L3_L4_PRIO + ETHTOOL_NUM_L3_L4_FTS)
#define MLX5E_ETHTOOL_NUM_ENTRIES 64000
#define MLX5E_ETHTOOL_NUM_GROUPS  10
static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
						  struct ethtool_rx_flow_spec *fs,
						  int num_tuples)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5e_ethtool_table *eth_ft;
	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_table *ft;
	int max_tuples;
	int table_size;
	int prio;

	switch (flow_type_mask(fs->flow_type)) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case IP_USER_FLOW:
	case IPV6_USER_FLOW:
		max_tuples = ETHTOOL_NUM_L3_L4_FTS;
		prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
		eth_ft = &priv->fs->ethtool.l3_l4_ft[prio];
		break;
	case ETHER_FLOW:
		max_tuples = ETHTOOL_NUM_L2_FTS;
		prio = max_tuples - num_tuples;
		eth_ft = &priv->fs->ethtool.l2_ft[prio];
		prio += MLX5E_ETHTOOL_L2_PRIO;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	eth_ft->num_rules++;
	if (eth_ft->ft)
		return eth_ft;

	ns = mlx5_get_flow_namespace(priv->mdev,
				     MLX5_FLOW_NAMESPACE_ETHTOOL);
	if (!ns)
		return ERR_PTR(-EOPNOTSUPP);

	table_size = min_t(u32, BIT(MLX5_CAP_FLOWTABLE(priv->mdev,
						       flow_table_properties_nic_receive.log_max_ft_size)),
			   MLX5E_ETHTOOL_NUM_ENTRIES);

	ft_attr.prio = prio;
	ft_attr.max_fte = table_size;
	ft_attr.autogroup.max_num_groups = MLX5E_ETHTOOL_NUM_GROUPS;
	ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
	if (IS_ERR(ft))
		return ERR_CAST(ft);

	eth_ft->ft = ft;
	return eth_ft;
}

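/* Clear value bits not covered by the mask, byte by byte, in place. */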
static void mask_spec(u8 *mask, u8 *val, size_t size)
{
	unsigned int i;

	for (i = 0; i < size; i++, mask++, val++)
		*val = *mask & *val;
}

#define MLX5E_FTE_SET(header_p, fld, v)  \
	MLX5_SET(fte_match_set_lyr_2_4, header_p, fld, v)

#define MLX5E_FTE_ADDR_OF(header_p, fld) \
	MLX5_ADDR_OF(fte_match_set_lyr_2_4, header_p, fld)

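/*
 * The set_* helpers below fill a pair of fte_match_set_lyr_2_4 headers:
 * headers_c holds the match criteria (mask) and headers_v the values to
 * match against. A field is only written when its ethtool mask is
 * nonzero, so unspecified fields stay wildcarded.
 */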
static void
set_ip4(void *headers_c, void *headers_v, __be32 ip4src_m,
	__be32 ip4src_v, __be32 ip4dst_m, __be32 ip4dst_v)
{
	if (ip4src_m) {
		memcpy(MLX5E_FTE_ADDR_OF(headers_v, src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &ip4src_v, sizeof(ip4src_v));
		memcpy(MLX5E_FTE_ADDR_OF(headers_c, src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &ip4src_m, sizeof(ip4src_m));
	}
	if (ip4dst_m) {
		memcpy(MLX5E_FTE_ADDR_OF(headers_v, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &ip4dst_v, sizeof(ip4dst_v));
		memcpy(MLX5E_FTE_ADDR_OF(headers_c, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &ip4dst_m, sizeof(ip4dst_m));
	}

	MLX5E_FTE_SET(headers_c, ethertype, 0xffff);
	MLX5E_FTE_SET(headers_v, ethertype, ETH_P_IP);
}

static void
set_ip6(void *headers_c, void *headers_v, __be32 ip6src_m[4],
	__be32 ip6src_v[4], __be32 ip6dst_m[4], __be32 ip6dst_v[4])
{
	u8 ip6_sz = MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6);

	if (!ipv6_addr_any((struct in6_addr *)ip6src_m)) {
		memcpy(MLX5E_FTE_ADDR_OF(headers_v, src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       ip6src_v, ip6_sz);
		memcpy(MLX5E_FTE_ADDR_OF(headers_c, src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       ip6src_m, ip6_sz);
	}
	if (!ipv6_addr_any((struct in6_addr *)ip6dst_m)) {
		memcpy(MLX5E_FTE_ADDR_OF(headers_v, dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       ip6dst_v, ip6_sz);
		memcpy(MLX5E_FTE_ADDR_OF(headers_c, dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       ip6dst_m, ip6_sz);
	}

	MLX5E_FTE_SET(headers_c, ethertype, 0xffff);
	MLX5E_FTE_SET(headers_v, ethertype, ETH_P_IPV6);
}

static void
set_tcp(void *headers_c, void *headers_v, __be16 psrc_m, __be16 psrc_v,
	__be16 pdst_m, __be16 pdst_v)
{
	if (psrc_m) {
		MLX5E_FTE_SET(headers_c, tcp_sport, ntohs(psrc_m));
		MLX5E_FTE_SET(headers_v, tcp_sport, ntohs(psrc_v));
	}
	if (pdst_m) {
		MLX5E_FTE_SET(headers_c, tcp_dport, ntohs(pdst_m));
		MLX5E_FTE_SET(headers_v, tcp_dport, ntohs(pdst_v));
	}

	MLX5E_FTE_SET(headers_c, ip_protocol, 0xffff);
	MLX5E_FTE_SET(headers_v, ip_protocol, IPPROTO_TCP);
}

static void
set_udp(void *headers_c, void *headers_v, __be16 psrc_m, __be16 psrc_v,
	__be16 pdst_m, __be16 pdst_v)
{
	if (psrc_m) {
		MLX5E_FTE_SET(headers_c, udp_sport, ntohs(psrc_m));
		MLX5E_FTE_SET(headers_v, udp_sport, ntohs(psrc_v));
	}

	if (pdst_m) {
		MLX5E_FTE_SET(headers_c, udp_dport, ntohs(pdst_m));
		MLX5E_FTE_SET(headers_v, udp_dport, ntohs(pdst_v));
	}

	MLX5E_FTE_SET(headers_c, ip_protocol, 0xffff);
	MLX5E_FTE_SET(headers_v, ip_protocol, IPPROTO_UDP);
}

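/*
 * The parse_* helpers unpack the flow-type specific union member of an
 * ethtool_rx_flow_spec and feed it to the set_* helpers above.
 */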
static void
parse_tcp4(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip4_spec *l4_mask = &fs->m_u.tcp_ip4_spec;
	struct ethtool_tcpip4_spec *l4_val  = &fs->h_u.tcp_ip4_spec;

	set_ip4(headers_c, headers_v, l4_mask->ip4src, l4_val->ip4src,
		l4_mask->ip4dst, l4_val->ip4dst);

	set_tcp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
		l4_mask->pdst, l4_val->pdst);
}

static void
parse_udp4(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip4_spec *l4_mask = &fs->m_u.udp_ip4_spec;
	struct ethtool_tcpip4_spec *l4_val  = &fs->h_u.udp_ip4_spec;

	set_ip4(headers_c, headers_v, l4_mask->ip4src, l4_val->ip4src,
		l4_mask->ip4dst, l4_val->ip4dst);

	set_udp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
		l4_mask->pdst, l4_val->pdst);
}

static void
parse_ip4(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_usrip4_spec *l3_mask = &fs->m_u.usr_ip4_spec;
	struct ethtool_usrip4_spec *l3_val  = &fs->h_u.usr_ip4_spec;

	set_ip4(headers_c, headers_v, l3_mask->ip4src, l3_val->ip4src,
		l3_mask->ip4dst, l3_val->ip4dst);

	if (l3_mask->proto) {
		MLX5E_FTE_SET(headers_c, ip_protocol, l3_mask->proto);
		MLX5E_FTE_SET(headers_v, ip_protocol, l3_val->proto);
	}
}

static void
parse_ip6(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_usrip6_spec *l3_mask = &fs->m_u.usr_ip6_spec;
	struct ethtool_usrip6_spec *l3_val  = &fs->h_u.usr_ip6_spec;

	set_ip6(headers_c, headers_v, l3_mask->ip6src,
		l3_val->ip6src, l3_mask->ip6dst, l3_val->ip6dst);

	if (l3_mask->l4_proto) {
		MLX5E_FTE_SET(headers_c, ip_protocol, l3_mask->l4_proto);
		MLX5E_FTE_SET(headers_v, ip_protocol, l3_val->l4_proto);
	}
}

static void
parse_tcp6(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip6_spec *l4_mask = &fs->m_u.tcp_ip6_spec;
	struct ethtool_tcpip6_spec *l4_val  = &fs->h_u.tcp_ip6_spec;

	set_ip6(headers_c, headers_v, l4_mask->ip6src,
		l4_val->ip6src, l4_mask->ip6dst, l4_val->ip6dst);

	set_tcp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
		l4_mask->pdst, l4_val->pdst);
}

static void
parse_udp6(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip6_spec *l4_mask = &fs->m_u.udp_ip6_spec;
	struct ethtool_tcpip6_spec *l4_val  = &fs->h_u.udp_ip6_spec;

	set_ip6(headers_c, headers_v, l4_mask->ip6src,
		l4_val->ip6src, l4_mask->ip6dst, l4_val->ip6dst);

	set_udp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
		l4_mask->pdst, l4_val->pdst);
}

static void
parse_ether(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
	struct ethhdr *eth_mask = &fs->m_u.ether_spec;
	struct ethhdr *eth_val = &fs->h_u.ether_spec;

	mask_spec((u8 *)eth_mask, (u8 *)eth_val, sizeof(*eth_mask));
	ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_c, smac_47_16), eth_mask->h_source);
	ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_v, smac_47_16), eth_val->h_source);
	ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_c, dmac_47_16), eth_mask->h_dest);
	ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_v, dmac_47_16), eth_val->h_dest);
	MLX5E_FTE_SET(headers_c, ethertype, ntohs(eth_mask->h_proto));
	MLX5E_FTE_SET(headers_v, ethertype, ntohs(eth_val->h_proto));
}

static void
set_cvlan(void *headers_c, void *headers_v, __be16 vlan_tci)
{
	MLX5E_FTE_SET(headers_c, cvlan_tag, 1);
	MLX5E_FTE_SET(headers_v, cvlan_tag, 1);
	MLX5E_FTE_SET(headers_c, first_vid, 0xfff);
	MLX5E_FTE_SET(headers_v, first_vid, ntohs(vlan_tci));
}

static void
set_dmac(void *headers_c, void *headers_v,
	 unsigned char m_dest[ETH_ALEN], unsigned char v_dest[ETH_ALEN])
{
	ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_c, dmac_47_16), m_dest);
	ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_v, dmac_47_16), v_dest);
}

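/*
 * Build the match criteria/value pair for a spec; returns -EINVAL for an
 * unsupported flow type. VLAN (FLOW_EXT) and destination MAC
 * (FLOW_MAC_EXT) extensions are applied on top of the base flow type.
 */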
static int set_flow_attrs(u32 *match_c, u32 *match_v,
			  struct ethtool_rx_flow_spec *fs)
{
	void *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
					     outer_headers);
	void *outer_headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
					     outer_headers);
	u32 flow_type = flow_type_mask(fs->flow_type);

	switch (flow_type) {
	case TCP_V4_FLOW:
		parse_tcp4(outer_headers_c, outer_headers_v, fs);
		break;
	case UDP_V4_FLOW:
		parse_udp4(outer_headers_c, outer_headers_v, fs);
		break;
	case IP_USER_FLOW:
		parse_ip4(outer_headers_c, outer_headers_v, fs);
		break;
	case TCP_V6_FLOW:
		parse_tcp6(outer_headers_c, outer_headers_v, fs);
		break;
	case UDP_V6_FLOW:
		parse_udp6(outer_headers_c, outer_headers_v, fs);
		break;
	case IPV6_USER_FLOW:
		parse_ip6(outer_headers_c, outer_headers_v, fs);
		break;
	case ETHER_FLOW:
		parse_ether(outer_headers_c, outer_headers_v, fs);
		break;
	default:
		return -EINVAL;
	}

	if ((fs->flow_type & FLOW_EXT) &&
	    (fs->m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK)))
		set_cvlan(outer_headers_c, outer_headers_v, fs->h_ext.vlan_tci);

	if (fs->flow_type & FLOW_MAC_EXT &&
	    !is_zero_ether_addr(fs->m_ext.h_dest)) {
		mask_spec(fs->m_ext.h_dest, fs->h_ext.h_dest, ETH_ALEN);
		set_dmac(outer_headers_c, outer_headers_v, fs->m_ext.h_dest,
			 fs->h_ext.h_dest);
	}

	return 0;
}

static void add_rule_to_list(struct mlx5e_priv *priv,
			     struct mlx5e_ethtool_rule *rule)
{
	struct mlx5e_ethtool_rule *iter;
	struct list_head *head = &priv->fs->ethtool.rules;

	list_for_each_entry(iter, &priv->fs->ethtool.rules, list) {
		if (iter->flow_spec.location > rule->flow_spec.location)
			break;
		head = &iter->list;
	}
	priv->fs->ethtool.tot_num_rules++;
	list_add(&rule->list, head);
}

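/* True if no outer-header match criteria were set at all, i.e. the rule
 * would match every packet.
 */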
static bool outer_header_zero(u32 *match_criteria)
{
	int size = MLX5_FLD_SZ_BYTES(fte_match_param, outer_headers);
	char *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_criteria,
					     outer_headers);

	return outer_headers_c[0] == 0 && !memcmp(outer_headers_c,
						  outer_headers_c + 1,
						  size - 1);
}

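/*
 * Resolve the destination TIR: either a TIR belonging to an explicit RSS
 * context (FLOW_RSS) or the direct/XSK TIR of the queue encoded in
 * ring_cookie.
 */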
static int flow_get_tirn(struct mlx5e_priv *priv,
			 struct mlx5e_ethtool_rule *eth_rule,
			 struct ethtool_rx_flow_spec *fs,
			 u32 rss_context, u32 *tirn)
{
	if (fs->flow_type & FLOW_RSS) {
		struct mlx5e_packet_merge_param pkt_merge_param;
		struct mlx5e_rss *rss;
		u32 flow_type;
		int err;
		int tt;

		rss = mlx5e_rx_res_rss_get(priv->rx_res, rss_context);
		if (!rss)
			return -ENOENT;

		flow_type = flow_type_mask(fs->flow_type);
		tt = flow_type_to_traffic_type(flow_type);
		if (tt < 0)
			return -EINVAL;

		pkt_merge_param = priv->channels.params.packet_merge;
		err = mlx5e_rss_obtain_tirn(rss, tt, &pkt_merge_param, false, tirn);
		if (err)
			return err;
		eth_rule->rss = rss;
		mlx5e_rss_refcnt_inc(eth_rule->rss);
	} else {
		struct mlx5e_params *params = &priv->channels.params;
		enum mlx5e_rq_group group;
		u16 ix;

		mlx5e_qid_get_ch_and_group(params, fs->ring_cookie, &ix, &group);

		*tirn = group == MLX5E_RQ_GROUP_XSK ?
			mlx5e_rx_res_get_tirn_xsk(priv->rx_res, ix) :
			mlx5e_rx_res_get_tirn_direct(priv->rx_res, ix);
	}

	return 0;
}

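/*
 * Translate the spec into an mlx5 flow rule: a drop rule for
 * RX_CLS_FLOW_DISC, otherwise a forward to the resolved TIR.
 */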
static struct mlx5_flow_handle *
add_ethtool_flow_rule(struct mlx5e_priv *priv,
		      struct mlx5e_ethtool_rule *eth_rule,
		      struct mlx5_flow_table *ft,
		      struct ethtool_rx_flow_spec *fs, u32 rss_context)
{
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND };
	struct mlx5_flow_destination *dst = NULL;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);
	err = set_flow_attrs(spec->match_criteria, spec->match_value, fs);
	if (err)
		goto free;

	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
		flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
	} else {
		dst = kzalloc(sizeof(*dst), GFP_KERNEL);
		if (!dst) {
			err = -ENOMEM;
			goto free;
		}

		err = flow_get_tirn(priv, eth_rule, fs, rss_context, &dst->tir_num);
		if (err)
			goto free;

		dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	}

	spec->match_criteria_enable = !outer_header_zero(spec->match_criteria);
	spec->flow_context.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dst, dst ? 1 : 0);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		netdev_err(priv->netdev, "%s: failed to add ethtool steering rule: %d\n",
			   __func__, err);
		goto free;
	}
free:
	kvfree(spec);
	kfree(dst);
	return err ? ERR_PTR(err) : rule;
}

static void del_ethtool_rule(struct mlx5e_priv *priv,
			     struct mlx5e_ethtool_rule *eth_rule)
{
	if (eth_rule->rule)
		mlx5_del_flow_rules(eth_rule->rule);
	if (eth_rule->rss)
		mlx5e_rss_refcnt_dec(eth_rule->rss);
	list_del(&eth_rule->list);
	priv->fs->ethtool.tot_num_rules--;
	put_flow_table(eth_rule->eth_ft);
	kfree(eth_rule);
}

static struct mlx5e_ethtool_rule *find_ethtool_rule(struct mlx5e_priv *priv,
						    int location)
{
	struct mlx5e_ethtool_rule *iter;

	list_for_each_entry(iter, &priv->fs->ethtool.rules, list) {
		if (iter->flow_spec.location == location)
			return iter;
	}
	return NULL;
}

static struct mlx5e_ethtool_rule *get_ethtool_rule(struct mlx5e_priv *priv,
						   int location)
{
	struct mlx5e_ethtool_rule *eth_rule;

	eth_rule = find_ethtool_rule(priv, location);
	if (eth_rule)
		del_ethtool_rule(priv, eth_rule);

	eth_rule = kzalloc(sizeof(*eth_rule), GFP_KERNEL);
	if (!eth_rule)
		return ERR_PTR(-ENOMEM);

	add_rule_to_list(priv, eth_rule);
	return eth_rule;
}

#define MAX_NUM_OF_ETHTOOL_RULES BIT(10)

#define all_ones(field) ((field) == (__force typeof(field))-1)
#define all_zeros_or_all_ones(field)		\
	((field) == 0 || (field) == (__force typeof(field))-1)

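/*
 * The validate_* helpers sanity-check a spec and return the number of
 * match tuples it uses (or a negative errno). The tuple count feeds the
 * flow-table priority selection in get_flow_table().
 */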
static int validate_ethter(struct ethtool_rx_flow_spec *fs)
{
	struct ethhdr *eth_mask = &fs->m_u.ether_spec;
	int ntuples = 0;

	if (!is_zero_ether_addr(eth_mask->h_dest))
		ntuples++;
	if (!is_zero_ether_addr(eth_mask->h_source))
		ntuples++;
	if (eth_mask->h_proto)
		ntuples++;
	return ntuples;
}

static int validate_tcpudp4(struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip4_spec *l4_mask = &fs->m_u.tcp_ip4_spec;
	int ntuples = 0;

	if (l4_mask->tos)
		return -EINVAL;

	if (l4_mask->ip4src)
		ntuples++;
	if (l4_mask->ip4dst)
		ntuples++;
	if (l4_mask->psrc)
		ntuples++;
	if (l4_mask->pdst)
		ntuples++;
	/* Flow is TCP/UDP */
	return ++ntuples;
}

static int validate_ip4(struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_usrip4_spec *l3_mask = &fs->m_u.usr_ip4_spec;
	int ntuples = 0;

	if (l3_mask->l4_4_bytes || l3_mask->tos ||
	    fs->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
		return -EINVAL;
	if (l3_mask->ip4src)
		ntuples++;
	if (l3_mask->ip4dst)
		ntuples++;
	if (l3_mask->proto)
		ntuples++;
	/* Flow is IPv4 */
	return ++ntuples;
}

static int validate_ip6(struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_usrip6_spec *l3_mask = &fs->m_u.usr_ip6_spec;
	int ntuples = 0;

	if (l3_mask->l4_4_bytes || l3_mask->tclass)
		return -EINVAL;
	if (!ipv6_addr_any((struct in6_addr *)l3_mask->ip6src))
		ntuples++;

	if (!ipv6_addr_any((struct in6_addr *)l3_mask->ip6dst))
		ntuples++;
	if (l3_mask->l4_proto)
		ntuples++;
	/* Flow is IPv6 */
	return ++ntuples;
}

static int validate_tcpudp6(struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip6_spec *l4_mask = &fs->m_u.tcp_ip6_spec;
	int ntuples = 0;

	if (l4_mask->tclass)
		return -EINVAL;

	if (!ipv6_addr_any((struct in6_addr *)l4_mask->ip6src))
		ntuples++;

	if (!ipv6_addr_any((struct in6_addr *)l4_mask->ip6dst))
		ntuples++;

	if (l4_mask->psrc)
		ntuples++;
	if (l4_mask->pdst)
		ntuples++;
	/* Flow is TCP/UDP */
	return ++ntuples;
}

static int validate_vlan(struct ethtool_rx_flow_spec *fs)
{
	if (fs->m_ext.vlan_etype ||
	    fs->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK))
		return -EINVAL;

	if (fs->m_ext.vlan_tci &&
	    (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID))
		return -EINVAL;

	return 1;
}

static int validate_flow(struct mlx5e_priv *priv,
			 struct ethtool_rx_flow_spec *fs)
{
	int num_tuples = 0;
	int ret = 0;

	if (fs->location >= MAX_NUM_OF_ETHTOOL_RULES)
		return -ENOSPC;

	if (fs->ring_cookie != RX_CLS_FLOW_DISC)
		if (!mlx5e_qid_validate(priv->profile, &priv->channels.params,
					fs->ring_cookie))
			return -EINVAL;

	switch (flow_type_mask(fs->flow_type)) {
	case ETHER_FLOW:
		num_tuples += validate_ethter(fs);
		break;
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		ret = validate_tcpudp4(fs);
		if (ret < 0)
			return ret;
		num_tuples += ret;
		break;
	case IP_USER_FLOW:
		ret = validate_ip4(fs);
		if (ret < 0)
			return ret;
		num_tuples += ret;
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		ret = validate_tcpudp6(fs);
		if (ret < 0)
			return ret;
		num_tuples += ret;
		break;
	case IPV6_USER_FLOW:
		ret = validate_ip6(fs);
		if (ret < 0)
			return ret;
		num_tuples += ret;
		break;
	default:
		return -EOPNOTSUPP;
	}
	if (fs->flow_type & FLOW_EXT) {
		ret = validate_vlan(fs);
		if (ret < 0)
			return ret;
		num_tuples += ret;
	}

	if (fs->flow_type & FLOW_MAC_EXT &&
	    !is_zero_ether_addr(fs->m_ext.h_dest))
		num_tuples++;

	return num_tuples;
}

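/*
 * Insert or replace the rule at fs->location. Replacement is
 * delete-then-add: get_ethtool_rule() removes any existing rule at that
 * location before linking a fresh one into the list.
 */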
static int
mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
			   struct ethtool_rx_flow_spec *fs, u32 rss_context)
{
	struct mlx5e_ethtool_table *eth_ft;
	struct mlx5e_ethtool_rule *eth_rule;
	struct mlx5_flow_handle *rule;
	int num_tuples;
	int err;

	num_tuples = validate_flow(priv, fs);
	if (num_tuples <= 0) {
		netdev_warn(priv->netdev, "%s: flow is not valid %d\n",
			    __func__, num_tuples);
		return num_tuples;
	}

	eth_ft = get_flow_table(priv, fs, num_tuples);
	if (IS_ERR(eth_ft))
		return PTR_ERR(eth_ft);

	eth_rule = get_ethtool_rule(priv, fs->location);
	if (IS_ERR(eth_rule)) {
		put_flow_table(eth_ft);
		return PTR_ERR(eth_rule);
	}

	eth_rule->flow_spec = *fs;
	eth_rule->eth_ft = eth_ft;

	rule = add_ethtool_flow_rule(priv, eth_rule, eth_ft->ft, fs, rss_context);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto del_ethtool_rule;
	}

	eth_rule->rule = rule;

	return 0;

del_ethtool_rule:
	del_ethtool_rule(priv, eth_rule);

	return err;
}

static int
mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv, int location)
{
	struct mlx5e_ethtool_rule *eth_rule;
	int err = 0;

	if (location >= MAX_NUM_OF_ETHTOOL_RULES)
		return -ENOSPC;

	eth_rule = find_ethtool_rule(priv, location);
	if (!eth_rule) {
		err = -ENOENT;
		goto out;
	}

	del_ethtool_rule(priv, eth_rule);
out:
	return err;
}

static int
mlx5e_ethtool_get_flow(struct mlx5e_priv *priv,
		       struct ethtool_rxnfc *info, int location)
{
	struct mlx5e_ethtool_rule *eth_rule;

	if (location < 0 || location >= MAX_NUM_OF_ETHTOOL_RULES)
		return -EINVAL;

	list_for_each_entry(eth_rule, &priv->fs->ethtool.rules, list) {
		int index;

		if (eth_rule->flow_spec.location != location)
			continue;
		if (!info)
			return 0;
		info->fs = eth_rule->flow_spec;
		if (!eth_rule->rss)
			return 0;
		index = mlx5e_rx_res_rss_index(priv->rx_res, eth_rule->rss);
		if (index < 0)
			return index;
		info->rss_context = index;
		return 0;
	}

	return -ENOENT;
}

static int
mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv,
			    struct ethtool_rxnfc *info, u32 *rule_locs)
{
	int location = 0;
	int idx = 0;
	int err = 0;

	info->data = MAX_NUM_OF_ETHTOOL_RULES;
	while ((!err || err == -ENOENT) && idx < info->rule_cnt) {
		err = mlx5e_ethtool_get_flow(priv, NULL, location);
		if (!err)
			rule_locs[idx++] = location;
		location++;
	}
	return err;
}

void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv)
{
	struct mlx5e_ethtool_rule *iter;
	struct mlx5e_ethtool_rule *temp;

	list_for_each_entry_safe(iter, temp, &priv->fs->ethtool.rules, list)
		del_ethtool_rule(priv, iter);
}

void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv)
{
	INIT_LIST_HEAD(&priv->fs->ethtool.rules);
}

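/* Map an ethtool flow type to the mlx5 traffic type used to pick an RSS
 * TIR; returns -EINVAL for flow types with no matching traffic type.
 */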
static int flow_type_to_traffic_type(u32 flow_type)
{
	switch (flow_type) {
	case TCP_V4_FLOW:
		return MLX5_TT_IPV4_TCP;
	case TCP_V6_FLOW:
		return MLX5_TT_IPV6_TCP;
	case UDP_V4_FLOW:
		return MLX5_TT_IPV4_UDP;
	case UDP_V6_FLOW:
		return MLX5_TT_IPV6_UDP;
	case AH_V4_FLOW:
		return MLX5_TT_IPV4_IPSEC_AH;
	case AH_V6_FLOW:
		return MLX5_TT_IPV6_IPSEC_AH;
	case ESP_V4_FLOW:
		return MLX5_TT_IPV4_IPSEC_ESP;
	case ESP_V6_FLOW:
		return MLX5_TT_IPV6_IPSEC_ESP;
	case IPV4_FLOW:
		return MLX5_TT_IPV4;
	case IPV6_FLOW:
		return MLX5_TT_IPV6;
	default:
		return -EINVAL;
	}
}

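/*
 * Configure which packet fields feed the RSS hash for one traffic type.
 * As an illustrative example (interface name is arbitrary), this is what
 * services a command such as:
 *
 *	ethtool -N eth0 rx-flow-hash tcp4 sdfn
 *
 * i.e. hash TCP/IPv4 flows on src IP (s), dst IP (d), src port (f) and
 * dst port (n).
 */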
static int mlx5e_set_rss_hash_opt(struct mlx5e_priv *priv,
				  struct ethtool_rxnfc *nfc)
{
	u8 rx_hash_field = 0;
	int err;
	int tt;

	tt = flow_type_to_traffic_type(nfc->flow_type);
	if (tt < 0)
		return tt;

	/* RSS does not support anything other than hashing to queues
	 * on src IP, dest IP, TCP/UDP src port and TCP/UDP dest
	 * port.
	 */
	if (nfc->flow_type != TCP_V4_FLOW &&
	    nfc->flow_type != TCP_V6_FLOW &&
	    nfc->flow_type != UDP_V4_FLOW &&
	    nfc->flow_type != UDP_V6_FLOW)
		return -EOPNOTSUPP;

	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EOPNOTSUPP;

	if (nfc->data & RXH_IP_SRC)
		rx_hash_field |= MLX5_HASH_FIELD_SEL_SRC_IP;
	if (nfc->data & RXH_IP_DST)
		rx_hash_field |= MLX5_HASH_FIELD_SEL_DST_IP;
	if (nfc->data & RXH_L4_B_0_1)
		rx_hash_field |= MLX5_HASH_FIELD_SEL_L4_SPORT;
	if (nfc->data & RXH_L4_B_2_3)
		rx_hash_field |= MLX5_HASH_FIELD_SEL_L4_DPORT;

	mutex_lock(&priv->state_lock);
	err = mlx5e_rx_res_rss_set_hash_fields(priv->rx_res, tt, rx_hash_field);
	mutex_unlock(&priv->state_lock);

	return err;
}

static int mlx5e_get_rss_hash_opt(struct mlx5e_priv *priv,
				  struct ethtool_rxnfc *nfc)
{
	u32 hash_field = 0;
	int tt;

	tt = flow_type_to_traffic_type(nfc->flow_type);
	if (tt < 0)
		return tt;

	hash_field = mlx5e_rx_res_rss_get_hash_fields(priv->rx_res, tt);
	nfc->data = 0;

	if (hash_field & MLX5_HASH_FIELD_SEL_SRC_IP)
		nfc->data |= RXH_IP_SRC;
	if (hash_field & MLX5_HASH_FIELD_SEL_DST_IP)
		nfc->data |= RXH_IP_DST;
	if (hash_field & MLX5_HASH_FIELD_SEL_L4_SPORT)
		nfc->data |= RXH_L4_B_0_1;
	if (hash_field & MLX5_HASH_FIELD_SEL_L4_DPORT)
		nfc->data |= RXH_L4_B_2_3;

	return 0;
}

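/*
 * Top-level rxnfc entry points, reached from the driver's
 * .set_rxnfc/.get_rxnfc ethtool_ops.
 */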
int mlx5e_ethtool_set_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *cmd)
{
	int err = 0;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		err = mlx5e_ethtool_flow_replace(priv, &cmd->fs, cmd->rss_context);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		err = mlx5e_ethtool_flow_remove(priv, cmd->fs.location);
		break;
	case ETHTOOL_SRXFH:
		err = mlx5e_set_rss_hash_opt(priv, cmd);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
			    struct ethtool_rxnfc *info, u32 *rule_locs)
{
	int err = 0;

	switch (info->cmd) {
	case ETHTOOL_GRXCLSRLCNT:
		info->rule_cnt = priv->fs->ethtool.tot_num_rules;
		break;
	case ETHTOOL_GRXCLSRULE:
		err = mlx5e_ethtool_get_flow(priv, info, info->fs.location);
		break;
	case ETHTOOL_GRXCLSRLALL:
		err = mlx5e_ethtool_get_all_flows(priv, info, rule_locs);
		break;
	case ETHTOOL_GRXFH:
		err = mlx5e_get_rss_hash_opt(priv, info);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}