/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/fs.h>
#include "en.h"

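/* One entry per installed ethtool steering rule, kept on
 * priv->fs.ethtool.rules sorted by ->flow_spec.location. ->rule is the
 * handle returned by the flow steering core and ->eth_ft is the table
 * the rule was inserted into.
 */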
struct mlx5e_ethtool_rule {
	struct list_head             list;
	struct ethtool_rx_flow_spec  flow_spec;
	struct mlx5_flow_handle	     *rule;
	struct mlx5e_ethtool_table   *eth_ft;
};

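/* Drop one rule reference from the table; the underlying mlx5 flow
 * table is destroyed once its last rule is removed.
 */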
static void put_flow_table(struct mlx5e_ethtool_table *eth_ft)
{
	if (!--eth_ft->num_rules) {
		mlx5_destroy_flow_table(eth_ft->ft);
		eth_ft->ft = NULL;
	}
}

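/* Flow table priority layout: one priority per possible number of match
 * tuples, so that rules with more match fields land at lower priority
 * values, which the mlx5 steering pipeline consults first. The L3/L4
 * priorities precede the L2 priorities.
 */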
#define MLX5E_ETHTOOL_L3_L4_PRIO 0
#define MLX5E_ETHTOOL_L2_PRIO (MLX5E_ETHTOOL_L3_L4_PRIO + ETHTOOL_NUM_L3_L4_FTS)
#define MLX5E_ETHTOOL_NUM_ENTRIES 64000
#define MLX5E_ETHTOOL_NUM_GROUPS  10
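/* Find (or create on first use) the flow table that serves rules with
 * @num_tuples match fields of this flow type, and take a reference on
 * it. The table size is capped by both MLX5E_ETHTOOL_NUM_ENTRIES and
 * the device's log_max_ft_size capability.
 */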
static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
						  struct ethtool_rx_flow_spec *fs,
						  int num_tuples)
{
	struct mlx5e_ethtool_table *eth_ft;
	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_table *ft;
	int max_tuples;
	int table_size;
	int prio;

	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case IP_USER_FLOW:
	case IPV6_USER_FLOW:
		max_tuples = ETHTOOL_NUM_L3_L4_FTS;
		prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
		eth_ft = &priv->fs.ethtool.l3_l4_ft[prio];
		break;
	case ETHER_FLOW:
		max_tuples = ETHTOOL_NUM_L2_FTS;
		prio = max_tuples - num_tuples;
		eth_ft = &priv->fs.ethtool.l2_ft[prio];
		prio += MLX5E_ETHTOOL_L2_PRIO;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	eth_ft->num_rules++;
	if (eth_ft->ft)
		return eth_ft;

	ns = mlx5_get_flow_namespace(priv->mdev,
				     MLX5_FLOW_NAMESPACE_ETHTOOL);
	if (!ns)
		return ERR_PTR(-EOPNOTSUPP);

	table_size = min_t(u32, BIT(MLX5_CAP_FLOWTABLE(priv->mdev,
						       flow_table_properties_nic_receive.log_max_ft_size)),
			   MLX5E_ETHTOOL_NUM_ENTRIES);
	ft = mlx5_create_auto_grouped_flow_table(ns, prio,
						 table_size,
						 MLX5E_ETHTOOL_NUM_GROUPS, 0, 0);
	if (IS_ERR(ft))
		return (void *)ft;

	eth_ft->ft = ft;
	return eth_ft;
}

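/* Clear value bits that are not covered by the mask, so the value
 * written to the FTE never exceeds the match criteria.
 */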
static void mask_spec(u8 *mask, u8 *val, size_t size)
{
	unsigned int i;

	for (i = 0; i < size; i++, mask++, val++)
		*val &= *mask;
}

#define MLX5E_FTE_SET(header_p, fld, v)  \
	MLX5_SET(fte_match_set_lyr_2_4, header_p, fld, v)

#define MLX5E_FTE_ADDR_OF(header_p, fld) \
	MLX5_ADDR_OF(fte_match_set_lyr_2_4, header_p, fld)
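
/* The set_*()/parse_*() helpers below fill the outer header match
 * criteria (headers_c) and match value (headers_v) of an FTE from an
 * ethtool flow spec. A field is written only when its mask is non-zero;
 * set_ip4()/set_ip6() always match the ethertype exactly, and
 * set_tcp()/set_udp() always match the IP protocol exactly.
 */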
static void
set_ip4(void *headers_c, void *headers_v, __be32 ip4src_m,
	__be32 ip4src_v, __be32 ip4dst_m, __be32 ip4dst_v)
{
	if (ip4src_m) {
		memcpy(MLX5E_FTE_ADDR_OF(headers_v, src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &ip4src_v, sizeof(ip4src_v));
		memcpy(MLX5E_FTE_ADDR_OF(headers_c, src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &ip4src_m, sizeof(ip4src_m));
	}
	if (ip4dst_m) {
		memcpy(MLX5E_FTE_ADDR_OF(headers_v, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &ip4dst_v, sizeof(ip4dst_v));
		memcpy(MLX5E_FTE_ADDR_OF(headers_c, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &ip4dst_m, sizeof(ip4dst_m));
	}

	MLX5E_FTE_SET(headers_c, ethertype, 0xffff);
	MLX5E_FTE_SET(headers_v, ethertype, ETH_P_IP);
}

static void
set_ip6(void *headers_c, void *headers_v, __be32 ip6src_m[4],
	__be32 ip6src_v[4], __be32 ip6dst_m[4], __be32 ip6dst_v[4])
{
	u8 ip6_sz = MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6);

	if (!ipv6_addr_any((struct in6_addr *)ip6src_m)) {
		memcpy(MLX5E_FTE_ADDR_OF(headers_v, src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       ip6src_v, ip6_sz);
		memcpy(MLX5E_FTE_ADDR_OF(headers_c, src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       ip6src_m, ip6_sz);
	}
	if (!ipv6_addr_any((struct in6_addr *)ip6dst_m)) {
		memcpy(MLX5E_FTE_ADDR_OF(headers_v, dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       ip6dst_v, ip6_sz);
		memcpy(MLX5E_FTE_ADDR_OF(headers_c, dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       ip6dst_m, ip6_sz);
	}

	MLX5E_FTE_SET(headers_c, ethertype, 0xffff);
	MLX5E_FTE_SET(headers_v, ethertype, ETH_P_IPV6);
}

static void
set_tcp(void *headers_c, void *headers_v, __be16 psrc_m, __be16 psrc_v,
	__be16 pdst_m, __be16 pdst_v)
{
	if (psrc_m) {
		MLX5E_FTE_SET(headers_c, tcp_sport, ntohs(psrc_m));
		MLX5E_FTE_SET(headers_v, tcp_sport, ntohs(psrc_v));
	}
	if (pdst_m) {
		MLX5E_FTE_SET(headers_c, tcp_dport, ntohs(pdst_m));
		MLX5E_FTE_SET(headers_v, tcp_dport, ntohs(pdst_v));
	}

	MLX5E_FTE_SET(headers_c, ip_protocol, 0xffff);
	MLX5E_FTE_SET(headers_v, ip_protocol, IPPROTO_TCP);
}

static void
set_udp(void *headers_c, void *headers_v, __be16 psrc_m, __be16 psrc_v,
	__be16 pdst_m, __be16 pdst_v)
{
	if (psrc_m) {
		MLX5E_FTE_SET(headers_c, udp_sport, ntohs(psrc_m));
		MLX5E_FTE_SET(headers_v, udp_sport, ntohs(psrc_v));
	}

	if (pdst_m) {
		MLX5E_FTE_SET(headers_c, udp_dport, ntohs(pdst_m));
		MLX5E_FTE_SET(headers_v, udp_dport, ntohs(pdst_v));
	}

	MLX5E_FTE_SET(headers_c, ip_protocol, 0xffff);
	MLX5E_FTE_SET(headers_v, ip_protocol, IPPROTO_UDP);
}

static void
parse_tcp4(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip4_spec *l4_mask = &fs->m_u.tcp_ip4_spec;
	struct ethtool_tcpip4_spec *l4_val  = &fs->h_u.tcp_ip4_spec;

	set_ip4(headers_c, headers_v, l4_mask->ip4src, l4_val->ip4src,
		l4_mask->ip4dst, l4_val->ip4dst);

	set_tcp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
		l4_mask->pdst, l4_val->pdst);
}

static void
parse_udp4(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip4_spec *l4_mask = &fs->m_u.udp_ip4_spec;
	struct ethtool_tcpip4_spec *l4_val  = &fs->h_u.udp_ip4_spec;

	set_ip4(headers_c, headers_v, l4_mask->ip4src, l4_val->ip4src,
		l4_mask->ip4dst, l4_val->ip4dst);

	set_udp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
		l4_mask->pdst, l4_val->pdst);
}

static void
parse_ip4(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_usrip4_spec *l3_mask = &fs->m_u.usr_ip4_spec;
	struct ethtool_usrip4_spec *l3_val  = &fs->h_u.usr_ip4_spec;

	set_ip4(headers_c, headers_v, l3_mask->ip4src, l3_val->ip4src,
		l3_mask->ip4dst, l3_val->ip4dst);

	if (l3_mask->proto) {
		MLX5E_FTE_SET(headers_c, ip_protocol, l3_mask->proto);
		MLX5E_FTE_SET(headers_v, ip_protocol, l3_val->proto);
	}
}

static void
parse_ip6(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_usrip6_spec *l3_mask = &fs->m_u.usr_ip6_spec;
	struct ethtool_usrip6_spec *l3_val  = &fs->h_u.usr_ip6_spec;

	set_ip6(headers_c, headers_v, l3_mask->ip6src,
		l3_val->ip6src, l3_mask->ip6dst, l3_val->ip6dst);

	if (l3_mask->l4_proto) {
		MLX5E_FTE_SET(headers_c, ip_protocol, l3_mask->l4_proto);
		MLX5E_FTE_SET(headers_v, ip_protocol, l3_val->l4_proto);
	}
}

static void
parse_tcp6(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip6_spec *l4_mask = &fs->m_u.tcp_ip6_spec;
	struct ethtool_tcpip6_spec *l4_val  = &fs->h_u.tcp_ip6_spec;

	set_ip6(headers_c, headers_v, l4_mask->ip6src,
		l4_val->ip6src, l4_mask->ip6dst, l4_val->ip6dst);

	set_tcp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
		l4_mask->pdst, l4_val->pdst);
}

static void
parse_udp6(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip6_spec *l4_mask = &fs->m_u.udp_ip6_spec;
	struct ethtool_tcpip6_spec *l4_val  = &fs->h_u.udp_ip6_spec;

	set_ip6(headers_c, headers_v, l4_mask->ip6src,
		l4_val->ip6src, l4_mask->ip6dst, l4_val->ip6dst);

	set_udp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
		l4_mask->pdst, l4_val->pdst);
}

static void
parse_ether(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
	struct ethhdr *eth_mask = &fs->m_u.ether_spec;
	struct ethhdr *eth_val = &fs->h_u.ether_spec;

	mask_spec((u8 *)eth_mask, (u8 *)eth_val, sizeof(*eth_mask));
	ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_c, smac_47_16), eth_mask->h_source);
	ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_v, smac_47_16), eth_val->h_source);
	ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_c, dmac_47_16), eth_mask->h_dest);
	ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_v, dmac_47_16), eth_val->h_dest);
	MLX5E_FTE_SET(headers_c, ethertype, ntohs(eth_mask->h_proto));
	MLX5E_FTE_SET(headers_v, ethertype, ntohs(eth_val->h_proto));
}

static void
set_cvlan(void *headers_c, void *headers_v, __be16 vlan_tci)
{
	MLX5E_FTE_SET(headers_c, cvlan_tag, 1);
	MLX5E_FTE_SET(headers_v, cvlan_tag, 1);
	MLX5E_FTE_SET(headers_c, first_vid, 0xfff);
	MLX5E_FTE_SET(headers_v, first_vid, ntohs(vlan_tci));
}

static void
set_dmac(void *headers_c, void *headers_v,
	 unsigned char m_dest[ETH_ALEN], unsigned char v_dest[ETH_ALEN])
{
	ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_c, dmac_47_16), m_dest);
	ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_v, dmac_47_16), v_dest);
}

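/* Translate an ethtool flow spec into FTE match criteria and value,
 * including the optional FLOW_EXT VLAN ID and FLOW_MAC_EXT destination
 * MAC matches.
 */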
static int set_flow_attrs(u32 *match_c, u32 *match_v,
			  struct ethtool_rx_flow_spec *fs)
{
	void *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
					     outer_headers);
	void *outer_headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
					     outer_headers);
	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);

	switch (flow_type) {
	case TCP_V4_FLOW:
		parse_tcp4(outer_headers_c, outer_headers_v, fs);
		break;
	case UDP_V4_FLOW:
		parse_udp4(outer_headers_c, outer_headers_v, fs);
		break;
	case IP_USER_FLOW:
		parse_ip4(outer_headers_c, outer_headers_v, fs);
		break;
	case TCP_V6_FLOW:
		parse_tcp6(outer_headers_c, outer_headers_v, fs);
		break;
	case UDP_V6_FLOW:
		parse_udp6(outer_headers_c, outer_headers_v, fs);
		break;
	case IPV6_USER_FLOW:
		parse_ip6(outer_headers_c, outer_headers_v, fs);
		break;
	case ETHER_FLOW:
		parse_ether(outer_headers_c, outer_headers_v, fs);
		break;
	default:
		return -EINVAL;
	}

	if ((fs->flow_type & FLOW_EXT) &&
	    (fs->m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK)))
		set_cvlan(outer_headers_c, outer_headers_v, fs->h_ext.vlan_tci);

	if (fs->flow_type & FLOW_MAC_EXT &&
	    !is_zero_ether_addr(fs->m_ext.h_dest)) {
		mask_spec(fs->m_ext.h_dest, fs->h_ext.h_dest, ETH_ALEN);
		set_dmac(outer_headers_c, outer_headers_v, fs->m_ext.h_dest,
			 fs->h_ext.h_dest);
	}

	return 0;
}

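/* Insert the rule so the list stays sorted by ascending location. */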
static void add_rule_to_list(struct mlx5e_priv *priv,
			     struct mlx5e_ethtool_rule *rule)
{
	struct mlx5e_ethtool_rule *iter;
	struct list_head *head = &priv->fs.ethtool.rules;

	list_for_each_entry(iter, &priv->fs.ethtool.rules, list) {
		if (iter->flow_spec.location > rule->flow_spec.location)
			break;
		head = &iter->list;
	}
	priv->fs.ethtool.tot_num_rules++;
	list_add(&rule->list, head);
}

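/* Return true if no outer header match criteria were set at all. */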
static bool outer_header_zero(u32 *match_criteria)
{
	int size = MLX5_FLD_SZ_BYTES(fte_match_param, outer_headers);
	char *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_criteria,
					     outer_headers);

	return outer_headers_c[0] == 0 && !memcmp(outer_headers_c,
						  outer_headers_c + 1,
						  size - 1);
}

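/* Build the match spec and steering action for @fs and add the rule to
 * @ft. A ring cookie of RX_CLS_FLOW_DISC maps to a drop action; any
 * other cookie forwards to the direct TIR of the requested ring.
 */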
static struct mlx5_flow_handle *
add_ethtool_flow_rule(struct mlx5e_priv *priv,
		      struct mlx5_flow_table *ft,
		      struct ethtool_rx_flow_spec *fs)
{
	struct mlx5_flow_destination *dst = NULL;
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_spec *spec;
	struct mlx5_flow_handle *rule;
	int err = 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);
	err = set_flow_attrs(spec->match_criteria, spec->match_value,
			     fs);
	if (err)
		goto free;

	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
		flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
	} else {
		dst = kzalloc(sizeof(*dst), GFP_KERNEL);
		if (!dst) {
			err = -ENOMEM;
			goto free;
		}

		dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		dst->tir_num = priv->direct_tir[fs->ring_cookie].tirn;
		flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	}

	spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria));
	flow_act.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dst, dst ? 1 : 0);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		netdev_err(priv->netdev, "%s: failed to add ethtool steering rule: %d\n",
			   __func__, err);
		goto free;
	}
free:
	kvfree(spec);
	kfree(dst);
	return err ? ERR_PTR(err) : rule;
}

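/* Remove the rule from hardware (if installed) and from the rule list,
 * and release its flow table reference.
 */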
static void del_ethtool_rule(struct mlx5e_priv *priv,
			     struct mlx5e_ethtool_rule *eth_rule)
{
	if (eth_rule->rule)
		mlx5_del_flow_rules(eth_rule->rule);
	list_del(&eth_rule->list);
	priv->fs.ethtool.tot_num_rules--;
	put_flow_table(eth_rule->eth_ft);
	kfree(eth_rule);
}

static struct mlx5e_ethtool_rule *find_ethtool_rule(struct mlx5e_priv *priv,
						    int location)
{
	struct mlx5e_ethtool_rule *iter;

	list_for_each_entry(iter, &priv->fs.ethtool.rules, list) {
		if (iter->flow_spec.location == location)
			return iter;
	}
	return NULL;
}

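/* Allocate a rule entry for @location, first deleting any rule already
 * installed there (ethtool "replace" semantics).
 */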
static struct mlx5e_ethtool_rule *get_ethtool_rule(struct mlx5e_priv *priv,
						   int location)
{
	struct mlx5e_ethtool_rule *eth_rule;

	eth_rule = find_ethtool_rule(priv, location);
	if (eth_rule)
		del_ethtool_rule(priv, eth_rule);

	eth_rule = kzalloc(sizeof(*eth_rule), GFP_KERNEL);
	if (!eth_rule)
		return ERR_PTR(-ENOMEM);

	add_rule_to_list(priv, eth_rule);
	return eth_rule;
}

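/* The validate_*() helpers below check that a flow spec only uses
 * fields this driver can match on, and return the number of match
 * tuples the rule needs (later used to pick a flow table), or a
 * negative errno.
 */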
#define MAX_NUM_OF_ETHTOOL_RULES BIT(10)

#define all_ones(field) (field == (__force typeof(field))-1)
#define all_zeros_or_all_ones(field)		\
	((field) == 0 || (field) == (__force typeof(field))-1)

static int validate_ethter(struct ethtool_rx_flow_spec *fs)
{
	struct ethhdr *eth_mask = &fs->m_u.ether_spec;
	int ntuples = 0;

	if (!is_zero_ether_addr(eth_mask->h_dest))
		ntuples++;
	if (!is_zero_ether_addr(eth_mask->h_source))
		ntuples++;
	if (eth_mask->h_proto)
		ntuples++;
	return ntuples;
}

static int validate_tcpudp4(struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip4_spec *l4_mask = &fs->m_u.tcp_ip4_spec;
	int ntuples = 0;

	if (l4_mask->tos)
		return -EINVAL;

	if (l4_mask->ip4src)
		ntuples++;
	if (l4_mask->ip4dst)
		ntuples++;
	if (l4_mask->psrc)
		ntuples++;
	if (l4_mask->pdst)
		ntuples++;
	/* Flow is TCP/UDP */
	return ++ntuples;
}

static int validate_ip4(struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_usrip4_spec *l3_mask = &fs->m_u.usr_ip4_spec;
	int ntuples = 0;

	if (l3_mask->l4_4_bytes || l3_mask->tos ||
	    fs->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
		return -EINVAL;
	if (l3_mask->ip4src)
		ntuples++;
	if (l3_mask->ip4dst)
		ntuples++;
	if (l3_mask->proto)
		ntuples++;
	/* Flow is IPv4 */
	return ++ntuples;
}

static int validate_ip6(struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_usrip6_spec *l3_mask = &fs->m_u.usr_ip6_spec;
	int ntuples = 0;

	if (l3_mask->l4_4_bytes || l3_mask->tclass)
		return -EINVAL;
	if (!ipv6_addr_any((struct in6_addr *)l3_mask->ip6src))
		ntuples++;

	if (!ipv6_addr_any((struct in6_addr *)l3_mask->ip6dst))
		ntuples++;
	if (l3_mask->l4_proto)
		ntuples++;
	/* Flow is IPv6 */
	return ++ntuples;
}

static int validate_tcpudp6(struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip6_spec *l4_mask = &fs->m_u.tcp_ip6_spec;
	int ntuples = 0;

	if (l4_mask->tclass)
		return -EINVAL;

	if (!ipv6_addr_any((struct in6_addr *)l4_mask->ip6src))
		ntuples++;

	if (!ipv6_addr_any((struct in6_addr *)l4_mask->ip6dst))
		ntuples++;

	if (l4_mask->psrc)
		ntuples++;
	if (l4_mask->pdst)
		ntuples++;
	/* Flow is TCP/UDP */
	return ++ntuples;
}

static int validate_vlan(struct ethtool_rx_flow_spec *fs)
{
	if (fs->m_ext.vlan_etype ||
	    fs->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK))
		return -EINVAL;

	if (fs->m_ext.vlan_tci &&
	    (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID))
		return -EINVAL;

	return 1;
}

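/* Validate the whole flow spec, including location, ring cookie and the
 * FLOW_EXT/FLOW_MAC_EXT extensions; returns the total tuple count or a
 * negative errno.
 */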
static int validate_flow(struct mlx5e_priv *priv,
			 struct ethtool_rx_flow_spec *fs)
{
	int num_tuples = 0;
	int ret = 0;

	if (fs->location >= MAX_NUM_OF_ETHTOOL_RULES)
		return -ENOSPC;

	if (fs->ring_cookie >= priv->channels.params.num_channels &&
	    fs->ring_cookie != RX_CLS_FLOW_DISC)
		return -EINVAL;

	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case ETHER_FLOW:
		num_tuples += validate_ethter(fs);
		break;
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		ret = validate_tcpudp4(fs);
		if (ret < 0)
			return ret;
		num_tuples += ret;
		break;
	case IP_USER_FLOW:
		ret = validate_ip4(fs);
		if (ret < 0)
			return ret;
		num_tuples += ret;
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		ret = validate_tcpudp6(fs);
		if (ret < 0)
			return ret;
		num_tuples += ret;
		break;
	case IPV6_USER_FLOW:
		ret = validate_ip6(fs);
		if (ret < 0)
			return ret;
		num_tuples += ret;
		break;
	default:
		return -EOPNOTSUPP;
	}
	if (fs->flow_type & FLOW_EXT) {
		ret = validate_vlan(fs);
		if (ret < 0)
			return ret;
		num_tuples += ret;
	}

	if (fs->flow_type & FLOW_MAC_EXT &&
	    !is_zero_ether_addr(fs->m_ext.h_dest))
		num_tuples++;

	return num_tuples;
}

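/* ETHTOOL_SRXCLSRLINS: validate the spec, pick or create the matching
 * flow table, replace any rule already at this location and install the
 * new rule in hardware.
 */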
static int
mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
			   struct ethtool_rx_flow_spec *fs)
{
	struct mlx5e_ethtool_table *eth_ft;
	struct mlx5e_ethtool_rule *eth_rule;
	struct mlx5_flow_handle *rule;
	int num_tuples;
	int err;

	num_tuples = validate_flow(priv, fs);
	if (num_tuples <= 0) {
		netdev_warn(priv->netdev, "%s: flow is not valid %d\n",
			    __func__, num_tuples);
		return num_tuples;
	}

	eth_ft = get_flow_table(priv, fs, num_tuples);
	if (IS_ERR(eth_ft))
		return PTR_ERR(eth_ft);

	eth_rule = get_ethtool_rule(priv, fs->location);
	if (IS_ERR(eth_rule)) {
		put_flow_table(eth_ft);
		return PTR_ERR(eth_rule);
	}

	eth_rule->flow_spec = *fs;
	eth_rule->eth_ft = eth_ft;
	if (!eth_ft->ft) {
		err = -EINVAL;
		goto del_ethtool_rule;
	}
	rule = add_ethtool_flow_rule(priv, eth_ft->ft, fs);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto del_ethtool_rule;
	}

	eth_rule->rule = rule;

	return 0;

del_ethtool_rule:
	del_ethtool_rule(priv, eth_rule);

	return err;
}

static int
mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv, int location)
{
	struct mlx5e_ethtool_rule *eth_rule;
	int err = 0;

	if (location >= MAX_NUM_OF_ETHTOOL_RULES)
		return -ENOSPC;

	eth_rule = find_ethtool_rule(priv, location);
	if (!eth_rule) {
		err = -ENOENT;
		goto out;
	}

	del_ethtool_rule(priv, eth_rule);
out:
	return err;
}

static int
mlx5e_ethtool_get_flow(struct mlx5e_priv *priv,
		       struct ethtool_rxnfc *info, int location)
{
	struct mlx5e_ethtool_rule *eth_rule;

	if (location < 0 || location >= MAX_NUM_OF_ETHTOOL_RULES)
		return -EINVAL;

	list_for_each_entry(eth_rule, &priv->fs.ethtool.rules, list) {
		if (eth_rule->flow_spec.location == location) {
			info->fs = eth_rule->flow_spec;
			return 0;
		}
	}

	return -ENOENT;
}

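/* ETHTOOL_GRXCLSRLALL: walk all possible locations and collect the ones
 * that hold a rule into @rule_locs, up to info->rule_cnt entries.
 */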
static int
mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv,
			    struct ethtool_rxnfc *info, u32 *rule_locs)
{
	int location = 0;
	int idx = 0;
	int err = 0;

	info->data = MAX_NUM_OF_ETHTOOL_RULES;
	while ((!err || err == -ENOENT) && idx < info->rule_cnt) {
		err = mlx5e_ethtool_get_flow(priv, info, location);
		if (!err)
			rule_locs[idx++] = location;
		location++;
	}
	return err;
}

void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv)
{
	struct mlx5e_ethtool_rule *iter;
	struct mlx5e_ethtool_rule *temp;

	list_for_each_entry_safe(iter, temp, &priv->fs.ethtool.rules, list)
		del_ethtool_rule(priv, iter);
}

void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv)
{
	INIT_LIST_HEAD(&priv->fs.ethtool.rules);
}

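/* Map an ethtool flow type to the traffic type of the corresponding
 * indirect TIR; MLX5E_NUM_INDIR_TIRS means there is no such TIR.
 */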
static enum mlx5e_traffic_types flow_type_to_traffic_type(u32 flow_type)
{
	switch (flow_type) {
	case TCP_V4_FLOW:
		return MLX5E_TT_IPV4_TCP;
	case TCP_V6_FLOW:
		return MLX5E_TT_IPV6_TCP;
	case UDP_V4_FLOW:
		return MLX5E_TT_IPV4_UDP;
	case UDP_V6_FLOW:
		return MLX5E_TT_IPV6_UDP;
	case AH_V4_FLOW:
		return MLX5E_TT_IPV4_IPSEC_AH;
	case AH_V6_FLOW:
		return MLX5E_TT_IPV6_IPSEC_AH;
	case ESP_V4_FLOW:
		return MLX5E_TT_IPV4_IPSEC_ESP;
	case ESP_V6_FLOW:
		return MLX5E_TT_IPV6_IPSEC_ESP;
	case IPV4_FLOW:
		return MLX5E_TT_IPV4;
	case IPV6_FLOW:
		return MLX5E_TT_IPV6;
	default:
		return MLX5E_NUM_INDIR_TIRS;
	}
}

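/* ETHTOOL_SRXFH: update the RSS hash fields of the TIR serving this
 * flow type. Only hashing on IP addresses and L4 ports of TCP/UDP over
 * IPv4/IPv6 is configurable.
 */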
static int mlx5e_set_rss_hash_opt(struct mlx5e_priv *priv,
				  struct ethtool_rxnfc *nfc)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
	enum mlx5e_traffic_types tt;
	u8 rx_hash_field = 0;
	void *in;

	tt = flow_type_to_traffic_type(nfc->flow_type);
	if (tt == MLX5E_NUM_INDIR_TIRS)
		return -EINVAL;

	/* RSS does not support anything other than hashing to queues
	 * on src IP, dest IP, TCP/UDP src port and TCP/UDP dest
	 * port.
	 */
	if (nfc->flow_type != TCP_V4_FLOW &&
	    nfc->flow_type != TCP_V6_FLOW &&
	    nfc->flow_type != UDP_V4_FLOW &&
	    nfc->flow_type != UDP_V6_FLOW)
		return -EOPNOTSUPP;

	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EOPNOTSUPP;

	if (nfc->data & RXH_IP_SRC)
		rx_hash_field |= MLX5_HASH_FIELD_SEL_SRC_IP;
	if (nfc->data & RXH_IP_DST)
		rx_hash_field |= MLX5_HASH_FIELD_SEL_DST_IP;
	if (nfc->data & RXH_L4_B_0_1)
		rx_hash_field |= MLX5_HASH_FIELD_SEL_L4_SPORT;
	if (nfc->data & RXH_L4_B_2_3)
		rx_hash_field |= MLX5_HASH_FIELD_SEL_L4_DPORT;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	mutex_lock(&priv->state_lock);

	if (rx_hash_field == priv->rss_params.rx_hash_fields[tt])
		goto out;

	priv->rss_params.rx_hash_fields[tt] = rx_hash_field;
	mlx5e_modify_tirs_hash(priv, in, inlen);

out:
	mutex_unlock(&priv->state_lock);
	kvfree(in);
	return 0;
}

static int mlx5e_get_rss_hash_opt(struct mlx5e_priv *priv,
				  struct ethtool_rxnfc *nfc)
{
	enum mlx5e_traffic_types tt;
	u32 hash_field = 0;

	tt = flow_type_to_traffic_type(nfc->flow_type);
	if (tt == MLX5E_NUM_INDIR_TIRS)
		return -EINVAL;

	hash_field = priv->rss_params.rx_hash_fields[tt];
	nfc->data = 0;

	if (hash_field & MLX5_HASH_FIELD_SEL_SRC_IP)
		nfc->data |= RXH_IP_SRC;
	if (hash_field & MLX5_HASH_FIELD_SEL_DST_IP)
		nfc->data |= RXH_IP_DST;
	if (hash_field & MLX5_HASH_FIELD_SEL_L4_SPORT)
		nfc->data |= RXH_L4_B_0_1;
	if (hash_field & MLX5_HASH_FIELD_SEL_L4_DPORT)
		nfc->data |= RXH_L4_B_2_3;

	return 0;
}

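/* Entry point for ethtool -N/-U. For example (interface name purely
 * illustrative):
 *
 *   ethtool -N eth0 flow-type tcp4 dst-port 80 action 3     # steer to ring 3
 *   ethtool -N eth0 flow-type udp4 dst-ip 10.0.0.1 action -1    # drop
 *   ethtool -N eth0 rx-flow-hash tcp4 sdfn                  # 4-tuple RSS hash
 */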
int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	int err = 0;
	struct mlx5e_priv *priv = netdev_priv(dev);

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		err = mlx5e_ethtool_flow_replace(priv, &cmd->fs);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		err = mlx5e_ethtool_flow_remove(priv, cmd->fs.location);
		break;
	case ETHTOOL_SRXFH:
		err = mlx5e_set_rss_hash_opt(priv, cmd);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

int mlx5e_get_rxnfc(struct net_device *dev,
		    struct ethtool_rxnfc *info, u32 *rule_locs)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int err = 0;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = priv->channels.params.num_channels;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		info->rule_cnt = priv->fs.ethtool.tot_num_rules;
		break;
	case ETHTOOL_GRXCLSRULE:
		err = mlx5e_ethtool_get_flow(priv, info, info->fs.location);
		break;
	case ETHTOOL_GRXCLSRLALL:
		err = mlx5e_ethtool_get_all_flows(priv, info, rule_locs);
		break;
	case ETHTOOL_GRXFH:
		err = mlx5e_get_rss_hash_opt(priv, info);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}