/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/hash.h>
#include <linux/mlx5/fs.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include "en.h"

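/* 5-tuple (ethertype, IP protocol, addresses and ports) identifying an aRFS flow */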
struct arfs_tuple {
	__be16 etype;
	u8     ip_proto;
	union {
		__be32 src_ipv4;
		struct in6_addr src_ipv6;
	};
	union {
		__be32 dst_ipv4;
		struct in6_addr dst_ipv6;
	};
	__be16 src_port;
	__be16 dst_port;
};

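/*
 * One aRFS steering entry. Allocated from .ndo_rx_flow_steer() and
 * installed or retargeted asynchronously from the aRFS workqueue.
 */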
struct arfs_rule {
	struct mlx5e_priv	*priv;
	struct work_struct      arfs_work;
	struct mlx5_flow_handle *rule;
	struct hlist_node	hlist;
	int			rxq;
	/* Flow ID passed to ndo_rx_flow_steer */
	int			flow_id;
	/* Filter ID returned by ndo_rx_flow_steer */
	int			filter_id;
	struct arfs_tuple	tuple;
};

#define mlx5e_for_each_arfs_rule(hn, tmp, arfs_tables, i, j) \
	for (i = 0; i < ARFS_NUM_TYPES; i++) \
		mlx5e_for_each_hash_arfs_rule(hn, tmp, arfs_tables[i].rules_hash, j)

#define mlx5e_for_each_hash_arfs_rule(hn, tmp, hash, j) \
	for (j = 0; j < ARFS_HASH_SIZE; j++) \
		hlist_for_each_entry_safe(hn, tmp, &hash[j], hlist)

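/* Map an aRFS table type to the corresponding TTC traffic type */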
static enum mlx5e_traffic_types arfs_get_tt(enum arfs_type type)
{
	switch (type) {
	case ARFS_IPV4_TCP:
		return MLX5E_TT_IPV4_TCP;
	case ARFS_IPV4_UDP:
		return MLX5E_TT_IPV4_UDP;
	case ARFS_IPV6_TCP:
		return MLX5E_TT_IPV6_TCP;
	case ARFS_IPV6_UDP:
		return MLX5E_TT_IPV6_UDP;
	default:
		return -EINVAL;
	}
}

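/*
 * Point the TTC rules of all aRFS traffic types back at the indirection
 * (RSS) TIRs, so incoming traffic bypasses the aRFS flow tables.
 */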
static int arfs_disable(struct mlx5e_priv *priv)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5e_tir *tir = priv->indir_tir;
	int err = 0;
	int tt;
	int i;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	for (i = 0; i < ARFS_NUM_TYPES; i++) {
		dest.tir_num = tir[i].tirn;
		tt = arfs_get_tt(i);
		/* Modify ttc rules destination to bypass the aRFS tables */
		err = mlx5_modify_rule_destination(priv->fs.ttc.rules[tt],
						   &dest, NULL);
		if (err) {
			netdev_err(priv->netdev,
				   "%s: modify ttc destination failed\n",
				   __func__);
			return err;
		}
	}
	return 0;
}

static void arfs_del_rules(struct mlx5e_priv *priv);

int mlx5e_arfs_disable(struct mlx5e_priv *priv)
{
	arfs_del_rules(priv);

	return arfs_disable(priv);
}

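/*
 * Redirect the TTC rules of all aRFS traffic types to the aRFS flow
 * tables, so incoming traffic is matched against the aRFS rules.
 */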
int mlx5e_arfs_enable(struct mlx5e_priv *priv)
{
	struct mlx5_flow_destination dest = {};
	int err = 0;
	int tt;
	int i;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	for (i = 0; i < ARFS_NUM_TYPES; i++) {
		dest.ft = priv->fs.arfs.arfs_tables[i].ft.t;
		tt = arfs_get_tt(i);
		/* Modify ttc rules destination to point to the aRFS flow tables */
		err = mlx5_modify_rule_destination(priv->fs.ttc.rules[tt],
						   &dest, NULL);
		if (err) {
			netdev_err(priv->netdev,
				   "%s: modify ttc destination failed err=%d\n",
				   __func__, err);
			arfs_disable(priv);
			return err;
		}
	}
	return 0;
}

static void arfs_destroy_table(struct arfs_table *arfs_t)
{
	mlx5_del_flow_rules(arfs_t->default_rule);
	mlx5e_destroy_flow_table(&arfs_t->ft);
}

void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv)
{
	int i;

	if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
		return;

	arfs_del_rules(priv);
	destroy_workqueue(priv->fs.arfs.wq);
	for (i = 0; i < ARFS_NUM_TYPES; i++) {
		if (!IS_ERR_OR_NULL(priv->fs.arfs.arfs_tables[i].ft.t))
			arfs_destroy_table(&priv->fs.arfs.arfs_tables[i]);
	}
}

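/*
 * Add the catch-all rule of an aRFS table: traffic that matches none of
 * the per-flow rules is sent to the indirection (RSS) TIR of its traffic
 * type, i.e. it falls back to regular RSS spreading.
 */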
static int arfs_add_default_rule(struct mlx5e_priv *priv,
				 enum arfs_type type)
{
	struct arfs_table *arfs_t = &priv->fs.arfs.arfs_tables[type];
	struct mlx5e_tir *tir = priv->indir_tir;
	struct mlx5_flow_destination dest = {};
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_spec *spec;
	enum mlx5e_traffic_types tt;
	int err = 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	tt = arfs_get_tt(type);
	if (tt == -EINVAL) {
		netdev_err(priv->netdev, "%s: bad arfs_type: %d\n",
			   __func__, type);
		err = -EINVAL;
		goto out;
	}

	dest.tir_num = tir[tt].tirn;

	arfs_t->default_rule = mlx5_add_flow_rules(arfs_t->ft.t, spec,
						   &flow_act,
						   &dest, 1);
	if (IS_ERR(arfs_t->default_rule)) {
		err = PTR_ERR(arfs_t->default_rule);
		arfs_t->default_rule = NULL;
		netdev_err(priv->netdev, "%s: add rule failed, arfs type=%d\n",
			   __func__, type);
	}
out:
	kvfree(spec);
	return err;
}

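/*
 * Each aRFS table holds two flow groups: group 1 holds the per-flow
 * 5-tuple rules, group 2 holds the single catch-all (default) rule.
 */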
#define MLX5E_ARFS_NUM_GROUPS	2
#define MLX5E_ARFS_GROUP1_SIZE	(BIT(16) - 1)
#define MLX5E_ARFS_GROUP2_SIZE	BIT(0)
#define MLX5E_ARFS_TABLE_SIZE	(MLX5E_ARFS_GROUP1_SIZE +\
				 MLX5E_ARFS_GROUP2_SIZE)
static int arfs_create_groups(struct mlx5e_flow_table *ft,
			      enum arfs_type type)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	void *outer_headers_c;
	int ix = 0;
	u32 *in;
	int err;
	u8 *mc;

	ft->g = kcalloc(MLX5E_ARFS_NUM_GROUPS,
			sizeof(*ft->g), GFP_KERNEL);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in || !ft->g) {
		kfree(ft->g);
		kvfree(in);
		return -ENOMEM;
	}

	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc,
				       outer_headers);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ethertype);
	switch (type) {
	case ARFS_IPV4_TCP:
	case ARFS_IPV6_TCP:
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport);
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport);
		break;
	case ARFS_IPV4_UDP:
	case ARFS_IPV6_UDP:
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_dport);
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_sport);
		break;
	default:
		err = -EINVAL;
		goto out;
	}

	switch (type) {
	case ARFS_IPV4_TCP:
	case ARFS_IPV4_UDP:
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c,
				 src_ipv4_src_ipv6.ipv4_layout.ipv4);
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c,
				 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
		break;
	case ARFS_IPV6_TCP:
	case ARFS_IPV6_UDP:
		memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       0xff, 16);
		memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       0xff, 16);
		break;
	default:
		err = -EINVAL;
		goto out;
	}

	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_ARFS_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_ARFS_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	kvfree(in);
	return 0;

err:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
out:
	kvfree(in);

	return err;
}

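/* Create one aRFS flow table and populate its flow groups and catch-all rule */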
static int arfs_create_table(struct mlx5e_priv *priv,
			     enum arfs_type type)
{
	struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
	struct mlx5e_flow_table *ft = &arfs->arfs_tables[type].ft;
	struct mlx5_flow_table_attr ft_attr = {};
	int err;

	ft->num_groups = 0;

	ft_attr.max_fte = MLX5E_ARFS_TABLE_SIZE;
	ft_attr.level = MLX5E_ARFS_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;

	ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}

	err = arfs_create_groups(ft, type);
	if (err)
		goto err;

	err = arfs_add_default_rule(priv, type);
	if (err)
		goto err;

	return 0;
err:
	mlx5e_destroy_flow_table(ft);
	return err;
}

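/* Create the aRFS workqueue and one flow table per supported traffic type */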
int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
{
	int err = 0;
	int i;

	if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
		return 0;

	spin_lock_init(&priv->fs.arfs.arfs_lock);
	INIT_LIST_HEAD(&priv->fs.arfs.rules);
	priv->fs.arfs.wq = create_singlethread_workqueue("mlx5e_arfs");
	if (!priv->fs.arfs.wq)
		return -ENOMEM;

	for (i = 0; i < ARFS_NUM_TYPES; i++) {
		err = arfs_create_table(priv, i);
		if (err)
			goto err;
	}
	return 0;
err:
	mlx5e_arfs_destroy_tables(priv);
	return err;
}

#define MLX5E_ARFS_EXPIRY_QUOTA 60

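/*
 * Expire idle flows: under the lock, collect rules that have no pending
 * work and that rps_may_expire_flow() reports as idle (bounded by
 * MLX5E_ARFS_EXPIRY_QUOTA per call), then delete them outside the lock.
 */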
static void arfs_may_expire_flow(struct mlx5e_priv *priv)
{
	struct arfs_rule *arfs_rule;
	struct hlist_node *htmp;
	int quota = 0;
	int i;
	int j;

	HLIST_HEAD(del_list);
	spin_lock_bh(&priv->fs.arfs.arfs_lock);
	mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs.arfs_tables, i, j) {
		if (!work_pending(&arfs_rule->arfs_work) &&
		    rps_may_expire_flow(priv->netdev,
					arfs_rule->rxq, arfs_rule->flow_id,
					arfs_rule->filter_id)) {
			hlist_del_init(&arfs_rule->hlist);
			hlist_add_head(&arfs_rule->hlist, &del_list);
			if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA)
				break;
		}
	}
	spin_unlock_bh(&priv->fs.arfs.arfs_lock);
	hlist_for_each_entry_safe(arfs_rule, htmp, &del_list, hlist) {
		if (arfs_rule->rule)
			mlx5_del_flow_rules(arfs_rule->rule);
		hlist_del(&arfs_rule->hlist);
		kfree(arfs_rule);
	}
}

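/*
 * Flush all aRFS rules: move every rule to a private list under the lock,
 * then cancel its work and delete the steering rule outside the lock.
 */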
static void arfs_del_rules(struct mlx5e_priv *priv)
{
	struct hlist_node *htmp;
	struct arfs_rule *rule;
	int i;
	int j;

	HLIST_HEAD(del_list);
	spin_lock_bh(&priv->fs.arfs.arfs_lock);
	mlx5e_for_each_arfs_rule(rule, htmp, priv->fs.arfs.arfs_tables, i, j) {
		hlist_del_init(&rule->hlist);
		hlist_add_head(&rule->hlist, &del_list);
	}
	spin_unlock_bh(&priv->fs.arfs.arfs_lock);

	hlist_for_each_entry_safe(rule, htmp, &del_list, hlist) {
		cancel_work_sync(&rule->arfs_work);
		if (rule->rule)
			mlx5_del_flow_rules(rule->rule);
		hlist_del(&rule->hlist);
		kfree(rule);
	}
}

static struct hlist_head *
arfs_hash_bucket(struct arfs_table *arfs_t, __be16 src_port,
		 __be16 dst_port)
{
	unsigned long l;
	int bucket_idx;

	l = (__force unsigned long)src_port |
	    ((__force unsigned long)dst_port << 2);

	bucket_idx = hash_long(l, ARFS_HASH_SHIFT);

	return &arfs_t->rules_hash[bucket_idx];
}

static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
					 u8 ip_proto, __be16 etype)
{
	if (etype == htons(ETH_P_IP) && ip_proto == IPPROTO_TCP)
		return &arfs->arfs_tables[ARFS_IPV4_TCP];
	if (etype == htons(ETH_P_IP) && ip_proto == IPPROTO_UDP)
		return &arfs->arfs_tables[ARFS_IPV4_UDP];
	if (etype == htons(ETH_P_IPV6) && ip_proto == IPPROTO_TCP)
		return &arfs->arfs_tables[ARFS_IPV6_TCP];
	if (etype == htons(ETH_P_IPV6) && ip_proto == IPPROTO_UDP)
		return &arfs->arfs_tables[ARFS_IPV6_UDP];

	return NULL;
}

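/*
 * Build a match spec from the rule's 5-tuple and add a steering rule that
 * directs the flow to the direct TIR of the requested RX queue.
 */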
static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
					      struct arfs_rule *arfs_rule)
{
	struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
	struct arfs_tuple *tuple = &arfs_rule->tuple;
	struct mlx5_flow_handle *rule = NULL;
	struct mlx5_flow_destination dest = {};
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct arfs_table *arfs_table;
	struct mlx5_flow_spec *spec;
	struct mlx5_flow_table *ft;
	int err = 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}
	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
			 outer_headers.ethertype);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype,
		 ntohs(tuple->etype));
	arfs_table = arfs_get_table(arfs, tuple->ip_proto, tuple->etype);
	if (!arfs_table) {
		err = -EINVAL;
		goto out;
	}

	ft = arfs_table->ft.t;
	if (tuple->ip_proto == IPPROTO_TCP) {
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.tcp_dport);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.tcp_sport);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.tcp_dport,
			 ntohs(tuple->dst_port));
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.tcp_sport,
			 ntohs(tuple->src_port));
	} else {
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.udp_dport);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.udp_sport);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_dport,
			 ntohs(tuple->dst_port));
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_sport,
			 ntohs(tuple->src_port));
	}
	if (tuple->etype == htons(ETH_P_IP)) {
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &tuple->src_ipv4,
		       4);
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &tuple->dst_ipv4,
		       4);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
	} else {
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &tuple->src_ipv6,
		       16);
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &tuple->dst_ipv6,
		       16);
		memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       0xff,
		       16);
		memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       0xff,
		       16);
	}
	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dest.tir_num = priv->direct_tir[arfs_rule->rxq].tirn;
	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		priv->channel_stats[arfs_rule->rxq].rq.arfs_err++;
		mlx5e_dbg(HW, priv,
			  "%s: add rule(filter id=%d, rq idx=%d, ip proto=0x%x) failed, err=%d\n",
			  __func__, arfs_rule->filter_id, arfs_rule->rxq,
			  tuple->ip_proto, err);
	}

out:
	kvfree(spec);
	return err ? ERR_PTR(err) : rule;
}

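/* Retarget an existing aRFS rule to the direct TIR of a different RX queue */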
static void arfs_modify_rule_rq(struct mlx5e_priv *priv,
				struct mlx5_flow_handle *rule, u16 rxq)
{
	struct mlx5_flow_destination dst = {};
	int err = 0;

	dst.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dst.tir_num = priv->direct_tir[rxq].tirn;
	err = mlx5_modify_rule_destination(rule, &dst, NULL);
	if (err)
		netdev_warn(priv->netdev,
			    "Failed to modify aRFS rule destination to rq=%d\n", rxq);
}

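/*
 * Workqueue handler: drop the rule if the device is not opened, add the
 * steering rule on first use or retarget it to the new RX queue, then
 * give idle flows a chance to expire.
 */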
static void arfs_handle_work(struct work_struct *work)
{
	struct arfs_rule *arfs_rule = container_of(work,
						   struct arfs_rule,
						   arfs_work);
	struct mlx5e_priv *priv = arfs_rule->priv;
	struct mlx5_flow_handle *rule;

	mutex_lock(&priv->state_lock);
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		spin_lock_bh(&priv->fs.arfs.arfs_lock);
		hlist_del(&arfs_rule->hlist);
		spin_unlock_bh(&priv->fs.arfs.arfs_lock);

		mutex_unlock(&priv->state_lock);
		kfree(arfs_rule);
		goto out;
	}
	mutex_unlock(&priv->state_lock);

	if (!arfs_rule->rule) {
		rule = arfs_add_rule(priv, arfs_rule);
		if (IS_ERR(rule))
			goto out;
		arfs_rule->rule = rule;
	} else {
		arfs_modify_rule_rq(priv, arfs_rule->rule,
				    arfs_rule->rxq);
	}
out:
	arfs_may_expire_flow(priv);
}

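/*
 * Allocate a new aRFS rule from the dissected flow keys, assign it a
 * filter ID and link it into the table's rule hash. Called under
 * arfs_lock in atomic context.
 */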
static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
					 struct arfs_table *arfs_t,
					 const struct flow_keys *fk,
					 u16 rxq, u32 flow_id)
{
	struct arfs_rule *rule;
	struct arfs_tuple *tuple;

	rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
	if (!rule)
		return NULL;

	rule->priv = priv;
	rule->rxq = rxq;
	INIT_WORK(&rule->arfs_work, arfs_handle_work);

	tuple = &rule->tuple;
	tuple->etype = fk->basic.n_proto;
	tuple->ip_proto = fk->basic.ip_proto;
	if (tuple->etype == htons(ETH_P_IP)) {
		tuple->src_ipv4 = fk->addrs.v4addrs.src;
		tuple->dst_ipv4 = fk->addrs.v4addrs.dst;
	} else {
		memcpy(&tuple->src_ipv6, &fk->addrs.v6addrs.src,
		       sizeof(struct in6_addr));
		memcpy(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst,
		       sizeof(struct in6_addr));
	}
	tuple->src_port = fk->ports.src;
	tuple->dst_port = fk->ports.dst;

	rule->flow_id = flow_id;
	rule->filter_id = priv->fs.arfs.last_filter_id++ % RPS_NO_FILTER;

	hlist_add_head(&rule->hlist,
		       arfs_hash_bucket(arfs_t, tuple->src_port,
					tuple->dst_port));
	return rule;
}

static bool arfs_cmp(const struct arfs_tuple *tuple, const struct flow_keys *fk)
{
	if (tuple->src_port != fk->ports.src || tuple->dst_port != fk->ports.dst)
		return false;
	if (tuple->etype != fk->basic.n_proto)
		return false;
	if (tuple->etype == htons(ETH_P_IP))
		return tuple->src_ipv4 == fk->addrs.v4addrs.src &&
		       tuple->dst_ipv4 == fk->addrs.v4addrs.dst;
	if (tuple->etype == htons(ETH_P_IPV6))
		return !memcmp(&tuple->src_ipv6, &fk->addrs.v6addrs.src,
			       sizeof(struct in6_addr)) &&
		       !memcmp(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst,
			       sizeof(struct in6_addr));
	return false;
}

static struct arfs_rule *arfs_find_rule(struct arfs_table *arfs_t,
					const struct flow_keys *fk)
{
	struct arfs_rule *arfs_rule;
	struct hlist_head *head;

	head = arfs_hash_bucket(arfs_t, fk->ports.src, fk->ports.dst);
	hlist_for_each_entry(arfs_rule, head, hlist) {
		if (arfs_cmp(&arfs_rule->tuple, fk))
			return arfs_rule;
	}

	return NULL;
}

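/*
 * .ndo_rx_flow_steer() callback. Dissect the packet, look up (or allocate)
 * the matching aRFS rule, record the desired RX queue and queue the work
 * that installs or updates the steering rule. Returns the rule's filter ID,
 * or a negative errno for unsupported flows.
 */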
int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
			u16 rxq_index, u32 flow_id)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
	struct arfs_table *arfs_t;
	struct arfs_rule *arfs_rule;
	struct flow_keys fk;

	if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
		return -EPROTONOSUPPORT;

	if (fk.basic.n_proto != htons(ETH_P_IP) &&
	    fk.basic.n_proto != htons(ETH_P_IPV6))
		return -EPROTONOSUPPORT;

	if (skb->encapsulation)
		return -EPROTONOSUPPORT;

	arfs_t = arfs_get_table(arfs, fk.basic.ip_proto, fk.basic.n_proto);
	if (!arfs_t)
		return -EPROTONOSUPPORT;

	spin_lock_bh(&arfs->arfs_lock);
	arfs_rule = arfs_find_rule(arfs_t, &fk);
	if (arfs_rule) {
		if (arfs_rule->rxq == rxq_index) {
			spin_unlock_bh(&arfs->arfs_lock);
			return arfs_rule->filter_id;
		}
		arfs_rule->rxq = rxq_index;
	} else {
		arfs_rule = arfs_alloc_rule(priv, arfs_t, &fk, rxq_index, flow_id);
		if (!arfs_rule) {
			spin_unlock_bh(&arfs->arfs_lock);
			return -ENOMEM;
		}
	}
	queue_work(priv->fs.arfs.wq, &arfs_rule->arfs_work);
	spin_unlock_bh(&arfs->arfs_lock);
	return arfs_rule->filter_id;
}
714