1 /* QLogic qede NIC Driver
2  * Copyright (c) 2015-2017  QLogic Corporation
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <net/udp_tunnel.h>
35 #include <linux/bitops.h>
36 #include <linux/vmalloc.h>
37 
38 #include <linux/qed/qed_if.h>
39 #include "qede.h"
40 
41 #define QEDE_FILTER_PRINT_MAX_LEN	(64)
42 struct qede_arfs_tuple {
43 	union {
44 		__be32 src_ipv4;
45 		struct in6_addr src_ipv6;
46 	};
47 	union {
48 		__be32 dst_ipv4;
49 		struct in6_addr dst_ipv6;
50 	};
51 	__be16  src_port;
52 	__be16  dst_port;
53 	__be16  eth_proto;
54 	u8      ip_proto;
55 
56 	/* Describe filtering mode needed for this kind of filter */
57 	enum qed_filter_config_mode mode;
58 
59 	/* Used to compare new/old filters. Return true if IPs match */
60 	bool (*ip_comp)(struct qede_arfs_tuple *a, struct qede_arfs_tuple *b);
61 
62 	/* Given a pointer to the ethhdr, build a header from the tuple info */
63 	void (*build_hdr)(struct qede_arfs_tuple *t, void *header);
64 
65 	/* Stringify the tuple into the provided buffer for printing */
66 	void (*stringify)(struct qede_arfs_tuple *t, void *buffer);
67 };
68 
69 struct qede_arfs_fltr_node {
70 #define QEDE_FLTR_VALID	 0
71 	unsigned long state;
72 
73 	/* pointer to aRFS packet buffer */
74 	void *data;
75 
76 	/* dma map address of aRFS packet buffer */
77 	dma_addr_t mapping;
78 
79 	/* length of aRFS packet buffer */
80 	int buf_len;
81 
82 	/* tuple extracted from the aRFS packet buffer */
83 	struct qede_arfs_tuple tuple;
84 
85 	u32 flow_id;
86 	u16 sw_id;
87 	u16 rxq_id;
88 	u16 next_rxq_id;
89 	u8 vfid;
90 	bool filter_op;
91 	bool used;
92 	u8 fw_rc;
93 	bool b_is_drop;
94 	struct hlist_node node;
95 };
96 
97 struct qede_arfs {
98 #define QEDE_ARFS_BUCKET_HEAD(edev, idx) (&(edev)->arfs->arfs_hl_head[idx])
99 #define QEDE_ARFS_POLL_COUNT	100
100 #define QEDE_RFS_FLW_BITSHIFT	(4)
101 #define QEDE_RFS_FLW_MASK	((1 << QEDE_RFS_FLW_BITSHIFT) - 1)
102 	struct hlist_head	arfs_hl_head[1 << QEDE_RFS_FLW_BITSHIFT];
103 
104 	/* lock for filter list access */
105 	spinlock_t		arfs_list_lock;
106 	unsigned long		*arfs_fltr_bmap;
107 	int			filter_count;
108 
109 	/* Currently configured filtering mode */
110 	enum qed_filter_config_mode mode;
111 };
112 
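/* Build a qed_ntuple_filter_params request from the filter node and ask the
 * qed layer to add or remove the filter on the given rx queue. The request is
 * asynchronous; its completion is reported back through qede_arfs_filter_op().
 */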
113 static void qede_configure_arfs_fltr(struct qede_dev *edev,
114 				     struct qede_arfs_fltr_node *n,
115 				     u16 rxq_id, bool add_fltr)
116 {
117 	const struct qed_eth_ops *op = edev->ops;
118 	struct qed_ntuple_filter_params params;
119 
120 	if (n->used)
121 		return;
122 
123 	memset(&params, 0, sizeof(params));
124 
125 	params.addr = n->mapping;
126 	params.length = n->buf_len;
127 	params.qid = rxq_id;
128 	params.b_is_add = add_fltr;
129 	params.b_is_drop = n->b_is_drop;
130 
131 	if (n->vfid) {
132 		params.b_is_vf = true;
133 		params.vf_id = n->vfid - 1;
134 	}
135 
136 	if (n->tuple.stringify) {
137 		char tuple_buffer[QEDE_FILTER_PRINT_MAX_LEN];
138 
139 		n->tuple.stringify(&n->tuple, tuple_buffer);
140 		DP_VERBOSE(edev, NETIF_MSG_RX_STATUS,
141 			   "%s sw_id[0x%x]: %s [vf %u queue %d]\n",
142 			   add_fltr ? "Adding" : "Deleting",
143 			   n->sw_id, tuple_buffer, n->vfid, rxq_id);
144 	}
145 
146 	n->used = true;
147 	n->filter_op = add_fltr;
148 	op->ntuple_filter_config(edev->cdev, n, &params);
149 }
150 
151 static void
152 qede_free_arfs_filter(struct qede_dev *edev, struct qede_arfs_fltr_node *fltr)
153 {
154 	kfree(fltr->data);
155 	clear_bit(fltr->sw_id, edev->arfs->arfs_fltr_bmap);
156 	kfree(fltr);
157 }
158 
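/* DMA-map the filter's packet buffer and add the node to the requested hash
 * bucket. Adding the first filter also configures the hardware aRFS searcher
 * with the filtering mode requested by the tuple.
 */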
159 static int
160 qede_enqueue_fltr_and_config_searcher(struct qede_dev *edev,
161 				      struct qede_arfs_fltr_node *fltr,
162 				      u16 bucket_idx)
163 {
164 	fltr->mapping = dma_map_single(&edev->pdev->dev, fltr->data,
165 				       fltr->buf_len, DMA_TO_DEVICE);
166 	if (dma_mapping_error(&edev->pdev->dev, fltr->mapping)) {
167 		DP_NOTICE(edev, "Failed to map DMA memory for rule\n");
168 		qede_free_arfs_filter(edev, fltr);
169 		return -ENOMEM;
170 	}
171 
172 	INIT_HLIST_NODE(&fltr->node);
173 	hlist_add_head(&fltr->node,
174 		       QEDE_ARFS_BUCKET_HEAD(edev, bucket_idx));
175 
176 	edev->arfs->filter_count++;
177 	if (edev->arfs->filter_count == 1 &&
178 	    edev->arfs->mode == QED_FILTER_CONFIG_MODE_DISABLE) {
179 		edev->ops->configure_arfs_searcher(edev->cdev,
180 						   fltr->tuple.mode);
181 		edev->arfs->mode = fltr->tuple.mode;
182 	}
183 
184 	return 0;
185 }
186 
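/* Remove the node from its hash bucket, unmap its buffer and free it.
 * Removing the last filter also disables the hardware aRFS searcher.
 */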
187 static void
188 qede_dequeue_fltr_and_config_searcher(struct qede_dev *edev,
189 				      struct qede_arfs_fltr_node *fltr)
190 {
191 	hlist_del(&fltr->node);
192 	dma_unmap_single(&edev->pdev->dev, fltr->mapping,
193 			 fltr->buf_len, DMA_TO_DEVICE);
194 
195 	qede_free_arfs_filter(edev, fltr);
196 
197 	edev->arfs->filter_count--;
198 	if (!edev->arfs->filter_count &&
199 	    edev->arfs->mode != QED_FILTER_CONFIG_MODE_DISABLE) {
200 		enum qed_filter_config_mode mode;
201 
202 		mode = QED_FILTER_CONFIG_MODE_DISABLE;
203 		edev->ops->configure_arfs_searcher(edev->cdev, mode);
204 		edev->arfs->mode = QED_FILTER_CONFIG_MODE_DISABLE;
205 	}
206 }
207 
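/* Completion callback invoked by the qed layer when an n-tuple filter
 * add/remove request finishes. On failure the filter is simply marked
 * invalid. On success, a flow whose target rx queue has changed in the
 * meantime is deleted here and re-added on the new queue once the delete
 * completes.
 */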
208 void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc)
209 {
210 	struct qede_arfs_fltr_node *fltr = filter;
211 	struct qede_dev *edev = dev;
212 
213 	fltr->fw_rc = fw_rc;
214 
215 	if (fw_rc) {
216 		DP_NOTICE(edev,
217 			  "Failed arfs filter configuration fw_rc=%d, flow_id=%d, sw_id=%d, src_port=%d, dst_port=%d, rxq=%d\n",
218 			  fw_rc, fltr->flow_id, fltr->sw_id,
219 			  ntohs(fltr->tuple.src_port),
220 			  ntohs(fltr->tuple.dst_port), fltr->rxq_id);
221 
222 		spin_lock_bh(&edev->arfs->arfs_list_lock);
223 
224 		fltr->used = false;
225 		clear_bit(QEDE_FLTR_VALID, &fltr->state);
226 
227 		spin_unlock_bh(&edev->arfs->arfs_list_lock);
228 		return;
229 	}
230 
231 	spin_lock_bh(&edev->arfs->arfs_list_lock);
232 
233 	fltr->used = false;
234 
235 	if (fltr->filter_op) {
236 		set_bit(QEDE_FLTR_VALID, &fltr->state);
237 		if (fltr->rxq_id != fltr->next_rxq_id)
238 			qede_configure_arfs_fltr(edev, fltr, fltr->rxq_id,
239 						 false);
240 	} else {
241 		clear_bit(QEDE_FLTR_VALID, &fltr->state);
242 		if (fltr->rxq_id != fltr->next_rxq_id) {
243 			fltr->rxq_id = fltr->next_rxq_id;
244 			qede_configure_arfs_fltr(edev, fltr,
245 						 fltr->rxq_id, true);
246 		}
247 	}
248 
249 	spin_unlock_bh(&edev->arfs->arfs_list_lock);
250 }
251 
252 /* Should be called while qede_lock is held */
253 void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr)
254 {
255 	int i;
256 
257 	for (i = 0; i <= QEDE_RFS_FLW_MASK; i++) {
258 		struct hlist_node *temp;
259 		struct hlist_head *head;
260 		struct qede_arfs_fltr_node *fltr;
261 
262 		head = &edev->arfs->arfs_hl_head[i];
263 
264 		hlist_for_each_entry_safe(fltr, temp, head, node) {
265 			bool del = false;
266 
267 			if (edev->state != QEDE_STATE_OPEN)
268 				del = true;
269 
270 			spin_lock_bh(&edev->arfs->arfs_list_lock);
271 
272 			if ((!test_bit(QEDE_FLTR_VALID, &fltr->state) &&
273 			     !fltr->used) || free_fltr) {
274 				qede_dequeue_fltr_and_config_searcher(edev,
275 								      fltr);
276 			} else {
277 				bool flow_exp = false;
278 #ifdef CONFIG_RFS_ACCEL
279 				flow_exp = rps_may_expire_flow(edev->ndev,
280 							       fltr->rxq_id,
281 							       fltr->flow_id,
282 							       fltr->sw_id);
283 #endif
284 				if ((flow_exp || del) && !free_fltr)
285 					qede_configure_arfs_fltr(edev, fltr,
286 								 fltr->rxq_id,
287 								 false);
288 			}
289 
290 			spin_unlock_bh(&edev->arfs->arfs_list_lock);
291 		}
292 	}
293 
294 #ifdef CONFIG_RFS_ACCEL
295 	spin_lock_bh(&edev->arfs->arfs_list_lock);
296 
297 	if (edev->arfs->filter_count) {
298 		set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags);
299 		schedule_delayed_work(&edev->sp_task,
300 				      QEDE_SP_TASK_POLL_DELAY);
301 	}
302 
303 	spin_unlock_bh(&edev->arfs->arfs_list_lock);
304 #endif
305 }
306 
307 /* This function waits until all aRFS filters get deleted and freed.
308  * On timeout it frees all filters forcefully.
309  */
310 void qede_poll_for_freeing_arfs_filters(struct qede_dev *edev)
311 {
312 	int count = QEDE_ARFS_POLL_COUNT;
313 
314 	while (count) {
315 		qede_process_arfs_filters(edev, false);
316 
317 		if (!edev->arfs->filter_count)
318 			break;
319 
320 		msleep(100);
321 		count--;
322 	}
323 
324 	if (!count) {
325 		DP_NOTICE(edev, "Timeout in polling for arfs filter free\n");
326 
327 		/* Something is terribly wrong, free forcefully */
328 		qede_process_arfs_filters(edev, true);
329 	}
330 }
331 
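/* Allocate the aRFS bookkeeping: the hash buckets, the sw_id bitmap and,
 * when RFS acceleration is compiled in, the rx IRQ cpu rmap.
 */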
332 int qede_alloc_arfs(struct qede_dev *edev)
333 {
334 	int i;
335 
336 	edev->arfs = vzalloc(sizeof(*edev->arfs));
337 	if (!edev->arfs)
338 		return -ENOMEM;
339 
340 	spin_lock_init(&edev->arfs->arfs_list_lock);
341 
342 	for (i = 0; i <= QEDE_RFS_FLW_MASK; i++)
343 		INIT_HLIST_HEAD(QEDE_ARFS_BUCKET_HEAD(edev, i));
344 
345 	edev->arfs->arfs_fltr_bmap =
346 		vzalloc(array_size(sizeof(long),
347 				   BITS_TO_LONGS(QEDE_RFS_MAX_FLTR)));
348 	if (!edev->arfs->arfs_fltr_bmap) {
349 		vfree(edev->arfs);
350 		edev->arfs = NULL;
351 		return -ENOMEM;
352 	}
353 
354 #ifdef CONFIG_RFS_ACCEL
355 	edev->ndev->rx_cpu_rmap = alloc_irq_cpu_rmap(QEDE_RSS_COUNT(edev));
356 	if (!edev->ndev->rx_cpu_rmap) {
357 		vfree(edev->arfs->arfs_fltr_bmap);
358 		edev->arfs->arfs_fltr_bmap = NULL;
359 		vfree(edev->arfs);
360 		edev->arfs = NULL;
361 		return -ENOMEM;
362 	}
363 #endif
364 	return 0;
365 }
366 
367 void qede_free_arfs(struct qede_dev *edev)
368 {
369 	if (!edev->arfs)
370 		return;
371 
372 #ifdef CONFIG_RFS_ACCEL
373 	if (edev->ndev->rx_cpu_rmap)
374 		free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap);
375 
376 	edev->ndev->rx_cpu_rmap = NULL;
377 #endif
378 	vfree(edev->arfs->arfs_fltr_bmap);
379 	edev->arfs->arfs_fltr_bmap = NULL;
380 	vfree(edev->arfs);
381 	edev->arfs = NULL;
382 }
383 
384 #ifdef CONFIG_RFS_ACCEL
385 static bool qede_compare_ip_addr(struct qede_arfs_fltr_node *tpos,
386 				 const struct sk_buff *skb)
387 {
388 	if (skb->protocol == htons(ETH_P_IP)) {
389 		if (tpos->tuple.src_ipv4 == ip_hdr(skb)->saddr &&
390 		    tpos->tuple.dst_ipv4 == ip_hdr(skb)->daddr)
391 			return true;
392 		else
393 			return false;
394 	} else {
395 		struct in6_addr *src = &tpos->tuple.src_ipv6;
396 		u8 size = sizeof(struct in6_addr);
397 
398 		if (!memcmp(src, &ipv6_hdr(skb)->saddr, size) &&
399 		    !memcmp(&tpos->tuple.dst_ipv6, &ipv6_hdr(skb)->daddr, size))
400 			return true;
401 		else
402 			return false;
403 	}
404 }
405 
406 static struct qede_arfs_fltr_node *
407 qede_arfs_htbl_key_search(struct hlist_head *h, const struct sk_buff *skb,
408 			  __be16 src_port, __be16 dst_port, u8 ip_proto)
409 {
410 	struct qede_arfs_fltr_node *tpos;
411 
412 	hlist_for_each_entry(tpos, h, node)
413 		if (tpos->tuple.ip_proto == ip_proto &&
414 		    tpos->tuple.eth_proto == skb->protocol &&
415 		    qede_compare_ip_addr(tpos, skb) &&
416 		    tpos->tuple.src_port == src_port &&
417 		    tpos->tuple.dst_port == dst_port)
418 			return tpos;
419 
420 	return NULL;
421 }
422 
423 static struct qede_arfs_fltr_node *
424 qede_alloc_filter(struct qede_dev *edev, int min_hlen)
425 {
426 	struct qede_arfs_fltr_node *n;
427 	int bit_id;
428 
429 	bit_id = find_first_zero_bit(edev->arfs->arfs_fltr_bmap,
430 				     QEDE_RFS_MAX_FLTR);
431 
432 	if (bit_id >= QEDE_RFS_MAX_FLTR)
433 		return NULL;
434 
435 	n = kzalloc(sizeof(*n), GFP_ATOMIC);
436 	if (!n)
437 		return NULL;
438 
439 	n->data = kzalloc(min_hlen, GFP_ATOMIC);
440 	if (!n->data) {
441 		kfree(n);
442 		return NULL;
443 	}
444 
445 	n->sw_id = (u16)bit_id;
446 	set_bit(bit_id, edev->arfs->arfs_fltr_bmap);
447 	return n;
448 }
449 
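/* .ndo_rx_flow_steer() callback - steer an aRFS flow to the rx queue serving
 * the CPU that is consuming it. The skb's headers are parsed into a tuple,
 * the matching hash bucket is searched, and either an existing filter is
 * moved to the new queue or a new 5-tuple filter is allocated and configured.
 */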
450 int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
451 		       u16 rxq_index, u32 flow_id)
452 {
453 	struct qede_dev *edev = netdev_priv(dev);
454 	struct qede_arfs_fltr_node *n;
455 	int min_hlen, rc, tp_offset;
456 	struct ethhdr *eth;
457 	__be16 *ports;
458 	u16 tbl_idx;
459 	u8 ip_proto;
460 
461 	if (skb->encapsulation)
462 		return -EPROTONOSUPPORT;
463 
464 	if (skb->protocol != htons(ETH_P_IP) &&
465 	    skb->protocol != htons(ETH_P_IPV6))
466 		return -EPROTONOSUPPORT;
467 
468 	if (skb->protocol == htons(ETH_P_IP)) {
469 		ip_proto = ip_hdr(skb)->protocol;
470 		tp_offset = sizeof(struct iphdr);
471 	} else {
472 		ip_proto = ipv6_hdr(skb)->nexthdr;
473 		tp_offset = sizeof(struct ipv6hdr);
474 	}
475 
476 	if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP)
477 		return -EPROTONOSUPPORT;
478 
479 	ports = (__be16 *)(skb->data + tp_offset);
480 	tbl_idx = skb_get_hash_raw(skb) & QEDE_RFS_FLW_MASK;
481 
482 	spin_lock_bh(&edev->arfs->arfs_list_lock);
483 
484 	n = qede_arfs_htbl_key_search(QEDE_ARFS_BUCKET_HEAD(edev, tbl_idx),
485 				      skb, ports[0], ports[1], ip_proto);
486 	if (n) {
487 		/* Filter match */
488 		n->next_rxq_id = rxq_index;
489 
490 		if (test_bit(QEDE_FLTR_VALID, &n->state)) {
491 			if (n->rxq_id != rxq_index)
492 				qede_configure_arfs_fltr(edev, n, n->rxq_id,
493 							 false);
494 		} else {
495 			if (!n->used) {
496 				n->rxq_id = rxq_index;
497 				qede_configure_arfs_fltr(edev, n, n->rxq_id,
498 							 true);
499 			}
500 		}
501 
502 		rc = n->sw_id;
503 		goto ret_unlock;
504 	}
505 
506 	min_hlen = ETH_HLEN + skb_headlen(skb);
507 
508 	n = qede_alloc_filter(edev, min_hlen);
509 	if (!n) {
510 		rc = -ENOMEM;
511 		goto ret_unlock;
512 	}
513 
514 	n->buf_len = min_hlen;
515 	n->rxq_id = rxq_index;
516 	n->next_rxq_id = rxq_index;
517 	n->tuple.src_port = ports[0];
518 	n->tuple.dst_port = ports[1];
519 	n->flow_id = flow_id;
520 
521 	if (skb->protocol == htons(ETH_P_IP)) {
522 		n->tuple.src_ipv4 = ip_hdr(skb)->saddr;
523 		n->tuple.dst_ipv4 = ip_hdr(skb)->daddr;
524 	} else {
525 		memcpy(&n->tuple.src_ipv6, &ipv6_hdr(skb)->saddr,
526 		       sizeof(struct in6_addr));
527 		memcpy(&n->tuple.dst_ipv6, &ipv6_hdr(skb)->daddr,
528 		       sizeof(struct in6_addr));
529 	}
530 
531 	eth = (struct ethhdr *)n->data;
532 	eth->h_proto = skb->protocol;
533 	n->tuple.eth_proto = skb->protocol;
534 	n->tuple.ip_proto = ip_proto;
535 	n->tuple.mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
536 	memcpy(n->data + ETH_HLEN, skb->data, skb_headlen(skb));
537 
538 	rc = qede_enqueue_fltr_and_config_searcher(edev, n, tbl_idx);
539 	if (rc)
540 		goto ret_unlock;
541 
542 	qede_configure_arfs_fltr(edev, n, n->rxq_id, true);
543 
544 	spin_unlock_bh(&edev->arfs->arfs_list_lock);
545 
546 	set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags);
547 	schedule_delayed_work(&edev->sp_task, 0);
548 
549 	return n->sw_id;
550 
551 ret_unlock:
552 	spin_unlock_bh(&edev->arfs->arfs_list_lock);
553 	return rc;
554 }
555 #endif
556 
557 void qede_udp_ports_update(void *dev, u16 vxlan_port, u16 geneve_port)
558 {
559 	struct qede_dev *edev = dev;
560 
561 	if (edev->vxlan_dst_port != vxlan_port)
562 		edev->vxlan_dst_port = 0;
563 
564 	if (edev->geneve_dst_port != geneve_port)
565 		edev->geneve_dst_port = 0;
566 }
567 
568 void qede_force_mac(void *dev, u8 *mac, bool forced)
569 {
570 	struct qede_dev *edev = dev;
571 
572 	__qede_lock(edev);
573 
574 	if (!is_valid_ether_addr(mac)) {
575 		__qede_unlock(edev);
576 		return;
577 	}
578 
579 	ether_addr_copy(edev->ndev->dev_addr, mac);
580 	__qede_unlock(edev);
581 }
582 
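/* Fill the vport RSS parameters from the device configuration: the
 * indirection table (re-initialized via ethtool_rxfh_indir_default() when it
 * was never set or references queues that no longer exist), the hash key and
 * the RSS capabilities. *update is cleared when only a single rx queue
 * exists and RSS is therefore irrelevant.
 */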
583 void qede_fill_rss_params(struct qede_dev *edev,
584 			  struct qed_update_vport_rss_params *rss, u8 *update)
585 {
586 	bool need_reset = false;
587 	int i;
588 
589 	if (QEDE_RSS_COUNT(edev) <= 1) {
590 		memset(rss, 0, sizeof(*rss));
591 		*update = 0;
592 		return;
593 	}
594 
595 	/* Need to validate current RSS config uses valid entries */
596 	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
597 		if (edev->rss_ind_table[i] >= QEDE_RSS_COUNT(edev)) {
598 			need_reset = true;
599 			break;
600 		}
601 	}
602 
603 	if (!(edev->rss_params_inited & QEDE_RSS_INDIR_INITED) || need_reset) {
604 		for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
605 			u16 indir_val, val;
606 
607 			val = QEDE_RSS_COUNT(edev);
608 			indir_val = ethtool_rxfh_indir_default(i, val);
609 			edev->rss_ind_table[i] = indir_val;
610 		}
611 		edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
612 	}
613 
614 	/* Now that we have the queue-indirection, prepare the handles */
615 	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
616 		u16 idx = QEDE_RX_QUEUE_IDX(edev, edev->rss_ind_table[i]);
617 
618 		rss->rss_ind_table[i] = edev->fp_array[idx].rxq->handle;
619 	}
620 
621 	if (!(edev->rss_params_inited & QEDE_RSS_KEY_INITED)) {
622 		netdev_rss_key_fill(edev->rss_key, sizeof(edev->rss_key));
623 		edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
624 	}
625 	memcpy(rss->rss_key, edev->rss_key, sizeof(rss->rss_key));
626 
627 	if (!(edev->rss_params_inited & QEDE_RSS_CAPS_INITED)) {
628 		edev->rss_caps = QED_RSS_IPV4 | QED_RSS_IPV6 |
629 		    QED_RSS_IPV4_TCP | QED_RSS_IPV6_TCP;
630 		edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;
631 	}
632 	rss->rss_caps = edev->rss_caps;
633 
634 	*update = 1;
635 }
636 
637 static int qede_set_ucast_rx_mac(struct qede_dev *edev,
638 				 enum qed_filter_xcast_params_type opcode,
639 				 unsigned char mac[ETH_ALEN])
640 {
641 	struct qed_filter_params filter_cmd;
642 
643 	memset(&filter_cmd, 0, sizeof(filter_cmd));
644 	filter_cmd.type = QED_FILTER_TYPE_UCAST;
645 	filter_cmd.filter.ucast.type = opcode;
646 	filter_cmd.filter.ucast.mac_valid = 1;
647 	ether_addr_copy(filter_cmd.filter.ucast.mac, mac);
648 
649 	return edev->ops->filter_config(edev->cdev, &filter_cmd);
650 }
651 
652 static int qede_set_ucast_rx_vlan(struct qede_dev *edev,
653 				  enum qed_filter_xcast_params_type opcode,
654 				  u16 vid)
655 {
656 	struct qed_filter_params filter_cmd;
657 
658 	memset(&filter_cmd, 0, sizeof(filter_cmd));
659 	filter_cmd.type = QED_FILTER_TYPE_UCAST;
660 	filter_cmd.filter.ucast.type = opcode;
661 	filter_cmd.filter.ucast.vlan_valid = 1;
662 	filter_cmd.filter.ucast.vlan = vid;
663 
664 	return edev->ops->filter_config(edev->cdev, &filter_cmd);
665 }
666 
667 static int qede_config_accept_any_vlan(struct qede_dev *edev, bool action)
668 {
669 	struct qed_update_vport_params *params;
670 	int rc;
671 
672 	/* Proceed only if action actually needs to be performed */
673 	if (edev->accept_any_vlan == action)
674 		return 0;
675 
676 	params = vzalloc(sizeof(*params));
677 	if (!params)
678 		return -ENOMEM;
679 
680 	params->vport_id = 0;
681 	params->accept_any_vlan = action;
682 	params->update_accept_any_vlan_flg = 1;
683 
684 	rc = edev->ops->vport_update(edev->cdev, params);
685 	if (rc) {
686 		DP_ERR(edev, "Failed to %s accept-any-vlan\n",
687 		       action ? "enable" : "disable");
688 	} else {
689 		DP_INFO(edev, "%s accept-any-vlan\n",
690 			action ? "enabled" : "disabled");
691 		edev->accept_any_vlan = action;
692 	}
693 
694 	vfree(params);
695 	return 0;
696 }
697 
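/* .ndo_vlan_rx_add_vid() callback - add a VLAN filter. When the interface is
 * down the VLAN is only cached for later configuration; when the hardware
 * filter quota is exhausted, accept-any-vlan mode is enabled instead.
 */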
698 int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
699 {
700 	struct qede_dev *edev = netdev_priv(dev);
701 	struct qede_vlan *vlan, *tmp;
702 	int rc = 0;
703 
704 	DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan 0x%04x\n", vid);
705 
706 	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
707 	if (!vlan) {
708 		DP_INFO(edev, "Failed to allocate struct for vlan\n");
709 		return -ENOMEM;
710 	}
711 	INIT_LIST_HEAD(&vlan->list);
712 	vlan->vid = vid;
713 	vlan->configured = false;
714 
715 	/* Verify vlan isn't already configured */
716 	list_for_each_entry(tmp, &edev->vlan_list, list) {
717 		if (tmp->vid == vlan->vid) {
718 			DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
719 				   "vlan already configured\n");
720 			kfree(vlan);
721 			return -EEXIST;
722 		}
723 	}
724 
725 	/* If interface is down, cache this VLAN ID and return */
726 	__qede_lock(edev);
727 	if (edev->state != QEDE_STATE_OPEN) {
728 		DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
729 			   "Interface is down, VLAN %d will be configured when interface is up\n",
730 			   vid);
731 		if (vid != 0)
732 			edev->non_configured_vlans++;
733 		list_add(&vlan->list, &edev->vlan_list);
734 		goto out;
735 	}
736 
737 	/* Check for the filter limit.
738 	 * Note - vlan0 has a reserved filter and can be added without
739 	 * worrying about quota
740 	 */
741 	if ((edev->configured_vlans < edev->dev_info.num_vlan_filters) ||
742 	    (vlan->vid == 0)) {
743 		rc = qede_set_ucast_rx_vlan(edev,
744 					    QED_FILTER_XCAST_TYPE_ADD,
745 					    vlan->vid);
746 		if (rc) {
747 			DP_ERR(edev, "Failed to configure VLAN %d\n",
748 			       vlan->vid);
749 			kfree(vlan);
750 			goto out;
751 		}
752 		vlan->configured = true;
753 
754 		/* vlan0 filter isn't consuming out of our quota */
755 		if (vlan->vid != 0)
756 			edev->configured_vlans++;
757 	} else {
758 		/* Out of quota; Activate accept-any-VLAN mode */
759 		if (!edev->non_configured_vlans) {
760 			rc = qede_config_accept_any_vlan(edev, true);
761 			if (rc) {
762 				kfree(vlan);
763 				goto out;
764 			}
765 		}
766 
767 		edev->non_configured_vlans++;
768 	}
769 
770 	list_add(&vlan->list, &edev->vlan_list);
771 
772 out:
773 	__qede_unlock(edev);
774 	return rc;
775 }
776 
777 static void qede_del_vlan_from_list(struct qede_dev *edev,
778 				    struct qede_vlan *vlan)
779 {
780 	/* vlan0 filter isn't consuming out of our quota */
781 	if (vlan->vid != 0) {
782 		if (vlan->configured)
783 			edev->configured_vlans--;
784 		else
785 			edev->non_configured_vlans--;
786 	}
787 
788 	list_del(&vlan->list);
789 	kfree(vlan);
790 }
791 
792 int qede_configure_vlan_filters(struct qede_dev *edev)
793 {
794 	int rc = 0, real_rc = 0, accept_any_vlan = 0;
795 	struct qed_dev_eth_info *dev_info;
796 	struct qede_vlan *vlan = NULL;
797 
798 	if (list_empty(&edev->vlan_list))
799 		return 0;
800 
801 	dev_info = &edev->dev_info;
802 
803 	/* Configure non-configured vlans */
804 	list_for_each_entry(vlan, &edev->vlan_list, list) {
805 		if (vlan->configured)
806 			continue;
807 
808 		/* We have used all our credits, now enable accept_any_vlan */
809 		if ((vlan->vid != 0) &&
810 		    (edev->configured_vlans == dev_info->num_vlan_filters)) {
811 			accept_any_vlan = 1;
812 			continue;
813 		}
814 
815 		DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan %d\n", vlan->vid);
816 
817 		rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_ADD,
818 					    vlan->vid);
819 		if (rc) {
820 			DP_ERR(edev, "Failed to configure VLAN %u\n",
821 			       vlan->vid);
822 			real_rc = rc;
823 			continue;
824 		}
825 
826 		vlan->configured = true;
827 		/* vlan0 filter doesn't consume our VLAN filter's quota */
828 		if (vlan->vid != 0) {
829 			edev->non_configured_vlans--;
830 			edev->configured_vlans++;
831 		}
832 	}
833 
834 	/* enable accept_any_vlan mode if we have more VLANs than credits,
835 	 * or remove accept_any_vlan mode if we've actually removed
836 	 * a non-configured vlan, and all remaining vlans are truly configured.
837 	 */
838 
839 	if (accept_any_vlan)
840 		rc = qede_config_accept_any_vlan(edev, true);
841 	else if (!edev->non_configured_vlans)
842 		rc = qede_config_accept_any_vlan(edev, false);
843 
844 	if (rc && !real_rc)
845 		real_rc = rc;
846 
847 	return real_rc;
848 }
849 
850 int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
851 {
852 	struct qede_dev *edev = netdev_priv(dev);
853 	struct qede_vlan *vlan = NULL;
854 	int rc = 0;
855 
856 	DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid);
857 
858 	/* Find whether entry exists */
859 	__qede_lock(edev);
860 	list_for_each_entry(vlan, &edev->vlan_list, list)
861 		if (vlan->vid == vid)
862 			break;
863 
864 	if (!vlan || (vlan->vid != vid)) {
865 		DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
866 			   "Vlan isn't configured\n");
867 		goto out;
868 	}
869 
870 	if (edev->state != QEDE_STATE_OPEN) {
871 		/* Interface is already down, so there is no vport instance
872 		 * to remove the vlan filter from; just update the vlan list.
873 		 */
874 		DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
875 			   "Interface is down, removing VLAN from list only\n");
876 		qede_del_vlan_from_list(edev, vlan);
877 		goto out;
878 	}
879 
880 	/* Remove vlan */
881 	if (vlan->configured) {
882 		rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL,
883 					    vid);
884 		if (rc) {
885 			DP_ERR(edev, "Failed to remove VLAN %d\n", vid);
886 			goto out;
887 		}
888 	}
889 
890 	qede_del_vlan_from_list(edev, vlan);
891 
892 	/* We have removed a VLAN - try to see if we can
893 	 * configure non-configured VLAN from the list.
894 	 */
895 	rc = qede_configure_vlan_filters(edev);
896 
897 out:
898 	__qede_unlock(edev);
899 	return rc;
900 }
901 
902 void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
903 {
904 	struct qede_vlan *vlan = NULL;
905 
906 	if (list_empty(&edev->vlan_list))
907 		return;
908 
909 	list_for_each_entry(vlan, &edev->vlan_list, list) {
910 		if (!vlan->configured)
911 			continue;
912 
913 		vlan->configured = false;
914 
915 		/* vlan0 filter isn't consuming out of our quota */
916 		if (vlan->vid != 0) {
917 			edev->non_configured_vlans++;
918 			edev->configured_vlans--;
919 		}
920 
921 		DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
922 			   "marked vlan %d as non-configured\n", vlan->vid);
923 	}
924 
925 	edev->accept_any_vlan = false;
926 }
927 
928 static void qede_set_features_reload(struct qede_dev *edev,
929 				     struct qede_reload_args *args)
930 {
931 	edev->ndev->features = args->u.features;
932 }
933 
934 netdev_features_t qede_fix_features(struct net_device *dev,
935 				    netdev_features_t features)
936 {
937 	struct qede_dev *edev = netdev_priv(dev);
938 
939 	if (edev->xdp_prog || edev->ndev->mtu > PAGE_SIZE ||
940 	    !(features & NETIF_F_GRO))
941 		features &= ~NETIF_F_GRO_HW;
942 
943 	return features;
944 }
945 
946 int qede_set_features(struct net_device *dev, netdev_features_t features)
947 {
948 	struct qede_dev *edev = netdev_priv(dev);
949 	netdev_features_t changes = features ^ dev->features;
950 	bool need_reload = false;
951 
952 	if (changes & NETIF_F_GRO_HW)
953 		need_reload = true;
954 
955 	if (need_reload) {
956 		struct qede_reload_args args;
957 
958 		args.u.features = features;
959 		args.func = &qede_set_features_reload;
960 
961 		/* Make sure that we definitely need to reload.
962 		 * In case of an eBPF attached program, there will be no FW
963 		 * aggregations, so no need to actually reload.
964 		 */
965 		__qede_lock(edev);
966 		if (edev->xdp_prog)
967 			args.func(edev, &args);
968 		else
969 			qede_reload(edev, &args, true);
970 		__qede_unlock(edev);
971 
972 		return 1;
973 	}
974 
975 	return 0;
976 }
977 
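/* Configure a VXLAN/GENEVE UDP destination port in the device. Only one port
 * per tunnel type is supported; requests for additional ports are ignored.
 */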
978 void qede_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti)
979 {
980 	struct qede_dev *edev = netdev_priv(dev);
981 	struct qed_tunn_params tunn_params;
982 	u16 t_port = ntohs(ti->port);
983 	int rc;
984 
985 	memset(&tunn_params, 0, sizeof(tunn_params));
986 
987 	switch (ti->type) {
988 	case UDP_TUNNEL_TYPE_VXLAN:
989 		if (!edev->dev_info.common.vxlan_enable)
990 			return;
991 
992 		if (edev->vxlan_dst_port)
993 			return;
994 
995 		tunn_params.update_vxlan_port = 1;
996 		tunn_params.vxlan_port = t_port;
997 
998 		__qede_lock(edev);
999 		rc = edev->ops->tunn_config(edev->cdev, &tunn_params);
1000 		__qede_unlock(edev);
1001 
1002 		if (!rc) {
1003 			edev->vxlan_dst_port = t_port;
1004 			DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d\n",
1005 				   t_port);
1006 		} else {
1007 			DP_NOTICE(edev, "Failed to add vxlan UDP port=%d\n",
1008 				  t_port);
1009 		}
1010 
1011 		break;
1012 	case UDP_TUNNEL_TYPE_GENEVE:
1013 		if (!edev->dev_info.common.geneve_enable)
1014 			return;
1015 
1016 		if (edev->geneve_dst_port)
1017 			return;
1018 
1019 		tunn_params.update_geneve_port = 1;
1020 		tunn_params.geneve_port = t_port;
1021 
1022 		__qede_lock(edev);
1023 		rc = edev->ops->tunn_config(edev->cdev, &tunn_params);
1024 		__qede_unlock(edev);
1025 
1026 		if (!rc) {
1027 			edev->geneve_dst_port = t_port;
1028 			DP_VERBOSE(edev, QED_MSG_DEBUG,
1029 				   "Added geneve port=%d\n", t_port);
1030 		} else {
1031 			DP_NOTICE(edev, "Failed to add geneve UDP port=%d\n",
1032 				  t_port);
1033 		}
1034 
1035 		break;
1036 	default:
1037 		return;
1038 	}
1039 }
1040 
1041 void qede_udp_tunnel_del(struct net_device *dev,
1042 			 struct udp_tunnel_info *ti)
1043 {
1044 	struct qede_dev *edev = netdev_priv(dev);
1045 	struct qed_tunn_params tunn_params;
1046 	u16 t_port = ntohs(ti->port);
1047 
1048 	memset(&tunn_params, 0, sizeof(tunn_params));
1049 
1050 	switch (ti->type) {
1051 	case UDP_TUNNEL_TYPE_VXLAN:
1052 		if (t_port != edev->vxlan_dst_port)
1053 			return;
1054 
1055 		tunn_params.update_vxlan_port = 1;
1056 		tunn_params.vxlan_port = 0;
1057 
1058 		__qede_lock(edev);
1059 		edev->ops->tunn_config(edev->cdev, &tunn_params);
1060 		__qede_unlock(edev);
1061 
1062 		edev->vxlan_dst_port = 0;
1063 
1064 		DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d\n",
1065 			   t_port);
1066 
1067 		break;
1068 	case UDP_TUNNEL_TYPE_GENEVE:
1069 		if (t_port != edev->geneve_dst_port)
1070 			return;
1071 
1072 		tunn_params.update_geneve_port = 1;
1073 		tunn_params.geneve_port = 0;
1074 
1075 		__qede_lock(edev);
1076 		edev->ops->tunn_config(edev->cdev, &tunn_params);
1077 		__qede_unlock(edev);
1078 
1079 		edev->geneve_dst_port = 0;
1080 
1081 		DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d\n",
1082 			   t_port);
1083 		break;
1084 	default:
1085 		return;
1086 	}
1087 }
1088 
1089 static void qede_xdp_reload_func(struct qede_dev *edev,
1090 				 struct qede_reload_args *args)
1091 {
1092 	struct bpf_prog *old;
1093 
1094 	old = xchg(&edev->xdp_prog, args->u.new_prog);
1095 	if (old)
1096 		bpf_prog_put(old);
1097 }
1098 
1099 static int qede_xdp_set(struct qede_dev *edev, struct bpf_prog *prog)
1100 {
1101 	struct qede_reload_args args;
1102 
1103 	/* If we're called, there was already a bpf reference increment */
1104 	args.func = &qede_xdp_reload_func;
1105 	args.u.new_prog = prog;
1106 	qede_reload(edev, &args, false);
1107 
1108 	return 0;
1109 }
1110 
1111 int qede_xdp(struct net_device *dev, struct netdev_bpf *xdp)
1112 {
1113 	struct qede_dev *edev = netdev_priv(dev);
1114 
1115 	switch (xdp->command) {
1116 	case XDP_SETUP_PROG:
1117 		return qede_xdp_set(edev, xdp->prog);
1118 	case XDP_QUERY_PROG:
1119 		xdp->prog_attached = !!edev->xdp_prog;
1120 		xdp->prog_id = edev->xdp_prog ? edev->xdp_prog->aux->id : 0;
1121 		return 0;
1122 	default:
1123 		return -EINVAL;
1124 	}
1125 }
1126 
1127 static int qede_set_mcast_rx_mac(struct qede_dev *edev,
1128 				 enum qed_filter_xcast_params_type opcode,
1129 				 unsigned char *mac, int num_macs)
1130 {
1131 	struct qed_filter_params filter_cmd;
1132 	int i;
1133 
1134 	memset(&filter_cmd, 0, sizeof(filter_cmd));
1135 	filter_cmd.type = QED_FILTER_TYPE_MCAST;
1136 	filter_cmd.filter.mcast.type = opcode;
1137 	filter_cmd.filter.mcast.num = num_macs;
1138 
1139 	for (i = 0; i < num_macs; i++, mac += ETH_ALEN)
1140 		ether_addr_copy(filter_cmd.filter.mcast.mac[i], mac);
1141 
1142 	return edev->ops->filter_config(edev->cdev, &filter_cmd);
1143 }
1144 
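/* .ndo_set_mac_address() callback. While the interface is up, the previous
 * primary MAC filter is removed and the new one configured; while it is down
 * only the netdev copy is updated, and a VF additionally asks the PF to
 * refresh the copy in its bulletin board.
 */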
1145 int qede_set_mac_addr(struct net_device *ndev, void *p)
1146 {
1147 	struct qede_dev *edev = netdev_priv(ndev);
1148 	struct sockaddr *addr = p;
1149 	int rc = 0;
1150 
1151 	/* Make sure the state doesn't transition while changing the MAC.
1152 	 * Also, all flows accessing the dev_addr field are doing that under
1153 	 * this lock.
1154 	 */
1155 	__qede_lock(edev);
1156 
1157 	if (!is_valid_ether_addr(addr->sa_data)) {
1158 		DP_NOTICE(edev, "The MAC address is not valid\n");
1159 		rc = -EFAULT;
1160 		goto out;
1161 	}
1162 
1163 	if (!edev->ops->check_mac(edev->cdev, addr->sa_data)) {
1164 		DP_NOTICE(edev, "qed prevents setting MAC %pM\n",
1165 			  addr->sa_data);
1166 		rc = -EINVAL;
1167 		goto out;
1168 	}
1169 
1170 	if (edev->state == QEDE_STATE_OPEN) {
1171 		/* Remove the previous primary mac */
1172 		rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
1173 					   ndev->dev_addr);
1174 		if (rc)
1175 			goto out;
1176 	}
1177 
1178 	ether_addr_copy(ndev->dev_addr, addr->sa_data);
1179 	DP_INFO(edev, "Setting device MAC to %pM\n", addr->sa_data);
1180 
1181 	if (edev->state != QEDE_STATE_OPEN) {
1182 		DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
1183 			   "The device is currently down\n");
1184 		/* Ask PF to explicitly update a copy in bulletin board */
1185 		if (IS_VF(edev) && edev->ops->req_bulletin_update_mac)
1186 			edev->ops->req_bulletin_update_mac(edev->cdev,
1187 							   ndev->dev_addr);
1188 		goto out;
1189 	}
1190 
1191 	edev->ops->common->update_mac(edev->cdev, ndev->dev_addr);
1192 
1193 	rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
1194 				   ndev->dev_addr);
1195 out:
1196 	__qede_unlock(edev);
1197 	return rc;
1198 }
1199 
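/* Rebuild the multicast MAC configuration: flush the previously configured
 * filters, then either program the current (up to 64) multicast addresses or
 * fall back to multicast-promiscuous mode when the list is too long or
 * IFF_ALLMULTI is set.
 */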
1200 static int
1201 qede_configure_mcast_filtering(struct net_device *ndev,
1202 			       enum qed_filter_rx_mode_type *accept_flags)
1203 {
1204 	struct qede_dev *edev = netdev_priv(ndev);
1205 	unsigned char *mc_macs, *temp;
1206 	struct netdev_hw_addr *ha;
1207 	int rc = 0, mc_count;
1208 	size_t size;
1209 
1210 	size = 64 * ETH_ALEN;
1211 
1212 	mc_macs = kzalloc(size, GFP_KERNEL);
1213 	if (!mc_macs) {
1214 		DP_NOTICE(edev,
1215 			  "Failed to allocate memory for multicast MACs\n");
1216 		rc = -ENOMEM;
1217 		goto exit;
1218 	}
1219 
1220 	temp = mc_macs;
1221 
1222 	/* Remove all previously configured MAC filters */
1223 	rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
1224 				   mc_macs, 1);
1225 	if (rc)
1226 		goto exit;
1227 
1228 	netif_addr_lock_bh(ndev);
1229 
1230 	mc_count = netdev_mc_count(ndev);
1231 	if (mc_count < 64) {
1232 		netdev_for_each_mc_addr(ha, ndev) {
1233 			ether_addr_copy(temp, ha->addr);
1234 			temp += ETH_ALEN;
1235 		}
1236 	}
1237 
1238 	netif_addr_unlock_bh(ndev);
1239 
1240 	/* Check for all multicast @@@TBD resource allocation */
1241 	if ((ndev->flags & IFF_ALLMULTI) || (mc_count > 64)) {
1242 		if (*accept_flags == QED_FILTER_RX_MODE_TYPE_REGULAR)
1243 			*accept_flags = QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
1244 	} else {
1245 		/* Add all multicast MAC filters */
1246 		rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
1247 					   mc_macs, mc_count);
1248 	}
1249 
1250 exit:
1251 	kfree(mc_macs);
1252 	return rc;
1253 }
1254 
1255 void qede_set_rx_mode(struct net_device *ndev)
1256 {
1257 	struct qede_dev *edev = netdev_priv(ndev);
1258 
1259 	set_bit(QEDE_SP_RX_MODE, &edev->sp_flags);
1260 	schedule_delayed_work(&edev->sp_task, 0);
1261 }
1262 
1263 /* Must be called with qede_lock held */
1264 void qede_config_rx_mode(struct net_device *ndev)
1265 {
1266 	enum qed_filter_rx_mode_type accept_flags;
1267 	struct qede_dev *edev = netdev_priv(ndev);
1268 	struct qed_filter_params rx_mode;
1269 	unsigned char *uc_macs, *temp;
1270 	struct netdev_hw_addr *ha;
1271 	int rc, uc_count;
1272 	size_t size;
1273 
1274 	netif_addr_lock_bh(ndev);
1275 
1276 	uc_count = netdev_uc_count(ndev);
1277 	size = uc_count * ETH_ALEN;
1278 
1279 	uc_macs = kzalloc(size, GFP_ATOMIC);
1280 	if (!uc_macs) {
1281 		DP_NOTICE(edev, "Failed to allocate memory for unicast MACs\n");
1282 		netif_addr_unlock_bh(ndev);
1283 		return;
1284 	}
1285 
1286 	temp = uc_macs;
1287 	netdev_for_each_uc_addr(ha, ndev) {
1288 		ether_addr_copy(temp, ha->addr);
1289 		temp += ETH_ALEN;
1290 	}
1291 
1292 	netif_addr_unlock_bh(ndev);
1293 
1294 	/* Configure the struct for the Rx mode */
1295 	memset(&rx_mode, 0, sizeof(struct qed_filter_params));
1296 	rx_mode.type = QED_FILTER_TYPE_RX_MODE;
1297 
1298 	/* Remove all previous unicast secondary macs and multicast macs
1299 	 * (configure / leave the primary mac)
1300 	 */
1301 	rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE,
1302 				   edev->ndev->dev_addr);
1303 	if (rc)
1304 		goto out;
1305 
1306 	/* Check for promiscuous */
1307 	if (ndev->flags & IFF_PROMISC)
1308 		accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
1309 	else
1310 		accept_flags = QED_FILTER_RX_MODE_TYPE_REGULAR;
1311 
1312 	/* Configure all filters regardless, in case promisc is rejected */
1313 	if (uc_count < edev->dev_info.num_mac_filters) {
1314 		int i;
1315 
1316 		temp = uc_macs;
1317 		for (i = 0; i < uc_count; i++) {
1318 			rc = qede_set_ucast_rx_mac(edev,
1319 						   QED_FILTER_XCAST_TYPE_ADD,
1320 						   temp);
1321 			if (rc)
1322 				goto out;
1323 
1324 			temp += ETH_ALEN;
1325 		}
1326 	} else {
1327 		accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
1328 	}
1329 
1330 	rc = qede_configure_mcast_filtering(ndev, &accept_flags);
1331 	if (rc)
1332 		goto out;
1333 
1334 	/* take care of VLAN mode */
1335 	if (ndev->flags & IFF_PROMISC) {
1336 		qede_config_accept_any_vlan(edev, true);
1337 	} else if (!edev->non_configured_vlans) {
1338 		/* It's possible that accept_any_vlan mode is set due to a
1339 		 * previous setting of IFF_PROMISC. If vlan credits are
1340 		 * sufficient, disable accept_any_vlan.
1341 		 */
1342 		qede_config_accept_any_vlan(edev, false);
1343 	}
1344 
1345 	rx_mode.filter.accept_flags = accept_flags;
1346 	edev->ops->filter_config(edev->cdev, &rx_mode);
1347 out:
1348 	kfree(uc_macs);
1349 }
1350 
1351 static struct qede_arfs_fltr_node *
1352 qede_get_arfs_fltr_by_loc(struct hlist_head *head, u32 location)
1353 {
1354 	struct qede_arfs_fltr_node *fltr;
1355 
1356 	hlist_for_each_entry(fltr, head, node)
1357 		if (location == fltr->sw_id)
1358 			return fltr;
1359 
1360 	return NULL;
1361 }
1362 
1363 int qede_get_cls_rule_all(struct qede_dev *edev, struct ethtool_rxnfc *info,
1364 			  u32 *rule_locs)
1365 {
1366 	struct qede_arfs_fltr_node *fltr;
1367 	struct hlist_head *head;
1368 	int cnt = 0, rc = 0;
1369 
1370 	info->data = QEDE_RFS_MAX_FLTR;
1371 
1372 	__qede_lock(edev);
1373 
1374 	if (!edev->arfs) {
1375 		rc = -EPERM;
1376 		goto unlock;
1377 	}
1378 
1379 	head = QEDE_ARFS_BUCKET_HEAD(edev, 0);
1380 
1381 	hlist_for_each_entry(fltr, head, node) {
1382 		if (cnt == info->rule_cnt) {
1383 			rc = -EMSGSIZE;
1384 			goto unlock;
1385 		}
1386 
1387 		rule_locs[cnt] = fltr->sw_id;
1388 		cnt++;
1389 	}
1390 
1391 	info->rule_cnt = cnt;
1392 
1393 unlock:
1394 	__qede_unlock(edev);
1395 	return rc;
1396 }
1397 
1398 int qede_get_cls_rule_entry(struct qede_dev *edev, struct ethtool_rxnfc *cmd)
1399 {
1400 	struct ethtool_rx_flow_spec *fsp = &cmd->fs;
1401 	struct qede_arfs_fltr_node *fltr = NULL;
1402 	int rc = 0;
1403 
1404 	cmd->data = QEDE_RFS_MAX_FLTR;
1405 
1406 	__qede_lock(edev);
1407 
1408 	if (!edev->arfs) {
1409 		rc = -EPERM;
1410 		goto unlock;
1411 	}
1412 
1413 	fltr = qede_get_arfs_fltr_by_loc(QEDE_ARFS_BUCKET_HEAD(edev, 0),
1414 					 fsp->location);
1415 	if (!fltr) {
1416 		DP_NOTICE(edev, "Rule not found - location=0x%x\n",
1417 			  fsp->location);
1418 		rc = -EINVAL;
1419 		goto unlock;
1420 	}
1421 
1422 	if (fltr->tuple.eth_proto == htons(ETH_P_IP)) {
1423 		if (fltr->tuple.ip_proto == IPPROTO_TCP)
1424 			fsp->flow_type = TCP_V4_FLOW;
1425 		else
1426 			fsp->flow_type = UDP_V4_FLOW;
1427 
1428 		fsp->h_u.tcp_ip4_spec.psrc = fltr->tuple.src_port;
1429 		fsp->h_u.tcp_ip4_spec.pdst = fltr->tuple.dst_port;
1430 		fsp->h_u.tcp_ip4_spec.ip4src = fltr->tuple.src_ipv4;
1431 		fsp->h_u.tcp_ip4_spec.ip4dst = fltr->tuple.dst_ipv4;
1432 	} else {
1433 		if (fltr->tuple.ip_proto == IPPROTO_TCP)
1434 			fsp->flow_type = TCP_V6_FLOW;
1435 		else
1436 			fsp->flow_type = UDP_V6_FLOW;
1437 		fsp->h_u.tcp_ip6_spec.psrc = fltr->tuple.src_port;
1438 		fsp->h_u.tcp_ip6_spec.pdst = fltr->tuple.dst_port;
1439 		memcpy(&fsp->h_u.tcp_ip6_spec.ip6src,
1440 		       &fltr->tuple.src_ipv6, sizeof(struct in6_addr));
1441 		memcpy(&fsp->h_u.tcp_ip6_spec.ip6dst,
1442 		       &fltr->tuple.dst_ipv6, sizeof(struct in6_addr));
1443 	}
1444 
1445 	fsp->ring_cookie = fltr->rxq_id;
1446 
1447 	if (fltr->vfid) {
1448 		fsp->ring_cookie |= ((u64)fltr->vfid) <<
1449 					ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
1450 	}
1451 
1452 	if (fltr->b_is_drop)
1453 		fsp->ring_cookie = RX_CLS_FLOW_DISC;
1454 unlock:
1455 	__qede_unlock(edev);
1456 	return rc;
1457 }
1458 
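/* Poll until the asynchronous configuration started by
 * qede_configure_arfs_fltr() completes. On timeout or a firmware error the
 * filter is torn down and an error is returned.
 */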
1459 static int
1460 qede_poll_arfs_filter_config(struct qede_dev *edev,
1461 			     struct qede_arfs_fltr_node *fltr)
1462 {
1463 	int count = QEDE_ARFS_POLL_COUNT;
1464 
1465 	while (fltr->used && count) {
1466 		msleep(20);
1467 		count--;
1468 	}
1469 
1470 	if (count == 0 || fltr->fw_rc) {
1471 		DP_NOTICE(edev, "Timeout in polling filter config\n");
1472 		qede_dequeue_fltr_and_config_searcher(edev, fltr);
1473 		return -EIO;
1474 	}
1475 
1476 	return fltr->fw_rc;
1477 }
1478 
1479 static int qede_flow_get_min_header_size(struct qede_arfs_tuple *t)
1480 {
1481 	int size = ETH_HLEN;
1482 
1483 	if (t->eth_proto == htons(ETH_P_IP))
1484 		size += sizeof(struct iphdr);
1485 	else
1486 		size += sizeof(struct ipv6hdr);
1487 
1488 	if (t->ip_proto == IPPROTO_TCP)
1489 		size += sizeof(struct tcphdr);
1490 	else
1491 		size += sizeof(struct udphdr);
1492 
1493 	return size;
1494 }
1495 
1496 static bool qede_flow_spec_ipv4_cmp(struct qede_arfs_tuple *a,
1497 				    struct qede_arfs_tuple *b)
1498 {
1499 	if (a->eth_proto != htons(ETH_P_IP) ||
1500 	    b->eth_proto != htons(ETH_P_IP))
1501 		return false;
1502 
1503 	return (a->src_ipv4 == b->src_ipv4) &&
1504 	       (a->dst_ipv4 == b->dst_ipv4);
1505 }
1506 
1507 static void qede_flow_build_ipv4_hdr(struct qede_arfs_tuple *t,
1508 				     void *header)
1509 {
1510 	__be16 *ports = (__be16 *)(header + ETH_HLEN + sizeof(struct iphdr));
1511 	struct iphdr *ip = (struct iphdr *)(header + ETH_HLEN);
1512 	struct ethhdr *eth = (struct ethhdr *)header;
1513 
1514 	eth->h_proto = t->eth_proto;
1515 	ip->saddr = t->src_ipv4;
1516 	ip->daddr = t->dst_ipv4;
1517 	ip->version = 0x4;
1518 	ip->ihl = 0x5;
1519 	ip->protocol = t->ip_proto;
1520 	ip->tot_len = cpu_to_be16(qede_flow_get_min_header_size(t) - ETH_HLEN);
1521 
1522 	/* ports is weakly typed to suit both TCP and UDP ports */
1523 	ports[0] = t->src_port;
1524 	ports[1] = t->dst_port;
1525 }
1526 
1527 static void qede_flow_stringify_ipv4_hdr(struct qede_arfs_tuple *t,
1528 					 void *buffer)
1529 {
1530 	const char *prefix = t->ip_proto == IPPROTO_TCP ? "TCP" : "UDP";
1531 
1532 	snprintf(buffer, QEDE_FILTER_PRINT_MAX_LEN,
1533 		 "%s %pI4 (%04x) -> %pI4 (%04x)",
1534 		 prefix, &t->src_ipv4, t->src_port,
1535 		 &t->dst_ipv4, t->dst_port);
1536 }
1537 
1538 static bool qede_flow_spec_ipv6_cmp(struct qede_arfs_tuple *a,
1539 				    struct qede_arfs_tuple *b)
1540 {
1541 	if (a->eth_proto != htons(ETH_P_IPV6) ||
1542 	    b->eth_proto != htons(ETH_P_IPV6))
1543 		return false;
1544 
1545 	if (memcmp(&a->src_ipv6, &b->src_ipv6, sizeof(struct in6_addr)))
1546 		return false;
1547 
1548 	if (memcmp(&a->dst_ipv6, &b->dst_ipv6, sizeof(struct in6_addr)))
1549 		return false;
1550 
1551 	return true;
1552 }
1553 
1554 static void qede_flow_build_ipv6_hdr(struct qede_arfs_tuple *t,
1555 				     void *header)
1556 {
1557 	__be16 *ports = (__be16 *)(header + ETH_HLEN + sizeof(struct ipv6hdr));
1558 	struct ipv6hdr *ip6 = (struct ipv6hdr *)(header + ETH_HLEN);
1559 	struct ethhdr *eth = (struct ethhdr *)header;
1560 
1561 	eth->h_proto = t->eth_proto;
1562 	memcpy(&ip6->saddr, &t->src_ipv6, sizeof(struct in6_addr));
1563 	memcpy(&ip6->daddr, &t->dst_ipv6, sizeof(struct in6_addr));
1564 	ip6->version = 0x6;
1565 
1566 	if (t->ip_proto == IPPROTO_TCP) {
1567 		ip6->nexthdr = NEXTHDR_TCP;
1568 		ip6->payload_len = cpu_to_be16(sizeof(struct tcphdr));
1569 	} else {
1570 		ip6->nexthdr = NEXTHDR_UDP;
1571 		ip6->payload_len = cpu_to_be16(sizeof(struct udphdr));
1572 	}
1573 
1574 	/* ports is weakly typed to suit both TCP and UDP ports */
1575 	ports[0] = t->src_port;
1576 	ports[1] = t->dst_port;
1577 }
1578 
1579 /* Validate fields which are set and not accepted by the driver */
1580 static int qede_flow_spec_validate_unused(struct qede_dev *edev,
1581 					  struct ethtool_rx_flow_spec *fs)
1582 {
1583 	if (fs->flow_type & FLOW_MAC_EXT) {
1584 		DP_INFO(edev, "Don't support MAC extensions\n");
1585 		return -EOPNOTSUPP;
1586 	}
1587 
1588 	if ((fs->flow_type & FLOW_EXT) &&
1589 	    (fs->h_ext.vlan_etype || fs->h_ext.vlan_tci)) {
1590 		DP_INFO(edev, "Don't support vlan-based classification\n");
1591 		return -EOPNOTSUPP;
1592 	}
1593 
1594 	if ((fs->flow_type & FLOW_EXT) &&
1595 	    (fs->h_ext.data[0] || fs->h_ext.data[1])) {
1596 		DP_INFO(edev, "Don't support user defined data\n");
1597 		return -EOPNOTSUPP;
1598 	}
1599 
1600 	return 0;
1601 }
1602 
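/* Convert the IPv4 part of an ethtool flow spec into an aRFS tuple. Masked
 * (partial) matches and ToS matching are rejected; the accepted field
 * combinations select the 5-tuple, L4-destination-port-only or
 * source-IP-only filtering mode.
 */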
1603 static int qede_flow_spec_to_tuple_ipv4_common(struct qede_dev *edev,
1604 					       struct qede_arfs_tuple *t,
1605 					       struct ethtool_rx_flow_spec *fs)
1606 {
1607 	if ((fs->h_u.tcp_ip4_spec.ip4src &
1608 	     fs->m_u.tcp_ip4_spec.ip4src) != fs->h_u.tcp_ip4_spec.ip4src) {
1609 		DP_INFO(edev, "Don't support IP-masks\n");
1610 		return -EOPNOTSUPP;
1611 	}
1612 
1613 	if ((fs->h_u.tcp_ip4_spec.ip4dst &
1614 	     fs->m_u.tcp_ip4_spec.ip4dst) != fs->h_u.tcp_ip4_spec.ip4dst) {
1615 		DP_INFO(edev, "Don't support IP-masks\n");
1616 		return -EOPNOTSUPP;
1617 	}
1618 
1619 	if ((fs->h_u.tcp_ip4_spec.psrc &
1620 	     fs->m_u.tcp_ip4_spec.psrc) != fs->h_u.tcp_ip4_spec.psrc) {
1621 		DP_INFO(edev, "Don't support port-masks\n");
1622 		return -EOPNOTSUPP;
1623 	}
1624 
1625 	if ((fs->h_u.tcp_ip4_spec.pdst &
1626 	     fs->m_u.tcp_ip4_spec.pdst) != fs->h_u.tcp_ip4_spec.pdst) {
1627 		DP_INFO(edev, "Don't support port-masks\n");
1628 		return -EOPNOTSUPP;
1629 	}
1630 
1631 	if (fs->h_u.tcp_ip4_spec.tos) {
1632 		DP_INFO(edev, "Don't support tos\n");
1633 		return -EOPNOTSUPP;
1634 	}
1635 
1636 	t->eth_proto = htons(ETH_P_IP);
1637 	t->src_ipv4 = fs->h_u.tcp_ip4_spec.ip4src;
1638 	t->dst_ipv4 = fs->h_u.tcp_ip4_spec.ip4dst;
1639 	t->src_port = fs->h_u.tcp_ip4_spec.psrc;
1640 	t->dst_port = fs->h_u.tcp_ip4_spec.pdst;
1641 
1642 	/* We must either have a valid 4-tuple or only dst port
1643 	 * or only src ip as an input
1644 	 */
1645 	if (t->src_port && t->dst_port && t->src_ipv4 && t->dst_ipv4) {
1646 		t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
1647 	} else if (!t->src_port && t->dst_port &&
1648 		   !t->src_ipv4 && !t->dst_ipv4) {
1649 		t->mode = QED_FILTER_CONFIG_MODE_L4_PORT;
1650 	} else if (!t->src_port && !t->dst_port &&
1651 		    !t->dst_ipv4 && t->src_ipv4) {
1652 		t->mode = QED_FILTER_CONFIG_MODE_IP_SRC;
1653 	} else {
1654 		DP_INFO(edev, "Invalid N-tuple\n");
1655 		return -EOPNOTSUPP;
1656 	}
1657 
1658 	t->ip_comp = qede_flow_spec_ipv4_cmp;
1659 	t->build_hdr = qede_flow_build_ipv4_hdr;
1660 	t->stringify = qede_flow_stringify_ipv4_hdr;
1661 
1662 	return 0;
1663 }
1664 
1665 static int qede_flow_spec_to_tuple_tcpv4(struct qede_dev *edev,
1666 					 struct qede_arfs_tuple *t,
1667 					 struct ethtool_rx_flow_spec *fs)
1668 {
1669 	t->ip_proto = IPPROTO_TCP;
1670 
1671 	if (qede_flow_spec_to_tuple_ipv4_common(edev, t, fs))
1672 		return -EINVAL;
1673 
1674 	return 0;
1675 }
1676 
1677 static int qede_flow_spec_to_tuple_udpv4(struct qede_dev *edev,
1678 					 struct qede_arfs_tuple *t,
1679 					 struct ethtool_rx_flow_spec *fs)
1680 {
1681 	t->ip_proto = IPPROTO_UDP;
1682 
1683 	if (qede_flow_spec_to_tuple_ipv4_common(edev, t, fs))
1684 		return -EINVAL;
1685 
1686 	return 0;
1687 }
1688 
1689 static int qede_flow_spec_to_tuple_ipv6_common(struct qede_dev *edev,
1690 					       struct qede_arfs_tuple *t,
1691 					       struct ethtool_rx_flow_spec *fs)
1692 {
1693 	struct in6_addr zero_addr;
1694 	void *p;
1695 
1696 	p = &zero_addr;
1697 	memset(p, 0, sizeof(zero_addr));
1698 
1699 	if ((fs->h_u.tcp_ip6_spec.psrc &
1700 	     fs->m_u.tcp_ip6_spec.psrc) != fs->h_u.tcp_ip6_spec.psrc) {
1701 		DP_INFO(edev, "Don't support port-masks\n");
1702 		return -EOPNOTSUPP;
1703 	}
1704 
1705 	if ((fs->h_u.tcp_ip6_spec.pdst &
1706 	     fs->m_u.tcp_ip6_spec.pdst) != fs->h_u.tcp_ip6_spec.pdst) {
1707 		DP_INFO(edev, "Don't support port-masks\n");
1708 		return -EOPNOTSUPP;
1709 	}
1710 
1711 	if (fs->h_u.tcp_ip6_spec.tclass) {
1712 		DP_INFO(edev, "Don't support tclass\n");
1713 		return -EOPNOTSUPP;
1714 	}
1715 
1716 	t->eth_proto = htons(ETH_P_IPV6);
1717 	memcpy(&t->src_ipv6, &fs->h_u.tcp_ip6_spec.ip6src,
1718 	       sizeof(struct in6_addr));
1719 	memcpy(&t->dst_ipv6, &fs->h_u.tcp_ip6_spec.ip6dst,
1720 	       sizeof(struct in6_addr));
1721 	t->src_port = fs->h_u.tcp_ip6_spec.psrc;
1722 	t->dst_port = fs->h_u.tcp_ip6_spec.pdst;
1723 
1724 	/* We must make sure we have a valid 4-tuple or only dest port
1725 	 * or only src ip as an input
1726 	 */
1727 	if (t->src_port && t->dst_port &&
1728 	    memcmp(&t->src_ipv6, p, sizeof(struct in6_addr)) &&
1729 	    memcmp(&t->dst_ipv6, p, sizeof(struct in6_addr))) {
1730 		t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
1731 	} else if (!t->src_port && t->dst_port &&
1732 		   !memcmp(&t->src_ipv6, p, sizeof(struct in6_addr)) &&
1733 		   !memcmp(&t->dst_ipv6, p, sizeof(struct in6_addr))) {
1734 		t->mode = QED_FILTER_CONFIG_MODE_L4_PORT;
1735 	} else if (!t->src_port && !t->dst_port &&
1736 		   !memcmp(&t->dst_ipv6, p, sizeof(struct in6_addr)) &&
1737 		   memcmp(&t->src_ipv6, p, sizeof(struct in6_addr))) {
1738 		t->mode = QED_FILTER_CONFIG_MODE_IP_SRC;
1739 	} else {
1740 		DP_INFO(edev, "Invalid N-tuple\n");
1741 		return -EOPNOTSUPP;
1742 	}
1743 
1744 	t->ip_comp = qede_flow_spec_ipv6_cmp;
1745 	t->build_hdr = qede_flow_build_ipv6_hdr;
1746 
1747 	return 0;
1748 }
1749 
1750 static int qede_flow_spec_to_tuple_tcpv6(struct qede_dev *edev,
1751 					 struct qede_arfs_tuple *t,
1752 					 struct ethtool_rx_flow_spec *fs)
1753 {
1754 	t->ip_proto = IPPROTO_TCP;
1755 
1756 	if (qede_flow_spec_to_tuple_ipv6_common(edev, t, fs))
1757 		return -EINVAL;
1758 
1759 	return 0;
1760 }
1761 
1762 static int qede_flow_spec_to_tuple_udpv6(struct qede_dev *edev,
1763 					 struct qede_arfs_tuple *t,
1764 					 struct ethtool_rx_flow_spec *fs)
1765 {
1766 	t->ip_proto = IPPROTO_UDP;
1767 
1768 	if (qede_flow_spec_to_tuple_ipv6_common(edev, t, fs))
1769 		return -EINVAL;
1770 
1771 	return 0;
1772 }
1773 
1774 static int qede_flow_spec_to_tuple(struct qede_dev *edev,
1775 				   struct qede_arfs_tuple *t,
1776 				   struct ethtool_rx_flow_spec *fs)
1777 {
1778 	memset(t, 0, sizeof(*t));
1779 
1780 	if (qede_flow_spec_validate_unused(edev, fs))
1781 		return -EOPNOTSUPP;
1782 
1783 	switch ((fs->flow_type & ~FLOW_EXT)) {
1784 	case TCP_V4_FLOW:
1785 		return qede_flow_spec_to_tuple_tcpv4(edev, t, fs);
1786 	case UDP_V4_FLOW:
1787 		return qede_flow_spec_to_tuple_udpv4(edev, t, fs);
1788 	case TCP_V6_FLOW:
1789 		return qede_flow_spec_to_tuple_tcpv6(edev, t, fs);
1790 	case UDP_V6_FLOW:
1791 		return qede_flow_spec_to_tuple_udpv6(edev, t, fs);
1792 	default:
1793 		DP_VERBOSE(edev, NETIF_MSG_IFUP,
1794 			   "Can't support flow of type %08x\n", fs->flow_type);
1795 		return -EOPNOTSUPP;
1796 	}
1797 
1798 	return 0;
1799 }
1800 
1801 static int qede_flow_spec_validate(struct qede_dev *edev,
1802 				   struct ethtool_rx_flow_spec *fs,
1803 				   struct qede_arfs_tuple *t)
1804 {
1805 	if (fs->location >= QEDE_RFS_MAX_FLTR) {
1806 		DP_INFO(edev, "Location out-of-bounds\n");
1807 		return -EINVAL;
1808 	}
1809 
1810 	/* Check location isn't already in use */
1811 	if (test_bit(fs->location, edev->arfs->arfs_fltr_bmap)) {
1812 		DP_INFO(edev, "Location already in use\n");
1813 		return -EINVAL;
1814 	}
1815 
1816 	/* Check if the filtering-mode could support the filter */
1817 	if (edev->arfs->filter_count &&
1818 	    edev->arfs->mode != t->mode) {
1819 		DP_INFO(edev,
1820 			"flow_spec would require filtering mode %08x, but %08x is configured\n",
1821 			t->mode, edev->arfs->mode);
1822 		return -EINVAL;
1823 	}
1824 
1825 	/* If drop requested then no need to validate other data */
1826 	if (fs->ring_cookie == RX_CLS_FLOW_DISC)
1827 		return 0;
1828 
1829 	if (ethtool_get_flow_spec_ring_vf(fs->ring_cookie))
1830 		return 0;
1831 
1832 	if (fs->ring_cookie >= QEDE_RSS_COUNT(edev)) {
1833 		DP_INFO(edev, "Queue out-of-bounds\n");
1834 		return -EINVAL;
1835 	}
1836 
1837 	return 0;
1838 }
1839 
1840 /* Must be called while qede lock is held */
1841 static struct qede_arfs_fltr_node *
1842 qede_flow_find_fltr(struct qede_dev *edev, struct qede_arfs_tuple *t)
1843 {
1844 	struct qede_arfs_fltr_node *fltr;
1845 	struct hlist_node *temp;
1846 	struct hlist_head *head;
1847 
1848 	head = QEDE_ARFS_BUCKET_HEAD(edev, 0);
1849 
1850 	hlist_for_each_entry_safe(fltr, temp, head, node) {
1851 		if (fltr->tuple.ip_proto == t->ip_proto &&
1852 		    fltr->tuple.src_port == t->src_port &&
1853 		    fltr->tuple.dst_port == t->dst_port &&
1854 		    t->ip_comp(&fltr->tuple, t))
1855 			return fltr;
1856 	}
1857 
1858 	return NULL;
1859 }
1860 
1861 static void qede_flow_set_destination(struct qede_dev *edev,
1862 				      struct qede_arfs_fltr_node *n,
1863 				      struct ethtool_rx_flow_spec *fs)
1864 {
1865 	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
1866 		n->b_is_drop = true;
1867 		return;
1868 	}
1869 
1870 	n->vfid = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
1871 	n->rxq_id = ethtool_get_flow_spec_ring(fs->ring_cookie);
1872 	n->next_rxq_id = n->rxq_id;
1873 
1874 	if (n->vfid)
1875 		DP_VERBOSE(edev, QED_MSG_SP,
1876 			   "Configuring N-tuple for VF 0x%02x\n", n->vfid - 1);
1877 }
1878 
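/* Add an ethtool n-tuple classification rule: translate the flow spec into a
 * tuple, validate the location and filtering mode, reject duplicates, build a
 * minimal packet header describing the flow and poll for the result of the
 * asynchronous configuration.
 */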
1879 int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info)
1880 {
1881 	struct ethtool_rx_flow_spec *fsp = &info->fs;
1882 	struct qede_arfs_fltr_node *n;
1883 	struct qede_arfs_tuple t;
1884 	int min_hlen, rc;
1885 
1886 	__qede_lock(edev);
1887 
1888 	if (!edev->arfs) {
1889 		rc = -EPERM;
1890 		goto unlock;
1891 	}
1892 
1893 	/* Translate the flow specification into something fitting our DB */
1894 	rc = qede_flow_spec_to_tuple(edev, &t, fsp);
1895 	if (rc)
1896 		goto unlock;
1897 
1898 	/* Make sure location is valid and filter isn't already set */
1899 	rc = qede_flow_spec_validate(edev, fsp, &t);
1900 	if (rc)
1901 		goto unlock;
1902 
1903 	if (qede_flow_find_fltr(edev, &t)) {
1904 		rc = -EINVAL;
1905 		goto unlock;
1906 	}
1907 
1908 	n = kzalloc(sizeof(*n), GFP_KERNEL);
1909 	if (!n) {
1910 		rc = -ENOMEM;
1911 		goto unlock;
1912 	}
1913 
1914 	min_hlen = qede_flow_get_min_header_size(&t);
1915 	n->data = kzalloc(min_hlen, GFP_KERNEL);
1916 	if (!n->data) {
1917 		kfree(n);
1918 		rc = -ENOMEM;
1919 		goto unlock;
1920 	}
1921 
1922 	n->sw_id = fsp->location;
1923 	set_bit(n->sw_id, edev->arfs->arfs_fltr_bmap);
1924 	n->buf_len = min_hlen;
1925 
1926 	memcpy(&n->tuple, &t, sizeof(n->tuple));
1927 
1928 	qede_flow_set_destination(edev, n, fsp);
1929 
1930 	/* Build a minimal header according to the flow */
1931 	n->tuple.build_hdr(&n->tuple, n->data);
1932 
1933 	rc = qede_enqueue_fltr_and_config_searcher(edev, n, 0);
1934 	if (rc)
1935 		goto unlock;
1936 
1937 	qede_configure_arfs_fltr(edev, n, n->rxq_id, true);
1938 	rc = qede_poll_arfs_filter_config(edev, n);
1939 unlock:
1940 	__qede_unlock(edev);
1941 
1942 	return rc;
1943 }
1944 
1945 int qede_del_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info)
1946 {
1947 	struct ethtool_rx_flow_spec *fsp = &info->fs;
1948 	struct qede_arfs_fltr_node *fltr = NULL;
1949 	int rc = -EPERM;
1950 
1951 	__qede_lock(edev);
1952 	if (!edev->arfs)
1953 		goto unlock;
1954 
1955 	fltr = qede_get_arfs_fltr_by_loc(QEDE_ARFS_BUCKET_HEAD(edev, 0),
1956 					 fsp->location);
1957 	if (!fltr)
1958 		goto unlock;
1959 
1960 	qede_configure_arfs_fltr(edev, fltr, fltr->rxq_id, false);
1961 
1962 	rc = qede_poll_arfs_filter_config(edev, fltr);
1963 	if (rc == 0)
1964 		qede_dequeue_fltr_and_config_searcher(edev, fltr);
1965 
1966 unlock:
1967 	__qede_unlock(edev);
1968 	return rc;
1969 }
1970 
1971 int qede_get_arfs_filter_count(struct qede_dev *edev)
1972 {
1973 	int count = 0;
1974 
1975 	__qede_lock(edev);
1976 
1977 	if (!edev->arfs)
1978 		goto unlock;
1979 
1980 	count = edev->arfs->filter_count;
1981 
1982 unlock:
1983 	__qede_unlock(edev);
1984 	return count;
1985 }
1986