// SPDX-License-Identifier: GPL-2.0-only
/*
 * Huawei HiNIC PCI Express Linux driver
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/u64_stats_sync.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/prefetch.h>
#include <linux/cpumask.h>
#include <linux/if_vlan.h>
#include <asm/barrier.h>

#include "hinic_common.h"
#include "hinic_hw_if.h"
#include "hinic_hw_wqe.h"
#include "hinic_hw_wq.h"
#include "hinic_hw_qp.h"
#include "hinic_hw_dev.h"
#include "hinic_rx.h"
#include "hinic_dev.h"

#define RX_IRQ_NO_PENDING               0
#define RX_IRQ_NO_COALESC               0
#define RX_IRQ_NO_LLI_TIMER             0
#define RX_IRQ_NO_CREDIT                0
#define RX_IRQ_NO_RESEND_TIMER          0
#define HINIC_RX_BUFFER_WRITE           16

#define HINIC_RX_IPV6_PKT		7
#define LRO_PKT_HDR_LEN_IPV4		66
#define LRO_PKT_HDR_LEN_IPV6		86
#define LRO_REPLENISH_THLD		256

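/* Estimated header length of each coalesced segment, used to account
 * the extra bytes of an LRO aggregated pkt in rxq_recv()
 */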
#define LRO_PKT_HDR_LEN(cqe)		\
	(HINIC_GET_RX_PKT_TYPE(be32_to_cpu((cqe)->offload_type)) == \
	 HINIC_RX_IPV6_PKT ? LRO_PKT_HDR_LEN_IPV6 : LRO_PKT_HDR_LEN_IPV4)

/**
 * hinic_rxq_clean_stats - Clean the statistics of a specific queue
 * @rxq: Logical Rx Queue
 **/
void hinic_rxq_clean_stats(struct hinic_rxq *rxq)
{
	struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;

	u64_stats_update_begin(&rxq_stats->syncp);
	rxq_stats->pkts  = 0;
	rxq_stats->bytes = 0;
	rxq_stats->errors = 0;
	rxq_stats->csum_errors = 0;
	rxq_stats->other_errors = 0;
	u64_stats_update_end(&rxq_stats->syncp);
}

/**
 * hinic_rxq_get_stats - get statistics of Rx Queue
 * @rxq: Logical Rx Queue
 * @stats: return updated stats here
 **/
void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats)
{
	struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;
	unsigned int start;

	u64_stats_update_begin(&stats->syncp);
	do {
		start = u64_stats_fetch_begin(&rxq_stats->syncp);
		stats->pkts = rxq_stats->pkts;
		stats->bytes = rxq_stats->bytes;
		stats->errors = rxq_stats->csum_errors +
				rxq_stats->other_errors;
		stats->csum_errors = rxq_stats->csum_errors;
		stats->other_errors = rxq_stats->other_errors;
	} while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
	u64_stats_update_end(&stats->syncp);
}

/**
 * rxq_stats_init - Initialize the statistics of a specific queue
 * @rxq: Logical Rx Queue
 **/
static void rxq_stats_init(struct hinic_rxq *rxq)
{
	struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;

	u64_stats_init(&rxq_stats->syncp);
	hinic_rxq_clean_stats(rxq);
}

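/**
 * rx_csum - set the skb ip_summed field based on the hw csum result
 * @rxq: rx queue
 * @status: status field of the rx cqe
 * @skb: received skb
 **/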
static void rx_csum(struct hinic_rxq *rxq, u32 status,
		    struct sk_buff *skb)
{
	struct net_device *netdev = rxq->netdev;
	u32 csum_err;

	csum_err = HINIC_RQ_CQE_STATUS_GET(status, CSUM_ERR);

	if (!(netdev->features & NETIF_F_RXCSUM))
		return;

	if (!csum_err) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		if (!(csum_err & (HINIC_RX_CSUM_HW_CHECK_NONE |
				  HINIC_RX_CSUM_IPSU_OTHER_ERR)))
			rxq->rxq_stats.csum_errors++;
		skb->ip_summed = CHECKSUM_NONE;
	}
}

/**
 * rx_alloc_skb - allocate skb and map it to dma address
 * @rxq: rx queue
 * @dma_addr: returned dma address for the skb
 *
 * Return skb
 **/
static struct sk_buff *rx_alloc_skb(struct hinic_rxq *rxq,
				    dma_addr_t *dma_addr)
{
	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_hwif *hwif = hwdev->hwif;
	struct pci_dev *pdev = hwif->pdev;
	struct sk_buff *skb;
	dma_addr_t addr;
	int err;

	skb = netdev_alloc_skb_ip_align(rxq->netdev, rxq->rq->buf_sz);
	if (!skb) {
		netdev_err(rxq->netdev, "Failed to allocate Rx SKB\n");
		return NULL;
	}

	addr = dma_map_single(&pdev->dev, skb->data, rxq->rq->buf_sz,
			      DMA_FROM_DEVICE);
	err = dma_mapping_error(&pdev->dev, addr);
	if (err) {
		dev_err(&pdev->dev, "Failed to map Rx DMA, err = %d\n", err);
		goto err_rx_map;
	}

	*dma_addr = addr;
	return skb;

err_rx_map:
	dev_kfree_skb_any(skb);
	return NULL;
}

/**
 * rx_unmap_skb - unmap the dma address of the skb
 * @rxq: rx queue
 * @dma_addr: dma address of the skb
 **/
static void rx_unmap_skb(struct hinic_rxq *rxq, dma_addr_t dma_addr)
{
	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_hwif *hwif = hwdev->hwif;
	struct pci_dev *pdev = hwif->pdev;

	dma_unmap_single(&pdev->dev, dma_addr, rxq->rq->buf_sz,
			 DMA_FROM_DEVICE);
}

/**
 * rx_free_skb - unmap and free skb
 * @rxq: rx queue
 * @skb: skb to free
 * @dma_addr: dma address of the skb
 **/
static void rx_free_skb(struct hinic_rxq *rxq, struct sk_buff *skb,
			dma_addr_t dma_addr)
{
	rx_unmap_skb(rxq, dma_addr);
	dev_kfree_skb_any(skb);
}

/**
 * rx_alloc_pkts - allocate pkts in rx queue
 * @rxq: rx queue
 *
 * Return number of skbs allocated
 **/
static int rx_alloc_pkts(struct hinic_rxq *rxq)
{
	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
	struct hinic_rq_wqe *rq_wqe;
	unsigned int free_wqebbs;
	struct hinic_sge sge;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	u16 prod_idx;
	int i;

	free_wqebbs = hinic_get_rq_free_wqebbs(rxq->rq);

	/* Limit the allocation chunks */
	if (free_wqebbs > nic_dev->rx_weight)
		free_wqebbs = nic_dev->rx_weight;

	for (i = 0; i < free_wqebbs; i++) {
		skb = rx_alloc_skb(rxq, &dma_addr);
		if (!skb) {
			netdev_err(rxq->netdev, "Failed to alloc Rx skb\n");
			goto skb_out;
		}

		hinic_set_sge(&sge, dma_addr, skb->len);

		rq_wqe = hinic_rq_get_wqe(rxq->rq, HINIC_RQ_WQE_SIZE,
					  &prod_idx);
		if (!rq_wqe) {
			rx_free_skb(rxq, skb, dma_addr);
			goto skb_out;
		}

		hinic_rq_prepare_wqe(rxq->rq, prod_idx, rq_wqe, &sge);

		hinic_rq_write_wqe(rxq->rq, prod_idx, rq_wqe, skb);
	}

skb_out:
	if (i) {
		wmb();  /* write all the wqes before updating the PI */

		hinic_rq_update(rxq->rq, prod_idx);
	}

	return i;
}

/**
 * free_all_rx_skbs - free all skbs in rx queue
 * @rxq: rx queue
 **/
static void free_all_rx_skbs(struct hinic_rxq *rxq)
{
	struct hinic_rq *rq = rxq->rq;
	struct hinic_hw_wqe *hw_wqe;
	struct hinic_sge sge;
	u16 ci;

	while ((hw_wqe = hinic_read_wqe(rq->wq, HINIC_RQ_WQE_SIZE, &ci))) {
		if (IS_ERR(hw_wqe))
			break;

		hinic_rq_get_sge(rq, &hw_wqe->rq_wqe, ci, &sge);

		hinic_put_wqe(rq->wq, HINIC_RQ_WQE_SIZE);

		rx_free_skb(rxq, rq->saved_skb[ci], hinic_sge_to_dma(&sge));
	}
}

/**
 * rx_recv_jumbo_pkt - Rx handler for jumbo pkt
 * @rxq: rx queue
 * @head_skb: the first skb in the list
 * @left_pkt_len: remaining size of the pkt, excluding the head skb
 * @ci: consumer index
 *
 * Return number of wqes used for the remainder of the pkt
 **/
static int rx_recv_jumbo_pkt(struct hinic_rxq *rxq, struct sk_buff *head_skb,
			     unsigned int left_pkt_len, u16 ci)
{
	struct sk_buff *skb, *curr_skb = head_skb;
	struct hinic_rq_wqe *rq_wqe;
	unsigned int curr_len;
	struct hinic_sge sge;
	int num_wqes = 0;

	while (left_pkt_len > 0) {
		rq_wqe = hinic_rq_read_next_wqe(rxq->rq, HINIC_RQ_WQE_SIZE,
						&skb, &ci);

		num_wqes++;

		hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge);

		rx_unmap_skb(rxq, hinic_sge_to_dma(&sge));

		prefetch(skb->data);

		curr_len = (left_pkt_len > HINIC_RX_BUF_SZ) ? HINIC_RX_BUF_SZ :
			    left_pkt_len;

		left_pkt_len -= curr_len;

		__skb_put(skb, curr_len);

		if (curr_skb == head_skb)
			skb_shinfo(head_skb)->frag_list = skb;
		else
			curr_skb->next = skb;

		head_skb->len += skb->len;
		head_skb->data_len += skb->len;
		head_skb->truesize += skb->truesize;

		curr_skb = skb;
	}

	return num_wqes;
}

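/**
 * hinic_copy_lp_data - copy a received loopback test pkt to the check buffer
 * @nic_dev: nic device
 * @skb: received loopback test pkt
 **/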
static void hinic_copy_lp_data(struct hinic_dev *nic_dev,
			       struct sk_buff *skb)
{
	struct net_device *netdev = nic_dev->netdev;
	u8 *lb_buf = nic_dev->lb_test_rx_buf;
	int lb_len = nic_dev->lb_pkt_len;
	int pkt_offset, frag_len, i;
	void *frag_data = NULL;

	if (nic_dev->lb_test_rx_idx == LP_PKT_CNT) {
		nic_dev->lb_test_rx_idx = 0;
		netif_warn(nic_dev, drv, netdev, "Loopback test warning, received too many test pkts\n");
	}

	if (skb->len != nic_dev->lb_pkt_len) {
		netif_warn(nic_dev, drv, netdev, "Wrong packet length\n");
		nic_dev->lb_test_rx_idx++;
		return;
	}

	pkt_offset = nic_dev->lb_test_rx_idx * lb_len;
	frag_len = (int)skb_headlen(skb);
	memcpy(lb_buf + pkt_offset, skb->data, frag_len);
	pkt_offset += frag_len;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag_data = skb_frag_address(&skb_shinfo(skb)->frags[i]);
		frag_len = (int)skb_frag_size(&skb_shinfo(skb)->frags[i]);
		memcpy(lb_buf + pkt_offset, frag_data, frag_len);
		pkt_offset += frag_len;
	}
	nic_dev->lb_test_rx_idx++;
}

/**
 * rxq_recv - Rx handler
 * @rxq: rx queue
 * @budget: maximum pkts to process
 *
 * Return number of pkts received
 **/
static int rxq_recv(struct hinic_rxq *rxq, int budget)
{
	struct hinic_qp *qp = container_of(rxq->rq, struct hinic_qp, rq);
	struct net_device *netdev = rxq->netdev;
	u64 pkt_len = 0, rx_bytes = 0;
	struct hinic_rq *rq = rxq->rq;
	struct hinic_rq_wqe *rq_wqe;
	struct hinic_dev *nic_dev;
	unsigned int free_wqebbs;
	struct hinic_rq_cqe *cqe;
	int num_wqes, pkts = 0;
	struct hinic_sge sge;
	unsigned int status;
	struct sk_buff *skb;
	u32 offload_type;
	u16 ci, num_lro;
	u16 num_wqe = 0;
	u32 vlan_len;
	u16 vid;

	nic_dev = netdev_priv(netdev);

	while (pkts < budget) {
		num_wqes = 0;

		rq_wqe = hinic_rq_read_wqe(rxq->rq, HINIC_RQ_WQE_SIZE, &skb,
					   &ci);
		if (!rq_wqe)
			break;

		/* make sure we read rx_done before packet length */
		dma_rmb();

		cqe = rq->cqe[ci];
		status = be32_to_cpu(cqe->status);
		hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge);

		rx_unmap_skb(rxq, hinic_sge_to_dma(&sge));

		rx_csum(rxq, status, skb);

		prefetch(skb->data);

		pkt_len = sge.len;

		if (pkt_len <= HINIC_RX_BUF_SZ) {
			__skb_put(skb, pkt_len);
		} else {
			__skb_put(skb, HINIC_RX_BUF_SZ);
			num_wqes = rx_recv_jumbo_pkt(rxq, skb, pkt_len -
						     HINIC_RX_BUF_SZ, ci);
		}

		hinic_rq_put_wqe(rq, ci,
				 (num_wqes + 1) * HINIC_RQ_WQE_SIZE);

		offload_type = be32_to_cpu(cqe->offload_type);
		vlan_len = be32_to_cpu(cqe->len);
		if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    HINIC_GET_RX_VLAN_OFFLOAD_EN(offload_type)) {
			vid = HINIC_GET_RX_VLAN_TAG(vlan_len);
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
		}

		if (unlikely(nic_dev->flags & HINIC_LP_TEST))
			hinic_copy_lp_data(nic_dev, skb);

		skb_record_rx_queue(skb, qp->q_id);
		skb->protocol = eth_type_trans(skb, rxq->netdev);

		napi_gro_receive(&rxq->napi, skb);

		pkts++;
		rx_bytes += pkt_len;

		num_lro = HINIC_GET_RX_NUM_LRO(status);
		if (num_lro) {
			rx_bytes += ((num_lro - 1) *
				     LRO_PKT_HDR_LEN(cqe));

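			/* one wqe is consumed per full rx buffer, plus one
			 * for any remaining bytes of the aggregated pkt
			 */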
			num_wqe += (u16)(pkt_len >> rxq->rx_buff_shift) +
				   ((pkt_len & (rxq->buf_len - 1)) ? 1 : 0);
		}

		cqe->status = 0;

		if (num_wqe >= LRO_REPLENISH_THLD)
			break;
	}

	free_wqebbs = hinic_get_rq_free_wqebbs(rxq->rq);
	if (free_wqebbs > HINIC_RX_BUFFER_WRITE)
		rx_alloc_pkts(rxq);

	u64_stats_update_begin(&rxq->rxq_stats.syncp);
	rxq->rxq_stats.pkts += pkts;
	rxq->rxq_stats.bytes += rx_bytes;
	u64_stats_update_end(&rxq->rxq_stats.syncp);

	return pkts;
}

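/**
 * rx_poll - NAPI Rx polling callback
 * @napi: napi structure of the rx queue
 * @budget: maximum pkts to process
 *
 * Return number of pkts received
 **/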
static int rx_poll(struct napi_struct *napi, int budget)
{
	struct hinic_rxq *rxq = container_of(napi, struct hinic_rxq, napi);
	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
	struct hinic_rq *rq = rxq->rq;
	int pkts;

	pkts = rxq_recv(rxq, budget);
	if (pkts >= budget)
		return budget;

	napi_complete(napi);

	if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
		hinic_hwdev_set_msix_state(nic_dev->hwdev,
					   rq->msix_entry,
					   HINIC_MSIX_ENABLE);

	return pkts;
}

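/**
 * rx_add_napi - add NAPI handling to the rx queue
 * @rxq: rx queue
 **/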
static void rx_add_napi(struct hinic_rxq *rxq)
{
	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);

	netif_napi_add(rxq->netdev, &rxq->napi, rx_poll, nic_dev->rx_weight);
	napi_enable(&rxq->napi);
}

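/**
 * rx_del_napi - remove NAPI handling from the rx queue
 * @rxq: rx queue
 **/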
static void rx_del_napi(struct hinic_rxq *rxq)
{
	napi_disable(&rxq->napi);
	netif_napi_del(&rxq->napi);
}

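/**
 * rx_irq - Interrupt handler for the rx queue
 * @irq: irq number
 * @data: rx queue that raised the interrupt
 *
 * Return IRQ_HANDLED
 **/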
static irqreturn_t rx_irq(int irq, void *data)
{
	struct hinic_rxq *rxq = (struct hinic_rxq *)data;
	struct hinic_rq *rq = rxq->rq;
	struct hinic_dev *nic_dev;

	/* Disable the interrupt until NAPI processing is complete */
	nic_dev = netdev_priv(rxq->netdev);
	if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
		hinic_hwdev_set_msix_state(nic_dev->hwdev,
					   rq->msix_entry,
					   HINIC_MSIX_DISABLE);

	hinic_hwdev_msix_cnt_set(nic_dev->hwdev, rq->msix_entry);

	napi_schedule(&rxq->napi);
	return IRQ_HANDLED;
}

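/**
 * rx_request_irq - enable NAPI, set the interrupt attributes and request irq
 * @rxq: rx queue
 *
 * Return 0 - Success, negative - Failure
 **/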
static int rx_request_irq(struct hinic_rxq *rxq)
{
	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
	struct hinic_msix_config interrupt_info = {0};
	struct hinic_intr_coal_info *intr_coal = NULL;
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_rq *rq = rxq->rq;
	struct hinic_qp *qp;
	int err;

	qp = container_of(rq, struct hinic_qp, rq);

	rx_add_napi(rxq);

	hinic_hwdev_msix_set(hwdev, rq->msix_entry,
			     RX_IRQ_NO_PENDING, RX_IRQ_NO_COALESC,
			     RX_IRQ_NO_LLI_TIMER, RX_IRQ_NO_CREDIT,
			     RX_IRQ_NO_RESEND_TIMER);

	intr_coal = &nic_dev->rx_intr_coalesce[qp->q_id];
	interrupt_info.msix_index = rq->msix_entry;
	interrupt_info.coalesce_timer_cnt = intr_coal->coalesce_timer_cfg;
	interrupt_info.pending_cnt = intr_coal->pending_limt;
	interrupt_info.resend_timer_cnt = intr_coal->resend_timer_cfg;

	err = hinic_set_interrupt_cfg(hwdev, &interrupt_info);
	if (err) {
		netif_err(nic_dev, drv, rxq->netdev,
			  "Failed to set RX interrupt coalescing attribute\n");
		goto err_req_irq;
	}

	err = request_irq(rq->irq, rx_irq, 0, rxq->irq_name, rxq);
	if (err)
		goto err_req_irq;

	cpumask_set_cpu(qp->q_id % num_online_cpus(), &rq->affinity_mask);
	err = irq_set_affinity_hint(rq->irq, &rq->affinity_mask);
	if (err)
		goto err_irq_affinity;

	return 0;

err_irq_affinity:
	free_irq(rq->irq, rxq);
err_req_irq:
	rx_del_napi(rxq);
	return err;
}

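/**
 * rx_free_irq - free the irq of the rx queue and remove NAPI
 * @rxq: rx queue
 **/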
static void rx_free_irq(struct hinic_rxq *rxq)
{
	struct hinic_rq *rq = rxq->rq;

	irq_set_affinity_hint(rq->irq, NULL);
	free_irq(rq->irq, rxq);
	rx_del_napi(rxq);
}

/**
 * hinic_init_rxq - Initialize the Rx Queue
 * @rxq: Logical Rx Queue
 * @rq: Hardware Rx Queue to connect the Logical queue with
 * @netdev: network device to connect the Logical queue with
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq,
		   struct net_device *netdev)
{
	struct hinic_qp *qp = container_of(rq, struct hinic_qp, rq);
	int err, pkts;

	rxq->netdev = netdev;
	rxq->rq = rq;
	rxq->buf_len = HINIC_RX_BUF_SZ;
	rxq->rx_buff_shift = ilog2(HINIC_RX_BUF_SZ);

	rxq_stats_init(rxq);

	rxq->irq_name = devm_kasprintf(&netdev->dev, GFP_KERNEL,
				       "%s_rxq%d", netdev->name, qp->q_id);
	if (!rxq->irq_name)
		return -ENOMEM;

	pkts = rx_alloc_pkts(rxq);
	if (!pkts) {
		err = -ENOMEM;
		goto err_rx_pkts;
	}

	err = rx_request_irq(rxq);
	if (err) {
		netdev_err(netdev, "Failed to request Rx irq\n");
		goto err_req_rx_irq;
	}

	return 0;

err_req_rx_irq:
err_rx_pkts:
	free_all_rx_skbs(rxq);
	devm_kfree(&netdev->dev, rxq->irq_name);
	return err;
}

/**
 * hinic_clean_rxq - Clean the Rx Queue
 * @rxq: Logical Rx Queue
 **/
void hinic_clean_rxq(struct hinic_rxq *rxq)
{
	struct net_device *netdev = rxq->netdev;

	rx_free_irq(rxq);

	free_all_rx_skbs(rxq);
	devm_kfree(&netdev->dev, rxq->irq_name);
}