// SPDX-License-Identifier: GPL-2.0-only
/*
 * Huawei HiNIC PCI Express Linux driver
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/u64_stats_sync.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/prefetch.h>
#include <linux/cpumask.h>
#include <linux/if_vlan.h>
#include <asm/barrier.h>

#include "hinic_common.h"
#include "hinic_hw_if.h"
#include "hinic_hw_wqe.h"
#include "hinic_hw_wq.h"
#include "hinic_hw_qp.h"
#include "hinic_hw_dev.h"
#include "hinic_rx.h"
#include "hinic_dev.h"

#define RX_IRQ_NO_PENDING		0
#define RX_IRQ_NO_COALESC		0
#define RX_IRQ_NO_LLI_TIMER		0
#define RX_IRQ_NO_CREDIT		0
#define RX_IRQ_NO_RESEND_TIMER		0
#define HINIC_RX_BUFFER_WRITE		16

#define HINIC_RX_IPV6_PKT		7
#define LRO_PKT_HDR_LEN_IPV4		66
#define LRO_PKT_HDR_LEN_IPV6		86
#define LRO_REPLENISH_THLD		256

#define LRO_PKT_HDR_LEN(cqe)		\
	(HINIC_GET_RX_PKT_TYPE(be32_to_cpu((cqe)->offload_type)) == \
	 HINIC_RX_IPV6_PKT ? LRO_PKT_HDR_LEN_IPV6 : LRO_PKT_HDR_LEN_IPV4)

/**
 * hinic_rxq_clean_stats - Clean the statistics of the specific queue
 * @rxq: Logical Rx Queue
 **/
static void hinic_rxq_clean_stats(struct hinic_rxq *rxq)
{
	struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;

	u64_stats_update_begin(&rxq_stats->syncp);
	rxq_stats->pkts = 0;
	rxq_stats->bytes = 0;
	rxq_stats->errors = 0;
	rxq_stats->csum_errors = 0;
	rxq_stats->other_errors = 0;
	u64_stats_update_end(&rxq_stats->syncp);
}

/**
 * hinic_rxq_get_stats - get statistics of Rx Queue
 * @rxq: Logical Rx Queue
 * @stats: return updated stats here
 **/
void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats)
{
	struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&rxq_stats->syncp);
		stats->pkts = rxq_stats->pkts;
		stats->bytes = rxq_stats->bytes;
		stats->errors = rxq_stats->csum_errors +
				rxq_stats->other_errors;
		stats->csum_errors = rxq_stats->csum_errors;
		stats->other_errors = rxq_stats->other_errors;
	} while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
}

/**
 * rxq_stats_init - Initialize the statistics of the specific queue
 * @rxq: Logical Rx Queue
 **/
static void rxq_stats_init(struct hinic_rxq *rxq)
{
	struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;

	u64_stats_init(&rxq_stats->syncp);
	hinic_rxq_clean_stats(rxq);
}

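/**
 * rx_csum - set the skb checksum status based on the HW checksum result
 * @rxq: rx queue that received the packet
 * @status: CQE status word carrying the HW checksum error bits
 * @skb: received skb
 **/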
static void rx_csum(struct hinic_rxq *rxq, u32 status,
		    struct sk_buff *skb)
{
	struct net_device *netdev = rxq->netdev;
	u32 csum_err;

	csum_err = HINIC_RQ_CQE_STATUS_GET(status, CSUM_ERR);

	if (!(netdev->features & NETIF_F_RXCSUM))
		return;

	if (!csum_err) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		if (!(csum_err & (HINIC_RX_CSUM_HW_CHECK_NONE |
				  HINIC_RX_CSUM_IPSU_OTHER_ERR)))
			rxq->rxq_stats.csum_errors++;
		skb->ip_summed = CHECKSUM_NONE;
	}
}

/**
 * rx_alloc_skb - allocate skb and map it to dma address
 * @rxq: rx queue
 * @dma_addr: returned dma address for the skb
 *
 * Return skb
 **/
static struct sk_buff *rx_alloc_skb(struct hinic_rxq *rxq,
				    dma_addr_t *dma_addr)
{
	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_hwif *hwif = hwdev->hwif;
	struct pci_dev *pdev = hwif->pdev;
	struct sk_buff *skb;
	dma_addr_t addr;
	int err;

	skb = netdev_alloc_skb_ip_align(rxq->netdev, rxq->rq->buf_sz);
	if (!skb)
		return NULL;

	addr = dma_map_single(&pdev->dev, skb->data, rxq->rq->buf_sz,
			      DMA_FROM_DEVICE);
	err = dma_mapping_error(&pdev->dev, addr);
	if (err) {
		dev_err(&pdev->dev, "Failed to map Rx DMA, err = %d\n", err);
		goto err_rx_map;
	}

	*dma_addr = addr;
	return skb;

err_rx_map:
	dev_kfree_skb_any(skb);
	return NULL;
}

/**
 * rx_unmap_skb - unmap the dma address of the skb
 * @rxq: rx queue
 * @dma_addr: dma address of the skb
 **/
static void rx_unmap_skb(struct hinic_rxq *rxq, dma_addr_t dma_addr)
{
	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_hwif *hwif = hwdev->hwif;
	struct pci_dev *pdev = hwif->pdev;

	dma_unmap_single(&pdev->dev, dma_addr, rxq->rq->buf_sz,
			 DMA_FROM_DEVICE);
}

/**
 * rx_free_skb - unmap and free skb
 * @rxq: rx queue
 * @skb: skb to free
 * @dma_addr: dma address of the skb
 **/
static void rx_free_skb(struct hinic_rxq *rxq, struct sk_buff *skb,
			dma_addr_t dma_addr)
{
	rx_unmap_skb(rxq, dma_addr);
	dev_kfree_skb_any(skb);
}

/**
 * rx_alloc_pkts - allocate pkts in rx queue
 * @rxq: rx queue
 *
 * Return number of skbs allocated
 **/
static int rx_alloc_pkts(struct hinic_rxq *rxq)
{
	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
	struct hinic_rq_wqe *rq_wqe;
	unsigned int free_wqebbs;
	struct hinic_sge sge;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	u16 prod_idx;
	int i;

	free_wqebbs = hinic_get_rq_free_wqebbs(rxq->rq);

	/* Limit the allocation chunks */
	if (free_wqebbs > nic_dev->rx_weight)
		free_wqebbs = nic_dev->rx_weight;

	for (i = 0; i < free_wqebbs; i++) {
		skb = rx_alloc_skb(rxq, &dma_addr);
		if (!skb)
			goto skb_out;

		hinic_set_sge(&sge, dma_addr, skb->len);

		rq_wqe = hinic_rq_get_wqe(rxq->rq, HINIC_RQ_WQE_SIZE,
					  &prod_idx);
		if (!rq_wqe) {
			rx_free_skb(rxq, skb, dma_addr);
			goto skb_out;
		}

		hinic_rq_prepare_wqe(rxq->rq, prod_idx, rq_wqe, &sge);

		hinic_rq_write_wqe(rxq->rq, prod_idx, rq_wqe, skb);
	}

skb_out:
	if (i) {
		wmb();  /* write all the wqes before updating the PI */

		hinic_rq_update(rxq->rq, prod_idx);
	}

	return i;
}

/**
 * free_all_rx_skbs - free all skbs in rx queue
 * @rxq: rx queue
 **/
static void free_all_rx_skbs(struct hinic_rxq *rxq)
{
	struct hinic_rq *rq = rxq->rq;
	struct hinic_hw_wqe *hw_wqe;
	struct hinic_sge sge;
	u16 ci;

	while ((hw_wqe = hinic_read_wqe(rq->wq, HINIC_RQ_WQE_SIZE, &ci))) {
		if (IS_ERR(hw_wqe))
			break;

		hinic_rq_get_sge(rq, &hw_wqe->rq_wqe, ci, &sge);

		hinic_put_wqe(rq->wq, HINIC_RQ_WQE_SIZE);

		rx_free_skb(rxq, rq->saved_skb[ci], hinic_sge_to_dma(&sge));
	}
}

/**
 * rx_recv_jumbo_pkt - Rx handler for jumbo pkt
 * @rxq: rx queue
 * @head_skb: the first skb in the list
 * @left_pkt_len: remaining size of the pkt excluding the head skb
 * @ci: consumer index
 *
 * Return number of wqes used for the rest of the pkt
 **/
static int rx_recv_jumbo_pkt(struct hinic_rxq *rxq, struct sk_buff *head_skb,
			     unsigned int left_pkt_len, u16 ci)
{
	struct sk_buff *skb, *curr_skb = head_skb;
	struct hinic_rq_wqe *rq_wqe;
	unsigned int curr_len;
	struct hinic_sge sge;
	int num_wqes = 0;

	while (left_pkt_len > 0) {
		rq_wqe = hinic_rq_read_next_wqe(rxq->rq, HINIC_RQ_WQE_SIZE,
						&skb, &ci);

		num_wqes++;

		hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge);

		rx_unmap_skb(rxq, hinic_sge_to_dma(&sge));

		prefetch(skb->data);

		curr_len = (left_pkt_len > HINIC_RX_BUF_SZ) ? HINIC_RX_BUF_SZ :
			    left_pkt_len;

		left_pkt_len -= curr_len;

		__skb_put(skb, curr_len);

		if (curr_skb == head_skb)
			skb_shinfo(head_skb)->frag_list = skb;
		else
			curr_skb->next = skb;

		head_skb->len += skb->len;
		head_skb->data_len += skb->len;
		head_skb->truesize += skb->truesize;

		curr_skb = skb;
	}

	return num_wqes;
}

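/**
 * hinic_copy_lp_data - copy a received loopback test pkt to the check buffer
 * @nic_dev: nic device running the loopback test
 * @skb: received skb holding one loopback test pkt
 **/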
static void hinic_copy_lp_data(struct hinic_dev *nic_dev,
			       struct sk_buff *skb)
{
	struct net_device *netdev = nic_dev->netdev;
	u8 *lb_buf = nic_dev->lb_test_rx_buf;
	int lb_len = nic_dev->lb_pkt_len;
	int pkt_offset, frag_len, i;
	void *frag_data = NULL;

	if (nic_dev->lb_test_rx_idx == LP_PKT_CNT) {
		nic_dev->lb_test_rx_idx = 0;
		netif_warn(nic_dev, drv, netdev, "Loopback test warning, received too many test pkts\n");
	}

	if (skb->len != nic_dev->lb_pkt_len) {
		netif_warn(nic_dev, drv, netdev, "Wrong packet length\n");
		nic_dev->lb_test_rx_idx++;
		return;
	}

	pkt_offset = nic_dev->lb_test_rx_idx * lb_len;
	frag_len = (int)skb_headlen(skb);
	memcpy(lb_buf + pkt_offset, skb->data, frag_len);
	pkt_offset += frag_len;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag_data = skb_frag_address(&skb_shinfo(skb)->frags[i]);
		frag_len = (int)skb_frag_size(&skb_shinfo(skb)->frags[i]);
		memcpy(lb_buf + pkt_offset, frag_data, frag_len);
		pkt_offset += frag_len;
	}
	nic_dev->lb_test_rx_idx++;
}

/**
 * rxq_recv - Rx handler
 * @rxq: rx queue
 * @budget: maximum pkts to process
 *
 * Return number of pkts received
 **/
static int rxq_recv(struct hinic_rxq *rxq, int budget)
{
	struct hinic_qp *qp = container_of(rxq->rq, struct hinic_qp, rq);
	struct net_device *netdev = rxq->netdev;
	u64 pkt_len = 0, rx_bytes = 0;
	struct hinic_rq *rq = rxq->rq;
	struct hinic_rq_wqe *rq_wqe;
	struct hinic_dev *nic_dev;
	unsigned int free_wqebbs;
	struct hinic_rq_cqe *cqe;
	int num_wqes, pkts = 0;
	struct hinic_sge sge;
	unsigned int status;
	struct sk_buff *skb;
	u32 offload_type;
	u16 ci, num_lro;
	u16 num_wqe = 0;
	u32 vlan_len;
	u16 vid;

	nic_dev = netdev_priv(netdev);

	while (pkts < budget) {
		num_wqes = 0;

		rq_wqe = hinic_rq_read_wqe(rxq->rq, HINIC_RQ_WQE_SIZE, &skb,
					   &ci);
		if (!rq_wqe)
			break;

		/* make sure we read rx_done before packet length */
		dma_rmb();

		cqe = rq->cqe[ci];
		status = be32_to_cpu(cqe->status);
		hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge);

		rx_unmap_skb(rxq, hinic_sge_to_dma(&sge));

		rx_csum(rxq, status, skb);

		prefetch(skb->data);

		pkt_len = sge.len;

		if (pkt_len <= HINIC_RX_BUF_SZ) {
			__skb_put(skb, pkt_len);
		} else {
			__skb_put(skb, HINIC_RX_BUF_SZ);
			num_wqes = rx_recv_jumbo_pkt(rxq, skb, pkt_len -
						     HINIC_RX_BUF_SZ, ci);
		}

		hinic_rq_put_wqe(rq, ci,
				 (num_wqes + 1) * HINIC_RQ_WQE_SIZE);

		offload_type = be32_to_cpu(cqe->offload_type);
		vlan_len = be32_to_cpu(cqe->len);
		if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    HINIC_GET_RX_VLAN_OFFLOAD_EN(offload_type)) {
			vid = HINIC_GET_RX_VLAN_TAG(vlan_len);
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
		}

		if (unlikely(nic_dev->flags & HINIC_LP_TEST))
			hinic_copy_lp_data(nic_dev, skb);

		skb_record_rx_queue(skb, qp->q_id);
		skb->protocol = eth_type_trans(skb, rxq->netdev);

		napi_gro_receive(&rxq->napi, skb);

		pkts++;
		rx_bytes += pkt_len;

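		/* For an LRO-coalesced frame, account the header bytes of
		 * every merged pkt beyond the first, and track how many rx
		 * buffers the frame consumed so the loop can break out
		 * early and replenish the RQ.
		 */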
		num_lro = HINIC_GET_RX_NUM_LRO(status);
		if (num_lro) {
			rx_bytes += ((num_lro - 1) *
				     LRO_PKT_HDR_LEN(cqe));

			num_wqe +=
			(u16)(pkt_len >> rxq->rx_buff_shift) +
			((pkt_len & (rxq->buf_len - 1)) ? 1 : 0);
		}

		cqe->status = 0;

		if (num_wqe >= LRO_REPLENISH_THLD)
			break;
	}

	free_wqebbs = hinic_get_rq_free_wqebbs(rxq->rq);
	if (free_wqebbs > HINIC_RX_BUFFER_WRITE)
		rx_alloc_pkts(rxq);

	u64_stats_update_begin(&rxq->rxq_stats.syncp);
	rxq->rxq_stats.pkts += pkts;
	rxq->rxq_stats.bytes += rx_bytes;
	u64_stats_update_end(&rxq->rxq_stats.syncp);

	return pkts;
}

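/**
 * rx_poll - NAPI poll handler of the rx queue
 * @napi: napi instance embedded in the rx queue
 * @budget: maximum pkts to process in one poll
 *
 * Return number of pkts received, or budget if there is more work to do
 **/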
static int rx_poll(struct napi_struct *napi, int budget)
{
	struct hinic_rxq *rxq = container_of(napi, struct hinic_rxq, napi);
	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
	struct hinic_rq *rq = rxq->rq;
	int pkts;

	pkts = rxq_recv(rxq, budget);
	if (pkts >= budget)
		return budget;

	napi_complete(napi);

	if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
		hinic_hwdev_set_msix_state(nic_dev->hwdev,
					   rq->msix_entry,
					   HINIC_MSIX_ENABLE);

	return pkts;
}

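/**
 * rx_add_napi - register and enable the NAPI handler of the rx queue
 * @rxq: rx queue
 **/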
static void rx_add_napi(struct hinic_rxq *rxq)
{
	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);

	netif_napi_add_weight(rxq->netdev, &rxq->napi, rx_poll,
			      nic_dev->rx_weight);
	napi_enable(&rxq->napi);
}

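/**
 * rx_del_napi - disable and unregister the NAPI handler of the rx queue
 * @rxq: rx queue
 **/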
static void rx_del_napi(struct hinic_rxq *rxq)
{
	napi_disable(&rxq->napi);
	netif_napi_del(&rxq->napi);
}

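/**
 * rx_irq - rx interrupt handler, schedules the NAPI poll for the rx queue
 * @irq: irq number
 * @data: rx queue that owns the irq
 **/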
static irqreturn_t rx_irq(int irq, void *data)
{
	struct hinic_rxq *rxq = (struct hinic_rxq *)data;
	struct hinic_rq *rq = rxq->rq;
	struct hinic_dev *nic_dev;

	/* Disable the interrupt until the napi poll has completed */
	nic_dev = netdev_priv(rxq->netdev);
	if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
		hinic_hwdev_set_msix_state(nic_dev->hwdev,
					   rq->msix_entry,
					   HINIC_MSIX_DISABLE);

	hinic_hwdev_msix_cnt_set(nic_dev->hwdev, rq->msix_entry);

	napi_schedule(&rxq->napi);
	return IRQ_HANDLED;
}

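/**
 * rx_request_irq - set up the rx irq: NAPI, coalescing and cpu affinity
 * @rxq: rx queue
 *
 * Return 0 - Success, negative - Failure
 **/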
static int rx_request_irq(struct hinic_rxq *rxq)
{
	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
	struct hinic_msix_config interrupt_info = {0};
	struct hinic_intr_coal_info *intr_coal = NULL;
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_rq *rq = rxq->rq;
	struct hinic_qp *qp;
	int err;

	qp = container_of(rq, struct hinic_qp, rq);

	rx_add_napi(rxq);

	hinic_hwdev_msix_set(hwdev, rq->msix_entry,
			     RX_IRQ_NO_PENDING, RX_IRQ_NO_COALESC,
			     RX_IRQ_NO_LLI_TIMER, RX_IRQ_NO_CREDIT,
			     RX_IRQ_NO_RESEND_TIMER);

	intr_coal = &nic_dev->rx_intr_coalesce[qp->q_id];
	interrupt_info.msix_index = rq->msix_entry;
	interrupt_info.coalesce_timer_cnt = intr_coal->coalesce_timer_cfg;
	interrupt_info.pending_cnt = intr_coal->pending_limt;
	interrupt_info.resend_timer_cnt = intr_coal->resend_timer_cfg;

	err = hinic_set_interrupt_cfg(hwdev, &interrupt_info);
	if (err) {
		netif_err(nic_dev, drv, rxq->netdev,
			  "Failed to set RX interrupt coalescing attribute\n");
		goto err_req_irq;
	}

	err = request_irq(rq->irq, rx_irq, 0, rxq->irq_name, rxq);
	if (err)
		goto err_req_irq;

	cpumask_set_cpu(qp->q_id % num_online_cpus(), &rq->affinity_mask);
	err = irq_set_affinity_and_hint(rq->irq, &rq->affinity_mask);
	if (err)
		goto err_irq_affinity;

	return 0;

err_irq_affinity:
	free_irq(rq->irq, rxq);
err_req_irq:
	rx_del_napi(rxq);
	return err;
}

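/**
 * rx_free_irq - free the rx irq and remove the NAPI handler
 * @rxq: rx queue
 **/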
static void rx_free_irq(struct hinic_rxq *rxq)
{
	struct hinic_rq *rq = rxq->rq;

	irq_update_affinity_hint(rq->irq, NULL);
	free_irq(rq->irq, rxq);
	rx_del_napi(rxq);
}

/**
 * hinic_init_rxq - Initialize the Rx Queue
 * @rxq: Logical Rx Queue
 * @rq: Hardware Rx Queue to connect the Logical queue with
 * @netdev: network device to connect the Logical queue with
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq,
		   struct net_device *netdev)
{
	struct hinic_qp *qp = container_of(rq, struct hinic_qp, rq);
	int err, pkts;

	rxq->netdev = netdev;
	rxq->rq = rq;
	rxq->buf_len = HINIC_RX_BUF_SZ;
	rxq->rx_buff_shift = ilog2(HINIC_RX_BUF_SZ);

	rxq_stats_init(rxq);

	rxq->irq_name = devm_kasprintf(&netdev->dev, GFP_KERNEL,
				       "%s_rxq%d", netdev->name, qp->q_id);
	if (!rxq->irq_name)
		return -ENOMEM;

	pkts = rx_alloc_pkts(rxq);
	if (!pkts) {
		err = -ENOMEM;
		goto err_rx_pkts;
	}

	err = rx_request_irq(rxq);
	if (err) {
		netdev_err(netdev, "Failed to request Rx irq\n");
		goto err_req_rx_irq;
	}

	return 0;

err_req_rx_irq:
err_rx_pkts:
	free_all_rx_skbs(rxq);
	devm_kfree(&netdev->dev, rxq->irq_name);
	return err;
}

/**
 * hinic_clean_rxq - Clean the Rx Queue
 * @rxq: Logical Rx Queue
 **/
void hinic_clean_rxq(struct hinic_rxq *rxq)
{
	struct net_device *netdev = rxq->netdev;

	rx_free_irq(rxq);

	free_all_rx_skbs(rxq);
	devm_kfree(&netdev->dev, rxq->irq_name);
}