125763b3cSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
24863dea3SSunil Goutham /*
34863dea3SSunil Goutham * Copyright (C) 2015 Cavium, Inc.
44863dea3SSunil Goutham */
54863dea3SSunil Goutham
64863dea3SSunil Goutham #include <linux/pci.h>
74863dea3SSunil Goutham #include <linux/netdevice.h>
84863dea3SSunil Goutham #include <linux/ip.h>
94863dea3SSunil Goutham #include <linux/etherdevice.h>
1083abb7d7SSunil Goutham #include <linux/iommu.h>
114863dea3SSunil Goutham #include <net/ip.h>
124863dea3SSunil Goutham #include <net/tso.h>
133b80b73aSJakub Kicinski #include <uapi/linux/bpf.h>
144863dea3SSunil Goutham
154863dea3SSunil Goutham #include "nic_reg.h"
164863dea3SSunil Goutham #include "nic.h"
174863dea3SSunil Goutham #include "q_struct.h"
184863dea3SSunil Goutham #include "nicvf_queues.h"
194863dea3SSunil Goutham
2016f2bccdSSunil Goutham static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
2116f2bccdSSunil Goutham int size, u64 data);
225c2e26f6SSunil Goutham static void nicvf_get_page(struct nicvf *nic)
235c2e26f6SSunil Goutham {
245c2e26f6SSunil Goutham if (!nic->rb_pageref || !nic->rb_page)
255c2e26f6SSunil Goutham return;
265c2e26f6SSunil Goutham
276d061f9fSJoonsoo Kim page_ref_add(nic->rb_page, nic->rb_pageref);
285c2e26f6SSunil Goutham nic->rb_pageref = 0;
295c2e26f6SSunil Goutham }
305c2e26f6SSunil Goutham
314863dea3SSunil Goutham /* Poll a register for a specific value */
324863dea3SSunil Goutham static int nicvf_poll_reg(struct nicvf *nic, int qidx,
334863dea3SSunil Goutham u64 reg, int bit_pos, int bits, int val)
344863dea3SSunil Goutham {
354863dea3SSunil Goutham u64 bit_mask;
364863dea3SSunil Goutham u64 reg_val;
374863dea3SSunil Goutham int timeout = 10;
384863dea3SSunil Goutham
394863dea3SSunil Goutham bit_mask = (1ULL << bits) - 1;
404863dea3SSunil Goutham bit_mask = (bit_mask << bit_pos);
414863dea3SSunil Goutham
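/* Poll roughly every 1-2 ms, giving up after ~10-20 ms total */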
424863dea3SSunil Goutham while (timeout) {
434863dea3SSunil Goutham reg_val = nicvf_queue_reg_read(nic, reg, qidx);
444863dea3SSunil Goutham if (((reg_val & bit_mask) >> bit_pos) == val)
454863dea3SSunil Goutham return 0;
464863dea3SSunil Goutham usleep_range(1000, 2000);
474863dea3SSunil Goutham timeout--;
484863dea3SSunil Goutham }
494863dea3SSunil Goutham netdev_err(nic->netdev, "Poll on reg 0x%llx failed\n", reg);
504863dea3SSunil Goutham return 1;
514863dea3SSunil Goutham }
524863dea3SSunil Goutham
534863dea3SSunil Goutham /* Allocate memory for a queue's descriptors */
544863dea3SSunil Goutham static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
554863dea3SSunil Goutham int q_len, int desc_size, int align_bytes)
564863dea3SSunil Goutham {
574863dea3SSunil Goutham dmem->q_len = q_len;
584863dea3SSunil Goutham dmem->size = (desc_size * q_len) + align_bytes;
594863dea3SSunil Goutham /* Save address, need it while freeing */
60750afb08SLuis Chamberlain dmem->unalign_base = dma_alloc_coherent(&nic->pdev->dev, dmem->size,
614863dea3SSunil Goutham &dmem->dma, GFP_KERNEL);
624863dea3SSunil Goutham if (!dmem->unalign_base)
634863dea3SSunil Goutham return -ENOMEM;
644863dea3SSunil Goutham
654863dea3SSunil Goutham /* Align memory address for 'align_bytes' */
664863dea3SSunil Goutham dmem->phys_base = NICVF_ALIGNED_ADDR((u64)dmem->dma, align_bytes);
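/* Apply the same alignment offset to the CPU-visible base pointer */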
6739a0dd0bSAleksey Makarov dmem->base = dmem->unalign_base + (dmem->phys_base - dmem->dma);
684863dea3SSunil Goutham return 0;
694863dea3SSunil Goutham }
704863dea3SSunil Goutham
714863dea3SSunil Goutham /* Free queue's descriptor memory */
724863dea3SSunil Goutham static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
734863dea3SSunil Goutham {
744863dea3SSunil Goutham if (!dmem)
754863dea3SSunil Goutham return;
764863dea3SSunil Goutham
774863dea3SSunil Goutham dma_free_coherent(&nic->pdev->dev, dmem->size,
784863dea3SSunil Goutham dmem->unalign_base, dmem->dma);
794863dea3SSunil Goutham dmem->unalign_base = NULL;
804863dea3SSunil Goutham dmem->base = NULL;
814863dea3SSunil Goutham }
824863dea3SSunil Goutham
8377322538SSunil Goutham #define XDP_PAGE_REFCNT_REFILL 256
8477322538SSunil Goutham
855836b442SSunil Goutham /* Allocate a new page or recycle one if possible
865836b442SSunil Goutham *
875836b442SSunil Goutham * We cannot optimize DMA mapping here, since
885836b442SSunil Goutham * 1. There is only one RBDR ring for 8 Rx queues.
895836b442SSunil Goutham * 2. CQE_RX gives the address of the buffer where the pkt has been DMA'ed,
905836b442SSunil Goutham * not the idx into the RBDR ring, so we can't refer to saved info.
915836b442SSunil Goutham * 3. There are multiple receive buffers per page.
924863dea3SSunil Goutham */
9377322538SSunil Goutham static inline struct pgcache *nicvf_alloc_page(struct nicvf *nic,
945836b442SSunil Goutham struct rbdr *rbdr, gfp_t gfp)
954863dea3SSunil Goutham {
9677322538SSunil Goutham int ref_count;
975836b442SSunil Goutham struct page *page = NULL;
985836b442SSunil Goutham struct pgcache *pgcache, *next;
995836b442SSunil Goutham
1005836b442SSunil Goutham /* Check if page is already allocated */
1015836b442SSunil Goutham pgcache = &rbdr->pgcache[rbdr->pgidx];
1025836b442SSunil Goutham page = pgcache->page;
1035836b442SSunil Goutham /* Check if page can be recycled */
10477322538SSunil Goutham if (page) {
10577322538SSunil Goutham ref_count = page_ref_count(page);
106b3e20806SDean Nelson /* This page can be recycled if internal ref_count and page's
107b3e20806SDean Nelson * ref_count are equal, indicating that the page has been used
108b3e20806SDean Nelson * once for packet transmission. For non-XDP mode, internal
109b3e20806SDean Nelson * ref_count is always '1'.
11077322538SSunil Goutham */
111b3e20806SDean Nelson if (rbdr->is_xdp) {
112b3e20806SDean Nelson if (ref_count == pgcache->ref_count)
11377322538SSunil Goutham pgcache->ref_count--;
11477322538SSunil Goutham else
1155836b442SSunil Goutham page = NULL;
116b3e20806SDean Nelson } else if (ref_count != 1) {
11777322538SSunil Goutham page = NULL;
11877322538SSunil Goutham }
119b3e20806SDean Nelson }
12077322538SSunil Goutham
1215836b442SSunil Goutham if (!page) {
1225836b442SSunil Goutham page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN, 0);
1235836b442SSunil Goutham if (!page)
1245836b442SSunil Goutham return NULL;
1255836b442SSunil Goutham
1265836b442SSunil Goutham this_cpu_inc(nic->pnicvf->drv_stats->page_alloc);
1275836b442SSunil Goutham
1285836b442SSunil Goutham /* Check for space */
1295836b442SSunil Goutham if (rbdr->pgalloc >= rbdr->pgcnt) {
1305836b442SSunil Goutham /* Page can still be used */
1315836b442SSunil Goutham nic->rb_page = page;
1325836b442SSunil Goutham return NULL;
1335836b442SSunil Goutham }
1345836b442SSunil Goutham
1355836b442SSunil Goutham /* Save the page in page cache */
1365836b442SSunil Goutham pgcache->page = page;
137c56d91ceSSunil Goutham pgcache->dma_addr = 0;
13877322538SSunil Goutham pgcache->ref_count = 0;
1395836b442SSunil Goutham rbdr->pgalloc++;
1405836b442SSunil Goutham }
1415836b442SSunil Goutham
14277322538SSunil Goutham /* Take additional page references for recycling */
14377322538SSunil Goutham if (rbdr->is_xdp) {
14477322538SSunil Goutham /* Since there is a single RBDR (i.e. a single core doing
14577322538SSunil Goutham * page recycling) per 8 Rx queues, in XDP mode adjusting
14677322538SSunil Goutham * page references atomically is the biggest bottleneck, so
14777322538SSunil Goutham * take a bunch of references at a time.
14877322538SSunil Goutham *
14977322538SSunil Goutham * So here, the reference counts below differ by '1'.
15077322538SSunil Goutham */
15177322538SSunil Goutham if (!pgcache->ref_count) {
15277322538SSunil Goutham pgcache->ref_count = XDP_PAGE_REFCNT_REFILL;
15377322538SSunil Goutham page_ref_add(page, XDP_PAGE_REFCNT_REFILL);
15477322538SSunil Goutham }
15577322538SSunil Goutham } else {
15677322538SSunil Goutham /* In the non-XDP case, a single 64K page is divided across multiple
15777322538SSunil Goutham * receive buffers, so the cost of recycling is lower anyway.
15877322538SSunil Goutham * So we can do with just one extra reference.
15977322538SSunil Goutham */
1605836b442SSunil Goutham page_ref_add(page, 1);
16177322538SSunil Goutham }
1625836b442SSunil Goutham
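/* Move to the next page cache entry; pgcnt is a power of two,
 * so masking wraps the index around.
 */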
1635836b442SSunil Goutham rbdr->pgidx++;
1645836b442SSunil Goutham rbdr->pgidx &= (rbdr->pgcnt - 1);
1655836b442SSunil Goutham
1665836b442SSunil Goutham /* Prefetch refcount of next page in page cache */
1675836b442SSunil Goutham next = &rbdr->pgcache[rbdr->pgidx];
1685836b442SSunil Goutham page = next->page;
1695836b442SSunil Goutham if (page)
1705836b442SSunil Goutham prefetch(&page->_refcount);
1715836b442SSunil Goutham
1725836b442SSunil Goutham return pgcache;
1735836b442SSunil Goutham }
1745836b442SSunil Goutham
1755836b442SSunil Goutham /* Allocate buffer for packet reception */
1765836b442SSunil Goutham static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, struct rbdr *rbdr,
177927987f3SSunil Goutham gfp_t gfp, u32 buf_len, u64 *rbuf)
1785836b442SSunil Goutham {
1795836b442SSunil Goutham struct pgcache *pgcache = NULL;
1804863dea3SSunil Goutham
18105c773f5SSunil Goutham /* Check if request can be accommodated in the previously allocated page.
18205c773f5SSunil Goutham * But in XDP mode only one buffer per page is permitted.
18305c773f5SSunil Goutham */
184c56d91ceSSunil Goutham if (!rbdr->is_xdp && nic->rb_page &&
1855836b442SSunil Goutham ((nic->rb_page_offset + buf_len) <= PAGE_SIZE)) {
1865c2e26f6SSunil Goutham nic->rb_pageref++;
1875c2e26f6SSunil Goutham goto ret;
1885c2e26f6SSunil Goutham }
1895c2e26f6SSunil Goutham
1905c2e26f6SSunil Goutham nicvf_get_page(nic);
1915836b442SSunil Goutham nic->rb_page = NULL;
1924863dea3SSunil Goutham
1935836b442SSunil Goutham /* Get new page, either recycled or new one */
1945836b442SSunil Goutham pgcache = nicvf_alloc_page(nic, rbdr, gfp);
1955836b442SSunil Goutham if (!pgcache && !nic->rb_page) {
19683abb7d7SSunil Goutham this_cpu_inc(nic->pnicvf->drv_stats->rcv_buffer_alloc_failures);
1974863dea3SSunil Goutham return -ENOMEM;
1984863dea3SSunil Goutham }
1995836b442SSunil Goutham
2004863dea3SSunil Goutham nic->rb_page_offset = 0;
201e3d06ff9SSunil Goutham
202e3d06ff9SSunil Goutham /* Reserve space for header modifications by BPF program */
203e3d06ff9SSunil Goutham if (rbdr->is_xdp)
204e6dbe939SJesper Dangaard Brouer buf_len += XDP_PACKET_HEADROOM;
205e3d06ff9SSunil Goutham
2065836b442SSunil Goutham /* Check if it's recycled */
2075836b442SSunil Goutham if (pgcache)
2085836b442SSunil Goutham nic->rb_page = pgcache->page;
2095c2e26f6SSunil Goutham ret:
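/* Recycled XDP pages keep their DMA mapping, so reuse the saved address */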
210c56d91ceSSunil Goutham if (rbdr->is_xdp && pgcache && pgcache->dma_addr) {
211c56d91ceSSunil Goutham *rbuf = pgcache->dma_addr;
212c56d91ceSSunil Goutham } else {
21383abb7d7SSunil Goutham /* HW will ensure data coherency, CPU sync not required */
214927987f3SSunil Goutham *rbuf = (u64)dma_map_page_attrs(&nic->pdev->dev, nic->rb_page,
21583abb7d7SSunil Goutham nic->rb_page_offset, buf_len,
21683abb7d7SSunil Goutham DMA_FROM_DEVICE,
217927987f3SSunil Goutham DMA_ATTR_SKIP_CPU_SYNC);
21883abb7d7SSunil Goutham if (dma_mapping_error(&nic->pdev->dev, (dma_addr_t)*rbuf)) {
21983abb7d7SSunil Goutham if (!nic->rb_page_offset)
2205836b442SSunil Goutham __free_pages(nic->rb_page, 0);
22183abb7d7SSunil Goutham nic->rb_page = NULL;
22283abb7d7SSunil Goutham return -ENOMEM;
22383abb7d7SSunil Goutham }
224c56d91ceSSunil Goutham if (pgcache)
225e6dbe939SJesper Dangaard Brouer pgcache->dma_addr = *rbuf + XDP_PACKET_HEADROOM;
2265c2e26f6SSunil Goutham nic->rb_page_offset += buf_len;
227c56d91ceSSunil Goutham }
2284863dea3SSunil Goutham
2294863dea3SSunil Goutham return 0;
2304863dea3SSunil Goutham }
2314863dea3SSunil Goutham
232668dda06SSunil Goutham /* Build skb around receive buffer */
2334863dea3SSunil Goutham static struct sk_buff *nicvf_rb_ptr_to_skb(struct nicvf *nic,
2344863dea3SSunil Goutham u64 rb_ptr, int len)
2354863dea3SSunil Goutham {
236668dda06SSunil Goutham void *data;
2374863dea3SSunil Goutham struct sk_buff *skb;
2384863dea3SSunil Goutham
239668dda06SSunil Goutham data = phys_to_virt(rb_ptr);
2404863dea3SSunil Goutham
2414863dea3SSunil Goutham /* Now build an skb to give to stack */
242668dda06SSunil Goutham skb = build_skb(data, RCV_FRAG_LEN);
2434863dea3SSunil Goutham if (!skb) {
244668dda06SSunil Goutham put_page(virt_to_page(data));
2454863dea3SSunil Goutham return NULL;
2464863dea3SSunil Goutham }
2474863dea3SSunil Goutham
248668dda06SSunil Goutham prefetch(skb->data);
2494863dea3SSunil Goutham return skb;
2504863dea3SSunil Goutham }
2514863dea3SSunil Goutham
2524863dea3SSunil Goutham /* Allocate RBDR ring and populate receive buffers */
2534863dea3SSunil Goutham static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
2544863dea3SSunil Goutham int ring_len, int buf_size)
2554863dea3SSunil Goutham {
2564863dea3SSunil Goutham int idx;
257927987f3SSunil Goutham u64 rbuf;
2584863dea3SSunil Goutham struct rbdr_entry_t *desc;
2594863dea3SSunil Goutham int err;
2604863dea3SSunil Goutham
2614863dea3SSunil Goutham err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
2624863dea3SSunil Goutham sizeof(struct rbdr_entry_t),
2634863dea3SSunil Goutham NICVF_RCV_BUF_ALIGN_BYTES);
2644863dea3SSunil Goutham if (err)
2654863dea3SSunil Goutham return err;
2664863dea3SSunil Goutham
2674863dea3SSunil Goutham rbdr->desc = rbdr->dmem.base;
2684863dea3SSunil Goutham /* Buffer size has to be in multiples of 128 bytes */
2694863dea3SSunil Goutham rbdr->dma_size = buf_size;
2704863dea3SSunil Goutham rbdr->enable = true;
2714863dea3SSunil Goutham rbdr->thresh = RBDR_THRESH;
27283abb7d7SSunil Goutham rbdr->head = 0;
27383abb7d7SSunil Goutham rbdr->tail = 0;
2744863dea3SSunil Goutham
2755836b442SSunil Goutham /* Initialize page recycling stuff.
2765836b442SSunil Goutham *
2775836b442SSunil Goutham * Can't use a single buffer per page, especially with 64K pages.
2785836b442SSunil Goutham * On embedded platforms, i.e. 81xx/83xx, available memory itself
2795836b442SSunil Goutham * is low and the minimum RBDR ring size is 8K, which takes away
2805836b442SSunil Goutham * lots of memory.
281c56d91ceSSunil Goutham *
282c56d91ceSSunil Goutham * But for XDP it has to be a single buffer per page.
2835836b442SSunil Goutham */
284c56d91ceSSunil Goutham if (!nic->pnicvf->xdp_prog) {
2855836b442SSunil Goutham rbdr->pgcnt = ring_len / (PAGE_SIZE / buf_size);
286c56d91ceSSunil Goutham rbdr->is_xdp = false;
287c56d91ceSSunil Goutham } else {
288c56d91ceSSunil Goutham rbdr->pgcnt = ring_len;
289c56d91ceSSunil Goutham rbdr->is_xdp = true;
290c56d91ceSSunil Goutham }
2915836b442SSunil Goutham rbdr->pgcnt = roundup_pow_of_two(rbdr->pgcnt);
2926396bb22SKees Cook rbdr->pgcache = kcalloc(rbdr->pgcnt, sizeof(*rbdr->pgcache),
2936396bb22SKees Cook GFP_KERNEL);
2945836b442SSunil Goutham if (!rbdr->pgcache)
2955836b442SSunil Goutham return -ENOMEM;
2965836b442SSunil Goutham rbdr->pgidx = 0;
2975836b442SSunil Goutham rbdr->pgalloc = 0;
2985836b442SSunil Goutham
2994863dea3SSunil Goutham nic->rb_page = NULL;
3004863dea3SSunil Goutham for (idx = 0; idx < ring_len; idx++) {
3015836b442SSunil Goutham err = nicvf_alloc_rcv_buffer(nic, rbdr, GFP_KERNEL,
3025836b442SSunil Goutham RCV_FRAG_LEN, &rbuf);
30383abb7d7SSunil Goutham if (err) {
30483abb7d7SSunil Goutham /* To free already allocated and mapped ones */
30583abb7d7SSunil Goutham rbdr->tail = idx - 1;
3064863dea3SSunil Goutham return err;
30783abb7d7SSunil Goutham }
3084863dea3SSunil Goutham
3094863dea3SSunil Goutham desc = GET_RBDR_DESC(rbdr, idx);
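/* Store the buffer address with the low alignment bits cleared */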
310927987f3SSunil Goutham desc->buf_addr = rbuf & ~(NICVF_RCV_BUF_ALIGN_BYTES - 1);
3114863dea3SSunil Goutham }
3125c2e26f6SSunil Goutham
3135c2e26f6SSunil Goutham nicvf_get_page(nic);
3145c2e26f6SSunil Goutham
3154863dea3SSunil Goutham return 0;
3164863dea3SSunil Goutham }
3174863dea3SSunil Goutham
3184863dea3SSunil Goutham /* Free RBDR ring and its receive buffers */
3194863dea3SSunil Goutham static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
3204863dea3SSunil Goutham {
3214863dea3SSunil Goutham int head, tail;
32283abb7d7SSunil Goutham u64 buf_addr, phys_addr;
3235836b442SSunil Goutham struct pgcache *pgcache;
3244863dea3SSunil Goutham struct rbdr_entry_t *desc;
3254863dea3SSunil Goutham
3264863dea3SSunil Goutham if (!rbdr)
3274863dea3SSunil Goutham return;
3284863dea3SSunil Goutham
3294863dea3SSunil Goutham rbdr->enable = false;
3304863dea3SSunil Goutham if (!rbdr->dmem.base)
3314863dea3SSunil Goutham return;
3324863dea3SSunil Goutham
3334863dea3SSunil Goutham head = rbdr->head;
3344863dea3SSunil Goutham tail = rbdr->tail;
3354863dea3SSunil Goutham
33683abb7d7SSunil Goutham /* Release page references */
3374863dea3SSunil Goutham while (head != tail) {
3384863dea3SSunil Goutham desc = GET_RBDR_DESC(rbdr, head);
3395e848e4cSSunil Goutham buf_addr = desc->buf_addr;
34083abb7d7SSunil Goutham phys_addr = nicvf_iova_to_phys(nic, buf_addr);
34183abb7d7SSunil Goutham dma_unmap_page_attrs(&nic->pdev->dev, buf_addr, RCV_FRAG_LEN,
34283abb7d7SSunil Goutham DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
34383abb7d7SSunil Goutham if (phys_addr)
34483abb7d7SSunil Goutham put_page(virt_to_page(phys_to_virt(phys_addr)));
3454863dea3SSunil Goutham head++;
3464863dea3SSunil Goutham head &= (rbdr->dmem.q_len - 1);
3474863dea3SSunil Goutham }
34883abb7d7SSunil Goutham /* Release buffer of tail desc */
3494863dea3SSunil Goutham desc = GET_RBDR_DESC(rbdr, tail);
3505e848e4cSSunil Goutham buf_addr = desc->buf_addr;
35183abb7d7SSunil Goutham phys_addr = nicvf_iova_to_phys(nic, buf_addr);
35283abb7d7SSunil Goutham dma_unmap_page_attrs(&nic->pdev->dev, buf_addr, RCV_FRAG_LEN,
35383abb7d7SSunil Goutham DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
35483abb7d7SSunil Goutham if (phys_addr)
35583abb7d7SSunil Goutham put_page(virt_to_page(phys_to_virt(phys_addr)));
3564863dea3SSunil Goutham
3575836b442SSunil Goutham /* Sync page cache info */
3585836b442SSunil Goutham smp_rmb();
3595836b442SSunil Goutham
3605836b442SSunil Goutham /* Release additional page references held for recycling */
3615836b442SSunil Goutham head = 0;
3625836b442SSunil Goutham while (head < rbdr->pgcnt) {
3635836b442SSunil Goutham pgcache = &rbdr->pgcache[head];
36477322538SSunil Goutham if (pgcache->page && page_ref_count(pgcache->page) != 0) {
365cd35ef91SDean Nelson if (rbdr->is_xdp) {
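/* Drop all but one of the references taken in bulk for
 * recycling; put_page() below releases the last one.
 */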
366cd35ef91SDean Nelson page_ref_sub(pgcache->page,
367cd35ef91SDean Nelson pgcache->ref_count - 1);
36877322538SSunil Goutham }
36977322538SSunil Goutham put_page(pgcache->page);
37077322538SSunil Goutham }
3715836b442SSunil Goutham head++;
3725836b442SSunil Goutham }
3735836b442SSunil Goutham
3744863dea3SSunil Goutham /* Free RBDR ring */
3754863dea3SSunil Goutham nicvf_free_q_desc_mem(nic, &rbdr->dmem);
3764863dea3SSunil Goutham }
3774863dea3SSunil Goutham
3784863dea3SSunil Goutham /* Refill receive buffer descriptors with new buffers.
3794863dea3SSunil Goutham */
380fd7ec062SAleksey Makarov static void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp)
3814863dea3SSunil Goutham {
3824863dea3SSunil Goutham struct queue_set *qs = nic->qs;
3834863dea3SSunil Goutham int rbdr_idx = qs->rbdr_cnt;
3844863dea3SSunil Goutham int tail, qcount;
3854863dea3SSunil Goutham int refill_rb_cnt;
3864863dea3SSunil Goutham struct rbdr *rbdr;
3874863dea3SSunil Goutham struct rbdr_entry_t *desc;
388927987f3SSunil Goutham u64 rbuf;
3894863dea3SSunil Goutham int new_rb = 0;
3904863dea3SSunil Goutham
3914863dea3SSunil Goutham refill:
3924863dea3SSunil Goutham if (!rbdr_idx)
3934863dea3SSunil Goutham return;
3944863dea3SSunil Goutham rbdr_idx--;
3954863dea3SSunil Goutham rbdr = &qs->rbdr[rbdr_idx];
3964863dea3SSunil Goutham /* Check if it's enabled */
3974863dea3SSunil Goutham if (!rbdr->enable)
3984863dea3SSunil Goutham goto next_rbdr;
3994863dea3SSunil Goutham
4004863dea3SSunil Goutham /* Get the number of descriptors to be refilled */
4014863dea3SSunil Goutham qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);
4024863dea3SSunil Goutham qcount &= 0x7FFFF;
4034863dea3SSunil Goutham /* Doorbell can be rung with a max of ring size minus 1 */
4044863dea3SSunil Goutham if (qcount >= (qs->rbdr_len - 1))
4054863dea3SSunil Goutham goto next_rbdr;
4064863dea3SSunil Goutham else
4074863dea3SSunil Goutham refill_rb_cnt = qs->rbdr_len - qcount - 1;
4084863dea3SSunil Goutham
4095836b442SSunil Goutham /* Sync page cache info */
4105836b442SSunil Goutham smp_rmb();
4115836b442SSunil Goutham
4124863dea3SSunil Goutham /* Start filling descs from tail */
4134863dea3SSunil Goutham tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3;
4144863dea3SSunil Goutham while (refill_rb_cnt) {
4154863dea3SSunil Goutham tail++;
4164863dea3SSunil Goutham tail &= (rbdr->dmem.q_len - 1);
4174863dea3SSunil Goutham
4185836b442SSunil Goutham if (nicvf_alloc_rcv_buffer(nic, rbdr, gfp, RCV_FRAG_LEN, &rbuf))
4194863dea3SSunil Goutham break;
4204863dea3SSunil Goutham
4214863dea3SSunil Goutham desc = GET_RBDR_DESC(rbdr, tail);
422927987f3SSunil Goutham desc->buf_addr = rbuf & ~(NICVF_RCV_BUF_ALIGN_BYTES - 1);
4234863dea3SSunil Goutham refill_rb_cnt--;
4244863dea3SSunil Goutham new_rb++;
4254863dea3SSunil Goutham }
4264863dea3SSunil Goutham
4275c2e26f6SSunil Goutham nicvf_get_page(nic);
4285c2e26f6SSunil Goutham
4294863dea3SSunil Goutham /* make sure all memory stores are done before ringing doorbell */
4304863dea3SSunil Goutham smp_wmb();
4314863dea3SSunil Goutham
4324863dea3SSunil Goutham /* Check if buffer allocation failed */
4334863dea3SSunil Goutham if (refill_rb_cnt)
4344863dea3SSunil Goutham nic->rb_alloc_fail = true;
4354863dea3SSunil Goutham else
4364863dea3SSunil Goutham nic->rb_alloc_fail = false;
4374863dea3SSunil Goutham
4384863dea3SSunil Goutham /* Notify HW */
4394863dea3SSunil Goutham nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
4404863dea3SSunil Goutham rbdr_idx, new_rb);
4414863dea3SSunil Goutham next_rbdr:
4424863dea3SSunil Goutham /* Re-enable RBDR interrupts only if buffer allocation was successful */
443c94acf80SSunil Goutham if (!nic->rb_alloc_fail && rbdr->enable &&
444c94acf80SSunil Goutham netif_running(nic->pnicvf->netdev))
4454863dea3SSunil Goutham nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);
4464863dea3SSunil Goutham
4474863dea3SSunil Goutham if (rbdr_idx)
4484863dea3SSunil Goutham goto refill;
4494863dea3SSunil Goutham }
4504863dea3SSunil Goutham
4514863dea3SSunil Goutham /* Alloc rcv buffers in non-atomic mode for better success */
4524863dea3SSunil Goutham void nicvf_rbdr_work(struct work_struct *work)
4534863dea3SSunil Goutham {
4544863dea3SSunil Goutham struct nicvf *nic = container_of(work, struct nicvf, rbdr_work.work);
4554863dea3SSunil Goutham
4564863dea3SSunil Goutham nicvf_refill_rbdr(nic, GFP_KERNEL);
4574863dea3SSunil Goutham if (nic->rb_alloc_fail)
4584863dea3SSunil Goutham schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
4594863dea3SSunil Goutham else
4604863dea3SSunil Goutham nic->rb_work_scheduled = false;
4614863dea3SSunil Goutham }
4624863dea3SSunil Goutham
4634863dea3SSunil Goutham /* In Softirq context, alloc rcv buffers in atomic mode */
464dfe4e612SAllen Pais void nicvf_rbdr_task(struct tasklet_struct *t)
4654863dea3SSunil Goutham {
466dfe4e612SAllen Pais struct nicvf *nic = from_tasklet(nic, t, rbdr_task);
4674863dea3SSunil Goutham
4684863dea3SSunil Goutham nicvf_refill_rbdr(nic, GFP_ATOMIC);
4694863dea3SSunil Goutham if (nic->rb_alloc_fail) {
4704863dea3SSunil Goutham nic->rb_work_scheduled = true;
4714863dea3SSunil Goutham schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
4724863dea3SSunil Goutham }
4734863dea3SSunil Goutham }
4744863dea3SSunil Goutham
4754863dea3SSunil Goutham /* Initialize completion queue */
4764863dea3SSunil Goutham static int nicvf_init_cmp_queue(struct nicvf *nic,
4774863dea3SSunil Goutham struct cmp_queue *cq, int q_len)
4784863dea3SSunil Goutham {
4794863dea3SSunil Goutham int err;
4804863dea3SSunil Goutham
4814863dea3SSunil Goutham err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE,
4824863dea3SSunil Goutham NICVF_CQ_BASE_ALIGN_BYTES);
4834863dea3SSunil Goutham if (err)
4844863dea3SSunil Goutham return err;
4854863dea3SSunil Goutham
4864863dea3SSunil Goutham cq->desc = cq->dmem.base;
487b9687b48SSunil Goutham cq->thresh = pass1_silicon(nic->pdev) ? 0 : CMP_QUEUE_CQE_THRESH;
4884863dea3SSunil Goutham nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1;
4894863dea3SSunil Goutham
4904863dea3SSunil Goutham return 0;
4914863dea3SSunil Goutham }
4924863dea3SSunil Goutham
4934863dea3SSunil Goutham static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
4944863dea3SSunil Goutham {
4954863dea3SSunil Goutham if (!cq)
4964863dea3SSunil Goutham return;
4974863dea3SSunil Goutham if (!cq->dmem.base)
4984863dea3SSunil Goutham return;
4994863dea3SSunil Goutham
5004863dea3SSunil Goutham nicvf_free_q_desc_mem(nic, &cq->dmem);
5014863dea3SSunil Goutham }
5024863dea3SSunil Goutham
5034863dea3SSunil Goutham /* Initialize transmit queue */
5044863dea3SSunil Goutham static int nicvf_init_snd_queue(struct nicvf *nic,
50516f2bccdSSunil Goutham struct snd_queue *sq, int q_len, int qidx)
5064863dea3SSunil Goutham {
5074863dea3SSunil Goutham int err;
5084863dea3SSunil Goutham
5094863dea3SSunil Goutham err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,
5104863dea3SSunil Goutham NICVF_SQ_BASE_ALIGN_BYTES);
5114863dea3SSunil Goutham if (err)
5124863dea3SSunil Goutham return err;
5134863dea3SSunil Goutham
5144863dea3SSunil Goutham sq->desc = sq->dmem.base;
51586ace693SAleksey Makarov sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_KERNEL);
516fa1a6c93SAleksey Makarov if (!sq->skbuff)
517fa1a6c93SAleksey Makarov return -ENOMEM;
51816f2bccdSSunil Goutham
5194863dea3SSunil Goutham sq->head = 0;
5204863dea3SSunil Goutham sq->tail = 0;
5214863dea3SSunil Goutham sq->thresh = SND_QUEUE_THRESH;
5224863dea3SSunil Goutham
52316f2bccdSSunil Goutham /* Check if this SQ is an XDP TX queue */
52416f2bccdSSunil Goutham if (nic->sqs_mode)
52516f2bccdSSunil Goutham qidx += ((nic->sqs_id + 1) * MAX_SND_QUEUES_PER_QS);
52616f2bccdSSunil Goutham if (qidx < nic->pnicvf->xdp_tx_queues) {
52716f2bccdSSunil Goutham /* Alloc memory to save page pointers for XDP_TX */
52816f2bccdSSunil Goutham sq->xdp_page = kcalloc(q_len, sizeof(u64), GFP_KERNEL);
52916f2bccdSSunil Goutham if (!sq->xdp_page)
53016f2bccdSSunil Goutham return -ENOMEM;
53116f2bccdSSunil Goutham sq->xdp_desc_cnt = 0;
53216f2bccdSSunil Goutham sq->xdp_free_cnt = q_len - 1;
53316f2bccdSSunil Goutham sq->is_xdp = true;
53416f2bccdSSunil Goutham } else {
53516f2bccdSSunil Goutham sq->xdp_page = NULL;
53616f2bccdSSunil Goutham sq->xdp_desc_cnt = 0;
53716f2bccdSSunil Goutham sq->xdp_free_cnt = 0;
53816f2bccdSSunil Goutham sq->is_xdp = false;
53916f2bccdSSunil Goutham
54016f2bccdSSunil Goutham atomic_set(&sq->free_cnt, q_len - 1);
54116f2bccdSSunil Goutham
5424863dea3SSunil Goutham /* Preallocate memory for TSO segment's header */
5434863dea3SSunil Goutham sq->tso_hdrs = dma_alloc_coherent(&nic->pdev->dev,
5444863dea3SSunil Goutham q_len * TSO_HEADER_SIZE,
54516f2bccdSSunil Goutham &sq->tso_hdrs_phys,
54616f2bccdSSunil Goutham GFP_KERNEL);
5474863dea3SSunil Goutham if (!sq->tso_hdrs)
5484863dea3SSunil Goutham return -ENOMEM;
54916f2bccdSSunil Goutham }
5504863dea3SSunil Goutham
5514863dea3SSunil Goutham return 0;
5524863dea3SSunil Goutham }
5534863dea3SSunil Goutham
55483abb7d7SSunil Goutham void nicvf_unmap_sndq_buffers(struct nicvf *nic, struct snd_queue *sq,
55583abb7d7SSunil Goutham int hdr_sqe, u8 subdesc_cnt)
55683abb7d7SSunil Goutham {
55783abb7d7SSunil Goutham u8 idx;
55883abb7d7SSunil Goutham struct sq_gather_subdesc *gather;
55983abb7d7SSunil Goutham
56083abb7d7SSunil Goutham /* Unmap DMA mapped skb data buffers */
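/* Gather subdescriptors immediately follow the header subdescriptor */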
56183abb7d7SSunil Goutham for (idx = 0; idx < subdesc_cnt; idx++) {
56283abb7d7SSunil Goutham hdr_sqe++;
56383abb7d7SSunil Goutham hdr_sqe &= (sq->dmem.q_len - 1);
56483abb7d7SSunil Goutham gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, hdr_sqe);
56583abb7d7SSunil Goutham /* HW will ensure data coherency, CPU sync not required */
56683abb7d7SSunil Goutham dma_unmap_page_attrs(&nic->pdev->dev, gather->addr,
56783abb7d7SSunil Goutham gather->size, DMA_TO_DEVICE,
56883abb7d7SSunil Goutham DMA_ATTR_SKIP_CPU_SYNC);
56983abb7d7SSunil Goutham }
57083abb7d7SSunil Goutham }
57183abb7d7SSunil Goutham
5724863dea3SSunil Goutham static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
5734863dea3SSunil Goutham {
574c94acf80SSunil Goutham struct sk_buff *skb;
57516f2bccdSSunil Goutham struct page *page;
57683abb7d7SSunil Goutham struct sq_hdr_subdesc *hdr;
57783abb7d7SSunil Goutham struct sq_hdr_subdesc *tso_sqe;
578c94acf80SSunil Goutham
5794863dea3SSunil Goutham if (!sq)
5804863dea3SSunil Goutham return;
5814863dea3SSunil Goutham if (!sq->dmem.base)
5824863dea3SSunil Goutham return;
5834863dea3SSunil Goutham
584ef2a7cf1SLorenzo Bianconi if (sq->tso_hdrs) {
585143ceb0bSSunil Goutham dma_free_coherent(&nic->pdev->dev,
586143ceb0bSSunil Goutham sq->dmem.q_len * TSO_HEADER_SIZE,
5874863dea3SSunil Goutham sq->tso_hdrs, sq->tso_hdrs_phys);
588ef2a7cf1SLorenzo Bianconi sq->tso_hdrs = NULL;
589ef2a7cf1SLorenzo Bianconi }
5904863dea3SSunil Goutham
591c94acf80SSunil Goutham /* Free pending skbs in the queue */
592c94acf80SSunil Goutham smp_rmb();
593c94acf80SSunil Goutham while (sq->head != sq->tail) {
594c94acf80SSunil Goutham skb = (struct sk_buff *)sq->skbuff[sq->head];
59516f2bccdSSunil Goutham if (!skb || !sq->xdp_page)
59683abb7d7SSunil Goutham goto next;
59716f2bccdSSunil Goutham
59816f2bccdSSunil Goutham page = (struct page *)sq->xdp_page[sq->head];
59916f2bccdSSunil Goutham if (!page)
60016f2bccdSSunil Goutham goto next;
60116f2bccdSSunil Goutham else
60216f2bccdSSunil Goutham put_page(page);
60316f2bccdSSunil Goutham
60483abb7d7SSunil Goutham hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
60583abb7d7SSunil Goutham /* Check for dummy descriptor used for HW TSO offload on 88xx */
60683abb7d7SSunil Goutham if (hdr->dont_send) {
60783abb7d7SSunil Goutham /* Get actual TSO descriptors and unmap them */
60883abb7d7SSunil Goutham tso_sqe =
60983abb7d7SSunil Goutham (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2);
61083abb7d7SSunil Goutham nicvf_unmap_sndq_buffers(nic, sq, hdr->rsvd2,
61183abb7d7SSunil Goutham tso_sqe->subdesc_cnt);
61283abb7d7SSunil Goutham } else {
61383abb7d7SSunil Goutham nicvf_unmap_sndq_buffers(nic, sq, sq->head,
61483abb7d7SSunil Goutham hdr->subdesc_cnt);
61583abb7d7SSunil Goutham }
61616f2bccdSSunil Goutham if (skb)
617c94acf80SSunil Goutham dev_kfree_skb_any(skb);
61883abb7d7SSunil Goutham next:
619c94acf80SSunil Goutham sq->head++;
620c94acf80SSunil Goutham sq->head &= (sq->dmem.q_len - 1);
621c94acf80SSunil Goutham }
6224863dea3SSunil Goutham kfree(sq->skbuff);
62316f2bccdSSunil Goutham kfree(sq->xdp_page);
6244863dea3SSunil Goutham nicvf_free_q_desc_mem(nic, &sq->dmem);
6254863dea3SSunil Goutham }
6264863dea3SSunil Goutham
6274863dea3SSunil Goutham static void nicvf_reclaim_snd_queue(struct nicvf *nic,
6284863dea3SSunil Goutham struct queue_set *qs, int qidx)
6294863dea3SSunil Goutham {
6304863dea3SSunil Goutham /* Disable send queue */
6314863dea3SSunil Goutham nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
6324863dea3SSunil Goutham /* Check if SQ is stopped */
6334863dea3SSunil Goutham if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
6344863dea3SSunil Goutham return;
6354863dea3SSunil Goutham /* Reset send queue */
6364863dea3SSunil Goutham nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
6374863dea3SSunil Goutham }
6384863dea3SSunil Goutham
6394863dea3SSunil Goutham static void nicvf_reclaim_rcv_queue(struct nicvf *nic,
6404863dea3SSunil Goutham struct queue_set *qs, int qidx)
6414863dea3SSunil Goutham {
6424863dea3SSunil Goutham union nic_mbx mbx = {};
6434863dea3SSunil Goutham
6444863dea3SSunil Goutham /* Make sure all packets in the pipeline are written back into mem */
6454863dea3SSunil Goutham mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
6464863dea3SSunil Goutham nicvf_send_msg_to_pf(nic, &mbx);
6474863dea3SSunil Goutham }
6484863dea3SSunil Goutham
6494863dea3SSunil Goutham static void nicvf_reclaim_cmp_queue(struct nicvf *nic,
6504863dea3SSunil Goutham struct queue_set *qs, int qidx)
6514863dea3SSunil Goutham {
6524863dea3SSunil Goutham /* Disable timer threshold (doesn't get reset upon CQ reset) */
6534863dea3SSunil Goutham nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
6544863dea3SSunil Goutham /* Disable completion queue */
6554863dea3SSunil Goutham nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
6564863dea3SSunil Goutham /* Reset completion queue */
6574863dea3SSunil Goutham nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
6584863dea3SSunil Goutham }
6594863dea3SSunil Goutham
6604863dea3SSunil Goutham static void nicvf_reclaim_rbdr(struct nicvf *nic,
6614863dea3SSunil Goutham struct rbdr *rbdr, int qidx)
6624863dea3SSunil Goutham {
6634863dea3SSunil Goutham u64 tmp, fifo_state;
6644863dea3SSunil Goutham int timeout = 10;
6654863dea3SSunil Goutham
6664863dea3SSunil Goutham /* Save head and tail pointers for freeing up buffers */
6674863dea3SSunil Goutham rbdr->head = nicvf_queue_reg_read(nic,
6684863dea3SSunil Goutham NIC_QSET_RBDR_0_1_HEAD,
6694863dea3SSunil Goutham qidx) >> 3;
6704863dea3SSunil Goutham rbdr->tail = nicvf_queue_reg_read(nic,
6714863dea3SSunil Goutham NIC_QSET_RBDR_0_1_TAIL,
6724863dea3SSunil Goutham qidx) >> 3;
6734863dea3SSunil Goutham
6744863dea3SSunil Goutham /* If RBDR FIFO is in 'FAIL' state then do a reset first
6754863dea3SSunil Goutham * before reclaiming.
6764863dea3SSunil Goutham */
6774863dea3SSunil Goutham fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
6784863dea3SSunil Goutham if (((fifo_state >> 62) & 0x03) == 0x3)
6794863dea3SSunil Goutham nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
6804863dea3SSunil Goutham qidx, NICVF_RBDR_RESET);
6814863dea3SSunil Goutham
6824863dea3SSunil Goutham /* Disable RBDR */
6834863dea3SSunil Goutham nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
6844863dea3SSunil Goutham if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
6854863dea3SSunil Goutham return;
6864863dea3SSunil Goutham while (1) {
6874863dea3SSunil Goutham tmp = nicvf_queue_reg_read(nic,
6884863dea3SSunil Goutham NIC_QSET_RBDR_0_1_PREFETCH_STATUS,
6894863dea3SSunil Goutham qidx);
6904863dea3SSunil Goutham if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
6914863dea3SSunil Goutham break;
6924863dea3SSunil Goutham usleep_range(1000, 2000);
6934863dea3SSunil Goutham timeout--;
6944863dea3SSunil Goutham if (!timeout) {
6954863dea3SSunil Goutham netdev_err(nic->netdev,
6964863dea3SSunil Goutham "Failed polling on prefetch status\n");
6974863dea3SSunil Goutham return;
6984863dea3SSunil Goutham }
6994863dea3SSunil Goutham }
7004863dea3SSunil Goutham nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
7014863dea3SSunil Goutham qidx, NICVF_RBDR_RESET);
7024863dea3SSunil Goutham
7034863dea3SSunil Goutham if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
7044863dea3SSunil Goutham return;
7054863dea3SSunil Goutham nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
7064863dea3SSunil Goutham if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
7074863dea3SSunil Goutham return;
7084863dea3SSunil Goutham }
7094863dea3SSunil Goutham
710aa2e259bSSunil Goutham void nicvf_config_vlan_stripping(struct nicvf *nic, netdev_features_t features)
711aa2e259bSSunil Goutham {
712aa2e259bSSunil Goutham u64 rq_cfg;
713aa2e259bSSunil Goutham int sqs;
714aa2e259bSSunil Goutham
715aa2e259bSSunil Goutham rq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_RQ_GEN_CFG, 0);
716aa2e259bSSunil Goutham
717aa2e259bSSunil Goutham /* Enable first VLAN stripping */
718aa2e259bSSunil Goutham if (features & NETIF_F_HW_VLAN_CTAG_RX)
719aa2e259bSSunil Goutham rq_cfg |= (1ULL << 25);
720aa2e259bSSunil Goutham else
721aa2e259bSSunil Goutham rq_cfg &= ~(1ULL << 25);
722aa2e259bSSunil Goutham nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);
723aa2e259bSSunil Goutham
724aa2e259bSSunil Goutham /* Configure Secondary Qsets, if any */
725aa2e259bSSunil Goutham for (sqs = 0; sqs < nic->sqs_count; sqs++)
726aa2e259bSSunil Goutham if (nic->snicvf[sqs])
727aa2e259bSSunil Goutham nicvf_queue_reg_write(nic->snicvf[sqs],
728aa2e259bSSunil Goutham NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);
729aa2e259bSSunil Goutham }
730aa2e259bSSunil Goutham
7313458c40dSJerin Jacob static void nicvf_reset_rcv_queue_stats(struct nicvf *nic)
7323458c40dSJerin Jacob {
7333458c40dSJerin Jacob union nic_mbx mbx = {};
7343458c40dSJerin Jacob
735964cb69bSSunil Goutham /* Reset all RQ/SQ and VF stats */
7363458c40dSJerin Jacob mbx.reset_stat.msg = NIC_MBOX_MSG_RESET_STAT_COUNTER;
737964cb69bSSunil Goutham mbx.reset_stat.rx_stat_mask = 0x3FFF;
738964cb69bSSunil Goutham mbx.reset_stat.tx_stat_mask = 0x1F;
7393458c40dSJerin Jacob mbx.reset_stat.rq_stat_mask = 0xFFFF;
740964cb69bSSunil Goutham mbx.reset_stat.sq_stat_mask = 0xFFFF;
7413458c40dSJerin Jacob nicvf_send_msg_to_pf(nic, &mbx);
7423458c40dSJerin Jacob }
7433458c40dSJerin Jacob
7444863dea3SSunil Goutham /* Configures receive queue */
7454863dea3SSunil Goutham static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
7464863dea3SSunil Goutham int qidx, bool enable)
7474863dea3SSunil Goutham {
7484863dea3SSunil Goutham union nic_mbx mbx = {};
7494863dea3SSunil Goutham struct rcv_queue *rq;
7504863dea3SSunil Goutham struct rq_cfg rq_cfg;
7514863dea3SSunil Goutham
7524863dea3SSunil Goutham rq = &qs->rq[qidx];
7534863dea3SSunil Goutham rq->enable = enable;
7544863dea3SSunil Goutham
7554863dea3SSunil Goutham /* Disable receive queue */
7564863dea3SSunil Goutham nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);
7574863dea3SSunil Goutham
7584863dea3SSunil Goutham if (!rq->enable) {
7594863dea3SSunil Goutham nicvf_reclaim_rcv_queue(nic, qs, qidx);
76027e95e36SJesper Dangaard Brouer xdp_rxq_info_unreg(&rq->xdp_rxq);
7614863dea3SSunil Goutham return;
7624863dea3SSunil Goutham }
7634863dea3SSunil Goutham
7644863dea3SSunil Goutham rq->cq_qs = qs->vnic_id;
7654863dea3SSunil Goutham rq->cq_idx = qidx;
7664863dea3SSunil Goutham rq->start_rbdr_qs = qs->vnic_id;
7674863dea3SSunil Goutham rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
7684863dea3SSunil Goutham rq->cont_rbdr_qs = qs->vnic_id;
7694863dea3SSunil Goutham rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
7704863dea3SSunil Goutham /* all writes of RBDR data to be loaded into L2 Cache as well*/
7714863dea3SSunil Goutham rq->caching = 1;
7724863dea3SSunil Goutham
77327e95e36SJesper Dangaard Brouer /* Driver has no proper error path for a failed XDP RX-queue info registration */
774b02e5a0eSBjörn Töpel WARN_ON(xdp_rxq_info_reg(&rq->xdp_rxq, nic->netdev, qidx, 0) < 0);
77527e95e36SJesper Dangaard Brouer
7764863dea3SSunil Goutham /* Send a mailbox msg to PF to config RQ */
7774863dea3SSunil Goutham mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
7784863dea3SSunil Goutham mbx.rq.qs_num = qs->vnic_id;
7794863dea3SSunil Goutham mbx.rq.rq_num = qidx;
780e701a258SColin Ian King mbx.rq.cfg = ((u64)rq->caching << 26) | (rq->cq_qs << 19) |
7814863dea3SSunil Goutham (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
7824863dea3SSunil Goutham (rq->cont_qs_rbdr_idx << 8) |
7834863dea3SSunil Goutham (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx);
7844863dea3SSunil Goutham nicvf_send_msg_to_pf(nic, &mbx);
7854863dea3SSunil Goutham
7864863dea3SSunil Goutham mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
787d5b2d7a7SSunil Goutham mbx.rq.cfg = BIT_ULL(63) | BIT_ULL(62) |
788d5b2d7a7SSunil Goutham (RQ_PASS_RBDR_LVL << 16) | (RQ_PASS_CQ_LVL << 8) |
789d5b2d7a7SSunil Goutham (qs->vnic_id << 0);
7904863dea3SSunil Goutham nicvf_send_msg_to_pf(nic, &mbx);
7914863dea3SSunil Goutham
7924863dea3SSunil Goutham /* RQ drop config
7934863dea3SSunil Goutham * Enable CQ drop to reserve sufficient CQEs for all tx packets
7944863dea3SSunil Goutham */
7954863dea3SSunil Goutham mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
796d5b2d7a7SSunil Goutham mbx.rq.cfg = BIT_ULL(63) | BIT_ULL(62) |
797d5b2d7a7SSunil Goutham (RQ_PASS_RBDR_LVL << 40) | (RQ_DROP_RBDR_LVL << 32) |
798d5b2d7a7SSunil Goutham (RQ_PASS_CQ_LVL << 16) | (RQ_DROP_CQ_LVL << 8);
7994863dea3SSunil Goutham nicvf_send_msg_to_pf(nic, &mbx);
8004863dea3SSunil Goutham
801cadcf95aSSunil Goutham if (!nic->sqs_mode && (qidx == 0)) {
80236fa35d2SThanneeru Srinivasulu /* Enable checking L3/L4 length and TCP/UDP checksums
80336fa35d2SThanneeru Srinivasulu * Also allow IPv6 pkts with zero UDP checksum.
80436fa35d2SThanneeru Srinivasulu */
805cadcf95aSSunil Goutham nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0,
80636fa35d2SThanneeru Srinivasulu (BIT(24) | BIT(23) | BIT(21) | BIT(20)));
807aa2e259bSSunil Goutham nicvf_config_vlan_stripping(nic, nic->netdev->features);
808cadcf95aSSunil Goutham }
8094863dea3SSunil Goutham
8104863dea3SSunil Goutham /* Enable Receive queue */
811161de2caSxypron.glpk@gmx.de memset(&rq_cfg, 0, sizeof(struct rq_cfg));
8124863dea3SSunil Goutham rq_cfg.ena = 1;
8134863dea3SSunil Goutham rq_cfg.tcp_ena = 0;
8144863dea3SSunil Goutham nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg);
8154863dea3SSunil Goutham }
8164863dea3SSunil Goutham
8174863dea3SSunil Goutham /* Configures completion queue */
8184863dea3SSunil Goutham void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
8194863dea3SSunil Goutham int qidx, bool enable)
8204863dea3SSunil Goutham {
8214863dea3SSunil Goutham struct cmp_queue *cq;
8224863dea3SSunil Goutham struct cq_cfg cq_cfg;
8234863dea3SSunil Goutham
8244863dea3SSunil Goutham cq = &qs->cq[qidx];
8254863dea3SSunil Goutham cq->enable = enable;
8264863dea3SSunil Goutham
8274863dea3SSunil Goutham if (!cq->enable) {
8284863dea3SSunil Goutham nicvf_reclaim_cmp_queue(nic, qs, qidx);
8294863dea3SSunil Goutham return;
8304863dea3SSunil Goutham }
8314863dea3SSunil Goutham
8324863dea3SSunil Goutham /* Reset completion queue */
8334863dea3SSunil Goutham nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
8344863dea3SSunil Goutham
8354863dea3SSunil Goutham if (!cq->enable)
8364863dea3SSunil Goutham return;
8374863dea3SSunil Goutham
8384863dea3SSunil Goutham spin_lock_init(&cq->lock);
8394863dea3SSunil Goutham /* Set completion queue base address */
8404863dea3SSunil Goutham nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE,
8414863dea3SSunil Goutham qidx, (u64)(cq->dmem.phys_base));
8424863dea3SSunil Goutham
8434863dea3SSunil Goutham /* Enable Completion queue */
844161de2caSxypron.glpk@gmx.de memset(&cq_cfg, 0, sizeof(struct cq_cfg));
8454863dea3SSunil Goutham cq_cfg.ena = 1;
8464863dea3SSunil Goutham cq_cfg.reset = 0;
8474863dea3SSunil Goutham cq_cfg.caching = 0;
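/* qsize is programmed as log2(queue length in units of 1K entries) */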
848fff4ffddSSunil Goutham cq_cfg.qsize = ilog2(qs->cq_len >> 10);
8494863dea3SSunil Goutham cq_cfg.avg_con = 0;
8504863dea3SSunil Goutham nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(u64 *)&cq_cfg);
8514863dea3SSunil Goutham
8524863dea3SSunil Goutham /* Set threshold value for interrupt generation */
8534863dea3SSunil Goutham nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
8544863dea3SSunil Goutham nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2,
855006394a7SSunil Goutham qidx, CMP_QUEUE_TIMER_THRESH);
8564863dea3SSunil Goutham }
8574863dea3SSunil Goutham
8584863dea3SSunil Goutham /* Configures transmit queue */
8594863dea3SSunil Goutham static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
8604863dea3SSunil Goutham int qidx, bool enable)
8614863dea3SSunil Goutham {
8624863dea3SSunil Goutham union nic_mbx mbx = {};
8634863dea3SSunil Goutham struct snd_queue *sq;
8644863dea3SSunil Goutham struct sq_cfg sq_cfg;
8654863dea3SSunil Goutham
8664863dea3SSunil Goutham sq = &qs->sq[qidx];
8674863dea3SSunil Goutham sq->enable = enable;
8684863dea3SSunil Goutham
8694863dea3SSunil Goutham if (!sq->enable) {
8704863dea3SSunil Goutham nicvf_reclaim_snd_queue(nic, qs, qidx);
8714863dea3SSunil Goutham return;
8724863dea3SSunil Goutham }
8734863dea3SSunil Goutham
8744863dea3SSunil Goutham /* Reset send queue */
8754863dea3SSunil Goutham nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
8764863dea3SSunil Goutham
8774863dea3SSunil Goutham sq->cq_qs = qs->vnic_id;
8784863dea3SSunil Goutham sq->cq_idx = qidx;
8794863dea3SSunil Goutham
8804863dea3SSunil Goutham /* Send a mailbox msg to PF to config SQ */
8814863dea3SSunil Goutham mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
8824863dea3SSunil Goutham mbx.sq.qs_num = qs->vnic_id;
8834863dea3SSunil Goutham mbx.sq.sq_num = qidx;
88492dc8769SSunil Goutham mbx.sq.sqs_mode = nic->sqs_mode;
8854863dea3SSunil Goutham mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
8864863dea3SSunil Goutham nicvf_send_msg_to_pf(nic, &mbx);
8874863dea3SSunil Goutham
8884863dea3SSunil Goutham /* Set queue base address */
8894863dea3SSunil Goutham nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE,
8904863dea3SSunil Goutham qidx, (u64)(sq->dmem.phys_base));
8914863dea3SSunil Goutham
8924863dea3SSunil Goutham /* Enable send queue & set queue size */
893161de2caSxypron.glpk@gmx.de memset(&sq_cfg, 0, sizeof(struct sq_cfg));
8944863dea3SSunil Goutham sq_cfg.ena = 1;
8954863dea3SSunil Goutham sq_cfg.reset = 0;
8964863dea3SSunil Goutham sq_cfg.ldwb = 0;
897fff4ffddSSunil Goutham sq_cfg.qsize = ilog2(qs->sq_len >> 10);
8984863dea3SSunil Goutham sq_cfg.tstmp_bgx_intf = 0;
899fff4ffddSSunil Goutham /* CQ's level at which HW will stop processing SQEs to avoid
900fff4ffddSSunil Goutham * transmitting a pkt with no space in CQ to post CQE_TX.
901fff4ffddSSunil Goutham */
902fff4ffddSSunil Goutham sq_cfg.cq_limit = (CMP_QUEUE_PIPELINE_RSVD * 256) / qs->cq_len;
9034863dea3SSunil Goutham nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg);
9044863dea3SSunil Goutham
9054863dea3SSunil Goutham /* Set threshold value for interrupt generation */
9064863dea3SSunil Goutham nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);
9074863dea3SSunil Goutham
9084863dea3SSunil Goutham /* Set queue:cpu affinity for better load distribution */
9094863dea3SSunil Goutham if (cpu_online(qidx)) {
9104863dea3SSunil Goutham cpumask_set_cpu(qidx, &sq->affinity_mask);
9114863dea3SSunil Goutham netif_set_xps_queue(nic->netdev,
9124863dea3SSunil Goutham &sq->affinity_mask, qidx);
9134863dea3SSunil Goutham }
9144863dea3SSunil Goutham }
9154863dea3SSunil Goutham
9164863dea3SSunil Goutham /* Configures receive buffer descriptor ring */
9174863dea3SSunil Goutham static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs,
9184863dea3SSunil Goutham int qidx, bool enable)
9194863dea3SSunil Goutham {
9204863dea3SSunil Goutham struct rbdr *rbdr;
9214863dea3SSunil Goutham struct rbdr_cfg rbdr_cfg;
9224863dea3SSunil Goutham
9234863dea3SSunil Goutham rbdr = &qs->rbdr[qidx];
9244863dea3SSunil Goutham nicvf_reclaim_rbdr(nic, rbdr, qidx);
9254863dea3SSunil Goutham if (!enable)
9264863dea3SSunil Goutham return;
9274863dea3SSunil Goutham
9284863dea3SSunil Goutham /* Set descriptor base address */
9294863dea3SSunil Goutham nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE,
9304863dea3SSunil Goutham qidx, (u64)(rbdr->dmem.phys_base));
9314863dea3SSunil Goutham
9324863dea3SSunil Goutham /* Enable RBDR & set queue size */
9334863dea3SSunil Goutham /* Buffer size should be in multiples of 128 bytes */
934161de2caSxypron.glpk@gmx.de memset(&rbdr_cfg, 0, sizeof(struct rbdr_cfg));
9354863dea3SSunil Goutham rbdr_cfg.ena = 1;
9364863dea3SSunil Goutham rbdr_cfg.reset = 0;
9374863dea3SSunil Goutham rbdr_cfg.ldwb = 0;
9384863dea3SSunil Goutham rbdr_cfg.qsize = RBDR_SIZE;
9394863dea3SSunil Goutham rbdr_cfg.avg_con = 0;
9404863dea3SSunil Goutham rbdr_cfg.lines = rbdr->dma_size / 128;
9414863dea3SSunil Goutham nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
9424863dea3SSunil Goutham qidx, *(u64 *)&rbdr_cfg);
9434863dea3SSunil Goutham
9444863dea3SSunil Goutham /* Notify HW */
9454863dea3SSunil Goutham nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
9464863dea3SSunil Goutham qidx, qs->rbdr_len - 1);
9474863dea3SSunil Goutham
9484863dea3SSunil Goutham /* Set threshold value for interrupt generation */
9494863dea3SSunil Goutham nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH,
9504863dea3SSunil Goutham qidx, rbdr->thresh - 1);
9514863dea3SSunil Goutham }
9524863dea3SSunil Goutham
9534863dea3SSunil Goutham /* Requests PF to assign and enable Qset */
9544863dea3SSunil Goutham void nicvf_qset_config(struct nicvf *nic, bool enable)
9554863dea3SSunil Goutham {
9564863dea3SSunil Goutham union nic_mbx mbx = {};
9574863dea3SSunil Goutham struct queue_set *qs = nic->qs;
9584863dea3SSunil Goutham struct qs_cfg *qs_cfg;
9594863dea3SSunil Goutham
9604863dea3SSunil Goutham if (!qs) {
9614863dea3SSunil Goutham netdev_warn(nic->netdev,
9624863dea3SSunil Goutham "Qset is still not allocated, don't init queues\n");
9634863dea3SSunil Goutham return;
9644863dea3SSunil Goutham }
9654863dea3SSunil Goutham
9664863dea3SSunil Goutham qs->enable = enable;
9674863dea3SSunil Goutham qs->vnic_id = nic->vf_id;
9684863dea3SSunil Goutham
9694863dea3SSunil Goutham /* Send a mailbox msg to PF to config Qset */
9704863dea3SSunil Goutham mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
9714863dea3SSunil Goutham mbx.qs.num = qs->vnic_id;
97292dc8769SSunil Goutham mbx.qs.sqs_count = nic->sqs_count;
9734863dea3SSunil Goutham
9744863dea3SSunil Goutham mbx.qs.cfg = 0;
9754863dea3SSunil Goutham qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
9764863dea3SSunil Goutham if (qs->enable) {
9774863dea3SSunil Goutham qs_cfg->ena = 1;
9784863dea3SSunil Goutham #ifdef __BIG_ENDIAN
9794863dea3SSunil Goutham qs_cfg->be = 1;
9804863dea3SSunil Goutham #endif
9814863dea3SSunil Goutham qs_cfg->vnic = qs->vnic_id;
9824a875509SSunil Goutham /* Enable Tx timestamping capability */
9834a875509SSunil Goutham if (nic->ptp_clock)
9844a875509SSunil Goutham qs_cfg->send_tstmp_ena = 1;
9854863dea3SSunil Goutham }
9864863dea3SSunil Goutham nicvf_send_msg_to_pf(nic, &mbx);
9874863dea3SSunil Goutham }
9884863dea3SSunil Goutham
9894863dea3SSunil Goutham static void nicvf_free_resources(struct nicvf *nic)
9904863dea3SSunil Goutham {
9914863dea3SSunil Goutham int qidx;
9924863dea3SSunil Goutham struct queue_set *qs = nic->qs;
9934863dea3SSunil Goutham
9944863dea3SSunil Goutham /* Free receive buffer descriptor ring */
9954863dea3SSunil Goutham for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
9964863dea3SSunil Goutham nicvf_free_rbdr(nic, &qs->rbdr[qidx]);
9974863dea3SSunil Goutham
9984863dea3SSunil Goutham /* Free completion queue */
9994863dea3SSunil Goutham for (qidx = 0; qidx < qs->cq_cnt; qidx++)
10004863dea3SSunil Goutham nicvf_free_cmp_queue(nic, &qs->cq[qidx]);
10014863dea3SSunil Goutham
10024863dea3SSunil Goutham /* Free send queue */
10034863dea3SSunil Goutham for (qidx = 0; qidx < qs->sq_cnt; qidx++)
10044863dea3SSunil Goutham nicvf_free_snd_queue(nic, &qs->sq[qidx]);
10054863dea3SSunil Goutham }
10064863dea3SSunil Goutham
10074863dea3SSunil Goutham static int nicvf_alloc_resources(struct nicvf *nic)
10084863dea3SSunil Goutham {
10094863dea3SSunil Goutham int qidx;
10104863dea3SSunil Goutham struct queue_set *qs = nic->qs;
10114863dea3SSunil Goutham
10124863dea3SSunil Goutham /* Alloc receive buffer descriptor ring */
10134863dea3SSunil Goutham for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
10144863dea3SSunil Goutham if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
10154863dea3SSunil Goutham DMA_BUFFER_LEN))
10164863dea3SSunil Goutham goto alloc_fail;
10174863dea3SSunil Goutham }
10184863dea3SSunil Goutham
10194863dea3SSunil Goutham /* Alloc send queue */
10204863dea3SSunil Goutham for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
102116f2bccdSSunil Goutham if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len, qidx))
10224863dea3SSunil Goutham goto alloc_fail;
10234863dea3SSunil Goutham }
10244863dea3SSunil Goutham
10254863dea3SSunil Goutham /* Alloc completion queue */
10264863dea3SSunil Goutham for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
10274863dea3SSunil Goutham if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len))
10284863dea3SSunil Goutham goto alloc_fail;
10294863dea3SSunil Goutham }
10304863dea3SSunil Goutham
10314863dea3SSunil Goutham return 0;
10324863dea3SSunil Goutham alloc_fail:
10334863dea3SSunil Goutham nicvf_free_resources(nic);
10344863dea3SSunil Goutham return -ENOMEM;
10354863dea3SSunil Goutham }
10364863dea3SSunil Goutham
10374863dea3SSunil Goutham int nicvf_set_qset_resources(struct nicvf *nic)
10384863dea3SSunil Goutham {
10394863dea3SSunil Goutham struct queue_set *qs;
10404863dea3SSunil Goutham
10414863dea3SSunil Goutham qs = devm_kzalloc(&nic->pdev->dev, sizeof(*qs), GFP_KERNEL);
10424863dea3SSunil Goutham if (!qs)
10434863dea3SSunil Goutham return -ENOMEM;
10444863dea3SSunil Goutham nic->qs = qs;
10454863dea3SSunil Goutham
10464863dea3SSunil Goutham /* Set count of each queue */
10473a397ebeSSunil Goutham qs->rbdr_cnt = DEFAULT_RBDR_CNT;
10483a397ebeSSunil Goutham qs->rq_cnt = min_t(u8, MAX_RCV_QUEUES_PER_QS, num_online_cpus());
10493a397ebeSSunil Goutham qs->sq_cnt = min_t(u8, MAX_SND_QUEUES_PER_QS, num_online_cpus());
10503a397ebeSSunil Goutham qs->cq_cnt = max_t(u8, qs->rq_cnt, qs->sq_cnt);
10514863dea3SSunil Goutham
10524863dea3SSunil Goutham /* Set queue lengths */
10534863dea3SSunil Goutham qs->rbdr_len = RCV_BUF_COUNT;
10544863dea3SSunil Goutham qs->sq_len = SND_QUEUE_LEN;
10554863dea3SSunil Goutham qs->cq_len = CMP_QUEUE_LEN;
105692dc8769SSunil Goutham
105792dc8769SSunil Goutham nic->rx_queues = qs->rq_cnt;
105892dc8769SSunil Goutham nic->tx_queues = qs->sq_cnt;
105905c773f5SSunil Goutham nic->xdp_tx_queues = 0;
106092dc8769SSunil Goutham
10614863dea3SSunil Goutham return 0;
10624863dea3SSunil Goutham }
10634863dea3SSunil Goutham
10644863dea3SSunil Goutham int nicvf_config_data_transfer(struct nicvf *nic, bool enable)
10654863dea3SSunil Goutham {
10664863dea3SSunil Goutham bool disable = false;
10674863dea3SSunil Goutham struct queue_set *qs = nic->qs;
1068fff4ffddSSunil Goutham struct queue_set *pqs = nic->pnicvf->qs;
10694863dea3SSunil Goutham int qidx;
10704863dea3SSunil Goutham
10714863dea3SSunil Goutham if (!qs)
10724863dea3SSunil Goutham return 0;
10734863dea3SSunil Goutham
1074fff4ffddSSunil Goutham /* Take primary VF's queue lengths.
1075fff4ffddSSunil Goutham * This is needed to take queue lengths set from ethtool
1076fff4ffddSSunil Goutham * into consideration.
1077fff4ffddSSunil Goutham */
1078fff4ffddSSunil Goutham if (nic->sqs_mode && pqs) {
1079fff4ffddSSunil Goutham qs->cq_len = pqs->cq_len;
1080fff4ffddSSunil Goutham qs->sq_len = pqs->sq_len;
1081fff4ffddSSunil Goutham }
1082fff4ffddSSunil Goutham
10834863dea3SSunil Goutham if (enable) {
10844863dea3SSunil Goutham if (nicvf_alloc_resources(nic))
10854863dea3SSunil Goutham return -ENOMEM;
10864863dea3SSunil Goutham
10874863dea3SSunil Goutham for (qidx = 0; qidx < qs->sq_cnt; qidx++)
10884863dea3SSunil Goutham nicvf_snd_queue_config(nic, qs, qidx, enable);
10894863dea3SSunil Goutham for (qidx = 0; qidx < qs->cq_cnt; qidx++)
10904863dea3SSunil Goutham nicvf_cmp_queue_config(nic, qs, qidx, enable);
10914863dea3SSunil Goutham for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
10924863dea3SSunil Goutham nicvf_rbdr_config(nic, qs, qidx, enable);
10934863dea3SSunil Goutham for (qidx = 0; qidx < qs->rq_cnt; qidx++)
10944863dea3SSunil Goutham nicvf_rcv_queue_config(nic, qs, qidx, enable);
10954863dea3SSunil Goutham } else {
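/* Teardown: quiesce the receive side (RQs, then RBDRs) before the
 * send and completion queues, then free all resources.
 */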
10964863dea3SSunil Goutham for (qidx = 0; qidx < qs->rq_cnt; qidx++)
10974863dea3SSunil Goutham nicvf_rcv_queue_config(nic, qs, qidx, disable);
10984863dea3SSunil Goutham for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
10994863dea3SSunil Goutham nicvf_rbdr_config(nic, qs, qidx, disable);
11004863dea3SSunil Goutham for (qidx = 0; qidx < qs->sq_cnt; qidx++)
11014863dea3SSunil Goutham nicvf_snd_queue_config(nic, qs, qidx, disable);
11024863dea3SSunil Goutham for (qidx = 0; qidx < qs->cq_cnt; qidx++)
11034863dea3SSunil Goutham nicvf_cmp_queue_config(nic, qs, qidx, disable);
11044863dea3SSunil Goutham
11054863dea3SSunil Goutham nicvf_free_resources(nic);
11064863dea3SSunil Goutham }
11074863dea3SSunil Goutham
11083458c40dSJerin Jacob /* Reset RXQ's stats.
11093458c40dSJerin Jacob * SQ's stats will get reset automatically once SQ is reset.
11103458c40dSJerin Jacob */
11113458c40dSJerin Jacob nicvf_reset_rcv_queue_stats(nic);
11123458c40dSJerin Jacob
11134863dea3SSunil Goutham return 0;
11144863dea3SSunil Goutham }
11154863dea3SSunil Goutham
11164863dea3SSunil Goutham /* Get a free desc from SQ
11174863dea3SSunil Goutham * returns the descriptor index (qentry)
11184863dea3SSunil Goutham */
nicvf_get_sq_desc(struct snd_queue * sq,int desc_cnt)11194863dea3SSunil Goutham static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
11204863dea3SSunil Goutham {
11214863dea3SSunil Goutham int qentry;
11224863dea3SSunil Goutham
11234863dea3SSunil Goutham qentry = sq->tail;
112416f2bccdSSunil Goutham if (!sq->is_xdp)
11254863dea3SSunil Goutham atomic_sub(desc_cnt, &sq->free_cnt);
112616f2bccdSSunil Goutham else
112716f2bccdSSunil Goutham sq->xdp_free_cnt -= desc_cnt;
11284863dea3SSunil Goutham sq->tail += desc_cnt;
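/* Queue length is a power of two, so masking with (q_len - 1)
 * wraps the tail around the ring.
 */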
11294863dea3SSunil Goutham sq->tail &= (sq->dmem.q_len - 1);
11304863dea3SSunil Goutham
11314863dea3SSunil Goutham return qentry;
11324863dea3SSunil Goutham }
11334863dea3SSunil Goutham
113483abb7d7SSunil Goutham /* Roll back to the previous tail pointer when descriptors are not used */
nicvf_rollback_sq_desc(struct snd_queue * sq,int qentry,int desc_cnt)113583abb7d7SSunil Goutham static inline void nicvf_rollback_sq_desc(struct snd_queue *sq,
113683abb7d7SSunil Goutham int qentry, int desc_cnt)
113783abb7d7SSunil Goutham {
113883abb7d7SSunil Goutham sq->tail = qentry;
113983abb7d7SSunil Goutham atomic_add(desc_cnt, &sq->free_cnt);
114083abb7d7SSunil Goutham }
114183abb7d7SSunil Goutham
11424863dea3SSunil Goutham /* Free descriptor back to SQ for future use */
nicvf_put_sq_desc(struct snd_queue * sq,int desc_cnt)11434863dea3SSunil Goutham void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
11444863dea3SSunil Goutham {
114516f2bccdSSunil Goutham if (!sq->is_xdp)
11464863dea3SSunil Goutham atomic_add(desc_cnt, &sq->free_cnt);
114716f2bccdSSunil Goutham else
114816f2bccdSSunil Goutham sq->xdp_free_cnt += desc_cnt;
11494863dea3SSunil Goutham sq->head += desc_cnt;
11504863dea3SSunil Goutham sq->head &= (sq->dmem.q_len - 1);
11514863dea3SSunil Goutham }
11524863dea3SSunil Goutham
nicvf_get_nxt_sqentry(struct snd_queue * sq,int qentry)11534863dea3SSunil Goutham static inline int nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
11544863dea3SSunil Goutham {
11554863dea3SSunil Goutham qentry++;
11564863dea3SSunil Goutham qentry &= (sq->dmem.q_len - 1);
11574863dea3SSunil Goutham return qentry;
11584863dea3SSunil Goutham }
11594863dea3SSunil Goutham
nicvf_sq_enable(struct nicvf * nic,struct snd_queue * sq,int qidx)11604863dea3SSunil Goutham void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
11614863dea3SSunil Goutham {
11624863dea3SSunil Goutham u64 sq_cfg;
11634863dea3SSunil Goutham
11644863dea3SSunil Goutham sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
11654863dea3SSunil Goutham sq_cfg |= NICVF_SQ_EN;
11664863dea3SSunil Goutham nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
11674863dea3SSunil Goutham /* Ring doorbell so that H/W restarts processing SQEs */
11684863dea3SSunil Goutham nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
11694863dea3SSunil Goutham }
11704863dea3SSunil Goutham
nicvf_sq_disable(struct nicvf * nic,int qidx)11714863dea3SSunil Goutham void nicvf_sq_disable(struct nicvf *nic, int qidx)
11724863dea3SSunil Goutham {
11734863dea3SSunil Goutham u64 sq_cfg;
11744863dea3SSunil Goutham
11754863dea3SSunil Goutham sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
11764863dea3SSunil Goutham sq_cfg &= ~NICVF_SQ_EN;
11774863dea3SSunil Goutham nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
11784863dea3SSunil Goutham }
11794863dea3SSunil Goutham
nicvf_sq_free_used_descs(struct net_device * netdev,struct snd_queue * sq,int qidx)11804863dea3SSunil Goutham void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq,
11814863dea3SSunil Goutham int qidx)
11824863dea3SSunil Goutham {
1183723d5e5bSZheng zengkai u64 head;
11844863dea3SSunil Goutham struct sk_buff *skb;
11854863dea3SSunil Goutham struct nicvf *nic = netdev_priv(netdev);
11864863dea3SSunil Goutham struct sq_hdr_subdesc *hdr;
11874863dea3SSunil Goutham
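/* Convert the HW head pointer to a subdescriptor index; each
 * subdescriptor is SND_QUEUE_DESC_SIZE (16) bytes, hence the shift by 4.
 */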
11884863dea3SSunil Goutham head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
11894863dea3SSunil Goutham while (sq->head != head) {
11904863dea3SSunil Goutham hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
11914863dea3SSunil Goutham if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
11924863dea3SSunil Goutham nicvf_put_sq_desc(sq, 1);
11934863dea3SSunil Goutham continue;
11944863dea3SSunil Goutham }
11954863dea3SSunil Goutham skb = (struct sk_buff *)sq->skbuff[sq->head];
1196143ceb0bSSunil Goutham if (skb)
1197143ceb0bSSunil Goutham dev_kfree_skb_any(skb);
11984863dea3SSunil Goutham atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets);
11994863dea3SSunil Goutham atomic64_add(hdr->tot_len,
12004863dea3SSunil Goutham (atomic64_t *)&netdev->stats.tx_bytes);
12014863dea3SSunil Goutham nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
12024863dea3SSunil Goutham }
12034863dea3SSunil Goutham }
12044863dea3SSunil Goutham
120516f2bccdSSunil Goutham /* XDP Transmit APIs */
nicvf_xdp_sq_doorbell(struct nicvf * nic,struct snd_queue * sq,int sq_num)120616f2bccdSSunil Goutham void nicvf_xdp_sq_doorbell(struct nicvf *nic,
120716f2bccdSSunil Goutham struct snd_queue *sq, int sq_num)
120816f2bccdSSunil Goutham {
120916f2bccdSSunil Goutham if (!sq->xdp_desc_cnt)
121016f2bccdSSunil Goutham return;
121116f2bccdSSunil Goutham
121216f2bccdSSunil Goutham /* make sure all memory stores are done before ringing doorbell */
121316f2bccdSSunil Goutham wmb();
121416f2bccdSSunil Goutham
121516f2bccdSSunil Goutham /* Inform HW to xmit the queued XDP descriptors */
121616f2bccdSSunil Goutham nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
121716f2bccdSSunil Goutham sq_num, sq->xdp_desc_cnt);
121816f2bccdSSunil Goutham sq->xdp_desc_cnt = 0;
121916f2bccdSSunil Goutham }
122016f2bccdSSunil Goutham
122116f2bccdSSunil Goutham static inline void
nicvf_xdp_sq_add_hdr_subdesc(struct snd_queue * sq,int qentry,int subdesc_cnt,u64 data,int len)122216f2bccdSSunil Goutham nicvf_xdp_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry,
122316f2bccdSSunil Goutham int subdesc_cnt, u64 data, int len)
122416f2bccdSSunil Goutham {
122516f2bccdSSunil Goutham struct sq_hdr_subdesc *hdr;
122616f2bccdSSunil Goutham
122716f2bccdSSunil Goutham hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
122816f2bccdSSunil Goutham memset(hdr, 0, SND_QUEUE_DESC_SIZE);
122916f2bccdSSunil Goutham hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
123016f2bccdSSunil Goutham hdr->subdesc_cnt = subdesc_cnt;
123116f2bccdSSunil Goutham hdr->tot_len = len;
123216f2bccdSSunil Goutham hdr->post_cqe = 1;
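/* Record the page backing this buffer so it can be released when
 * the corresponding TX CQE is processed.
 */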
123316f2bccdSSunil Goutham sq->xdp_page[qentry] = (u64)virt_to_page((void *)data);
123416f2bccdSSunil Goutham }
123516f2bccdSSunil Goutham
nicvf_xdp_sq_append_pkt(struct nicvf * nic,struct snd_queue * sq,u64 bufaddr,u64 dma_addr,u16 len)123616f2bccdSSunil Goutham int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq,
123716f2bccdSSunil Goutham u64 bufaddr, u64 dma_addr, u16 len)
123816f2bccdSSunil Goutham {
123916f2bccdSSunil Goutham int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT;
124016f2bccdSSunil Goutham int qentry;
124116f2bccdSSunil Goutham
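/* Not enough free XDP descriptors; report that the frame was not queued */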
124216f2bccdSSunil Goutham if (subdesc_cnt > sq->xdp_free_cnt)
1243e6dbe939SJesper Dangaard Brouer return 0;
124416f2bccdSSunil Goutham
124516f2bccdSSunil Goutham qentry = nicvf_get_sq_desc(sq, subdesc_cnt);
124616f2bccdSSunil Goutham
124716f2bccdSSunil Goutham nicvf_xdp_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, bufaddr, len);
124816f2bccdSSunil Goutham
124916f2bccdSSunil Goutham qentry = nicvf_get_nxt_sqentry(sq, qentry);
125016f2bccdSSunil Goutham nicvf_sq_add_gather_subdesc(sq, qentry, len, dma_addr);
125116f2bccdSSunil Goutham
125216f2bccdSSunil Goutham sq->xdp_desc_cnt += subdesc_cnt;
125316f2bccdSSunil Goutham
1254e6dbe939SJesper Dangaard Brouer return 1;
125516f2bccdSSunil Goutham }
125616f2bccdSSunil Goutham
12574863dea3SSunil Goutham /* Calculate the number of SQ subdescriptors needed to transmit all
12584863dea3SSunil Goutham * segments of this TSO packet.
12594863dea3SSunil Goutham * Taken from 'Tilera network driver' with a minor modification.
12604863dea3SSunil Goutham */
nicvf_tso_count_subdescs(struct sk_buff * skb)12614863dea3SSunil Goutham static int nicvf_tso_count_subdescs(struct sk_buff *skb)
12624863dea3SSunil Goutham {
12634863dea3SSunil Goutham struct skb_shared_info *sh = skb_shinfo(skb);
1264*504148feSEric Dumazet unsigned int sh_len = skb_tcp_all_headers(skb);
12654863dea3SSunil Goutham unsigned int data_len = skb->len - sh_len;
12664863dea3SSunil Goutham unsigned int p_len = sh->gso_size;
12674863dea3SSunil Goutham long f_id = -1; /* id of the current fragment */
12684863dea3SSunil Goutham long f_size = skb_headlen(skb) - sh_len; /* current fragment size */
12694863dea3SSunil Goutham long f_used = 0; /* bytes used from the current fragment */
12704863dea3SSunil Goutham long n; /* size of the current piece of payload */
12714863dea3SSunil Goutham int num_edescs = 0;
12724863dea3SSunil Goutham int segment;
12734863dea3SSunil Goutham
12744863dea3SSunil Goutham for (segment = 0; segment < sh->gso_segs; segment++) {
12754863dea3SSunil Goutham unsigned int p_used = 0;
12764863dea3SSunil Goutham
12774863dea3SSunil Goutham /* One edesc for header and for each piece of the payload. */
12784863dea3SSunil Goutham for (num_edescs++; p_used < p_len; num_edescs++) {
12794863dea3SSunil Goutham /* Advance as needed. */
12804863dea3SSunil Goutham while (f_used >= f_size) {
12814863dea3SSunil Goutham f_id++;
12824863dea3SSunil Goutham f_size = skb_frag_size(&sh->frags[f_id]);
12834863dea3SSunil Goutham f_used = 0;
12844863dea3SSunil Goutham }
12854863dea3SSunil Goutham
12864863dea3SSunil Goutham /* Use bytes from the current fragment. */
12874863dea3SSunil Goutham n = p_len - p_used;
12884863dea3SSunil Goutham if (n > f_size - f_used)
12894863dea3SSunil Goutham n = f_size - f_used;
12904863dea3SSunil Goutham f_used += n;
12914863dea3SSunil Goutham p_used += n;
12924863dea3SSunil Goutham }
12934863dea3SSunil Goutham
12944863dea3SSunil Goutham /* The last segment may be less than gso_size. */
12954863dea3SSunil Goutham data_len -= p_len;
12964863dea3SSunil Goutham if (data_len < p_len)
12974863dea3SSunil Goutham p_len = data_len;
12984863dea3SSunil Goutham }
12994863dea3SSunil Goutham
13004863dea3SSunil Goutham /* '+ gso_segs' for the SQ_HDR_SUBDESCs of each segment */
13014863dea3SSunil Goutham return num_edescs + sh->gso_segs;
13024863dea3SSunil Goutham }
13034863dea3SSunil Goutham
13047ceb8a13SSunil Goutham #define POST_CQE_DESC_COUNT 2
13057ceb8a13SSunil Goutham
13064863dea3SSunil Goutham /* Get the number of SQ descriptors needed to xmit this skb */
nicvf_sq_subdesc_required(struct nicvf * nic,struct sk_buff * skb)13074863dea3SSunil Goutham static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
13084863dea3SSunil Goutham {
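/* Minimum per packet: one HDR plus one GATHER subdescriptor */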
13094863dea3SSunil Goutham int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT;
13104863dea3SSunil Goutham
131140fb5f8aSSunil Goutham if (skb_shinfo(skb)->gso_size && !nic->hw_tso) {
13124863dea3SSunil Goutham subdesc_cnt = nicvf_tso_count_subdescs(skb);
13134863dea3SSunil Goutham return subdesc_cnt;
13144863dea3SSunil Goutham }
13154863dea3SSunil Goutham
13167ceb8a13SSunil Goutham /* Dummy descriptors to get TSO pkt completion notification */
13177ceb8a13SSunil Goutham if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size)
13187ceb8a13SSunil Goutham subdesc_cnt += POST_CQE_DESC_COUNT;
13197ceb8a13SSunil Goutham
13204863dea3SSunil Goutham if (skb_shinfo(skb)->nr_frags)
13214863dea3SSunil Goutham subdesc_cnt += skb_shinfo(skb)->nr_frags;
13224863dea3SSunil Goutham
13234863dea3SSunil Goutham return subdesc_cnt;
13244863dea3SSunil Goutham }
13254863dea3SSunil Goutham
13264863dea3SSunil Goutham /* Add SQ HEADER subdescriptor.
13274863dea3SSunil Goutham * First subdescriptor for every send descriptor.
13284863dea3SSunil Goutham */
13294863dea3SSunil Goutham static inline void
nicvf_sq_add_hdr_subdesc(struct nicvf * nic,struct snd_queue * sq,int qentry,int subdesc_cnt,struct sk_buff * skb,int len)133040fb5f8aSSunil Goutham nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
13314863dea3SSunil Goutham int subdesc_cnt, struct sk_buff *skb, int len)
13324863dea3SSunil Goutham {
13334863dea3SSunil Goutham int proto;
13344863dea3SSunil Goutham struct sq_hdr_subdesc *hdr;
13353a9024f5SThanneeru Srinivasulu union {
13363a9024f5SThanneeru Srinivasulu struct iphdr *v4;
13373a9024f5SThanneeru Srinivasulu struct ipv6hdr *v6;
13383a9024f5SThanneeru Srinivasulu unsigned char *hdr;
13393a9024f5SThanneeru Srinivasulu } ip;
13404863dea3SSunil Goutham
13413a9024f5SThanneeru Srinivasulu ip.hdr = skb_network_header(skb);
13424863dea3SSunil Goutham hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
13434863dea3SSunil Goutham memset(hdr, 0, SND_QUEUE_DESC_SIZE);
13444863dea3SSunil Goutham hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
13457ceb8a13SSunil Goutham
13467ceb8a13SSunil Goutham if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size) {
13477ceb8a13SSunil Goutham /* post_cqe = 0, to avoid HW posting a CQE for every TSO
13487ceb8a13SSunil Goutham * segment transmitted on 88xx.
13497ceb8a13SSunil Goutham */
13507ceb8a13SSunil Goutham hdr->subdesc_cnt = subdesc_cnt - POST_CQE_DESC_COUNT;
13517ceb8a13SSunil Goutham } else {
13527ceb8a13SSunil Goutham sq->skbuff[qentry] = (u64)skb;
13534863dea3SSunil Goutham /* Enable notification via CQE after processing SQE */
13544863dea3SSunil Goutham hdr->post_cqe = 1;
13554863dea3SSunil Goutham /* Number of subdescriptors following this */
13564863dea3SSunil Goutham hdr->subdesc_cnt = subdesc_cnt;
13577ceb8a13SSunil Goutham }
13584863dea3SSunil Goutham hdr->tot_len = len;
13594863dea3SSunil Goutham
13604863dea3SSunil Goutham /* Offload checksum calculation to HW */
13614863dea3SSunil Goutham if (skb->ip_summed == CHECKSUM_PARTIAL) {
1362134059fdSFlorian Westphal if (ip.v4->version == 4)
1363134059fdSFlorian Westphal hdr->csum_l3 = 1; /* Enable IP csum calculation */
13644863dea3SSunil Goutham hdr->l3_offset = skb_network_offset(skb);
13654863dea3SSunil Goutham hdr->l4_offset = skb_transport_offset(skb);
13664863dea3SSunil Goutham
13673a9024f5SThanneeru Srinivasulu proto = (ip.v4->version == 4) ? ip.v4->protocol :
13683a9024f5SThanneeru Srinivasulu ip.v6->nexthdr;
13693a9024f5SThanneeru Srinivasulu
13704863dea3SSunil Goutham switch (proto) {
13714863dea3SSunil Goutham case IPPROTO_TCP:
13724863dea3SSunil Goutham hdr->csum_l4 = SEND_L4_CSUM_TCP;
13734863dea3SSunil Goutham break;
13744863dea3SSunil Goutham case IPPROTO_UDP:
13754863dea3SSunil Goutham hdr->csum_l4 = SEND_L4_CSUM_UDP;
13764863dea3SSunil Goutham break;
13774863dea3SSunil Goutham case IPPROTO_SCTP:
13784863dea3SSunil Goutham hdr->csum_l4 = SEND_L4_CSUM_SCTP;
13794863dea3SSunil Goutham break;
13804863dea3SSunil Goutham }
13814863dea3SSunil Goutham }
138240fb5f8aSSunil Goutham
138340fb5f8aSSunil Goutham if (nic->hw_tso && skb_shinfo(skb)->gso_size) {
138440fb5f8aSSunil Goutham hdr->tso = 1;
1385*504148feSEric Dumazet hdr->tso_start = skb_tcp_all_headers(skb);
138640fb5f8aSSunil Goutham hdr->tso_max_paysize = skb_shinfo(skb)->gso_size;
138740fb5f8aSSunil Goutham /* For non-tunneled pkts, point this to L2 ethertype */
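/* (the 2-byte EtherType immediately precedes the network header) */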
138840fb5f8aSSunil Goutham hdr->inner_l3_offset = skb_network_offset(skb) - 2;
1389964cb69bSSunil Goutham this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
139040fb5f8aSSunil Goutham }
13914a875509SSunil Goutham
13924a875509SSunil Goutham /* Check if timestamp is requested */
13934a875509SSunil Goutham if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
13944a875509SSunil Goutham skb_tx_timestamp(skb);
13954a875509SSunil Goutham return;
13964a875509SSunil Goutham }
13974a875509SSunil Goutham
13984a875509SSunil Goutham /* Tx timestamping not supported along with TSO, so ignore request */
13994a875509SSunil Goutham if (skb_shinfo(skb)->gso_size)
14004a875509SSunil Goutham return;
14014a875509SSunil Goutham
14024a875509SSunil Goutham /* HW supports only a single outstanding packet to timestamp */
14034a875509SSunil Goutham if (!atomic_add_unless(&nic->pnicvf->tx_ptp_skbs, 1, 1))
14044a875509SSunil Goutham return;
14054a875509SSunil Goutham
14064a875509SSunil Goutham /* Mark the SKB for later reference */
14074a875509SSunil Goutham skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
14084a875509SSunil Goutham
14094a875509SSunil Goutham /* Finally enable timestamp generation
14104a875509SSunil Goutham * Since 'post_cqe' is also set, two CQEs will be posted
14114a875509SSunil Goutham * for this packet i.e CQE_TYPE_SEND and CQE_TYPE_SEND_PTP.
14124a875509SSunil Goutham */
14134a875509SSunil Goutham hdr->tstmp = 1;
14144863dea3SSunil Goutham }
14154863dea3SSunil Goutham
14164863dea3SSunil Goutham /* SQ GATHER subdescriptor
14174863dea3SSunil Goutham * Must follow HDR descriptor
14184863dea3SSunil Goutham */
nicvf_sq_add_gather_subdesc(struct snd_queue * sq,int qentry,int size,u64 data)14194863dea3SSunil Goutham static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
14204863dea3SSunil Goutham int size, u64 data)
14214863dea3SSunil Goutham {
14224863dea3SSunil Goutham struct sq_gather_subdesc *gather;
14234863dea3SSunil Goutham
14244863dea3SSunil Goutham qentry &= (sq->dmem.q_len - 1);
14254863dea3SSunil Goutham gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry);
14264863dea3SSunil Goutham
14274863dea3SSunil Goutham memset(gather, 0, SND_QUEUE_DESC_SIZE);
14284863dea3SSunil Goutham gather->subdesc_type = SQ_DESC_TYPE_GATHER;
14294b561c17SSunil Goutham gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
14304863dea3SSunil Goutham gather->size = size;
14314863dea3SSunil Goutham gather->addr = data;
14324863dea3SSunil Goutham }
14334863dea3SSunil Goutham
14347ceb8a13SSunil Goutham /* Add HDR + IMMEDIATE subdescriptors right after descriptors of a TSO
14357ceb8a13SSunil Goutham * packet so that a CQE is posted as a notification for transmission of
14367ceb8a13SSunil Goutham * TSO packet.
14377ceb8a13SSunil Goutham */
nicvf_sq_add_cqe_subdesc(struct snd_queue * sq,int qentry,int tso_sqe,struct sk_buff * skb)14387ceb8a13SSunil Goutham static inline void nicvf_sq_add_cqe_subdesc(struct snd_queue *sq, int qentry,
14397ceb8a13SSunil Goutham int tso_sqe, struct sk_buff *skb)
14407ceb8a13SSunil Goutham {
14417ceb8a13SSunil Goutham struct sq_imm_subdesc *imm;
14427ceb8a13SSunil Goutham struct sq_hdr_subdesc *hdr;
14437ceb8a13SSunil Goutham
14447ceb8a13SSunil Goutham sq->skbuff[qentry] = (u64)skb;
14457ceb8a13SSunil Goutham
14467ceb8a13SSunil Goutham hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
14477ceb8a13SSunil Goutham memset(hdr, 0, SND_QUEUE_DESC_SIZE);
14487ceb8a13SSunil Goutham hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
14497ceb8a13SSunil Goutham /* Enable notification via CQE after processing SQE */
14507ceb8a13SSunil Goutham hdr->post_cqe = 1;
14517ceb8a13SSunil Goutham /* There is no packet to transmit here */
14527ceb8a13SSunil Goutham hdr->dont_send = 1;
14537ceb8a13SSunil Goutham hdr->subdesc_cnt = POST_CQE_DESC_COUNT - 1;
14547ceb8a13SSunil Goutham hdr->tot_len = 1;
14557ceb8a13SSunil Goutham /* Actual TSO header SQE index, needed for cleanup */
14567ceb8a13SSunil Goutham hdr->rsvd2 = tso_sqe;
14577ceb8a13SSunil Goutham
14587ceb8a13SSunil Goutham qentry = nicvf_get_nxt_sqentry(sq, qentry);
14597ceb8a13SSunil Goutham imm = (struct sq_imm_subdesc *)GET_SQ_DESC(sq, qentry);
14607ceb8a13SSunil Goutham memset(imm, 0, SND_QUEUE_DESC_SIZE);
14617ceb8a13SSunil Goutham imm->subdesc_type = SQ_DESC_TYPE_IMMEDIATE;
14627ceb8a13SSunil Goutham imm->len = 1;
14637ceb8a13SSunil Goutham }
14647ceb8a13SSunil Goutham
nicvf_sq_doorbell(struct nicvf * nic,struct sk_buff * skb,int sq_num,int desc_cnt)14652c204c2bSSunil Goutham static inline void nicvf_sq_doorbell(struct nicvf *nic, struct sk_buff *skb,
14662c204c2bSSunil Goutham int sq_num, int desc_cnt)
14672c204c2bSSunil Goutham {
14682c204c2bSSunil Goutham struct netdev_queue *txq;
14692c204c2bSSunil Goutham
14702c204c2bSSunil Goutham txq = netdev_get_tx_queue(nic->pnicvf->netdev,
14712c204c2bSSunil Goutham skb_get_queue_mapping(skb));
14722c204c2bSSunil Goutham
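/* BQL accounting for the bytes queued on this TX queue */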
14732c204c2bSSunil Goutham netdev_tx_sent_queue(txq, skb->len);
14742c204c2bSSunil Goutham
14752c204c2bSSunil Goutham /* make sure all memory stores are done before ringing doorbell */
14762c204c2bSSunil Goutham smp_wmb();
14772c204c2bSSunil Goutham
14782c204c2bSSunil Goutham /* Inform HW about the newly queued descriptors */
14792c204c2bSSunil Goutham nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
14802c204c2bSSunil Goutham sq_num, desc_cnt);
14812c204c2bSSunil Goutham }
14822c204c2bSSunil Goutham
14834863dea3SSunil Goutham /* Segment a TSO packet into 'gso_size' segments and append
14844863dea3SSunil Goutham * them to SQ for transfer
14854863dea3SSunil Goutham */
nicvf_sq_append_tso(struct nicvf * nic,struct snd_queue * sq,int sq_num,int qentry,struct sk_buff * skb)14864863dea3SSunil Goutham static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
148792dc8769SSunil Goutham int sq_num, int qentry, struct sk_buff *skb)
14884863dea3SSunil Goutham {
14894863dea3SSunil Goutham struct tso_t tso;
14904863dea3SSunil Goutham int seg_subdescs = 0, desc_cnt = 0;
14914863dea3SSunil Goutham int seg_len, total_len, data_left;
14924863dea3SSunil Goutham int hdr_qentry = qentry;
1493761b331cSEric Dumazet int hdr_len;
14944863dea3SSunil Goutham
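/* tso_start() initializes the TSO state and returns the total
 * header length (L2 + L3 + L4).
 */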
1495761b331cSEric Dumazet hdr_len = tso_start(skb, &tso);
1496761b331cSEric Dumazet
14974863dea3SSunil Goutham total_len = skb->len - hdr_len;
14984863dea3SSunil Goutham while (total_len > 0) {
14994863dea3SSunil Goutham char *hdr;
15004863dea3SSunil Goutham
15014863dea3SSunil Goutham /* Save Qentry for adding HDR_SUBDESC at the end */
15024863dea3SSunil Goutham hdr_qentry = qentry;
15034863dea3SSunil Goutham
15044863dea3SSunil Goutham data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
15054863dea3SSunil Goutham total_len -= data_left;
15064863dea3SSunil Goutham
15074863dea3SSunil Goutham /* Add segment's header */
15084863dea3SSunil Goutham qentry = nicvf_get_nxt_sqentry(sq, qentry);
15094863dea3SSunil Goutham hdr = sq->tso_hdrs + qentry * TSO_HEADER_SIZE;
15104863dea3SSunil Goutham tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
15114863dea3SSunil Goutham nicvf_sq_add_gather_subdesc(sq, qentry, hdr_len,
15124863dea3SSunil Goutham sq->tso_hdrs_phys +
15134863dea3SSunil Goutham qentry * TSO_HEADER_SIZE);
15144863dea3SSunil Goutham /* HDR_SUBDESC + GATHER */
15154863dea3SSunil Goutham seg_subdescs = 2;
15164863dea3SSunil Goutham seg_len = hdr_len;
15174863dea3SSunil Goutham
15184863dea3SSunil Goutham /* Add segment's payload fragments */
15194863dea3SSunil Goutham while (data_left > 0) {
15204863dea3SSunil Goutham int size;
15214863dea3SSunil Goutham
15224863dea3SSunil Goutham size = min_t(int, tso.size, data_left);
15234863dea3SSunil Goutham
15244863dea3SSunil Goutham qentry = nicvf_get_nxt_sqentry(sq, qentry);
15254863dea3SSunil Goutham nicvf_sq_add_gather_subdesc(sq, qentry, size,
15264863dea3SSunil Goutham virt_to_phys(tso.data));
15274863dea3SSunil Goutham seg_subdescs++;
15284863dea3SSunil Goutham seg_len += size;
15294863dea3SSunil Goutham
15304863dea3SSunil Goutham data_left -= size;
15314863dea3SSunil Goutham tso_build_data(skb, &tso, size);
15324863dea3SSunil Goutham }
153340fb5f8aSSunil Goutham nicvf_sq_add_hdr_subdesc(nic, sq, hdr_qentry,
15344863dea3SSunil Goutham seg_subdescs - 1, skb, seg_len);
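/* Intermediate segments don't own the skb; only the last segment's
 * HDR entry records it (set after the loop) so it is freed once.
 */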
1535143ceb0bSSunil Goutham sq->skbuff[hdr_qentry] = (u64)NULL;
15364863dea3SSunil Goutham qentry = nicvf_get_nxt_sqentry(sq, qentry);
15374863dea3SSunil Goutham
15384863dea3SSunil Goutham desc_cnt += seg_subdescs;
15394863dea3SSunil Goutham }
15404863dea3SSunil Goutham /* Save SKB in the last segment for freeing */
15414863dea3SSunil Goutham sq->skbuff[hdr_qentry] = (u64)skb;
15424863dea3SSunil Goutham
15432c204c2bSSunil Goutham nicvf_sq_doorbell(nic, skb, sq_num, desc_cnt);
15444863dea3SSunil Goutham
1545964cb69bSSunil Goutham this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
15464863dea3SSunil Goutham return 1;
15474863dea3SSunil Goutham }
15484863dea3SSunil Goutham
15494863dea3SSunil Goutham /* Append an skb to a SQ for packet transfer. */
nicvf_sq_append_skb(struct nicvf * nic,struct snd_queue * sq,struct sk_buff * skb,u8 sq_num)1550bd3ad7d3SSunil Goutham int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
1551bd3ad7d3SSunil Goutham struct sk_buff *skb, u8 sq_num)
15524863dea3SSunil Goutham {
15534863dea3SSunil Goutham int i, size;
155483abb7d7SSunil Goutham int subdesc_cnt, hdr_sqe = 0;
1555bd3ad7d3SSunil Goutham int qentry;
155683abb7d7SSunil Goutham u64 dma_addr;
15574863dea3SSunil Goutham
15584863dea3SSunil Goutham subdesc_cnt = nicvf_sq_subdesc_required(nic, skb);
15594863dea3SSunil Goutham if (subdesc_cnt > atomic_read(&sq->free_cnt))
15604863dea3SSunil Goutham goto append_fail;
15614863dea3SSunil Goutham
15624863dea3SSunil Goutham qentry = nicvf_get_sq_desc(sq, subdesc_cnt);
15634863dea3SSunil Goutham
15644863dea3SSunil Goutham /* Check if it's a TSO packet */
156540fb5f8aSSunil Goutham if (skb_shinfo(skb)->gso_size && !nic->hw_tso)
156692dc8769SSunil Goutham return nicvf_sq_append_tso(nic, sq, sq_num, qentry, skb);
15674863dea3SSunil Goutham
15684863dea3SSunil Goutham /* Add SQ header subdesc */
156940fb5f8aSSunil Goutham nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1,
157040fb5f8aSSunil Goutham skb, skb->len);
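/* Remember the HDR subdescriptor index; needed to unmap the buffer
 * chain if a later DMA mapping fails.
 */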
157183abb7d7SSunil Goutham hdr_sqe = qentry;
15724863dea3SSunil Goutham
15734863dea3SSunil Goutham /* Add SQ gather subdescs */
15744863dea3SSunil Goutham qentry = nicvf_get_nxt_sqentry(sq, qentry);
15754863dea3SSunil Goutham size = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
157683abb7d7SSunil Goutham /* HW will ensure data coherency, CPU sync not required */
157783abb7d7SSunil Goutham dma_addr = dma_map_page_attrs(&nic->pdev->dev, virt_to_page(skb->data),
157883abb7d7SSunil Goutham offset_in_page(skb->data), size,
157983abb7d7SSunil Goutham DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
158083abb7d7SSunil Goutham if (dma_mapping_error(&nic->pdev->dev, dma_addr)) {
158183abb7d7SSunil Goutham nicvf_rollback_sq_desc(sq, qentry, subdesc_cnt);
158283abb7d7SSunil Goutham return 0;
158383abb7d7SSunil Goutham }
158483abb7d7SSunil Goutham
158583abb7d7SSunil Goutham nicvf_sq_add_gather_subdesc(sq, qentry, size, dma_addr);
15864863dea3SSunil Goutham
15874863dea3SSunil Goutham /* Check for scattered buffer */
15884863dea3SSunil Goutham if (!skb_is_nonlinear(skb))
15894863dea3SSunil Goutham goto doorbell;
15904863dea3SSunil Goutham
15914863dea3SSunil Goutham for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1592d7840976SMatthew Wilcox (Oracle) const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
15934863dea3SSunil Goutham
15944863dea3SSunil Goutham qentry = nicvf_get_nxt_sqentry(sq, qentry);
15954863dea3SSunil Goutham size = skb_frag_size(frag);
159683abb7d7SSunil Goutham dma_addr = dma_map_page_attrs(&nic->pdev->dev,
159783abb7d7SSunil Goutham skb_frag_page(frag),
1598b54c9d5bSJonathan Lemon skb_frag_off(frag), size,
159983abb7d7SSunil Goutham DMA_TO_DEVICE,
160083abb7d7SSunil Goutham DMA_ATTR_SKIP_CPU_SYNC);
160183abb7d7SSunil Goutham if (dma_mapping_error(&nic->pdev->dev, dma_addr)) {
160283abb7d7SSunil Goutham /* Unmap the entire chain of buffers mapped so far;
160383abb7d7SSunil Goutham * here 'i' = frags mapped + the skb->data page mapped above.
160483abb7d7SSunil Goutham */
160583abb7d7SSunil Goutham nicvf_unmap_sndq_buffers(nic, sq, hdr_sqe, i);
160683abb7d7SSunil Goutham nicvf_rollback_sq_desc(sq, qentry, subdesc_cnt);
160783abb7d7SSunil Goutham return 0;
160883abb7d7SSunil Goutham }
160983abb7d7SSunil Goutham nicvf_sq_add_gather_subdesc(sq, qentry, size, dma_addr);
16104863dea3SSunil Goutham }
16114863dea3SSunil Goutham
16124863dea3SSunil Goutham doorbell:
16137ceb8a13SSunil Goutham if (nic->t88 && skb_shinfo(skb)->gso_size) {
16147ceb8a13SSunil Goutham qentry = nicvf_get_nxt_sqentry(sq, qentry);
161583abb7d7SSunil Goutham nicvf_sq_add_cqe_subdesc(sq, qentry, hdr_sqe, skb);
16167ceb8a13SSunil Goutham }
16177ceb8a13SSunil Goutham
16182c204c2bSSunil Goutham nicvf_sq_doorbell(nic, skb, sq_num, subdesc_cnt);
16194863dea3SSunil Goutham
16204863dea3SSunil Goutham return 1;
16214863dea3SSunil Goutham
16224863dea3SSunil Goutham append_fail:
162392dc8769SSunil Goutham /* Log against the primary VF's netdev */
162492dc8769SSunil Goutham nic = nic->pnicvf;
16254863dea3SSunil Goutham netdev_dbg(nic->netdev, "Not enough SQ descriptors to xmit pkt\n");
16264863dea3SSunil Goutham return 0;
16274863dea3SSunil Goutham }
16284863dea3SSunil Goutham
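/* rb_lens[] entries are 16-bit lengths packed into 64-bit CQE words;
 * on big-endian hosts the four lengths within each word appear in
 * reverse order, so swap the index within each group of four
 * (e.g. indices 0..3 map to 3..0).
 */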
frag_num(unsigned i)16294863dea3SSunil Goutham static inline unsigned frag_num(unsigned i)
16304863dea3SSunil Goutham {
16314863dea3SSunil Goutham #ifdef __BIG_ENDIAN
16324863dea3SSunil Goutham return (i & ~3) + 3 - (i & 3);
16334863dea3SSunil Goutham #else
16344863dea3SSunil Goutham return i;
16354863dea3SSunil Goutham #endif
16364863dea3SSunil Goutham }
16374863dea3SSunil Goutham
nicvf_unmap_rcv_buffer(struct nicvf * nic,u64 dma_addr,u64 buf_addr,bool xdp)1638c56d91ceSSunil Goutham static void nicvf_unmap_rcv_buffer(struct nicvf *nic, u64 dma_addr,
1639c56d91ceSSunil Goutham u64 buf_addr, bool xdp)
1640c56d91ceSSunil Goutham {
1641c56d91ceSSunil Goutham struct page *page = NULL;
1642c56d91ceSSunil Goutham int len = RCV_FRAG_LEN;
1643c56d91ceSSunil Goutham
1644c56d91ceSSunil Goutham if (xdp) {
1645c56d91ceSSunil Goutham page = virt_to_page(phys_to_virt(buf_addr));
1646c56d91ceSSunil Goutham /* Check if it's a recycled page, if not
1647c56d91ceSSunil Goutham * unmap the DMA mapping.
1648c56d91ceSSunil Goutham *
1649c56d91ceSSunil Goutham * Recycled page holds an extra reference.
1650c56d91ceSSunil Goutham */
1651c56d91ceSSunil Goutham if (page_ref_count(page) != 1)
1652c56d91ceSSunil Goutham return;
1653e3d06ff9SSunil Goutham
1654e6dbe939SJesper Dangaard Brouer len += XDP_PACKET_HEADROOM;
1655c56d91ceSSunil Goutham /* Receive buffers in XDP mode are mapped from page start */
1656c56d91ceSSunil Goutham dma_addr &= PAGE_MASK;
1657c56d91ceSSunil Goutham }
1658c56d91ceSSunil Goutham dma_unmap_page_attrs(&nic->pdev->dev, dma_addr, len,
1659c56d91ceSSunil Goutham DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
1660c56d91ceSSunil Goutham }
1661c56d91ceSSunil Goutham
16624863dea3SSunil Goutham /* Returns SKB for a received packet */
nicvf_get_rcv_skb(struct nicvf * nic,struct cqe_rx_t * cqe_rx,bool xdp)1663c56d91ceSSunil Goutham struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic,
1664c56d91ceSSunil Goutham struct cqe_rx_t *cqe_rx, bool xdp)
16654863dea3SSunil Goutham {
16664863dea3SSunil Goutham int frag;
16674863dea3SSunil Goutham int payload_len = 0;
16684863dea3SSunil Goutham struct sk_buff *skb = NULL;
1669a8671accSSunil Goutham struct page *page;
1670a8671accSSunil Goutham int offset;
16714863dea3SSunil Goutham u16 *rb_lens = NULL;
16724863dea3SSunil Goutham u64 *rb_ptrs = NULL;
167383abb7d7SSunil Goutham u64 phys_addr;
16744863dea3SSunil Goutham
16754863dea3SSunil Goutham rb_lens = (void *)cqe_rx + (3 * sizeof(u64));
167602a72bd8SSunil Goutham /* On all chips except 88xx pass1, CQE_RX2_S is added to
167702a72bd8SSunil Goutham * CQE_RX at word6, hence the buffer pointers move by one word.
167802a72bd8SSunil Goutham *
167902a72bd8SSunil Goutham * Use the existing 'hw_tso' flag, which is set for all chips
168002a72bd8SSunil Goutham * except 88xx pass1, instead of an additional cache line
168102a72bd8SSunil Goutham * access (or miss) from reading the PCI dev's revision.
168202a72bd8SSunil Goutham */
168302a72bd8SSunil Goutham if (!nic->hw_tso)
16844863dea3SSunil Goutham rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64));
168502a72bd8SSunil Goutham else
168602a72bd8SSunil Goutham rb_ptrs = (void *)cqe_rx + (7 * sizeof(u64));
16874863dea3SSunil Goutham
16884863dea3SSunil Goutham for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
16894863dea3SSunil Goutham payload_len = rb_lens[frag_num(frag)];
169083abb7d7SSunil Goutham phys_addr = nicvf_iova_to_phys(nic, *rb_ptrs);
169183abb7d7SSunil Goutham if (!phys_addr) {
169283abb7d7SSunil Goutham if (skb)
169383abb7d7SSunil Goutham dev_kfree_skb_any(skb);
169483abb7d7SSunil Goutham return NULL;
169583abb7d7SSunil Goutham }
169683abb7d7SSunil Goutham
16974863dea3SSunil Goutham if (!frag) {
16984863dea3SSunil Goutham /* First fragment */
1699c56d91ceSSunil Goutham nicvf_unmap_rcv_buffer(nic,
17004863dea3SSunil Goutham *rb_ptrs - cqe_rx->align_pad,
1701c56d91ceSSunil Goutham phys_addr, xdp);
170283abb7d7SSunil Goutham skb = nicvf_rb_ptr_to_skb(nic,
170383abb7d7SSunil Goutham phys_addr - cqe_rx->align_pad,
17044863dea3SSunil Goutham payload_len);
17054863dea3SSunil Goutham if (!skb)
17064863dea3SSunil Goutham return NULL;
17074863dea3SSunil Goutham skb_reserve(skb, cqe_rx->align_pad);
17084863dea3SSunil Goutham skb_put(skb, payload_len);
17094863dea3SSunil Goutham } else {
17104863dea3SSunil Goutham /* Add fragments */
1711c56d91ceSSunil Goutham nicvf_unmap_rcv_buffer(nic, *rb_ptrs, phys_addr, xdp);
171283abb7d7SSunil Goutham page = virt_to_page(phys_to_virt(phys_addr));
171383abb7d7SSunil Goutham offset = phys_to_virt(phys_addr) - page_address(page);
1714a8671accSSunil Goutham skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
1715a8671accSSunil Goutham offset, payload_len, RCV_FRAG_LEN);
17164863dea3SSunil Goutham }
17174863dea3SSunil Goutham /* Next buffer pointer */
17184863dea3SSunil Goutham rb_ptrs++;
17194863dea3SSunil Goutham }
17204863dea3SSunil Goutham return skb;
17214863dea3SSunil Goutham }
17224863dea3SSunil Goutham
nicvf_int_type_to_mask(int int_type,int q_idx)1723b45ceb40SYury Norov static u64 nicvf_int_type_to_mask(int int_type, int q_idx)
17244863dea3SSunil Goutham {
17254863dea3SSunil Goutham u64 reg_val;
17264863dea3SSunil Goutham
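/* CQ, SQ and RBDR interrupts have one bit per queue (selected by
 * q_idx); the remaining interrupt types are single global bits.
 */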
17274863dea3SSunil Goutham switch (int_type) {
17284863dea3SSunil Goutham case NICVF_INTR_CQ:
17294863dea3SSunil Goutham reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
17304863dea3SSunil Goutham break;
17314863dea3SSunil Goutham case NICVF_INTR_SQ:
17324863dea3SSunil Goutham reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
17334863dea3SSunil Goutham break;
17344863dea3SSunil Goutham case NICVF_INTR_RBDR:
17354863dea3SSunil Goutham reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
17364863dea3SSunil Goutham break;
17374863dea3SSunil Goutham case NICVF_INTR_PKT_DROP:
17384863dea3SSunil Goutham reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
17394863dea3SSunil Goutham break;
17404863dea3SSunil Goutham case NICVF_INTR_TCP_TIMER:
17414863dea3SSunil Goutham reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
17424863dea3SSunil Goutham break;
17434863dea3SSunil Goutham case NICVF_INTR_MBOX:
17444863dea3SSunil Goutham reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT);
17454863dea3SSunil Goutham break;
17464863dea3SSunil Goutham case NICVF_INTR_QS_ERR:
1747b45ceb40SYury Norov reg_val = (1ULL << NICVF_INTR_QS_ERR_SHIFT);
17484863dea3SSunil Goutham break;
17494863dea3SSunil Goutham default:
1750b45ceb40SYury Norov reg_val = 0;
17514863dea3SSunil Goutham }
17524863dea3SSunil Goutham
1753b45ceb40SYury Norov return reg_val;
1754b45ceb40SYury Norov }
1755b45ceb40SYury Norov
1756b45ceb40SYury Norov /* Enable interrupt */
nicvf_enable_intr(struct nicvf * nic,int int_type,int q_idx)1757b45ceb40SYury Norov void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)
1758b45ceb40SYury Norov {
1759b45ceb40SYury Norov u64 mask = nicvf_int_type_to_mask(int_type, q_idx);
1760b45ceb40SYury Norov
1761b45ceb40SYury Norov if (!mask) {
1762b45ceb40SYury Norov netdev_dbg(nic->netdev,
1763b45ceb40SYury Norov "Failed to enable interrupt: unknown type\n");
1764b45ceb40SYury Norov return;
1765b45ceb40SYury Norov }
1766b45ceb40SYury Norov nicvf_reg_write(nic, NIC_VF_ENA_W1S,
1767b45ceb40SYury Norov nicvf_reg_read(nic, NIC_VF_ENA_W1S) | mask);
1768b45ceb40SYury Norov }
1769b45ceb40SYury Norov
1770b45ceb40SYury Norov /* Disable interrupt */
nicvf_disable_intr(struct nicvf * nic,int int_type,int q_idx)1771b45ceb40SYury Norov void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx)
1772b45ceb40SYury Norov {
1773b45ceb40SYury Norov u64 mask = nicvf_int_type_to_mask(int_type, q_idx);
1774b45ceb40SYury Norov
1775b45ceb40SYury Norov if (!mask) {
1776b45ceb40SYury Norov netdev_dbg(nic->netdev,
1777b45ceb40SYury Norov "Failed to disable interrupt: unknown type\n");
1778b45ceb40SYury Norov return;
1779b45ceb40SYury Norov }
1780b45ceb40SYury Norov
1781b45ceb40SYury Norov nicvf_reg_write(nic, NIC_VF_ENA_W1C, mask);
1782b45ceb40SYury Norov }
1783b45ceb40SYury Norov
1784b45ceb40SYury Norov /* Clear interrupt */
nicvf_clear_intr(struct nicvf * nic,int int_type,int q_idx)1785b45ceb40SYury Norov void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
1786b45ceb40SYury Norov {
1787b45ceb40SYury Norov u64 mask = nicvf_int_type_to_mask(int_type, q_idx);
1788b45ceb40SYury Norov
1789b45ceb40SYury Norov if (!mask) {
1790b45ceb40SYury Norov netdev_dbg(nic->netdev,
1791b45ceb40SYury Norov "Failed to clear interrupt: unknown type\n");
1792b45ceb40SYury Norov return;
1793b45ceb40SYury Norov }
1794b45ceb40SYury Norov
1795b45ceb40SYury Norov nicvf_reg_write(nic, NIC_VF_INT, mask);
17964863dea3SSunil Goutham }
17974863dea3SSunil Goutham
17984863dea3SSunil Goutham /* Check if interrupt is enabled */
nicvf_is_intr_enabled(struct nicvf * nic,int int_type,int q_idx)17994863dea3SSunil Goutham int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx)
18004863dea3SSunil Goutham {
1801b45ceb40SYury Norov u64 mask = nicvf_int_type_to_mask(int_type, q_idx);
1802b45ceb40SYury Norov /* If the interrupt type is unknown, treat it as disabled. */
1803b45ceb40SYury Norov if (!mask) {
1804b45ceb40SYury Norov netdev_dbg(nic->netdev,
18054863dea3SSunil Goutham "Failed to check interrupt enable: unknown type\n");
1806b45ceb40SYury Norov return 0;
18074863dea3SSunil Goutham }
18084863dea3SSunil Goutham
1809b45ceb40SYury Norov return mask & nicvf_reg_read(nic, NIC_VF_ENA_W1S);
18104863dea3SSunil Goutham }
18114863dea3SSunil Goutham
nicvf_update_rq_stats(struct nicvf * nic,int rq_idx)18124863dea3SSunil Goutham void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
18134863dea3SSunil Goutham {
18144863dea3SSunil Goutham struct rcv_queue *rq;
18154863dea3SSunil Goutham
18164863dea3SSunil Goutham #define GET_RQ_STATS(reg) \
18174863dea3SSunil Goutham nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\
18184863dea3SSunil Goutham (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
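/* Consecutive 64-bit stat registers are 8 bytes apart, hence (reg << 3) */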
18194863dea3SSunil Goutham
18204863dea3SSunil Goutham rq = &nic->qs->rq[rq_idx];
18214863dea3SSunil Goutham rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
18224863dea3SSunil Goutham rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);
18234863dea3SSunil Goutham }
18244863dea3SSunil Goutham
nicvf_update_sq_stats(struct nicvf * nic,int sq_idx)18254863dea3SSunil Goutham void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
18264863dea3SSunil Goutham {
18274863dea3SSunil Goutham struct snd_queue *sq;
18284863dea3SSunil Goutham
18294863dea3SSunil Goutham #define GET_SQ_STATS(reg) \
18304863dea3SSunil Goutham nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\
18314863dea3SSunil Goutham (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
18324863dea3SSunil Goutham
18334863dea3SSunil Goutham sq = &nic->qs->sq[sq_idx];
18344863dea3SSunil Goutham sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS);
18354863dea3SSunil Goutham sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS);
18364863dea3SSunil Goutham }
18374863dea3SSunil Goutham
18384863dea3SSunil Goutham /* Check for errors in the receive cmp.queue entry */
nicvf_check_cqe_rx_errs(struct nicvf * nic,struct cqe_rx_t * cqe_rx)1839ad2ecebdSSunil Goutham int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
18404863dea3SSunil Goutham {
1841bf24e136SJoe Perches netif_err(nic, rx_err, nic->netdev,
1842bf24e136SJoe Perches "RX error CQE err_level 0x%x err_opcode 0x%x\n",
18434863dea3SSunil Goutham cqe_rx->err_level, cqe_rx->err_opcode);
18444863dea3SSunil Goutham
18454863dea3SSunil Goutham switch (cqe_rx->err_opcode) {
18464863dea3SSunil Goutham case CQ_RX_ERROP_RE_PARTIAL:
1847964cb69bSSunil Goutham this_cpu_inc(nic->drv_stats->rx_bgx_truncated_pkts);
18484863dea3SSunil Goutham break;
18494863dea3SSunil Goutham case CQ_RX_ERROP_RE_JABBER:
1850964cb69bSSunil Goutham this_cpu_inc(nic->drv_stats->rx_jabber_errs);
18514863dea3SSunil Goutham break;
18524863dea3SSunil Goutham case CQ_RX_ERROP_RE_FCS:
1853964cb69bSSunil Goutham this_cpu_inc(nic->drv_stats->rx_fcs_errs);
18544863dea3SSunil Goutham break;
18554863dea3SSunil Goutham case CQ_RX_ERROP_RE_RX_CTL:
1856964cb69bSSunil Goutham this_cpu_inc(nic->drv_stats->rx_bgx_errs);
18574863dea3SSunil Goutham break;
18584863dea3SSunil Goutham case CQ_RX_ERROP_PREL2_ERR:
1859964cb69bSSunil Goutham this_cpu_inc(nic->drv_stats->rx_prel2_errs);
18604863dea3SSunil Goutham break;
18614863dea3SSunil Goutham case CQ_RX_ERROP_L2_MAL:
1862964cb69bSSunil Goutham this_cpu_inc(nic->drv_stats->rx_l2_hdr_malformed);
18634863dea3SSunil Goutham break;
18644863dea3SSunil Goutham case CQ_RX_ERROP_L2_OVERSIZE:
1865964cb69bSSunil Goutham this_cpu_inc(nic->drv_stats->rx_oversize);
18664863dea3SSunil Goutham break;
18674863dea3SSunil Goutham case CQ_RX_ERROP_L2_UNDERSIZE:
1868964cb69bSSunil Goutham this_cpu_inc(nic->drv_stats->rx_undersize);
18694863dea3SSunil Goutham break;
18704863dea3SSunil Goutham case CQ_RX_ERROP_L2_LENMISM:
1871964cb69bSSunil Goutham this_cpu_inc(nic->drv_stats->rx_l2_len_mismatch);
18724863dea3SSunil Goutham break;
18734863dea3SSunil Goutham case CQ_RX_ERROP_L2_PCLP:
1874964cb69bSSunil Goutham this_cpu_inc(nic->drv_stats->rx_l2_pclp);
18754863dea3SSunil Goutham break;
18764863dea3SSunil Goutham case CQ_RX_ERROP_IP_NOT:
1877964cb69bSSunil Goutham this_cpu_inc(nic->drv_stats->rx_ip_ver_errs);
18784863dea3SSunil Goutham break;
18794863dea3SSunil Goutham case CQ_RX_ERROP_IP_CSUM_ERR:
1880964cb69bSSunil Goutham this_cpu_inc(nic->drv_stats->rx_ip_csum_errs);
18814863dea3SSunil Goutham break;
18824863dea3SSunil Goutham case CQ_RX_ERROP_IP_MAL:
1883964cb69bSSunil Goutham this_cpu_inc(nic->drv_stats->rx_ip_hdr_malformed);
18844863dea3SSunil Goutham break;
18854863dea3SSunil Goutham case CQ_RX_ERROP_IP_MALD:
1886964cb69bSSunil Goutham this_cpu_inc(nic->drv_stats->rx_ip_payload_malformed);
18874863dea3SSunil Goutham break;
18884863dea3SSunil Goutham case CQ_RX_ERROP_IP_HOP:
1889964cb69bSSunil Goutham this_cpu_inc(nic->drv_stats->rx_ip_ttl_errs);
18904863dea3SSunil Goutham break;
18914863dea3SSunil Goutham case CQ_RX_ERROP_L3_PCLP:
1892964cb69bSSunil Goutham this_cpu_inc(nic->drv_stats->rx_l3_pclp);
18934863dea3SSunil Goutham break;
18944863dea3SSunil Goutham case CQ_RX_ERROP_L4_MAL:
1895964cb69bSSunil Goutham this_cpu_inc(nic->drv_stats->rx_l4_malformed);
18964863dea3SSunil Goutham break;
18974863dea3SSunil Goutham case CQ_RX_ERROP_L4_CHK:
1898964cb69bSSunil Goutham this_cpu_inc(nic->drv_stats->rx_l4_csum_errs);
18994863dea3SSunil Goutham break;
19004863dea3SSunil Goutham case CQ_RX_ERROP_UDP_LEN:
1901964cb69bSSunil Goutham this_cpu_inc(nic->drv_stats->rx_udp_len_errs);
19024863dea3SSunil Goutham break;
19034863dea3SSunil Goutham case CQ_RX_ERROP_L4_PORT:
1904964cb69bSSunil Goutham this_cpu_inc(nic->drv_stats->rx_l4_port_errs);
19054863dea3SSunil Goutham break;
19064863dea3SSunil Goutham case CQ_RX_ERROP_TCP_FLAG:
1907964cb69bSSunil Goutham this_cpu_inc(nic->drv_stats->rx_tcp_flag_errs);
19084863dea3SSunil Goutham break;
19094863dea3SSunil Goutham case CQ_RX_ERROP_TCP_OFFSET:
1910964cb69bSSunil Goutham this_cpu_inc(nic->drv_stats->rx_tcp_offset_errs);
19114863dea3SSunil Goutham break;
19124863dea3SSunil Goutham case CQ_RX_ERROP_L4_PCLP:
1913964cb69bSSunil Goutham this_cpu_inc(nic->drv_stats->rx_l4_pclp);
19144863dea3SSunil Goutham break;
19154863dea3SSunil Goutham case CQ_RX_ERROP_RBDR_TRUNC:
1916964cb69bSSunil Goutham this_cpu_inc(nic->drv_stats->rx_truncated_pkts);
19174863dea3SSunil Goutham break;
19184863dea3SSunil Goutham }
19194863dea3SSunil Goutham
19204863dea3SSunil Goutham return 1;
19214863dea3SSunil Goutham }
19224863dea3SSunil Goutham
19234863dea3SSunil Goutham /* Check for errors in the send cmp.queue entry */
nicvf_check_cqe_tx_errs(struct nicvf * nic,struct cqe_send_t * cqe_tx)1924964cb69bSSunil Goutham int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cqe_send_t *cqe_tx)
19254863dea3SSunil Goutham {
19264863dea3SSunil Goutham switch (cqe_tx->send_status) {
19274863dea3SSunil Goutham case CQ_TX_ERROP_DESC_FAULT:
1928964cb69bSSunil Goutham this_cpu_inc(nic->drv_stats->tx_desc_fault);
19294863dea3SSunil Goutham break;
19304863dea3SSunil Goutham case CQ_TX_ERROP_HDR_CONS_ERR:
1931964cb69bSSunil Goutham this_cpu_inc(nic->drv_stats->tx_hdr_cons_err);
19324863dea3SSunil Goutham break;
19334863dea3SSunil Goutham case CQ_TX_ERROP_SUBDC_ERR:
1934964cb69bSSunil Goutham this_cpu_inc(nic->drv_stats->tx_subdesc_err);
19354863dea3SSunil Goutham break;
1936712c3185SSunil Goutham case CQ_TX_ERROP_MAX_SIZE_VIOL:
1937964cb69bSSunil Goutham this_cpu_inc(nic->drv_stats->tx_max_size_exceeded);
1938712c3185SSunil Goutham break;
19394863dea3SSunil Goutham case CQ_TX_ERROP_IMM_SIZE_OFLOW:
1940964cb69bSSunil Goutham this_cpu_inc(nic->drv_stats->tx_imm_size_oflow);
19414863dea3SSunil Goutham break;
19424863dea3SSunil Goutham case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
1943964cb69bSSunil Goutham this_cpu_inc(nic->drv_stats->tx_data_seq_err);
19444863dea3SSunil Goutham break;
19454863dea3SSunil Goutham case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
1946964cb69bSSunil Goutham this_cpu_inc(nic->drv_stats->tx_mem_seq_err);
19474863dea3SSunil Goutham break;
19484863dea3SSunil Goutham case CQ_TX_ERROP_LOCK_VIOL:
1949964cb69bSSunil Goutham this_cpu_inc(nic->drv_stats->tx_lock_viol);
19504863dea3SSunil Goutham break;
19514863dea3SSunil Goutham case CQ_TX_ERROP_DATA_FAULT:
1952964cb69bSSunil Goutham this_cpu_inc(nic->drv_stats->tx_data_fault);
19534863dea3SSunil Goutham break;
19544863dea3SSunil Goutham case CQ_TX_ERROP_TSTMP_CONFLICT:
1955964cb69bSSunil Goutham this_cpu_inc(nic->drv_stats->tx_tstmp_conflict);
19564863dea3SSunil Goutham break;
19574863dea3SSunil Goutham case CQ_TX_ERROP_TSTMP_TIMEOUT:
1958964cb69bSSunil Goutham this_cpu_inc(nic->drv_stats->tx_tstmp_timeout);
19594863dea3SSunil Goutham break;
19604863dea3SSunil Goutham case CQ_TX_ERROP_MEM_FAULT:
1961964cb69bSSunil Goutham this_cpu_inc(nic->drv_stats->tx_mem_fault);
19624863dea3SSunil Goutham break;
19634863dea3SSunil Goutham case CQ_TX_ERROP_CK_OVERLAP:
1964964cb69bSSunil Goutham this_cpu_inc(nic->drv_stats->tx_csum_overlap);
19654863dea3SSunil Goutham break;
19664863dea3SSunil Goutham case CQ_TX_ERROP_CK_OFLOW:
1967964cb69bSSunil Goutham this_cpu_inc(nic->drv_stats->tx_csum_overflow);
19684863dea3SSunil Goutham break;
19694863dea3SSunil Goutham }
19704863dea3SSunil Goutham
19714863dea3SSunil Goutham return 1;
19724863dea3SSunil Goutham }
1973