// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#include "gve.h"
#include "gve_adminq.h"
#include "gve_utils.h"
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <net/xdp.h>
#include <net/xdp_sock_drv.h>

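/* Release the driver's remaining bias references on the slot's page and free
 * the DMA-mapped page backing a raw-addressing (RDA) data slot.
 */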
static void gve_rx_free_buffer(struct device *dev,
			       struct gve_rx_slot_page_info *page_info,
			       union gve_rx_data_slot *data_slot)
{
	dma_addr_t dma = (dma_addr_t)(be64_to_cpu(data_slot->addr) &
				      GVE_DATA_SLOT_ADDR_PAGE_MASK);

	page_ref_sub(page_info->page, page_info->pagecnt_bias - 1);
	gve_free_page(dev, page_info->page, dma, DMA_FROM_DEVICE);
}

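/* Return every posted Rx buffer: free DMA pages in raw-addressing mode, or
 * drop the page-count bias and release the queue page list (QPL) and its
 * copy pool pages in QPL mode.
 */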
static void gve_rx_unfill_pages(struct gve_priv *priv, struct gve_rx_ring *rx)
{
	u32 slots = rx->mask + 1;
	int i;

	if (rx->data.raw_addressing) {
		for (i = 0; i < slots; i++)
			gve_rx_free_buffer(&priv->pdev->dev, &rx->data.page_info[i],
					   &rx->data.data_ring[i]);
	} else {
		for (i = 0; i < slots; i++)
			page_ref_sub(rx->data.page_info[i].page,
				     rx->data.page_info[i].pagecnt_bias - 1);
		gve_unassign_qpl(priv, rx->data.qpl->id);
		rx->data.qpl = NULL;

		for (i = 0; i < rx->qpl_copy_pool_mask + 1; i++) {
			page_ref_sub(rx->qpl_copy_pool[i].page,
				     rx->qpl_copy_pool[i].pagecnt_bias - 1);
			put_page(rx->qpl_copy_pool[i].page);
		}
	}
	kvfree(rx->data.page_info);
	rx->data.page_info = NULL;
}

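/* Free all resources owned by Rx ring @idx: descriptor ring, queue
 * resources, posted buffers, data slot ring and the QPL copy pool.
 */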
static void gve_rx_free_ring(struct gve_priv *priv, int idx)
{
	struct gve_rx_ring *rx = &priv->rx[idx];
	struct device *dev = &priv->pdev->dev;
	u32 slots = rx->mask + 1;
	size_t bytes;

	gve_rx_remove_from_block(priv, idx);

	bytes = sizeof(struct gve_rx_desc) * priv->rx_desc_cnt;
	dma_free_coherent(dev, bytes, rx->desc.desc_ring, rx->desc.bus);
	rx->desc.desc_ring = NULL;

	dma_free_coherent(dev, sizeof(*rx->q_resources),
			  rx->q_resources, rx->q_resources_bus);
	rx->q_resources = NULL;

	gve_rx_unfill_pages(priv, rx);

	bytes = sizeof(*rx->data.data_ring) * slots;
	dma_free_coherent(dev, bytes, rx->data.data_ring,
			  rx->data.data_bus);
	rx->data.data_ring = NULL;

	kvfree(rx->qpl_copy_pool);
	rx->qpl_copy_pool = NULL;

	netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx);
}

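/* Record the page backing a slot and publish its (DMA or QPL) address to the
 * device. A large page refcount bias is taken up front so per-packet
 * recycling only has to decrement a driver-local counter.
 */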
static void gve_setup_rx_buffer(struct gve_rx_slot_page_info *page_info,
				dma_addr_t addr, struct page *page, __be64 *slot_addr)
{
	page_info->page = page;
	page_info->page_offset = 0;
	page_info->page_address = page_address(page);
	*slot_addr = cpu_to_be64(addr);
	/* The page already has 1 ref */
	page_ref_add(page, INT_MAX - 1);
	page_info->pagecnt_bias = INT_MAX;
}

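/* Allocate and DMA-map a fresh page for a raw-addressing data slot. */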
static int gve_rx_alloc_buffer(struct gve_priv *priv, struct device *dev,
			       struct gve_rx_slot_page_info *page_info,
			       union gve_rx_data_slot *data_slot)
{
	struct page *page;
	dma_addr_t dma;
	int err;

	err = gve_alloc_page(priv, dev, &page, &dma, DMA_FROM_DEVICE,
			     GFP_ATOMIC);
	if (err)
		return err;

	gve_setup_rx_buffer(page_info, dma, page, &data_slot->addr);
	return 0;
}

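/* Post one buffer per ring slot (taken from the QPL, or freshly allocated in
 * raw-addressing mode) and, in QPL mode, seed the copy pool. Returns the
 * number of slots filled, or a negative errno on failure.
 */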
static int gve_prefill_rx_pages(struct gve_rx_ring *rx)
{
	struct gve_priv *priv = rx->gve;
	u32 slots;
	int err;
	int i;
	int j;

	/* Allocate one page per Rx queue slot. Each page is split into two
	 * packet buffers, when possible we "page flip" between the two.
	 */
	slots = rx->mask + 1;

	rx->data.page_info = kvzalloc(slots *
				      sizeof(*rx->data.page_info), GFP_KERNEL);
	if (!rx->data.page_info)
		return -ENOMEM;

	if (!rx->data.raw_addressing) {
		rx->data.qpl = gve_assign_rx_qpl(priv, rx->q_num);
		if (!rx->data.qpl) {
			kvfree(rx->data.page_info);
			rx->data.page_info = NULL;
			return -ENOMEM;
		}
	}
	for (i = 0; i < slots; i++) {
		if (!rx->data.raw_addressing) {
			struct page *page = rx->data.qpl->pages[i];
			dma_addr_t addr = i * PAGE_SIZE;

			gve_setup_rx_buffer(&rx->data.page_info[i], addr, page,
					    &rx->data.data_ring[i].qpl_offset);
			continue;
		}
		err = gve_rx_alloc_buffer(priv, &priv->pdev->dev, &rx->data.page_info[i],
					  &rx->data.data_ring[i]);
		if (err)
			goto alloc_err_rda;
	}

	if (!rx->data.raw_addressing) {
		for (j = 0; j < rx->qpl_copy_pool_mask + 1; j++) {
			struct page *page = alloc_page(GFP_KERNEL);

			if (!page) {
				err = -ENOMEM;
				goto alloc_err_qpl;
			}

			rx->qpl_copy_pool[j].page = page;
			rx->qpl_copy_pool[j].page_offset = 0;
			rx->qpl_copy_pool[j].page_address = page_address(page);

			/* The page already has 1 ref. */
			page_ref_add(page, INT_MAX - 1);
			rx->qpl_copy_pool[j].pagecnt_bias = INT_MAX;
		}
	}

	return slots;

alloc_err_qpl:
	/* Fully free the copy pool pages. */
	while (j--) {
		page_ref_sub(rx->qpl_copy_pool[j].page,
			     rx->qpl_copy_pool[j].pagecnt_bias - 1);
		put_page(rx->qpl_copy_pool[j].page);
	}

	/* Do not fully free QPL pages - only remove the bias added in this
	 * function with gve_setup_rx_buffer.
	 */
	while (i--)
		page_ref_sub(rx->data.page_info[i].page,
			     rx->data.page_info[i].pagecnt_bias - 1);

	gve_unassign_qpl(priv, rx->data.qpl->id);
	rx->data.qpl = NULL;

	return err;

alloc_err_rda:
	while (i--)
		gve_rx_free_buffer(&priv->pdev->dev,
				   &rx->data.page_info[i],
				   &rx->data.data_ring[i]);
	return err;
}

static void gve_rx_ctx_clear(struct gve_rx_ctx *ctx)
{
	ctx->skb_head = NULL;
	ctx->skb_tail = NULL;
	ctx->total_size = 0;
	ctx->frag_cnt = 0;
	ctx->drop_pkt = false;
}

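/* Allocate everything a GQI Rx ring needs: the data slot ring, the QPL copy
 * pool, the posted buffers, the queue resources and the descriptor ring.
 */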
static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
{
	struct gve_rx_ring *rx = &priv->rx[idx];
	struct device *hdev = &priv->pdev->dev;
	u32 slots, npages;
	int filled_pages;
	size_t bytes;
	int err;

	netif_dbg(priv, drv, priv->dev, "allocating rx ring\n");
	/* Make sure everything is zeroed to start with */
	memset(rx, 0, sizeof(*rx));

	rx->gve = priv;
	rx->q_num = idx;

	slots = priv->rx_data_slot_cnt;
	rx->mask = slots - 1;
	rx->data.raw_addressing = priv->queue_format == GVE_GQI_RDA_FORMAT;

	/* alloc rx data ring */
	bytes = sizeof(*rx->data.data_ring) * slots;
	rx->data.data_ring = dma_alloc_coherent(hdev, bytes,
						&rx->data.data_bus,
						GFP_KERNEL);
	if (!rx->data.data_ring)
		return -ENOMEM;

	rx->qpl_copy_pool_mask = min_t(u32, U32_MAX, slots * 2) - 1;
	rx->qpl_copy_pool_head = 0;
	rx->qpl_copy_pool = kvcalloc(rx->qpl_copy_pool_mask + 1,
				     sizeof(rx->qpl_copy_pool[0]),
				     GFP_KERNEL);

	if (!rx->qpl_copy_pool) {
		err = -ENOMEM;
		goto abort_with_slots;
	}

	filled_pages = gve_prefill_rx_pages(rx);
	if (filled_pages < 0) {
		err = -ENOMEM;
		goto abort_with_copy_pool;
	}
	rx->fill_cnt = filled_pages;
	/* Ensure data ring slots (packet buffers) are visible. */
	dma_wmb();

	/* Alloc gve_queue_resources */
	rx->q_resources =
		dma_alloc_coherent(hdev,
				   sizeof(*rx->q_resources),
				   &rx->q_resources_bus,
				   GFP_KERNEL);
	if (!rx->q_resources) {
		err = -ENOMEM;
		goto abort_filled;
	}
	netif_dbg(priv, drv, priv->dev, "rx[%d]->data.data_bus=%lx\n", idx,
		  (unsigned long)rx->data.data_bus);

	/* alloc rx desc ring */
	bytes = sizeof(struct gve_rx_desc) * priv->rx_desc_cnt;
	npages = bytes / PAGE_SIZE;
	if (npages * PAGE_SIZE != bytes) {
		err = -EIO;
		goto abort_with_q_resources;
	}

	rx->desc.desc_ring = dma_alloc_coherent(hdev, bytes, &rx->desc.bus,
						GFP_KERNEL);
	if (!rx->desc.desc_ring) {
		err = -ENOMEM;
		goto abort_with_q_resources;
	}
	rx->cnt = 0;
	rx->db_threshold = priv->rx_desc_cnt / 2;
	rx->desc.seqno = 1;

	/* Allocating half-page buffers allows page-flipping which is faster
	 * than copying or allocating new pages.
	 */
	rx->packet_buffer_size = PAGE_SIZE / 2;
	gve_rx_ctx_clear(&rx->ctx);
	gve_rx_add_to_block(priv, idx);

	return 0;

abort_with_q_resources:
	dma_free_coherent(hdev, sizeof(*rx->q_resources),
			  rx->q_resources, rx->q_resources_bus);
	rx->q_resources = NULL;
abort_filled:
	gve_rx_unfill_pages(priv, rx);
abort_with_copy_pool:
	kvfree(rx->qpl_copy_pool);
	rx->qpl_copy_pool = NULL;
abort_with_slots:
	bytes = sizeof(*rx->data.data_ring) * slots;
	dma_free_coherent(hdev, bytes, rx->data.data_ring, rx->data.data_bus);
	rx->data.data_ring = NULL;

	return err;
}

int gve_rx_alloc_rings(struct gve_priv *priv)
{
	int err = 0;
	int i;

	for (i = 0; i < priv->rx_cfg.num_queues; i++) {
		err = gve_rx_alloc_ring(priv, i);
		if (err) {
			netif_err(priv, drv, priv->dev,
				  "Failed to alloc rx ring=%d: err=%d\n",
				  i, err);
			break;
		}
	}
	/* Unallocate if there was an error */
	if (err) {
		int j;

		for (j = 0; j < i; j++)
			gve_rx_free_ring(priv, j);
	}
	return err;
}

void gve_rx_free_rings_gqi(struct gve_priv *priv)
{
	int i;

	for (i = 0; i < priv->rx_cfg.num_queues; i++)
		gve_rx_free_ring(priv, i);
}

void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx)
{
	u32 db_idx = be32_to_cpu(rx->q_resources->db_index);

	iowrite32be(rx->fill_cnt, &priv->db_bar2[db_idx]);
}

static enum pkt_hash_types gve_rss_type(__be16 pkt_flags)
{
	if (likely(pkt_flags & (GVE_RXF_TCP | GVE_RXF_UDP)))
		return PKT_HASH_TYPE_L4;
	if (pkt_flags & (GVE_RXF_IPV4 | GVE_RXF_IPV6))
		return PKT_HASH_TYPE_L3;
	return PKT_HASH_TYPE_L2;
}

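/* Attach the received buffer to the packet being built as a page fragment.
 * Once an SKB holds MAX_SKB_FRAGS fragments, a fresh SKB is linked through
 * frag_list and becomes the new tail.
 */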
static struct sk_buff *gve_rx_add_frags(struct napi_struct *napi,
					struct gve_rx_slot_page_info *page_info,
					unsigned int truesize, u16 len,
					struct gve_rx_ctx *ctx)
{
	u32 offset = page_info->page_offset + page_info->pad;
	struct sk_buff *skb = ctx->skb_tail;
	int num_frags = 0;

	if (!skb) {
		skb = napi_get_frags(napi);
		if (unlikely(!skb))
			return NULL;

		ctx->skb_head = skb;
		ctx->skb_tail = skb;
	} else {
		num_frags = skb_shinfo(ctx->skb_tail)->nr_frags;
		if (num_frags == MAX_SKB_FRAGS) {
			skb = napi_alloc_skb(napi, 0);
			if (!skb)
				return NULL;

			// We will never chain more than two SKBs: 2 * 16 * 2k > 64k
			// which is why we do not need to chain by using skb->next
			skb_shinfo(ctx->skb_tail)->frag_list = skb;

			ctx->skb_tail = skb;
			num_frags = 0;
		}
	}

	if (skb != ctx->skb_head) {
		ctx->skb_head->len += len;
		ctx->skb_head->data_len += len;
		ctx->skb_head->truesize += truesize;
	}
	skb_add_rx_frag(skb, num_frags, page_info->page,
			offset, len, truesize);

	return ctx->skb_head;
}

static void gve_rx_flip_buff(struct gve_rx_slot_page_info *page_info, __be64 *slot_addr)
{
	const __be64 offset = cpu_to_be64(PAGE_SIZE / 2);

	/* "flip" to other packet buffer on this page */
	page_info->page_offset ^= PAGE_SIZE / 2;
	*(slot_addr) ^= offset;
}

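/* Compare the page's refcount against the driver-held bias: returns 1 if the
 * stack has released the page (safe to recycle), 0 if an SKB still holds it,
 * and -1 if the counts are inconsistent.
 */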
static int gve_rx_can_recycle_buffer(struct gve_rx_slot_page_info *page_info)
{
	int pagecount = page_count(page_info->page);

	/* This page is not being used by any SKBs - reuse */
	if (pagecount == page_info->pagecnt_bias)
		return 1;
	/* This page is still being used by an SKB - we can't reuse */
	else if (pagecount > page_info->pagecnt_bias)
		return 0;
	WARN(pagecount < page_info->pagecnt_bias,
	     "Pagecount should never be less than the bias.");
	return -1;
}

static struct sk_buff *
gve_rx_raw_addressing(struct device *dev, struct net_device *netdev,
		      struct gve_rx_slot_page_info *page_info, u16 len,
		      struct napi_struct *napi,
		      union gve_rx_data_slot *data_slot,
		      u16 packet_buffer_size, struct gve_rx_ctx *ctx)
{
	struct sk_buff *skb = gve_rx_add_frags(napi, page_info, packet_buffer_size, len, ctx);

	if (!skb)
		return NULL;

	/* Optimistically stop the kernel from freeing the page.
	 * We will check again in refill to determine if we need to alloc a
	 * new page.
	 */
	gve_dec_pagecnt_bias(page_info);

	return skb;
}

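/* QPL pages cannot be handed to the stack while still registered with the
 * device, so copy the fragment into a page from the ring's copy pool (or a
 * newly allocated page if the least recently used pool entry is still busy)
 * and build the SKB from that copy.
 */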
static struct sk_buff *gve_rx_copy_to_pool(struct gve_rx_ring *rx,
					   struct gve_rx_slot_page_info *page_info,
					   u16 len, struct napi_struct *napi)
{
	u32 pool_idx = rx->qpl_copy_pool_head & rx->qpl_copy_pool_mask;
	void *src = page_info->page_address + page_info->page_offset;
	struct gve_rx_slot_page_info *copy_page_info;
	struct gve_rx_ctx *ctx = &rx->ctx;
	bool alloc_page = false;
	struct sk_buff *skb;
	void *dst;

	copy_page_info = &rx->qpl_copy_pool[pool_idx];
	if (!copy_page_info->can_flip) {
		int recycle = gve_rx_can_recycle_buffer(copy_page_info);

		if (unlikely(recycle < 0)) {
			gve_schedule_reset(rx->gve);
			return NULL;
		}
		alloc_page = !recycle;
	}

	if (alloc_page) {
		struct gve_rx_slot_page_info alloc_page_info;
		struct page *page;

		/* The least recently used page turned out to be
		 * still in use by the kernel. Ignoring it and moving
		 * on alleviates head-of-line blocking.
		 */
		rx->qpl_copy_pool_head++;

		page = alloc_page(GFP_ATOMIC);
		if (!page)
			return NULL;

		alloc_page_info.page = page;
		alloc_page_info.page_offset = 0;
		alloc_page_info.page_address = page_address(page);
		alloc_page_info.pad = page_info->pad;

		memcpy(alloc_page_info.page_address, src, page_info->pad + len);
		skb = gve_rx_add_frags(napi, &alloc_page_info,
				       PAGE_SIZE,
				       len, ctx);

		u64_stats_update_begin(&rx->statss);
		rx->rx_frag_copy_cnt++;
		rx->rx_frag_alloc_cnt++;
		u64_stats_update_end(&rx->statss);

		return skb;
	}

	dst = copy_page_info->page_address + copy_page_info->page_offset;
	memcpy(dst, src, page_info->pad + len);
	copy_page_info->pad = page_info->pad;

	skb = gve_rx_add_frags(napi, copy_page_info,
			       rx->packet_buffer_size, len, ctx);
	if (unlikely(!skb))
		return NULL;

	gve_dec_pagecnt_bias(copy_page_info);
	copy_page_info->page_offset += rx->packet_buffer_size;
	copy_page_info->page_offset &= (PAGE_SIZE - 1);

	if (copy_page_info->can_flip) {
		/* We have used both halves of this copy page, it
		 * is time for it to go to the back of the queue.
		 */
		copy_page_info->can_flip = false;
		rx->qpl_copy_pool_head++;
		prefetch(rx->qpl_copy_pool[rx->qpl_copy_pool_head & rx->qpl_copy_pool_mask].page);
	} else {
		copy_page_info->can_flip = true;
	}

	u64_stats_update_begin(&rx->statss);
	rx->rx_frag_copy_cnt++;
	u64_stats_update_end(&rx->statss);

	return skb;
}

static struct sk_buff *
gve_rx_qpl(struct device *dev, struct net_device *netdev,
	   struct gve_rx_ring *rx, struct gve_rx_slot_page_info *page_info,
	   u16 len, struct napi_struct *napi,
	   union gve_rx_data_slot *data_slot)
{
	struct gve_rx_ctx *ctx = &rx->ctx;
	struct sk_buff *skb;

	/* if raw_addressing mode is not enabled gvnic can only receive into
	 * registered segments. If the buffer can't be recycled, our only
	 * choice is to copy the data out of it so that we can return it to the
	 * device.
	 */
	if (page_info->can_flip) {
		skb = gve_rx_add_frags(napi, page_info, rx->packet_buffer_size, len, ctx);
		/* No point in recycling if we didn't get the skb */
		if (skb) {
			/* Make sure that the page isn't freed. */
			gve_dec_pagecnt_bias(page_info);
			gve_rx_flip_buff(page_info, &data_slot->qpl_offset);
		}
	} else {
		skb = gve_rx_copy_to_pool(rx, page_info, len, napi);
	}
	return skb;
}

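/* Build (or extend) the SKB for one fragment: small single-fragment packets
 * are copied outright (rx_copybreak), otherwise the buffer is attached via
 * the raw-addressing or QPL path, recycling the half-page when possible.
 */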
static struct sk_buff *gve_rx_skb(struct gve_priv *priv, struct gve_rx_ring *rx,
				  struct gve_rx_slot_page_info *page_info, struct napi_struct *napi,
				  u16 len, union gve_rx_data_slot *data_slot,
				  bool is_only_frag)
{
	struct net_device *netdev = priv->dev;
	struct gve_rx_ctx *ctx = &rx->ctx;
	struct sk_buff *skb = NULL;

	if (len <= priv->rx_copybreak && is_only_frag) {
		/* Just copy small packets */
		skb = gve_rx_copy(netdev, napi, page_info, len);
		if (skb) {
			u64_stats_update_begin(&rx->statss);
			rx->rx_copied_pkt++;
			rx->rx_frag_copy_cnt++;
			rx->rx_copybreak_pkt++;
			u64_stats_update_end(&rx->statss);
		}
	} else {
		int recycle = gve_rx_can_recycle_buffer(page_info);

		if (unlikely(recycle < 0)) {
			gve_schedule_reset(priv);
			return NULL;
		}
		page_info->can_flip = recycle;
		if (page_info->can_flip) {
			u64_stats_update_begin(&rx->statss);
			rx->rx_frag_flip_cnt++;
			u64_stats_update_end(&rx->statss);
		}

		if (rx->data.raw_addressing) {
			skb = gve_rx_raw_addressing(&priv->pdev->dev, netdev,
						    page_info, len, napi,
						    data_slot,
						    rx->packet_buffer_size, ctx);
		} else {
			skb = gve_rx_qpl(&priv->pdev->dev, netdev, rx,
					 page_info, len, napi, data_slot);
		}
	}
	return skb;
}

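/* XDP_REDIRECT into an AF_XDP socket pool: copy the frame into an xsk buffer
 * and hand it to xdp_do_redirect().
 */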
static int gve_xsk_pool_redirect(struct net_device *dev,
				 struct gve_rx_ring *rx,
				 void *data, int len,
				 struct bpf_prog *xdp_prog)
{
	struct xdp_buff *xdp;
	int err;

	if (rx->xsk_pool->frame_len < len)
		return -E2BIG;
	xdp = xsk_buff_alloc(rx->xsk_pool);
	if (!xdp) {
		u64_stats_update_begin(&rx->statss);
		rx->xdp_alloc_fails++;
		u64_stats_update_end(&rx->statss);
		return -ENOMEM;
	}
	xdp->data_end = xdp->data + len;
	memcpy(xdp->data, data, len);
	err = xdp_do_redirect(dev, xdp, xdp_prog);
	if (err)
		xsk_buff_free(xdp);
	return err;
}

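/* Handle XDP_REDIRECT: because the Rx buffer stays owned by the driver, the
 * frame is first copied into memory from the ring's page-frag cache (or into
 * an xsk buffer when an AF_XDP pool is attached) before redirecting.
 */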
static int gve_xdp_redirect(struct net_device *dev, struct gve_rx_ring *rx,
			    struct xdp_buff *orig, struct bpf_prog *xdp_prog)
{
	int total_len, len = orig->data_end - orig->data;
	int headroom = XDP_PACKET_HEADROOM;
	struct xdp_buff new;
	void *frame;
	int err;

	if (rx->xsk_pool)
		return gve_xsk_pool_redirect(dev, rx, orig->data,
					     len, xdp_prog);

	total_len = headroom + SKB_DATA_ALIGN(len) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	frame = page_frag_alloc(&rx->page_cache, total_len, GFP_ATOMIC);
	if (!frame) {
		u64_stats_update_begin(&rx->statss);
		rx->xdp_alloc_fails++;
		u64_stats_update_end(&rx->statss);
		return -ENOMEM;
	}
	xdp_init_buff(&new, total_len, &rx->xdp_rxq);
	xdp_prepare_buff(&new, frame, headroom, len, false);
	memcpy(new.data, orig->data, len);

	err = xdp_do_redirect(dev, &new, xdp_prog);
	if (err)
		page_frag_free(frame);

	return err;
}

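/* Act on a non-PASS XDP verdict: transmit on the paired XDP Tx queue for
 * XDP_TX, redirect for XDP_REDIRECT, and simply count drops/aborts. Action
 * and error statistics are updated in all cases.
 */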
static void gve_xdp_done(struct gve_priv *priv, struct gve_rx_ring *rx,
			 struct xdp_buff *xdp, struct bpf_prog *xprog,
			 int xdp_act)
{
	struct gve_tx_ring *tx;
	int tx_qid;
	int err;

	switch (xdp_act) {
	case XDP_ABORTED:
	case XDP_DROP:
	default:
		break;
	case XDP_TX:
		tx_qid = gve_xdp_tx_queue_id(priv, rx->q_num);
		tx = &priv->tx[tx_qid];
		spin_lock(&tx->xdp_lock);
		err = gve_xdp_xmit_one(priv, tx, xdp->data,
				       xdp->data_end - xdp->data, NULL);
		spin_unlock(&tx->xdp_lock);

		if (unlikely(err)) {
			u64_stats_update_begin(&rx->statss);
			rx->xdp_tx_errors++;
			u64_stats_update_end(&rx->statss);
		}
		break;
	case XDP_REDIRECT:
		err = gve_xdp_redirect(priv->dev, rx, xdp, xprog);

		if (unlikely(err)) {
			u64_stats_update_begin(&rx->statss);
			rx->xdp_redirect_errors++;
			u64_stats_update_end(&rx->statss);
		}
		break;
	}
	u64_stats_update_begin(&rx->statss);
	if ((u32)xdp_act < GVE_XDP_ACTIONS)
		rx->xdp_actions[xdp_act]++;
	u64_stats_update_end(&rx->statss);
}

#define GVE_PKTCONT_BIT_IS_SET(x) (GVE_RXF_PKT_CONT & (x))
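/* Process one Rx descriptor (one fragment of a packet): validate it, run the
 * XDP program on single-fragment packets, build or extend the SKB, and pass
 * the completed packet up through GRO on the last fragment.
 */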
static void gve_rx(struct gve_rx_ring *rx, netdev_features_t feat,
		   struct gve_rx_desc *desc, u32 idx,
		   struct gve_rx_cnts *cnts)
{
	bool is_last_frag = !GVE_PKTCONT_BIT_IS_SET(desc->flags_seq);
	struct gve_rx_slot_page_info *page_info;
	u16 frag_size = be16_to_cpu(desc->len);
	struct gve_rx_ctx *ctx = &rx->ctx;
	union gve_rx_data_slot *data_slot;
	struct gve_priv *priv = rx->gve;
	struct sk_buff *skb = NULL;
	struct bpf_prog *xprog;
	struct xdp_buff xdp;
	dma_addr_t page_bus;
	void *va;

	u16 len = frag_size;
	struct napi_struct *napi = &priv->ntfy_blocks[rx->ntfy_id].napi;
	bool is_first_frag = ctx->frag_cnt == 0;

	bool is_only_frag = is_first_frag && is_last_frag;

	if (unlikely(ctx->drop_pkt))
		goto finish_frag;

	if (desc->flags_seq & GVE_RXF_ERR) {
		ctx->drop_pkt = true;
		cnts->desc_err_pkt_cnt++;
		napi_free_frags(napi);
		goto finish_frag;
	}

	if (unlikely(frag_size > rx->packet_buffer_size)) {
		netdev_warn(priv->dev, "Unexpected frag size %d, can't exceed %d, scheduling reset",
			    frag_size, rx->packet_buffer_size);
		ctx->drop_pkt = true;
		napi_free_frags(napi);
		gve_schedule_reset(rx->gve);
		goto finish_frag;
	}

	/* Prefetch two packet buffers ahead, we will need it soon. */
	page_info = &rx->data.page_info[(idx + 2) & rx->mask];
	va = page_info->page_address + page_info->page_offset;
	prefetch(page_info->page); /* Kernel page struct. */
	prefetch(va); /* Packet header. */
	prefetch(va + 64); /* Next cacheline too. */

	page_info = &rx->data.page_info[idx];
	data_slot = &rx->data.data_ring[idx];
	page_bus = (rx->data.raw_addressing) ?
		be64_to_cpu(data_slot->addr) - page_info->page_offset :
		rx->data.qpl->page_buses[idx];
	dma_sync_single_for_cpu(&priv->pdev->dev, page_bus,
				PAGE_SIZE, DMA_FROM_DEVICE);
	page_info->pad = is_first_frag ? GVE_RX_PAD : 0;
	len -= page_info->pad;
	frag_size -= page_info->pad;

	xprog = READ_ONCE(priv->xdp_prog);
	if (xprog && is_only_frag) {
		void *old_data;
		int xdp_act;

		xdp_init_buff(&xdp, rx->packet_buffer_size, &rx->xdp_rxq);
		xdp_prepare_buff(&xdp, page_info->page_address +
				 page_info->page_offset, GVE_RX_PAD,
				 len, false);
		old_data = xdp.data;
		xdp_act = bpf_prog_run_xdp(xprog, &xdp);
		if (xdp_act != XDP_PASS) {
			gve_xdp_done(priv, rx, &xdp, xprog, xdp_act);
			ctx->total_size += frag_size;
			goto finish_ok_pkt;
		}

		page_info->pad += xdp.data - old_data;
		len = xdp.data_end - xdp.data;

		u64_stats_update_begin(&rx->statss);
		rx->xdp_actions[XDP_PASS]++;
		u64_stats_update_end(&rx->statss);
	}

	skb = gve_rx_skb(priv, rx, page_info, napi, len,
			 data_slot, is_only_frag);
	if (!skb) {
		u64_stats_update_begin(&rx->statss);
		rx->rx_skb_alloc_fail++;
		u64_stats_update_end(&rx->statss);

		napi_free_frags(napi);
		ctx->drop_pkt = true;
		goto finish_frag;
	}
	ctx->total_size += frag_size;

	if (is_first_frag) {
		if (likely(feat & NETIF_F_RXCSUM)) {
			/* NIC passes up the partial sum */
			if (desc->csum)
				skb->ip_summed = CHECKSUM_COMPLETE;
			else
				skb->ip_summed = CHECKSUM_NONE;
			skb->csum = csum_unfold(desc->csum);
		}

		/* parse flags & pass relevant info up */
		if (likely(feat & NETIF_F_RXHASH) &&
		    gve_needs_rss(desc->flags_seq))
			skb_set_hash(skb, be32_to_cpu(desc->rss_hash),
				     gve_rss_type(desc->flags_seq));
	}

	if (is_last_frag) {
		skb_record_rx_queue(skb, rx->q_num);
		if (skb_is_nonlinear(skb))
			napi_gro_frags(napi);
		else
			napi_gro_receive(napi, skb);
		goto finish_ok_pkt;
	}

	goto finish_frag;

finish_ok_pkt:
	cnts->ok_pkt_bytes += ctx->total_size;
	cnts->ok_pkt_cnt++;
finish_frag:
	ctx->frag_cnt++;
	if (is_last_frag) {
		cnts->total_pkt_cnt++;
		cnts->cont_pkt_cnt += (ctx->frag_cnt > 1);
		gve_rx_ctx_clear(ctx);
	}
}

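/* Check whether the device has written the next descriptor by comparing its
 * sequence number with the one the ring expects.
 */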
bool gve_rx_work_pending(struct gve_rx_ring *rx)
{
	struct gve_rx_desc *desc;
	__be16 flags_seq;
	u32 next_idx;

	next_idx = rx->cnt & rx->mask;
	desc = rx->desc.desc_ring + next_idx;

	flags_seq = desc->flags_seq;

	return (GVE_SEQNO(flags_seq) == rx->desc.seqno);
}

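/* Repost buffers in raw-addressing mode until the ring is full again,
 * flipping to the free half-page when possible and allocating a replacement
 * page when the old one is still held by the stack.
 */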
static bool gve_rx_refill_buffers(struct gve_priv *priv, struct gve_rx_ring *rx)
{
	int refill_target = rx->mask + 1;
	u32 fill_cnt = rx->fill_cnt;

	while (fill_cnt - rx->cnt < refill_target) {
		struct gve_rx_slot_page_info *page_info;
		u32 idx = fill_cnt & rx->mask;

		page_info = &rx->data.page_info[idx];
		if (page_info->can_flip) {
			/* The other half of the page is free because it was
			 * free when we processed the descriptor. Flip to it.
			 */
			union gve_rx_data_slot *data_slot =
						&rx->data.data_ring[idx];

			gve_rx_flip_buff(page_info, &data_slot->addr);
			page_info->can_flip = 0;
		} else {
			/* It is possible that the networking stack has already
			 * finished processing all outstanding packets in the buffer
			 * and it can be reused.
			 * Flipping is unnecessary here - if the networking stack still
			 * owns half the page it is impossible to tell which half. Either
			 * the whole page is free or it needs to be replaced.
			 */
			int recycle = gve_rx_can_recycle_buffer(page_info);

			if (recycle < 0) {
				if (!rx->data.raw_addressing)
					gve_schedule_reset(priv);
				return false;
			}
			if (!recycle) {
				/* We can't reuse the buffer - alloc a new one */
				union gve_rx_data_slot *data_slot =
						&rx->data.data_ring[idx];
				struct device *dev = &priv->pdev->dev;

				gve_rx_free_buffer(dev, page_info, data_slot);
				page_info->page = NULL;
				if (gve_rx_alloc_buffer(priv, dev, page_info,
							data_slot)) {
					u64_stats_update_begin(&rx->statss);
					rx->rx_buf_alloc_fail++;
					u64_stats_update_end(&rx->statss);
					break;
				}
			}
		}
		fill_cnt++;
	}
	rx->fill_cnt = fill_cnt;
	return true;
}

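/* Main NAPI clean loop: walk descriptors in sequence-number order, handle
 * each fragment, flush any pending XDP TX/redirect work, restock ring slots
 * and ring the doorbell. The return value is the work-done count reported to
 * NAPI by gve_rx_poll().
 */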
static int gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
			     netdev_features_t feat)
{
	u64 xdp_redirects = rx->xdp_actions[XDP_REDIRECT];
	u64 xdp_txs = rx->xdp_actions[XDP_TX];
	struct gve_rx_ctx *ctx = &rx->ctx;
	struct gve_priv *priv = rx->gve;
	struct gve_rx_cnts cnts = {0};
	struct gve_rx_desc *next_desc;
	u32 idx = rx->cnt & rx->mask;
	u32 work_done = 0;

	struct gve_rx_desc *desc = &rx->desc.desc_ring[idx];

	// Exceed budget only if (and till) the inflight packet is consumed.
	while ((GVE_SEQNO(desc->flags_seq) == rx->desc.seqno) &&
	       (work_done < budget || ctx->frag_cnt)) {
		next_desc = &rx->desc.desc_ring[(idx + 1) & rx->mask];
		prefetch(next_desc);

		gve_rx(rx, feat, desc, idx, &cnts);

		rx->cnt++;
		idx = rx->cnt & rx->mask;
		desc = &rx->desc.desc_ring[idx];
		rx->desc.seqno = gve_next_seqno(rx->desc.seqno);
		work_done++;
	}

	// The device will only send whole packets.
	if (unlikely(ctx->frag_cnt)) {
		struct napi_struct *napi = &priv->ntfy_blocks[rx->ntfy_id].napi;

		napi_free_frags(napi);
		gve_rx_ctx_clear(&rx->ctx);
		netdev_warn(priv->dev, "Unexpected seq number %d with incomplete packet, expected %d, scheduling reset",
			    GVE_SEQNO(desc->flags_seq), rx->desc.seqno);
		gve_schedule_reset(rx->gve);
	}

	if (!work_done && rx->fill_cnt - rx->cnt > rx->db_threshold)
		return 0;

	if (work_done) {
		u64_stats_update_begin(&rx->statss);
		rx->rpackets += cnts.ok_pkt_cnt;
		rx->rbytes += cnts.ok_pkt_bytes;
		rx->rx_cont_packet_cnt += cnts.cont_pkt_cnt;
		rx->rx_desc_err_dropped_pkt += cnts.desc_err_pkt_cnt;
		u64_stats_update_end(&rx->statss);
	}

	if (xdp_txs != rx->xdp_actions[XDP_TX])
		gve_xdp_tx_flush(priv, rx->q_num);

	if (xdp_redirects != rx->xdp_actions[XDP_REDIRECT])
		xdp_do_flush();

	/* restock ring slots */
	if (!rx->data.raw_addressing) {
		/* In QPL mode buffs are refilled as the desc are processed */
		rx->fill_cnt += work_done;
	} else if (rx->fill_cnt - rx->cnt <= rx->db_threshold) {
		/* In raw addressing mode buffs are only refilled if the avail
		 * falls below a threshold.
		 */
		if (!gve_rx_refill_buffers(priv, rx))
			return 0;

		/* If we were not able to completely refill buffers, we'll want
		 * to schedule this queue for work again to refill buffers.
		 */
		if (rx->fill_cnt - rx->cnt <= rx->db_threshold) {
			gve_rx_write_doorbell(priv, rx);
			return budget;
		}
	}

	gve_rx_write_doorbell(priv, rx);
	return cnts.total_pkt_cnt;
}

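/* NAPI poll entry point for a GQI Rx ring. */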
int gve_rx_poll(struct gve_notify_block *block, int budget)
{
	struct gve_rx_ring *rx = block->rx;
	netdev_features_t feat;
	int work_done = 0;

	feat = block->napi.dev->features;

	if (budget > 0)
		work_done = gve_clean_rx_done(rx, budget, feat);

	return work_done;
}