// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */
6893ce44dSCatherine Sullivan
775eaae15SPraveen Kaligineedi #include <linux/bpf.h>
8893ce44dSCatherine Sullivan #include <linux/cpumask.h>
9893ce44dSCatherine Sullivan #include <linux/etherdevice.h>
1075eaae15SPraveen Kaligineedi #include <linux/filter.h>
11893ce44dSCatherine Sullivan #include <linux/interrupt.h>
12893ce44dSCatherine Sullivan #include <linux/module.h>
13893ce44dSCatherine Sullivan #include <linux/pci.h>
14893ce44dSCatherine Sullivan #include <linux/sched.h>
15893ce44dSCatherine Sullivan #include <linux/timer.h>
169e5f7d26SCatherine Sullivan #include <linux/workqueue.h>
17c2a0c3edSJeroen de Borst #include <linux/utsname.h>
18c2a0c3edSJeroen de Borst #include <linux/version.h>
19893ce44dSCatherine Sullivan #include <net/sch_generic.h>
20fd8e4032SPraveen Kaligineedi #include <net/xdp_sock_drv.h>
21893ce44dSCatherine Sullivan #include "gve.h"
225e8c5adfSBailey Forrest #include "gve_dqo.h"
23893ce44dSCatherine Sullivan #include "gve_adminq.h"
24893ce44dSCatherine Sullivan #include "gve_register.h"
25893ce44dSCatherine Sullivan
26f5cedc84SCatherine Sullivan #define GVE_DEFAULT_RX_COPYBREAK (256)
27f5cedc84SCatherine Sullivan
28893ce44dSCatherine Sullivan #define DEFAULT_MSG_LEVEL (NETIF_MSG_DRV | NETIF_MSG_LINK)
29893ce44dSCatherine Sullivan #define GVE_VERSION "1.0.0"
30893ce44dSCatherine Sullivan #define GVE_VERSION_PREFIX "GVE-"
31893ce44dSCatherine Sullivan
3287a7f321SJohn Fraker // Minimum amount of time between queue kicks in msec (10 seconds)
3387a7f321SJohn Fraker #define MIN_TX_TIMEOUT_GAP (1000 * 10)
3487a7f321SJohn Fraker
359d0aba98SJunfeng Guo char gve_driver_name[] = "gve";
36e5b845dcSCatherine Sullivan const char gve_version_str[] = GVE_VERSION;
37893ce44dSCatherine Sullivan static const char gve_version_prefix[] = GVE_VERSION_PREFIX;
38893ce44dSCatherine Sullivan
gve_verify_driver_compatibility(struct gve_priv * priv)39c2a0c3edSJeroen de Borst static int gve_verify_driver_compatibility(struct gve_priv *priv)
40c2a0c3edSJeroen de Borst {
41c2a0c3edSJeroen de Borst int err;
42c2a0c3edSJeroen de Borst struct gve_driver_info *driver_info;
43c2a0c3edSJeroen de Borst dma_addr_t driver_info_bus;
44c2a0c3edSJeroen de Borst
45c2a0c3edSJeroen de Borst driver_info = dma_alloc_coherent(&priv->pdev->dev,
46c2a0c3edSJeroen de Borst sizeof(struct gve_driver_info),
47c2a0c3edSJeroen de Borst &driver_info_bus, GFP_KERNEL);
48c2a0c3edSJeroen de Borst if (!driver_info)
49c2a0c3edSJeroen de Borst return -ENOMEM;
50c2a0c3edSJeroen de Borst
51c2a0c3edSJeroen de Borst *driver_info = (struct gve_driver_info) {
52c2a0c3edSJeroen de Borst .os_type = 1, /* Linux */
53c2a0c3edSJeroen de Borst .os_version_major = cpu_to_be32(LINUX_VERSION_MAJOR),
54c2a0c3edSJeroen de Borst .os_version_minor = cpu_to_be32(LINUX_VERSION_SUBLEVEL),
55c2a0c3edSJeroen de Borst .os_version_sub = cpu_to_be32(LINUX_VERSION_PATCHLEVEL),
56c2a0c3edSJeroen de Borst .driver_capability_flags = {
57c2a0c3edSJeroen de Borst cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS1),
58c2a0c3edSJeroen de Borst cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS2),
59c2a0c3edSJeroen de Borst cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS3),
60c2a0c3edSJeroen de Borst cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS4),
61c2a0c3edSJeroen de Borst },
62c2a0c3edSJeroen de Borst };
63c2a0c3edSJeroen de Borst strscpy(driver_info->os_version_str1, utsname()->release,
64c2a0c3edSJeroen de Borst sizeof(driver_info->os_version_str1));
65c2a0c3edSJeroen de Borst strscpy(driver_info->os_version_str2, utsname()->version,
66c2a0c3edSJeroen de Borst sizeof(driver_info->os_version_str2));
67c2a0c3edSJeroen de Borst
68c2a0c3edSJeroen de Borst err = gve_adminq_verify_driver_compatibility(priv,
69c2a0c3edSJeroen de Borst sizeof(struct gve_driver_info),
70c2a0c3edSJeroen de Borst driver_info_bus);
71c2a0c3edSJeroen de Borst
72c2a0c3edSJeroen de Borst /* It's ok if the device doesn't support this */
73c2a0c3edSJeroen de Borst if (err == -EOPNOTSUPP)
74c2a0c3edSJeroen de Borst err = 0;
75c2a0c3edSJeroen de Borst
76c2a0c3edSJeroen de Borst dma_free_coherent(&priv->pdev->dev,
77c2a0c3edSJeroen de Borst sizeof(struct gve_driver_info),
78c2a0c3edSJeroen de Borst driver_info, driver_info_bus);
79c2a0c3edSJeroen de Borst return err;
80c2a0c3edSJeroen de Borst }
81c2a0c3edSJeroen de Borst
gve_start_xmit(struct sk_buff * skb,struct net_device * dev)825e8c5adfSBailey Forrest static netdev_tx_t gve_start_xmit(struct sk_buff *skb, struct net_device *dev)
835e8c5adfSBailey Forrest {
845e8c5adfSBailey Forrest struct gve_priv *priv = netdev_priv(dev);
855e8c5adfSBailey Forrest
865e8c5adfSBailey Forrest if (gve_is_gqi(priv))
875e8c5adfSBailey Forrest return gve_tx(skb, dev);
885e8c5adfSBailey Forrest else
895e8c5adfSBailey Forrest return gve_tx_dqo(skb, dev);
905e8c5adfSBailey Forrest }
915e8c5adfSBailey Forrest
gve_get_stats(struct net_device * dev,struct rtnl_link_stats64 * s)92f5cedc84SCatherine Sullivan static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
93f5cedc84SCatherine Sullivan {
94f5cedc84SCatherine Sullivan struct gve_priv *priv = netdev_priv(dev);
95f5cedc84SCatherine Sullivan unsigned int start;
962f57d497SEric Dumazet u64 packets, bytes;
972e80aeaeSPraveen Kaligineedi int num_tx_queues;
98f5cedc84SCatherine Sullivan int ring;
99f5cedc84SCatherine Sullivan
1002e80aeaeSPraveen Kaligineedi num_tx_queues = gve_num_tx_queues(priv);
101f5cedc84SCatherine Sullivan if (priv->rx) {
102f5cedc84SCatherine Sullivan for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
103f5cedc84SCatherine Sullivan do {
1043c13ce74SCatherine Sullivan start =
105068c38adSThomas Gleixner u64_stats_fetch_begin(&priv->rx[ring].statss);
1062f57d497SEric Dumazet packets = priv->rx[ring].rpackets;
1072f57d497SEric Dumazet bytes = priv->rx[ring].rbytes;
108068c38adSThomas Gleixner } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
109f5cedc84SCatherine Sullivan start));
1102f57d497SEric Dumazet s->rx_packets += packets;
1112f57d497SEric Dumazet s->rx_bytes += bytes;
112f5cedc84SCatherine Sullivan }
113f5cedc84SCatherine Sullivan }
114f5cedc84SCatherine Sullivan if (priv->tx) {
1152e80aeaeSPraveen Kaligineedi for (ring = 0; ring < num_tx_queues; ring++) {
116f5cedc84SCatherine Sullivan do {
1173c13ce74SCatherine Sullivan start =
118068c38adSThomas Gleixner u64_stats_fetch_begin(&priv->tx[ring].statss);
1192f57d497SEric Dumazet packets = priv->tx[ring].pkt_done;
1202f57d497SEric Dumazet bytes = priv->tx[ring].bytes_done;
121068c38adSThomas Gleixner } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
122f5cedc84SCatherine Sullivan start));
1232f57d497SEric Dumazet s->tx_packets += packets;
1242f57d497SEric Dumazet s->tx_bytes += bytes;
125f5cedc84SCatherine Sullivan }
126f5cedc84SCatherine Sullivan }
127f5cedc84SCatherine Sullivan }
128f5cedc84SCatherine Sullivan
gve_alloc_counter_array(struct gve_priv * priv)129893ce44dSCatherine Sullivan static int gve_alloc_counter_array(struct gve_priv *priv)
130893ce44dSCatherine Sullivan {
131893ce44dSCatherine Sullivan priv->counter_array =
132893ce44dSCatherine Sullivan dma_alloc_coherent(&priv->pdev->dev,
133893ce44dSCatherine Sullivan priv->num_event_counters *
134893ce44dSCatherine Sullivan sizeof(*priv->counter_array),
135893ce44dSCatherine Sullivan &priv->counter_array_bus, GFP_KERNEL);
136893ce44dSCatherine Sullivan if (!priv->counter_array)
137893ce44dSCatherine Sullivan return -ENOMEM;
138893ce44dSCatherine Sullivan
139893ce44dSCatherine Sullivan return 0;
140893ce44dSCatherine Sullivan }
141893ce44dSCatherine Sullivan
gve_free_counter_array(struct gve_priv * priv)142893ce44dSCatherine Sullivan static void gve_free_counter_array(struct gve_priv *priv)
143893ce44dSCatherine Sullivan {
144922aa9bcSTao Liu if (!priv->counter_array)
145922aa9bcSTao Liu return;
146922aa9bcSTao Liu
147893ce44dSCatherine Sullivan dma_free_coherent(&priv->pdev->dev,
148893ce44dSCatherine Sullivan priv->num_event_counters *
149893ce44dSCatherine Sullivan sizeof(*priv->counter_array),
150893ce44dSCatherine Sullivan priv->counter_array, priv->counter_array_bus);
151893ce44dSCatherine Sullivan priv->counter_array = NULL;
152893ce44dSCatherine Sullivan }
153893ce44dSCatherine Sullivan
15424aeb56fSKuo Zhao /* NIC requests to report stats */
gve_stats_report_task(struct work_struct * work)15524aeb56fSKuo Zhao static void gve_stats_report_task(struct work_struct *work)
15624aeb56fSKuo Zhao {
15724aeb56fSKuo Zhao struct gve_priv *priv = container_of(work, struct gve_priv,
15824aeb56fSKuo Zhao stats_report_task);
15924aeb56fSKuo Zhao if (gve_get_do_report_stats(priv)) {
16024aeb56fSKuo Zhao gve_handle_report_stats(priv);
16124aeb56fSKuo Zhao gve_clear_do_report_stats(priv);
16224aeb56fSKuo Zhao }
16324aeb56fSKuo Zhao }
16424aeb56fSKuo Zhao
gve_stats_report_schedule(struct gve_priv * priv)16524aeb56fSKuo Zhao static void gve_stats_report_schedule(struct gve_priv *priv)
16624aeb56fSKuo Zhao {
16724aeb56fSKuo Zhao if (!gve_get_probe_in_progress(priv) &&
16824aeb56fSKuo Zhao !gve_get_reset_in_progress(priv)) {
16924aeb56fSKuo Zhao gve_set_do_report_stats(priv);
17024aeb56fSKuo Zhao queue_work(priv->gve_wq, &priv->stats_report_task);
17124aeb56fSKuo Zhao }
17224aeb56fSKuo Zhao }
17324aeb56fSKuo Zhao
gve_stats_report_timer(struct timer_list * t)17424aeb56fSKuo Zhao static void gve_stats_report_timer(struct timer_list *t)
17524aeb56fSKuo Zhao {
17624aeb56fSKuo Zhao struct gve_priv *priv = from_timer(priv, t, stats_report_timer);
17724aeb56fSKuo Zhao
17824aeb56fSKuo Zhao mod_timer(&priv->stats_report_timer,
17924aeb56fSKuo Zhao round_jiffies(jiffies +
18024aeb56fSKuo Zhao msecs_to_jiffies(priv->stats_report_timer_period)));
18124aeb56fSKuo Zhao gve_stats_report_schedule(priv);
18224aeb56fSKuo Zhao }
18324aeb56fSKuo Zhao
gve_alloc_stats_report(struct gve_priv * priv)18424aeb56fSKuo Zhao static int gve_alloc_stats_report(struct gve_priv *priv)
18524aeb56fSKuo Zhao {
18624aeb56fSKuo Zhao int tx_stats_num, rx_stats_num;
18724aeb56fSKuo Zhao
1882f523dc3SDavid Awogbemila tx_stats_num = (GVE_TX_STATS_REPORT_NUM + NIC_TX_STATS_REPORT_NUM) *
1892e80aeaeSPraveen Kaligineedi gve_num_tx_queues(priv);
1902f523dc3SDavid Awogbemila rx_stats_num = (GVE_RX_STATS_REPORT_NUM + NIC_RX_STATS_REPORT_NUM) *
19124aeb56fSKuo Zhao priv->rx_cfg.num_queues;
192691f4077SGustavo A. R. Silva priv->stats_report_len = struct_size(priv->stats_report, stats,
1933fc79c56SGustavo A. R. Silva size_add(tx_stats_num, rx_stats_num));
19424aeb56fSKuo Zhao priv->stats_report =
19524aeb56fSKuo Zhao dma_alloc_coherent(&priv->pdev->dev, priv->stats_report_len,
19624aeb56fSKuo Zhao &priv->stats_report_bus, GFP_KERNEL);
19724aeb56fSKuo Zhao if (!priv->stats_report)
19824aeb56fSKuo Zhao return -ENOMEM;
19924aeb56fSKuo Zhao /* Set up timer for the report-stats task */
20024aeb56fSKuo Zhao timer_setup(&priv->stats_report_timer, gve_stats_report_timer, 0);
20124aeb56fSKuo Zhao priv->stats_report_timer_period = GVE_STATS_REPORT_TIMER_PERIOD;
20224aeb56fSKuo Zhao return 0;
20324aeb56fSKuo Zhao }
20424aeb56fSKuo Zhao
/* Free the stats-report buffer.  The report timer is stopped first
 * (synchronously) so its callback cannot run against freed memory.
 * Safe to call when the buffer was never allocated.
 */
static void gve_free_stats_report(struct gve_priv *priv)
{
	if (!priv->stats_report)
		return;

	del_timer_sync(&priv->stats_report_timer);
	dma_free_coherent(&priv->pdev->dev, priv->stats_report_len,
			  priv->stats_report, priv->stats_report_bus);
	priv->stats_report = NULL;
}
21524aeb56fSKuo Zhao
/* Management interrupt handler: all real work is deferred to the
 * service task on the driver workqueue.
 */
static irqreturn_t gve_mgmnt_intr(int irq, void *arg)
{
	struct gve_priv *priv = arg;

	queue_work(priv->gve_wq, &priv->service_task);
	return IRQ_HANDLED;
}
223893ce44dSCatherine Sullivan
/* GQI queue interrupt: mask the IRQ via its doorbell, then hand
 * processing to NAPI.  The IRQ is unmasked again from the poll loop.
 */
static irqreturn_t gve_intr(int irq, void *arg)
{
	struct gve_notify_block *block = arg;
	struct gve_priv *priv = block->priv;

	iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block));
	napi_schedule_irqoff(&block->napi);
	return IRQ_HANDLED;
}
233893ce44dSCatherine Sullivan
/* DQO queue interrupt: no explicit masking needed, just kick NAPI. */
static irqreturn_t gve_intr_dqo(int irq, void *arg)
{
	struct gve_notify_block *block = arg;

	/* Interrupts are automatically masked */
	napi_schedule_irqoff(&block->napi);
	return IRQ_HANDLED;
}
2425e8c5adfSBailey Forrest
/* NAPI poll for GQI queues.  A notify block may carry a TX queue, an RX
 * queue, or both; whichever is present is polled.  After completion the
 * IRQ is re-enabled and pending work is re-checked to close the race
 * between the ACK write and new completions.
 */
static int gve_napi_poll(struct napi_struct *napi, int budget)
{
	struct gve_notify_block *block;
	__be32 __iomem *irq_doorbell;
	bool reschedule = false;
	struct gve_priv *priv;
	int work_done = 0;

	block = container_of(napi, struct gve_notify_block, napi);
	priv = block->priv;

	if (block->tx) {
		/* q_num >= tx_cfg.num_queues identifies an XDP TX queue */
		if (block->tx->q_num < priv->tx_cfg.num_queues)
			reschedule |= gve_tx_poll(block, budget);
		else if (budget)
			reschedule |= gve_xdp_poll(block, budget);
	}

	/* budget == 0: caller wants TX completion work only — no RX
	 * processing and no napi_complete (presumably netpoll; confirm
	 * against net core callers).
	 */
	if (!budget)
		return 0;

	if (block->rx) {
		work_done = gve_rx_poll(block, budget);
		/* RX exhausted its budget: more work likely remains */
		reschedule |= work_done == budget;
	}

	if (reschedule)
		return budget;

	/* Complete processing - don't unmask irq if busy polling is enabled */
	if (likely(napi_complete_done(napi, work_done))) {
		irq_doorbell = gve_irq_doorbell(priv, block);
		iowrite32be(GVE_IRQ_ACK | GVE_IRQ_EVENT, irq_doorbell);

		/* Ensure IRQ ACK is visible before we check pending work.
		 * If queue had issued updates, it would be truly visible.
		 */
		mb();

		if (block->tx)
			reschedule |= gve_tx_clean_pending(priv, block->tx);
		if (block->rx)
			reschedule |= gve_rx_work_pending(block->rx);

		/* Work raced with the ACK: mask again and repoll */
		if (reschedule && napi_reschedule(napi))
			iowrite32be(GVE_IRQ_MASK, irq_doorbell);
	}
	return work_done;
}
292f5cedc84SCatherine Sullivan
/* NAPI poll for DQO queues.  Unlike the GQI path there is no
 * ack-then-recheck dance after completion; see the comment below about
 * the MSI-X PBA feature.
 */
static int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
{
	struct gve_notify_block *block =
		container_of(napi, struct gve_notify_block, napi);
	struct gve_priv *priv = block->priv;
	bool reschedule = false;
	int work_done = 0;

	if (block->tx)
		reschedule |= gve_tx_poll_dqo(block, /*do_clean=*/true);

	/* budget == 0: TX completion work only, no RX and no
	 * napi_complete (presumably netpoll; confirm against callers).
	 */
	if (!budget)
		return 0;

	if (block->rx) {
		work_done = gve_rx_poll_dqo(block, budget);
		/* Budget exhausted implies more RX work remains */
		reschedule |= work_done == budget;
	}

	if (reschedule)
		return budget;

	if (likely(napi_complete_done(napi, work_done))) {
		/* Enable interrupts again.
		 *
		 * We don't need to repoll afterwards because HW supports the
		 * PCI MSI-X PBA feature.
		 *
		 * Another interrupt would be triggered if a new event came in
		 * since the last one.
		 */
		gve_write_irq_doorbell_dqo(priv, block,
					   GVE_ITR_NO_UPDATE_DQO | GVE_ITR_ENABLE_BIT_DQO);
	}

	return work_done;
}
3305e8c5adfSBailey Forrest
/* Allocate MSI-X vectors and notification blocks: one vector per notify
 * block plus one management vector (the last one).  If the PCI core
 * grants fewer vectors than requested, the notify-block count and the
 * TX/RX queue maxima are scaled down to fit.  Each block's IRQ gets an
 * affinity hint spread round-robin over online CPUs.
 *
 * Returns 0 on success; on failure everything acquired so far is
 * unwound through the goto chain and a negative errno is returned.
 */
static int gve_alloc_notify_blocks(struct gve_priv *priv)
{
	int num_vecs_requested = priv->num_ntfy_blks + 1; /* +1 for mgmt */
	unsigned int active_cpus;
	int vecs_enabled;
	int i, j;
	int err;

	priv->msix_vectors = kvcalloc(num_vecs_requested,
				      sizeof(*priv->msix_vectors), GFP_KERNEL);
	if (!priv->msix_vectors)
		return -ENOMEM;
	for (i = 0; i < num_vecs_requested; i++)
		priv->msix_vectors[i].entry = i;
	vecs_enabled = pci_enable_msix_range(priv->pdev, priv->msix_vectors,
					     GVE_MIN_MSIX, num_vecs_requested);
	if (vecs_enabled < 0) {
		dev_err(&priv->pdev->dev, "Could not enable min msix %d/%d\n",
			GVE_MIN_MSIX, vecs_enabled);
		err = vecs_enabled;
		goto abort_with_msix_vectors;
	}
	if (vecs_enabled != num_vecs_requested) {
		/* Got fewer vectors: round the notify-block count down to
		 * an even number (minus the mgmt vector) and split evenly
		 * between TX and RX.
		 */
		int new_num_ntfy_blks = (vecs_enabled - 1) & ~0x1;
		int vecs_per_type = new_num_ntfy_blks / 2;
		int vecs_left = new_num_ntfy_blks % 2;

		priv->num_ntfy_blks = new_num_ntfy_blks;
		priv->mgmt_msix_idx = priv->num_ntfy_blks;
		priv->tx_cfg.max_queues = min_t(int, priv->tx_cfg.max_queues,
						vecs_per_type);
		priv->rx_cfg.max_queues = min_t(int, priv->rx_cfg.max_queues,
						vecs_per_type + vecs_left);
		dev_err(&priv->pdev->dev,
			"Could not enable desired msix, only enabled %d, adjusting tx max queues to %d, and rx max queues to %d\n",
			vecs_enabled, priv->tx_cfg.max_queues,
			priv->rx_cfg.max_queues);
		if (priv->tx_cfg.num_queues > priv->tx_cfg.max_queues)
			priv->tx_cfg.num_queues = priv->tx_cfg.max_queues;
		if (priv->rx_cfg.num_queues > priv->rx_cfg.max_queues)
			priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;
	}
	/* Half the notification blocks go to TX and half to RX */
	active_cpus = min_t(int, priv->num_ntfy_blks / 2, num_online_cpus());

	/* Setup Management Vector - the last vector */
	snprintf(priv->mgmt_msix_name, sizeof(priv->mgmt_msix_name), "gve-mgmnt@pci:%s",
		 pci_name(priv->pdev));
	err = request_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector,
			  gve_mgmnt_intr, 0, priv->mgmt_msix_name, priv);
	if (err) {
		dev_err(&priv->pdev->dev, "Did not receive management vector.\n");
		goto abort_with_msix_enabled;
	}
	/* Device-writable array of per-block doorbell indices */
	priv->irq_db_indices =
		dma_alloc_coherent(&priv->pdev->dev,
				   priv->num_ntfy_blks *
				   sizeof(*priv->irq_db_indices),
				   &priv->irq_db_indices_bus, GFP_KERNEL);
	if (!priv->irq_db_indices) {
		err = -ENOMEM;
		goto abort_with_mgmt_vector;
	}

	priv->ntfy_blocks = kvzalloc(priv->num_ntfy_blks *
				     sizeof(*priv->ntfy_blocks), GFP_KERNEL);
	if (!priv->ntfy_blocks) {
		err = -ENOMEM;
		goto abort_with_irq_db_indices;
	}

	/* Setup the other blocks - the first n-1 vectors */
	for (i = 0; i < priv->num_ntfy_blks; i++) {
		struct gve_notify_block *block = &priv->ntfy_blocks[i];
		int msix_idx = i;

		snprintf(block->name, sizeof(block->name), "gve-ntfy-blk%d@pci:%s",
			 i, pci_name(priv->pdev));
		block->priv = priv;
		/* Queue-format-specific top-half handler */
		err = request_irq(priv->msix_vectors[msix_idx].vector,
				  gve_is_gqi(priv) ? gve_intr : gve_intr_dqo,
				  0, block->name, block);
		if (err) {
			dev_err(&priv->pdev->dev,
				"Failed to receive msix vector %d\n", i);
			goto abort_with_some_ntfy_blocks;
		}
		irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
				      get_cpu_mask(i % active_cpus));
		block->irq_db_index = &priv->irq_db_indices[i].index;
	}
	return 0;
abort_with_some_ntfy_blocks:
	/* Unwind only the blocks whose IRQs were actually requested */
	for (j = 0; j < i; j++) {
		struct gve_notify_block *block = &priv->ntfy_blocks[j];
		int msix_idx = j;

		irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
				      NULL);
		free_irq(priv->msix_vectors[msix_idx].vector, block);
	}
	kvfree(priv->ntfy_blocks);
	priv->ntfy_blocks = NULL;
abort_with_irq_db_indices:
	dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks *
			  sizeof(*priv->irq_db_indices),
			  priv->irq_db_indices, priv->irq_db_indices_bus);
	priv->irq_db_indices = NULL;
abort_with_mgmt_vector:
	free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
abort_with_msix_enabled:
	pci_disable_msix(priv->pdev);
abort_with_msix_vectors:
	kvfree(priv->msix_vectors);
	priv->msix_vectors = NULL;
	return err;
}
448893ce44dSCatherine Sullivan
/* Undo gve_alloc_notify_blocks(): free the per-block IRQs (clearing
 * affinity hints first), then the management IRQ, the block and
 * doorbell-index arrays, and finally disable MSI-X.  No-op when MSI-X
 * was never set up.
 */
static void gve_free_notify_blocks(struct gve_priv *priv)
{
	int i;

	if (!priv->msix_vectors)
		return;

	/* Free the irqs */
	for (i = 0; i < priv->num_ntfy_blks; i++) {
		struct gve_notify_block *block = &priv->ntfy_blocks[i];
		int msix_idx = i;

		irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
				      NULL);
		free_irq(priv->msix_vectors[msix_idx].vector, block);
	}
	free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
	kvfree(priv->ntfy_blocks);
	priv->ntfy_blocks = NULL;
	dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks *
			  sizeof(*priv->irq_db_indices),
			  priv->irq_db_indices, priv->irq_db_indices_bus);
	priv->irq_db_indices = NULL;
	pci_disable_msix(priv->pdev);
	kvfree(priv->msix_vectors);
	priv->msix_vectors = NULL;
}
476893ce44dSCatherine Sullivan
/* Allocate and register all device-level resources: the event-counter
 * array, notify blocks + IRQs, the stats-report buffer, and (non-GQI
 * only) the packet-type lookup table.  On any failure, everything
 * already set up here is unwound via the goto chain.
 *
 * Returns 0 on success or a negative errno.
 */
static int gve_setup_device_resources(struct gve_priv *priv)
{
	int err;

	err = gve_alloc_counter_array(priv);
	if (err)
		return err;
	err = gve_alloc_notify_blocks(priv);
	if (err)
		goto abort_with_counter;
	err = gve_alloc_stats_report(priv);
	if (err)
		goto abort_with_ntfy_blocks;
	/* Hand the counter array and doorbell-index array to the device */
	err = gve_adminq_configure_device_resources(priv,
						    priv->counter_array_bus,
						    priv->num_event_counters,
						    priv->irq_db_indices_bus,
						    priv->num_ntfy_blks);
	if (unlikely(err)) {
		dev_err(&priv->pdev->dev,
			"could not setup device_resources: err=%d\n", err);
		err = -ENXIO;
		goto abort_with_stats_report;
	}

	if (!gve_is_gqi(priv)) {
		/* Fetch the device's packet-type map (DQO formats only) */
		priv->ptype_lut_dqo = kvzalloc(sizeof(*priv->ptype_lut_dqo),
					       GFP_KERNEL);
		if (!priv->ptype_lut_dqo) {
			err = -ENOMEM;
			goto abort_with_stats_report;
		}
		err = gve_adminq_get_ptype_map_dqo(priv, priv->ptype_lut_dqo);
		if (err) {
			dev_err(&priv->pdev->dev,
				"Failed to get ptype map: err=%d\n", err);
			goto abort_with_ptype_lut;
		}
	}

	err = gve_adminq_report_stats(priv, priv->stats_report_len,
				      priv->stats_report_bus,
				      GVE_STATS_REPORT_TIMER_PERIOD);
	if (err)
		/* Non-fatal: logged and setup continues */
		dev_err(&priv->pdev->dev,
			"Failed to report stats: err=%d\n", err);
	gve_set_device_resources_ok(priv);
	return 0;

abort_with_ptype_lut:
	kvfree(priv->ptype_lut_dqo);
	priv->ptype_lut_dqo = NULL;
abort_with_stats_report:
	gve_free_stats_report(priv);
abort_with_ntfy_blocks:
	gve_free_notify_blocks(priv);
abort_with_counter:
	gve_free_counter_array(priv);

	return err;
}
538893ce44dSCatherine Sullivan
5399e5f7d26SCatherine Sullivan static void gve_trigger_reset(struct gve_priv *priv);
5409e5f7d26SCatherine Sullivan
/* Release device-level resources.  If the device still considers them
 * configured, tell it first (detach the stats report, deconfigure); an
 * adminq failure at that stage triggers a reset.  Host-side allocations
 * are then freed unconditionally.
 */
static void gve_teardown_device_resources(struct gve_priv *priv)
{
	int err;

	/* Tell device its resources are being freed */
	if (gve_get_device_resources_ok(priv)) {
		/* detach the stats report */
		err = gve_adminq_report_stats(priv, 0, 0x0, GVE_STATS_REPORT_TIMER_PERIOD);
		if (err) {
			dev_err(&priv->pdev->dev,
				"Failed to detach stats report: err=%d\n", err);
			gve_trigger_reset(priv);
		}
		err = gve_adminq_deconfigure_device_resources(priv);
		if (err) {
			dev_err(&priv->pdev->dev,
				"Could not deconfigure device resources: err=%d\n",
				err);
			gve_trigger_reset(priv);
		}
	}

	/* kvfree(NULL) is a no-op, so this is safe for GQI too */
	kvfree(priv->ptype_lut_dqo);
	priv->ptype_lut_dqo = NULL;

	gve_free_counter_array(priv);
	gve_free_notify_blocks(priv);
	gve_free_stats_report(priv);
	gve_clear_device_resources_ok(priv);
}
571893ce44dSCatherine Sullivan
/* Register NAPI for notify block @ntfy_idx with @gve_poll as its poll
 * callback (gve_napi_poll or gve_napi_poll_dqo depending on format).
 */
static void gve_add_napi(struct gve_priv *priv, int ntfy_idx,
			 int (*gve_poll)(struct napi_struct *, int))
{
	struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

	netif_napi_add(priv->dev, &block->napi, gve_poll);
}
579f5cedc84SCatherine Sullivan
gve_remove_napi(struct gve_priv * priv,int ntfy_idx)580f5cedc84SCatherine Sullivan static void gve_remove_napi(struct gve_priv *priv, int ntfy_idx)
581f5cedc84SCatherine Sullivan {
582f5cedc84SCatherine Sullivan struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
583f5cedc84SCatherine Sullivan
584f5cedc84SCatherine Sullivan netif_napi_del(&block->napi);
585f5cedc84SCatherine Sullivan }
586f5cedc84SCatherine Sullivan
gve_register_xdp_qpls(struct gve_priv * priv)58775eaae15SPraveen Kaligineedi static int gve_register_xdp_qpls(struct gve_priv *priv)
58875eaae15SPraveen Kaligineedi {
58975eaae15SPraveen Kaligineedi int start_id;
59075eaae15SPraveen Kaligineedi int err;
59175eaae15SPraveen Kaligineedi int i;
59275eaae15SPraveen Kaligineedi
59375eaae15SPraveen Kaligineedi start_id = gve_tx_qpl_id(priv, gve_xdp_tx_start_queue_id(priv));
59475eaae15SPraveen Kaligineedi for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) {
59575eaae15SPraveen Kaligineedi err = gve_adminq_register_page_list(priv, &priv->qpls[i]);
59675eaae15SPraveen Kaligineedi if (err) {
59775eaae15SPraveen Kaligineedi netif_err(priv, drv, priv->dev,
59875eaae15SPraveen Kaligineedi "failed to register queue page list %d\n",
59975eaae15SPraveen Kaligineedi priv->qpls[i].id);
60075eaae15SPraveen Kaligineedi /* This failure will trigger a reset - no need to clean
60175eaae15SPraveen Kaligineedi * up
60275eaae15SPraveen Kaligineedi */
60375eaae15SPraveen Kaligineedi return err;
60475eaae15SPraveen Kaligineedi }
60575eaae15SPraveen Kaligineedi }
60675eaae15SPraveen Kaligineedi return 0;
60775eaae15SPraveen Kaligineedi }
60875eaae15SPraveen Kaligineedi
gve_register_qpls(struct gve_priv * priv)609f5cedc84SCatherine Sullivan static int gve_register_qpls(struct gve_priv *priv)
610f5cedc84SCatherine Sullivan {
6117fc2bf78SPraveen Kaligineedi int start_id;
612f5cedc84SCatherine Sullivan int err;
613f5cedc84SCatherine Sullivan int i;
614f5cedc84SCatherine Sullivan
6157fc2bf78SPraveen Kaligineedi start_id = gve_tx_start_qpl_id(priv);
6167fc2bf78SPraveen Kaligineedi for (i = start_id; i < start_id + gve_num_tx_qpls(priv); i++) {
6177fc2bf78SPraveen Kaligineedi err = gve_adminq_register_page_list(priv, &priv->qpls[i]);
6187fc2bf78SPraveen Kaligineedi if (err) {
6197fc2bf78SPraveen Kaligineedi netif_err(priv, drv, priv->dev,
6207fc2bf78SPraveen Kaligineedi "failed to register queue page list %d\n",
6217fc2bf78SPraveen Kaligineedi priv->qpls[i].id);
6227fc2bf78SPraveen Kaligineedi /* This failure will trigger a reset - no need to clean
6237fc2bf78SPraveen Kaligineedi * up
6247fc2bf78SPraveen Kaligineedi */
6257fc2bf78SPraveen Kaligineedi return err;
6267fc2bf78SPraveen Kaligineedi }
6277fc2bf78SPraveen Kaligineedi }
6287fc2bf78SPraveen Kaligineedi
6297fc2bf78SPraveen Kaligineedi start_id = gve_rx_start_qpl_id(priv);
6307fc2bf78SPraveen Kaligineedi for (i = start_id; i < start_id + gve_num_rx_qpls(priv); i++) {
631f5cedc84SCatherine Sullivan err = gve_adminq_register_page_list(priv, &priv->qpls[i]);
632f5cedc84SCatherine Sullivan if (err) {
633f5cedc84SCatherine Sullivan netif_err(priv, drv, priv->dev,
634f5cedc84SCatherine Sullivan "failed to register queue page list %d\n",
635f5cedc84SCatherine Sullivan priv->qpls[i].id);
6369e5f7d26SCatherine Sullivan /* This failure will trigger a reset - no need to clean
6379e5f7d26SCatherine Sullivan * up
6389e5f7d26SCatherine Sullivan */
639f5cedc84SCatherine Sullivan return err;
640f5cedc84SCatherine Sullivan }
641f5cedc84SCatherine Sullivan }
642f5cedc84SCatherine Sullivan return 0;
643f5cedc84SCatherine Sullivan }
644f5cedc84SCatherine Sullivan
gve_unregister_xdp_qpls(struct gve_priv * priv)64575eaae15SPraveen Kaligineedi static int gve_unregister_xdp_qpls(struct gve_priv *priv)
64675eaae15SPraveen Kaligineedi {
64775eaae15SPraveen Kaligineedi int start_id;
64875eaae15SPraveen Kaligineedi int err;
64975eaae15SPraveen Kaligineedi int i;
65075eaae15SPraveen Kaligineedi
65175eaae15SPraveen Kaligineedi start_id = gve_tx_qpl_id(priv, gve_xdp_tx_start_queue_id(priv));
65275eaae15SPraveen Kaligineedi for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) {
65375eaae15SPraveen Kaligineedi err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id);
65475eaae15SPraveen Kaligineedi /* This failure will trigger a reset - no need to clean up */
65575eaae15SPraveen Kaligineedi if (err) {
65675eaae15SPraveen Kaligineedi netif_err(priv, drv, priv->dev,
65775eaae15SPraveen Kaligineedi "Failed to unregister queue page list %d\n",
65875eaae15SPraveen Kaligineedi priv->qpls[i].id);
65975eaae15SPraveen Kaligineedi return err;
66075eaae15SPraveen Kaligineedi }
66175eaae15SPraveen Kaligineedi }
66275eaae15SPraveen Kaligineedi return 0;
66375eaae15SPraveen Kaligineedi }
66475eaae15SPraveen Kaligineedi
gve_unregister_qpls(struct gve_priv * priv)665f5cedc84SCatherine Sullivan static int gve_unregister_qpls(struct gve_priv *priv)
666f5cedc84SCatherine Sullivan {
6677fc2bf78SPraveen Kaligineedi int start_id;
668f5cedc84SCatherine Sullivan int err;
669f5cedc84SCatherine Sullivan int i;
670f5cedc84SCatherine Sullivan
6717fc2bf78SPraveen Kaligineedi start_id = gve_tx_start_qpl_id(priv);
6727fc2bf78SPraveen Kaligineedi for (i = start_id; i < start_id + gve_num_tx_qpls(priv); i++) {
6737fc2bf78SPraveen Kaligineedi err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id);
6747fc2bf78SPraveen Kaligineedi /* This failure will trigger a reset - no need to clean up */
6757fc2bf78SPraveen Kaligineedi if (err) {
6767fc2bf78SPraveen Kaligineedi netif_err(priv, drv, priv->dev,
6777fc2bf78SPraveen Kaligineedi "Failed to unregister queue page list %d\n",
6787fc2bf78SPraveen Kaligineedi priv->qpls[i].id);
6797fc2bf78SPraveen Kaligineedi return err;
6807fc2bf78SPraveen Kaligineedi }
6817fc2bf78SPraveen Kaligineedi }
6827fc2bf78SPraveen Kaligineedi
6837fc2bf78SPraveen Kaligineedi start_id = gve_rx_start_qpl_id(priv);
6847fc2bf78SPraveen Kaligineedi for (i = start_id; i < start_id + gve_num_rx_qpls(priv); i++) {
685f5cedc84SCatherine Sullivan err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id);
6869e5f7d26SCatherine Sullivan /* This failure will trigger a reset - no need to clean up */
687f5cedc84SCatherine Sullivan if (err) {
688f5cedc84SCatherine Sullivan netif_err(priv, drv, priv->dev,
689f5cedc84SCatherine Sullivan "Failed to unregister queue page list %d\n",
690f5cedc84SCatherine Sullivan priv->qpls[i].id);
691f5cedc84SCatherine Sullivan return err;
692f5cedc84SCatherine Sullivan }
693f5cedc84SCatherine Sullivan }
694f5cedc84SCatherine Sullivan return 0;
695f5cedc84SCatherine Sullivan }
696f5cedc84SCatherine Sullivan
gve_create_xdp_rings(struct gve_priv * priv)69775eaae15SPraveen Kaligineedi static int gve_create_xdp_rings(struct gve_priv *priv)
69875eaae15SPraveen Kaligineedi {
69975eaae15SPraveen Kaligineedi int err;
70075eaae15SPraveen Kaligineedi
70175eaae15SPraveen Kaligineedi err = gve_adminq_create_tx_queues(priv,
70275eaae15SPraveen Kaligineedi gve_xdp_tx_start_queue_id(priv),
70375eaae15SPraveen Kaligineedi priv->num_xdp_queues);
70475eaae15SPraveen Kaligineedi if (err) {
70575eaae15SPraveen Kaligineedi netif_err(priv, drv, priv->dev, "failed to create %d XDP tx queues\n",
70675eaae15SPraveen Kaligineedi priv->num_xdp_queues);
70775eaae15SPraveen Kaligineedi /* This failure will trigger a reset - no need to clean
70875eaae15SPraveen Kaligineedi * up
70975eaae15SPraveen Kaligineedi */
71075eaae15SPraveen Kaligineedi return err;
71175eaae15SPraveen Kaligineedi }
71275eaae15SPraveen Kaligineedi netif_dbg(priv, drv, priv->dev, "created %d XDP tx queues\n",
71375eaae15SPraveen Kaligineedi priv->num_xdp_queues);
71475eaae15SPraveen Kaligineedi
71575eaae15SPraveen Kaligineedi return 0;
71675eaae15SPraveen Kaligineedi }
71775eaae15SPraveen Kaligineedi
gve_create_rings(struct gve_priv * priv)718f5cedc84SCatherine Sullivan static int gve_create_rings(struct gve_priv *priv)
719f5cedc84SCatherine Sullivan {
7202e80aeaeSPraveen Kaligineedi int num_tx_queues = gve_num_tx_queues(priv);
721f5cedc84SCatherine Sullivan int err;
722f5cedc84SCatherine Sullivan int i;
723f5cedc84SCatherine Sullivan
7247fc2bf78SPraveen Kaligineedi err = gve_adminq_create_tx_queues(priv, 0, num_tx_queues);
725f5cedc84SCatherine Sullivan if (err) {
7265cdad90dSSagi Shahar netif_err(priv, drv, priv->dev, "failed to create %d tx queues\n",
7272e80aeaeSPraveen Kaligineedi num_tx_queues);
7289e5f7d26SCatherine Sullivan /* This failure will trigger a reset - no need to clean
7299e5f7d26SCatherine Sullivan * up
7309e5f7d26SCatherine Sullivan */
731f5cedc84SCatherine Sullivan return err;
732f5cedc84SCatherine Sullivan }
7335cdad90dSSagi Shahar netif_dbg(priv, drv, priv->dev, "created %d tx queues\n",
7342e80aeaeSPraveen Kaligineedi num_tx_queues);
7355cdad90dSSagi Shahar
7365cdad90dSSagi Shahar err = gve_adminq_create_rx_queues(priv, priv->rx_cfg.num_queues);
737f5cedc84SCatherine Sullivan if (err) {
7385cdad90dSSagi Shahar netif_err(priv, drv, priv->dev, "failed to create %d rx queues\n",
7395cdad90dSSagi Shahar priv->rx_cfg.num_queues);
7409e5f7d26SCatherine Sullivan /* This failure will trigger a reset - no need to clean
7419e5f7d26SCatherine Sullivan * up
7429e5f7d26SCatherine Sullivan */
743f5cedc84SCatherine Sullivan return err;
744f5cedc84SCatherine Sullivan }
7455cdad90dSSagi Shahar netif_dbg(priv, drv, priv->dev, "created %d rx queues\n",
7465cdad90dSSagi Shahar priv->rx_cfg.num_queues);
7475cdad90dSSagi Shahar
7489c1a59a2SBailey Forrest if (gve_is_gqi(priv)) {
7495cdad90dSSagi Shahar /* Rx data ring has been prefilled with packet buffers at queue
7505cdad90dSSagi Shahar * allocation time.
7519c1a59a2SBailey Forrest *
7529c1a59a2SBailey Forrest * Write the doorbell to provide descriptor slots and packet
7539c1a59a2SBailey Forrest * buffers to the NIC.
754f5cedc84SCatherine Sullivan */
7555cdad90dSSagi Shahar for (i = 0; i < priv->rx_cfg.num_queues; i++)
756f5cedc84SCatherine Sullivan gve_rx_write_doorbell(priv, &priv->rx[i]);
7579c1a59a2SBailey Forrest } else {
7589c1a59a2SBailey Forrest for (i = 0; i < priv->rx_cfg.num_queues; i++) {
7599c1a59a2SBailey Forrest /* Post buffers and ring doorbell. */
7609c1a59a2SBailey Forrest gve_rx_post_buffers_dqo(&priv->rx[i]);
7619c1a59a2SBailey Forrest }
7629c1a59a2SBailey Forrest }
763f5cedc84SCatherine Sullivan
764f5cedc84SCatherine Sullivan return 0;
765f5cedc84SCatherine Sullivan }
766f5cedc84SCatherine Sullivan
add_napi_init_xdp_sync_stats(struct gve_priv * priv,int (* napi_poll)(struct napi_struct * napi,int budget))76775eaae15SPraveen Kaligineedi static void add_napi_init_xdp_sync_stats(struct gve_priv *priv,
76875eaae15SPraveen Kaligineedi int (*napi_poll)(struct napi_struct *napi,
76975eaae15SPraveen Kaligineedi int budget))
77075eaae15SPraveen Kaligineedi {
77175eaae15SPraveen Kaligineedi int start_id = gve_xdp_tx_start_queue_id(priv);
77275eaae15SPraveen Kaligineedi int i;
77375eaae15SPraveen Kaligineedi
77475eaae15SPraveen Kaligineedi /* Add xdp tx napi & init sync stats*/
77575eaae15SPraveen Kaligineedi for (i = start_id; i < start_id + priv->num_xdp_queues; i++) {
77675eaae15SPraveen Kaligineedi int ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
77775eaae15SPraveen Kaligineedi
77875eaae15SPraveen Kaligineedi u64_stats_init(&priv->tx[i].statss);
77975eaae15SPraveen Kaligineedi priv->tx[i].ntfy_id = ntfy_idx;
78075eaae15SPraveen Kaligineedi gve_add_napi(priv, ntfy_idx, napi_poll);
78175eaae15SPraveen Kaligineedi }
78275eaae15SPraveen Kaligineedi }
78375eaae15SPraveen Kaligineedi
add_napi_init_sync_stats(struct gve_priv * priv,int (* napi_poll)(struct napi_struct * napi,int budget))7845e8c5adfSBailey Forrest static void add_napi_init_sync_stats(struct gve_priv *priv,
7855e8c5adfSBailey Forrest int (*napi_poll)(struct napi_struct *napi,
7865e8c5adfSBailey Forrest int budget))
7875e8c5adfSBailey Forrest {
7885e8c5adfSBailey Forrest int i;
7895e8c5adfSBailey Forrest
7905e8c5adfSBailey Forrest /* Add tx napi & init sync stats*/
7912e80aeaeSPraveen Kaligineedi for (i = 0; i < gve_num_tx_queues(priv); i++) {
7925e8c5adfSBailey Forrest int ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
7935e8c5adfSBailey Forrest
7945e8c5adfSBailey Forrest u64_stats_init(&priv->tx[i].statss);
7955e8c5adfSBailey Forrest priv->tx[i].ntfy_id = ntfy_idx;
7965e8c5adfSBailey Forrest gve_add_napi(priv, ntfy_idx, napi_poll);
7975e8c5adfSBailey Forrest }
7985e8c5adfSBailey Forrest /* Add rx napi & init sync stats*/
7995e8c5adfSBailey Forrest for (i = 0; i < priv->rx_cfg.num_queues; i++) {
8005e8c5adfSBailey Forrest int ntfy_idx = gve_rx_idx_to_ntfy(priv, i);
8015e8c5adfSBailey Forrest
8025e8c5adfSBailey Forrest u64_stats_init(&priv->rx[i].statss);
8035e8c5adfSBailey Forrest priv->rx[i].ntfy_id = ntfy_idx;
8045e8c5adfSBailey Forrest gve_add_napi(priv, ntfy_idx, napi_poll);
8055e8c5adfSBailey Forrest }
8065e8c5adfSBailey Forrest }
8075e8c5adfSBailey Forrest
/* Free tx rings; DQO frees everything at once, GQI frees the given range. */
static void gve_tx_free_rings(struct gve_priv *priv, int start_id, int num_rings)
{
	if (!gve_is_gqi(priv))
		gve_tx_free_rings_dqo(priv);
	else
		gve_tx_free_rings_gqi(priv, start_id, num_rings);
}
8169c1a59a2SBailey Forrest
gve_alloc_xdp_rings(struct gve_priv * priv)81775eaae15SPraveen Kaligineedi static int gve_alloc_xdp_rings(struct gve_priv *priv)
81875eaae15SPraveen Kaligineedi {
81975eaae15SPraveen Kaligineedi int start_id;
82075eaae15SPraveen Kaligineedi int err = 0;
82175eaae15SPraveen Kaligineedi
82275eaae15SPraveen Kaligineedi if (!priv->num_xdp_queues)
82375eaae15SPraveen Kaligineedi return 0;
82475eaae15SPraveen Kaligineedi
82575eaae15SPraveen Kaligineedi start_id = gve_xdp_tx_start_queue_id(priv);
82675eaae15SPraveen Kaligineedi err = gve_tx_alloc_rings(priv, start_id, priv->num_xdp_queues);
82775eaae15SPraveen Kaligineedi if (err)
82875eaae15SPraveen Kaligineedi return err;
82975eaae15SPraveen Kaligineedi add_napi_init_xdp_sync_stats(priv, gve_napi_poll);
83075eaae15SPraveen Kaligineedi
83175eaae15SPraveen Kaligineedi return 0;
83275eaae15SPraveen Kaligineedi }
83375eaae15SPraveen Kaligineedi
gve_alloc_rings(struct gve_priv * priv)834f5cedc84SCatherine Sullivan static int gve_alloc_rings(struct gve_priv *priv)
835f5cedc84SCatherine Sullivan {
836f5cedc84SCatherine Sullivan int err;
837f5cedc84SCatherine Sullivan
838f5cedc84SCatherine Sullivan /* Setup tx rings */
8397fc2bf78SPraveen Kaligineedi priv->tx = kvcalloc(priv->tx_cfg.max_queues, sizeof(*priv->tx),
840f5cedc84SCatherine Sullivan GFP_KERNEL);
841f5cedc84SCatherine Sullivan if (!priv->tx)
842f5cedc84SCatherine Sullivan return -ENOMEM;
8439c1a59a2SBailey Forrest
8449c1a59a2SBailey Forrest if (gve_is_gqi(priv))
8457fc2bf78SPraveen Kaligineedi err = gve_tx_alloc_rings(priv, 0, gve_num_tx_queues(priv));
8469c1a59a2SBailey Forrest else
8479c1a59a2SBailey Forrest err = gve_tx_alloc_rings_dqo(priv);
848f5cedc84SCatherine Sullivan if (err)
849f5cedc84SCatherine Sullivan goto free_tx;
8509c1a59a2SBailey Forrest
851f5cedc84SCatherine Sullivan /* Setup rx rings */
8527fc2bf78SPraveen Kaligineedi priv->rx = kvcalloc(priv->rx_cfg.max_queues, sizeof(*priv->rx),
853f5cedc84SCatherine Sullivan GFP_KERNEL);
854f5cedc84SCatherine Sullivan if (!priv->rx) {
855f5cedc84SCatherine Sullivan err = -ENOMEM;
856f5cedc84SCatherine Sullivan goto free_tx_queue;
857f5cedc84SCatherine Sullivan }
8589c1a59a2SBailey Forrest
8599c1a59a2SBailey Forrest if (gve_is_gqi(priv))
860f5cedc84SCatherine Sullivan err = gve_rx_alloc_rings(priv);
8619c1a59a2SBailey Forrest else
8629c1a59a2SBailey Forrest err = gve_rx_alloc_rings_dqo(priv);
863f5cedc84SCatherine Sullivan if (err)
864f5cedc84SCatherine Sullivan goto free_rx;
8655e8c5adfSBailey Forrest
8665e8c5adfSBailey Forrest if (gve_is_gqi(priv))
8675e8c5adfSBailey Forrest add_napi_init_sync_stats(priv, gve_napi_poll);
8685e8c5adfSBailey Forrest else
8695e8c5adfSBailey Forrest add_napi_init_sync_stats(priv, gve_napi_poll_dqo);
870f5cedc84SCatherine Sullivan
871f5cedc84SCatherine Sullivan return 0;
872f5cedc84SCatherine Sullivan
873f5cedc84SCatherine Sullivan free_rx:
8748ec1e900SChuhong Yuan kvfree(priv->rx);
875f5cedc84SCatherine Sullivan priv->rx = NULL;
876f5cedc84SCatherine Sullivan free_tx_queue:
8777fc2bf78SPraveen Kaligineedi gve_tx_free_rings(priv, 0, gve_num_tx_queues(priv));
878f5cedc84SCatherine Sullivan free_tx:
8798ec1e900SChuhong Yuan kvfree(priv->tx);
880f5cedc84SCatherine Sullivan priv->tx = NULL;
881f5cedc84SCatherine Sullivan return err;
882f5cedc84SCatherine Sullivan }
883f5cedc84SCatherine Sullivan
gve_destroy_xdp_rings(struct gve_priv * priv)88475eaae15SPraveen Kaligineedi static int gve_destroy_xdp_rings(struct gve_priv *priv)
88575eaae15SPraveen Kaligineedi {
88675eaae15SPraveen Kaligineedi int start_id;
88775eaae15SPraveen Kaligineedi int err;
88875eaae15SPraveen Kaligineedi
88975eaae15SPraveen Kaligineedi start_id = gve_xdp_tx_start_queue_id(priv);
89075eaae15SPraveen Kaligineedi err = gve_adminq_destroy_tx_queues(priv,
89175eaae15SPraveen Kaligineedi start_id,
89275eaae15SPraveen Kaligineedi priv->num_xdp_queues);
89375eaae15SPraveen Kaligineedi if (err) {
89475eaae15SPraveen Kaligineedi netif_err(priv, drv, priv->dev,
89575eaae15SPraveen Kaligineedi "failed to destroy XDP queues\n");
89675eaae15SPraveen Kaligineedi /* This failure will trigger a reset - no need to clean up */
89775eaae15SPraveen Kaligineedi return err;
89875eaae15SPraveen Kaligineedi }
89975eaae15SPraveen Kaligineedi netif_dbg(priv, drv, priv->dev, "destroyed XDP queues\n");
90075eaae15SPraveen Kaligineedi
90175eaae15SPraveen Kaligineedi return 0;
90275eaae15SPraveen Kaligineedi }
90375eaae15SPraveen Kaligineedi
gve_destroy_rings(struct gve_priv * priv)904f5cedc84SCatherine Sullivan static int gve_destroy_rings(struct gve_priv *priv)
905f5cedc84SCatherine Sullivan {
9062e80aeaeSPraveen Kaligineedi int num_tx_queues = gve_num_tx_queues(priv);
907f5cedc84SCatherine Sullivan int err;
908f5cedc84SCatherine Sullivan
9097fc2bf78SPraveen Kaligineedi err = gve_adminq_destroy_tx_queues(priv, 0, num_tx_queues);
910f5cedc84SCatherine Sullivan if (err) {
911f5cedc84SCatherine Sullivan netif_err(priv, drv, priv->dev,
9125cdad90dSSagi Shahar "failed to destroy tx queues\n");
9135cdad90dSSagi Shahar /* This failure will trigger a reset - no need to clean up */
914f5cedc84SCatherine Sullivan return err;
915f5cedc84SCatherine Sullivan }
9165cdad90dSSagi Shahar netif_dbg(priv, drv, priv->dev, "destroyed tx queues\n");
9175cdad90dSSagi Shahar err = gve_adminq_destroy_rx_queues(priv, priv->rx_cfg.num_queues);
918f5cedc84SCatherine Sullivan if (err) {
919f5cedc84SCatherine Sullivan netif_err(priv, drv, priv->dev,
9205cdad90dSSagi Shahar "failed to destroy rx queues\n");
9215cdad90dSSagi Shahar /* This failure will trigger a reset - no need to clean up */
922f5cedc84SCatherine Sullivan return err;
923f5cedc84SCatherine Sullivan }
9245cdad90dSSagi Shahar netif_dbg(priv, drv, priv->dev, "destroyed rx queues\n");
925f5cedc84SCatherine Sullivan return 0;
926f5cedc84SCatherine Sullivan }
927f5cedc84SCatherine Sullivan
/* Dispatch rx ring freeing to the queue-format specific routine. */
static void gve_rx_free_rings(struct gve_priv *priv)
{
	if (!gve_is_gqi(priv))
		gve_rx_free_rings_dqo(priv);
	else
		gve_rx_free_rings_gqi(priv);
}
9359c1a59a2SBailey Forrest
gve_free_xdp_rings(struct gve_priv * priv)93675eaae15SPraveen Kaligineedi static void gve_free_xdp_rings(struct gve_priv *priv)
93775eaae15SPraveen Kaligineedi {
93875eaae15SPraveen Kaligineedi int ntfy_idx, start_id;
93975eaae15SPraveen Kaligineedi int i;
94075eaae15SPraveen Kaligineedi
94175eaae15SPraveen Kaligineedi start_id = gve_xdp_tx_start_queue_id(priv);
94275eaae15SPraveen Kaligineedi if (priv->tx) {
94375eaae15SPraveen Kaligineedi for (i = start_id; i < start_id + priv->num_xdp_queues; i++) {
94475eaae15SPraveen Kaligineedi ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
94575eaae15SPraveen Kaligineedi gve_remove_napi(priv, ntfy_idx);
94675eaae15SPraveen Kaligineedi }
94775eaae15SPraveen Kaligineedi gve_tx_free_rings(priv, start_id, priv->num_xdp_queues);
94875eaae15SPraveen Kaligineedi }
94975eaae15SPraveen Kaligineedi }
95075eaae15SPraveen Kaligineedi
gve_free_rings(struct gve_priv * priv)951f5cedc84SCatherine Sullivan static void gve_free_rings(struct gve_priv *priv)
952f5cedc84SCatherine Sullivan {
9532e80aeaeSPraveen Kaligineedi int num_tx_queues = gve_num_tx_queues(priv);
954f5cedc84SCatherine Sullivan int ntfy_idx;
955f5cedc84SCatherine Sullivan int i;
956f5cedc84SCatherine Sullivan
957f5cedc84SCatherine Sullivan if (priv->tx) {
9582e80aeaeSPraveen Kaligineedi for (i = 0; i < num_tx_queues; i++) {
959f5cedc84SCatherine Sullivan ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
960f5cedc84SCatherine Sullivan gve_remove_napi(priv, ntfy_idx);
961f5cedc84SCatherine Sullivan }
9627fc2bf78SPraveen Kaligineedi gve_tx_free_rings(priv, 0, num_tx_queues);
9638ec1e900SChuhong Yuan kvfree(priv->tx);
964f5cedc84SCatherine Sullivan priv->tx = NULL;
965f5cedc84SCatherine Sullivan }
966f5cedc84SCatherine Sullivan if (priv->rx) {
967f5cedc84SCatherine Sullivan for (i = 0; i < priv->rx_cfg.num_queues; i++) {
968f5cedc84SCatherine Sullivan ntfy_idx = gve_rx_idx_to_ntfy(priv, i);
969f5cedc84SCatherine Sullivan gve_remove_napi(priv, ntfy_idx);
970f5cedc84SCatherine Sullivan }
971f5cedc84SCatherine Sullivan gve_rx_free_rings(priv);
9728ec1e900SChuhong Yuan kvfree(priv->rx);
973f5cedc84SCatherine Sullivan priv->rx = NULL;
974f5cedc84SCatherine Sullivan }
975f5cedc84SCatherine Sullivan }
976f5cedc84SCatherine Sullivan
/* Allocate one page and DMA-map it for device access.
 *
 * On success sets *page and *dma and returns 0. On failure bumps the
 * matching error counter on @priv and returns -ENOMEM; the page is
 * released again if only the DMA mapping failed.
 * NOTE(review): on mapping failure *page is left pointing at the freed
 * page - callers must not touch it when an error is returned.
 */
int gve_alloc_page(struct gve_priv *priv, struct device *dev,
		   struct page **page, dma_addr_t *dma,
		   enum dma_data_direction dir, gfp_t gfp_flags)
{
	*page = alloc_page(gfp_flags);
	if (!*page) {
		priv->page_alloc_fail++;
		return -ENOMEM;
	}
	*dma = dma_map_page(dev, *page, 0, PAGE_SIZE, dir);
	if (dma_mapping_error(dev, *dma)) {
		priv->dma_mapping_error++;
		put_page(*page);
		return -ENOMEM;
	}
	return 0;
}
994f5cedc84SCatherine Sullivan
/* Allocate and DMA-map the pages backing queue page list @id.
 *
 * @id:    index into priv->qpls
 * @pages: number of pages this QPL needs
 *
 * Returns 0, -EINVAL if the registered-pages budget would be exceeded, or
 * the error from a failed allocation. On failure the partially-built QPL
 * is left for the caller to clean up via gve_free_queue_page_list();
 * qpl->num_entries tracks how many pages were successfully set up.
 */
static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id,
				     int pages)
{
	struct gve_queue_page_list *qpl = &priv->qpls[id];
	int err;
	int i;

	if (pages + priv->num_registered_pages > priv->max_registered_pages) {
		netif_err(priv, drv, priv->dev,
			  "Reached max number of registered pages %llu > %llu\n",
			  pages + priv->num_registered_pages,
			  priv->max_registered_pages);
		return -EINVAL;
	}

	qpl->id = id;
	qpl->num_entries = 0;
	qpl->pages = kvcalloc(pages, sizeof(*qpl->pages), GFP_KERNEL);
	/* caller handles clean up */
	if (!qpl->pages)
		return -ENOMEM;
	qpl->page_buses = kvcalloc(pages, sizeof(*qpl->page_buses), GFP_KERNEL);
	/* caller handles clean up */
	if (!qpl->page_buses)
		return -ENOMEM;

	for (i = 0; i < pages; i++) {
		err = gve_alloc_page(priv, &priv->pdev->dev, &qpl->pages[i],
				     &qpl->page_buses[i],
				     gve_qpl_dma_dir(priv, id), GFP_KERNEL);
		/* caller handles clean up; propagate the real error code
		 * instead of clobbering it with a hard-coded -ENOMEM.
		 */
		if (err)
			return err;
		qpl->num_entries++;
	}
	priv->num_registered_pages += pages;

	return 0;
}
1034f5cedc84SCatherine Sullivan
/* Unmap and release a page allocated by gve_alloc_page().
 *
 * Safe to call on partially-initialized state: an invalid DMA handle is
 * skipped and a NULL page is ignored.
 */
void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
		   enum dma_data_direction dir)
{
	if (!dma_mapping_error(dev, dma))
		dma_unmap_page(dev, dma, PAGE_SIZE, dir);
	if (page)
		put_page(page);
}
1043f5cedc84SCatherine Sullivan
/* Free every page of queue page list @id and its bookkeeping arrays.
 *
 * Tolerates QPLs that were never, or only partially, allocated.
 */
static void gve_free_queue_page_list(struct gve_priv *priv, u32 id)
{
	struct gve_queue_page_list *qpl = &priv->qpls[id];
	int idx;

	/* Nothing was allocated for this QPL. */
	if (!qpl->pages)
		return;

	if (qpl->page_buses) {
		/* Only the first num_entries pages were set up. */
		for (idx = 0; idx < qpl->num_entries; idx++)
			gve_free_page(&priv->pdev->dev, qpl->pages[idx],
				      qpl->page_buses[idx],
				      gve_qpl_dma_dir(priv, id));
		kvfree(qpl->page_buses);
		qpl->page_buses = NULL;
	}

	kvfree(qpl->pages);
	qpl->pages = NULL;
	priv->num_registered_pages -= qpl->num_entries;
}
1065f5cedc84SCatherine Sullivan
gve_alloc_xdp_qpls(struct gve_priv * priv)106675eaae15SPraveen Kaligineedi static int gve_alloc_xdp_qpls(struct gve_priv *priv)
106775eaae15SPraveen Kaligineedi {
106875eaae15SPraveen Kaligineedi int start_id;
106975eaae15SPraveen Kaligineedi int i, j;
107075eaae15SPraveen Kaligineedi int err;
107175eaae15SPraveen Kaligineedi
107275eaae15SPraveen Kaligineedi start_id = gve_tx_qpl_id(priv, gve_xdp_tx_start_queue_id(priv));
107375eaae15SPraveen Kaligineedi for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) {
107475eaae15SPraveen Kaligineedi err = gve_alloc_queue_page_list(priv, i,
107575eaae15SPraveen Kaligineedi priv->tx_pages_per_qpl);
107675eaae15SPraveen Kaligineedi if (err)
107775eaae15SPraveen Kaligineedi goto free_qpls;
107875eaae15SPraveen Kaligineedi }
107975eaae15SPraveen Kaligineedi
108075eaae15SPraveen Kaligineedi return 0;
108175eaae15SPraveen Kaligineedi
108275eaae15SPraveen Kaligineedi free_qpls:
108375eaae15SPraveen Kaligineedi for (j = start_id; j <= i; j++)
108475eaae15SPraveen Kaligineedi gve_free_queue_page_list(priv, j);
108575eaae15SPraveen Kaligineedi return err;
108675eaae15SPraveen Kaligineedi }
108775eaae15SPraveen Kaligineedi
gve_alloc_qpls(struct gve_priv * priv)1088f5cedc84SCatherine Sullivan static int gve_alloc_qpls(struct gve_priv *priv)
1089f5cedc84SCatherine Sullivan {
10907fc2bf78SPraveen Kaligineedi int max_queues = priv->tx_cfg.max_queues + priv->rx_cfg.max_queues;
109166ce8e6bSRushil Gupta int page_count;
10927fc2bf78SPraveen Kaligineedi int start_id;
1093f5cedc84SCatherine Sullivan int i, j;
1094f5cedc84SCatherine Sullivan int err;
1095f5cedc84SCatherine Sullivan
109666ce8e6bSRushil Gupta if (!gve_is_qpl(priv))
10974944db80SCatherine Sullivan return 0;
10984944db80SCatherine Sullivan
10997fc2bf78SPraveen Kaligineedi priv->qpls = kvcalloc(max_queues, sizeof(*priv->qpls), GFP_KERNEL);
1100f5cedc84SCatherine Sullivan if (!priv->qpls)
1101f5cedc84SCatherine Sullivan return -ENOMEM;
1102f5cedc84SCatherine Sullivan
11037fc2bf78SPraveen Kaligineedi start_id = gve_tx_start_qpl_id(priv);
110466ce8e6bSRushil Gupta page_count = priv->tx_pages_per_qpl;
11057fc2bf78SPraveen Kaligineedi for (i = start_id; i < start_id + gve_num_tx_qpls(priv); i++) {
1106f5cedc84SCatherine Sullivan err = gve_alloc_queue_page_list(priv, i,
110766ce8e6bSRushil Gupta page_count);
1108f5cedc84SCatherine Sullivan if (err)
1109f5cedc84SCatherine Sullivan goto free_qpls;
1110f5cedc84SCatherine Sullivan }
11117fc2bf78SPraveen Kaligineedi
11127fc2bf78SPraveen Kaligineedi start_id = gve_rx_start_qpl_id(priv);
111366ce8e6bSRushil Gupta
111466ce8e6bSRushil Gupta /* For GQI_QPL number of pages allocated have 1:1 relationship with
111566ce8e6bSRushil Gupta * number of descriptors. For DQO, number of pages required are
111666ce8e6bSRushil Gupta * more than descriptors (because of out of order completions).
111766ce8e6bSRushil Gupta */
111866ce8e6bSRushil Gupta page_count = priv->queue_format == GVE_GQI_QPL_FORMAT ?
111966ce8e6bSRushil Gupta priv->rx_data_slot_cnt : priv->rx_pages_per_qpl;
11207fc2bf78SPraveen Kaligineedi for (i = start_id; i < start_id + gve_num_rx_qpls(priv); i++) {
1121f5cedc84SCatherine Sullivan err = gve_alloc_queue_page_list(priv, i,
112266ce8e6bSRushil Gupta page_count);
1123f5cedc84SCatherine Sullivan if (err)
1124f5cedc84SCatherine Sullivan goto free_qpls;
1125f5cedc84SCatherine Sullivan }
1126f5cedc84SCatherine Sullivan
11277fc2bf78SPraveen Kaligineedi priv->qpl_cfg.qpl_map_size = BITS_TO_LONGS(max_queues) *
1128f5cedc84SCatherine Sullivan sizeof(unsigned long) * BITS_PER_BYTE;
11297fc2bf78SPraveen Kaligineedi priv->qpl_cfg.qpl_id_map = kvcalloc(BITS_TO_LONGS(max_queues),
1130f5cedc84SCatherine Sullivan sizeof(unsigned long), GFP_KERNEL);
1131877cb240SWei Yongjun if (!priv->qpl_cfg.qpl_id_map) {
1132877cb240SWei Yongjun err = -ENOMEM;
1133f5cedc84SCatherine Sullivan goto free_qpls;
1134877cb240SWei Yongjun }
1135f5cedc84SCatherine Sullivan
1136f5cedc84SCatherine Sullivan return 0;
1137f5cedc84SCatherine Sullivan
1138f5cedc84SCatherine Sullivan free_qpls:
1139f5cedc84SCatherine Sullivan for (j = 0; j <= i; j++)
1140f5cedc84SCatherine Sullivan gve_free_queue_page_list(priv, j);
11418ec1e900SChuhong Yuan kvfree(priv->qpls);
11427fc2bf78SPraveen Kaligineedi priv->qpls = NULL;
1143f5cedc84SCatherine Sullivan return err;
1144f5cedc84SCatherine Sullivan }
1145f5cedc84SCatherine Sullivan
/* Free the queue page lists backing the XDP tx queues. */
static void gve_free_xdp_qpls(struct gve_priv *priv)
{
	int first = gve_tx_qpl_id(priv, gve_xdp_tx_start_queue_id(priv));
	int last = first + gve_num_xdp_qpls(priv);
	int id;

	for (id = first; id < last; id++)
		gve_free_queue_page_list(priv, id);
}
115575eaae15SPraveen Kaligineedi
gve_free_qpls(struct gve_priv * priv)1156f5cedc84SCatherine Sullivan static void gve_free_qpls(struct gve_priv *priv)
1157f5cedc84SCatherine Sullivan {
11587fc2bf78SPraveen Kaligineedi int max_queues = priv->tx_cfg.max_queues + priv->rx_cfg.max_queues;
1159f5cedc84SCatherine Sullivan int i;
1160f5cedc84SCatherine Sullivan
11617fc2bf78SPraveen Kaligineedi if (!priv->qpls)
11624944db80SCatherine Sullivan return;
11634944db80SCatherine Sullivan
11648ec1e900SChuhong Yuan kvfree(priv->qpl_cfg.qpl_id_map);
11657fc2bf78SPraveen Kaligineedi priv->qpl_cfg.qpl_id_map = NULL;
1166f5cedc84SCatherine Sullivan
11677fc2bf78SPraveen Kaligineedi for (i = 0; i < max_queues; i++)
1168f5cedc84SCatherine Sullivan gve_free_queue_page_list(priv, i);
1169f5cedc84SCatherine Sullivan
11708ec1e900SChuhong Yuan kvfree(priv->qpls);
11717fc2bf78SPraveen Kaligineedi priv->qpls = NULL;
1172f5cedc84SCatherine Sullivan }
1173f5cedc84SCatherine Sullivan
11749e5f7d26SCatherine Sullivan /* Use this to schedule a reset when the device is capable of continuing
11759e5f7d26SCatherine Sullivan * to handle other requests in its current state. If it is not, do a reset
11769e5f7d26SCatherine Sullivan * in thread instead.
11779e5f7d26SCatherine Sullivan */
gve_schedule_reset(struct gve_priv * priv)11789e5f7d26SCatherine Sullivan void gve_schedule_reset(struct gve_priv *priv)
11799e5f7d26SCatherine Sullivan {
11809e5f7d26SCatherine Sullivan gve_set_do_reset(priv);
11819e5f7d26SCatherine Sullivan queue_work(priv->gve_wq, &priv->service_task);
11829e5f7d26SCatherine Sullivan }
11839e5f7d26SCatherine Sullivan
11849e5f7d26SCatherine Sullivan static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up);
11859e5f7d26SCatherine Sullivan static int gve_reset_recovery(struct gve_priv *priv, bool was_up);
1186f5cedc84SCatherine Sullivan static void gve_turndown(struct gve_priv *priv);
1187f5cedc84SCatherine Sullivan static void gve_turnup(struct gve_priv *priv);
1188f5cedc84SCatherine Sullivan
gve_reg_xdp_info(struct gve_priv * priv,struct net_device * dev)118975eaae15SPraveen Kaligineedi static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev)
119075eaae15SPraveen Kaligineedi {
119175eaae15SPraveen Kaligineedi struct napi_struct *napi;
119275eaae15SPraveen Kaligineedi struct gve_rx_ring *rx;
119375eaae15SPraveen Kaligineedi int err = 0;
119475eaae15SPraveen Kaligineedi int i, j;
1195fd8e4032SPraveen Kaligineedi u32 tx_qid;
119675eaae15SPraveen Kaligineedi
119775eaae15SPraveen Kaligineedi if (!priv->num_xdp_queues)
119875eaae15SPraveen Kaligineedi return 0;
119975eaae15SPraveen Kaligineedi
120075eaae15SPraveen Kaligineedi for (i = 0; i < priv->rx_cfg.num_queues; i++) {
120175eaae15SPraveen Kaligineedi rx = &priv->rx[i];
120275eaae15SPraveen Kaligineedi napi = &priv->ntfy_blocks[rx->ntfy_id].napi;
120375eaae15SPraveen Kaligineedi
120475eaae15SPraveen Kaligineedi err = xdp_rxq_info_reg(&rx->xdp_rxq, dev, i,
120575eaae15SPraveen Kaligineedi napi->napi_id);
120675eaae15SPraveen Kaligineedi if (err)
120775eaae15SPraveen Kaligineedi goto err;
120875eaae15SPraveen Kaligineedi err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
120975eaae15SPraveen Kaligineedi MEM_TYPE_PAGE_SHARED, NULL);
121075eaae15SPraveen Kaligineedi if (err)
121175eaae15SPraveen Kaligineedi goto err;
1212fd8e4032SPraveen Kaligineedi rx->xsk_pool = xsk_get_pool_from_qid(dev, i);
1213fd8e4032SPraveen Kaligineedi if (rx->xsk_pool) {
1214fd8e4032SPraveen Kaligineedi err = xdp_rxq_info_reg(&rx->xsk_rxq, dev, i,
1215fd8e4032SPraveen Kaligineedi napi->napi_id);
1216fd8e4032SPraveen Kaligineedi if (err)
1217fd8e4032SPraveen Kaligineedi goto err;
1218fd8e4032SPraveen Kaligineedi err = xdp_rxq_info_reg_mem_model(&rx->xsk_rxq,
1219fd8e4032SPraveen Kaligineedi MEM_TYPE_XSK_BUFF_POOL, NULL);
1220fd8e4032SPraveen Kaligineedi if (err)
1221fd8e4032SPraveen Kaligineedi goto err;
1222fd8e4032SPraveen Kaligineedi xsk_pool_set_rxq_info(rx->xsk_pool,
1223fd8e4032SPraveen Kaligineedi &rx->xsk_rxq);
1224fd8e4032SPraveen Kaligineedi }
1225fd8e4032SPraveen Kaligineedi }
1226fd8e4032SPraveen Kaligineedi
1227fd8e4032SPraveen Kaligineedi for (i = 0; i < priv->num_xdp_queues; i++) {
1228fd8e4032SPraveen Kaligineedi tx_qid = gve_xdp_tx_queue_id(priv, i);
1229fd8e4032SPraveen Kaligineedi priv->tx[tx_qid].xsk_pool = xsk_get_pool_from_qid(dev, i);
123075eaae15SPraveen Kaligineedi }
123175eaae15SPraveen Kaligineedi return 0;
123275eaae15SPraveen Kaligineedi
123375eaae15SPraveen Kaligineedi err:
123475eaae15SPraveen Kaligineedi for (j = i; j >= 0; j--) {
123575eaae15SPraveen Kaligineedi rx = &priv->rx[j];
123675eaae15SPraveen Kaligineedi if (xdp_rxq_info_is_reg(&rx->xdp_rxq))
123775eaae15SPraveen Kaligineedi xdp_rxq_info_unreg(&rx->xdp_rxq);
1238fd8e4032SPraveen Kaligineedi if (xdp_rxq_info_is_reg(&rx->xsk_rxq))
1239fd8e4032SPraveen Kaligineedi xdp_rxq_info_unreg(&rx->xsk_rxq);
124075eaae15SPraveen Kaligineedi }
124175eaae15SPraveen Kaligineedi return err;
124275eaae15SPraveen Kaligineedi }
124375eaae15SPraveen Kaligineedi
gve_unreg_xdp_info(struct gve_priv * priv)124475eaae15SPraveen Kaligineedi static void gve_unreg_xdp_info(struct gve_priv *priv)
124575eaae15SPraveen Kaligineedi {
1246fd8e4032SPraveen Kaligineedi int i, tx_qid;
124775eaae15SPraveen Kaligineedi
124875eaae15SPraveen Kaligineedi if (!priv->num_xdp_queues)
124975eaae15SPraveen Kaligineedi return;
125075eaae15SPraveen Kaligineedi
125175eaae15SPraveen Kaligineedi for (i = 0; i < priv->rx_cfg.num_queues; i++) {
125275eaae15SPraveen Kaligineedi struct gve_rx_ring *rx = &priv->rx[i];
125375eaae15SPraveen Kaligineedi
125475eaae15SPraveen Kaligineedi xdp_rxq_info_unreg(&rx->xdp_rxq);
1255fd8e4032SPraveen Kaligineedi if (rx->xsk_pool) {
1256fd8e4032SPraveen Kaligineedi xdp_rxq_info_unreg(&rx->xsk_rxq);
1257fd8e4032SPraveen Kaligineedi rx->xsk_pool = NULL;
1258fd8e4032SPraveen Kaligineedi }
1259fd8e4032SPraveen Kaligineedi }
1260fd8e4032SPraveen Kaligineedi
1261fd8e4032SPraveen Kaligineedi for (i = 0; i < priv->num_xdp_queues; i++) {
1262fd8e4032SPraveen Kaligineedi tx_qid = gve_xdp_tx_queue_id(priv, i);
1263fd8e4032SPraveen Kaligineedi priv->tx[tx_qid].xsk_pool = NULL;
126475eaae15SPraveen Kaligineedi }
126575eaae15SPraveen Kaligineedi }
126675eaae15SPraveen Kaligineedi
gve_drain_page_cache(struct gve_priv * priv)126739a7f4aaSPraveen Kaligineedi static void gve_drain_page_cache(struct gve_priv *priv)
126839a7f4aaSPraveen Kaligineedi {
126939a7f4aaSPraveen Kaligineedi struct page_frag_cache *nc;
127039a7f4aaSPraveen Kaligineedi int i;
127139a7f4aaSPraveen Kaligineedi
127239a7f4aaSPraveen Kaligineedi for (i = 0; i < priv->rx_cfg.num_queues; i++) {
127339a7f4aaSPraveen Kaligineedi nc = &priv->rx[i].page_cache;
127439a7f4aaSPraveen Kaligineedi if (nc->va) {
127539a7f4aaSPraveen Kaligineedi __page_frag_cache_drain(virt_to_page(nc->va),
127639a7f4aaSPraveen Kaligineedi nc->pagecnt_bias);
127739a7f4aaSPraveen Kaligineedi nc->va = NULL;
127839a7f4aaSPraveen Kaligineedi }
127939a7f4aaSPraveen Kaligineedi }
128039a7f4aaSPraveen Kaligineedi }
128139a7f4aaSPraveen Kaligineedi
gve_open(struct net_device * dev)1282f5cedc84SCatherine Sullivan static int gve_open(struct net_device *dev)
1283f5cedc84SCatherine Sullivan {
1284f5cedc84SCatherine Sullivan struct gve_priv *priv = netdev_priv(dev);
1285f5cedc84SCatherine Sullivan int err;
1286f5cedc84SCatherine Sullivan
128775eaae15SPraveen Kaligineedi if (priv->xdp_prog)
128875eaae15SPraveen Kaligineedi priv->num_xdp_queues = priv->rx_cfg.num_queues;
128975eaae15SPraveen Kaligineedi else
129075eaae15SPraveen Kaligineedi priv->num_xdp_queues = 0;
129175eaae15SPraveen Kaligineedi
1292f5cedc84SCatherine Sullivan err = gve_alloc_qpls(priv);
1293f5cedc84SCatherine Sullivan if (err)
1294f5cedc84SCatherine Sullivan return err;
12959c1a59a2SBailey Forrest
1296f5cedc84SCatherine Sullivan err = gve_alloc_rings(priv);
1297f5cedc84SCatherine Sullivan if (err)
1298f5cedc84SCatherine Sullivan goto free_qpls;
1299f5cedc84SCatherine Sullivan
1300f5cedc84SCatherine Sullivan err = netif_set_real_num_tx_queues(dev, priv->tx_cfg.num_queues);
1301f5cedc84SCatherine Sullivan if (err)
1302f5cedc84SCatherine Sullivan goto free_rings;
1303f5cedc84SCatherine Sullivan err = netif_set_real_num_rx_queues(dev, priv->rx_cfg.num_queues);
1304f5cedc84SCatherine Sullivan if (err)
1305f5cedc84SCatherine Sullivan goto free_rings;
1306f5cedc84SCatherine Sullivan
130775eaae15SPraveen Kaligineedi err = gve_reg_xdp_info(priv, dev);
130875eaae15SPraveen Kaligineedi if (err)
130975eaae15SPraveen Kaligineedi goto free_rings;
131075eaae15SPraveen Kaligineedi
1311f5cedc84SCatherine Sullivan err = gve_register_qpls(priv);
1312f5cedc84SCatherine Sullivan if (err)
13139e5f7d26SCatherine Sullivan goto reset;
13145e8c5adfSBailey Forrest
13155e8c5adfSBailey Forrest if (!gve_is_gqi(priv)) {
13165e8c5adfSBailey Forrest /* Hard code this for now. This may be tuned in the future for
13175e8c5adfSBailey Forrest * performance.
13185e8c5adfSBailey Forrest */
13195e8c5adfSBailey Forrest priv->data_buffer_size_dqo = GVE_RX_BUFFER_SIZE_DQO;
13205e8c5adfSBailey Forrest }
1321f5cedc84SCatherine Sullivan err = gve_create_rings(priv);
1322f5cedc84SCatherine Sullivan if (err)
13239e5f7d26SCatherine Sullivan goto reset;
13245e8c5adfSBailey Forrest
1325f5cedc84SCatherine Sullivan gve_set_device_rings_ok(priv);
1326f5cedc84SCatherine Sullivan
132724aeb56fSKuo Zhao if (gve_get_report_stats(priv))
132824aeb56fSKuo Zhao mod_timer(&priv->stats_report_timer,
132924aeb56fSKuo Zhao round_jiffies(jiffies +
133024aeb56fSKuo Zhao msecs_to_jiffies(priv->stats_report_timer_period)));
133124aeb56fSKuo Zhao
1332f5cedc84SCatherine Sullivan gve_turnup(priv);
13333b7cc736SPatricio Noyola queue_work(priv->gve_wq, &priv->service_task);
1334433e274bSKuo Zhao priv->interface_up_cnt++;
1335f5cedc84SCatherine Sullivan return 0;
1336f5cedc84SCatherine Sullivan
1337f5cedc84SCatherine Sullivan free_rings:
1338f5cedc84SCatherine Sullivan gve_free_rings(priv);
1339f5cedc84SCatherine Sullivan free_qpls:
1340f5cedc84SCatherine Sullivan gve_free_qpls(priv);
1341f5cedc84SCatherine Sullivan return err;
13429e5f7d26SCatherine Sullivan
13439e5f7d26SCatherine Sullivan reset:
13449e5f7d26SCatherine Sullivan /* This must have been called from a reset due to the rtnl lock
13459e5f7d26SCatherine Sullivan * so just return at this point.
13469e5f7d26SCatherine Sullivan */
13479e5f7d26SCatherine Sullivan if (gve_get_reset_in_progress(priv))
13489e5f7d26SCatherine Sullivan return err;
13499e5f7d26SCatherine Sullivan /* Otherwise reset before returning */
13509e5f7d26SCatherine Sullivan gve_reset_and_teardown(priv, true);
13519e5f7d26SCatherine Sullivan /* if this fails there is nothing we can do so just ignore the return */
13529e5f7d26SCatherine Sullivan gve_reset_recovery(priv, false);
13539e5f7d26SCatherine Sullivan /* return the original error */
13549e5f7d26SCatherine Sullivan return err;
1355f5cedc84SCatherine Sullivan }
1356f5cedc84SCatherine Sullivan
gve_close(struct net_device * dev)1357f5cedc84SCatherine Sullivan static int gve_close(struct net_device *dev)
1358f5cedc84SCatherine Sullivan {
1359f5cedc84SCatherine Sullivan struct gve_priv *priv = netdev_priv(dev);
1360f5cedc84SCatherine Sullivan int err;
1361f5cedc84SCatherine Sullivan
1362f5cedc84SCatherine Sullivan netif_carrier_off(dev);
1363f5cedc84SCatherine Sullivan if (gve_get_device_rings_ok(priv)) {
1364f5cedc84SCatherine Sullivan gve_turndown(priv);
136539a7f4aaSPraveen Kaligineedi gve_drain_page_cache(priv);
1366f5cedc84SCatherine Sullivan err = gve_destroy_rings(priv);
1367f5cedc84SCatherine Sullivan if (err)
13689e5f7d26SCatherine Sullivan goto err;
1369f5cedc84SCatherine Sullivan err = gve_unregister_qpls(priv);
1370f5cedc84SCatherine Sullivan if (err)
13719e5f7d26SCatherine Sullivan goto err;
1372f5cedc84SCatherine Sullivan gve_clear_device_rings_ok(priv);
1373f5cedc84SCatherine Sullivan }
137424aeb56fSKuo Zhao del_timer_sync(&priv->stats_report_timer);
1375f5cedc84SCatherine Sullivan
137675eaae15SPraveen Kaligineedi gve_unreg_xdp_info(priv);
1377f5cedc84SCatherine Sullivan gve_free_rings(priv);
1378f5cedc84SCatherine Sullivan gve_free_qpls(priv);
1379433e274bSKuo Zhao priv->interface_down_cnt++;
1380f5cedc84SCatherine Sullivan return 0;
13819e5f7d26SCatherine Sullivan
13829e5f7d26SCatherine Sullivan err:
13839e5f7d26SCatherine Sullivan /* This must have been called from a reset due to the rtnl lock
13849e5f7d26SCatherine Sullivan * so just return at this point.
13859e5f7d26SCatherine Sullivan */
13869e5f7d26SCatherine Sullivan if (gve_get_reset_in_progress(priv))
13879e5f7d26SCatherine Sullivan return err;
13889e5f7d26SCatherine Sullivan /* Otherwise reset before returning */
13899e5f7d26SCatherine Sullivan gve_reset_and_teardown(priv, true);
13909e5f7d26SCatherine Sullivan return gve_reset_recovery(priv, false);
1391f5cedc84SCatherine Sullivan }
1392f5cedc84SCatherine Sullivan
gve_remove_xdp_queues(struct gve_priv * priv)139375eaae15SPraveen Kaligineedi static int gve_remove_xdp_queues(struct gve_priv *priv)
139475eaae15SPraveen Kaligineedi {
139575eaae15SPraveen Kaligineedi int err;
139675eaae15SPraveen Kaligineedi
139775eaae15SPraveen Kaligineedi err = gve_destroy_xdp_rings(priv);
139875eaae15SPraveen Kaligineedi if (err)
139975eaae15SPraveen Kaligineedi return err;
140075eaae15SPraveen Kaligineedi
140175eaae15SPraveen Kaligineedi err = gve_unregister_xdp_qpls(priv);
140275eaae15SPraveen Kaligineedi if (err)
140375eaae15SPraveen Kaligineedi return err;
140475eaae15SPraveen Kaligineedi
140575eaae15SPraveen Kaligineedi gve_unreg_xdp_info(priv);
140675eaae15SPraveen Kaligineedi gve_free_xdp_rings(priv);
140775eaae15SPraveen Kaligineedi gve_free_xdp_qpls(priv);
140875eaae15SPraveen Kaligineedi priv->num_xdp_queues = 0;
140975eaae15SPraveen Kaligineedi return 0;
141075eaae15SPraveen Kaligineedi }
141175eaae15SPraveen Kaligineedi
gve_add_xdp_queues(struct gve_priv * priv)141275eaae15SPraveen Kaligineedi static int gve_add_xdp_queues(struct gve_priv *priv)
141375eaae15SPraveen Kaligineedi {
141475eaae15SPraveen Kaligineedi int err;
141575eaae15SPraveen Kaligineedi
141675eaae15SPraveen Kaligineedi priv->num_xdp_queues = priv->tx_cfg.num_queues;
141775eaae15SPraveen Kaligineedi
141875eaae15SPraveen Kaligineedi err = gve_alloc_xdp_qpls(priv);
141975eaae15SPraveen Kaligineedi if (err)
142075eaae15SPraveen Kaligineedi goto err;
142175eaae15SPraveen Kaligineedi
142275eaae15SPraveen Kaligineedi err = gve_alloc_xdp_rings(priv);
142375eaae15SPraveen Kaligineedi if (err)
142475eaae15SPraveen Kaligineedi goto free_xdp_qpls;
142575eaae15SPraveen Kaligineedi
142675eaae15SPraveen Kaligineedi err = gve_reg_xdp_info(priv, priv->dev);
142775eaae15SPraveen Kaligineedi if (err)
142875eaae15SPraveen Kaligineedi goto free_xdp_rings;
142975eaae15SPraveen Kaligineedi
143075eaae15SPraveen Kaligineedi err = gve_register_xdp_qpls(priv);
143175eaae15SPraveen Kaligineedi if (err)
143275eaae15SPraveen Kaligineedi goto free_xdp_rings;
143375eaae15SPraveen Kaligineedi
143475eaae15SPraveen Kaligineedi err = gve_create_xdp_rings(priv);
143575eaae15SPraveen Kaligineedi if (err)
143675eaae15SPraveen Kaligineedi goto free_xdp_rings;
143775eaae15SPraveen Kaligineedi
143875eaae15SPraveen Kaligineedi return 0;
143975eaae15SPraveen Kaligineedi
144075eaae15SPraveen Kaligineedi free_xdp_rings:
144175eaae15SPraveen Kaligineedi gve_free_xdp_rings(priv);
144275eaae15SPraveen Kaligineedi free_xdp_qpls:
144375eaae15SPraveen Kaligineedi gve_free_xdp_qpls(priv);
144475eaae15SPraveen Kaligineedi err:
144575eaae15SPraveen Kaligineedi priv->num_xdp_queues = 0;
144675eaae15SPraveen Kaligineedi return err;
144775eaae15SPraveen Kaligineedi }
144875eaae15SPraveen Kaligineedi
/* Reflect the device-reported link state onto the netdev carrier,
 * logging only on an actual transition.  No-op while napi is disabled.
 */
static void gve_handle_link_status(struct gve_priv *priv, bool link_status)
{
	if (!gve_get_napi_enabled(priv))
		return;

	/* Nothing to do if the carrier already matches. */
	if (link_status == netif_carrier_ok(priv->dev))
		return;

	if (link_status) {
		netdev_info(priv->dev, "Device link is up.\n");
		netif_carrier_on(priv->dev);
	} else {
		netdev_info(priv->dev, "Device link is down.\n");
		netif_carrier_off(priv->dev);
	}
}
146575eaae15SPraveen Kaligineedi
gve_set_xdp(struct gve_priv * priv,struct bpf_prog * prog,struct netlink_ext_ack * extack)146675eaae15SPraveen Kaligineedi static int gve_set_xdp(struct gve_priv *priv, struct bpf_prog *prog,
146775eaae15SPraveen Kaligineedi struct netlink_ext_ack *extack)
146875eaae15SPraveen Kaligineedi {
146975eaae15SPraveen Kaligineedi struct bpf_prog *old_prog;
147075eaae15SPraveen Kaligineedi int err = 0;
147175eaae15SPraveen Kaligineedi u32 status;
147275eaae15SPraveen Kaligineedi
147375eaae15SPraveen Kaligineedi old_prog = READ_ONCE(priv->xdp_prog);
147475eaae15SPraveen Kaligineedi if (!netif_carrier_ok(priv->dev)) {
147575eaae15SPraveen Kaligineedi WRITE_ONCE(priv->xdp_prog, prog);
147675eaae15SPraveen Kaligineedi if (old_prog)
147775eaae15SPraveen Kaligineedi bpf_prog_put(old_prog);
147875eaae15SPraveen Kaligineedi return 0;
147975eaae15SPraveen Kaligineedi }
148075eaae15SPraveen Kaligineedi
148175eaae15SPraveen Kaligineedi gve_turndown(priv);
148275eaae15SPraveen Kaligineedi if (!old_prog && prog) {
148375eaae15SPraveen Kaligineedi // Allocate XDP TX queues if an XDP program is
148475eaae15SPraveen Kaligineedi // being installed
148575eaae15SPraveen Kaligineedi err = gve_add_xdp_queues(priv);
148675eaae15SPraveen Kaligineedi if (err)
148775eaae15SPraveen Kaligineedi goto out;
148875eaae15SPraveen Kaligineedi } else if (old_prog && !prog) {
148975eaae15SPraveen Kaligineedi // Remove XDP TX queues if an XDP program is
149075eaae15SPraveen Kaligineedi // being uninstalled
149175eaae15SPraveen Kaligineedi err = gve_remove_xdp_queues(priv);
149275eaae15SPraveen Kaligineedi if (err)
149375eaae15SPraveen Kaligineedi goto out;
149475eaae15SPraveen Kaligineedi }
149575eaae15SPraveen Kaligineedi WRITE_ONCE(priv->xdp_prog, prog);
149675eaae15SPraveen Kaligineedi if (old_prog)
149775eaae15SPraveen Kaligineedi bpf_prog_put(old_prog);
149875eaae15SPraveen Kaligineedi
149975eaae15SPraveen Kaligineedi out:
150075eaae15SPraveen Kaligineedi gve_turnup(priv);
150175eaae15SPraveen Kaligineedi status = ioread32be(&priv->reg_bar0->device_status);
150275eaae15SPraveen Kaligineedi gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status);
150375eaae15SPraveen Kaligineedi return err;
150475eaae15SPraveen Kaligineedi }
150575eaae15SPraveen Kaligineedi
/* Bind an AF_XDP buffer pool to RX queue @qid (and its paired XDP TX
 * queue).  The pool is DMA-mapped unconditionally; the rxq registration
 * is only done when an XDP program is installed and the interface is up.
 */
static int gve_xsk_pool_enable(struct net_device *dev,
			       struct xsk_buff_pool *pool,
			       u16 qid)
{
	struct gve_priv *priv = netdev_priv(dev);
	struct napi_struct *napi;
	struct gve_rx_ring *rx;
	int tx_qid;
	int err;

	if (qid >= priv->rx_cfg.num_queues) {
		dev_err(&priv->pdev->dev, "xsk pool invalid qid %d", qid);
		return -EINVAL;
	}
	/* Pool frames must fit a max-MTU packet plus the Ethernet header. */
	if (xsk_pool_get_rx_frame_size(pool) <
	    priv->dev->max_mtu + sizeof(struct ethhdr)) {
		dev_err(&priv->pdev->dev, "xsk pool frame_len too small");
		return -EINVAL;
	}

	err = xsk_pool_dma_map(pool, &priv->pdev->dev,
			       DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
	if (err)
		return err;

	/* If XDP prog is not installed or interface is down, return. */
	if (!priv->xdp_prog || !netif_running(dev))
		return 0;

	rx = &priv->rx[qid];
	napi = &priv->ntfy_blocks[rx->ntfy_id].napi;
	err = xdp_rxq_info_reg(&rx->xsk_rxq, dev, qid, napi->napi_id);
	if (err)
		goto err_unmap;

	err = xdp_rxq_info_reg_mem_model(&rx->xsk_rxq,
					 MEM_TYPE_XSK_BUFF_POOL, NULL);
	if (err)
		goto err_unmap;

	xsk_pool_set_rxq_info(pool, &rx->xsk_rxq);
	rx->xsk_pool = pool;

	tx_qid = gve_xdp_tx_queue_id(priv, qid);
	priv->tx[tx_qid].xsk_pool = pool;

	return 0;

err_unmap:
	if (xdp_rxq_info_is_reg(&rx->xsk_rxq))
		xdp_rxq_info_unreg(&rx->xsk_rxq);

	xsk_pool_dma_unmap(pool,
			   DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
	return err;
}
1561fd8e4032SPraveen Kaligineedi
/* Unbind the AF_XDP buffer pool from queue @qid.  When the datapath is
 * live, napi on both the RX and paired XDP TX queue is quiesced while the
 * pool pointers are cleared, then re-enabled (rescheduling if work is
 * pending).  The DMA mapping is always released.
 */
static int gve_xsk_pool_disable(struct net_device *dev,
				u16 qid)
{
	struct gve_priv *priv = netdev_priv(dev);
	struct napi_struct *napi_rx;
	struct napi_struct *napi_tx;
	struct xsk_buff_pool *pool;
	int tx_qid;

	pool = xsk_get_pool_from_qid(dev, qid);
	if (!pool)
		return -EINVAL;
	if (qid >= priv->rx_cfg.num_queues)
		return -EINVAL;

	/* If XDP prog is not installed or interface is down, unmap DMA and
	 * return.
	 */
	if (!priv->xdp_prog || !netif_running(dev))
		goto done;

	napi_rx = &priv->ntfy_blocks[priv->rx[qid].ntfy_id].napi;
	napi_disable(napi_rx); /* make sure current rx poll is done */

	tx_qid = gve_xdp_tx_queue_id(priv, qid);
	napi_tx = &priv->ntfy_blocks[priv->tx[tx_qid].ntfy_id].napi;
	napi_disable(napi_tx); /* make sure current tx poll is done */

	priv->rx[qid].xsk_pool = NULL;
	xdp_rxq_info_unreg(&priv->rx[qid].xsk_rxq);
	priv->tx[tx_qid].xsk_pool = NULL;
	smp_mb(); /* Make sure it is visible to the workers on datapath */

	napi_enable(napi_rx);
	if (gve_rx_work_pending(&priv->rx[qid]))
		napi_schedule(napi_rx);

	napi_enable(napi_tx);
	if (gve_tx_clean_pending(priv, &priv->tx[tx_qid]))
		napi_schedule(napi_tx);

done:
	xsk_pool_dma_unmap(pool,
			   DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
	return 0;
}
1608fd8e4032SPraveen Kaligineedi
/* ndo_xsk_wakeup: kick the XDP TX queue paired with @queue_id so pending
 * AF_XDP descriptors get processed by its napi context.
 */
static int gve_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
{
	struct gve_priv *priv = netdev_priv(dev);
	int tx_queue_id = gve_xdp_tx_queue_id(priv, queue_id);

	if (!gve_get_napi_enabled(priv))
		return -ENETDOWN;

	if (queue_id >= priv->rx_cfg.num_queues || !priv->xdp_prog)
		return -EINVAL;

	if (flags & XDP_WAKEUP_TX) {
		struct gve_tx_ring *tx = &priv->tx[tx_queue_id];
		struct napi_struct *napi =
			&priv->ntfy_blocks[tx->ntfy_id].napi;

		if (!napi_if_scheduled_mark_missed(napi)) {
			/* Call local_bh_enable to trigger SoftIRQ processing */
			local_bh_disable();
			napi_schedule(napi);
			local_bh_enable();
		}

		tx->xdp_xsk_wakeup++;
	}

	return 0;
}
1637fd8e4032SPraveen Kaligineedi
verify_xdp_configuration(struct net_device * dev)163875eaae15SPraveen Kaligineedi static int verify_xdp_configuration(struct net_device *dev)
163975eaae15SPraveen Kaligineedi {
164075eaae15SPraveen Kaligineedi struct gve_priv *priv = netdev_priv(dev);
164175eaae15SPraveen Kaligineedi
164275eaae15SPraveen Kaligineedi if (dev->features & NETIF_F_LRO) {
164375eaae15SPraveen Kaligineedi netdev_warn(dev, "XDP is not supported when LRO is on.\n");
164475eaae15SPraveen Kaligineedi return -EOPNOTSUPP;
164575eaae15SPraveen Kaligineedi }
164675eaae15SPraveen Kaligineedi
164775eaae15SPraveen Kaligineedi if (priv->queue_format != GVE_GQI_QPL_FORMAT) {
164875eaae15SPraveen Kaligineedi netdev_warn(dev, "XDP is not supported in mode %d.\n",
164975eaae15SPraveen Kaligineedi priv->queue_format);
165075eaae15SPraveen Kaligineedi return -EOPNOTSUPP;
165175eaae15SPraveen Kaligineedi }
165275eaae15SPraveen Kaligineedi
165375eaae15SPraveen Kaligineedi if (dev->mtu > (PAGE_SIZE / 2) - sizeof(struct ethhdr) - GVE_RX_PAD) {
165475eaae15SPraveen Kaligineedi netdev_warn(dev, "XDP is not supported for mtu %d.\n",
165575eaae15SPraveen Kaligineedi dev->mtu);
165675eaae15SPraveen Kaligineedi return -EOPNOTSUPP;
165775eaae15SPraveen Kaligineedi }
165875eaae15SPraveen Kaligineedi
165975eaae15SPraveen Kaligineedi if (priv->rx_cfg.num_queues != priv->tx_cfg.num_queues ||
166075eaae15SPraveen Kaligineedi (2 * priv->tx_cfg.num_queues > priv->tx_cfg.max_queues)) {
166175eaae15SPraveen Kaligineedi netdev_warn(dev, "XDP load failed: The number of configured RX queues %d should be equal to the number of configured TX queues %d and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues %d",
166275eaae15SPraveen Kaligineedi priv->rx_cfg.num_queues,
166375eaae15SPraveen Kaligineedi priv->tx_cfg.num_queues,
166475eaae15SPraveen Kaligineedi priv->tx_cfg.max_queues);
166575eaae15SPraveen Kaligineedi return -EINVAL;
166675eaae15SPraveen Kaligineedi }
166775eaae15SPraveen Kaligineedi return 0;
166875eaae15SPraveen Kaligineedi }
166975eaae15SPraveen Kaligineedi
gve_xdp(struct net_device * dev,struct netdev_bpf * xdp)167075eaae15SPraveen Kaligineedi static int gve_xdp(struct net_device *dev, struct netdev_bpf *xdp)
167175eaae15SPraveen Kaligineedi {
167275eaae15SPraveen Kaligineedi struct gve_priv *priv = netdev_priv(dev);
167375eaae15SPraveen Kaligineedi int err;
167475eaae15SPraveen Kaligineedi
167575eaae15SPraveen Kaligineedi err = verify_xdp_configuration(dev);
167675eaae15SPraveen Kaligineedi if (err)
167775eaae15SPraveen Kaligineedi return err;
167875eaae15SPraveen Kaligineedi switch (xdp->command) {
167975eaae15SPraveen Kaligineedi case XDP_SETUP_PROG:
168075eaae15SPraveen Kaligineedi return gve_set_xdp(priv, xdp->prog, xdp->extack);
1681fd8e4032SPraveen Kaligineedi case XDP_SETUP_XSK_POOL:
1682fd8e4032SPraveen Kaligineedi if (xdp->xsk.pool)
1683fd8e4032SPraveen Kaligineedi return gve_xsk_pool_enable(dev, xdp->xsk.pool, xdp->xsk.queue_id);
1684fd8e4032SPraveen Kaligineedi else
1685fd8e4032SPraveen Kaligineedi return gve_xsk_pool_disable(dev, xdp->xsk.queue_id);
168675eaae15SPraveen Kaligineedi default:
168775eaae15SPraveen Kaligineedi return -EINVAL;
168875eaae15SPraveen Kaligineedi }
168975eaae15SPraveen Kaligineedi }
169075eaae15SPraveen Kaligineedi
gve_adjust_queues(struct gve_priv * priv,struct gve_queue_config new_rx_config,struct gve_queue_config new_tx_config)1691e5b845dcSCatherine Sullivan int gve_adjust_queues(struct gve_priv *priv,
1692e5b845dcSCatherine Sullivan struct gve_queue_config new_rx_config,
1693e5b845dcSCatherine Sullivan struct gve_queue_config new_tx_config)
1694e5b845dcSCatherine Sullivan {
1695e5b845dcSCatherine Sullivan int err;
1696e5b845dcSCatherine Sullivan
1697e5b845dcSCatherine Sullivan if (netif_carrier_ok(priv->dev)) {
1698e5b845dcSCatherine Sullivan /* To make this process as simple as possible we teardown the
1699e5b845dcSCatherine Sullivan * device, set the new configuration, and then bring the device
1700e5b845dcSCatherine Sullivan * up again.
1701e5b845dcSCatherine Sullivan */
1702e5b845dcSCatherine Sullivan err = gve_close(priv->dev);
1703e5b845dcSCatherine Sullivan /* we have already tried to reset in close,
1704e5b845dcSCatherine Sullivan * just fail at this point
1705e5b845dcSCatherine Sullivan */
1706e5b845dcSCatherine Sullivan if (err)
1707e5b845dcSCatherine Sullivan return err;
1708e5b845dcSCatherine Sullivan priv->tx_cfg = new_tx_config;
1709e5b845dcSCatherine Sullivan priv->rx_cfg = new_rx_config;
1710e5b845dcSCatherine Sullivan
1711e5b845dcSCatherine Sullivan err = gve_open(priv->dev);
1712e5b845dcSCatherine Sullivan if (err)
1713e5b845dcSCatherine Sullivan goto err;
1714e5b845dcSCatherine Sullivan
1715e5b845dcSCatherine Sullivan return 0;
1716e5b845dcSCatherine Sullivan }
1717e5b845dcSCatherine Sullivan /* Set the config for the next up. */
1718e5b845dcSCatherine Sullivan priv->tx_cfg = new_tx_config;
1719e5b845dcSCatherine Sullivan priv->rx_cfg = new_rx_config;
1720e5b845dcSCatherine Sullivan
1721e5b845dcSCatherine Sullivan return 0;
1722e5b845dcSCatherine Sullivan err:
1723e5b845dcSCatherine Sullivan netif_err(priv, drv, priv->dev,
1724e5b845dcSCatherine Sullivan "Adjust queues failed! !!! DISABLING ALL QUEUES !!!\n");
1725e5b845dcSCatherine Sullivan gve_turndown(priv);
1726e5b845dcSCatherine Sullivan return err;
1727e5b845dcSCatherine Sullivan }
1728e5b845dcSCatherine Sullivan
/* Quiesce the device: drop carrier, disable all napi instances, stop the
 * TX queues, and wait for in-flight traffic to drain.  The ordering here
 * matters: napi is disabled before the TX queues are stopped, and
 * synchronize_net() runs last so all datapath work has finished.
 */
static void gve_turndown(struct gve_priv *priv)
{
	int idx;

	if (netif_carrier_ok(priv->dev))
		netif_carrier_off(priv->dev);

	/* Already turned down; nothing more to disable. */
	if (!gve_get_napi_enabled(priv))
		return;

	/* Disable napi to prevent more work from coming in */
	for (idx = 0; idx < gve_num_tx_queues(priv); idx++) {
		int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

		napi_disable(&block->napi);
	}
	for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
		int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

		napi_disable(&block->napi);
	}

	/* Stop tx queues */
	netif_tx_disable(priv->dev);

	/* XDP_REDIRECT targeting this device is no longer possible while
	 * the queues are down; gve_turnup() re-advertises it.
	 */
	xdp_features_clear_redirect_target(priv->dev);

	gve_clear_napi_enabled(priv);
	gve_clear_report_stats(priv);

	/* Make sure that all traffic is finished processing. */
	synchronize_net();
}
1764f5cedc84SCatherine Sullivan
/* Bring the datapath back up after gve_turndown(): restart the TX queues,
 * re-enable napi for every queue, and unmask/rearm interrupts.  GQI queues
 * are unmasked by a doorbell write; DQO queues by reprogramming interrupt
 * coalescing.
 */
static void gve_turnup(struct gve_priv *priv)
{
	int idx;

	/* Start the tx queues */
	netif_tx_start_all_queues(priv->dev);

	/* Enable napi and unmask interrupts for all queues */
	for (idx = 0; idx < gve_num_tx_queues(priv); idx++) {
		int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

		napi_enable(&block->napi);
		if (gve_is_gqi(priv)) {
			/* Writing 0 to the irq doorbell unmasks the vector. */
			iowrite32be(0, gve_irq_doorbell(priv, block));
		} else {
			gve_set_itr_coalesce_usecs_dqo(priv, block,
						       priv->tx_coalesce_usecs);
		}
	}
	for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
		int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

		napi_enable(&block->napi);
		if (gve_is_gqi(priv)) {
			iowrite32be(0, gve_irq_doorbell(priv, block));
		} else {
			gve_set_itr_coalesce_usecs_dqo(priv, block,
						       priv->rx_coalesce_usecs);
		}
	}

	/* Re-advertise XDP_REDIRECT support only when XDP queues exist and
	 * the current queue format can transmit XDP frames.
	 */
	if (priv->num_xdp_queues && gve_supports_xdp_xmit(priv))
		xdp_features_set_redirect_target(priv->dev, false);

	gve_set_napi_enabled(priv);
}
1803f5cedc84SCatherine Sullivan
gve_tx_timeout(struct net_device * dev,unsigned int txqueue)18040290bd29SMichael S. Tsirkin static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue)
1805f5cedc84SCatherine Sullivan {
180687a7f321SJohn Fraker struct gve_notify_block *block;
180787a7f321SJohn Fraker struct gve_tx_ring *tx = NULL;
180887a7f321SJohn Fraker struct gve_priv *priv;
180987a7f321SJohn Fraker u32 last_nic_done;
181087a7f321SJohn Fraker u32 current_time;
181187a7f321SJohn Fraker u32 ntfy_idx;
1812f5cedc84SCatherine Sullivan
181387a7f321SJohn Fraker netdev_info(dev, "Timeout on tx queue, %d", txqueue);
181487a7f321SJohn Fraker priv = netdev_priv(dev);
181587a7f321SJohn Fraker if (txqueue > priv->tx_cfg.num_queues)
181687a7f321SJohn Fraker goto reset;
181787a7f321SJohn Fraker
181887a7f321SJohn Fraker ntfy_idx = gve_tx_idx_to_ntfy(priv, txqueue);
18191c360cc1SDan Carpenter if (ntfy_idx >= priv->num_ntfy_blks)
182087a7f321SJohn Fraker goto reset;
182187a7f321SJohn Fraker
182287a7f321SJohn Fraker block = &priv->ntfy_blocks[ntfy_idx];
182387a7f321SJohn Fraker tx = block->tx;
182487a7f321SJohn Fraker
182587a7f321SJohn Fraker current_time = jiffies_to_msecs(jiffies);
182687a7f321SJohn Fraker if (tx->last_kick_msec + MIN_TX_TIMEOUT_GAP > current_time)
182787a7f321SJohn Fraker goto reset;
182887a7f321SJohn Fraker
182987a7f321SJohn Fraker /* Check to see if there are missed completions, which will allow us to
183087a7f321SJohn Fraker * kick the queue.
183187a7f321SJohn Fraker */
183287a7f321SJohn Fraker last_nic_done = gve_tx_load_event_counter(priv, tx);
183387a7f321SJohn Fraker if (last_nic_done - tx->done) {
183487a7f321SJohn Fraker netdev_info(dev, "Kicking queue %d", txqueue);
183587a7f321SJohn Fraker iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block));
183687a7f321SJohn Fraker napi_schedule(&block->napi);
183787a7f321SJohn Fraker tx->last_kick_msec = current_time;
183887a7f321SJohn Fraker goto out;
183987a7f321SJohn Fraker } // Else reset.
184087a7f321SJohn Fraker
184187a7f321SJohn Fraker reset:
18429e5f7d26SCatherine Sullivan gve_schedule_reset(priv);
184387a7f321SJohn Fraker
184487a7f321SJohn Fraker out:
184587a7f321SJohn Fraker if (tx)
184687a7f321SJohn Fraker tx->queue_timeout++;
1847f5cedc84SCatherine Sullivan priv->tx_timeo_cnt++;
1848f5cedc84SCatherine Sullivan }
1849f5cedc84SCatherine Sullivan
/* .ndo_set_features handler.
 *
 * Only the LRO bit is acted on here.  Toggling it while the device is up
 * requires a close/open cycle; on failure the feature change is reverted
 * and the error is returned to the stack.
 */
static int gve_set_features(struct net_device *netdev,
			    netdev_features_t features)
{
	const netdev_features_t orig_features = netdev->features;
	struct gve_priv *priv = netdev_priv(netdev);
	int err;

	/* Nothing to do unless the LRO bit is actually changing. */
	if (!((netdev->features ^ features) & NETIF_F_LRO))
		return 0;

	netdev->features ^= NETIF_F_LRO;

	/* Device is down: the new setting takes effect on the next open. */
	if (!netif_carrier_ok(netdev))
		return 0;

	/* To make this process as simple as possible we teardown the
	 * device, set the new configuration, and then bring the device
	 * up again.
	 */
	err = gve_close(netdev);
	/* We have already tried to reset in close, just fail at this point. */
	if (err)
		goto err;

	err = gve_open(netdev);
	if (err)
		goto err;

	return 0;
err:
	/* Reverts the change on error. */
	netdev->features = orig_features;
	netif_err(priv, drv, netdev,
		  "Set features failed! !!! DISABLING ALL QUEUES !!!\n");
	return err;
}
18855e8c5adfSBailey Forrest
1886f5cedc84SCatherine Sullivan static const struct net_device_ops gve_netdev_ops = {
18875e8c5adfSBailey Forrest .ndo_start_xmit = gve_start_xmit,
1888f5cedc84SCatherine Sullivan .ndo_open = gve_open,
1889f5cedc84SCatherine Sullivan .ndo_stop = gve_close,
1890f5cedc84SCatherine Sullivan .ndo_get_stats64 = gve_get_stats,
1891f5cedc84SCatherine Sullivan .ndo_tx_timeout = gve_tx_timeout,
18925e8c5adfSBailey Forrest .ndo_set_features = gve_set_features,
189375eaae15SPraveen Kaligineedi .ndo_bpf = gve_xdp,
189439a7f4aaSPraveen Kaligineedi .ndo_xdp_xmit = gve_xdp_xmit,
1895fd8e4032SPraveen Kaligineedi .ndo_xsk_wakeup = gve_xsk_wakeup,
1896f5cedc84SCatherine Sullivan };
1897f5cedc84SCatherine Sullivan
/* Decode the device_status register bits and latch each request into the
 * corresponding priv flag for the service task to act on.
 */
static void gve_handle_status(struct gve_priv *priv, u32 status)
{
	if (status & GVE_DEVICE_STATUS_RESET_MASK) {
		dev_info(&priv->pdev->dev, "Device requested reset.\n");
		gve_set_do_reset(priv);
	}

	if (status & GVE_DEVICE_STATUS_REPORT_STATS_MASK) {
		priv->stats_report_trigger_cnt++;
		gve_set_do_report_stats(priv);
	}
}
19099e5f7d26SCatherine Sullivan
gve_handle_reset(struct gve_priv * priv)19109e5f7d26SCatherine Sullivan static void gve_handle_reset(struct gve_priv *priv)
19119e5f7d26SCatherine Sullivan {
19129e5f7d26SCatherine Sullivan /* A service task will be scheduled at the end of probe to catch any
19139e5f7d26SCatherine Sullivan * resets that need to happen, and we don't want to reset until
19149e5f7d26SCatherine Sullivan * probe is done.
19159e5f7d26SCatherine Sullivan */
19169e5f7d26SCatherine Sullivan if (gve_get_probe_in_progress(priv))
19179e5f7d26SCatherine Sullivan return;
19189e5f7d26SCatherine Sullivan
19199e5f7d26SCatherine Sullivan if (gve_get_do_reset(priv)) {
19209e5f7d26SCatherine Sullivan rtnl_lock();
19219e5f7d26SCatherine Sullivan gve_reset(priv, false);
19229e5f7d26SCatherine Sullivan rtnl_unlock();
19239e5f7d26SCatherine Sullivan }
19249e5f7d26SCatherine Sullivan }
19259e5f7d26SCatherine Sullivan
gve_handle_report_stats(struct gve_priv * priv)192624aeb56fSKuo Zhao void gve_handle_report_stats(struct gve_priv *priv)
192724aeb56fSKuo Zhao {
192824aeb56fSKuo Zhao struct stats *stats = priv->stats_report->stats;
192917c37d74SEric Dumazet int idx, stats_idx = 0;
193017c37d74SEric Dumazet unsigned int start = 0;
193117c37d74SEric Dumazet u64 tx_bytes;
193224aeb56fSKuo Zhao
193324aeb56fSKuo Zhao if (!gve_get_report_stats(priv))
193424aeb56fSKuo Zhao return;
193524aeb56fSKuo Zhao
193624aeb56fSKuo Zhao be64_add_cpu(&priv->stats_report->written_count, 1);
193724aeb56fSKuo Zhao /* tx stats */
193824aeb56fSKuo Zhao if (priv->tx) {
19392e80aeaeSPraveen Kaligineedi for (idx = 0; idx < gve_num_tx_queues(priv); idx++) {
19405e8c5adfSBailey Forrest u32 last_completion = 0;
19415e8c5adfSBailey Forrest u32 tx_frames = 0;
19425e8c5adfSBailey Forrest
19435e8c5adfSBailey Forrest /* DQO doesn't currently support these metrics. */
19445e8c5adfSBailey Forrest if (gve_is_gqi(priv)) {
19455e8c5adfSBailey Forrest last_completion = priv->tx[idx].done;
19465e8c5adfSBailey Forrest tx_frames = priv->tx[idx].req;
19475e8c5adfSBailey Forrest }
19485e8c5adfSBailey Forrest
194924aeb56fSKuo Zhao do {
1950068c38adSThomas Gleixner start = u64_stats_fetch_begin(&priv->tx[idx].statss);
195124aeb56fSKuo Zhao tx_bytes = priv->tx[idx].bytes_done;
1952068c38adSThomas Gleixner } while (u64_stats_fetch_retry(&priv->tx[idx].statss, start));
195324aeb56fSKuo Zhao stats[stats_idx++] = (struct stats) {
195424aeb56fSKuo Zhao .stat_name = cpu_to_be32(TX_WAKE_CNT),
195524aeb56fSKuo Zhao .value = cpu_to_be64(priv->tx[idx].wake_queue),
195624aeb56fSKuo Zhao .queue_id = cpu_to_be32(idx),
195724aeb56fSKuo Zhao };
195824aeb56fSKuo Zhao stats[stats_idx++] = (struct stats) {
195924aeb56fSKuo Zhao .stat_name = cpu_to_be32(TX_STOP_CNT),
196024aeb56fSKuo Zhao .value = cpu_to_be64(priv->tx[idx].stop_queue),
196124aeb56fSKuo Zhao .queue_id = cpu_to_be32(idx),
196224aeb56fSKuo Zhao };
196324aeb56fSKuo Zhao stats[stats_idx++] = (struct stats) {
196424aeb56fSKuo Zhao .stat_name = cpu_to_be32(TX_FRAMES_SENT),
19655e8c5adfSBailey Forrest .value = cpu_to_be64(tx_frames),
196624aeb56fSKuo Zhao .queue_id = cpu_to_be32(idx),
196724aeb56fSKuo Zhao };
196824aeb56fSKuo Zhao stats[stats_idx++] = (struct stats) {
196924aeb56fSKuo Zhao .stat_name = cpu_to_be32(TX_BYTES_SENT),
197024aeb56fSKuo Zhao .value = cpu_to_be64(tx_bytes),
197124aeb56fSKuo Zhao .queue_id = cpu_to_be32(idx),
197224aeb56fSKuo Zhao };
197324aeb56fSKuo Zhao stats[stats_idx++] = (struct stats) {
197424aeb56fSKuo Zhao .stat_name = cpu_to_be32(TX_LAST_COMPLETION_PROCESSED),
19755e8c5adfSBailey Forrest .value = cpu_to_be64(last_completion),
197624aeb56fSKuo Zhao .queue_id = cpu_to_be32(idx),
197724aeb56fSKuo Zhao };
197887a7f321SJohn Fraker stats[stats_idx++] = (struct stats) {
197987a7f321SJohn Fraker .stat_name = cpu_to_be32(TX_TIMEOUT_CNT),
198087a7f321SJohn Fraker .value = cpu_to_be64(priv->tx[idx].queue_timeout),
198187a7f321SJohn Fraker .queue_id = cpu_to_be32(idx),
198287a7f321SJohn Fraker };
198324aeb56fSKuo Zhao }
198424aeb56fSKuo Zhao }
198524aeb56fSKuo Zhao /* rx stats */
198624aeb56fSKuo Zhao if (priv->rx) {
198724aeb56fSKuo Zhao for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
198824aeb56fSKuo Zhao stats[stats_idx++] = (struct stats) {
198924aeb56fSKuo Zhao .stat_name = cpu_to_be32(RX_NEXT_EXPECTED_SEQUENCE),
199024aeb56fSKuo Zhao .value = cpu_to_be64(priv->rx[idx].desc.seqno),
199124aeb56fSKuo Zhao .queue_id = cpu_to_be32(idx),
199224aeb56fSKuo Zhao };
199324aeb56fSKuo Zhao stats[stats_idx++] = (struct stats) {
199424aeb56fSKuo Zhao .stat_name = cpu_to_be32(RX_BUFFERS_POSTED),
199524aeb56fSKuo Zhao .value = cpu_to_be64(priv->rx[0].fill_cnt),
199624aeb56fSKuo Zhao .queue_id = cpu_to_be32(idx),
199724aeb56fSKuo Zhao };
199824aeb56fSKuo Zhao }
199924aeb56fSKuo Zhao }
200024aeb56fSKuo Zhao }
200124aeb56fSKuo Zhao
200224aeb56fSKuo Zhao /* Handle NIC status register changes, reset requests and report stats */
gve_service_task(struct work_struct * work)20039e5f7d26SCatherine Sullivan static void gve_service_task(struct work_struct *work)
20049e5f7d26SCatherine Sullivan {
20059e5f7d26SCatherine Sullivan struct gve_priv *priv = container_of(work, struct gve_priv,
20069e5f7d26SCatherine Sullivan service_task);
20073b7cc736SPatricio Noyola u32 status = ioread32be(&priv->reg_bar0->device_status);
20089e5f7d26SCatherine Sullivan
20093b7cc736SPatricio Noyola gve_handle_status(priv, status);
20109e5f7d26SCatherine Sullivan
20119e5f7d26SCatherine Sullivan gve_handle_reset(priv);
20123b7cc736SPatricio Noyola gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status);
20139e5f7d26SCatherine Sullivan }
20149e5f7d26SCatherine Sullivan
gve_set_netdev_xdp_features(struct gve_priv * priv)201575eaae15SPraveen Kaligineedi static void gve_set_netdev_xdp_features(struct gve_priv *priv)
201675eaae15SPraveen Kaligineedi {
2017eff2cd6fSJakub Kicinski xdp_features_t xdp_features;
2018eff2cd6fSJakub Kicinski
201975eaae15SPraveen Kaligineedi if (priv->queue_format == GVE_GQI_QPL_FORMAT) {
2020eff2cd6fSJakub Kicinski xdp_features = NETDEV_XDP_ACT_BASIC;
2021eff2cd6fSJakub Kicinski xdp_features |= NETDEV_XDP_ACT_REDIRECT;
2022eff2cd6fSJakub Kicinski xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
202375eaae15SPraveen Kaligineedi } else {
2024eff2cd6fSJakub Kicinski xdp_features = 0;
202575eaae15SPraveen Kaligineedi }
2026eff2cd6fSJakub Kicinski
2027eff2cd6fSJakub Kicinski xdp_set_features_flag(priv->dev, xdp_features);
202875eaae15SPraveen Kaligineedi }
202975eaae15SPraveen Kaligineedi
/* Initialize device-level private state: allocate the admin queue, verify
 * driver/device compatibility, query the device description, and size the
 * notification blocks and queue configs from the available MSI-X vectors.
 * When skip_describe_device is true (reset recovery), the previously
 * discovered configuration is kept and only device resources are re-set-up.
 * Returns 0 on success or a negative errno; on failure the admin queue is
 * freed again.
 */
static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
{
	int num_ntfy;
	int err;

	/* Set up the adminq */
	err = gve_adminq_alloc(&priv->pdev->dev, priv);
	if (err) {
		dev_err(&priv->pdev->dev,
			"Failed to alloc admin queue: err=%d\n", err);
		return err;
	}

	err = gve_verify_driver_compatibility(priv);
	if (err) {
		dev_err(&priv->pdev->dev,
			"Could not verify driver compatibility: err=%d\n", err);
		goto err;
	}

	if (skip_describe_device)
		goto setup_device;

	priv->queue_format = GVE_QUEUE_FORMAT_UNSPECIFIED;
	/* Get the initial information we need from the device */
	err = gve_adminq_describe_device(priv);
	if (err) {
		dev_err(&priv->pdev->dev,
			"Could not get device information: err=%d\n", err);
		goto err;
	}
	priv->dev->mtu = priv->dev->max_mtu;
	num_ntfy = pci_msix_vec_count(priv->pdev);
	if (num_ntfy <= 0) {
		dev_err(&priv->pdev->dev,
			"could not count MSI-x vectors: err=%d\n", num_ntfy);
		err = num_ntfy;
		goto err;
	} else if (num_ntfy < GVE_MIN_MSIX) {
		dev_err(&priv->pdev->dev, "gve needs at least %d MSI-x vectors, but only has %d\n",
			GVE_MIN_MSIX, num_ntfy);
		err = -EINVAL;
		goto err;
	}

	/* Big TCP is only supported on DQ*/
	if (!gve_is_gqi(priv))
		netif_set_tso_max_size(priv->dev, GVE_DQO_TX_MAX);

	priv->num_registered_pages = 0;
	priv->rx_copybreak = GVE_DEFAULT_RX_COPYBREAK;
	/* gvnic has one Notification Block per MSI-x vector, except for the
	 * management vector.  The "& ~0x1" rounds down to an even count so
	 * the blocks can be split evenly between TX and RX below.
	 */
	priv->num_ntfy_blks = (num_ntfy - 1) & ~0x1;
	priv->mgmt_msix_idx = priv->num_ntfy_blks;

	/* Cap the queue counts at half the notification blocks each. */
	priv->tx_cfg.max_queues =
		min_t(int, priv->tx_cfg.max_queues, priv->num_ntfy_blks / 2);
	priv->rx_cfg.max_queues =
		min_t(int, priv->rx_cfg.max_queues, priv->num_ntfy_blks / 2);

	priv->tx_cfg.num_queues = priv->tx_cfg.max_queues;
	priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;
	if (priv->default_num_queues > 0) {
		priv->tx_cfg.num_queues = min_t(int, priv->default_num_queues,
						priv->tx_cfg.num_queues);
		priv->rx_cfg.num_queues = min_t(int, priv->default_num_queues,
						priv->rx_cfg.num_queues);
	}

	dev_info(&priv->pdev->dev, "TX queues %d, RX queues %d\n",
		 priv->tx_cfg.num_queues, priv->rx_cfg.num_queues);
	dev_info(&priv->pdev->dev, "Max TX queues %d, Max RX queues %d\n",
		 priv->tx_cfg.max_queues, priv->rx_cfg.max_queues);

	/* DQO interrupt moderation defaults; GQI leaves these at zero. */
	if (!gve_is_gqi(priv)) {
		priv->tx_coalesce_usecs = GVE_TX_IRQ_RATELIMIT_US_DQO;
		priv->rx_coalesce_usecs = GVE_RX_IRQ_RATELIMIT_US_DQO;
	}

setup_device:
	gve_set_netdev_xdp_features(priv);
	err = gve_setup_device_resources(priv);
	if (!err)
		return 0;
err:
	gve_adminq_free(&priv->pdev->dev, priv);
	return err;
}
2120893ce44dSCatherine Sullivan
/* Release everything gve_init_priv() set up: device resources first,
 * then the admin queue.
 */
static void gve_teardown_priv_resources(struct gve_priv *priv)
{
	gve_teardown_device_resources(priv);
	gve_adminq_free(&priv->pdev->dev, priv);
}
2126893ce44dSCatherine Sullivan
/* Ask the device to reset itself. */
static void gve_trigger_reset(struct gve_priv *priv)
{
	/* Reset the device by releasing the AQ */
	gve_adminq_release(priv);
}
21329e5f7d26SCatherine Sullivan
/* Force a device reset, then close the interface (if it was up) and free
 * all private resources.  Used when a graceful close is not possible.
 */
static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up)
{
	gve_trigger_reset(priv);
	/* With the reset having already happened, close cannot fail */
	if (was_up)
		gve_close(priv->dev);
	gve_teardown_priv_resources(priv);
}
21419e5f7d26SCatherine Sullivan
/* Bring the device back after a reset: re-init private state (keeping the
 * previously discovered device description) and reopen the interface if it
 * was up before.  On failure all queues are turned down and the error is
 * returned.
 */
static int gve_reset_recovery(struct gve_priv *priv, bool was_up)
{
	int err;

	err = gve_init_priv(priv, true);
	if (!err && was_up)
		err = gve_open(priv->dev);
	if (!err)
		return 0;

	dev_err(&priv->pdev->dev, "Reset failed! !!! DISABLING ALL QUEUES !!!\n");
	gve_turndown(priv);
	return err;
}
21609e5f7d26SCatherine Sullivan
/* Fully reset the device and restore it to its prior up/down state.
 *
 * @attempt_teardown: when true, try a graceful close before tearing down
 * resources; when false (or if the graceful close fails), turn the queues
 * down and reset immediately.  Returns the result of the recovery path.
 * Callers hold rtnl (see gve_handle_reset()).
 */
int gve_reset(struct gve_priv *priv, bool attempt_teardown)
{
	bool was_up = netif_carrier_ok(priv->dev);
	int err;

	dev_info(&priv->pdev->dev, "Performing reset\n");
	gve_clear_do_reset(priv);
	gve_set_reset_in_progress(priv);
	/* If we aren't attempting to teardown normally, just go turndown and
	 * reset right away.
	 */
	if (!attempt_teardown) {
		gve_turndown(priv);
		gve_reset_and_teardown(priv, was_up);
	} else {
		/* Otherwise attempt to close normally */
		if (was_up) {
			err = gve_close(priv->dev);
			/* If that fails reset as we did above */
			if (err)
				gve_reset_and_teardown(priv, was_up);
		}
		/* Clean up any remaining resources */
		gve_teardown_priv_resources(priv);
	}

	/* Set it all back up */
	err = gve_reset_recovery(priv, was_up);
	gve_clear_reset_in_progress(priv);
	/* Reset the interface up/down counters and bookkeeping. */
	priv->reset_cnt++;
	priv->interface_up_cnt = 0;
	priv->interface_down_cnt = 0;
	priv->stats_report_trigger_cnt = 0;
	return err;
}
21969e5f7d26SCatherine Sullivan
/* Stream the driver version string ("GVE-<version>\n") one byte at a time
 * into the device's driver-version register.
 */
static void gve_write_version(u8 __iomem *driver_version_register)
{
	const char *parts[] = { gve_version_prefix, gve_version_str };
	const char *p;
	int i;

	for (i = 0; i < 2; i++)
		for (p = parts[i]; *p; p++)
			writeb(*p, driver_version_register);

	writeb('\n', driver_version_register);
}
2213893ce44dSCatherine Sullivan
gve_probe(struct pci_dev * pdev,const struct pci_device_id * ent)2214893ce44dSCatherine Sullivan static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2215893ce44dSCatherine Sullivan {
2216893ce44dSCatherine Sullivan int max_tx_queues, max_rx_queues;
2217893ce44dSCatherine Sullivan struct net_device *dev;
2218893ce44dSCatherine Sullivan __be32 __iomem *db_bar;
2219893ce44dSCatherine Sullivan struct gve_registers __iomem *reg_bar;
2220893ce44dSCatherine Sullivan struct gve_priv *priv;
2221893ce44dSCatherine Sullivan int err;
2222893ce44dSCatherine Sullivan
2223893ce44dSCatherine Sullivan err = pci_enable_device(pdev);
2224893ce44dSCatherine Sullivan if (err)
22256dce38b4SChristophe JAILLET return err;
2226893ce44dSCatherine Sullivan
22279d0aba98SJunfeng Guo err = pci_request_regions(pdev, gve_driver_name);
2228893ce44dSCatherine Sullivan if (err)
2229893ce44dSCatherine Sullivan goto abort_with_enabled;
2230893ce44dSCatherine Sullivan
2231893ce44dSCatherine Sullivan pci_set_master(pdev);
2232893ce44dSCatherine Sullivan
2233bde3c8ffSChristophe JAILLET err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
2234893ce44dSCatherine Sullivan if (err) {
2235893ce44dSCatherine Sullivan dev_err(&pdev->dev, "Failed to set dma mask: err=%d\n", err);
2236893ce44dSCatherine Sullivan goto abort_with_pci_region;
2237893ce44dSCatherine Sullivan }
2238893ce44dSCatherine Sullivan
2239893ce44dSCatherine Sullivan reg_bar = pci_iomap(pdev, GVE_REGISTER_BAR, 0);
2240893ce44dSCatherine Sullivan if (!reg_bar) {
2241f5cedc84SCatherine Sullivan dev_err(&pdev->dev, "Failed to map pci bar!\n");
2242893ce44dSCatherine Sullivan err = -ENOMEM;
2243893ce44dSCatherine Sullivan goto abort_with_pci_region;
2244893ce44dSCatherine Sullivan }
2245893ce44dSCatherine Sullivan
2246893ce44dSCatherine Sullivan db_bar = pci_iomap(pdev, GVE_DOORBELL_BAR, 0);
2247893ce44dSCatherine Sullivan if (!db_bar) {
2248893ce44dSCatherine Sullivan dev_err(&pdev->dev, "Failed to map doorbell bar!\n");
2249893ce44dSCatherine Sullivan err = -ENOMEM;
2250893ce44dSCatherine Sullivan goto abort_with_reg_bar;
2251893ce44dSCatherine Sullivan }
2252893ce44dSCatherine Sullivan
2253893ce44dSCatherine Sullivan gve_write_version(®_bar->driver_version);
2254893ce44dSCatherine Sullivan /* Get max queues to alloc etherdev */
22551db1a862SBailey Forrest max_tx_queues = ioread32be(®_bar->max_tx_queues);
22561db1a862SBailey Forrest max_rx_queues = ioread32be(®_bar->max_rx_queues);
2257893ce44dSCatherine Sullivan /* Alloc and setup the netdev and priv */
2258893ce44dSCatherine Sullivan dev = alloc_etherdev_mqs(sizeof(*priv), max_tx_queues, max_rx_queues);
2259893ce44dSCatherine Sullivan if (!dev) {
2260893ce44dSCatherine Sullivan dev_err(&pdev->dev, "could not allocate netdev\n");
22616dce38b4SChristophe JAILLET err = -ENOMEM;
2262893ce44dSCatherine Sullivan goto abort_with_db_bar;
2263893ce44dSCatherine Sullivan }
2264893ce44dSCatherine Sullivan SET_NETDEV_DEV(dev, &pdev->dev);
2265893ce44dSCatherine Sullivan pci_set_drvdata(pdev, dev);
2266e5b845dcSCatherine Sullivan dev->ethtool_ops = &gve_ethtool_ops;
2267f5cedc84SCatherine Sullivan dev->netdev_ops = &gve_netdev_ops;
22685e8c5adfSBailey Forrest
22695e8c5adfSBailey Forrest /* Set default and supported features.
22705e8c5adfSBailey Forrest *
22715e8c5adfSBailey Forrest * Features might be set in other locations as well (such as
22725e8c5adfSBailey Forrest * `gve_adminq_describe_device`).
22735e8c5adfSBailey Forrest */
2274893ce44dSCatherine Sullivan dev->hw_features = NETIF_F_HIGHDMA;
2275893ce44dSCatherine Sullivan dev->hw_features |= NETIF_F_SG;
2276893ce44dSCatherine Sullivan dev->hw_features |= NETIF_F_HW_CSUM;
2277893ce44dSCatherine Sullivan dev->hw_features |= NETIF_F_TSO;
2278893ce44dSCatherine Sullivan dev->hw_features |= NETIF_F_TSO6;
2279893ce44dSCatherine Sullivan dev->hw_features |= NETIF_F_TSO_ECN;
2280893ce44dSCatherine Sullivan dev->hw_features |= NETIF_F_RXCSUM;
2281893ce44dSCatherine Sullivan dev->hw_features |= NETIF_F_RXHASH;
2282893ce44dSCatherine Sullivan dev->features = dev->hw_features;
2283f5cedc84SCatherine Sullivan dev->watchdog_timeo = 5 * HZ;
2284893ce44dSCatherine Sullivan dev->min_mtu = ETH_MIN_MTU;
2285893ce44dSCatherine Sullivan netif_carrier_off(dev);
2286893ce44dSCatherine Sullivan
2287893ce44dSCatherine Sullivan priv = netdev_priv(dev);
2288893ce44dSCatherine Sullivan priv->dev = dev;
2289893ce44dSCatherine Sullivan priv->pdev = pdev;
2290893ce44dSCatherine Sullivan priv->msg_enable = DEFAULT_MSG_LEVEL;
2291893ce44dSCatherine Sullivan priv->reg_bar0 = reg_bar;
2292893ce44dSCatherine Sullivan priv->db_bar2 = db_bar;
22939e5f7d26SCatherine Sullivan priv->service_task_flags = 0x0;
2294893ce44dSCatherine Sullivan priv->state_flags = 0x0;
229524aeb56fSKuo Zhao priv->ethtool_flags = 0x0;
22969e5f7d26SCatherine Sullivan
22979e5f7d26SCatherine Sullivan gve_set_probe_in_progress(priv);
22989e5f7d26SCatherine Sullivan priv->gve_wq = alloc_ordered_workqueue("gve", 0);
22999e5f7d26SCatherine Sullivan if (!priv->gve_wq) {
23009e5f7d26SCatherine Sullivan dev_err(&pdev->dev, "Could not allocate workqueue");
23019e5f7d26SCatherine Sullivan err = -ENOMEM;
23029e5f7d26SCatherine Sullivan goto abort_with_netdev;
23039e5f7d26SCatherine Sullivan }
23049e5f7d26SCatherine Sullivan INIT_WORK(&priv->service_task, gve_service_task);
230524aeb56fSKuo Zhao INIT_WORK(&priv->stats_report_task, gve_stats_report_task);
2306f5cedc84SCatherine Sullivan priv->tx_cfg.max_queues = max_tx_queues;
2307f5cedc84SCatherine Sullivan priv->rx_cfg.max_queues = max_rx_queues;
2308893ce44dSCatherine Sullivan
2309893ce44dSCatherine Sullivan err = gve_init_priv(priv, false);
2310893ce44dSCatherine Sullivan if (err)
23119e5f7d26SCatherine Sullivan goto abort_with_wq;
2312893ce44dSCatherine Sullivan
2313893ce44dSCatherine Sullivan err = register_netdev(dev);
2314893ce44dSCatherine Sullivan if (err)
23152342ae10SChristophe JAILLET goto abort_with_gve_init;
2316893ce44dSCatherine Sullivan
2317893ce44dSCatherine Sullivan dev_info(&pdev->dev, "GVE version %s\n", gve_version_str);
2318a5886ef4SBailey Forrest dev_info(&pdev->dev, "GVE queue format %d\n", (int)priv->queue_format);
23199e5f7d26SCatherine Sullivan gve_clear_probe_in_progress(priv);
23209e5f7d26SCatherine Sullivan queue_work(priv->gve_wq, &priv->service_task);
2321893ce44dSCatherine Sullivan return 0;
2322893ce44dSCatherine Sullivan
23232342ae10SChristophe JAILLET abort_with_gve_init:
23242342ae10SChristophe JAILLET gve_teardown_priv_resources(priv);
23252342ae10SChristophe JAILLET
23269e5f7d26SCatherine Sullivan abort_with_wq:
23279e5f7d26SCatherine Sullivan destroy_workqueue(priv->gve_wq);
23289e5f7d26SCatherine Sullivan
2329893ce44dSCatherine Sullivan abort_with_netdev:
2330893ce44dSCatherine Sullivan free_netdev(dev);
2331893ce44dSCatherine Sullivan
2332893ce44dSCatherine Sullivan abort_with_db_bar:
2333893ce44dSCatherine Sullivan pci_iounmap(pdev, db_bar);
2334893ce44dSCatherine Sullivan
2335893ce44dSCatherine Sullivan abort_with_reg_bar:
2336893ce44dSCatherine Sullivan pci_iounmap(pdev, reg_bar);
2337893ce44dSCatherine Sullivan
2338893ce44dSCatherine Sullivan abort_with_pci_region:
2339893ce44dSCatherine Sullivan pci_release_regions(pdev);
2340893ce44dSCatherine Sullivan
2341893ce44dSCatherine Sullivan abort_with_enabled:
2342893ce44dSCatherine Sullivan pci_disable_device(pdev);
23436dce38b4SChristophe JAILLET return err;
2344893ce44dSCatherine Sullivan }
2345893ce44dSCatherine Sullivan
gve_remove(struct pci_dev * pdev)2346893ce44dSCatherine Sullivan static void gve_remove(struct pci_dev *pdev)
2347893ce44dSCatherine Sullivan {
2348893ce44dSCatherine Sullivan struct net_device *netdev = pci_get_drvdata(pdev);
2349893ce44dSCatherine Sullivan struct gve_priv *priv = netdev_priv(netdev);
2350893ce44dSCatherine Sullivan __be32 __iomem *db_bar = priv->db_bar2;
2351893ce44dSCatherine Sullivan void __iomem *reg_bar = priv->reg_bar0;
2352893ce44dSCatherine Sullivan
2353893ce44dSCatherine Sullivan unregister_netdev(netdev);
2354893ce44dSCatherine Sullivan gve_teardown_priv_resources(priv);
23559e5f7d26SCatherine Sullivan destroy_workqueue(priv->gve_wq);
2356893ce44dSCatherine Sullivan free_netdev(netdev);
2357893ce44dSCatherine Sullivan pci_iounmap(pdev, db_bar);
2358893ce44dSCatherine Sullivan pci_iounmap(pdev, reg_bar);
2359893ce44dSCatherine Sullivan pci_release_regions(pdev);
2360893ce44dSCatherine Sullivan pci_disable_device(pdev);
2361893ce44dSCatherine Sullivan }
2362893ce44dSCatherine Sullivan
gve_shutdown(struct pci_dev * pdev)2363974365e5SCatherine Sullivan static void gve_shutdown(struct pci_dev *pdev)
2364974365e5SCatherine Sullivan {
2365974365e5SCatherine Sullivan struct net_device *netdev = pci_get_drvdata(pdev);
2366974365e5SCatherine Sullivan struct gve_priv *priv = netdev_priv(netdev);
2367974365e5SCatherine Sullivan bool was_up = netif_carrier_ok(priv->dev);
2368974365e5SCatherine Sullivan
2369974365e5SCatherine Sullivan rtnl_lock();
2370974365e5SCatherine Sullivan if (was_up && gve_close(priv->dev)) {
2371974365e5SCatherine Sullivan /* If the dev was up, attempt to close, if close fails, reset */
2372974365e5SCatherine Sullivan gve_reset_and_teardown(priv, was_up);
2373974365e5SCatherine Sullivan } else {
2374974365e5SCatherine Sullivan /* If the dev wasn't up or close worked, finish tearing down */
2375974365e5SCatherine Sullivan gve_teardown_priv_resources(priv);
2376974365e5SCatherine Sullivan }
2377974365e5SCatherine Sullivan rtnl_unlock();
2378974365e5SCatherine Sullivan }
2379974365e5SCatherine Sullivan
2380974365e5SCatherine Sullivan #ifdef CONFIG_PM
/* gve_suspend() - legacy PM suspend callback.
 *
 * Bring the device down before the system sleeps, recording whether the
 * interface was up so gve_resume() can restore that state. A failed
 * close on an up device falls back to a reset-based teardown.
 *
 * Return: always 0.
 */
static int gve_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct gve_priv *priv = netdev_priv(netdev);
	bool dev_was_up = netif_carrier_ok(priv->dev);

	priv->suspend_cnt++;	/* counts suspend cycles */
	rtnl_lock();
	if (!dev_was_up || !gve_close(priv->dev)) {
		/* Interface was down, or close succeeded: plain teardown. */
		gve_teardown_priv_resources(priv);
	} else {
		/* Close failed on an up device: reset to tear it down. */
		gve_reset_and_teardown(priv, dev_was_up);
	}
	priv->up_before_suspend = dev_was_up;
	rtnl_unlock();
	return 0;
}
2400974365e5SCatherine Sullivan
gve_resume(struct pci_dev * pdev)2401974365e5SCatherine Sullivan static int gve_resume(struct pci_dev *pdev)
2402974365e5SCatherine Sullivan {
2403974365e5SCatherine Sullivan struct net_device *netdev = pci_get_drvdata(pdev);
2404974365e5SCatherine Sullivan struct gve_priv *priv = netdev_priv(netdev);
2405974365e5SCatherine Sullivan int err;
2406974365e5SCatherine Sullivan
2407974365e5SCatherine Sullivan priv->resume_cnt++;
2408974365e5SCatherine Sullivan rtnl_lock();
2409974365e5SCatherine Sullivan err = gve_reset_recovery(priv, priv->up_before_suspend);
2410974365e5SCatherine Sullivan rtnl_unlock();
2411974365e5SCatherine Sullivan return err;
2412974365e5SCatherine Sullivan }
2413974365e5SCatherine Sullivan #endif /* CONFIG_PM */
2414974365e5SCatherine Sullivan
/* PCI IDs this driver binds to: Google's gVNIC virtual NIC. */
static const struct pci_device_id gve_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_GOOGLE, PCI_DEV_ID_GVNIC) },
	{ }	/* sentinel */
};

/* PCI driver glue: probe/remove plus shutdown and legacy PM callbacks.
 * The suspend/resume hooks are only wired up when CONFIG_PM is set.
 */
static struct pci_driver gve_driver = {
	.name = gve_driver_name,
	.id_table = gve_id_table,
	.probe = gve_probe,
	.remove = gve_remove,
	.shutdown = gve_shutdown,
#ifdef CONFIG_PM
	.suspend = gve_suspend,
	.resume = gve_resume,
#endif
};

/* Generates module init/exit that register/unregister the PCI driver. */
module_pci_driver(gve_driver);

MODULE_DEVICE_TABLE(pci, gve_id_table);
MODULE_AUTHOR("Google, Inc.");
MODULE_DESCRIPTION("Google Virtual NIC Driver");
MODULE_LICENSE("Dual MIT/GPL");
MODULE_VERSION(GVE_VERSION);
2439