1 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
2 /* Google virtual Ethernet (gve) driver
3  *
4  * Copyright (C) 2015-2021 Google, Inc.
5  */
6 
7 #include <linux/cpumask.h>
8 #include <linux/etherdevice.h>
9 #include <linux/interrupt.h>
10 #include <linux/module.h>
11 #include <linux/pci.h>
12 #include <linux/sched.h>
13 #include <linux/timer.h>
14 #include <linux/workqueue.h>
15 #include <net/sch_generic.h>
16 #include "gve.h"
17 #include "gve_dqo.h"
18 #include "gve_adminq.h"
19 #include "gve_register.h"
20 
21 #define GVE_DEFAULT_RX_COPYBREAK	(256)
22 
23 #define DEFAULT_MSG_LEVEL	(NETIF_MSG_DRV | NETIF_MSG_LINK)
24 #define GVE_VERSION		"1.0.0"
25 #define GVE_VERSION_PREFIX	"GVE-"
26 
27 const char gve_version_str[] = GVE_VERSION;
28 static const char gve_version_prefix[] = GVE_VERSION_PREFIX;
29 
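/* Dispatch transmit skbs to the datapath that matches the negotiated
 * queue format (GQI or DQO).
 */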
30 static netdev_tx_t gve_start_xmit(struct sk_buff *skb, struct net_device *dev)
31 {
32 	struct gve_priv *priv = netdev_priv(dev);
33 
34 	if (gve_is_gqi(priv))
35 		return gve_tx(skb, dev);
36 	else
37 		return gve_tx_dqo(skb, dev);
38 }
39 
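/* Aggregate the per-ring packet and byte counters into the netdev stats,
 * using u64_stats retry loops so readers see consistent values.
 */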
40 static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
41 {
42 	struct gve_priv *priv = netdev_priv(dev);
43 	unsigned int start;
44 	int ring;
45 
46 	if (priv->rx) {
47 		for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
48 			do {
49 				start =
50 				  u64_stats_fetch_begin(&priv->rx[ring].statss);
51 				s->rx_packets += priv->rx[ring].rpackets;
52 				s->rx_bytes += priv->rx[ring].rbytes;
53 			} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
54 						       start));
55 		}
56 	}
57 	if (priv->tx) {
58 		for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) {
59 			do {
60 				start =
61 				  u64_stats_fetch_begin(&priv->tx[ring].statss);
62 				s->tx_packets += priv->tx[ring].pkt_done;
63 				s->tx_bytes += priv->tx[ring].bytes_done;
64 			} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
65 						       start));
66 		}
67 	}
68 }
69 
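/* Allocate the DMA-coherent event counter array shared with the device. */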
70 static int gve_alloc_counter_array(struct gve_priv *priv)
71 {
72 	priv->counter_array =
73 		dma_alloc_coherent(&priv->pdev->dev,
74 				   priv->num_event_counters *
75 				   sizeof(*priv->counter_array),
76 				   &priv->counter_array_bus, GFP_KERNEL);
77 	if (!priv->counter_array)
78 		return -ENOMEM;
79 
80 	return 0;
81 }
82 
83 static void gve_free_counter_array(struct gve_priv *priv)
84 {
85 	dma_free_coherent(&priv->pdev->dev,
86 			  priv->num_event_counters *
87 			  sizeof(*priv->counter_array),
88 			  priv->counter_array, priv->counter_array_bus);
89 	priv->counter_array = NULL;
90 }
91 
92 /* NIC requests to report stats */
93 static void gve_stats_report_task(struct work_struct *work)
94 {
95 	struct gve_priv *priv = container_of(work, struct gve_priv,
					     stats_report_task);

	if (gve_get_do_report_stats(priv)) {
98 		gve_handle_report_stats(priv);
99 		gve_clear_do_report_stats(priv);
100 	}
101 }
102 
103 static void gve_stats_report_schedule(struct gve_priv *priv)
104 {
105 	if (!gve_get_probe_in_progress(priv) &&
106 	    !gve_get_reset_in_progress(priv)) {
107 		gve_set_do_report_stats(priv);
108 		queue_work(priv->gve_wq, &priv->stats_report_task);
109 	}
110 }
111 
112 static void gve_stats_report_timer(struct timer_list *t)
113 {
114 	struct gve_priv *priv = from_timer(priv, t, stats_report_timer);
115 
116 	mod_timer(&priv->stats_report_timer,
117 		  round_jiffies(jiffies +
118 		  msecs_to_jiffies(priv->stats_report_timer_period)));
119 	gve_stats_report_schedule(priv);
120 }
121 
122 static int gve_alloc_stats_report(struct gve_priv *priv)
123 {
124 	int tx_stats_num, rx_stats_num;
125 
126 	tx_stats_num = (GVE_TX_STATS_REPORT_NUM + NIC_TX_STATS_REPORT_NUM) *
127 		       priv->tx_cfg.num_queues;
128 	rx_stats_num = (GVE_RX_STATS_REPORT_NUM + NIC_RX_STATS_REPORT_NUM) *
129 		       priv->rx_cfg.num_queues;
130 	priv->stats_report_len = struct_size(priv->stats_report, stats,
131 					     tx_stats_num + rx_stats_num);
132 	priv->stats_report =
133 		dma_alloc_coherent(&priv->pdev->dev, priv->stats_report_len,
134 				   &priv->stats_report_bus, GFP_KERNEL);
135 	if (!priv->stats_report)
136 		return -ENOMEM;
137 	/* Set up timer for the report-stats task */
138 	timer_setup(&priv->stats_report_timer, gve_stats_report_timer, 0);
139 	priv->stats_report_timer_period = GVE_STATS_REPORT_TIMER_PERIOD;
140 	return 0;
141 }
142 
143 static void gve_free_stats_report(struct gve_priv *priv)
144 {
145 	del_timer_sync(&priv->stats_report_timer);
146 	dma_free_coherent(&priv->pdev->dev, priv->stats_report_len,
147 			  priv->stats_report, priv->stats_report_bus);
148 	priv->stats_report = NULL;
149 }
150 
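/* Management interrupt: defer all handling to the service task. */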
151 static irqreturn_t gve_mgmnt_intr(int irq, void *arg)
152 {
153 	struct gve_priv *priv = arg;
154 
155 	queue_work(priv->gve_wq, &priv->service_task);
156 	return IRQ_HANDLED;
157 }
158 
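/* GQI data-path interrupt: mask the vector and hand the work to NAPI. */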
159 static irqreturn_t gve_intr(int irq, void *arg)
160 {
161 	struct gve_notify_block *block = arg;
162 	struct gve_priv *priv = block->priv;
163 
164 	iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block));
165 	napi_schedule_irqoff(&block->napi);
166 	return IRQ_HANDLED;
167 }
168 
169 static irqreturn_t gve_intr_dqo(int irq, void *arg)
170 {
171 	struct gve_notify_block *block = arg;
172 
173 	/* Interrupts are automatically masked */
174 	napi_schedule_irqoff(&block->napi);
175 	return IRQ_HANDLED;
176 }
177 
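/* NAPI poll for GQI queues: service TX and RX, re-arm the interrupt, then
 * re-check for work that raced with the unmask.
 */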
178 static int gve_napi_poll(struct napi_struct *napi, int budget)
179 {
180 	struct gve_notify_block *block;
181 	__be32 __iomem *irq_doorbell;
182 	bool reschedule = false;
183 	struct gve_priv *priv;
184 
185 	block = container_of(napi, struct gve_notify_block, napi);
186 	priv = block->priv;
187 
188 	if (block->tx)
189 		reschedule |= gve_tx_poll(block, budget);
190 	if (block->rx)
191 		reschedule |= gve_rx_poll(block, budget);
192 
193 	if (reschedule)
194 		return budget;
195 
196 	napi_complete(napi);
197 	irq_doorbell = gve_irq_doorbell(priv, block);
198 	iowrite32be(GVE_IRQ_ACK | GVE_IRQ_EVENT, irq_doorbell);
199 
200 	/* Double check we have no extra work.
201 	 * Ensure unmask synchronizes with checking for work.
202 	 */
203 	mb();
204 	if (block->tx)
205 		reschedule |= gve_tx_poll(block, -1);
206 	if (block->rx)
207 		reschedule |= gve_rx_poll(block, -1);
208 	if (reschedule && napi_reschedule(napi))
209 		iowrite32be(GVE_IRQ_MASK, irq_doorbell);
210 
211 	return 0;
212 }
213 
214 static int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
215 {
216 	struct gve_notify_block *block =
217 		container_of(napi, struct gve_notify_block, napi);
218 	struct gve_priv *priv = block->priv;
219 	bool reschedule = false;
220 	int work_done = 0;
221 
222 	/* Clear PCI MSI-X Pending Bit Array (PBA)
223 	 *
224 	 * This bit is set if an interrupt event occurs while the vector is
225 	 * masked. If this bit is set and we reenable the interrupt, it will
226 	 * fire again. Since we're just about to poll the queue state, we don't
227 	 * need it to fire again.
228 	 *
229 	 * Under high softirq load, it's possible that the interrupt condition
230 	 * is triggered twice before we got the chance to process it.
231 	 */
232 	gve_write_irq_doorbell_dqo(priv, block,
233 				   GVE_ITR_NO_UPDATE_DQO | GVE_ITR_CLEAR_PBA_BIT_DQO);
234 
235 	if (block->tx)
236 		reschedule |= gve_tx_poll_dqo(block, /*do_clean=*/true);
237 
238 	if (block->rx) {
239 		work_done = gve_rx_poll_dqo(block, budget);
240 		reschedule |= work_done == budget;
241 	}
242 
243 	if (reschedule)
244 		return budget;
245 
246 	if (likely(napi_complete_done(napi, work_done))) {
247 		/* Enable interrupts again.
248 		 *
249 		 * We don't need to repoll afterwards because HW supports the
250 		 * PCI MSI-X PBA feature.
251 		 *
252 		 * Another interrupt would be triggered if a new event came in
253 		 * since the last one.
254 		 */
255 		gve_write_irq_doorbell_dqo(priv, block,
256 					   GVE_ITR_NO_UPDATE_DQO | GVE_ITR_ENABLE_BIT_DQO);
257 	}
258 
259 	return work_done;
260 }
261 
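/* Enable MSI-X (one vector per notification block plus one management
 * vector), request the irqs and spread their affinity across online CPUs.
 * If fewer vectors are available, the TX/RX queue limits are reduced to fit.
 */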
262 static int gve_alloc_notify_blocks(struct gve_priv *priv)
263 {
264 	int num_vecs_requested = priv->num_ntfy_blks + 1;
265 	char *name = priv->dev->name;
266 	unsigned int active_cpus;
267 	int vecs_enabled;
268 	int i, j;
269 	int err;
270 
271 	priv->msix_vectors = kvzalloc(num_vecs_requested *
272 				      sizeof(*priv->msix_vectors), GFP_KERNEL);
273 	if (!priv->msix_vectors)
274 		return -ENOMEM;
275 	for (i = 0; i < num_vecs_requested; i++)
276 		priv->msix_vectors[i].entry = i;
277 	vecs_enabled = pci_enable_msix_range(priv->pdev, priv->msix_vectors,
278 					     GVE_MIN_MSIX, num_vecs_requested);
279 	if (vecs_enabled < 0) {
		dev_err(&priv->pdev->dev,
			"Could not enable at least %d msix vectors, err=%d\n",
			GVE_MIN_MSIX, vecs_enabled);
282 		err = vecs_enabled;
283 		goto abort_with_msix_vectors;
284 	}
285 	if (vecs_enabled != num_vecs_requested) {
286 		int new_num_ntfy_blks = (vecs_enabled - 1) & ~0x1;
287 		int vecs_per_type = new_num_ntfy_blks / 2;
288 		int vecs_left = new_num_ntfy_blks % 2;
289 
290 		priv->num_ntfy_blks = new_num_ntfy_blks;
291 		priv->mgmt_msix_idx = priv->num_ntfy_blks;
292 		priv->tx_cfg.max_queues = min_t(int, priv->tx_cfg.max_queues,
293 						vecs_per_type);
294 		priv->rx_cfg.max_queues = min_t(int, priv->rx_cfg.max_queues,
295 						vecs_per_type + vecs_left);
296 		dev_err(&priv->pdev->dev,
297 			"Could not enable desired msix, only enabled %d, adjusting tx max queues to %d, and rx max queues to %d\n",
298 			vecs_enabled, priv->tx_cfg.max_queues,
299 			priv->rx_cfg.max_queues);
300 		if (priv->tx_cfg.num_queues > priv->tx_cfg.max_queues)
301 			priv->tx_cfg.num_queues = priv->tx_cfg.max_queues;
302 		if (priv->rx_cfg.num_queues > priv->rx_cfg.max_queues)
303 			priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;
304 	}
305 	/* Half the notification blocks go to TX and half to RX */
306 	active_cpus = min_t(int, priv->num_ntfy_blks / 2, num_online_cpus());
307 
	/* Setup Management Vector - the last vector */
309 	snprintf(priv->mgmt_msix_name, sizeof(priv->mgmt_msix_name), "%s-mgmnt",
310 		 name);
311 	err = request_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector,
312 			  gve_mgmnt_intr, 0, priv->mgmt_msix_name, priv);
313 	if (err) {
		dev_err(&priv->pdev->dev,
			"Failed to request management interrupt: err=%d\n", err);
315 		goto abort_with_msix_enabled;
316 	}
317 	priv->ntfy_blocks =
318 		dma_alloc_coherent(&priv->pdev->dev,
319 				   priv->num_ntfy_blks *
320 				   sizeof(*priv->ntfy_blocks),
321 				   &priv->ntfy_block_bus, GFP_KERNEL);
322 	if (!priv->ntfy_blocks) {
323 		err = -ENOMEM;
324 		goto abort_with_mgmt_vector;
325 	}
326 	/* Setup the other blocks - the first n-1 vectors */
327 	for (i = 0; i < priv->num_ntfy_blks; i++) {
328 		struct gve_notify_block *block = &priv->ntfy_blocks[i];
329 		int msix_idx = i;
330 
331 		snprintf(block->name, sizeof(block->name), "%s-ntfy-block.%d",
332 			 name, i);
333 		block->priv = priv;
334 		err = request_irq(priv->msix_vectors[msix_idx].vector,
335 				  gve_is_gqi(priv) ? gve_intr : gve_intr_dqo,
336 				  0, block->name, block);
337 		if (err) {
			dev_err(&priv->pdev->dev,
				"Failed to request msix vector %d: err=%d\n",
				i, err);
340 			goto abort_with_some_ntfy_blocks;
341 		}
342 		irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
343 				      get_cpu_mask(i % active_cpus));
344 	}
345 	return 0;
346 abort_with_some_ntfy_blocks:
347 	for (j = 0; j < i; j++) {
348 		struct gve_notify_block *block = &priv->ntfy_blocks[j];
349 		int msix_idx = j;
350 
351 		irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
352 				      NULL);
353 		free_irq(priv->msix_vectors[msix_idx].vector, block);
354 	}
355 	dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks *
356 			  sizeof(*priv->ntfy_blocks),
357 			  priv->ntfy_blocks, priv->ntfy_block_bus);
358 	priv->ntfy_blocks = NULL;
359 abort_with_mgmt_vector:
360 	free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
361 abort_with_msix_enabled:
362 	pci_disable_msix(priv->pdev);
363 abort_with_msix_vectors:
364 	kvfree(priv->msix_vectors);
365 	priv->msix_vectors = NULL;
366 	return err;
367 }
368 
369 static void gve_free_notify_blocks(struct gve_priv *priv)
370 {
	int i;

	if (!priv->msix_vectors)
		return;

	/* Free the irqs */
	for (i = 0; i < priv->num_ntfy_blks; i++) {
		struct gve_notify_block *block = &priv->ntfy_blocks[i];
		int msix_idx = i;

		irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
				      NULL);
		free_irq(priv->msix_vectors[msix_idx].vector, block);
	}
	free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
385 	dma_free_coherent(&priv->pdev->dev,
386 			  priv->num_ntfy_blks * sizeof(*priv->ntfy_blocks),
387 			  priv->ntfy_blocks, priv->ntfy_block_bus);
388 	priv->ntfy_blocks = NULL;
389 	pci_disable_msix(priv->pdev);
390 	kvfree(priv->msix_vectors);
391 	priv->msix_vectors = NULL;
392 }
393 
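/* Allocate the counter array, notification blocks and stats report, and
 * hand their DMA addresses to the device over the admin queue.
 */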
394 static int gve_setup_device_resources(struct gve_priv *priv)
395 {
396 	int err;
397 
398 	err = gve_alloc_counter_array(priv);
399 	if (err)
400 		return err;
401 	err = gve_alloc_notify_blocks(priv);
402 	if (err)
403 		goto abort_with_counter;
404 	err = gve_alloc_stats_report(priv);
405 	if (err)
406 		goto abort_with_ntfy_blocks;
407 	err = gve_adminq_configure_device_resources(priv,
408 						    priv->counter_array_bus,
409 						    priv->num_event_counters,
410 						    priv->ntfy_block_bus,
411 						    priv->num_ntfy_blks);
412 	if (unlikely(err)) {
413 		dev_err(&priv->pdev->dev,
414 			"could not setup device_resources: err=%d\n", err);
415 		err = -ENXIO;
416 		goto abort_with_stats_report;
417 	}
418 
419 	if (priv->queue_format == GVE_DQO_RDA_FORMAT) {
420 		priv->ptype_lut_dqo = kvzalloc(sizeof(*priv->ptype_lut_dqo),
421 					       GFP_KERNEL);
422 		if (!priv->ptype_lut_dqo) {
423 			err = -ENOMEM;
424 			goto abort_with_stats_report;
425 		}
426 		err = gve_adminq_get_ptype_map_dqo(priv, priv->ptype_lut_dqo);
427 		if (err) {
428 			dev_err(&priv->pdev->dev,
429 				"Failed to get ptype map: err=%d\n", err);
430 			goto abort_with_ptype_lut;
431 		}
432 	}
433 
434 	err = gve_adminq_report_stats(priv, priv->stats_report_len,
435 				      priv->stats_report_bus,
436 				      GVE_STATS_REPORT_TIMER_PERIOD);
437 	if (err)
438 		dev_err(&priv->pdev->dev,
439 			"Failed to report stats: err=%d\n", err);
440 	gve_set_device_resources_ok(priv);
441 	return 0;
442 
443 abort_with_ptype_lut:
444 	kvfree(priv->ptype_lut_dqo);
445 	priv->ptype_lut_dqo = NULL;
446 abort_with_stats_report:
447 	gve_free_stats_report(priv);
448 abort_with_ntfy_blocks:
449 	gve_free_notify_blocks(priv);
450 abort_with_counter:
451 	gve_free_counter_array(priv);
452 
453 	return err;
454 }
455 
456 static void gve_trigger_reset(struct gve_priv *priv);
457 
458 static void gve_teardown_device_resources(struct gve_priv *priv)
459 {
460 	int err;
461 
462 	/* Tell device its resources are being freed */
463 	if (gve_get_device_resources_ok(priv)) {
464 		/* detach the stats report */
465 		err = gve_adminq_report_stats(priv, 0, 0x0, GVE_STATS_REPORT_TIMER_PERIOD);
466 		if (err) {
467 			dev_err(&priv->pdev->dev,
468 				"Failed to detach stats report: err=%d\n", err);
469 			gve_trigger_reset(priv);
470 		}
471 		err = gve_adminq_deconfigure_device_resources(priv);
472 		if (err) {
473 			dev_err(&priv->pdev->dev,
474 				"Could not deconfigure device resources: err=%d\n",
475 				err);
476 			gve_trigger_reset(priv);
477 		}
478 	}
479 
480 	kvfree(priv->ptype_lut_dqo);
481 	priv->ptype_lut_dqo = NULL;
482 
483 	gve_free_counter_array(priv);
484 	gve_free_notify_blocks(priv);
485 	gve_free_stats_report(priv);
486 	gve_clear_device_resources_ok(priv);
487 }
488 
489 static void gve_add_napi(struct gve_priv *priv, int ntfy_idx,
490 			 int (*gve_poll)(struct napi_struct *, int))
491 {
492 	struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
493 
494 	netif_napi_add(priv->dev, &block->napi, gve_poll,
495 		       NAPI_POLL_WEIGHT);
496 }
497 
498 static void gve_remove_napi(struct gve_priv *priv, int ntfy_idx)
499 {
500 	struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
501 
502 	netif_napi_del(&block->napi);
503 }
504 
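/* Register every queue page list with the device via the admin queue. */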
505 static int gve_register_qpls(struct gve_priv *priv)
506 {
507 	int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
508 	int err;
509 	int i;
510 
511 	for (i = 0; i < num_qpls; i++) {
512 		err = gve_adminq_register_page_list(priv, &priv->qpls[i]);
513 		if (err) {
514 			netif_err(priv, drv, priv->dev,
515 				  "failed to register queue page list %d\n",
516 				  priv->qpls[i].id);
517 			/* This failure will trigger a reset - no need to clean
518 			 * up
519 			 */
520 			return err;
521 		}
522 	}
523 	return 0;
524 }
525 
526 static int gve_unregister_qpls(struct gve_priv *priv)
527 {
528 	int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
529 	int err;
530 	int i;
531 
532 	for (i = 0; i < num_qpls; i++) {
533 		err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id);
534 		/* This failure will trigger a reset - no need to clean up */
535 		if (err) {
536 			netif_err(priv, drv, priv->dev,
537 				  "Failed to unregister queue page list %d\n",
538 				  priv->qpls[i].id);
539 			return err;
540 		}
541 	}
542 	return 0;
543 }
544 
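/* Ask the device to create the TX and RX queues, then post RX buffers. */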
545 static int gve_create_rings(struct gve_priv *priv)
546 {
547 	int err;
548 	int i;
549 
550 	err = gve_adminq_create_tx_queues(priv, priv->tx_cfg.num_queues);
551 	if (err) {
552 		netif_err(priv, drv, priv->dev, "failed to create %d tx queues\n",
553 			  priv->tx_cfg.num_queues);
554 		/* This failure will trigger a reset - no need to clean
555 		 * up
556 		 */
557 		return err;
558 	}
559 	netif_dbg(priv, drv, priv->dev, "created %d tx queues\n",
560 		  priv->tx_cfg.num_queues);
561 
562 	err = gve_adminq_create_rx_queues(priv, priv->rx_cfg.num_queues);
563 	if (err) {
564 		netif_err(priv, drv, priv->dev, "failed to create %d rx queues\n",
565 			  priv->rx_cfg.num_queues);
566 		/* This failure will trigger a reset - no need to clean
567 		 * up
568 		 */
569 		return err;
570 	}
571 	netif_dbg(priv, drv, priv->dev, "created %d rx queues\n",
572 		  priv->rx_cfg.num_queues);
573 
574 	if (gve_is_gqi(priv)) {
575 		/* Rx data ring has been prefilled with packet buffers at queue
576 		 * allocation time.
577 		 *
578 		 * Write the doorbell to provide descriptor slots and packet
579 		 * buffers to the NIC.
580 		 */
581 		for (i = 0; i < priv->rx_cfg.num_queues; i++)
582 			gve_rx_write_doorbell(priv, &priv->rx[i]);
583 	} else {
584 		for (i = 0; i < priv->rx_cfg.num_queues; i++) {
585 			/* Post buffers and ring doorbell. */
586 			gve_rx_post_buffers_dqo(&priv->rx[i]);
587 		}
588 	}
589 
590 	return 0;
591 }
592 
593 static void add_napi_init_sync_stats(struct gve_priv *priv,
594 				     int (*napi_poll)(struct napi_struct *napi,
595 						      int budget))
596 {
597 	int i;
598 
	/* Add tx napi & init sync stats */
600 	for (i = 0; i < priv->tx_cfg.num_queues; i++) {
601 		int ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
602 
603 		u64_stats_init(&priv->tx[i].statss);
604 		priv->tx[i].ntfy_id = ntfy_idx;
605 		gve_add_napi(priv, ntfy_idx, napi_poll);
606 	}
	/* Add rx napi & init sync stats */
608 	for (i = 0; i < priv->rx_cfg.num_queues; i++) {
609 		int ntfy_idx = gve_rx_idx_to_ntfy(priv, i);
610 
611 		u64_stats_init(&priv->rx[i].statss);
612 		priv->rx[i].ntfy_id = ntfy_idx;
613 		gve_add_napi(priv, ntfy_idx, napi_poll);
614 	}
615 }
616 
617 static void gve_tx_free_rings(struct gve_priv *priv)
618 {
	if (gve_is_gqi(priv))
		gve_tx_free_rings_gqi(priv);
	else
		gve_tx_free_rings_dqo(priv);
624 }
625 
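/* Allocate TX and RX ring state for the configured queue counts and
 * register a NAPI instance for each notification block.
 */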
626 static int gve_alloc_rings(struct gve_priv *priv)
627 {
628 	int err;
629 
630 	/* Setup tx rings */
631 	priv->tx = kvzalloc(priv->tx_cfg.num_queues * sizeof(*priv->tx),
632 			    GFP_KERNEL);
633 	if (!priv->tx)
634 		return -ENOMEM;
635 
636 	if (gve_is_gqi(priv))
637 		err = gve_tx_alloc_rings(priv);
638 	else
639 		err = gve_tx_alloc_rings_dqo(priv);
640 	if (err)
641 		goto free_tx;
642 
643 	/* Setup rx rings */
644 	priv->rx = kvzalloc(priv->rx_cfg.num_queues * sizeof(*priv->rx),
645 			    GFP_KERNEL);
646 	if (!priv->rx) {
647 		err = -ENOMEM;
648 		goto free_tx_queue;
649 	}
650 
651 	if (gve_is_gqi(priv))
652 		err = gve_rx_alloc_rings(priv);
653 	else
654 		err = gve_rx_alloc_rings_dqo(priv);
655 	if (err)
656 		goto free_rx;
657 
658 	if (gve_is_gqi(priv))
659 		add_napi_init_sync_stats(priv, gve_napi_poll);
660 	else
661 		add_napi_init_sync_stats(priv, gve_napi_poll_dqo);
662 
663 	return 0;
664 
665 free_rx:
666 	kvfree(priv->rx);
667 	priv->rx = NULL;
668 free_tx_queue:
669 	gve_tx_free_rings(priv);
670 free_tx:
671 	kvfree(priv->tx);
672 	priv->tx = NULL;
673 	return err;
674 }
675 
676 static int gve_destroy_rings(struct gve_priv *priv)
677 {
678 	int err;
679 
680 	err = gve_adminq_destroy_tx_queues(priv, priv->tx_cfg.num_queues);
681 	if (err) {
682 		netif_err(priv, drv, priv->dev,
683 			  "failed to destroy tx queues\n");
684 		/* This failure will trigger a reset - no need to clean up */
685 		return err;
686 	}
687 	netif_dbg(priv, drv, priv->dev, "destroyed tx queues\n");
688 	err = gve_adminq_destroy_rx_queues(priv, priv->rx_cfg.num_queues);
689 	if (err) {
690 		netif_err(priv, drv, priv->dev,
691 			  "failed to destroy rx queues\n");
692 		/* This failure will trigger a reset - no need to clean up */
693 		return err;
694 	}
695 	netif_dbg(priv, drv, priv->dev, "destroyed rx queues\n");
696 	return 0;
697 }
698 
699 static void gve_rx_free_rings(struct gve_priv *priv)
700 {
701 	if (gve_is_gqi(priv))
702 		gve_rx_free_rings_gqi(priv);
703 	else
704 		gve_rx_free_rings_dqo(priv);
705 }
706 
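/* Tear down NAPI instances and free all TX and RX ring state. */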
707 static void gve_free_rings(struct gve_priv *priv)
708 {
709 	int ntfy_idx;
710 	int i;
711 
712 	if (priv->tx) {
713 		for (i = 0; i < priv->tx_cfg.num_queues; i++) {
714 			ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
715 			gve_remove_napi(priv, ntfy_idx);
716 		}
717 		gve_tx_free_rings(priv);
718 		kvfree(priv->tx);
719 		priv->tx = NULL;
720 	}
721 	if (priv->rx) {
722 		for (i = 0; i < priv->rx_cfg.num_queues; i++) {
723 			ntfy_idx = gve_rx_idx_to_ntfy(priv, i);
724 			gve_remove_napi(priv, ntfy_idx);
725 		}
726 		gve_rx_free_rings(priv);
727 		kvfree(priv->rx);
728 		priv->rx = NULL;
729 	}
730 }
731 
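/* Allocate a page and DMA-map it for the device, counting failures in the
 * driver's stats.
 */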
732 int gve_alloc_page(struct gve_priv *priv, struct device *dev,
733 		   struct page **page, dma_addr_t *dma,
734 		   enum dma_data_direction dir)
735 {
736 	*page = alloc_page(GFP_KERNEL);
737 	if (!*page) {
738 		priv->page_alloc_fail++;
739 		return -ENOMEM;
740 	}
741 	*dma = dma_map_page(dev, *page, 0, PAGE_SIZE, dir);
742 	if (dma_mapping_error(dev, *dma)) {
743 		priv->dma_mapping_error++;
744 		put_page(*page);
745 		return -ENOMEM;
746 	}
747 	return 0;
748 }
749 
750 static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id,
751 				     int pages)
752 {
753 	struct gve_queue_page_list *qpl = &priv->qpls[id];
754 	int err;
755 	int i;
756 
757 	if (pages + priv->num_registered_pages > priv->max_registered_pages) {
		netif_err(priv, drv, priv->dev,
			  "Registering %llu pages would exceed the limit of %llu registered pages\n",
			  pages + priv->num_registered_pages,
			  priv->max_registered_pages);
762 		return -EINVAL;
763 	}
764 
765 	qpl->id = id;
766 	qpl->num_entries = 0;
767 	qpl->pages = kvzalloc(pages * sizeof(*qpl->pages), GFP_KERNEL);
768 	/* caller handles clean up */
769 	if (!qpl->pages)
770 		return -ENOMEM;
771 	qpl->page_buses = kvzalloc(pages * sizeof(*qpl->page_buses),
772 				   GFP_KERNEL);
773 	/* caller handles clean up */
774 	if (!qpl->page_buses)
775 		return -ENOMEM;
776 
777 	for (i = 0; i < pages; i++) {
778 		err = gve_alloc_page(priv, &priv->pdev->dev, &qpl->pages[i],
779 				     &qpl->page_buses[i],
780 				     gve_qpl_dma_dir(priv, id));
781 		/* caller handles clean up */
782 		if (err)
783 			return -ENOMEM;
784 		qpl->num_entries++;
785 	}
786 	priv->num_registered_pages += pages;
787 
788 	return 0;
789 }
790 
791 void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
792 		   enum dma_data_direction dir)
793 {
794 	if (!dma_mapping_error(dev, dma))
795 		dma_unmap_page(dev, dma, PAGE_SIZE, dir);
796 	if (page)
797 		put_page(page);
798 }
799 
800 static void gve_free_queue_page_list(struct gve_priv *priv,
801 				     int id)
802 {
803 	struct gve_queue_page_list *qpl = &priv->qpls[id];
804 	int i;
805 
806 	if (!qpl->pages)
807 		return;
808 	if (!qpl->page_buses)
809 		goto free_pages;
810 
811 	for (i = 0; i < qpl->num_entries; i++)
812 		gve_free_page(&priv->pdev->dev, qpl->pages[i],
813 			      qpl->page_buses[i], gve_qpl_dma_dir(priv, id));
814 
815 	kvfree(qpl->page_buses);
816 free_pages:
817 	kvfree(qpl->pages);
818 	priv->num_registered_pages -= qpl->num_entries;
819 }
820 
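/* Allocate the queue page lists used by QPL queue formats; raw-addressing
 * formats do not need any.
 */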
821 static int gve_alloc_qpls(struct gve_priv *priv)
822 {
823 	int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
824 	int i, j;
825 	int err;
826 
827 	/* Raw addressing means no QPLs */
828 	if (priv->queue_format == GVE_GQI_RDA_FORMAT)
829 		return 0;
830 
831 	priv->qpls = kvzalloc(num_qpls * sizeof(*priv->qpls), GFP_KERNEL);
832 	if (!priv->qpls)
833 		return -ENOMEM;
834 
835 	for (i = 0; i < gve_num_tx_qpls(priv); i++) {
836 		err = gve_alloc_queue_page_list(priv, i,
837 						priv->tx_pages_per_qpl);
838 		if (err)
839 			goto free_qpls;
840 	}
841 	for (; i < num_qpls; i++) {
842 		err = gve_alloc_queue_page_list(priv, i,
843 						priv->rx_data_slot_cnt);
844 		if (err)
845 			goto free_qpls;
846 	}
847 
848 	priv->qpl_cfg.qpl_map_size = BITS_TO_LONGS(num_qpls) *
849 				     sizeof(unsigned long) * BITS_PER_BYTE;
850 	priv->qpl_cfg.qpl_id_map = kvzalloc(BITS_TO_LONGS(num_qpls) *
851 					    sizeof(unsigned long), GFP_KERNEL);
852 	if (!priv->qpl_cfg.qpl_id_map) {
853 		err = -ENOMEM;
854 		goto free_qpls;
855 	}
856 
857 	return 0;
858 
free_qpls:
	/* Free fully allocated QPLs and, when the failure came from the
	 * allocation loops above, the partially allocated one at index i.
	 * If the qpl_id_map allocation failed, i == num_qpls, so cap the
	 * loop to stay within the array.
	 */
	for (j = 0; j <= i && j < num_qpls; j++)
		gve_free_queue_page_list(priv, j);
862 	kvfree(priv->qpls);
863 	return err;
864 }
865 
866 static void gve_free_qpls(struct gve_priv *priv)
867 {
868 	int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
869 	int i;
870 
871 	/* Raw addressing means no QPLs */
872 	if (priv->queue_format == GVE_GQI_RDA_FORMAT)
873 		return;
874 
875 	kvfree(priv->qpl_cfg.qpl_id_map);
876 
877 	for (i = 0; i < num_qpls; i++)
878 		gve_free_queue_page_list(priv, i);
879 
880 	kvfree(priv->qpls);
881 }
882 
883 /* Use this to schedule a reset when the device is capable of continuing
884  * to handle other requests in its current state. If it is not, do a reset
885  * in thread instead.
886  */
887 void gve_schedule_reset(struct gve_priv *priv)
888 {
889 	gve_set_do_reset(priv);
890 	queue_work(priv->gve_wq, &priv->service_task);
891 }
892 
893 static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up);
894 static int gve_reset_recovery(struct gve_priv *priv, bool was_up);
895 static void gve_turndown(struct gve_priv *priv);
896 static void gve_turnup(struct gve_priv *priv);
897 
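/* ndo_open: allocate and register QPLs and rings, create the device queues
 * and bring the data path up. Failures after the device holds state trigger
 * a reset.
 */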
898 static int gve_open(struct net_device *dev)
899 {
900 	struct gve_priv *priv = netdev_priv(dev);
901 	int err;
902 
903 	err = gve_alloc_qpls(priv);
904 	if (err)
905 		return err;
906 
907 	err = gve_alloc_rings(priv);
908 	if (err)
909 		goto free_qpls;
910 
911 	err = netif_set_real_num_tx_queues(dev, priv->tx_cfg.num_queues);
912 	if (err)
913 		goto free_rings;
914 	err = netif_set_real_num_rx_queues(dev, priv->rx_cfg.num_queues);
915 	if (err)
916 		goto free_rings;
917 
918 	err = gve_register_qpls(priv);
919 	if (err)
920 		goto reset;
921 
922 	if (!gve_is_gqi(priv)) {
923 		/* Hard code this for now. This may be tuned in the future for
924 		 * performance.
925 		 */
926 		priv->data_buffer_size_dqo = GVE_RX_BUFFER_SIZE_DQO;
927 	}
928 	err = gve_create_rings(priv);
929 	if (err)
930 		goto reset;
931 
932 	gve_set_device_rings_ok(priv);
933 
934 	if (gve_get_report_stats(priv))
935 		mod_timer(&priv->stats_report_timer,
936 			  round_jiffies(jiffies +
937 				msecs_to_jiffies(priv->stats_report_timer_period)));
938 
939 	gve_turnup(priv);
940 	queue_work(priv->gve_wq, &priv->service_task);
941 	priv->interface_up_cnt++;
942 	return 0;
943 
944 free_rings:
945 	gve_free_rings(priv);
946 free_qpls:
947 	gve_free_qpls(priv);
948 	return err;
949 
950 reset:
951 	/* This must have been called from a reset due to the rtnl lock
952 	 * so just return at this point.
953 	 */
954 	if (gve_get_reset_in_progress(priv))
955 		return err;
956 	/* Otherwise reset before returning */
957 	gve_reset_and_teardown(priv, true);
958 	/* if this fails there is nothing we can do so just ignore the return */
959 	gve_reset_recovery(priv, false);
960 	/* return the original error */
961 	return err;
962 }
963 
964 static int gve_close(struct net_device *dev)
965 {
966 	struct gve_priv *priv = netdev_priv(dev);
967 	int err;
968 
969 	netif_carrier_off(dev);
970 	if (gve_get_device_rings_ok(priv)) {
971 		gve_turndown(priv);
972 		err = gve_destroy_rings(priv);
973 		if (err)
974 			goto err;
975 		err = gve_unregister_qpls(priv);
976 		if (err)
977 			goto err;
978 		gve_clear_device_rings_ok(priv);
979 	}
980 	del_timer_sync(&priv->stats_report_timer);
981 
982 	gve_free_rings(priv);
983 	gve_free_qpls(priv);
984 	priv->interface_down_cnt++;
985 	return 0;
986 
987 err:
988 	/* This must have been called from a reset due to the rtnl lock
989 	 * so just return at this point.
990 	 */
991 	if (gve_get_reset_in_progress(priv))
992 		return err;
993 	/* Otherwise reset before returning */
994 	gve_reset_and_teardown(priv, true);
995 	return gve_reset_recovery(priv, false);
996 }
997 
998 int gve_adjust_queues(struct gve_priv *priv,
999 		      struct gve_queue_config new_rx_config,
1000 		      struct gve_queue_config new_tx_config)
1001 {
1002 	int err;
1003 
1004 	if (netif_carrier_ok(priv->dev)) {
1005 		/* To make this process as simple as possible we teardown the
1006 		 * device, set the new configuration, and then bring the device
1007 		 * up again.
1008 		 */
1009 		err = gve_close(priv->dev);
1010 		/* we have already tried to reset in close,
1011 		 * just fail at this point
1012 		 */
1013 		if (err)
1014 			return err;
1015 		priv->tx_cfg = new_tx_config;
1016 		priv->rx_cfg = new_rx_config;
1017 
1018 		err = gve_open(priv->dev);
1019 		if (err)
1020 			goto err;
1021 
1022 		return 0;
1023 	}
1024 	/* Set the config for the next up. */
1025 	priv->tx_cfg = new_tx_config;
1026 	priv->rx_cfg = new_rx_config;
1027 
1028 	return 0;
1029 err:
1030 	netif_err(priv, drv, priv->dev,
1031 		  "Adjust queues failed! !!! DISABLING ALL QUEUES !!!\n");
1032 	gve_turndown(priv);
1033 	return err;
1034 }
1035 
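/* Quiesce the data path: drop the carrier, disable NAPI and stop TX queues. */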
1036 static void gve_turndown(struct gve_priv *priv)
1037 {
1038 	int idx;
1039 
1040 	if (netif_carrier_ok(priv->dev))
1041 		netif_carrier_off(priv->dev);
1042 
1043 	if (!gve_get_napi_enabled(priv))
1044 		return;
1045 
1046 	/* Disable napi to prevent more work from coming in */
1047 	for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
1048 		int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
1049 		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
1050 
1051 		napi_disable(&block->napi);
1052 	}
1053 	for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
1054 		int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
1055 		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
1056 
1057 		napi_disable(&block->napi);
1058 	}
1059 
1060 	/* Stop tx queues */
1061 	netif_tx_disable(priv->dev);
1062 
1063 	gve_clear_napi_enabled(priv);
1064 	gve_clear_report_stats(priv);
1065 }
1066 
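/* Restart the data path: start TX queues, enable NAPI and re-arm interrupts. */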
1067 static void gve_turnup(struct gve_priv *priv)
1068 {
1069 	int idx;
1070 
1071 	/* Start the tx queues */
1072 	netif_tx_start_all_queues(priv->dev);
1073 
1074 	/* Enable napi and unmask interrupts for all queues */
1075 	for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
1076 		int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
1077 		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
1078 
1079 		napi_enable(&block->napi);
1080 		if (gve_is_gqi(priv)) {
1081 			iowrite32be(0, gve_irq_doorbell(priv, block));
1082 		} else {
1083 			u32 val = gve_set_itr_ratelimit_dqo(GVE_TX_IRQ_RATELIMIT_US_DQO);
1084 
1085 			gve_write_irq_doorbell_dqo(priv, block, val);
1086 		}
1087 	}
1088 	for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
1089 		int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
1090 		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
1091 
1092 		napi_enable(&block->napi);
1093 		if (gve_is_gqi(priv)) {
1094 			iowrite32be(0, gve_irq_doorbell(priv, block));
1095 		} else {
1096 			u32 val = gve_set_itr_ratelimit_dqo(GVE_RX_IRQ_RATELIMIT_US_DQO);
1097 
1098 			gve_write_irq_doorbell_dqo(priv, block, val);
1099 		}
1100 	}
1101 
1102 	gve_set_napi_enabled(priv);
1103 }
1104 
1105 static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue)
1106 {
1107 	struct gve_priv *priv = netdev_priv(dev);
1108 
1109 	gve_schedule_reset(priv);
1110 	priv->tx_timeo_cnt++;
1111 }
1112 
1113 static int gve_set_features(struct net_device *netdev,
1114 			    netdev_features_t features)
1115 {
1116 	const netdev_features_t orig_features = netdev->features;
1117 	struct gve_priv *priv = netdev_priv(netdev);
1118 	int err;
1119 
1120 	if ((netdev->features & NETIF_F_LRO) != (features & NETIF_F_LRO)) {
1121 		netdev->features ^= NETIF_F_LRO;
1122 		if (netif_carrier_ok(netdev)) {
1123 			/* To make this process as simple as possible we
1124 			 * teardown the device, set the new configuration,
1125 			 * and then bring the device up again.
1126 			 */
1127 			err = gve_close(netdev);
1128 			/* We have already tried to reset in close, just fail
1129 			 * at this point.
1130 			 */
1131 			if (err)
1132 				goto err;
1133 
1134 			err = gve_open(netdev);
1135 			if (err)
1136 				goto err;
1137 		}
1138 	}
1139 
1140 	return 0;
1141 err:
1142 	/* Reverts the change on error. */
1143 	netdev->features = orig_features;
1144 	netif_err(priv, drv, netdev,
1145 		  "Set features failed! !!! DISABLING ALL QUEUES !!!\n");
1146 	return err;
1147 }
1148 
1149 static const struct net_device_ops gve_netdev_ops = {
1150 	.ndo_start_xmit		=	gve_start_xmit,
1151 	.ndo_open		=	gve_open,
1152 	.ndo_stop		=	gve_close,
1153 	.ndo_get_stats64	=	gve_get_stats,
	.ndo_tx_timeout		=	gve_tx_timeout,
1155 	.ndo_set_features	=	gve_set_features,
1156 };
1157 
1158 static void gve_handle_status(struct gve_priv *priv, u32 status)
1159 {
1160 	if (GVE_DEVICE_STATUS_RESET_MASK & status) {
1161 		dev_info(&priv->pdev->dev, "Device requested reset.\n");
1162 		gve_set_do_reset(priv);
1163 	}
1164 	if (GVE_DEVICE_STATUS_REPORT_STATS_MASK & status) {
1165 		priv->stats_report_trigger_cnt++;
1166 		gve_set_do_report_stats(priv);
1167 	}
1168 }
1169 
1170 static void gve_handle_reset(struct gve_priv *priv)
1171 {
1172 	/* A service task will be scheduled at the end of probe to catch any
1173 	 * resets that need to happen, and we don't want to reset until
1174 	 * probe is done.
1175 	 */
1176 	if (gve_get_probe_in_progress(priv))
1177 		return;
1178 
1179 	if (gve_get_do_reset(priv)) {
1180 		rtnl_lock();
1181 		gve_reset(priv, false);
1182 		rtnl_unlock();
1183 	}
1184 }
1185 
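/* Fill the DMA'd stats report buffer with per-queue TX and RX statistics
 * for the device to consume.
 */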
1186 void gve_handle_report_stats(struct gve_priv *priv)
1187 {
	struct stats *stats = priv->stats_report->stats;
	int idx, stats_idx = 0;
	unsigned int start = 0;
	u64 tx_bytes;
1191 
1192 	if (!gve_get_report_stats(priv))
1193 		return;
1194 
1195 	be64_add_cpu(&priv->stats_report->written_count, 1);
1196 	/* tx stats */
1197 	if (priv->tx) {
1198 		for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
1199 			u32 last_completion = 0;
1200 			u32 tx_frames = 0;
1201 
1202 			/* DQO doesn't currently support these metrics. */
1203 			if (gve_is_gqi(priv)) {
1204 				last_completion = priv->tx[idx].done;
1205 				tx_frames = priv->tx[idx].req;
1206 			}
1207 
1208 			do {
1209 				start = u64_stats_fetch_begin(&priv->tx[idx].statss);
1210 				tx_bytes = priv->tx[idx].bytes_done;
1211 			} while (u64_stats_fetch_retry(&priv->tx[idx].statss, start));
1212 			stats[stats_idx++] = (struct stats) {
1213 				.stat_name = cpu_to_be32(TX_WAKE_CNT),
1214 				.value = cpu_to_be64(priv->tx[idx].wake_queue),
1215 				.queue_id = cpu_to_be32(idx),
1216 			};
1217 			stats[stats_idx++] = (struct stats) {
1218 				.stat_name = cpu_to_be32(TX_STOP_CNT),
1219 				.value = cpu_to_be64(priv->tx[idx].stop_queue),
1220 				.queue_id = cpu_to_be32(idx),
1221 			};
1222 			stats[stats_idx++] = (struct stats) {
1223 				.stat_name = cpu_to_be32(TX_FRAMES_SENT),
1224 				.value = cpu_to_be64(tx_frames),
1225 				.queue_id = cpu_to_be32(idx),
1226 			};
1227 			stats[stats_idx++] = (struct stats) {
1228 				.stat_name = cpu_to_be32(TX_BYTES_SENT),
1229 				.value = cpu_to_be64(tx_bytes),
1230 				.queue_id = cpu_to_be32(idx),
1231 			};
1232 			stats[stats_idx++] = (struct stats) {
1233 				.stat_name = cpu_to_be32(TX_LAST_COMPLETION_PROCESSED),
1234 				.value = cpu_to_be64(last_completion),
1235 				.queue_id = cpu_to_be32(idx),
1236 			};
1237 		}
1238 	}
1239 	/* rx stats */
1240 	if (priv->rx) {
1241 		for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
1242 			stats[stats_idx++] = (struct stats) {
1243 				.stat_name = cpu_to_be32(RX_NEXT_EXPECTED_SEQUENCE),
1244 				.value = cpu_to_be64(priv->rx[idx].desc.seqno),
1245 				.queue_id = cpu_to_be32(idx),
1246 			};
1247 			stats[stats_idx++] = (struct stats) {
1248 				.stat_name = cpu_to_be32(RX_BUFFERS_POSTED),
				.value = cpu_to_be64(priv->rx[idx].fill_cnt),
1250 				.queue_id = cpu_to_be32(idx),
1251 			};
1252 		}
1253 	}
1254 }
1255 
1256 static void gve_handle_link_status(struct gve_priv *priv, bool link_status)
1257 {
1258 	if (!gve_get_napi_enabled(priv))
1259 		return;
1260 
1261 	if (link_status == netif_carrier_ok(priv->dev))
1262 		return;
1263 
1264 	if (link_status) {
1265 		netdev_info(priv->dev, "Device link is up.\n");
1266 		netif_carrier_on(priv->dev);
1267 	} else {
1268 		netdev_info(priv->dev, "Device link is down.\n");
1269 		netif_carrier_off(priv->dev);
1270 	}
1271 }
1272 
1273 /* Handle NIC status register changes, reset requests and report stats */
1274 static void gve_service_task(struct work_struct *work)
1275 {
1276 	struct gve_priv *priv = container_of(work, struct gve_priv,
1277 					     service_task);
1278 	u32 status = ioread32be(&priv->reg_bar0->device_status);
1279 
1280 	gve_handle_status(priv, status);
1281 
1282 	gve_handle_reset(priv);
1283 	gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status);
1284 }
1285 
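/* Bring up the admin queue, query the device description, size the MSI-X
 * vectors and queue counts, then set up the device resources.
 */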
1286 static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
1287 {
1288 	int num_ntfy;
1289 	int err;
1290 
1291 	/* Set up the adminq */
1292 	err = gve_adminq_alloc(&priv->pdev->dev, priv);
1293 	if (err) {
1294 		dev_err(&priv->pdev->dev,
1295 			"Failed to alloc admin queue: err=%d\n", err);
1296 		return err;
1297 	}
1298 
1299 	if (skip_describe_device)
1300 		goto setup_device;
1301 
1302 	priv->queue_format = GVE_QUEUE_FORMAT_UNSPECIFIED;
1303 	/* Get the initial information we need from the device */
1304 	err = gve_adminq_describe_device(priv);
1305 	if (err) {
1306 		dev_err(&priv->pdev->dev,
1307 			"Could not get device information: err=%d\n", err);
1308 		goto err;
1309 	}
1310 	if (gve_is_gqi(priv) && priv->dev->max_mtu > PAGE_SIZE) {
1311 		priv->dev->max_mtu = PAGE_SIZE;
1312 		err = gve_adminq_set_mtu(priv, priv->dev->mtu);
1313 		if (err) {
			dev_err(&priv->pdev->dev, "Could not set mtu: err=%d\n", err);
1315 			goto err;
1316 		}
1317 	}
1318 	priv->dev->mtu = priv->dev->max_mtu;
1319 	num_ntfy = pci_msix_vec_count(priv->pdev);
1320 	if (num_ntfy <= 0) {
1321 		dev_err(&priv->pdev->dev,
1322 			"could not count MSI-x vectors: err=%d\n", num_ntfy);
1323 		err = num_ntfy;
1324 		goto err;
1325 	} else if (num_ntfy < GVE_MIN_MSIX) {
1326 		dev_err(&priv->pdev->dev, "gve needs at least %d MSI-x vectors, but only has %d\n",
1327 			GVE_MIN_MSIX, num_ntfy);
1328 		err = -EINVAL;
1329 		goto err;
1330 	}
1331 
1332 	priv->num_registered_pages = 0;
1333 	priv->rx_copybreak = GVE_DEFAULT_RX_COPYBREAK;
1334 	/* gvnic has one Notification Block per MSI-x vector, except for the
1335 	 * management vector
1336 	 */
1337 	priv->num_ntfy_blks = (num_ntfy - 1) & ~0x1;
1338 	priv->mgmt_msix_idx = priv->num_ntfy_blks;
1339 
1340 	priv->tx_cfg.max_queues =
1341 		min_t(int, priv->tx_cfg.max_queues, priv->num_ntfy_blks / 2);
1342 	priv->rx_cfg.max_queues =
1343 		min_t(int, priv->rx_cfg.max_queues, priv->num_ntfy_blks / 2);
1344 
1345 	priv->tx_cfg.num_queues = priv->tx_cfg.max_queues;
1346 	priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;
1347 	if (priv->default_num_queues > 0) {
1348 		priv->tx_cfg.num_queues = min_t(int, priv->default_num_queues,
1349 						priv->tx_cfg.num_queues);
1350 		priv->rx_cfg.num_queues = min_t(int, priv->default_num_queues,
1351 						priv->rx_cfg.num_queues);
1352 	}
1353 
1354 	dev_info(&priv->pdev->dev, "TX queues %d, RX queues %d\n",
1355 		 priv->tx_cfg.num_queues, priv->rx_cfg.num_queues);
1356 	dev_info(&priv->pdev->dev, "Max TX queues %d, Max RX queues %d\n",
1357 		 priv->tx_cfg.max_queues, priv->rx_cfg.max_queues);
1358 
1359 setup_device:
1360 	err = gve_setup_device_resources(priv);
1361 	if (!err)
1362 		return 0;
1363 err:
1364 	gve_adminq_free(&priv->pdev->dev, priv);
1365 	return err;
1366 }
1367 
1368 static void gve_teardown_priv_resources(struct gve_priv *priv)
1369 {
1370 	gve_teardown_device_resources(priv);
1371 	gve_adminq_free(&priv->pdev->dev, priv);
1372 }
1373 
1374 static void gve_trigger_reset(struct gve_priv *priv)
1375 {
1376 	/* Reset the device by releasing the AQ */
1377 	gve_adminq_release(priv);
1378 }
1379 
1380 static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up)
1381 {
1382 	gve_trigger_reset(priv);
1383 	/* With the reset having already happened, close cannot fail */
1384 	if (was_up)
1385 		gve_close(priv->dev);
1386 	gve_teardown_priv_resources(priv);
1387 }
1388 
1389 static int gve_reset_recovery(struct gve_priv *priv, bool was_up)
1390 {
1391 	int err;
1392 
1393 	err = gve_init_priv(priv, true);
1394 	if (err)
1395 		goto err;
1396 	if (was_up) {
1397 		err = gve_open(priv->dev);
1398 		if (err)
1399 			goto err;
1400 	}
1401 	return 0;
1402 err:
1403 	dev_err(&priv->pdev->dev, "Reset failed! !!! DISABLING ALL QUEUES !!!\n");
1404 	gve_turndown(priv);
1405 	return err;
1406 }
1407 
1408 int gve_reset(struct gve_priv *priv, bool attempt_teardown)
1409 {
1410 	bool was_up = netif_carrier_ok(priv->dev);
1411 	int err;
1412 
1413 	dev_info(&priv->pdev->dev, "Performing reset\n");
1414 	gve_clear_do_reset(priv);
1415 	gve_set_reset_in_progress(priv);
1416 	/* If we aren't attempting to teardown normally, just go turndown and
1417 	 * reset right away.
1418 	 */
1419 	if (!attempt_teardown) {
1420 		gve_turndown(priv);
1421 		gve_reset_and_teardown(priv, was_up);
1422 	} else {
1423 		/* Otherwise attempt to close normally */
1424 		if (was_up) {
1425 			err = gve_close(priv->dev);
1426 			/* If that fails reset as we did above */
1427 			if (err)
1428 				gve_reset_and_teardown(priv, was_up);
1429 		}
1430 		/* Clean up any remaining resources */
1431 		gve_teardown_priv_resources(priv);
1432 	}
1433 
1434 	/* Set it all back up */
1435 	err = gve_reset_recovery(priv, was_up);
1436 	gve_clear_reset_in_progress(priv);
1437 	priv->reset_cnt++;
1438 	priv->interface_up_cnt = 0;
1439 	priv->interface_down_cnt = 0;
1440 	priv->stats_report_trigger_cnt = 0;
1441 	return err;
1442 }
1443 
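/* Write the driver version string to the device's driver version register,
 * one byte at a time.
 */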
1444 static void gve_write_version(u8 __iomem *driver_version_register)
1445 {
1446 	const char *c = gve_version_prefix;
1447 
1448 	while (*c) {
1449 		writeb(*c, driver_version_register);
1450 		c++;
1451 	}
1452 
1453 	c = gve_version_str;
1454 	while (*c) {
1455 		writeb(*c, driver_version_register);
1456 		c++;
1457 	}
1458 	writeb('\n', driver_version_register);
1459 }
1460 
1461 static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1462 {
1463 	int max_tx_queues, max_rx_queues;
1464 	struct net_device *dev;
1465 	__be32 __iomem *db_bar;
1466 	struct gve_registers __iomem *reg_bar;
1467 	struct gve_priv *priv;
1468 	int err;
1469 
1470 	err = pci_enable_device(pdev);
1471 	if (err)
1472 		return -ENXIO;
1473 
1474 	err = pci_request_regions(pdev, "gvnic-cfg");
1475 	if (err)
1476 		goto abort_with_enabled;
1477 
1478 	pci_set_master(pdev);
1479 
1480 	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
1481 	if (err) {
1482 		dev_err(&pdev->dev, "Failed to set dma mask: err=%d\n", err);
1483 		goto abort_with_pci_region;
1484 	}
1485 
1486 	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
1487 	if (err) {
1488 		dev_err(&pdev->dev,
1489 			"Failed to set consistent dma mask: err=%d\n", err);
1490 		goto abort_with_pci_region;
1491 	}
1492 
1493 	reg_bar = pci_iomap(pdev, GVE_REGISTER_BAR, 0);
1494 	if (!reg_bar) {
1495 		dev_err(&pdev->dev, "Failed to map pci bar!\n");
1496 		err = -ENOMEM;
1497 		goto abort_with_pci_region;
1498 	}
1499 
1500 	db_bar = pci_iomap(pdev, GVE_DOORBELL_BAR, 0);
1501 	if (!db_bar) {
1502 		dev_err(&pdev->dev, "Failed to map doorbell bar!\n");
1503 		err = -ENOMEM;
1504 		goto abort_with_reg_bar;
1505 	}
1506 
1507 	gve_write_version(&reg_bar->driver_version);
1508 	/* Get max queues to alloc etherdev */
1509 	max_tx_queues = ioread32be(&reg_bar->max_tx_queues);
1510 	max_rx_queues = ioread32be(&reg_bar->max_rx_queues);
1511 	/* Alloc and setup the netdev and priv */
1512 	dev = alloc_etherdev_mqs(sizeof(*priv), max_tx_queues, max_rx_queues);
1513 	if (!dev) {
1514 		dev_err(&pdev->dev, "could not allocate netdev\n");
		err = -ENOMEM;
		goto abort_with_db_bar;
1516 	}
1517 	SET_NETDEV_DEV(dev, &pdev->dev);
1518 	pci_set_drvdata(pdev, dev);
1519 	dev->ethtool_ops = &gve_ethtool_ops;
1520 	dev->netdev_ops = &gve_netdev_ops;
1521 
1522 	/* Set default and supported features.
1523 	 *
1524 	 * Features might be set in other locations as well (such as
1525 	 * `gve_adminq_describe_device`).
1526 	 */
1527 	dev->hw_features = NETIF_F_HIGHDMA;
1528 	dev->hw_features |= NETIF_F_SG;
1529 	dev->hw_features |= NETIF_F_HW_CSUM;
1530 	dev->hw_features |= NETIF_F_TSO;
1531 	dev->hw_features |= NETIF_F_TSO6;
1532 	dev->hw_features |= NETIF_F_TSO_ECN;
1533 	dev->hw_features |= NETIF_F_RXCSUM;
1534 	dev->hw_features |= NETIF_F_RXHASH;
1535 	dev->features = dev->hw_features;
1536 	dev->watchdog_timeo = 5 * HZ;
1537 	dev->min_mtu = ETH_MIN_MTU;
1538 	netif_carrier_off(dev);
1539 
1540 	priv = netdev_priv(dev);
1541 	priv->dev = dev;
1542 	priv->pdev = pdev;
1543 	priv->msg_enable = DEFAULT_MSG_LEVEL;
1544 	priv->reg_bar0 = reg_bar;
1545 	priv->db_bar2 = db_bar;
1546 	priv->service_task_flags = 0x0;
1547 	priv->state_flags = 0x0;
1548 	priv->ethtool_flags = 0x0;
1549 
1550 	gve_set_probe_in_progress(priv);
1551 	priv->gve_wq = alloc_ordered_workqueue("gve", 0);
1552 	if (!priv->gve_wq) {
1553 		dev_err(&pdev->dev, "Could not allocate workqueue");
1554 		err = -ENOMEM;
1555 		goto abort_with_netdev;
1556 	}
1557 	INIT_WORK(&priv->service_task, gve_service_task);
1558 	INIT_WORK(&priv->stats_report_task, gve_stats_report_task);
1559 	priv->tx_cfg.max_queues = max_tx_queues;
1560 	priv->rx_cfg.max_queues = max_rx_queues;
1561 
1562 	err = gve_init_priv(priv, false);
1563 	if (err)
1564 		goto abort_with_wq;
1565 
1566 	err = register_netdev(dev);
1567 	if (err)
1568 		goto abort_with_wq;
1569 
1570 	dev_info(&pdev->dev, "GVE version %s\n", gve_version_str);
1571 	dev_info(&pdev->dev, "GVE queue format %d\n", (int)priv->queue_format);
1572 	gve_clear_probe_in_progress(priv);
1573 	queue_work(priv->gve_wq, &priv->service_task);
1574 	return 0;
1575 
1576 abort_with_wq:
1577 	destroy_workqueue(priv->gve_wq);
1578 
1579 abort_with_netdev:
1580 	free_netdev(dev);
1581 
1582 abort_with_db_bar:
1583 	pci_iounmap(pdev, db_bar);
1584 
1585 abort_with_reg_bar:
1586 	pci_iounmap(pdev, reg_bar);
1587 
1588 abort_with_pci_region:
1589 	pci_release_regions(pdev);
1590 
1591 abort_with_enabled:
1592 	pci_disable_device(pdev);
	return err;
1594 }
1595 
1596 static void gve_remove(struct pci_dev *pdev)
1597 {
1598 	struct net_device *netdev = pci_get_drvdata(pdev);
1599 	struct gve_priv *priv = netdev_priv(netdev);
1600 	__be32 __iomem *db_bar = priv->db_bar2;
1601 	void __iomem *reg_bar = priv->reg_bar0;
1602 
1603 	unregister_netdev(netdev);
1604 	gve_teardown_priv_resources(priv);
1605 	destroy_workqueue(priv->gve_wq);
1606 	free_netdev(netdev);
1607 	pci_iounmap(pdev, db_bar);
1608 	pci_iounmap(pdev, reg_bar);
1609 	pci_release_regions(pdev);
1610 	pci_disable_device(pdev);
1611 }
1612 
1613 static const struct pci_device_id gve_id_table[] = {
1614 	{ PCI_DEVICE(PCI_VENDOR_ID_GOOGLE, PCI_DEV_ID_GVNIC) },
1615 	{ }
1616 };
1617 
1618 static struct pci_driver gvnic_driver = {
1619 	.name		= "gvnic",
1620 	.id_table	= gve_id_table,
1621 	.probe		= gve_probe,
1622 	.remove		= gve_remove,
1623 };
1624 
1625 module_pci_driver(gvnic_driver);
1626 
1627 MODULE_DEVICE_TABLE(pci, gve_id_table);
1628 MODULE_AUTHOR("Google, Inc.");
1629 MODULE_DESCRIPTION("gVNIC Driver");
1630 MODULE_LICENSE("Dual MIT/GPL");
1631 MODULE_VERSION(GVE_VERSION);
1632