1 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
2 /* Google virtual Ethernet (gve) driver
3  *
4  * Copyright (C) 2015-2019 Google, Inc.
5  */
6 
7 #include <linux/cpumask.h>
8 #include <linux/etherdevice.h>
9 #include <linux/interrupt.h>
10 #include <linux/module.h>
11 #include <linux/pci.h>
12 #include <linux/sched.h>
13 #include <linux/timer.h>
14 #include <linux/workqueue.h>
15 #include <net/sch_generic.h>
16 #include "gve.h"
17 #include "gve_adminq.h"
18 #include "gve_register.h"
19 
20 #define GVE_DEFAULT_RX_COPYBREAK	(256)
21 
22 #define DEFAULT_MSG_LEVEL	(NETIF_MSG_DRV | NETIF_MSG_LINK)
23 #define GVE_VERSION		"1.0.0"
24 #define GVE_VERSION_PREFIX	"GVE-"
25 
26 const char gve_version_str[] = GVE_VERSION;
27 static const char gve_version_prefix[] = GVE_VERSION_PREFIX;
28 
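/* Aggregate per-ring RX/TX packet and byte counts into rtnl_link_stats64,
 * using each ring's u64_stats seqcount for a consistent snapshot.
 */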
29 static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
30 {
31 	struct gve_priv *priv = netdev_priv(dev);
32 	unsigned int start;
33 	int ring;
34 
35 	if (priv->rx) {
36 		for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
37 			do {
38 				start =
39 				  u64_stats_fetch_begin(&priv->rx[ring].statss);
40 				s->rx_packets += priv->rx[ring].rpackets;
41 				s->rx_bytes += priv->rx[ring].rbytes;
42 			} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
43 						       start));
44 		}
45 	}
46 	if (priv->tx) {
47 		for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) {
48 			do {
49 				start =
50 				  u64_stats_fetch_begin(&priv->tx[ring].statss);
51 				s->tx_packets += priv->tx[ring].pkt_done;
52 				s->tx_bytes += priv->tx[ring].bytes_done;
53 			} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
54 						       start));
55 		}
56 	}
57 }
58 
59 static int gve_alloc_counter_array(struct gve_priv *priv)
60 {
61 	priv->counter_array =
62 		dma_alloc_coherent(&priv->pdev->dev,
63 				   priv->num_event_counters *
64 				   sizeof(*priv->counter_array),
65 				   &priv->counter_array_bus, GFP_KERNEL);
66 	if (!priv->counter_array)
67 		return -ENOMEM;
68 
69 	return 0;
70 }
71 
72 static void gve_free_counter_array(struct gve_priv *priv)
73 {
74 	dma_free_coherent(&priv->pdev->dev,
75 			  priv->num_event_counters *
76 			  sizeof(*priv->counter_array),
77 			  priv->counter_array, priv->counter_array_bus);
78 	priv->counter_array = NULL;
79 }
80 
81 /* NIC requests to report stats */
82 static void gve_stats_report_task(struct work_struct *work)
83 {
84 	struct gve_priv *priv = container_of(work, struct gve_priv,
85 					     stats_report_task);
86 	if (gve_get_do_report_stats(priv)) {
87 		gve_handle_report_stats(priv);
88 		gve_clear_do_report_stats(priv);
89 	}
90 }
91 
92 static void gve_stats_report_schedule(struct gve_priv *priv)
93 {
94 	if (!gve_get_probe_in_progress(priv) &&
95 	    !gve_get_reset_in_progress(priv)) {
96 		gve_set_do_report_stats(priv);
97 		queue_work(priv->gve_wq, &priv->stats_report_task);
98 	}
99 }
100 
101 static void gve_stats_report_timer(struct timer_list *t)
102 {
103 	struct gve_priv *priv = from_timer(priv, t, stats_report_timer);
104 
105 	mod_timer(&priv->stats_report_timer,
106 		  round_jiffies(jiffies +
107 		  msecs_to_jiffies(priv->stats_report_timer_period)));
108 	gve_stats_report_schedule(priv);
109 }
110 
111 static int gve_alloc_stats_report(struct gve_priv *priv)
112 {
113 	int tx_stats_num, rx_stats_num;
114 
115 	tx_stats_num = (GVE_TX_STATS_REPORT_NUM + NIC_TX_STATS_REPORT_NUM) *
116 		       priv->tx_cfg.num_queues;
117 	rx_stats_num = (GVE_RX_STATS_REPORT_NUM + NIC_RX_STATS_REPORT_NUM) *
118 		       priv->rx_cfg.num_queues;
119 	priv->stats_report_len = sizeof(struct gve_stats_report) +
120 				 (tx_stats_num + rx_stats_num) *
121 				 sizeof(struct stats);
122 	priv->stats_report =
123 		dma_alloc_coherent(&priv->pdev->dev, priv->stats_report_len,
124 				   &priv->stats_report_bus, GFP_KERNEL);
125 	if (!priv->stats_report)
126 		return -ENOMEM;
127 	/* Set up timer for the report-stats task */
128 	timer_setup(&priv->stats_report_timer, gve_stats_report_timer, 0);
129 	priv->stats_report_timer_period = GVE_STATS_REPORT_TIMER_PERIOD;
130 	return 0;
131 }
132 
133 static void gve_free_stats_report(struct gve_priv *priv)
134 {
135 	del_timer_sync(&priv->stats_report_timer);
136 	dma_free_coherent(&priv->pdev->dev, priv->stats_report_len,
137 			  priv->stats_report, priv->stats_report_bus);
138 	priv->stats_report = NULL;
139 }
140 
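/* Management vector interrupt - defer handling to the service task */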
141 static irqreturn_t gve_mgmnt_intr(int irq, void *arg)
142 {
143 	struct gve_priv *priv = arg;
144 
145 	queue_work(priv->gve_wq, &priv->service_task);
146 	return IRQ_HANDLED;
147 }
148 
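/* Queue notification interrupt - mask the vector and let NAPI do the work */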
149 static irqreturn_t gve_intr(int irq, void *arg)
150 {
151 	struct gve_notify_block *block = arg;
152 	struct gve_priv *priv = block->priv;
153 
154 	iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block));
155 	napi_schedule_irqoff(&block->napi);
156 	return IRQ_HANDLED;
157 }
158 
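/* NAPI poll - service TX and/or RX for this notification block, then ack and
 * re-enable the interrupt once no work remains.
 */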
159 static int gve_napi_poll(struct napi_struct *napi, int budget)
160 {
161 	struct gve_notify_block *block;
162 	__be32 __iomem *irq_doorbell;
163 	bool reschedule = false;
164 	struct gve_priv *priv;
165 
166 	block = container_of(napi, struct gve_notify_block, napi);
167 	priv = block->priv;
168 
169 	if (block->tx)
170 		reschedule |= gve_tx_poll(block, budget);
171 	if (block->rx)
172 		reschedule |= gve_rx_poll(block, budget);
173 
174 	if (reschedule)
175 		return budget;
176 
177 	napi_complete(napi);
178 	irq_doorbell = gve_irq_doorbell(priv, block);
179 	iowrite32be(GVE_IRQ_ACK | GVE_IRQ_EVENT, irq_doorbell);
180 
181 	/* Double check we have no extra work.
182 	 * Ensure unmask synchronizes with checking for work.
183 	 */
184 	dma_rmb();
185 	if (block->tx)
186 		reschedule |= gve_tx_poll(block, -1);
187 	if (block->rx)
188 		reschedule |= gve_rx_poll(block, -1);
189 	if (reschedule && napi_reschedule(napi))
190 		iowrite32be(GVE_IRQ_MASK, irq_doorbell);
191 
192 	return 0;
193 }
194 
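/* Enable MSI-X, request the management and per-block IRQs, and allocate the
 * DMA region of notification blocks shared with the device.
 */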
195 static int gve_alloc_notify_blocks(struct gve_priv *priv)
196 {
197 	int num_vecs_requested = priv->num_ntfy_blks + 1;
198 	char *name = priv->dev->name;
199 	unsigned int active_cpus;
200 	int vecs_enabled;
201 	int i, j;
202 	int err;
203 
204 	priv->msix_vectors = kvzalloc(num_vecs_requested *
205 				      sizeof(*priv->msix_vectors), GFP_KERNEL);
206 	if (!priv->msix_vectors)
207 		return -ENOMEM;
208 	for (i = 0; i < num_vecs_requested; i++)
209 		priv->msix_vectors[i].entry = i;
210 	vecs_enabled = pci_enable_msix_range(priv->pdev, priv->msix_vectors,
211 					     GVE_MIN_MSIX, num_vecs_requested);
212 	if (vecs_enabled < 0) {
213 		dev_err(&priv->pdev->dev, "Could not enable min msix %d/%d\n",
214 			GVE_MIN_MSIX, vecs_enabled);
215 		err = vecs_enabled;
216 		goto abort_with_msix_vectors;
217 	}
218 	if (vecs_enabled != num_vecs_requested) {
219 		int new_num_ntfy_blks = (vecs_enabled - 1) & ~0x1;
220 		int vecs_per_type = new_num_ntfy_blks / 2;
221 		int vecs_left = new_num_ntfy_blks % 2;
222 
223 		priv->num_ntfy_blks = new_num_ntfy_blks;
224 		priv->tx_cfg.max_queues = min_t(int, priv->tx_cfg.max_queues,
225 						vecs_per_type);
226 		priv->rx_cfg.max_queues = min_t(int, priv->rx_cfg.max_queues,
227 						vecs_per_type + vecs_left);
228 		dev_err(&priv->pdev->dev,
229 			"Could not enable desired msix, only enabled %d, adjusting tx max queues to %d, and rx max queues to %d\n",
230 			vecs_enabled, priv->tx_cfg.max_queues,
231 			priv->rx_cfg.max_queues);
232 		if (priv->tx_cfg.num_queues > priv->tx_cfg.max_queues)
233 			priv->tx_cfg.num_queues = priv->tx_cfg.max_queues;
234 		if (priv->rx_cfg.num_queues > priv->rx_cfg.max_queues)
235 			priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;
236 	}
237 	/* Half the notification blocks go to TX and half to RX */
238 	active_cpus = min_t(int, priv->num_ntfy_blks / 2, num_online_cpus());
239 
240 	/* Setup Management Vector  - the last vector */
241 	snprintf(priv->mgmt_msix_name, sizeof(priv->mgmt_msix_name), "%s-mgmnt",
242 		 name);
243 	err = request_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector,
244 			  gve_mgmnt_intr, 0, priv->mgmt_msix_name, priv);
245 	if (err) {
246 		dev_err(&priv->pdev->dev, "Did not receive management vector.\n");
247 		goto abort_with_msix_enabled;
248 	}
249 	priv->ntfy_blocks =
250 		dma_alloc_coherent(&priv->pdev->dev,
251 				   priv->num_ntfy_blks *
252 				   sizeof(*priv->ntfy_blocks),
253 				   &priv->ntfy_block_bus, GFP_KERNEL);
254 	if (!priv->ntfy_blocks) {
255 		err = -ENOMEM;
256 		goto abort_with_mgmt_vector;
257 	}
258 	/* Setup the other blocks - the first n-1 vectors */
259 	for (i = 0; i < priv->num_ntfy_blks; i++) {
260 		struct gve_notify_block *block = &priv->ntfy_blocks[i];
261 		int msix_idx = i;
262 
263 		snprintf(block->name, sizeof(block->name), "%s-ntfy-block.%d",
264 			 name, i);
265 		block->priv = priv;
266 		err = request_irq(priv->msix_vectors[msix_idx].vector,
267 				  gve_intr, 0, block->name, block);
268 		if (err) {
269 			dev_err(&priv->pdev->dev,
270 				"Failed to receive msix vector %d\n", i);
271 			goto abort_with_some_ntfy_blocks;
272 		}
273 		irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
274 				      get_cpu_mask(i % active_cpus));
275 	}
276 	return 0;
277 abort_with_some_ntfy_blocks:
278 	for (j = 0; j < i; j++) {
279 		struct gve_notify_block *block = &priv->ntfy_blocks[j];
280 		int msix_idx = j;
281 
282 		irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
283 				      NULL);
284 		free_irq(priv->msix_vectors[msix_idx].vector, block);
285 	}
286 	dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks *
287 			  sizeof(*priv->ntfy_blocks),
288 			  priv->ntfy_blocks, priv->ntfy_block_bus);
289 	priv->ntfy_blocks = NULL;
290 abort_with_mgmt_vector:
291 	free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
292 abort_with_msix_enabled:
293 	pci_disable_msix(priv->pdev);
294 abort_with_msix_vectors:
295 	kvfree(priv->msix_vectors);
296 	priv->msix_vectors = NULL;
297 	return err;
298 }
299 
300 static void gve_free_notify_blocks(struct gve_priv *priv)
301 {
302 	int i;
303 
304 	/* Free the irqs */
305 	for (i = 0; i < priv->num_ntfy_blks; i++) {
306 		struct gve_notify_block *block = &priv->ntfy_blocks[i];
307 		int msix_idx = i;
308 
309 		irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
310 				      NULL);
311 		free_irq(priv->msix_vectors[msix_idx].vector, block);
312 	}
313 	dma_free_coherent(&priv->pdev->dev,
314 			  priv->num_ntfy_blks * sizeof(*priv->ntfy_blocks),
315 			  priv->ntfy_blocks, priv->ntfy_block_bus);
316 	priv->ntfy_blocks = NULL;
317 	free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
318 	pci_disable_msix(priv->pdev);
319 	kvfree(priv->msix_vectors);
320 	priv->msix_vectors = NULL;
321 }
322 
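/* Allocate the counter array, notification blocks and stats report, then
 * describe them to the device over the admin queue.
 */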
323 static int gve_setup_device_resources(struct gve_priv *priv)
324 {
325 	int err;
326 
327 	err = gve_alloc_counter_array(priv);
328 	if (err)
329 		return err;
330 	err = gve_alloc_notify_blocks(priv);
331 	if (err)
332 		goto abort_with_counter;
333 	err = gve_alloc_stats_report(priv);
334 	if (err)
335 		goto abort_with_ntfy_blocks;
336 	err = gve_adminq_configure_device_resources(priv,
337 						    priv->counter_array_bus,
338 						    priv->num_event_counters,
339 						    priv->ntfy_block_bus,
340 						    priv->num_ntfy_blks);
341 	if (unlikely(err)) {
342 		dev_err(&priv->pdev->dev,
343 			"could not setup device_resources: err=%d\n", err);
344 		err = -ENXIO;
345 		goto abort_with_stats_report;
346 	}
347 	err = gve_adminq_report_stats(priv, priv->stats_report_len,
348 				      priv->stats_report_bus,
349 				      GVE_STATS_REPORT_TIMER_PERIOD);
350 	if (err)
351 		dev_err(&priv->pdev->dev,
352 			"Failed to report stats: err=%d\n", err);
353 	gve_set_device_resources_ok(priv);
354 	return 0;
355 abort_with_stats_report:
356 	gve_free_stats_report(priv);
357 abort_with_ntfy_blocks:
358 	gve_free_notify_blocks(priv);
359 abort_with_counter:
360 	gve_free_counter_array(priv);
361 	return err;
362 }
363 
364 static void gve_trigger_reset(struct gve_priv *priv);
365 
366 static void gve_teardown_device_resources(struct gve_priv *priv)
367 {
368 	int err;
369 
370 	/* Tell device its resources are being freed */
371 	if (gve_get_device_resources_ok(priv)) {
372 		/* detach the stats report */
373 		err = gve_adminq_report_stats(priv, 0, 0x0, GVE_STATS_REPORT_TIMER_PERIOD);
374 		if (err) {
375 			dev_err(&priv->pdev->dev,
376 				"Failed to detach stats report: err=%d\n", err);
377 			gve_trigger_reset(priv);
378 		}
379 		err = gve_adminq_deconfigure_device_resources(priv);
380 		if (err) {
381 			dev_err(&priv->pdev->dev,
382 				"Could not deconfigure device resources: err=%d\n",
383 				err);
384 			gve_trigger_reset(priv);
385 		}
386 	}
387 	gve_free_counter_array(priv);
388 	gve_free_notify_blocks(priv);
389 	gve_free_stats_report(priv);
390 	gve_clear_device_resources_ok(priv);
391 }
392 
393 static void gve_add_napi(struct gve_priv *priv, int ntfy_idx)
394 {
395 	struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
396 
397 	netif_napi_add(priv->dev, &block->napi, gve_napi_poll,
398 		       NAPI_POLL_WEIGHT);
399 }
400 
401 static void gve_remove_napi(struct gve_priv *priv, int ntfy_idx)
402 {
403 	struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
404 
405 	netif_napi_del(&block->napi);
406 }
407 
408 static int gve_register_qpls(struct gve_priv *priv)
409 {
410 	int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
411 	int err;
412 	int i;
413 
414 	for (i = 0; i < num_qpls; i++) {
415 		err = gve_adminq_register_page_list(priv, &priv->qpls[i]);
416 		if (err) {
417 			netif_err(priv, drv, priv->dev,
418 				  "failed to register queue page list %d\n",
419 				  priv->qpls[i].id);
420 			/* This failure will trigger a reset - no need to clean
421 			 * up
422 			 */
423 			return err;
424 		}
425 	}
426 	return 0;
427 }
428 
429 static int gve_unregister_qpls(struct gve_priv *priv)
430 {
431 	int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
432 	int err;
433 	int i;
434 
435 	for (i = 0; i < num_qpls; i++) {
436 		err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id);
437 		/* This failure will trigger a reset - no need to clean up */
438 		if (err) {
439 			netif_err(priv, drv, priv->dev,
440 				  "Failed to unregister queue page list %d\n",
441 				  priv->qpls[i].id);
442 			return err;
443 		}
444 	}
445 	return 0;
446 }
447 
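/* Ask the device to create the TX and RX queues, then ring each RX doorbell
 * so the NIC sees the prefilled buffers.
 */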
448 static int gve_create_rings(struct gve_priv *priv)
449 {
450 	int err;
451 	int i;
452 
453 	err = gve_adminq_create_tx_queues(priv, priv->tx_cfg.num_queues);
454 	if (err) {
455 		netif_err(priv, drv, priv->dev, "failed to create %d tx queues\n",
456 			  priv->tx_cfg.num_queues);
457 		/* This failure will trigger a reset - no need to clean
458 		 * up
459 		 */
460 		return err;
461 	}
462 	netif_dbg(priv, drv, priv->dev, "created %d tx queues\n",
463 		  priv->tx_cfg.num_queues);
464 
465 	err = gve_adminq_create_rx_queues(priv, priv->rx_cfg.num_queues);
466 	if (err) {
467 		netif_err(priv, drv, priv->dev, "failed to create %d rx queues\n",
468 			  priv->rx_cfg.num_queues);
469 		/* This failure will trigger a reset - no need to clean
470 		 * up
471 		 */
472 		return err;
473 	}
474 	netif_dbg(priv, drv, priv->dev, "created %d rx queues\n",
475 		  priv->rx_cfg.num_queues);
476 
477 	/* Rx data ring has been prefilled with packet buffers at queue
478 	 * allocation time.
479 	 * Write the doorbell to provide descriptor slots and packet buffers
480 	 * to the NIC.
481 	 */
482 	for (i = 0; i < priv->rx_cfg.num_queues; i++)
483 		gve_rx_write_doorbell(priv, &priv->rx[i]);
484 
485 	return 0;
486 }
487 
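/* Allocate the TX and RX rings, init their stats seqcounts and register a
 * NAPI context for each queue's notification block.
 */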
488 static int gve_alloc_rings(struct gve_priv *priv)
489 {
490 	int ntfy_idx;
491 	int err;
492 	int i;
493 
494 	/* Setup tx rings */
495 	priv->tx = kvzalloc(priv->tx_cfg.num_queues * sizeof(*priv->tx),
496 			    GFP_KERNEL);
497 	if (!priv->tx)
498 		return -ENOMEM;
499 	err = gve_tx_alloc_rings(priv);
500 	if (err)
501 		goto free_tx;
502 	/* Setup rx rings */
503 	priv->rx = kvzalloc(priv->rx_cfg.num_queues * sizeof(*priv->rx),
504 			    GFP_KERNEL);
505 	if (!priv->rx) {
506 		err = -ENOMEM;
507 		goto free_tx_queue;
508 	}
509 	err = gve_rx_alloc_rings(priv);
510 	if (err)
511 		goto free_rx;
	/* Add tx napi & init sync stats */
513 	for (i = 0; i < priv->tx_cfg.num_queues; i++) {
514 		u64_stats_init(&priv->tx[i].statss);
515 		ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
516 		gve_add_napi(priv, ntfy_idx);
517 	}
	/* Add rx napi & init sync stats */
519 	for (i = 0; i < priv->rx_cfg.num_queues; i++) {
520 		u64_stats_init(&priv->rx[i].statss);
521 		ntfy_idx = gve_rx_idx_to_ntfy(priv, i);
522 		gve_add_napi(priv, ntfy_idx);
523 	}
524 
525 	return 0;
526 
527 free_rx:
528 	kvfree(priv->rx);
529 	priv->rx = NULL;
530 free_tx_queue:
531 	gve_tx_free_rings(priv);
532 free_tx:
533 	kvfree(priv->tx);
534 	priv->tx = NULL;
535 	return err;
536 }
537 
538 static int gve_destroy_rings(struct gve_priv *priv)
539 {
540 	int err;
541 
542 	err = gve_adminq_destroy_tx_queues(priv, priv->tx_cfg.num_queues);
543 	if (err) {
544 		netif_err(priv, drv, priv->dev,
545 			  "failed to destroy tx queues\n");
546 		/* This failure will trigger a reset - no need to clean up */
547 		return err;
548 	}
549 	netif_dbg(priv, drv, priv->dev, "destroyed tx queues\n");
550 	err = gve_adminq_destroy_rx_queues(priv, priv->rx_cfg.num_queues);
551 	if (err) {
552 		netif_err(priv, drv, priv->dev,
553 			  "failed to destroy rx queues\n");
554 		/* This failure will trigger a reset - no need to clean up */
555 		return err;
556 	}
557 	netif_dbg(priv, drv, priv->dev, "destroyed rx queues\n");
558 	return 0;
559 }
560 
561 static void gve_free_rings(struct gve_priv *priv)
562 {
563 	int ntfy_idx;
564 	int i;
565 
566 	if (priv->tx) {
567 		for (i = 0; i < priv->tx_cfg.num_queues; i++) {
568 			ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
569 			gve_remove_napi(priv, ntfy_idx);
570 		}
571 		gve_tx_free_rings(priv);
572 		kvfree(priv->tx);
573 		priv->tx = NULL;
574 	}
575 	if (priv->rx) {
576 		for (i = 0; i < priv->rx_cfg.num_queues; i++) {
577 			ntfy_idx = gve_rx_idx_to_ntfy(priv, i);
578 			gve_remove_napi(priv, ntfy_idx);
579 		}
580 		gve_rx_free_rings(priv);
581 		kvfree(priv->rx);
582 		priv->rx = NULL;
583 	}
584 }
585 
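/* Allocate a page and DMA map it for the device, counting any failures */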
586 int gve_alloc_page(struct gve_priv *priv, struct device *dev,
587 		   struct page **page, dma_addr_t *dma,
588 		   enum dma_data_direction dir)
589 {
590 	*page = alloc_page(GFP_KERNEL);
591 	if (!*page) {
592 		priv->page_alloc_fail++;
593 		return -ENOMEM;
594 	}
595 	*dma = dma_map_page(dev, *page, 0, PAGE_SIZE, dir);
596 	if (dma_mapping_error(dev, *dma)) {
597 		priv->dma_mapping_error++;
598 		put_page(*page);
599 		return -ENOMEM;
600 	}
601 	return 0;
602 }
603 
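/* Allocate and DMA map the pages backing a single queue page list */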
604 static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id,
605 				     int pages)
606 {
607 	struct gve_queue_page_list *qpl = &priv->qpls[id];
608 	int err;
609 	int i;
610 
611 	if (pages + priv->num_registered_pages > priv->max_registered_pages) {
612 		netif_err(priv, drv, priv->dev,
613 			  "Reached max number of registered pages %llu > %llu\n",
614 			  pages + priv->num_registered_pages,
615 			  priv->max_registered_pages);
616 		return -EINVAL;
617 	}
618 
619 	qpl->id = id;
620 	qpl->num_entries = 0;
621 	qpl->pages = kvzalloc(pages * sizeof(*qpl->pages), GFP_KERNEL);
622 	/* caller handles clean up */
623 	if (!qpl->pages)
624 		return -ENOMEM;
625 	qpl->page_buses = kvzalloc(pages * sizeof(*qpl->page_buses),
626 				   GFP_KERNEL);
627 	/* caller handles clean up */
628 	if (!qpl->page_buses)
629 		return -ENOMEM;
630 
631 	for (i = 0; i < pages; i++) {
632 		err = gve_alloc_page(priv, &priv->pdev->dev, &qpl->pages[i],
633 				     &qpl->page_buses[i],
634 				     gve_qpl_dma_dir(priv, id));
635 		/* caller handles clean up */
636 		if (err)
637 			return -ENOMEM;
638 		qpl->num_entries++;
639 	}
640 	priv->num_registered_pages += pages;
641 
642 	return 0;
643 }
644 
645 void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
646 		   enum dma_data_direction dir)
647 {
648 	if (!dma_mapping_error(dev, dma))
649 		dma_unmap_page(dev, dma, PAGE_SIZE, dir);
650 	if (page)
651 		put_page(page);
652 }
653 
654 static void gve_free_queue_page_list(struct gve_priv *priv,
655 				     int id)
656 {
657 	struct gve_queue_page_list *qpl = &priv->qpls[id];
658 	int i;
659 
660 	if (!qpl->pages)
661 		return;
662 	if (!qpl->page_buses)
663 		goto free_pages;
664 
665 	for (i = 0; i < qpl->num_entries; i++)
666 		gve_free_page(&priv->pdev->dev, qpl->pages[i],
667 			      qpl->page_buses[i], gve_qpl_dma_dir(priv, id));
668 
669 	kvfree(qpl->page_buses);
670 free_pages:
671 	kvfree(qpl->pages);
672 	priv->num_registered_pages -= qpl->num_entries;
673 }
674 
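/* Allocate the TX and RX queue page lists and the qpl_id_map bitmap used to
 * track which QPL ids are in use.
 */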
675 static int gve_alloc_qpls(struct gve_priv *priv)
676 {
677 	int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
678 	int i, j;
679 	int err;
680 
681 	priv->qpls = kvzalloc(num_qpls * sizeof(*priv->qpls), GFP_KERNEL);
682 	if (!priv->qpls)
683 		return -ENOMEM;
684 
685 	for (i = 0; i < gve_num_tx_qpls(priv); i++) {
686 		err = gve_alloc_queue_page_list(priv, i,
687 						priv->tx_pages_per_qpl);
688 		if (err)
689 			goto free_qpls;
690 	}
691 	for (; i < num_qpls; i++) {
692 		err = gve_alloc_queue_page_list(priv, i,
693 						priv->rx_pages_per_qpl);
694 		if (err)
695 			goto free_qpls;
696 	}
697 
698 	priv->qpl_cfg.qpl_map_size = BITS_TO_LONGS(num_qpls) *
699 				     sizeof(unsigned long) * BITS_PER_BYTE;
700 	priv->qpl_cfg.qpl_id_map = kvzalloc(BITS_TO_LONGS(num_qpls) *
701 					    sizeof(unsigned long), GFP_KERNEL);
702 	if (!priv->qpl_cfg.qpl_id_map) {
703 		err = -ENOMEM;
704 		goto free_qpls;
705 	}
706 
707 	return 0;
708 
709 free_qpls:
710 	for (j = 0; j <= i; j++)
711 		gve_free_queue_page_list(priv, j);
712 	kvfree(priv->qpls);
713 	return err;
714 }
715 
716 static void gve_free_qpls(struct gve_priv *priv)
717 {
718 	int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
719 	int i;
720 
721 	kvfree(priv->qpl_cfg.qpl_id_map);
722 
723 	for (i = 0; i < num_qpls; i++)
724 		gve_free_queue_page_list(priv, i);
725 
726 	kvfree(priv->qpls);
727 }
728 
729 /* Use this to schedule a reset when the device is capable of continuing
730  * to handle other requests in its current state. If it is not, do a reset
731  * in thread instead.
732  */
733 void gve_schedule_reset(struct gve_priv *priv)
734 {
735 	gve_set_do_reset(priv);
736 	queue_work(priv->gve_wq, &priv->service_task);
737 }
738 
739 static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up);
740 static int gve_reset_recovery(struct gve_priv *priv, bool was_up);
741 static void gve_turndown(struct gve_priv *priv);
742 static void gve_turnup(struct gve_priv *priv);
743 
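/* ndo_open - allocate queue page lists and rings, register them with the
 * device and bring traffic up. On failure, reset unless a reset is already
 * in progress.
 */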
744 static int gve_open(struct net_device *dev)
745 {
746 	struct gve_priv *priv = netdev_priv(dev);
747 	int err;
748 
749 	err = gve_alloc_qpls(priv);
750 	if (err)
751 		return err;
752 	err = gve_alloc_rings(priv);
753 	if (err)
754 		goto free_qpls;
755 
756 	err = netif_set_real_num_tx_queues(dev, priv->tx_cfg.num_queues);
757 	if (err)
758 		goto free_rings;
759 	err = netif_set_real_num_rx_queues(dev, priv->rx_cfg.num_queues);
760 	if (err)
761 		goto free_rings;
762 
763 	err = gve_register_qpls(priv);
764 	if (err)
765 		goto reset;
766 	err = gve_create_rings(priv);
767 	if (err)
768 		goto reset;
769 	gve_set_device_rings_ok(priv);
770 
771 	if (gve_get_report_stats(priv))
772 		mod_timer(&priv->stats_report_timer,
773 			  round_jiffies(jiffies +
774 				msecs_to_jiffies(priv->stats_report_timer_period)));
775 
776 	gve_turnup(priv);
777 	queue_work(priv->gve_wq, &priv->service_task);
778 	priv->interface_up_cnt++;
779 	return 0;
780 
781 free_rings:
782 	gve_free_rings(priv);
783 free_qpls:
784 	gve_free_qpls(priv);
785 	return err;
786 
787 reset:
788 	/* This must have been called from a reset due to the rtnl lock
789 	 * so just return at this point.
790 	 */
791 	if (gve_get_reset_in_progress(priv))
792 		return err;
793 	/* Otherwise reset before returning */
794 	gve_reset_and_teardown(priv, true);
795 	/* if this fails there is nothing we can do so just ignore the return */
796 	gve_reset_recovery(priv, false);
797 	/* return the original error */
798 	return err;
799 }
800 
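/* ndo_stop - destroy the rings and unregister the queue page lists, falling
 * back to a reset if the device rejects the teardown.
 */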
801 static int gve_close(struct net_device *dev)
802 {
803 	struct gve_priv *priv = netdev_priv(dev);
804 	int err;
805 
806 	netif_carrier_off(dev);
807 	if (gve_get_device_rings_ok(priv)) {
808 		gve_turndown(priv);
809 		err = gve_destroy_rings(priv);
810 		if (err)
811 			goto err;
812 		err = gve_unregister_qpls(priv);
813 		if (err)
814 			goto err;
815 		gve_clear_device_rings_ok(priv);
816 	}
817 	del_timer_sync(&priv->stats_report_timer);
818 
819 	gve_free_rings(priv);
820 	gve_free_qpls(priv);
821 	priv->interface_down_cnt++;
822 	return 0;
823 
824 err:
825 	/* This must have been called from a reset due to the rtnl lock
826 	 * so just return at this point.
827 	 */
828 	if (gve_get_reset_in_progress(priv))
829 		return err;
830 	/* Otherwise reset before returning */
831 	gve_reset_and_teardown(priv, true);
832 	return gve_reset_recovery(priv, false);
833 }
834 
835 int gve_adjust_queues(struct gve_priv *priv,
836 		      struct gve_queue_config new_rx_config,
837 		      struct gve_queue_config new_tx_config)
838 {
839 	int err;
840 
841 	if (netif_carrier_ok(priv->dev)) {
842 		/* To make this process as simple as possible we teardown the
843 		 * device, set the new configuration, and then bring the device
844 		 * up again.
845 		 */
846 		err = gve_close(priv->dev);
847 		/* we have already tried to reset in close,
848 		 * just fail at this point
849 		 */
850 		if (err)
851 			return err;
852 		priv->tx_cfg = new_tx_config;
853 		priv->rx_cfg = new_rx_config;
854 
855 		err = gve_open(priv->dev);
856 		if (err)
857 			goto err;
858 
859 		return 0;
860 	}
861 	/* Set the config for the next up. */
862 	priv->tx_cfg = new_tx_config;
863 	priv->rx_cfg = new_rx_config;
864 
865 	return 0;
866 err:
867 	netif_err(priv, drv, priv->dev,
868 		  "Adjust queues failed! !!! DISABLING ALL QUEUES !!!\n");
869 	gve_turndown(priv);
870 	return err;
871 }
872 
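/* Quiesce the interface - drop the carrier, disable NAPI and stop TX */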
873 static void gve_turndown(struct gve_priv *priv)
874 {
875 	int idx;
876 
877 	if (netif_carrier_ok(priv->dev))
878 		netif_carrier_off(priv->dev);
879 
880 	if (!gve_get_napi_enabled(priv))
881 		return;
882 
883 	/* Disable napi to prevent more work from coming in */
884 	for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
885 		int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
886 		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
887 
888 		napi_disable(&block->napi);
889 	}
890 	for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
891 		int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
892 		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
893 
894 		napi_disable(&block->napi);
895 	}
896 
897 	/* Stop tx queues */
898 	netif_tx_disable(priv->dev);
899 
900 	gve_clear_napi_enabled(priv);
901 	gve_clear_report_stats(priv);
902 }
903 
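/* Restart TX, re-enable NAPI and unmask the interrupt for every queue */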
904 static void gve_turnup(struct gve_priv *priv)
905 {
906 	int idx;
907 
908 	/* Start the tx queues */
909 	netif_tx_start_all_queues(priv->dev);
910 
911 	/* Enable napi and unmask interrupts for all queues */
912 	for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
913 		int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
914 		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
915 
916 		napi_enable(&block->napi);
917 		iowrite32be(0, gve_irq_doorbell(priv, block));
918 	}
919 	for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
920 		int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
921 		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
922 
923 		napi_enable(&block->napi);
924 		iowrite32be(0, gve_irq_doorbell(priv, block));
925 	}
926 
927 	gve_set_napi_enabled(priv);
928 }
929 
930 static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue)
931 {
932 	struct gve_priv *priv = netdev_priv(dev);
933 
934 	gve_schedule_reset(priv);
935 	priv->tx_timeo_cnt++;
936 }
937 
938 static const struct net_device_ops gve_netdev_ops = {
939 	.ndo_start_xmit		=	gve_tx,
940 	.ndo_open		=	gve_open,
941 	.ndo_stop		=	gve_close,
942 	.ndo_get_stats64	=	gve_get_stats,
943 	.ndo_tx_timeout         =       gve_tx_timeout,
944 };
945 
946 static void gve_handle_status(struct gve_priv *priv, u32 status)
947 {
948 	if (GVE_DEVICE_STATUS_RESET_MASK & status) {
949 		dev_info(&priv->pdev->dev, "Device requested reset.\n");
950 		gve_set_do_reset(priv);
951 	}
952 	if (GVE_DEVICE_STATUS_REPORT_STATS_MASK & status) {
953 		priv->stats_report_trigger_cnt++;
954 		gve_set_do_report_stats(priv);
955 	}
956 }
957 
958 static void gve_handle_reset(struct gve_priv *priv)
959 {
960 	/* A service task will be scheduled at the end of probe to catch any
961 	 * resets that need to happen, and we don't want to reset until
962 	 * probe is done.
963 	 */
964 	if (gve_get_probe_in_progress(priv))
965 		return;
966 
967 	if (gve_get_do_reset(priv)) {
968 		rtnl_lock();
969 		gve_reset(priv, false);
970 		rtnl_unlock();
971 	}
972 }
973 
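/* Fill the stats report shared with the device with per-queue counters */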
974 void gve_handle_report_stats(struct gve_priv *priv)
975 {
	int idx, stats_idx = 0;
	u64 tx_bytes;
977 	unsigned int start = 0;
978 	struct stats *stats = priv->stats_report->stats;
979 
980 	if (!gve_get_report_stats(priv))
981 		return;
982 
983 	be64_add_cpu(&priv->stats_report->written_count, 1);
984 	/* tx stats */
985 	if (priv->tx) {
986 		for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
987 			do {
988 				start = u64_stats_fetch_begin(&priv->tx[idx].statss);
989 				tx_bytes = priv->tx[idx].bytes_done;
990 			} while (u64_stats_fetch_retry(&priv->tx[idx].statss, start));
991 			stats[stats_idx++] = (struct stats) {
992 				.stat_name = cpu_to_be32(TX_WAKE_CNT),
993 				.value = cpu_to_be64(priv->tx[idx].wake_queue),
994 				.queue_id = cpu_to_be32(idx),
995 			};
996 			stats[stats_idx++] = (struct stats) {
997 				.stat_name = cpu_to_be32(TX_STOP_CNT),
998 				.value = cpu_to_be64(priv->tx[idx].stop_queue),
999 				.queue_id = cpu_to_be32(idx),
1000 			};
1001 			stats[stats_idx++] = (struct stats) {
1002 				.stat_name = cpu_to_be32(TX_FRAMES_SENT),
1003 				.value = cpu_to_be64(priv->tx[idx].req),
1004 				.queue_id = cpu_to_be32(idx),
1005 			};
1006 			stats[stats_idx++] = (struct stats) {
1007 				.stat_name = cpu_to_be32(TX_BYTES_SENT),
1008 				.value = cpu_to_be64(tx_bytes),
1009 				.queue_id = cpu_to_be32(idx),
1010 			};
1011 			stats[stats_idx++] = (struct stats) {
1012 				.stat_name = cpu_to_be32(TX_LAST_COMPLETION_PROCESSED),
1013 				.value = cpu_to_be64(priv->tx[idx].done),
1014 				.queue_id = cpu_to_be32(idx),
1015 			};
1016 		}
1017 	}
1018 	/* rx stats */
1019 	if (priv->rx) {
1020 		for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
1021 			stats[stats_idx++] = (struct stats) {
1022 				.stat_name = cpu_to_be32(RX_NEXT_EXPECTED_SEQUENCE),
1023 				.value = cpu_to_be64(priv->rx[idx].desc.seqno),
1024 				.queue_id = cpu_to_be32(idx),
1025 			};
1026 			stats[stats_idx++] = (struct stats) {
1027 				.stat_name = cpu_to_be32(RX_BUFFERS_POSTED),
				.value = cpu_to_be64(priv->rx[idx].fill_cnt),
1029 				.queue_id = cpu_to_be32(idx),
1030 			};
1031 		}
1032 	}
1033 }
1034 
1035 static void gve_handle_link_status(struct gve_priv *priv, bool link_status)
1036 {
1037 	if (!gve_get_napi_enabled(priv))
1038 		return;
1039 
1040 	if (link_status == netif_carrier_ok(priv->dev))
1041 		return;
1042 
1043 	if (link_status) {
1044 		netdev_info(priv->dev, "Device link is up.\n");
1045 		netif_carrier_on(priv->dev);
1046 	} else {
1047 		netdev_info(priv->dev, "Device link is down.\n");
1048 		netif_carrier_off(priv->dev);
1049 	}
1050 }
1051 
1052 /* Handle NIC status register changes, reset requests and report stats */
1053 static void gve_service_task(struct work_struct *work)
1054 {
1055 	struct gve_priv *priv = container_of(work, struct gve_priv,
1056 					     service_task);
1057 	u32 status = ioread32be(&priv->reg_bar0->device_status);
1058 
1059 	gve_handle_status(priv, status);
1060 
1061 	gve_handle_reset(priv);
1062 	gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status);
1063 }
1064 
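/* Set up the admin queue, query the device description and size the TX/RX
 * queue configuration before allocating device resources.
 */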
1065 static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
1066 {
1067 	int num_ntfy;
1068 	int err;
1069 
1070 	/* Set up the adminq */
1071 	err = gve_adminq_alloc(&priv->pdev->dev, priv);
1072 	if (err) {
1073 		dev_err(&priv->pdev->dev,
1074 			"Failed to alloc admin queue: err=%d\n", err);
1075 		return err;
1076 	}
1077 
1078 	if (skip_describe_device)
1079 		goto setup_device;
1080 
1081 	/* Get the initial information we need from the device */
1082 	err = gve_adminq_describe_device(priv);
1083 	if (err) {
1084 		dev_err(&priv->pdev->dev,
1085 			"Could not get device information: err=%d\n", err);
1086 		goto err;
1087 	}
1088 	if (priv->dev->max_mtu > PAGE_SIZE) {
1089 		priv->dev->max_mtu = PAGE_SIZE;
1090 		err = gve_adminq_set_mtu(priv, priv->dev->mtu);
1091 		if (err) {
			dev_err(&priv->pdev->dev, "Could not set mtu\n");
1093 			goto err;
1094 		}
1095 	}
1096 	priv->dev->mtu = priv->dev->max_mtu;
1097 	num_ntfy = pci_msix_vec_count(priv->pdev);
1098 	if (num_ntfy <= 0) {
1099 		dev_err(&priv->pdev->dev,
1100 			"could not count MSI-x vectors: err=%d\n", num_ntfy);
1101 		err = num_ntfy;
1102 		goto err;
1103 	} else if (num_ntfy < GVE_MIN_MSIX) {
1104 		dev_err(&priv->pdev->dev, "gve needs at least %d MSI-x vectors, but only has %d\n",
1105 			GVE_MIN_MSIX, num_ntfy);
1106 		err = -EINVAL;
1107 		goto err;
1108 	}
1109 
1110 	priv->num_registered_pages = 0;
1111 	priv->rx_copybreak = GVE_DEFAULT_RX_COPYBREAK;
1112 	/* gvnic has one Notification Block per MSI-x vector, except for the
1113 	 * management vector
1114 	 */
1115 	priv->num_ntfy_blks = (num_ntfy - 1) & ~0x1;
1116 	priv->mgmt_msix_idx = priv->num_ntfy_blks;
1117 
1118 	priv->tx_cfg.max_queues =
1119 		min_t(int, priv->tx_cfg.max_queues, priv->num_ntfy_blks / 2);
1120 	priv->rx_cfg.max_queues =
1121 		min_t(int, priv->rx_cfg.max_queues, priv->num_ntfy_blks / 2);
1122 
1123 	priv->tx_cfg.num_queues = priv->tx_cfg.max_queues;
1124 	priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;
1125 	if (priv->default_num_queues > 0) {
1126 		priv->tx_cfg.num_queues = min_t(int, priv->default_num_queues,
1127 						priv->tx_cfg.num_queues);
1128 		priv->rx_cfg.num_queues = min_t(int, priv->default_num_queues,
1129 						priv->rx_cfg.num_queues);
1130 	}
1131 
1132 	dev_info(&priv->pdev->dev, "TX queues %d, RX queues %d\n",
1133 		 priv->tx_cfg.num_queues, priv->rx_cfg.num_queues);
1134 	dev_info(&priv->pdev->dev, "Max TX queues %d, Max RX queues %d\n",
1135 		 priv->tx_cfg.max_queues, priv->rx_cfg.max_queues);
1136 
1137 setup_device:
1138 	err = gve_setup_device_resources(priv);
1139 	if (!err)
1140 		return 0;
1141 err:
1142 	gve_adminq_free(&priv->pdev->dev, priv);
1143 	return err;
1144 }
1145 
1146 static void gve_teardown_priv_resources(struct gve_priv *priv)
1147 {
1148 	gve_teardown_device_resources(priv);
1149 	gve_adminq_free(&priv->pdev->dev, priv);
1150 }
1151 
1152 static void gve_trigger_reset(struct gve_priv *priv)
1153 {
1154 	/* Reset the device by releasing the AQ */
1155 	gve_adminq_release(priv);
1156 }
1157 
1158 static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up)
1159 {
1160 	gve_trigger_reset(priv);
1161 	/* With the reset having already happened, close cannot fail */
1162 	if (was_up)
1163 		gve_close(priv->dev);
1164 	gve_teardown_priv_resources(priv);
1165 }
1166 
1167 static int gve_reset_recovery(struct gve_priv *priv, bool was_up)
1168 {
1169 	int err;
1170 
1171 	err = gve_init_priv(priv, true);
1172 	if (err)
1173 		goto err;
1174 	if (was_up) {
1175 		err = gve_open(priv->dev);
1176 		if (err)
1177 			goto err;
1178 	}
1179 	return 0;
1180 err:
1181 	dev_err(&priv->pdev->dev, "Reset failed! !!! DISABLING ALL QUEUES !!!\n");
1182 	gve_turndown(priv);
1183 	return err;
1184 }
1185 
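/* Reset the device - tear it down (gracefully if attempt_teardown) and
 * bring it back up if it was previously up.
 */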
1186 int gve_reset(struct gve_priv *priv, bool attempt_teardown)
1187 {
1188 	bool was_up = netif_carrier_ok(priv->dev);
1189 	int err;
1190 
1191 	dev_info(&priv->pdev->dev, "Performing reset\n");
1192 	gve_clear_do_reset(priv);
1193 	gve_set_reset_in_progress(priv);
1194 	/* If we aren't attempting to teardown normally, just go turndown and
1195 	 * reset right away.
1196 	 */
1197 	if (!attempt_teardown) {
1198 		gve_turndown(priv);
1199 		gve_reset_and_teardown(priv, was_up);
1200 	} else {
1201 		/* Otherwise attempt to close normally */
1202 		if (was_up) {
1203 			err = gve_close(priv->dev);
1204 			/* If that fails reset as we did above */
1205 			if (err)
1206 				gve_reset_and_teardown(priv, was_up);
1207 		}
1208 		/* Clean up any remaining resources */
1209 		gve_teardown_priv_resources(priv);
1210 	}
1211 
1212 	/* Set it all back up */
1213 	err = gve_reset_recovery(priv, was_up);
1214 	gve_clear_reset_in_progress(priv);
1215 	priv->reset_cnt++;
1216 	priv->interface_up_cnt = 0;
1217 	priv->interface_down_cnt = 0;
1218 	priv->stats_report_trigger_cnt = 0;
1219 	return err;
1220 }
1221 
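/* Report the driver version to the device, one byte at a time */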
1222 static void gve_write_version(u8 __iomem *driver_version_register)
1223 {
1224 	const char *c = gve_version_prefix;
1225 
1226 	while (*c) {
1227 		writeb(*c, driver_version_register);
1228 		c++;
1229 	}
1230 
1231 	c = gve_version_str;
1232 	while (*c) {
1233 		writeb(*c, driver_version_register);
1234 		c++;
1235 	}
1236 	writeb('\n', driver_version_register);
1237 }
1238 
1239 static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1240 {
1241 	int max_tx_queues, max_rx_queues;
1242 	struct net_device *dev;
1243 	__be32 __iomem *db_bar;
1244 	struct gve_registers __iomem *reg_bar;
1245 	struct gve_priv *priv;
1246 	int err;
1247 
1248 	err = pci_enable_device(pdev);
1249 	if (err)
1250 		return -ENXIO;
1251 
1252 	err = pci_request_regions(pdev, "gvnic-cfg");
1253 	if (err)
1254 		goto abort_with_enabled;
1255 
1256 	pci_set_master(pdev);
1257 
1258 	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
1259 	if (err) {
1260 		dev_err(&pdev->dev, "Failed to set dma mask: err=%d\n", err);
1261 		goto abort_with_pci_region;
1262 	}
1263 
1264 	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
1265 	if (err) {
1266 		dev_err(&pdev->dev,
1267 			"Failed to set consistent dma mask: err=%d\n", err);
1268 		goto abort_with_pci_region;
1269 	}
1270 
1271 	reg_bar = pci_iomap(pdev, GVE_REGISTER_BAR, 0);
1272 	if (!reg_bar) {
1273 		dev_err(&pdev->dev, "Failed to map pci bar!\n");
1274 		err = -ENOMEM;
1275 		goto abort_with_pci_region;
1276 	}
1277 
1278 	db_bar = pci_iomap(pdev, GVE_DOORBELL_BAR, 0);
1279 	if (!db_bar) {
1280 		dev_err(&pdev->dev, "Failed to map doorbell bar!\n");
1281 		err = -ENOMEM;
1282 		goto abort_with_reg_bar;
1283 	}
1284 
1285 	gve_write_version(&reg_bar->driver_version);
1286 	/* Get max queues to alloc etherdev */
	max_tx_queues = ioread32be(&reg_bar->max_tx_queues);
	max_rx_queues = ioread32be(&reg_bar->max_rx_queues);
1289 	/* Alloc and setup the netdev and priv */
1290 	dev = alloc_etherdev_mqs(sizeof(*priv), max_tx_queues, max_rx_queues);
	if (!dev) {
		dev_err(&pdev->dev, "could not allocate netdev\n");
		err = -ENOMEM;
		goto abort_with_db_bar;
	}
1295 	SET_NETDEV_DEV(dev, &pdev->dev);
1296 	pci_set_drvdata(pdev, dev);
1297 	dev->ethtool_ops = &gve_ethtool_ops;
1298 	dev->netdev_ops = &gve_netdev_ops;
1299 	/* advertise features */
1300 	dev->hw_features = NETIF_F_HIGHDMA;
1301 	dev->hw_features |= NETIF_F_SG;
1302 	dev->hw_features |= NETIF_F_HW_CSUM;
1303 	dev->hw_features |= NETIF_F_TSO;
1304 	dev->hw_features |= NETIF_F_TSO6;
1305 	dev->hw_features |= NETIF_F_TSO_ECN;
1306 	dev->hw_features |= NETIF_F_RXCSUM;
1307 	dev->hw_features |= NETIF_F_RXHASH;
1308 	dev->features = dev->hw_features;
1309 	dev->watchdog_timeo = 5 * HZ;
1310 	dev->min_mtu = ETH_MIN_MTU;
1311 	netif_carrier_off(dev);
1312 
1313 	priv = netdev_priv(dev);
1314 	priv->dev = dev;
1315 	priv->pdev = pdev;
1316 	priv->msg_enable = DEFAULT_MSG_LEVEL;
1317 	priv->reg_bar0 = reg_bar;
1318 	priv->db_bar2 = db_bar;
1319 	priv->service_task_flags = 0x0;
1320 	priv->state_flags = 0x0;
1321 	priv->ethtool_flags = 0x0;
1322 
1323 	gve_set_probe_in_progress(priv);
1324 	priv->gve_wq = alloc_ordered_workqueue("gve", 0);
1325 	if (!priv->gve_wq) {
		dev_err(&pdev->dev, "Could not allocate workqueue\n");
1327 		err = -ENOMEM;
1328 		goto abort_with_netdev;
1329 	}
1330 	INIT_WORK(&priv->service_task, gve_service_task);
1331 	INIT_WORK(&priv->stats_report_task, gve_stats_report_task);
1332 	priv->tx_cfg.max_queues = max_tx_queues;
1333 	priv->rx_cfg.max_queues = max_rx_queues;
1334 
1335 	err = gve_init_priv(priv, false);
1336 	if (err)
1337 		goto abort_with_wq;
1338 
1339 	err = register_netdev(dev);
1340 	if (err)
1341 		goto abort_with_wq;
1342 
1343 	dev_info(&pdev->dev, "GVE version %s\n", gve_version_str);
1344 	gve_clear_probe_in_progress(priv);
1345 	queue_work(priv->gve_wq, &priv->service_task);
1346 	return 0;
1347 
1348 abort_with_wq:
1349 	destroy_workqueue(priv->gve_wq);
1350 
1351 abort_with_netdev:
1352 	free_netdev(dev);
1353 
1354 abort_with_db_bar:
1355 	pci_iounmap(pdev, db_bar);
1356 
1357 abort_with_reg_bar:
1358 	pci_iounmap(pdev, reg_bar);
1359 
1360 abort_with_pci_region:
1361 	pci_release_regions(pdev);
1362 
abort_with_enabled:
	pci_disable_device(pdev);
	return err;
1366 }
1367 
1368 static void gve_remove(struct pci_dev *pdev)
1369 {
1370 	struct net_device *netdev = pci_get_drvdata(pdev);
1371 	struct gve_priv *priv = netdev_priv(netdev);
1372 	__be32 __iomem *db_bar = priv->db_bar2;
1373 	void __iomem *reg_bar = priv->reg_bar0;
1374 
1375 	unregister_netdev(netdev);
1376 	gve_teardown_priv_resources(priv);
1377 	destroy_workqueue(priv->gve_wq);
1378 	free_netdev(netdev);
1379 	pci_iounmap(pdev, db_bar);
1380 	pci_iounmap(pdev, reg_bar);
1381 	pci_release_regions(pdev);
1382 	pci_disable_device(pdev);
1383 }
1384 
1385 static const struct pci_device_id gve_id_table[] = {
1386 	{ PCI_DEVICE(PCI_VENDOR_ID_GOOGLE, PCI_DEV_ID_GVNIC) },
1387 	{ }
1388 };
1389 
1390 static struct pci_driver gvnic_driver = {
1391 	.name		= "gvnic",
1392 	.id_table	= gve_id_table,
1393 	.probe		= gve_probe,
1394 	.remove		= gve_remove,
1395 };
1396 
1397 module_pci_driver(gvnic_driver);
1398 
1399 MODULE_DEVICE_TABLE(pci, gve_id_table);
1400 MODULE_AUTHOR("Google, Inc.");
1401 MODULE_DESCRIPTION("gVNIC Driver");
1402 MODULE_LICENSE("Dual MIT/GPL");
1403 MODULE_VERSION(GVE_VERSION);
1404