1 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
2 /* Google virtual Ethernet (gve) driver
3  *
4  * Copyright (C) 2015-2021 Google, Inc.
5  */
6 
7 #include <linux/cpumask.h>
8 #include <linux/etherdevice.h>
9 #include <linux/interrupt.h>
10 #include <linux/module.h>
11 #include <linux/pci.h>
12 #include <linux/sched.h>
13 #include <linux/timer.h>
14 #include <linux/workqueue.h>
15 #include <net/sch_generic.h>
16 #include "gve.h"
17 #include "gve_dqo.h"
18 #include "gve_adminq.h"
19 #include "gve_register.h"
20 
21 #define GVE_DEFAULT_RX_COPYBREAK	(256)
22 
23 #define DEFAULT_MSG_LEVEL	(NETIF_MSG_DRV | NETIF_MSG_LINK)
24 #define GVE_VERSION		"1.0.0"
25 #define GVE_VERSION_PREFIX	"GVE-"
26 
27 /* Minimum amount of time between queue kicks in msec (10 seconds) */
28 #define MIN_TX_TIMEOUT_GAP (1000 * 10)
29 
30 const char gve_version_str[] = GVE_VERSION;
31 static const char gve_version_prefix[] = GVE_VERSION_PREFIX;
32 
33 static netdev_tx_t gve_start_xmit(struct sk_buff *skb, struct net_device *dev)
34 {
35 	struct gve_priv *priv = netdev_priv(dev);
36 
37 	if (gve_is_gqi(priv))
38 		return gve_tx(skb, dev);
39 	else
40 		return gve_tx_dqo(skb, dev);
41 }
42 
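/* ndo_get_stats64: aggregate per-ring counters into the netdev stats.
 * Each ring keeps its own u64_stats counters, so each ring is read under
 * the u64_stats fetch/retry loop to get a consistent per-ring snapshot.
 */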
43 static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
44 {
45 	struct gve_priv *priv = netdev_priv(dev);
46 	unsigned int start;
47 	u64 packets, bytes;
48 	int ring;
49 
50 	if (priv->rx) {
51 		for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
52 			do {
53 				start =
54 				  u64_stats_fetch_begin(&priv->rx[ring].statss);
55 				packets = priv->rx[ring].rpackets;
56 				bytes = priv->rx[ring].rbytes;
57 			} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
58 						       start));
59 			s->rx_packets += packets;
60 			s->rx_bytes += bytes;
61 		}
62 	}
63 	if (priv->tx) {
64 		for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) {
65 			do {
66 				start =
67 				  u64_stats_fetch_begin(&priv->tx[ring].statss);
68 				packets = priv->tx[ring].pkt_done;
69 				bytes = priv->tx[ring].bytes_done;
70 			} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
71 						       start));
72 			s->tx_packets += packets;
73 			s->tx_bytes += bytes;
74 		}
75 	}
76 }
77 
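/* Allocate the DMA-coherent event counter array shared with the device; its
 * bus address is handed over in gve_adminq_configure_device_resources() and
 * the device updates the counters, e.g. TX completion counts read back via
 * gve_tx_load_event_counter().
 */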
78 static int gve_alloc_counter_array(struct gve_priv *priv)
79 {
80 	priv->counter_array =
81 		dma_alloc_coherent(&priv->pdev->dev,
82 				   priv->num_event_counters *
83 				   sizeof(*priv->counter_array),
84 				   &priv->counter_array_bus, GFP_KERNEL);
85 	if (!priv->counter_array)
86 		return -ENOMEM;
87 
88 	return 0;
89 }
90 
91 static void gve_free_counter_array(struct gve_priv *priv)
92 {
93 	if (!priv->counter_array)
94 		return;
95 
96 	dma_free_coherent(&priv->pdev->dev,
97 			  priv->num_event_counters *
98 			  sizeof(*priv->counter_array),
99 			  priv->counter_array, priv->counter_array_bus);
100 	priv->counter_array = NULL;
101 }
102 
103 /* NIC requests to report stats */
104 static void gve_stats_report_task(struct work_struct *work)
105 {
106 	struct gve_priv *priv = container_of(work, struct gve_priv,
107 					     stats_report_task);
108 	if (gve_get_do_report_stats(priv)) {
109 		gve_handle_report_stats(priv);
110 		gve_clear_do_report_stats(priv);
111 	}
112 }
113 
114 static void gve_stats_report_schedule(struct gve_priv *priv)
115 {
116 	if (!gve_get_probe_in_progress(priv) &&
117 	    !gve_get_reset_in_progress(priv)) {
118 		gve_set_do_report_stats(priv);
119 		queue_work(priv->gve_wq, &priv->stats_report_task);
120 	}
121 }
122 
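/* Periodic timer: re-arm ourselves and schedule the stats-report work so a
 * fresh report is produced every stats_report_timer_period msecs.
 */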
123 static void gve_stats_report_timer(struct timer_list *t)
124 {
125 	struct gve_priv *priv = from_timer(priv, t, stats_report_timer);
126 
127 	mod_timer(&priv->stats_report_timer,
128 		  round_jiffies(jiffies +
129 		  msecs_to_jiffies(priv->stats_report_timer_period)));
130 	gve_stats_report_schedule(priv);
131 }
132 
133 static int gve_alloc_stats_report(struct gve_priv *priv)
134 {
135 	int tx_stats_num, rx_stats_num;
136 
137 	tx_stats_num = (GVE_TX_STATS_REPORT_NUM + NIC_TX_STATS_REPORT_NUM) *
138 		       priv->tx_cfg.num_queues;
139 	rx_stats_num = (GVE_RX_STATS_REPORT_NUM + NIC_RX_STATS_REPORT_NUM) *
140 		       priv->rx_cfg.num_queues;
141 	priv->stats_report_len = struct_size(priv->stats_report, stats,
142 					     tx_stats_num + rx_stats_num);
143 	priv->stats_report =
144 		dma_alloc_coherent(&priv->pdev->dev, priv->stats_report_len,
145 				   &priv->stats_report_bus, GFP_KERNEL);
146 	if (!priv->stats_report)
147 		return -ENOMEM;
148 	/* Set up timer for the report-stats task */
149 	timer_setup(&priv->stats_report_timer, gve_stats_report_timer, 0);
150 	priv->stats_report_timer_period = GVE_STATS_REPORT_TIMER_PERIOD;
151 	return 0;
152 }
153 
154 static void gve_free_stats_report(struct gve_priv *priv)
155 {
156 	if (!priv->stats_report)
157 		return;
158 
159 	del_timer_sync(&priv->stats_report_timer);
160 	dma_free_coherent(&priv->pdev->dev, priv->stats_report_len,
161 			  priv->stats_report, priv->stats_report_bus);
162 	priv->stats_report = NULL;
163 }
164 
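/* Management vector interrupt: the device signals status changes here
 * (reset request, link change, stats request). Defer the actual handling to
 * the service task, which reads the device status register.
 */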
165 static irqreturn_t gve_mgmnt_intr(int irq, void *arg)
166 {
167 	struct gve_priv *priv = arg;
168 
169 	queue_work(priv->gve_wq, &priv->service_task);
170 	return IRQ_HANDLED;
171 }
172 
173 static irqreturn_t gve_intr(int irq, void *arg)
174 {
175 	struct gve_notify_block *block = arg;
176 	struct gve_priv *priv = block->priv;
177 
178 	iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block));
179 	napi_schedule_irqoff(&block->napi);
180 	return IRQ_HANDLED;
181 }
182 
183 static irqreturn_t gve_intr_dqo(int irq, void *arg)
184 {
185 	struct gve_notify_block *block = arg;
186 
187 	/* Interrupts are automatically masked */
188 	napi_schedule_irqoff(&block->napi);
189 	return IRQ_HANDLED;
190 }
191 
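/* NAPI poll used for the GQI queue formats. The hard IRQ handler masked this
 * vector's doorbell; here we poll TX and RX work, then ACK the IRQ when done
 * and re-check for work that raced with the ACK before re-enabling it.
 */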
192 static int gve_napi_poll(struct napi_struct *napi, int budget)
193 {
194 	struct gve_notify_block *block;
195 	__be32 __iomem *irq_doorbell;
196 	bool reschedule = false;
197 	struct gve_priv *priv;
198 	int work_done = 0;
199 
200 	block = container_of(napi, struct gve_notify_block, napi);
201 	priv = block->priv;
202 
203 	if (block->tx)
204 		reschedule |= gve_tx_poll(block, budget);
205 	if (block->rx) {
206 		work_done = gve_rx_poll(block, budget);
207 		reschedule |= work_done == budget;
208 	}
209 
210 	if (reschedule)
211 		return budget;
212 
213 	/* Complete processing - don't unmask irq if busy polling is enabled */
214 	if (likely(napi_complete_done(napi, work_done))) {
215 		irq_doorbell = gve_irq_doorbell(priv, block);
216 		iowrite32be(GVE_IRQ_ACK | GVE_IRQ_EVENT, irq_doorbell);
217 
218 		/* Ensure the IRQ ACK write is visible before we check for
219 		 * pending work; any updates the queue has posted will then be seen.
220 		 */
221 		mb();
222 
223 		if (block->tx)
224 			reschedule |= gve_tx_clean_pending(priv, block->tx);
225 		if (block->rx)
226 			reschedule |= gve_rx_work_pending(block->rx);
227 
228 		if (reschedule && napi_reschedule(napi))
229 			iowrite32be(GVE_IRQ_MASK, irq_doorbell);
230 	}
231 	return work_done;
232 }
233 
234 static int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
235 {
236 	struct gve_notify_block *block =
237 		container_of(napi, struct gve_notify_block, napi);
238 	struct gve_priv *priv = block->priv;
239 	bool reschedule = false;
240 	int work_done = 0;
241 
242 	/* Clear PCI MSI-X Pending Bit Array (PBA)
243 	 *
244 	 * This bit is set if an interrupt event occurs while the vector is
245 	 * masked. If this bit is set and we reenable the interrupt, it will
246 	 * fire again. Since we're just about to poll the queue state, we don't
247 	 * need it to fire again.
248 	 *
249 	 * Under high softirq load, it's possible that the interrupt condition
250 	 * is triggered twice before we get the chance to process it.
251 	 */
252 	gve_write_irq_doorbell_dqo(priv, block,
253 				   GVE_ITR_NO_UPDATE_DQO | GVE_ITR_CLEAR_PBA_BIT_DQO);
254 
255 	if (block->tx)
256 		reschedule |= gve_tx_poll_dqo(block, /*do_clean=*/true);
257 
258 	if (block->rx) {
259 		work_done = gve_rx_poll_dqo(block, budget);
260 		reschedule |= work_done == budget;
261 	}
262 
263 	if (reschedule)
264 		return budget;
265 
266 	if (likely(napi_complete_done(napi, work_done))) {
267 		/* Enable interrupts again.
268 		 *
269 		 * We don't need to repoll afterwards because HW supports the
270 		 * PCI MSI-X PBA feature.
271 		 *
272 		 * Another interrupt would be triggered if a new event came in
273 		 * since the last one.
274 		 */
275 		gve_write_irq_doorbell_dqo(priv, block,
276 					   GVE_ITR_NO_UPDATE_DQO | GVE_ITR_ENABLE_BIT_DQO);
277 	}
278 
279 	return work_done;
280 }
281 
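/* Allocate one MSI-X vector per notification block plus one management
 * vector. If fewer vectors are granted, shrink the number of notification
 * blocks (and therefore the TX/RX queue limits) to fit, then request the
 * IRQs and spread their affinity across the online CPUs.
 */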
282 static int gve_alloc_notify_blocks(struct gve_priv *priv)
283 {
284 	int num_vecs_requested = priv->num_ntfy_blks + 1;
285 	char *name = priv->dev->name;
286 	unsigned int active_cpus;
287 	int vecs_enabled;
288 	int i, j;
289 	int err;
290 
291 	priv->msix_vectors = kvcalloc(num_vecs_requested,
292 				      sizeof(*priv->msix_vectors), GFP_KERNEL);
293 	if (!priv->msix_vectors)
294 		return -ENOMEM;
295 	for (i = 0; i < num_vecs_requested; i++)
296 		priv->msix_vectors[i].entry = i;
297 	vecs_enabled = pci_enable_msix_range(priv->pdev, priv->msix_vectors,
298 					     GVE_MIN_MSIX, num_vecs_requested);
299 	if (vecs_enabled < 0) {
300 		dev_err(&priv->pdev->dev, "Could not enable min msix %d/%d\n",
301 			GVE_MIN_MSIX, vecs_enabled);
302 		err = vecs_enabled;
303 		goto abort_with_msix_vectors;
304 	}
305 	if (vecs_enabled != num_vecs_requested) {
306 		int new_num_ntfy_blks = (vecs_enabled - 1) & ~0x1;
307 		int vecs_per_type = new_num_ntfy_blks / 2;
308 		int vecs_left = new_num_ntfy_blks % 2;
309 
310 		priv->num_ntfy_blks = new_num_ntfy_blks;
311 		priv->mgmt_msix_idx = priv->num_ntfy_blks;
312 		priv->tx_cfg.max_queues = min_t(int, priv->tx_cfg.max_queues,
313 						vecs_per_type);
314 		priv->rx_cfg.max_queues = min_t(int, priv->rx_cfg.max_queues,
315 						vecs_per_type + vecs_left);
316 		dev_err(&priv->pdev->dev,
317 			"Could not enable desired msix, only enabled %d, adjusting tx max queues to %d, and rx max queues to %d\n",
318 			vecs_enabled, priv->tx_cfg.max_queues,
319 			priv->rx_cfg.max_queues);
320 		if (priv->tx_cfg.num_queues > priv->tx_cfg.max_queues)
321 			priv->tx_cfg.num_queues = priv->tx_cfg.max_queues;
322 		if (priv->rx_cfg.num_queues > priv->rx_cfg.max_queues)
323 			priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;
324 	}
325 	/* Half the notification blocks go to TX and half to RX */
326 	active_cpus = min_t(int, priv->num_ntfy_blks / 2, num_online_cpus());
327 
328 	/* Setup Management Vector - the last vector */
329 	snprintf(priv->mgmt_msix_name, sizeof(priv->mgmt_msix_name), "%s-mgmnt",
330 		 name);
331 	err = request_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector,
332 			  gve_mgmnt_intr, 0, priv->mgmt_msix_name, priv);
333 	if (err) {
334 		dev_err(&priv->pdev->dev, "Did not receive management vector.\n");
335 		goto abort_with_msix_enabled;
336 	}
337 	priv->ntfy_blocks =
338 		dma_alloc_coherent(&priv->pdev->dev,
339 				   priv->num_ntfy_blks *
340 				   sizeof(*priv->ntfy_blocks),
341 				   &priv->ntfy_block_bus, GFP_KERNEL);
342 	if (!priv->ntfy_blocks) {
343 		err = -ENOMEM;
344 		goto abort_with_mgmt_vector;
345 	}
346 	/* Setup the other blocks - the first n-1 vectors */
347 	for (i = 0; i < priv->num_ntfy_blks; i++) {
348 		struct gve_notify_block *block = &priv->ntfy_blocks[i];
349 		int msix_idx = i;
350 
351 		snprintf(block->name, sizeof(block->name), "%s-ntfy-block.%d",
352 			 name, i);
353 		block->priv = priv;
354 		err = request_irq(priv->msix_vectors[msix_idx].vector,
355 				  gve_is_gqi(priv) ? gve_intr : gve_intr_dqo,
356 				  0, block->name, block);
357 		if (err) {
358 			dev_err(&priv->pdev->dev,
359 				"Failed to receive msix vector %d\n", i);
360 			goto abort_with_some_ntfy_blocks;
361 		}
362 		irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
363 				      get_cpu_mask(i % active_cpus));
364 	}
365 	return 0;
366 abort_with_some_ntfy_blocks:
367 	for (j = 0; j < i; j++) {
368 		struct gve_notify_block *block = &priv->ntfy_blocks[j];
369 		int msix_idx = j;
370 
371 		irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
372 				      NULL);
373 		free_irq(priv->msix_vectors[msix_idx].vector, block);
374 	}
375 	dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks *
376 			  sizeof(*priv->ntfy_blocks),
377 			  priv->ntfy_blocks, priv->ntfy_block_bus);
378 	priv->ntfy_blocks = NULL;
379 abort_with_mgmt_vector:
380 	free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
381 abort_with_msix_enabled:
382 	pci_disable_msix(priv->pdev);
383 abort_with_msix_vectors:
384 	kvfree(priv->msix_vectors);
385 	priv->msix_vectors = NULL;
386 	return err;
387 }
388 
389 static void gve_free_notify_blocks(struct gve_priv *priv)
390 {
391 	int i;
392 
393 	if (!priv->msix_vectors)
394 		return;
395 
396 	/* Free the irqs */
397 	for (i = 0; i < priv->num_ntfy_blks; i++) {
398 		struct gve_notify_block *block = &priv->ntfy_blocks[i];
399 		int msix_idx = i;
400 
401 		irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
402 				      NULL);
403 		free_irq(priv->msix_vectors[msix_idx].vector, block);
404 	}
405 	free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
406 	dma_free_coherent(&priv->pdev->dev,
407 			  priv->num_ntfy_blks * sizeof(*priv->ntfy_blocks),
408 			  priv->ntfy_blocks, priv->ntfy_block_bus);
409 	priv->ntfy_blocks = NULL;
410 	pci_disable_msix(priv->pdev);
411 	kvfree(priv->msix_vectors);
412 	priv->msix_vectors = NULL;
413 }
414 
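/* Allocate the resources shared with the device (event counters, notification
 * blocks, stats report) and hand their bus addresses to the device over the
 * admin queue. For the DQO-RDA format a packet type LUT is also fetched here.
 */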
415 static int gve_setup_device_resources(struct gve_priv *priv)
416 {
417 	int err;
418 
419 	err = gve_alloc_counter_array(priv);
420 	if (err)
421 		return err;
422 	err = gve_alloc_notify_blocks(priv);
423 	if (err)
424 		goto abort_with_counter;
425 	err = gve_alloc_stats_report(priv);
426 	if (err)
427 		goto abort_with_ntfy_blocks;
428 	err = gve_adminq_configure_device_resources(priv,
429 						    priv->counter_array_bus,
430 						    priv->num_event_counters,
431 						    priv->ntfy_block_bus,
432 						    priv->num_ntfy_blks);
433 	if (unlikely(err)) {
434 		dev_err(&priv->pdev->dev,
435 			"could not setup device_resources: err=%d\n", err);
436 		err = -ENXIO;
437 		goto abort_with_stats_report;
438 	}
439 
440 	if (priv->queue_format == GVE_DQO_RDA_FORMAT) {
441 		priv->ptype_lut_dqo = kvzalloc(sizeof(*priv->ptype_lut_dqo),
442 					       GFP_KERNEL);
443 		if (!priv->ptype_lut_dqo) {
444 			err = -ENOMEM;
445 			goto abort_with_stats_report;
446 		}
447 		err = gve_adminq_get_ptype_map_dqo(priv, priv->ptype_lut_dqo);
448 		if (err) {
449 			dev_err(&priv->pdev->dev,
450 				"Failed to get ptype map: err=%d\n", err);
451 			goto abort_with_ptype_lut;
452 		}
453 	}
454 
455 	err = gve_adminq_report_stats(priv, priv->stats_report_len,
456 				      priv->stats_report_bus,
457 				      GVE_STATS_REPORT_TIMER_PERIOD);
458 	if (err)
459 		dev_err(&priv->pdev->dev,
460 			"Failed to report stats: err=%d\n", err);
461 	gve_set_device_resources_ok(priv);
462 	return 0;
463 
464 abort_with_ptype_lut:
465 	kvfree(priv->ptype_lut_dqo);
466 	priv->ptype_lut_dqo = NULL;
467 abort_with_stats_report:
468 	gve_free_stats_report(priv);
469 abort_with_ntfy_blocks:
470 	gve_free_notify_blocks(priv);
471 abort_with_counter:
472 	gve_free_counter_array(priv);
473 
474 	return err;
475 }
476 
477 static void gve_trigger_reset(struct gve_priv *priv);
478 
479 static void gve_teardown_device_resources(struct gve_priv *priv)
480 {
481 	int err;
482 
483 	/* Tell device its resources are being freed */
484 	if (gve_get_device_resources_ok(priv)) {
485 		/* detach the stats report */
486 		err = gve_adminq_report_stats(priv, 0, 0x0, GVE_STATS_REPORT_TIMER_PERIOD);
487 		if (err) {
488 			dev_err(&priv->pdev->dev,
489 				"Failed to detach stats report: err=%d\n", err);
490 			gve_trigger_reset(priv);
491 		}
492 		err = gve_adminq_deconfigure_device_resources(priv);
493 		if (err) {
494 			dev_err(&priv->pdev->dev,
495 				"Could not deconfigure device resources: err=%d\n",
496 				err);
497 			gve_trigger_reset(priv);
498 		}
499 	}
500 
501 	kvfree(priv->ptype_lut_dqo);
502 	priv->ptype_lut_dqo = NULL;
503 
504 	gve_free_counter_array(priv);
505 	gve_free_notify_blocks(priv);
506 	gve_free_stats_report(priv);
507 	gve_clear_device_resources_ok(priv);
508 }
509 
510 static void gve_add_napi(struct gve_priv *priv, int ntfy_idx,
511 			 int (*gve_poll)(struct napi_struct *, int))
512 {
513 	struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
514 
515 	netif_napi_add(priv->dev, &block->napi, gve_poll,
516 		       NAPI_POLL_WEIGHT);
517 }
518 
519 static void gve_remove_napi(struct gve_priv *priv, int ntfy_idx)
520 {
521 	struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
522 
523 	netif_napi_del(&block->napi);
524 }
525 
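/* Register every queue page list with the device over the admin queue. QPLs
 * only exist for the QPL queue formats; with raw addressing none are
 * allocated (see gve_alloc_qpls()).
 */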
526 static int gve_register_qpls(struct gve_priv *priv)
527 {
528 	int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
529 	int err;
530 	int i;
531 
532 	for (i = 0; i < num_qpls; i++) {
533 		err = gve_adminq_register_page_list(priv, &priv->qpls[i]);
534 		if (err) {
535 			netif_err(priv, drv, priv->dev,
536 				  "failed to register queue page list %d\n",
537 				  priv->qpls[i].id);
538 			/* This failure will trigger a reset - no need to clean
539 			 * up
540 			 */
541 			return err;
542 		}
543 	}
544 	return 0;
545 }
546 
547 static int gve_unregister_qpls(struct gve_priv *priv)
548 {
549 	int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
550 	int err;
551 	int i;
552 
553 	for (i = 0; i < num_qpls; i++) {
554 		err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id);
555 		/* This failure will trigger a reset - no need to clean up */
556 		if (err) {
557 			netif_err(priv, drv, priv->dev,
558 				  "Failed to unregister queue page list %d\n",
559 				  priv->qpls[i].id);
560 			return err;
561 		}
562 	}
563 	return 0;
564 }
565 
566 static int gve_create_rings(struct gve_priv *priv)
567 {
568 	int err;
569 	int i;
570 
571 	err = gve_adminq_create_tx_queues(priv, priv->tx_cfg.num_queues);
572 	if (err) {
573 		netif_err(priv, drv, priv->dev, "failed to create %d tx queues\n",
574 			  priv->tx_cfg.num_queues);
575 		/* This failure will trigger a reset - no need to clean
576 		 * up
577 		 */
578 		return err;
579 	}
580 	netif_dbg(priv, drv, priv->dev, "created %d tx queues\n",
581 		  priv->tx_cfg.num_queues);
582 
583 	err = gve_adminq_create_rx_queues(priv, priv->rx_cfg.num_queues);
584 	if (err) {
585 		netif_err(priv, drv, priv->dev, "failed to create %d rx queues\n",
586 			  priv->rx_cfg.num_queues);
587 		/* This failure will trigger a reset - no need to clean
588 		 * up
589 		 */
590 		return err;
591 	}
592 	netif_dbg(priv, drv, priv->dev, "created %d rx queues\n",
593 		  priv->rx_cfg.num_queues);
594 
595 	if (gve_is_gqi(priv)) {
596 		/* Rx data ring has been prefilled with packet buffers at queue
597 		 * allocation time.
598 		 *
599 		 * Write the doorbell to provide descriptor slots and packet
600 		 * buffers to the NIC.
601 		 */
602 		for (i = 0; i < priv->rx_cfg.num_queues; i++)
603 			gve_rx_write_doorbell(priv, &priv->rx[i]);
604 	} else {
605 		for (i = 0; i < priv->rx_cfg.num_queues; i++) {
606 			/* Post buffers and ring doorbell. */
607 			gve_rx_post_buffers_dqo(&priv->rx[i]);
608 		}
609 	}
610 
611 	return 0;
612 }
613 
614 static void add_napi_init_sync_stats(struct gve_priv *priv,
615 				     int (*napi_poll)(struct napi_struct *napi,
616 						      int budget))
617 {
618 	int i;
619 
620 	/* Add tx napi & init sync stats */
621 	for (i = 0; i < priv->tx_cfg.num_queues; i++) {
622 		int ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
623 
624 		u64_stats_init(&priv->tx[i].statss);
625 		priv->tx[i].ntfy_id = ntfy_idx;
626 		gve_add_napi(priv, ntfy_idx, napi_poll);
627 	}
628 	/* Add rx napi & init sync stats */
629 	for (i = 0; i < priv->rx_cfg.num_queues; i++) {
630 		int ntfy_idx = gve_rx_idx_to_ntfy(priv, i);
631 
632 		u64_stats_init(&priv->rx[i].statss);
633 		priv->rx[i].ntfy_id = ntfy_idx;
634 		gve_add_napi(priv, ntfy_idx, napi_poll);
635 	}
636 }
637 
638 static void gve_tx_free_rings(struct gve_priv *priv)
639 {
640 	if (gve_is_gqi(priv)) {
641 		gve_tx_free_rings_gqi(priv);
642 	} else {
643 		gve_tx_free_rings_dqo(priv);
644 	}
645 }
646 
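/* Allocate the TX and RX ring arrays and their per-format ring state, then
 * register a NAPI instance (GQI or DQO poll routine) for every ring's
 * notification block. Unwinds in reverse order on failure.
 */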
647 static int gve_alloc_rings(struct gve_priv *priv)
648 {
649 	int err;
650 
651 	/* Setup tx rings */
652 	priv->tx = kvcalloc(priv->tx_cfg.num_queues, sizeof(*priv->tx),
653 			    GFP_KERNEL);
654 	if (!priv->tx)
655 		return -ENOMEM;
656 
657 	if (gve_is_gqi(priv))
658 		err = gve_tx_alloc_rings(priv);
659 	else
660 		err = gve_tx_alloc_rings_dqo(priv);
661 	if (err)
662 		goto free_tx;
663 
664 	/* Setup rx rings */
665 	priv->rx = kvcalloc(priv->rx_cfg.num_queues, sizeof(*priv->rx),
666 			    GFP_KERNEL);
667 	if (!priv->rx) {
668 		err = -ENOMEM;
669 		goto free_tx_queue;
670 	}
671 
672 	if (gve_is_gqi(priv))
673 		err = gve_rx_alloc_rings(priv);
674 	else
675 		err = gve_rx_alloc_rings_dqo(priv);
676 	if (err)
677 		goto free_rx;
678 
679 	if (gve_is_gqi(priv))
680 		add_napi_init_sync_stats(priv, gve_napi_poll);
681 	else
682 		add_napi_init_sync_stats(priv, gve_napi_poll_dqo);
683 
684 	return 0;
685 
686 free_rx:
687 	kvfree(priv->rx);
688 	priv->rx = NULL;
689 free_tx_queue:
690 	gve_tx_free_rings(priv);
691 free_tx:
692 	kvfree(priv->tx);
693 	priv->tx = NULL;
694 	return err;
695 }
696 
697 static int gve_destroy_rings(struct gve_priv *priv)
698 {
699 	int err;
700 
701 	err = gve_adminq_destroy_tx_queues(priv, priv->tx_cfg.num_queues);
702 	if (err) {
703 		netif_err(priv, drv, priv->dev,
704 			  "failed to destroy tx queues\n");
705 		/* This failure will trigger a reset - no need to clean up */
706 		return err;
707 	}
708 	netif_dbg(priv, drv, priv->dev, "destroyed tx queues\n");
709 	err = gve_adminq_destroy_rx_queues(priv, priv->rx_cfg.num_queues);
710 	if (err) {
711 		netif_err(priv, drv, priv->dev,
712 			  "failed to destroy rx queues\n");
713 		/* This failure will trigger a reset - no need to clean up */
714 		return err;
715 	}
716 	netif_dbg(priv, drv, priv->dev, "destroyed rx queues\n");
717 	return 0;
718 }
719 
720 static void gve_rx_free_rings(struct gve_priv *priv)
721 {
722 	if (gve_is_gqi(priv))
723 		gve_rx_free_rings_gqi(priv);
724 	else
725 		gve_rx_free_rings_dqo(priv);
726 }
727 
728 static void gve_free_rings(struct gve_priv *priv)
729 {
730 	int ntfy_idx;
731 	int i;
732 
733 	if (priv->tx) {
734 		for (i = 0; i < priv->tx_cfg.num_queues; i++) {
735 			ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
736 			gve_remove_napi(priv, ntfy_idx);
737 		}
738 		gve_tx_free_rings(priv);
739 		kvfree(priv->tx);
740 		priv->tx = NULL;
741 	}
742 	if (priv->rx) {
743 		for (i = 0; i < priv->rx_cfg.num_queues; i++) {
744 			ntfy_idx = gve_rx_idx_to_ntfy(priv, i);
745 			gve_remove_napi(priv, ntfy_idx);
746 		}
747 		gve_rx_free_rings(priv);
748 		kvfree(priv->rx);
749 		priv->rx = NULL;
750 	}
751 }
752 
753 int gve_alloc_page(struct gve_priv *priv, struct device *dev,
754 		   struct page **page, dma_addr_t *dma,
755 		   enum dma_data_direction dir)
756 {
757 	*page = alloc_page(GFP_KERNEL);
758 	if (!*page) {
759 		priv->page_alloc_fail++;
760 		return -ENOMEM;
761 	}
762 	*dma = dma_map_page(dev, *page, 0, PAGE_SIZE, dir);
763 	if (dma_mapping_error(dev, *dma)) {
764 		priv->dma_mapping_error++;
765 		put_page(*page);
766 		return -ENOMEM;
767 	}
768 	return 0;
769 }
770 
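/* Allocate and DMA-map 'pages' pages for queue page list 'id', enforcing the
 * device's registered-pages budget. On failure the caller is expected to
 * clean up via gve_free_queue_page_list(), which copes with a partially
 * allocated QPL.
 */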
771 static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id,
772 				     int pages)
773 {
774 	struct gve_queue_page_list *qpl = &priv->qpls[id];
775 	int err;
776 	int i;
777 
778 	if (pages + priv->num_registered_pages > priv->max_registered_pages) {
779 		netif_err(priv, drv, priv->dev,
780 			  "Reached max number of registered pages %llu > %llu\n",
781 			  pages + priv->num_registered_pages,
782 			  priv->max_registered_pages);
783 		return -EINVAL;
784 	}
785 
786 	qpl->id = id;
787 	qpl->num_entries = 0;
788 	qpl->pages = kvcalloc(pages, sizeof(*qpl->pages), GFP_KERNEL);
789 	/* caller handles clean up */
790 	if (!qpl->pages)
791 		return -ENOMEM;
792 	qpl->page_buses = kvcalloc(pages, sizeof(*qpl->page_buses), GFP_KERNEL);
793 	/* caller handles clean up */
794 	if (!qpl->page_buses)
795 		return -ENOMEM;
796 
797 	for (i = 0; i < pages; i++) {
798 		err = gve_alloc_page(priv, &priv->pdev->dev, &qpl->pages[i],
799 				     &qpl->page_buses[i],
800 				     gve_qpl_dma_dir(priv, id));
801 		/* caller handles clean up */
802 		if (err)
803 			return -ENOMEM;
804 		qpl->num_entries++;
805 	}
806 	priv->num_registered_pages += pages;
807 
808 	return 0;
809 }
810 
811 void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
812 		   enum dma_data_direction dir)
813 {
814 	if (!dma_mapping_error(dev, dma))
815 		dma_unmap_page(dev, dma, PAGE_SIZE, dir);
816 	if (page)
817 		put_page(page);
818 }
819 
820 static void gve_free_queue_page_list(struct gve_priv *priv,
821 				     int id)
822 {
823 	struct gve_queue_page_list *qpl = &priv->qpls[id];
824 	int i;
825 
826 	if (!qpl->pages)
827 		return;
828 	if (!qpl->page_buses)
829 		goto free_pages;
830 
831 	for (i = 0; i < qpl->num_entries; i++)
832 		gve_free_page(&priv->pdev->dev, qpl->pages[i],
833 			      qpl->page_buses[i], gve_qpl_dma_dir(priv, id));
834 
835 	kvfree(qpl->page_buses);
836 free_pages:
837 	kvfree(qpl->pages);
838 	priv->num_registered_pages -= qpl->num_entries;
839 }
840 
841 static int gve_alloc_qpls(struct gve_priv *priv)
842 {
843 	int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
844 	int i, j;
845 	int err;
846 
847 	/* Raw addressing means no QPLs */
848 	if (priv->queue_format == GVE_GQI_RDA_FORMAT)
849 		return 0;
850 
851 	priv->qpls = kvcalloc(num_qpls, sizeof(*priv->qpls), GFP_KERNEL);
852 	if (!priv->qpls)
853 		return -ENOMEM;
854 
855 	for (i = 0; i < gve_num_tx_qpls(priv); i++) {
856 		err = gve_alloc_queue_page_list(priv, i,
857 						priv->tx_pages_per_qpl);
858 		if (err)
859 			goto free_qpls;
860 	}
861 	for (; i < num_qpls; i++) {
862 		err = gve_alloc_queue_page_list(priv, i,
863 						priv->rx_data_slot_cnt);
864 		if (err)
865 			goto free_qpls;
866 	}
867 
868 	priv->qpl_cfg.qpl_map_size = BITS_TO_LONGS(num_qpls) *
869 				     sizeof(unsigned long) * BITS_PER_BYTE;
870 	priv->qpl_cfg.qpl_id_map = kvcalloc(BITS_TO_LONGS(num_qpls),
871 					    sizeof(unsigned long), GFP_KERNEL);
872 	if (!priv->qpl_cfg.qpl_id_map) {
873 		err = -ENOMEM;
874 		goto free_qpls;
875 	}
876 
877 	return 0;
878 
879 free_qpls:
880 	for (j = 0; j <= i && j < num_qpls; j++)
881 		gve_free_queue_page_list(priv, j);
882 	kvfree(priv->qpls);
883 	return err;
884 }
885 
886 static void gve_free_qpls(struct gve_priv *priv)
887 {
888 	int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
889 	int i;
890 
891 	/* Raw addressing means no QPLs */
892 	if (priv->queue_format == GVE_GQI_RDA_FORMAT)
893 		return;
894 
895 	kvfree(priv->qpl_cfg.qpl_id_map);
896 
897 	for (i = 0; i < num_qpls; i++)
898 		gve_free_queue_page_list(priv, i);
899 
900 	kvfree(priv->qpls);
901 }
902 
903 /* Use this to schedule a reset when the device is capable of continuing
904  * to handle other requests in its current state. If it is not, perform
905  * the reset directly in the calling thread instead.
906  */
907 void gve_schedule_reset(struct gve_priv *priv)
908 {
909 	gve_set_do_reset(priv);
910 	queue_work(priv->gve_wq, &priv->service_task);
911 }
912 
913 static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up);
914 static int gve_reset_recovery(struct gve_priv *priv, bool was_up);
915 static void gve_turndown(struct gve_priv *priv);
916 static void gve_turnup(struct gve_priv *priv);
917 
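/* ndo_open: allocate QPLs and rings, set the real TX/RX queue counts, then
 * register the QPLs and create the queues on the device before turning the
 * carrier and NAPI back on. Failures after the device has been touched fall
 * through to a reset instead of a plain unwind.
 */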
918 static int gve_open(struct net_device *dev)
919 {
920 	struct gve_priv *priv = netdev_priv(dev);
921 	int err;
922 
923 	err = gve_alloc_qpls(priv);
924 	if (err)
925 		return err;
926 
927 	err = gve_alloc_rings(priv);
928 	if (err)
929 		goto free_qpls;
930 
931 	err = netif_set_real_num_tx_queues(dev, priv->tx_cfg.num_queues);
932 	if (err)
933 		goto free_rings;
934 	err = netif_set_real_num_rx_queues(dev, priv->rx_cfg.num_queues);
935 	if (err)
936 		goto free_rings;
937 
938 	err = gve_register_qpls(priv);
939 	if (err)
940 		goto reset;
941 
942 	if (!gve_is_gqi(priv)) {
943 		/* Hard code this for now. This may be tuned in the future for
944 		 * performance.
945 		 */
946 		priv->data_buffer_size_dqo = GVE_RX_BUFFER_SIZE_DQO;
947 	}
948 	err = gve_create_rings(priv);
949 	if (err)
950 		goto reset;
951 
952 	gve_set_device_rings_ok(priv);
953 
954 	if (gve_get_report_stats(priv))
955 		mod_timer(&priv->stats_report_timer,
956 			  round_jiffies(jiffies +
957 				msecs_to_jiffies(priv->stats_report_timer_period)));
958 
959 	gve_turnup(priv);
960 	queue_work(priv->gve_wq, &priv->service_task);
961 	priv->interface_up_cnt++;
962 	return 0;
963 
964 free_rings:
965 	gve_free_rings(priv);
966 free_qpls:
967 	gve_free_qpls(priv);
968 	return err;
969 
970 reset:
971 	/* If a reset is already in progress, this call came from the reset
972 	 * path itself (which holds the rtnl lock), so just return at this point.
973 	 */
974 	if (gve_get_reset_in_progress(priv))
975 		return err;
976 	/* Otherwise reset before returning */
977 	gve_reset_and_teardown(priv, true);
978 	/* if this fails there is nothing we can do so just ignore the return */
979 	gve_reset_recovery(priv, false);
980 	/* return the original error */
981 	return err;
982 }
983 
984 static int gve_close(struct net_device *dev)
985 {
986 	struct gve_priv *priv = netdev_priv(dev);
987 	int err;
988 
989 	netif_carrier_off(dev);
990 	if (gve_get_device_rings_ok(priv)) {
991 		gve_turndown(priv);
992 		err = gve_destroy_rings(priv);
993 		if (err)
994 			goto err;
995 		err = gve_unregister_qpls(priv);
996 		if (err)
997 			goto err;
998 		gve_clear_device_rings_ok(priv);
999 	}
1000 	del_timer_sync(&priv->stats_report_timer);
1001 
1002 	gve_free_rings(priv);
1003 	gve_free_qpls(priv);
1004 	priv->interface_down_cnt++;
1005 	return 0;
1006 
1007 err:
1008 	/* If a reset is already in progress, this call came from the reset
1009 	 * path itself (which holds the rtnl lock), so just return at this point.
1010 	 */
1011 	if (gve_get_reset_in_progress(priv))
1012 		return err;
1013 	/* Otherwise reset before returning */
1014 	gve_reset_and_teardown(priv, true);
1015 	return gve_reset_recovery(priv, false);
1016 }
1017 
1018 int gve_adjust_queues(struct gve_priv *priv,
1019 		      struct gve_queue_config new_rx_config,
1020 		      struct gve_queue_config new_tx_config)
1021 {
1022 	int err;
1023 
1024 	if (netif_carrier_ok(priv->dev)) {
1025 		/* To make this process as simple as possible we teardown the
1026 		 * device, set the new configuration, and then bring the device
1027 		 * up again.
1028 		 */
1029 		err = gve_close(priv->dev);
1030 		/* we have already tried to reset in close,
1031 		 * just fail at this point
1032 		 */
1033 		if (err)
1034 			return err;
1035 		priv->tx_cfg = new_tx_config;
1036 		priv->rx_cfg = new_rx_config;
1037 
1038 		err = gve_open(priv->dev);
1039 		if (err)
1040 			goto err;
1041 
1042 		return 0;
1043 	}
1044 	/* Set the config for the next up. */
1045 	priv->tx_cfg = new_tx_config;
1046 	priv->rx_cfg = new_rx_config;
1047 
1048 	return 0;
1049 err:
1050 	netif_err(priv, drv, priv->dev,
1051 		  "Adjust queues failed! !!! DISABLING ALL QUEUES !!!\n");
1052 	gve_turndown(priv);
1053 	return err;
1054 }
1055 
1056 static void gve_turndown(struct gve_priv *priv)
1057 {
1058 	int idx;
1059 
1060 	if (netif_carrier_ok(priv->dev))
1061 		netif_carrier_off(priv->dev);
1062 
1063 	if (!gve_get_napi_enabled(priv))
1064 		return;
1065 
1066 	/* Disable napi to prevent more work from coming in */
1067 	for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
1068 		int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
1069 		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
1070 
1071 		napi_disable(&block->napi);
1072 	}
1073 	for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
1074 		int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
1075 		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
1076 
1077 		napi_disable(&block->napi);
1078 	}
1079 
1080 	/* Stop tx queues */
1081 	netif_tx_disable(priv->dev);
1082 
1083 	gve_clear_napi_enabled(priv);
1084 	gve_clear_report_stats(priv);
1085 }
1086 
1087 static void gve_turnup(struct gve_priv *priv)
1088 {
1089 	int idx;
1090 
1091 	/* Start the tx queues */
1092 	netif_tx_start_all_queues(priv->dev);
1093 
1094 	/* Enable napi and unmask interrupts for all queues */
1095 	for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
1096 		int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
1097 		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
1098 
1099 		napi_enable(&block->napi);
1100 		if (gve_is_gqi(priv)) {
1101 			iowrite32be(0, gve_irq_doorbell(priv, block));
1102 		} else {
1103 			u32 val = gve_set_itr_ratelimit_dqo(GVE_TX_IRQ_RATELIMIT_US_DQO);
1104 
1105 			gve_write_irq_doorbell_dqo(priv, block, val);
1106 		}
1107 	}
1108 	for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
1109 		int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
1110 		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
1111 
1112 		napi_enable(&block->napi);
1113 		if (gve_is_gqi(priv)) {
1114 			iowrite32be(0, gve_irq_doorbell(priv, block));
1115 		} else {
1116 			u32 val = gve_set_itr_ratelimit_dqo(GVE_RX_IRQ_RATELIMIT_US_DQO);
1117 
1118 			gve_write_irq_doorbell_dqo(priv, block, val);
1119 		}
1120 	}
1121 
1122 	gve_set_napi_enabled(priv);
1123 }
1124 
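/* ndo_tx_timeout: if the device has completions the driver has not processed
 * yet, assume a missed interrupt and kick the queue by masking the IRQ and
 * rescheduling NAPI (rate-limited by MIN_TX_TIMEOUT_GAP). Otherwise schedule
 * a full device reset.
 */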
1125 static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue)
1126 {
1127 	struct gve_notify_block *block;
1128 	struct gve_tx_ring *tx = NULL;
1129 	struct gve_priv *priv;
1130 	u32 last_nic_done;
1131 	u32 current_time;
1132 	u32 ntfy_idx;
1133 
1134 	netdev_info(dev, "Timeout on tx queue %d\n", txqueue);
1135 	priv = netdev_priv(dev);
1136 	if (txqueue >= priv->tx_cfg.num_queues)
1137 		goto reset;
1138 
1139 	ntfy_idx = gve_tx_idx_to_ntfy(priv, txqueue);
1140 	if (ntfy_idx >= priv->num_ntfy_blks)
1141 		goto reset;
1142 
1143 	block = &priv->ntfy_blocks[ntfy_idx];
1144 	tx = block->tx;
1145 
1146 	current_time = jiffies_to_msecs(jiffies);
1147 	if (tx->last_kick_msec + MIN_TX_TIMEOUT_GAP > current_time)
1148 		goto reset;
1149 
1150 	/* Check to see if there are missed completions, which will allow us to
1151 	 * kick the queue.
1152 	 */
1153 	last_nic_done = gve_tx_load_event_counter(priv, tx);
1154 	if (last_nic_done - tx->done) {
1155 		netdev_info(dev, "Kicking queue %d", txqueue);
1156 		iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block));
1157 		napi_schedule(&block->napi);
1158 		tx->last_kick_msec = current_time;
1159 		goto out;
1160 	} /* Else reset. */
1161 
1162 reset:
1163 	gve_schedule_reset(priv);
1164 
1165 out:
1166 	if (tx)
1167 		tx->queue_timeout++;
1168 	priv->tx_timeo_cnt++;
1169 }
1170 
1171 static int gve_set_features(struct net_device *netdev,
1172 			    netdev_features_t features)
1173 {
1174 	const netdev_features_t orig_features = netdev->features;
1175 	struct gve_priv *priv = netdev_priv(netdev);
1176 	int err;
1177 
1178 	if ((netdev->features & NETIF_F_LRO) != (features & NETIF_F_LRO)) {
1179 		netdev->features ^= NETIF_F_LRO;
1180 		if (netif_carrier_ok(netdev)) {
1181 			/* To make this process as simple as possible we
1182 			 * teardown the device, set the new configuration,
1183 			 * and then bring the device up again.
1184 			 */
1185 			err = gve_close(netdev);
1186 			/* We have already tried to reset in close, just fail
1187 			 * at this point.
1188 			 */
1189 			if (err)
1190 				goto err;
1191 
1192 			err = gve_open(netdev);
1193 			if (err)
1194 				goto err;
1195 		}
1196 	}
1197 
1198 	return 0;
1199 err:
1200 	/* Reverts the change on error. */
1201 	netdev->features = orig_features;
1202 	netif_err(priv, drv, netdev,
1203 		  "Set features failed! !!! DISABLING ALL QUEUES !!!\n");
1204 	return err;
1205 }
1206 
1207 static const struct net_device_ops gve_netdev_ops = {
1208 	.ndo_start_xmit		=	gve_start_xmit,
1209 	.ndo_open		=	gve_open,
1210 	.ndo_stop		=	gve_close,
1211 	.ndo_get_stats64	=	gve_get_stats,
1212 	.ndo_tx_timeout         =       gve_tx_timeout,
1213 	.ndo_set_features	=	gve_set_features,
1214 };
1215 
1216 static void gve_handle_status(struct gve_priv *priv, u32 status)
1217 {
1218 	if (GVE_DEVICE_STATUS_RESET_MASK & status) {
1219 		dev_info(&priv->pdev->dev, "Device requested reset.\n");
1220 		gve_set_do_reset(priv);
1221 	}
1222 	if (GVE_DEVICE_STATUS_REPORT_STATS_MASK & status) {
1223 		priv->stats_report_trigger_cnt++;
1224 		gve_set_do_report_stats(priv);
1225 	}
1226 }
1227 
1228 static void gve_handle_reset(struct gve_priv *priv)
1229 {
1230 	/* A service task will be scheduled at the end of probe to catch any
1231 	 * resets that need to happen, and we don't want to reset until
1232 	 * probe is done.
1233 	 */
1234 	if (gve_get_probe_in_progress(priv))
1235 		return;
1236 
1237 	if (gve_get_do_reset(priv)) {
1238 		rtnl_lock();
1239 		gve_reset(priv, false);
1240 		rtnl_unlock();
1241 	}
1242 }
1243 
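/* Fill the DMA-shared stats report with a snapshot of per-queue TX and RX
 * stats. The report was sized in gve_alloc_stats_report() for
 * (GVE_TX_STATS_REPORT_NUM + NIC_TX_STATS_REPORT_NUM) entries per TX queue
 * and the RX equivalent; written_count is incremented before the entries are
 * rewritten.
 */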
1244 void gve_handle_report_stats(struct gve_priv *priv)
1245 {
1246 	struct stats *stats = priv->stats_report->stats;
1247 	int idx, stats_idx = 0;
1248 	unsigned int start = 0;
1249 	u64 tx_bytes;
1250 
1251 	if (!gve_get_report_stats(priv))
1252 		return;
1253 
1254 	be64_add_cpu(&priv->stats_report->written_count, 1);
1255 	/* tx stats */
1256 	if (priv->tx) {
1257 		for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
1258 			u32 last_completion = 0;
1259 			u32 tx_frames = 0;
1260 
1261 			/* DQO doesn't currently support these metrics. */
1262 			if (gve_is_gqi(priv)) {
1263 				last_completion = priv->tx[idx].done;
1264 				tx_frames = priv->tx[idx].req;
1265 			}
1266 
1267 			do {
1268 				start = u64_stats_fetch_begin(&priv->tx[idx].statss);
1269 				tx_bytes = priv->tx[idx].bytes_done;
1270 			} while (u64_stats_fetch_retry(&priv->tx[idx].statss, start));
1271 			stats[stats_idx++] = (struct stats) {
1272 				.stat_name = cpu_to_be32(TX_WAKE_CNT),
1273 				.value = cpu_to_be64(priv->tx[idx].wake_queue),
1274 				.queue_id = cpu_to_be32(idx),
1275 			};
1276 			stats[stats_idx++] = (struct stats) {
1277 				.stat_name = cpu_to_be32(TX_STOP_CNT),
1278 				.value = cpu_to_be64(priv->tx[idx].stop_queue),
1279 				.queue_id = cpu_to_be32(idx),
1280 			};
1281 			stats[stats_idx++] = (struct stats) {
1282 				.stat_name = cpu_to_be32(TX_FRAMES_SENT),
1283 				.value = cpu_to_be64(tx_frames),
1284 				.queue_id = cpu_to_be32(idx),
1285 			};
1286 			stats[stats_idx++] = (struct stats) {
1287 				.stat_name = cpu_to_be32(TX_BYTES_SENT),
1288 				.value = cpu_to_be64(tx_bytes),
1289 				.queue_id = cpu_to_be32(idx),
1290 			};
1291 			stats[stats_idx++] = (struct stats) {
1292 				.stat_name = cpu_to_be32(TX_LAST_COMPLETION_PROCESSED),
1293 				.value = cpu_to_be64(last_completion),
1294 				.queue_id = cpu_to_be32(idx),
1295 			};
1296 			stats[stats_idx++] = (struct stats) {
1297 				.stat_name = cpu_to_be32(TX_TIMEOUT_CNT),
1298 				.value = cpu_to_be64(priv->tx[idx].queue_timeout),
1299 				.queue_id = cpu_to_be32(idx),
1300 			};
1301 		}
1302 	}
1303 	/* rx stats */
1304 	if (priv->rx) {
1305 		for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
1306 			stats[stats_idx++] = (struct stats) {
1307 				.stat_name = cpu_to_be32(RX_NEXT_EXPECTED_SEQUENCE),
1308 				.value = cpu_to_be64(priv->rx[idx].desc.seqno),
1309 				.queue_id = cpu_to_be32(idx),
1310 			};
1311 			stats[stats_idx++] = (struct stats) {
1312 				.stat_name = cpu_to_be32(RX_BUFFERS_POSTED),
1313 				.value = cpu_to_be64(priv->rx[idx].fill_cnt),
1314 				.queue_id = cpu_to_be32(idx),
1315 			};
1316 		}
1317 	}
1318 }
1319 
1320 static void gve_handle_link_status(struct gve_priv *priv, bool link_status)
1321 {
1322 	if (!gve_get_napi_enabled(priv))
1323 		return;
1324 
1325 	if (link_status == netif_carrier_ok(priv->dev))
1326 		return;
1327 
1328 	if (link_status) {
1329 		netdev_info(priv->dev, "Device link is up.\n");
1330 		netif_carrier_on(priv->dev);
1331 	} else {
1332 		netdev_info(priv->dev, "Device link is down.\n");
1333 		netif_carrier_off(priv->dev);
1334 	}
1335 }
1336 
1337 /* Handle NIC status register changes, reset requests and report stats */
1338 static void gve_service_task(struct work_struct *work)
1339 {
1340 	struct gve_priv *priv = container_of(work, struct gve_priv,
1341 					     service_task);
1342 	u32 status = ioread32be(&priv->reg_bar0->device_status);
1343 
1344 	gve_handle_status(priv, status);
1345 
1346 	gve_handle_reset(priv);
1347 	gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status);
1348 }
1349 
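/* One-time (and post-reset) device initialization: bring up the admin queue,
 * query the device description, size the MSI-X vectors and default queue
 * counts, and finally set up the shared device resources. When called from
 * reset recovery, skip_describe_device skips straight to the resource setup.
 */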
1350 static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
1351 {
1352 	int num_ntfy;
1353 	int err;
1354 
1355 	/* Set up the adminq */
1356 	err = gve_adminq_alloc(&priv->pdev->dev, priv);
1357 	if (err) {
1358 		dev_err(&priv->pdev->dev,
1359 			"Failed to alloc admin queue: err=%d\n", err);
1360 		return err;
1361 	}
1362 
1363 	if (skip_describe_device)
1364 		goto setup_device;
1365 
1366 	priv->queue_format = GVE_QUEUE_FORMAT_UNSPECIFIED;
1367 	/* Get the initial information we need from the device */
1368 	err = gve_adminq_describe_device(priv);
1369 	if (err) {
1370 		dev_err(&priv->pdev->dev,
1371 			"Could not get device information: err=%d\n", err);
1372 		goto err;
1373 	}
1374 	priv->dev->mtu = priv->dev->max_mtu;
1375 	num_ntfy = pci_msix_vec_count(priv->pdev);
1376 	if (num_ntfy <= 0) {
1377 		dev_err(&priv->pdev->dev,
1378 			"could not count MSI-x vectors: err=%d\n", num_ntfy);
1379 		err = num_ntfy;
1380 		goto err;
1381 	} else if (num_ntfy < GVE_MIN_MSIX) {
1382 		dev_err(&priv->pdev->dev, "gve needs at least %d MSI-x vectors, but only has %d\n",
1383 			GVE_MIN_MSIX, num_ntfy);
1384 		err = -EINVAL;
1385 		goto err;
1386 	}
1387 
1388 	priv->num_registered_pages = 0;
1389 	priv->rx_copybreak = GVE_DEFAULT_RX_COPYBREAK;
1390 	/* gvnic has one Notification Block per MSI-x vector, except for the
1391 	 * management vector
1392 	 */
1393 	priv->num_ntfy_blks = (num_ntfy - 1) & ~0x1;
1394 	priv->mgmt_msix_idx = priv->num_ntfy_blks;
1395 
1396 	priv->tx_cfg.max_queues =
1397 		min_t(int, priv->tx_cfg.max_queues, priv->num_ntfy_blks / 2);
1398 	priv->rx_cfg.max_queues =
1399 		min_t(int, priv->rx_cfg.max_queues, priv->num_ntfy_blks / 2);
1400 
1401 	priv->tx_cfg.num_queues = priv->tx_cfg.max_queues;
1402 	priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;
1403 	if (priv->default_num_queues > 0) {
1404 		priv->tx_cfg.num_queues = min_t(int, priv->default_num_queues,
1405 						priv->tx_cfg.num_queues);
1406 		priv->rx_cfg.num_queues = min_t(int, priv->default_num_queues,
1407 						priv->rx_cfg.num_queues);
1408 	}
1409 
1410 	dev_info(&priv->pdev->dev, "TX queues %d, RX queues %d\n",
1411 		 priv->tx_cfg.num_queues, priv->rx_cfg.num_queues);
1412 	dev_info(&priv->pdev->dev, "Max TX queues %d, Max RX queues %d\n",
1413 		 priv->tx_cfg.max_queues, priv->rx_cfg.max_queues);
1414 
1415 setup_device:
1416 	err = gve_setup_device_resources(priv);
1417 	if (!err)
1418 		return 0;
1419 err:
1420 	gve_adminq_free(&priv->pdev->dev, priv);
1421 	return err;
1422 }
1423 
1424 static void gve_teardown_priv_resources(struct gve_priv *priv)
1425 {
1426 	gve_teardown_device_resources(priv);
1427 	gve_adminq_free(&priv->pdev->dev, priv);
1428 }
1429 
1430 static void gve_trigger_reset(struct gve_priv *priv)
1431 {
1432 	/* Reset the device by releasing the AQ */
1433 	gve_adminq_release(priv);
1434 }
1435 
1436 static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up)
1437 {
1438 	gve_trigger_reset(priv);
1439 	/* With the reset having already happened, close cannot fail */
1440 	if (was_up)
1441 		gve_close(priv->dev);
1442 	gve_teardown_priv_resources(priv);
1443 }
1444 
1445 static int gve_reset_recovery(struct gve_priv *priv, bool was_up)
1446 {
1447 	int err;
1448 
1449 	err = gve_init_priv(priv, true);
1450 	if (err)
1451 		goto err;
1452 	if (was_up) {
1453 		err = gve_open(priv->dev);
1454 		if (err)
1455 			goto err;
1456 	}
1457 	return 0;
1458 err:
1459 	dev_err(&priv->pdev->dev, "Reset failed! !!! DISABLING ALL QUEUES !!!\n");
1460 	gve_turndown(priv);
1461 	return err;
1462 }
1463 
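/* Full device reset, called with the rtnl lock held. Either close cleanly
 * first (attempt_teardown) or just quiesce and release the admin queue, then
 * re-run init and, if the interface was up, reopen it.
 */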
1464 int gve_reset(struct gve_priv *priv, bool attempt_teardown)
1465 {
1466 	bool was_up = netif_carrier_ok(priv->dev);
1467 	int err;
1468 
1469 	dev_info(&priv->pdev->dev, "Performing reset\n");
1470 	gve_clear_do_reset(priv);
1471 	gve_set_reset_in_progress(priv);
1472 	/* If we aren't attempting to teardown normally, just go turndown and
1473 	 * reset right away.
1474 	 */
1475 	if (!attempt_teardown) {
1476 		gve_turndown(priv);
1477 		gve_reset_and_teardown(priv, was_up);
1478 	} else {
1479 		/* Otherwise attempt to close normally */
1480 		if (was_up) {
1481 			err = gve_close(priv->dev);
1482 			/* If that fails reset as we did above */
1483 			if (err)
1484 				gve_reset_and_teardown(priv, was_up);
1485 		}
1486 		/* Clean up any remaining resources */
1487 		gve_teardown_priv_resources(priv);
1488 	}
1489 
1490 	/* Set it all back up */
1491 	err = gve_reset_recovery(priv, was_up);
1492 	gve_clear_reset_in_progress(priv);
1493 	priv->reset_cnt++;
1494 	priv->interface_up_cnt = 0;
1495 	priv->interface_down_cnt = 0;
1496 	priv->stats_report_trigger_cnt = 0;
1497 	return err;
1498 }
1499 
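/* Report the driver version to the device by streaming the "GVE-" prefix,
 * the version string and a trailing newline, one byte at a time, into the
 * driver version register.
 */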
1500 static void gve_write_version(u8 __iomem *driver_version_register)
1501 {
1502 	const char *c = gve_version_prefix;
1503 
1504 	while (*c) {
1505 		writeb(*c, driver_version_register);
1506 		c++;
1507 	}
1508 
1509 	c = gve_version_str;
1510 	while (*c) {
1511 		writeb(*c, driver_version_register);
1512 		c++;
1513 	}
1514 	writeb('\n', driver_version_register);
1515 }
1516 
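/* PCI probe: map the register and doorbell BARs, read the device's queue
 * limits, allocate the netdev, initialize the private state and admin queue,
 * and finally register the netdev. Errors unwind in reverse order.
 */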
1517 static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1518 {
1519 	int max_tx_queues, max_rx_queues;
1520 	struct net_device *dev;
1521 	__be32 __iomem *db_bar;
1522 	struct gve_registers __iomem *reg_bar;
1523 	struct gve_priv *priv;
1524 	int err;
1525 
1526 	err = pci_enable_device(pdev);
1527 	if (err)
1528 		return err;
1529 
1530 	err = pci_request_regions(pdev, "gvnic-cfg");
1531 	if (err)
1532 		goto abort_with_enabled;
1533 
1534 	pci_set_master(pdev);
1535 
1536 	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1537 	if (err) {
1538 		dev_err(&pdev->dev, "Failed to set dma mask: err=%d\n", err);
1539 		goto abort_with_pci_region;
1540 	}
1541 
1542 	reg_bar = pci_iomap(pdev, GVE_REGISTER_BAR, 0);
1543 	if (!reg_bar) {
1544 		dev_err(&pdev->dev, "Failed to map pci bar!\n");
1545 		err = -ENOMEM;
1546 		goto abort_with_pci_region;
1547 	}
1548 
1549 	db_bar = pci_iomap(pdev, GVE_DOORBELL_BAR, 0);
1550 	if (!db_bar) {
1551 		dev_err(&pdev->dev, "Failed to map doorbell bar!\n");
1552 		err = -ENOMEM;
1553 		goto abort_with_reg_bar;
1554 	}
1555 
1556 	gve_write_version(&reg_bar->driver_version);
1557 	/* Get max queues to alloc etherdev */
1558 	max_tx_queues = ioread32be(&reg_bar->max_tx_queues);
1559 	max_rx_queues = ioread32be(&reg_bar->max_rx_queues);
1560 	/* Alloc and setup the netdev and priv */
1561 	dev = alloc_etherdev_mqs(sizeof(*priv), max_tx_queues, max_rx_queues);
1562 	if (!dev) {
1563 		dev_err(&pdev->dev, "could not allocate netdev\n");
1564 		err = -ENOMEM;
1565 		goto abort_with_db_bar;
1566 	}
1567 	SET_NETDEV_DEV(dev, &pdev->dev);
1568 	pci_set_drvdata(pdev, dev);
1569 	dev->ethtool_ops = &gve_ethtool_ops;
1570 	dev->netdev_ops = &gve_netdev_ops;
1571 
1572 	/* Set default and supported features.
1573 	 *
1574 	 * Features might be set in other locations as well (such as
1575 	 * `gve_adminq_describe_device`).
1576 	 */
1577 	dev->hw_features = NETIF_F_HIGHDMA;
1578 	dev->hw_features |= NETIF_F_SG;
1579 	dev->hw_features |= NETIF_F_HW_CSUM;
1580 	dev->hw_features |= NETIF_F_TSO;
1581 	dev->hw_features |= NETIF_F_TSO6;
1582 	dev->hw_features |= NETIF_F_TSO_ECN;
1583 	dev->hw_features |= NETIF_F_RXCSUM;
1584 	dev->hw_features |= NETIF_F_RXHASH;
1585 	dev->features = dev->hw_features;
1586 	dev->watchdog_timeo = 5 * HZ;
1587 	dev->min_mtu = ETH_MIN_MTU;
1588 	netif_carrier_off(dev);
1589 
1590 	priv = netdev_priv(dev);
1591 	priv->dev = dev;
1592 	priv->pdev = pdev;
1593 	priv->msg_enable = DEFAULT_MSG_LEVEL;
1594 	priv->reg_bar0 = reg_bar;
1595 	priv->db_bar2 = db_bar;
1596 	priv->service_task_flags = 0x0;
1597 	priv->state_flags = 0x0;
1598 	priv->ethtool_flags = 0x0;
1599 
1600 	gve_set_probe_in_progress(priv);
1601 	priv->gve_wq = alloc_ordered_workqueue("gve", 0);
1602 	if (!priv->gve_wq) {
1603 		dev_err(&pdev->dev, "Could not allocate workqueue\n");
1604 		err = -ENOMEM;
1605 		goto abort_with_netdev;
1606 	}
1607 	INIT_WORK(&priv->service_task, gve_service_task);
1608 	INIT_WORK(&priv->stats_report_task, gve_stats_report_task);
1609 	priv->tx_cfg.max_queues = max_tx_queues;
1610 	priv->rx_cfg.max_queues = max_rx_queues;
1611 
1612 	err = gve_init_priv(priv, false);
1613 	if (err)
1614 		goto abort_with_wq;
1615 
1616 	err = register_netdev(dev);
1617 	if (err)
1618 		goto abort_with_gve_init;
1619 
1620 	dev_info(&pdev->dev, "GVE version %s\n", gve_version_str);
1621 	dev_info(&pdev->dev, "GVE queue format %d\n", (int)priv->queue_format);
1622 	gve_clear_probe_in_progress(priv);
1623 	queue_work(priv->gve_wq, &priv->service_task);
1624 	return 0;
1625 
1626 abort_with_gve_init:
1627 	gve_teardown_priv_resources(priv);
1628 
1629 abort_with_wq:
1630 	destroy_workqueue(priv->gve_wq);
1631 
1632 abort_with_netdev:
1633 	free_netdev(dev);
1634 
1635 abort_with_db_bar:
1636 	pci_iounmap(pdev, db_bar);
1637 
1638 abort_with_reg_bar:
1639 	pci_iounmap(pdev, reg_bar);
1640 
1641 abort_with_pci_region:
1642 	pci_release_regions(pdev);
1643 
1644 abort_with_enabled:
1645 	pci_disable_device(pdev);
1646 	return err;
1647 }
1648 
1649 static void gve_remove(struct pci_dev *pdev)
1650 {
1651 	struct net_device *netdev = pci_get_drvdata(pdev);
1652 	struct gve_priv *priv = netdev_priv(netdev);
1653 	__be32 __iomem *db_bar = priv->db_bar2;
1654 	void __iomem *reg_bar = priv->reg_bar0;
1655 
1656 	unregister_netdev(netdev);
1657 	gve_teardown_priv_resources(priv);
1658 	destroy_workqueue(priv->gve_wq);
1659 	free_netdev(netdev);
1660 	pci_iounmap(pdev, db_bar);
1661 	pci_iounmap(pdev, reg_bar);
1662 	pci_release_regions(pdev);
1663 	pci_disable_device(pdev);
1664 }
1665 
1666 static const struct pci_device_id gve_id_table[] = {
1667 	{ PCI_DEVICE(PCI_VENDOR_ID_GOOGLE, PCI_DEV_ID_GVNIC) },
1668 	{ }
1669 };
1670 
1671 static struct pci_driver gvnic_driver = {
1672 	.name		= "gvnic",
1673 	.id_table	= gve_id_table,
1674 	.probe		= gve_probe,
1675 	.remove		= gve_remove,
1676 };
1677 
1678 module_pci_driver(gvnic_driver);
1679 
1680 MODULE_DEVICE_TABLE(pci, gve_id_table);
1681 MODULE_AUTHOR("Google, Inc.");
1682 MODULE_DESCRIPTION("gVNIC Driver");
1683 MODULE_LICENSE("Dual MIT/GPL");
1684 MODULE_VERSION(GVE_VERSION);
1685