xref: /openbmc/linux/drivers/net/virtio_net.c (revision e377fcc8486d40867c6c217077ad0fa40977e060)
1 /* A network driver using virtio.
2  *
3  * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation; either version 2 of the License, or
8  * (at your option) any later version.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program; if not, see <http://www.gnu.org/licenses/>.
17  */
18 //#define DEBUG
19 #include <linux/netdevice.h>
20 #include <linux/etherdevice.h>
21 #include <linux/ethtool.h>
22 #include <linux/module.h>
23 #include <linux/virtio.h>
24 #include <linux/virtio_net.h>
25 #include <linux/bpf.h>
26 #include <linux/bpf_trace.h>
27 #include <linux/scatterlist.h>
28 #include <linux/if_vlan.h>
29 #include <linux/slab.h>
30 #include <linux/cpu.h>
31 #include <linux/average.h>
32 
33 static int napi_weight = NAPI_POLL_WEIGHT;
34 module_param(napi_weight, int, 0444);
35 
36 static bool csum = true, gso = true;
37 module_param(csum, bool, 0444);
38 module_param(gso, bool, 0444);
39 
40 /* FIXME: MTU in config. */
41 #define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
42 #define GOOD_COPY_LEN	128
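/* With the standard Ethernet constants (ETH_HLEN 14, VLAN_HLEN 4,
 * ETH_DATA_LEN 1500), GOOD_PACKET_LEN works out to 1518 bytes.
 * GOOD_COPY_LEN sizes the linear skb head that page_to_skb() copies the
 * start of a received frame into; the remainder is attached as page frags.
 */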
43 
44 #define VIRTNET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
45 
46 /* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
47 #define VIRTIO_XDP_HEADROOM 256
48 
49 /* RX packet size EWMA. The average packet size is used to determine the packet
50  * buffer size when refilling RX rings. As the entire RX ring may be refilled
51  * at once, the weight is chosen so that the EWMA will be insensitive to short-
52  * term, transient changes in packet size.
53  */
54 DECLARE_EWMA(pkt_len, 0, 64)
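/* Rough sketch of the update this declares (precision 0, weight 1/64):
 * new_avg ~= (63 * old_avg + sample) / 64, so a single outlier moves the
 * average by at most 1/64th of its deviation from the current value.
 */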
55 
56 #define VIRTNET_DRIVER_VERSION "1.0.0"
57 
58 struct virtnet_stats {
59 	struct u64_stats_sync tx_syncp;
60 	struct u64_stats_sync rx_syncp;
61 	u64 tx_bytes;
62 	u64 tx_packets;
63 
64 	u64 rx_bytes;
65 	u64 rx_packets;
66 };
67 
68 /* Internal representation of a send virtqueue */
69 struct send_queue {
70 	/* Virtqueue associated with this send queue */
71 	struct virtqueue *vq;
72 
73 	/* TX: fragments + linear part + virtio header */
74 	struct scatterlist sg[MAX_SKB_FRAGS + 2];
75 
76 	/* Name of the send queue: output.$index */
77 	char name[40];
78 };
79 
80 /* Internal representation of a receive virtqueue */
81 struct receive_queue {
82 	/* Virtqueue associated with this receive_queue */
83 	struct virtqueue *vq;
84 
85 	struct napi_struct napi;
86 
87 	struct bpf_prog __rcu *xdp_prog;
88 
89 	/* Chain pages by the private ptr. */
90 	struct page *pages;
91 
92 	/* Average packet length for mergeable receive buffers. */
93 	struct ewma_pkt_len mrg_avg_pkt_len;
94 
95 	/* Page frag for packet buffer allocation. */
96 	struct page_frag alloc_frag;
97 
98 	/* RX: fragments + linear part + virtio header */
99 	struct scatterlist sg[MAX_SKB_FRAGS + 2];
100 
101 	/* Name of this receive queue: input.$index */
102 	char name[40];
103 };
104 
105 struct virtnet_info {
106 	struct virtio_device *vdev;
107 	struct virtqueue *cvq;
108 	struct net_device *dev;
109 	struct send_queue *sq;
110 	struct receive_queue *rq;
111 	unsigned int status;
112 
113 	/* Max # of queue pairs supported by the device */
114 	u16 max_queue_pairs;
115 
116 	/* # of queue pairs currently used by the driver */
117 	u16 curr_queue_pairs;
118 
119 	/* # of XDP queue pairs currently used by the driver */
120 	u16 xdp_queue_pairs;
121 
122 	/* I like... big packets and I cannot lie! */
123 	bool big_packets;
124 
125 	/* Host will merge rx buffers for big packets (shake it! shake it!) */
126 	bool mergeable_rx_bufs;
127 
128 	/* Has control virtqueue */
129 	bool has_cvq;
130 
131 	/* Host can handle any s/g split between our header and packet data */
132 	bool any_header_sg;
133 
134 	/* Packet virtio header size */
135 	u8 hdr_len;
136 
137 	/* Active statistics */
138 	struct virtnet_stats __percpu *stats;
139 
140 	/* Work struct for refilling if we run low on memory. */
141 	struct delayed_work refill;
142 
143 	/* Work struct for config space updates */
144 	struct work_struct config_work;
145 
146 	/* Is the affinity hint set for virtqueues? */
147 	bool affinity_hint_set;
148 
149 	/* CPU hotplug instances for online & dead */
150 	struct hlist_node node;
151 	struct hlist_node node_dead;
152 
153 	/* Control VQ buffers: protected by the rtnl lock */
154 	struct virtio_net_ctrl_hdr ctrl_hdr;
155 	virtio_net_ctrl_ack ctrl_status;
156 	struct virtio_net_ctrl_mq ctrl_mq;
157 	u8 ctrl_promisc;
158 	u8 ctrl_allmulti;
159 	u16 ctrl_vid;
160 
161 	/* Ethtool settings */
162 	u8 duplex;
163 	u32 speed;
164 };
165 
166 struct padded_vnet_hdr {
167 	struct virtio_net_hdr_mrg_rxbuf hdr;
168 	/*
169 	 * hdr is in a separate sg buffer, and the data sg buffer shares the same
170 	 * page with this header sg. This padding makes the next sg 16 byte
171 	 * aligned after the header.
172 	 */
173 	char padding[4];
174 };
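/* For example, on a typical build struct virtio_net_hdr_mrg_rxbuf is 12 bytes
 * (a 10-byte virtio_net_hdr plus the 16-bit num_buffers field), so the 4 bytes
 * of padding start the data sg at a 16-byte offset within the shared page.
 */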
175 
176 /* Converting between virtqueue no. and kernel tx/rx queue no.
177  * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
178  */
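/* For example, with two queue pairs the layout is vq0=rx0, vq1=tx0, vq2=rx1,
 * vq3=tx1 and, if the device has a control queue, vq4=cvq; the helpers below
 * then give vq2rxq(vq2) == 1 and vq2txq(vq3) == 1.
 */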
179 static int vq2txq(struct virtqueue *vq)
180 {
181 	return (vq->index - 1) / 2;
182 }
183 
184 static int txq2vq(int txq)
185 {
186 	return txq * 2 + 1;
187 }
188 
189 static int vq2rxq(struct virtqueue *vq)
190 {
191 	return vq->index / 2;
192 }
193 
194 static int rxq2vq(int rxq)
195 {
196 	return rxq * 2;
197 }
198 
199 static inline struct virtio_net_hdr_mrg_rxbuf *skb_vnet_hdr(struct sk_buff *skb)
200 {
201 	return (struct virtio_net_hdr_mrg_rxbuf *)skb->cb;
202 }
203 
204 /*
205  * private is used to chain pages for big packets; put the whole
206  * most recently used list at the beginning for reuse
207  */
208 static void give_pages(struct receive_queue *rq, struct page *page)
209 {
210 	struct page *end;
211 
212 	/* Find end of list, sew whole thing into vi->rq.pages. */
213 	for (end = page; end->private; end = (struct page *)end->private);
214 	end->private = (unsigned long)rq->pages;
215 	rq->pages = page;
216 }
217 
218 static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
219 {
220 	struct page *p = rq->pages;
221 
222 	if (p) {
223 		rq->pages = (struct page *)p->private;
224 		/* clear private here, it is used to chain pages */
225 		p->private = 0;
226 	} else
227 		p = alloc_page(gfp_mask);
228 	return p;
229 }
230 
231 static void skb_xmit_done(struct virtqueue *vq)
232 {
233 	struct virtnet_info *vi = vq->vdev->priv;
234 
235 	/* Suppress further interrupts. */
236 	virtqueue_disable_cb(vq);
237 
238 	/* We were probably waiting for more output buffers. */
239 	netif_wake_subqueue(vi->dev, vq2txq(vq));
240 }
241 
242 /* Called from bottom half context */
243 static struct sk_buff *page_to_skb(struct virtnet_info *vi,
244 				   struct receive_queue *rq,
245 				   struct page *page, unsigned int offset,
246 				   unsigned int len, unsigned int truesize)
247 {
248 	struct sk_buff *skb;
249 	struct virtio_net_hdr_mrg_rxbuf *hdr;
250 	unsigned int copy, hdr_len, hdr_padded_len;
251 	char *p;
252 
253 	p = page_address(page) + offset;
254 
255 	/* copy small packet so we can reuse these pages for small data */
256 	skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
257 	if (unlikely(!skb))
258 		return NULL;
259 
260 	hdr = skb_vnet_hdr(skb);
261 
262 	hdr_len = vi->hdr_len;
263 	if (vi->mergeable_rx_bufs)
264 		hdr_padded_len = sizeof *hdr;
265 	else
266 		hdr_padded_len = sizeof(struct padded_vnet_hdr);
267 
268 	memcpy(hdr, p, hdr_len);
269 
270 	len -= hdr_len;
271 	offset += hdr_padded_len;
272 	p += hdr_padded_len;
273 
274 	copy = len;
275 	if (copy > skb_tailroom(skb))
276 		copy = skb_tailroom(skb);
277 	memcpy(skb_put(skb, copy), p, copy);
278 
279 	len -= copy;
280 	offset += copy;
281 
282 	if (vi->mergeable_rx_bufs) {
283 		if (len)
284 			skb_add_rx_frag(skb, 0, page, offset, len, truesize);
285 		else
286 			put_page(page);
287 		return skb;
288 	}
289 
290 	/*
291 	 * Verify that we can indeed put this data into a skb.
292 	 * This is here to handle cases when the device erroneously
293 	 * tries to receive more than is possible. This is usually
294 	 * the case of a broken device.
295 	 */
296 	if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
297 		net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
298 		dev_kfree_skb(skb);
299 		return NULL;
300 	}
301 	BUG_ON(offset >= PAGE_SIZE);
302 	while (len) {
303 		unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
304 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
305 				frag_size, truesize);
306 		len -= frag_size;
307 		page = (struct page *)page->private;
308 		offset = 0;
309 	}
310 
311 	if (page)
312 		give_pages(rq, page);
313 
314 	return skb;
315 }
316 
317 static bool virtnet_xdp_xmit(struct virtnet_info *vi,
318 			     struct receive_queue *rq,
319 			     struct xdp_buff *xdp)
320 {
321 	struct virtio_net_hdr_mrg_rxbuf *hdr;
322 	unsigned int len;
323 	struct send_queue *sq;
324 	unsigned int qp;
325 	void *xdp_sent;
326 	int err;
327 
328 	qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
329 	sq = &vi->sq[qp];
330 
331 	/* Free up any pending old buffers before queueing new ones. */
332 	while ((xdp_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) {
333 		struct page *sent_page = virt_to_head_page(xdp_sent);
334 
335 		put_page(sent_page);
336 	}
337 
338 	xdp->data -= vi->hdr_len;
339 	/* Zero header and leave csum up to XDP layers */
340 	hdr = xdp->data;
341 	memset(hdr, 0, vi->hdr_len);
342 
343 	sg_init_one(sq->sg, xdp->data, xdp->data_end - xdp->data);
344 
345 	err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp->data, GFP_ATOMIC);
346 	if (unlikely(err)) {
347 		struct page *page = virt_to_head_page(xdp->data);
348 
349 		put_page(page);
350 		return false;
351 	}
352 
353 	virtqueue_kick(sq->vq);
354 	return true;
355 }
356 
357 static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
358 {
359 	return vi->xdp_queue_pairs ? VIRTIO_XDP_HEADROOM : 0;
360 }
361 
362 static struct sk_buff *receive_small(struct net_device *dev,
363 				     struct virtnet_info *vi,
364 				     struct receive_queue *rq,
365 				     void *buf, unsigned int len)
366 {
367 	struct sk_buff *skb;
368 	struct bpf_prog *xdp_prog;
369 	unsigned int xdp_headroom = virtnet_get_headroom(vi);
370 	unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom;
371 	unsigned int headroom = vi->hdr_len + header_offset;
372 	unsigned int buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
373 			      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
374 	unsigned int delta = 0;
375 	len -= vi->hdr_len;
376 
377 	rcu_read_lock();
378 	xdp_prog = rcu_dereference(rq->xdp_prog);
379 	if (xdp_prog) {
380 		struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
381 		struct xdp_buff xdp;
382 		void *orig_data;
383 		u32 act;
384 
385 		if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags))
386 			goto err_xdp;
387 
388 		xdp.data_hard_start = buf + VIRTNET_RX_PAD + vi->hdr_len;
389 		xdp.data = xdp.data_hard_start + xdp_headroom;
390 		xdp.data_end = xdp.data + len;
391 		orig_data = xdp.data;
392 		act = bpf_prog_run_xdp(xdp_prog, &xdp);
393 
394 		switch (act) {
395 		case XDP_PASS:
396 			/* Recalculate length in case bpf program changed it */
397 			delta = orig_data - xdp.data;
398 			break;
399 		case XDP_TX:
400 			if (unlikely(!virtnet_xdp_xmit(vi, rq, &xdp)))
401 				trace_xdp_exception(vi->dev, xdp_prog, act);
402 			rcu_read_unlock();
403 			goto xdp_xmit;
404 		default:
405 			bpf_warn_invalid_xdp_action(act);
406 		case XDP_ABORTED:
407 			trace_xdp_exception(vi->dev, xdp_prog, act);
408 		case XDP_DROP:
409 			goto err_xdp;
410 		}
411 	}
412 	rcu_read_unlock();
413 
414 	skb = build_skb(buf, buflen);
415 	if (!skb) {
416 		put_page(virt_to_head_page(buf));
417 		goto err;
418 	}
419 	skb_reserve(skb, headroom - delta);
420 	skb_put(skb, len + delta);
421 	if (!delta) {
422 		buf += header_offset;
423 		memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len);
424 	} /* keep zeroed vnet hdr since packet was changed by bpf */
425 
426 err:
427 	return skb;
428 
429 err_xdp:
430 	rcu_read_unlock();
431 	dev->stats.rx_dropped++;
432 	put_page(virt_to_head_page(buf));
433 xdp_xmit:
434 	return NULL;
435 }
436 
437 static struct sk_buff *receive_big(struct net_device *dev,
438 				   struct virtnet_info *vi,
439 				   struct receive_queue *rq,
440 				   void *buf,
441 				   unsigned int len)
442 {
443 	struct page *page = buf;
444 	struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE);
445 
446 	if (unlikely(!skb))
447 		goto err;
448 
449 	return skb;
450 
451 err:
452 	dev->stats.rx_dropped++;
453 	give_pages(rq, page);
454 	return NULL;
455 }
456 
457 /* The conditions to enable XDP should preclude the underlying device from
458  * sending packets across multiple buffers (num_buf > 1). However per spec
459  * it does not appear to be illegal to do so but rather just against convention.
460  * So, in order to avoid making the system unresponsive, the packets are pushed
461  * into a page and the XDP program is run. This will be extremely slow and we
462  * push a warning to the user to fix this as soon as possible. Fixing this may
463  * require examining the underlying device to determine why multiple buffers
464  * are being received or simply loading the XDP program in the ingress stack
465  * after the skb is built because there is no advantage to running it here
466  * anymore.
467  */
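/* For example, a frame the host split into a 1300-byte and a 200-byte buffer
 * is copied below into one freshly allocated page starting at offset
 * VIRTIO_XDP_HEADROOM, so the XDP program sees a single 1500-byte run.
 */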
468 static struct page *xdp_linearize_page(struct receive_queue *rq,
469 				       u16 *num_buf,
470 				       struct page *p,
471 				       int offset,
472 				       unsigned int *len)
473 {
474 	struct page *page = alloc_page(GFP_ATOMIC);
475 	unsigned int page_off = VIRTIO_XDP_HEADROOM;
476 
477 	if (!page)
478 		return NULL;
479 
480 	memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
481 	page_off += *len;
482 
483 	while (--*num_buf) {
484 		unsigned int buflen;
485 		void *buf;
486 		int off;
487 
488 		buf = virtqueue_get_buf(rq->vq, &buflen);
489 		if (unlikely(!buf))
490 			goto err_buf;
491 
492 		p = virt_to_head_page(buf);
493 		off = buf - page_address(p);
494 
495 		/* guard against a misconfigured or uncooperative backend that
496 		 * is sending packets larger than the MTU.
497 		 */
498 		if ((page_off + buflen) > PAGE_SIZE) {
499 			put_page(p);
500 			goto err_buf;
501 		}
502 
503 		memcpy(page_address(page) + page_off,
504 		       page_address(p) + off, buflen);
505 		page_off += buflen;
506 		put_page(p);
507 	}
508 
509 	/* Headroom does not contribute to packet length */
510 	*len = page_off - VIRTIO_XDP_HEADROOM;
511 	return page;
512 err_buf:
513 	__free_pages(page, 0);
514 	return NULL;
515 }
516 
517 static struct sk_buff *receive_mergeable(struct net_device *dev,
518 					 struct virtnet_info *vi,
519 					 struct receive_queue *rq,
520 					 void *buf,
521 					 void *ctx,
522 					 unsigned int len)
523 {
524 	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
525 	u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
526 	struct page *page = virt_to_head_page(buf);
527 	int offset = buf - page_address(page);
528 	struct sk_buff *head_skb, *curr_skb;
529 	struct bpf_prog *xdp_prog;
530 	unsigned int truesize;
531 
532 	head_skb = NULL;
533 
534 	rcu_read_lock();
535 	xdp_prog = rcu_dereference(rq->xdp_prog);
536 	if (xdp_prog) {
537 		struct page *xdp_page;
538 		struct xdp_buff xdp;
539 		void *data;
540 		u32 act;
541 
542 		/* This happens when rx buffer size is underestimated */
543 		if (unlikely(num_buf > 1)) {
544 			/* linearize data for XDP */
545 			xdp_page = xdp_linearize_page(rq, &num_buf,
546 						      page, offset, &len);
547 			if (!xdp_page)
548 				goto err_xdp;
549 			offset = VIRTIO_XDP_HEADROOM;
550 		} else {
551 			xdp_page = page;
552 		}
553 
554 		/* Transient failure which in theory could occur if
555 		 * in-flight packets from before XDP was enabled reach
556 		 * the receive path after XDP is loaded. In practice I
557 		 * was not able to create this condition.
558 		 */
559 		if (unlikely(hdr->hdr.gso_type))
560 			goto err_xdp;
561 
562 		/* Allow consuming headroom but reserve enough space to push
563 		 * the descriptor on if we get an XDP_TX return code.
564 		 */
565 		data = page_address(xdp_page) + offset;
566 		xdp.data_hard_start = data - VIRTIO_XDP_HEADROOM + vi->hdr_len;
567 		xdp.data = data + vi->hdr_len;
568 		xdp.data_end = xdp.data + (len - vi->hdr_len);
569 		act = bpf_prog_run_xdp(xdp_prog, &xdp);
570 
571 		switch (act) {
572 		case XDP_PASS:
573 			/* recalculate offset to account for any header
574 			 * adjustments. Note other cases do not build an
575 			 * skb and avoid using offset
576 			 */
577 			offset = xdp.data -
578 					page_address(xdp_page) - vi->hdr_len;
579 
580 			/* We can only create skb based on xdp_page. */
581 			if (unlikely(xdp_page != page)) {
582 				rcu_read_unlock();
583 				put_page(page);
584 				head_skb = page_to_skb(vi, rq, xdp_page,
585 						       offset, len, PAGE_SIZE);
586 				ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
587 				return head_skb;
588 			}
589 			break;
590 		case XDP_TX:
591 			if (unlikely(!virtnet_xdp_xmit(vi, rq, &xdp)))
592 				trace_xdp_exception(vi->dev, xdp_prog, act);
593 			ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
594 			if (unlikely(xdp_page != page))
595 				goto err_xdp;
596 			rcu_read_unlock();
597 			goto xdp_xmit;
598 		default:
599 			bpf_warn_invalid_xdp_action(act);
600 		case XDP_ABORTED:
601 			trace_xdp_exception(vi->dev, xdp_prog, act);
602 		case XDP_DROP:
603 			if (unlikely(xdp_page != page))
604 				__free_pages(xdp_page, 0);
605 			ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
606 			goto err_xdp;
607 		}
608 	}
609 	rcu_read_unlock();
610 
611 	if (unlikely(len > (unsigned long)ctx)) {
612 		pr_debug("%s: rx error: len %u exceeds truesize 0x%lu\n",
613 			 dev->name, len, (unsigned long)ctx);
614 		dev->stats.rx_length_errors++;
615 		goto err_skb;
616 	}
617 	truesize = (unsigned long)ctx;
618 	head_skb = page_to_skb(vi, rq, page, offset, len, truesize);
619 	curr_skb = head_skb;
620 
621 	if (unlikely(!curr_skb))
622 		goto err_skb;
623 	while (--num_buf) {
624 		int num_skb_frags;
625 
626 		buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx);
627 		if (unlikely(!ctx)) {
628 			pr_debug("%s: rx error: %d buffers out of %d missing\n",
629 				 dev->name, num_buf,
630 				 virtio16_to_cpu(vi->vdev,
631 						 hdr->num_buffers));
632 			dev->stats.rx_length_errors++;
633 			goto err_buf;
634 		}
635 
636 		page = virt_to_head_page(buf);
637 		if (unlikely(len > (unsigned long)ctx)) {
638 			pr_debug("%s: rx error: len %u exceeds truesize 0x%lu\n",
639 				 dev->name, len, (unsigned long)ctx);
640 			dev->stats.rx_length_errors++;
641 			goto err_skb;
642 		}
643 		truesize = (unsigned long)ctx;
644 
645 		num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
646 		if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
647 			struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
648 
649 			if (unlikely(!nskb))
650 				goto err_skb;
651 			if (curr_skb == head_skb)
652 				skb_shinfo(curr_skb)->frag_list = nskb;
653 			else
654 				curr_skb->next = nskb;
655 			curr_skb = nskb;
656 			head_skb->truesize += nskb->truesize;
657 			num_skb_frags = 0;
658 		}
659 		if (curr_skb != head_skb) {
660 			head_skb->data_len += len;
661 			head_skb->len += len;
662 			head_skb->truesize += truesize;
663 		}
664 		offset = buf - page_address(page);
665 		if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
666 			put_page(page);
667 			skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
668 					     len, truesize);
669 		} else {
670 			skb_add_rx_frag(curr_skb, num_skb_frags, page,
671 					offset, len, truesize);
672 		}
673 	}
674 
675 	ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
676 	return head_skb;
677 
678 err_xdp:
679 	rcu_read_unlock();
680 err_skb:
681 	put_page(page);
682 	while (--num_buf) {
683 		buf = virtqueue_get_buf(rq->vq, &len);
684 		if (unlikely(!buf)) {
685 			pr_debug("%s: rx error: %d buffers missing\n",
686 				 dev->name, num_buf);
687 			dev->stats.rx_length_errors++;
688 			break;
689 		}
690 		page = virt_to_head_page(buf);
691 		put_page(page);
692 	}
693 err_buf:
694 	dev->stats.rx_dropped++;
695 	dev_kfree_skb(head_skb);
696 xdp_xmit:
697 	return NULL;
698 }
699 
700 static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
701 		       void *buf, unsigned int len, void **ctx)
702 {
703 	struct net_device *dev = vi->dev;
704 	struct sk_buff *skb;
705 	struct virtio_net_hdr_mrg_rxbuf *hdr;
706 	int ret;
707 
708 	if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
709 		pr_debug("%s: short packet %i\n", dev->name, len);
710 		dev->stats.rx_length_errors++;
711 		if (vi->mergeable_rx_bufs) {
712 			put_page(virt_to_head_page(buf));
713 		} else if (vi->big_packets) {
714 			give_pages(rq, buf);
715 		} else {
716 			put_page(virt_to_head_page(buf));
717 		}
718 		return 0;
719 	}
720 
721 	if (vi->mergeable_rx_bufs)
722 		skb = receive_mergeable(dev, vi, rq, buf, ctx, len);
723 	else if (vi->big_packets)
724 		skb = receive_big(dev, vi, rq, buf, len);
725 	else
726 		skb = receive_small(dev, vi, rq, buf, len);
727 
728 	if (unlikely(!skb))
729 		return 0;
730 
731 	hdr = skb_vnet_hdr(skb);
732 
733 	ret = skb->len;
734 
735 	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
736 		skb->ip_summed = CHECKSUM_UNNECESSARY;
737 
738 	if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
739 				  virtio_is_little_endian(vi->vdev))) {
740 		net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n",
741 				     dev->name, hdr->hdr.gso_type,
742 				     hdr->hdr.gso_size);
743 		goto frame_err;
744 	}
745 
746 	skb->protocol = eth_type_trans(skb, dev);
747 	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
748 		 ntohs(skb->protocol), skb->len, skb->pkt_type);
749 
750 	napi_gro_receive(&rq->napi, skb);
751 	return ret;
752 
753 frame_err:
754 	dev->stats.rx_frame_errors++;
755 	dev_kfree_skb(skb);
756 	return 0;
757 }
758 
759 static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
760 			     gfp_t gfp)
761 {
762 	struct page_frag *alloc_frag = &rq->alloc_frag;
763 	char *buf;
764 	unsigned int xdp_headroom = virtnet_get_headroom(vi);
765 	int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom;
766 	int err;
767 
768 	len = SKB_DATA_ALIGN(len) +
769 	      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
770 	if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp)))
771 		return -ENOMEM;
772 
773 	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
774 	get_page(alloc_frag->page);
775 	alloc_frag->offset += len;
776 	sg_init_one(rq->sg, buf + VIRTNET_RX_PAD + xdp_headroom,
777 		    vi->hdr_len + GOOD_PACKET_LEN);
778 	err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, buf, gfp);
779 	if (err < 0)
780 		put_page(virt_to_head_page(buf));
781 
782 	return err;
783 }
784 
785 static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
786 			   gfp_t gfp)
787 {
788 	struct page *first, *list = NULL;
789 	char *p;
790 	int i, err, offset;
791 
792 	sg_init_table(rq->sg, MAX_SKB_FRAGS + 2);
793 
794 	/* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */
795 	for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
796 		first = get_a_page(rq, gfp);
797 		if (!first) {
798 			if (list)
799 				give_pages(rq, list);
800 			return -ENOMEM;
801 		}
802 		sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);
803 
804 		/* chain new page in list head to match sg */
805 		first->private = (unsigned long)list;
806 		list = first;
807 	}
808 
809 	first = get_a_page(rq, gfp);
810 	if (!first) {
811 		give_pages(rq, list);
812 		return -ENOMEM;
813 	}
814 	p = page_address(first);
815 
816 	/* rq->sg[0], rq->sg[1] share the same page */
817 	/* a separate rq->sg[0] for the header - required in case !any_header_sg */
818 	sg_set_buf(&rq->sg[0], p, vi->hdr_len);
819 
820 	/* rq->sg[1] for data packet, from offset */
821 	offset = sizeof(struct padded_vnet_hdr);
822 	sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);
823 
824 	/* chain first in list head */
825 	first->private = (unsigned long)list;
826 	err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2,
827 				  first, gfp);
828 	if (err < 0)
829 		give_pages(rq, first);
830 
831 	return err;
832 }
833 
834 static unsigned int get_mergeable_buf_len(struct ewma_pkt_len *avg_pkt_len)
835 {
836 	const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
837 	unsigned int len;
838 
839 	len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
840 			GOOD_PACKET_LEN, PAGE_SIZE - hdr_len);
841 	return ALIGN(len, L1_CACHE_BYTES);
842 }
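/* For example, assuming 4 KiB pages, 64-byte cache lines and an EWMA of about
 * 1500 bytes, this returns ALIGN(12 + 1518, 64) = 1536 bytes per mergeable
 * receive buffer (the 12-byte header plus the clamped average packet length).
 */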
843 
844 static int add_recvbuf_mergeable(struct virtnet_info *vi,
845 				 struct receive_queue *rq, gfp_t gfp)
846 {
847 	struct page_frag *alloc_frag = &rq->alloc_frag;
848 	unsigned int headroom = virtnet_get_headroom(vi);
849 	char *buf;
850 	void *ctx;
851 	int err;
852 	unsigned int len, hole;
853 
854 	len = get_mergeable_buf_len(&rq->mrg_avg_pkt_len);
855 	if (unlikely(!skb_page_frag_refill(len + headroom, alloc_frag, gfp)))
856 		return -ENOMEM;
857 
858 	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
859 	buf += headroom; /* advance address leaving hole at front of pkt */
860 	ctx = (void *)(unsigned long)len;
861 	get_page(alloc_frag->page);
862 	alloc_frag->offset += len + headroom;
863 	hole = alloc_frag->size - alloc_frag->offset;
864 	if (hole < len + headroom) {
865 		/* To avoid internal fragmentation, if there is very likely not
866 		 * enough space for another buffer, add the remaining space to
867 		 * the current buffer. This extra space is not included in
868 		 * the truesize stored in ctx.
869 		 */
870 		len += hole;
871 		alloc_frag->offset += hole;
872 	}
873 
874 	sg_init_one(rq->sg, buf, len);
875 	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
876 	if (err < 0)
877 		put_page(virt_to_head_page(buf));
878 
879 	return err;
880 }
881 
882 /*
883  * Returns false if we couldn't fill entirely (OOM).
884  *
885  * Normally run in the receive path, but can also be run from ndo_open
886  * before we're receiving packets, or from refill_work which is
887  * careful to disable receiving (using napi_disable).
888  */
889 static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
890 			  gfp_t gfp)
891 {
892 	int err;
893 	bool oom;
894 
895 	gfp |= __GFP_COLD;
896 	do {
897 		if (vi->mergeable_rx_bufs)
898 			err = add_recvbuf_mergeable(vi, rq, gfp);
899 		else if (vi->big_packets)
900 			err = add_recvbuf_big(vi, rq, gfp);
901 		else
902 			err = add_recvbuf_small(vi, rq, gfp);
903 
904 		oom = err == -ENOMEM;
905 		if (err)
906 			break;
907 	} while (rq->vq->num_free);
908 	virtqueue_kick(rq->vq);
909 	return !oom;
910 }
911 
912 static void skb_recv_done(struct virtqueue *rvq)
913 {
914 	struct virtnet_info *vi = rvq->vdev->priv;
915 	struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];
916 
917 	/* Schedule NAPI, suppress further interrupts if successful. */
918 	if (napi_schedule_prep(&rq->napi)) {
919 		virtqueue_disable_cb(rvq);
920 		__napi_schedule(&rq->napi);
921 	}
922 }
923 
924 static void virtnet_napi_enable(struct receive_queue *rq)
925 {
926 	napi_enable(&rq->napi);
927 
928 	/* If all buffers were filled by the other side before we napi_enabled,
929 	 * we won't get another interrupt, so process any outstanding packets
930 	 * now.  virtnet_poll wants to re-enable the queue, so we disable here.
931 	 * We synchronize against interrupts via NAPI_STATE_SCHED */
932 	if (napi_schedule_prep(&rq->napi)) {
933 		virtqueue_disable_cb(rq->vq);
934 		local_bh_disable();
935 		__napi_schedule(&rq->napi);
936 		local_bh_enable();
937 	}
938 }
939 
940 static void refill_work(struct work_struct *work)
941 {
942 	struct virtnet_info *vi =
943 		container_of(work, struct virtnet_info, refill.work);
944 	bool still_empty;
945 	int i;
946 
947 	for (i = 0; i < vi->curr_queue_pairs; i++) {
948 		struct receive_queue *rq = &vi->rq[i];
949 
950 		napi_disable(&rq->napi);
951 		still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
952 		virtnet_napi_enable(rq);
953 
954 		/* In theory, this can happen: if we don't get any buffers in
955 		 * we will *never* try to fill again.
956 		 */
957 		if (still_empty)
958 			schedule_delayed_work(&vi->refill, HZ/2);
959 	}
960 }
961 
962 static int virtnet_receive(struct receive_queue *rq, int budget)
963 {
964 	struct virtnet_info *vi = rq->vq->vdev->priv;
965 	unsigned int len, received = 0, bytes = 0;
966 	void *buf;
967 	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
968 
969 	if (vi->mergeable_rx_bufs) {
970 		void *ctx;
971 
972 		while (received < budget &&
973 		       (buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) {
974 			bytes += receive_buf(vi, rq, buf, len, ctx);
975 			received++;
976 		}
977 	} else {
978 		while (received < budget &&
979 		       (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
980 			bytes += receive_buf(vi, rq, buf, len, NULL);
981 			received++;
982 		}
983 	}
984 
985 	if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) {
986 		if (!try_fill_recv(vi, rq, GFP_ATOMIC))
987 			schedule_delayed_work(&vi->refill, 0);
988 	}
989 
990 	u64_stats_update_begin(&stats->rx_syncp);
991 	stats->rx_bytes += bytes;
992 	stats->rx_packets += received;
993 	u64_stats_update_end(&stats->rx_syncp);
994 
995 	return received;
996 }
997 
998 static int virtnet_poll(struct napi_struct *napi, int budget)
999 {
1000 	struct receive_queue *rq =
1001 		container_of(napi, struct receive_queue, napi);
1002 	unsigned int r, received;
1003 
1004 	received = virtnet_receive(rq, budget);
1005 
1006 	/* Out of packets? */
1007 	if (received < budget) {
1008 		r = virtqueue_enable_cb_prepare(rq->vq);
1009 		if (napi_complete_done(napi, received)) {
1010 			if (unlikely(virtqueue_poll(rq->vq, r)) &&
1011 			    napi_schedule_prep(napi)) {
1012 				virtqueue_disable_cb(rq->vq);
1013 				__napi_schedule(napi);
1014 			}
1015 		}
1016 	}
1017 
1018 	return received;
1019 }
1020 
1021 static int virtnet_open(struct net_device *dev)
1022 {
1023 	struct virtnet_info *vi = netdev_priv(dev);
1024 	int i;
1025 
1026 	for (i = 0; i < vi->max_queue_pairs; i++) {
1027 		if (i < vi->curr_queue_pairs)
1028 			/* Make sure we have some buffers: if oom use wq. */
1029 			if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
1030 				schedule_delayed_work(&vi->refill, 0);
1031 		virtnet_napi_enable(&vi->rq[i]);
1032 	}
1033 
1034 	return 0;
1035 }
1036 
1037 static void free_old_xmit_skbs(struct send_queue *sq)
1038 {
1039 	struct sk_buff *skb;
1040 	unsigned int len;
1041 	struct virtnet_info *vi = sq->vq->vdev->priv;
1042 	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
1043 	unsigned int packets = 0;
1044 	unsigned int bytes = 0;
1045 
1046 	while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) {
1047 		pr_debug("Sent skb %p\n", skb);
1048 
1049 		bytes += skb->len;
1050 		packets++;
1051 
1052 		dev_kfree_skb_any(skb);
1053 	}
1054 
1055 	/* Avoid overhead when no packets have been processed;
1056 	 * this happens when called speculatively from start_xmit.
1057 	 */
1058 	if (!packets)
1059 		return;
1060 
1061 	u64_stats_update_begin(&stats->tx_syncp);
1062 	stats->tx_bytes += bytes;
1063 	stats->tx_packets += packets;
1064 	u64_stats_update_end(&stats->tx_syncp);
1065 }
1066 
1067 static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
1068 {
1069 	struct virtio_net_hdr_mrg_rxbuf *hdr;
1070 	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
1071 	struct virtnet_info *vi = sq->vq->vdev->priv;
1072 	unsigned num_sg;
1073 	unsigned hdr_len = vi->hdr_len;
1074 	bool can_push;
1075 
1076 	pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
1077 
1078 	can_push = vi->any_header_sg &&
1079 		!((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
1080 		!skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
1081 	/* Even if we can, don't push here yet as this would skew
1082 	 * csum_start offset below. */
1083 	if (can_push)
1084 		hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len);
1085 	else
1086 		hdr = skb_vnet_hdr(skb);
1087 
1088 	if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
1089 				    virtio_is_little_endian(vi->vdev), false))
1090 		BUG();
1091 
1092 	if (vi->mergeable_rx_bufs)
1093 		hdr->num_buffers = 0;
1094 
1095 	sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2));
1096 	if (can_push) {
1097 		__skb_push(skb, hdr_len);
1098 		num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
1099 		/* Pull header back to avoid skew in tx bytes calculations. */
1100 		__skb_pull(skb, hdr_len);
1101 	} else {
1102 		sg_set_buf(sq->sg, hdr, hdr_len);
1103 		num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
1104 	}
1105 	return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
1106 }
1107 
1108 static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
1109 {
1110 	struct virtnet_info *vi = netdev_priv(dev);
1111 	int qnum = skb_get_queue_mapping(skb);
1112 	struct send_queue *sq = &vi->sq[qnum];
1113 	int err;
1114 	struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
1115 	bool kick = !skb->xmit_more;
1116 
1117 	/* Free up any pending old buffers before queueing new ones. */
1118 	free_old_xmit_skbs(sq);
1119 
1120 	/* timestamp packet in software */
1121 	skb_tx_timestamp(skb);
1122 
1123 	/* Try to transmit */
1124 	err = xmit_skb(sq, skb);
1125 
1126 	/* This should not happen! */
1127 	if (unlikely(err)) {
1128 		dev->stats.tx_fifo_errors++;
1129 		if (net_ratelimit())
1130 			dev_warn(&dev->dev,
1131 				 "Unexpected TXQ (%d) queue failure: %d\n", qnum, err);
1132 		dev->stats.tx_dropped++;
1133 		dev_kfree_skb_any(skb);
1134 		return NETDEV_TX_OK;
1135 	}
1136 
1137 	/* Don't wait up for transmitted skbs to be freed. */
1138 	skb_orphan(skb);
1139 	nf_reset(skb);
1140 
1141 	/* If running out of space, stop queue to avoid getting packets that we
1142 	 * are then unable to transmit.
1143 	 * An alternative would be to force queuing layer to requeue the skb by
1144 	 * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
1145 	 * returned in a normal path of operation: it means that driver is not
1146 	 * maintaining the TX queue stop/start state properly, and causes
1147 	 * the stack to do a non-trivial amount of useless work.
1148 	 * Since most packets only take 1 or 2 ring slots, stopping the queue
1149 	 * early means 16 slots are typically wasted.
1150 	 */
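	/* For example, with the common MAX_SKB_FRAGS of 17 the queue is stopped
	 * once fewer than 19 descriptors remain: one for the virtio header, one
	 * for the linear part and one per possible fragment of the next skb.
	 */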
1151 	if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
1152 		netif_stop_subqueue(dev, qnum);
1153 		if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
1154 			/* More just got used, free them then recheck. */
1155 			free_old_xmit_skbs(sq);
1156 			if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
1157 				netif_start_subqueue(dev, qnum);
1158 				virtqueue_disable_cb(sq->vq);
1159 			}
1160 		}
1161 	}
1162 
1163 	if (kick || netif_xmit_stopped(txq))
1164 		virtqueue_kick(sq->vq);
1165 
1166 	return NETDEV_TX_OK;
1167 }
1168 
1169 /*
1170  * Send command via the control virtqueue and check status.  Commands
1171  * supported by the hypervisor, as indicated by feature bits, should
1172  * never fail unless improperly formatted.
1173  */
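/* For example, _virtnet_set_queues() below passes one out sg carrying a
 * struct virtio_net_ctrl_mq; the control vq then sees sgs[0] = ctrl_hdr
 * (class/cmd), sgs[1] = that payload and sgs[2] = ctrl_status, with only the
 * last entry writable by the device.
 */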
1174 static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
1175 				 struct scatterlist *out)
1176 {
1177 	struct scatterlist *sgs[4], hdr, stat;
1178 	unsigned out_num = 0, tmp;
1179 
1180 	/* Caller should know better */
1181 	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
1182 
1183 	vi->ctrl_status = ~0;
1184 	vi->ctrl_hdr.class = class;
1185 	vi->ctrl_hdr.cmd = cmd;
1186 	/* Add header */
1187 	sg_init_one(&hdr, &vi->ctrl_hdr, sizeof(vi->ctrl_hdr));
1188 	sgs[out_num++] = &hdr;
1189 
1190 	if (out)
1191 		sgs[out_num++] = out;
1192 
1193 	/* Add return status. */
1194 	sg_init_one(&stat, &vi->ctrl_status, sizeof(vi->ctrl_status));
1195 	sgs[out_num] = &stat;
1196 
1197 	BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
1198 	virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
1199 
1200 	if (unlikely(!virtqueue_kick(vi->cvq)))
1201 		return vi->ctrl_status == VIRTIO_NET_OK;
1202 
1203 	/* Spin for a response, the kick causes an ioport write, trapping
1204 	 * into the hypervisor, so the request should be handled immediately.
1205 	 */
1206 	while (!virtqueue_get_buf(vi->cvq, &tmp) &&
1207 	       !virtqueue_is_broken(vi->cvq))
1208 		cpu_relax();
1209 
1210 	return vi->ctrl_status == VIRTIO_NET_OK;
1211 }
1212 
1213 static int virtnet_set_mac_address(struct net_device *dev, void *p)
1214 {
1215 	struct virtnet_info *vi = netdev_priv(dev);
1216 	struct virtio_device *vdev = vi->vdev;
1217 	int ret;
1218 	struct sockaddr *addr;
1219 	struct scatterlist sg;
1220 
1221 	addr = kmemdup(p, sizeof(*addr), GFP_KERNEL);
1222 	if (!addr)
1223 		return -ENOMEM;
1224 
1225 	ret = eth_prepare_mac_addr_change(dev, addr);
1226 	if (ret)
1227 		goto out;
1228 
1229 	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
1230 		sg_init_one(&sg, addr->sa_data, dev->addr_len);
1231 		if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
1232 					  VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
1233 			dev_warn(&vdev->dev,
1234 				 "Failed to set mac address by vq command.\n");
1235 			ret = -EINVAL;
1236 			goto out;
1237 		}
1238 	} else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
1239 		   !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
1240 		unsigned int i;
1241 
1242 		/* Naturally, this has an atomicity problem. */
1243 		for (i = 0; i < dev->addr_len; i++)
1244 			virtio_cwrite8(vdev,
1245 				       offsetof(struct virtio_net_config, mac) +
1246 				       i, addr->sa_data[i]);
1247 	}
1248 
1249 	eth_commit_mac_addr_change(dev, p);
1250 	ret = 0;
1251 
1252 out:
1253 	kfree(addr);
1254 	return ret;
1255 }
1256 
1257 static void virtnet_stats(struct net_device *dev,
1258 			  struct rtnl_link_stats64 *tot)
1259 {
1260 	struct virtnet_info *vi = netdev_priv(dev);
1261 	int cpu;
1262 	unsigned int start;
1263 
1264 	for_each_possible_cpu(cpu) {
1265 		struct virtnet_stats *stats = per_cpu_ptr(vi->stats, cpu);
1266 		u64 tpackets, tbytes, rpackets, rbytes;
1267 
1268 		do {
1269 			start = u64_stats_fetch_begin_irq(&stats->tx_syncp);
1270 			tpackets = stats->tx_packets;
1271 			tbytes   = stats->tx_bytes;
1272 		} while (u64_stats_fetch_retry_irq(&stats->tx_syncp, start));
1273 
1274 		do {
1275 			start = u64_stats_fetch_begin_irq(&stats->rx_syncp);
1276 			rpackets = stats->rx_packets;
1277 			rbytes   = stats->rx_bytes;
1278 		} while (u64_stats_fetch_retry_irq(&stats->rx_syncp, start));
1279 
1280 		tot->rx_packets += rpackets;
1281 		tot->tx_packets += tpackets;
1282 		tot->rx_bytes   += rbytes;
1283 		tot->tx_bytes   += tbytes;
1284 	}
1285 
1286 	tot->tx_dropped = dev->stats.tx_dropped;
1287 	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
1288 	tot->rx_dropped = dev->stats.rx_dropped;
1289 	tot->rx_length_errors = dev->stats.rx_length_errors;
1290 	tot->rx_frame_errors = dev->stats.rx_frame_errors;
1291 }
1292 
1293 #ifdef CONFIG_NET_POLL_CONTROLLER
1294 static void virtnet_netpoll(struct net_device *dev)
1295 {
1296 	struct virtnet_info *vi = netdev_priv(dev);
1297 	int i;
1298 
1299 	for (i = 0; i < vi->curr_queue_pairs; i++)
1300 		napi_schedule(&vi->rq[i].napi);
1301 }
1302 #endif
1303 
1304 static void virtnet_ack_link_announce(struct virtnet_info *vi)
1305 {
1306 	rtnl_lock();
1307 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
1308 				  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL))
1309 		dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
1310 	rtnl_unlock();
1311 }
1312 
1313 static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
1314 {
1315 	struct scatterlist sg;
1316 	struct net_device *dev = vi->dev;
1317 
1318 	if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
1319 		return 0;
1320 
1321 	vi->ctrl_mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
1322 	sg_init_one(&sg, &vi->ctrl_mq, sizeof(vi->ctrl_mq));
1323 
1324 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
1325 				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
1326 		dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n",
1327 			 queue_pairs);
1328 		return -EINVAL;
1329 	} else {
1330 		vi->curr_queue_pairs = queue_pairs;
1331 		/* virtnet_open() will refill when the device is brought up. */
1332 		if (dev->flags & IFF_UP)
1333 			schedule_delayed_work(&vi->refill, 0);
1334 	}
1335 
1336 	return 0;
1337 }
1338 
1339 static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
1340 {
1341 	int err;
1342 
1343 	rtnl_lock();
1344 	err = _virtnet_set_queues(vi, queue_pairs);
1345 	rtnl_unlock();
1346 	return err;
1347 }
1348 
1349 static int virtnet_close(struct net_device *dev)
1350 {
1351 	struct virtnet_info *vi = netdev_priv(dev);
1352 	int i;
1353 
1354 	/* Make sure refill_work doesn't re-enable napi! */
1355 	cancel_delayed_work_sync(&vi->refill);
1356 
1357 	for (i = 0; i < vi->max_queue_pairs; i++)
1358 		napi_disable(&vi->rq[i].napi);
1359 
1360 	return 0;
1361 }
1362 
1363 static void virtnet_set_rx_mode(struct net_device *dev)
1364 {
1365 	struct virtnet_info *vi = netdev_priv(dev);
1366 	struct scatterlist sg[2];
1367 	struct virtio_net_ctrl_mac *mac_data;
1368 	struct netdev_hw_addr *ha;
1369 	int uc_count;
1370 	int mc_count;
1371 	void *buf;
1372 	int i;
1373 
1374 	/* We can't dynamically set ndo_set_rx_mode, so return gracefully */
1375 	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
1376 		return;
1377 
1378 	vi->ctrl_promisc = ((dev->flags & IFF_PROMISC) != 0);
1379 	vi->ctrl_allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
1380 
1381 	sg_init_one(sg, &vi->ctrl_promisc, sizeof(vi->ctrl_promisc));
1382 
1383 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
1384 				  VIRTIO_NET_CTRL_RX_PROMISC, sg))
1385 		dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
1386 			 vi->ctrl_promisc ? "en" : "dis");
1387 
1388 	sg_init_one(sg, &vi->ctrl_allmulti, sizeof(vi->ctrl_allmulti));
1389 
1390 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
1391 				  VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
1392 		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
1393 			 vi->ctrl_allmulti ? "en" : "dis");
1394 
1395 	uc_count = netdev_uc_count(dev);
1396 	mc_count = netdev_mc_count(dev);
1397 	/* MAC filter - use one buffer for both lists */
1398 	buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
1399 		      (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
1400 	mac_data = buf;
1401 	if (!buf)
1402 		return;
1403 
1404 	sg_init_table(sg, 2);
1405 
1406 	/* Store the unicast list and count in the front of the buffer */
1407 	mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count);
1408 	i = 0;
1409 	netdev_for_each_uc_addr(ha, dev)
1410 		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
1411 
1412 	sg_set_buf(&sg[0], mac_data,
1413 		   sizeof(mac_data->entries) + (uc_count * ETH_ALEN));
1414 
1415 	/* multicast list and count fill the end */
1416 	mac_data = (void *)&mac_data->macs[uc_count][0];
1417 
1418 	mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count);
1419 	i = 0;
1420 	netdev_for_each_mc_addr(ha, dev)
1421 		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
1422 
1423 	sg_set_buf(&sg[1], mac_data,
1424 		   sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
1425 
1426 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
1427 				  VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
1428 		dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
1429 
1430 	kfree(buf);
1431 }
1432 
1433 static int virtnet_vlan_rx_add_vid(struct net_device *dev,
1434 				   __be16 proto, u16 vid)
1435 {
1436 	struct virtnet_info *vi = netdev_priv(dev);
1437 	struct scatterlist sg;
1438 
1439 	vi->ctrl_vid = vid;
1440 	sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid));
1441 
1442 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
1443 				  VIRTIO_NET_CTRL_VLAN_ADD, &sg))
1444 		dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
1445 	return 0;
1446 }
1447 
1448 static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
1449 				    __be16 proto, u16 vid)
1450 {
1451 	struct virtnet_info *vi = netdev_priv(dev);
1452 	struct scatterlist sg;
1453 
1454 	vi->ctrl_vid = vid;
1455 	sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid));
1456 
1457 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
1458 				  VIRTIO_NET_CTRL_VLAN_DEL, &sg))
1459 		dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
1460 	return 0;
1461 }
1462 
1463 static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
1464 {
1465 	int i;
1466 
1467 	if (vi->affinity_hint_set) {
1468 		for (i = 0; i < vi->max_queue_pairs; i++) {
1469 			virtqueue_set_affinity(vi->rq[i].vq, -1);
1470 			virtqueue_set_affinity(vi->sq[i].vq, -1);
1471 		}
1472 
1473 		vi->affinity_hint_set = false;
1474 	}
1475 }
1476 
1477 static void virtnet_set_affinity(struct virtnet_info *vi)
1478 {
1479 	int i;
1480 	int cpu;
1481 
1482 	/* In multiqueue mode, when the number of CPUs is equal to the number of
1483 	 * queue pairs, we let each queue pair be private to one CPU by
1484 	 * setting the affinity hint to eliminate contention.
1485 	 */
1486 	if (vi->curr_queue_pairs == 1 ||
1487 	    vi->max_queue_pairs != num_online_cpus()) {
1488 		virtnet_clean_affinity(vi, -1);
1489 		return;
1490 	}
1491 
1492 	i = 0;
1493 	for_each_online_cpu(cpu) {
1494 		virtqueue_set_affinity(vi->rq[i].vq, cpu);
1495 		virtqueue_set_affinity(vi->sq[i].vq, cpu);
1496 		netif_set_xps_queue(vi->dev, cpumask_of(cpu), i);
1497 		i++;
1498 	}
1499 
1500 	vi->affinity_hint_set = true;
1501 }
1502 
1503 static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node)
1504 {
1505 	struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
1506 						   node);
1507 	virtnet_set_affinity(vi);
1508 	return 0;
1509 }
1510 
1511 static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node)
1512 {
1513 	struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
1514 						   node_dead);
1515 	virtnet_set_affinity(vi);
1516 	return 0;
1517 }
1518 
1519 static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
1520 {
1521 	struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
1522 						   node);
1523 
1524 	virtnet_clean_affinity(vi, cpu);
1525 	return 0;
1526 }
1527 
1528 static enum cpuhp_state virtionet_online;
1529 
1530 static int virtnet_cpu_notif_add(struct virtnet_info *vi)
1531 {
1532 	int ret;
1533 
1534 	ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node);
1535 	if (ret)
1536 		return ret;
1537 	ret = cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD,
1538 					       &vi->node_dead);
1539 	if (!ret)
1540 		return ret;
1541 	cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
1542 	return ret;
1543 }
1544 
1545 static void virtnet_cpu_notif_remove(struct virtnet_info *vi)
1546 {
1547 	cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
1548 	cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_NET_DEAD,
1549 					    &vi->node_dead);
1550 }
1551 
1552 static void virtnet_get_ringparam(struct net_device *dev,
1553 				struct ethtool_ringparam *ring)
1554 {
1555 	struct virtnet_info *vi = netdev_priv(dev);
1556 
1557 	ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq);
1558 	ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq);
1559 	ring->rx_pending = ring->rx_max_pending;
1560 	ring->tx_pending = ring->tx_max_pending;
1561 }
1562 
1563 
1564 static void virtnet_get_drvinfo(struct net_device *dev,
1565 				struct ethtool_drvinfo *info)
1566 {
1567 	struct virtnet_info *vi = netdev_priv(dev);
1568 	struct virtio_device *vdev = vi->vdev;
1569 
1570 	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
1571 	strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
1572 	strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
1573 
1574 }
1575 
1576 /* TODO: Eliminate OOO packets during switching */
1577 static int virtnet_set_channels(struct net_device *dev,
1578 				struct ethtool_channels *channels)
1579 {
1580 	struct virtnet_info *vi = netdev_priv(dev);
1581 	u16 queue_pairs = channels->combined_count;
1582 	int err;
1583 
1584 	/* We don't support separate rx/tx channels.
1585 	 * We don't allow setting 'other' channels.
1586 	 */
1587 	if (channels->rx_count || channels->tx_count || channels->other_count)
1588 		return -EINVAL;
1589 
1590 	if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0)
1591 		return -EINVAL;
1592 
1593 	/* For now we don't support modifying channels while XDP is loaded.
1594 	 * Also, when XDP is loaded all RX queues have XDP programs, so we only
1595 	 * need to check a single RX queue.
1596 	 */
1597 	if (vi->rq[0].xdp_prog)
1598 		return -EINVAL;
1599 
1600 	get_online_cpus();
1601 	err = _virtnet_set_queues(vi, queue_pairs);
1602 	if (!err) {
1603 		netif_set_real_num_tx_queues(dev, queue_pairs);
1604 		netif_set_real_num_rx_queues(dev, queue_pairs);
1605 
1606 		virtnet_set_affinity(vi);
1607 	}
1608 	put_online_cpus();
1609 
1610 	return err;
1611 }
1612 
1613 static void virtnet_get_channels(struct net_device *dev,
1614 				 struct ethtool_channels *channels)
1615 {
1616 	struct virtnet_info *vi = netdev_priv(dev);
1617 
1618 	channels->combined_count = vi->curr_queue_pairs;
1619 	channels->max_combined = vi->max_queue_pairs;
1620 	channels->max_other = 0;
1621 	channels->rx_count = 0;
1622 	channels->tx_count = 0;
1623 	channels->other_count = 0;
1624 }
1625 
1626 /* Check if the user is trying to change anything besides speed/duplex */
1627 static bool virtnet_validate_ethtool_cmd(const struct ethtool_cmd *cmd)
1628 {
1629 	struct ethtool_cmd diff1 = *cmd;
1630 	struct ethtool_cmd diff2 = {};
1631 
1632 	/* cmd is always set so we need to clear it; validate the port type,
1633 	 * and also, without autonegotiation, we can ignore advertising
1634 	 */
1635 	ethtool_cmd_speed_set(&diff1, 0);
1636 	diff2.port = PORT_OTHER;
1637 	diff1.advertising = 0;
1638 	diff1.duplex = 0;
1639 	diff1.cmd = 0;
1640 
1641 	return !memcmp(&diff1, &diff2, sizeof(diff1));
1642 }
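/* For example, a request that also flips autoneg or names a port other than
 * PORT_OTHER leaves a nonzero difference after the fields above are cleared,
 * so the memcmp() fails and virtnet_set_settings() below rejects the change.
 */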
1643 
1644 static int virtnet_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1645 {
1646 	struct virtnet_info *vi = netdev_priv(dev);
1647 	u32 speed;
1648 
1649 	speed = ethtool_cmd_speed(cmd);
1650 	/* don't allow custom speed and duplex */
1651 	if (!ethtool_validate_speed(speed) ||
1652 	    !ethtool_validate_duplex(cmd->duplex) ||
1653 	    !virtnet_validate_ethtool_cmd(cmd))
1654 		return -EINVAL;
1655 	vi->speed = speed;
1656 	vi->duplex = cmd->duplex;
1657 
1658 	return 0;
1659 }
1660 
1661 static int virtnet_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1662 {
1663 	struct virtnet_info *vi = netdev_priv(dev);
1664 
1665 	ethtool_cmd_speed_set(cmd, vi->speed);
1666 	cmd->duplex = vi->duplex;
1667 	cmd->port = PORT_OTHER;
1668 
1669 	return 0;
1670 }
1671 
1672 static void virtnet_init_settings(struct net_device *dev)
1673 {
1674 	struct virtnet_info *vi = netdev_priv(dev);
1675 
1676 	vi->speed = SPEED_UNKNOWN;
1677 	vi->duplex = DUPLEX_UNKNOWN;
1678 }
1679 
1680 static const struct ethtool_ops virtnet_ethtool_ops = {
1681 	.get_drvinfo = virtnet_get_drvinfo,
1682 	.get_link = ethtool_op_get_link,
1683 	.get_ringparam = virtnet_get_ringparam,
1684 	.set_channels = virtnet_set_channels,
1685 	.get_channels = virtnet_get_channels,
1686 	.get_ts_info = ethtool_op_get_ts_info,
1687 	.get_settings = virtnet_get_settings,
1688 	.set_settings = virtnet_set_settings,
1689 };
1690 
1691 static void virtnet_freeze_down(struct virtio_device *vdev)
1692 {
1693 	struct virtnet_info *vi = vdev->priv;
1694 	int i;
1695 
1696 	/* Make sure no work handler is accessing the device */
1697 	flush_work(&vi->config_work);
1698 
1699 	netif_device_detach(vi->dev);
1700 	cancel_delayed_work_sync(&vi->refill);
1701 
1702 	if (netif_running(vi->dev)) {
1703 		for (i = 0; i < vi->max_queue_pairs; i++)
1704 			napi_disable(&vi->rq[i].napi);
1705 	}
1706 }
1707 
1708 static int init_vqs(struct virtnet_info *vi);
1709 static void _remove_vq_common(struct virtnet_info *vi);
1710 
1711 static int virtnet_restore_up(struct virtio_device *vdev)
1712 {
1713 	struct virtnet_info *vi = vdev->priv;
1714 	int err, i;
1715 
1716 	err = init_vqs(vi);
1717 	if (err)
1718 		return err;
1719 
1720 	virtio_device_ready(vdev);
1721 
1722 	if (netif_running(vi->dev)) {
1723 		for (i = 0; i < vi->curr_queue_pairs; i++)
1724 			if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
1725 				schedule_delayed_work(&vi->refill, 0);
1726 
1727 		for (i = 0; i < vi->max_queue_pairs; i++)
1728 			virtnet_napi_enable(&vi->rq[i]);
1729 	}
1730 
1731 	netif_device_attach(vi->dev);
1732 	return err;
1733 }
1734 
1735 static int virtnet_reset(struct virtnet_info *vi, int curr_qp, int xdp_qp)
1736 {
1737 	struct virtio_device *dev = vi->vdev;
1738 	int ret;
1739 
1740 	virtio_config_disable(dev);
1741 	dev->failed = dev->config->get_status(dev) & VIRTIO_CONFIG_S_FAILED;
1742 	virtnet_freeze_down(dev);
1743 	_remove_vq_common(vi);
1744 
1745 	dev->config->reset(dev);
1746 	virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
1747 	virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER);
1748 
1749 	ret = virtio_finalize_features(dev);
1750 	if (ret)
1751 		goto err;
1752 
1753 	vi->xdp_queue_pairs = xdp_qp;
1754 	ret = virtnet_restore_up(dev);
1755 	if (ret)
1756 		goto err;
1757 	ret = _virtnet_set_queues(vi, curr_qp);
1758 	if (ret)
1759 		goto err;
1760 
1761 	virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
1762 	virtio_config_enable(dev);
1763 	return 0;
1764 err:
1765 	virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);
1766 	return ret;
1767 }
1768 
1769 static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog)
1770 {
1771 	unsigned long int max_sz = PAGE_SIZE - sizeof(struct padded_vnet_hdr);
1772 	struct virtnet_info *vi = netdev_priv(dev);
1773 	struct bpf_prog *old_prog;
1774 	u16 xdp_qp = 0, curr_qp;
1775 	int i, err;
1776 
1777 	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
1778 	    virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
1779 	    virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
1780 	    virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO)) {
1781 		netdev_warn(dev, "can't set XDP while host is implementing LRO, disable LRO first\n");
1782 		return -EOPNOTSUPP;
1783 	}
1784 
1785 	if (vi->mergeable_rx_bufs && !vi->any_header_sg) {
1786 		netdev_warn(dev, "XDP expects header/data in single page, any_header_sg required\n");
1787 		return -EINVAL;
1788 	}
1789 
1790 	if (dev->mtu > max_sz) {
1791 		netdev_warn(dev, "XDP requires MTU less than %lu\n", max_sz);
1792 		return -EINVAL;
1793 	}
1794 
1795 	curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs;
1796 	if (prog)
1797 		xdp_qp = nr_cpu_ids;
1798 
1799 	/* XDP requires extra queues for XDP_TX */
1800 	if (curr_qp + xdp_qp > vi->max_queue_pairs) {
1801 		netdev_warn(dev, "request %i queues but max is %i\n",
1802 			    curr_qp + xdp_qp, vi->max_queue_pairs);
1803 		return -ENOMEM;
1804 	}
1805 
1806 	if (prog) {
1807 		prog = bpf_prog_add(prog, vi->max_queue_pairs - 1);
1808 		if (IS_ERR(prog))
1809 			return PTR_ERR(prog);
1810 	}
1811 
1812 	/* Changing the headroom in buffers is a disruptive operation because
1813 	 * existing buffers must be flushed and reallocated. This will happen
1814 	 * when an XDP program is initially added or XDP is disabled by removing
1815 	 * the XDP program, resulting in the number of XDP queues changing.
1816 	 */
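	/* For example, attaching a program sets xdp_qp to nr_cpu_ids above, so
	 * xdp_queue_pairs changes from 0 and the reset path below re-provisions
	 * the rings with VIRTIO_XDP_HEADROOM in front of each receive buffer
	 * (see virtnet_get_headroom()).
	 */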
1817 	if (vi->xdp_queue_pairs != xdp_qp) {
1818 		err = virtnet_reset(vi, curr_qp + xdp_qp, xdp_qp);
1819 		if (err) {
1820 			dev_warn(&dev->dev, "XDP reset failure.\n");
1821 			goto virtio_reset_err;
1822 		}
1823 	}
1824 
1825 	netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
1826 
1827 	for (i = 0; i < vi->max_queue_pairs; i++) {
1828 		old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
1829 		rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
1830 		if (old_prog)
1831 			bpf_prog_put(old_prog);
1832 	}
1833 
1834 	return 0;
1835 
1836 virtio_reset_err:
1837 	/* On reset error, do our best to unwind the in-flight XDP changes and
1838 	 * return the error up to user space for resolution. The underlying reset
1839 	 * hung on us, so there is not much we can do here.
1840 	 */
1841 	if (prog)
1842 		bpf_prog_sub(prog, vi->max_queue_pairs - 1);
1843 	return err;
1844 }
1845 
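/* Report whether any receive queue currently has an XDP program attached. */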
1846 static bool virtnet_xdp_query(struct net_device *dev)
1847 {
1848 	struct virtnet_info *vi = netdev_priv(dev);
1849 	int i;
1850 
1851 	for (i = 0; i < vi->max_queue_pairs; i++) {
1852 		if (vi->rq[i].xdp_prog)
1853 			return true;
1854 	}
1855 	return false;
1856 }
1857 
1858 static int virtnet_xdp(struct net_device *dev, struct netdev_xdp *xdp)
1859 {
1860 	switch (xdp->command) {
1861 	case XDP_SETUP_PROG:
1862 		return virtnet_xdp_set(dev, xdp->prog);
1863 	case XDP_QUERY_PROG:
1864 		xdp->prog_attached = virtnet_xdp_query(dev);
1865 		return 0;
1866 	default:
1867 		return -EINVAL;
1868 	}
1869 }
1870 
1871 static const struct net_device_ops virtnet_netdev = {
1872 	.ndo_open            = virtnet_open,
1873 	.ndo_stop            = virtnet_close,
1874 	.ndo_start_xmit      = start_xmit,
1875 	.ndo_validate_addr   = eth_validate_addr,
1876 	.ndo_set_mac_address = virtnet_set_mac_address,
1877 	.ndo_set_rx_mode     = virtnet_set_rx_mode,
1878 	.ndo_get_stats64     = virtnet_stats,
1879 	.ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
1880 	.ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
1881 #ifdef CONFIG_NET_POLL_CONTROLLER
1882 	.ndo_poll_controller = virtnet_netpoll,
1883 #endif
1884 	.ndo_xdp		= virtnet_xdp,
1885 };
1886 
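/* Deferred handler for config-space changes: acknowledge link announcements
 * and propagate link up/down to the networking core.
 */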
1887 static void virtnet_config_changed_work(struct work_struct *work)
1888 {
1889 	struct virtnet_info *vi =
1890 		container_of(work, struct virtnet_info, config_work);
1891 	u16 v;
1892 
1893 	if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS,
1894 				 struct virtio_net_config, status, &v) < 0)
1895 		return;
1896 
1897 	if (v & VIRTIO_NET_S_ANNOUNCE) {
1898 		netdev_notify_peers(vi->dev);
1899 		virtnet_ack_link_announce(vi);
1900 	}
1901 
1902 	/* Ignore unknown (future) status bits */
1903 	v &= VIRTIO_NET_S_LINK_UP;
1904 
1905 	if (vi->status == v)
1906 		return;
1907 
1908 	vi->status = v;
1909 
1910 	if (vi->status & VIRTIO_NET_S_LINK_UP) {
1911 		netif_carrier_on(vi->dev);
1912 		netif_tx_wake_all_queues(vi->dev);
1913 	} else {
1914 		netif_carrier_off(vi->dev);
1915 		netif_tx_stop_all_queues(vi->dev);
1916 	}
1917 }
1918 
1919 static void virtnet_config_changed(struct virtio_device *vdev)
1920 {
1921 	struct virtnet_info *vi = vdev->priv;
1922 
1923 	schedule_work(&vi->config_work);
1924 }
1925 
1926 static void virtnet_free_queues(struct virtnet_info *vi)
1927 {
1928 	int i;
1929 
1930 	for (i = 0; i < vi->max_queue_pairs; i++) {
1931 		napi_hash_del(&vi->rq[i].napi);
1932 		netif_napi_del(&vi->rq[i].napi);
1933 	}
1934 
1935 	/* We called napi_hash_del() before netif_napi_del(), so we need to
1936 	 * respect an RCU grace period before freeing vi->rq.
1937 	 */
1938 	synchronize_net();
1939 
1940 	kfree(vi->rq);
1941 	kfree(vi->sq);
1942 }
1943 
1944 static void _free_receive_bufs(struct virtnet_info *vi)
1945 {
1946 	struct bpf_prog *old_prog;
1947 	int i;
1948 
1949 	for (i = 0; i < vi->max_queue_pairs; i++) {
1950 		while (vi->rq[i].pages)
1951 			__free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);
1952 
1953 		old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
1954 		RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL);
1955 		if (old_prog)
1956 			bpf_prog_put(old_prog);
1957 	}
1958 }
1959 
1960 static void free_receive_bufs(struct virtnet_info *vi)
1961 {
1962 	rtnl_lock();
1963 	_free_receive_bufs(vi);
1964 	rtnl_unlock();
1965 }
1966 
1967 static void free_receive_page_frags(struct virtnet_info *vi)
1968 {
1969 	int i;
1970 	for (i = 0; i < vi->max_queue_pairs; i++)
1971 		if (vi->rq[i].alloc_frag.page)
1972 			put_page(vi->rq[i].alloc_frag.page);
1973 }
1974 
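/* The last vi->xdp_queue_pairs of the active send queues are reserved for
 * XDP_TX; buffers queued there are raw pages rather than sk_buffs.
 */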
1975 static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
1976 {
1977 	if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
1978 		return false;
1979 	else if (q < vi->curr_queue_pairs)
1980 		return true;
1981 	else
1982 		return false;
1983 }
1984 
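/* Detach and free any buffers still sitting in the virtqueues before they
 * are deleted: sk_buffs on normal send queues, raw pages on XDP send queues,
 * and pages or page fragments on receive queues depending on the buffer mode.
 */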
1985 static void free_unused_bufs(struct virtnet_info *vi)
1986 {
1987 	void *buf;
1988 	int i;
1989 
1990 	for (i = 0; i < vi->max_queue_pairs; i++) {
1991 		struct virtqueue *vq = vi->sq[i].vq;
1992 		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
1993 			if (!is_xdp_raw_buffer_queue(vi, i))
1994 				dev_kfree_skb(buf);
1995 			else
1996 				put_page(virt_to_head_page(buf));
1997 		}
1998 	}
1999 
2000 	for (i = 0; i < vi->max_queue_pairs; i++) {
2001 		struct virtqueue *vq = vi->rq[i].vq;
2002 
2003 		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
2004 			if (vi->mergeable_rx_bufs) {
2005 				put_page(virt_to_head_page(buf));
2006 			} else if (vi->big_packets) {
2007 				give_pages(&vi->rq[i], buf);
2008 			} else {
2009 				put_page(virt_to_head_page(buf));
2010 			}
2011 		}
2012 	}
2013 }
2014 
2015 static void virtnet_del_vqs(struct virtnet_info *vi)
2016 {
2017 	struct virtio_device *vdev = vi->vdev;
2018 
2019 	virtnet_clean_affinity(vi, -1);
2020 
2021 	vdev->config->del_vqs(vdev);
2022 
2023 	virtnet_free_queues(vi);
2024 }
2025 
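/* Build the find_vqs() parameter arrays (callbacks, names, per-vq context),
 * ask the transport for all RX/TX virtqueues plus the optional control
 * virtqueue, and wire the results into vi->rq, vi->sq and vi->cvq.
 */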
2026 static int virtnet_find_vqs(struct virtnet_info *vi)
2027 {
2028 	vq_callback_t **callbacks;
2029 	struct virtqueue **vqs;
2030 	int ret = -ENOMEM;
2031 	int i, total_vqs;
2032 	const char **names;
2033 	bool *ctx;
2034 
2035 	/* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
2036 	 * possibly N-1 more RX/TX queue pairs used in multiqueue mode,
2037 	 * followed by a possible control virtqueue.
2038 	 */
2039 	total_vqs = vi->max_queue_pairs * 2 +
2040 		    virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ);
2041 
2042 	/* Allocate space for find_vqs parameters */
2043 	vqs = kzalloc(total_vqs * sizeof(*vqs), GFP_KERNEL);
2044 	if (!vqs)
2045 		goto err_vq;
2046 	callbacks = kmalloc(total_vqs * sizeof(*callbacks), GFP_KERNEL);
2047 	if (!callbacks)
2048 		goto err_callback;
2049 	names = kmalloc(total_vqs * sizeof(*names), GFP_KERNEL);
2050 	if (!names)
2051 		goto err_names;
2052 	if (vi->mergeable_rx_bufs) {
2053 		ctx = kzalloc(total_vqs * sizeof(*ctx), GFP_KERNEL);
2054 		if (!ctx)
2055 			goto err_ctx;
2056 	} else {
2057 		ctx = NULL;
2058 	}
2059 
2060 	/* Parameters for control virtqueue, if any */
2061 	if (vi->has_cvq) {
2062 		callbacks[total_vqs - 1] = NULL;
2063 		names[total_vqs - 1] = "control";
2064 	}
2065 
2066 	/* Allocate/initialize parameters for send/receive virtqueues */
2067 	for (i = 0; i < vi->max_queue_pairs; i++) {
2068 		callbacks[rxq2vq(i)] = skb_recv_done;
2069 		callbacks[txq2vq(i)] = skb_xmit_done;
2070 		sprintf(vi->rq[i].name, "input.%d", i);
2071 		sprintf(vi->sq[i].name, "output.%d", i);
2072 		names[rxq2vq(i)] = vi->rq[i].name;
2073 		names[txq2vq(i)] = vi->sq[i].name;
2074 		if (ctx)
2075 			ctx[rxq2vq(i)] = true;
2076 	}
2077 
2078 	ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks,
2079 					 names, ctx, NULL);
2080 	if (ret)
2081 		goto err_find;
2082 
2083 	if (vi->has_cvq) {
2084 		vi->cvq = vqs[total_vqs - 1];
2085 		if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
2086 			vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
2087 	}
2088 
2089 	for (i = 0; i < vi->max_queue_pairs; i++) {
2090 		vi->rq[i].vq = vqs[rxq2vq(i)];
2091 		vi->sq[i].vq = vqs[txq2vq(i)];
2092 	}
2093 
2094 	kfree(names);
2095 	kfree(callbacks);
2096 	kfree(vqs);
2097 
2098 	return 0;
2099 
2100 err_find:
2101 	kfree(ctx);
2102 err_ctx:
2103 	kfree(names);
2104 err_names:
2105 	kfree(callbacks);
2106 err_callback:
2107 	kfree(vqs);
2108 err_vq:
2109 	return ret;
2110 }
2111 
2112 static int virtnet_alloc_queues(struct virtnet_info *vi)
2113 {
2114 	int i;
2115 
2116 	vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL);
2117 	if (!vi->sq)
2118 		goto err_sq;
2119 	vi->rq = kzalloc(sizeof(*vi->rq) * vi->max_queue_pairs, GFP_KERNEL);
2120 	if (!vi->rq)
2121 		goto err_rq;
2122 
2123 	INIT_DELAYED_WORK(&vi->refill, refill_work);
2124 	for (i = 0; i < vi->max_queue_pairs; i++) {
2125 		vi->rq[i].pages = NULL;
2126 		netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
2127 			       napi_weight);
2128 
2129 		sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
2130 		ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
2131 		sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
2132 	}
2133 
2134 	return 0;
2135 
2136 err_rq:
2137 	kfree(vi->sq);
2138 err_sq:
2139 	return -ENOMEM;
2140 }
2141 
2142 static int init_vqs(struct virtnet_info *vi)
2143 {
2144 	int ret;
2145 
2146 	/* Allocate send & receive queues */
2147 	ret = virtnet_alloc_queues(vi);
2148 	if (ret)
2149 		goto err;
2150 
2151 	ret = virtnet_find_vqs(vi);
2152 	if (ret)
2153 		goto err_free;
2154 
2155 	get_online_cpus();
2156 	virtnet_set_affinity(vi);
2157 	put_online_cpus();
2158 
2159 	return 0;
2160 
2161 err_free:
2162 	virtnet_free_queues(vi);
2163 err:
2164 	return ret;
2165 }
2166 
2167 #ifdef CONFIG_SYSFS
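/* Per-RX-queue sysfs attribute exposing the current EWMA-based estimate of
 * the mergeable receive buffer size.
 */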
2168 static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue,
2169 		struct rx_queue_attribute *attribute, char *buf)
2170 {
2171 	struct virtnet_info *vi = netdev_priv(queue->dev);
2172 	unsigned int queue_index = get_netdev_rx_queue_index(queue);
2173 	struct ewma_pkt_len *avg;
2174 
2175 	BUG_ON(queue_index >= vi->max_queue_pairs);
2176 	avg = &vi->rq[queue_index].mrg_avg_pkt_len;
2177 	return sprintf(buf, "%u\n", get_mergeable_buf_len(avg));
2178 }
2179 
2180 static struct rx_queue_attribute mergeable_rx_buffer_size_attribute =
2181 	__ATTR_RO(mergeable_rx_buffer_size);
2182 
2183 static struct attribute *virtio_net_mrg_rx_attrs[] = {
2184 	&mergeable_rx_buffer_size_attribute.attr,
2185 	NULL
2186 };
2187 
2188 static const struct attribute_group virtio_net_mrg_rx_group = {
2189 	.name = "virtio_net",
2190 	.attrs = virtio_net_mrg_rx_attrs
2191 };
2192 #endif
2193 
2194 static bool virtnet_fail_on_feature(struct virtio_device *vdev,
2195 				    unsigned int fbit,
2196 				    const char *fname, const char *dname)
2197 {
2198 	if (!virtio_has_feature(vdev, fbit))
2199 		return false;
2200 
2201 	dev_err(&vdev->dev, "device advertises feature %s but not %s\n",
2202 		fname, dname);
2203 
2204 	return true;
2205 }
2206 
2207 #define VIRTNET_FAIL_ON(vdev, fbit, dbit)			\
2208 	virtnet_fail_on_feature(vdev, fbit, #fbit, dbit)
2209 
2210 static bool virtnet_validate_features(struct virtio_device *vdev)
2211 {
2212 	if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) &&
2213 	    (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX,
2214 			     "VIRTIO_NET_F_CTRL_VQ") ||
2215 	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN,
2216 			     "VIRTIO_NET_F_CTRL_VQ") ||
2217 	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE,
2218 			     "VIRTIO_NET_F_CTRL_VQ") ||
2219 	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") ||
2220 	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR,
2221 			     "VIRTIO_NET_F_CTRL_VQ"))) {
2222 		return false;
2223 	}
2224 
2225 	return true;
2226 }
2227 
2228 #define MIN_MTU ETH_MIN_MTU
2229 #define MAX_MTU ETH_MAX_MTU
2230 
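/* Early validation before probe: require config space access, check feature
 * dependencies, and clear VIRTIO_NET_F_MTU if the advertised MTU is below
 * the minimum we can support.
 */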
2231 static int virtnet_validate(struct virtio_device *vdev)
2232 {
2233 	if (!vdev->config->get) {
2234 		dev_err(&vdev->dev, "%s failure: config access disabled\n",
2235 			__func__);
2236 		return -EINVAL;
2237 	}
2238 
2239 	if (!virtnet_validate_features(vdev))
2240 		return -EINVAL;
2241 
2242 	if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
2243 		int mtu = virtio_cread16(vdev,
2244 					 offsetof(struct virtio_net_config,
2245 						  mtu));
2246 		if (mtu < MIN_MTU)
2247 			__virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
2248 	}
2249 
2250 	return 0;
2251 }
2252 
2253 static int virtnet_probe(struct virtio_device *vdev)
2254 {
2255 	int i, err;
2256 	struct net_device *dev;
2257 	struct virtnet_info *vi;
2258 	u16 max_queue_pairs;
2259 	int mtu;
2260 
2261 	/* Find if host supports multiqueue virtio_net device */
2262 	err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ,
2263 				   struct virtio_net_config,
2264 				   max_virtqueue_pairs, &max_queue_pairs);
2265 
2266 	/* We need at least 2 queues */
2267 	if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
2268 	    max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
2269 	    !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
2270 		max_queue_pairs = 1;
2271 
2272 	/* Allocate ourselves a network device with room for our info */
2273 	dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs);
2274 	if (!dev)
2275 		return -ENOMEM;
2276 
2277 	/* Set up network device as normal. */
2278 	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
2279 	dev->netdev_ops = &virtnet_netdev;
2280 	dev->features = NETIF_F_HIGHDMA;
2281 
2282 	dev->ethtool_ops = &virtnet_ethtool_ops;
2283 	SET_NETDEV_DEV(dev, &vdev->dev);
2284 
2285 	/* Do we support "hardware" checksums? */
2286 	if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
2287 		/* This opens up the world of extra features. */
2288 		dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
2289 		if (csum)
2290 			dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
2291 
2292 		if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
2293 			dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
2294 				| NETIF_F_TSO_ECN | NETIF_F_TSO6;
2295 		}
2296 		/* Individual feature bits: what can host handle? */
2297 		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
2298 			dev->hw_features |= NETIF_F_TSO;
2299 		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
2300 			dev->hw_features |= NETIF_F_TSO6;
2301 		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
2302 			dev->hw_features |= NETIF_F_TSO_ECN;
2303 		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
2304 			dev->hw_features |= NETIF_F_UFO;
2305 
2306 		dev->features |= NETIF_F_GSO_ROBUST;
2307 
2308 		if (gso)
2309 			dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO);
2310 		/* (!csum && gso) case will be fixed by register_netdev() */
2311 	}
2312 	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
2313 		dev->features |= NETIF_F_RXCSUM;
2314 
2315 	dev->vlan_features = dev->features;
2316 
2317 	/* MTU range: 68 - 65535 */
2318 	dev->min_mtu = MIN_MTU;
2319 	dev->max_mtu = MAX_MTU;
2320 
2321 	/* Configuration may specify what MAC to use.  Otherwise random. */
2322 	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
2323 		virtio_cread_bytes(vdev,
2324 				   offsetof(struct virtio_net_config, mac),
2325 				   dev->dev_addr, dev->addr_len);
2326 	else
2327 		eth_hw_addr_random(dev);
2328 
2329 	/* Set up our device-specific information */
2330 	vi = netdev_priv(dev);
2331 	vi->dev = dev;
2332 	vi->vdev = vdev;
2333 	vdev->priv = vi;
2334 	vi->stats = alloc_percpu(struct virtnet_stats);
2335 	err = -ENOMEM;
2336 	if (vi->stats == NULL)
2337 		goto free;
2338 
2339 	for_each_possible_cpu(i) {
2340 		struct virtnet_stats *virtnet_stats;
2341 		virtnet_stats = per_cpu_ptr(vi->stats, i);
2342 		u64_stats_init(&virtnet_stats->tx_syncp);
2343 		u64_stats_init(&virtnet_stats->rx_syncp);
2344 	}
2345 
2346 	INIT_WORK(&vi->config_work, virtnet_config_changed_work);
2347 
2348 	/* If we can receive ANY GSO packets, we must allocate large ones. */
2349 	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
2350 	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
2351 	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
2352 	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
2353 		vi->big_packets = true;
2354 
2355 	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
2356 		vi->mergeable_rx_bufs = true;
2357 
2358 	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
2359 	    virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
2360 		vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
2361 	else
2362 		vi->hdr_len = sizeof(struct virtio_net_hdr);
2363 
2364 	if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
2365 	    virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
2366 		vi->any_header_sg = true;
2367 
2368 	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
2369 		vi->has_cvq = true;
2370 
2371 	if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
2372 		mtu = virtio_cread16(vdev,
2373 				     offsetof(struct virtio_net_config,
2374 					      mtu));
2375 		if (mtu < dev->min_mtu) {
2376 			/* Should never trigger: MTU was previously validated
2377 			 * in virtnet_validate.
2378 			 */
2379 			dev_err(&vdev->dev, "device MTU appears to have changed: "
2380 				"it is now %d < %d\n", mtu, dev->min_mtu);
2381 			goto free_stats;
2382 		}
2383 
2384 		dev->mtu = mtu;
2385 		dev->max_mtu = mtu;
2386 
2387 		/* TODO: size buffers correctly in this case. */
2388 		if (dev->mtu > ETH_DATA_LEN)
2389 			vi->big_packets = true;
2390 	}
2391 
2392 	if (vi->any_header_sg)
2393 		dev->needed_headroom = vi->hdr_len;
2394 
2395 	/* Enable multiqueue by default */
2396 	if (num_online_cpus() >= max_queue_pairs)
2397 		vi->curr_queue_pairs = max_queue_pairs;
2398 	else
2399 		vi->curr_queue_pairs = num_online_cpus();
2400 	vi->max_queue_pairs = max_queue_pairs;
2401 
2402 	/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
2403 	err = init_vqs(vi);
2404 	if (err)
2405 		goto free_stats;
2406 
2407 #ifdef CONFIG_SYSFS
2408 	if (vi->mergeable_rx_bufs)
2409 		dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group;
2410 #endif
2411 	netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
2412 	netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);
2413 
2414 	virtnet_init_settings(dev);
2415 
2416 	err = register_netdev(dev);
2417 	if (err) {
2418 		pr_debug("virtio_net: registering device failed\n");
2419 		goto free_vqs;
2420 	}
2421 
2422 	virtio_device_ready(vdev);
2423 
2424 	err = virtnet_cpu_notif_add(vi);
2425 	if (err) {
2426 		pr_debug("virtio_net: registering cpu notifier failed\n");
2427 		goto free_unregister_netdev;
2428 	}
2429 
2430 	virtnet_set_queues(vi, vi->curr_queue_pairs);
2431 
2432 	/* Assume link up if device can't report link status,
2433 	 * otherwise get link status from config. */
2434 	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
2435 		netif_carrier_off(dev);
2436 		schedule_work(&vi->config_work);
2437 	} else {
2438 		vi->status = VIRTIO_NET_S_LINK_UP;
2439 		netif_carrier_on(dev);
2440 	}
2441 
2442 	pr_debug("virtnet: registered device %s with %d RX and TX vqs\n",
2443 		 dev->name, max_queue_pairs);
2444 
2445 	return 0;
2446 
2447 free_unregister_netdev:
2448 	vi->vdev->config->reset(vdev);
2449 
2450 	unregister_netdev(dev);
2451 free_vqs:
2452 	cancel_delayed_work_sync(&vi->refill);
2453 	free_receive_page_frags(vi);
2454 	virtnet_del_vqs(vi);
2455 free_stats:
2456 	free_percpu(vi->stats);
2457 free:
2458 	free_netdev(dev);
2459 	return err;
2460 }
2461 
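/* Variant of remove_vq_common() for the XDP reset path, where the caller
 * already holds RTNL: it uses _free_receive_bufs() instead of taking the
 * lock again.
 */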
2462 static void _remove_vq_common(struct virtnet_info *vi)
2463 {
2464 	vi->vdev->config->reset(vi->vdev);
2465 	free_unused_bufs(vi);
2466 	_free_receive_bufs(vi);
2467 	free_receive_page_frags(vi);
2468 	virtnet_del_vqs(vi);
2469 }
2470 
2471 static void remove_vq_common(struct virtnet_info *vi)
2472 {
2473 	vi->vdev->config->reset(vi->vdev);
2474 
2475 	/* Free unused buffers in both send and recv, if any. */
2476 	free_unused_bufs(vi);
2477 
2478 	free_receive_bufs(vi);
2479 
2480 	free_receive_page_frags(vi);
2481 
2482 	virtnet_del_vqs(vi);
2483 }
2484 
2485 static void virtnet_remove(struct virtio_device *vdev)
2486 {
2487 	struct virtnet_info *vi = vdev->priv;
2488 
2489 	virtnet_cpu_notif_remove(vi);
2490 
2491 	/* Make sure no work handler is accessing the device. */
2492 	flush_work(&vi->config_work);
2493 
2494 	unregister_netdev(vi->dev);
2495 
2496 	remove_vq_common(vi);
2497 
2498 	free_percpu(vi->stats);
2499 	free_netdev(vi->dev);
2500 }
2501 
2502 #ifdef CONFIG_PM_SLEEP
2503 static int virtnet_freeze(struct virtio_device *vdev)
2504 {
2505 	struct virtnet_info *vi = vdev->priv;
2506 
2507 	virtnet_cpu_notif_remove(vi);
2508 	virtnet_freeze_down(vdev);
2509 	remove_vq_common(vi);
2510 
2511 	return 0;
2512 }
2513 
2514 static int virtnet_restore(struct virtio_device *vdev)
2515 {
2516 	struct virtnet_info *vi = vdev->priv;
2517 	int err;
2518 
2519 	err = virtnet_restore_up(vdev);
2520 	if (err)
2521 		return err;
2522 	virtnet_set_queues(vi, vi->curr_queue_pairs);
2523 
2524 	err = virtnet_cpu_notif_add(vi);
2525 	if (err)
2526 		return err;
2527 
2528 	return 0;
2529 }
2530 #endif
2531 
2532 static struct virtio_device_id id_table[] = {
2533 	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
2534 	{ 0 },
2535 };
2536 
2537 #define VIRTNET_FEATURES \
2538 	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \
2539 	VIRTIO_NET_F_MAC, \
2540 	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \
2541 	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \
2542 	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \
2543 	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \
2544 	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \
2545 	VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
2546 	VIRTIO_NET_F_CTRL_MAC_ADDR, \
2547 	VIRTIO_NET_F_MTU
2548 
2549 static unsigned int features[] = {
2550 	VIRTNET_FEATURES,
2551 };
2552 
2553 static unsigned int features_legacy[] = {
2554 	VIRTNET_FEATURES,
2555 	VIRTIO_NET_F_GSO,
2556 	VIRTIO_F_ANY_LAYOUT,
2557 };
2558 
2559 static struct virtio_driver virtio_net_driver = {
2560 	.feature_table = features,
2561 	.feature_table_size = ARRAY_SIZE(features),
2562 	.feature_table_legacy = features_legacy,
2563 	.feature_table_size_legacy = ARRAY_SIZE(features_legacy),
2564 	.driver.name =	KBUILD_MODNAME,
2565 	.driver.owner =	THIS_MODULE,
2566 	.id_table =	id_table,
2567 	.validate =	virtnet_validate,
2568 	.probe =	virtnet_probe,
2569 	.remove =	virtnet_remove,
2570 	.config_changed = virtnet_config_changed,
2571 #ifdef CONFIG_PM_SLEEP
2572 	.freeze =	virtnet_freeze,
2573 	.restore =	virtnet_restore,
2574 #endif
2575 };
2576 
2577 static __init int virtio_net_driver_init(void)
2578 {
2579 	int ret;
2580 
2581 	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "virtio/net:online",
2582 				      virtnet_cpu_online,
2583 				      virtnet_cpu_down_prep);
2584 	if (ret < 0)
2585 		goto out;
2586 	virtionet_online = ret;
2587 	ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "virtio/net:dead",
2588 				      NULL, virtnet_cpu_dead);
2589 	if (ret)
2590 		goto err_dead;
2591 
2592 	ret = register_virtio_driver(&virtio_net_driver);
2593 	if (ret)
2594 		goto err_virtio;
2595 	return 0;
2596 err_virtio:
2597 	cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
2598 err_dead:
2599 	cpuhp_remove_multi_state(virtionet_online);
2600 out:
2601 	return ret;
2602 }
2603 module_init(virtio_net_driver_init);
2604 
2605 static __exit void virtio_net_driver_exit(void)
2606 {
2607 	cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
2608 	cpuhp_remove_multi_state(virtionet_online);
2609 	unregister_virtio_driver(&virtio_net_driver);
2610 }
2611 module_exit(virtio_net_driver_exit);
2612 
2613 MODULE_DEVICE_TABLE(virtio, id_table);
2614 MODULE_DESCRIPTION("Virtio network driver");
2615 MODULE_LICENSE("GPL");
2616