xref: /openbmc/linux/drivers/net/virtio_net.c (revision 2ffa75988fff39741e60141ce4a349e2419b41e6)
1 /* A network driver using virtio.
2  *
3  * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation; either version 2 of the License, or
8  * (at your option) any later version.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program; if not, see <http://www.gnu.org/licenses/>.
17  */
18 //#define DEBUG
19 #include <linux/netdevice.h>
20 #include <linux/etherdevice.h>
21 #include <linux/ethtool.h>
22 #include <linux/module.h>
23 #include <linux/virtio.h>
24 #include <linux/virtio_net.h>
25 #include <linux/scatterlist.h>
26 #include <linux/if_vlan.h>
27 #include <linux/slab.h>
28 #include <linux/cpu.h>
29 #include <linux/average.h>
30 
31 static int napi_weight = NAPI_POLL_WEIGHT;
32 module_param(napi_weight, int, 0444);
33 
34 static bool csum = true, gso = true;
35 module_param(csum, bool, 0444);
36 module_param(gso, bool, 0444);
37 
38 /* FIXME: MTU in config. */
39 #define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
40 #define GOOD_COPY_LEN	128
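/* GOOD_PACKET_LEN works out to ETH_HLEN (14) + VLAN_HLEN (4) +
 * ETH_DATA_LEN (1500) = 1518 bytes.  GOOD_COPY_LEN is roughly how much of
 * a received packet page_to_skb() will copy into the skb linear area;
 * anything beyond that stays in the page and is attached as fragments.
 */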
41 
42 /* Weight used for the RX packet size EWMA. The average packet size is used to
43  * determine the packet buffer size when refilling RX rings. As the entire RX
44  * ring may be refilled at once, the weight is chosen so that the EWMA will be
45  * insensitive to short-term, transient changes in packet size.
46  */
47 #define RECEIVE_AVG_WEIGHT 64
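/* With a weight of 64, one sample moves the average by roughly 1/64 of its
 * distance from the current value: a burst of 10 odd-sized packets shifts
 * the EWMA by only ~15% ((63/64)^10 ~= 0.85), while a few hundred samples
 * ((63/64)^256 ~= 0.02) replace it almost entirely.
 */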
48 
49 /* Minimum alignment for mergeable packet buffers. */
50 #define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)
51 
52 #define VIRTNET_DRIVER_VERSION "1.0.0"
53 
54 struct virtnet_stats {
55 	struct u64_stats_sync tx_syncp;
56 	struct u64_stats_sync rx_syncp;
57 	u64 tx_bytes;
58 	u64 tx_packets;
59 
60 	u64 rx_bytes;
61 	u64 rx_packets;
62 };
63 
64 /* Internal representation of a send virtqueue */
65 struct send_queue {
66 	/* Virtqueue associated with this send_queue */
67 	struct virtqueue *vq;
68 
69 	/* TX: fragments + linear part + virtio header */
70 	struct scatterlist sg[MAX_SKB_FRAGS + 2];
71 
72 	/* Name of the send queue: output.$index */
73 	char name[40];
74 };
75 
76 /* Internal representation of a receive virtqueue */
77 struct receive_queue {
78 	/* Virtqueue associated with this receive_queue */
79 	struct virtqueue *vq;
80 
81 	struct napi_struct napi;
82 
83 	/* Chain pages by the private ptr. */
84 	struct page *pages;
85 
86 	/* Average packet length for mergeable receive buffers. */
87 	struct ewma mrg_avg_pkt_len;
88 
89 	/* Page frag for packet buffer allocation. */
90 	struct page_frag alloc_frag;
91 
92 	/* RX: fragments + linear part + virtio header */
93 	struct scatterlist sg[MAX_SKB_FRAGS + 2];
94 
95 	/* Name of this receive queue: input.$index */
96 	char name[40];
97 };
98 
99 struct virtnet_info {
100 	struct virtio_device *vdev;
101 	struct virtqueue *cvq;
102 	struct net_device *dev;
103 	struct send_queue *sq;
104 	struct receive_queue *rq;
105 	unsigned int status;
106 
107 	/* Max # of queue pairs supported by the device */
108 	u16 max_queue_pairs;
109 
110 	/* # of queue pairs currently used by the driver */
111 	u16 curr_queue_pairs;
112 
113 	/* I like... big packets and I cannot lie! */
114 	bool big_packets;
115 
116 	/* Host will merge rx buffers for big packets (shake it! shake it!) */
117 	bool mergeable_rx_bufs;
118 
119 	/* Has control virtqueue */
120 	bool has_cvq;
121 
122 	/* Host can handle any s/g split between our header and packet data */
123 	bool any_header_sg;
124 
125 	/* enable config space updates */
126 	bool config_enable;
127 
128 	/* Active statistics */
129 	struct virtnet_stats __percpu *stats;
130 
131 	/* Work struct for refilling if we run low on memory. */
132 	struct delayed_work refill;
133 
134 	/* Work struct for config space updates */
135 	struct work_struct config_work;
136 
137 	/* Lock for config space updates */
138 	struct mutex config_lock;
139 
140 	/* Is the affinity hint set for virtqueues? */
141 	bool affinity_hint_set;
142 
143 	/* CPU hot plug notifier */
144 	struct notifier_block nb;
145 };
146 
147 struct skb_vnet_hdr {
148 	union {
149 		struct virtio_net_hdr hdr;
150 		struct virtio_net_hdr_mrg_rxbuf mhdr;
151 	};
152 };
153 
154 struct padded_vnet_hdr {
155 	struct virtio_net_hdr hdr;
156 	/*
157 	 * virtio_net_hdr should be in a separate sg buffer because of a
158 	 * QEMU bug, and the data sg buffer shares the same page with this
159 	 * header sg.  The padding makes the next sg 16-byte aligned.
160 	 */
161 	char padding[6];
162 };
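
/* sizeof(struct virtio_net_hdr) is 10 bytes, so the 6 bytes of padding
 * above round the header up to 16 bytes, which is what makes the data sg
 * that follows it in the same page 16-byte aligned.
 */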
163 
164 /* Converting between virtqueue no. and kernel tx/rx queue no.
165  * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
166  */
167 static int vq2txq(struct virtqueue *vq)
168 {
169 	return (vq->index - 1) / 2;
170 }
171 
172 static int txq2vq(int txq)
173 {
174 	return txq * 2 + 1;
175 }
176 
177 static int vq2rxq(struct virtqueue *vq)
178 {
179 	return vq->index / 2;
180 }
181 
182 static int rxq2vq(int rxq)
183 {
184 	return rxq * 2;
185 }
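
/* For example, with 2 queue pairs plus a control vq, find_vqs() lays out
 * 5 vqs as 0:rx0 1:tx0 2:rx1 3:tx1 4:cvq, so vq2txq() on vq index 1
 * returns txq 0 and rxq2vq(1) returns vq index 2.
 */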
186 
187 static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb)
188 {
189 	return (struct skb_vnet_hdr *)skb->cb;
190 }
191 
192 /*
193  * page->private is used to chain pages for big packets; put the whole
194  * most recently used list at the beginning for reuse
195  */
196 static void give_pages(struct receive_queue *rq, struct page *page)
197 {
198 	struct page *end;
199 
200 	/* Find end of list, sew whole thing into vi->rq.pages. */
201 	for (end = page; end->private; end = (struct page *)end->private);
202 	end->private = (unsigned long)rq->pages;
203 	rq->pages = page;
204 }
205 
206 static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
207 {
208 	struct page *p = rq->pages;
209 
210 	if (p) {
211 		rq->pages = (struct page *)p->private;
212 		/* clear private here; it is used to chain pages */
213 		p->private = 0;
214 	} else
215 		p = alloc_page(gfp_mask);
216 	return p;
217 }
218 
219 static void skb_xmit_done(struct virtqueue *vq)
220 {
221 	struct virtnet_info *vi = vq->vdev->priv;
222 
223 	/* Suppress further interrupts. */
224 	virtqueue_disable_cb(vq);
225 
226 	/* We were probably waiting for more output buffers. */
227 	netif_wake_subqueue(vi->dev, vq2txq(vq));
228 }
229 
230 static unsigned int mergeable_ctx_to_buf_truesize(unsigned long mrg_ctx)
231 {
232 	unsigned int truesize = mrg_ctx & (MERGEABLE_BUFFER_ALIGN - 1);
233 	return (truesize + 1) * MERGEABLE_BUFFER_ALIGN;
234 }
235 
236 static void *mergeable_ctx_to_buf_address(unsigned long mrg_ctx)
237 {
238 	return (void *)(mrg_ctx & -MERGEABLE_BUFFER_ALIGN);
239 
240 }
241 
242 static unsigned long mergeable_buf_to_ctx(void *buf, unsigned int truesize)
243 {
244 	unsigned int size = truesize / MERGEABLE_BUFFER_ALIGN;
245 	return (unsigned long)buf | (size - 1);
246 }
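
/* A worked example of the encoding above, assuming MERGEABLE_BUFFER_ALIGN
 * is 256 (i.e. L1_CACHE_BYTES <= 256): buffers are always 256-byte
 * aligned, so the low 8 bits of the address are free to carry
 * (truesize / 256) - 1.  For buf == 0x...f000 and truesize == 1536:
 *
 *	mergeable_buf_to_ctx(buf, 1536)			== 0x...f005
 *	mergeable_ctx_to_buf_address(0x...f005)		== 0x...f000
 *	mergeable_ctx_to_buf_truesize(0x...f005)	== (5 + 1) * 256
 *
 * which also caps the largest encodable truesize at 256 * 256 == 64 KiB.
 */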
247 
248 /* Called from bottom half context */
249 static struct sk_buff *page_to_skb(struct receive_queue *rq,
250 				   struct page *page, unsigned int offset,
251 				   unsigned int len, unsigned int truesize)
252 {
253 	struct virtnet_info *vi = rq->vq->vdev->priv;
254 	struct sk_buff *skb;
255 	struct skb_vnet_hdr *hdr;
256 	unsigned int copy, hdr_len, hdr_padded_len;
257 	char *p;
258 
259 	p = page_address(page) + offset;
260 
261 	/* copy small packet so we can reuse these pages for small data */
262 	skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
263 	if (unlikely(!skb))
264 		return NULL;
265 
266 	hdr = skb_vnet_hdr(skb);
267 
268 	if (vi->mergeable_rx_bufs) {
269 		hdr_len = sizeof hdr->mhdr;
270 		hdr_padded_len = sizeof hdr->mhdr;
271 	} else {
272 		hdr_len = sizeof hdr->hdr;
273 		hdr_padded_len = sizeof(struct padded_vnet_hdr);
274 	}
275 
276 	memcpy(hdr, p, hdr_len);
277 
278 	len -= hdr_len;
279 	offset += hdr_padded_len;
280 	p += hdr_padded_len;
281 
282 	copy = len;
283 	if (copy > skb_tailroom(skb))
284 		copy = skb_tailroom(skb);
285 	memcpy(skb_put(skb, copy), p, copy);
286 
287 	len -= copy;
288 	offset += copy;
289 
290 	if (vi->mergeable_rx_bufs) {
291 		if (len)
292 			skb_add_rx_frag(skb, 0, page, offset, len, truesize);
293 		else
294 			put_page(page);
295 		return skb;
296 	}
297 
298 	/*
299 	 * Verify that we can indeed put this data into a skb.
300 	 * This is here to handle cases when the device erroneously
301 	 * tries to receive more than is possible.  This is usually
302 	 * the sign of a broken device.
303 	 */
304 	if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
305 		net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
306 		dev_kfree_skb(skb);
307 		return NULL;
308 	}
309 	BUG_ON(offset >= PAGE_SIZE);
310 	while (len) {
311 		unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
312 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
313 				frag_size, truesize);
314 		len -= frag_size;
315 		page = (struct page *)page->private;
316 		offset = 0;
317 	}
318 
319 	if (page)
320 		give_pages(rq, page);
321 
322 	return skb;
323 }
324 
325 static struct sk_buff *receive_small(void *buf, unsigned int len)
326 {
327 	struct sk_buff *skb = buf;
328 
329 	len -= sizeof(struct virtio_net_hdr);
330 	skb_trim(skb, len);
331 
332 	return skb;
333 }
334 
335 static struct sk_buff *receive_big(struct net_device *dev,
336 				   struct receive_queue *rq,
337 				   void *buf,
338 				   unsigned int len)
339 {
340 	struct page *page = buf;
341 	struct sk_buff *skb = page_to_skb(rq, page, 0, len, PAGE_SIZE);
342 
343 	if (unlikely(!skb))
344 		goto err;
345 
346 	return skb;
347 
348 err:
349 	dev->stats.rx_dropped++;
350 	give_pages(rq, page);
351 	return NULL;
352 }
353 
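/* Reassemble one merged packet from its num_buffers buffers.  The first
 * buffer becomes head_skb; once an skb runs out of fragment slots
 * (MAX_SKB_FRAGS), the remaining buffers go into fresh zero-length skbs
 * chained off head_skb->frag_list, with head_skb's len, data_len and
 * truesize updated to cover the whole chain.
 */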
354 static struct sk_buff *receive_mergeable(struct net_device *dev,
355 					 struct receive_queue *rq,
356 					 unsigned long ctx,
357 					 unsigned int len)
358 {
359 	void *buf = mergeable_ctx_to_buf_address(ctx);
360 	struct skb_vnet_hdr *hdr = buf;
361 	int num_buf = hdr->mhdr.num_buffers;
362 	struct page *page = virt_to_head_page(buf);
363 	int offset = buf - page_address(page);
364 	unsigned int truesize = max(len, mergeable_ctx_to_buf_truesize(ctx));
365 
366 	struct sk_buff *head_skb = page_to_skb(rq, page, offset, len, truesize);
367 	struct sk_buff *curr_skb = head_skb;
368 
369 	if (unlikely(!curr_skb))
370 		goto err_skb;
371 	while (--num_buf) {
372 		int num_skb_frags;
373 
374 		ctx = (unsigned long)virtqueue_get_buf(rq->vq, &len);
375 		if (unlikely(!ctx)) {
376 			pr_debug("%s: rx error: %d buffers out of %d missing\n",
377 				 dev->name, num_buf, hdr->mhdr.num_buffers);
378 			dev->stats.rx_length_errors++;
379 			goto err_buf;
380 		}
381 
382 		buf = mergeable_ctx_to_buf_address(ctx);
383 		page = virt_to_head_page(buf);
384 
385 		num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
386 		if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
387 			struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
388 
389 			if (unlikely(!nskb))
390 				goto err_skb;
391 			if (curr_skb == head_skb)
392 				skb_shinfo(curr_skb)->frag_list = nskb;
393 			else
394 				curr_skb->next = nskb;
395 			curr_skb = nskb;
396 			head_skb->truesize += nskb->truesize;
397 			num_skb_frags = 0;
398 		}
399 		truesize = max(len, mergeable_ctx_to_buf_truesize(ctx));
400 		if (curr_skb != head_skb) {
401 			head_skb->data_len += len;
402 			head_skb->len += len;
403 			head_skb->truesize += truesize;
404 		}
405 		offset = buf - page_address(page);
406 		if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
407 			put_page(page);
408 			skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
409 					     len, truesize);
410 		} else {
411 			skb_add_rx_frag(curr_skb, num_skb_frags, page,
412 					offset, len, truesize);
413 		}
414 	}
415 
416 	ewma_add(&rq->mrg_avg_pkt_len, head_skb->len);
417 	return head_skb;
418 
419 err_skb:
420 	put_page(page);
421 	while (--num_buf) {
422 		ctx = (unsigned long)virtqueue_get_buf(rq->vq, &len);
423 		if (unlikely(!ctx)) {
424 			pr_debug("%s: rx error: %d buffers missing\n",
425 				 dev->name, num_buf);
426 			dev->stats.rx_length_errors++;
427 			break;
428 		}
429 		page = virt_to_head_page(mergeable_ctx_to_buf_address(ctx));
430 		put_page(page);
431 	}
432 err_buf:
433 	dev->stats.rx_dropped++;
434 	dev_kfree_skb(head_skb);
435 	return NULL;
436 }
437 
438 static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
439 {
440 	struct virtnet_info *vi = rq->vq->vdev->priv;
441 	struct net_device *dev = vi->dev;
442 	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
443 	struct sk_buff *skb;
444 	struct skb_vnet_hdr *hdr;
445 
446 	if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
447 		pr_debug("%s: short packet %i\n", dev->name, len);
448 		dev->stats.rx_length_errors++;
449 		if (vi->mergeable_rx_bufs) {
450 			unsigned long ctx = (unsigned long)buf;
451 			void *base = mergeable_ctx_to_buf_address(ctx);
452 			put_page(virt_to_head_page(base));
453 		} else if (vi->big_packets) {
454 			give_pages(rq, buf);
455 		} else {
456 			dev_kfree_skb(buf);
457 		}
458 		return;
459 	}
460 
461 	if (vi->mergeable_rx_bufs)
462 		skb = receive_mergeable(dev, rq, (unsigned long)buf, len);
463 	else if (vi->big_packets)
464 		skb = receive_big(dev, rq, buf, len);
465 	else
466 		skb = receive_small(buf, len);
467 
468 	if (unlikely(!skb))
469 		return;
470 
471 	hdr = skb_vnet_hdr(skb);
472 
473 	u64_stats_update_begin(&stats->rx_syncp);
474 	stats->rx_bytes += skb->len;
475 	stats->rx_packets++;
476 	u64_stats_update_end(&stats->rx_syncp);
477 
478 	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
479 		pr_debug("Needs csum!\n");
480 		if (!skb_partial_csum_set(skb,
481 					  hdr->hdr.csum_start,
482 					  hdr->hdr.csum_offset))
483 			goto frame_err;
484 	} else if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) {
485 		skb->ip_summed = CHECKSUM_UNNECESSARY;
486 	}
487 
488 	skb->protocol = eth_type_trans(skb, dev);
489 	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
490 		 ntohs(skb->protocol), skb->len, skb->pkt_type);
491 
492 	if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
493 		pr_debug("GSO!\n");
494 		switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
495 		case VIRTIO_NET_HDR_GSO_TCPV4:
496 			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
497 			break;
498 		case VIRTIO_NET_HDR_GSO_UDP:
499 			skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
500 			break;
501 		case VIRTIO_NET_HDR_GSO_TCPV6:
502 			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
503 			break;
504 		default:
505 			net_warn_ratelimited("%s: bad gso type %u.\n",
506 					     dev->name, hdr->hdr.gso_type);
507 			goto frame_err;
508 		}
509 
510 		if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
511 			skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
512 
513 		skb_shinfo(skb)->gso_size = hdr->hdr.gso_size;
514 		if (skb_shinfo(skb)->gso_size == 0) {
515 			net_warn_ratelimited("%s: zero gso size.\n", dev->name);
516 			goto frame_err;
517 		}
518 
519 		/* Header must be checked, and gso_segs computed. */
520 		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
521 		skb_shinfo(skb)->gso_segs = 0;
522 	}
523 
524 	netif_receive_skb(skb);
525 	return;
526 
527 frame_err:
528 	dev->stats.rx_frame_errors++;
529 	dev_kfree_skb(skb);
530 }
531 
532 static int add_recvbuf_small(struct receive_queue *rq, gfp_t gfp)
533 {
534 	struct virtnet_info *vi = rq->vq->vdev->priv;
535 	struct sk_buff *skb;
536 	struct skb_vnet_hdr *hdr;
537 	int err;
538 
539 	skb = __netdev_alloc_skb_ip_align(vi->dev, GOOD_PACKET_LEN, gfp);
540 	if (unlikely(!skb))
541 		return -ENOMEM;
542 
543 	skb_put(skb, GOOD_PACKET_LEN);
544 
545 	hdr = skb_vnet_hdr(skb);
546 	sg_set_buf(rq->sg, &hdr->hdr, sizeof hdr->hdr);
547 
548 	skb_to_sgvec(skb, rq->sg + 1, 0, skb->len);
549 
550 	err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp);
551 	if (err < 0)
552 		dev_kfree_skb(skb);
553 
554 	return err;
555 }
556 
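/* Post one "big" receive buffer: a chain of MAX_SKB_FRAGS + 1 pages.
 * sg[0] covers just the 10-byte virtio_net_hdr and sg[1] the rest of the
 * first page (from offset 16, see struct padded_vnet_hdr); sg[2] onwards
 * each cover one whole page, so the buffer holds roughly MAX_SKB_FRAGS
 * pages of packet data.
 */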
557 static int add_recvbuf_big(struct receive_queue *rq, gfp_t gfp)
558 {
559 	struct page *first, *list = NULL;
560 	char *p;
561 	int i, err, offset;
562 
563 	/* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */
564 	for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
565 		first = get_a_page(rq, gfp);
566 		if (!first) {
567 			if (list)
568 				give_pages(rq, list);
569 			return -ENOMEM;
570 		}
571 		sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);
572 
573 		/* chain new page in list head to match sg */
574 		first->private = (unsigned long)list;
575 		list = first;
576 	}
577 
578 	first = get_a_page(rq, gfp);
579 	if (!first) {
580 		give_pages(rq, list);
581 		return -ENOMEM;
582 	}
583 	p = page_address(first);
584 
585 	/* rq->sg[0], rq->sg[1] share the same page */
586 	/* a separate rq->sg[0] for virtio_net_hdr only, due to a QEMU bug */
587 	sg_set_buf(&rq->sg[0], p, sizeof(struct virtio_net_hdr));
588 
589 	/* rq->sg[1] for data packet, from offset */
590 	offset = sizeof(struct padded_vnet_hdr);
591 	sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);
592 
593 	/* chain first in list head */
594 	first->private = (unsigned long)list;
595 	err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2,
596 				  first, gfp);
597 	if (err < 0)
598 		give_pages(rq, first);
599 
600 	return err;
601 }
602 
603 static unsigned int get_mergeable_buf_len(struct ewma *avg_pkt_len)
604 {
605 	const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
606 	unsigned int len;
607 
608 	len = hdr_len + clamp_t(unsigned int, ewma_read(avg_pkt_len),
609 			GOOD_PACKET_LEN, PAGE_SIZE - hdr_len);
610 	return ALIGN(len, MERGEABLE_BUFFER_ALIGN);
611 }
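
/* A worked example, assuming a 4 KiB PAGE_SIZE: the merge-rxbuf header is
 * 12 bytes, so with the EWMA sitting at its 1518-byte GOOD_PACKET_LEN
 * floor this returns ALIGN(12 + 1518, 256) = 1536, while sustained
 * full-page GSO traffic clamps it at 12 + (4096 - 12) = 4096 bytes.
 */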
612 
613 static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp)
614 {
615 	struct page_frag *alloc_frag = &rq->alloc_frag;
616 	char *buf;
617 	unsigned long ctx;
618 	int err;
619 	unsigned int len, hole;
620 
621 	len = get_mergeable_buf_len(&rq->mrg_avg_pkt_len);
622 	if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp)))
623 		return -ENOMEM;
624 
625 	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
626 	ctx = mergeable_buf_to_ctx(buf, len);
627 	get_page(alloc_frag->page);
628 	alloc_frag->offset += len;
629 	hole = alloc_frag->size - alloc_frag->offset;
630 	if (hole < len) {
631 		/* To avoid internal fragmentation, if there is very likely not
632 		 * enough space for another buffer, add the remaining space to
633 		 * the current buffer. This extra space is not included in
634 		 * the truesize stored in ctx.
635 		 */
636 		len += hole;
637 		alloc_frag->offset += hole;
638 	}
639 
640 	sg_init_one(rq->sg, buf, len);
641 	err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, (void *)ctx, gfp);
642 	if (err < 0)
643 		put_page(virt_to_head_page(buf));
644 
645 	return err;
646 }
647 
648 /*
649  * Returns false if we couldn't fill entirely (OOM).
650  *
651  * Normally run in the receive path, but can also be run from ndo_open
652  * before we're receiving packets, or from refill_work which is
653  * careful to disable receiving (using napi_disable).
654  */
655 static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp)
656 {
657 	struct virtnet_info *vi = rq->vq->vdev->priv;
658 	int err;
659 	bool oom;
660 
661 	gfp |= __GFP_COLD;
662 	do {
663 		if (vi->mergeable_rx_bufs)
664 			err = add_recvbuf_mergeable(rq, gfp);
665 		else if (vi->big_packets)
666 			err = add_recvbuf_big(rq, gfp);
667 		else
668 			err = add_recvbuf_small(rq, gfp);
669 
670 		oom = err == -ENOMEM;
671 		if (err)
672 			break;
673 	} while (rq->vq->num_free);
674 	virtqueue_kick(rq->vq);
675 	return !oom;
676 }
677 
678 static void skb_recv_done(struct virtqueue *rvq)
679 {
680 	struct virtnet_info *vi = rvq->vdev->priv;
681 	struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];
682 
683 	/* Schedule NAPI; suppress further interrupts if successful. */
684 	if (napi_schedule_prep(&rq->napi)) {
685 		virtqueue_disable_cb(rvq);
686 		__napi_schedule(&rq->napi);
687 	}
688 }
689 
690 static void virtnet_napi_enable(struct receive_queue *rq)
691 {
692 	napi_enable(&rq->napi);
693 
694 	/* If all buffers were filled by the other side before we enabled napi,
695 	 * we won't get another interrupt, so process any outstanding packets
696 	 * now.  virtnet_poll wants to re-enable the queue, so we disable here.
697 	 * We synchronize against interrupts via NAPI_STATE_SCHED. */
698 	if (napi_schedule_prep(&rq->napi)) {
699 		virtqueue_disable_cb(rq->vq);
700 		local_bh_disable();
701 		__napi_schedule(&rq->napi);
702 		local_bh_enable();
703 	}
704 }
705 
706 static void refill_work(struct work_struct *work)
707 {
708 	struct virtnet_info *vi =
709 		container_of(work, struct virtnet_info, refill.work);
710 	bool still_empty;
711 	int i;
712 
713 	for (i = 0; i < vi->curr_queue_pairs; i++) {
714 		struct receive_queue *rq = &vi->rq[i];
715 
716 		napi_disable(&rq->napi);
717 		still_empty = !try_fill_recv(rq, GFP_KERNEL);
718 		virtnet_napi_enable(rq);
719 
720 		/* In theory, this can happen: if we don't get any buffers in,
721 		 * we will *never* try to fill again.
722 		 */
723 		if (still_empty)
724 			schedule_delayed_work(&vi->refill, HZ/2);
725 	}
726 }
727 
728 static int virtnet_receive(struct receive_queue *rq, int budget)
729 {
730 	struct virtnet_info *vi = rq->vq->vdev->priv;
731 	unsigned int len, received = 0;
732 	void *buf;
733 
734 	while (received < budget &&
735 	       (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
736 		receive_buf(rq, buf, len);
737 		received++;
738 	}
739 
740 	if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) {
741 		if (!try_fill_recv(rq, GFP_ATOMIC))
742 			schedule_delayed_work(&vi->refill, 0);
743 	}
744 
745 	return received;
746 }
747 
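/* NAPI poll: when less than the whole budget is used, re-arm callbacks
 * with virtqueue_enable_cb_prepare() and complete napi, then use
 * virtqueue_poll() to catch any buffer that raced in between the last
 * virtqueue_get_buf() and the re-arm; if one did, reschedule and keep
 * polling rather than lose the event.
 */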
748 static int virtnet_poll(struct napi_struct *napi, int budget)
749 {
750 	struct receive_queue *rq =
751 		container_of(napi, struct receive_queue, napi);
752 	unsigned int r, received = 0;
753 
754 again:
755 	received += virtnet_receive(rq, budget - received);
756 
757 	/* Out of packets? */
758 	if (received < budget) {
759 		r = virtqueue_enable_cb_prepare(rq->vq);
760 		napi_complete(napi);
761 		if (unlikely(virtqueue_poll(rq->vq, r)) &&
762 		    napi_schedule_prep(napi)) {
763 			virtqueue_disable_cb(rq->vq);
764 			__napi_schedule(napi);
765 			goto again;
766 		}
767 	}
768 
769 	return received;
770 }
771 
772 static int virtnet_open(struct net_device *dev)
773 {
774 	struct virtnet_info *vi = netdev_priv(dev);
775 	int i;
776 
777 	for (i = 0; i < vi->max_queue_pairs; i++) {
778 		if (i < vi->curr_queue_pairs)
779 			/* Make sure we have some buffers: if OOM, use the wq. */
780 			if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
781 				schedule_delayed_work(&vi->refill, 0);
782 		virtnet_napi_enable(&vi->rq[i]);
783 	}
784 
785 	return 0;
786 }
787 
788 static void free_old_xmit_skbs(struct send_queue *sq)
789 {
790 	struct sk_buff *skb;
791 	unsigned int len;
792 	struct virtnet_info *vi = sq->vq->vdev->priv;
793 	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
794 
795 	while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) {
796 		pr_debug("Sent skb %p\n", skb);
797 
798 		u64_stats_update_begin(&stats->tx_syncp);
799 		stats->tx_bytes += skb->len;
800 		stats->tx_packets++;
801 		u64_stats_update_end(&stats->tx_syncp);
802 
803 		dev_kfree_skb_any(skb);
804 	}
805 }
806 
807 static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
808 {
809 	struct skb_vnet_hdr *hdr;
810 	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
811 	struct virtnet_info *vi = sq->vq->vdev->priv;
812 	unsigned num_sg;
813 	unsigned hdr_len;
814 	bool can_push;
815 
816 	pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
817 	if (vi->mergeable_rx_bufs)
818 		hdr_len = sizeof hdr->mhdr;
819 	else
820 		hdr_len = sizeof hdr->hdr;
821 
822 	can_push = vi->any_header_sg &&
823 		!((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
824 		!skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
825 	/* Even if we can, don't push here yet as this would skew
826 	 * csum_start offset below. */
827 	if (can_push)
828 		hdr = (struct skb_vnet_hdr *)(skb->data - hdr_len);
829 	else
830 		hdr = skb_vnet_hdr(skb);
831 
832 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
833 		hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
834 		hdr->hdr.csum_start = skb_checksum_start_offset(skb);
835 		hdr->hdr.csum_offset = skb->csum_offset;
836 	} else {
837 		hdr->hdr.flags = 0;
838 		hdr->hdr.csum_offset = hdr->hdr.csum_start = 0;
839 	}
840 
841 	if (skb_is_gso(skb)) {
842 		hdr->hdr.hdr_len = skb_headlen(skb);
843 		hdr->hdr.gso_size = skb_shinfo(skb)->gso_size;
844 		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
845 			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
846 		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
847 			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
848 		else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
849 			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
850 		else
851 			BUG();
852 		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
853 			hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
854 	} else {
855 		hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
856 		hdr->hdr.gso_size = hdr->hdr.hdr_len = 0;
857 	}
858 
859 	if (vi->mergeable_rx_bufs)
860 		hdr->mhdr.num_buffers = 0;
861 
862 	if (can_push) {
863 		__skb_push(skb, hdr_len);
864 		num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
865 		/* Pull header back to avoid skew in tx bytes calculations. */
866 		__skb_pull(skb, hdr_len);
867 	} else {
868 		sg_set_buf(sq->sg, hdr, hdr_len);
869 		num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
870 	}
871 	return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
872 }
873 
874 static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
875 {
876 	struct virtnet_info *vi = netdev_priv(dev);
877 	int qnum = skb_get_queue_mapping(skb);
878 	struct send_queue *sq = &vi->sq[qnum];
879 	int err;
880 
881 	/* Free up any pending old buffers before queueing new ones. */
882 	free_old_xmit_skbs(sq);
883 
884 	/* Try to transmit */
885 	err = xmit_skb(sq, skb);
886 
887 	/* This should not happen! */
888 	if (unlikely(err)) {
889 		dev->stats.tx_fifo_errors++;
890 		if (net_ratelimit())
891 			dev_warn(&dev->dev,
892 				 "Unexpected TXQ (%d) queue failure: %d\n", qnum, err);
893 		dev->stats.tx_dropped++;
894 		dev_kfree_skb_any(skb);
895 		return NETDEV_TX_OK;
896 	}
897 	virtqueue_kick(sq->vq);
898 
899 	/* Don't wait up for transmitted skbs to be freed. */
900 	skb_orphan(skb);
901 	nf_reset(skb);
902 
903 	/* Apparently nice girls don't return TX_BUSY; stop the queue
904 	 * before it gets out of hand.  Naturally, this wastes entries. */
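	/* 2 + MAX_SKB_FRAGS is the worst case for a single skb: one
	 * descriptor for the virtio header, one for the linear part, and
	 * MAX_SKB_FRAGS for a completely full fragment list.
	 */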
905 	if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
906 		netif_stop_subqueue(dev, qnum);
907 		if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
908 			/* More just got used, free them then recheck. */
909 			free_old_xmit_skbs(sq);
910 			if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
911 				netif_start_subqueue(dev, qnum);
912 				virtqueue_disable_cb(sq->vq);
913 			}
914 		}
915 	}
916 
917 	return NETDEV_TX_OK;
918 }
919 
920 /*
921  * Send command via the control virtqueue and check status.  Commands
922  * supported by the hypervisor, as indicated by feature bits, should
923  * never fail unless improperly formatted.
924  */
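/* The command always goes out as 2 or 3 descriptors: a header sg carrying
 * { class, cmd }, the caller's optional out sg, and one in sg that the
 * device writes its VIRTIO_NET_OK/VIRTIO_NET_ERR ack into.
 */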
925 static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
926 				 struct scatterlist *out)
927 {
928 	struct scatterlist *sgs[4], hdr, stat;
929 	struct virtio_net_ctrl_hdr ctrl;
930 	virtio_net_ctrl_ack status = ~0;
931 	unsigned out_num = 0, tmp;
932 
933 	/* Caller should know better */
934 	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
935 
936 	ctrl.class = class;
937 	ctrl.cmd = cmd;
938 	/* Add header */
939 	sg_init_one(&hdr, &ctrl, sizeof(ctrl));
940 	sgs[out_num++] = &hdr;
941 
942 	if (out)
943 		sgs[out_num++] = out;
944 
945 	/* Add return status. */
946 	sg_init_one(&stat, &status, sizeof(status));
947 	sgs[out_num] = &stat;
948 
949 	BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
950 	virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
951 
952 	if (unlikely(!virtqueue_kick(vi->cvq)))
953 		return status == VIRTIO_NET_OK;
954 
955 	/* Spin for a response; the kick causes an ioport write, trapping
956 	 * into the hypervisor, so the request should be handled immediately.
957 	 */
958 	while (!virtqueue_get_buf(vi->cvq, &tmp) &&
959 	       !virtqueue_is_broken(vi->cvq))
960 		cpu_relax();
961 
962 	return status == VIRTIO_NET_OK;
963 }
964 
965 static int virtnet_set_mac_address(struct net_device *dev, void *p)
966 {
967 	struct virtnet_info *vi = netdev_priv(dev);
968 	struct virtio_device *vdev = vi->vdev;
969 	int ret;
970 	struct sockaddr *addr = p;
971 	struct scatterlist sg;
972 
973 	ret = eth_prepare_mac_addr_change(dev, p);
974 	if (ret)
975 		return ret;
976 
977 	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
978 		sg_init_one(&sg, addr->sa_data, dev->addr_len);
979 		if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
980 					  VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
981 			dev_warn(&vdev->dev,
982 				 "Failed to set mac address by vq command.\n");
983 			return -EINVAL;
984 		}
985 	} else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
986 		unsigned int i;
987 
988 		/* Naturally, this has an atomicity problem. */
989 		for (i = 0; i < dev->addr_len; i++)
990 			virtio_cwrite8(vdev,
991 				       offsetof(struct virtio_net_config, mac) +
992 				       i, addr->sa_data[i]);
993 	}
994 
995 	eth_commit_mac_addr_change(dev, p);
996 
997 	return 0;
998 }
999 
1000 static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
1001 					       struct rtnl_link_stats64 *tot)
1002 {
1003 	struct virtnet_info *vi = netdev_priv(dev);
1004 	int cpu;
1005 	unsigned int start;
1006 
1007 	for_each_possible_cpu(cpu) {
1008 		struct virtnet_stats *stats = per_cpu_ptr(vi->stats, cpu);
1009 		u64 tpackets, tbytes, rpackets, rbytes;
1010 
1011 		do {
1012 			start = u64_stats_fetch_begin_irq(&stats->tx_syncp);
1013 			tpackets = stats->tx_packets;
1014 			tbytes   = stats->tx_bytes;
1015 		} while (u64_stats_fetch_retry_irq(&stats->tx_syncp, start));
1016 
1017 		do {
1018 			start = u64_stats_fetch_begin_irq(&stats->rx_syncp);
1019 			rpackets = stats->rx_packets;
1020 			rbytes   = stats->rx_bytes;
1021 		} while (u64_stats_fetch_retry_irq(&stats->rx_syncp, start));
1022 
1023 		tot->rx_packets += rpackets;
1024 		tot->tx_packets += tpackets;
1025 		tot->rx_bytes   += rbytes;
1026 		tot->tx_bytes   += tbytes;
1027 	}
1028 
1029 	tot->tx_dropped = dev->stats.tx_dropped;
1030 	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
1031 	tot->rx_dropped = dev->stats.rx_dropped;
1032 	tot->rx_length_errors = dev->stats.rx_length_errors;
1033 	tot->rx_frame_errors = dev->stats.rx_frame_errors;
1034 
1035 	return tot;
1036 }
1037 
1038 #ifdef CONFIG_NET_POLL_CONTROLLER
1039 static void virtnet_netpoll(struct net_device *dev)
1040 {
1041 	struct virtnet_info *vi = netdev_priv(dev);
1042 	int i;
1043 
1044 	for (i = 0; i < vi->curr_queue_pairs; i++)
1045 		napi_schedule(&vi->rq[i].napi);
1046 }
1047 #endif
1048 
1049 static void virtnet_ack_link_announce(struct virtnet_info *vi)
1050 {
1051 	rtnl_lock();
1052 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
1053 				  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL))
1054 		dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
1055 	rtnl_unlock();
1056 }
1057 
1058 static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
1059 {
1060 	struct scatterlist sg;
1061 	struct virtio_net_ctrl_mq s;
1062 	struct net_device *dev = vi->dev;
1063 
1064 	if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
1065 		return 0;
1066 
1067 	s.virtqueue_pairs = queue_pairs;
1068 	sg_init_one(&sg, &s, sizeof(s));
1069 
1070 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
1071 				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
1072 		dev_warn(&dev->dev, "Failed to set the number of queue pairs to %d\n",
1073 			 queue_pairs);
1074 		return -EINVAL;
1075 	} else {
1076 		vi->curr_queue_pairs = queue_pairs;
1077 		/* virtnet_open() will refill when the device goes up. */
1078 		if (dev->flags & IFF_UP)
1079 			schedule_delayed_work(&vi->refill, 0);
1080 	}
1081 
1082 	return 0;
1083 }
1084 
1085 static int virtnet_close(struct net_device *dev)
1086 {
1087 	struct virtnet_info *vi = netdev_priv(dev);
1088 	int i;
1089 
1090 	/* Make sure refill_work doesn't re-enable napi! */
1091 	cancel_delayed_work_sync(&vi->refill);
1092 
1093 	for (i = 0; i < vi->max_queue_pairs; i++)
1094 		napi_disable(&vi->rq[i].napi);
1095 
1096 	return 0;
1097 }
1098 
1099 static void virtnet_set_rx_mode(struct net_device *dev)
1100 {
1101 	struct virtnet_info *vi = netdev_priv(dev);
1102 	struct scatterlist sg[2];
1103 	u8 promisc, allmulti;
1104 	struct virtio_net_ctrl_mac *mac_data;
1105 	struct netdev_hw_addr *ha;
1106 	int uc_count;
1107 	int mc_count;
1108 	void *buf;
1109 	int i;
1110 
1111 	/* We can't dynamically set ndo_set_rx_mode, so return gracefully */
1112 	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
1113 		return;
1114 
1115 	promisc = ((dev->flags & IFF_PROMISC) != 0);
1116 	allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
1117 
1118 	sg_init_one(sg, &promisc, sizeof(promisc));
1119 
1120 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
1121 				  VIRTIO_NET_CTRL_RX_PROMISC, sg))
1122 		dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
1123 			 promisc ? "en" : "dis");
1124 
1125 	sg_init_one(sg, &allmulti, sizeof(allmulti));
1126 
1127 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
1128 				  VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
1129 		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
1130 			 allmulti ? "en" : "dis");
1131 
1132 	uc_count = netdev_uc_count(dev);
1133 	mc_count = netdev_mc_count(dev);
1134 	/* MAC filter - use one buffer for both lists */
1135 	buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
1136 		      (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
1137 	mac_data = buf;
1138 	if (!buf)
1139 		return;
1140 
1141 	sg_init_table(sg, 2);
1142 
1143 	/* Store the unicast list and count in the front of the buffer */
1144 	mac_data->entries = uc_count;
1145 	i = 0;
1146 	netdev_for_each_uc_addr(ha, dev)
1147 		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
1148 
1149 	sg_set_buf(&sg[0], mac_data,
1150 		   sizeof(mac_data->entries) + (uc_count * ETH_ALEN));
1151 
1152 	/* multicast list and count fill the end */
1153 	mac_data = (void *)&mac_data->macs[uc_count][0];
1154 
1155 	mac_data->entries = mc_count;
1156 	i = 0;
1157 	netdev_for_each_mc_addr(ha, dev)
1158 		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
1159 
1160 	sg_set_buf(&sg[1], mac_data,
1161 		   sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
1162 
1163 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
1164 				  VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
1165 		dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
1166 
1167 	kfree(buf);
1168 }
1169 
1170 static int virtnet_vlan_rx_add_vid(struct net_device *dev,
1171 				   __be16 proto, u16 vid)
1172 {
1173 	struct virtnet_info *vi = netdev_priv(dev);
1174 	struct scatterlist sg;
1175 
1176 	sg_init_one(&sg, &vid, sizeof(vid));
1177 
1178 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
1179 				  VIRTIO_NET_CTRL_VLAN_ADD, &sg))
1180 		dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
1181 	return 0;
1182 }
1183 
1184 static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
1185 				    __be16 proto, u16 vid)
1186 {
1187 	struct virtnet_info *vi = netdev_priv(dev);
1188 	struct scatterlist sg;
1189 
1190 	sg_init_one(&sg, &vid, sizeof(vid));
1191 
1192 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
1193 				  VIRTIO_NET_CTRL_VLAN_DEL, &sg))
1194 		dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
1195 	return 0;
1196 }
1197 
1198 static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
1199 {
1200 	int i;
1201 
1202 	if (vi->affinity_hint_set) {
1203 		for (i = 0; i < vi->max_queue_pairs; i++) {
1204 			virtqueue_set_affinity(vi->rq[i].vq, -1);
1205 			virtqueue_set_affinity(vi->sq[i].vq, -1);
1206 		}
1207 
1208 		vi->affinity_hint_set = false;
1209 	}
1210 }
1211 
1212 static void virtnet_set_affinity(struct virtnet_info *vi)
1213 {
1214 	int i;
1215 	int cpu;
1216 
1217 	/* In multiqueue mode, when the number of CPUs equals the number of
1218 	 * queue pairs, we let each queue pair be private to one CPU by
1219 	 * setting the affinity hint, eliminating the contention.
1220 	 */
1221 	if (vi->curr_queue_pairs == 1 ||
1222 	    vi->max_queue_pairs != num_online_cpus()) {
1223 		virtnet_clean_affinity(vi, -1);
1224 		return;
1225 	}
1226 
1227 	i = 0;
1228 	for_each_online_cpu(cpu) {
1229 		virtqueue_set_affinity(vi->rq[i].vq, cpu);
1230 		virtqueue_set_affinity(vi->sq[i].vq, cpu);
1231 		netif_set_xps_queue(vi->dev, cpumask_of(cpu), i);
1232 		i++;
1233 	}
1234 
1235 	vi->affinity_hint_set = true;
1236 }
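
/* For example, in a 4-vCPU guest with 4 queue pairs, rq/sq pair i is
 * pinned to CPU i and XPS steers transmits from CPU i to txq i; with any
 * other CPU/queue ratio the hints are simply left cleared.
 */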
1237 
1238 static int virtnet_cpu_callback(struct notifier_block *nfb,
1239 			        unsigned long action, void *hcpu)
1240 {
1241 	struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb);
1242 
1243 	switch (action & ~CPU_TASKS_FROZEN) {
1244 	case CPU_ONLINE:
1245 	case CPU_DOWN_FAILED:
1246 	case CPU_DEAD:
1247 		virtnet_set_affinity(vi);
1248 		break;
1249 	case CPU_DOWN_PREPARE:
1250 		virtnet_clean_affinity(vi, (long)hcpu);
1251 		break;
1252 	default:
1253 		break;
1254 	}
1255 
1256 	return NOTIFY_OK;
1257 }
1258 
1259 static void virtnet_get_ringparam(struct net_device *dev,
1260 				struct ethtool_ringparam *ring)
1261 {
1262 	struct virtnet_info *vi = netdev_priv(dev);
1263 
1264 	ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq);
1265 	ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq);
1266 	ring->rx_pending = ring->rx_max_pending;
1267 	ring->tx_pending = ring->tx_max_pending;
1268 }
1269 
1270 
1271 static void virtnet_get_drvinfo(struct net_device *dev,
1272 				struct ethtool_drvinfo *info)
1273 {
1274 	struct virtnet_info *vi = netdev_priv(dev);
1275 	struct virtio_device *vdev = vi->vdev;
1276 
1277 	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
1278 	strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
1279 	strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
1280 
1281 }
1282 
1283 /* TODO: Eliminate OOO packets during switching */
1284 static int virtnet_set_channels(struct net_device *dev,
1285 				struct ethtool_channels *channels)
1286 {
1287 	struct virtnet_info *vi = netdev_priv(dev);
1288 	u16 queue_pairs = channels->combined_count;
1289 	int err;
1290 
1291 	/* We don't support separate rx/tx channels.
1292 	 * We don't allow setting 'other' channels.
1293 	 */
1294 	if (channels->rx_count || channels->tx_count || channels->other_count)
1295 		return -EINVAL;
1296 
1297 	if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0)
1298 		return -EINVAL;
1299 
1300 	get_online_cpus();
1301 	err = virtnet_set_queues(vi, queue_pairs);
1302 	if (!err) {
1303 		netif_set_real_num_tx_queues(dev, queue_pairs);
1304 		netif_set_real_num_rx_queues(dev, queue_pairs);
1305 
1306 		virtnet_set_affinity(vi);
1307 	}
1308 	put_online_cpus();
1309 
1310 	return err;
1311 }
1312 
1313 static void virtnet_get_channels(struct net_device *dev,
1314 				 struct ethtool_channels *channels)
1315 {
1316 	struct virtnet_info *vi = netdev_priv(dev);
1317 
1318 	channels->combined_count = vi->curr_queue_pairs;
1319 	channels->max_combined = vi->max_queue_pairs;
1320 	channels->max_other = 0;
1321 	channels->rx_count = 0;
1322 	channels->tx_count = 0;
1323 	channels->other_count = 0;
1324 }
1325 
1326 static const struct ethtool_ops virtnet_ethtool_ops = {
1327 	.get_drvinfo = virtnet_get_drvinfo,
1328 	.get_link = ethtool_op_get_link,
1329 	.get_ringparam = virtnet_get_ringparam,
1330 	.set_channels = virtnet_set_channels,
1331 	.get_channels = virtnet_get_channels,
1332 };
1333 
1334 #define MIN_MTU 68
1335 #define MAX_MTU 65535
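/* 68 is the minimum link MTU IPv4 requires (RFC 791); 65535 is the most
 * the 16-bit IP total-length field can describe.
 */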
1336 
1337 static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
1338 {
1339 	if (new_mtu < MIN_MTU || new_mtu > MAX_MTU)
1340 		return -EINVAL;
1341 	dev->mtu = new_mtu;
1342 	return 0;
1343 }
1344 
1345 static const struct net_device_ops virtnet_netdev = {
1346 	.ndo_open            = virtnet_open,
1347 	.ndo_stop   	     = virtnet_close,
1348 	.ndo_start_xmit      = start_xmit,
1349 	.ndo_validate_addr   = eth_validate_addr,
1350 	.ndo_set_mac_address = virtnet_set_mac_address,
1351 	.ndo_set_rx_mode     = virtnet_set_rx_mode,
1352 	.ndo_change_mtu	     = virtnet_change_mtu,
1353 	.ndo_get_stats64     = virtnet_stats,
1354 	.ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
1355 	.ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
1356 #ifdef CONFIG_NET_POLL_CONTROLLER
1357 	.ndo_poll_controller = virtnet_netpoll,
1358 #endif
1359 };
1360 
1361 static void virtnet_config_changed_work(struct work_struct *work)
1362 {
1363 	struct virtnet_info *vi =
1364 		container_of(work, struct virtnet_info, config_work);
1365 	u16 v;
1366 
1367 	mutex_lock(&vi->config_lock);
1368 	if (!vi->config_enable)
1369 		goto done;
1370 
1371 	if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS,
1372 				 struct virtio_net_config, status, &v) < 0)
1373 		goto done;
1374 
1375 	if (v & VIRTIO_NET_S_ANNOUNCE) {
1376 		netdev_notify_peers(vi->dev);
1377 		virtnet_ack_link_announce(vi);
1378 	}
1379 
1380 	/* Ignore unknown (future) status bits */
1381 	v &= VIRTIO_NET_S_LINK_UP;
1382 
1383 	if (vi->status == v)
1384 		goto done;
1385 
1386 	vi->status = v;
1387 
1388 	if (vi->status & VIRTIO_NET_S_LINK_UP) {
1389 		netif_carrier_on(vi->dev);
1390 		netif_tx_wake_all_queues(vi->dev);
1391 	} else {
1392 		netif_carrier_off(vi->dev);
1393 		netif_tx_stop_all_queues(vi->dev);
1394 	}
1395 done:
1396 	mutex_unlock(&vi->config_lock);
1397 }
1398 
1399 static void virtnet_config_changed(struct virtio_device *vdev)
1400 {
1401 	struct virtnet_info *vi = vdev->priv;
1402 
1403 	schedule_work(&vi->config_work);
1404 }
1405 
1406 static void virtnet_free_queues(struct virtnet_info *vi)
1407 {
1408 	int i;
1409 
1410 	for (i = 0; i < vi->max_queue_pairs; i++)
1411 		netif_napi_del(&vi->rq[i].napi);
1412 
1413 	kfree(vi->rq);
1414 	kfree(vi->sq);
1415 }
1416 
1417 static void free_receive_bufs(struct virtnet_info *vi)
1418 {
1419 	int i;
1420 
1421 	for (i = 0; i < vi->max_queue_pairs; i++) {
1422 		while (vi->rq[i].pages)
1423 			__free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);
1424 	}
1425 }
1426 
1427 static void free_receive_page_frags(struct virtnet_info *vi)
1428 {
1429 	int i;
1430 	for (i = 0; i < vi->max_queue_pairs; i++)
1431 		if (vi->rq[i].alloc_frag.page)
1432 			put_page(vi->rq[i].alloc_frag.page);
1433 }
1434 
1435 static void free_unused_bufs(struct virtnet_info *vi)
1436 {
1437 	void *buf;
1438 	int i;
1439 
1440 	for (i = 0; i < vi->max_queue_pairs; i++) {
1441 		struct virtqueue *vq = vi->sq[i].vq;
1442 		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
1443 			dev_kfree_skb(buf);
1444 	}
1445 
1446 	for (i = 0; i < vi->max_queue_pairs; i++) {
1447 		struct virtqueue *vq = vi->rq[i].vq;
1448 
1449 		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
1450 			if (vi->mergeable_rx_bufs) {
1451 				unsigned long ctx = (unsigned long)buf;
1452 				void *base = mergeable_ctx_to_buf_address(ctx);
1453 				put_page(virt_to_head_page(base));
1454 			} else if (vi->big_packets) {
1455 				give_pages(&vi->rq[i], buf);
1456 			} else {
1457 				dev_kfree_skb(buf);
1458 			}
1459 		}
1460 	}
1461 }
1462 
1463 static void virtnet_del_vqs(struct virtnet_info *vi)
1464 {
1465 	struct virtio_device *vdev = vi->vdev;
1466 
1467 	virtnet_clean_affinity(vi, -1);
1468 
1469 	vdev->config->del_vqs(vdev);
1470 
1471 	virtnet_free_queues(vi);
1472 }
1473 
1474 static int virtnet_find_vqs(struct virtnet_info *vi)
1475 {
1476 	vq_callback_t **callbacks;
1477 	struct virtqueue **vqs;
1478 	int ret = -ENOMEM;
1479 	int i, total_vqs;
1480 	const char **names;
1481 
1482 	/* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
1483 	 * the N-1 additional RX/TX queue pairs used in multiqueue mode,
1484 	 * followed by a possible control vq.
1485 	 */
1486 	total_vqs = vi->max_queue_pairs * 2 +
1487 		    virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ);
1488 
1489 	/* Allocate space for find_vqs parameters */
1490 	vqs = kzalloc(total_vqs * sizeof(*vqs), GFP_KERNEL);
1491 	if (!vqs)
1492 		goto err_vq;
1493 	callbacks = kmalloc(total_vqs * sizeof(*callbacks), GFP_KERNEL);
1494 	if (!callbacks)
1495 		goto err_callback;
1496 	names = kmalloc(total_vqs * sizeof(*names), GFP_KERNEL);
1497 	if (!names)
1498 		goto err_names;
1499 
1500 	/* Parameters for control virtqueue, if any */
1501 	if (vi->has_cvq) {
1502 		callbacks[total_vqs - 1] = NULL;
1503 		names[total_vqs - 1] = "control";
1504 	}
1505 
1506 	/* Allocate/initialize parameters for send/receive virtqueues */
1507 	for (i = 0; i < vi->max_queue_pairs; i++) {
1508 		callbacks[rxq2vq(i)] = skb_recv_done;
1509 		callbacks[txq2vq(i)] = skb_xmit_done;
1510 		sprintf(vi->rq[i].name, "input.%d", i);
1511 		sprintf(vi->sq[i].name, "output.%d", i);
1512 		names[rxq2vq(i)] = vi->rq[i].name;
1513 		names[txq2vq(i)] = vi->sq[i].name;
1514 	}
1515 
1516 	ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks,
1517 					 names);
1518 	if (ret)
1519 		goto err_find;
1520 
1521 	if (vi->has_cvq) {
1522 		vi->cvq = vqs[total_vqs - 1];
1523 		if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
1524 			vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1525 	}
1526 
1527 	for (i = 0; i < vi->max_queue_pairs; i++) {
1528 		vi->rq[i].vq = vqs[rxq2vq(i)];
1529 		vi->sq[i].vq = vqs[txq2vq(i)];
1530 	}
1531 
1532 	kfree(names);
1533 	kfree(callbacks);
1534 	kfree(vqs);
1535 
1536 	return 0;
1537 
1538 err_find:
1539 	kfree(names);
1540 err_names:
1541 	kfree(callbacks);
1542 err_callback:
1543 	kfree(vqs);
1544 err_vq:
1545 	return ret;
1546 }
1547 
1548 static int virtnet_alloc_queues(struct virtnet_info *vi)
1549 {
1550 	int i;
1551 
1552 	vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL);
1553 	if (!vi->sq)
1554 		goto err_sq;
1555 	vi->rq = kzalloc(sizeof(*vi->rq) * vi->max_queue_pairs, GFP_KERNEL);
1556 	if (!vi->rq)
1557 		goto err_rq;
1558 
1559 	INIT_DELAYED_WORK(&vi->refill, refill_work);
1560 	for (i = 0; i < vi->max_queue_pairs; i++) {
1561 		vi->rq[i].pages = NULL;
1562 		netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
1563 			       napi_weight);
1564 
1565 		sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
1566 		ewma_init(&vi->rq[i].mrg_avg_pkt_len, 1, RECEIVE_AVG_WEIGHT);
1567 		sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
1568 	}
1569 
1570 	return 0;
1571 
1572 err_rq:
1573 	kfree(vi->sq);
1574 err_sq:
1575 	return -ENOMEM;
1576 }
1577 
1578 static int init_vqs(struct virtnet_info *vi)
1579 {
1580 	int ret;
1581 
1582 	/* Allocate send & receive queues */
1583 	ret = virtnet_alloc_queues(vi);
1584 	if (ret)
1585 		goto err;
1586 
1587 	ret = virtnet_find_vqs(vi);
1588 	if (ret)
1589 		goto err_free;
1590 
1591 	get_online_cpus();
1592 	virtnet_set_affinity(vi);
1593 	put_online_cpus();
1594 
1595 	return 0;
1596 
1597 err_free:
1598 	virtnet_free_queues(vi);
1599 err:
1600 	return ret;
1601 }
1602 
1603 #ifdef CONFIG_SYSFS
1604 static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue,
1605 		struct rx_queue_attribute *attribute, char *buf)
1606 {
1607 	struct virtnet_info *vi = netdev_priv(queue->dev);
1608 	unsigned int queue_index = get_netdev_rx_queue_index(queue);
1609 	struct ewma *avg;
1610 
1611 	BUG_ON(queue_index >= vi->max_queue_pairs);
1612 	avg = &vi->rq[queue_index].mrg_avg_pkt_len;
1613 	return sprintf(buf, "%u\n", get_mergeable_buf_len(avg));
1614 }
1615 
1616 static struct rx_queue_attribute mergeable_rx_buffer_size_attribute =
1617 	__ATTR_RO(mergeable_rx_buffer_size);
1618 
1619 static struct attribute *virtio_net_mrg_rx_attrs[] = {
1620 	&mergeable_rx_buffer_size_attribute.attr,
1621 	NULL
1622 };
1623 
1624 static const struct attribute_group virtio_net_mrg_rx_group = {
1625 	.name = "virtio_net",
1626 	.attrs = virtio_net_mrg_rx_attrs
1627 };
1628 #endif
1629 
1630 static int virtnet_probe(struct virtio_device *vdev)
1631 {
1632 	int i, err;
1633 	struct net_device *dev;
1634 	struct virtnet_info *vi;
1635 	u16 max_queue_pairs;
1636 
1637 	/* Find out whether the host supports a multiqueue virtio_net device */
1638 	err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ,
1639 				   struct virtio_net_config,
1640 				   max_virtqueue_pairs, &max_queue_pairs);
1641 
1642 	/* We need at least 2 queues */
1643 	if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
1644 	    max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
1645 	    !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
1646 		max_queue_pairs = 1;
1647 
1648 	/* Allocate ourselves a network device with room for our info */
1649 	dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs);
1650 	if (!dev)
1651 		return -ENOMEM;
1652 
1653 	/* Set up network device as normal. */
1654 	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
1655 	dev->netdev_ops = &virtnet_netdev;
1656 	dev->features = NETIF_F_HIGHDMA;
1657 
1658 	dev->ethtool_ops = &virtnet_ethtool_ops;
1659 	SET_NETDEV_DEV(dev, &vdev->dev);
1660 
1661 	/* Do we support "hardware" checksums? */
1662 	if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
1663 		/* This opens up the world of extra features. */
1664 		dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
1665 		if (csum)
1666 			dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
1667 
1668 		if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
1669 			dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
1670 				| NETIF_F_TSO_ECN | NETIF_F_TSO6;
1671 		}
1672 		/* Individual feature bits: what can host handle? */
1673 		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
1674 			dev->hw_features |= NETIF_F_TSO;
1675 		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
1676 			dev->hw_features |= NETIF_F_TSO6;
1677 		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
1678 			dev->hw_features |= NETIF_F_TSO_ECN;
1679 		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
1680 			dev->hw_features |= NETIF_F_UFO;
1681 
1682 		if (gso)
1683 			dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO);
1684 		/* (!csum && gso) case will be fixed by register_netdev() */
1685 	}
1686 	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
1687 		dev->features |= NETIF_F_RXCSUM;
1688 
1689 	dev->vlan_features = dev->features;
1690 
1691 	/* Configuration may specify what MAC to use.  Otherwise random. */
1692 	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
1693 		virtio_cread_bytes(vdev,
1694 				   offsetof(struct virtio_net_config, mac),
1695 				   dev->dev_addr, dev->addr_len);
1696 	else
1697 		eth_hw_addr_random(dev);
1698 
1699 	/* Set up our device-specific information */
1700 	vi = netdev_priv(dev);
1701 	vi->dev = dev;
1702 	vi->vdev = vdev;
1703 	vdev->priv = vi;
1704 	vi->stats = alloc_percpu(struct virtnet_stats);
1705 	err = -ENOMEM;
1706 	if (vi->stats == NULL)
1707 		goto free;
1708 
1709 	for_each_possible_cpu(i) {
1710 		struct virtnet_stats *virtnet_stats;
1711 		virtnet_stats = per_cpu_ptr(vi->stats, i);
1712 		u64_stats_init(&virtnet_stats->tx_syncp);
1713 		u64_stats_init(&virtnet_stats->rx_syncp);
1714 	}
1715 
1716 	mutex_init(&vi->config_lock);
1717 	vi->config_enable = true;
1718 	INIT_WORK(&vi->config_work, virtnet_config_changed_work);
1719 
1720 	/* If we can receive ANY GSO packets, we must allocate large ones. */
1721 	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
1722 	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
1723 	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
1724 	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
1725 		vi->big_packets = true;
1726 
1727 	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
1728 		vi->mergeable_rx_bufs = true;
1729 
1730 	if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT))
1731 		vi->any_header_sg = true;
1732 
1733 	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
1734 		vi->has_cvq = true;
1735 
1736 	if (vi->any_header_sg) {
1737 		if (vi->mergeable_rx_bufs)
1738 			dev->needed_headroom = sizeof(struct virtio_net_hdr_mrg_rxbuf);
1739 		else
1740 			dev->needed_headroom = sizeof(struct virtio_net_hdr);
1741 	}
1742 
1743 	/* Use single tx/rx queue pair as default */
1744 	vi->curr_queue_pairs = 1;
1745 	vi->max_queue_pairs = max_queue_pairs;
1746 
1747 	/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
1748 	err = init_vqs(vi);
1749 	if (err)
1750 		goto free_stats;
1751 
1752 #ifdef CONFIG_SYSFS
1753 	if (vi->mergeable_rx_bufs)
1754 		dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group;
1755 #endif
1756 	netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
1757 	netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);
1758 
1759 	err = register_netdev(dev);
1760 	if (err) {
1761 		pr_debug("virtio_net: registering device failed\n");
1762 		goto free_vqs;
1763 	}
1764 
1765 	/* Last of all, set up some receive buffers. */
1766 	for (i = 0; i < vi->curr_queue_pairs; i++) {
1767 		try_fill_recv(&vi->rq[i], GFP_KERNEL);
1768 
1769 		/* If we didn't even get one input buffer, we're useless. */
1770 		if (vi->rq[i].vq->num_free ==
1771 		    virtqueue_get_vring_size(vi->rq[i].vq)) {
1772 			free_unused_bufs(vi);
1773 			err = -ENOMEM;
1774 			goto free_recv_bufs;
1775 		}
1776 	}
1777 
1778 	vi->nb.notifier_call = &virtnet_cpu_callback;
1779 	err = register_hotcpu_notifier(&vi->nb);
1780 	if (err) {
1781 		pr_debug("virtio_net: registering cpu notifier failed\n");
1782 		goto free_recv_bufs;
1783 	}
1784 
1785 	/* Assume link up if the device can't report link status;
1786 	 * otherwise get link status from config. */
1787 	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
1788 		netif_carrier_off(dev);
1789 		schedule_work(&vi->config_work);
1790 	} else {
1791 		vi->status = VIRTIO_NET_S_LINK_UP;
1792 		netif_carrier_on(dev);
1793 	}
1794 
1795 	pr_debug("virtnet: registered device %s with %d RX and TX vqs\n",
1796 		 dev->name, max_queue_pairs);
1797 
1798 	return 0;
1799 
1800 free_recv_bufs:
1801 	free_receive_bufs(vi);
1802 	unregister_netdev(dev);
1803 free_vqs:
1804 	cancel_delayed_work_sync(&vi->refill);
1805 	free_receive_page_frags(vi);
1806 	virtnet_del_vqs(vi);
1807 free_stats:
1808 	free_percpu(vi->stats);
1809 free:
1810 	free_netdev(dev);
1811 	return err;
1812 }
1813 
1814 static void remove_vq_common(struct virtnet_info *vi)
1815 {
1816 	vi->vdev->config->reset(vi->vdev);
1817 
1818 	/* Free unused buffers in both send and recv, if any. */
1819 	free_unused_bufs(vi);
1820 
1821 	free_receive_bufs(vi);
1822 
1823 	free_receive_page_frags(vi);
1824 
1825 	virtnet_del_vqs(vi);
1826 }
1827 
1828 static void virtnet_remove(struct virtio_device *vdev)
1829 {
1830 	struct virtnet_info *vi = vdev->priv;
1831 
1832 	unregister_hotcpu_notifier(&vi->nb);
1833 
1834 	/* Prevent config work handler from accessing the device. */
1835 	mutex_lock(&vi->config_lock);
1836 	vi->config_enable = false;
1837 	mutex_unlock(&vi->config_lock);
1838 
1839 	unregister_netdev(vi->dev);
1840 
1841 	remove_vq_common(vi);
1842 
1843 	flush_work(&vi->config_work);
1844 
1845 	free_percpu(vi->stats);
1846 	free_netdev(vi->dev);
1847 }
1848 
1849 #ifdef CONFIG_PM_SLEEP
1850 static int virtnet_freeze(struct virtio_device *vdev)
1851 {
1852 	struct virtnet_info *vi = vdev->priv;
1853 	int i;
1854 
1855 	unregister_hotcpu_notifier(&vi->nb);
1856 
1857 	/* Prevent config work handler from accessing the device */
1858 	mutex_lock(&vi->config_lock);
1859 	vi->config_enable = false;
1860 	mutex_unlock(&vi->config_lock);
1861 
1862 	netif_device_detach(vi->dev);
1863 	cancel_delayed_work_sync(&vi->refill);
1864 
1865 	if (netif_running(vi->dev))
1866 		for (i = 0; i < vi->max_queue_pairs; i++) {
1867 			napi_disable(&vi->rq[i].napi);
1868 			netif_napi_del(&vi->rq[i].napi);
1869 		}
1870 
1871 	remove_vq_common(vi);
1872 
1873 	flush_work(&vi->config_work);
1874 
1875 	return 0;
1876 }
1877 
1878 static int virtnet_restore(struct virtio_device *vdev)
1879 {
1880 	struct virtnet_info *vi = vdev->priv;
1881 	int err, i;
1882 
1883 	err = init_vqs(vi);
1884 	if (err)
1885 		return err;
1886 
1887 	if (netif_running(vi->dev)) {
1888 		for (i = 0; i < vi->curr_queue_pairs; i++)
1889 			if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
1890 				schedule_delayed_work(&vi->refill, 0);
1891 
1892 		for (i = 0; i < vi->max_queue_pairs; i++)
1893 			virtnet_napi_enable(&vi->rq[i]);
1894 	}
1895 
1896 	netif_device_attach(vi->dev);
1897 
1898 	mutex_lock(&vi->config_lock);
1899 	vi->config_enable = true;
1900 	mutex_unlock(&vi->config_lock);
1901 
1902 	rtnl_lock();
1903 	virtnet_set_queues(vi, vi->curr_queue_pairs);
1904 	rtnl_unlock();
1905 
1906 	err = register_hotcpu_notifier(&vi->nb);
1907 	if (err)
1908 		return err;
1909 
1910 	return 0;
1911 }
1912 #endif
1913 
1914 static struct virtio_device_id id_table[] = {
1915 	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
1916 	{ 0 },
1917 };
1918 
1919 static unsigned int features[] = {
1920 	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
1921 	VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
1922 	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
1923 	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
1924 	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
1925 	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
1926 	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
1927 	VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
1928 	VIRTIO_NET_F_CTRL_MAC_ADDR,
1929 	VIRTIO_F_ANY_LAYOUT,
1930 };
1931 
1932 static struct virtio_driver virtio_net_driver = {
1933 	.feature_table = features,
1934 	.feature_table_size = ARRAY_SIZE(features),
1935 	.driver.name =	KBUILD_MODNAME,
1936 	.driver.owner =	THIS_MODULE,
1937 	.id_table =	id_table,
1938 	.probe =	virtnet_probe,
1939 	.remove =	virtnet_remove,
1940 	.config_changed = virtnet_config_changed,
1941 #ifdef CONFIG_PM_SLEEP
1942 	.freeze =	virtnet_freeze,
1943 	.restore =	virtnet_restore,
1944 #endif
1945 };
1946 
1947 module_virtio_driver(virtio_net_driver);
1948 
1949 MODULE_DEVICE_TABLE(virtio, id_table);
1950 MODULE_DESCRIPTION("Virtio network driver");
1951 MODULE_LICENSE("GPL");
1952