xref: /openbmc/linux/drivers/net/xen-netfront.c (revision d6613aa750ad66b25737231415050f1dca924eb7)
1 /*
2  * Virtual network driver for conversing with remote driver backends.
3  *
4  * Copyright (c) 2002-2005, K A Fraser
5  * Copyright (c) 2005, XenSource Ltd
6  *
7  * This program is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU General Public License version 2
9  * as published by the Free Software Foundation; or, when distributed
10  * separately from the Linux kernel or incorporated into other
11  * software packages, subject to the following license:
12  *
13  * Permission is hereby granted, free of charge, to any person obtaining a copy
14  * of this source file (the "Software"), to deal in the Software without
15  * restriction, including without limitation the rights to use, copy, modify,
16  * merge, publish, distribute, sublicense, and/or sell copies of the Software,
17  * and to permit persons to whom the Software is furnished to do so, subject to
18  * the following conditions:
19  *
20  * The above copyright notice and this permission notice shall be included in
21  * all copies or substantial portions of the Software.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
26  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
29  * IN THE SOFTWARE.
30  */
31 
32 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33 
34 #include <linux/module.h>
35 #include <linux/kernel.h>
36 #include <linux/netdevice.h>
37 #include <linux/etherdevice.h>
38 #include <linux/skbuff.h>
39 #include <linux/ethtool.h>
40 #include <linux/if_ether.h>
41 #include <net/tcp.h>
42 #include <linux/udp.h>
43 #include <linux/moduleparam.h>
44 #include <linux/mm.h>
45 #include <linux/slab.h>
46 #include <net/ip.h>
47 
48 #include <asm/xen/page.h>
49 #include <xen/xen.h>
50 #include <xen/xenbus.h>
51 #include <xen/events.h>
52 #include <xen/page.h>
53 #include <xen/platform_pci.h>
54 #include <xen/grant_table.h>
55 
56 #include <xen/interface/io/netif.h>
57 #include <xen/interface/memory.h>
58 #include <xen/interface/grant_table.h>
59 
60 /* Module parameters */
61 static unsigned int xennet_max_queues;
62 module_param_named(max_queues, xennet_max_queues, uint, 0644);
63 MODULE_PARM_DESC(max_queues,
64 		 "Maximum number of queues per virtual interface");
65 
66 static const struct ethtool_ops xennet_ethtool_ops;
67 
68 struct netfront_cb {
69 	int pull_to;
70 };
71 
72 #define NETFRONT_SKB_CB(skb)	((struct netfront_cb *)((skb)->cb))
73 
74 #define RX_COPY_THRESHOLD 256
75 
76 #define GRANT_INVALID_REF	0
77 
78 #define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
79 #define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
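/* A minimal sizing note (assuming 4 KiB pages): __CONST_RING_SIZE() fits as
 * many request/response unions as possible into one shared page and rounds
 * down to a power of two, which typically gives 256 entries per ring and
 * also sizes the tx_skbs[]/rx_skbs[] bookkeeping arrays below.
 */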
80 
81 /* Minimum number of Rx slots (includes slot for GSO metadata). */
82 #define NET_RX_SLOTS_MIN (XEN_NETIF_NR_SLOTS_MIN + 1)
83 
84 /* Queue name is interface name with "-qNNN" appended */
85 #define QUEUE_NAME_SIZE (IFNAMSIZ + 6)
86 
87 /* IRQ name is queue name with "-tx" or "-rx" appended */
88 #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
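/* For example, queue 2 of interface "eth0" is named "eth0-q2" and, with
 * split event channels, its IRQs are named "eth0-q2-tx" and "eth0-q2-rx".
 */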
89 
90 struct netfront_stats {
91 	u64			rx_packets;
92 	u64			tx_packets;
93 	u64			rx_bytes;
94 	u64			tx_bytes;
95 	struct u64_stats_sync	syncp;
96 };
97 
98 struct netfront_info;
99 
100 struct netfront_queue {
101 	unsigned int id; /* Queue ID, 0-based */
102 	char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
103 	struct netfront_info *info;
104 
105 	struct napi_struct napi;
106 
107 	/* Split event channel support; tx_* == rx_* when using a
108 	 * single event channel.
109 	 */
110 	unsigned int tx_evtchn, rx_evtchn;
111 	unsigned int tx_irq, rx_irq;
112 	/* Only used when split event channels support is enabled */
113 	char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */
114 	char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */
115 
116 	spinlock_t   tx_lock;
117 	struct xen_netif_tx_front_ring tx;
118 	int tx_ring_ref;
119 
120 	/*
121 	 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
122 	 * are linked from tx_skb_freelist through skb_entry.link.
123 	 *
124 	 *  NB. Freelist index entries are always less than PAGE_OFFSET,
125 	 *  whereas pointers to skbs are always equal to or greater than
126 	 *  PAGE_OFFSET: we use this property to distinguish
127 	 *  them.
128 	 */
129 	union skb_entry {
130 		struct sk_buff *skb;
131 		unsigned long link;
132 	} tx_skbs[NET_TX_RING_SIZE];
133 	grant_ref_t gref_tx_head;
134 	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
135 	struct page *grant_tx_page[NET_TX_RING_SIZE];
136 	unsigned tx_skb_freelist;
137 
138 	spinlock_t   rx_lock ____cacheline_aligned_in_smp;
139 	struct xen_netif_rx_front_ring rx;
140 	int rx_ring_ref;
141 
142 	struct timer_list rx_refill_timer;
143 
144 	struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
145 	grant_ref_t gref_rx_head;
146 	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
147 
148 	unsigned long rx_pfn_array[NET_RX_RING_SIZE];
149 	struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
150 	struct mmu_update rx_mmu[NET_RX_RING_SIZE];
151 };
152 
153 struct netfront_info {
154 	struct list_head list;
155 	struct net_device *netdev;
156 
157 	struct xenbus_device *xbdev;
158 
159 	/* Multi-queue support */
160 	struct netfront_queue *queues;
161 
162 	/* Statistics */
163 	struct netfront_stats __percpu *stats;
164 
165 	atomic_t rx_gso_checksum_fixup;
166 };
167 
168 struct netfront_rx_info {
169 	struct xen_netif_rx_response rx;
170 	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
171 };
172 
173 static void skb_entry_set_link(union skb_entry *list, unsigned short id)
174 {
175 	list->link = id;
176 }
177 
178 static int skb_entry_is_link(const union skb_entry *list)
179 {
180 	BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link));
181 	return (unsigned long)list->skb < PAGE_OFFSET;
182 }
183 
184 /*
185  * Helpers for acquiring and freeing slots in tx_skbs[].
186  */
187 
188 static void add_id_to_freelist(unsigned *head, union skb_entry *list,
189 			       unsigned short id)
190 {
191 	skb_entry_set_link(&list[id], *head);
192 	*head = id;
193 }
194 
195 static unsigned short get_id_from_freelist(unsigned *head,
196 					   union skb_entry *list)
197 {
198 	unsigned int id = *head;
199 	*head = list[id].link;
200 	return id;
201 }
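
/*
 * Illustrative sketch of the freelist encoding (not extra protocol state):
 * after initialisation the chain is 0 -> 1 -> 2 -> ..., so
 * get_id_from_freelist() hands out id 0 and advances *head to 1, while
 * add_id_to_freelist(..., 0) pushes 0 back to the front of the chain.
 * skb_entry_is_link() can tell the two union members apart because ids are
 * small integers (< PAGE_OFFSET) whereas skb pointers are kernel virtual
 * addresses (>= PAGE_OFFSET).
 */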
202 
203 static int xennet_rxidx(RING_IDX idx)
204 {
205 	return idx & (NET_RX_RING_SIZE - 1);
206 }
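
/* Worked example (assuming NET_RX_RING_SIZE == 256): xennet_rxidx(300)
 * returns 300 & 255 == 44, so ring indices wrap for free because the ring
 * size is a power of two.
 */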
207 
208 static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue,
209 					 RING_IDX ri)
210 {
211 	int i = xennet_rxidx(ri);
212 	struct sk_buff *skb = queue->rx_skbs[i];
213 	queue->rx_skbs[i] = NULL;
214 	return skb;
215 }
216 
217 static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue,
218 					    RING_IDX ri)
219 {
220 	int i = xennet_rxidx(ri);
221 	grant_ref_t ref = queue->grant_rx_ref[i];
222 	queue->grant_rx_ref[i] = GRANT_INVALID_REF;
223 	return ref;
224 }
225 
226 #ifdef CONFIG_SYSFS
227 static int xennet_sysfs_addif(struct net_device *netdev);
228 static void xennet_sysfs_delif(struct net_device *netdev);
229 #else /* !CONFIG_SYSFS */
230 #define xennet_sysfs_addif(dev) (0)
231 #define xennet_sysfs_delif(dev) do { } while (0)
232 #endif
233 
234 static bool xennet_can_sg(struct net_device *dev)
235 {
236 	return dev->features & NETIF_F_SG;
237 }
238 
239 
240 static void rx_refill_timeout(unsigned long data)
241 {
242 	struct netfront_queue *queue = (struct netfront_queue *)data;
243 	napi_schedule(&queue->napi);
244 }
245 
246 static int netfront_tx_slot_available(struct netfront_queue *queue)
247 {
248 	return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
249 		(NET_TX_RING_SIZE - MAX_SKB_FRAGS - 2);
250 }
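
/* Rough arithmetic (assuming NET_TX_RING_SIZE == 256 and MAX_SKB_FRAGS == 17):
 * the queue is treated as full once 256 - 17 - 2 == 237 requests are
 * outstanding, which keeps enough slack for one worst-case skb (all frags
 * plus its header slots).
 */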
251 
252 static void xennet_maybe_wake_tx(struct netfront_queue *queue)
253 {
254 	struct net_device *dev = queue->info->netdev;
255 	struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, queue->id);
256 
257 	if (unlikely(netif_tx_queue_stopped(dev_queue)) &&
258 	    netfront_tx_slot_available(queue) &&
259 	    likely(netif_running(dev)))
260 		netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id));
261 }
262 
263 
264 static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
265 {
266 	struct sk_buff *skb;
267 	struct page *page;
268 
269 	skb = __netdev_alloc_skb(queue->info->netdev,
270 				 RX_COPY_THRESHOLD + NET_IP_ALIGN,
271 				 GFP_ATOMIC | __GFP_NOWARN);
272 	if (unlikely(!skb))
273 		return NULL;
274 
275 	page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
276 	if (!page) {
277 		kfree_skb(skb);
278 		return NULL;
279 	}
280 	skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
281 
282 	/* Align the IP header to a 16-byte boundary */
283 	skb_reserve(skb, NET_IP_ALIGN);
284 	skb->dev = queue->info->netdev;
285 
286 	return skb;
287 }
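
/* In short: each rx buffer is a small skb (RX_COPY_THRESHOLD bytes of linear
 * space) with one full page attached as frag 0.  The page is what gets
 * granted to the backend in xennet_alloc_rx_buffers(); the first pull_to
 * bytes are only pulled into the linear area later, in
 * handle_incoming_queue().
 */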
288 
289 
290 static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
291 {
292 	RING_IDX req_prod = queue->rx.req_prod_pvt;
293 	int notify;
294 
295 	if (unlikely(!netif_carrier_ok(queue->info->netdev)))
296 		return;
297 
298 	for (req_prod = queue->rx.req_prod_pvt;
299 	     req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE;
300 	     req_prod++) {
301 		struct sk_buff *skb;
302 		unsigned short id;
303 		grant_ref_t ref;
304 		unsigned long pfn;
305 		struct xen_netif_rx_request *req;
306 
307 		skb = xennet_alloc_one_rx_buffer(queue);
308 		if (!skb)
309 			break;
310 
311 		id = xennet_rxidx(req_prod);
312 
313 		BUG_ON(queue->rx_skbs[id]);
314 		queue->rx_skbs[id] = skb;
315 
316 		ref = gnttab_claim_grant_reference(&queue->gref_rx_head);
317 		BUG_ON((signed short)ref < 0);
318 		queue->grant_rx_ref[id] = ref;
319 
320 		pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0]));
321 
322 		req = RING_GET_REQUEST(&queue->rx, req_prod);
323 		gnttab_grant_foreign_access_ref(ref,
324 						queue->info->xbdev->otherend_id,
325 						pfn_to_mfn(pfn),
326 						0);
327 
328 		req->id = id;
329 		req->gref = ref;
330 	}
331 
332 	queue->rx.req_prod_pvt = req_prod;
333 
334 	/* Not enough requests? Try again later. */
335 	if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN) {
336 		mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
337 		return;
338 	}
339 
340 	wmb();		/* barrier so backend sees requests */
341 
342 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify);
343 	if (notify)
344 		notify_remote_via_irq(queue->rx_irq);
345 }
346 
347 static int xennet_open(struct net_device *dev)
348 {
349 	struct netfront_info *np = netdev_priv(dev);
350 	unsigned int num_queues = dev->real_num_tx_queues;
351 	unsigned int i = 0;
352 	struct netfront_queue *queue = NULL;
353 
354 	for (i = 0; i < num_queues; ++i) {
355 		queue = &np->queues[i];
356 		napi_enable(&queue->napi);
357 
358 		spin_lock_bh(&queue->rx_lock);
359 		if (netif_carrier_ok(dev)) {
360 			xennet_alloc_rx_buffers(queue);
361 			queue->rx.sring->rsp_event = queue->rx.rsp_cons + 1;
362 			if (RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))
363 				napi_schedule(&queue->napi);
364 		}
365 		spin_unlock_bh(&queue->rx_lock);
366 	}
367 
368 	netif_tx_start_all_queues(dev);
369 
370 	return 0;
371 }
372 
373 static void xennet_tx_buf_gc(struct netfront_queue *queue)
374 {
375 	RING_IDX cons, prod;
376 	unsigned short id;
377 	struct sk_buff *skb;
378 
379 	BUG_ON(!netif_carrier_ok(queue->info->netdev));
380 
381 	do {
382 		prod = queue->tx.sring->rsp_prod;
383 		rmb(); /* Ensure we see responses up to 'prod'. */
384 
385 		for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
386 			struct xen_netif_tx_response *txrsp;
387 
388 			txrsp = RING_GET_RESPONSE(&queue->tx, cons);
389 			if (txrsp->status == XEN_NETIF_RSP_NULL)
390 				continue;
391 
392 			id  = txrsp->id;
393 			skb = queue->tx_skbs[id].skb;
394 			if (unlikely(gnttab_query_foreign_access(
395 				queue->grant_tx_ref[id]) != 0)) {
396 				pr_alert("%s: warning -- grant still in use by backend domain\n",
397 					 __func__);
398 				BUG();
399 			}
400 			gnttab_end_foreign_access_ref(
401 				queue->grant_tx_ref[id], GNTMAP_readonly);
402 			gnttab_release_grant_reference(
403 				&queue->gref_tx_head, queue->grant_tx_ref[id]);
404 			queue->grant_tx_ref[id] = GRANT_INVALID_REF;
405 			queue->grant_tx_page[id] = NULL;
406 			add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, id);
407 			dev_kfree_skb_irq(skb);
408 		}
409 
410 		queue->tx.rsp_cons = prod;
411 
412 		/*
413 		 * Set a new event, then check for race with update of tx_cons.
414 		 * Note that it is essential to schedule a callback, no matter
415 		 * how few buffers are pending. Even if there is space in the
416 		 * transmit ring, higher layers may be blocked because too much
417 		 * data is outstanding: in such cases notification from Xen is
418 		 * likely to be the only kick that we'll get.
419 		 */
420 		queue->tx.sring->rsp_event =
421 			prod + ((queue->tx.sring->req_prod - prod) >> 1) + 1;
422 		mb();		/* update shared area */
423 	} while ((cons == prod) && (prod != queue->tx.sring->rsp_prod));
424 
425 	xennet_maybe_wake_tx(queue);
426 }
427 
428 static void xennet_make_frags(struct sk_buff *skb, struct netfront_queue *queue,
429 			      struct xen_netif_tx_request *tx)
430 {
431 	char *data = skb->data;
432 	unsigned long mfn;
433 	RING_IDX prod = queue->tx.req_prod_pvt;
434 	int frags = skb_shinfo(skb)->nr_frags;
435 	unsigned int offset = offset_in_page(data);
436 	unsigned int len = skb_headlen(skb);
437 	unsigned int id;
438 	grant_ref_t ref;
439 	int i;
440 
441 	/* While the header overlaps a page boundary (including being
442 	   larger than a page), split it into page-sized chunks. */
443 	while (len > PAGE_SIZE - offset) {
444 		tx->size = PAGE_SIZE - offset;
445 		tx->flags |= XEN_NETTXF_more_data;
446 		len -= tx->size;
447 		data += tx->size;
448 		offset = 0;
449 
450 		id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
451 		queue->tx_skbs[id].skb = skb_get(skb);
452 		tx = RING_GET_REQUEST(&queue->tx, prod++);
453 		tx->id = id;
454 		ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
455 		BUG_ON((signed short)ref < 0);
456 
457 		mfn = virt_to_mfn(data);
458 		gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
459 						mfn, GNTMAP_readonly);
460 
461 		queue->grant_tx_page[id] = virt_to_page(data);
462 		tx->gref = queue->grant_tx_ref[id] = ref;
463 		tx->offset = offset;
464 		tx->size = len;
465 		tx->flags = 0;
466 	}
467 
468 	/* Grant backend access to each skb fragment page. */
469 	for (i = 0; i < frags; i++) {
470 		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
471 		struct page *page = skb_frag_page(frag);
472 
473 		len = skb_frag_size(frag);
474 		offset = frag->page_offset;
475 
476 		/* Skip fully-unused pages at the start of the compound page */
477 		page += offset >> PAGE_SHIFT;
478 		offset &= ~PAGE_MASK;
479 
480 		while (len > 0) {
481 			unsigned long bytes;
482 
483 			bytes = PAGE_SIZE - offset;
484 			if (bytes > len)
485 				bytes = len;
486 
487 			tx->flags |= XEN_NETTXF_more_data;
488 
489 			id = get_id_from_freelist(&queue->tx_skb_freelist,
490 						  queue->tx_skbs);
491 			queue->tx_skbs[id].skb = skb_get(skb);
492 			tx = RING_GET_REQUEST(&queue->tx, prod++);
493 			tx->id = id;
494 			ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
495 			BUG_ON((signed short)ref < 0);
496 
497 			mfn = pfn_to_mfn(page_to_pfn(page));
498 			gnttab_grant_foreign_access_ref(ref,
499 							queue->info->xbdev->otherend_id,
500 							mfn, GNTMAP_readonly);
501 
502 			queue->grant_tx_page[id] = page;
503 			tx->gref = queue->grant_tx_ref[id] = ref;
504 			tx->offset = offset;
505 			tx->size = bytes;
506 			tx->flags = 0;
507 
508 			offset += bytes;
509 			len -= bytes;
510 
511 			/* Next frame */
512 			if (offset == PAGE_SIZE && len) {
513 				BUG_ON(!PageCompound(page));
514 				page++;
515 				offset = 0;
516 			}
517 		}
518 	}
519 
520 	queue->tx.req_prod_pvt = prod;
521 }
522 
523 /*
524  * Count how many ring slots are required to send the frags of this
525  * skb. Each frag might be a compound page.
526  */
527 static int xennet_count_skb_frag_slots(struct sk_buff *skb)
528 {
529 	int i, frags = skb_shinfo(skb)->nr_frags;
530 	int pages = 0;
531 
532 	for (i = 0; i < frags; i++) {
533 		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
534 		unsigned long size = skb_frag_size(frag);
535 		unsigned long offset = frag->page_offset;
536 
537 		/* Skip fully-unused pages at the start of the compound page */
538 		offset &= ~PAGE_MASK;
539 
540 		pages += PFN_UP(offset + size);
541 	}
542 
543 	return pages;
544 }
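
/* Example (assuming 4 KiB pages): a 6000-byte frag that starts 1000 bytes
 * into its first page needs PFN_UP(1000 + 6000) == 2 ring slots, since a
 * compound-page frag is granted to the backend one page at a time.
 */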
545 
546 static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
547 			       void *accel_priv, select_queue_fallback_t fallback)
548 {
549 	unsigned int num_queues = dev->real_num_tx_queues;
550 	u32 hash;
551 	u16 queue_idx;
552 
553 	/* First, check if there is only one queue */
554 	if (num_queues == 1) {
555 		queue_idx = 0;
556 	} else {
557 		hash = skb_get_hash(skb);
558 		queue_idx = hash % num_queues;
559 	}
560 
561 	return queue_idx;
562 }
563 
564 static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
565 {
566 	unsigned short id;
567 	struct netfront_info *np = netdev_priv(dev);
568 	struct netfront_stats *stats = this_cpu_ptr(np->stats);
569 	struct xen_netif_tx_request *tx;
570 	char *data = skb->data;
571 	RING_IDX i;
572 	grant_ref_t ref;
573 	unsigned long mfn;
574 	int notify;
575 	int slots;
576 	unsigned int offset = offset_in_page(data);
577 	unsigned int len = skb_headlen(skb);
578 	unsigned long flags;
579 	struct netfront_queue *queue = NULL;
580 	unsigned int num_queues = dev->real_num_tx_queues;
581 	u16 queue_index;
582 
583 	/* Drop the packet if no queues are set up */
584 	if (num_queues < 1)
585 		goto drop;
586 	/* Determine which queue to transmit this SKB on */
587 	queue_index = skb_get_queue_mapping(skb);
588 	queue = &np->queues[queue_index];
589 
590 	/* If skb->len is too big for wire format, drop skb and alert
591 	 * user about misconfiguration.
592 	 */
593 	if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) {
594 		net_alert_ratelimited(
595 			"xennet: skb->len = %u, too big for wire format\n",
596 			skb->len);
597 		goto drop;
598 	}
599 
600 	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE) +
601 		xennet_count_skb_frag_slots(skb);
602 	if (unlikely(slots > MAX_SKB_FRAGS + 1)) {
603 		net_dbg_ratelimited("xennet: skb rides the rocket: %d slots, %d bytes\n",
604 				    slots, skb->len);
605 		if (skb_linearize(skb))
606 			goto drop;
607 		data = skb->data;
608 		offset = offset_in_page(data);
609 		len = skb_headlen(skb);
610 	}
611 
612 	spin_lock_irqsave(&queue->tx_lock, flags);
613 
614 	if (unlikely(!netif_carrier_ok(dev) ||
615 		     (slots > 1 && !xennet_can_sg(dev)) ||
616 		     netif_needs_gso(dev, skb, netif_skb_features(skb)))) {
617 		spin_unlock_irqrestore(&queue->tx_lock, flags);
618 		goto drop;
619 	}
620 
621 	i = queue->tx.req_prod_pvt;
622 
623 	id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
624 	queue->tx_skbs[id].skb = skb;
625 
626 	tx = RING_GET_REQUEST(&queue->tx, i);
627 
628 	tx->id   = id;
629 	ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
630 	BUG_ON((signed short)ref < 0);
631 	mfn = virt_to_mfn(data);
632 	gnttab_grant_foreign_access_ref(
633 		ref, queue->info->xbdev->otherend_id, mfn, GNTMAP_readonly);
634 	queue->grant_tx_page[id] = virt_to_page(data);
635 	tx->gref = queue->grant_tx_ref[id] = ref;
636 	tx->offset = offset;
637 	tx->size = len;
638 
639 	tx->flags = 0;
640 	if (skb->ip_summed == CHECKSUM_PARTIAL)
641 		/* local packet? */
642 		tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
643 	else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
644 		/* remote but checksummed. */
645 		tx->flags |= XEN_NETTXF_data_validated;
646 
647 	if (skb_shinfo(skb)->gso_size) {
648 		struct xen_netif_extra_info *gso;
649 
650 		gso = (struct xen_netif_extra_info *)
651 			RING_GET_REQUEST(&queue->tx, ++i);
652 
653 		tx->flags |= XEN_NETTXF_extra_info;
654 
655 		gso->u.gso.size = skb_shinfo(skb)->gso_size;
656 		gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
657 			XEN_NETIF_GSO_TYPE_TCPV6 :
658 			XEN_NETIF_GSO_TYPE_TCPV4;
659 		gso->u.gso.pad = 0;
660 		gso->u.gso.features = 0;
661 
662 		gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
663 		gso->flags = 0;
664 	}
665 
666 	queue->tx.req_prod_pvt = i + 1;
667 
668 	xennet_make_frags(skb, queue, tx);
669 	tx->size = skb->len;
670 
671 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
672 	if (notify)
673 		notify_remote_via_irq(queue->tx_irq);
674 
675 	u64_stats_update_begin(&stats->syncp);
676 	stats->tx_bytes += skb->len;
677 	stats->tx_packets++;
678 	u64_stats_update_end(&stats->syncp);
679 
680 	/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
681 	xennet_tx_buf_gc(queue);
682 
683 	if (!netfront_tx_slot_available(queue))
684 		netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
685 
686 	spin_unlock_irqrestore(&queue->tx_lock, flags);
687 
688 	return NETDEV_TX_OK;
689 
690  drop:
691 	dev->stats.tx_dropped++;
692 	dev_kfree_skb_any(skb);
693 	return NETDEV_TX_OK;
694 }
695 
696 static int xennet_close(struct net_device *dev)
697 {
698 	struct netfront_info *np = netdev_priv(dev);
699 	unsigned int num_queues = dev->real_num_tx_queues;
700 	unsigned int i;
701 	struct netfront_queue *queue;
702 	netif_tx_stop_all_queues(np->netdev);
703 	for (i = 0; i < num_queues; ++i) {
704 		queue = &np->queues[i];
705 		napi_disable(&queue->napi);
706 	}
707 	return 0;
708 }
709 
710 static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
711 				grant_ref_t ref)
712 {
713 	int new = xennet_rxidx(queue->rx.req_prod_pvt);
714 
715 	BUG_ON(queue->rx_skbs[new]);
716 	queue->rx_skbs[new] = skb;
717 	queue->grant_rx_ref[new] = ref;
718 	RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->id = new;
719 	RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref;
720 	queue->rx.req_prod_pvt++;
721 }
722 
723 static int xennet_get_extras(struct netfront_queue *queue,
724 			     struct xen_netif_extra_info *extras,
725 			     RING_IDX rp)
726 
727 {
728 	struct xen_netif_extra_info *extra;
729 	struct device *dev = &queue->info->netdev->dev;
730 	RING_IDX cons = queue->rx.rsp_cons;
731 	int err = 0;
732 
733 	do {
734 		struct sk_buff *skb;
735 		grant_ref_t ref;
736 
737 		if (unlikely(cons + 1 == rp)) {
738 			if (net_ratelimit())
739 				dev_warn(dev, "Missing extra info\n");
740 			err = -EBADR;
741 			break;
742 		}
743 
744 		extra = (struct xen_netif_extra_info *)
745 			RING_GET_RESPONSE(&queue->rx, ++cons);
746 
747 		if (unlikely(!extra->type ||
748 			     extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
749 			if (net_ratelimit())
750 				dev_warn(dev, "Invalid extra type: %d\n",
751 					extra->type);
752 			err = -EINVAL;
753 		} else {
754 			memcpy(&extras[extra->type - 1], extra,
755 			       sizeof(*extra));
756 		}
757 
758 		skb = xennet_get_rx_skb(queue, cons);
759 		ref = xennet_get_rx_ref(queue, cons);
760 		xennet_move_rx_slot(queue, skb, ref);
761 	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
762 
763 	queue->rx.rsp_cons = cons;
764 	return err;
765 }
766 
767 static int xennet_get_responses(struct netfront_queue *queue,
768 				struct netfront_rx_info *rinfo, RING_IDX rp,
769 				struct sk_buff_head *list)
770 {
771 	struct xen_netif_rx_response *rx = &rinfo->rx;
772 	struct xen_netif_extra_info *extras = rinfo->extras;
773 	struct device *dev = &queue->info->netdev->dev;
774 	RING_IDX cons = queue->rx.rsp_cons;
775 	struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
776 	grant_ref_t ref = xennet_get_rx_ref(queue, cons);
777 	int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
778 	int slots = 1;
779 	int err = 0;
780 	unsigned long ret;
781 
782 	if (rx->flags & XEN_NETRXF_extra_info) {
783 		err = xennet_get_extras(queue, extras, rp);
784 		cons = queue->rx.rsp_cons;
785 	}
786 
787 	for (;;) {
788 		if (unlikely(rx->status < 0 ||
789 			     rx->offset + rx->status > PAGE_SIZE)) {
790 			if (net_ratelimit())
791 				dev_warn(dev, "rx->offset: %x, size: %u\n",
792 					 rx->offset, rx->status);
793 			xennet_move_rx_slot(queue, skb, ref);
794 			err = -EINVAL;
795 			goto next;
796 		}
797 
798 		/*
799 		 * This definitely indicates a bug, either in this driver or in
800 		 * the backend driver. In future this should flag the bad
801 		 * situation to the system controller to reboot the backend.
802 		 */
803 		if (ref == GRANT_INVALID_REF) {
804 			if (net_ratelimit())
805 				dev_warn(dev, "Bad rx response id %d.\n",
806 					 rx->id);
807 			err = -EINVAL;
808 			goto next;
809 		}
810 
811 		ret = gnttab_end_foreign_access_ref(ref, 0);
812 		BUG_ON(!ret);
813 
814 		gnttab_release_grant_reference(&queue->gref_rx_head, ref);
815 
816 		__skb_queue_tail(list, skb);
817 
818 next:
819 		if (!(rx->flags & XEN_NETRXF_more_data))
820 			break;
821 
822 		if (cons + slots == rp) {
823 			if (net_ratelimit())
824 				dev_warn(dev, "Need more slots\n");
825 			err = -ENOENT;
826 			break;
827 		}
828 
829 		rx = RING_GET_RESPONSE(&queue->rx, cons + slots);
830 		skb = xennet_get_rx_skb(queue, cons + slots);
831 		ref = xennet_get_rx_ref(queue, cons + slots);
832 		slots++;
833 	}
834 
835 	if (unlikely(slots > max)) {
836 		if (net_ratelimit())
837 			dev_warn(dev, "Too many slots\n");
838 		err = -E2BIG;
839 	}
840 
841 	if (unlikely(err))
842 		queue->rx.rsp_cons = cons + slots;
843 
844 	return err;
845 }
846 
847 static int xennet_set_skb_gso(struct sk_buff *skb,
848 			      struct xen_netif_extra_info *gso)
849 {
850 	if (!gso->u.gso.size) {
851 		if (net_ratelimit())
852 			pr_warn("GSO size must not be zero\n");
853 		return -EINVAL;
854 	}
855 
856 	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4 &&
857 	    gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV6) {
858 		if (net_ratelimit())
859 			pr_warn("Bad GSO type %d\n", gso->u.gso.type);
860 		return -EINVAL;
861 	}
862 
863 	skb_shinfo(skb)->gso_size = gso->u.gso.size;
864 	skb_shinfo(skb)->gso_type =
865 		(gso->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4) ?
866 		SKB_GSO_TCPV4 :
867 		SKB_GSO_TCPV6;
868 
869 	/* Header must be checked, and gso_segs computed. */
870 	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
871 	skb_shinfo(skb)->gso_segs = 0;
872 
873 	return 0;
874 }
875 
876 static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
877 				  struct sk_buff *skb,
878 				  struct sk_buff_head *list)
879 {
880 	struct skb_shared_info *shinfo = skb_shinfo(skb);
881 	RING_IDX cons = queue->rx.rsp_cons;
882 	struct sk_buff *nskb;
883 
884 	while ((nskb = __skb_dequeue(list))) {
885 		struct xen_netif_rx_response *rx =
886 			RING_GET_RESPONSE(&queue->rx, ++cons);
887 		skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
888 
889 		if (shinfo->nr_frags == MAX_SKB_FRAGS) {
890 			unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
891 
892 			BUG_ON(pull_to <= skb_headlen(skb));
893 			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
894 		}
895 		BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);
896 
897 		skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
898 				rx->offset, rx->status, PAGE_SIZE);
899 
900 		skb_shinfo(nskb)->nr_frags = 0;
901 		kfree_skb(nskb);
902 	}
903 
904 	return cons;
905 }
906 
907 static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
908 {
909 	bool recalculate_partial_csum = false;
910 
911 	/*
912 	 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
913 	 * peers can fail to set NETRXF_csum_blank when sending a GSO
914 	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
915 	 * recalculate the partial checksum.
916 	 */
917 	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
918 		struct netfront_info *np = netdev_priv(dev);
919 		atomic_inc(&np->rx_gso_checksum_fixup);
920 		skb->ip_summed = CHECKSUM_PARTIAL;
921 		recalculate_partial_csum = true;
922 	}
923 
924 	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
925 	if (skb->ip_summed != CHECKSUM_PARTIAL)
926 		return 0;
927 
928 	return skb_checksum_setup(skb, recalculate_partial_csum);
929 }
930 
931 static int handle_incoming_queue(struct netfront_queue *queue,
932 				 struct sk_buff_head *rxq)
933 {
934 	struct netfront_stats *stats = this_cpu_ptr(queue->info->stats);
935 	int packets_dropped = 0;
936 	struct sk_buff *skb;
937 
938 	while ((skb = __skb_dequeue(rxq)) != NULL) {
939 		int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
940 
941 		if (pull_to > skb_headlen(skb))
942 			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
943 
944 		/* Ethernet work: Delayed to here as it peeks the header. */
945 		skb->protocol = eth_type_trans(skb, queue->info->netdev);
946 		skb_reset_network_header(skb);
947 
948 		if (checksum_setup(queue->info->netdev, skb)) {
949 			kfree_skb(skb);
950 			packets_dropped++;
951 			queue->info->netdev->stats.rx_errors++;
952 			continue;
953 		}
954 
955 		u64_stats_update_begin(&stats->syncp);
956 		stats->rx_packets++;
957 		stats->rx_bytes += skb->len;
958 		u64_stats_update_end(&stats->syncp);
959 
960 		/* Pass it up. */
961 		napi_gro_receive(&queue->napi, skb);
962 	}
963 
964 	return packets_dropped;
965 }
966 
967 static int xennet_poll(struct napi_struct *napi, int budget)
968 {
969 	struct netfront_queue *queue = container_of(napi, struct netfront_queue, napi);
970 	struct net_device *dev = queue->info->netdev;
971 	struct sk_buff *skb;
972 	struct netfront_rx_info rinfo;
973 	struct xen_netif_rx_response *rx = &rinfo.rx;
974 	struct xen_netif_extra_info *extras = rinfo.extras;
975 	RING_IDX i, rp;
976 	int work_done;
977 	struct sk_buff_head rxq;
978 	struct sk_buff_head errq;
979 	struct sk_buff_head tmpq;
980 	int err;
981 
982 	spin_lock(&queue->rx_lock);
983 
984 	skb_queue_head_init(&rxq);
985 	skb_queue_head_init(&errq);
986 	skb_queue_head_init(&tmpq);
987 
988 	rp = queue->rx.sring->rsp_prod;
989 	rmb(); /* Ensure we see queued responses up to 'rp'. */
990 
991 	i = queue->rx.rsp_cons;
992 	work_done = 0;
993 	while ((i != rp) && (work_done < budget)) {
994 		memcpy(rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(*rx));
995 		memset(extras, 0, sizeof(rinfo.extras));
996 
997 		err = xennet_get_responses(queue, &rinfo, rp, &tmpq);
998 
999 		if (unlikely(err)) {
1000 err:
1001 			while ((skb = __skb_dequeue(&tmpq)))
1002 				__skb_queue_tail(&errq, skb);
1003 			dev->stats.rx_errors++;
1004 			i = queue->rx.rsp_cons;
1005 			continue;
1006 		}
1007 
1008 		skb = __skb_dequeue(&tmpq);
1009 
1010 		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1011 			struct xen_netif_extra_info *gso;
1012 			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1013 
1014 			if (unlikely(xennet_set_skb_gso(skb, gso))) {
1015 				__skb_queue_head(&tmpq, skb);
1016 				queue->rx.rsp_cons += skb_queue_len(&tmpq);
1017 				goto err;
1018 			}
1019 		}
1020 
1021 		NETFRONT_SKB_CB(skb)->pull_to = rx->status;
1022 		if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
1023 			NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;
1024 
1025 		skb_shinfo(skb)->frags[0].page_offset = rx->offset;
1026 		skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
1027 		skb->data_len = rx->status;
1028 		skb->len += rx->status;
1029 
1030 		i = xennet_fill_frags(queue, skb, &tmpq);
1031 
1032 		if (rx->flags & XEN_NETRXF_csum_blank)
1033 			skb->ip_summed = CHECKSUM_PARTIAL;
1034 		else if (rx->flags & XEN_NETRXF_data_validated)
1035 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1036 
1037 		__skb_queue_tail(&rxq, skb);
1038 
1039 		queue->rx.rsp_cons = ++i;
1040 		work_done++;
1041 	}
1042 
1043 	__skb_queue_purge(&errq);
1044 
1045 	work_done -= handle_incoming_queue(queue, &rxq);
1046 
1047 	xennet_alloc_rx_buffers(queue);
1048 
1049 	if (work_done < budget) {
1050 		int more_to_do = 0;
1051 
1052 		napi_complete(napi);
1053 
1054 		RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do);
1055 		if (more_to_do)
1056 			napi_schedule(napi);
1057 	}
1058 
1059 	spin_unlock(&queue->rx_lock);
1060 
1061 	return work_done;
1062 }
1063 
1064 static int xennet_change_mtu(struct net_device *dev, int mtu)
1065 {
1066 	int max = xennet_can_sg(dev) ?
1067 		XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER : ETH_DATA_LEN;
1068 
1069 	if (mtu > max)
1070 		return -EINVAL;
1071 	dev->mtu = mtu;
1072 	return 0;
1073 }
1074 
1075 static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
1076 						    struct rtnl_link_stats64 *tot)
1077 {
1078 	struct netfront_info *np = netdev_priv(dev);
1079 	int cpu;
1080 
1081 	for_each_possible_cpu(cpu) {
1082 		struct netfront_stats *stats = per_cpu_ptr(np->stats, cpu);
1083 		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
1084 		unsigned int start;
1085 
1086 		do {
1087 			start = u64_stats_fetch_begin_irq(&stats->syncp);
1088 
1089 			rx_packets = stats->rx_packets;
1090 			tx_packets = stats->tx_packets;
1091 			rx_bytes = stats->rx_bytes;
1092 			tx_bytes = stats->tx_bytes;
1093 		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
1094 
1095 		tot->rx_packets += rx_packets;
1096 		tot->tx_packets += tx_packets;
1097 		tot->rx_bytes   += rx_bytes;
1098 		tot->tx_bytes   += tx_bytes;
1099 	}
1100 
1101 	tot->rx_errors  = dev->stats.rx_errors;
1102 	tot->tx_dropped = dev->stats.tx_dropped;
1103 
1104 	return tot;
1105 }
1106 
1107 static void xennet_release_tx_bufs(struct netfront_queue *queue)
1108 {
1109 	struct sk_buff *skb;
1110 	int i;
1111 
1112 	for (i = 0; i < NET_TX_RING_SIZE; i++) {
1113 		/* Skip over entries which are actually freelist references */
1114 		if (skb_entry_is_link(&queue->tx_skbs[i]))
1115 			continue;
1116 
1117 		skb = queue->tx_skbs[i].skb;
1118 		get_page(queue->grant_tx_page[i]);
1119 		gnttab_end_foreign_access(queue->grant_tx_ref[i],
1120 					  GNTMAP_readonly,
1121 					  (unsigned long)page_address(queue->grant_tx_page[i]));
1122 		queue->grant_tx_page[i] = NULL;
1123 		queue->grant_tx_ref[i] = GRANT_INVALID_REF;
1124 		add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, i);
1125 		dev_kfree_skb_irq(skb);
1126 	}
1127 }
1128 
1129 static void xennet_release_rx_bufs(struct netfront_queue *queue)
1130 {
1131 	int id, ref;
1132 
1133 	spin_lock_bh(&queue->rx_lock);
1134 
1135 	for (id = 0; id < NET_RX_RING_SIZE; id++) {
1136 		struct sk_buff *skb;
1137 		struct page *page;
1138 
1139 		skb = queue->rx_skbs[id];
1140 		if (!skb)
1141 			continue;
1142 
1143 		ref = queue->grant_rx_ref[id];
1144 		if (ref == GRANT_INVALID_REF)
1145 			continue;
1146 
1147 		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
1148 
1149 		/* gnttab_end_foreign_access() needs a page ref until
1150 		 * foreign access is ended (which may be deferred).
1151 		 */
1152 		get_page(page);
1153 		gnttab_end_foreign_access(ref, 0,
1154 					  (unsigned long)page_address(page));
1155 		queue->grant_rx_ref[id] = GRANT_INVALID_REF;
1156 
1157 		kfree_skb(skb);
1158 	}
1159 
1160 	spin_unlock_bh(&queue->rx_lock);
1161 }
1162 
1163 static netdev_features_t xennet_fix_features(struct net_device *dev,
1164 	netdev_features_t features)
1165 {
1166 	struct netfront_info *np = netdev_priv(dev);
1167 	int val;
1168 
1169 	if (features & NETIF_F_SG) {
1170 		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
1171 				 "%d", &val) < 0)
1172 			val = 0;
1173 
1174 		if (!val)
1175 			features &= ~NETIF_F_SG;
1176 	}
1177 
1178 	if (features & NETIF_F_IPV6_CSUM) {
1179 		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
1180 				 "feature-ipv6-csum-offload", "%d", &val) < 0)
1181 			val = 0;
1182 
1183 		if (!val)
1184 			features &= ~NETIF_F_IPV6_CSUM;
1185 	}
1186 
1187 	if (features & NETIF_F_TSO) {
1188 		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
1189 				 "feature-gso-tcpv4", "%d", &val) < 0)
1190 			val = 0;
1191 
1192 		if (!val)
1193 			features &= ~NETIF_F_TSO;
1194 	}
1195 
1196 	if (features & NETIF_F_TSO6) {
1197 		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
1198 				 "feature-gso-tcpv6", "%d", &val) < 0)
1199 			val = 0;
1200 
1201 		if (!val)
1202 			features &= ~NETIF_F_TSO6;
1203 	}
1204 
1205 	return features;
1206 }
1207 
1208 static int xennet_set_features(struct net_device *dev,
1209 	netdev_features_t features)
1210 {
1211 	if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
1212 		netdev_info(dev, "Reducing MTU because no SG offload\n");
1213 		dev->mtu = ETH_DATA_LEN;
1214 	}
1215 
1216 	return 0;
1217 }
1218 
1219 static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
1220 {
1221 	struct netfront_queue *queue = dev_id;
1222 	unsigned long flags;
1223 
1224 	spin_lock_irqsave(&queue->tx_lock, flags);
1225 	xennet_tx_buf_gc(queue);
1226 	spin_unlock_irqrestore(&queue->tx_lock, flags);
1227 
1228 	return IRQ_HANDLED;
1229 }
1230 
1231 static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
1232 {
1233 	struct netfront_queue *queue = dev_id;
1234 	struct net_device *dev = queue->info->netdev;
1235 
1236 	if (likely(netif_carrier_ok(dev) &&
1237 		   RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)))
1238 		napi_schedule(&queue->napi);
1239 
1240 	return IRQ_HANDLED;
1241 }
1242 
1243 static irqreturn_t xennet_interrupt(int irq, void *dev_id)
1244 {
1245 	xennet_tx_interrupt(irq, dev_id);
1246 	xennet_rx_interrupt(irq, dev_id);
1247 	return IRQ_HANDLED;
1248 }
1249 
1250 #ifdef CONFIG_NET_POLL_CONTROLLER
1251 static void xennet_poll_controller(struct net_device *dev)
1252 {
1253 	/* Poll each queue */
1254 	struct netfront_info *info = netdev_priv(dev);
1255 	unsigned int num_queues = dev->real_num_tx_queues;
1256 	unsigned int i;
1257 	for (i = 0; i < num_queues; ++i)
1258 		xennet_interrupt(0, &info->queues[i]);
1259 }
1260 #endif
1261 
1262 static const struct net_device_ops xennet_netdev_ops = {
1263 	.ndo_open            = xennet_open,
1264 	.ndo_stop            = xennet_close,
1265 	.ndo_start_xmit      = xennet_start_xmit,
1266 	.ndo_change_mtu	     = xennet_change_mtu,
1267 	.ndo_get_stats64     = xennet_get_stats64,
1268 	.ndo_set_mac_address = eth_mac_addr,
1269 	.ndo_validate_addr   = eth_validate_addr,
1270 	.ndo_fix_features    = xennet_fix_features,
1271 	.ndo_set_features    = xennet_set_features,
1272 	.ndo_select_queue    = xennet_select_queue,
1273 #ifdef CONFIG_NET_POLL_CONTROLLER
1274 	.ndo_poll_controller = xennet_poll_controller,
1275 #endif
1276 };
1277 
1278 static struct net_device *xennet_create_dev(struct xenbus_device *dev)
1279 {
1280 	int err;
1281 	struct net_device *netdev;
1282 	struct netfront_info *np;
1283 
1284 	netdev = alloc_etherdev_mq(sizeof(struct netfront_info), xennet_max_queues);
1285 	if (!netdev)
1286 		return ERR_PTR(-ENOMEM);
1287 
1288 	np                   = netdev_priv(netdev);
1289 	np->xbdev            = dev;
1290 
1291 	/* No need to use rtnl_lock() before the call below as it
1292 	 * happens before register_netdev().
1293 	 */
1294 	netif_set_real_num_tx_queues(netdev, 0);
1295 	np->queues = NULL;
1296 
1297 	err = -ENOMEM;
1298 	np->stats = netdev_alloc_pcpu_stats(struct netfront_stats);
1299 	if (np->stats == NULL)
1300 		goto exit;
1301 
1302 	netdev->netdev_ops	= &xennet_netdev_ops;
1303 
1304 	netdev->features        = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
1305 				  NETIF_F_GSO_ROBUST;
1306 	netdev->hw_features	= NETIF_F_SG |
1307 				  NETIF_F_IPV6_CSUM |
1308 				  NETIF_F_TSO | NETIF_F_TSO6;
1309 
1310 	/*
1311 	 * Assume that all hw features are available for now. This set
1312 	 * will be adjusted by the call to netdev_update_features() in
1313 	 * xennet_connect() which is the earliest point where we can
1314 	 * negotiate with the backend regarding supported features.
1315 	 */
1316 	netdev->features |= netdev->hw_features;
1317 
1318 	netdev->ethtool_ops = &xennet_ethtool_ops;
1319 	SET_NETDEV_DEV(netdev, &dev->dev);
1320 
1321 	netif_set_gso_max_size(netdev, XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER);
1322 
1323 	np->netdev = netdev;
1324 
1325 	netif_carrier_off(netdev);
1326 
1327 	return netdev;
1328 
1329  exit:
1330 	free_netdev(netdev);
1331 	return ERR_PTR(err);
1332 }
1333 
1334 /**
1335  * Entry point to this code when a new device is created.  Allocate the basic
1336  * structures and the ring buffers for communication with the backend, and
1337  * inform the backend of the appropriate details for those.
1338  */
1339 static int netfront_probe(struct xenbus_device *dev,
1340 			  const struct xenbus_device_id *id)
1341 {
1342 	int err;
1343 	struct net_device *netdev;
1344 	struct netfront_info *info;
1345 
1346 	netdev = xennet_create_dev(dev);
1347 	if (IS_ERR(netdev)) {
1348 		err = PTR_ERR(netdev);
1349 		xenbus_dev_fatal(dev, err, "creating netdev");
1350 		return err;
1351 	}
1352 
1353 	info = netdev_priv(netdev);
1354 	dev_set_drvdata(&dev->dev, info);
1355 
1356 	err = register_netdev(info->netdev);
1357 	if (err) {
1358 		pr_warn("%s: register_netdev err=%d\n", __func__, err);
1359 		goto fail;
1360 	}
1361 
1362 	err = xennet_sysfs_addif(info->netdev);
1363 	if (err) {
1364 		unregister_netdev(info->netdev);
1365 		pr_warn("%s: add sysfs failed err=%d\n", __func__, err);
1366 		goto fail;
1367 	}
1368 
1369 	return 0;
1370 
1371  fail:
1372 	free_netdev(netdev);
1373 	dev_set_drvdata(&dev->dev, NULL);
1374 	return err;
1375 }
1376 
1377 static void xennet_end_access(int ref, void *page)
1378 {
1379 	/* This frees the page as a side-effect */
1380 	if (ref != GRANT_INVALID_REF)
1381 		gnttab_end_foreign_access(ref, 0, (unsigned long)page);
1382 }
1383 
1384 static void xennet_disconnect_backend(struct netfront_info *info)
1385 {
1386 	unsigned int i = 0;
1387 	unsigned int num_queues = info->netdev->real_num_tx_queues;
1388 
1389 	netif_carrier_off(info->netdev);
1390 
1391 	for (i = 0; i < num_queues; ++i) {
1392 		struct netfront_queue *queue = &info->queues[i];
1393 
1394 		if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
1395 			unbind_from_irqhandler(queue->tx_irq, queue);
1396 		if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
1397 			unbind_from_irqhandler(queue->tx_irq, queue);
1398 			unbind_from_irqhandler(queue->rx_irq, queue);
1399 		}
1400 		queue->tx_evtchn = queue->rx_evtchn = 0;
1401 		queue->tx_irq = queue->rx_irq = 0;
1402 
1403 		napi_synchronize(&queue->napi);
1404 
1405 		xennet_release_tx_bufs(queue);
1406 		xennet_release_rx_bufs(queue);
1407 		gnttab_free_grant_references(queue->gref_tx_head);
1408 		gnttab_free_grant_references(queue->gref_rx_head);
1409 
1410 		/* End access and free the pages */
1411 		xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
1412 		xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
1413 
1414 		queue->tx_ring_ref = GRANT_INVALID_REF;
1415 		queue->rx_ring_ref = GRANT_INVALID_REF;
1416 		queue->tx.sring = NULL;
1417 		queue->rx.sring = NULL;
1418 	}
1419 }
1420 
1421 /**
1422  * We are reconnecting to the backend, due to a suspend/resume, or a backend
1423  * driver restart.  We tear down our netif structure and recreate it, but
1424  * leave the device-layer structures intact so that this is transparent to the
1425  * rest of the kernel.
1426  */
1427 static int netfront_resume(struct xenbus_device *dev)
1428 {
1429 	struct netfront_info *info = dev_get_drvdata(&dev->dev);
1430 
1431 	dev_dbg(&dev->dev, "%s\n", dev->nodename);
1432 
1433 	xennet_disconnect_backend(info);
1434 	return 0;
1435 }
1436 
1437 static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
1438 {
1439 	char *s, *e, *macstr;
1440 	int i;
1441 
1442 	macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
1443 	if (IS_ERR(macstr))
1444 		return PTR_ERR(macstr);
1445 
1446 	for (i = 0; i < ETH_ALEN; i++) {
1447 		mac[i] = simple_strtoul(s, &e, 16);
1448 		if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
1449 			kfree(macstr);
1450 			return -ENOENT;
1451 		}
1452 		s = e+1;
1453 	}
1454 
1455 	kfree(macstr);
1456 	return 0;
1457 }
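
/* The xenstore "mac" node is a colon-separated hex string, for example
 * "00:16:3e:12:34:56" (00:16:3e being the Xen Project OUI); the value shown
 * here is purely illustrative.
 */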
1458 
1459 static int setup_netfront_single(struct netfront_queue *queue)
1460 {
1461 	int err;
1462 
1463 	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
1464 	if (err < 0)
1465 		goto fail;
1466 
1467 	err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
1468 					xennet_interrupt,
1469 					0, queue->info->netdev->name, queue);
1470 	if (err < 0)
1471 		goto bind_fail;
1472 	queue->rx_evtchn = queue->tx_evtchn;
1473 	queue->rx_irq = queue->tx_irq = err;
1474 
1475 	return 0;
1476 
1477 bind_fail:
1478 	xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
1479 	queue->tx_evtchn = 0;
1480 fail:
1481 	return err;
1482 }
1483 
1484 static int setup_netfront_split(struct netfront_queue *queue)
1485 {
1486 	int err;
1487 
1488 	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
1489 	if (err < 0)
1490 		goto fail;
1491 	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->rx_evtchn);
1492 	if (err < 0)
1493 		goto alloc_rx_evtchn_fail;
1494 
1495 	snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
1496 		 "%s-tx", queue->name);
1497 	err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
1498 					xennet_tx_interrupt,
1499 					0, queue->tx_irq_name, queue);
1500 	if (err < 0)
1501 		goto bind_tx_fail;
1502 	queue->tx_irq = err;
1503 
1504 	snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
1505 		 "%s-rx", queue->name);
1506 	err = bind_evtchn_to_irqhandler(queue->rx_evtchn,
1507 					xennet_rx_interrupt,
1508 					0, queue->rx_irq_name, queue);
1509 	if (err < 0)
1510 		goto bind_rx_fail;
1511 	queue->rx_irq = err;
1512 
1513 	return 0;
1514 
1515 bind_rx_fail:
1516 	unbind_from_irqhandler(queue->tx_irq, queue);
1517 	queue->tx_irq = 0;
1518 bind_tx_fail:
1519 	xenbus_free_evtchn(queue->info->xbdev, queue->rx_evtchn);
1520 	queue->rx_evtchn = 0;
1521 alloc_rx_evtchn_fail:
1522 	xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
1523 	queue->tx_evtchn = 0;
1524 fail:
1525 	return err;
1526 }
1527 
1528 static int setup_netfront(struct xenbus_device *dev,
1529 			struct netfront_queue *queue, unsigned int feature_split_evtchn)
1530 {
1531 	struct xen_netif_tx_sring *txs;
1532 	struct xen_netif_rx_sring *rxs;
1533 	int err;
1534 
1535 	queue->tx_ring_ref = GRANT_INVALID_REF;
1536 	queue->rx_ring_ref = GRANT_INVALID_REF;
1537 	queue->rx.sring = NULL;
1538 	queue->tx.sring = NULL;
1539 
1540 	txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
1541 	if (!txs) {
1542 		err = -ENOMEM;
1543 		xenbus_dev_fatal(dev, err, "allocating tx ring page");
1544 		goto fail;
1545 	}
1546 	SHARED_RING_INIT(txs);
1547 	FRONT_RING_INIT(&queue->tx, txs, PAGE_SIZE);
1548 
1549 	err = xenbus_grant_ring(dev, virt_to_mfn(txs));
1550 	if (err < 0)
1551 		goto grant_tx_ring_fail;
1552 	queue->tx_ring_ref = err;
1553 
1554 	rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
1555 	if (!rxs) {
1556 		err = -ENOMEM;
1557 		xenbus_dev_fatal(dev, err, "allocating rx ring page");
1558 		goto alloc_rx_ring_fail;
1559 	}
1560 	SHARED_RING_INIT(rxs);
1561 	FRONT_RING_INIT(&queue->rx, rxs, PAGE_SIZE);
1562 
1563 	err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
1564 	if (err < 0)
1565 		goto grant_rx_ring_fail;
1566 	queue->rx_ring_ref = err;
1567 
1568 	if (feature_split_evtchn)
1569 		err = setup_netfront_split(queue);
1570 	/* Set up a single event channel if
1571 	 *  a) feature-split-event-channels == 0, or
1572 	 *  b) feature-split-event-channels == 1 but split setup failed.
1573 	 */
1574 	if (!feature_split_evtchn || (feature_split_evtchn && err))
1575 		err = setup_netfront_single(queue);
1576 
1577 	if (err)
1578 		goto alloc_evtchn_fail;
1579 
1580 	return 0;
1581 
1582 	/* If we fail to setup netfront, it is safe to just revoke access to
1583 	 * granted pages because backend is not accessing it at this point.
1584 	 */
1585 alloc_evtchn_fail:
1586 	gnttab_end_foreign_access_ref(queue->rx_ring_ref, 0);
1587 grant_rx_ring_fail:
1588 	free_page((unsigned long)rxs);
1589 alloc_rx_ring_fail:
1590 	gnttab_end_foreign_access_ref(queue->tx_ring_ref, 0);
1591 grant_tx_ring_fail:
1592 	free_page((unsigned long)txs);
1593 fail:
1594 	return err;
1595 }
1596 
1597 /* Queue-specific initialisation
1598  * This used to be done in xennet_create_dev() but must now
1599  * be run per-queue.
1600  */
1601 static int xennet_init_queue(struct netfront_queue *queue)
1602 {
1603 	unsigned short i;
1604 	int err = 0;
1605 
1606 	spin_lock_init(&queue->tx_lock);
1607 	spin_lock_init(&queue->rx_lock);
1608 
1609 	init_timer(&queue->rx_refill_timer);
1610 	queue->rx_refill_timer.data = (unsigned long)queue;
1611 	queue->rx_refill_timer.function = rx_refill_timeout;
1612 
1613 	snprintf(queue->name, sizeof(queue->name), "%s-q%u",
1614 		 queue->info->netdev->name, queue->id);
1615 
1616 	/* Initialise tx_skbs as a free chain containing every entry. */
1617 	queue->tx_skb_freelist = 0;
1618 	for (i = 0; i < NET_TX_RING_SIZE; i++) {
1619 		skb_entry_set_link(&queue->tx_skbs[i], i+1);
1620 		queue->grant_tx_ref[i] = GRANT_INVALID_REF;
1621 		queue->grant_tx_page[i] = NULL;
1622 	}
1623 
1624 	/* Clear out rx_skbs */
1625 	for (i = 0; i < NET_RX_RING_SIZE; i++) {
1626 		queue->rx_skbs[i] = NULL;
1627 		queue->grant_rx_ref[i] = GRANT_INVALID_REF;
1628 	}
1629 
1630 	/* A grant for every tx ring slot */
1631 	if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
1632 					  &queue->gref_tx_head) < 0) {
1633 		pr_alert("can't alloc tx grant refs\n");
1634 		err = -ENOMEM;
1635 		goto exit;
1636 	}
1637 
1638 	/* A grant for every rx ring slot */
1639 	if (gnttab_alloc_grant_references(NET_RX_RING_SIZE,
1640 					  &queue->gref_rx_head) < 0) {
1641 		pr_alert("can't alloc rx grant refs\n");
1642 		err = -ENOMEM;
1643 		goto exit_free_tx;
1644 	}
1645 
1646 	return 0;
1647 
1648  exit_free_tx:
1649 	gnttab_free_grant_references(queue->gref_tx_head);
1650  exit:
1651 	return err;
1652 }
1653 
1654 static int write_queue_xenstore_keys(struct netfront_queue *queue,
1655 			   struct xenbus_transaction *xbt, int write_hierarchical)
1656 {
1657 	/* Write the queue-specific keys into XenStore in the traditional
1658 	 * way for a single queue, or in per-queue subkeys for multiple
1659 	 * queues.
1660 	 */
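	/* Illustrative XenStore layout (node names depend on the toolstack):
	 * a single queue writes flat keys such as
	 *   device/vif/0/tx-ring-ref
	 * whereas multiple queues write per-queue subtrees such as
	 *   device/vif/0/queue-0/tx-ring-ref
	 *   device/vif/0/queue-1/tx-ring-ref
	 */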
1661 	struct xenbus_device *dev = queue->info->xbdev;
1662 	int err;
1663 	const char *message;
1664 	char *path;
1665 	size_t pathsize;
1666 
1667 	/* Choose the correct place to write the keys */
1668 	if (write_hierarchical) {
1669 		pathsize = strlen(dev->nodename) + 10;
1670 		path = kzalloc(pathsize, GFP_KERNEL);
1671 		if (!path) {
1672 			err = -ENOMEM;
1673 			message = "out of memory while writing ring references";
1674 			goto error;
1675 		}
1676 		snprintf(path, pathsize, "%s/queue-%u",
1677 				dev->nodename, queue->id);
1678 	} else {
1679 		path = (char *)dev->nodename;
1680 	}
1681 
1682 	/* Write ring references */
1683 	err = xenbus_printf(*xbt, path, "tx-ring-ref", "%u",
1684 			queue->tx_ring_ref);
1685 	if (err) {
1686 		message = "writing tx-ring-ref";
1687 		goto error;
1688 	}
1689 
1690 	err = xenbus_printf(*xbt, path, "rx-ring-ref", "%u",
1691 			queue->rx_ring_ref);
1692 	if (err) {
1693 		message = "writing rx-ring-ref";
1694 		goto error;
1695 	}
1696 
1697 	/* Write event channels; taking into account both shared
1698 	 * and split event channel scenarios.
1699 	 */
1700 	if (queue->tx_evtchn == queue->rx_evtchn) {
1701 		/* Shared event channel */
1702 		err = xenbus_printf(*xbt, path,
1703 				"event-channel", "%u", queue->tx_evtchn);
1704 		if (err) {
1705 			message = "writing event-channel";
1706 			goto error;
1707 		}
1708 	} else {
1709 		/* Split event channels */
1710 		err = xenbus_printf(*xbt, path,
1711 				"event-channel-tx", "%u", queue->tx_evtchn);
1712 		if (err) {
1713 			message = "writing event-channel-tx";
1714 			goto error;
1715 		}
1716 
1717 		err = xenbus_printf(*xbt, path,
1718 				"event-channel-rx", "%u", queue->rx_evtchn);
1719 		if (err) {
1720 			message = "writing event-channel-rx";
1721 			goto error;
1722 		}
1723 	}
1724 
1725 	if (write_hierarchical)
1726 		kfree(path);
1727 	return 0;
1728 
1729 error:
1730 	if (write_hierarchical)
1731 		kfree(path);
1732 	xenbus_dev_fatal(dev, err, "%s", message);
1733 	return err;
1734 }
1735 
1736 static void xennet_destroy_queues(struct netfront_info *info)
1737 {
1738 	unsigned int i;
1739 
1740 	rtnl_lock();
1741 
1742 	for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
1743 		struct netfront_queue *queue = &info->queues[i];
1744 
1745 		if (netif_running(info->netdev))
1746 			napi_disable(&queue->napi);
1747 		netif_napi_del(&queue->napi);
1748 	}
1749 
1750 	rtnl_unlock();
1751 
1752 	kfree(info->queues);
1753 	info->queues = NULL;
1754 }
1755 
1756 static int xennet_create_queues(struct netfront_info *info,
1757 				unsigned int num_queues)
1758 {
1759 	unsigned int i;
1760 	int ret;
1761 
1762 	info->queues = kcalloc(num_queues, sizeof(struct netfront_queue),
1763 			       GFP_KERNEL);
1764 	if (!info->queues)
1765 		return -ENOMEM;
1766 
1767 	rtnl_lock();
1768 
1769 	for (i = 0; i < num_queues; i++) {
1770 		struct netfront_queue *queue = &info->queues[i];
1771 
1772 		queue->id = i;
1773 		queue->info = info;
1774 
1775 		ret = xennet_init_queue(queue);
1776 		if (ret < 0) {
1777 			dev_warn(&info->netdev->dev,
1778 				 "only created %d queues\n", i);
1779 			num_queues = i;
1780 			break;
1781 		}
1782 
1783 		netif_napi_add(queue->info->netdev, &queue->napi,
1784 			       xennet_poll, 64);
1785 		if (netif_running(info->netdev))
1786 			napi_enable(&queue->napi);
1787 	}
1788 
1789 	netif_set_real_num_tx_queues(info->netdev, num_queues);
1790 
1791 	rtnl_unlock();
1792 
1793 	if (num_queues == 0) {
1794 		dev_err(&info->netdev->dev, "no queues\n");
1795 		return -EINVAL;
1796 	}
1797 	return 0;
1798 }
1799 
1800 /* Common code used when first setting up, and when resuming. */
1801 static int talk_to_netback(struct xenbus_device *dev,
1802 			   struct netfront_info *info)
1803 {
1804 	const char *message;
1805 	struct xenbus_transaction xbt;
1806 	int err;
1807 	unsigned int feature_split_evtchn;
1808 	unsigned int i = 0;
1809 	unsigned int max_queues = 0;
1810 	struct netfront_queue *queue = NULL;
1811 	unsigned int num_queues = 1;
1812 
1813 	info->netdev->irq = 0;
1814 
1815 	/* Check if backend supports multiple queues */
1816 	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
1817 			   "multi-queue-max-queues", "%u", &max_queues);
1818 	if (err < 0)
1819 		max_queues = 1;
1820 	num_queues = min(max_queues, xennet_max_queues);
1821 
1822 	/* Check feature-split-event-channels */
1823 	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
1824 			   "feature-split-event-channels", "%u",
1825 			   &feature_split_evtchn);
1826 	if (err < 0)
1827 		feature_split_evtchn = 0;
1828 
1829 	/* Read the MAC address. */
1830 	err = xen_net_read_mac(dev, info->netdev->dev_addr);
1831 	if (err) {
1832 		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
1833 		goto out;
1834 	}
1835 
1836 	if (info->queues)
1837 		xennet_destroy_queues(info);
1838 
1839 	err = xennet_create_queues(info, num_queues);
1840 	if (err < 0)
1841 		goto destroy_ring;
1842 
1843 	/* Create shared ring, alloc event channel -- for each queue */
1844 	for (i = 0; i < num_queues; ++i) {
1845 		queue = &info->queues[i];
1846 		err = setup_netfront(dev, queue, feature_split_evtchn);
1847 		if (err) {
1848 			/* setup_netfront() will tidy up the current
1849 			 * queue on error, but we need to clean up
1850 			 * those already allocated.
1851 			 */
1852 			if (i > 0) {
1853 				rtnl_lock();
1854 				netif_set_real_num_tx_queues(info->netdev, i);
1855 				rtnl_unlock();
1856 				goto destroy_ring;
1857 			} else {
1858 				goto out;
1859 			}
1860 		}
1861 	}
1862 
1863 again:
1864 	err = xenbus_transaction_start(&xbt);
1865 	if (err) {
1866 		xenbus_dev_fatal(dev, err, "starting transaction");
1867 		goto destroy_ring;
1868 	}
1869 
1870 	if (num_queues == 1) {
1871 		err = write_queue_xenstore_keys(&info->queues[0], &xbt, 0); /* flat */
1872 		if (err)
1873 			goto abort_transaction_no_dev_fatal;
1874 	} else {
1875 		/* Write the number of queues */
1876 		err = xenbus_printf(xbt, dev->nodename, "multi-queue-num-queues",
1877 				    "%u", num_queues);
1878 		if (err) {
1879 			message = "writing multi-queue-num-queues";
1880 			goto abort_transaction_no_dev_fatal;
1881 		}
1882 
1883 		/* Write the keys for each queue */
1884 		for (i = 0; i < num_queues; ++i) {
1885 			queue = &info->queues[i];
1886 			err = write_queue_xenstore_keys(queue, &xbt, 1); /* hierarchical */
1887 			if (err)
1888 				goto abort_transaction_no_dev_fatal;
1889 		}
1890 	}
1891 
1892 	/* The remaining keys are not queue-specific */
1893 	err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
1894 			    1);
1895 	if (err) {
1896 		message = "writing request-rx-copy";
1897 		goto abort_transaction;
1898 	}
1899 
1900 	err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
1901 	if (err) {
1902 		message = "writing feature-rx-notify";
1903 		goto abort_transaction;
1904 	}
1905 
1906 	err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
1907 	if (err) {
1908 		message = "writing feature-sg";
1909 		goto abort_transaction;
1910 	}
1911 
1912 	err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
1913 	if (err) {
1914 		message = "writing feature-gso-tcpv4";
1915 		goto abort_transaction;
1916 	}
1917 
1918 	err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv6", "1");
1919 	if (err) {
1920 		message = "writing feature-gso-tcpv6";
1921 		goto abort_transaction;
1922 	}
1923 
1924 	err = xenbus_write(xbt, dev->nodename, "feature-ipv6-csum-offload",
1925 			   "1");
1926 	if (err) {
1927 		message = "writing feature-ipv6-csum-offload";
1928 		goto abort_transaction;
1929 	}
1930 
1931 	err = xenbus_transaction_end(xbt, 0);
1932 	if (err) {
1933 		if (err == -EAGAIN)
1934 			goto again;
1935 		xenbus_dev_fatal(dev, err, "completing transaction");
1936 		goto destroy_ring;
1937 	}
1938 
1939 	return 0;
1940 
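	/*
	 * Error exits: abort_transaction additionally reports the failing
	 * key via xenbus_dev_fatal(); both paths abort the transaction and
	 * fall through to destroy_ring.
	 */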
1941  abort_transaction:
1942 	xenbus_dev_fatal(dev, err, "%s", message);
1943 abort_transaction_no_dev_fatal:
1944 	xenbus_transaction_end(xbt, 1);
1945  destroy_ring:
1946 	xennet_disconnect_backend(info);
1947 	kfree(info->queues);
1948 	info->queues = NULL;
1949 	rtnl_lock();
1950 	netif_set_real_num_tx_queues(info->netdev, 0);
1951 	rtnl_unlock();
1952  out:
1953 	return err;
1954 }
1955 
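/*
 * (Re)connect to the backend: verify that it supports the rx-copy receive
 * path, redo the xenstore handshake, and then kick every queue so that
 * packets queued while we were disconnected get processed.
 */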
1956 static int xennet_connect(struct net_device *dev)
1957 {
1958 	struct netfront_info *np = netdev_priv(dev);
1959 	unsigned int num_queues = 0;
1960 	int err;
1961 	unsigned int feature_rx_copy;
1962 	unsigned int j = 0;
1963 	struct netfront_queue *queue = NULL;
1964 
1965 	err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
1966 			   "feature-rx-copy", "%u", &feature_rx_copy);
1967 	if (err != 1)
1968 		feature_rx_copy = 0;
1969 
1970 	if (!feature_rx_copy) {
1971 		dev_info(&dev->dev,
1972 			 "backend does not support copying receive path\n");
1973 		return -ENODEV;
1974 	}
1975 
1976 	err = talk_to_netback(np->xbdev, np);
1977 	if (err)
1978 		return err;
1979 
1980 	/* talk_to_netback() sets the correct number of queues */
1981 	num_queues = dev->real_num_tx_queues;
1982 
1983 	rtnl_lock();
1984 	netdev_update_features(dev);
1985 	rtnl_unlock();
1986 
1987 	/*
1988 	 * All public and private state should now be sane.  Get
1989 	 * ready to start sending and receiving packets and give the driver
1990 	 * domain a kick because we've probably just requeued some
1991 	 * packets.
1992 	 */
1993 	netif_carrier_on(np->netdev);
1994 	for (j = 0; j < num_queues; ++j) {
1995 		queue = &np->queues[j];
1996 
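		/* Kick the backend; tx and rx may share a single event channel. */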
1997 		notify_remote_via_irq(queue->tx_irq);
1998 		if (queue->tx_irq != queue->rx_irq)
1999 			notify_remote_via_irq(queue->rx_irq);
2000 
2001 		spin_lock_irq(&queue->tx_lock);
2002 		xennet_tx_buf_gc(queue);
2003 		spin_unlock_irq(&queue->tx_lock);
2004 
2005 		spin_lock_bh(&queue->rx_lock);
2006 		xennet_alloc_rx_buffers(queue);
2007 		spin_unlock_bh(&queue->rx_lock);
2008 	}
2009 
2010 	return 0;
2011 }
2012 
2013 /*
2014  * Callback received when the backend's state changes.
2015  */
2016 static void netback_changed(struct xenbus_device *dev,
2017 			    enum xenbus_state backend_state)
2018 {
2019 	struct netfront_info *np = dev_get_drvdata(&dev->dev);
2020 	struct net_device *netdev = np->netdev;
2021 
2022 	dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));
2023 
2024 	switch (backend_state) {
2025 	case XenbusStateInitialising:
2026 	case XenbusStateInitialised:
2027 	case XenbusStateReconfiguring:
2028 	case XenbusStateReconfigured:
2029 	case XenbusStateUnknown:
2030 		break;
2031 
2032 	case XenbusStateInitWait:
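		/* Backend is ready for the frontend to connect. */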
2033 		if (dev->state != XenbusStateInitialising)
2034 			break;
2035 		if (xennet_connect(netdev) != 0)
2036 			break;
2037 		xenbus_switch_state(dev, XenbusStateConnected);
2038 		break;
2039 
2040 	case XenbusStateConnected:
2041 		netdev_notify_peers(netdev);
2042 		break;
2043 
2044 	case XenbusStateClosed:
2045 		if (dev->state == XenbusStateClosed)
2046 			break;
2047 		/* Missed the backend's CLOSING state -- fallthrough */
2048 	case XenbusStateClosing:
2049 		xenbus_frontend_closed(dev);
2050 		break;
2051 	}
2052 }
2053 
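/* Extra ethtool statistics, located by their byte offset within struct netfront_info. */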
2054 static const struct xennet_stat {
2055 	char name[ETH_GSTRING_LEN];
2056 	u16 offset;
2057 } xennet_stats[] = {
2058 	{
2059 		"rx_gso_checksum_fixup",
2060 		offsetof(struct netfront_info, rx_gso_checksum_fixup)
2061 	},
2062 };
2063 
2064 static int xennet_get_sset_count(struct net_device *dev, int string_set)
2065 {
2066 	switch (string_set) {
2067 	case ETH_SS_STATS:
2068 		return ARRAY_SIZE(xennet_stats);
2069 	default:
2070 		return -EINVAL;
2071 	}
2072 }
2073 
2074 static void xennet_get_ethtool_stats(struct net_device *dev,
2075 				     struct ethtool_stats *stats, u64 *data)
2076 {
2077 	void *np = netdev_priv(dev);
2078 	int i;
2079 
2080 	for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
2081 		data[i] = atomic_read((atomic_t *)(np + xennet_stats[i].offset));
2082 }
2083 
2084 static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2085 {
2086 	int i;
2087 
2088 	switch (stringset) {
2089 	case ETH_SS_STATS:
2090 		for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
2091 			memcpy(data + i * ETH_GSTRING_LEN,
2092 			       xennet_stats[i].name, ETH_GSTRING_LEN);
2093 		break;
2094 	}
2095 }
2096 
2097 static const struct ethtool_ops xennet_ethtool_ops =
2098 {
2099 	.get_link = ethtool_op_get_link,
2100 
2101 	.get_sset_count = xennet_get_sset_count,
2102 	.get_ethtool_stats = xennet_get_ethtool_stats,
2103 	.get_strings = xennet_get_strings,
2104 };
2105 
2106 #ifdef CONFIG_SYSFS
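/*
 * Legacy rxbuf_{min,max,cur} attributes: the receive ring is a fixed size
 * nowadays, so reads report NET_RX_RING_SIZE and writes are accepted but
 * have no effect.
 */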
2107 static ssize_t show_rxbuf(struct device *dev,
2108 			  struct device_attribute *attr, char *buf)
2109 {
2110 	return sprintf(buf, "%lu\n", NET_RX_RING_SIZE);
2111 }
2112 
2113 static ssize_t store_rxbuf(struct device *dev,
2114 			   struct device_attribute *attr,
2115 			   const char *buf, size_t len)
2116 {
2117 	char *endp;
2118 	unsigned long target;
2119 
2120 	if (!capable(CAP_NET_ADMIN))
2121 		return -EPERM;
2122 
2123 	target = simple_strtoul(buf, &endp, 0);
2124 	if (endp == buf)
2125 		return -EBADMSG;
2126 
2127 	/* rxbuf_min and rxbuf_max are no longer configurable. */
2128 
2129 	return len;
2130 }
2131 
2132 static struct device_attribute xennet_attrs[] = {
2133 	__ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf),
2134 	__ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf),
2135 	__ATTR(rxbuf_cur, S_IRUGO, show_rxbuf, NULL),
2136 };
2137 
2138 static int xennet_sysfs_addif(struct net_device *netdev)
2139 {
2140 	int i;
2141 	int err;
2142 
2143 	for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
2144 		err = device_create_file(&netdev->dev,
2145 					   &xennet_attrs[i]);
2146 		if (err)
2147 			goto fail;
2148 	}
2149 	return 0;
2150 
2151  fail:
2152 	while (--i >= 0)
2153 		device_remove_file(&netdev->dev, &xennet_attrs[i]);
2154 	return err;
2155 }
2156 
2157 static void xennet_sysfs_delif(struct net_device *netdev)
2158 {
2159 	int i;
2160 
2161 	for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++)
2162 		device_remove_file(&netdev->dev, &xennet_attrs[i]);
2163 }
2164 
2165 #endif /* CONFIG_SYSFS */
2166 
2167 static int xennet_remove(struct xenbus_device *dev)
2168 {
2169 	struct netfront_info *info = dev_get_drvdata(&dev->dev);
2170 	unsigned int num_queues = info->netdev->real_num_tx_queues;
2171 	struct netfront_queue *queue = NULL;
2172 	unsigned int i = 0;
2173 
2174 	dev_dbg(&dev->dev, "%s\n", dev->nodename);
2175 
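	/* Detach from the backend before releasing the netdev and its queues. */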
2176 	xennet_disconnect_backend(info);
2177 
2178 	xennet_sysfs_delif(info->netdev);
2179 
2180 	unregister_netdev(info->netdev);
2181 
2182 	for (i = 0; i < num_queues; ++i) {
2183 		queue = &info->queues[i];
2184 		del_timer_sync(&queue->rx_refill_timer);
2185 	}
2186 
2187 	if (num_queues) {
2188 		kfree(info->queues);
2189 		info->queues = NULL;
2190 	}
2191 
2192 	free_percpu(info->stats);
2193 
2194 	free_netdev(info->netdev);
2195 
2196 	return 0;
2197 }
2198 
2199 static const struct xenbus_device_id netfront_ids[] = {
2200 	{ "vif" },
2201 	{ "" }
2202 };
2203 
2204 static struct xenbus_driver netfront_driver = {
2205 	.ids = netfront_ids,
2206 	.probe = netfront_probe,
2207 	.remove = xennet_remove,
2208 	.resume = netfront_resume,
2209 	.otherend_changed = netback_changed,
2210 };
2211 
2212 static int __init netif_init(void)
2213 {
2214 	if (!xen_domain())
2215 		return -ENODEV;
2216 
2217 	if (!xen_has_pv_nic_devices())
2218 		return -ENODEV;
2219 
2220 	pr_info("Initialising Xen virtual ethernet driver\n");
2221 
2222 	/* Default to one queue per online CPU unless the user set max_queues. */
2223 	if (xennet_max_queues == 0)
		xennet_max_queues = num_online_cpus();
2224 
2225 	return xenbus_register_frontend(&netfront_driver);
2226 }
2227 module_init(netif_init);
2228 
2229 
2230 static void __exit netif_exit(void)
2231 {
2232 	xenbus_unregister_driver(&netfront_driver);
2233 }
2234 module_exit(netif_exit);
2235 
2236 MODULE_DESCRIPTION("Xen virtual network device frontend");
2237 MODULE_LICENSE("GPL");
2238 MODULE_ALIAS("xen:vif");
2239 MODULE_ALIAS("xennet");
2240