xref: /openbmc/linux/drivers/net/xen-netfront.c (revision ae3473231e77a3f1909d48cd144cebe5e1d049b3)
1 /*
2  * Virtual network driver for conversing with remote driver backends.
3  *
4  * Copyright (c) 2002-2005, K A Fraser
5  * Copyright (c) 2005, XenSource Ltd
6  *
7  * This program is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU General Public License version 2
9  * as published by the Free Software Foundation; or, when distributed
10  * separately from the Linux kernel or incorporated into other
11  * software packages, subject to the following license:
12  *
13  * Permission is hereby granted, free of charge, to any person obtaining a copy
14  * of this source file (the "Software"), to deal in the Software without
15  * restriction, including without limitation the rights to use, copy, modify,
16  * merge, publish, distribute, sublicense, and/or sell copies of the Software,
17  * and to permit persons to whom the Software is furnished to do so, subject to
18  * the following conditions:
19  *
20  * The above copyright notice and this permission notice shall be included in
21  * all copies or substantial portions of the Software.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
26  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
29  * IN THE SOFTWARE.
30  */
31 
32 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33 
34 #include <linux/module.h>
35 #include <linux/kernel.h>
36 #include <linux/netdevice.h>
37 #include <linux/etherdevice.h>
38 #include <linux/skbuff.h>
39 #include <linux/ethtool.h>
40 #include <linux/if_ether.h>
41 #include <net/tcp.h>
42 #include <linux/udp.h>
43 #include <linux/moduleparam.h>
44 #include <linux/mm.h>
45 #include <linux/slab.h>
46 #include <net/ip.h>
47 
48 #include <xen/xen.h>
49 #include <xen/xenbus.h>
50 #include <xen/events.h>
51 #include <xen/page.h>
52 #include <xen/platform_pci.h>
53 #include <xen/grant_table.h>
54 
55 #include <xen/interface/io/netif.h>
56 #include <xen/interface/memory.h>
57 #include <xen/interface/grant_table.h>
58 
59 /* Module parameters */
60 static unsigned int xennet_max_queues;
61 module_param_named(max_queues, xennet_max_queues, uint, 0644);
62 MODULE_PARM_DESC(max_queues,
63 		 "Maximum number of queues per virtual interface");
64 
65 static const struct ethtool_ops xennet_ethtool_ops;
66 
67 struct netfront_cb {
68 	int pull_to;
69 };
70 
71 #define NETFRONT_SKB_CB(skb)	((struct netfront_cb *)((skb)->cb))
72 
73 #define RX_COPY_THRESHOLD 256
74 
75 #define GRANT_INVALID_REF	0
76 
77 #define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE)
78 #define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE)
79 
80 /* Minimum number of Rx slots (includes slot for GSO metadata). */
81 #define NET_RX_SLOTS_MIN (XEN_NETIF_NR_SLOTS_MIN + 1)
82 
83 /* Queue name is interface name with "-qNNN" appended */
84 #define QUEUE_NAME_SIZE (IFNAMSIZ + 6)
85 
86 /* IRQ name is queue name with "-tx" or "-rx" appended */
87 #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
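/*
 * For example, queue 0 of a device named "eth0" ends up as "eth0-q0", and
 * with split event channels its interrupts are requested as "eth0-q0-tx"
 * and "eth0-q0-rx" (see xennet_init_queue() and setup_netfront_split()
 * below); "eth0" is just an illustrative interface name.
 */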
88 
89 struct netfront_stats {
90 	u64			packets;
91 	u64			bytes;
92 	struct u64_stats_sync	syncp;
93 };
94 
95 struct netfront_info;
96 
97 struct netfront_queue {
98 	unsigned int id; /* Queue ID, 0-based */
99 	char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
100 	struct netfront_info *info;
101 
102 	struct napi_struct napi;
103 
104 	/* Split event channels support, tx_* == rx_* when using
105 	 * single event channel.
106 	 */
107 	unsigned int tx_evtchn, rx_evtchn;
108 	unsigned int tx_irq, rx_irq;
109 	/* Only used when split event channels support is enabled */
110 	char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */
111 	char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */
112 
113 	spinlock_t   tx_lock;
114 	struct xen_netif_tx_front_ring tx;
115 	int tx_ring_ref;
116 
117 	/*
118 	 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
119 	 * are linked from tx_skb_freelist through skb_entry.link.
120 	 *
121 	 *  NB. Freelist index entries are always going to be less than
122 	 *  PAGE_OFFSET, whereas pointers to skbs will always be equal to
123 	 *  or greater than PAGE_OFFSET: we use this property to distinguish
124 	 *  them.
125 	 */
126 	union skb_entry {
127 		struct sk_buff *skb;
128 		unsigned long link;
129 	} tx_skbs[NET_TX_RING_SIZE];
130 	grant_ref_t gref_tx_head;
131 	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
132 	struct page *grant_tx_page[NET_TX_RING_SIZE];
133 	unsigned tx_skb_freelist;
134 
135 	spinlock_t   rx_lock ____cacheline_aligned_in_smp;
136 	struct xen_netif_rx_front_ring rx;
137 	int rx_ring_ref;
138 
139 	struct timer_list rx_refill_timer;
140 
141 	struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
142 	grant_ref_t gref_rx_head;
143 	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
144 };
145 
146 struct netfront_info {
147 	struct list_head list;
148 	struct net_device *netdev;
149 
150 	struct xenbus_device *xbdev;
151 
152 	/* Multi-queue support */
153 	struct netfront_queue *queues;
154 
155 	/* Statistics */
156 	struct netfront_stats __percpu *rx_stats;
157 	struct netfront_stats __percpu *tx_stats;
158 
159 	atomic_t rx_gso_checksum_fixup;
160 };
161 
162 struct netfront_rx_info {
163 	struct xen_netif_rx_response rx;
164 	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
165 };
166 
167 static void skb_entry_set_link(union skb_entry *list, unsigned short id)
168 {
169 	list->link = id;
170 }
171 
172 static int skb_entry_is_link(const union skb_entry *list)
173 {
174 	BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link));
175 	return (unsigned long)list->skb < PAGE_OFFSET;
176 }
177 
178 /*
179  * Access helpers for acquiring and freeing slots in tx_skbs[].
180  */
181 
182 static void add_id_to_freelist(unsigned *head, union skb_entry *list,
183 			       unsigned short id)
184 {
185 	skb_entry_set_link(&list[id], *head);
186 	*head = id;
187 }
188 
189 static unsigned short get_id_from_freelist(unsigned *head,
190 					   union skb_entry *list)
191 {
192 	unsigned int id = *head;
193 	*head = list[id].link;
194 	return id;
195 }
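/*
 * Illustrative sketch of the freelist behaviour, assuming a four-entry
 * ring for brevity: xennet_init_queue() links the chain 0 -> 1 -> 2 -> 3
 * with tx_skb_freelist == 0.  Two get_id_from_freelist() calls hand out
 * ids 0 and 1 and leave the head at 2; add_id_to_freelist(..., 0) then
 * stores 2 in tx_skbs[0].link and moves the head back to 0.  Because ring
 * indices are far below PAGE_OFFSET, skb_entry_is_link() can tell a free
 * slot (small index in .link) from a busy one (skb pointer in .skb).
 */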
196 
197 static int xennet_rxidx(RING_IDX idx)
198 {
199 	return idx & (NET_RX_RING_SIZE - 1);
200 }
201 
202 static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue,
203 					 RING_IDX ri)
204 {
205 	int i = xennet_rxidx(ri);
206 	struct sk_buff *skb = queue->rx_skbs[i];
207 	queue->rx_skbs[i] = NULL;
208 	return skb;
209 }
210 
211 static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue,
212 					    RING_IDX ri)
213 {
214 	int i = xennet_rxidx(ri);
215 	grant_ref_t ref = queue->grant_rx_ref[i];
216 	queue->grant_rx_ref[i] = GRANT_INVALID_REF;
217 	return ref;
218 }
219 
220 #ifdef CONFIG_SYSFS
221 static const struct attribute_group xennet_dev_group;
222 #endif
223 
224 static bool xennet_can_sg(struct net_device *dev)
225 {
226 	return dev->features & NETIF_F_SG;
227 }
228 
229 
230 static void rx_refill_timeout(unsigned long data)
231 {
232 	struct netfront_queue *queue = (struct netfront_queue *)data;
233 	napi_schedule(&queue->napi);
234 }
235 
236 static int netfront_tx_slot_available(struct netfront_queue *queue)
237 {
238 	return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
239 		(NET_TX_RING_SIZE - MAX_SKB_FRAGS - 2);
240 }
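/*
 * The MAX_SKB_FRAGS + 2 headroom is roughly the worst case a single skb
 * can consume under one-grant-per-slot accounting: one slot for the
 * linear header, one optional extra-info slot for GSO metadata, and up to
 * MAX_SKB_FRAGS slots for the fragments.  (Skbs that would need even more
 * slots are linearized in xennet_start_xmit().)
 */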
241 
242 static void xennet_maybe_wake_tx(struct netfront_queue *queue)
243 {
244 	struct net_device *dev = queue->info->netdev;
245 	struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, queue->id);
246 
247 	if (unlikely(netif_tx_queue_stopped(dev_queue)) &&
248 	    netfront_tx_slot_available(queue) &&
249 	    likely(netif_running(dev)))
250 		netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id));
251 }
252 
253 
254 static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
255 {
256 	struct sk_buff *skb;
257 	struct page *page;
258 
259 	skb = __netdev_alloc_skb(queue->info->netdev,
260 				 RX_COPY_THRESHOLD + NET_IP_ALIGN,
261 				 GFP_ATOMIC | __GFP_NOWARN);
262 	if (unlikely(!skb))
263 		return NULL;
264 
265 	page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
266 	if (!page) {
267 		kfree_skb(skb);
268 		return NULL;
269 	}
270 	skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
271 
272 	/* Align IP header to a 16-byte boundary */
273 	skb_reserve(skb, NET_IP_ALIGN);
274 	skb->dev = queue->info->netdev;
275 
276 	return skb;
277 }
278 
279 
280 static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
281 {
282 	RING_IDX req_prod = queue->rx.req_prod_pvt;
283 	int notify;
284 
285 	if (unlikely(!netif_carrier_ok(queue->info->netdev)))
286 		return;
287 
288 	for (req_prod = queue->rx.req_prod_pvt;
289 	     req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE;
290 	     req_prod++) {
291 		struct sk_buff *skb;
292 		unsigned short id;
293 		grant_ref_t ref;
294 		struct page *page;
295 		struct xen_netif_rx_request *req;
296 
297 		skb = xennet_alloc_one_rx_buffer(queue);
298 		if (!skb)
299 			break;
300 
301 		id = xennet_rxidx(req_prod);
302 
303 		BUG_ON(queue->rx_skbs[id]);
304 		queue->rx_skbs[id] = skb;
305 
306 		ref = gnttab_claim_grant_reference(&queue->gref_rx_head);
307 		WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
308 		queue->grant_rx_ref[id] = ref;
309 
310 		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
311 
312 		req = RING_GET_REQUEST(&queue->rx, req_prod);
313 		gnttab_page_grant_foreign_access_ref_one(ref,
314 							 queue->info->xbdev->otherend_id,
315 							 page,
316 							 0);
317 		req->id = id;
318 		req->gref = ref;
319 	}
320 
321 	queue->rx.req_prod_pvt = req_prod;
322 
323 	/* Not enough requests? Try again later. */
324 	if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN) {
325 		mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
326 		return;
327 	}
328 
329 	wmb();		/* barrier so backend sees requests */
330 
331 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify);
332 	if (notify)
333 		notify_remote_via_irq(queue->rx_irq);
334 }
335 
336 static int xennet_open(struct net_device *dev)
337 {
338 	struct netfront_info *np = netdev_priv(dev);
339 	unsigned int num_queues = dev->real_num_tx_queues;
340 	unsigned int i = 0;
341 	struct netfront_queue *queue = NULL;
342 
343 	for (i = 0; i < num_queues; ++i) {
344 		queue = &np->queues[i];
345 		napi_enable(&queue->napi);
346 
347 		spin_lock_bh(&queue->rx_lock);
348 		if (netif_carrier_ok(dev)) {
349 			xennet_alloc_rx_buffers(queue);
350 			queue->rx.sring->rsp_event = queue->rx.rsp_cons + 1;
351 			if (RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))
352 				napi_schedule(&queue->napi);
353 		}
354 		spin_unlock_bh(&queue->rx_lock);
355 	}
356 
357 	netif_tx_start_all_queues(dev);
358 
359 	return 0;
360 }
361 
362 static void xennet_tx_buf_gc(struct netfront_queue *queue)
363 {
364 	RING_IDX cons, prod;
365 	unsigned short id;
366 	struct sk_buff *skb;
367 	bool more_to_do;
368 
369 	BUG_ON(!netif_carrier_ok(queue->info->netdev));
370 
371 	do {
372 		prod = queue->tx.sring->rsp_prod;
373 		rmb(); /* Ensure we see responses up to 'rp'. */
374 
375 		for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
376 			struct xen_netif_tx_response *txrsp;
377 
378 			txrsp = RING_GET_RESPONSE(&queue->tx, cons);
379 			if (txrsp->status == XEN_NETIF_RSP_NULL)
380 				continue;
381 
382 			id  = txrsp->id;
383 			skb = queue->tx_skbs[id].skb;
384 			if (unlikely(gnttab_query_foreign_access(
385 				queue->grant_tx_ref[id]) != 0)) {
386 				pr_alert("%s: warning -- grant still in use by backend domain\n",
387 					 __func__);
388 				BUG();
389 			}
390 			gnttab_end_foreign_access_ref(
391 				queue->grant_tx_ref[id], GNTMAP_readonly);
392 			gnttab_release_grant_reference(
393 				&queue->gref_tx_head, queue->grant_tx_ref[id]);
394 			queue->grant_tx_ref[id] = GRANT_INVALID_REF;
395 			queue->grant_tx_page[id] = NULL;
396 			add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, id);
397 			dev_kfree_skb_irq(skb);
398 		}
399 
400 		queue->tx.rsp_cons = prod;
401 
402 		RING_FINAL_CHECK_FOR_RESPONSES(&queue->tx, more_to_do);
403 	} while (more_to_do);
404 
405 	xennet_maybe_wake_tx(queue);
406 }
407 
408 struct xennet_gnttab_make_txreq {
409 	struct netfront_queue *queue;
410 	struct sk_buff *skb;
411 	struct page *page;
412 	struct xen_netif_tx_request *tx; /* Last request */
413 	unsigned int size;
414 };
415 
416 static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
417 				  unsigned int len, void *data)
418 {
419 	struct xennet_gnttab_make_txreq *info = data;
420 	unsigned int id;
421 	struct xen_netif_tx_request *tx;
422 	grant_ref_t ref;
423 	/* convenient aliases */
424 	struct page *page = info->page;
425 	struct netfront_queue *queue = info->queue;
426 	struct sk_buff *skb = info->skb;
427 
428 	id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
429 	tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
430 	ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
431 	WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
432 
433 	gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
434 					gfn, GNTMAP_readonly);
435 
436 	queue->tx_skbs[id].skb = skb;
437 	queue->grant_tx_page[id] = page;
438 	queue->grant_tx_ref[id] = ref;
439 
440 	tx->id = id;
441 	tx->gref = ref;
442 	tx->offset = offset;
443 	tx->size = len;
444 	tx->flags = 0;
445 
446 	info->tx = tx;
447 	info->size += tx->size;
448 }
449 
450 static struct xen_netif_tx_request *xennet_make_first_txreq(
451 	struct netfront_queue *queue, struct sk_buff *skb,
452 	struct page *page, unsigned int offset, unsigned int len)
453 {
454 	struct xennet_gnttab_make_txreq info = {
455 		.queue = queue,
456 		.skb = skb,
457 		.page = page,
458 		.size = 0,
459 	};
460 
461 	gnttab_for_one_grant(page, offset, len, xennet_tx_setup_grant, &info);
462 
463 	return info.tx;
464 }
465 
466 static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
467 				  unsigned int len, void *data)
468 {
469 	struct xennet_gnttab_make_txreq *info = data;
470 
471 	info->tx->flags |= XEN_NETTXF_more_data;
472 	skb_get(info->skb);
473 	xennet_tx_setup_grant(gfn, offset, len, data);
474 }
475 
476 static struct xen_netif_tx_request *xennet_make_txreqs(
477 	struct netfront_queue *queue, struct xen_netif_tx_request *tx,
478 	struct sk_buff *skb, struct page *page,
479 	unsigned int offset, unsigned int len)
480 {
481 	struct xennet_gnttab_make_txreq info = {
482 		.queue = queue,
483 		.skb = skb,
484 		.tx = tx,
485 	};
486 
487 	/* Skip unused frames from start of page */
488 	page += offset >> PAGE_SHIFT;
489 	offset &= ~PAGE_MASK;
490 
491 	while (len) {
492 		info.page = page;
493 		info.size = 0;
494 
495 		gnttab_foreach_grant_in_range(page, offset, len,
496 					      xennet_make_one_txreq,
497 					      &info);
498 
499 		page++;
500 		offset = 0;
501 		len -= info.size;
502 	}
503 
504 	return info.tx;
505 }
506 
507 /*
508  * Count how many ring slots are required to send this skb. Each frag
509  * might be a compound page.
510  */
511 static int xennet_count_skb_slots(struct sk_buff *skb)
512 {
513 	int i, frags = skb_shinfo(skb)->nr_frags;
514 	int slots;
515 
516 	slots = gnttab_count_grant(offset_in_page(skb->data),
517 				   skb_headlen(skb));
518 
519 	for (i = 0; i < frags; i++) {
520 		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
521 		unsigned long size = skb_frag_size(frag);
522 		unsigned long offset = frag->page_offset;
523 
524 		/* Skip unused frames from start of page */
525 		offset &= ~PAGE_MASK;
526 
527 		slots += gnttab_count_grant(offset, size);
528 	}
529 
530 	return slots;
531 }
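/*
 * Worked example, assuming XEN_PAGE_SIZE == 4096: a 1000-byte linear area
 * starting at page offset 3500 crosses a grant boundary and therefore
 * costs two slots, whereas the same 1000 bytes at offset 0 cost one.
 * Fragments are counted the same way, so a fragment backed by a compound
 * page can contribute several slots on its own.
 */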
532 
533 static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
534 			       void *accel_priv, select_queue_fallback_t fallback)
535 {
536 	unsigned int num_queues = dev->real_num_tx_queues;
537 	u32 hash;
538 	u16 queue_idx;
539 
540 	/* First, check if there is only one queue */
541 	if (num_queues == 1) {
542 		queue_idx = 0;
543 	} else {
544 		hash = skb_get_hash(skb);
545 		queue_idx = hash % num_queues;
546 	}
547 
548 	return queue_idx;
549 }
550 
551 #define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
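/* With 4 KiB Xen pages this works out to 65536 / 4096 + 1 = 17 slots: the
 * most grant-sized chunks a maximum-size (64 KiB) payload can span once an
 * unaligned start is taken into account.
 */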
552 
553 static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
554 {
555 	struct netfront_info *np = netdev_priv(dev);
556 	struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
557 	struct xen_netif_tx_request *tx, *first_tx;
558 	unsigned int i;
559 	int notify;
560 	int slots;
561 	struct page *page;
562 	unsigned int offset;
563 	unsigned int len;
564 	unsigned long flags;
565 	struct netfront_queue *queue = NULL;
566 	unsigned int num_queues = dev->real_num_tx_queues;
567 	u16 queue_index;
568 	struct sk_buff *nskb;
569 
570 	/* Drop the packet if no queues are set up */
571 	if (num_queues < 1)
572 		goto drop;
573 	/* Determine which queue to transmit this SKB on */
574 	queue_index = skb_get_queue_mapping(skb);
575 	queue = &np->queues[queue_index];
576 
577 	/* If skb->len is too big for wire format, drop skb and alert
578 	 * user about misconfiguration.
579 	 */
580 	if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) {
581 		net_alert_ratelimited(
582 			"xennet: skb->len = %u, too big for wire format\n",
583 			skb->len);
584 		goto drop;
585 	}
586 
587 	slots = xennet_count_skb_slots(skb);
588 	if (unlikely(slots > MAX_XEN_SKB_FRAGS + 1)) {
589 		net_dbg_ratelimited("xennet: skb rides the rocket: %d slots, %d bytes\n",
590 				    slots, skb->len);
591 		if (skb_linearize(skb))
592 			goto drop;
593 	}
594 
595 	page = virt_to_page(skb->data);
596 	offset = offset_in_page(skb->data);
597 
598 	/* The first req should be at least ETH_HLEN size or the packet will be
599 	 * dropped by netback.
600 	 */
601 	if (unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
602 		nskb = skb_copy(skb, GFP_ATOMIC);
603 		if (!nskb)
604 			goto drop;
605 		dev_kfree_skb_any(skb);
606 		skb = nskb;
607 		page = virt_to_page(skb->data);
608 		offset = offset_in_page(skb->data);
609 	}
610 
611 	len = skb_headlen(skb);
612 
613 	spin_lock_irqsave(&queue->tx_lock, flags);
614 
615 	if (unlikely(!netif_carrier_ok(dev) ||
616 		     (slots > 1 && !xennet_can_sg(dev)) ||
617 		     netif_needs_gso(skb, netif_skb_features(skb)))) {
618 		spin_unlock_irqrestore(&queue->tx_lock, flags);
619 		goto drop;
620 	}
621 
622 	/* First request for the linear area. */
623 	first_tx = tx = xennet_make_first_txreq(queue, skb,
624 						page, offset, len);
625 	offset += tx->size;
626 	if (offset == PAGE_SIZE) {
627 		page++;
628 		offset = 0;
629 	}
630 	len -= tx->size;
631 
632 	if (skb->ip_summed == CHECKSUM_PARTIAL)
633 		/* local packet? */
634 		tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
635 	else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
636 		/* remote but checksummed. */
637 		tx->flags |= XEN_NETTXF_data_validated;
638 
639 	/* Optional extra info after the first request. */
640 	if (skb_shinfo(skb)->gso_size) {
641 		struct xen_netif_extra_info *gso;
642 
643 		gso = (struct xen_netif_extra_info *)
644 			RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
645 
646 		tx->flags |= XEN_NETTXF_extra_info;
647 
648 		gso->u.gso.size = skb_shinfo(skb)->gso_size;
649 		gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
650 			XEN_NETIF_GSO_TYPE_TCPV6 :
651 			XEN_NETIF_GSO_TYPE_TCPV4;
652 		gso->u.gso.pad = 0;
653 		gso->u.gso.features = 0;
654 
655 		gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
656 		gso->flags = 0;
657 	}
658 
659 	/* Requests for the rest of the linear area. */
660 	tx = xennet_make_txreqs(queue, tx, skb, page, offset, len);
661 
662 	/* Requests for all the frags. */
663 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
664 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
665 		tx = xennet_make_txreqs(queue, tx, skb,
666 					skb_frag_page(frag), frag->page_offset,
667 					skb_frag_size(frag));
668 	}
669 
670 	/* First request has the packet length. */
671 	first_tx->size = skb->len;
672 
673 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
674 	if (notify)
675 		notify_remote_via_irq(queue->tx_irq);
676 
677 	u64_stats_update_begin(&tx_stats->syncp);
678 	tx_stats->bytes += skb->len;
679 	tx_stats->packets++;
680 	u64_stats_update_end(&tx_stats->syncp);
681 
682 	/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
683 	xennet_tx_buf_gc(queue);
684 
685 	if (!netfront_tx_slot_available(queue))
686 		netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
687 
688 	spin_unlock_irqrestore(&queue->tx_lock, flags);
689 
690 	return NETDEV_TX_OK;
691 
692  drop:
693 	dev->stats.tx_dropped++;
694 	dev_kfree_skb_any(skb);
695 	return NETDEV_TX_OK;
696 }
697 
698 static int xennet_close(struct net_device *dev)
699 {
700 	struct netfront_info *np = netdev_priv(dev);
701 	unsigned int num_queues = dev->real_num_tx_queues;
702 	unsigned int i;
703 	struct netfront_queue *queue;
704 	netif_tx_stop_all_queues(np->netdev);
705 	for (i = 0; i < num_queues; ++i) {
706 		queue = &np->queues[i];
707 		napi_disable(&queue->napi);
708 	}
709 	return 0;
710 }
711 
712 static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
713 				grant_ref_t ref)
714 {
715 	int new = xennet_rxidx(queue->rx.req_prod_pvt);
716 
717 	BUG_ON(queue->rx_skbs[new]);
718 	queue->rx_skbs[new] = skb;
719 	queue->grant_rx_ref[new] = ref;
720 	RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->id = new;
721 	RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref;
722 	queue->rx.req_prod_pvt++;
723 }
724 
725 static int xennet_get_extras(struct netfront_queue *queue,
726 			     struct xen_netif_extra_info *extras,
727 			     RING_IDX rp)
728 
729 {
730 	struct xen_netif_extra_info *extra;
731 	struct device *dev = &queue->info->netdev->dev;
732 	RING_IDX cons = queue->rx.rsp_cons;
733 	int err = 0;
734 
735 	do {
736 		struct sk_buff *skb;
737 		grant_ref_t ref;
738 
739 		if (unlikely(cons + 1 == rp)) {
740 			if (net_ratelimit())
741 				dev_warn(dev, "Missing extra info\n");
742 			err = -EBADR;
743 			break;
744 		}
745 
746 		extra = (struct xen_netif_extra_info *)
747 			RING_GET_RESPONSE(&queue->rx, ++cons);
748 
749 		if (unlikely(!extra->type ||
750 			     extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
751 			if (net_ratelimit())
752 				dev_warn(dev, "Invalid extra type: %d\n",
753 					extra->type);
754 			err = -EINVAL;
755 		} else {
756 			memcpy(&extras[extra->type - 1], extra,
757 			       sizeof(*extra));
758 		}
759 
760 		skb = xennet_get_rx_skb(queue, cons);
761 		ref = xennet_get_rx_ref(queue, cons);
762 		xennet_move_rx_slot(queue, skb, ref);
763 	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
764 
765 	queue->rx.rsp_cons = cons;
766 	return err;
767 }
768 
769 static int xennet_get_responses(struct netfront_queue *queue,
770 				struct netfront_rx_info *rinfo, RING_IDX rp,
771 				struct sk_buff_head *list)
772 {
773 	struct xen_netif_rx_response *rx = &rinfo->rx;
774 	struct xen_netif_extra_info *extras = rinfo->extras;
775 	struct device *dev = &queue->info->netdev->dev;
776 	RING_IDX cons = queue->rx.rsp_cons;
777 	struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
778 	grant_ref_t ref = xennet_get_rx_ref(queue, cons);
779 	int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
780 	int slots = 1;
781 	int err = 0;
782 	unsigned long ret;
783 
784 	if (rx->flags & XEN_NETRXF_extra_info) {
785 		err = xennet_get_extras(queue, extras, rp);
786 		cons = queue->rx.rsp_cons;
787 	}
788 
789 	for (;;) {
790 		if (unlikely(rx->status < 0 ||
791 			     rx->offset + rx->status > XEN_PAGE_SIZE)) {
792 			if (net_ratelimit())
793 				dev_warn(dev, "rx->offset: %u, size: %d\n",
794 					 rx->offset, rx->status);
795 			xennet_move_rx_slot(queue, skb, ref);
796 			err = -EINVAL;
797 			goto next;
798 		}
799 
800 		/*
801 		 * This definitely indicates a bug, either in this driver or in
802 		 * the backend driver. In future this should flag the bad
803 		 * situation to the system controller to reboot the backend.
804 		 */
805 		if (ref == GRANT_INVALID_REF) {
806 			if (net_ratelimit())
807 				dev_warn(dev, "Bad rx response id %d.\n",
808 					 rx->id);
809 			err = -EINVAL;
810 			goto next;
811 		}
812 
813 		ret = gnttab_end_foreign_access_ref(ref, 0);
814 		BUG_ON(!ret);
815 
816 		gnttab_release_grant_reference(&queue->gref_rx_head, ref);
817 
818 		__skb_queue_tail(list, skb);
819 
820 next:
821 		if (!(rx->flags & XEN_NETRXF_more_data))
822 			break;
823 
824 		if (cons + slots == rp) {
825 			if (net_ratelimit())
826 				dev_warn(dev, "Need more slots\n");
827 			err = -ENOENT;
828 			break;
829 		}
830 
831 		rx = RING_GET_RESPONSE(&queue->rx, cons + slots);
832 		skb = xennet_get_rx_skb(queue, cons + slots);
833 		ref = xennet_get_rx_ref(queue, cons + slots);
834 		slots++;
835 	}
836 
837 	if (unlikely(slots > max)) {
838 		if (net_ratelimit())
839 			dev_warn(dev, "Too many slots\n");
840 		err = -E2BIG;
841 	}
842 
843 	if (unlikely(err))
844 		queue->rx.rsp_cons = cons + slots;
845 
846 	return err;
847 }
848 
849 static int xennet_set_skb_gso(struct sk_buff *skb,
850 			      struct xen_netif_extra_info *gso)
851 {
852 	if (!gso->u.gso.size) {
853 		if (net_ratelimit())
854 			pr_warn("GSO size must not be zero\n");
855 		return -EINVAL;
856 	}
857 
858 	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4 &&
859 	    gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV6) {
860 		if (net_ratelimit())
861 			pr_warn("Bad GSO type %d\n", gso->u.gso.type);
862 		return -EINVAL;
863 	}
864 
865 	skb_shinfo(skb)->gso_size = gso->u.gso.size;
866 	skb_shinfo(skb)->gso_type =
867 		(gso->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4) ?
868 		SKB_GSO_TCPV4 :
869 		SKB_GSO_TCPV6;
870 
871 	/* Header must be checked, and gso_segs computed. */
872 	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
873 	skb_shinfo(skb)->gso_segs = 0;
874 
875 	return 0;
876 }
877 
878 static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
879 				  struct sk_buff *skb,
880 				  struct sk_buff_head *list)
881 {
882 	struct skb_shared_info *shinfo = skb_shinfo(skb);
883 	RING_IDX cons = queue->rx.rsp_cons;
884 	struct sk_buff *nskb;
885 
886 	while ((nskb = __skb_dequeue(list))) {
887 		struct xen_netif_rx_response *rx =
888 			RING_GET_RESPONSE(&queue->rx, ++cons);
889 		skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
890 
891 		if (shinfo->nr_frags == MAX_SKB_FRAGS) {
892 			unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
893 
894 			BUG_ON(pull_to <= skb_headlen(skb));
895 			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
896 		}
897 		BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);
898 
899 		skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
900 				rx->offset, rx->status, PAGE_SIZE);
901 
902 		skb_shinfo(nskb)->nr_frags = 0;
903 		kfree_skb(nskb);
904 	}
905 
906 	return cons;
907 }
908 
909 static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
910 {
911 	bool recalculate_partial_csum = false;
912 
913 	/*
914 	 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
915 	 * peers can fail to set NETRXF_csum_blank when sending a GSO
916 	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
917 	 * recalculate the partial checksum.
918 	 */
919 	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
920 		struct netfront_info *np = netdev_priv(dev);
921 		atomic_inc(&np->rx_gso_checksum_fixup);
922 		skb->ip_summed = CHECKSUM_PARTIAL;
923 		recalculate_partial_csum = true;
924 	}
925 
926 	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
927 	if (skb->ip_summed != CHECKSUM_PARTIAL)
928 		return 0;
929 
930 	return skb_checksum_setup(skb, recalculate_partial_csum);
931 }
932 
933 static int handle_incoming_queue(struct netfront_queue *queue,
934 				 struct sk_buff_head *rxq)
935 {
936 	struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats);
937 	int packets_dropped = 0;
938 	struct sk_buff *skb;
939 
940 	while ((skb = __skb_dequeue(rxq)) != NULL) {
941 		int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
942 
943 		if (pull_to > skb_headlen(skb))
944 			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
945 
946 		/* Ethernet work: Delayed to here as it peeks the header. */
947 		skb->protocol = eth_type_trans(skb, queue->info->netdev);
948 		skb_reset_network_header(skb);
949 
950 		if (checksum_setup(queue->info->netdev, skb)) {
951 			kfree_skb(skb);
952 			packets_dropped++;
953 			queue->info->netdev->stats.rx_errors++;
954 			continue;
955 		}
956 
957 		u64_stats_update_begin(&rx_stats->syncp);
958 		rx_stats->packets++;
959 		rx_stats->bytes += skb->len;
960 		u64_stats_update_end(&rx_stats->syncp);
961 
962 		/* Pass it up. */
963 		napi_gro_receive(&queue->napi, skb);
964 	}
965 
966 	return packets_dropped;
967 }
968 
969 static int xennet_poll(struct napi_struct *napi, int budget)
970 {
971 	struct netfront_queue *queue = container_of(napi, struct netfront_queue, napi);
972 	struct net_device *dev = queue->info->netdev;
973 	struct sk_buff *skb;
974 	struct netfront_rx_info rinfo;
975 	struct xen_netif_rx_response *rx = &rinfo.rx;
976 	struct xen_netif_extra_info *extras = rinfo.extras;
977 	RING_IDX i, rp;
978 	int work_done;
979 	struct sk_buff_head rxq;
980 	struct sk_buff_head errq;
981 	struct sk_buff_head tmpq;
982 	int err;
983 
984 	spin_lock(&queue->rx_lock);
985 
986 	skb_queue_head_init(&rxq);
987 	skb_queue_head_init(&errq);
988 	skb_queue_head_init(&tmpq);
989 
990 	rp = queue->rx.sring->rsp_prod;
991 	rmb(); /* Ensure we see queued responses up to 'rp'. */
992 
993 	i = queue->rx.rsp_cons;
994 	work_done = 0;
995 	while ((i != rp) && (work_done < budget)) {
996 		memcpy(rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(*rx));
997 		memset(extras, 0, sizeof(rinfo.extras));
998 
999 		err = xennet_get_responses(queue, &rinfo, rp, &tmpq);
1000 
1001 		if (unlikely(err)) {
1002 err:
1003 			while ((skb = __skb_dequeue(&tmpq)))
1004 				__skb_queue_tail(&errq, skb);
1005 			dev->stats.rx_errors++;
1006 			i = queue->rx.rsp_cons;
1007 			continue;
1008 		}
1009 
1010 		skb = __skb_dequeue(&tmpq);
1011 
1012 		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1013 			struct xen_netif_extra_info *gso;
1014 			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1015 
1016 			if (unlikely(xennet_set_skb_gso(skb, gso))) {
1017 				__skb_queue_head(&tmpq, skb);
1018 				queue->rx.rsp_cons += skb_queue_len(&tmpq);
1019 				goto err;
1020 			}
1021 		}
1022 
1023 		NETFRONT_SKB_CB(skb)->pull_to = rx->status;
1024 		if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
1025 			NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;
1026 
1027 		skb_shinfo(skb)->frags[0].page_offset = rx->offset;
1028 		skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
1029 		skb->data_len = rx->status;
1030 		skb->len += rx->status;
1031 
1032 		i = xennet_fill_frags(queue, skb, &tmpq);
1033 
1034 		if (rx->flags & XEN_NETRXF_csum_blank)
1035 			skb->ip_summed = CHECKSUM_PARTIAL;
1036 		else if (rx->flags & XEN_NETRXF_data_validated)
1037 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1038 
1039 		__skb_queue_tail(&rxq, skb);
1040 
1041 		queue->rx.rsp_cons = ++i;
1042 		work_done++;
1043 	}
1044 
1045 	__skb_queue_purge(&errq);
1046 
1047 	work_done -= handle_incoming_queue(queue, &rxq);
1048 
1049 	xennet_alloc_rx_buffers(queue);
1050 
1051 	if (work_done < budget) {
1052 		int more_to_do = 0;
1053 
1054 		napi_complete(napi);
1055 
1056 		RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do);
1057 		if (more_to_do)
1058 			napi_schedule(napi);
1059 	}
1060 
1061 	spin_unlock(&queue->rx_lock);
1062 
1063 	return work_done;
1064 }
1065 
1066 static int xennet_change_mtu(struct net_device *dev, int mtu)
1067 {
1068 	int max = xennet_can_sg(dev) ? XEN_NETIF_MAX_TX_SIZE : ETH_DATA_LEN;
1069 
1070 	if (mtu > max)
1071 		return -EINVAL;
1072 	dev->mtu = mtu;
1073 	return 0;
1074 }
1075 
1076 static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
1077 						    struct rtnl_link_stats64 *tot)
1078 {
1079 	struct netfront_info *np = netdev_priv(dev);
1080 	int cpu;
1081 
1082 	for_each_possible_cpu(cpu) {
1083 		struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu);
1084 		struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu);
1085 		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
1086 		unsigned int start;
1087 
1088 		do {
1089 			start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
1090 			tx_packets = tx_stats->packets;
1091 			tx_bytes = tx_stats->bytes;
1092 		} while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
1093 
1094 		do {
1095 			start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
1096 			rx_packets = rx_stats->packets;
1097 			rx_bytes = rx_stats->bytes;
1098 		} while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
1099 
1100 		tot->rx_packets += rx_packets;
1101 		tot->tx_packets += tx_packets;
1102 		tot->rx_bytes   += rx_bytes;
1103 		tot->tx_bytes   += tx_bytes;
1104 	}
1105 
1106 	tot->rx_errors  = dev->stats.rx_errors;
1107 	tot->tx_dropped = dev->stats.tx_dropped;
1108 
1109 	return tot;
1110 }
1111 
1112 static void xennet_release_tx_bufs(struct netfront_queue *queue)
1113 {
1114 	struct sk_buff *skb;
1115 	int i;
1116 
1117 	for (i = 0; i < NET_TX_RING_SIZE; i++) {
1118 		/* Skip over entries which are actually freelist references */
1119 		if (skb_entry_is_link(&queue->tx_skbs[i]))
1120 			continue;
1121 
1122 		skb = queue->tx_skbs[i].skb;
1123 		get_page(queue->grant_tx_page[i]);
1124 		gnttab_end_foreign_access(queue->grant_tx_ref[i],
1125 					  GNTMAP_readonly,
1126 					  (unsigned long)page_address(queue->grant_tx_page[i]));
1127 		queue->grant_tx_page[i] = NULL;
1128 		queue->grant_tx_ref[i] = GRANT_INVALID_REF;
1129 		add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, i);
1130 		dev_kfree_skb_irq(skb);
1131 	}
1132 }
1133 
1134 static void xennet_release_rx_bufs(struct netfront_queue *queue)
1135 {
1136 	int id, ref;
1137 
1138 	spin_lock_bh(&queue->rx_lock);
1139 
1140 	for (id = 0; id < NET_RX_RING_SIZE; id++) {
1141 		struct sk_buff *skb;
1142 		struct page *page;
1143 
1144 		skb = queue->rx_skbs[id];
1145 		if (!skb)
1146 			continue;
1147 
1148 		ref = queue->grant_rx_ref[id];
1149 		if (ref == GRANT_INVALID_REF)
1150 			continue;
1151 
1152 		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
1153 
1154 		/* gnttab_end_foreign_access() needs a page ref until
1155 		 * foreign access is ended (which may be deferred).
1156 		 */
1157 		get_page(page);
1158 		gnttab_end_foreign_access(ref, 0,
1159 					  (unsigned long)page_address(page));
1160 		queue->grant_rx_ref[id] = GRANT_INVALID_REF;
1161 
1162 		kfree_skb(skb);
1163 	}
1164 
1165 	spin_unlock_bh(&queue->rx_lock);
1166 }
1167 
1168 static netdev_features_t xennet_fix_features(struct net_device *dev,
1169 	netdev_features_t features)
1170 {
1171 	struct netfront_info *np = netdev_priv(dev);
1172 
1173 	if (features & NETIF_F_SG &&
1174 	    !xenbus_read_unsigned(np->xbdev->otherend, "feature-sg", 0))
1175 		features &= ~NETIF_F_SG;
1176 
1177 	if (features & NETIF_F_IPV6_CSUM &&
1178 	    !xenbus_read_unsigned(np->xbdev->otherend,
1179 				  "feature-ipv6-csum-offload", 0))
1180 		features &= ~NETIF_F_IPV6_CSUM;
1181 
1182 	if (features & NETIF_F_TSO &&
1183 	    !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv4", 0))
1184 		features &= ~NETIF_F_TSO;
1185 
1186 	if (features & NETIF_F_TSO6 &&
1187 	    !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv6", 0))
1188 		features &= ~NETIF_F_TSO6;
1189 
1190 	return features;
1191 }
1192 
1193 static int xennet_set_features(struct net_device *dev,
1194 	netdev_features_t features)
1195 {
1196 	if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
1197 		netdev_info(dev, "Reducing MTU because no SG offload");
1198 		dev->mtu = ETH_DATA_LEN;
1199 	}
1200 
1201 	return 0;
1202 }
1203 
1204 static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
1205 {
1206 	struct netfront_queue *queue = dev_id;
1207 	unsigned long flags;
1208 
1209 	spin_lock_irqsave(&queue->tx_lock, flags);
1210 	xennet_tx_buf_gc(queue);
1211 	spin_unlock_irqrestore(&queue->tx_lock, flags);
1212 
1213 	return IRQ_HANDLED;
1214 }
1215 
1216 static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
1217 {
1218 	struct netfront_queue *queue = dev_id;
1219 	struct net_device *dev = queue->info->netdev;
1220 
1221 	if (likely(netif_carrier_ok(dev) &&
1222 		   RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)))
1223 		napi_schedule(&queue->napi);
1224 
1225 	return IRQ_HANDLED;
1226 }
1227 
1228 static irqreturn_t xennet_interrupt(int irq, void *dev_id)
1229 {
1230 	xennet_tx_interrupt(irq, dev_id);
1231 	xennet_rx_interrupt(irq, dev_id);
1232 	return IRQ_HANDLED;
1233 }
1234 
1235 #ifdef CONFIG_NET_POLL_CONTROLLER
1236 static void xennet_poll_controller(struct net_device *dev)
1237 {
1238 	/* Poll each queue */
1239 	struct netfront_info *info = netdev_priv(dev);
1240 	unsigned int num_queues = dev->real_num_tx_queues;
1241 	unsigned int i;
1242 	for (i = 0; i < num_queues; ++i)
1243 		xennet_interrupt(0, &info->queues[i]);
1244 }
1245 #endif
1246 
1247 static const struct net_device_ops xennet_netdev_ops = {
1248 	.ndo_open            = xennet_open,
1249 	.ndo_stop            = xennet_close,
1250 	.ndo_start_xmit      = xennet_start_xmit,
1251 	.ndo_change_mtu	     = xennet_change_mtu,
1252 	.ndo_get_stats64     = xennet_get_stats64,
1253 	.ndo_set_mac_address = eth_mac_addr,
1254 	.ndo_validate_addr   = eth_validate_addr,
1255 	.ndo_fix_features    = xennet_fix_features,
1256 	.ndo_set_features    = xennet_set_features,
1257 	.ndo_select_queue    = xennet_select_queue,
1258 #ifdef CONFIG_NET_POLL_CONTROLLER
1259 	.ndo_poll_controller = xennet_poll_controller,
1260 #endif
1261 };
1262 
1263 static void xennet_free_netdev(struct net_device *netdev)
1264 {
1265 	struct netfront_info *np = netdev_priv(netdev);
1266 
1267 	free_percpu(np->rx_stats);
1268 	free_percpu(np->tx_stats);
1269 	free_netdev(netdev);
1270 }
1271 
1272 static struct net_device *xennet_create_dev(struct xenbus_device *dev)
1273 {
1274 	int err;
1275 	struct net_device *netdev;
1276 	struct netfront_info *np;
1277 
1278 	netdev = alloc_etherdev_mq(sizeof(struct netfront_info), xennet_max_queues);
1279 	if (!netdev)
1280 		return ERR_PTR(-ENOMEM);
1281 
1282 	np                   = netdev_priv(netdev);
1283 	np->xbdev            = dev;
1284 
1285 	np->queues = NULL;
1286 
1287 	err = -ENOMEM;
1288 	np->rx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
1289 	if (np->rx_stats == NULL)
1290 		goto exit;
1291 	np->tx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
1292 	if (np->tx_stats == NULL)
1293 		goto exit;
1294 
1295 	netdev->netdev_ops	= &xennet_netdev_ops;
1296 
1297 	netdev->features        = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
1298 				  NETIF_F_GSO_ROBUST;
1299 	netdev->hw_features	= NETIF_F_SG |
1300 				  NETIF_F_IPV6_CSUM |
1301 				  NETIF_F_TSO | NETIF_F_TSO6;
1302 
1303 	/*
1304 	 * Assume that all hw features are available for now. This set
1305 	 * will be adjusted by the call to netdev_update_features() in
1306 	 * xennet_connect() which is the earliest point where we can
1307 	 * negotiate with the backend regarding supported features.
1308 	 */
1309 	netdev->features |= netdev->hw_features;
1310 
1311 	netdev->ethtool_ops = &xennet_ethtool_ops;
1312 	netdev->min_mtu = 0;
1313 	netdev->max_mtu = XEN_NETIF_MAX_TX_SIZE;
1314 	SET_NETDEV_DEV(netdev, &dev->dev);
1315 
1316 	np->netdev = netdev;
1317 
1318 	netif_carrier_off(netdev);
1319 
1320 	return netdev;
1321 
1322  exit:
1323 	xennet_free_netdev(netdev);
1324 	return ERR_PTR(err);
1325 }
1326 
1327 /**
1328  * Entry point to this code when a new device is created.  Allocate the basic
1329  * structures and the ring buffers for communication with the backend, and
1330  * inform the backend of the appropriate details for those.
1331  */
1332 static int netfront_probe(struct xenbus_device *dev,
1333 			  const struct xenbus_device_id *id)
1334 {
1335 	int err;
1336 	struct net_device *netdev;
1337 	struct netfront_info *info;
1338 
1339 	netdev = xennet_create_dev(dev);
1340 	if (IS_ERR(netdev)) {
1341 		err = PTR_ERR(netdev);
1342 		xenbus_dev_fatal(dev, err, "creating netdev");
1343 		return err;
1344 	}
1345 
1346 	info = netdev_priv(netdev);
1347 	dev_set_drvdata(&dev->dev, info);
1348 #ifdef CONFIG_SYSFS
1349 	info->netdev->sysfs_groups[0] = &xennet_dev_group;
1350 #endif
1351 	err = register_netdev(info->netdev);
1352 	if (err) {
1353 		pr_warn("%s: register_netdev err=%d\n", __func__, err);
1354 		goto fail;
1355 	}
1356 
1357 	return 0;
1358 
1359  fail:
1360 	xennet_free_netdev(netdev);
1361 	dev_set_drvdata(&dev->dev, NULL);
1362 	return err;
1363 }
1364 
1365 static void xennet_end_access(int ref, void *page)
1366 {
1367 	/* This frees the page as a side-effect */
1368 	if (ref != GRANT_INVALID_REF)
1369 		gnttab_end_foreign_access(ref, 0, (unsigned long)page);
1370 }
1371 
1372 static void xennet_disconnect_backend(struct netfront_info *info)
1373 {
1374 	unsigned int i = 0;
1375 	unsigned int num_queues = info->netdev->real_num_tx_queues;
1376 
1377 	netif_carrier_off(info->netdev);
1378 
1379 	for (i = 0; i < num_queues && info->queues; ++i) {
1380 		struct netfront_queue *queue = &info->queues[i];
1381 
1382 		if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
1383 			unbind_from_irqhandler(queue->tx_irq, queue);
1384 		if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
1385 			unbind_from_irqhandler(queue->tx_irq, queue);
1386 			unbind_from_irqhandler(queue->rx_irq, queue);
1387 		}
1388 		queue->tx_evtchn = queue->rx_evtchn = 0;
1389 		queue->tx_irq = queue->rx_irq = 0;
1390 
1391 		if (netif_running(info->netdev))
1392 			napi_synchronize(&queue->napi);
1393 
1394 		xennet_release_tx_bufs(queue);
1395 		xennet_release_rx_bufs(queue);
1396 		gnttab_free_grant_references(queue->gref_tx_head);
1397 		gnttab_free_grant_references(queue->gref_rx_head);
1398 
1399 		/* End access and free the pages */
1400 		xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
1401 		xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
1402 
1403 		queue->tx_ring_ref = GRANT_INVALID_REF;
1404 		queue->rx_ring_ref = GRANT_INVALID_REF;
1405 		queue->tx.sring = NULL;
1406 		queue->rx.sring = NULL;
1407 	}
1408 }
1409 
1410 /**
1411  * We are reconnecting to the backend, due to a suspend/resume, or a backend
1412  * driver restart.  We tear down our netif structure and recreate it, but
1413  * leave the device-layer structures intact so that this is transparent to the
1414  * rest of the kernel.
1415  */
1416 static int netfront_resume(struct xenbus_device *dev)
1417 {
1418 	struct netfront_info *info = dev_get_drvdata(&dev->dev);
1419 
1420 	dev_dbg(&dev->dev, "%s\n", dev->nodename);
1421 
1422 	xennet_disconnect_backend(info);
1423 	return 0;
1424 }
1425 
1426 static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
1427 {
1428 	char *s, *e, *macstr;
1429 	int i;
1430 
1431 	macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
1432 	if (IS_ERR(macstr))
1433 		return PTR_ERR(macstr);
1434 
1435 	for (i = 0; i < ETH_ALEN; i++) {
1436 		mac[i] = simple_strtoul(s, &e, 16);
1437 		if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
1438 			kfree(macstr);
1439 			return -ENOENT;
1440 		}
1441 		s = e+1;
1442 	}
1443 
1444 	kfree(macstr);
1445 	return 0;
1446 }
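/*
 * The backend publishes the MAC as a colon-separated hex string under
 * <nodename>/mac; e.g. reading "00:16:3e:12:34:56" fills
 * mac[] = { 0x00, 0x16, 0x3e, 0x12, 0x34, 0x56 } (the value shown is only
 * an example).
 */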
1447 
1448 static int setup_netfront_single(struct netfront_queue *queue)
1449 {
1450 	int err;
1451 
1452 	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
1453 	if (err < 0)
1454 		goto fail;
1455 
1456 	err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
1457 					xennet_interrupt,
1458 					0, queue->info->netdev->name, queue);
1459 	if (err < 0)
1460 		goto bind_fail;
1461 	queue->rx_evtchn = queue->tx_evtchn;
1462 	queue->rx_irq = queue->tx_irq = err;
1463 
1464 	return 0;
1465 
1466 bind_fail:
1467 	xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
1468 	queue->tx_evtchn = 0;
1469 fail:
1470 	return err;
1471 }
1472 
1473 static int setup_netfront_split(struct netfront_queue *queue)
1474 {
1475 	int err;
1476 
1477 	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
1478 	if (err < 0)
1479 		goto fail;
1480 	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->rx_evtchn);
1481 	if (err < 0)
1482 		goto alloc_rx_evtchn_fail;
1483 
1484 	snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
1485 		 "%s-tx", queue->name);
1486 	err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
1487 					xennet_tx_interrupt,
1488 					0, queue->tx_irq_name, queue);
1489 	if (err < 0)
1490 		goto bind_tx_fail;
1491 	queue->tx_irq = err;
1492 
1493 	snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
1494 		 "%s-rx", queue->name);
1495 	err = bind_evtchn_to_irqhandler(queue->rx_evtchn,
1496 					xennet_rx_interrupt,
1497 					0, queue->rx_irq_name, queue);
1498 	if (err < 0)
1499 		goto bind_rx_fail;
1500 	queue->rx_irq = err;
1501 
1502 	return 0;
1503 
1504 bind_rx_fail:
1505 	unbind_from_irqhandler(queue->tx_irq, queue);
1506 	queue->tx_irq = 0;
1507 bind_tx_fail:
1508 	xenbus_free_evtchn(queue->info->xbdev, queue->rx_evtchn);
1509 	queue->rx_evtchn = 0;
1510 alloc_rx_evtchn_fail:
1511 	xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
1512 	queue->tx_evtchn = 0;
1513 fail:
1514 	return err;
1515 }
1516 
1517 static int setup_netfront(struct xenbus_device *dev,
1518 			struct netfront_queue *queue, unsigned int feature_split_evtchn)
1519 {
1520 	struct xen_netif_tx_sring *txs;
1521 	struct xen_netif_rx_sring *rxs;
1522 	grant_ref_t gref;
1523 	int err;
1524 
1525 	queue->tx_ring_ref = GRANT_INVALID_REF;
1526 	queue->rx_ring_ref = GRANT_INVALID_REF;
1527 	queue->rx.sring = NULL;
1528 	queue->tx.sring = NULL;
1529 
1530 	txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
1531 	if (!txs) {
1532 		err = -ENOMEM;
1533 		xenbus_dev_fatal(dev, err, "allocating tx ring page");
1534 		goto fail;
1535 	}
1536 	SHARED_RING_INIT(txs);
1537 	FRONT_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);
1538 
1539 	err = xenbus_grant_ring(dev, txs, 1, &gref);
1540 	if (err < 0)
1541 		goto grant_tx_ring_fail;
1542 	queue->tx_ring_ref = gref;
1543 
1544 	rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
1545 	if (!rxs) {
1546 		err = -ENOMEM;
1547 		xenbus_dev_fatal(dev, err, "allocating rx ring page");
1548 		goto alloc_rx_ring_fail;
1549 	}
1550 	SHARED_RING_INIT(rxs);
1551 	FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
1552 
1553 	err = xenbus_grant_ring(dev, rxs, 1, &gref);
1554 	if (err < 0)
1555 		goto grant_rx_ring_fail;
1556 	queue->rx_ring_ref = gref;
1557 
1558 	if (feature_split_evtchn)
1559 		err = setup_netfront_split(queue);
1560 	/* setup single event channel if
1561 	 *  a) feature-split-event-channels == 0
1562 	 *  b) feature-split-event-channels == 1 but failed to setup
1563 	 */
1564 	if (!feature_split_evtchn || (feature_split_evtchn && err))
1565 		err = setup_netfront_single(queue);
1566 
1567 	if (err)
1568 		goto alloc_evtchn_fail;
1569 
1570 	return 0;
1571 
1572 	/* If we fail to setup netfront, it is safe to just revoke access to
1573 	 * granted pages because backend is not accessing it at this point.
1574 	 */
1575 alloc_evtchn_fail:
1576 	gnttab_end_foreign_access_ref(queue->rx_ring_ref, 0);
1577 grant_rx_ring_fail:
1578 	free_page((unsigned long)rxs);
1579 alloc_rx_ring_fail:
1580 	gnttab_end_foreign_access_ref(queue->tx_ring_ref, 0);
1581 grant_tx_ring_fail:
1582 	free_page((unsigned long)txs);
1583 fail:
1584 	return err;
1585 }
1586 
1587 /* Queue-specific initialisation
1588  * This used to be done in xennet_create_dev() but must now
1589  * be run per-queue.
1590  */
1591 static int xennet_init_queue(struct netfront_queue *queue)
1592 {
1593 	unsigned short i;
1594 	int err = 0;
1595 
1596 	spin_lock_init(&queue->tx_lock);
1597 	spin_lock_init(&queue->rx_lock);
1598 
1599 	setup_timer(&queue->rx_refill_timer, rx_refill_timeout,
1600 		    (unsigned long)queue);
1601 
1602 	snprintf(queue->name, sizeof(queue->name), "%s-q%u",
1603 		 queue->info->netdev->name, queue->id);
1604 
1605 	/* Initialise tx_skbs as a free chain containing every entry. */
1606 	queue->tx_skb_freelist = 0;
1607 	for (i = 0; i < NET_TX_RING_SIZE; i++) {
1608 		skb_entry_set_link(&queue->tx_skbs[i], i+1);
1609 		queue->grant_tx_ref[i] = GRANT_INVALID_REF;
1610 		queue->grant_tx_page[i] = NULL;
1611 	}
1612 
1613 	/* Clear out rx_skbs */
1614 	for (i = 0; i < NET_RX_RING_SIZE; i++) {
1615 		queue->rx_skbs[i] = NULL;
1616 		queue->grant_rx_ref[i] = GRANT_INVALID_REF;
1617 	}
1618 
1619 	/* A grant for every tx ring slot */
1620 	if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
1621 					  &queue->gref_tx_head) < 0) {
1622 		pr_alert("can't alloc tx grant refs\n");
1623 		err = -ENOMEM;
1624 		goto exit;
1625 	}
1626 
1627 	/* A grant for every rx ring slot */
1628 	if (gnttab_alloc_grant_references(NET_RX_RING_SIZE,
1629 					  &queue->gref_rx_head) < 0) {
1630 		pr_alert("can't alloc rx grant refs\n");
1631 		err = -ENOMEM;
1632 		goto exit_free_tx;
1633 	}
1634 
1635 	return 0;
1636 
1637  exit_free_tx:
1638 	gnttab_free_grant_references(queue->gref_tx_head);
1639  exit:
1640 	return err;
1641 }
1642 
1643 static int write_queue_xenstore_keys(struct netfront_queue *queue,
1644 			   struct xenbus_transaction *xbt, int write_hierarchical)
1645 {
1646 	/* Write the queue-specific keys into XenStore in the traditional
1647 	 * way for a single queue, or in per-queue subkeys for multiple
1648 	 * queues.
1649 	 */
1650 	struct xenbus_device *dev = queue->info->xbdev;
1651 	int err;
1652 	const char *message;
1653 	char *path;
1654 	size_t pathsize;
1655 
1656 	/* Choose the correct place to write the keys */
1657 	if (write_hierarchical) {
1658 		pathsize = strlen(dev->nodename) + 10;
1659 		path = kzalloc(pathsize, GFP_KERNEL);
1660 		if (!path) {
1661 			err = -ENOMEM;
1662 			message = "out of memory while writing ring references";
1663 			goto error;
1664 		}
1665 		snprintf(path, pathsize, "%s/queue-%u",
1666 				dev->nodename, queue->id);
1667 	} else {
1668 		path = (char *)dev->nodename;
1669 	}
1670 
1671 	/* Write ring references */
1672 	err = xenbus_printf(*xbt, path, "tx-ring-ref", "%u",
1673 			queue->tx_ring_ref);
1674 	if (err) {
1675 		message = "writing tx-ring-ref";
1676 		goto error;
1677 	}
1678 
1679 	err = xenbus_printf(*xbt, path, "rx-ring-ref", "%u",
1680 			queue->rx_ring_ref);
1681 	if (err) {
1682 		message = "writing rx-ring-ref";
1683 		goto error;
1684 	}
1685 
1686 	/* Write event channels; taking into account both shared
1687 	 * and split event channel scenarios.
1688 	 */
1689 	if (queue->tx_evtchn == queue->rx_evtchn) {
1690 		/* Shared event channel */
1691 		err = xenbus_printf(*xbt, path,
1692 				"event-channel", "%u", queue->tx_evtchn);
1693 		if (err) {
1694 			message = "writing event-channel";
1695 			goto error;
1696 		}
1697 	} else {
1698 		/* Split event channels */
1699 		err = xenbus_printf(*xbt, path,
1700 				"event-channel-tx", "%u", queue->tx_evtchn);
1701 		if (err) {
1702 			message = "writing event-channel-tx";
1703 			goto error;
1704 		}
1705 
1706 		err = xenbus_printf(*xbt, path,
1707 				"event-channel-rx", "%u", queue->rx_evtchn);
1708 		if (err) {
1709 			message = "writing event-channel-rx";
1710 			goto error;
1711 		}
1712 	}
1713 
1714 	if (write_hierarchical)
1715 		kfree(path);
1716 	return 0;
1717 
1718 error:
1719 	if (write_hierarchical)
1720 		kfree(path);
1721 	xenbus_dev_fatal(dev, err, "%s", message);
1722 	return err;
1723 }
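/*
 * Resulting xenstore layout, using an illustrative frontend path of
 * device/vif/0:
 *
 *   flat (single queue):            hierarchical (per queue N):
 *     device/vif/0/tx-ring-ref        device/vif/0/queue-N/tx-ring-ref
 *     device/vif/0/rx-ring-ref        device/vif/0/queue-N/rx-ring-ref
 *     device/vif/0/event-channel      device/vif/0/queue-N/event-channel
 *
 * with event-channel replaced by event-channel-tx and event-channel-rx in
 * either layout when split event channels are in use.
 */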
1724 
1725 static void xennet_destroy_queues(struct netfront_info *info)
1726 {
1727 	unsigned int i;
1728 
1729 	rtnl_lock();
1730 
1731 	for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
1732 		struct netfront_queue *queue = &info->queues[i];
1733 
1734 		if (netif_running(info->netdev))
1735 			napi_disable(&queue->napi);
1736 		del_timer_sync(&queue->rx_refill_timer);
1737 		netif_napi_del(&queue->napi);
1738 	}
1739 
1740 	rtnl_unlock();
1741 
1742 	kfree(info->queues);
1743 	info->queues = NULL;
1744 }
1745 
1746 static int xennet_create_queues(struct netfront_info *info,
1747 				unsigned int *num_queues)
1748 {
1749 	unsigned int i;
1750 	int ret;
1751 
1752 	info->queues = kcalloc(*num_queues, sizeof(struct netfront_queue),
1753 			       GFP_KERNEL);
1754 	if (!info->queues)
1755 		return -ENOMEM;
1756 
1757 	rtnl_lock();
1758 
1759 	for (i = 0; i < *num_queues; i++) {
1760 		struct netfront_queue *queue = &info->queues[i];
1761 
1762 		queue->id = i;
1763 		queue->info = info;
1764 
1765 		ret = xennet_init_queue(queue);
1766 		if (ret < 0) {
1767 			dev_warn(&info->netdev->dev,
1768 				 "only created %d queues\n", i);
1769 			*num_queues = i;
1770 			break;
1771 		}
1772 
1773 		netif_napi_add(queue->info->netdev, &queue->napi,
1774 			       xennet_poll, 64);
1775 		if (netif_running(info->netdev))
1776 			napi_enable(&queue->napi);
1777 	}
1778 
1779 	netif_set_real_num_tx_queues(info->netdev, *num_queues);
1780 
1781 	rtnl_unlock();
1782 
1783 	if (*num_queues == 0) {
1784 		dev_err(&info->netdev->dev, "no queues\n");
1785 		return -EINVAL;
1786 	}
1787 	return 0;
1788 }
1789 
1790 /* Common code used when first setting up, and when resuming. */
1791 static int talk_to_netback(struct xenbus_device *dev,
1792 			   struct netfront_info *info)
1793 {
1794 	const char *message;
1795 	struct xenbus_transaction xbt;
1796 	int err;
1797 	unsigned int feature_split_evtchn;
1798 	unsigned int i = 0;
1799 	unsigned int max_queues = 0;
1800 	struct netfront_queue *queue = NULL;
1801 	unsigned int num_queues = 1;
1802 
1803 	info->netdev->irq = 0;
1804 
1805 	/* Check if backend supports multiple queues */
1806 	max_queues = xenbus_read_unsigned(info->xbdev->otherend,
1807 					  "multi-queue-max-queues", 1);
1808 	num_queues = min(max_queues, xennet_max_queues);
1809 
1810 	/* Check feature-split-event-channels */
1811 	feature_split_evtchn = xenbus_read_unsigned(info->xbdev->otherend,
1812 					"feature-split-event-channels", 0);
1813 
1814 	/* Read mac addr. */
1815 	err = xen_net_read_mac(dev, info->netdev->dev_addr);
1816 	if (err) {
1817 		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
1818 		goto out;
1819 	}
1820 
1821 	if (info->queues)
1822 		xennet_destroy_queues(info);
1823 
1824 	err = xennet_create_queues(info, &num_queues);
1825 	if (err < 0)
1826 		goto destroy_ring;
1827 
1828 	/* Create shared ring, alloc event channel -- for each queue */
1829 	for (i = 0; i < num_queues; ++i) {
1830 		queue = &info->queues[i];
1831 		err = setup_netfront(dev, queue, feature_split_evtchn);
1832 		if (err) {
1833 			/* setup_netfront() will tidy up the current
1834 			 * queue on error, but we need to clean up
1835 			 * those already allocated.
1836 			 */
1837 			if (i > 0) {
1838 				rtnl_lock();
1839 				netif_set_real_num_tx_queues(info->netdev, i);
1840 				rtnl_unlock();
1841 				goto destroy_ring;
1842 			} else {
1843 				goto out;
1844 			}
1845 		}
1846 	}
1847 
1848 again:
1849 	err = xenbus_transaction_start(&xbt);
1850 	if (err) {
1851 		xenbus_dev_fatal(dev, err, "starting transaction");
1852 		goto destroy_ring;
1853 	}
1854 
1855 	if (xenbus_exists(XBT_NIL,
1856 			  info->xbdev->otherend, "multi-queue-max-queues")) {
1857 		/* Write the number of queues */
1858 		err = xenbus_printf(xbt, dev->nodename,
1859 				    "multi-queue-num-queues", "%u", num_queues);
1860 		if (err) {
1861 			message = "writing multi-queue-num-queues";
1862 			goto abort_transaction_no_dev_fatal;
1863 		}
1864 	}
1865 
1866 	if (num_queues == 1) {
1867 		err = write_queue_xenstore_keys(&info->queues[0], &xbt, 0); /* flat */
1868 		if (err)
1869 			goto abort_transaction_no_dev_fatal;
1870 	} else {
1871 		/* Write the keys for each queue */
1872 		for (i = 0; i < num_queues; ++i) {
1873 			queue = &info->queues[i];
1874 			err = write_queue_xenstore_keys(queue, &xbt, 1); /* hierarchical */
1875 			if (err)
1876 				goto abort_transaction_no_dev_fatal;
1877 		}
1878 	}
1879 
1880 	/* The remaining keys are not queue-specific */
1881 	err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
1882 			    1);
1883 	if (err) {
1884 		message = "writing request-rx-copy";
1885 		goto abort_transaction;
1886 	}
1887 
1888 	err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
1889 	if (err) {
1890 		message = "writing feature-rx-notify";
1891 		goto abort_transaction;
1892 	}
1893 
1894 	err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
1895 	if (err) {
1896 		message = "writing feature-sg";
1897 		goto abort_transaction;
1898 	}
1899 
1900 	err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
1901 	if (err) {
1902 		message = "writing feature-gso-tcpv4";
1903 		goto abort_transaction;
1904 	}
1905 
1906 	err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv6", "1");
1907 	if (err) {
1908 		message = "writing feature-gso-tcpv6";
1909 		goto abort_transaction;
1910 	}
1911 
1912 	err = xenbus_write(xbt, dev->nodename, "feature-ipv6-csum-offload",
1913 			   "1");
1914 	if (err) {
1915 		message = "writing feature-ipv6-csum-offload";
1916 		goto abort_transaction;
1917 	}
1918 
1919 	err = xenbus_transaction_end(xbt, 0);
1920 	if (err) {
1921 		if (err == -EAGAIN)
1922 			goto again;
1923 		xenbus_dev_fatal(dev, err, "completing transaction");
1924 		goto destroy_ring;
1925 	}
1926 
1927 	return 0;
1928 
1929  abort_transaction:
1930 	xenbus_dev_fatal(dev, err, "%s", message);
1931 abort_transaction_no_dev_fatal:
1932 	xenbus_transaction_end(xbt, 1);
1933  destroy_ring:
1934 	xennet_disconnect_backend(info);
1935 	kfree(info->queues);
1936 	info->queues = NULL;
1937  out:
1938 	return err;
1939 }
1940 
1941 static int xennet_connect(struct net_device *dev)
1942 {
1943 	struct netfront_info *np = netdev_priv(dev);
1944 	unsigned int num_queues = 0;
1945 	int err;
1946 	unsigned int j = 0;
1947 	struct netfront_queue *queue = NULL;
1948 
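        /*
         * "feature-rx-copy" means the backend copies received packets into
         * pages granted by this frontend.  It is the only receive mode this
         * driver supports, so a backend without it cannot be used.
         */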
1949 	if (!xenbus_read_unsigned(np->xbdev->otherend, "feature-rx-copy", 0)) {
1950 		dev_info(&dev->dev,
1951 			 "backend does not support copying receive path\n");
1952 		return -ENODEV;
1953 	}
1954 
1955 	err = talk_to_netback(np->xbdev, np);
1956 	if (err)
1957 		return err;
1958 
1959 	/* talk_to_netback() sets the correct number of queues */
1960 	num_queues = dev->real_num_tx_queues;
1961 
1962 	rtnl_lock();
1963 	netdev_update_features(dev);
1964 	rtnl_unlock();
1965 
1966 	/*
1967 	 * All public and private state should now be sane.  Get
1968 	 * ready to start sending and receiving packets and give the driver
1969 	 * domain a kick because we've probably just requeued some
1970 	 * packets.
1971 	 */
1972 	netif_carrier_on(np->netdev);
1973 	for (j = 0; j < num_queues; ++j) {
1974 		queue = &np->queues[j];
1975 
1976 		notify_remote_via_irq(queue->tx_irq);
1977 		if (queue->tx_irq != queue->rx_irq)
1978 			notify_remote_via_irq(queue->rx_irq);
1979 
1980 		spin_lock_irq(&queue->tx_lock);
1981 		xennet_tx_buf_gc(queue);
1982 		spin_unlock_irq(&queue->tx_lock);
1983 
1984 		spin_lock_bh(&queue->rx_lock);
1985 		xennet_alloc_rx_buffers(queue);
1986 		spin_unlock_bh(&queue->rx_lock);
1987 	}
1988 
1989 	return 0;
1990 }
1991 
1992 /*
1993  * Callback received when the backend's state changes.
1994  */
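/*
 * Typically the backend moves to InitWait once it is ready; the frontend
 * then connects (negotiating features and publishing its rings) and
 * switches itself to Connected, the backend follows with Connected, and
 * netdev_notify_peers() announces the (re)appeared interface to the
 * network.
 */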
1995 static void netback_changed(struct xenbus_device *dev,
1996 			    enum xenbus_state backend_state)
1997 {
1998 	struct netfront_info *np = dev_get_drvdata(&dev->dev);
1999 	struct net_device *netdev = np->netdev;
2000 
2001 	dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));
2002 
2003 	switch (backend_state) {
2004 	case XenbusStateInitialising:
2005 	case XenbusStateInitialised:
2006 	case XenbusStateReconfiguring:
2007 	case XenbusStateReconfigured:
2008 	case XenbusStateUnknown:
2009 		break;
2010 
2011 	case XenbusStateInitWait:
2012 		if (dev->state != XenbusStateInitialising)
2013 			break;
2014 		if (xennet_connect(netdev) != 0)
2015 			break;
2016 		xenbus_switch_state(dev, XenbusStateConnected);
2017 		break;
2018 
2019 	case XenbusStateConnected:
2020 		netdev_notify_peers(netdev);
2021 		break;
2022 
2023 	case XenbusStateClosed:
2024 		if (dev->state == XenbusStateClosed)
2025 			break;
2026 		/* Missed the backend's CLOSING state -- fallthrough */
2027 	case XenbusStateClosing:
2028 		xenbus_frontend_closed(dev);
2029 		break;
2030 	}
2031 }
2032 
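/*
 * ethtool statistics are described as (name, offset) pairs; each offset
 * locates an atomic_t counter inside struct netfront_info, which
 * xennet_get_ethtool_stats() reads generically via atomic_read().
 */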
2033 static const struct xennet_stat {
2034 	char name[ETH_GSTRING_LEN];
2035 	u16 offset;
2036 } xennet_stats[] = {
2037 	{
2038 		"rx_gso_checksum_fixup",
2039 		offsetof(struct netfront_info, rx_gso_checksum_fixup)
2040 	},
2041 };
2042 
2043 static int xennet_get_sset_count(struct net_device *dev, int string_set)
2044 {
2045 	switch (string_set) {
2046 	case ETH_SS_STATS:
2047 		return ARRAY_SIZE(xennet_stats);
2048 	default:
2049 		return -EINVAL;
2050 	}
2051 }
2052 
2053 static void xennet_get_ethtool_stats(struct net_device *dev,
2054 				     struct ethtool_stats *stats, u64 *data)
2055 {
2056 	void *np = netdev_priv(dev);
2057 	int i;
2058 
2059 	for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
2060 		data[i] = atomic_read((atomic_t *)(np + xennet_stats[i].offset));
2061 }
2062 
2063 static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2064 {
2065 	int i;
2066 
2067 	switch (stringset) {
2068 	case ETH_SS_STATS:
2069 		for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
2070 			memcpy(data + i * ETH_GSTRING_LEN,
2071 			       xennet_stats[i].name, ETH_GSTRING_LEN);
2072 		break;
2073 	}
2074 }
2075 
2076 static const struct ethtool_ops xennet_ethtool_ops = {
2078 	.get_link = ethtool_op_get_link,
2079 
2080 	.get_sset_count = xennet_get_sset_count,
2081 	.get_ethtool_stats = xennet_get_ethtool_stats,
2082 	.get_strings = xennet_get_strings,
2083 };
2084 
2085 #ifdef CONFIG_SYSFS
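/*
 * rxbuf_min, rxbuf_max and rxbuf_cur are presumably kept only for
 * compatibility with older tooling: the rx ring size is fixed at
 * NET_RX_RING_SIZE, so all three report that value and writes are parsed
 * for validity but otherwise ignored.
 */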
2086 static ssize_t show_rxbuf(struct device *dev,
2087 			  struct device_attribute *attr, char *buf)
2088 {
2089 	return sprintf(buf, "%lu\n", NET_RX_RING_SIZE);
2090 }
2091 
2092 static ssize_t store_rxbuf(struct device *dev,
2093 			   struct device_attribute *attr,
2094 			   const char *buf, size_t len)
2095 {
2096 	char *endp;
2097 	unsigned long target;
2098 
2099 	if (!capable(CAP_NET_ADMIN))
2100 		return -EPERM;
2101 
2102 	target = simple_strtoul(buf, &endp, 0);
2103 	if (endp == buf)
2104 		return -EBADMSG;
2105 
2106 	/* rxbuf_min and rxbuf_max are no longer configurable. */
2107 
2108 	return len;
2109 }
2110 
2111 static DEVICE_ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf);
2112 static DEVICE_ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf);
2113 static DEVICE_ATTR(rxbuf_cur, S_IRUGO, show_rxbuf, NULL);
2114 
2115 static struct attribute *xennet_dev_attrs[] = {
2116 	&dev_attr_rxbuf_min.attr,
2117 	&dev_attr_rxbuf_max.attr,
2118 	&dev_attr_rxbuf_cur.attr,
2119 	NULL
2120 };
2121 
2122 static const struct attribute_group xennet_dev_group = {
2123 	.attrs = xennet_dev_attrs
2124 };
2125 #endif /* CONFIG_SYSFS */
2126 
2127 static int xennet_remove(struct xenbus_device *dev)
2128 {
2129 	struct netfront_info *info = dev_get_drvdata(&dev->dev);
2130 
2131 	dev_dbg(&dev->dev, "%s\n", dev->nodename);
2132 
2133 	xennet_disconnect_backend(info);
2134 
2135 	unregister_netdev(info->netdev);
2136 
2137 	if (info->queues)
2138 		xennet_destroy_queues(info);
2139 	xennet_free_netdev(info->netdev);
2140 
2141 	return 0;
2142 }
2143 
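/*
 * "vif" is the xenstore device class the toolstack uses for
 * paravirtualised network interfaces (device/vif/<N> in the frontend's
 * tree), so this driver binds to every such node.
 */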
2144 static const struct xenbus_device_id netfront_ids[] = {
2145 	{ "vif" },
2146 	{ "" }
2147 };
2148 
2149 static struct xenbus_driver netfront_driver = {
2150 	.ids = netfront_ids,
2151 	.probe = netfront_probe,
2152 	.remove = xennet_remove,
2153 	.resume = netfront_resume,
2154 	.otherend_changed = netback_changed,
2155 };
2156 
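/*
 * For example, booting with xen_netfront.max_queues=2 (the '-' in the
 * module name becomes '_' for parameters) caps every interface at two
 * queue pairs, subject to the backend's own multi-queue-max-queues
 * limit; leaving the parameter at 0 lets netif_init() below default it
 * to the number of online CPUs.
 */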
2157 static int __init netif_init(void)
2158 {
2159 	if (!xen_domain())
2160 		return -ENODEV;
2161 
2162 	if (!xen_has_pv_nic_devices())
2163 		return -ENODEV;
2164 
2165 	pr_info("Initialising Xen virtual ethernet driver\n");
2166 
2167 	/* Allow as many queues as there are CPUs if the user has not
2168 	 * specified a value.
2169 	 */
2170 	if (xennet_max_queues == 0)
2171 		xennet_max_queues = num_online_cpus();
2172 
2173 	return xenbus_register_frontend(&netfront_driver);
2174 }
2175 module_init(netif_init);
2176 
2177 
2178 static void __exit netif_exit(void)
2179 {
2180 	xenbus_unregister_driver(&netfront_driver);
2181 }
2182 module_exit(netif_exit);
2183 
2184 MODULE_DESCRIPTION("Xen virtual network device frontend");
2185 MODULE_LICENSE("GPL");
2186 MODULE_ALIAS("xen:vif");
2187 MODULE_ALIAS("xennet");
2188