xref: /openbmc/linux/drivers/net/xen-netfront.c (revision 9ffc93f2)
1 /*
2  * Virtual network driver for conversing with remote driver backends.
3  *
4  * Copyright (c) 2002-2005, K A Fraser
5  * Copyright (c) 2005, XenSource Ltd
6  *
7  * This program is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU General Public License version 2
9  * as published by the Free Software Foundation; or, when distributed
10  * separately from the Linux kernel or incorporated into other
11  * software packages, subject to the following license:
12  *
13  * Permission is hereby granted, free of charge, to any person obtaining a copy
14  * of this source file (the "Software"), to deal in the Software without
15  * restriction, including without limitation the rights to use, copy, modify,
16  * merge, publish, distribute, sublicense, and/or sell copies of the Software,
17  * and to permit persons to whom the Software is furnished to do so, subject to
18  * the following conditions:
19  *
20  * The above copyright notice and this permission notice shall be included in
21  * all copies or substantial portions of the Software.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
26  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
29  * IN THE SOFTWARE.
30  */
31 
32 #include <linux/module.h>
33 #include <linux/kernel.h>
34 #include <linux/netdevice.h>
35 #include <linux/etherdevice.h>
36 #include <linux/skbuff.h>
37 #include <linux/ethtool.h>
38 #include <linux/if_ether.h>
39 #include <linux/tcp.h>
40 #include <linux/udp.h>
41 #include <linux/moduleparam.h>
42 #include <linux/mm.h>
43 #include <linux/slab.h>
44 #include <net/ip.h>
45 
46 #include <xen/xen.h>
47 #include <xen/xenbus.h>
48 #include <xen/events.h>
49 #include <xen/page.h>
50 #include <xen/grant_table.h>
51 
52 #include <xen/interface/io/netif.h>
53 #include <xen/interface/memory.h>
54 #include <xen/interface/grant_table.h>
55 
56 static const struct ethtool_ops xennet_ethtool_ops;
57 
58 struct netfront_cb {
59 	struct page *page;
60 	unsigned offset;
61 };
62 
63 #define NETFRONT_SKB_CB(skb)	((struct netfront_cb *)((skb)->cb))
64 
65 #define RX_COPY_THRESHOLD 256
66 
67 #define GRANT_INVALID_REF	0
68 
69 #define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
70 #define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
71 #define TX_MAX_TARGET min_t(int, NET_TX_RING_SIZE, 256)
72 
73 struct netfront_stats {
74 	u64			rx_packets;
75 	u64			tx_packets;
76 	u64			rx_bytes;
77 	u64			tx_bytes;
78 	struct u64_stats_sync	syncp;
79 };
80 
81 struct netfront_info {
82 	struct list_head list;
83 	struct net_device *netdev;
84 
85 	struct napi_struct napi;
86 
87 	unsigned int evtchn;
88 	struct xenbus_device *xbdev;
89 
90 	spinlock_t   tx_lock;
91 	struct xen_netif_tx_front_ring tx;
92 	int tx_ring_ref;
93 
94 	/*
95 	 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
96 	 * are linked from tx_skb_freelist through skb_entry.link.
97 	 *
98 	 *  NB. Freelist index entries are always going to be less than
99 	 *  PAGE_OFFSET, whereas pointers to skbs will always be equal or
100 	 *  greater than PAGE_OFFSET: we use this property to distinguish
101 	 *  them.
102 	 */
103 	union skb_entry {
104 		struct sk_buff *skb;
105 		unsigned long link;
106 	} tx_skbs[NET_TX_RING_SIZE];
107 	grant_ref_t gref_tx_head;
108 	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
109 	unsigned tx_skb_freelist;
110 
111 	spinlock_t   rx_lock ____cacheline_aligned_in_smp;
112 	struct xen_netif_rx_front_ring rx;
113 	int rx_ring_ref;
114 
115 	/* Receive-ring batched refills. */
116 #define RX_MIN_TARGET 8
117 #define RX_DFL_MIN_TARGET 64
118 #define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
119 	unsigned rx_min_target, rx_max_target, rx_target;
120 	struct sk_buff_head rx_batch;
121 
122 	struct timer_list rx_refill_timer;
123 
124 	struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
125 	grant_ref_t gref_rx_head;
126 	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
127 
128 	unsigned long rx_pfn_array[NET_RX_RING_SIZE];
129 	struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
130 	struct mmu_update rx_mmu[NET_RX_RING_SIZE];
131 
132 	/* Statistics */
133 	struct netfront_stats __percpu *stats;
134 
135 	unsigned long rx_gso_checksum_fixup;
136 };
137 
138 struct netfront_rx_info {
139 	struct xen_netif_rx_response rx;
140 	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
141 };
142 
143 static void skb_entry_set_link(union skb_entry *list, unsigned short id)
144 {
145 	list->link = id;
146 }
147 
148 static int skb_entry_is_link(const union skb_entry *list)
149 {
150 	BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link));
151 	return (unsigned long)list->skb < PAGE_OFFSET;
152 }
153 
154 /*
155  * Access helpers for acquiring and freeing slots in tx_skbs[].
156  */
157 
158 static void add_id_to_freelist(unsigned *head, union skb_entry *list,
159 			       unsigned short id)
160 {
161 	skb_entry_set_link(&list[id], *head);
162 	*head = id;
163 }
164 
165 static unsigned short get_id_from_freelist(unsigned *head,
166 					   union skb_entry *list)
167 {
168 	unsigned int id = *head;
169 	*head = list[id].link;
170 	return id;
171 }
172 
173 static int xennet_rxidx(RING_IDX idx)
174 {
175 	return idx & (NET_RX_RING_SIZE - 1);
176 }
177 
178 static struct sk_buff *xennet_get_rx_skb(struct netfront_info *np,
179 					 RING_IDX ri)
180 {
181 	int i = xennet_rxidx(ri);
182 	struct sk_buff *skb = np->rx_skbs[i];
183 	np->rx_skbs[i] = NULL;
184 	return skb;
185 }
186 
187 static grant_ref_t xennet_get_rx_ref(struct netfront_info *np,
188 					    RING_IDX ri)
189 {
190 	int i = xennet_rxidx(ri);
191 	grant_ref_t ref = np->grant_rx_ref[i];
192 	np->grant_rx_ref[i] = GRANT_INVALID_REF;
193 	return ref;
194 }
195 
196 #ifdef CONFIG_SYSFS
197 static int xennet_sysfs_addif(struct net_device *netdev);
198 static void xennet_sysfs_delif(struct net_device *netdev);
199 #else /* !CONFIG_SYSFS */
200 #define xennet_sysfs_addif(dev) (0)
201 #define xennet_sysfs_delif(dev) do { } while (0)
202 #endif
203 
204 static bool xennet_can_sg(struct net_device *dev)
205 {
206 	return dev->features & NETIF_F_SG;
207 }
208 
209 
210 static void rx_refill_timeout(unsigned long data)
211 {
212 	struct net_device *dev = (struct net_device *)data;
213 	struct netfront_info *np = netdev_priv(dev);
214 	napi_schedule(&np->napi);
215 }
216 
217 static int netfront_tx_slot_available(struct netfront_info *np)
218 {
219 	return (np->tx.req_prod_pvt - np->tx.rsp_cons) <
220 		(TX_MAX_TARGET - MAX_SKB_FRAGS - 2);
221 }
222 
223 static void xennet_maybe_wake_tx(struct net_device *dev)
224 {
225 	struct netfront_info *np = netdev_priv(dev);
226 
227 	if (unlikely(netif_queue_stopped(dev)) &&
228 	    netfront_tx_slot_available(np) &&
229 	    likely(netif_running(dev)))
230 		netif_wake_queue(dev);
231 }
232 
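/*
 * Refill the receive ring: allocate skbs (with one page frag each) in a
 * batch, grant the backend access to every page and notify it if needed.
 * Callers hold rx_lock; nothing is done while the carrier is off.
 */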
233 static void xennet_alloc_rx_buffers(struct net_device *dev)
234 {
235 	unsigned short id;
236 	struct netfront_info *np = netdev_priv(dev);
237 	struct sk_buff *skb;
238 	struct page *page;
239 	int i, batch_target, notify;
240 	RING_IDX req_prod = np->rx.req_prod_pvt;
241 	grant_ref_t ref;
242 	unsigned long pfn;
243 	void *vaddr;
244 	struct xen_netif_rx_request *req;
245 
246 	if (unlikely(!netif_carrier_ok(dev)))
247 		return;
248 
249 	/*
250 	 * Allocate skbuffs greedily, even though we batch updates to the
251 	 * receive ring. This creates a less bursty demand on the memory
252 	 * allocator, so should reduce the chance of failed allocation requests
253 	 * both for ourself and for other kernel subsystems.
254 	 */
255 	batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
256 	for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
257 		skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD + NET_IP_ALIGN,
258 					 GFP_ATOMIC | __GFP_NOWARN);
259 		if (unlikely(!skb))
260 			goto no_skb;
261 
262 		/* Align the IP header to a 16-byte boundary */
263 		skb_reserve(skb, NET_IP_ALIGN);
264 
265 		page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
266 		if (!page) {
267 			kfree_skb(skb);
268 no_skb:
269 			/* Any skbuffs queued for refill? Force them out. */
270 			if (i != 0)
271 				goto refill;
272 			/* Could not allocate any skbuffs. Try again later. */
273 			mod_timer(&np->rx_refill_timer,
274 				  jiffies + (HZ/10));
275 			break;
276 		}
277 
278 		__skb_fill_page_desc(skb, 0, page, 0, 0);
279 		skb_shinfo(skb)->nr_frags = 1;
280 		__skb_queue_tail(&np->rx_batch, skb);
281 	}
282 
283 	/* Is the batch large enough to be worthwhile? */
284 	if (i < (np->rx_target/2)) {
285 		if (req_prod > np->rx.sring->req_prod)
286 			goto push;
287 		return;
288 	}
289 
290 	/* Adjust our fill target if we risked running out of buffers. */
291 	if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
292 	    ((np->rx_target *= 2) > np->rx_max_target))
293 		np->rx_target = np->rx_max_target;
294 
295  refill:
296 	for (i = 0; ; i++) {
297 		skb = __skb_dequeue(&np->rx_batch);
298 		if (skb == NULL)
299 			break;
300 
301 		skb->dev = dev;
302 
303 		id = xennet_rxidx(req_prod + i);
304 
305 		BUG_ON(np->rx_skbs[id]);
306 		np->rx_skbs[id] = skb;
307 
308 		ref = gnttab_claim_grant_reference(&np->gref_rx_head);
309 		BUG_ON((signed short)ref < 0);
310 		np->grant_rx_ref[id] = ref;
311 
312 		pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0]));
313 		vaddr = page_address(skb_frag_page(&skb_shinfo(skb)->frags[0]));
314 
315 		req = RING_GET_REQUEST(&np->rx, req_prod + i);
316 		gnttab_grant_foreign_access_ref(ref,
317 						np->xbdev->otherend_id,
318 						pfn_to_mfn(pfn),
319 						0);
320 
321 		req->id = id;
322 		req->gref = ref;
323 	}
324 
325 	wmb();		/* barrier so backend sees requests */
326 
327 	/* Above is a suitable barrier to ensure backend will see requests. */
328 	np->rx.req_prod_pvt = req_prod + i;
329  push:
330 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
331 	if (notify)
332 		notify_remote_via_irq(np->netdev->irq);
333 }
334 
335 static int xennet_open(struct net_device *dev)
336 {
337 	struct netfront_info *np = netdev_priv(dev);
338 
339 	napi_enable(&np->napi);
340 
341 	spin_lock_bh(&np->rx_lock);
342 	if (netif_carrier_ok(dev)) {
343 		xennet_alloc_rx_buffers(dev);
344 		np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
345 		if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
346 			napi_schedule(&np->napi);
347 	}
348 	spin_unlock_bh(&np->rx_lock);
349 
350 	netif_start_queue(dev);
351 
352 	return 0;
353 }
354 
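/*
 * Reclaim completed transmit buffers: walk the tx response ring, end each
 * foreign access grant, return the slot id to the freelist and free the
 * skb.  Called with tx_lock held and the carrier on.
 */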
355 static void xennet_tx_buf_gc(struct net_device *dev)
356 {
357 	RING_IDX cons, prod;
358 	unsigned short id;
359 	struct netfront_info *np = netdev_priv(dev);
360 	struct sk_buff *skb;
361 
362 	BUG_ON(!netif_carrier_ok(dev));
363 
364 	do {
365 		prod = np->tx.sring->rsp_prod;
366 		rmb(); /* Ensure we see responses up to 'rp'. */
367 
368 		for (cons = np->tx.rsp_cons; cons != prod; cons++) {
369 			struct xen_netif_tx_response *txrsp;
370 
371 			txrsp = RING_GET_RESPONSE(&np->tx, cons);
372 			if (txrsp->status == XEN_NETIF_RSP_NULL)
373 				continue;
374 
375 			id  = txrsp->id;
376 			skb = np->tx_skbs[id].skb;
377 			if (unlikely(gnttab_query_foreign_access(
378 				np->grant_tx_ref[id]) != 0)) {
379 				printk(KERN_ALERT "xennet_tx_buf_gc: warning "
380 				       "-- grant still in use by backend "
381 				       "domain.\n");
382 				BUG();
383 			}
384 			gnttab_end_foreign_access_ref(
385 				np->grant_tx_ref[id], GNTMAP_readonly);
386 			gnttab_release_grant_reference(
387 				&np->gref_tx_head, np->grant_tx_ref[id]);
388 			np->grant_tx_ref[id] = GRANT_INVALID_REF;
389 			add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id);
390 			dev_kfree_skb_irq(skb);
391 		}
392 
393 		np->tx.rsp_cons = prod;
394 
395 		/*
396 		 * Set a new event, then check for race with update of tx_cons.
397 		 * Note that it is essential to schedule a callback, no matter
398 		 * how few buffers are pending. Even if there is space in the
399 		 * transmit ring, higher layers may be blocked because too much
400 		 * data is outstanding: in such cases notification from Xen is
401 		 * likely to be the only kick that we'll get.
402 		 */
403 		np->tx.sring->rsp_event =
404 			prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
405 		mb();		/* update shared area */
406 	} while ((cons == prod) && (prod != np->tx.sring->rsp_prod));
407 
408 	xennet_maybe_wake_tx(dev);
409 }
410 
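/*
 * Emit additional tx requests for the rest of the linear data (when it
 * spans page boundaries) and for every page fragment, granting the
 * backend read-only access to each page.
 */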
411 static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
412 			      struct xen_netif_tx_request *tx)
413 {
414 	struct netfront_info *np = netdev_priv(dev);
415 	char *data = skb->data;
416 	unsigned long mfn;
417 	RING_IDX prod = np->tx.req_prod_pvt;
418 	int frags = skb_shinfo(skb)->nr_frags;
419 	unsigned int offset = offset_in_page(data);
420 	unsigned int len = skb_headlen(skb);
421 	unsigned int id;
422 	grant_ref_t ref;
423 	int i;
424 
425 	/* While the header overlaps a page boundary (including being
426 	   larger than a page), split it into page-sized chunks. */
427 	while (len > PAGE_SIZE - offset) {
428 		tx->size = PAGE_SIZE - offset;
429 		tx->flags |= XEN_NETTXF_more_data;
430 		len -= tx->size;
431 		data += tx->size;
432 		offset = 0;
433 
434 		id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
435 		np->tx_skbs[id].skb = skb_get(skb);
436 		tx = RING_GET_REQUEST(&np->tx, prod++);
437 		tx->id = id;
438 		ref = gnttab_claim_grant_reference(&np->gref_tx_head);
439 		BUG_ON((signed short)ref < 0);
440 
441 		mfn = virt_to_mfn(data);
442 		gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
443 						mfn, GNTMAP_readonly);
444 
445 		tx->gref = np->grant_tx_ref[id] = ref;
446 		tx->offset = offset;
447 		tx->size = len;
448 		tx->flags = 0;
449 	}
450 
451 	/* Grant backend access to each skb fragment page. */
452 	for (i = 0; i < frags; i++) {
453 		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
454 
455 		tx->flags |= XEN_NETTXF_more_data;
456 
457 		id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
458 		np->tx_skbs[id].skb = skb_get(skb);
459 		tx = RING_GET_REQUEST(&np->tx, prod++);
460 		tx->id = id;
461 		ref = gnttab_claim_grant_reference(&np->gref_tx_head);
462 		BUG_ON((signed short)ref < 0);
463 
464 		mfn = pfn_to_mfn(page_to_pfn(skb_frag_page(frag)));
465 		gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
466 						mfn, GNTMAP_readonly);
467 
468 		tx->gref = np->grant_tx_ref[id] = ref;
469 		tx->offset = frag->page_offset;
470 		tx->size = skb_frag_size(frag);
471 		tx->flags = 0;
472 	}
473 
474 	np->tx.req_prod_pvt = prod;
475 }
476 
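/*
 * Transmit one skb: grant the backend access to the linear data and all
 * fragments, add a GSO extra-info request if needed, then push the
 * requests and kick the backend when required.
 */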
477 static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
478 {
479 	unsigned short id;
480 	struct netfront_info *np = netdev_priv(dev);
481 	struct netfront_stats *stats = this_cpu_ptr(np->stats);
482 	struct xen_netif_tx_request *tx;
483 	struct xen_netif_extra_info *extra;
484 	char *data = skb->data;
485 	RING_IDX i;
486 	grant_ref_t ref;
487 	unsigned long mfn;
488 	int notify;
489 	int frags = skb_shinfo(skb)->nr_frags;
490 	unsigned int offset = offset_in_page(data);
491 	unsigned int len = skb_headlen(skb);
492 	unsigned long flags;
493 
494 	frags += DIV_ROUND_UP(offset + len, PAGE_SIZE);
495 	if (unlikely(frags > MAX_SKB_FRAGS + 1)) {
496 		printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n",
497 		       frags);
498 		dump_stack();
499 		goto drop;
500 	}
501 
502 	spin_lock_irqsave(&np->tx_lock, flags);
503 
504 	if (unlikely(!netif_carrier_ok(dev) ||
505 		     (frags > 1 && !xennet_can_sg(dev)) ||
506 		     netif_needs_gso(skb, netif_skb_features(skb)))) {
507 		spin_unlock_irqrestore(&np->tx_lock, flags);
508 		goto drop;
509 	}
510 
511 	i = np->tx.req_prod_pvt;
512 
513 	id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
514 	np->tx_skbs[id].skb = skb;
515 
516 	tx = RING_GET_REQUEST(&np->tx, i);
517 
518 	tx->id   = id;
519 	ref = gnttab_claim_grant_reference(&np->gref_tx_head);
520 	BUG_ON((signed short)ref < 0);
521 	mfn = virt_to_mfn(data);
522 	gnttab_grant_foreign_access_ref(
523 		ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
524 	tx->gref = np->grant_tx_ref[id] = ref;
525 	tx->offset = offset;
526 	tx->size = len;
527 	extra = NULL;
528 
529 	tx->flags = 0;
530 	if (skb->ip_summed == CHECKSUM_PARTIAL)
531 		/* local packet? */
532 		tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
533 	else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
534 		/* remote but checksummed. */
535 		tx->flags |= XEN_NETTXF_data_validated;
536 
537 	if (skb_shinfo(skb)->gso_size) {
538 		struct xen_netif_extra_info *gso;
539 
540 		gso = (struct xen_netif_extra_info *)
541 			RING_GET_REQUEST(&np->tx, ++i);
542 
543 		if (extra)
544 			extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
545 		else
546 			tx->flags |= XEN_NETTXF_extra_info;
547 
548 		gso->u.gso.size = skb_shinfo(skb)->gso_size;
549 		gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
550 		gso->u.gso.pad = 0;
551 		gso->u.gso.features = 0;
552 
553 		gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
554 		gso->flags = 0;
555 		extra = gso;
556 	}
557 
558 	np->tx.req_prod_pvt = i + 1;
559 
560 	xennet_make_frags(skb, dev, tx);
561 	tx->size = skb->len;
562 
563 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
564 	if (notify)
565 		notify_remote_via_irq(np->netdev->irq);
566 
567 	u64_stats_update_begin(&stats->syncp);
568 	stats->tx_bytes += skb->len;
569 	stats->tx_packets++;
570 	u64_stats_update_end(&stats->syncp);
571 
572 	/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
573 	xennet_tx_buf_gc(dev);
574 
575 	if (!netfront_tx_slot_available(np))
576 		netif_stop_queue(dev);
577 
578 	spin_unlock_irqrestore(&np->tx_lock, flags);
579 
580 	return NETDEV_TX_OK;
581 
582  drop:
583 	dev->stats.tx_dropped++;
584 	dev_kfree_skb(skb);
585 	return NETDEV_TX_OK;
586 }
587 
588 static int xennet_close(struct net_device *dev)
589 {
590 	struct netfront_info *np = netdev_priv(dev);
591 	netif_stop_queue(np->netdev);
592 	napi_disable(&np->napi);
593 	return 0;
594 }
595 
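/*
 * Give a receive buffer back to the ring: re-post the skb and its grant
 * reference in a fresh request slot.
 */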
596 static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb,
597 				grant_ref_t ref)
598 {
599 	int new = xennet_rxidx(np->rx.req_prod_pvt);
600 
601 	BUG_ON(np->rx_skbs[new]);
602 	np->rx_skbs[new] = skb;
603 	np->grant_rx_ref[new] = ref;
604 	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
605 	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
606 	np->rx.req_prod_pvt++;
607 }
608 
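/*
 * Consume the chain of extra-info slots following an rx response, copying
 * each valid entry into extras[].  The rx buffers occupying those slots
 * are recycled back onto the ring.
 */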
609 static int xennet_get_extras(struct netfront_info *np,
610 			     struct xen_netif_extra_info *extras,
611 			     RING_IDX rp)
612 
613 {
614 	struct xen_netif_extra_info *extra;
615 	struct device *dev = &np->netdev->dev;
616 	RING_IDX cons = np->rx.rsp_cons;
617 	int err = 0;
618 
619 	do {
620 		struct sk_buff *skb;
621 		grant_ref_t ref;
622 
623 		if (unlikely(cons + 1 == rp)) {
624 			if (net_ratelimit())
625 				dev_warn(dev, "Missing extra info\n");
626 			err = -EBADR;
627 			break;
628 		}
629 
630 		extra = (struct xen_netif_extra_info *)
631 			RING_GET_RESPONSE(&np->rx, ++cons);
632 
633 		if (unlikely(!extra->type ||
634 			     extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
635 			if (net_ratelimit())
636 				dev_warn(dev, "Invalid extra type: %d\n",
637 					extra->type);
638 			err = -EINVAL;
639 		} else {
640 			memcpy(&extras[extra->type - 1], extra,
641 			       sizeof(*extra));
642 		}
643 
644 		skb = xennet_get_rx_skb(np, cons);
645 		ref = xennet_get_rx_ref(np, cons);
646 		xennet_move_rx_slot(np, skb, ref);
647 	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
648 
649 	np->rx.rsp_cons = cons;
650 	return err;
651 }
652 
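/*
 * Gather all rx responses belonging to a single packet, ending the grants
 * and queueing the backing skbs on @list.  Returns 0 on success or a
 * negative errno if the responses are malformed or too numerous.
 */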
653 static int xennet_get_responses(struct netfront_info *np,
654 				struct netfront_rx_info *rinfo, RING_IDX rp,
655 				struct sk_buff_head *list)
656 {
657 	struct xen_netif_rx_response *rx = &rinfo->rx;
658 	struct xen_netif_extra_info *extras = rinfo->extras;
659 	struct device *dev = &np->netdev->dev;
660 	RING_IDX cons = np->rx.rsp_cons;
661 	struct sk_buff *skb = xennet_get_rx_skb(np, cons);
662 	grant_ref_t ref = xennet_get_rx_ref(np, cons);
663 	int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
664 	int frags = 1;
665 	int err = 0;
666 	unsigned long ret;
667 
668 	if (rx->flags & XEN_NETRXF_extra_info) {
669 		err = xennet_get_extras(np, extras, rp);
670 		cons = np->rx.rsp_cons;
671 	}
672 
673 	for (;;) {
674 		if (unlikely(rx->status < 0 ||
675 			     rx->offset + rx->status > PAGE_SIZE)) {
676 			if (net_ratelimit())
677 				dev_warn(dev, "rx->offset: %x, size: %u\n",
678 					 rx->offset, rx->status);
679 			xennet_move_rx_slot(np, skb, ref);
680 			err = -EINVAL;
681 			goto next;
682 		}
683 
684 		/*
685 		 * This definitely indicates a bug, either in this driver or in
686 		 * the backend driver. In the future this should flag the bad
687 		 * situation to the system controller to reboot the backend.
688 		 */
689 		if (ref == GRANT_INVALID_REF) {
690 			if (net_ratelimit())
691 				dev_warn(dev, "Bad rx response id %d.\n",
692 					 rx->id);
693 			err = -EINVAL;
694 			goto next;
695 		}
696 
697 		ret = gnttab_end_foreign_access_ref(ref, 0);
698 		BUG_ON(!ret);
699 
700 		gnttab_release_grant_reference(&np->gref_rx_head, ref);
701 
702 		__skb_queue_tail(list, skb);
703 
704 next:
705 		if (!(rx->flags & XEN_NETRXF_more_data))
706 			break;
707 
708 		if (cons + frags == rp) {
709 			if (net_ratelimit())
710 				dev_warn(dev, "Need more frags\n");
711 			err = -ENOENT;
712 			break;
713 		}
714 
715 		rx = RING_GET_RESPONSE(&np->rx, cons + frags);
716 		skb = xennet_get_rx_skb(np, cons + frags);
717 		ref = xennet_get_rx_ref(np, cons + frags);
718 		frags++;
719 	}
720 
721 	if (unlikely(frags > max)) {
722 		if (net_ratelimit())
723 			dev_warn(dev, "Too many frags\n");
724 		err = -E2BIG;
725 	}
726 
727 	if (unlikely(err))
728 		np->rx.rsp_cons = cons + frags;
729 
730 	return err;
731 }
732 
733 static int xennet_set_skb_gso(struct sk_buff *skb,
734 			      struct xen_netif_extra_info *gso)
735 {
736 	if (!gso->u.gso.size) {
737 		if (net_ratelimit())
738 			printk(KERN_WARNING "GSO size must not be zero.\n");
739 		return -EINVAL;
740 	}
741 
742 	/* Currently only TCPv4 segmentation offload is supported. */
743 	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
744 		if (net_ratelimit())
745 			printk(KERN_WARNING "Bad GSO type %d.\n", gso->u.gso.type);
746 		return -EINVAL;
747 	}
748 
749 	skb_shinfo(skb)->gso_size = gso->u.gso.size;
750 	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
751 
752 	/* Header must be checked, and gso_segs computed. */
753 	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
754 	skb_shinfo(skb)->gso_segs = 0;
755 
756 	return 0;
757 }
758 
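/*
 * Attach the buffers queued by xennet_get_responses() as page fragments
 * of @skb and return the index of the last rx response consumed.
 */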
759 static RING_IDX xennet_fill_frags(struct netfront_info *np,
760 				  struct sk_buff *skb,
761 				  struct sk_buff_head *list)
762 {
763 	struct skb_shared_info *shinfo = skb_shinfo(skb);
764 	int nr_frags = shinfo->nr_frags;
765 	RING_IDX cons = np->rx.rsp_cons;
766 	struct sk_buff *nskb;
767 
768 	while ((nskb = __skb_dequeue(list))) {
769 		struct xen_netif_rx_response *rx =
770 			RING_GET_RESPONSE(&np->rx, ++cons);
771 		skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
772 
773 		__skb_fill_page_desc(skb, nr_frags,
774 				     skb_frag_page(nfrag),
775 				     rx->offset, rx->status);
776 
777 		skb->data_len += rx->status;
778 
779 		skb_shinfo(nskb)->nr_frags = 0;
780 		kfree_skb(nskb);
781 
782 		nr_frags++;
783 	}
784 
785 	shinfo->nr_frags = nr_frags;
786 	return cons;
787 }
788 
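/*
 * Set up csum_start/csum_offset for a CHECKSUM_PARTIAL skb, recomputing
 * the pseudo-header checksum when a buggy backend sent a GSO frame
 * without NETRXF_csum_blank.  Only IPv4 TCP and UDP are handled.
 */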
789 static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
790 {
791 	struct iphdr *iph;
792 	unsigned char *th;
793 	int err = -EPROTO;
794 	int recalculate_partial_csum = 0;
795 
796 	/*
797 	 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
798 	 * peers can fail to set NETRXF_csum_blank when sending a GSO
799 	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
800 	 * recalculate the partial checksum.
801 	 */
802 	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
803 		struct netfront_info *np = netdev_priv(dev);
804 		np->rx_gso_checksum_fixup++;
805 		skb->ip_summed = CHECKSUM_PARTIAL;
806 		recalculate_partial_csum = 1;
807 	}
808 
809 	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
810 	if (skb->ip_summed != CHECKSUM_PARTIAL)
811 		return 0;
812 
813 	if (skb->protocol != htons(ETH_P_IP))
814 		goto out;
815 
816 	iph = (void *)skb->data;
817 	th = skb->data + 4 * iph->ihl;
818 	if (th >= skb_tail_pointer(skb))
819 		goto out;
820 
821 	skb->csum_start = th - skb->head;
822 	switch (iph->protocol) {
823 	case IPPROTO_TCP:
824 		skb->csum_offset = offsetof(struct tcphdr, check);
825 
826 		if (recalculate_partial_csum) {
827 			struct tcphdr *tcph = (struct tcphdr *)th;
828 			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
829 							 skb->len - iph->ihl*4,
830 							 IPPROTO_TCP, 0);
831 		}
832 		break;
833 	case IPPROTO_UDP:
834 		skb->csum_offset = offsetof(struct udphdr, check);
835 
836 		if (recalculate_partial_csum) {
837 			struct udphdr *udph = (struct udphdr *)th;
838 			udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
839 							 skb->len - iph->ihl*4,
840 							 IPPROTO_UDP, 0);
841 		}
842 		break;
843 	default:
844 		if (net_ratelimit())
845 			printk(KERN_ERR "Attempting to checksum a non-"
846 			       "TCP/UDP packet, dropping a protocol"
847 			       " %d packet\n", iph->protocol);
848 		goto out;
849 	}
850 
851 	if ((th + skb->csum_offset + 2) > skb_tail_pointer(skb))
852 		goto out;
853 
854 	err = 0;
855 
856 out:
857 	return err;
858 }
859 
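/*
 * Deliver completed skbs to the network stack, copying the packet headers
 * out of the first fragment page.  Returns the number of packets dropped
 * because checksum setup failed.
 */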
860 static int handle_incoming_queue(struct net_device *dev,
861 				 struct sk_buff_head *rxq)
862 {
863 	struct netfront_info *np = netdev_priv(dev);
864 	struct netfront_stats *stats = this_cpu_ptr(np->stats);
865 	int packets_dropped = 0;
866 	struct sk_buff *skb;
867 
868 	while ((skb = __skb_dequeue(rxq)) != NULL) {
869 		struct page *page = NETFRONT_SKB_CB(skb)->page;
870 		void *vaddr = page_address(page);
871 		unsigned offset = NETFRONT_SKB_CB(skb)->offset;
872 
873 		memcpy(skb->data, vaddr + offset,
874 		       skb_headlen(skb));
875 
876 		if (page != skb_frag_page(&skb_shinfo(skb)->frags[0]))
877 			__free_page(page);
878 
879 		/* Ethernet work: delayed to here as it peeks at the header. */
880 		skb->protocol = eth_type_trans(skb, dev);
881 
882 		if (checksum_setup(dev, skb)) {
883 			kfree_skb(skb);
884 			packets_dropped++;
885 			dev->stats.rx_errors++;
886 			continue;
887 		}
888 
889 		u64_stats_update_begin(&stats->syncp);
890 		stats->rx_packets++;
891 		stats->rx_bytes += skb->len;
892 		u64_stats_update_end(&stats->syncp);
893 
894 		/* Pass it up. */
895 		netif_receive_skb(skb);
896 	}
897 
898 	return packets_dropped;
899 }
900 
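/*
 * NAPI poll handler: consume up to @budget received packets, reassemble
 * fragmented ones, pass them up the stack and refill the rx ring.
 */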
901 static int xennet_poll(struct napi_struct *napi, int budget)
902 {
903 	struct netfront_info *np = container_of(napi, struct netfront_info, napi);
904 	struct net_device *dev = np->netdev;
905 	struct sk_buff *skb;
906 	struct netfront_rx_info rinfo;
907 	struct xen_netif_rx_response *rx = &rinfo.rx;
908 	struct xen_netif_extra_info *extras = rinfo.extras;
909 	RING_IDX i, rp;
910 	int work_done;
911 	struct sk_buff_head rxq;
912 	struct sk_buff_head errq;
913 	struct sk_buff_head tmpq;
914 	unsigned long flags;
915 	unsigned int len;
916 	int err;
917 
918 	spin_lock(&np->rx_lock);
919 
920 	skb_queue_head_init(&rxq);
921 	skb_queue_head_init(&errq);
922 	skb_queue_head_init(&tmpq);
923 
924 	rp = np->rx.sring->rsp_prod;
925 	rmb(); /* Ensure we see queued responses up to 'rp'. */
926 
927 	i = np->rx.rsp_cons;
928 	work_done = 0;
929 	while ((i != rp) && (work_done < budget)) {
930 		memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
931 		memset(extras, 0, sizeof(rinfo.extras));
932 
933 		err = xennet_get_responses(np, &rinfo, rp, &tmpq);
934 
935 		if (unlikely(err)) {
936 err:
937 			while ((skb = __skb_dequeue(&tmpq)))
938 				__skb_queue_tail(&errq, skb);
939 			dev->stats.rx_errors++;
940 			i = np->rx.rsp_cons;
941 			continue;
942 		}
943 
944 		skb = __skb_dequeue(&tmpq);
945 
946 		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
947 			struct xen_netif_extra_info *gso;
948 			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
949 
950 			if (unlikely(xennet_set_skb_gso(skb, gso))) {
951 				__skb_queue_head(&tmpq, skb);
952 				np->rx.rsp_cons += skb_queue_len(&tmpq);
953 				goto err;
954 			}
955 		}
956 
957 		NETFRONT_SKB_CB(skb)->page =
958 			skb_frag_page(&skb_shinfo(skb)->frags[0]);
959 		NETFRONT_SKB_CB(skb)->offset = rx->offset;
960 
961 		len = rx->status;
962 		if (len > RX_COPY_THRESHOLD)
963 			len = RX_COPY_THRESHOLD;
964 		skb_put(skb, len);
965 
966 		if (rx->status > len) {
967 			skb_shinfo(skb)->frags[0].page_offset =
968 				rx->offset + len;
969 			skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status - len);
970 			skb->data_len = rx->status - len;
971 		} else {
972 			__skb_fill_page_desc(skb, 0, NULL, 0, 0);
973 			skb_shinfo(skb)->nr_frags = 0;
974 		}
975 
976 		i = xennet_fill_frags(np, skb, &tmpq);
977 
978 		/*
979 		 * Truesize approximates the size of true data plus
980 		 * any supervisor overheads. Adding hypervisor
981 		 * overheads has been shown to significantly reduce
982 		 * achievable bandwidth with the default receive
983 		 * buffer size. It is therefore not wise to account
984 		 * for it here.
985 		 *
986 		 * After alloc_skb(RX_COPY_THRESHOLD), truesize is set
987 		 * to RX_COPY_THRESHOLD + the supervisor
988 		 * overheads. Here, we add the size of the data pulled
989 		 * in xennet_fill_frags().
990 		 *
991 		 * We also adjust for any unused space in the main
992 		 * data area by subtracting (RX_COPY_THRESHOLD -
993 		 * len). This is especially important with drivers
994 		 * which split incoming packets into header and data,
995 		 * using only 66 bytes of the main data area (see the
996 		 * e1000 driver for example.)  On such systems,
997 		 * without this last adjustment, our achievable
998 		 * receive throughput using the standard receive
999 		 * buffer size was cut by 25%(!!!).
1000 		 */
1001 		skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len);
1002 		skb->len += skb->data_len;
1003 
1004 		if (rx->flags & XEN_NETRXF_csum_blank)
1005 			skb->ip_summed = CHECKSUM_PARTIAL;
1006 		else if (rx->flags & XEN_NETRXF_data_validated)
1007 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1008 
1009 		__skb_queue_tail(&rxq, skb);
1010 
1011 		np->rx.rsp_cons = ++i;
1012 		work_done++;
1013 	}
1014 
1015 	__skb_queue_purge(&errq);
1016 
1017 	work_done -= handle_incoming_queue(dev, &rxq);
1018 
1019 	/* If we get a callback with very few responses, reduce fill target. */
1020 	/* NB. Note exponential increase, linear decrease. */
1021 	if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
1022 	     ((3*np->rx_target) / 4)) &&
1023 	    (--np->rx_target < np->rx_min_target))
1024 		np->rx_target = np->rx_min_target;
1025 
1026 	xennet_alloc_rx_buffers(dev);
1027 
1028 	if (work_done < budget) {
1029 		int more_to_do = 0;
1030 
1031 		local_irq_save(flags);
1032 
1033 		RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
1034 		if (!more_to_do)
1035 			__napi_complete(napi);
1036 
1037 		local_irq_restore(flags);
1038 	}
1039 
1040 	spin_unlock(&np->rx_lock);
1041 
1042 	return work_done;
1043 }
1044 
1045 static int xennet_change_mtu(struct net_device *dev, int mtu)
1046 {
1047 	int max = xennet_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN;
1048 
1049 	if (mtu > max)
1050 		return -EINVAL;
1051 	dev->mtu = mtu;
1052 	return 0;
1053 }
1054 
1055 static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
1056 						    struct rtnl_link_stats64 *tot)
1057 {
1058 	struct netfront_info *np = netdev_priv(dev);
1059 	int cpu;
1060 
1061 	for_each_possible_cpu(cpu) {
1062 		struct netfront_stats *stats = per_cpu_ptr(np->stats, cpu);
1063 		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
1064 		unsigned int start;
1065 
1066 		do {
1067 			start = u64_stats_fetch_begin_bh(&stats->syncp);
1068 
1069 			rx_packets = stats->rx_packets;
1070 			tx_packets = stats->tx_packets;
1071 			rx_bytes = stats->rx_bytes;
1072 			tx_bytes = stats->tx_bytes;
1073 		} while (u64_stats_fetch_retry_bh(&stats->syncp, start));
1074 
1075 		tot->rx_packets += rx_packets;
1076 		tot->tx_packets += tx_packets;
1077 		tot->rx_bytes   += rx_bytes;
1078 		tot->tx_bytes   += tx_bytes;
1079 	}
1080 
1081 	tot->rx_errors  = dev->stats.rx_errors;
1082 	tot->tx_dropped = dev->stats.tx_dropped;
1083 
1084 	return tot;
1085 }
1086 
1087 static void xennet_release_tx_bufs(struct netfront_info *np)
1088 {
1089 	struct sk_buff *skb;
1090 	int i;
1091 
1092 	for (i = 0; i < NET_TX_RING_SIZE; i++) {
1093 		/* Skip over entries which are actually freelist references */
1094 		if (skb_entry_is_link(&np->tx_skbs[i]))
1095 			continue;
1096 
1097 		skb = np->tx_skbs[i].skb;
1098 		gnttab_end_foreign_access_ref(np->grant_tx_ref[i],
1099 					      GNTMAP_readonly);
1100 		gnttab_release_grant_reference(&np->gref_tx_head,
1101 					       np->grant_tx_ref[i]);
1102 		np->grant_tx_ref[i] = GRANT_INVALID_REF;
1103 		add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i);
1104 		dev_kfree_skb_irq(skb);
1105 	}
1106 }
1107 
1108 static void xennet_release_rx_bufs(struct netfront_info *np)
1109 {
1110 	struct mmu_update      *mmu = np->rx_mmu;
1111 	struct multicall_entry *mcl = np->rx_mcl;
1112 	struct sk_buff_head free_list;
1113 	struct sk_buff *skb;
1114 	unsigned long mfn;
1115 	int xfer = 0, noxfer = 0, unused = 0;
1116 	int id, ref;
1117 
1118 	dev_warn(&np->netdev->dev, "%s: fix me for copying receiver.\n",
1119 			 __func__);
1120 	return;
1121 
1122 	skb_queue_head_init(&free_list);
1123 
1124 	spin_lock_bh(&np->rx_lock);
1125 
1126 	for (id = 0; id < NET_RX_RING_SIZE; id++) {
1127 		ref = np->grant_rx_ref[id];
1128 		if (ref == GRANT_INVALID_REF) {
1129 			unused++;
1130 			continue;
1131 		}
1132 
1133 		skb = np->rx_skbs[id];
1134 		mfn = gnttab_end_foreign_transfer_ref(ref);
1135 		gnttab_release_grant_reference(&np->gref_rx_head, ref);
1136 		np->grant_rx_ref[id] = GRANT_INVALID_REF;
1137 
1138 		if (0 == mfn) {
1139 			skb_shinfo(skb)->nr_frags = 0;
1140 			dev_kfree_skb(skb);
1141 			noxfer++;
1142 			continue;
1143 		}
1144 
1145 		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
1146 			/* Remap the page. */
1147 			const struct page *page =
1148 				skb_frag_page(&skb_shinfo(skb)->frags[0]);
1149 			unsigned long pfn = page_to_pfn(page);
1150 			void *vaddr = page_address(page);
1151 
1152 			MULTI_update_va_mapping(mcl, (unsigned long)vaddr,
1153 						mfn_pte(mfn, PAGE_KERNEL),
1154 						0);
1155 			mcl++;
1156 			mmu->ptr = ((u64)mfn << PAGE_SHIFT)
1157 				| MMU_MACHPHYS_UPDATE;
1158 			mmu->val = pfn;
1159 			mmu++;
1160 
1161 			set_phys_to_machine(pfn, mfn);
1162 		}
1163 		__skb_queue_tail(&free_list, skb);
1164 		xfer++;
1165 	}
1166 
1167 	dev_info(&np->netdev->dev, "%s: %d xfer, %d noxfer, %d unused\n",
1168 		 __func__, xfer, noxfer, unused);
1169 
1170 	if (xfer) {
1171 		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
1172 			/* Do all the remapping work and M2P updates. */
1173 			MULTI_mmu_update(mcl, np->rx_mmu, mmu - np->rx_mmu,
1174 					 NULL, DOMID_SELF);
1175 			mcl++;
1176 			HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl);
1177 		}
1178 	}
1179 
1180 	__skb_queue_purge(&free_list);
1181 
1182 	spin_unlock_bh(&np->rx_lock);
1183 }
1184 
1185 static void xennet_uninit(struct net_device *dev)
1186 {
1187 	struct netfront_info *np = netdev_priv(dev);
1188 	xennet_release_tx_bufs(np);
1189 	xennet_release_rx_bufs(np);
1190 	gnttab_free_grant_references(np->gref_tx_head);
1191 	gnttab_free_grant_references(np->gref_rx_head);
1192 }
1193 
1194 static netdev_features_t xennet_fix_features(struct net_device *dev,
1195 	netdev_features_t features)
1196 {
1197 	struct netfront_info *np = netdev_priv(dev);
1198 	int val;
1199 
1200 	if (features & NETIF_F_SG) {
1201 		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
1202 				 "%d", &val) < 0)
1203 			val = 0;
1204 
1205 		if (!val)
1206 			features &= ~NETIF_F_SG;
1207 	}
1208 
1209 	if (features & NETIF_F_TSO) {
1210 		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
1211 				 "feature-gso-tcpv4", "%d", &val) < 0)
1212 			val = 0;
1213 
1214 		if (!val)
1215 			features &= ~NETIF_F_TSO;
1216 	}
1217 
1218 	return features;
1219 }
1220 
1221 static int xennet_set_features(struct net_device *dev,
1222 	netdev_features_t features)
1223 {
1224 	if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
1225 		netdev_info(dev, "Reducing MTU because no SG offload");
1226 		dev->mtu = ETH_DATA_LEN;
1227 	}
1228 
1229 	return 0;
1230 }
1231 
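/*
 * Event-channel interrupt handler: reap completed tx requests and
 * schedule NAPI if rx responses are pending.
 */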
1232 static irqreturn_t xennet_interrupt(int irq, void *dev_id)
1233 {
1234 	struct net_device *dev = dev_id;
1235 	struct netfront_info *np = netdev_priv(dev);
1236 	unsigned long flags;
1237 
1238 	spin_lock_irqsave(&np->tx_lock, flags);
1239 
1240 	if (likely(netif_carrier_ok(dev))) {
1241 		xennet_tx_buf_gc(dev);
1242 		/* Under tx_lock: protects access to rx shared-ring indexes. */
1243 		if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
1244 			napi_schedule(&np->napi);
1245 	}
1246 
1247 	spin_unlock_irqrestore(&np->tx_lock, flags);
1248 
1249 	return IRQ_HANDLED;
1250 }
1251 
1252 #ifdef CONFIG_NET_POLL_CONTROLLER
1253 static void xennet_poll_controller(struct net_device *dev)
1254 {
1255 	xennet_interrupt(0, dev);
1256 }
1257 #endif
1258 
1259 static const struct net_device_ops xennet_netdev_ops = {
1260 	.ndo_open            = xennet_open,
1261 	.ndo_uninit          = xennet_uninit,
1262 	.ndo_stop            = xennet_close,
1263 	.ndo_start_xmit      = xennet_start_xmit,
1264 	.ndo_change_mtu	     = xennet_change_mtu,
1265 	.ndo_get_stats64     = xennet_get_stats64,
1266 	.ndo_set_mac_address = eth_mac_addr,
1267 	.ndo_validate_addr   = eth_validate_addr,
1268 	.ndo_fix_features    = xennet_fix_features,
1269 	.ndo_set_features    = xennet_set_features,
1270 #ifdef CONFIG_NET_POLL_CONTROLLER
1271 	.ndo_poll_controller = xennet_poll_controller,
1272 #endif
1273 };
1274 
1275 static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev)
1276 {
1277 	int i, err;
1278 	struct net_device *netdev;
1279 	struct netfront_info *np;
1280 
1281 	netdev = alloc_etherdev(sizeof(struct netfront_info));
1282 	if (!netdev)
1283 		return ERR_PTR(-ENOMEM);
1284 
1285 	np                   = netdev_priv(netdev);
1286 	np->xbdev            = dev;
1287 
1288 	spin_lock_init(&np->tx_lock);
1289 	spin_lock_init(&np->rx_lock);
1290 
1291 	skb_queue_head_init(&np->rx_batch);
1292 	np->rx_target     = RX_DFL_MIN_TARGET;
1293 	np->rx_min_target = RX_DFL_MIN_TARGET;
1294 	np->rx_max_target = RX_MAX_TARGET;
1295 
1296 	init_timer(&np->rx_refill_timer);
1297 	np->rx_refill_timer.data = (unsigned long)netdev;
1298 	np->rx_refill_timer.function = rx_refill_timeout;
1299 
1300 	err = -ENOMEM;
1301 	np->stats = alloc_percpu(struct netfront_stats);
1302 	if (np->stats == NULL)
1303 		goto exit;
1304 
1305 	/* Initialise tx_skbs as a free chain containing every entry. */
1306 	np->tx_skb_freelist = 0;
1307 	for (i = 0; i < NET_TX_RING_SIZE; i++) {
1308 		skb_entry_set_link(&np->tx_skbs[i], i+1);
1309 		np->grant_tx_ref[i] = GRANT_INVALID_REF;
1310 	}
1311 
1312 	/* Clear out rx_skbs */
1313 	for (i = 0; i < NET_RX_RING_SIZE; i++) {
1314 		np->rx_skbs[i] = NULL;
1315 		np->grant_rx_ref[i] = GRANT_INVALID_REF;
1316 	}
1317 
1318 	/* A grant for every tx ring slot */
1319 	if (gnttab_alloc_grant_references(TX_MAX_TARGET,
1320 					  &np->gref_tx_head) < 0) {
1321 		printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n");
1322 		err = -ENOMEM;
1323 		goto exit_free_stats;
1324 	}
1325 	/* A grant for every rx ring slot */
1326 	if (gnttab_alloc_grant_references(RX_MAX_TARGET,
1327 					  &np->gref_rx_head) < 0) {
1328 		printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n");
1329 		err = -ENOMEM;
1330 		goto exit_free_tx;
1331 	}
1332 
1333 	netdev->netdev_ops	= &xennet_netdev_ops;
1334 
1335 	netif_napi_add(netdev, &np->napi, xennet_poll, 64);
1336 	netdev->features        = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
1337 				  NETIF_F_GSO_ROBUST;
1338 	netdev->hw_features	= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO;
1339 
1340 	/*
1341 	 * Assume that all hw features are available for now. This set
1342 	 * will be adjusted by the call to netdev_update_features() in
1343 	 * xennet_connect() which is the earliest point where we can
1344 	 * negotiate with the backend regarding supported features.
1345 	 */
1346 	netdev->features |= netdev->hw_features;
1347 
1348 	SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops);
1349 	SET_NETDEV_DEV(netdev, &dev->dev);
1350 
1351 	np->netdev = netdev;
1352 
1353 	netif_carrier_off(netdev);
1354 
1355 	return netdev;
1356 
1357  exit_free_tx:
1358 	gnttab_free_grant_references(np->gref_tx_head);
1359  exit_free_stats:
1360 	free_percpu(np->stats);
1361  exit:
1362 	free_netdev(netdev);
1363 	return ERR_PTR(err);
1364 }
1365 
1366 /**
1367  * Entry point to this code when a new device is created.  Allocate the basic
1368  * structures and the ring buffers for communication with the backend, and
1369  * inform the backend of the appropriate details for those.
1370  */
1371 static int __devinit netfront_probe(struct xenbus_device *dev,
1372 				    const struct xenbus_device_id *id)
1373 {
1374 	int err;
1375 	struct net_device *netdev;
1376 	struct netfront_info *info;
1377 
1378 	netdev = xennet_create_dev(dev);
1379 	if (IS_ERR(netdev)) {
1380 		err = PTR_ERR(netdev);
1381 		xenbus_dev_fatal(dev, err, "creating netdev");
1382 		return err;
1383 	}
1384 
1385 	info = netdev_priv(netdev);
1386 	dev_set_drvdata(&dev->dev, info);
1387 
1388 	err = register_netdev(info->netdev);
1389 	if (err) {
1390 		printk(KERN_WARNING "%s: register_netdev err=%d\n",
1391 		       __func__, err);
1392 		goto fail;
1393 	}
1394 
1395 	err = xennet_sysfs_addif(info->netdev);
1396 	if (err) {
1397 		unregister_netdev(info->netdev);
1398 		printk(KERN_WARNING "%s: add sysfs failed err=%d\n",
1399 		       __func__, err);
1400 		goto fail;
1401 	}
1402 
1403 	return 0;
1404 
1405  fail:
1406 	free_netdev(netdev);
1407 	dev_set_drvdata(&dev->dev, NULL);
1408 	return err;
1409 }
1410 
1411 static void xennet_end_access(int ref, void *page)
1412 {
1413 	/* This frees the page as a side-effect */
1414 	if (ref != GRANT_INVALID_REF)
1415 		gnttab_end_foreign_access(ref, 0, (unsigned long)page);
1416 }
1417 
1418 static void xennet_disconnect_backend(struct netfront_info *info)
1419 {
1420 	/* Stop the old interface to prevent errors whilst we rebuild the state. */
1421 	spin_lock_bh(&info->rx_lock);
1422 	spin_lock_irq(&info->tx_lock);
1423 	netif_carrier_off(info->netdev);
1424 	spin_unlock_irq(&info->tx_lock);
1425 	spin_unlock_bh(&info->rx_lock);
1426 
1427 	if (info->netdev->irq)
1428 		unbind_from_irqhandler(info->netdev->irq, info->netdev);
1429 	info->evtchn = info->netdev->irq = 0;
1430 
1431 	/* End access and free the pages */
1432 	xennet_end_access(info->tx_ring_ref, info->tx.sring);
1433 	xennet_end_access(info->rx_ring_ref, info->rx.sring);
1434 
1435 	info->tx_ring_ref = GRANT_INVALID_REF;
1436 	info->rx_ring_ref = GRANT_INVALID_REF;
1437 	info->tx.sring = NULL;
1438 	info->rx.sring = NULL;
1439 }
1440 
1441 /**
1442  * We are reconnecting to the backend, due to a suspend/resume, or a backend
1443  * driver restart.  We tear down our netif structure and recreate it, but
1444  * leave the device-layer structures intact so that this is transparent to the
1445  * rest of the kernel.
1446  */
1447 static int netfront_resume(struct xenbus_device *dev)
1448 {
1449 	struct netfront_info *info = dev_get_drvdata(&dev->dev);
1450 
1451 	dev_dbg(&dev->dev, "%s\n", dev->nodename);
1452 
1453 	xennet_disconnect_backend(info);
1454 	return 0;
1455 }
1456 
1457 static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
1458 {
1459 	char *s, *e, *macstr;
1460 	int i;
1461 
1462 	macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
1463 	if (IS_ERR(macstr))
1464 		return PTR_ERR(macstr);
1465 
1466 	for (i = 0; i < ETH_ALEN; i++) {
1467 		mac[i] = simple_strtoul(s, &e, 16);
1468 		if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
1469 			kfree(macstr);
1470 			return -ENOENT;
1471 		}
1472 		s = e+1;
1473 	}
1474 
1475 	kfree(macstr);
1476 	return 0;
1477 }
1478 
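/*
 * Read the MAC address from xenstore, allocate and grant the shared tx/rx
 * rings, and bind a fresh event channel to xennet_interrupt.
 */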
1479 static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
1480 {
1481 	struct xen_netif_tx_sring *txs;
1482 	struct xen_netif_rx_sring *rxs;
1483 	int err;
1484 	struct net_device *netdev = info->netdev;
1485 
1486 	info->tx_ring_ref = GRANT_INVALID_REF;
1487 	info->rx_ring_ref = GRANT_INVALID_REF;
1488 	info->rx.sring = NULL;
1489 	info->tx.sring = NULL;
1490 	netdev->irq = 0;
1491 
1492 	err = xen_net_read_mac(dev, netdev->dev_addr);
1493 	if (err) {
1494 		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
1495 		goto fail;
1496 	}
1497 
1498 	txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
1499 	if (!txs) {
1500 		err = -ENOMEM;
1501 		xenbus_dev_fatal(dev, err, "allocating tx ring page");
1502 		goto fail;
1503 	}
1504 	SHARED_RING_INIT(txs);
1505 	FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
1506 
1507 	err = xenbus_grant_ring(dev, virt_to_mfn(txs));
1508 	if (err < 0) {
1509 		free_page((unsigned long)txs);
1510 		goto fail;
1511 	}
1512 
1513 	info->tx_ring_ref = err;
1514 	rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
1515 	if (!rxs) {
1516 		err = -ENOMEM;
1517 		xenbus_dev_fatal(dev, err, "allocating rx ring page");
1518 		goto fail;
1519 	}
1520 	SHARED_RING_INIT(rxs);
1521 	FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);
1522 
1523 	err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
1524 	if (err < 0) {
1525 		free_page((unsigned long)rxs);
1526 		goto fail;
1527 	}
1528 	info->rx_ring_ref = err;
1529 
1530 	err = xenbus_alloc_evtchn(dev, &info->evtchn);
1531 	if (err)
1532 		goto fail;
1533 
1534 	err = bind_evtchn_to_irqhandler(info->evtchn, xennet_interrupt,
1535 					0, netdev->name, netdev);
1536 	if (err < 0)
1537 		goto fail;
1538 	netdev->irq = err;
1539 	return 0;
1540 
1541  fail:
1542 	return err;
1543 }
1544 
1545 /* Common code used when first setting up, and when resuming. */
1546 static int talk_to_netback(struct xenbus_device *dev,
1547 			   struct netfront_info *info)
1548 {
1549 	const char *message;
1550 	struct xenbus_transaction xbt;
1551 	int err;
1552 
1553 	/* Create shared ring, alloc event channel. */
1554 	err = setup_netfront(dev, info);
1555 	if (err)
1556 		goto out;
1557 
1558 again:
1559 	err = xenbus_transaction_start(&xbt);
1560 	if (err) {
1561 		xenbus_dev_fatal(dev, err, "starting transaction");
1562 		goto destroy_ring;
1563 	}
1564 
1565 	err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref", "%u",
1566 			    info->tx_ring_ref);
1567 	if (err) {
1568 		message = "writing tx ring-ref";
1569 		goto abort_transaction;
1570 	}
1571 	err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref", "%u",
1572 			    info->rx_ring_ref);
1573 	if (err) {
1574 		message = "writing rx ring-ref";
1575 		goto abort_transaction;
1576 	}
1577 	err = xenbus_printf(xbt, dev->nodename,
1578 			    "event-channel", "%u", info->evtchn);
1579 	if (err) {
1580 		message = "writing event-channel";
1581 		goto abort_transaction;
1582 	}
1583 
1584 	err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
1585 			    1);
1586 	if (err) {
1587 		message = "writing request-rx-copy";
1588 		goto abort_transaction;
1589 	}
1590 
1591 	err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
1592 	if (err) {
1593 		message = "writing feature-rx-notify";
1594 		goto abort_transaction;
1595 	}
1596 
1597 	err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
1598 	if (err) {
1599 		message = "writing feature-sg";
1600 		goto abort_transaction;
1601 	}
1602 
1603 	err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
1604 	if (err) {
1605 		message = "writing feature-gso-tcpv4";
1606 		goto abort_transaction;
1607 	}
1608 
1609 	err = xenbus_transaction_end(xbt, 0);
1610 	if (err) {
1611 		if (err == -EAGAIN)
1612 			goto again;
1613 		xenbus_dev_fatal(dev, err, "completing transaction");
1614 		goto destroy_ring;
1615 	}
1616 
1617 	return 0;
1618 
1619  abort_transaction:
1620 	xenbus_transaction_end(xbt, 1);
1621 	xenbus_dev_fatal(dev, err, "%s", message);
1622  destroy_ring:
1623 	xennet_disconnect_backend(info);
1624  out:
1625 	return err;
1626 }
1627 
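/*
 * Bring the interface up against a (possibly new) backend: negotiate
 * features, rebuild the rx ring from any skbs that survived the
 * disconnect, and restart the tx and rx paths.
 */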
1628 static int xennet_connect(struct net_device *dev)
1629 {
1630 	struct netfront_info *np = netdev_priv(dev);
1631 	int i, requeue_idx, err;
1632 	struct sk_buff *skb;
1633 	grant_ref_t ref;
1634 	struct xen_netif_rx_request *req;
1635 	unsigned int feature_rx_copy;
1636 
1637 	err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
1638 			   "feature-rx-copy", "%u", &feature_rx_copy);
1639 	if (err != 1)
1640 		feature_rx_copy = 0;
1641 
1642 	if (!feature_rx_copy) {
1643 		dev_info(&dev->dev,
1644 			 "backend does not support copying receive path\n");
1645 		return -ENODEV;
1646 	}
1647 
1648 	err = talk_to_netback(np->xbdev, np);
1649 	if (err)
1650 		return err;
1651 
1652 	rtnl_lock();
1653 	netdev_update_features(dev);
1654 	rtnl_unlock();
1655 
1656 	spin_lock_bh(&np->rx_lock);
1657 	spin_lock_irq(&np->tx_lock);
1658 
1659 	/* Step 1: Discard all pending TX packet fragments. */
1660 	xennet_release_tx_bufs(np);
1661 
1662 	/* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
1663 	for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
1664 		skb_frag_t *frag;
1665 		const struct page *page;
1666 		if (!np->rx_skbs[i])
1667 			continue;
1668 
1669 		skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i);
1670 		ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);
1671 		req = RING_GET_REQUEST(&np->rx, requeue_idx);
1672 
1673 		frag = &skb_shinfo(skb)->frags[0];
1674 		page = skb_frag_page(frag);
1675 		gnttab_grant_foreign_access_ref(
1676 			ref, np->xbdev->otherend_id,
1677 			pfn_to_mfn(page_to_pfn(page)),
1678 			0);
1679 		req->gref = ref;
1680 		req->id   = requeue_idx;
1681 
1682 		requeue_idx++;
1683 	}
1684 
1685 	np->rx.req_prod_pvt = requeue_idx;
1686 
1687 	/*
1688 	 * Step 3: All public and private state should now be sane.  Get
1689 	 * ready to start sending and receiving packets and give the driver
1690 	 * domain a kick because we've probably just requeued some
1691 	 * packets.
1692 	 */
1693 	netif_carrier_on(np->netdev);
1694 	notify_remote_via_irq(np->netdev->irq);
1695 	xennet_tx_buf_gc(dev);
1696 	xennet_alloc_rx_buffers(dev);
1697 
1698 	spin_unlock_irq(&np->tx_lock);
1699 	spin_unlock_bh(&np->rx_lock);
1700 
1701 	return 0;
1702 }
1703 
1704 /**
1705  * Callback received when the backend's state changes.
1706  */
1707 static void netback_changed(struct xenbus_device *dev,
1708 			    enum xenbus_state backend_state)
1709 {
1710 	struct netfront_info *np = dev_get_drvdata(&dev->dev);
1711 	struct net_device *netdev = np->netdev;
1712 
1713 	dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));
1714 
1715 	switch (backend_state) {
1716 	case XenbusStateInitialising:
1717 	case XenbusStateInitialised:
1718 	case XenbusStateReconfiguring:
1719 	case XenbusStateReconfigured:
1720 	case XenbusStateUnknown:
1721 	case XenbusStateClosed:
1722 		break;
1723 
1724 	case XenbusStateInitWait:
1725 		if (dev->state != XenbusStateInitialising)
1726 			break;
1727 		if (xennet_connect(netdev) != 0)
1728 			break;
1729 		xenbus_switch_state(dev, XenbusStateConnected);
1730 		break;
1731 
1732 	case XenbusStateConnected:
1733 		netif_notify_peers(netdev);
1734 		break;
1735 
1736 	case XenbusStateClosing:
1737 		xenbus_frontend_closed(dev);
1738 		break;
1739 	}
1740 }
1741 
1742 static const struct xennet_stat {
1743 	char name[ETH_GSTRING_LEN];
1744 	u16 offset;
1745 } xennet_stats[] = {
1746 	{
1747 		"rx_gso_checksum_fixup",
1748 		offsetof(struct netfront_info, rx_gso_checksum_fixup)
1749 	},
1750 };
1751 
1752 static int xennet_get_sset_count(struct net_device *dev, int string_set)
1753 {
1754 	switch (string_set) {
1755 	case ETH_SS_STATS:
1756 		return ARRAY_SIZE(xennet_stats);
1757 	default:
1758 		return -EINVAL;
1759 	}
1760 }
1761 
1762 static void xennet_get_ethtool_stats(struct net_device *dev,
1763 				     struct ethtool_stats *stats, u64 * data)
1764 {
1765 	void *np = netdev_priv(dev);
1766 	int i;
1767 
1768 	for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
1769 		data[i] = *(unsigned long *)(np + xennet_stats[i].offset);
1770 }
1771 
1772 static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data)
1773 {
1774 	int i;
1775 
1776 	switch (stringset) {
1777 	case ETH_SS_STATS:
1778 		for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
1779 			memcpy(data + i * ETH_GSTRING_LEN,
1780 			       xennet_stats[i].name, ETH_GSTRING_LEN);
1781 		break;
1782 	}
1783 }
1784 
1785 static const struct ethtool_ops xennet_ethtool_ops =
1786 {
1787 	.get_link = ethtool_op_get_link,
1788 
1789 	.get_sset_count = xennet_get_sset_count,
1790 	.get_ethtool_stats = xennet_get_ethtool_stats,
1791 	.get_strings = xennet_get_strings,
1792 };
1793 
1794 #ifdef CONFIG_SYSFS
1795 static ssize_t show_rxbuf_min(struct device *dev,
1796 			      struct device_attribute *attr, char *buf)
1797 {
1798 	struct net_device *netdev = to_net_dev(dev);
1799 	struct netfront_info *info = netdev_priv(netdev);
1800 
1801 	return sprintf(buf, "%u\n", info->rx_min_target);
1802 }
1803 
1804 static ssize_t store_rxbuf_min(struct device *dev,
1805 			       struct device_attribute *attr,
1806 			       const char *buf, size_t len)
1807 {
1808 	struct net_device *netdev = to_net_dev(dev);
1809 	struct netfront_info *np = netdev_priv(netdev);
1810 	char *endp;
1811 	unsigned long target;
1812 
1813 	if (!capable(CAP_NET_ADMIN))
1814 		return -EPERM;
1815 
1816 	target = simple_strtoul(buf, &endp, 0);
1817 	if (endp == buf)
1818 		return -EBADMSG;
1819 
1820 	if (target < RX_MIN_TARGET)
1821 		target = RX_MIN_TARGET;
1822 	if (target > RX_MAX_TARGET)
1823 		target = RX_MAX_TARGET;
1824 
1825 	spin_lock_bh(&np->rx_lock);
1826 	if (target > np->rx_max_target)
1827 		np->rx_max_target = target;
1828 	np->rx_min_target = target;
1829 	if (target > np->rx_target)
1830 		np->rx_target = target;
1831 
1832 	xennet_alloc_rx_buffers(netdev);
1833 
1834 	spin_unlock_bh(&np->rx_lock);
1835 	return len;
1836 }
1837 
1838 static ssize_t show_rxbuf_max(struct device *dev,
1839 			      struct device_attribute *attr, char *buf)
1840 {
1841 	struct net_device *netdev = to_net_dev(dev);
1842 	struct netfront_info *info = netdev_priv(netdev);
1843 
1844 	return sprintf(buf, "%u\n", info->rx_max_target);
1845 }
1846 
1847 static ssize_t store_rxbuf_max(struct device *dev,
1848 			       struct device_attribute *attr,
1849 			       const char *buf, size_t len)
1850 {
1851 	struct net_device *netdev = to_net_dev(dev);
1852 	struct netfront_info *np = netdev_priv(netdev);
1853 	char *endp;
1854 	unsigned long target;
1855 
1856 	if (!capable(CAP_NET_ADMIN))
1857 		return -EPERM;
1858 
1859 	target = simple_strtoul(buf, &endp, 0);
1860 	if (endp == buf)
1861 		return -EBADMSG;
1862 
1863 	if (target < RX_MIN_TARGET)
1864 		target = RX_MIN_TARGET;
1865 	if (target > RX_MAX_TARGET)
1866 		target = RX_MAX_TARGET;
1867 
1868 	spin_lock_bh(&np->rx_lock);
1869 	if (target < np->rx_min_target)
1870 		np->rx_min_target = target;
1871 	np->rx_max_target = target;
1872 	if (target < np->rx_target)
1873 		np->rx_target = target;
1874 
1875 	xennet_alloc_rx_buffers(netdev);
1876 
1877 	spin_unlock_bh(&np->rx_lock);
1878 	return len;
1879 }
1880 
1881 static ssize_t show_rxbuf_cur(struct device *dev,
1882 			      struct device_attribute *attr, char *buf)
1883 {
1884 	struct net_device *netdev = to_net_dev(dev);
1885 	struct netfront_info *info = netdev_priv(netdev);
1886 
1887 	return sprintf(buf, "%u\n", info->rx_target);
1888 }
1889 
1890 static struct device_attribute xennet_attrs[] = {
1891 	__ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min),
1892 	__ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max),
1893 	__ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL),
1894 };
1895 
1896 static int xennet_sysfs_addif(struct net_device *netdev)
1897 {
1898 	int i;
1899 	int err;
1900 
1901 	for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
1902 		err = device_create_file(&netdev->dev,
1903 					   &xennet_attrs[i]);
1904 		if (err)
1905 			goto fail;
1906 	}
1907 	return 0;
1908 
1909  fail:
1910 	while (--i >= 0)
1911 		device_remove_file(&netdev->dev, &xennet_attrs[i]);
1912 	return err;
1913 }
1914 
1915 static void xennet_sysfs_delif(struct net_device *netdev)
1916 {
1917 	int i;
1918 
1919 	for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++)
1920 		device_remove_file(&netdev->dev, &xennet_attrs[i]);
1921 }
1922 
1923 #endif /* CONFIG_SYSFS */
1924 
1925 static const struct xenbus_device_id netfront_ids[] = {
1926 	{ "vif" },
1927 	{ "" }
1928 };
1929 
1930 
1931 static int __devexit xennet_remove(struct xenbus_device *dev)
1932 {
1933 	struct netfront_info *info = dev_get_drvdata(&dev->dev);
1934 
1935 	dev_dbg(&dev->dev, "%s\n", dev->nodename);
1936 
1937 	unregister_netdev(info->netdev);
1938 
1939 	xennet_disconnect_backend(info);
1940 
1941 	del_timer_sync(&info->rx_refill_timer);
1942 
1943 	xennet_sysfs_delif(info->netdev);
1944 
1945 	free_percpu(info->stats);
1946 
1947 	free_netdev(info->netdev);
1948 
1949 	return 0;
1950 }
1951 
1952 static DEFINE_XENBUS_DRIVER(netfront, ,
1953 	.probe = netfront_probe,
1954 	.remove = __devexit_p(xennet_remove),
1955 	.resume = netfront_resume,
1956 	.otherend_changed = netback_changed,
1957 );
1958 
1959 static int __init netif_init(void)
1960 {
1961 	if (!xen_domain())
1962 		return -ENODEV;
1963 
1964 	if (xen_initial_domain())
1965 		return 0;
1966 
1967 	printk(KERN_INFO "Initialising Xen virtual ethernet driver.\n");
1968 
1969 	return xenbus_register_frontend(&netfront_driver);
1970 }
1971 module_init(netif_init);
1972 
1973 
1974 static void __exit netif_exit(void)
1975 {
1976 	if (xen_initial_domain())
1977 		return;
1978 
1979 	xenbus_unregister_driver(&netfront_driver);
1980 }
1981 module_exit(netif_exit);
1982 
1983 MODULE_DESCRIPTION("Xen virtual network device frontend");
1984 MODULE_LICENSE("GPL");
1985 MODULE_ALIAS("xen:vif");
1986 MODULE_ALIAS("xennet");
1987