/*
 * Back-end of the driver for virtual network devices. This portion of the
 * driver exports a 'unified' network-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/net/xen-netfront.c
 *
 * Copyright (c) 2002-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

#include <linux/kthread.h>
#include <linux/if_vlan.h>
#include <linux/udp.h>
#include <linux/highmem.h>

#include <net/tcp.h>

#include <xen/xen.h>
#include <xen/events.h>
#include <xen/interface/memory.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>

/* Provide an option to disable split event channels at load time as
 * event channels are a limited resource. Split event channels are
 * enabled by default.
 */
bool separate_tx_rx_irq = true;
module_param(separate_tx_rx_irq, bool, 0644);

/* When the guest ring is filled up, the qdisc queues packets for us, but we
 * have to time them out, otherwise other guests' packets can get stuck there.
 */
unsigned int rx_drain_timeout_msecs = 10000;
module_param(rx_drain_timeout_msecs, uint, 0444);
unsigned int rx_drain_timeout_jiffies;

/*
 * This is the maximum number of slots an skb can use. If a guest sends
 * an skb which exceeds this limit it is considered malicious.
 */
#define FATAL_SKB_SLOTS_DEFAULT 20
static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
module_param(fatal_skb_slots, uint, 0444);

static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
			       u8 status);

static void make_tx_response(struct xenvif *vif,
			     struct xen_netif_tx_request *txp,
			     s8       st);

static inline int tx_work_todo(struct xenvif *vif);
static inline int rx_work_todo(struct xenvif *vif);

static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
					     u16      id,
					     s8       st,
					     u16      offset,
					     u16      size,
					     u16      flags);

static inline unsigned long idx_to_pfn(struct xenvif *vif,
				       u16 idx)
{
	return page_to_pfn(vif->mmap_pages[idx]);
}

static inline unsigned long idx_to_kaddr(struct xenvif *vif,
					 u16 idx)
{
	return (unsigned long)pfn_to_kaddr(idx_to_pfn(vif, idx));
}

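/* Shorthand for the ubuf_info embedded in a pending slot; this is the
 * structure handed to the zerocopy callback for mapped tx packets.
 */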
#define callback_param(vif, pending_idx) \
	(vif->pending_tx_info[pending_idx].callback_struct)

/* Find the containing VIF's structure from a pointer in the
 * pending_tx_info array.
 */
static inline struct xenvif *ubuf_to_vif(struct ubuf_info *ubuf)
{
	u16 pending_idx = ubuf->desc;
	struct pending_tx_info *temp =
		container_of(ubuf, struct pending_tx_info, callback_struct);
	return container_of(temp - pending_idx,
			    struct xenvif,
			    pending_tx_info[0]);
}

/* This is a minimum size for the linear area to avoid lots of
 * calls to __pskb_pull_tail() as we set up checksum offsets. The
 * value 128 was chosen as it covers all IPv4 and most likely
 * IPv6 headers.
 */
#define PKT_PROT_LEN 128

static u16 frag_get_pending_idx(skb_frag_t *frag)
{
	return (u16)frag->page_offset;
}

static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
{
	frag->page_offset = pending_idx;
}

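/* Map a free-running index onto the pending ring. This relies on
 * MAX_PENDING_REQS being a power of two, so the mask is equivalent to
 * a modulo operation.
 */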
static inline pending_ring_idx_t pending_index(unsigned i)
{
	return i & (MAX_PENDING_REQS-1);
}

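/* Check whether the frontend has posted at least 'needed' rx requests.
 * If not, request an event when more arrive and re-check the producer
 * to close the race with requests posted in the meantime.
 */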
bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed)
{
	RING_IDX prod, cons;

	do {
		prod = vif->rx.sring->req_prod;
		cons = vif->rx.req_cons;

		if (prod - cons >= needed)
			return true;

		vif->rx.sring->req_event = prod + 1;

		/* Make sure event is visible before we check prod
		 * again.
		 */
		mb();
	} while (vif->rx.sring->req_prod != prod);

	return false;
}

/*
 * Returns true if we should start a new receive buffer instead of
 * adding 'size' bytes to a buffer which currently contains 'offset'
 * bytes.
 */
static bool start_new_rx_buffer(int offset, unsigned long size, int head)
{
	/* simple case: we have completely filled the current buffer. */
	if (offset == MAX_BUFFER_OFFSET)
		return true;

	/*
	 * complex case: start a fresh buffer if the current frag
	 * would overflow the current buffer but only if:
	 *     (i)   this frag would fit completely in the next buffer
	 * and (ii)  there is already some data in the current buffer
	 * and (iii) this is not the head buffer.
	 *
	 * Where:
	 * - (i) stops us splitting a frag into two copies
	 *   unless the frag is too large for a single buffer.
	 * - (ii) stops us from leaving a buffer pointlessly empty.
	 * - (iii) stops us leaving the first buffer
	 *   empty. Strictly speaking this is already covered
	 *   by (ii) but is explicitly checked because
	 *   netfront relies on the first buffer being
	 *   non-empty and can crash otherwise.
	 *
	 * This means we will effectively linearise small
	 * frags but do not needlessly split large buffers
	 * into multiple copies; we tend to give large frags
	 * their own buffers as before.
	 */
	BUG_ON(size > MAX_BUFFER_OFFSET);
	if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head)
		return true;

	return false;
}

struct netrx_pending_operations {
	unsigned copy_prod, copy_cons;
	unsigned meta_prod, meta_cons;
	struct gnttab_copy *copy;
	struct xenvif_rx_meta *meta;
	int copy_off;
	grant_ref_t copy_gref;
};

static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif,
						 struct netrx_pending_operations *npo)
{
	struct xenvif_rx_meta *meta;
	struct xen_netif_rx_request *req;

	req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);

	meta = npo->meta + npo->meta_prod++;
	meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
	meta->gso_size = 0;
	meta->size = 0;
	meta->id = req->id;

	npo->copy_off = 0;
	npo->copy_gref = req->gref;

	return meta;
}

/*
 * Set up the grant operations for this fragment. If it's a flipping
 * interface, we also set up the unmap request from here.
 */
static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
				 struct netrx_pending_operations *npo,
				 struct page *page, unsigned long size,
				 unsigned long offset, int *head,
				 struct xenvif *foreign_vif,
				 grant_ref_t foreign_gref)
{
	struct gnttab_copy *copy_gop;
	struct xenvif_rx_meta *meta;
	unsigned long bytes;
	int gso_type = XEN_NETIF_GSO_TYPE_NONE;

	/* Data must not cross a page boundary. */
	BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));

	meta = npo->meta + npo->meta_prod - 1;

	/* Skip unused frames from start of page */
	page += offset >> PAGE_SHIFT;
	offset &= ~PAGE_MASK;

	while (size > 0) {
		BUG_ON(offset >= PAGE_SIZE);
		BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);

		bytes = PAGE_SIZE - offset;

		if (bytes > size)
			bytes = size;

		if (start_new_rx_buffer(npo->copy_off, bytes, *head)) {
			/*
			 * Netfront requires there to be some data in the head
			 * buffer.
			 */
			BUG_ON(*head);

			meta = get_next_rx_buffer(vif, npo);
		}

		if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
			bytes = MAX_BUFFER_OFFSET - npo->copy_off;

		copy_gop = npo->copy + npo->copy_prod++;
		copy_gop->flags = GNTCOPY_dest_gref;
		copy_gop->len = bytes;

		if (foreign_vif) {
			copy_gop->source.domid = foreign_vif->domid;
			copy_gop->source.u.ref = foreign_gref;
			copy_gop->flags |= GNTCOPY_source_gref;
		} else {
			copy_gop->source.domid = DOMID_SELF;
			copy_gop->source.u.gmfn =
				virt_to_mfn(page_address(page));
		}
		copy_gop->source.offset = offset;

		copy_gop->dest.domid = vif->domid;
		copy_gop->dest.offset = npo->copy_off;
		copy_gop->dest.u.ref = npo->copy_gref;

		npo->copy_off += bytes;
		meta->size += bytes;

		offset += bytes;
		size -= bytes;

		/* Next frame */
		if (offset == PAGE_SIZE && size) {
			BUG_ON(!PageCompound(page));
			page++;
			offset = 0;
		}

		/* Leave a gap for the GSO descriptor. */
		if (skb_is_gso(skb)) {
			if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
				gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
			else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
				gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
		}

		if (*head && ((1 << gso_type) & vif->gso_mask))
			vif->rx.req_cons++;

		*head = 0; /* There must be something in this buffer now. */
	}
}

/*
 * Prepare an SKB to be transmitted to the frontend.
 *
 * This function is responsible for allocating grant operations, meta
 * structures, etc.
 *
 * It returns the number of meta structures consumed. The number of
 * ring slots used is always equal to the number of meta slots used
 * plus the number of GSO descriptors used. Currently, we use either
 * zero GSO descriptors (for non-GSO packets) or one descriptor (for
 * frontend-side LRO).
 */
static int xenvif_gop_skb(struct sk_buff *skb,
			  struct netrx_pending_operations *npo)
{
	struct xenvif *vif = netdev_priv(skb->dev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int i;
	struct xen_netif_rx_request *req;
	struct xenvif_rx_meta *meta;
	unsigned char *data;
	int head = 1;
	int old_meta_prod;
	int gso_type;
	struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;
	grant_ref_t foreign_grefs[MAX_SKB_FRAGS];
	struct xenvif *foreign_vif = NULL;

	old_meta_prod = npo->meta_prod;

	gso_type = XEN_NETIF_GSO_TYPE_NONE;
	if (skb_is_gso(skb)) {
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
	}

	/* Set up a GSO prefix descriptor, if necessary */
	if ((1 << gso_type) & vif->gso_prefix_mask) {
		req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
		meta = npo->meta + npo->meta_prod++;
		meta->gso_type = gso_type;
		meta->gso_size = skb_shinfo(skb)->gso_size;
		meta->size = 0;
		meta->id = req->id;
	}

	req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
	meta = npo->meta + npo->meta_prod++;

	if ((1 << gso_type) & vif->gso_mask) {
		meta->gso_type = gso_type;
		meta->gso_size = skb_shinfo(skb)->gso_size;
	} else {
		meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
		meta->gso_size = 0;
	}

	meta->size = 0;
	meta->id = req->id;
	npo->copy_off = 0;
	npo->copy_gref = req->gref;

	if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
	    (ubuf->callback == &xenvif_zerocopy_callback)) {
		int i = 0;
		foreign_vif = ubuf_to_vif(ubuf);

		do {
			u16 pending_idx = ubuf->desc;
			foreign_grefs[i++] =
				foreign_vif->pending_tx_info[pending_idx].req.gref;
			ubuf = (struct ubuf_info *) ubuf->ctx;
		} while (ubuf);
	}

	data = skb->data;
	while (data < skb_tail_pointer(skb)) {
		unsigned int offset = offset_in_page(data);
		unsigned int len = PAGE_SIZE - offset;

		if (data + len > skb_tail_pointer(skb))
			len = skb_tail_pointer(skb) - data;

		xenvif_gop_frag_copy(vif, skb, npo,
				     virt_to_page(data), len, offset, &head,
				     NULL,
				     0);
		data += len;
	}

	for (i = 0; i < nr_frags; i++) {
		xenvif_gop_frag_copy(vif, skb, npo,
				     skb_frag_page(&skb_shinfo(skb)->frags[i]),
				     skb_frag_size(&skb_shinfo(skb)->frags[i]),
				     skb_shinfo(skb)->frags[i].page_offset,
				     &head,
				     foreign_vif,
				     foreign_grefs[i]);
	}

	return npo->meta_prod - old_meta_prod;
}

/*
 * This is a twin to xenvif_gop_skb.  Assume that xenvif_gop_skb was
 * used to set up the operations on the top of
 * netrx_pending_operations, which have since been done.  Check that
 * they didn't give any errors and advance over them.
 */
static int xenvif_check_gop(struct xenvif *vif, int nr_meta_slots,
			    struct netrx_pending_operations *npo)
{
	struct gnttab_copy     *copy_op;
	int status = XEN_NETIF_RSP_OKAY;
	int i;

	for (i = 0; i < nr_meta_slots; i++) {
		copy_op = npo->copy + npo->copy_cons++;
		if (copy_op->status != GNTST_okay) {
			netdev_dbg(vif->dev,
				   "Bad status %d from copy to DOM%d.\n",
				   copy_op->status, vif->domid);
			status = XEN_NETIF_RSP_ERROR;
		}
	}

	return status;
}

static void xenvif_add_frag_responses(struct xenvif *vif, int status,
				      struct xenvif_rx_meta *meta,
				      int nr_meta_slots)
{
	int i;
	unsigned long offset;

	/* No fragments used */
	if (nr_meta_slots <= 1)
		return;

	nr_meta_slots--;

	for (i = 0; i < nr_meta_slots; i++) {
		int flags;
		if (i == nr_meta_slots - 1)
			flags = 0;
		else
			flags = XEN_NETRXF_more_data;

		offset = 0;
		make_rx_response(vif, meta[i].id, status, offset,
				 meta[i].size, flags);
	}
}

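/* Private data kept in skb->cb while an skb waits on the internal rx
 * queue; records how many meta slots the skb consumed.
 */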
struct xenvif_rx_cb {
	int meta_slots_used;
};

#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)

void xenvif_kick_thread(struct xenvif *vif)
{
	wake_up(&vif->wq);
}

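/* Dequeue skbs from the internal rx queue, build a batch of grant copy
 * operations that transfers them into frontend buffers, issue the
 * batch and fill in the rx responses. Runs in the per-vif kthread.
 */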
static void xenvif_rx_action(struct xenvif *vif)
{
	s8 status;
	u16 flags;
	struct xen_netif_rx_response *resp;
	struct sk_buff_head rxq;
	struct sk_buff *skb;
	LIST_HEAD(notify);
	int ret;
	unsigned long offset;
	bool need_to_notify = false;

	struct netrx_pending_operations npo = {
		.copy  = vif->grant_copy_op,
		.meta  = vif->meta,
	};

	skb_queue_head_init(&rxq);

	while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
		RING_IDX max_slots_needed;
		RING_IDX old_req_cons;
		RING_IDX ring_slots_used;
		int i;

		/* We need a cheap worst-case estimate for the number of
		 * slots we'll use.
		 */

		max_slots_needed = DIV_ROUND_UP(offset_in_page(skb->data) +
						skb_headlen(skb),
						PAGE_SIZE);
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			unsigned int size;
			unsigned int offset;

			size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
			offset = skb_shinfo(skb)->frags[i].page_offset;

			/* For a worst-case estimate we need to factor in
			 * the fragment page offset as this will affect the
			 * number of times xenvif_gop_frag_copy() will
			 * call start_new_rx_buffer().
			 */
			max_slots_needed += DIV_ROUND_UP(offset + size,
							 PAGE_SIZE);
		}

		/* To avoid the estimate becoming too pessimistic for some
		 * frontends that limit posted rx requests, cap the estimate
		 * at MAX_SKB_FRAGS.
		 */
		if (max_slots_needed > MAX_SKB_FRAGS)
			max_slots_needed = MAX_SKB_FRAGS;

		/* We may need one more slot for GSO metadata */
		if (skb_is_gso(skb) &&
		   (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
		    skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
			max_slots_needed++;

		/* If the skb may not fit then bail out now */
		if (!xenvif_rx_ring_slots_available(vif, max_slots_needed)) {
			skb_queue_head(&vif->rx_queue, skb);
			need_to_notify = true;
			vif->rx_last_skb_slots = max_slots_needed;
			break;
		} else {
			vif->rx_last_skb_slots = 0;
		}

		old_req_cons = vif->rx.req_cons;
		XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo);
		ring_slots_used = vif->rx.req_cons - old_req_cons;

		BUG_ON(ring_slots_used > max_slots_needed);

		__skb_queue_tail(&rxq, skb);
	}

	BUG_ON(npo.meta_prod > ARRAY_SIZE(vif->meta));

	if (!npo.copy_prod)
		goto done;

	BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
	gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod);

	while ((skb = __skb_dequeue(&rxq)) != NULL) {

		if ((1 << vif->meta[npo.meta_cons].gso_type) &
		    vif->gso_prefix_mask) {
			resp = RING_GET_RESPONSE(&vif->rx,
						 vif->rx.rsp_prod_pvt++);

			resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;

			resp->offset = vif->meta[npo.meta_cons].gso_size;
			resp->id = vif->meta[npo.meta_cons].id;
			resp->status = XENVIF_RX_CB(skb)->meta_slots_used;

			npo.meta_cons++;
			XENVIF_RX_CB(skb)->meta_slots_used--;
		}

		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;

		status = xenvif_check_gop(vif,
					  XENVIF_RX_CB(skb)->meta_slots_used,
					  &npo);

		if (XENVIF_RX_CB(skb)->meta_slots_used == 1)
			flags = 0;
		else
			flags = XEN_NETRXF_more_data;

		if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
			flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated;
		else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
			/* remote but checksummed. */
			flags |= XEN_NETRXF_data_validated;

		offset = 0;
		resp = make_rx_response(vif, vif->meta[npo.meta_cons].id,
					status, offset,
					vif->meta[npo.meta_cons].size,
					flags);

		if ((1 << vif->meta[npo.meta_cons].gso_type) &
		    vif->gso_mask) {
			struct xen_netif_extra_info *gso =
				(struct xen_netif_extra_info *)
				RING_GET_RESPONSE(&vif->rx,
						  vif->rx.rsp_prod_pvt++);

			resp->flags |= XEN_NETRXF_extra_info;

			gso->u.gso.type = vif->meta[npo.meta_cons].gso_type;
			gso->u.gso.size = vif->meta[npo.meta_cons].gso_size;
			gso->u.gso.pad = 0;
			gso->u.gso.features = 0;

			gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
			gso->flags = 0;
		}

		xenvif_add_frag_responses(vif, status,
					  vif->meta + npo.meta_cons + 1,
					  XENVIF_RX_CB(skb)->meta_slots_used);

		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);

		need_to_notify |= !!ret;

		npo.meta_cons += XENVIF_RX_CB(skb)->meta_slots_used;
		dev_kfree_skb(skb);
	}

done:
	if (need_to_notify)
		notify_remote_via_irq(vif->rx_irq);
}

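/* Despite the name, this checks the tx ring: if there are unconsumed
 * requests, schedule NAPI to process them.
 */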
void xenvif_check_rx_xenvif(struct xenvif *vif)
{
	int more_to_do;

	RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);

	if (more_to_do)
		napi_schedule(&vif->napi);
}

static void tx_add_credit(struct xenvif *vif)
{
	unsigned long max_burst, max_credit;

	/*
	 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
	 * Otherwise the interface can seize up due to insufficient credit.
	 */
	max_burst = RING_GET_REQUEST(&vif->tx, vif->tx.req_cons)->size;
	max_burst = min(max_burst, 131072UL);
	max_burst = max(max_burst, vif->credit_bytes);

	/* Take care that adding a new chunk of credit doesn't wrap to zero. */
	max_credit = vif->remaining_credit + vif->credit_bytes;
	if (max_credit < vif->remaining_credit)
		max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */

	vif->remaining_credit = min(max_credit, max_burst);
}

static void tx_credit_callback(unsigned long data)
{
	struct xenvif *vif = (struct xenvif *)data;
	tx_add_credit(vif);
	xenvif_check_rx_xenvif(vif);
}

static void xenvif_tx_err(struct xenvif *vif,
			  struct xen_netif_tx_request *txp, RING_IDX end)
{
	RING_IDX cons = vif->tx.req_cons;
	unsigned long flags;

	do {
		spin_lock_irqsave(&vif->response_lock, flags);
		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
		spin_unlock_irqrestore(&vif->response_lock, flags);
		if (cons == end)
			break;
		txp = RING_GET_REQUEST(&vif->tx, cons++);
	} while (1);
	vif->tx.req_cons = cons;
}

static void xenvif_fatal_tx_err(struct xenvif *vif)
{
	netdev_err(vif->dev, "fatal error; disabling device\n");
	vif->disabled = true;
	xenvif_kick_thread(vif);
}

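/* Walk the chain of tx requests flagged XEN_NETTXF_more_data and
 * validate it. Returns the number of extra slots consumed or a
 * negative errno; protocol violations that indicate a buggy or
 * malicious frontend are fatal for the interface.
 */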
static int xenvif_count_requests(struct xenvif *vif,
				 struct xen_netif_tx_request *first,
				 struct xen_netif_tx_request *txp,
				 int work_to_do)
{
	RING_IDX cons = vif->tx.req_cons;
	int slots = 0;
	int drop_err = 0;
	int more_data;

	if (!(first->flags & XEN_NETTXF_more_data))
		return 0;

	do {
		struct xen_netif_tx_request dropped_tx = { 0 };

		if (slots >= work_to_do) {
			netdev_err(vif->dev,
				   "Asked for %d slots but exceeds this limit\n",
				   work_to_do);
			xenvif_fatal_tx_err(vif);
			return -ENODATA;
		}

		/* This guest is really using too many slots and
		 * is considered malicious.
		 */
		if (unlikely(slots >= fatal_skb_slots)) {
			netdev_err(vif->dev,
				   "Malicious frontend using %d slots, threshold %u\n",
				   slots, fatal_skb_slots);
			xenvif_fatal_tx_err(vif);
			return -E2BIG;
		}

		/* The Xen network protocol had an implicit dependency on
		 * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to
		 * the historical MAX_SKB_FRAGS value 18 to honor the
		 * same behavior as before. Any packet using more than
		 * 18 slots but less than fatal_skb_slots slots is
		 * dropped.
		 */
		if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
			if (net_ratelimit())
				netdev_dbg(vif->dev,
					   "Too many slots (%d) exceeding limit (%d), dropping packet\n",
					   slots, XEN_NETBK_LEGACY_SLOTS_MAX);
			drop_err = -E2BIG;
		}

		if (drop_err)
			txp = &dropped_tx;

		memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + slots),
		       sizeof(*txp));

		/* If the guest submitted a frame >= 64 KiB then
		 * first->size overflowed and following slots will
		 * appear to be larger than the frame.
		 *
		 * This cannot be a fatal error as there are buggy
		 * frontends that do this.
		 *
		 * Consume all slots and drop the packet.
		 */
		if (!drop_err && txp->size > first->size) {
			if (net_ratelimit())
				netdev_dbg(vif->dev,
					   "Invalid tx request, slot size %u > remaining size %u\n",
					   txp->size, first->size);
			drop_err = -EIO;
		}

		first->size -= txp->size;
		slots++;

		if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
			netdev_err(vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
				 txp->offset, txp->size);
			xenvif_fatal_tx_err(vif);
			return -EINVAL;
		}

		more_data = txp->flags & XEN_NETTXF_more_data;

		if (!drop_err)
			txp++;

	} while (more_data);

	if (drop_err) {
		xenvif_tx_err(vif, first, cons + slots);
		return drop_err;
	}

	return slots;
}

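/* Private data kept in skb->cb on the tx path: the pending slot index
 * of the packet header.
 */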
struct xenvif_tx_cb {
	u16 pending_idx;
};

#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)

static inline void xenvif_tx_create_map_op(struct xenvif *vif,
					   u16 pending_idx,
					   struct xen_netif_tx_request *txp,
					   struct gnttab_map_grant_ref *mop)
{
	vif->pages_to_map[mop - vif->tx_map_ops] = vif->mmap_pages[pending_idx];
	gnttab_set_map_op(mop, idx_to_kaddr(vif, pending_idx),
			  GNTMAP_host_map | GNTMAP_readonly,
			  txp->gref, vif->domid);

	memcpy(&vif->pending_tx_info[pending_idx].req, txp,
	       sizeof(*txp));
}

static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
{
	struct sk_buff *skb =
		alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
			  GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(skb == NULL))
		return NULL;

	/* Packets passed to netif_rx() must have some headroom. */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);

	/* Initialize it here to avoid later surprises */
	skb_shinfo(skb)->destructor_arg = NULL;

	return skb;
}

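/* Create grant map operations for each slot (frag) of an skb beyond
 * the header. If the frontend used more slots than MAX_SKB_FRAGS, the
 * overflow frags are parked on a separate frag_list skb and coalesced
 * later by xenvif_handle_frag_list(). Returns a pointer past the last
 * map op created, or NULL if the frag_list skb could not be allocated.
 */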
static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif *vif,
							struct sk_buff *skb,
							struct xen_netif_tx_request *txp,
							struct gnttab_map_grant_ref *gop)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	skb_frag_t *frags = shinfo->frags;
	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
	int start;
	pending_ring_idx_t index;
	unsigned int nr_slots, frag_overflow = 0;

	/* At this point shinfo->nr_frags is in fact the number of
	 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
	 */
	if (shinfo->nr_frags > MAX_SKB_FRAGS) {
		frag_overflow = shinfo->nr_frags - MAX_SKB_FRAGS;
		BUG_ON(frag_overflow > MAX_SKB_FRAGS);
		shinfo->nr_frags = MAX_SKB_FRAGS;
	}
	nr_slots = shinfo->nr_frags;

	/* Skip first skb fragment if it is on same page as header fragment. */
	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);

	for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;
	     shinfo->nr_frags++, txp++, gop++) {
		index = pending_index(vif->pending_cons++);
		pending_idx = vif->pending_ring[index];
		xenvif_tx_create_map_op(vif, pending_idx, txp, gop);
		frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
	}

	if (frag_overflow) {
		struct sk_buff *nskb = xenvif_alloc_skb(0);
		if (unlikely(nskb == NULL)) {
			if (net_ratelimit())
				netdev_err(vif->dev,
					   "Can't allocate the frag_list skb.\n");
			return NULL;
		}

		shinfo = skb_shinfo(nskb);
		frags = shinfo->frags;

		for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow;
		     shinfo->nr_frags++, txp++, gop++) {
			index = pending_index(vif->pending_cons++);
			pending_idx = vif->pending_ring[index];
			xenvif_tx_create_map_op(vif, pending_idx, txp, gop);
			frag_set_pending_idx(&frags[shinfo->nr_frags],
					     pending_idx);
		}

		skb_shinfo(skb)->frag_list = nskb;
	}

	return gop;
}

static inline void xenvif_grant_handle_set(struct xenvif *vif,
					   u16 pending_idx,
					   grant_handle_t handle)
{
	if (unlikely(vif->grant_tx_handle[pending_idx] !=
		     NETBACK_INVALID_HANDLE)) {
		netdev_err(vif->dev,
			   "Trying to overwrite active handle! pending_idx: %x\n",
			   pending_idx);
		BUG();
	}
	vif->grant_tx_handle[pending_idx] = handle;
}

static inline void xenvif_grant_handle_reset(struct xenvif *vif,
					     u16 pending_idx)
{
	if (unlikely(vif->grant_tx_handle[pending_idx] ==
		     NETBACK_INVALID_HANDLE)) {
		netdev_err(vif->dev,
			   "Trying to unmap invalid handle! pending_idx: %x\n",
			   pending_idx);
		BUG();
	}
	vif->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
}

static int xenvif_tx_check_gop(struct xenvif *vif,
			       struct sk_buff *skb,
			       struct gnttab_map_grant_ref **gopp_map,
			       struct gnttab_copy **gopp_copy)
{
	struct gnttab_map_grant_ref *gop_map = *gopp_map;
	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;
	int i, err;
	struct sk_buff *first_skb = NULL;

	/* Check status of header. Note: the copy op pointer must only be
	 * advanced after the status has been read and reported, otherwise
	 * we would log the status of the following op on failure.
	 */
	err = (*gopp_copy)->status;
	if (unlikely(err)) {
		if (net_ratelimit())
			netdev_dbg(vif->dev,
				   "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
				   (*gopp_copy)->status,
				   pending_idx,
				   (*gopp_copy)->source.u.ref);
		xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
	}
	(*gopp_copy)++;

check_frags:
	for (i = 0; i < nr_frags; i++, gop_map++) {
		int j, newerr;

		pending_idx = frag_get_pending_idx(&shinfo->frags[i]);

		/* Check error status: if okay then remember grant handle. */
		newerr = gop_map->status;

		if (likely(!newerr)) {
			xenvif_grant_handle_set(vif,
						pending_idx,
						gop_map->handle);
			/* Had a previous error? Invalidate this fragment. */
			if (unlikely(err))
				xenvif_idx_unmap(vif, pending_idx);
			continue;
		}

		/* Error on this fragment: respond to client with an error. */
		if (net_ratelimit())
			netdev_dbg(vif->dev,
				   "Grant map of %d. frag failed! status: %d pending_idx: %u ref: %u\n",
				   i,
				   gop_map->status,
				   pending_idx,
				   gop_map->ref);
		xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);

		/* Not the first error? Preceding frags already invalidated. */
		if (err)
			continue;
		/* First error: invalidate preceding fragments. */
		for (j = 0; j < i; j++) {
			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
			xenvif_idx_unmap(vif, pending_idx);
		}

		/* Remember the error: invalidate all subsequent fragments. */
		err = newerr;
	}

	if (skb_has_frag_list(skb)) {
		first_skb = skb;
		skb = shinfo->frag_list;
		shinfo = skb_shinfo(skb);
		nr_frags = shinfo->nr_frags;

		goto check_frags;
	}

	/* If there was a mapping error in the frag_list skb, we have to
	 * unmap the frags of the first skb as well.
	 */
	if (first_skb && err) {
		int j;
		shinfo = skb_shinfo(first_skb);
		for (j = 0; j < shinfo->nr_frags; j++) {
			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
			xenvif_idx_unmap(vif, pending_idx);
		}
	}

	*gopp_map = gop_map;
	return err;
}

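/* Turn the grant-mapped slots recorded in each frag back into real
 * page frags, and chain the slots' zerocopy callback structures
 * together so that the whole chain is deallocated when the last user
 * of the skb lets go of it.
 */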
static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;
	int i;
	u16 prev_pending_idx = INVALID_PENDING_IDX;

	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = shinfo->frags + i;
		struct xen_netif_tx_request *txp;
		struct page *page;
		u16 pending_idx;

		pending_idx = frag_get_pending_idx(frag);

		/* If this is not the first frag, chain it to the previous */
		if (prev_pending_idx == INVALID_PENDING_IDX)
			skb_shinfo(skb)->destructor_arg =
				&callback_param(vif, pending_idx);
		else
			callback_param(vif, prev_pending_idx).ctx =
				&callback_param(vif, pending_idx);

		callback_param(vif, pending_idx).ctx = NULL;
		prev_pending_idx = pending_idx;

		txp = &vif->pending_tx_info[pending_idx].req;
		page = virt_to_page(idx_to_kaddr(vif, pending_idx));
		__skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
		skb->len += txp->size;
		skb->data_len += txp->size;
		skb->truesize += txp->size;

		/* Take an extra reference to offset network stack's put_page */
		get_page(vif->mmap_pages[pending_idx]);
	}
	/* FIXME: __skb_fill_page_desc set this to true because page->pfmemalloc
	 * overlaps with "index", and "mapping" is not set. I think mapping
	 * should be set. If delivered to local stack, it would drop this
	 * skb in sk_filter unless the socket has the right to use it.
	 */
	skb->pfmemalloc	= false;
}

static int xenvif_get_extras(struct xenvif *vif,
				struct xen_netif_extra_info *extras,
				int work_to_do)
{
	struct xen_netif_extra_info extra;
	RING_IDX cons = vif->tx.req_cons;

	do {
		if (unlikely(work_to_do-- <= 0)) {
			netdev_err(vif->dev, "Missing extra info\n");
			xenvif_fatal_tx_err(vif);
			return -EBADR;
		}

		memcpy(&extra, RING_GET_REQUEST(&vif->tx, cons),
		       sizeof(extra));
		if (unlikely(!extra.type ||
			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
			vif->tx.req_cons = ++cons;
			netdev_err(vif->dev,
				   "Invalid extra type: %d\n", extra.type);
			xenvif_fatal_tx_err(vif);
			return -EINVAL;
		}

		memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
		vif->tx.req_cons = ++cons;
	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);

	return work_to_do;
}

static int xenvif_set_skb_gso(struct xenvif *vif,
			      struct sk_buff *skb,
			      struct xen_netif_extra_info *gso)
{
	if (!gso->u.gso.size) {
		netdev_err(vif->dev, "GSO size must not be zero.\n");
		xenvif_fatal_tx_err(vif);
		return -EINVAL;
	}

	switch (gso->u.gso.type) {
	case XEN_NETIF_GSO_TYPE_TCPV4:
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
		break;
	case XEN_NETIF_GSO_TYPE_TCPV6:
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
		break;
	default:
		netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
		xenvif_fatal_tx_err(vif);
		return -EINVAL;
	}

	skb_shinfo(skb)->gso_size = gso->u.gso.size;
	/* gso_segs will be calculated later */

	return 0;
}

static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
{
	bool recalculate_partial_csum = false;

	/* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
	 * peers can fail to set XEN_NETTXF_csum_blank when sending a
	 * GSO frame. In this case force the SKB to CHECKSUM_PARTIAL
	 * and recalculate the partial checksum.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
		vif->rx_gso_checksum_fixup++;
		skb->ip_summed = CHECKSUM_PARTIAL;
		recalculate_partial_csum = true;
	}

	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	return skb_checksum_setup(skb, recalculate_partial_csum);
}

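/* Credit-based rate limiting: each vif may send vif->credit_bytes per
 * vif->credit_usec window. If a packet exceeds the remaining credit,
 * arm a timer that replenishes the credit and re-schedules tx work
 * when the next window starts.
 */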
static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
{
	u64 now = get_jiffies_64();
	u64 next_credit = vif->credit_window_start +
		msecs_to_jiffies(vif->credit_usec / 1000);

	/* Timer could already be pending in rare cases. */
	if (timer_pending(&vif->credit_timeout))
		return true;

	/* Passed the point where we can replenish credit? */
	if (time_after_eq64(now, next_credit)) {
		vif->credit_window_start = now;
		tx_add_credit(vif);
	}

	/* Still too big to send right now? Set a callback. */
	if (size > vif->remaining_credit) {
		vif->credit_timeout.data     =
			(unsigned long)vif;
		vif->credit_timeout.function =
			tx_credit_callback;
		mod_timer(&vif->credit_timeout,
			  next_credit);
		vif->credit_window_start = next_credit;

		return true;
	}

	return false;
}

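/* Consume requests from the tx ring and turn them into grant
 * operations: a grant copy for the packet header (up to PKT_PROT_LEN)
 * and grant maps for the remaining slots. The batches are submitted
 * by xenvif_tx_action().
 */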
static void xenvif_tx_build_gops(struct xenvif *vif,
				     int budget,
				     unsigned *copy_ops,
				     unsigned *map_ops)
{
	struct gnttab_map_grant_ref *gop = vif->tx_map_ops, *request_gop;
	struct sk_buff *skb;
	int ret;

	while (skb_queue_len(&vif->tx_queue) < budget) {
		struct xen_netif_tx_request txreq;
		struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
		struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
		u16 pending_idx;
		RING_IDX idx;
		int work_to_do;
		unsigned int data_len;
		pending_ring_idx_t index;

		if (vif->tx.sring->req_prod - vif->tx.req_cons >
		    XEN_NETIF_TX_RING_SIZE) {
			netdev_err(vif->dev,
				   "Impossible number of requests. "
				   "req_prod %d, req_cons %d, size %ld\n",
				   vif->tx.sring->req_prod, vif->tx.req_cons,
				   XEN_NETIF_TX_RING_SIZE);
			xenvif_fatal_tx_err(vif);
			break;
		}

		work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&vif->tx);
		if (!work_to_do)
			break;

		idx = vif->tx.req_cons;
		rmb(); /* Ensure that we see the request before we copy it. */
		memcpy(&txreq, RING_GET_REQUEST(&vif->tx, idx), sizeof(txreq));

		/* Credit-based scheduling. */
		if (txreq.size > vif->remaining_credit &&
		    tx_credit_exceeded(vif, txreq.size))
			break;

		vif->remaining_credit -= txreq.size;

		work_to_do--;
		vif->tx.req_cons = ++idx;

		memset(extras, 0, sizeof(extras));
		if (txreq.flags & XEN_NETTXF_extra_info) {
			work_to_do = xenvif_get_extras(vif, extras,
						       work_to_do);
			idx = vif->tx.req_cons;
			if (unlikely(work_to_do < 0))
				break;
		}

		ret = xenvif_count_requests(vif, &txreq, txfrags, work_to_do);
		if (unlikely(ret < 0))
			break;

		idx += ret;

		if (unlikely(txreq.size < ETH_HLEN)) {
			netdev_dbg(vif->dev,
				   "Bad packet size: %d\n", txreq.size);
			xenvif_tx_err(vif, &txreq, idx);
			break;
		}

		/* The first request must not cross a page boundary, as the
		 * payload must not fragment.
		 */
		if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
			netdev_err(vif->dev,
				   "txreq.offset: %x, size: %u, end: %lu\n",
				   txreq.offset, txreq.size,
				   (txreq.offset&~PAGE_MASK) + txreq.size);
			xenvif_fatal_tx_err(vif);
			break;
		}

		index = pending_index(vif->pending_cons);
		pending_idx = vif->pending_ring[index];

		data_len = (txreq.size > PKT_PROT_LEN &&
			    ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
			PKT_PROT_LEN : txreq.size;

		skb = xenvif_alloc_skb(data_len);
		if (unlikely(skb == NULL)) {
			netdev_dbg(vif->dev,
				   "Can't allocate a skb in start_xmit.\n");
			xenvif_tx_err(vif, &txreq, idx);
			break;
		}

		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
			struct xen_netif_extra_info *gso;
			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

			if (xenvif_set_skb_gso(vif, skb, gso)) {
				/* Failure in xenvif_set_skb_gso is fatal. */
				kfree_skb(skb);
				break;
			}
		}

		XENVIF_TX_CB(skb)->pending_idx = pending_idx;

		__skb_put(skb, data_len);
		vif->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;
		vif->tx_copy_ops[*copy_ops].source.domid = vif->domid;
		vif->tx_copy_ops[*copy_ops].source.offset = txreq.offset;

		vif->tx_copy_ops[*copy_ops].dest.u.gmfn =
			virt_to_mfn(skb->data);
		vif->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
		vif->tx_copy_ops[*copy_ops].dest.offset =
			offset_in_page(skb->data);

		vif->tx_copy_ops[*copy_ops].len = data_len;
		vif->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;

		(*copy_ops)++;

		skb_shinfo(skb)->nr_frags = ret;
		if (data_len < txreq.size) {
			skb_shinfo(skb)->nr_frags++;
			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
					     pending_idx);
			xenvif_tx_create_map_op(vif, pending_idx, &txreq, gop);
			gop++;
		} else {
			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
					     INVALID_PENDING_IDX);
			memcpy(&vif->pending_tx_info[pending_idx].req, &txreq,
			       sizeof(txreq));
		}

		vif->pending_cons++;

		request_gop = xenvif_get_requests(vif, skb, txfrags, gop);
		if (request_gop == NULL) {
			kfree_skb(skb);
			xenvif_tx_err(vif, &txreq, idx);
			break;
		}
		gop = request_gop;

		__skb_queue_tail(&vif->tx_queue, skb);

		vif->tx.req_cons = idx;

		if (((gop - vif->tx_map_ops) >= ARRAY_SIZE(vif->tx_map_ops)) ||
		    (*copy_ops >= ARRAY_SIZE(vif->tx_copy_ops)))
			break;
	}

	(*map_ops) = gop - vif->tx_map_ops;
}

/* Consolidate an skb with a frag_list into a brand new one with local
 * pages on frags. Returns 0 on success, or -ENOMEM if new pages cannot
 * be allocated.
 */
static int xenvif_handle_frag_list(struct xenvif *vif, struct sk_buff *skb)
{
	unsigned int offset = skb_headlen(skb);
	skb_frag_t frags[MAX_SKB_FRAGS];
	int i;
	struct ubuf_info *uarg;
	struct sk_buff *nskb = skb_shinfo(skb)->frag_list;

	vif->tx_zerocopy_sent += 2;
	vif->tx_frag_overflow++;

	xenvif_fill_frags(vif, nskb);
	/* Subtract frags size, we will correct it later */
	skb->truesize -= skb->data_len;
	skb->len += nskb->len;
	skb->data_len += nskb->len;

	/* create a brand new frags array and coalesce there */
	for (i = 0; offset < skb->len; i++) {
		struct page *page;
		unsigned int len;

		BUG_ON(i >= MAX_SKB_FRAGS);
		page = alloc_page(GFP_ATOMIC|__GFP_COLD);
		if (!page) {
			int j;
			skb->truesize += skb->data_len;
			for (j = 0; j < i; j++)
				put_page(frags[j].page.p);
			return -ENOMEM;
		}

		if (offset + PAGE_SIZE < skb->len)
			len = PAGE_SIZE;
		else
			len = skb->len - offset;
		if (skb_copy_bits(skb, offset, page_address(page), len))
			BUG();

		offset += len;
		frags[i].page.p = page;
		frags[i].page_offset = 0;
		skb_frag_size_set(&frags[i], len);
	}
	/* swap out with old one */
	memcpy(skb_shinfo(skb)->frags,
	       frags,
	       i * sizeof(skb_frag_t));
	skb_shinfo(skb)->nr_frags = i;
	skb->truesize += i * PAGE_SIZE;

	/* remove traces of mapped pages and frag_list */
	skb_frag_list_init(skb);
	uarg = skb_shinfo(skb)->destructor_arg;
	uarg->callback(uarg, true);
	skb_shinfo(skb)->destructor_arg = NULL;

	skb_shinfo(nskb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
	kfree_skb(nskb);

	return 0;
}

static int xenvif_tx_submit(struct xenvif *vif)
{
	struct gnttab_map_grant_ref *gop_map = vif->tx_map_ops;
	struct gnttab_copy *gop_copy = vif->tx_copy_ops;
	struct sk_buff *skb;
	int work_done = 0;

	while ((skb = __skb_dequeue(&vif->tx_queue)) != NULL) {
		struct xen_netif_tx_request *txp;
		u16 pending_idx;
		unsigned int data_len;

		pending_idx = XENVIF_TX_CB(skb)->pending_idx;
		txp = &vif->pending_tx_info[pending_idx].req;

		/* Check the remap error code. */
		if (unlikely(xenvif_tx_check_gop(vif, skb, &gop_map, &gop_copy))) {
			skb_shinfo(skb)->nr_frags = 0;
			kfree_skb(skb);
			continue;
		}

		data_len = skb->len;
		callback_param(vif, pending_idx).ctx = NULL;
		if (data_len < txp->size) {
			/* Append the packet payload as a fragment. */
			txp->offset += data_len;
			txp->size -= data_len;
		} else {
			/* Schedule a response immediately. */
			xenvif_idx_release(vif, pending_idx,
					   XEN_NETIF_RSP_OKAY);
		}

		if (txp->flags & XEN_NETTXF_csum_blank)
			skb->ip_summed = CHECKSUM_PARTIAL;
		else if (txp->flags & XEN_NETTXF_data_validated)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		xenvif_fill_frags(vif, skb);

		if (unlikely(skb_has_frag_list(skb))) {
			if (xenvif_handle_frag_list(vif, skb)) {
				if (net_ratelimit())
					netdev_err(vif->dev,
						   "Not enough memory to consolidate frag_list!\n");
				skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
				kfree_skb(skb);
				continue;
			}
		}

		if (skb_is_nonlinear(skb) && skb_headlen(skb) < PKT_PROT_LEN) {
			int target = min_t(int, skb->len, PKT_PROT_LEN);
			__pskb_pull_tail(skb, target - skb_headlen(skb));
		}

		skb->dev      = vif->dev;
		skb->protocol = eth_type_trans(skb, skb->dev);
		skb_reset_network_header(skb);

		if (checksum_setup(vif, skb)) {
			netdev_dbg(vif->dev,
				   "Can't set up checksum in net_tx_action\n");
			/* We have to set this flag to trigger the callback */
			if (skb_shinfo(skb)->destructor_arg)
				skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
			kfree_skb(skb);
			continue;
		}

		skb_probe_transport_header(skb, 0);

		/* If the packet is GSO then we will have just set up the
		 * transport header offset in checksum_setup so it's now
		 * straightforward to calculate gso_segs.
		 */
		if (skb_is_gso(skb)) {
			int mss = skb_shinfo(skb)->gso_size;
			int hdrlen = skb_transport_header(skb) -
				skb_mac_header(skb) +
				tcp_hdrlen(skb);

			skb_shinfo(skb)->gso_segs =
				DIV_ROUND_UP(skb->len - hdrlen, mss);
		}

		vif->dev->stats.rx_bytes += skb->len;
		vif->dev->stats.rx_packets++;

		work_done++;

		/* Set this flag right before netif_receive_skb, otherwise
		 * someone might think this packet already left netback, and
		 * do a skb_copy_ubufs while we are still in control of the
		 * skb. E.g. the __pskb_pull_tail earlier can do such thing.
		 */
		if (skb_shinfo(skb)->destructor_arg) {
			skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
			vif->tx_zerocopy_sent++;
		}

		netif_receive_skb(skb);
	}

	return work_done;
}

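/* Zerocopy callback, invoked when the last reference to a mapped tx
 * packet is dropped. Push each pending_idx of the chain onto the
 * dealloc ring and wake the dealloc thread; the grants are unmapped
 * and the slots handed back in xenvif_tx_dealloc_action().
 */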
void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
{
	unsigned long flags;
	pending_ring_idx_t index;
	struct xenvif *vif = ubuf_to_vif(ubuf);

	/* This is the only place where we grab this lock, to protect callbacks
	 * from each other.
	 */
	spin_lock_irqsave(&vif->callback_lock, flags);
	do {
		u16 pending_idx = ubuf->desc;
		ubuf = (struct ubuf_info *) ubuf->ctx;
		BUG_ON(vif->dealloc_prod - vif->dealloc_cons >=
			MAX_PENDING_REQS);
		index = pending_index(vif->dealloc_prod);
		vif->dealloc_ring[index] = pending_idx;
		/* Sync with xenvif_tx_dealloc_action:
		 * insert idx then incr producer.
		 */
		smp_wmb();
		vif->dealloc_prod++;
	} while (ubuf);
	wake_up(&vif->dealloc_wq);
	spin_unlock_irqrestore(&vif->callback_lock, flags);

	if (likely(zerocopy_success))
		vif->tx_zerocopy_success++;
	else
		vif->tx_zerocopy_fail++;
}

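/* Drain the dealloc ring: batch-unmap the grants recorded there and
 * release the corresponding pending slots back to the frontend.
 */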
static inline void xenvif_tx_dealloc_action(struct xenvif *vif)
{
	struct gnttab_unmap_grant_ref *gop;
	pending_ring_idx_t dc, dp;
	u16 pending_idx, pending_idx_release[MAX_PENDING_REQS];
	unsigned int i = 0;

	dc = vif->dealloc_cons;
	gop = vif->tx_unmap_ops;

	/* Free up any grants we have finished using */
	do {
		dp = vif->dealloc_prod;

		/* Ensure we see all indices enqueued by all
		 * xenvif_zerocopy_callback().
		 */
		smp_rmb();

		while (dc != dp) {
			BUG_ON(gop - vif->tx_unmap_ops > MAX_PENDING_REQS);
			pending_idx =
				vif->dealloc_ring[pending_index(dc++)];

			pending_idx_release[gop - vif->tx_unmap_ops] =
				pending_idx;
			vif->pages_to_unmap[gop - vif->tx_unmap_ops] =
				vif->mmap_pages[pending_idx];
			gnttab_set_unmap_op(gop,
					    idx_to_kaddr(vif, pending_idx),
					    GNTMAP_host_map,
					    vif->grant_tx_handle[pending_idx]);
			xenvif_grant_handle_reset(vif, pending_idx);
			++gop;
		}

	} while (dp != vif->dealloc_prod);

	vif->dealloc_cons = dc;

	if (gop - vif->tx_unmap_ops > 0) {
		int ret;
		ret = gnttab_unmap_refs(vif->tx_unmap_ops,
					NULL,
					vif->pages_to_unmap,
					gop - vif->tx_unmap_ops);
		if (ret) {
			netdev_err(vif->dev, "Unmap fail: nr_ops %tx ret %d\n",
				   gop - vif->tx_unmap_ops, ret);
			for (i = 0; i < gop - vif->tx_unmap_ops; ++i) {
				if (gop[i].status != GNTST_okay)
					netdev_err(vif->dev,
						   " host_addr: %llx handle: %x status: %d\n",
						   gop[i].host_addr,
						   gop[i].handle,
						   gop[i].status);
			}
			BUG();
		}
	}

	for (i = 0; i < gop - vif->tx_unmap_ops; ++i)
		xenvif_idx_release(vif, pending_idx_release[i],
				   XEN_NETIF_RSP_OKAY);
}

/* Called after netfront has transmitted */
int xenvif_tx_action(struct xenvif *vif, int budget)
{
	unsigned nr_mops, nr_cops = 0;
	int work_done, ret;

	if (unlikely(!tx_work_todo(vif)))
		return 0;

	xenvif_tx_build_gops(vif, budget, &nr_cops, &nr_mops);

	if (nr_cops == 0)
		return 0;

	gnttab_batch_copy(vif->tx_copy_ops, nr_cops);
	if (nr_mops != 0) {
		ret = gnttab_map_refs(vif->tx_map_ops,
				      NULL,
				      vif->pages_to_map,
				      nr_mops);
		BUG_ON(ret);
	}

	work_done = xenvif_tx_submit(vif);

	return work_done;
}

static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
			       u8 status)
{
	struct pending_tx_info *pending_tx_info;
	pending_ring_idx_t index;
	unsigned long flags;

	pending_tx_info = &vif->pending_tx_info[pending_idx];
	spin_lock_irqsave(&vif->response_lock, flags);
	make_tx_response(vif, &pending_tx_info->req, status);
	index = pending_index(vif->pending_prod);
	vif->pending_ring[index] = pending_idx;
	/* TX shouldn't use the index before we give it back here */
	mb();
	vif->pending_prod++;
	spin_unlock_irqrestore(&vif->response_lock, flags);
}

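/* Queue a tx response on the ring and notify the frontend if it is
 * waiting for one. A request flagged XEN_NETTXF_extra_info gets an
 * additional XEN_NETIF_RSP_NULL response so the ring indices stay in
 * step with the extra slot the frontend consumed.
 */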
static void make_tx_response(struct xenvif *vif,
			     struct xen_netif_tx_request *txp,
			     s8       st)
{
	RING_IDX i = vif->tx.rsp_prod_pvt;
	struct xen_netif_tx_response *resp;
	int notify;

	resp = RING_GET_RESPONSE(&vif->tx, i);
	resp->id     = txp->id;
	resp->status = st;

	if (txp->flags & XEN_NETTXF_extra_info)
		RING_GET_RESPONSE(&vif->tx, ++i)->status = XEN_NETIF_RSP_NULL;

	vif->tx.rsp_prod_pvt = ++i;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->tx, notify);
	if (notify)
		notify_remote_via_irq(vif->tx_irq);
}

static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
					     u16      id,
					     s8       st,
					     u16      offset,
					     u16      size,
					     u16      flags)
{
	RING_IDX i = vif->rx.rsp_prod_pvt;
	struct xen_netif_rx_response *resp;

	resp = RING_GET_RESPONSE(&vif->rx, i);
	resp->offset     = offset;
	resp->flags      = flags;
	resp->id         = id;
	resp->status     = (s16)size;
	if (st < 0)
		resp->status = (s16)st;

	vif->rx.rsp_prod_pvt = ++i;

	return resp;
}

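/* Synchronously unmap a single tx grant, e.g. to invalidate a frag
 * after a mapping error elsewhere in the same packet, and release its
 * pending slot.
 */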
void xenvif_idx_unmap(struct xenvif *vif, u16 pending_idx)
{
	int ret;
	struct gnttab_unmap_grant_ref tx_unmap_op;

	gnttab_set_unmap_op(&tx_unmap_op,
			    idx_to_kaddr(vif, pending_idx),
			    GNTMAP_host_map,
			    vif->grant_tx_handle[pending_idx]);
	xenvif_grant_handle_reset(vif, pending_idx);

	ret = gnttab_unmap_refs(&tx_unmap_op, NULL,
				&vif->mmap_pages[pending_idx], 1);
	if (ret) {
		netdev_err(vif->dev,
			   "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: %x status: %d\n",
			   ret,
			   pending_idx,
			   tx_unmap_op.host_addr,
			   tx_unmap_op.handle,
			   tx_unmap_op.status);
		BUG();
	}

	xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
}

static inline int rx_work_todo(struct xenvif *vif)
{
	return (!skb_queue_empty(&vif->rx_queue) &&
	       xenvif_rx_ring_slots_available(vif, vif->rx_last_skb_slots)) ||
	       vif->rx_queue_purge;
}

static inline int tx_work_todo(struct xenvif *vif)
{
	if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->tx)))
		return 1;

	return 0;
}

static inline bool tx_dealloc_work_todo(struct xenvif *vif)
{
	return vif->dealloc_cons != vif->dealloc_prod;
}

void xenvif_unmap_frontend_rings(struct xenvif *vif)
{
	if (vif->tx.sring)
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
					vif->tx.sring);
	if (vif->rx.sring)
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
					vif->rx.sring);
}

int xenvif_map_frontend_rings(struct xenvif *vif,
			      grant_ref_t tx_ring_ref,
			      grant_ref_t rx_ring_ref)
{
	void *addr;
	struct xen_netif_tx_sring *txs;
	struct xen_netif_rx_sring *rxs;

	int err = -ENOMEM;

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
				     tx_ring_ref, &addr);
	if (err)
		goto err;

	txs = (struct xen_netif_tx_sring *)addr;
	BACK_RING_INIT(&vif->tx, txs, PAGE_SIZE);

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
				     rx_ring_ref, &addr);
	if (err)
		goto err;

	rxs = (struct xen_netif_rx_sring *)addr;
	BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE);

	return 0;

err:
	xenvif_unmap_frontend_rings(vif);
	return err;
}

void xenvif_stop_queue(struct xenvif *vif)
{
	if (!vif->can_queue)
		return;

	netif_stop_queue(vif->dev);
}

static void xenvif_start_queue(struct xenvif *vif)
{
	if (xenvif_schedulable(vif))
		netif_wake_queue(vif->dev);
}

int xenvif_kthread_guest_rx(void *data)
{
	struct xenvif *vif = data;
	struct sk_buff *skb;

	while (!kthread_should_stop()) {
		wait_event_interruptible(vif->wq,
					 rx_work_todo(vif) ||
					 vif->disabled ||
					 kthread_should_stop());

		/* If this frontend has been found to be rogue, disable it
		 * in kthread context. Currently this is only set when
		 * netback finds out the frontend sent a malformed packet,
		 * but we cannot disable the interface in softirq
		 * context so we defer it here.
		 */
		if (unlikely(vif->disabled && netif_carrier_ok(vif->dev)))
			xenvif_carrier_off(vif);

		if (kthread_should_stop())
			break;

		if (vif->rx_queue_purge) {
			skb_queue_purge(&vif->rx_queue);
			vif->rx_queue_purge = false;
		}

		if (!skb_queue_empty(&vif->rx_queue))
			xenvif_rx_action(vif);

		if (skb_queue_empty(&vif->rx_queue) &&
		    netif_queue_stopped(vif->dev)) {
			del_timer_sync(&vif->wake_queue);
			xenvif_start_queue(vif);
		}

		cond_resched();
	}

	/* Bin any remaining skbs */
	while ((skb = skb_dequeue(&vif->rx_queue)) != NULL)
		dev_kfree_skb(skb);

	return 0;
}

int xenvif_dealloc_kthread(void *data)
{
	struct xenvif *vif = data;

	while (!kthread_should_stop()) {
		wait_event_interruptible(vif->dealloc_wq,
					 tx_dealloc_work_todo(vif) ||
					 kthread_should_stop());
		if (kthread_should_stop())
			break;

		xenvif_tx_dealloc_action(vif);
		cond_resched();
	}

	/* Unmap anything remaining */
	if (tx_dealloc_work_todo(vif))
		xenvif_tx_dealloc_action(vif);

	return 0;
}

static int __init netback_init(void)
{
	int rc = 0;

	if (!xen_domain())
		return -ENODEV;

	if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
		pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
			fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
		fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
	}

	rc = xenvif_xenbus_init();
	if (rc)
		goto failed_init;

	rx_drain_timeout_jiffies = msecs_to_jiffies(rx_drain_timeout_msecs);

	return 0;

failed_init:
	return rc;
}

module_init(netback_init);

static void __exit netback_fini(void)
{
	xenvif_xenbus_fini();
}
module_exit(netback_fini);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vif");