1 /*
2  * Back-end of the driver for virtual network devices. This portion of the
3  * driver exports a 'unified' network-device interface that can be accessed
4  * by any operating system that implements a compatible front end. A
5  * reference front-end implementation can be found in:
6  *  drivers/net/xen-netfront.c
7  *
8  * Copyright (c) 2002-2005, K A Fraser
9  *
10  * This program is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU General Public License version 2
12  * as published by the Free Software Foundation; or, when distributed
13  * separately from the Linux kernel or incorporated into other
14  * software packages, subject to the following license:
15  *
16  * Permission is hereby granted, free of charge, to any person obtaining a copy
17  * of this source file (the "Software"), to deal in the Software without
18  * restriction, including without limitation the rights to use, copy, modify,
19  * merge, publish, distribute, sublicense, and/or sell copies of the Software,
20  * and to permit persons to whom the Software is furnished to do so, subject to
21  * the following conditions:
22  *
23  * The above copyright notice and this permission notice shall be included in
24  * all copies or substantial portions of the Software.
25  *
26  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
27  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
28  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
29  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
30  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
31  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
32  * IN THE SOFTWARE.
33  */
34 
35 #include "common.h"
36 
37 #include <linux/kthread.h>
38 #include <linux/if_vlan.h>
39 #include <linux/udp.h>
40 
41 #include <net/tcp.h>
42 
43 #include <xen/xen.h>
44 #include <xen/events.h>
45 #include <xen/interface/memory.h>
46 
47 #include <asm/xen/hypercall.h>
48 #include <asm/xen/page.h>
49 
/* Provide an option to disable split event channels at load time as
 * event channels are a limited resource. Split event channels are
 * enabled by default.
 */
bool separate_tx_rx_irq = true;
55 module_param(separate_tx_rx_irq, bool, 0644);
56 
/*
 * This is the maximum number of slots an skb can have. If a guest
 * sends an skb which exceeds this limit it is considered malicious.
 */
61 #define FATAL_SKB_SLOTS_DEFAULT 20
62 static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
63 module_param(fatal_skb_slots, uint, 0444);
64 
/*
 * To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX to indicate
 * the maximum number of slots a valid packet can use. This value is
 * currently defined to be XEN_NETIF_NR_SLOTS_MIN, which is supposed to
 * be supported by all backends.
 */
71 #define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN
72 
/*
 * If head != INVALID_PENDING_RING_IDX, this tx request is the head of
 * one or more merged tx requests; otherwise it is the continuation of
 * a previous tx request.
 */
78 static inline int pending_tx_is_head(struct xenvif *vif, RING_IDX idx)
79 {
80 	return vif->pending_tx_info[idx].head != INVALID_PENDING_RING_IDX;
81 }
82 
83 static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
84 			       u8 status);
85 
86 static void make_tx_response(struct xenvif *vif,
87 			     struct xen_netif_tx_request *txp,
88 			     s8       st);
89 
90 static inline int tx_work_todo(struct xenvif *vif);
91 static inline int rx_work_todo(struct xenvif *vif);
92 
93 static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
94 					     u16      id,
95 					     s8       st,
96 					     u16      offset,
97 					     u16      size,
98 					     u16      flags);
99 
100 static inline unsigned long idx_to_pfn(struct xenvif *vif,
101 				       u16 idx)
102 {
103 	return page_to_pfn(vif->mmap_pages[idx]);
104 }
105 
106 static inline unsigned long idx_to_kaddr(struct xenvif *vif,
107 					 u16 idx)
108 {
109 	return (unsigned long)pfn_to_kaddr(idx_to_pfn(vif, idx));
110 }
111 
/* This is a minimum size for the linear area to avoid lots of
 * calls to __pskb_pull_tail() as we set up checksum offsets. The
 * value 128 was chosen as it covers all IPv4 and most likely
 * IPv6 headers.
 */
117 #define PKT_PROT_LEN 128
118 
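/* While a packet is being assembled, the page_offset field of each skb
 * fragment is borrowed to stash the pending ring index of the tx
 * request backing it; xenvif_fill_frags() later replaces it with the
 * real page, offset and size.
 */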
119 static u16 frag_get_pending_idx(skb_frag_t *frag)
120 {
121 	return (u16)frag->page_offset;
122 }
123 
124 static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
125 {
126 	frag->page_offset = pending_idx;
127 }
128 
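/* Map a free-running counter onto a slot in the pending ring; this
 * relies on MAX_PENDING_REQS being a power of two.
 */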
129 static inline pending_ring_idx_t pending_index(unsigned i)
130 {
131 	return i & (MAX_PENDING_REQS-1);
132 }
133 
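/* Number of pending tx requests currently in use, i.e. consumed from
 * the pending ring but not yet released again.
 */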
134 static inline pending_ring_idx_t nr_pending_reqs(struct xenvif *vif)
135 {
136 	return MAX_PENDING_REQS -
137 		vif->pending_prod + vif->pending_cons;
138 }
139 
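/* Check whether the frontend has posted at least 'needed' rx requests.
 * If not, arm req_event so that the frontend's next request raises an
 * interrupt, then re-check in case more requests arrived in the
 * meantime.
 */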
140 bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed)
141 {
142 	RING_IDX prod, cons;
143 
144 	do {
145 		prod = vif->rx.sring->req_prod;
146 		cons = vif->rx.req_cons;
147 
148 		if (prod - cons >= needed)
149 			return true;
150 
151 		vif->rx.sring->req_event = prod + 1;
152 
153 		/* Make sure event is visible before we check prod
154 		 * again.
155 		 */
156 		mb();
157 	} while (vif->rx.sring->req_prod != prod);
158 
159 	return false;
160 }
161 
162 /*
163  * Returns true if we should start a new receive buffer instead of
164  * adding 'size' bytes to a buffer which currently contains 'offset'
165  * bytes.
166  */
167 static bool start_new_rx_buffer(int offset, unsigned long size, int head)
168 {
169 	/* simple case: we have completely filled the current buffer. */
170 	if (offset == MAX_BUFFER_OFFSET)
171 		return true;
172 
173 	/*
174 	 * complex case: start a fresh buffer if the current frag
175 	 * would overflow the current buffer but only if:
176 	 *     (i)   this frag would fit completely in the next buffer
177 	 * and (ii)  there is already some data in the current buffer
178 	 * and (iii) this is not the head buffer.
179 	 *
180 	 * Where:
181 	 * - (i) stops us splitting a frag into two copies
182 	 *   unless the frag is too large for a single buffer.
183 	 * - (ii) stops us from leaving a buffer pointlessly empty.
184 	 * - (iii) stops us leaving the first buffer
185 	 *   empty. Strictly speaking this is already covered
186 	 *   by (ii) but is explicitly checked because
187 	 *   netfront relies on the first buffer being
188 	 *   non-empty and can crash otherwise.
189 	 *
	 * This means we will effectively linearise small
	 * frags but do not needlessly split large buffers
	 * into multiple copies, tending to give large frags
	 * their own buffers as before.
194 	 */
195 	if ((offset + size > MAX_BUFFER_OFFSET) &&
196 	    (size <= MAX_BUFFER_OFFSET) && offset && !head)
197 		return true;
198 
199 	return false;
200 }
201 
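/* Book-keeping for a batch of guest rx work: producer/consumer indices
 * into the arrays of grant copy operations and meta slots, plus the
 * offset into and grant ref of the rx buffer currently being filled.
 */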
202 struct netrx_pending_operations {
203 	unsigned copy_prod, copy_cons;
204 	unsigned meta_prod, meta_cons;
205 	struct gnttab_copy *copy;
206 	struct xenvif_rx_meta *meta;
207 	int copy_off;
208 	grant_ref_t copy_gref;
209 };
210 
211 static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif,
212 						 struct netrx_pending_operations *npo)
213 {
214 	struct xenvif_rx_meta *meta;
215 	struct xen_netif_rx_request *req;
216 
217 	req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
218 
219 	meta = npo->meta + npo->meta_prod++;
220 	meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
221 	meta->gso_size = 0;
222 	meta->size = 0;
223 	meta->id = req->id;
224 
225 	npo->copy_off = 0;
226 	npo->copy_gref = req->gref;
227 
228 	return meta;
229 }
230 
/*
 * Set up the grant copy operations needed to transfer this fragment
 * to the frontend's rx buffers.
 */
235 static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
236 				 struct netrx_pending_operations *npo,
237 				 struct page *page, unsigned long size,
238 				 unsigned long offset, int *head)
239 {
240 	struct gnttab_copy *copy_gop;
241 	struct xenvif_rx_meta *meta;
242 	unsigned long bytes;
243 	int gso_type;
244 
245 	/* Data must not cross a page boundary. */
246 	BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
247 
248 	meta = npo->meta + npo->meta_prod - 1;
249 
250 	/* Skip unused frames from start of page */
251 	page += offset >> PAGE_SHIFT;
252 	offset &= ~PAGE_MASK;
253 
254 	while (size > 0) {
255 		BUG_ON(offset >= PAGE_SIZE);
256 		BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
257 
258 		bytes = PAGE_SIZE - offset;
259 
260 		if (bytes > size)
261 			bytes = size;
262 
263 		if (start_new_rx_buffer(npo->copy_off, bytes, *head)) {
264 			/*
265 			 * Netfront requires there to be some data in the head
266 			 * buffer.
267 			 */
268 			BUG_ON(*head);
269 
270 			meta = get_next_rx_buffer(vif, npo);
271 		}
272 
273 		if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
274 			bytes = MAX_BUFFER_OFFSET - npo->copy_off;
275 
276 		copy_gop = npo->copy + npo->copy_prod++;
277 		copy_gop->flags = GNTCOPY_dest_gref;
278 		copy_gop->len = bytes;
279 
280 		copy_gop->source.domid = DOMID_SELF;
281 		copy_gop->source.u.gmfn = virt_to_mfn(page_address(page));
282 		copy_gop->source.offset = offset;
283 
284 		copy_gop->dest.domid = vif->domid;
285 		copy_gop->dest.offset = npo->copy_off;
286 		copy_gop->dest.u.ref = npo->copy_gref;
287 
288 		npo->copy_off += bytes;
289 		meta->size += bytes;
290 
291 		offset += bytes;
292 		size -= bytes;
293 
294 		/* Next frame */
295 		if (offset == PAGE_SIZE && size) {
296 			BUG_ON(!PageCompound(page));
297 			page++;
298 			offset = 0;
299 		}
300 
301 		/* Leave a gap for the GSO descriptor. */
302 		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
303 			gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
304 		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
305 			gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
306 		else
307 			gso_type = XEN_NETIF_GSO_TYPE_NONE;
308 
309 		if (*head && ((1 << gso_type) & vif->gso_mask))
310 			vif->rx.req_cons++;
311 
312 		*head = 0; /* There must be something in this buffer now. */
313 
314 	}
315 }
316 
317 /*
318  * Prepare an SKB to be transmitted to the frontend.
319  *
320  * This function is responsible for allocating grant operations, meta
321  * structures, etc.
322  *
323  * It returns the number of meta structures consumed. The number of
324  * ring slots used is always equal to the number of meta slots used
325  * plus the number of GSO descriptors used. Currently, we use either
326  * zero GSO descriptors (for non-GSO packets) or one descriptor (for
327  * frontend-side LRO).
328  */
329 static int xenvif_gop_skb(struct sk_buff *skb,
330 			  struct netrx_pending_operations *npo)
331 {
332 	struct xenvif *vif = netdev_priv(skb->dev);
333 	int nr_frags = skb_shinfo(skb)->nr_frags;
334 	int i;
335 	struct xen_netif_rx_request *req;
336 	struct xenvif_rx_meta *meta;
337 	unsigned char *data;
338 	int head = 1;
339 	int old_meta_prod;
340 	int gso_type;
341 	int gso_size;
342 
343 	old_meta_prod = npo->meta_prod;
344 
345 	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
346 		gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
347 		gso_size = skb_shinfo(skb)->gso_size;
348 	} else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
349 		gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
350 		gso_size = skb_shinfo(skb)->gso_size;
351 	} else {
352 		gso_type = XEN_NETIF_GSO_TYPE_NONE;
353 		gso_size = 0;
354 	}
355 
356 	/* Set up a GSO prefix descriptor, if necessary */
357 	if ((1 << gso_type) & vif->gso_prefix_mask) {
358 		req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
359 		meta = npo->meta + npo->meta_prod++;
360 		meta->gso_type = gso_type;
361 		meta->gso_size = gso_size;
362 		meta->size = 0;
363 		meta->id = req->id;
364 	}
365 
366 	req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
367 	meta = npo->meta + npo->meta_prod++;
368 
369 	if ((1 << gso_type) & vif->gso_mask) {
370 		meta->gso_type = gso_type;
371 		meta->gso_size = gso_size;
372 	} else {
373 		meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
374 		meta->gso_size = 0;
375 	}
376 
377 	meta->size = 0;
378 	meta->id = req->id;
379 	npo->copy_off = 0;
380 	npo->copy_gref = req->gref;
381 
382 	data = skb->data;
383 	while (data < skb_tail_pointer(skb)) {
384 		unsigned int offset = offset_in_page(data);
385 		unsigned int len = PAGE_SIZE - offset;
386 
387 		if (data + len > skb_tail_pointer(skb))
388 			len = skb_tail_pointer(skb) - data;
389 
390 		xenvif_gop_frag_copy(vif, skb, npo,
391 				     virt_to_page(data), len, offset, &head);
392 		data += len;
393 	}
394 
395 	for (i = 0; i < nr_frags; i++) {
396 		xenvif_gop_frag_copy(vif, skb, npo,
397 				     skb_frag_page(&skb_shinfo(skb)->frags[i]),
398 				     skb_frag_size(&skb_shinfo(skb)->frags[i]),
399 				     skb_shinfo(skb)->frags[i].page_offset,
400 				     &head);
401 	}
402 
403 	return npo->meta_prod - old_meta_prod;
404 }
405 
406 /*
 * This is a twin to xenvif_gop_skb. Assume that xenvif_gop_skb was
 * used to set up the operations at the top of
 * netrx_pending_operations, which have since been done. Check that
410  * they didn't give any errors and advance over them.
411  */
412 static int xenvif_check_gop(struct xenvif *vif, int nr_meta_slots,
413 			    struct netrx_pending_operations *npo)
414 {
415 	struct gnttab_copy     *copy_op;
416 	int status = XEN_NETIF_RSP_OKAY;
417 	int i;
418 
419 	for (i = 0; i < nr_meta_slots; i++) {
420 		copy_op = npo->copy + npo->copy_cons++;
421 		if (copy_op->status != GNTST_okay) {
422 			netdev_dbg(vif->dev,
423 				   "Bad status %d from copy to DOM%d.\n",
424 				   copy_op->status, vif->domid);
425 			status = XEN_NETIF_RSP_ERROR;
426 		}
427 	}
428 
429 	return status;
430 }
431 
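/* Generate rx responses for the frag meta slots of a packet. The
 * response for the first (header) slot is written by the caller; this
 * covers only the remaining slots.
 */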
432 static void xenvif_add_frag_responses(struct xenvif *vif, int status,
433 				      struct xenvif_rx_meta *meta,
434 				      int nr_meta_slots)
435 {
436 	int i;
437 	unsigned long offset;
438 
439 	/* No fragments used */
440 	if (nr_meta_slots <= 1)
441 		return;
442 
443 	nr_meta_slots--;
444 
445 	for (i = 0; i < nr_meta_slots; i++) {
446 		int flags;
447 		if (i == nr_meta_slots - 1)
448 			flags = 0;
449 		else
450 			flags = XEN_NETRXF_more_data;
451 
452 		offset = 0;
453 		make_rx_response(vif, meta[i].id, status, offset,
454 				 meta[i].size, flags);
455 	}
456 }
457 
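/* Scratch space overlaid on skb->cb, used to carry the number of meta
 * slots a packet consumed from xenvif_gop_skb() to the response
 * generation in xenvif_rx_action().
 */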
458 struct skb_cb_overlay {
459 	int meta_slots_used;
460 };
461 
462 void xenvif_kick_thread(struct xenvif *vif)
463 {
464 	wake_up(&vif->wq);
465 }
466 
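/* Drain the guest rx queue: for each skb that fits in the shared ring,
 * build the grant copy operations, issue them as a single batch and
 * then write the corresponding rx responses (plus any GSO extra info
 * slots).
 */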
467 static void xenvif_rx_action(struct xenvif *vif)
468 {
469 	s8 status;
470 	u16 flags;
471 	struct xen_netif_rx_response *resp;
472 	struct sk_buff_head rxq;
473 	struct sk_buff *skb;
474 	LIST_HEAD(notify);
475 	int ret;
476 	unsigned long offset;
477 	struct skb_cb_overlay *sco;
478 	bool need_to_notify = false;
479 
480 	struct netrx_pending_operations npo = {
481 		.copy  = vif->grant_copy_op,
482 		.meta  = vif->meta,
483 	};
484 
485 	skb_queue_head_init(&rxq);
486 
487 	while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
488 		RING_IDX max_slots_needed;
489 		int i;
490 
		/* We need a cheap worst-case estimate for the number of
		 * slots we'll use.
		 */
494 
495 		max_slots_needed = DIV_ROUND_UP(offset_in_page(skb->data) +
496 						skb_headlen(skb),
497 						PAGE_SIZE);
498 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
499 			unsigned int size;
500 			size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
501 			max_slots_needed += DIV_ROUND_UP(size, PAGE_SIZE);
502 		}
503 		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
504 		    skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
505 			max_slots_needed++;
506 
507 		/* If the skb may not fit then bail out now */
508 		if (!xenvif_rx_ring_slots_available(vif, max_slots_needed)) {
509 			skb_queue_head(&vif->rx_queue, skb);
510 			need_to_notify = true;
511 			vif->rx_last_skb_slots = max_slots_needed;
512 			break;
513 		} else
514 			vif->rx_last_skb_slots = 0;
515 
516 		sco = (struct skb_cb_overlay *)skb->cb;
517 		sco->meta_slots_used = xenvif_gop_skb(skb, &npo);
518 		BUG_ON(sco->meta_slots_used > max_slots_needed);
519 
520 		__skb_queue_tail(&rxq, skb);
521 	}
522 
523 	BUG_ON(npo.meta_prod > ARRAY_SIZE(vif->meta));
524 
525 	if (!npo.copy_prod)
526 		goto done;
527 
528 	BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
529 	gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod);
530 
531 	while ((skb = __skb_dequeue(&rxq)) != NULL) {
532 		sco = (struct skb_cb_overlay *)skb->cb;
533 
534 		if ((1 << vif->meta[npo.meta_cons].gso_type) &
535 		    vif->gso_prefix_mask) {
536 			resp = RING_GET_RESPONSE(&vif->rx,
537 						 vif->rx.rsp_prod_pvt++);
538 
539 			resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;
540 
541 			resp->offset = vif->meta[npo.meta_cons].gso_size;
542 			resp->id = vif->meta[npo.meta_cons].id;
543 			resp->status = sco->meta_slots_used;
544 
545 			npo.meta_cons++;
546 			sco->meta_slots_used--;
547 		}
548 
549 
550 		vif->dev->stats.tx_bytes += skb->len;
551 		vif->dev->stats.tx_packets++;
552 
553 		status = xenvif_check_gop(vif, sco->meta_slots_used, &npo);
554 
555 		if (sco->meta_slots_used == 1)
556 			flags = 0;
557 		else
558 			flags = XEN_NETRXF_more_data;
559 
560 		if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
561 			flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated;
562 		else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
563 			/* remote but checksummed. */
564 			flags |= XEN_NETRXF_data_validated;
565 
566 		offset = 0;
567 		resp = make_rx_response(vif, vif->meta[npo.meta_cons].id,
568 					status, offset,
569 					vif->meta[npo.meta_cons].size,
570 					flags);
571 
572 		if ((1 << vif->meta[npo.meta_cons].gso_type) &
573 		    vif->gso_mask) {
574 			struct xen_netif_extra_info *gso =
575 				(struct xen_netif_extra_info *)
576 				RING_GET_RESPONSE(&vif->rx,
577 						  vif->rx.rsp_prod_pvt++);
578 
579 			resp->flags |= XEN_NETRXF_extra_info;
580 
581 			gso->u.gso.type = vif->meta[npo.meta_cons].gso_type;
582 			gso->u.gso.size = vif->meta[npo.meta_cons].gso_size;
583 			gso->u.gso.pad = 0;
584 			gso->u.gso.features = 0;
585 
586 			gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
587 			gso->flags = 0;
588 		}
589 
590 		xenvif_add_frag_responses(vif, status,
591 					  vif->meta + npo.meta_cons + 1,
592 					  sco->meta_slots_used);
593 
594 		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
595 
596 		need_to_notify |= !!ret;
597 
598 		npo.meta_cons += sco->meta_slots_used;
599 		dev_kfree_skb(skb);
600 	}
601 
602 done:
603 	if (need_to_notify)
604 		notify_remote_via_irq(vif->rx_irq);
605 }
606 
607 void xenvif_check_rx_xenvif(struct xenvif *vif)
608 {
609 	int more_to_do;
610 
611 	RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
612 
613 	if (more_to_do)
614 		napi_schedule(&vif->napi);
615 }
616 
617 static void tx_add_credit(struct xenvif *vif)
618 {
619 	unsigned long max_burst, max_credit;
620 
621 	/*
622 	 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
623 	 * Otherwise the interface can seize up due to insufficient credit.
624 	 */
625 	max_burst = RING_GET_REQUEST(&vif->tx, vif->tx.req_cons)->size;
626 	max_burst = min(max_burst, 131072UL);
627 	max_burst = max(max_burst, vif->credit_bytes);
628 
629 	/* Take care that adding a new chunk of credit doesn't wrap to zero. */
630 	max_credit = vif->remaining_credit + vif->credit_bytes;
631 	if (max_credit < vif->remaining_credit)
632 		max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
633 
634 	vif->remaining_credit = min(max_credit, max_burst);
635 }
636 
637 static void tx_credit_callback(unsigned long data)
638 {
639 	struct xenvif *vif = (struct xenvif *)data;
640 	tx_add_credit(vif);
641 	xenvif_check_rx_xenvif(vif);
642 }
643 
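/* Complete the tx requests from 'txp' up to ring index 'end' with an
 * error status, and advance the request consumer past them.
 */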
644 static void xenvif_tx_err(struct xenvif *vif,
645 			  struct xen_netif_tx_request *txp, RING_IDX end)
646 {
647 	RING_IDX cons = vif->tx.req_cons;
648 
649 	do {
650 		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
651 		if (cons == end)
652 			break;
653 		txp = RING_GET_REQUEST(&vif->tx, cons++);
654 	} while (1);
655 	vif->tx.req_cons = cons;
656 }
657 
658 static void xenvif_fatal_tx_err(struct xenvif *vif)
659 {
660 	netdev_err(vif->dev, "fatal error; disabling device\n");
661 	xenvif_carrier_off(vif);
662 }
663 
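/* Walk the chain of XEN_NETTXF_more_data slots that follows 'first',
 * copying them into 'txp' and validating each one. Returns the number
 * of extra slots used by the packet, or a negative error; fatal errors
 * also disable the vif.
 */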
664 static int xenvif_count_requests(struct xenvif *vif,
665 				 struct xen_netif_tx_request *first,
666 				 struct xen_netif_tx_request *txp,
667 				 int work_to_do)
668 {
669 	RING_IDX cons = vif->tx.req_cons;
670 	int slots = 0;
671 	int drop_err = 0;
672 	int more_data;
673 
674 	if (!(first->flags & XEN_NETTXF_more_data))
675 		return 0;
676 
677 	do {
678 		struct xen_netif_tx_request dropped_tx = { 0 };
679 
680 		if (slots >= work_to_do) {
681 			netdev_err(vif->dev,
				   "Asked for more slots than the %d remaining on the ring\n",
683 				   work_to_do);
684 			xenvif_fatal_tx_err(vif);
685 			return -ENODATA;
686 		}
687 
		/* This guest is really using too many slots and is
		 * therefore considered malicious.
		 */
691 		if (unlikely(slots >= fatal_skb_slots)) {
692 			netdev_err(vif->dev,
693 				   "Malicious frontend using %d slots, threshold %u\n",
694 				   slots, fatal_skb_slots);
695 			xenvif_fatal_tx_err(vif);
696 			return -E2BIG;
697 		}
698 
		/* The Xen network protocol had an implicit dependency on
		 * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to
		 * the historical MAX_SKB_FRAGS value 18 to honor the
		 * same behavior as before. Any packet using more than
		 * 18 slots but less than fatal_skb_slots slots is
		 * dropped.
		 */
706 		if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
707 			if (net_ratelimit())
708 				netdev_dbg(vif->dev,
709 					   "Too many slots (%d) exceeding limit (%d), dropping packet\n",
710 					   slots, XEN_NETBK_LEGACY_SLOTS_MAX);
711 			drop_err = -E2BIG;
712 		}
713 
714 		if (drop_err)
715 			txp = &dropped_tx;
716 
717 		memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + slots),
718 		       sizeof(*txp));
719 
720 		/* If the guest submitted a frame >= 64 KiB then
721 		 * first->size overflowed and following slots will
722 		 * appear to be larger than the frame.
723 		 *
		 * This cannot be a fatal error as there are buggy
725 		 * frontends that do this.
726 		 *
727 		 * Consume all slots and drop the packet.
728 		 */
729 		if (!drop_err && txp->size > first->size) {
730 			if (net_ratelimit())
731 				netdev_dbg(vif->dev,
732 					   "Invalid tx request, slot size %u > remaining size %u\n",
733 					   txp->size, first->size);
734 			drop_err = -EIO;
735 		}
736 
737 		first->size -= txp->size;
738 		slots++;
739 
740 		if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
741 			netdev_err(vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
742 				 txp->offset, txp->size);
743 			xenvif_fatal_tx_err(vif);
744 			return -EINVAL;
745 		}
746 
747 		more_data = txp->flags & XEN_NETTXF_more_data;
748 
749 		if (!drop_err)
750 			txp++;
751 
752 	} while (more_data);
753 
754 	if (drop_err) {
755 		xenvif_tx_err(vif, first, cons + slots);
756 		return drop_err;
757 	}
758 
759 	return slots;
760 }
761 
762 static struct page *xenvif_alloc_page(struct xenvif *vif,
763 				      u16 pending_idx)
764 {
765 	struct page *page;
766 
767 	page = alloc_page(GFP_ATOMIC|__GFP_COLD);
768 	if (!page)
769 		return NULL;
770 	vif->mmap_pages[pending_idx] = page;
771 
772 	return page;
773 }
774 
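/* Build the grant copy operations for the frag slots of a packet,
 * coalescing several small tx requests into a single backend page
 * where possible. Returns the next free copy op on success, or NULL
 * after unwinding on allocation failure.
 */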
775 static struct gnttab_copy *xenvif_get_requests(struct xenvif *vif,
776 					       struct sk_buff *skb,
777 					       struct xen_netif_tx_request *txp,
778 					       struct gnttab_copy *gop)
779 {
780 	struct skb_shared_info *shinfo = skb_shinfo(skb);
781 	skb_frag_t *frags = shinfo->frags;
782 	u16 pending_idx = *((u16 *)skb->data);
783 	u16 head_idx = 0;
784 	int slot, start;
785 	struct page *page;
786 	pending_ring_idx_t index, start_idx = 0;
787 	uint16_t dst_offset;
788 	unsigned int nr_slots;
789 	struct pending_tx_info *first = NULL;
790 
791 	/* At this point shinfo->nr_frags is in fact the number of
792 	 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
793 	 */
794 	nr_slots = shinfo->nr_frags;
795 
796 	/* Skip first skb fragment if it is on same page as header fragment. */
797 	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
798 
	/* Coalesce tx requests; at this point the packet passed in
	 * should be <= 64K. Any packet larger than 64K has already been
	 * handled in xenvif_count_requests().
802 	 */
803 	for (shinfo->nr_frags = slot = start; slot < nr_slots;
804 	     shinfo->nr_frags++) {
805 		struct pending_tx_info *pending_tx_info =
806 			vif->pending_tx_info;
807 
808 		page = alloc_page(GFP_ATOMIC|__GFP_COLD);
809 		if (!page)
810 			goto err;
811 
812 		dst_offset = 0;
813 		first = NULL;
814 		while (dst_offset < PAGE_SIZE && slot < nr_slots) {
815 			gop->flags = GNTCOPY_source_gref;
816 
817 			gop->source.u.ref = txp->gref;
818 			gop->source.domid = vif->domid;
819 			gop->source.offset = txp->offset;
820 
821 			gop->dest.domid = DOMID_SELF;
822 
823 			gop->dest.offset = dst_offset;
824 			gop->dest.u.gmfn = virt_to_mfn(page_address(page));
825 
826 			if (dst_offset + txp->size > PAGE_SIZE) {
827 				/* This page can only merge a portion
828 				 * of tx request. Do not increment any
829 				 * pointer / counter here. The txp
830 				 * will be dealt with in future
831 				 * rounds, eventually hitting the
832 				 * `else` branch.
833 				 */
834 				gop->len = PAGE_SIZE - dst_offset;
835 				txp->offset += gop->len;
836 				txp->size -= gop->len;
837 				dst_offset += gop->len; /* quit loop */
838 			} else {
839 				/* This tx request can be merged in the page */
840 				gop->len = txp->size;
841 				dst_offset += gop->len;
842 
843 				index = pending_index(vif->pending_cons++);
844 
845 				pending_idx = vif->pending_ring[index];
846 
847 				memcpy(&pending_tx_info[pending_idx].req, txp,
848 				       sizeof(*txp));
849 
				/* Poison these fields; the corresponding
				 * fields for the head tx req will be set
				 * to the correct values after the loop.
				 */
854 				vif->mmap_pages[pending_idx] = (void *)(~0UL);
855 				pending_tx_info[pending_idx].head =
856 					INVALID_PENDING_RING_IDX;
857 
858 				if (!first) {
859 					first = &pending_tx_info[pending_idx];
860 					start_idx = index;
861 					head_idx = pending_idx;
862 				}
863 
864 				txp++;
865 				slot++;
866 			}
867 
868 			gop++;
869 		}
870 
871 		first->req.offset = 0;
872 		first->req.size = dst_offset;
873 		first->head = start_idx;
874 		vif->mmap_pages[head_idx] = page;
875 		frag_set_pending_idx(&frags[shinfo->nr_frags], head_idx);
876 	}
877 
878 	BUG_ON(shinfo->nr_frags > MAX_SKB_FRAGS);
879 
880 	return gop;
881 err:
882 	/* Unwind, freeing all pages and sending error responses. */
883 	while (shinfo->nr_frags-- > start) {
884 		xenvif_idx_release(vif,
885 				frag_get_pending_idx(&frags[shinfo->nr_frags]),
886 				XEN_NETIF_RSP_ERROR);
887 	}
888 	/* The head too, if necessary. */
889 	if (start)
890 		xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
891 
892 	return NULL;
893 }
894 
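/* Check the status of the batched grant copies for one skb. The first
 * error also causes the header and the other fragments to be released
 * so that every slot belonging to the packet is handed back. Returns 0
 * if all copies succeeded.
 */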
895 static int xenvif_tx_check_gop(struct xenvif *vif,
896 			       struct sk_buff *skb,
897 			       struct gnttab_copy **gopp)
898 {
899 	struct gnttab_copy *gop = *gopp;
900 	u16 pending_idx = *((u16 *)skb->data);
901 	struct skb_shared_info *shinfo = skb_shinfo(skb);
902 	struct pending_tx_info *tx_info;
903 	int nr_frags = shinfo->nr_frags;
904 	int i, err, start;
905 	u16 peek; /* peek into next tx request */
906 
907 	/* Check status of header. */
908 	err = gop->status;
909 	if (unlikely(err))
910 		xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
911 
912 	/* Skip first skb fragment if it is on same page as header fragment. */
913 	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
914 
915 	for (i = start; i < nr_frags; i++) {
916 		int j, newerr;
917 		pending_ring_idx_t head;
918 
919 		pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
920 		tx_info = &vif->pending_tx_info[pending_idx];
921 		head = tx_info->head;
922 
		/* Check the error status of each copy op covering this frag. */
924 		do {
925 			newerr = (++gop)->status;
926 			if (newerr)
927 				break;
928 			peek = vif->pending_ring[pending_index(++head)];
929 		} while (!pending_tx_is_head(vif, peek));
930 
931 		if (likely(!newerr)) {
932 			/* Had a previous error? Invalidate this fragment. */
933 			if (unlikely(err))
934 				xenvif_idx_release(vif, pending_idx,
935 						   XEN_NETIF_RSP_OKAY);
936 			continue;
937 		}
938 
939 		/* Error on this fragment: respond to client with an error. */
940 		xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
941 
942 		/* Not the first error? Preceding frags already invalidated. */
943 		if (err)
944 			continue;
945 
946 		/* First error: invalidate header and preceding fragments. */
947 		pending_idx = *((u16 *)skb->data);
948 		xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
949 		for (j = start; j < i; j++) {
950 			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
951 			xenvif_idx_release(vif, pending_idx,
952 					   XEN_NETIF_RSP_OKAY);
953 		}
954 
955 		/* Remember the error: invalidate all subsequent fragments. */
956 		err = newerr;
957 	}
958 
959 	*gopp = gop + 1;
960 	return err;
961 }
962 
963 static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)
964 {
965 	struct skb_shared_info *shinfo = skb_shinfo(skb);
966 	int nr_frags = shinfo->nr_frags;
967 	int i;
968 
969 	for (i = 0; i < nr_frags; i++) {
970 		skb_frag_t *frag = shinfo->frags + i;
971 		struct xen_netif_tx_request *txp;
972 		struct page *page;
973 		u16 pending_idx;
974 
975 		pending_idx = frag_get_pending_idx(frag);
976 
977 		txp = &vif->pending_tx_info[pending_idx].req;
978 		page = virt_to_page(idx_to_kaddr(vif, pending_idx));
979 		__skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
980 		skb->len += txp->size;
981 		skb->data_len += txp->size;
982 		skb->truesize += txp->size;
983 
984 		/* Take an extra reference to offset xenvif_idx_release */
985 		get_page(vif->mmap_pages[pending_idx]);
986 		xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
987 	}
988 }
989 
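/* Consume XEN_NETIF_EXTRA_* requests from the tx ring into 'extras',
 * indexed by extra type. Returns the remaining work_to_do or a
 * negative error.
 */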
990 static int xenvif_get_extras(struct xenvif *vif,
991 				struct xen_netif_extra_info *extras,
992 				int work_to_do)
993 {
994 	struct xen_netif_extra_info extra;
995 	RING_IDX cons = vif->tx.req_cons;
996 
997 	do {
998 		if (unlikely(work_to_do-- <= 0)) {
999 			netdev_err(vif->dev, "Missing extra info\n");
1000 			xenvif_fatal_tx_err(vif);
1001 			return -EBADR;
1002 		}
1003 
1004 		memcpy(&extra, RING_GET_REQUEST(&vif->tx, cons),
1005 		       sizeof(extra));
1006 		if (unlikely(!extra.type ||
1007 			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
1008 			vif->tx.req_cons = ++cons;
1009 			netdev_err(vif->dev,
1010 				   "Invalid extra type: %d\n", extra.type);
1011 			xenvif_fatal_tx_err(vif);
1012 			return -EINVAL;
1013 		}
1014 
1015 		memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
1016 		vif->tx.req_cons = ++cons;
1017 	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
1018 
1019 	return work_to_do;
1020 }
1021 
1022 static int xenvif_set_skb_gso(struct xenvif *vif,
1023 			      struct sk_buff *skb,
1024 			      struct xen_netif_extra_info *gso)
1025 {
1026 	if (!gso->u.gso.size) {
1027 		netdev_err(vif->dev, "GSO size must not be zero.\n");
1028 		xenvif_fatal_tx_err(vif);
1029 		return -EINVAL;
1030 	}
1031 
1032 	switch (gso->u.gso.type) {
1033 	case XEN_NETIF_GSO_TYPE_TCPV4:
1034 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1035 		break;
1036 	case XEN_NETIF_GSO_TYPE_TCPV6:
1037 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
1038 		break;
1039 	default:
1040 		netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
1041 		xenvif_fatal_tx_err(vif);
1042 		return -EINVAL;
1043 	}
1044 
1045 	skb_shinfo(skb)->gso_size = gso->u.gso.size;
1046 	/* gso_segs will be calculated later */
1047 
1048 	return 0;
1049 }
1050 
1051 static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
1052 {
1053 	bool recalculate_partial_csum = false;
1054 
1055 	/* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
	 * peers can fail to set XEN_NETTXF_csum_blank when sending a GSO
1057 	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
1058 	 * recalculate the partial checksum.
1059 	 */
1060 	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
1061 		vif->rx_gso_checksum_fixup++;
1062 		skb->ip_summed = CHECKSUM_PARTIAL;
1063 		recalculate_partial_csum = true;
1064 	}
1065 
1066 	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
1067 	if (skb->ip_summed != CHECKSUM_PARTIAL)
1068 		return 0;
1069 
1070 	return skb_checksum_setup(skb, recalculate_partial_csum);
1071 }
1072 
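/* Rate limiting for guest transmissions: returns true if a packet of
 * 'size' bytes must wait for more credit, arming a timer to replenish
 * the credit at the start of the next window.
 */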
1073 static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
1074 {
1075 	u64 now = get_jiffies_64();
1076 	u64 next_credit = vif->credit_window_start +
1077 		msecs_to_jiffies(vif->credit_usec / 1000);
1078 
1079 	/* Timer could already be pending in rare cases. */
1080 	if (timer_pending(&vif->credit_timeout))
1081 		return true;
1082 
1083 	/* Passed the point where we can replenish credit? */
1084 	if (time_after_eq64(now, next_credit)) {
1085 		vif->credit_window_start = now;
1086 		tx_add_credit(vif);
1087 	}
1088 
1089 	/* Still too big to send right now? Set a callback. */
1090 	if (size > vif->remaining_credit) {
1091 		vif->credit_timeout.data     =
1092 			(unsigned long)vif;
1093 		vif->credit_timeout.function =
1094 			tx_credit_callback;
1095 		mod_timer(&vif->credit_timeout,
1096 			  next_credit);
1097 		vif->credit_window_start = next_credit;
1098 
1099 		return true;
1100 	}
1101 
1102 	return false;
1103 }
1104 
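/* First half of the tx path: pull up to 'budget' packets off the
 * shared ring, queue partially-built skbs on tx_queue and fill
 * vif->tx_copy_ops with the grant copies needed to fetch their
 * payloads. Returns the number of copy operations built.
 */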
1105 static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
1106 {
1107 	struct gnttab_copy *gop = vif->tx_copy_ops, *request_gop;
1108 	struct sk_buff *skb;
1109 	int ret;
1110 
1111 	while ((nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
1112 		< MAX_PENDING_REQS) &&
1113 	       (skb_queue_len(&vif->tx_queue) < budget)) {
1114 		struct xen_netif_tx_request txreq;
1115 		struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
1116 		struct page *page;
1117 		struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
1118 		u16 pending_idx;
1119 		RING_IDX idx;
1120 		int work_to_do;
1121 		unsigned int data_len;
1122 		pending_ring_idx_t index;
1123 
1124 		if (vif->tx.sring->req_prod - vif->tx.req_cons >
1125 		    XEN_NETIF_TX_RING_SIZE) {
1126 			netdev_err(vif->dev,
1127 				   "Impossible number of requests. "
1128 				   "req_prod %d, req_cons %d, size %ld\n",
1129 				   vif->tx.sring->req_prod, vif->tx.req_cons,
1130 				   XEN_NETIF_TX_RING_SIZE);
1131 			xenvif_fatal_tx_err(vif);
1132 			continue;
1133 		}
1134 
1135 		work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&vif->tx);
1136 		if (!work_to_do)
1137 			break;
1138 
1139 		idx = vif->tx.req_cons;
1140 		rmb(); /* Ensure that we see the request before we copy it. */
1141 		memcpy(&txreq, RING_GET_REQUEST(&vif->tx, idx), sizeof(txreq));
1142 
1143 		/* Credit-based scheduling. */
1144 		if (txreq.size > vif->remaining_credit &&
1145 		    tx_credit_exceeded(vif, txreq.size))
1146 			break;
1147 
1148 		vif->remaining_credit -= txreq.size;
1149 
1150 		work_to_do--;
1151 		vif->tx.req_cons = ++idx;
1152 
1153 		memset(extras, 0, sizeof(extras));
1154 		if (txreq.flags & XEN_NETTXF_extra_info) {
1155 			work_to_do = xenvif_get_extras(vif, extras,
1156 						       work_to_do);
1157 			idx = vif->tx.req_cons;
1158 			if (unlikely(work_to_do < 0))
1159 				break;
1160 		}
1161 
1162 		ret = xenvif_count_requests(vif, &txreq, txfrags, work_to_do);
1163 		if (unlikely(ret < 0))
1164 			break;
1165 
1166 		idx += ret;
1167 
1168 		if (unlikely(txreq.size < ETH_HLEN)) {
1169 			netdev_dbg(vif->dev,
1170 				   "Bad packet size: %d\n", txreq.size);
1171 			xenvif_tx_err(vif, &txreq, idx);
1172 			break;
1173 		}
1174 
		/* The request must not cross a page as the payload mustn't fragment. */
1176 		if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
1177 			netdev_err(vif->dev,
1178 				   "txreq.offset: %x, size: %u, end: %lu\n",
1179 				   txreq.offset, txreq.size,
1180 				   (txreq.offset&~PAGE_MASK) + txreq.size);
1181 			xenvif_fatal_tx_err(vif);
1182 			break;
1183 		}
1184 
1185 		index = pending_index(vif->pending_cons);
1186 		pending_idx = vif->pending_ring[index];
1187 
1188 		data_len = (txreq.size > PKT_PROT_LEN &&
1189 			    ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
1190 			PKT_PROT_LEN : txreq.size;
1191 
1192 		skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN,
1193 				GFP_ATOMIC | __GFP_NOWARN);
1194 		if (unlikely(skb == NULL)) {
1195 			netdev_dbg(vif->dev,
1196 				   "Can't allocate a skb in start_xmit.\n");
1197 			xenvif_tx_err(vif, &txreq, idx);
1198 			break;
1199 		}
1200 
1201 		/* Packets passed to netif_rx() must have some headroom. */
1202 		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
1203 
1204 		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1205 			struct xen_netif_extra_info *gso;
1206 			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1207 
1208 			if (xenvif_set_skb_gso(vif, skb, gso)) {
1209 				/* Failure in xenvif_set_skb_gso is fatal. */
1210 				kfree_skb(skb);
1211 				break;
1212 			}
1213 		}
1214 
1215 		/* XXX could copy straight to head */
1216 		page = xenvif_alloc_page(vif, pending_idx);
1217 		if (!page) {
1218 			kfree_skb(skb);
1219 			xenvif_tx_err(vif, &txreq, idx);
1220 			break;
1221 		}
1222 
1223 		gop->source.u.ref = txreq.gref;
1224 		gop->source.domid = vif->domid;
1225 		gop->source.offset = txreq.offset;
1226 
1227 		gop->dest.u.gmfn = virt_to_mfn(page_address(page));
1228 		gop->dest.domid = DOMID_SELF;
1229 		gop->dest.offset = txreq.offset;
1230 
1231 		gop->len = txreq.size;
1232 		gop->flags = GNTCOPY_source_gref;
1233 
1234 		gop++;
1235 
1236 		memcpy(&vif->pending_tx_info[pending_idx].req,
1237 		       &txreq, sizeof(txreq));
1238 		vif->pending_tx_info[pending_idx].head = index;
1239 		*((u16 *)skb->data) = pending_idx;
1240 
1241 		__skb_put(skb, data_len);
1242 
1243 		skb_shinfo(skb)->nr_frags = ret;
1244 		if (data_len < txreq.size) {
1245 			skb_shinfo(skb)->nr_frags++;
1246 			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
1247 					     pending_idx);
1248 		} else {
1249 			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
1250 					     INVALID_PENDING_IDX);
1251 		}
1252 
1253 		vif->pending_cons++;
1254 
1255 		request_gop = xenvif_get_requests(vif, skb, txfrags, gop);
1256 		if (request_gop == NULL) {
1257 			kfree_skb(skb);
1258 			xenvif_tx_err(vif, &txreq, idx);
1259 			break;
1260 		}
1261 		gop = request_gop;
1262 
1263 		__skb_queue_tail(&vif->tx_queue, skb);
1264 
1265 		vif->tx.req_cons = idx;
1266 
1267 		if ((gop-vif->tx_copy_ops) >= ARRAY_SIZE(vif->tx_copy_ops))
1268 			break;
1269 	}
1270 
1271 	return gop - vif->tx_copy_ops;
1272 }
1273 
1274 
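/* Second half of the tx path: for each queued skb, check the results
 * of the batched grant copies, copy the header into the linear area,
 * fix up the frags and checksum state, and pass the packet to the
 * network stack. Returns the number of packets delivered.
 */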
1275 static int xenvif_tx_submit(struct xenvif *vif)
1276 {
1277 	struct gnttab_copy *gop = vif->tx_copy_ops;
1278 	struct sk_buff *skb;
1279 	int work_done = 0;
1280 
1281 	while ((skb = __skb_dequeue(&vif->tx_queue)) != NULL) {
1282 		struct xen_netif_tx_request *txp;
1283 		u16 pending_idx;
1284 		unsigned data_len;
1285 
1286 		pending_idx = *((u16 *)skb->data);
1287 		txp = &vif->pending_tx_info[pending_idx].req;
1288 
		/* Check the grant copy error code. */
1290 		if (unlikely(xenvif_tx_check_gop(vif, skb, &gop))) {
1291 			netdev_dbg(vif->dev, "netback grant failed.\n");
1292 			skb_shinfo(skb)->nr_frags = 0;
1293 			kfree_skb(skb);
1294 			continue;
1295 		}
1296 
1297 		data_len = skb->len;
1298 		memcpy(skb->data,
1299 		       (void *)(idx_to_kaddr(vif, pending_idx)|txp->offset),
1300 		       data_len);
1301 		if (data_len < txp->size) {
1302 			/* Append the packet payload as a fragment. */
1303 			txp->offset += data_len;
1304 			txp->size -= data_len;
1305 		} else {
1306 			/* Schedule a response immediately. */
1307 			xenvif_idx_release(vif, pending_idx,
1308 					   XEN_NETIF_RSP_OKAY);
1309 		}
1310 
1311 		if (txp->flags & XEN_NETTXF_csum_blank)
1312 			skb->ip_summed = CHECKSUM_PARTIAL;
1313 		else if (txp->flags & XEN_NETTXF_data_validated)
1314 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1315 
1316 		xenvif_fill_frags(vif, skb);
1317 
1318 		if (skb_is_nonlinear(skb) && skb_headlen(skb) < PKT_PROT_LEN) {
1319 			int target = min_t(int, skb->len, PKT_PROT_LEN);
1320 			__pskb_pull_tail(skb, target - skb_headlen(skb));
1321 		}
1322 
1323 		skb->dev      = vif->dev;
1324 		skb->protocol = eth_type_trans(skb, skb->dev);
1325 		skb_reset_network_header(skb);
1326 
1327 		if (checksum_setup(vif, skb)) {
1328 			netdev_dbg(vif->dev,
1329 				   "Can't setup checksum in net_tx_action\n");
1330 			kfree_skb(skb);
1331 			continue;
1332 		}
1333 
1334 		skb_probe_transport_header(skb, 0);
1335 
1336 		/* If the packet is GSO then we will have just set up the
1337 		 * transport header offset in checksum_setup so it's now
1338 		 * straightforward to calculate gso_segs.
1339 		 */
1340 		if (skb_is_gso(skb)) {
1341 			int mss = skb_shinfo(skb)->gso_size;
1342 			int hdrlen = skb_transport_header(skb) -
1343 				skb_mac_header(skb) +
1344 				tcp_hdrlen(skb);
1345 
1346 			skb_shinfo(skb)->gso_segs =
1347 				DIV_ROUND_UP(skb->len - hdrlen, mss);
1348 		}
1349 
1350 		vif->dev->stats.rx_bytes += skb->len;
1351 		vif->dev->stats.rx_packets++;
1352 
1353 		work_done++;
1354 
1355 		netif_receive_skb(skb);
1356 	}
1357 
1358 	return work_done;
1359 }
1360 
1361 /* Called after netfront has transmitted */
1362 int xenvif_tx_action(struct xenvif *vif, int budget)
1363 {
1364 	unsigned nr_gops;
1365 	int work_done;
1366 
1367 	if (unlikely(!tx_work_todo(vif)))
1368 		return 0;
1369 
1370 	nr_gops = xenvif_tx_build_gops(vif, budget);
1371 
1372 	if (nr_gops == 0)
1373 		return 0;
1374 
1375 	gnttab_batch_copy(vif->tx_copy_ops, nr_gops);
1376 
1377 	work_done = xenvif_tx_submit(vif);
1378 
1379 	return work_done;
1380 }
1381 
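/* Release a pending slot and any slots that were merged into it,
 * sending a tx response for each merged request, returning the slots
 * to the pending ring and dropping the reference on the backing page.
 */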
1382 static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
1383 			       u8 status)
1384 {
1385 	struct pending_tx_info *pending_tx_info;
1386 	pending_ring_idx_t head;
1387 	u16 peek; /* peek into next tx request */
1388 
1389 	BUG_ON(vif->mmap_pages[pending_idx] == (void *)(~0UL));
1390 
1391 	/* Already complete? */
1392 	if (vif->mmap_pages[pending_idx] == NULL)
1393 		return;
1394 
1395 	pending_tx_info = &vif->pending_tx_info[pending_idx];
1396 
1397 	head = pending_tx_info->head;
1398 
1399 	BUG_ON(!pending_tx_is_head(vif, head));
1400 	BUG_ON(vif->pending_ring[pending_index(head)] != pending_idx);
1401 
1402 	do {
1403 		pending_ring_idx_t index;
1404 		pending_ring_idx_t idx = pending_index(head);
1405 		u16 info_idx = vif->pending_ring[idx];
1406 
1407 		pending_tx_info = &vif->pending_tx_info[info_idx];
1408 		make_tx_response(vif, &pending_tx_info->req, status);
1409 
1410 		/* Setting any number other than
1411 		 * INVALID_PENDING_RING_IDX indicates this slot is
1412 		 * starting a new packet / ending a previous packet.
1413 		 */
1414 		pending_tx_info->head = 0;
1415 
1416 		index = pending_index(vif->pending_prod++);
1417 		vif->pending_ring[index] = vif->pending_ring[info_idx];
1418 
1419 		peek = vif->pending_ring[pending_index(++head)];
1420 
1421 	} while (!pending_tx_is_head(vif, peek));
1422 
1423 	put_page(vif->mmap_pages[pending_idx]);
1424 	vif->mmap_pages[pending_idx] = NULL;
1425 }
1426 
1427 
1428 static void make_tx_response(struct xenvif *vif,
1429 			     struct xen_netif_tx_request *txp,
1430 			     s8       st)
1431 {
1432 	RING_IDX i = vif->tx.rsp_prod_pvt;
1433 	struct xen_netif_tx_response *resp;
1434 	int notify;
1435 
1436 	resp = RING_GET_RESPONSE(&vif->tx, i);
1437 	resp->id     = txp->id;
1438 	resp->status = st;
1439 
1440 	if (txp->flags & XEN_NETTXF_extra_info)
1441 		RING_GET_RESPONSE(&vif->tx, ++i)->status = XEN_NETIF_RSP_NULL;
1442 
1443 	vif->tx.rsp_prod_pvt = ++i;
1444 	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->tx, notify);
1445 	if (notify)
1446 		notify_remote_via_irq(vif->tx_irq);
1447 }
1448 
1449 static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
1450 					     u16      id,
1451 					     s8       st,
1452 					     u16      offset,
1453 					     u16      size,
1454 					     u16      flags)
1455 {
1456 	RING_IDX i = vif->rx.rsp_prod_pvt;
1457 	struct xen_netif_rx_response *resp;
1458 
1459 	resp = RING_GET_RESPONSE(&vif->rx, i);
1460 	resp->offset     = offset;
1461 	resp->flags      = flags;
1462 	resp->id         = id;
1463 	resp->status     = (s16)size;
1464 	if (st < 0)
1465 		resp->status = (s16)st;
1466 
1467 	vif->rx.rsp_prod_pvt = ++i;
1468 
1469 	return resp;
1470 }
1471 
1472 static inline int rx_work_todo(struct xenvif *vif)
1473 {
1474 	return !skb_queue_empty(&vif->rx_queue) &&
1475 	       xenvif_rx_ring_slots_available(vif, vif->rx_last_skb_slots);
1476 }
1477 
1478 static inline int tx_work_todo(struct xenvif *vif)
1479 {
1480 
1481 	if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->tx)) &&
1482 	    (nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
1483 	     < MAX_PENDING_REQS))
1484 		return 1;
1485 
1486 	return 0;
1487 }
1488 
1489 void xenvif_unmap_frontend_rings(struct xenvif *vif)
1490 {
1491 	if (vif->tx.sring)
1492 		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
1493 					vif->tx.sring);
1494 	if (vif->rx.sring)
1495 		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
1496 					vif->rx.sring);
1497 }
1498 
1499 int xenvif_map_frontend_rings(struct xenvif *vif,
1500 			      grant_ref_t tx_ring_ref,
1501 			      grant_ref_t rx_ring_ref)
1502 {
1503 	void *addr;
1504 	struct xen_netif_tx_sring *txs;
1505 	struct xen_netif_rx_sring *rxs;
1506 
1507 	int err = -ENOMEM;
1508 
1509 	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
1510 				     tx_ring_ref, &addr);
1511 	if (err)
1512 		goto err;
1513 
1514 	txs = (struct xen_netif_tx_sring *)addr;
1515 	BACK_RING_INIT(&vif->tx, txs, PAGE_SIZE);
1516 
1517 	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
1518 				     rx_ring_ref, &addr);
1519 	if (err)
1520 		goto err;
1521 
1522 	rxs = (struct xen_netif_rx_sring *)addr;
1523 	BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE);
1524 
1525 	return 0;
1526 
1527 err:
1528 	xenvif_unmap_frontend_rings(vif);
1529 	return err;
1530 }
1531 
1532 void xenvif_stop_queue(struct xenvif *vif)
1533 {
1534 	if (!vif->can_queue)
1535 		return;
1536 
1537 	netif_stop_queue(vif->dev);
1538 }
1539 
1540 static void xenvif_start_queue(struct xenvif *vif)
1541 {
1542 	if (xenvif_schedulable(vif))
1543 		netif_wake_queue(vif->dev);
1544 }
1545 
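/* Per-vif kernel thread for guest rx work: sleeps until there are
 * queued skbs and enough ring slots to take them, processes them via
 * xenvif_rx_action(), and wakes the net queue once the rx queue has
 * drained.
 */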
1546 int xenvif_kthread(void *data)
1547 {
1548 	struct xenvif *vif = data;
1549 	struct sk_buff *skb;
1550 
1551 	while (!kthread_should_stop()) {
1552 		wait_event_interruptible(vif->wq,
1553 					 rx_work_todo(vif) ||
1554 					 kthread_should_stop());
1555 		if (kthread_should_stop())
1556 			break;
1557 
1558 		if (!skb_queue_empty(&vif->rx_queue))
1559 			xenvif_rx_action(vif);
1560 
1561 		if (skb_queue_empty(&vif->rx_queue) &&
1562 		    netif_queue_stopped(vif->dev))
1563 			xenvif_start_queue(vif);
1564 
1565 		cond_resched();
1566 	}
1567 
1568 	/* Bin any remaining skbs */
1569 	while ((skb = skb_dequeue(&vif->rx_queue)) != NULL)
1570 		dev_kfree_skb(skb);
1571 
1572 	return 0;
1573 }
1574 
1575 static int __init netback_init(void)
1576 {
1577 	int rc = 0;
1578 
1579 	if (!xen_domain())
1580 		return -ENODEV;
1581 
1582 	if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
1583 		pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
1584 			fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
1585 		fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
1586 	}
1587 
1588 	rc = xenvif_xenbus_init();
1589 	if (rc)
1590 		goto failed_init;
1591 
1592 	return 0;
1593 
1594 failed_init:
1595 	return rc;
1596 }
1597 
1598 module_init(netback_init);
1599 
1600 static void __exit netback_fini(void)
1601 {
1602 	xenvif_xenbus_fini();
1603 }
1604 module_exit(netback_fini);
1605 
1606 MODULE_LICENSE("Dual BSD/GPL");
1607 MODULE_ALIAS("xen-backend:vif");
1608