1 /*
2  * Back-end of the driver for virtual network devices. This portion of the
3  * driver exports a 'unified' network-device interface that can be accessed
4  * by any operating system that implements a compatible front end. A
5  * reference front-end implementation can be found in:
6  *  drivers/net/xen-netfront.c
7  *
8  * Copyright (c) 2002-2005, K A Fraser
9  *
10  * This program is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU General Public License version 2
12  * as published by the Free Software Foundation; or, when distributed
13  * separately from the Linux kernel or incorporated into other
14  * software packages, subject to the following license:
15  *
16  * Permission is hereby granted, free of charge, to any person obtaining a copy
17  * of this source file (the "Software"), to deal in the Software without
18  * restriction, including without limitation the rights to use, copy, modify,
19  * merge, publish, distribute, sublicense, and/or sell copies of the Software,
20  * and to permit persons to whom the Software is furnished to do so, subject to
21  * the following conditions:
22  *
23  * The above copyright notice and this permission notice shall be included in
24  * all copies or substantial portions of the Software.
25  *
26  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
27  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
28  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
29  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
30  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
31  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
32  * IN THE SOFTWARE.
33  */
34 
35 #include "common.h"
36 
37 #include <linux/kthread.h>
38 #include <linux/if_vlan.h>
39 #include <linux/udp.h>
40 #include <linux/highmem.h>
41 
42 #include <net/tcp.h>
43 
44 #include <xen/xen.h>
45 #include <xen/events.h>
46 #include <xen/interface/memory.h>
47 #include <xen/page.h>
48 
49 #include <asm/xen/hypercall.h>
50 
51 /* Provide an option to disable split event channels at load time as
52  * event channels are limited resource. Split event channels are
53  * enabled by default.
54  */
55 bool separate_tx_rx_irq = true;
56 module_param(separate_tx_rx_irq, bool, 0644);
57 
58 /* The time that packets can stay on the guest Rx internal queue
59  * before they are dropped.
60  */
61 unsigned int rx_drain_timeout_msecs = 10000;
62 module_param(rx_drain_timeout_msecs, uint, 0444);
63 
64 /* The length of time before the frontend is considered unresponsive
65  * because it isn't providing Rx slots.
66  */
67 unsigned int rx_stall_timeout_msecs = 60000;
68 module_param(rx_stall_timeout_msecs, uint, 0444);
69 
70 unsigned int xenvif_max_queues;
71 module_param_named(max_queues, xenvif_max_queues, uint, 0644);
72 MODULE_PARM_DESC(max_queues,
73 		 "Maximum number of queues per virtual interface");
74 
75 /*
76  * This is the maximum slots a skb can have. If a guest sends a skb
77  * which exceeds this limit it is considered malicious.
78  */
79 #define FATAL_SKB_SLOTS_DEFAULT 20
80 static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
81 module_param(fatal_skb_slots, uint, 0444);
82 
83 /* The amount to copy out of the first guest Tx slot into the skb's
84  * linear area.  If the first slot has more data, it will be mapped
85  * and put into the first frag.
86  *
87  * This is sized to avoid pulling headers from the frags for most
88  * TCP/IP packets.
89  */
90 #define XEN_NETBACK_TX_COPY_LEN 128
91 
92 /* This is the maximum number of flows in the hash cache. */
93 #define XENVIF_HASH_CACHE_SIZE_DEFAULT 64
94 unsigned int xenvif_hash_cache_size = XENVIF_HASH_CACHE_SIZE_DEFAULT;
95 module_param_named(hash_cache_size, xenvif_hash_cache_size, uint, 0644);
96 MODULE_PARM_DESC(hash_cache_size, "Number of flows in the hash cache");
97 
98 static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
99 			       u8 status);
100 
101 static void make_tx_response(struct xenvif_queue *queue,
102 			     struct xen_netif_tx_request *txp,
103 			     unsigned int extra_count,
104 			     s8       st);
105 static void push_tx_responses(struct xenvif_queue *queue);
106 
107 static inline int tx_work_todo(struct xenvif_queue *queue);
108 
109 static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
110 					     u16      id,
111 					     s8       st,
112 					     u16      offset,
113 					     u16      size,
114 					     u16      flags);
115 
116 static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
117 				       u16 idx)
118 {
119 	return page_to_pfn(queue->mmap_pages[idx]);
120 }
121 
122 static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue,
123 					 u16 idx)
124 {
125 	return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx));
126 }
127 
128 #define callback_param(vif, pending_idx) \
129 	(vif->pending_tx_info[pending_idx].callback_struct)
130 
131 /* Find the containing VIF's structure from a pointer in pending_tx_info array
132  */
133 static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info *ubuf)
134 {
135 	u16 pending_idx = ubuf->desc;
136 	struct pending_tx_info *temp =
137 		container_of(ubuf, struct pending_tx_info, callback_struct);
138 	return container_of(temp - pending_idx,
139 			    struct xenvif_queue,
140 			    pending_tx_info[0]);
141 }
142 
143 static u16 frag_get_pending_idx(skb_frag_t *frag)
144 {
145 	return (u16)frag->page_offset;
146 }
147 
148 static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
149 {
150 	frag->page_offset = pending_idx;
151 }
152 
153 static inline pending_ring_idx_t pending_index(unsigned i)
154 {
155 	return i & (MAX_PENDING_REQS-1);
156 }
157 
158 static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
159 {
160 	RING_IDX prod, cons;
161 	struct sk_buff *skb;
162 	int needed;
163 
164 	skb = skb_peek(&queue->rx_queue);
165 	if (!skb)
166 		return false;
167 
168 	needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
169 	if (skb_is_gso(skb))
170 		needed++;
171 	if (skb->sw_hash)
172 		needed++;
173 
174 	do {
175 		prod = queue->rx.sring->req_prod;
176 		cons = queue->rx.req_cons;
177 
178 		if (prod - cons >= needed)
179 			return true;
180 
181 		queue->rx.sring->req_event = prod + 1;
182 
183 		/* Make sure event is visible before we check prod
184 		 * again.
185 		 */
186 		mb();
187 	} while (queue->rx.sring->req_prod != prod);
188 
189 	return false;
190 }
191 
192 void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
193 {
194 	unsigned long flags;
195 
196 	spin_lock_irqsave(&queue->rx_queue.lock, flags);
197 
198 	__skb_queue_tail(&queue->rx_queue, skb);
199 
200 	queue->rx_queue_len += skb->len;
201 	if (queue->rx_queue_len > queue->rx_queue_max)
202 		netif_tx_stop_queue(netdev_get_tx_queue(queue->vif->dev, queue->id));
203 
204 	spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
205 }
206 
207 static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
208 {
209 	struct sk_buff *skb;
210 
211 	spin_lock_irq(&queue->rx_queue.lock);
212 
213 	skb = __skb_dequeue(&queue->rx_queue);
214 	if (skb)
215 		queue->rx_queue_len -= skb->len;
216 
217 	spin_unlock_irq(&queue->rx_queue.lock);
218 
219 	return skb;
220 }
221 
222 static void xenvif_rx_queue_maybe_wake(struct xenvif_queue *queue)
223 {
224 	spin_lock_irq(&queue->rx_queue.lock);
225 
226 	if (queue->rx_queue_len < queue->rx_queue_max)
227 		netif_tx_wake_queue(netdev_get_tx_queue(queue->vif->dev, queue->id));
228 
229 	spin_unlock_irq(&queue->rx_queue.lock);
230 }
231 
232 
233 static void xenvif_rx_queue_purge(struct xenvif_queue *queue)
234 {
235 	struct sk_buff *skb;
236 	while ((skb = xenvif_rx_dequeue(queue)) != NULL)
237 		kfree_skb(skb);
238 }
239 
240 static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
241 {
242 	struct sk_buff *skb;
243 
244 	for(;;) {
245 		skb = skb_peek(&queue->rx_queue);
246 		if (!skb)
247 			break;
248 		if (time_before(jiffies, XENVIF_RX_CB(skb)->expires))
249 			break;
250 		xenvif_rx_dequeue(queue);
251 		kfree_skb(skb);
252 	}
253 }
254 
255 struct netrx_pending_operations {
256 	unsigned copy_prod, copy_cons;
257 	unsigned meta_prod, meta_cons;
258 	struct gnttab_copy *copy;
259 	struct xenvif_rx_meta *meta;
260 	int copy_off;
261 	grant_ref_t copy_gref;
262 };
263 
264 static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
265 						 struct netrx_pending_operations *npo)
266 {
267 	struct xenvif_rx_meta *meta;
268 	struct xen_netif_rx_request req;
269 
270 	RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
271 
272 	meta = npo->meta + npo->meta_prod++;
273 	meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
274 	meta->gso_size = 0;
275 	meta->size = 0;
276 	meta->id = req.id;
277 
278 	npo->copy_off = 0;
279 	npo->copy_gref = req.gref;
280 
281 	return meta;
282 }
283 
284 struct gop_frag_copy {
285 	struct xenvif_queue *queue;
286 	struct netrx_pending_operations *npo;
287 	struct xenvif_rx_meta *meta;
288 	int head;
289 	int gso_type;
290 	int protocol;
291 	int hash_present;
292 
293 	struct page *page;
294 };
295 
296 static void xenvif_setup_copy_gop(unsigned long gfn,
297 				  unsigned int offset,
298 				  unsigned int *len,
299 				  struct gop_frag_copy *info)
300 {
301 	struct gnttab_copy *copy_gop;
302 	struct xen_page_foreign *foreign;
303 	/* Convenient aliases */
304 	struct xenvif_queue *queue = info->queue;
305 	struct netrx_pending_operations *npo = info->npo;
306 	struct page *page = info->page;
307 
308 	BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
309 
310 	if (npo->copy_off == MAX_BUFFER_OFFSET)
311 		info->meta = get_next_rx_buffer(queue, npo);
312 
313 	if (npo->copy_off + *len > MAX_BUFFER_OFFSET)
314 		*len = MAX_BUFFER_OFFSET - npo->copy_off;
315 
316 	copy_gop = npo->copy + npo->copy_prod++;
317 	copy_gop->flags = GNTCOPY_dest_gref;
318 	copy_gop->len = *len;
319 
320 	foreign = xen_page_foreign(page);
321 	if (foreign) {
322 		copy_gop->source.domid = foreign->domid;
323 		copy_gop->source.u.ref = foreign->gref;
324 		copy_gop->flags |= GNTCOPY_source_gref;
325 	} else {
326 		copy_gop->source.domid = DOMID_SELF;
327 		copy_gop->source.u.gmfn = gfn;
328 	}
329 	copy_gop->source.offset = offset;
330 
331 	copy_gop->dest.domid = queue->vif->domid;
332 	copy_gop->dest.offset = npo->copy_off;
333 	copy_gop->dest.u.ref = npo->copy_gref;
334 
335 	npo->copy_off += *len;
336 	info->meta->size += *len;
337 
338 	if (!info->head)
339 		return;
340 
341 	/* Leave a gap for the GSO descriptor. */
342 	if ((1 << info->gso_type) & queue->vif->gso_mask)
343 		queue->rx.req_cons++;
344 
345 	/* Leave a gap for the hash extra segment. */
346 	if (info->hash_present)
347 		queue->rx.req_cons++;
348 
349 	info->head = 0; /* There must be something in this buffer now */
350 }
351 
352 static void xenvif_gop_frag_copy_grant(unsigned long gfn,
353 				       unsigned offset,
354 				       unsigned int len,
355 				       void *data)
356 {
357 	unsigned int bytes;
358 
359 	while (len) {
360 		bytes = len;
361 		xenvif_setup_copy_gop(gfn, offset, &bytes, data);
362 		offset += bytes;
363 		len -= bytes;
364 	}
365 }
366 
367 /*
368  * Set up the grant operations for this fragment. If it's a flipping
369  * interface, we also set up the unmap request from here.
370  */
371 static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb,
372 				 struct netrx_pending_operations *npo,
373 				 struct page *page, unsigned long size,
374 				 unsigned long offset, int *head)
375 {
376 	struct gop_frag_copy info = {
377 		.queue = queue,
378 		.npo = npo,
379 		.head = *head,
380 		.gso_type = XEN_NETIF_GSO_TYPE_NONE,
381 		/* xenvif_set_skb_hash() will have either set a s/w
382 		 * hash or cleared the hash depending on
383 		 * whether the the frontend wants a hash for this skb.
384 		 */
385 		.hash_present = skb->sw_hash,
386 	};
387 	unsigned long bytes;
388 
389 	if (skb_is_gso(skb)) {
390 		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
391 			info.gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
392 		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
393 			info.gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
394 	}
395 
396 	/* Data must not cross a page boundary. */
397 	BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
398 
399 	info.meta = npo->meta + npo->meta_prod - 1;
400 
401 	/* Skip unused frames from start of page */
402 	page += offset >> PAGE_SHIFT;
403 	offset &= ~PAGE_MASK;
404 
405 	while (size > 0) {
406 		BUG_ON(offset >= PAGE_SIZE);
407 
408 		bytes = PAGE_SIZE - offset;
409 		if (bytes > size)
410 			bytes = size;
411 
412 		info.page = page;
413 		gnttab_foreach_grant_in_range(page, offset, bytes,
414 					      xenvif_gop_frag_copy_grant,
415 					      &info);
416 		size -= bytes;
417 		offset = 0;
418 
419 		/* Next page */
420 		if (size) {
421 			BUG_ON(!PageCompound(page));
422 			page++;
423 		}
424 	}
425 
426 	*head = info.head;
427 }
428 
429 /*
430  * Prepare an SKB to be transmitted to the frontend.
431  *
432  * This function is responsible for allocating grant operations, meta
433  * structures, etc.
434  *
435  * It returns the number of meta structures consumed. The number of
436  * ring slots used is always equal to the number of meta slots used
437  * plus the number of GSO descriptors used. Currently, we use either
438  * zero GSO descriptors (for non-GSO packets) or one descriptor (for
439  * frontend-side LRO).
440  */
441 static int xenvif_gop_skb(struct sk_buff *skb,
442 			  struct netrx_pending_operations *npo,
443 			  struct xenvif_queue *queue)
444 {
445 	struct xenvif *vif = netdev_priv(skb->dev);
446 	int nr_frags = skb_shinfo(skb)->nr_frags;
447 	int i;
448 	struct xen_netif_rx_request req;
449 	struct xenvif_rx_meta *meta;
450 	unsigned char *data;
451 	int head = 1;
452 	int old_meta_prod;
453 	int gso_type;
454 
455 	old_meta_prod = npo->meta_prod;
456 
457 	gso_type = XEN_NETIF_GSO_TYPE_NONE;
458 	if (skb_is_gso(skb)) {
459 		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
460 			gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
461 		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
462 			gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
463 	}
464 
465 	/* Set up a GSO prefix descriptor, if necessary */
466 	if ((1 << gso_type) & vif->gso_prefix_mask) {
467 		RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
468 		meta = npo->meta + npo->meta_prod++;
469 		meta->gso_type = gso_type;
470 		meta->gso_size = skb_shinfo(skb)->gso_size;
471 		meta->size = 0;
472 		meta->id = req.id;
473 	}
474 
475 	RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
476 	meta = npo->meta + npo->meta_prod++;
477 
478 	if ((1 << gso_type) & vif->gso_mask) {
479 		meta->gso_type = gso_type;
480 		meta->gso_size = skb_shinfo(skb)->gso_size;
481 	} else {
482 		meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
483 		meta->gso_size = 0;
484 	}
485 
486 	meta->size = 0;
487 	meta->id = req.id;
488 	npo->copy_off = 0;
489 	npo->copy_gref = req.gref;
490 
491 	data = skb->data;
492 	while (data < skb_tail_pointer(skb)) {
493 		unsigned int offset = offset_in_page(data);
494 		unsigned int len = PAGE_SIZE - offset;
495 
496 		if (data + len > skb_tail_pointer(skb))
497 			len = skb_tail_pointer(skb) - data;
498 
499 		xenvif_gop_frag_copy(queue, skb, npo,
500 				     virt_to_page(data), len, offset, &head);
501 		data += len;
502 	}
503 
504 	for (i = 0; i < nr_frags; i++) {
505 		xenvif_gop_frag_copy(queue, skb, npo,
506 				     skb_frag_page(&skb_shinfo(skb)->frags[i]),
507 				     skb_frag_size(&skb_shinfo(skb)->frags[i]),
508 				     skb_shinfo(skb)->frags[i].page_offset,
509 				     &head);
510 	}
511 
512 	return npo->meta_prod - old_meta_prod;
513 }
514 
515 /*
516  * This is a twin to xenvif_gop_skb.  Assume that xenvif_gop_skb was
517  * used to set up the operations on the top of
518  * netrx_pending_operations, which have since been done.  Check that
519  * they didn't give any errors and advance over them.
520  */
521 static int xenvif_check_gop(struct xenvif *vif, int nr_meta_slots,
522 			    struct netrx_pending_operations *npo)
523 {
524 	struct gnttab_copy     *copy_op;
525 	int status = XEN_NETIF_RSP_OKAY;
526 	int i;
527 
528 	for (i = 0; i < nr_meta_slots; i++) {
529 		copy_op = npo->copy + npo->copy_cons++;
530 		if (copy_op->status != GNTST_okay) {
531 			netdev_dbg(vif->dev,
532 				   "Bad status %d from copy to DOM%d.\n",
533 				   copy_op->status, vif->domid);
534 			status = XEN_NETIF_RSP_ERROR;
535 		}
536 	}
537 
538 	return status;
539 }
540 
541 static void xenvif_add_frag_responses(struct xenvif_queue *queue, int status,
542 				      struct xenvif_rx_meta *meta,
543 				      int nr_meta_slots)
544 {
545 	int i;
546 	unsigned long offset;
547 
548 	/* No fragments used */
549 	if (nr_meta_slots <= 1)
550 		return;
551 
552 	nr_meta_slots--;
553 
554 	for (i = 0; i < nr_meta_slots; i++) {
555 		int flags;
556 		if (i == nr_meta_slots - 1)
557 			flags = 0;
558 		else
559 			flags = XEN_NETRXF_more_data;
560 
561 		offset = 0;
562 		make_rx_response(queue, meta[i].id, status, offset,
563 				 meta[i].size, flags);
564 	}
565 }
566 
567 void xenvif_kick_thread(struct xenvif_queue *queue)
568 {
569 	wake_up(&queue->wq);
570 }
571 
572 static void xenvif_rx_action(struct xenvif_queue *queue)
573 {
574 	struct xenvif *vif = queue->vif;
575 	s8 status;
576 	u16 flags;
577 	struct xen_netif_rx_response *resp;
578 	struct sk_buff_head rxq;
579 	struct sk_buff *skb;
580 	LIST_HEAD(notify);
581 	int ret;
582 	unsigned long offset;
583 	bool need_to_notify = false;
584 
585 	struct netrx_pending_operations npo = {
586 		.copy  = queue->grant_copy_op,
587 		.meta  = queue->meta,
588 	};
589 
590 	skb_queue_head_init(&rxq);
591 
592 	while (xenvif_rx_ring_slots_available(queue)
593 	       && (skb = xenvif_rx_dequeue(queue)) != NULL) {
594 		queue->last_rx_time = jiffies;
595 
596 		XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue);
597 
598 		__skb_queue_tail(&rxq, skb);
599 	}
600 
601 	BUG_ON(npo.meta_prod > ARRAY_SIZE(queue->meta));
602 
603 	if (!npo.copy_prod)
604 		goto done;
605 
606 	BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
607 	gnttab_batch_copy(queue->grant_copy_op, npo.copy_prod);
608 
609 	while ((skb = __skb_dequeue(&rxq)) != NULL) {
610 		struct xen_netif_extra_info *extra = NULL;
611 
612 		if ((1 << queue->meta[npo.meta_cons].gso_type) &
613 		    vif->gso_prefix_mask) {
614 			resp = RING_GET_RESPONSE(&queue->rx,
615 						 queue->rx.rsp_prod_pvt++);
616 
617 			resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;
618 
619 			resp->offset = queue->meta[npo.meta_cons].gso_size;
620 			resp->id = queue->meta[npo.meta_cons].id;
621 			resp->status = XENVIF_RX_CB(skb)->meta_slots_used;
622 
623 			npo.meta_cons++;
624 			XENVIF_RX_CB(skb)->meta_slots_used--;
625 		}
626 
627 
628 		queue->stats.tx_bytes += skb->len;
629 		queue->stats.tx_packets++;
630 
631 		status = xenvif_check_gop(vif,
632 					  XENVIF_RX_CB(skb)->meta_slots_used,
633 					  &npo);
634 
635 		if (XENVIF_RX_CB(skb)->meta_slots_used == 1)
636 			flags = 0;
637 		else
638 			flags = XEN_NETRXF_more_data;
639 
640 		if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
641 			flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated;
642 		else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
643 			/* remote but checksummed. */
644 			flags |= XEN_NETRXF_data_validated;
645 
646 		offset = 0;
647 		resp = make_rx_response(queue, queue->meta[npo.meta_cons].id,
648 					status, offset,
649 					queue->meta[npo.meta_cons].size,
650 					flags);
651 
652 		if ((1 << queue->meta[npo.meta_cons].gso_type) &
653 		    vif->gso_mask) {
654 			extra = (struct xen_netif_extra_info *)
655 				RING_GET_RESPONSE(&queue->rx,
656 						  queue->rx.rsp_prod_pvt++);
657 
658 			resp->flags |= XEN_NETRXF_extra_info;
659 
660 			extra->u.gso.type = queue->meta[npo.meta_cons].gso_type;
661 			extra->u.gso.size = queue->meta[npo.meta_cons].gso_size;
662 			extra->u.gso.pad = 0;
663 			extra->u.gso.features = 0;
664 
665 			extra->type = XEN_NETIF_EXTRA_TYPE_GSO;
666 			extra->flags = 0;
667 		}
668 
669 		if (skb->sw_hash) {
670 			/* Since the skb got here via xenvif_select_queue()
671 			 * we know that the hash has been re-calculated
672 			 * according to a configuration set by the frontend
673 			 * and therefore we know that it is legitimate to
674 			 * pass it to the frontend.
675 			 */
676 			if (resp->flags & XEN_NETRXF_extra_info)
677 				extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
678 			else
679 				resp->flags |= XEN_NETRXF_extra_info;
680 
681 			extra = (struct xen_netif_extra_info *)
682 				RING_GET_RESPONSE(&queue->rx,
683 						  queue->rx.rsp_prod_pvt++);
684 
685 			extra->u.hash.algorithm =
686 				XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ;
687 
688 			if (skb->l4_hash)
689 				extra->u.hash.type =
690 					skb->protocol == htons(ETH_P_IP) ?
691 					_XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP :
692 					_XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP;
693 			else
694 				extra->u.hash.type =
695 					skb->protocol == htons(ETH_P_IP) ?
696 					_XEN_NETIF_CTRL_HASH_TYPE_IPV4 :
697 					_XEN_NETIF_CTRL_HASH_TYPE_IPV6;
698 
699 			*(uint32_t *)extra->u.hash.value =
700 				skb_get_hash_raw(skb);
701 
702 			extra->type = XEN_NETIF_EXTRA_TYPE_HASH;
703 			extra->flags = 0;
704 		}
705 
706 		xenvif_add_frag_responses(queue, status,
707 					  queue->meta + npo.meta_cons + 1,
708 					  XENVIF_RX_CB(skb)->meta_slots_used);
709 
710 		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, ret);
711 
712 		need_to_notify |= !!ret;
713 
714 		npo.meta_cons += XENVIF_RX_CB(skb)->meta_slots_used;
715 		dev_kfree_skb(skb);
716 	}
717 
718 done:
719 	if (need_to_notify)
720 		notify_remote_via_irq(queue->rx_irq);
721 }
722 
723 void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue)
724 {
725 	int more_to_do;
726 
727 	RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do);
728 
729 	if (more_to_do)
730 		napi_schedule(&queue->napi);
731 }
732 
733 static void tx_add_credit(struct xenvif_queue *queue)
734 {
735 	unsigned long max_burst, max_credit;
736 
737 	/*
738 	 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
739 	 * Otherwise the interface can seize up due to insufficient credit.
740 	 */
741 	max_burst = max(131072UL, queue->credit_bytes);
742 
743 	/* Take care that adding a new chunk of credit doesn't wrap to zero. */
744 	max_credit = queue->remaining_credit + queue->credit_bytes;
745 	if (max_credit < queue->remaining_credit)
746 		max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
747 
748 	queue->remaining_credit = min(max_credit, max_burst);
749 }
750 
751 void xenvif_tx_credit_callback(unsigned long data)
752 {
753 	struct xenvif_queue *queue = (struct xenvif_queue *)data;
754 	tx_add_credit(queue);
755 	xenvif_napi_schedule_or_enable_events(queue);
756 }
757 
758 static void xenvif_tx_err(struct xenvif_queue *queue,
759 			  struct xen_netif_tx_request *txp,
760 			  unsigned int extra_count, RING_IDX end)
761 {
762 	RING_IDX cons = queue->tx.req_cons;
763 	unsigned long flags;
764 
765 	do {
766 		spin_lock_irqsave(&queue->response_lock, flags);
767 		make_tx_response(queue, txp, extra_count, XEN_NETIF_RSP_ERROR);
768 		push_tx_responses(queue);
769 		spin_unlock_irqrestore(&queue->response_lock, flags);
770 		if (cons == end)
771 			break;
772 		RING_COPY_REQUEST(&queue->tx, cons++, txp);
773 		extra_count = 0; /* only the first frag can have extras */
774 	} while (1);
775 	queue->tx.req_cons = cons;
776 }
777 
778 static void xenvif_fatal_tx_err(struct xenvif *vif)
779 {
780 	netdev_err(vif->dev, "fatal error; disabling device\n");
781 	vif->disabled = true;
782 	/* Disable the vif from queue 0's kthread */
783 	if (vif->queues)
784 		xenvif_kick_thread(&vif->queues[0]);
785 }
786 
787 static int xenvif_count_requests(struct xenvif_queue *queue,
788 				 struct xen_netif_tx_request *first,
789 				 unsigned int extra_count,
790 				 struct xen_netif_tx_request *txp,
791 				 int work_to_do)
792 {
793 	RING_IDX cons = queue->tx.req_cons;
794 	int slots = 0;
795 	int drop_err = 0;
796 	int more_data;
797 
798 	if (!(first->flags & XEN_NETTXF_more_data))
799 		return 0;
800 
801 	do {
802 		struct xen_netif_tx_request dropped_tx = { 0 };
803 
804 		if (slots >= work_to_do) {
805 			netdev_err(queue->vif->dev,
806 				   "Asked for %d slots but exceeds this limit\n",
807 				   work_to_do);
808 			xenvif_fatal_tx_err(queue->vif);
809 			return -ENODATA;
810 		}
811 
812 		/* This guest is really using too many slots and
813 		 * considered malicious.
814 		 */
815 		if (unlikely(slots >= fatal_skb_slots)) {
816 			netdev_err(queue->vif->dev,
817 				   "Malicious frontend using %d slots, threshold %u\n",
818 				   slots, fatal_skb_slots);
819 			xenvif_fatal_tx_err(queue->vif);
820 			return -E2BIG;
821 		}
822 
823 		/* Xen network protocol had implicit dependency on
824 		 * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to
825 		 * the historical MAX_SKB_FRAGS value 18 to honor the
826 		 * same behavior as before. Any packet using more than
827 		 * 18 slots but less than fatal_skb_slots slots is
828 		 * dropped
829 		 */
830 		if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
831 			if (net_ratelimit())
832 				netdev_dbg(queue->vif->dev,
833 					   "Too many slots (%d) exceeding limit (%d), dropping packet\n",
834 					   slots, XEN_NETBK_LEGACY_SLOTS_MAX);
835 			drop_err = -E2BIG;
836 		}
837 
838 		if (drop_err)
839 			txp = &dropped_tx;
840 
841 		RING_COPY_REQUEST(&queue->tx, cons + slots, txp);
842 
843 		/* If the guest submitted a frame >= 64 KiB then
844 		 * first->size overflowed and following slots will
845 		 * appear to be larger than the frame.
846 		 *
847 		 * This cannot be fatal error as there are buggy
848 		 * frontends that do this.
849 		 *
850 		 * Consume all slots and drop the packet.
851 		 */
852 		if (!drop_err && txp->size > first->size) {
853 			if (net_ratelimit())
854 				netdev_dbg(queue->vif->dev,
855 					   "Invalid tx request, slot size %u > remaining size %u\n",
856 					   txp->size, first->size);
857 			drop_err = -EIO;
858 		}
859 
860 		first->size -= txp->size;
861 		slots++;
862 
863 		if (unlikely((txp->offset + txp->size) > XEN_PAGE_SIZE)) {
864 			netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %u, size: %u\n",
865 				 txp->offset, txp->size);
866 			xenvif_fatal_tx_err(queue->vif);
867 			return -EINVAL;
868 		}
869 
870 		more_data = txp->flags & XEN_NETTXF_more_data;
871 
872 		if (!drop_err)
873 			txp++;
874 
875 	} while (more_data);
876 
877 	if (drop_err) {
878 		xenvif_tx_err(queue, first, extra_count, cons + slots);
879 		return drop_err;
880 	}
881 
882 	return slots;
883 }
884 
885 
886 struct xenvif_tx_cb {
887 	u16 pending_idx;
888 };
889 
890 #define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
891 
892 static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
893 					   u16 pending_idx,
894 					   struct xen_netif_tx_request *txp,
895 					   unsigned int extra_count,
896 					   struct gnttab_map_grant_ref *mop)
897 {
898 	queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx];
899 	gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx),
900 			  GNTMAP_host_map | GNTMAP_readonly,
901 			  txp->gref, queue->vif->domid);
902 
903 	memcpy(&queue->pending_tx_info[pending_idx].req, txp,
904 	       sizeof(*txp));
905 	queue->pending_tx_info[pending_idx].extra_count = extra_count;
906 }
907 
908 static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
909 {
910 	struct sk_buff *skb =
911 		alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
912 			  GFP_ATOMIC | __GFP_NOWARN);
913 	if (unlikely(skb == NULL))
914 		return NULL;
915 
916 	/* Packets passed to netif_rx() must have some headroom. */
917 	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
918 
919 	/* Initialize it here to avoid later surprises */
920 	skb_shinfo(skb)->destructor_arg = NULL;
921 
922 	return skb;
923 }
924 
925 static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
926 							struct sk_buff *skb,
927 							struct xen_netif_tx_request *txp,
928 							struct gnttab_map_grant_ref *gop,
929 							unsigned int frag_overflow,
930 							struct sk_buff *nskb)
931 {
932 	struct skb_shared_info *shinfo = skb_shinfo(skb);
933 	skb_frag_t *frags = shinfo->frags;
934 	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
935 	int start;
936 	pending_ring_idx_t index;
937 	unsigned int nr_slots;
938 
939 	nr_slots = shinfo->nr_frags;
940 
941 	/* Skip first skb fragment if it is on same page as header fragment. */
942 	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
943 
944 	for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;
945 	     shinfo->nr_frags++, txp++, gop++) {
946 		index = pending_index(queue->pending_cons++);
947 		pending_idx = queue->pending_ring[index];
948 		xenvif_tx_create_map_op(queue, pending_idx, txp, 0, gop);
949 		frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
950 	}
951 
952 	if (frag_overflow) {
953 
954 		shinfo = skb_shinfo(nskb);
955 		frags = shinfo->frags;
956 
957 		for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow;
958 		     shinfo->nr_frags++, txp++, gop++) {
959 			index = pending_index(queue->pending_cons++);
960 			pending_idx = queue->pending_ring[index];
961 			xenvif_tx_create_map_op(queue, pending_idx, txp, 0,
962 						gop);
963 			frag_set_pending_idx(&frags[shinfo->nr_frags],
964 					     pending_idx);
965 		}
966 
967 		skb_shinfo(skb)->frag_list = nskb;
968 	}
969 
970 	return gop;
971 }
972 
973 static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
974 					   u16 pending_idx,
975 					   grant_handle_t handle)
976 {
977 	if (unlikely(queue->grant_tx_handle[pending_idx] !=
978 		     NETBACK_INVALID_HANDLE)) {
979 		netdev_err(queue->vif->dev,
980 			   "Trying to overwrite active handle! pending_idx: 0x%x\n",
981 			   pending_idx);
982 		BUG();
983 	}
984 	queue->grant_tx_handle[pending_idx] = handle;
985 }
986 
987 static inline void xenvif_grant_handle_reset(struct xenvif_queue *queue,
988 					     u16 pending_idx)
989 {
990 	if (unlikely(queue->grant_tx_handle[pending_idx] ==
991 		     NETBACK_INVALID_HANDLE)) {
992 		netdev_err(queue->vif->dev,
993 			   "Trying to unmap invalid handle! pending_idx: 0x%x\n",
994 			   pending_idx);
995 		BUG();
996 	}
997 	queue->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
998 }
999 
1000 static int xenvif_tx_check_gop(struct xenvif_queue *queue,
1001 			       struct sk_buff *skb,
1002 			       struct gnttab_map_grant_ref **gopp_map,
1003 			       struct gnttab_copy **gopp_copy)
1004 {
1005 	struct gnttab_map_grant_ref *gop_map = *gopp_map;
1006 	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
1007 	/* This always points to the shinfo of the skb being checked, which
1008 	 * could be either the first or the one on the frag_list
1009 	 */
1010 	struct skb_shared_info *shinfo = skb_shinfo(skb);
1011 	/* If this is non-NULL, we are currently checking the frag_list skb, and
1012 	 * this points to the shinfo of the first one
1013 	 */
1014 	struct skb_shared_info *first_shinfo = NULL;
1015 	int nr_frags = shinfo->nr_frags;
1016 	const bool sharedslot = nr_frags &&
1017 				frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
1018 	int i, err;
1019 
1020 	/* Check status of header. */
1021 	err = (*gopp_copy)->status;
1022 	if (unlikely(err)) {
1023 		if (net_ratelimit())
1024 			netdev_dbg(queue->vif->dev,
1025 				   "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
1026 				   (*gopp_copy)->status,
1027 				   pending_idx,
1028 				   (*gopp_copy)->source.u.ref);
1029 		/* The first frag might still have this slot mapped */
1030 		if (!sharedslot)
1031 			xenvif_idx_release(queue, pending_idx,
1032 					   XEN_NETIF_RSP_ERROR);
1033 	}
1034 	(*gopp_copy)++;
1035 
1036 check_frags:
1037 	for (i = 0; i < nr_frags; i++, gop_map++) {
1038 		int j, newerr;
1039 
1040 		pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
1041 
1042 		/* Check error status: if okay then remember grant handle. */
1043 		newerr = gop_map->status;
1044 
1045 		if (likely(!newerr)) {
1046 			xenvif_grant_handle_set(queue,
1047 						pending_idx,
1048 						gop_map->handle);
1049 			/* Had a previous error? Invalidate this fragment. */
1050 			if (unlikely(err)) {
1051 				xenvif_idx_unmap(queue, pending_idx);
1052 				/* If the mapping of the first frag was OK, but
1053 				 * the header's copy failed, and they are
1054 				 * sharing a slot, send an error
1055 				 */
1056 				if (i == 0 && sharedslot)
1057 					xenvif_idx_release(queue, pending_idx,
1058 							   XEN_NETIF_RSP_ERROR);
1059 				else
1060 					xenvif_idx_release(queue, pending_idx,
1061 							   XEN_NETIF_RSP_OKAY);
1062 			}
1063 			continue;
1064 		}
1065 
1066 		/* Error on this fragment: respond to client with an error. */
1067 		if (net_ratelimit())
1068 			netdev_dbg(queue->vif->dev,
1069 				   "Grant map of %d. frag failed! status: %d pending_idx: %u ref: %u\n",
1070 				   i,
1071 				   gop_map->status,
1072 				   pending_idx,
1073 				   gop_map->ref);
1074 
1075 		xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
1076 
1077 		/* Not the first error? Preceding frags already invalidated. */
1078 		if (err)
1079 			continue;
1080 
1081 		/* First error: if the header haven't shared a slot with the
1082 		 * first frag, release it as well.
1083 		 */
1084 		if (!sharedslot)
1085 			xenvif_idx_release(queue,
1086 					   XENVIF_TX_CB(skb)->pending_idx,
1087 					   XEN_NETIF_RSP_OKAY);
1088 
1089 		/* Invalidate preceding fragments of this skb. */
1090 		for (j = 0; j < i; j++) {
1091 			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
1092 			xenvif_idx_unmap(queue, pending_idx);
1093 			xenvif_idx_release(queue, pending_idx,
1094 					   XEN_NETIF_RSP_OKAY);
1095 		}
1096 
1097 		/* And if we found the error while checking the frag_list, unmap
1098 		 * the first skb's frags
1099 		 */
1100 		if (first_shinfo) {
1101 			for (j = 0; j < first_shinfo->nr_frags; j++) {
1102 				pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]);
1103 				xenvif_idx_unmap(queue, pending_idx);
1104 				xenvif_idx_release(queue, pending_idx,
1105 						   XEN_NETIF_RSP_OKAY);
1106 			}
1107 		}
1108 
1109 		/* Remember the error: invalidate all subsequent fragments. */
1110 		err = newerr;
1111 	}
1112 
1113 	if (skb_has_frag_list(skb) && !first_shinfo) {
1114 		first_shinfo = skb_shinfo(skb);
1115 		shinfo = skb_shinfo(skb_shinfo(skb)->frag_list);
1116 		nr_frags = shinfo->nr_frags;
1117 
1118 		goto check_frags;
1119 	}
1120 
1121 	*gopp_map = gop_map;
1122 	return err;
1123 }
1124 
1125 static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
1126 {
1127 	struct skb_shared_info *shinfo = skb_shinfo(skb);
1128 	int nr_frags = shinfo->nr_frags;
1129 	int i;
1130 	u16 prev_pending_idx = INVALID_PENDING_IDX;
1131 
1132 	for (i = 0; i < nr_frags; i++) {
1133 		skb_frag_t *frag = shinfo->frags + i;
1134 		struct xen_netif_tx_request *txp;
1135 		struct page *page;
1136 		u16 pending_idx;
1137 
1138 		pending_idx = frag_get_pending_idx(frag);
1139 
1140 		/* If this is not the first frag, chain it to the previous*/
1141 		if (prev_pending_idx == INVALID_PENDING_IDX)
1142 			skb_shinfo(skb)->destructor_arg =
1143 				&callback_param(queue, pending_idx);
1144 		else
1145 			callback_param(queue, prev_pending_idx).ctx =
1146 				&callback_param(queue, pending_idx);
1147 
1148 		callback_param(queue, pending_idx).ctx = NULL;
1149 		prev_pending_idx = pending_idx;
1150 
1151 		txp = &queue->pending_tx_info[pending_idx].req;
1152 		page = virt_to_page(idx_to_kaddr(queue, pending_idx));
1153 		__skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
1154 		skb->len += txp->size;
1155 		skb->data_len += txp->size;
1156 		skb->truesize += txp->size;
1157 
1158 		/* Take an extra reference to offset network stack's put_page */
1159 		get_page(queue->mmap_pages[pending_idx]);
1160 	}
1161 }
1162 
1163 static int xenvif_get_extras(struct xenvif_queue *queue,
1164 			     struct xen_netif_extra_info *extras,
1165 			     unsigned int *extra_count,
1166 			     int work_to_do)
1167 {
1168 	struct xen_netif_extra_info extra;
1169 	RING_IDX cons = queue->tx.req_cons;
1170 
1171 	do {
1172 		if (unlikely(work_to_do-- <= 0)) {
1173 			netdev_err(queue->vif->dev, "Missing extra info\n");
1174 			xenvif_fatal_tx_err(queue->vif);
1175 			return -EBADR;
1176 		}
1177 
1178 		RING_COPY_REQUEST(&queue->tx, cons, &extra);
1179 
1180 		queue->tx.req_cons = ++cons;
1181 		(*extra_count)++;
1182 
1183 		if (unlikely(!extra.type ||
1184 			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
1185 			netdev_err(queue->vif->dev,
1186 				   "Invalid extra type: %d\n", extra.type);
1187 			xenvif_fatal_tx_err(queue->vif);
1188 			return -EINVAL;
1189 		}
1190 
1191 		memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
1192 	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
1193 
1194 	return work_to_do;
1195 }
1196 
1197 static int xenvif_set_skb_gso(struct xenvif *vif,
1198 			      struct sk_buff *skb,
1199 			      struct xen_netif_extra_info *gso)
1200 {
1201 	if (!gso->u.gso.size) {
1202 		netdev_err(vif->dev, "GSO size must not be zero.\n");
1203 		xenvif_fatal_tx_err(vif);
1204 		return -EINVAL;
1205 	}
1206 
1207 	switch (gso->u.gso.type) {
1208 	case XEN_NETIF_GSO_TYPE_TCPV4:
1209 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1210 		break;
1211 	case XEN_NETIF_GSO_TYPE_TCPV6:
1212 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
1213 		break;
1214 	default:
1215 		netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
1216 		xenvif_fatal_tx_err(vif);
1217 		return -EINVAL;
1218 	}
1219 
1220 	skb_shinfo(skb)->gso_size = gso->u.gso.size;
1221 	/* gso_segs will be calculated later */
1222 
1223 	return 0;
1224 }
1225 
1226 static int checksum_setup(struct xenvif_queue *queue, struct sk_buff *skb)
1227 {
1228 	bool recalculate_partial_csum = false;
1229 
1230 	/* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
1231 	 * peers can fail to set NETRXF_csum_blank when sending a GSO
1232 	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
1233 	 * recalculate the partial checksum.
1234 	 */
1235 	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
1236 		queue->stats.rx_gso_checksum_fixup++;
1237 		skb->ip_summed = CHECKSUM_PARTIAL;
1238 		recalculate_partial_csum = true;
1239 	}
1240 
1241 	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
1242 	if (skb->ip_summed != CHECKSUM_PARTIAL)
1243 		return 0;
1244 
1245 	return skb_checksum_setup(skb, recalculate_partial_csum);
1246 }
1247 
1248 static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
1249 {
1250 	u64 now = get_jiffies_64();
1251 	u64 next_credit = queue->credit_window_start +
1252 		msecs_to_jiffies(queue->credit_usec / 1000);
1253 
1254 	/* Timer could already be pending in rare cases. */
1255 	if (timer_pending(&queue->credit_timeout))
1256 		return true;
1257 
1258 	/* Passed the point where we can replenish credit? */
1259 	if (time_after_eq64(now, next_credit)) {
1260 		queue->credit_window_start = now;
1261 		tx_add_credit(queue);
1262 	}
1263 
1264 	/* Still too big to send right now? Set a callback. */
1265 	if (size > queue->remaining_credit) {
1266 		queue->credit_timeout.data     =
1267 			(unsigned long)queue;
1268 		mod_timer(&queue->credit_timeout,
1269 			  next_credit);
1270 		queue->credit_window_start = next_credit;
1271 
1272 		return true;
1273 	}
1274 
1275 	return false;
1276 }
1277 
1278 /* No locking is required in xenvif_mcast_add/del() as they are
1279  * only ever invoked from NAPI poll. An RCU list is used because
1280  * xenvif_mcast_match() is called asynchronously, during start_xmit.
1281  */
1282 
1283 static int xenvif_mcast_add(struct xenvif *vif, const u8 *addr)
1284 {
1285 	struct xenvif_mcast_addr *mcast;
1286 
1287 	if (vif->fe_mcast_count == XEN_NETBK_MCAST_MAX) {
1288 		if (net_ratelimit())
1289 			netdev_err(vif->dev,
1290 				   "Too many multicast addresses\n");
1291 		return -ENOSPC;
1292 	}
1293 
1294 	mcast = kzalloc(sizeof(*mcast), GFP_ATOMIC);
1295 	if (!mcast)
1296 		return -ENOMEM;
1297 
1298 	ether_addr_copy(mcast->addr, addr);
1299 	list_add_tail_rcu(&mcast->entry, &vif->fe_mcast_addr);
1300 	vif->fe_mcast_count++;
1301 
1302 	return 0;
1303 }
1304 
1305 static void xenvif_mcast_del(struct xenvif *vif, const u8 *addr)
1306 {
1307 	struct xenvif_mcast_addr *mcast;
1308 
1309 	list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
1310 		if (ether_addr_equal(addr, mcast->addr)) {
1311 			--vif->fe_mcast_count;
1312 			list_del_rcu(&mcast->entry);
1313 			kfree_rcu(mcast, rcu);
1314 			break;
1315 		}
1316 	}
1317 }
1318 
1319 bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr)
1320 {
1321 	struct xenvif_mcast_addr *mcast;
1322 
1323 	rcu_read_lock();
1324 	list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
1325 		if (ether_addr_equal(addr, mcast->addr)) {
1326 			rcu_read_unlock();
1327 			return true;
1328 		}
1329 	}
1330 	rcu_read_unlock();
1331 
1332 	return false;
1333 }
1334 
1335 void xenvif_mcast_addr_list_free(struct xenvif *vif)
1336 {
1337 	/* No need for locking or RCU here. NAPI poll and TX queue
1338 	 * are stopped.
1339 	 */
1340 	while (!list_empty(&vif->fe_mcast_addr)) {
1341 		struct xenvif_mcast_addr *mcast;
1342 
1343 		mcast = list_first_entry(&vif->fe_mcast_addr,
1344 					 struct xenvif_mcast_addr,
1345 					 entry);
1346 		--vif->fe_mcast_count;
1347 		list_del(&mcast->entry);
1348 		kfree(mcast);
1349 	}
1350 }
1351 
1352 static void xenvif_tx_build_gops(struct xenvif_queue *queue,
1353 				     int budget,
1354 				     unsigned *copy_ops,
1355 				     unsigned *map_ops)
1356 {
1357 	struct gnttab_map_grant_ref *gop = queue->tx_map_ops;
1358 	struct sk_buff *skb, *nskb;
1359 	int ret;
1360 	unsigned int frag_overflow;
1361 
1362 	while (skb_queue_len(&queue->tx_queue) < budget) {
1363 		struct xen_netif_tx_request txreq;
1364 		struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
1365 		struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
1366 		unsigned int extra_count;
1367 		u16 pending_idx;
1368 		RING_IDX idx;
1369 		int work_to_do;
1370 		unsigned int data_len;
1371 		pending_ring_idx_t index;
1372 
1373 		if (queue->tx.sring->req_prod - queue->tx.req_cons >
1374 		    XEN_NETIF_TX_RING_SIZE) {
1375 			netdev_err(queue->vif->dev,
1376 				   "Impossible number of requests. "
1377 				   "req_prod %d, req_cons %d, size %ld\n",
1378 				   queue->tx.sring->req_prod, queue->tx.req_cons,
1379 				   XEN_NETIF_TX_RING_SIZE);
1380 			xenvif_fatal_tx_err(queue->vif);
1381 			break;
1382 		}
1383 
1384 		work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
1385 		if (!work_to_do)
1386 			break;
1387 
1388 		idx = queue->tx.req_cons;
1389 		rmb(); /* Ensure that we see the request before we copy it. */
1390 		RING_COPY_REQUEST(&queue->tx, idx, &txreq);
1391 
1392 		/* Credit-based scheduling. */
1393 		if (txreq.size > queue->remaining_credit &&
1394 		    tx_credit_exceeded(queue, txreq.size))
1395 			break;
1396 
1397 		queue->remaining_credit -= txreq.size;
1398 
1399 		work_to_do--;
1400 		queue->tx.req_cons = ++idx;
1401 
1402 		memset(extras, 0, sizeof(extras));
1403 		extra_count = 0;
1404 		if (txreq.flags & XEN_NETTXF_extra_info) {
1405 			work_to_do = xenvif_get_extras(queue, extras,
1406 						       &extra_count,
1407 						       work_to_do);
1408 			idx = queue->tx.req_cons;
1409 			if (unlikely(work_to_do < 0))
1410 				break;
1411 		}
1412 
1413 		if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1].type) {
1414 			struct xen_netif_extra_info *extra;
1415 
1416 			extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1];
1417 			ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr);
1418 
1419 			make_tx_response(queue, &txreq, extra_count,
1420 					 (ret == 0) ?
1421 					 XEN_NETIF_RSP_OKAY :
1422 					 XEN_NETIF_RSP_ERROR);
1423 			push_tx_responses(queue);
1424 			continue;
1425 		}
1426 
1427 		if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1].type) {
1428 			struct xen_netif_extra_info *extra;
1429 
1430 			extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1];
1431 			xenvif_mcast_del(queue->vif, extra->u.mcast.addr);
1432 
1433 			make_tx_response(queue, &txreq, extra_count,
1434 					 XEN_NETIF_RSP_OKAY);
1435 			push_tx_responses(queue);
1436 			continue;
1437 		}
1438 
1439 		ret = xenvif_count_requests(queue, &txreq, extra_count,
1440 					    txfrags, work_to_do);
1441 		if (unlikely(ret < 0))
1442 			break;
1443 
1444 		idx += ret;
1445 
1446 		if (unlikely(txreq.size < ETH_HLEN)) {
1447 			netdev_dbg(queue->vif->dev,
1448 				   "Bad packet size: %d\n", txreq.size);
1449 			xenvif_tx_err(queue, &txreq, extra_count, idx);
1450 			break;
1451 		}
1452 
1453 		/* No crossing a page as the payload mustn't fragment. */
1454 		if (unlikely((txreq.offset + txreq.size) > XEN_PAGE_SIZE)) {
1455 			netdev_err(queue->vif->dev,
1456 				   "txreq.offset: %u, size: %u, end: %lu\n",
1457 				   txreq.offset, txreq.size,
1458 				   (unsigned long)(txreq.offset&~XEN_PAGE_MASK) + txreq.size);
1459 			xenvif_fatal_tx_err(queue->vif);
1460 			break;
1461 		}
1462 
1463 		index = pending_index(queue->pending_cons);
1464 		pending_idx = queue->pending_ring[index];
1465 
1466 		data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN &&
1467 			    ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
1468 			XEN_NETBACK_TX_COPY_LEN : txreq.size;
1469 
1470 		skb = xenvif_alloc_skb(data_len);
1471 		if (unlikely(skb == NULL)) {
1472 			netdev_dbg(queue->vif->dev,
1473 				   "Can't allocate a skb in start_xmit.\n");
1474 			xenvif_tx_err(queue, &txreq, extra_count, idx);
1475 			break;
1476 		}
1477 
1478 		skb_shinfo(skb)->nr_frags = ret;
1479 		if (data_len < txreq.size)
1480 			skb_shinfo(skb)->nr_frags++;
1481 		/* At this point shinfo->nr_frags is in fact the number of
1482 		 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
1483 		 */
1484 		frag_overflow = 0;
1485 		nskb = NULL;
1486 		if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) {
1487 			frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS;
1488 			BUG_ON(frag_overflow > MAX_SKB_FRAGS);
1489 			skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
1490 			nskb = xenvif_alloc_skb(0);
1491 			if (unlikely(nskb == NULL)) {
1492 				kfree_skb(skb);
1493 				xenvif_tx_err(queue, &txreq, extra_count, idx);
1494 				if (net_ratelimit())
1495 					netdev_err(queue->vif->dev,
1496 						   "Can't allocate the frag_list skb.\n");
1497 				break;
1498 			}
1499 		}
1500 
1501 		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1502 			struct xen_netif_extra_info *gso;
1503 			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1504 
1505 			if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
1506 				/* Failure in xenvif_set_skb_gso is fatal. */
1507 				kfree_skb(skb);
1508 				kfree_skb(nskb);
1509 				break;
1510 			}
1511 		}
1512 
1513 		if (extras[XEN_NETIF_EXTRA_TYPE_HASH - 1].type) {
1514 			struct xen_netif_extra_info *extra;
1515 			enum pkt_hash_types type = PKT_HASH_TYPE_NONE;
1516 
1517 			extra = &extras[XEN_NETIF_EXTRA_TYPE_HASH - 1];
1518 
1519 			switch (extra->u.hash.type) {
1520 			case _XEN_NETIF_CTRL_HASH_TYPE_IPV4:
1521 			case _XEN_NETIF_CTRL_HASH_TYPE_IPV6:
1522 				type = PKT_HASH_TYPE_L3;
1523 				break;
1524 
1525 			case _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP:
1526 			case _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP:
1527 				type = PKT_HASH_TYPE_L4;
1528 				break;
1529 
1530 			default:
1531 				break;
1532 			}
1533 
1534 			if (type != PKT_HASH_TYPE_NONE)
1535 				skb_set_hash(skb,
1536 					     *(u32 *)extra->u.hash.value,
1537 					     type);
1538 		}
1539 
1540 		XENVIF_TX_CB(skb)->pending_idx = pending_idx;
1541 
1542 		__skb_put(skb, data_len);
1543 		queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;
1544 		queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid;
1545 		queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;
1546 
1547 		queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
1548 			virt_to_gfn(skb->data);
1549 		queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
1550 		queue->tx_copy_ops[*copy_ops].dest.offset =
1551 			offset_in_page(skb->data) & ~XEN_PAGE_MASK;
1552 
1553 		queue->tx_copy_ops[*copy_ops].len = data_len;
1554 		queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;
1555 
1556 		(*copy_ops)++;
1557 
1558 		if (data_len < txreq.size) {
1559 			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
1560 					     pending_idx);
1561 			xenvif_tx_create_map_op(queue, pending_idx, &txreq,
1562 						extra_count, gop);
1563 			gop++;
1564 		} else {
1565 			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
1566 					     INVALID_PENDING_IDX);
1567 			memcpy(&queue->pending_tx_info[pending_idx].req,
1568 			       &txreq, sizeof(txreq));
1569 			queue->pending_tx_info[pending_idx].extra_count =
1570 				extra_count;
1571 		}
1572 
1573 		queue->pending_cons++;
1574 
1575 		gop = xenvif_get_requests(queue, skb, txfrags, gop,
1576 				          frag_overflow, nskb);
1577 
1578 		__skb_queue_tail(&queue->tx_queue, skb);
1579 
1580 		queue->tx.req_cons = idx;
1581 
1582 		if (((gop-queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) ||
1583 		    (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops)))
1584 			break;
1585 	}
1586 
1587 	(*map_ops) = gop - queue->tx_map_ops;
1588 	return;
1589 }
1590 
1591 /* Consolidate skb with a frag_list into a brand new one with local pages on
1592  * frags. Returns 0 or -ENOMEM if can't allocate new pages.
1593  */
1594 static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *skb)
1595 {
1596 	unsigned int offset = skb_headlen(skb);
1597 	skb_frag_t frags[MAX_SKB_FRAGS];
1598 	int i, f;
1599 	struct ubuf_info *uarg;
1600 	struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
1601 
1602 	queue->stats.tx_zerocopy_sent += 2;
1603 	queue->stats.tx_frag_overflow++;
1604 
1605 	xenvif_fill_frags(queue, nskb);
1606 	/* Subtract frags size, we will correct it later */
1607 	skb->truesize -= skb->data_len;
1608 	skb->len += nskb->len;
1609 	skb->data_len += nskb->len;
1610 
1611 	/* create a brand new frags array and coalesce there */
1612 	for (i = 0; offset < skb->len; i++) {
1613 		struct page *page;
1614 		unsigned int len;
1615 
1616 		BUG_ON(i >= MAX_SKB_FRAGS);
1617 		page = alloc_page(GFP_ATOMIC);
1618 		if (!page) {
1619 			int j;
1620 			skb->truesize += skb->data_len;
1621 			for (j = 0; j < i; j++)
1622 				put_page(frags[j].page.p);
1623 			return -ENOMEM;
1624 		}
1625 
1626 		if (offset + PAGE_SIZE < skb->len)
1627 			len = PAGE_SIZE;
1628 		else
1629 			len = skb->len - offset;
1630 		if (skb_copy_bits(skb, offset, page_address(page), len))
1631 			BUG();
1632 
1633 		offset += len;
1634 		frags[i].page.p = page;
1635 		frags[i].page_offset = 0;
1636 		skb_frag_size_set(&frags[i], len);
1637 	}
1638 
1639 	/* Copied all the bits from the frag list -- free it. */
1640 	skb_frag_list_init(skb);
1641 	xenvif_skb_zerocopy_prepare(queue, nskb);
1642 	kfree_skb(nskb);
1643 
1644 	/* Release all the original (foreign) frags. */
1645 	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
1646 		skb_frag_unref(skb, f);
1647 	uarg = skb_shinfo(skb)->destructor_arg;
1648 	/* increase inflight counter to offset decrement in callback */
1649 	atomic_inc(&queue->inflight_packets);
1650 	uarg->callback(uarg, true);
1651 	skb_shinfo(skb)->destructor_arg = NULL;
1652 
1653 	/* Fill the skb with the new (local) frags. */
1654 	memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t));
1655 	skb_shinfo(skb)->nr_frags = i;
1656 	skb->truesize += i * PAGE_SIZE;
1657 
1658 	return 0;
1659 }
1660 
1661 static int xenvif_tx_submit(struct xenvif_queue *queue)
1662 {
1663 	struct gnttab_map_grant_ref *gop_map = queue->tx_map_ops;
1664 	struct gnttab_copy *gop_copy = queue->tx_copy_ops;
1665 	struct sk_buff *skb;
1666 	int work_done = 0;
1667 
1668 	while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
1669 		struct xen_netif_tx_request *txp;
1670 		u16 pending_idx;
1671 		unsigned data_len;
1672 
1673 		pending_idx = XENVIF_TX_CB(skb)->pending_idx;
1674 		txp = &queue->pending_tx_info[pending_idx].req;
1675 
1676 		/* Check the remap error code. */
1677 		if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
1678 			/* If there was an error, xenvif_tx_check_gop is
1679 			 * expected to release all the frags which were mapped,
1680 			 * so kfree_skb shouldn't do it again
1681 			 */
1682 			skb_shinfo(skb)->nr_frags = 0;
1683 			if (skb_has_frag_list(skb)) {
1684 				struct sk_buff *nskb =
1685 						skb_shinfo(skb)->frag_list;
1686 				skb_shinfo(nskb)->nr_frags = 0;
1687 			}
1688 			kfree_skb(skb);
1689 			continue;
1690 		}
1691 
1692 		data_len = skb->len;
1693 		callback_param(queue, pending_idx).ctx = NULL;
1694 		if (data_len < txp->size) {
1695 			/* Append the packet payload as a fragment. */
1696 			txp->offset += data_len;
1697 			txp->size -= data_len;
1698 		} else {
1699 			/* Schedule a response immediately. */
1700 			xenvif_idx_release(queue, pending_idx,
1701 					   XEN_NETIF_RSP_OKAY);
1702 		}
1703 
1704 		if (txp->flags & XEN_NETTXF_csum_blank)
1705 			skb->ip_summed = CHECKSUM_PARTIAL;
1706 		else if (txp->flags & XEN_NETTXF_data_validated)
1707 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1708 
1709 		xenvif_fill_frags(queue, skb);
1710 
1711 		if (unlikely(skb_has_frag_list(skb))) {
1712 			if (xenvif_handle_frag_list(queue, skb)) {
1713 				if (net_ratelimit())
1714 					netdev_err(queue->vif->dev,
1715 						   "Not enough memory to consolidate frag_list!\n");
1716 				xenvif_skb_zerocopy_prepare(queue, skb);
1717 				kfree_skb(skb);
1718 				continue;
1719 			}
1720 		}
1721 
1722 		skb->dev      = queue->vif->dev;
1723 		skb->protocol = eth_type_trans(skb, skb->dev);
1724 		skb_reset_network_header(skb);
1725 
1726 		if (checksum_setup(queue, skb)) {
1727 			netdev_dbg(queue->vif->dev,
1728 				   "Can't setup checksum in net_tx_action\n");
1729 			/* We have to set this flag to trigger the callback */
1730 			if (skb_shinfo(skb)->destructor_arg)
1731 				xenvif_skb_zerocopy_prepare(queue, skb);
1732 			kfree_skb(skb);
1733 			continue;
1734 		}
1735 
1736 		skb_probe_transport_header(skb, 0);
1737 
1738 		/* If the packet is GSO then we will have just set up the
1739 		 * transport header offset in checksum_setup so it's now
1740 		 * straightforward to calculate gso_segs.
1741 		 */
1742 		if (skb_is_gso(skb)) {
1743 			int mss = skb_shinfo(skb)->gso_size;
1744 			int hdrlen = skb_transport_header(skb) -
1745 				skb_mac_header(skb) +
1746 				tcp_hdrlen(skb);
1747 
1748 			skb_shinfo(skb)->gso_segs =
1749 				DIV_ROUND_UP(skb->len - hdrlen, mss);
1750 		}
1751 
1752 		queue->stats.rx_bytes += skb->len;
1753 		queue->stats.rx_packets++;
1754 
1755 		work_done++;
1756 
1757 		/* Set this flag right before netif_receive_skb, otherwise
1758 		 * someone might think this packet already left netback, and
1759 		 * do a skb_copy_ubufs while we are still in control of the
1760 		 * skb. E.g. the __pskb_pull_tail earlier can do such thing.
1761 		 */
1762 		if (skb_shinfo(skb)->destructor_arg) {
1763 			xenvif_skb_zerocopy_prepare(queue, skb);
1764 			queue->stats.tx_zerocopy_sent++;
1765 		}
1766 
1767 		netif_receive_skb(skb);
1768 	}
1769 
1770 	return work_done;
1771 }
1772 
1773 void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
1774 {
1775 	unsigned long flags;
1776 	pending_ring_idx_t index;
1777 	struct xenvif_queue *queue = ubuf_to_queue(ubuf);
1778 
1779 	/* This is the only place where we grab this lock, to protect callbacks
1780 	 * from each other.
1781 	 */
1782 	spin_lock_irqsave(&queue->callback_lock, flags);
1783 	do {
1784 		u16 pending_idx = ubuf->desc;
1785 		ubuf = (struct ubuf_info *) ubuf->ctx;
1786 		BUG_ON(queue->dealloc_prod - queue->dealloc_cons >=
1787 			MAX_PENDING_REQS);
1788 		index = pending_index(queue->dealloc_prod);
1789 		queue->dealloc_ring[index] = pending_idx;
1790 		/* Sync with xenvif_tx_dealloc_action:
1791 		 * insert idx then incr producer.
1792 		 */
1793 		smp_wmb();
1794 		queue->dealloc_prod++;
1795 	} while (ubuf);
1796 	spin_unlock_irqrestore(&queue->callback_lock, flags);
1797 
1798 	if (likely(zerocopy_success))
1799 		queue->stats.tx_zerocopy_success++;
1800 	else
1801 		queue->stats.tx_zerocopy_fail++;
1802 	xenvif_skb_zerocopy_complete(queue);
1803 }
1804 
1805 static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
1806 {
1807 	struct gnttab_unmap_grant_ref *gop;
1808 	pending_ring_idx_t dc, dp;
1809 	u16 pending_idx, pending_idx_release[MAX_PENDING_REQS];
1810 	unsigned int i = 0;
1811 
1812 	dc = queue->dealloc_cons;
1813 	gop = queue->tx_unmap_ops;
1814 
1815 	/* Free up any grants we have finished using */
1816 	do {
1817 		dp = queue->dealloc_prod;
1818 
1819 		/* Ensure we see all indices enqueued by all
1820 		 * xenvif_zerocopy_callback().
1821 		 */
1822 		smp_rmb();
1823 
1824 		while (dc != dp) {
1825 			BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS);
1826 			pending_idx =
1827 				queue->dealloc_ring[pending_index(dc++)];
1828 
1829 			pending_idx_release[gop - queue->tx_unmap_ops] =
1830 				pending_idx;
1831 			queue->pages_to_unmap[gop - queue->tx_unmap_ops] =
1832 				queue->mmap_pages[pending_idx];
1833 			gnttab_set_unmap_op(gop,
1834 					    idx_to_kaddr(queue, pending_idx),
1835 					    GNTMAP_host_map,
1836 					    queue->grant_tx_handle[pending_idx]);
1837 			xenvif_grant_handle_reset(queue, pending_idx);
1838 			++gop;
1839 		}
1840 
1841 	} while (dp != queue->dealloc_prod);
1842 
1843 	queue->dealloc_cons = dc;
1844 
1845 	if (gop - queue->tx_unmap_ops > 0) {
1846 		int ret;
1847 		ret = gnttab_unmap_refs(queue->tx_unmap_ops,
1848 					NULL,
1849 					queue->pages_to_unmap,
1850 					gop - queue->tx_unmap_ops);
1851 		if (ret) {
1852 			netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tu ret %d\n",
1853 				   gop - queue->tx_unmap_ops, ret);
1854 			for (i = 0; i < gop - queue->tx_unmap_ops; ++i) {
1855 				if (gop[i].status != GNTST_okay)
1856 					netdev_err(queue->vif->dev,
1857 						   " host_addr: 0x%llx handle: 0x%x status: %d\n",
1858 						   gop[i].host_addr,
1859 						   gop[i].handle,
1860 						   gop[i].status);
1861 			}
1862 			BUG();
1863 		}
1864 	}
1865 
1866 	for (i = 0; i < gop - queue->tx_unmap_ops; ++i)
1867 		xenvif_idx_release(queue, pending_idx_release[i],
1868 				   XEN_NETIF_RSP_OKAY);
1869 }
1870 
1871 
1872 /* Called after netfront has transmitted */
1873 int xenvif_tx_action(struct xenvif_queue *queue, int budget)
1874 {
1875 	unsigned nr_mops, nr_cops = 0;
1876 	int work_done, ret;
1877 
1878 	if (unlikely(!tx_work_todo(queue)))
1879 		return 0;
1880 
1881 	xenvif_tx_build_gops(queue, budget, &nr_cops, &nr_mops);
1882 
1883 	if (nr_cops == 0)
1884 		return 0;
1885 
1886 	gnttab_batch_copy(queue->tx_copy_ops, nr_cops);
1887 	if (nr_mops != 0) {
1888 		ret = gnttab_map_refs(queue->tx_map_ops,
1889 				      NULL,
1890 				      queue->pages_to_map,
1891 				      nr_mops);
1892 		BUG_ON(ret);
1893 	}
1894 
1895 	work_done = xenvif_tx_submit(queue);
1896 
1897 	return work_done;
1898 }
1899 
1900 static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
1901 			       u8 status)
1902 {
1903 	struct pending_tx_info *pending_tx_info;
1904 	pending_ring_idx_t index;
1905 	unsigned long flags;
1906 
1907 	pending_tx_info = &queue->pending_tx_info[pending_idx];
1908 
1909 	spin_lock_irqsave(&queue->response_lock, flags);
1910 
1911 	make_tx_response(queue, &pending_tx_info->req,
1912 			 pending_tx_info->extra_count, status);
1913 
1914 	/* Release the pending index before pusing the Tx response so
1915 	 * its available before a new Tx request is pushed by the
1916 	 * frontend.
1917 	 */
1918 	index = pending_index(queue->pending_prod++);
1919 	queue->pending_ring[index] = pending_idx;
1920 
1921 	push_tx_responses(queue);
1922 
1923 	spin_unlock_irqrestore(&queue->response_lock, flags);
1924 }
1925 
1926 
1927 static void make_tx_response(struct xenvif_queue *queue,
1928 			     struct xen_netif_tx_request *txp,
1929 			     unsigned int extra_count,
1930 			     s8       st)
1931 {
1932 	RING_IDX i = queue->tx.rsp_prod_pvt;
1933 	struct xen_netif_tx_response *resp;
1934 
1935 	resp = RING_GET_RESPONSE(&queue->tx, i);
1936 	resp->id     = txp->id;
1937 	resp->status = st;
1938 
1939 	while (extra_count-- != 0)
1940 		RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
1941 
1942 	queue->tx.rsp_prod_pvt = ++i;
1943 }
1944 
1945 static void push_tx_responses(struct xenvif_queue *queue)
1946 {
1947 	int notify;
1948 
1949 	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
1950 	if (notify)
1951 		notify_remote_via_irq(queue->tx_irq);
1952 }
1953 
1954 static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
1955 					     u16      id,
1956 					     s8       st,
1957 					     u16      offset,
1958 					     u16      size,
1959 					     u16      flags)
1960 {
1961 	RING_IDX i = queue->rx.rsp_prod_pvt;
1962 	struct xen_netif_rx_response *resp;
1963 
1964 	resp = RING_GET_RESPONSE(&queue->rx, i);
1965 	resp->offset     = offset;
1966 	resp->flags      = flags;
1967 	resp->id         = id;
1968 	resp->status     = (s16)size;
1969 	if (st < 0)
1970 		resp->status = (s16)st;
1971 
1972 	queue->rx.rsp_prod_pvt = ++i;
1973 
1974 	return resp;
1975 }
1976 
1977 void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
1978 {
1979 	int ret;
1980 	struct gnttab_unmap_grant_ref tx_unmap_op;
1981 
1982 	gnttab_set_unmap_op(&tx_unmap_op,
1983 			    idx_to_kaddr(queue, pending_idx),
1984 			    GNTMAP_host_map,
1985 			    queue->grant_tx_handle[pending_idx]);
1986 	xenvif_grant_handle_reset(queue, pending_idx);
1987 
1988 	ret = gnttab_unmap_refs(&tx_unmap_op, NULL,
1989 				&queue->mmap_pages[pending_idx], 1);
1990 	if (ret) {
1991 		netdev_err(queue->vif->dev,
1992 			   "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: 0x%x status: %d\n",
1993 			   ret,
1994 			   pending_idx,
1995 			   tx_unmap_op.host_addr,
1996 			   tx_unmap_op.handle,
1997 			   tx_unmap_op.status);
1998 		BUG();
1999 	}
2000 }
2001 
2002 static inline int tx_work_todo(struct xenvif_queue *queue)
2003 {
2004 	if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)))
2005 		return 1;
2006 
2007 	return 0;
2008 }
2009 
2010 static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue)
2011 {
2012 	return queue->dealloc_cons != queue->dealloc_prod;
2013 }
2014 
2015 void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue)
2016 {
2017 	if (queue->tx.sring)
2018 		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
2019 					queue->tx.sring);
2020 	if (queue->rx.sring)
2021 		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
2022 					queue->rx.sring);
2023 }
2024 
2025 int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
2026 				   grant_ref_t tx_ring_ref,
2027 				   grant_ref_t rx_ring_ref)
2028 {
2029 	void *addr;
2030 	struct xen_netif_tx_sring *txs;
2031 	struct xen_netif_rx_sring *rxs;
2032 
2033 	int err = -ENOMEM;
2034 
2035 	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
2036 				     &tx_ring_ref, 1, &addr);
2037 	if (err)
2038 		goto err;
2039 
2040 	txs = (struct xen_netif_tx_sring *)addr;
2041 	BACK_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);
2042 
2043 	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
2044 				     &rx_ring_ref, 1, &addr);
2045 	if (err)
2046 		goto err;
2047 
2048 	rxs = (struct xen_netif_rx_sring *)addr;
2049 	BACK_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
2050 
2051 	return 0;
2052 
2053 err:
2054 	xenvif_unmap_frontend_data_rings(queue);
2055 	return err;
2056 }
2057 
2058 static void xenvif_queue_carrier_off(struct xenvif_queue *queue)
2059 {
2060 	struct xenvif *vif = queue->vif;
2061 
2062 	queue->stalled = true;
2063 
2064 	/* At least one queue has stalled? Disable the carrier. */
2065 	spin_lock(&vif->lock);
2066 	if (vif->stalled_queues++ == 0) {
2067 		netdev_info(vif->dev, "Guest Rx stalled");
2068 		netif_carrier_off(vif->dev);
2069 	}
2070 	spin_unlock(&vif->lock);
2071 }
2072 
2073 static void xenvif_queue_carrier_on(struct xenvif_queue *queue)
2074 {
2075 	struct xenvif *vif = queue->vif;
2076 
2077 	queue->last_rx_time = jiffies; /* Reset Rx stall detection. */
2078 	queue->stalled = false;
2079 
2080 	/* All queues are ready? Enable the carrier. */
2081 	spin_lock(&vif->lock);
2082 	if (--vif->stalled_queues == 0) {
2083 		netdev_info(vif->dev, "Guest Rx ready");
2084 		netif_carrier_on(vif->dev);
2085 	}
2086 	spin_unlock(&vif->lock);
2087 }
2088 
2089 static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
2090 {
2091 	RING_IDX prod, cons;
2092 
2093 	prod = queue->rx.sring->req_prod;
2094 	cons = queue->rx.req_cons;
2095 
2096 	return !queue->stalled && prod - cons < 1
2097 		&& time_after(jiffies,
2098 			      queue->last_rx_time + queue->vif->stall_timeout);
2099 }
2100 
2101 static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
2102 {
2103 	RING_IDX prod, cons;
2104 
2105 	prod = queue->rx.sring->req_prod;
2106 	cons = queue->rx.req_cons;
2107 
2108 	return queue->stalled && prod - cons >= 1;
2109 }
2110 
2111 static bool xenvif_have_rx_work(struct xenvif_queue *queue)
2112 {
2113 	return xenvif_rx_ring_slots_available(queue)
2114 		|| (queue->vif->stall_timeout &&
2115 		    (xenvif_rx_queue_stalled(queue)
2116 		     || xenvif_rx_queue_ready(queue)))
2117 		|| kthread_should_stop()
2118 		|| queue->vif->disabled;
2119 }
2120 
2121 static long xenvif_rx_queue_timeout(struct xenvif_queue *queue)
2122 {
2123 	struct sk_buff *skb;
2124 	long timeout;
2125 
2126 	skb = skb_peek(&queue->rx_queue);
2127 	if (!skb)
2128 		return MAX_SCHEDULE_TIMEOUT;
2129 
2130 	timeout = XENVIF_RX_CB(skb)->expires - jiffies;
2131 	return timeout < 0 ? 0 : timeout;
2132 }
2133 
2134 /* Wait until the guest Rx thread has work.
2135  *
2136  * The timeout needs to be adjusted based on the current head of the
2137  * queue (and not just the head at the beginning).  In particular, if
2138  * the queue is initially empty an infinite timeout is used and this
2139  * needs to be reduced when a skb is queued.
2140  *
2141  * This cannot be done with wait_event_timeout() because it only
2142  * calculates the timeout once.
2143  */
2144 static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
2145 {
2146 	DEFINE_WAIT(wait);
2147 
2148 	if (xenvif_have_rx_work(queue))
2149 		return;
2150 
2151 	for (;;) {
2152 		long ret;
2153 
2154 		prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
2155 		if (xenvif_have_rx_work(queue))
2156 			break;
2157 		ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
2158 		if (!ret)
2159 			break;
2160 	}
2161 	finish_wait(&queue->wq, &wait);
2162 }
2163 
2164 int xenvif_kthread_guest_rx(void *data)
2165 {
2166 	struct xenvif_queue *queue = data;
2167 	struct xenvif *vif = queue->vif;
2168 
2169 	if (!vif->stall_timeout)
2170 		xenvif_queue_carrier_on(queue);
2171 
2172 	for (;;) {
2173 		xenvif_wait_for_rx_work(queue);
2174 
2175 		if (kthread_should_stop())
2176 			break;
2177 
2178 		/* This frontend is found to be rogue, disable it in
2179 		 * kthread context. Currently this is only set when
2180 		 * netback finds out frontend sends malformed packet,
2181 		 * but we cannot disable the interface in softirq
2182 		 * context so we defer it here, if this thread is
2183 		 * associated with queue 0.
2184 		 */
2185 		if (unlikely(vif->disabled && queue->id == 0)) {
2186 			xenvif_carrier_off(vif);
2187 			break;
2188 		}
2189 
2190 		if (!skb_queue_empty(&queue->rx_queue))
2191 			xenvif_rx_action(queue);
2192 
2193 		/* If the guest hasn't provided any Rx slots for a
2194 		 * while it's probably not responsive, drop the
2195 		 * carrier so packets are dropped earlier.
2196 		 */
2197 		if (vif->stall_timeout) {
2198 			if (xenvif_rx_queue_stalled(queue))
2199 				xenvif_queue_carrier_off(queue);
2200 			else if (xenvif_rx_queue_ready(queue))
2201 				xenvif_queue_carrier_on(queue);
2202 		}
2203 
2204 		/* Queued packets may have foreign pages from other
2205 		 * domains.  These cannot be queued indefinitely as
2206 		 * this would starve guests of grant refs and transmit
2207 		 * slots.
2208 		 */
2209 		xenvif_rx_queue_drop_expired(queue);
2210 
2211 		xenvif_rx_queue_maybe_wake(queue);
2212 
2213 		cond_resched();
2214 	}
2215 
2216 	/* Bin any remaining skbs */
2217 	xenvif_rx_queue_purge(queue);
2218 
2219 	return 0;
2220 }
2221 
2222 static bool xenvif_dealloc_kthread_should_stop(struct xenvif_queue *queue)
2223 {
2224 	/* Dealloc thread must remain running until all inflight
2225 	 * packets complete.
2226 	 */
2227 	return kthread_should_stop() &&
2228 		!atomic_read(&queue->inflight_packets);
2229 }
2230 
2231 int xenvif_dealloc_kthread(void *data)
2232 {
2233 	struct xenvif_queue *queue = data;
2234 
2235 	for (;;) {
2236 		wait_event_interruptible(queue->dealloc_wq,
2237 					 tx_dealloc_work_todo(queue) ||
2238 					 xenvif_dealloc_kthread_should_stop(queue));
2239 		if (xenvif_dealloc_kthread_should_stop(queue))
2240 			break;
2241 
2242 		xenvif_tx_dealloc_action(queue);
2243 		cond_resched();
2244 	}
2245 
2246 	/* Unmap anything remaining*/
2247 	if (tx_dealloc_work_todo(queue))
2248 		xenvif_tx_dealloc_action(queue);
2249 
2250 	return 0;
2251 }
2252 
2253 static void make_ctrl_response(struct xenvif *vif,
2254 			       const struct xen_netif_ctrl_request *req,
2255 			       u32 status, u32 data)
2256 {
2257 	RING_IDX idx = vif->ctrl.rsp_prod_pvt;
2258 	struct xen_netif_ctrl_response rsp = {
2259 		.id = req->id,
2260 		.type = req->type,
2261 		.status = status,
2262 		.data = data,
2263 	};
2264 
2265 	*RING_GET_RESPONSE(&vif->ctrl, idx) = rsp;
2266 	vif->ctrl.rsp_prod_pvt = ++idx;
2267 }
2268 
2269 static void push_ctrl_response(struct xenvif *vif)
2270 {
2271 	int notify;
2272 
2273 	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->ctrl, notify);
2274 	if (notify)
2275 		notify_remote_via_irq(vif->ctrl_irq);
2276 }
2277 
2278 static void process_ctrl_request(struct xenvif *vif,
2279 				 const struct xen_netif_ctrl_request *req)
2280 {
2281 	u32 status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED;
2282 	u32 data = 0;
2283 
2284 	switch (req->type) {
2285 	case XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM:
2286 		status = xenvif_set_hash_alg(vif, req->data[0]);
2287 		break;
2288 
2289 	case XEN_NETIF_CTRL_TYPE_GET_HASH_FLAGS:
2290 		status = xenvif_get_hash_flags(vif, &data);
2291 		break;
2292 
2293 	case XEN_NETIF_CTRL_TYPE_SET_HASH_FLAGS:
2294 		status = xenvif_set_hash_flags(vif, req->data[0]);
2295 		break;
2296 
2297 	case XEN_NETIF_CTRL_TYPE_SET_HASH_KEY:
2298 		status = xenvif_set_hash_key(vif, req->data[0],
2299 					     req->data[1]);
2300 		break;
2301 
2302 	case XEN_NETIF_CTRL_TYPE_GET_HASH_MAPPING_SIZE:
2303 		status = XEN_NETIF_CTRL_STATUS_SUCCESS;
2304 		data = XEN_NETBK_MAX_HASH_MAPPING_SIZE;
2305 		break;
2306 
2307 	case XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE:
2308 		status = xenvif_set_hash_mapping_size(vif,
2309 						      req->data[0]);
2310 		break;
2311 
2312 	case XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING:
2313 		status = xenvif_set_hash_mapping(vif, req->data[0],
2314 						 req->data[1],
2315 						 req->data[2]);
2316 		break;
2317 
2318 	default:
2319 		break;
2320 	}
2321 
2322 	make_ctrl_response(vif, req, status, data);
2323 	push_ctrl_response(vif);
2324 }
2325 
2326 static void xenvif_ctrl_action(struct xenvif *vif)
2327 {
2328 	for (;;) {
2329 		RING_IDX req_prod, req_cons;
2330 
2331 		req_prod = vif->ctrl.sring->req_prod;
2332 		req_cons = vif->ctrl.req_cons;
2333 
2334 		/* Make sure we can see requests before we process them. */
2335 		rmb();
2336 
2337 		if (req_cons == req_prod)
2338 			break;
2339 
2340 		while (req_cons != req_prod) {
2341 			struct xen_netif_ctrl_request req;
2342 
2343 			RING_COPY_REQUEST(&vif->ctrl, req_cons, &req);
2344 			req_cons++;
2345 
2346 			process_ctrl_request(vif, &req);
2347 		}
2348 
2349 		vif->ctrl.req_cons = req_cons;
2350 		vif->ctrl.sring->req_event = req_cons + 1;
2351 	}
2352 }
2353 
2354 static bool xenvif_ctrl_work_todo(struct xenvif *vif)
2355 {
2356 	if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->ctrl)))
2357 		return 1;
2358 
2359 	return 0;
2360 }
2361 
2362 int xenvif_ctrl_kthread(void *data)
2363 {
2364 	struct xenvif *vif = data;
2365 
2366 	for (;;) {
2367 		wait_event_interruptible(vif->ctrl_wq,
2368 					 xenvif_ctrl_work_todo(vif) ||
2369 					 kthread_should_stop());
2370 		if (kthread_should_stop())
2371 			break;
2372 
2373 		while (xenvif_ctrl_work_todo(vif))
2374 			xenvif_ctrl_action(vif);
2375 
2376 		cond_resched();
2377 	}
2378 
2379 	return 0;
2380 }
2381 
2382 static int __init netback_init(void)
2383 {
2384 	int rc = 0;
2385 
2386 	if (!xen_domain())
2387 		return -ENODEV;
2388 
2389 	/* Allow as many queues as there are CPUs if user has not
2390 	 * specified a value.
2391 	 */
2392 	if (xenvif_max_queues == 0)
2393 		xenvif_max_queues = num_online_cpus();
2394 
2395 	if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
2396 		pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
2397 			fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
2398 		fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
2399 	}
2400 
2401 	rc = xenvif_xenbus_init();
2402 	if (rc)
2403 		goto failed_init;
2404 
2405 #ifdef CONFIG_DEBUG_FS
2406 	xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
2407 	if (IS_ERR_OR_NULL(xen_netback_dbg_root))
2408 		pr_warn("Init of debugfs returned %ld!\n",
2409 			PTR_ERR(xen_netback_dbg_root));
2410 #endif /* CONFIG_DEBUG_FS */
2411 
2412 	return 0;
2413 
2414 failed_init:
2415 	return rc;
2416 }
2417 
2418 module_init(netback_init);
2419 
2420 static void __exit netback_fini(void)
2421 {
2422 #ifdef CONFIG_DEBUG_FS
2423 	if (!IS_ERR_OR_NULL(xen_netback_dbg_root))
2424 		debugfs_remove_recursive(xen_netback_dbg_root);
2425 #endif /* CONFIG_DEBUG_FS */
2426 	xenvif_xenbus_fini();
2427 }
2428 module_exit(netback_fini);
2429 
2430 MODULE_LICENSE("Dual BSD/GPL");
2431 MODULE_ALIAS("xen-backend:vif");
2432