xref: /openbmc/linux/drivers/vhost/net.c (revision dd2934a95701576203b2f61e8ded4e4a2f9183ea)
1 /* Copyright (C) 2009 Red Hat, Inc.
2  * Author: Michael S. Tsirkin <mst@redhat.com>
3  *
4  * This work is licensed under the terms of the GNU GPL, version 2.
5  *
6  * virtio-net server in host kernel.
7  */
8 
9 #include <linux/compat.h>
10 #include <linux/eventfd.h>
11 #include <linux/vhost.h>
12 #include <linux/virtio_net.h>
13 #include <linux/miscdevice.h>
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16 #include <linux/mutex.h>
17 #include <linux/workqueue.h>
18 #include <linux/file.h>
19 #include <linux/slab.h>
20 #include <linux/sched/clock.h>
21 #include <linux/sched/signal.h>
22 #include <linux/vmalloc.h>
23 
24 #include <linux/net.h>
25 #include <linux/if_packet.h>
26 #include <linux/if_arp.h>
27 #include <linux/if_tun.h>
28 #include <linux/if_macvlan.h>
29 #include <linux/if_tap.h>
30 #include <linux/if_vlan.h>
31 #include <linux/skb_array.h>
32 #include <linux/skbuff.h>
33 
34 #include <net/sock.h>
35 #include <net/xdp.h>
36 
37 #include "vhost.h"
38 
39 static int experimental_zcopytx = 1;
40 module_param(experimental_zcopytx, int, 0444);
41 MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;"
42 		                       " 1 - Enable; 0 - Disable");
43 
44 /* Max number of bytes transferred before requeueing the job.
45  * Using this limit prevents one virtqueue from starving others. */
46 #define VHOST_NET_WEIGHT 0x80000
47 
48 /* Max number of packets transferred before requeueing the job.
49  * Using this limit prevents one virtqueue from starving others with small
50  * pkts.
51  */
52 #define VHOST_NET_PKT_WEIGHT 256
53 
54 /* MAX number of TX used buffers for outstanding zerocopy */
55 #define VHOST_MAX_PEND 128
56 #define VHOST_GOODCOPY_LEN 256
57 
58 /*
59  * For transmit, used buffer len is unused; we override it to track buffer
60  * status internally; used for zerocopy tx only.
61  */
62 /* Lower device DMA failed */
63 #define VHOST_DMA_FAILED_LEN	((__force __virtio32)3)
64 /* Lower device DMA done */
65 #define VHOST_DMA_DONE_LEN	((__force __virtio32)2)
66 /* Lower device DMA in progress */
67 #define VHOST_DMA_IN_PROGRESS	((__force __virtio32)1)
68 /* Buffer unused */
69 #define VHOST_DMA_CLEAR_LEN	((__force __virtio32)0)
70 
71 #define VHOST_DMA_IS_DONE(len) ((__force u32)(len) >= (__force u32)VHOST_DMA_DONE_LEN)
72 
73 enum {
74 	VHOST_NET_FEATURES = VHOST_FEATURES |
75 			 (1ULL << VHOST_NET_F_VIRTIO_NET_HDR) |
76 			 (1ULL << VIRTIO_NET_F_MRG_RXBUF) |
77 			 (1ULL << VIRTIO_F_IOMMU_PLATFORM)
78 };
79 
80 enum {
81 	VHOST_NET_BACKEND_FEATURES = (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)
82 };
83 
84 enum {
85 	VHOST_NET_VQ_RX = 0,
86 	VHOST_NET_VQ_TX = 1,
87 	VHOST_NET_VQ_MAX = 2,
88 };
89 
90 struct vhost_net_ubuf_ref {
91 	/* refcount follows semantics similar to kref:
92 	 *  0: object is released
93 	 *  1: no outstanding ubufs
94 	 * >1: outstanding ubufs
95 	 */
96 	atomic_t refcount;
97 	wait_queue_head_t wait;
98 	struct vhost_virtqueue *vq;
99 };
100 
101 #define VHOST_NET_BATCH 64
102 struct vhost_net_buf {
103 	void **queue;
104 	int tail;
105 	int head;
106 };
107 
108 struct vhost_net_virtqueue {
109 	struct vhost_virtqueue vq;
110 	size_t vhost_hlen;
111 	size_t sock_hlen;
112 	/* vhost zerocopy support fields below: */
113 	/* last used idx for outstanding DMA zerocopy buffers */
114 	int upend_idx;
115 	/* For TX, first used idx for DMA done zerocopy buffers
116 	 * For RX, number of batched heads
117 	 */
118 	int done_idx;
119 	/* Number of XDP frames batched */
120 	int batched_xdp;
121 	/* an array of userspace buffers info */
122 	struct ubuf_info *ubuf_info;
123 	/* Reference counting for outstanding ubufs.
124 	 * Protected by vq mutex. Writers must also take device mutex. */
125 	struct vhost_net_ubuf_ref *ubufs;
126 	struct ptr_ring *rx_ring;
127 	struct vhost_net_buf rxq;
128 	/* Batched XDP buffs */
129 	struct xdp_buff *xdp;
130 };
131 
132 struct vhost_net {
133 	struct vhost_dev dev;
134 	struct vhost_net_virtqueue vqs[VHOST_NET_VQ_MAX];
135 	struct vhost_poll poll[VHOST_NET_VQ_MAX];
136 	/* Number of TX recently submitted.
137 	 * Protected by tx vq lock. */
138 	unsigned tx_packets;
139 	/* Number of times zerocopy TX recently failed.
140 	 * Protected by tx vq lock. */
141 	unsigned tx_zcopy_err;
142 	/* Flush in progress. Protected by tx vq lock. */
143 	bool tx_flush;
144 };
145 
146 static unsigned vhost_net_zcopy_mask __read_mostly;
147 
148 static void *vhost_net_buf_get_ptr(struct vhost_net_buf *rxq)
149 {
150 	if (rxq->tail != rxq->head)
151 		return rxq->queue[rxq->head];
152 	else
153 		return NULL;
154 }
155 
156 static int vhost_net_buf_get_size(struct vhost_net_buf *rxq)
157 {
158 	return rxq->tail - rxq->head;
159 }
160 
161 static int vhost_net_buf_is_empty(struct vhost_net_buf *rxq)
162 {
163 	return rxq->tail == rxq->head;
164 }
165 
166 static void *vhost_net_buf_consume(struct vhost_net_buf *rxq)
167 {
168 	void *ret = vhost_net_buf_get_ptr(rxq);
169 	++rxq->head;
170 	return ret;
171 }
172 
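/* Refill the RX batch queue from the backend ptr_ring.  Returns the number
 * of entries fetched; 0 means the ring is currently empty. */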
173 static int vhost_net_buf_produce(struct vhost_net_virtqueue *nvq)
174 {
175 	struct vhost_net_buf *rxq = &nvq->rxq;
176 
177 	rxq->head = 0;
178 	rxq->tail = ptr_ring_consume_batched(nvq->rx_ring, rxq->queue,
179 					      VHOST_NET_BATCH);
180 	return rxq->tail;
181 }
182 
183 static void vhost_net_buf_unproduce(struct vhost_net_virtqueue *nvq)
184 {
185 	struct vhost_net_buf *rxq = &nvq->rxq;
186 
187 	if (nvq->rx_ring && !vhost_net_buf_is_empty(rxq)) {
188 		ptr_ring_unconsume(nvq->rx_ring, rxq->queue + rxq->head,
189 				   vhost_net_buf_get_size(rxq),
190 				   tun_ptr_free);
191 		rxq->head = rxq->tail = 0;
192 	}
193 }
194 
195 static int vhost_net_buf_peek_len(void *ptr)
196 {
197 	if (tun_is_xdp_frame(ptr)) {
198 		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
199 
200 		return xdpf->len;
201 	}
202 
203 	return __skb_array_len_with_tag(ptr);
204 }
205 
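/* Return the length of the next pending RX buffer without consuming it,
 * refilling the batch from the ptr_ring if necessary.  0 means nothing is
 * queued. */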
206 static int vhost_net_buf_peek(struct vhost_net_virtqueue *nvq)
207 {
208 	struct vhost_net_buf *rxq = &nvq->rxq;
209 
210 	if (!vhost_net_buf_is_empty(rxq))
211 		goto out;
212 
213 	if (!vhost_net_buf_produce(nvq))
214 		return 0;
215 
216 out:
217 	return vhost_net_buf_peek_len(vhost_net_buf_get_ptr(rxq));
218 }
219 
220 static void vhost_net_buf_init(struct vhost_net_buf *rxq)
221 {
222 	rxq->head = rxq->tail = 0;
223 }
224 
225 static void vhost_net_enable_zcopy(int vq)
226 {
227 	vhost_net_zcopy_mask |= 0x1 << vq;
228 }
229 
230 static struct vhost_net_ubuf_ref *
231 vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
232 {
233 	struct vhost_net_ubuf_ref *ubufs;
234 	/* No zero copy backend? Nothing to count. */
235 	if (!zcopy)
236 		return NULL;
237 	ubufs = kmalloc(sizeof(*ubufs), GFP_KERNEL);
238 	if (!ubufs)
239 		return ERR_PTR(-ENOMEM);
240 	atomic_set(&ubufs->refcount, 1);
241 	init_waitqueue_head(&ubufs->wait);
242 	ubufs->vq = vq;
243 	return ubufs;
244 }
245 
246 static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
247 {
248 	int r = atomic_sub_return(1, &ubufs->refcount);
249 	if (unlikely(!r))
250 		wake_up(&ubufs->wait);
251 	return r;
252 }
253 
254 static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
255 {
256 	vhost_net_ubuf_put(ubufs);
257 	wait_event(ubufs->wait, !atomic_read(&ubufs->refcount));
258 }
259 
260 static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs)
261 {
262 	vhost_net_ubuf_put_and_wait(ubufs);
263 	kfree(ubufs);
264 }
265 
266 static void vhost_net_clear_ubuf_info(struct vhost_net *n)
267 {
268 	int i;
269 
270 	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
271 		kfree(n->vqs[i].ubuf_info);
272 		n->vqs[i].ubuf_info = NULL;
273 	}
274 }
275 
276 static int vhost_net_set_ubuf_info(struct vhost_net *n)
277 {
278 	bool zcopy;
279 	int i;
280 
281 	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
282 		zcopy = vhost_net_zcopy_mask & (0x1 << i);
283 		if (!zcopy)
284 			continue;
285 		n->vqs[i].ubuf_info =
286 			kmalloc_array(UIO_MAXIOV,
287 				      sizeof(*n->vqs[i].ubuf_info),
288 				      GFP_KERNEL);
289 		if (!n->vqs[i].ubuf_info)
290 			goto err;
291 	}
292 	return 0;
293 
294 err:
295 	vhost_net_clear_ubuf_info(n);
296 	return -ENOMEM;
297 }
298 
299 static void vhost_net_vq_reset(struct vhost_net *n)
300 {
301 	int i;
302 
303 	vhost_net_clear_ubuf_info(n);
304 
305 	for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
306 		n->vqs[i].done_idx = 0;
307 		n->vqs[i].upend_idx = 0;
308 		n->vqs[i].ubufs = NULL;
309 		n->vqs[i].vhost_hlen = 0;
310 		n->vqs[i].sock_hlen = 0;
311 		vhost_net_buf_init(&n->vqs[i].rxq);
312 	}
313 
314 }
315 
316 static void vhost_net_tx_packet(struct vhost_net *net)
317 {
318 	++net->tx_packets;
319 	if (net->tx_packets < 1024)
320 		return;
321 	net->tx_packets = 0;
322 	net->tx_zcopy_err = 0;
323 }
324 
325 static void vhost_net_tx_err(struct vhost_net *net)
326 {
327 	++net->tx_zcopy_err;
328 }
329 
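/* Use zerocopy only while it is working well: not during a flush, and only
 * as long as fewer than roughly 1 in 64 recent packets hit a zerocopy
 * error. */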
330 static bool vhost_net_tx_select_zcopy(struct vhost_net *net)
331 {
332 	/* TX flush waits for outstanding DMAs to be done.
333 	 * Don't start new DMAs.
334 	 */
335 	return !net->tx_flush &&
336 		net->tx_packets / 64 >= net->tx_zcopy_err;
337 }
338 
339 static bool vhost_sock_zcopy(struct socket *sock)
340 {
341 	return unlikely(experimental_zcopytx) &&
342 		sock_flag(sock->sk, SOCK_ZEROCOPY);
343 }
344 
345 static bool vhost_sock_xdp(struct socket *sock)
346 {
347 	return sock_flag(sock->sk, SOCK_XDP);
348 }
349 
350 /* The lower device may complete DMAs out of order. upend_idx tracks the
351  * end of the used idx, done_idx tracks the head of the used idx. Once the
352  * lower device has completed DMAs contiguously, we signal the used idx to
353  * the KVM guest.
354  */
355 static void vhost_zerocopy_signal_used(struct vhost_net *net,
356 				       struct vhost_virtqueue *vq)
357 {
358 	struct vhost_net_virtqueue *nvq =
359 		container_of(vq, struct vhost_net_virtqueue, vq);
360 	int i, add;
361 	int j = 0;
362 
363 	for (i = nvq->done_idx; i != nvq->upend_idx; i = (i + 1) % UIO_MAXIOV) {
364 		if (vq->heads[i].len == VHOST_DMA_FAILED_LEN)
365 			vhost_net_tx_err(net);
366 		if (VHOST_DMA_IS_DONE(vq->heads[i].len)) {
367 			vq->heads[i].len = VHOST_DMA_CLEAR_LEN;
368 			++j;
369 		} else
370 			break;
371 	}
372 	while (j) {
373 		add = min(UIO_MAXIOV - nvq->done_idx, j);
374 		vhost_add_used_and_signal_n(vq->dev, vq,
375 					    &vq->heads[nvq->done_idx], add);
376 		nvq->done_idx = (nvq->done_idx + add) % UIO_MAXIOV;
377 		j -= add;
378 	}
379 }
380 
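/* Completion callback, called once the lower device is done with a zerocopy
 * TX buffer: record DMA success or failure in the matching heads[] entry,
 * drop the ubuf reference and kick the TX worker when needed. */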
381 static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
382 {
383 	struct vhost_net_ubuf_ref *ubufs = ubuf->ctx;
384 	struct vhost_virtqueue *vq = ubufs->vq;
385 	int cnt;
386 
387 	rcu_read_lock_bh();
388 
389 	/* set len to mark this desc's buffers as DMA done */
390 	vq->heads[ubuf->desc].len = success ?
391 		VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
392 	cnt = vhost_net_ubuf_put(ubufs);
393 
394 	/*
395 	 * Trigger polling thread if guest stopped submitting new buffers:
396 	 * in this case, the refcount after decrement will eventually reach 1.
397 	 * We also trigger polling periodically after each 16 packets
398 	 * (the value 16 here is more or less arbitrary, it's tuned to trigger
399 	 * less than 10% of times).
400 	 */
401 	if (cnt <= 1 || !(cnt % 16))
402 		vhost_poll_queue(&vq->poll);
403 
404 	rcu_read_unlock_bh();
405 }
406 
407 static inline unsigned long busy_clock(void)
408 {
409 	return local_clock() >> 10;
410 }
411 
412 static bool vhost_can_busy_poll(unsigned long endtime)
413 {
414 	return likely(!need_resched() && !time_after(busy_clock(), endtime) &&
415 		      !signal_pending(current));
416 }
417 
418 static void vhost_net_disable_vq(struct vhost_net *n,
419 				 struct vhost_virtqueue *vq)
420 {
421 	struct vhost_net_virtqueue *nvq =
422 		container_of(vq, struct vhost_net_virtqueue, vq);
423 	struct vhost_poll *poll = n->poll + (nvq - n->vqs);
424 	if (!vq->private_data)
425 		return;
426 	vhost_poll_stop(poll);
427 }
428 
429 static int vhost_net_enable_vq(struct vhost_net *n,
430 				struct vhost_virtqueue *vq)
431 {
432 	struct vhost_net_virtqueue *nvq =
433 		container_of(vq, struct vhost_net_virtqueue, vq);
434 	struct vhost_poll *poll = n->poll + (nvq - n->vqs);
435 	struct socket *sock;
436 
437 	sock = vq->private_data;
438 	if (!sock)
439 		return 0;
440 
441 	return vhost_poll_start(poll, sock->file);
442 }
443 
444 static void vhost_net_signal_used(struct vhost_net_virtqueue *nvq)
445 {
446 	struct vhost_virtqueue *vq = &nvq->vq;
447 	struct vhost_dev *dev = vq->dev;
448 
449 	if (!nvq->done_idx)
450 		return;
451 
452 	vhost_add_used_and_signal_n(dev, vq, vq->heads, nvq->done_idx);
453 	nvq->done_idx = 0;
454 }
455 
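/* Submit the XDP frames batched so far to the backend with a single
 * TUN_MSG_PTR sendmsg() call, then signal the used ring for the completed
 * heads. */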
456 static void vhost_tx_batch(struct vhost_net *net,
457 			   struct vhost_net_virtqueue *nvq,
458 			   struct socket *sock,
459 			   struct msghdr *msghdr)
460 {
461 	struct tun_msg_ctl ctl = {
462 		.type = TUN_MSG_PTR,
463 		.num = nvq->batched_xdp,
464 		.ptr = nvq->xdp,
465 	};
466 	int err;
467 
468 	if (nvq->batched_xdp == 0)
469 		goto signal_used;
470 
471 	msghdr->msg_control = &ctl;
472 	err = sock->ops->sendmsg(sock, msghdr, 0);
473 	if (unlikely(err < 0)) {
474 		vq_err(&nvq->vq, "Fail to batch sending packets\n");
475 		return;
476 	}
477 
478 signal_used:
479 	vhost_net_signal_used(nvq);
480 	nvq->batched_xdp = 0;
481 }
482 
483 static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
484 				    struct vhost_net_virtqueue *nvq,
485 				    unsigned int *out_num, unsigned int *in_num,
486 				    struct msghdr *msghdr, bool *busyloop_intr)
487 {
488 	struct vhost_virtqueue *vq = &nvq->vq;
489 	unsigned long uninitialized_var(endtime);
490 	int r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
491 				  out_num, in_num, NULL, NULL);
492 
493 	if (r == vq->num && vq->busyloop_timeout) {
494 		/* Flush batched packets first */
495 		if (!vhost_sock_zcopy(vq->private_data))
496 			vhost_tx_batch(net, nvq, vq->private_data, msghdr);
497 		preempt_disable();
498 		endtime = busy_clock() + vq->busyloop_timeout;
499 		while (vhost_can_busy_poll(endtime)) {
500 			if (vhost_has_work(vq->dev)) {
501 				*busyloop_intr = true;
502 				break;
503 			}
504 			if (!vhost_vq_avail_empty(vq->dev, vq))
505 				break;
506 			cpu_relax();
507 		}
508 		preempt_enable();
509 		r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
510 				      out_num, in_num, NULL, NULL);
511 	}
512 
513 	return r;
514 }
515 
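/* True when the number of outstanding zerocopy TX buffers exceeds
 * min(VHOST_MAX_PEND, a quarter of the ring size). */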
516 static bool vhost_exceeds_maxpend(struct vhost_net *net)
517 {
518 	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
519 	struct vhost_virtqueue *vq = &nvq->vq;
520 
521 	return (nvq->upend_idx + UIO_MAXIOV - nvq->done_idx) % UIO_MAXIOV >
522 	       min_t(unsigned int, VHOST_MAX_PEND, vq->num >> 2);
523 }
524 
525 static size_t init_iov_iter(struct vhost_virtqueue *vq, struct iov_iter *iter,
526 			    size_t hdr_size, int out)
527 {
528 	/* Skip header. TODO: support TSO. */
529 	size_t len = iov_length(vq->iov, out);
530 
531 	iov_iter_init(iter, WRITE, vq->iov, out, len);
532 	iov_iter_advance(iter, hdr_size);
533 
534 	return iov_iter_count(iter);
535 }
536 
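/* True once this run of the worker has moved enough bytes or packets that
 * it should requeue itself and let other virtqueues make progress. */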
537 static bool vhost_exceeds_weight(int pkts, int total_len)
538 {
539 	return total_len >= VHOST_NET_WEIGHT ||
540 	       pkts >= VHOST_NET_PKT_WEIGHT;
541 }
542 
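/* Fetch the next TX descriptor and point msg->msg_iter past the vhost
 * header.  Returns the head index, vq->num when the ring is empty, or a
 * negative error on a malformed descriptor. */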
543 static int get_tx_bufs(struct vhost_net *net,
544 		       struct vhost_net_virtqueue *nvq,
545 		       struct msghdr *msg,
546 		       unsigned int *out, unsigned int *in,
547 		       size_t *len, bool *busyloop_intr)
548 {
549 	struct vhost_virtqueue *vq = &nvq->vq;
550 	int ret;
551 
552 	ret = vhost_net_tx_get_vq_desc(net, nvq, out, in, msg, busyloop_intr);
553 
554 	if (ret < 0 || ret == vq->num)
555 		return ret;
556 
557 	if (*in) {
558 		vq_err(vq, "Unexpected descriptor format for TX: out %d, in %d\n",
559 			*out, *in);
560 		return -EFAULT;
561 	}
562 
563 	/* Sanity check */
564 	*len = init_iov_iter(vq, &msg->msg_iter, nvq->vhost_hlen, *out);
565 	if (*len == 0) {
566 		vq_err(vq, "Unexpected header len for TX: %zd expected %zd\n",
567 			*len, nvq->vhost_hlen);
568 		return -EFAULT;
569 	}
570 
571 	return ret;
572 }
573 
574 static bool tx_can_batch(struct vhost_virtqueue *vq, size_t total_len)
575 {
576 	return total_len < VHOST_NET_WEIGHT &&
577 	       !vhost_vq_avail_empty(vq->dev, vq);
578 }
579 
580 #define VHOST_NET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
581 
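/* Copy one TX packet from the guest iov into a page-fragment buffer and
 * describe it with an xdp_buff so it can be submitted to the backend in a
 * batch.  Returns -ENOSPC when the packet (plus pad and shared info) would
 * not fit in a single page, in which case the caller falls back to the
 * regular sendmsg() path. */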
582 static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
583 			       struct iov_iter *from)
584 {
585 	struct vhost_virtqueue *vq = &nvq->vq;
586 	struct socket *sock = vq->private_data;
587 	struct page_frag *alloc_frag = &current->task_frag;
588 	struct virtio_net_hdr *gso;
589 	struct xdp_buff *xdp = &nvq->xdp[nvq->batched_xdp];
590 	struct tun_xdp_hdr *hdr;
591 	size_t len = iov_iter_count(from);
592 	int headroom = vhost_sock_xdp(sock) ? XDP_PACKET_HEADROOM : 0;
593 	int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
594 	int pad = SKB_DATA_ALIGN(VHOST_NET_RX_PAD + headroom + nvq->sock_hlen);
595 	int sock_hlen = nvq->sock_hlen;
596 	void *buf;
597 	int copied;
598 
599 	if (unlikely(len < nvq->sock_hlen))
600 		return -EFAULT;
601 
602 	if (SKB_DATA_ALIGN(len + pad) +
603 	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE)
604 		return -ENOSPC;
605 
606 	buflen += SKB_DATA_ALIGN(len + pad);
607 	alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES);
608 	if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL)))
609 		return -ENOMEM;
610 
611 	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
612 	copied = copy_page_from_iter(alloc_frag->page,
613 				     alloc_frag->offset +
614 				     offsetof(struct tun_xdp_hdr, gso),
615 				     sock_hlen, from);
616 	if (copied != sock_hlen)
617 		return -EFAULT;
618 
619 	hdr = buf;
620 	gso = &hdr->gso;
621 
622 	if ((gso->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
623 	    vhost16_to_cpu(vq, gso->csum_start) +
624 	    vhost16_to_cpu(vq, gso->csum_offset) + 2 >
625 	    vhost16_to_cpu(vq, gso->hdr_len)) {
626 		gso->hdr_len = cpu_to_vhost16(vq,
627 			       vhost16_to_cpu(vq, gso->csum_start) +
628 			       vhost16_to_cpu(vq, gso->csum_offset) + 2);
629 
630 		if (vhost16_to_cpu(vq, gso->hdr_len) > len)
631 			return -EINVAL;
632 	}
633 
634 	len -= sock_hlen;
635 	copied = copy_page_from_iter(alloc_frag->page,
636 				     alloc_frag->offset + pad,
637 				     len, from);
638 	if (copied != len)
639 		return -EFAULT;
640 
641 	xdp->data_hard_start = buf;
642 	xdp->data = buf + pad;
643 	xdp->data_end = xdp->data + len;
644 	hdr->buflen = buflen;
645 
646 	get_page(alloc_frag->page);
647 	alloc_frag->offset += buflen;
648 
649 	++nvq->batched_xdp;
650 
651 	return 0;
652 }
653 
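/* Copy-based TX path: packets are copied into the backend socket, batched
 * as XDP frames when the socket's sndbuf is unlimited, and the used ring is
 * signalled as soon as the data has been handed off. */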
654 static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
655 {
656 	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
657 	struct vhost_virtqueue *vq = &nvq->vq;
658 	unsigned out, in;
659 	int head;
660 	struct msghdr msg = {
661 		.msg_name = NULL,
662 		.msg_namelen = 0,
663 		.msg_control = NULL,
664 		.msg_controllen = 0,
665 		.msg_flags = MSG_DONTWAIT,
666 	};
667 	size_t len, total_len = 0;
668 	int err;
669 	int sent_pkts = 0;
670 	bool sock_can_batch = (sock->sk->sk_sndbuf == INT_MAX);
671 
672 	for (;;) {
673 		bool busyloop_intr = false;
674 
675 		if (nvq->done_idx == VHOST_NET_BATCH)
676 			vhost_tx_batch(net, nvq, sock, &msg);
677 
678 		head = get_tx_bufs(net, nvq, &msg, &out, &in, &len,
679 				   &busyloop_intr);
680 		/* On error, stop handling until the next kick. */
681 		if (unlikely(head < 0))
682 			break;
683 		/* Nothing new?  Wait for eventfd to tell us they refilled. */
684 		if (head == vq->num) {
685 			if (unlikely(busyloop_intr)) {
686 				vhost_poll_queue(&vq->poll);
687 			} else if (unlikely(vhost_enable_notify(&net->dev,
688 								vq))) {
689 				vhost_disable_notify(&net->dev, vq);
690 				continue;
691 			}
692 			break;
693 		}
694 
695 		total_len += len;
696 
697 		/* For simplicity, TX batching is only enabled if
698 		 * sndbuf is unlimited.
699 		 */
700 		if (sock_can_batch) {
701 			err = vhost_net_build_xdp(nvq, &msg.msg_iter);
702 			if (!err) {
703 				goto done;
704 			} else if (unlikely(err != -ENOSPC)) {
705 				vhost_tx_batch(net, nvq, sock, &msg);
706 				vhost_discard_vq_desc(vq, 1);
707 				vhost_net_enable_vq(net, vq);
708 				break;
709 			}
710 
711 			/* We can't build XDP buff, go for single
712 			 * packet path but let's flush batched
713 			 * packets.
714 			 */
715 			vhost_tx_batch(net, nvq, sock, &msg);
716 			msg.msg_control = NULL;
717 		} else {
718 			if (tx_can_batch(vq, total_len))
719 				msg.msg_flags |= MSG_MORE;
720 			else
721 				msg.msg_flags &= ~MSG_MORE;
722 		}
723 
724 		/* TODO: Check specific error and bomb out unless ENOBUFS? */
725 		err = sock->ops->sendmsg(sock, &msg, len);
726 		if (unlikely(err < 0)) {
727 			vhost_discard_vq_desc(vq, 1);
728 			vhost_net_enable_vq(net, vq);
729 			break;
730 		}
731 		if (err != len)
732 			pr_debug("Truncated TX packet: len %d != %zd\n",
733 				 err, len);
734 done:
735 		vq->heads[nvq->done_idx].id = cpu_to_vhost32(vq, head);
736 		vq->heads[nvq->done_idx].len = 0;
737 		++nvq->done_idx;
738 		if (vhost_exceeds_weight(++sent_pkts, total_len)) {
739 			vhost_poll_queue(&vq->poll);
740 			break;
741 		}
742 	}
743 
744 	vhost_tx_batch(net, nvq, sock, &msg);
745 }
746 
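/* Zerocopy TX path: large packets are handed to the backend with a
 * ubuf_info completion callback and their used entries are only signalled
 * once the lower device reports the DMA as done.  Small packets, or packets
 * sent while too many DMAs are outstanding, fall back to ordinary copying. */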
747 static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
748 {
749 	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
750 	struct vhost_virtqueue *vq = &nvq->vq;
751 	unsigned out, in;
752 	int head;
753 	struct msghdr msg = {
754 		.msg_name = NULL,
755 		.msg_namelen = 0,
756 		.msg_control = NULL,
757 		.msg_controllen = 0,
758 		.msg_flags = MSG_DONTWAIT,
759 	};
760 	struct tun_msg_ctl ctl;
761 	size_t len, total_len = 0;
762 	int err;
763 	struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
764 	bool zcopy_used;
765 	int sent_pkts = 0;
766 
767 	for (;;) {
768 		bool busyloop_intr;
769 
770 		/* Release DMAs done buffers first */
771 		vhost_zerocopy_signal_used(net, vq);
772 
773 		busyloop_intr = false;
774 		head = get_tx_bufs(net, nvq, &msg, &out, &in, &len,
775 				   &busyloop_intr);
776 		/* On error, stop handling until the next kick. */
777 		if (unlikely(head < 0))
778 			break;
779 		/* Nothing new?  Wait for eventfd to tell us they refilled. */
780 		if (head == vq->num) {
781 			if (unlikely(busyloop_intr)) {
782 				vhost_poll_queue(&vq->poll);
783 			} else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
784 				vhost_disable_notify(&net->dev, vq);
785 				continue;
786 			}
787 			break;
788 		}
789 
790 		zcopy_used = len >= VHOST_GOODCOPY_LEN
791 			     && !vhost_exceeds_maxpend(net)
792 			     && vhost_net_tx_select_zcopy(net);
793 
794 		/* use msg_control to pass vhost zerocopy ubuf info to skb */
795 		if (zcopy_used) {
796 			struct ubuf_info *ubuf;
797 			ubuf = nvq->ubuf_info + nvq->upend_idx;
798 
799 			vq->heads[nvq->upend_idx].id = cpu_to_vhost32(vq, head);
800 			vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS;
801 			ubuf->callback = vhost_zerocopy_callback;
802 			ubuf->ctx = nvq->ubufs;
803 			ubuf->desc = nvq->upend_idx;
804 			refcount_set(&ubuf->refcnt, 1);
805 			msg.msg_control = &ctl;
806 			ctl.type = TUN_MSG_UBUF;
807 			ctl.ptr = ubuf;
808 			msg.msg_controllen = sizeof(ctl);
809 			ubufs = nvq->ubufs;
810 			atomic_inc(&ubufs->refcount);
811 			nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV;
812 		} else {
813 			msg.msg_control = NULL;
814 			ubufs = NULL;
815 		}
816 		total_len += len;
817 		if (tx_can_batch(vq, total_len) &&
818 		    likely(!vhost_exceeds_maxpend(net))) {
819 			msg.msg_flags |= MSG_MORE;
820 		} else {
821 			msg.msg_flags &= ~MSG_MORE;
822 		}
823 
824 		/* TODO: Check specific error and bomb out unless ENOBUFS? */
825 		err = sock->ops->sendmsg(sock, &msg, len);
826 		if (unlikely(err < 0)) {
827 			if (zcopy_used) {
828 				vhost_net_ubuf_put(ubufs);
829 				nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
830 					% UIO_MAXIOV;
831 			}
832 			vhost_discard_vq_desc(vq, 1);
833 			vhost_net_enable_vq(net, vq);
834 			break;
835 		}
836 		if (err != len)
837 			pr_debug("Truncated TX packet: len %d != %zd\n",
838 				 err, len);
839 		if (!zcopy_used)
840 			vhost_add_used_and_signal(&net->dev, vq, head, 0);
841 		else
842 			vhost_zerocopy_signal_used(net, vq);
843 		vhost_net_tx_packet(net);
844 		if (unlikely(vhost_exceeds_weight(++sent_pkts, total_len))) {
845 			vhost_poll_queue(&vq->poll);
846 			break;
847 		}
848 	}
849 }
850 
851 /* Expects to be always run from workqueue - which acts as
852  * read-side critical section for our kind of RCU. */
853 static void handle_tx(struct vhost_net *net)
854 {
855 	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
856 	struct vhost_virtqueue *vq = &nvq->vq;
857 	struct socket *sock;
858 
859 	mutex_lock(&vq->mutex);
860 	sock = vq->private_data;
861 	if (!sock)
862 		goto out;
863 
864 	if (!vq_iotlb_prefetch(vq))
865 		goto out;
866 
867 	vhost_disable_notify(&net->dev, vq);
868 	vhost_net_disable_vq(net, vq);
869 
870 	if (vhost_sock_zcopy(sock))
871 		handle_tx_zerocopy(net, sock);
872 	else
873 		handle_tx_copy(net, sock);
874 
875 out:
876 	mutex_unlock(&vq->mutex);
877 }
878 
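/* Length of the next pending RX packet: taken from the tap ptr_ring when
 * available, otherwise from the head of the socket receive queue (including
 * the VLAN header if a tag is present). */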
879 static int peek_head_len(struct vhost_net_virtqueue *rvq, struct sock *sk)
880 {
881 	struct sk_buff *head;
882 	int len = 0;
883 	unsigned long flags;
884 
885 	if (rvq->rx_ring)
886 		return vhost_net_buf_peek(rvq);
887 
888 	spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
889 	head = skb_peek(&sk->sk_receive_queue);
890 	if (likely(head)) {
891 		len = head->len;
892 		if (skb_vlan_tag_present(head))
893 			len += VLAN_HLEN;
894 	}
895 
896 	spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags);
897 	return len;
898 }
899 
900 static int sk_has_rx_data(struct sock *sk)
901 {
902 	struct socket *sock = sk->sk_socket;
903 
904 	if (sock->ops->peek_len)
905 		return sock->ops->peek_len(sock);
906 
907 	return skb_queue_empty(&sk->sk_receive_queue);
908 }
909 
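/* As above, but when nothing is pending and busy polling is enabled,
 * briefly poll both the TX virtqueue and the RX socket before giving up. */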
910 static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk,
911 				      bool *busyloop_intr)
912 {
913 	struct vhost_net_virtqueue *rnvq = &net->vqs[VHOST_NET_VQ_RX];
914 	struct vhost_net_virtqueue *tnvq = &net->vqs[VHOST_NET_VQ_TX];
915 	struct vhost_virtqueue *rvq = &rnvq->vq;
916 	struct vhost_virtqueue *tvq = &tnvq->vq;
917 	unsigned long uninitialized_var(endtime);
918 	int len = peek_head_len(rnvq, sk);
919 
920 	if (!len && tvq->busyloop_timeout) {
921 		/* Flush batched heads first */
922 		vhost_net_signal_used(rnvq);
923 		/* Both tx vq and rx socket were polled here */
924 		mutex_lock_nested(&tvq->mutex, 1);
925 		vhost_disable_notify(&net->dev, tvq);
926 
927 		preempt_disable();
928 		endtime = busy_clock() + tvq->busyloop_timeout;
929 
930 		while (vhost_can_busy_poll(endtime)) {
931 			if (vhost_has_work(&net->dev)) {
932 				*busyloop_intr = true;
933 				break;
934 			}
935 			if ((sk_has_rx_data(sk) &&
936 			     !vhost_vq_avail_empty(&net->dev, rvq)) ||
937 			    !vhost_vq_avail_empty(&net->dev, tvq))
938 				break;
939 			cpu_relax();
940 		}
941 
942 		preempt_enable();
943 
944 		if (!vhost_vq_avail_empty(&net->dev, tvq)) {
945 			vhost_poll_queue(&tvq->poll);
946 		} else if (unlikely(vhost_enable_notify(&net->dev, tvq))) {
947 			vhost_disable_notify(&net->dev, tvq);
948 			vhost_poll_queue(&tvq->poll);
949 		}
950 
951 		mutex_unlock(&tvq->mutex);
952 
953 		len = peek_head_len(rnvq, sk);
954 	}
955 
956 	return len;
957 }
958 
959 /* This is a multi-buffer version of vhost_get_vq_desc, that works if
960  *	vq has read descriptors only.
961  * @vq		- the relevant virtqueue
962  * @datalen	- data length we'll be reading
963  * @iovcount	- returned count of io vectors we fill
964  * @log		- vhost log
965  * @log_num	- log offset
966  * @quota       - headcount quota, 1 for big buffer
967  *	returns number of buffer heads allocated, negative on error
968  */
969 static int get_rx_bufs(struct vhost_virtqueue *vq,
970 		       struct vring_used_elem *heads,
971 		       int datalen,
972 		       unsigned *iovcount,
973 		       struct vhost_log *log,
974 		       unsigned *log_num,
975 		       unsigned int quota)
976 {
977 	unsigned int out, in;
978 	int seg = 0;
979 	int headcount = 0;
980 	unsigned d;
981 	int r, nlogs = 0;
982 	/* len is always initialized before use since we are always called with
983 	 * datalen > 0.
984 	 */
985 	u32 uninitialized_var(len);
986 
987 	while (datalen > 0 && headcount < quota) {
988 		if (unlikely(seg >= UIO_MAXIOV)) {
989 			r = -ENOBUFS;
990 			goto err;
991 		}
992 		r = vhost_get_vq_desc(vq, vq->iov + seg,
993 				      ARRAY_SIZE(vq->iov) - seg, &out,
994 				      &in, log, log_num);
995 		if (unlikely(r < 0))
996 			goto err;
997 
998 		d = r;
999 		if (d == vq->num) {
1000 			r = 0;
1001 			goto err;
1002 		}
1003 		if (unlikely(out || in <= 0)) {
1004 			vq_err(vq, "unexpected descriptor format for RX: "
1005 				"out %d, in %d\n", out, in);
1006 			r = -EINVAL;
1007 			goto err;
1008 		}
1009 		if (unlikely(log)) {
1010 			nlogs += *log_num;
1011 			log += *log_num;
1012 		}
1013 		heads[headcount].id = cpu_to_vhost32(vq, d);
1014 		len = iov_length(vq->iov + seg, in);
1015 		heads[headcount].len = cpu_to_vhost32(vq, len);
1016 		datalen -= len;
1017 		++headcount;
1018 		seg += in;
1019 	}
1020 	heads[headcount - 1].len = cpu_to_vhost32(vq, len + datalen);
1021 	*iovcount = seg;
1022 	if (unlikely(log))
1023 		*log_num = nlogs;
1024 
1025 	/* Detect overrun */
1026 	if (unlikely(datalen > 0)) {
1027 		r = UIO_MAXIOV + 1;
1028 		goto err;
1029 	}
1030 	return headcount;
1031 err:
1032 	vhost_discard_vq_desc(vq, headcount);
1033 	return r;
1034 }
1035 
1036 /* Expects to be always run from workqueue - which acts as
1037  * read-side critical section for our kind of RCU. */
1038 static void handle_rx(struct vhost_net *net)
1039 {
1040 	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_RX];
1041 	struct vhost_virtqueue *vq = &nvq->vq;
1042 	unsigned uninitialized_var(in), log;
1043 	struct vhost_log *vq_log;
1044 	struct msghdr msg = {
1045 		.msg_name = NULL,
1046 		.msg_namelen = 0,
1047 		.msg_control = NULL, /* FIXME: get and handle RX aux data. */
1048 		.msg_controllen = 0,
1049 		.msg_flags = MSG_DONTWAIT,
1050 	};
1051 	struct virtio_net_hdr hdr = {
1052 		.flags = 0,
1053 		.gso_type = VIRTIO_NET_HDR_GSO_NONE
1054 	};
1055 	size_t total_len = 0;
1056 	int err, mergeable;
1057 	s16 headcount;
1058 	size_t vhost_hlen, sock_hlen;
1059 	size_t vhost_len, sock_len;
1060 	bool busyloop_intr = false;
1061 	struct socket *sock;
1062 	struct iov_iter fixup;
1063 	__virtio16 num_buffers;
1064 	int recv_pkts = 0;
1065 
1066 	mutex_lock_nested(&vq->mutex, 0);
1067 	sock = vq->private_data;
1068 	if (!sock)
1069 		goto out;
1070 
1071 	if (!vq_iotlb_prefetch(vq))
1072 		goto out;
1073 
1074 	vhost_disable_notify(&net->dev, vq);
1075 	vhost_net_disable_vq(net, vq);
1076 
1077 	vhost_hlen = nvq->vhost_hlen;
1078 	sock_hlen = nvq->sock_hlen;
1079 
1080 	vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ?
1081 		vq->log : NULL;
1082 	mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF);
1083 
1084 	while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk,
1085 						      &busyloop_intr))) {
1086 		sock_len += sock_hlen;
1087 		vhost_len = sock_len + vhost_hlen;
1088 		headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx,
1089 					vhost_len, &in, vq_log, &log,
1090 					likely(mergeable) ? UIO_MAXIOV : 1);
1091 		/* On error, stop handling until the next kick. */
1092 		if (unlikely(headcount < 0))
1093 			goto out;
1094 		/* OK, now we need to know about added descriptors. */
1095 		if (!headcount) {
1096 			if (unlikely(busyloop_intr)) {
1097 				vhost_poll_queue(&vq->poll);
1098 			} else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
1099 				/* They have slipped one in as we were
1100 				 * doing that: check again. */
1101 				vhost_disable_notify(&net->dev, vq);
1102 				continue;
1103 			}
1104 			/* Nothing new?  Wait for eventfd to tell us
1105 			 * they refilled. */
1106 			goto out;
1107 		}
1108 		busyloop_intr = false;
1109 		if (nvq->rx_ring)
1110 			msg.msg_control = vhost_net_buf_consume(&nvq->rxq);
1111 		/* On overrun, truncate and discard */
1112 		if (unlikely(headcount > UIO_MAXIOV)) {
1113 			iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1);
1114 			err = sock->ops->recvmsg(sock, &msg,
1115 						 1, MSG_DONTWAIT | MSG_TRUNC);
1116 			pr_debug("Discarded rx packet: len %zd\n", sock_len);
1117 			continue;
1118 		}
1119 		/* We don't need to be notified again. */
1120 		iov_iter_init(&msg.msg_iter, READ, vq->iov, in, vhost_len);
1121 		fixup = msg.msg_iter;
1122 		if (unlikely((vhost_hlen))) {
1123 			/* We will supply the header ourselves
1124 			 * TODO: support TSO.
1125 			 */
1126 			iov_iter_advance(&msg.msg_iter, vhost_hlen);
1127 		}
1128 		err = sock->ops->recvmsg(sock, &msg,
1129 					 sock_len, MSG_DONTWAIT | MSG_TRUNC);
1130 		/* Userspace might have consumed the packet meanwhile:
1131 		 * it's not supposed to do this usually, but might be hard
1132 		 * to prevent. Discard data we got (if any) and keep going. */
1133 		if (unlikely(err != sock_len)) {
1134 			pr_debug("Discarded rx packet: len %d, expected %zd\n",
1135 				 err, sock_len);
1136 			vhost_discard_vq_desc(vq, headcount);
1137 			continue;
1138 		}
1139 		/* Supply virtio_net_hdr if VHOST_NET_F_VIRTIO_NET_HDR */
1140 		if (unlikely(vhost_hlen)) {
1141 			if (copy_to_iter(&hdr, sizeof(hdr),
1142 					 &fixup) != sizeof(hdr)) {
1143 				vq_err(vq, "Unable to write vnet_hdr "
1144 				       "at addr %p\n", vq->iov->iov_base);
1145 				goto out;
1146 			}
1147 		} else {
1148 			/* Header came from socket; we'll need to patch
1149 			 * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF
1150 			 */
1151 			iov_iter_advance(&fixup, sizeof(hdr));
1152 		}
1153 		/* TODO: Should check and handle checksum. */
1154 
1155 		num_buffers = cpu_to_vhost16(vq, headcount);
1156 		if (likely(mergeable) &&
1157 		    copy_to_iter(&num_buffers, sizeof num_buffers,
1158 				 &fixup) != sizeof num_buffers) {
1159 			vq_err(vq, "Failed num_buffers write");
1160 			vhost_discard_vq_desc(vq, headcount);
1161 			goto out;
1162 		}
1163 		nvq->done_idx += headcount;
1164 		if (nvq->done_idx > VHOST_NET_BATCH)
1165 			vhost_net_signal_used(nvq);
1166 		if (unlikely(vq_log))
1167 			vhost_log_write(vq, vq_log, log, vhost_len);
1168 		total_len += vhost_len;
1169 		if (unlikely(vhost_exceeds_weight(++recv_pkts, total_len))) {
1170 			vhost_poll_queue(&vq->poll);
1171 			goto out;
1172 		}
1173 	}
1174 	if (unlikely(busyloop_intr))
1175 		vhost_poll_queue(&vq->poll);
1176 	else
1177 		vhost_net_enable_vq(net, vq);
1178 out:
1179 	vhost_net_signal_used(nvq);
1180 	mutex_unlock(&vq->mutex);
1181 }
1182 
1183 static void handle_tx_kick(struct vhost_work *work)
1184 {
1185 	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1186 						  poll.work);
1187 	struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);
1188 
1189 	handle_tx(net);
1190 }
1191 
1192 static void handle_rx_kick(struct vhost_work *work)
1193 {
1194 	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1195 						  poll.work);
1196 	struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);
1197 
1198 	handle_rx(net);
1199 }
1200 
1201 static void handle_tx_net(struct vhost_work *work)
1202 {
1203 	struct vhost_net *net = container_of(work, struct vhost_net,
1204 					     poll[VHOST_NET_VQ_TX].work);
1205 	handle_tx(net);
1206 }
1207 
1208 static void handle_rx_net(struct vhost_work *work)
1209 {
1210 	struct vhost_net *net = container_of(work, struct vhost_net,
1211 					     poll[VHOST_NET_VQ_RX].work);
1212 	handle_rx(net);
1213 }
1214 
1215 static int vhost_net_open(struct inode *inode, struct file *f)
1216 {
1217 	struct vhost_net *n;
1218 	struct vhost_dev *dev;
1219 	struct vhost_virtqueue **vqs;
1220 	void **queue;
1221 	struct xdp_buff *xdp;
1222 	int i;
1223 
1224 	n = kvmalloc(sizeof *n, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
1225 	if (!n)
1226 		return -ENOMEM;
1227 	vqs = kmalloc_array(VHOST_NET_VQ_MAX, sizeof(*vqs), GFP_KERNEL);
1228 	if (!vqs) {
1229 		kvfree(n);
1230 		return -ENOMEM;
1231 	}
1232 
1233 	queue = kmalloc_array(VHOST_NET_BATCH, sizeof(void *),
1234 			      GFP_KERNEL);
1235 	if (!queue) {
1236 		kfree(vqs);
1237 		kvfree(n);
1238 		return -ENOMEM;
1239 	}
1240 	n->vqs[VHOST_NET_VQ_RX].rxq.queue = queue;
1241 
1242 	xdp = kmalloc_array(VHOST_NET_BATCH, sizeof(*xdp), GFP_KERNEL);
1243 	if (!xdp) {
1244 		kfree(vqs);
1245 		kvfree(n);
1246 		kfree(queue);
		return -ENOMEM;
1247 	}
1248 	n->vqs[VHOST_NET_VQ_TX].xdp = xdp;
1249 
1250 	dev = &n->dev;
1251 	vqs[VHOST_NET_VQ_TX] = &n->vqs[VHOST_NET_VQ_TX].vq;
1252 	vqs[VHOST_NET_VQ_RX] = &n->vqs[VHOST_NET_VQ_RX].vq;
1253 	n->vqs[VHOST_NET_VQ_TX].vq.handle_kick = handle_tx_kick;
1254 	n->vqs[VHOST_NET_VQ_RX].vq.handle_kick = handle_rx_kick;
1255 	for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
1256 		n->vqs[i].ubufs = NULL;
1257 		n->vqs[i].ubuf_info = NULL;
1258 		n->vqs[i].upend_idx = 0;
1259 		n->vqs[i].done_idx = 0;
1260 		n->vqs[i].batched_xdp = 0;
1261 		n->vqs[i].vhost_hlen = 0;
1262 		n->vqs[i].sock_hlen = 0;
1263 		n->vqs[i].rx_ring = NULL;
1264 		vhost_net_buf_init(&n->vqs[i].rxq);
1265 	}
1266 	vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX);
1267 
1268 	vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev);
1269 	vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev);
1270 
1271 	f->private_data = n;
1272 
1273 	return 0;
1274 }
1275 
1276 static struct socket *vhost_net_stop_vq(struct vhost_net *n,
1277 					struct vhost_virtqueue *vq)
1278 {
1279 	struct socket *sock;
1280 	struct vhost_net_virtqueue *nvq =
1281 		container_of(vq, struct vhost_net_virtqueue, vq);
1282 
1283 	mutex_lock(&vq->mutex);
1284 	sock = vq->private_data;
1285 	vhost_net_disable_vq(n, vq);
1286 	vq->private_data = NULL;
1287 	vhost_net_buf_unproduce(nvq);
1288 	nvq->rx_ring = NULL;
1289 	mutex_unlock(&vq->mutex);
1290 	return sock;
1291 }
1292 
1293 static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock,
1294 			   struct socket **rx_sock)
1295 {
1296 	*tx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_TX].vq);
1297 	*rx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_RX].vq);
1298 }
1299 
1300 static void vhost_net_flush_vq(struct vhost_net *n, int index)
1301 {
1302 	vhost_poll_flush(n->poll + index);
1303 	vhost_poll_flush(&n->vqs[index].vq.poll);
1304 }
1305 
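/* Flush pending work on both virtqueues; for zerocopy TX also wait until
 * every outstanding lower-device DMA has completed. */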
1306 static void vhost_net_flush(struct vhost_net *n)
1307 {
1308 	vhost_net_flush_vq(n, VHOST_NET_VQ_TX);
1309 	vhost_net_flush_vq(n, VHOST_NET_VQ_RX);
1310 	if (n->vqs[VHOST_NET_VQ_TX].ubufs) {
1311 		mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
1312 		n->tx_flush = true;
1313 		mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
1314 		/* Wait for all lower device DMAs done. */
1315 		vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs);
1316 		mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
1317 		n->tx_flush = false;
1318 		atomic_set(&n->vqs[VHOST_NET_VQ_TX].ubufs->refcount, 1);
1319 		mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
1320 	}
1321 }
1322 
1323 static int vhost_net_release(struct inode *inode, struct file *f)
1324 {
1325 	struct vhost_net *n = f->private_data;
1326 	struct socket *tx_sock;
1327 	struct socket *rx_sock;
1328 
1329 	vhost_net_stop(n, &tx_sock, &rx_sock);
1330 	vhost_net_flush(n);
1331 	vhost_dev_stop(&n->dev);
1332 	vhost_dev_cleanup(&n->dev);
1333 	vhost_net_vq_reset(n);
1334 	if (tx_sock)
1335 		sockfd_put(tx_sock);
1336 	if (rx_sock)
1337 		sockfd_put(rx_sock);
1338 	/* Make sure no callbacks are outstanding */
1339 	synchronize_rcu_bh();
1340 	/* We do an extra flush before freeing memory,
1341 	 * since jobs can re-queue themselves. */
1342 	vhost_net_flush(n);
1343 	kfree(n->vqs[VHOST_NET_VQ_RX].rxq.queue);
1344 	kfree(n->vqs[VHOST_NET_VQ_TX].xdp);
1345 	kfree(n->dev.vqs);
1346 	kvfree(n);
1347 	return 0;
1348 }
1349 
1350 static struct socket *get_raw_socket(int fd)
1351 {
1352 	struct {
1353 		struct sockaddr_ll sa;
1354 		char  buf[MAX_ADDR_LEN];
1355 	} uaddr;
1356 	int r;
1357 	struct socket *sock = sockfd_lookup(fd, &r);
1358 
1359 	if (!sock)
1360 		return ERR_PTR(-ENOTSOCK);
1361 
1362 	/* Parameter checking */
1363 	if (sock->sk->sk_type != SOCK_RAW) {
1364 		r = -ESOCKTNOSUPPORT;
1365 		goto err;
1366 	}
1367 
1368 	r = sock->ops->getname(sock, (struct sockaddr *)&uaddr.sa, 0);
1369 	if (r < 0)
1370 		goto err;
1371 
1372 	if (uaddr.sa.sll_family != AF_PACKET) {
1373 		r = -EPFNOSUPPORT;
1374 		goto err;
1375 	}
1376 	return sock;
1377 err:
1378 	sockfd_put(sock);
1379 	return ERR_PTR(r);
1380 }
1381 
1382 static struct ptr_ring *get_tap_ptr_ring(int fd)
1383 {
1384 	struct ptr_ring *ring;
1385 	struct file *file = fget(fd);
1386 
1387 	if (!file)
1388 		return NULL;
1389 	ring = tun_get_tx_ring(file);
1390 	if (!IS_ERR(ring))
1391 		goto out;
1392 	ring = tap_get_ptr_ring(file);
1393 	if (!IS_ERR(ring))
1394 		goto out;
1395 	ring = NULL;
1396 out:
1397 	fput(file);
1398 	return ring;
1399 }
1400 
1401 static struct socket *get_tap_socket(int fd)
1402 {
1403 	struct file *file = fget(fd);
1404 	struct socket *sock;
1405 
1406 	if (!file)
1407 		return ERR_PTR(-EBADF);
1408 	sock = tun_get_socket(file);
1409 	if (!IS_ERR(sock))
1410 		return sock;
1411 	sock = tap_get_socket(file);
1412 	if (IS_ERR(sock))
1413 		fput(file);
1414 	return sock;
1415 }
1416 
1417 static struct socket *get_socket(int fd)
1418 {
1419 	struct socket *sock;
1420 
1421 	/* special case to disable backend */
1422 	if (fd == -1)
1423 		return NULL;
1424 	sock = get_raw_socket(fd);
1425 	if (!IS_ERR(sock))
1426 		return sock;
1427 	sock = get_tap_socket(fd);
1428 	if (!IS_ERR(sock))
1429 		return sock;
1430 	return ERR_PTR(-ENOTSOCK);
1431 }
1432 
1433 static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
1434 {
1435 	struct socket *sock, *oldsock;
1436 	struct vhost_virtqueue *vq;
1437 	struct vhost_net_virtqueue *nvq;
1438 	struct vhost_net_ubuf_ref *ubufs, *oldubufs = NULL;
1439 	int r;
1440 
1441 	mutex_lock(&n->dev.mutex);
1442 	r = vhost_dev_check_owner(&n->dev);
1443 	if (r)
1444 		goto err;
1445 
1446 	if (index >= VHOST_NET_VQ_MAX) {
1447 		r = -ENOBUFS;
1448 		goto err;
1449 	}
1450 	vq = &n->vqs[index].vq;
1451 	nvq = &n->vqs[index];
1452 	mutex_lock(&vq->mutex);
1453 
1454 	/* Verify that ring has been setup correctly. */
1455 	if (!vhost_vq_access_ok(vq)) {
1456 		r = -EFAULT;
1457 		goto err_vq;
1458 	}
1459 	sock = get_socket(fd);
1460 	if (IS_ERR(sock)) {
1461 		r = PTR_ERR(sock);
1462 		goto err_vq;
1463 	}
1464 
1465 	/* start polling new socket */
1466 	oldsock = vq->private_data;
1467 	if (sock != oldsock) {
1468 		ubufs = vhost_net_ubuf_alloc(vq,
1469 					     sock && vhost_sock_zcopy(sock));
1470 		if (IS_ERR(ubufs)) {
1471 			r = PTR_ERR(ubufs);
1472 			goto err_ubufs;
1473 		}
1474 
1475 		vhost_net_disable_vq(n, vq);
1476 		vq->private_data = sock;
1477 		vhost_net_buf_unproduce(nvq);
1478 		r = vhost_vq_init_access(vq);
1479 		if (r)
1480 			goto err_used;
1481 		r = vhost_net_enable_vq(n, vq);
1482 		if (r)
1483 			goto err_used;
1484 		if (index == VHOST_NET_VQ_RX)
1485 			nvq->rx_ring = get_tap_ptr_ring(fd);
1486 
1487 		oldubufs = nvq->ubufs;
1488 		nvq->ubufs = ubufs;
1489 
1490 		n->tx_packets = 0;
1491 		n->tx_zcopy_err = 0;
1492 		n->tx_flush = false;
1493 	}
1494 
1495 	mutex_unlock(&vq->mutex);
1496 
1497 	if (oldubufs) {
1498 		vhost_net_ubuf_put_wait_and_free(oldubufs);
1499 		mutex_lock(&vq->mutex);
1500 		vhost_zerocopy_signal_used(n, vq);
1501 		mutex_unlock(&vq->mutex);
1502 	}
1503 
1504 	if (oldsock) {
1505 		vhost_net_flush_vq(n, index);
1506 		sockfd_put(oldsock);
1507 	}
1508 
1509 	mutex_unlock(&n->dev.mutex);
1510 	return 0;
1511 
1512 err_used:
1513 	vq->private_data = oldsock;
1514 	vhost_net_enable_vq(n, vq);
1515 	if (ubufs)
1516 		vhost_net_ubuf_put_wait_and_free(ubufs);
1517 err_ubufs:
1518 	if (sock)
1519 		sockfd_put(sock);
1520 err_vq:
1521 	mutex_unlock(&vq->mutex);
1522 err:
1523 	mutex_unlock(&n->dev.mutex);
1524 	return r;
1525 }
1526 
1527 static long vhost_net_reset_owner(struct vhost_net *n)
1528 {
1529 	struct socket *tx_sock = NULL;
1530 	struct socket *rx_sock = NULL;
1531 	long err;
1532 	struct vhost_umem *umem;
1533 
1534 	mutex_lock(&n->dev.mutex);
1535 	err = vhost_dev_check_owner(&n->dev);
1536 	if (err)
1537 		goto done;
1538 	umem = vhost_dev_reset_owner_prepare();
1539 	if (!umem) {
1540 		err = -ENOMEM;
1541 		goto done;
1542 	}
1543 	vhost_net_stop(n, &tx_sock, &rx_sock);
1544 	vhost_net_flush(n);
1545 	vhost_dev_stop(&n->dev);
1546 	vhost_dev_reset_owner(&n->dev, umem);
1547 	vhost_net_vq_reset(n);
1548 done:
1549 	mutex_unlock(&n->dev.mutex);
1550 	if (tx_sock)
1551 		sockfd_put(tx_sock);
1552 	if (rx_sock)
1553 		sockfd_put(rx_sock);
1554 	return err;
1555 }
1556 
1557 static int vhost_net_set_backend_features(struct vhost_net *n, u64 features)
1558 {
1559 	int i;
1560 
1561 	mutex_lock(&n->dev.mutex);
1562 	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
1563 		mutex_lock(&n->vqs[i].vq.mutex);
1564 		n->vqs[i].vq.acked_backend_features = features;
1565 		mutex_unlock(&n->vqs[i].vq.mutex);
1566 	}
1567 	mutex_unlock(&n->dev.mutex);
1568 
1569 	return 0;
1570 }
1571 
1572 static int vhost_net_set_features(struct vhost_net *n, u64 features)
1573 {
1574 	size_t vhost_hlen, sock_hlen, hdr_len;
1575 	int i;
1576 
1577 	hdr_len = (features & ((1ULL << VIRTIO_NET_F_MRG_RXBUF) |
1578 			       (1ULL << VIRTIO_F_VERSION_1))) ?
1579 			sizeof(struct virtio_net_hdr_mrg_rxbuf) :
1580 			sizeof(struct virtio_net_hdr);
1581 	if (features & (1 << VHOST_NET_F_VIRTIO_NET_HDR)) {
1582 		/* vhost provides vnet_hdr */
1583 		vhost_hlen = hdr_len;
1584 		sock_hlen = 0;
1585 	} else {
1586 		/* socket provides vnet_hdr */
1587 		vhost_hlen = 0;
1588 		sock_hlen = hdr_len;
1589 	}
1590 	mutex_lock(&n->dev.mutex);
1591 	if ((features & (1 << VHOST_F_LOG_ALL)) &&
1592 	    !vhost_log_access_ok(&n->dev))
1593 		goto out_unlock;
1594 
1595 	if ((features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))) {
1596 		if (vhost_init_device_iotlb(&n->dev, true))
1597 			goto out_unlock;
1598 	}
1599 
1600 	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
1601 		mutex_lock(&n->vqs[i].vq.mutex);
1602 		n->vqs[i].vq.acked_features = features;
1603 		n->vqs[i].vhost_hlen = vhost_hlen;
1604 		n->vqs[i].sock_hlen = sock_hlen;
1605 		mutex_unlock(&n->vqs[i].vq.mutex);
1606 	}
1607 	mutex_unlock(&n->dev.mutex);
1608 	return 0;
1609 
1610 out_unlock:
1611 	mutex_unlock(&n->dev.mutex);
1612 	return -EFAULT;
1613 }
1614 
1615 static long vhost_net_set_owner(struct vhost_net *n)
1616 {
1617 	int r;
1618 
1619 	mutex_lock(&n->dev.mutex);
1620 	if (vhost_dev_has_owner(&n->dev)) {
1621 		r = -EBUSY;
1622 		goto out;
1623 	}
1624 	r = vhost_net_set_ubuf_info(n);
1625 	if (r)
1626 		goto out;
1627 	r = vhost_dev_set_owner(&n->dev);
1628 	if (r)
1629 		vhost_net_clear_ubuf_info(n);
1630 	vhost_net_flush(n);
1631 out:
1632 	mutex_unlock(&n->dev.mutex);
1633 	return r;
1634 }
1635 
1636 static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
1637 			    unsigned long arg)
1638 {
1639 	struct vhost_net *n = f->private_data;
1640 	void __user *argp = (void __user *)arg;
1641 	u64 __user *featurep = argp;
1642 	struct vhost_vring_file backend;
1643 	u64 features;
1644 	int r;
1645 
1646 	switch (ioctl) {
1647 	case VHOST_NET_SET_BACKEND:
1648 		if (copy_from_user(&backend, argp, sizeof backend))
1649 			return -EFAULT;
1650 		return vhost_net_set_backend(n, backend.index, backend.fd);
1651 	case VHOST_GET_FEATURES:
1652 		features = VHOST_NET_FEATURES;
1653 		if (copy_to_user(featurep, &features, sizeof features))
1654 			return -EFAULT;
1655 		return 0;
1656 	case VHOST_SET_FEATURES:
1657 		if (copy_from_user(&features, featurep, sizeof features))
1658 			return -EFAULT;
1659 		if (features & ~VHOST_NET_FEATURES)
1660 			return -EOPNOTSUPP;
1661 		return vhost_net_set_features(n, features);
1662 	case VHOST_GET_BACKEND_FEATURES:
1663 		features = VHOST_NET_BACKEND_FEATURES;
1664 		if (copy_to_user(featurep, &features, sizeof(features)))
1665 			return -EFAULT;
1666 		return 0;
1667 	case VHOST_SET_BACKEND_FEATURES:
1668 		if (copy_from_user(&features, featurep, sizeof(features)))
1669 			return -EFAULT;
1670 		if (features & ~VHOST_NET_BACKEND_FEATURES)
1671 			return -EOPNOTSUPP;
1672 		return vhost_net_set_backend_features(n, features);
1673 	case VHOST_RESET_OWNER:
1674 		return vhost_net_reset_owner(n);
1675 	case VHOST_SET_OWNER:
1676 		return vhost_net_set_owner(n);
1677 	default:
1678 		mutex_lock(&n->dev.mutex);
1679 		r = vhost_dev_ioctl(&n->dev, ioctl, argp);
1680 		if (r == -ENOIOCTLCMD)
1681 			r = vhost_vring_ioctl(&n->dev, ioctl, argp);
1682 		else
1683 			vhost_net_flush(n);
1684 		mutex_unlock(&n->dev.mutex);
1685 		return r;
1686 	}
1687 }
1688 
1689 #ifdef CONFIG_COMPAT
1690 static long vhost_net_compat_ioctl(struct file *f, unsigned int ioctl,
1691 				   unsigned long arg)
1692 {
1693 	return vhost_net_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
1694 }
1695 #endif
1696 
1697 static ssize_t vhost_net_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
1698 {
1699 	struct file *file = iocb->ki_filp;
1700 	struct vhost_net *n = file->private_data;
1701 	struct vhost_dev *dev = &n->dev;
1702 	int noblock = file->f_flags & O_NONBLOCK;
1703 
1704 	return vhost_chr_read_iter(dev, to, noblock);
1705 }
1706 
1707 static ssize_t vhost_net_chr_write_iter(struct kiocb *iocb,
1708 					struct iov_iter *from)
1709 {
1710 	struct file *file = iocb->ki_filp;
1711 	struct vhost_net *n = file->private_data;
1712 	struct vhost_dev *dev = &n->dev;
1713 
1714 	return vhost_chr_write_iter(dev, from);
1715 }
1716 
1717 static __poll_t vhost_net_chr_poll(struct file *file, poll_table *wait)
1718 {
1719 	struct vhost_net *n = file->private_data;
1720 	struct vhost_dev *dev = &n->dev;
1721 
1722 	return vhost_chr_poll(file, dev, wait);
1723 }
1724 
1725 static const struct file_operations vhost_net_fops = {
1726 	.owner          = THIS_MODULE,
1727 	.release        = vhost_net_release,
1728 	.read_iter      = vhost_net_chr_read_iter,
1729 	.write_iter     = vhost_net_chr_write_iter,
1730 	.poll           = vhost_net_chr_poll,
1731 	.unlocked_ioctl = vhost_net_ioctl,
1732 #ifdef CONFIG_COMPAT
1733 	.compat_ioctl   = vhost_net_compat_ioctl,
1734 #endif
1735 	.open           = vhost_net_open,
1736 	.llseek		= noop_llseek,
1737 };
1738 
1739 static struct miscdevice vhost_net_misc = {
1740 	.minor = VHOST_NET_MINOR,
1741 	.name = "vhost-net",
1742 	.fops = &vhost_net_fops,
1743 };
1744 
1745 static int vhost_net_init(void)
1746 {
1747 	if (experimental_zcopytx)
1748 		vhost_net_enable_zcopy(VHOST_NET_VQ_TX);
1749 	return misc_register(&vhost_net_misc);
1750 }
1751 module_init(vhost_net_init);
1752 
1753 static void vhost_net_exit(void)
1754 {
1755 	misc_deregister(&vhost_net_misc);
1756 }
1757 module_exit(vhost_net_exit);
1758 
1759 MODULE_VERSION("0.0.1");
1760 MODULE_LICENSE("GPL v2");
1761 MODULE_AUTHOR("Michael S. Tsirkin");
1762 MODULE_DESCRIPTION("Host kernel accelerator for virtio net");
1763 MODULE_ALIAS_MISCDEV(VHOST_NET_MINOR);
1764 MODULE_ALIAS("devname:vhost-net");
1765