xref: /openbmc/linux/drivers/vhost/net.c (revision ca79522c)
/* Copyright (C) 2009 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * virtio-net server in host kernel.
 */

#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/rcupdate.h>
#include <linux/file.h>
#include <linux/slab.h>

#include <linux/net.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/if_tun.h>
#include <linux/if_macvlan.h>
#include <linux/if_vlan.h>

#include <net/sock.h>

#include "vhost.h"

static int experimental_zcopytx = 1;
module_param(experimental_zcopytx, int, 0444);
MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;"
		 " 1 - Enable; 0 - Disable");

/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_NET_WEIGHT 0x80000

/* Max number of TX used buffers for outstanding zerocopy */
#define VHOST_MAX_PEND 128
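/* Packets shorter than this are cheaper to copy than to pin for zerocopy
 * DMA; handle_tx() below falls back to the copy path for them. */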
#define VHOST_GOODCOPY_LEN 256

/*
 * For transmit, used buffer len is unused; we override it to track buffer
 * status internally; used for zerocopy tx only.
 */
/* Lower device DMA failed */
#define VHOST_DMA_FAILED_LEN	3
/* Lower device DMA done */
#define VHOST_DMA_DONE_LEN	2
/* Lower device DMA in progress */
#define VHOST_DMA_IN_PROGRESS	1
/* Buffer unused */
#define VHOST_DMA_CLEAR_LEN	0

#define VHOST_DMA_IS_DONE(len) ((len) >= VHOST_DMA_DONE_LEN)
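/* Ordering note: the status values above are monotone on purpose, so a
 * single comparison suffices: DONE (2) and FAILED (3) count as finished,
 * while CLEAR (0) and IN_PROGRESS (1) do not. */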

enum {
	VHOST_NET_FEATURES = VHOST_FEATURES |
			 (1ULL << VHOST_NET_F_VIRTIO_NET_HDR) |
			 (1ULL << VIRTIO_NET_F_MRG_RXBUF),
};

enum {
	VHOST_NET_VQ_RX = 0,
	VHOST_NET_VQ_TX = 1,
	VHOST_NET_VQ_MAX = 2,
};

struct vhost_net_ubuf_ref {
	struct kref kref;
	wait_queue_head_t wait;
	struct vhost_virtqueue *vq;
};

struct vhost_net_virtqueue {
	struct vhost_virtqueue vq;
	/* hdr is used to store the virtio header.
	 * Since each iovec has >= 1 byte length, we never need more than
	 * header length entries to store the header. */
	struct iovec hdr[sizeof(struct virtio_net_hdr_mrg_rxbuf)];
	size_t vhost_hlen;
	size_t sock_hlen;
	/* vhost zerocopy support fields below: */
	/* last used idx for outstanding DMA zerocopy buffers */
	int upend_idx;
	/* first used idx for DMA done zerocopy buffers */
	int done_idx;
	/* an array of userspace buffers info */
	struct ubuf_info *ubuf_info;
	/* Reference counting for outstanding ubufs.
	 * Protected by vq mutex. Writers must also take device mutex. */
	struct vhost_net_ubuf_ref *ubufs;
};

struct vhost_net {
	struct vhost_dev dev;
	struct vhost_net_virtqueue vqs[VHOST_NET_VQ_MAX];
	struct vhost_poll poll[VHOST_NET_VQ_MAX];
	/* Number of TX recently submitted.
	 * Protected by tx vq lock. */
	unsigned tx_packets;
	/* Number of times zerocopy TX recently failed.
	 * Protected by tx vq lock. */
	unsigned tx_zcopy_err;
	/* Flush in progress. Protected by tx vq lock. */
	bool tx_flush;
};

static unsigned vhost_net_zcopy_mask __read_mostly;

static void vhost_net_enable_zcopy(int vq)
{
	vhost_net_zcopy_mask |= 0x1 << vq;
}

static void vhost_net_zerocopy_done_signal(struct kref *kref)
{
	struct vhost_net_ubuf_ref *ubufs;

	ubufs = container_of(kref, struct vhost_net_ubuf_ref, kref);
	wake_up(&ubufs->wait);
}

static struct vhost_net_ubuf_ref *
vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
{
	struct vhost_net_ubuf_ref *ubufs;
	/* No zero copy backend? Nothing to count. */
	if (!zcopy)
		return NULL;
	ubufs = kmalloc(sizeof(*ubufs), GFP_KERNEL);
	if (!ubufs)
		return ERR_PTR(-ENOMEM);
	kref_init(&ubufs->kref);
	init_waitqueue_head(&ubufs->wait);
	ubufs->vq = vq;
	return ubufs;
}

static void vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
{
	kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal);
}

static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
{
	kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal);
	wait_event(ubufs->wait, !atomic_read(&ubufs->kref.refcount));
}

/* For callers that will not touch ubufs again: wait, then free. Freeing in
 * vhost_net_ubuf_put_and_wait() itself would be a use-after-free for
 * vhost_net_flush(), which re-inits the kref and keeps using ubufs. */
static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs)
{
	vhost_net_ubuf_put_and_wait(ubufs);
	kfree(ubufs);
}
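
/* Refcount discipline, as used below: vhost_net_ubuf_alloc() initializes the
 * kref to 1; handle_tx() takes one reference per in-flight zerocopy skb and
 * vhost_zerocopy_callback() drops it when the lower device finishes the DMA.
 * Flush/teardown paths drop the initial reference and wait for the count to
 * reach zero before reusing or freeing the structure. */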

static void vhost_net_clear_ubuf_info(struct vhost_net *n)
{
	bool zcopy;
	int i;

	for (i = 0; i < n->dev.nvqs; ++i) {
		zcopy = vhost_net_zcopy_mask & (0x1 << i);
		if (zcopy)
			kfree(n->vqs[i].ubuf_info);
	}
}

static int vhost_net_set_ubuf_info(struct vhost_net *n)
{
	bool zcopy;
	int i;

	for (i = 0; i < n->dev.nvqs; ++i) {
		zcopy = vhost_net_zcopy_mask & (0x1 << i);
		if (!zcopy)
			continue;
		n->vqs[i].ubuf_info = kmalloc(sizeof(*n->vqs[i].ubuf_info) *
					      UIO_MAXIOV, GFP_KERNEL);
		if (!n->vqs[i].ubuf_info)
			goto err;
	}
	return 0;

err:
	while (i--) {
		zcopy = vhost_net_zcopy_mask & (0x1 << i);
		if (!zcopy)
			continue;
		kfree(n->vqs[i].ubuf_info);
	}
	return -ENOMEM;
}

static void vhost_net_vq_reset(struct vhost_net *n)
{
	int i;

	for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
		n->vqs[i].done_idx = 0;
		n->vqs[i].upend_idx = 0;
		n->vqs[i].ubufs = NULL;
		n->vqs[i].ubuf_info = NULL;
		n->vqs[i].vhost_hlen = 0;
		n->vqs[i].sock_hlen = 0;
	}
}

static void vhost_net_tx_packet(struct vhost_net *net)
{
	++net->tx_packets;
	if (net->tx_packets < 1024)
		return;
	net->tx_packets = 0;
	net->tx_zcopy_err = 0;
}

static void vhost_net_tx_err(struct vhost_net *net)
{
	++net->tx_zcopy_err;
}

static bool vhost_net_tx_select_zcopy(struct vhost_net *net)
{
	/* TX flush waits for outstanding DMAs to be done.
	 * Don't start new DMAs.
	 */
	return !net->tx_flush &&
		net->tx_packets / 64 >= net->tx_zcopy_err;
}

static bool vhost_sock_zcopy(struct socket *sock)
{
	return unlikely(experimental_zcopytx) &&
		sock_flag(sock->sk, SOCK_ZEROCOPY);
}

/* Pop first len bytes from iovec. Return number of segments used. */
static int move_iovec_hdr(struct iovec *from, struct iovec *to,
			  size_t len, int iov_count)
{
	int seg = 0;
	size_t size;

	while (len && seg < iov_count) {
		size = min(from->iov_len, len);
		to->iov_base = from->iov_base;
		to->iov_len = size;
		from->iov_len -= size;
		from->iov_base += size;
		len -= size;
		++from;
		++to;
		++seg;
	}
	return seg;
}

/* Copy iovec entries for len bytes from iovec. */
static void copy_iovec_hdr(const struct iovec *from, struct iovec *to,
			   size_t len, int iovcount)
{
	int seg = 0;
	size_t size;

	while (len && seg < iovcount) {
		size = min(from->iov_len, len);
		to->iov_base = from->iov_base;
		to->iov_len = size;
		len -= size;
		++from;
		++to;
		++seg;
	}
}
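
/* Illustration: with from = {{p, 2}, {q, 10}} and len = 4, both helpers fill
 * to = {{p, 2}, {q, 2}} using two segments. Afterwards move_iovec_hdr() has
 * consumed those bytes, leaving from = {{p+2, 0}, {q+2, 8}}, while
 * copy_iovec_hdr() leaves from untouched. The latter is what the RX path
 * needs, since recvmsg() may modify msg_iov. */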

/* The lower device driver may complete DMAs out of order for some reason.
 * upend_idx tracks the end of the outstanding used idx window, done_idx its
 * head. Once the lower device has finished a contiguous run of DMAs, we
 * signal the used idx to the guest.
 */
static int vhost_zerocopy_signal_used(struct vhost_net *net,
				      struct vhost_virtqueue *vq)
{
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);
	int i;
	int j = 0;

	for (i = nvq->done_idx; i != nvq->upend_idx; i = (i + 1) % UIO_MAXIOV) {
		if (vq->heads[i].len == VHOST_DMA_FAILED_LEN)
			vhost_net_tx_err(net);
		if (VHOST_DMA_IS_DONE(vq->heads[i].len)) {
			vq->heads[i].len = VHOST_DMA_CLEAR_LEN;
			vhost_add_used_and_signal(vq->dev, vq,
						  vq->heads[i].id, 0);
			++j;
		} else
			break;
	}
	if (j)
		nvq->done_idx = i;
	return j;
}
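
/* Example: with done_idx == 5 and upend_idx == 9, entries 5..8 are scanned;
 * if 5 and 6 are done but 7 is still in flight, only heads 5 and 6 are
 * signaled and done_idx advances to 7. All indices wrap modulo UIO_MAXIOV. */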

static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
{
	struct vhost_net_ubuf_ref *ubufs = ubuf->ctx;
	struct vhost_virtqueue *vq = ubufs->vq;
	int cnt = atomic_read(&ubufs->kref.refcount);

	/*
	 * Trigger polling thread if guest stopped submitting new buffers:
	 * in this case, the refcount after decrement will eventually reach 1,
	 * so here it is 2.
	 * We also trigger polling periodically after each 16 packets
	 * (the value 16 here is more or less arbitrary, it's tuned to trigger
	 * less than 10% of times).
	 */
	if (cnt <= 2 || !(cnt % 16))
		vhost_poll_queue(&vq->poll);
	/* Set len to mark this descriptor's buffers as DMA done. */
	vq->heads[ubuf->desc].len = success ?
		VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
	vhost_net_ubuf_put(ubufs);
}

/* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_tx(struct vhost_net *net)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *vq = &nvq->vq;
	unsigned out, in, s;
	int head;
	struct msghdr msg = {
		.msg_name = NULL,
		.msg_namelen = 0,
		.msg_control = NULL,
		.msg_controllen = 0,
		.msg_iov = vq->iov,
		.msg_flags = MSG_DONTWAIT,
	};
	size_t len, total_len = 0;
	int err;
	size_t hdr_size;
	struct socket *sock;
	struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
	bool zcopy, zcopy_used;

	/* TODO: check that we are running from vhost_worker? */
	sock = rcu_dereference_check(vq->private_data, 1);
	if (!sock)
		return;

	mutex_lock(&vq->mutex);
	vhost_disable_notify(&net->dev, vq);

	hdr_size = nvq->vhost_hlen;
	zcopy = nvq->ubufs;

	for (;;) {
		/* Release buffers with completed DMA first. */
		if (zcopy)
			vhost_zerocopy_signal_used(net, vq);

		head = vhost_get_vq_desc(&net->dev, vq, vq->iov,
					 ARRAY_SIZE(vq->iov),
					 &out, &in,
					 NULL, NULL);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* Nothing new?  Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			int num_pends;

			/* If there are too many outstanding DMAs, queue the
			 * work. Handle upend_idx wrap around.
			 */
			num_pends = likely(nvq->upend_idx >= nvq->done_idx) ?
				    (nvq->upend_idx - nvq->done_idx) :
				    (nvq->upend_idx + UIO_MAXIOV -
				     nvq->done_idx);
			if (unlikely(num_pends > VHOST_MAX_PEND))
				break;
			if (unlikely(vhost_enable_notify(&net->dev, vq))) {
				vhost_disable_notify(&net->dev, vq);
				continue;
			}
			break;
		}
		if (in) {
			vq_err(vq, "Unexpected descriptor format for TX: "
			       "out %d, in %d\n", out, in);
			break;
		}
		/* Skip header. TODO: support TSO. */
		s = move_iovec_hdr(vq->iov, nvq->hdr, hdr_size, out);
		msg.msg_iovlen = out;
		len = iov_length(vq->iov, out);
		/* Sanity check */
		if (!len) {
			vq_err(vq, "Unexpected header len for TX: "
			       "%zd expected %zd\n",
			       iov_length(nvq->hdr, s), hdr_size);
			break;
		}
		zcopy_used = zcopy && (len >= VHOST_GOODCOPY_LEN ||
				       nvq->upend_idx != nvq->done_idx);

		/* use msg_control to pass vhost zerocopy ubuf info to skb */
		if (zcopy_used) {
			vq->heads[nvq->upend_idx].id = head;
			if (!vhost_net_tx_select_zcopy(net) ||
			    len < VHOST_GOODCOPY_LEN) {
				/* A copy doesn't need to wait for DMA done. */
				vq->heads[nvq->upend_idx].len =
							VHOST_DMA_DONE_LEN;
				msg.msg_control = NULL;
				msg.msg_controllen = 0;
				ubufs = NULL;
			} else {
				struct ubuf_info *ubuf;
				ubuf = nvq->ubuf_info + nvq->upend_idx;

				vq->heads[nvq->upend_idx].len =
					VHOST_DMA_IN_PROGRESS;
				ubuf->callback = vhost_zerocopy_callback;
				ubuf->ctx = nvq->ubufs;
				ubuf->desc = nvq->upend_idx;
				msg.msg_control = ubuf;
				msg.msg_controllen = sizeof(ubuf);
				ubufs = nvq->ubufs;
				kref_get(&ubufs->kref);
			}
			nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV;
		}
		/* TODO: Check specific error and bomb out unless ENOBUFS? */
		err = sock->ops->sendmsg(NULL, sock, &msg, len);
		if (unlikely(err < 0)) {
			if (zcopy_used) {
				if (ubufs)
					vhost_net_ubuf_put(ubufs);
				nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
					% UIO_MAXIOV;
			}
			vhost_discard_vq_desc(vq, 1);
			break;
		}
		if (err != len)
			pr_debug("Truncated TX packet: "
				 " len %d != %zd\n", err, len);
		if (!zcopy_used)
			vhost_add_used_and_signal(&net->dev, vq, head, 0);
		else
			vhost_zerocopy_signal_used(net, vq);
		total_len += len;
		vhost_net_tx_packet(net);
		if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
			vhost_poll_queue(&vq->poll);
			break;
		}
	}

	mutex_unlock(&vq->mutex);
}

static int peek_head_len(struct sock *sk)
{
	struct sk_buff *head;
	int len = 0;
	unsigned long flags;

	spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
	head = skb_peek(&sk->sk_receive_queue);
	if (likely(head)) {
		len = head->len;
		if (vlan_tx_tag_present(head))
			len += VLAN_HLEN;
	}

	spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags);
	return len;
}
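
/* VLAN_HLEN is accounted for because a tag that is still out of band in the
 * skb (vlan_tx_tag_present()) gets reinserted into the packet data by the
 * tun/macvtap receive path, growing the packet by one VLAN header. */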

/* This is a multi-buffer version of vhost_get_vq_desc(), that works if
 *	vq has read descriptors only.
 * @vq		- the relevant virtqueue
 * @datalen	- data length we'll be reading
 * @iovcount	- returned count of io vectors we fill
 * @log		- vhost log
 * @log_num	- log offset
 * @quota       - headcount quota, 1 for big buffer
 *	returns number of buffer heads allocated, negative on error
 */
static int get_rx_bufs(struct vhost_virtqueue *vq,
		       struct vring_used_elem *heads,
		       int datalen,
		       unsigned *iovcount,
		       struct vhost_log *log,
		       unsigned *log_num,
		       unsigned int quota)
{
	unsigned int out, in;
	int seg = 0;
	int headcount = 0;
	unsigned d;
	int r, nlogs = 0;

	while (datalen > 0 && headcount < quota) {
		if (unlikely(seg >= UIO_MAXIOV)) {
			r = -ENOBUFS;
			goto err;
		}
		r = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg,
				      ARRAY_SIZE(vq->iov) - seg, &out,
				      &in, log, log_num);
		/* Check for errors before assigning to the unsigned d,
		 * where a negative return would be lost. */
		if (unlikely(r < 0))
			goto err;
		d = r;
		if (d == vq->num) {
			r = 0;
			goto err;
		}
		if (unlikely(out || in <= 0)) {
			vq_err(vq, "unexpected descriptor format for RX: "
				"out %d, in %d\n", out, in);
			r = -EINVAL;
			goto err;
		}
		if (unlikely(log)) {
			nlogs += *log_num;
			log += *log_num;
		}
		heads[headcount].id = d;
		heads[headcount].len = iov_length(vq->iov + seg, in);
		datalen -= heads[headcount].len;
		++headcount;
		seg += in;
	}
	heads[headcount - 1].len += datalen;
	*iovcount = seg;
	if (unlikely(log))
		*log_num = nlogs;
	return headcount;
err:
	vhost_discard_vq_desc(vq, headcount);
	return r;
}
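
/* Note the fixup before returning: after the loop, datalen holds the
 * remainder (non-positive in the mergeable case), so adding it to the last
 * head trims its len down to the bytes the packet actually occupies. */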

/* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_rx(struct vhost_net *net)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_RX];
	struct vhost_virtqueue *vq = &nvq->vq;
	unsigned uninitialized_var(in), log;
	struct vhost_log *vq_log;
	struct msghdr msg = {
		.msg_name = NULL,
		.msg_namelen = 0,
		.msg_control = NULL, /* FIXME: get and handle RX aux data. */
		.msg_controllen = 0,
		.msg_iov = vq->iov,
		.msg_flags = MSG_DONTWAIT,
	};
	struct virtio_net_hdr_mrg_rxbuf hdr = {
		.hdr.flags = 0,
		.hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE
	};
	size_t total_len = 0;
	int err, mergeable;
	s16 headcount;
	size_t vhost_hlen, sock_hlen;
	size_t vhost_len, sock_len;
	/* TODO: check that we are running from vhost_worker? */
	struct socket *sock = rcu_dereference_check(vq->private_data, 1);

	if (!sock)
		return;

	mutex_lock(&vq->mutex);
	vhost_disable_notify(&net->dev, vq);
	vhost_hlen = nvq->vhost_hlen;
	sock_hlen = nvq->sock_hlen;

	vq_log = unlikely(vhost_has_feature(&net->dev, VHOST_F_LOG_ALL)) ?
		vq->log : NULL;
	mergeable = vhost_has_feature(&net->dev, VIRTIO_NET_F_MRG_RXBUF);

	while ((sock_len = peek_head_len(sock->sk))) {
		sock_len += sock_hlen;
		vhost_len = sock_len + vhost_hlen;
		headcount = get_rx_bufs(vq, vq->heads, vhost_len,
					&in, vq_log, &log,
					likely(mergeable) ? UIO_MAXIOV : 1);
		/* On error, stop handling until the next kick. */
		if (unlikely(headcount < 0))
			break;
		/* OK, now we need to know about added descriptors. */
		if (!headcount) {
			if (unlikely(vhost_enable_notify(&net->dev, vq))) {
				/* They have slipped one in as we were
				 * doing that: check again. */
				vhost_disable_notify(&net->dev, vq);
				continue;
			}
			/* Nothing new?  Wait for eventfd to tell us
			 * they refilled. */
			break;
		}
		/* We don't need to be notified again. */
		if (unlikely(vhost_hlen))
			/* Skip header. TODO: support TSO. */
			move_iovec_hdr(vq->iov, nvq->hdr, vhost_hlen, in);
		else
			/* Copy the header for use in VIRTIO_NET_F_MRG_RXBUF:
			 * needed because recvmsg can modify msg_iov. */
			copy_iovec_hdr(vq->iov, nvq->hdr, sock_hlen, in);
		msg.msg_iovlen = in;
		err = sock->ops->recvmsg(NULL, sock, &msg,
					 sock_len, MSG_DONTWAIT | MSG_TRUNC);
		/* Userspace might have consumed the packet meanwhile:
		 * it's not supposed to do this usually, but might be hard
		 * to prevent. Discard data we got (if any) and keep going. */
		if (unlikely(err != sock_len)) {
			pr_debug("Discarded rx packet: "
				 " len %d, expected %zd\n", err, sock_len);
			vhost_discard_vq_desc(vq, headcount);
			continue;
		}
		if (unlikely(vhost_hlen) &&
		    memcpy_toiovecend(nvq->hdr, (unsigned char *)&hdr, 0,
				      vhost_hlen)) {
			vq_err(vq, "Unable to write vnet_hdr at addr %p\n",
			       vq->iov->iov_base);
			break;
		}
		/* TODO: Should check and handle checksum. */
		if (likely(mergeable) &&
		    memcpy_toiovecend(nvq->hdr, (unsigned char *)&headcount,
				      offsetof(typeof(hdr), num_buffers),
				      sizeof hdr.num_buffers)) {
			vq_err(vq, "Failed num_buffers write");
			vhost_discard_vq_desc(vq, headcount);
			break;
		}
		vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
					    headcount);
		if (unlikely(vq_log))
			vhost_log_write(vq, vq_log, log, vhost_len);
		total_len += vhost_len;
		if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
			vhost_poll_queue(&vq->poll);
			break;
		}
	}

	mutex_unlock(&vq->mutex);
}

static void handle_tx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);

	handle_tx(net);
}

static void handle_rx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);

	handle_rx(net);
}

static void handle_tx_net(struct vhost_work *work)
{
	struct vhost_net *net = container_of(work, struct vhost_net,
					     poll[VHOST_NET_VQ_TX].work);
	handle_tx(net);
}

static void handle_rx_net(struct vhost_work *work)
{
	struct vhost_net *net = container_of(work, struct vhost_net,
					     poll[VHOST_NET_VQ_RX].work);
	handle_rx(net);
}
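
/* Two wakeup paths converge on the handlers above: handle_tx_kick() and
 * handle_rx_kick() run when the guest kicks a virtqueue's eventfd, while
 * handle_tx_net() and handle_rx_net() run when the backend socket becomes
 * writable/readable (see the vhost_poll_init() calls in vhost_net_open()).
 * Both funnel into the same handle_tx()/handle_rx(). */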

static int vhost_net_open(struct inode *inode, struct file *f)
{
	struct vhost_net *n = kmalloc(sizeof *n, GFP_KERNEL);
	struct vhost_dev *dev;
	struct vhost_virtqueue **vqs;
	int r, i;

	if (!n)
		return -ENOMEM;
	vqs = kmalloc(VHOST_NET_VQ_MAX * sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		kfree(n);
		return -ENOMEM;
	}

	dev = &n->dev;
	vqs[VHOST_NET_VQ_TX] = &n->vqs[VHOST_NET_VQ_TX].vq;
	vqs[VHOST_NET_VQ_RX] = &n->vqs[VHOST_NET_VQ_RX].vq;
	n->vqs[VHOST_NET_VQ_TX].vq.handle_kick = handle_tx_kick;
	n->vqs[VHOST_NET_VQ_RX].vq.handle_kick = handle_rx_kick;
	for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
		n->vqs[i].ubufs = NULL;
		n->vqs[i].ubuf_info = NULL;
		n->vqs[i].upend_idx = 0;
		n->vqs[i].done_idx = 0;
		n->vqs[i].vhost_hlen = 0;
		n->vqs[i].sock_hlen = 0;
	}
	r = vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX);
	if (r < 0) {
		kfree(n);
		kfree(vqs);
		return r;
	}

	vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, POLLOUT, dev);
	vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, POLLIN, dev);

	f->private_data = n;

	return 0;
}

static void vhost_net_disable_vq(struct vhost_net *n,
				 struct vhost_virtqueue *vq)
{
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);
	struct vhost_poll *poll = n->poll + (nvq - n->vqs);
	if (!vq->private_data)
		return;
	vhost_poll_stop(poll);
}

static int vhost_net_enable_vq(struct vhost_net *n,
				struct vhost_virtqueue *vq)
{
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);
	struct vhost_poll *poll = n->poll + (nvq - n->vqs);
	struct socket *sock;

	sock = rcu_dereference_protected(vq->private_data,
					 lockdep_is_held(&vq->mutex));
	if (!sock)
		return 0;

	return vhost_poll_start(poll, sock->file);
}

static struct socket *vhost_net_stop_vq(struct vhost_net *n,
					struct vhost_virtqueue *vq)
{
	struct socket *sock;

	mutex_lock(&vq->mutex);
	sock = rcu_dereference_protected(vq->private_data,
					 lockdep_is_held(&vq->mutex));
	vhost_net_disable_vq(n, vq);
	rcu_assign_pointer(vq->private_data, NULL);
	mutex_unlock(&vq->mutex);
	return sock;
}

static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock,
			   struct socket **rx_sock)
{
	*tx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_TX].vq);
	*rx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_RX].vq);
}

static void vhost_net_flush_vq(struct vhost_net *n, int index)
{
	vhost_poll_flush(n->poll + index);
	vhost_poll_flush(&n->vqs[index].vq.poll);
}

static void vhost_net_flush(struct vhost_net *n)
{
	vhost_net_flush_vq(n, VHOST_NET_VQ_TX);
	vhost_net_flush_vq(n, VHOST_NET_VQ_RX);
	if (n->vqs[VHOST_NET_VQ_TX].ubufs) {
		mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
		n->tx_flush = true;
		mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
		/* Wait for all lower device DMAs to finish. */
		vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs);
		mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
		n->tx_flush = false;
		kref_init(&n->vqs[VHOST_NET_VQ_TX].ubufs->kref);
		mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
	}
}
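
/* In the flush above, tx_flush makes vhost_net_tx_select_zcopy() refuse new
 * zerocopy DMAs while we wait, and the final kref_init() restores the
 * reference dropped by vhost_net_ubuf_put_and_wait(), so the same ubufs can
 * keep being used afterwards (which is why put_and_wait must not free). */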

static int vhost_net_release(struct inode *inode, struct file *f)
{
	struct vhost_net *n = f->private_data;
	struct socket *tx_sock;
	struct socket *rx_sock;

	vhost_net_stop(n, &tx_sock, &rx_sock);
	vhost_net_flush(n);
	vhost_dev_stop(&n->dev);
	vhost_dev_cleanup(&n->dev, false);
	vhost_net_vq_reset(n);
	if (tx_sock)
		fput(tx_sock->file);
	if (rx_sock)
		fput(rx_sock->file);
	/* We do an extra flush before freeing memory,
	 * since jobs can re-queue themselves. */
	vhost_net_flush(n);
	kfree(n->dev.vqs);
	kfree(n);
	return 0;
}

static struct socket *get_raw_socket(int fd)
{
	struct {
		struct sockaddr_ll sa;
		char  buf[MAX_ADDR_LEN];
	} uaddr;
	int uaddr_len = sizeof uaddr, r;
	struct socket *sock = sockfd_lookup(fd, &r);

	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	/* Parameter checking */
	if (sock->sk->sk_type != SOCK_RAW) {
		r = -ESOCKTNOSUPPORT;
		goto err;
	}

	r = sock->ops->getname(sock, (struct sockaddr *)&uaddr.sa,
			       &uaddr_len, 0);
	if (r)
		goto err;

	if (uaddr.sa.sll_family != AF_PACKET) {
		r = -EPFNOSUPPORT;
		goto err;
	}
	return sock;
err:
	fput(sock->file);
	return ERR_PTR(r);
}

static struct socket *get_tap_socket(int fd)
{
	struct file *file = fget(fd);
	struct socket *sock;

	if (!file)
		return ERR_PTR(-EBADF);
	sock = tun_get_socket(file);
	if (!IS_ERR(sock))
		return sock;
	sock = macvtap_get_socket(file);
	if (IS_ERR(sock))
		fput(file);
	return sock;
}

static struct socket *get_socket(int fd)
{
	struct socket *sock;

	/* special case to disable backend */
	if (fd == -1)
		return NULL;
	sock = get_raw_socket(fd);
	if (!IS_ERR(sock))
		return sock;
	sock = get_tap_socket(fd);
	if (!IS_ERR(sock))
		return sock;
	return ERR_PTR(-ENOTSOCK);
}

static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
{
	struct socket *sock, *oldsock;
	struct vhost_virtqueue *vq;
	struct vhost_net_virtqueue *nvq;
	struct vhost_net_ubuf_ref *ubufs, *oldubufs = NULL;
	int r;

	mutex_lock(&n->dev.mutex);
	r = vhost_dev_check_owner(&n->dev);
	if (r)
		goto err;

	if (index >= VHOST_NET_VQ_MAX) {
		r = -ENOBUFS;
		goto err;
	}
	vq = &n->vqs[index].vq;
	nvq = &n->vqs[index];
	mutex_lock(&vq->mutex);

	/* Verify that ring has been setup correctly. */
	if (!vhost_vq_access_ok(vq)) {
		r = -EFAULT;
		goto err_vq;
	}
	sock = get_socket(fd);
	if (IS_ERR(sock)) {
		r = PTR_ERR(sock);
		goto err_vq;
	}

	/* start polling new socket */
	oldsock = rcu_dereference_protected(vq->private_data,
					    lockdep_is_held(&vq->mutex));
	if (sock != oldsock) {
		ubufs = vhost_net_ubuf_alloc(vq,
					     sock && vhost_sock_zcopy(sock));
		if (IS_ERR(ubufs)) {
			r = PTR_ERR(ubufs);
			goto err_ubufs;
		}

		vhost_net_disable_vq(n, vq);
		rcu_assign_pointer(vq->private_data, sock);
		r = vhost_init_used(vq);
		if (r)
			goto err_used;
		r = vhost_net_enable_vq(n, vq);
		if (r)
			goto err_used;

		oldubufs = nvq->ubufs;
		nvq->ubufs = ubufs;

		n->tx_packets = 0;
		n->tx_zcopy_err = 0;
		n->tx_flush = false;
	}

	mutex_unlock(&vq->mutex);

	if (oldubufs) {
		vhost_net_ubuf_put_wait_and_free(oldubufs);
		mutex_lock(&vq->mutex);
		vhost_zerocopy_signal_used(n, vq);
		mutex_unlock(&vq->mutex);
	}

	if (oldsock) {
		vhost_net_flush_vq(n, index);
		fput(oldsock->file);
	}

	mutex_unlock(&n->dev.mutex);
	return 0;

err_used:
	rcu_assign_pointer(vq->private_data, oldsock);
	vhost_net_enable_vq(n, vq);
	if (ubufs)
		vhost_net_ubuf_put_wait_and_free(ubufs);
err_ubufs:
	fput(sock->file);
err_vq:
	mutex_unlock(&vq->mutex);
err:
	mutex_unlock(&n->dev.mutex);
	return r;
}
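
/* The backend swap above is ordered deliberately: allocate new ubufs first,
 * publish the new socket under the vq mutex, then, outside the mutex, wait
 * for the old socket's outstanding zerocopy DMAs and flush before dropping
 * the old socket's file reference. */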

static long vhost_net_reset_owner(struct vhost_net *n)
{
	struct socket *tx_sock = NULL;
	struct socket *rx_sock = NULL;
	long err;
	struct vhost_memory *memory;

	mutex_lock(&n->dev.mutex);
	err = vhost_dev_check_owner(&n->dev);
	if (err)
		goto done;
	memory = vhost_dev_reset_owner_prepare();
	if (!memory) {
		err = -ENOMEM;
		goto done;
	}
	vhost_net_stop(n, &tx_sock, &rx_sock);
	vhost_net_flush(n);
	vhost_dev_reset_owner(&n->dev, memory);
	vhost_net_vq_reset(n);
done:
	mutex_unlock(&n->dev.mutex);
	if (tx_sock)
		fput(tx_sock->file);
	if (rx_sock)
		fput(rx_sock->file);
	return err;
}

static int vhost_net_set_features(struct vhost_net *n, u64 features)
{
	size_t vhost_hlen, sock_hlen, hdr_len;
	int i;

	hdr_len = (features & (1 << VIRTIO_NET_F_MRG_RXBUF)) ?
			sizeof(struct virtio_net_hdr_mrg_rxbuf) :
			sizeof(struct virtio_net_hdr);
	if (features & (1 << VHOST_NET_F_VIRTIO_NET_HDR)) {
		/* vhost provides vnet_hdr */
		vhost_hlen = hdr_len;
		sock_hlen = 0;
	} else {
		/* socket provides vnet_hdr */
		vhost_hlen = 0;
		sock_hlen = hdr_len;
	}
	mutex_lock(&n->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&n->dev)) {
		mutex_unlock(&n->dev.mutex);
		return -EFAULT;
	}
	n->dev.acked_features = features;
	smp_wmb();
	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
		mutex_lock(&n->vqs[i].vq.mutex);
		n->vqs[i].vhost_hlen = vhost_hlen;
		n->vqs[i].sock_hlen = sock_hlen;
		mutex_unlock(&n->vqs[i].vq.mutex);
	}
	vhost_net_flush(n);
	mutex_unlock(&n->dev.mutex);
	return 0;
}

static long vhost_net_set_owner(struct vhost_net *n)
{
	int r;

	mutex_lock(&n->dev.mutex);
	r = vhost_net_set_ubuf_info(n);
	if (r)
		goto out;
	r = vhost_dev_set_owner(&n->dev);
	if (r)
		vhost_net_clear_ubuf_info(n);
	vhost_net_flush(n);
out:
	mutex_unlock(&n->dev.mutex);
	return r;
}

static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
			    unsigned long arg)
{
	struct vhost_net *n = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	struct vhost_vring_file backend;
	u64 features;
	int r;

	switch (ioctl) {
	case VHOST_NET_SET_BACKEND:
		if (copy_from_user(&backend, argp, sizeof backend))
			return -EFAULT;
		return vhost_net_set_backend(n, backend.index, backend.fd);
	case VHOST_GET_FEATURES:
		features = VHOST_NET_FEATURES;
		if (copy_to_user(featurep, &features, sizeof features))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, featurep, sizeof features))
			return -EFAULT;
		if (features & ~VHOST_NET_FEATURES)
			return -EOPNOTSUPP;
		return vhost_net_set_features(n, features);
	case VHOST_RESET_OWNER:
		return vhost_net_reset_owner(n);
	case VHOST_SET_OWNER:
		return vhost_net_set_owner(n);
	default:
		mutex_lock(&n->dev.mutex);
		r = vhost_dev_ioctl(&n->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&n->dev, ioctl, argp);
		else
			vhost_net_flush(n);
		mutex_unlock(&n->dev.mutex);
		return r;
	}
}
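
/* Rough userspace usage sketch (illustrative only; tap_fd is a made-up name
 * for a tun or macvtap fd accepted by get_tap_socket()):
 *
 *	int vhost = open("/dev/vhost-net", O_RDWR);
 *	ioctl(vhost, VHOST_SET_OWNER, NULL);
 *	__u64 features;
 *	ioctl(vhost, VHOST_GET_FEATURES, &features);
 *	// ... mask to the supported subset, VHOST_SET_FEATURES, then set up
 *	// ... the memory table and rings via the vhost_dev_ioctl() cases
 *	struct vhost_vring_file backend = {
 *		.index = VHOST_NET_VQ_TX, .fd = tap_fd,
 *	};
 *	ioctl(vhost, VHOST_NET_SET_BACKEND, &backend);
 */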

#ifdef CONFIG_COMPAT
static long vhost_net_compat_ioctl(struct file *f, unsigned int ioctl,
				   unsigned long arg)
{
	return vhost_net_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations vhost_net_fops = {
	.owner          = THIS_MODULE,
	.release        = vhost_net_release,
	.unlocked_ioctl = vhost_net_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = vhost_net_compat_ioctl,
#endif
	.open           = vhost_net_open,
	.llseek		= noop_llseek,
};

static struct miscdevice vhost_net_misc = {
	.minor = VHOST_NET_MINOR,
	.name = "vhost-net",
	.fops = &vhost_net_fops,
};

static int vhost_net_init(void)
{
	if (experimental_zcopytx)
		vhost_net_enable_zcopy(VHOST_NET_VQ_TX);
	return misc_register(&vhost_net_misc);
}
module_init(vhost_net_init);

static void vhost_net_exit(void)
{
	misc_deregister(&vhost_net_misc);
}
module_exit(vhost_net_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel accelerator for virtio net");
MODULE_ALIAS_MISCDEV(VHOST_NET_MINOR);
MODULE_ALIAS("devname:vhost-net");