/* xref: /openbmc/linux/drivers/vhost/net.c (revision 63dc02bd) */
/* Copyright (C) 2009 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * virtio-net server in host kernel.
 */

#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/rcupdate.h>
#include <linux/file.h>
#include <linux/slab.h>

#include <linux/net.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/if_tun.h>
#include <linux/if_macvlan.h>
#include <linux/if_vlan.h>

#include <net/sock.h>

#include "vhost.h"

static int experimental_zcopytx;
module_param(experimental_zcopytx, int, 0444);
MODULE_PARM_DESC(experimental_zcopytx, "Enable Experimental Zero Copy TX");
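
/*
 * Usage note (illustrative): with 0444 permissions the parameter is set at
 * module load time, e.g. "modprobe vhost_net experimental_zcopytx=1", and is
 * then visible read-only under /sys/module/vhost_net/parameters/.
 */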

/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_NET_WEIGHT 0x80000

/* Max number of TX buffers with zerocopy DMA still outstanding */
#define VHOST_MAX_PEND 128
#define VHOST_GOODCOPY_LEN 256
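
/*
 * For reference: VHOST_NET_WEIGHT of 0x80000 bytes is 512 KiB per handler
 * invocation before the work is requeued, and packets shorter than
 * VHOST_GOODCOPY_LEN (256 bytes) are sent by plain copy rather than
 * zero-copy, on the assumption that the copy is cheaper than the zerocopy
 * completion bookkeeping for such small payloads.
 */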

enum {
	VHOST_NET_VQ_RX = 0,
	VHOST_NET_VQ_TX = 1,
	VHOST_NET_VQ_MAX = 2,
};

enum vhost_net_poll_state {
	VHOST_NET_POLL_DISABLED = 0,
	VHOST_NET_POLL_STARTED = 1,
	VHOST_NET_POLL_STOPPED = 2,
};

struct vhost_net {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[VHOST_NET_VQ_MAX];
	struct vhost_poll poll[VHOST_NET_VQ_MAX];
	/* Tells us whether we are polling a socket for TX.
	 * We only do this when socket buffer fills up.
	 * Protected by tx vq lock. */
	enum vhost_net_poll_state tx_poll_state;
};

static bool vhost_sock_zcopy(struct socket *sock)
{
	return unlikely(experimental_zcopytx) &&
		sock_flag(sock->sk, SOCK_ZEROCOPY);
}

/* Pop first len bytes from iovec. Return number of segments used. */
static int move_iovec_hdr(struct iovec *from, struct iovec *to,
			  size_t len, int iov_count)
{
	int seg = 0;
	size_t size;

	while (len && seg < iov_count) {
		size = min(from->iov_len, len);
		to->iov_base = from->iov_base;
		to->iov_len = size;
		from->iov_len -= size;
		from->iov_base += size;
		len -= size;
		++from;
		++to;
		++seg;
	}
	return seg;
}
/* Copy iovec entries for len bytes from iovec. */
static void copy_iovec_hdr(const struct iovec *from, struct iovec *to,
			   size_t len, int iovcount)
{
	int seg = 0;
	size_t size;

	while (len && seg < iovcount) {
		size = min(from->iov_len, len);
		to->iov_base = from->iov_base;
		to->iov_len = size;
		len -= size;
		++from;
		++to;
		++seg;
	}
}
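
/*
 * Worked example: with iov = {{base0, 10}, {base1, 20}} and len = 12,
 * move_iovec_hdr() fills to[] with {base0, 10} and {base1, 2}, consumes
 * those 12 bytes from the source iovec and returns 2 segments, while
 * copy_iovec_hdr() fills to[] the same way but leaves the source iovec
 * untouched - which is what the RX path needs because recvmsg() may
 * modify msg_iov.
 */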

/* Caller must have TX VQ lock */
static void tx_poll_stop(struct vhost_net *net)
{
	if (likely(net->tx_poll_state != VHOST_NET_POLL_STARTED))
		return;
	vhost_poll_stop(net->poll + VHOST_NET_VQ_TX);
	net->tx_poll_state = VHOST_NET_POLL_STOPPED;
}

/* Caller must have TX VQ lock */
static void tx_poll_start(struct vhost_net *net, struct socket *sock)
{
	if (unlikely(net->tx_poll_state != VHOST_NET_POLL_STOPPED))
		return;
	vhost_poll_start(net->poll + VHOST_NET_VQ_TX, sock->file);
	net->tx_poll_state = VHOST_NET_POLL_STARTED;
}

/* Expects to always be run from the workqueue, which acts as
 * a read-side critical section for our kind of RCU. */
static void handle_tx(struct vhost_net *net)
{
	struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_TX];
	unsigned out, in, s;
	int head;
	struct msghdr msg = {
		.msg_name = NULL,
		.msg_namelen = 0,
		.msg_control = NULL,
		.msg_controllen = 0,
		.msg_iov = vq->iov,
		.msg_flags = MSG_DONTWAIT,
	};
	size_t len, total_len = 0;
	int err, wmem;
	size_t hdr_size;
	struct socket *sock;
	struct vhost_ubuf_ref *uninitialized_var(ubufs);
	bool zcopy;

	/* TODO: check that we are running from vhost_worker? */
	sock = rcu_dereference_check(vq->private_data, 1);
	if (!sock)
		return;

	wmem = atomic_read(&sock->sk->sk_wmem_alloc);
	if (wmem >= sock->sk->sk_sndbuf) {
		mutex_lock(&vq->mutex);
		tx_poll_start(net, sock);
		mutex_unlock(&vq->mutex);
		return;
	}

	mutex_lock(&vq->mutex);
	vhost_disable_notify(&net->dev, vq);

	if (wmem < sock->sk->sk_sndbuf / 2)
		tx_poll_stop(net);
	hdr_size = vq->vhost_hlen;
	zcopy = vhost_sock_zcopy(sock);

	for (;;) {
		/* Release DMAs done buffers first */
		if (zcopy)
			vhost_zerocopy_signal_used(vq);

		head = vhost_get_vq_desc(&net->dev, vq, vq->iov,
					 ARRAY_SIZE(vq->iov),
					 &out, &in,
					 NULL, NULL);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* Nothing new?  Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			int num_pends;

			wmem = atomic_read(&sock->sk->sk_wmem_alloc);
			if (wmem >= sock->sk->sk_sndbuf * 3 / 4) {
				tx_poll_start(net, sock);
				set_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
				break;
			}
			/* If too many DMAs are still outstanding, defer the
			 * work. Handle upend_idx wraparound.
			 */
			num_pends = likely(vq->upend_idx >= vq->done_idx) ?
				    (vq->upend_idx - vq->done_idx) :
				    (vq->upend_idx + UIO_MAXIOV - vq->done_idx);
			if (unlikely(num_pends > VHOST_MAX_PEND)) {
				tx_poll_start(net, sock);
				set_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
				break;
			}
			if (unlikely(vhost_enable_notify(&net->dev, vq))) {
				vhost_disable_notify(&net->dev, vq);
				continue;
			}
			break;
		}
		if (in) {
			vq_err(vq, "Unexpected descriptor format for TX: "
			       "out %d, in %d\n", out, in);
			break;
		}
		/* Skip header. TODO: support TSO. */
		s = move_iovec_hdr(vq->iov, vq->hdr, hdr_size, out);
		msg.msg_iovlen = out;
		len = iov_length(vq->iov, out);
		/* Sanity check */
		if (!len) {
			vq_err(vq, "Unexpected header len for TX: "
			       "%zd expected %zd\n",
			       iov_length(vq->hdr, s), hdr_size);
			break;
		}
		/* use msg_control to pass vhost zerocopy ubuf info to skb */
		if (zcopy) {
			vq->heads[vq->upend_idx].id = head;
			if (len < VHOST_GOODCOPY_LEN) {
				/* a copy doesn't need to wait for DMA completion */
				vq->heads[vq->upend_idx].len =
							VHOST_DMA_DONE_LEN;
				msg.msg_control = NULL;
				msg.msg_controllen = 0;
				ubufs = NULL;
			} else {
				struct ubuf_info *ubuf = &vq->ubuf_info[head];

				vq->heads[vq->upend_idx].len = len;
				ubuf->callback = vhost_zerocopy_callback;
				ubuf->ctx = vq->ubufs;
				ubuf->desc = vq->upend_idx;
				msg.msg_control = ubuf;
				msg.msg_controllen = sizeof(ubuf);
				ubufs = vq->ubufs;
				kref_get(&ubufs->kref);
			}
			vq->upend_idx = (vq->upend_idx + 1) % UIO_MAXIOV;
		}
		/* TODO: Check specific error and bomb out unless ENOBUFS? */
		err = sock->ops->sendmsg(NULL, sock, &msg, len);
		if (unlikely(err < 0)) {
			if (zcopy) {
				if (ubufs)
					vhost_ubuf_put(ubufs);
				vq->upend_idx = ((unsigned)vq->upend_idx - 1) %
					UIO_MAXIOV;
			}
			vhost_discard_vq_desc(vq, 1);
			tx_poll_start(net, sock);
			break;
		}
		if (err != len)
			pr_debug("Truncated TX packet: "
				 " len %d != %zd\n", err, len);
		if (!zcopy)
			vhost_add_used_and_signal(&net->dev, vq, head, 0);
		total_len += len;
		if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
			vhost_poll_queue(&vq->poll);
			break;
		}
	}

	mutex_unlock(&vq->mutex);
}
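
/*
 * Zerocopy accounting in handle_tx(), summarized: each zerocopy send claims
 * the upend_idx slot in vq->heads; the slot is marked complete either
 * immediately (short packets are copied and tagged VHOST_DMA_DONE_LEN) or
 * later via vhost_zerocopy_callback() once the lower layers release the skb,
 * and completed slots are reaped by vhost_zerocopy_signal_used() at the top
 * of the loop. When the ring runs dry while more than VHOST_MAX_PEND sends
 * are still in flight, the handler backs off and re-arms socket polling
 * instead of racing ahead.
 */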

static int peek_head_len(struct sock *sk)
{
	struct sk_buff *head;
	int len = 0;
	unsigned long flags;

	spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
	head = skb_peek(&sk->sk_receive_queue);
	if (likely(head)) {
		len = head->len;
		if (vlan_tx_tag_present(head))
			len += VLAN_HLEN;
	}

	spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags);
	return len;
}

/* This is a multi-buffer version of vhost_get_vq_desc(), that works if
 *	vq has read descriptors only.
 * @vq		- the relevant virtqueue
 * @datalen	- data length we'll be reading
 * @iovcount	- returned count of io vectors we fill
 * @log		- vhost log
 * @log_num	- returned number of log entries used
 * @quota	- headcount quota, 1 for big buffers
 *	returns number of buffer heads allocated, negative on error
 */
static int get_rx_bufs(struct vhost_virtqueue *vq,
		       struct vring_used_elem *heads,
		       int datalen,
		       unsigned *iovcount,
		       struct vhost_log *log,
		       unsigned *log_num,
		       unsigned int quota)
{
	unsigned int out, in;
	int seg = 0;
	int headcount = 0;
	unsigned d;
	int r, nlogs = 0;

	while (datalen > 0 && headcount < quota) {
		if (unlikely(seg >= UIO_MAXIOV)) {
			r = -ENOBUFS;
			goto err;
		}
		d = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg,
				      ARRAY_SIZE(vq->iov) - seg, &out,
				      &in, log, log_num);
		if (d == vq->num) {
			r = 0;
			goto err;
		}
		if (unlikely(out || in <= 0)) {
			vq_err(vq, "unexpected descriptor format for RX: "
				"out %d, in %d\n", out, in);
			r = -EINVAL;
			goto err;
		}
		if (unlikely(log)) {
			nlogs += *log_num;
			log += *log_num;
		}
		heads[headcount].id = d;
		heads[headcount].len = iov_length(vq->iov + seg, in);
		datalen -= heads[headcount].len;
		++headcount;
		seg += in;
	}
	heads[headcount - 1].len += datalen;
	*iovcount = seg;
	if (unlikely(log))
		*log_num = nlogs;
	return headcount;
err:
	vhost_discard_vq_desc(vq, headcount);
	return r;
}
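
/*
 * Note on get_rx_bufs(): for mergeable RX buffers the caller passes
 * quota = UIO_MAXIOV, so one packet may span several buffer heads; in
 * "big buffer" mode quota is 1. The last head's len is adjusted by the
 * leftover datalen so that the recorded lengths always sum to exactly the
 * datalen the caller asked for.
 */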

/* Expects to always be run from the workqueue, which acts as
 * a read-side critical section for our kind of RCU. */
static void handle_rx(struct vhost_net *net)
{
	struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_RX];
	unsigned uninitialized_var(in), log;
	struct vhost_log *vq_log;
	struct msghdr msg = {
		.msg_name = NULL,
		.msg_namelen = 0,
		.msg_control = NULL, /* FIXME: get and handle RX aux data. */
		.msg_controllen = 0,
		.msg_iov = vq->iov,
		.msg_flags = MSG_DONTWAIT,
	};
	struct virtio_net_hdr_mrg_rxbuf hdr = {
		.hdr.flags = 0,
		.hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE
	};
	size_t total_len = 0;
	int err, headcount, mergeable;
	size_t vhost_hlen, sock_hlen;
	size_t vhost_len, sock_len;
	/* TODO: check that we are running from vhost_worker? */
	struct socket *sock = rcu_dereference_check(vq->private_data, 1);

	if (!sock)
		return;

	mutex_lock(&vq->mutex);
	vhost_disable_notify(&net->dev, vq);
	vhost_hlen = vq->vhost_hlen;
	sock_hlen = vq->sock_hlen;

	vq_log = unlikely(vhost_has_feature(&net->dev, VHOST_F_LOG_ALL)) ?
		vq->log : NULL;
	mergeable = vhost_has_feature(&net->dev, VIRTIO_NET_F_MRG_RXBUF);

	while ((sock_len = peek_head_len(sock->sk))) {
		sock_len += sock_hlen;
		vhost_len = sock_len + vhost_hlen;
		headcount = get_rx_bufs(vq, vq->heads, vhost_len,
					&in, vq_log, &log,
					likely(mergeable) ? UIO_MAXIOV : 1);
		/* On error, stop handling until the next kick. */
		if (unlikely(headcount < 0))
			break;
		/* OK, now we need to know about added descriptors. */
		if (!headcount) {
			if (unlikely(vhost_enable_notify(&net->dev, vq))) {
				/* They have slipped one in as we were
				 * doing that: check again. */
				vhost_disable_notify(&net->dev, vq);
				continue;
			}
			/* Nothing new?  Wait for eventfd to tell us
			 * they refilled. */
			break;
		}
		/* We don't need to be notified again. */
		if (unlikely((vhost_hlen)))
			/* Skip header. TODO: support TSO. */
			move_iovec_hdr(vq->iov, vq->hdr, vhost_hlen, in);
		else
			/* Copy the header for use in VIRTIO_NET_F_MRG_RXBUF:
			 * needed because recvmsg can modify msg_iov. */
			copy_iovec_hdr(vq->iov, vq->hdr, sock_hlen, in);
		msg.msg_iovlen = in;
		err = sock->ops->recvmsg(NULL, sock, &msg,
					 sock_len, MSG_DONTWAIT | MSG_TRUNC);
		/* Userspace might have consumed the packet meanwhile:
		 * it's not supposed to do this usually, but might be hard
		 * to prevent. Discard data we got (if any) and keep going. */
		if (unlikely(err != sock_len)) {
			pr_debug("Discarded rx packet: "
				 " len %d, expected %zd\n", err, sock_len);
			vhost_discard_vq_desc(vq, headcount);
			continue;
		}
		if (unlikely(vhost_hlen) &&
		    memcpy_toiovecend(vq->hdr, (unsigned char *)&hdr, 0,
				      vhost_hlen)) {
			vq_err(vq, "Unable to write vnet_hdr at addr %p\n",
			       vq->iov->iov_base);
			break;
		}
		/* TODO: Should check and handle checksum. */
		if (likely(mergeable) &&
		    memcpy_toiovecend(vq->hdr, (unsigned char *)&headcount,
				      offsetof(typeof(hdr), num_buffers),
				      sizeof hdr.num_buffers)) {
			vq_err(vq, "Failed num_buffers write");
			vhost_discard_vq_desc(vq, headcount);
			break;
		}
		vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
					    headcount);
		if (unlikely(vq_log))
			vhost_log_write(vq, vq_log, log, vhost_len);
		total_len += vhost_len;
		if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
			vhost_poll_queue(&vq->poll);
			break;
		}
	}

	mutex_unlock(&vq->mutex);
}
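
/*
 * RX path summary: each packet peeked from the socket is mapped onto one or
 * more descriptor heads by get_rx_bufs(), recvmsg() fills them in place, and
 * for VIRTIO_NET_F_MRG_RXBUF the number of buffers actually consumed is
 * patched into the num_buffers field of the virtio header before the heads
 * are returned to the guest with vhost_add_used_and_signal_n().
 */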

static void handle_tx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);

	handle_tx(net);
}

static void handle_rx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);

	handle_rx(net);
}

static void handle_tx_net(struct vhost_work *work)
{
	struct vhost_net *net = container_of(work, struct vhost_net,
					     poll[VHOST_NET_VQ_TX].work);
	handle_tx(net);
}

static void handle_rx_net(struct vhost_work *work)
{
	struct vhost_net *net = container_of(work, struct vhost_net,
					     poll[VHOST_NET_VQ_RX].work);
	handle_rx(net);
}

static int vhost_net_open(struct inode *inode, struct file *f)
{
	struct vhost_net *n = kmalloc(sizeof *n, GFP_KERNEL);
	struct vhost_dev *dev;
	int r;

	if (!n)
		return -ENOMEM;

	dev = &n->dev;
	n->vqs[VHOST_NET_VQ_TX].handle_kick = handle_tx_kick;
	n->vqs[VHOST_NET_VQ_RX].handle_kick = handle_rx_kick;
	r = vhost_dev_init(dev, n->vqs, VHOST_NET_VQ_MAX);
	if (r < 0) {
		kfree(n);
		return r;
	}

	vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, POLLOUT, dev);
	vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, POLLIN, dev);
	n->tx_poll_state = VHOST_NET_POLL_DISABLED;

	f->private_data = n;

	return 0;
}

static void vhost_net_disable_vq(struct vhost_net *n,
				 struct vhost_virtqueue *vq)
{
	if (!vq->private_data)
		return;
	if (vq == n->vqs + VHOST_NET_VQ_TX) {
		tx_poll_stop(n);
		n->tx_poll_state = VHOST_NET_POLL_DISABLED;
	} else
		vhost_poll_stop(n->poll + VHOST_NET_VQ_RX);
}

static void vhost_net_enable_vq(struct vhost_net *n,
				struct vhost_virtqueue *vq)
{
	struct socket *sock;

	sock = rcu_dereference_protected(vq->private_data,
					 lockdep_is_held(&vq->mutex));
	if (!sock)
		return;
	if (vq == n->vqs + VHOST_NET_VQ_TX) {
		n->tx_poll_state = VHOST_NET_POLL_STOPPED;
		tx_poll_start(n, sock);
	} else
		vhost_poll_start(n->poll + VHOST_NET_VQ_RX, sock->file);
}

static struct socket *vhost_net_stop_vq(struct vhost_net *n,
					struct vhost_virtqueue *vq)
{
	struct socket *sock;

	mutex_lock(&vq->mutex);
	sock = rcu_dereference_protected(vq->private_data,
					 lockdep_is_held(&vq->mutex));
	vhost_net_disable_vq(n, vq);
	rcu_assign_pointer(vq->private_data, NULL);
	mutex_unlock(&vq->mutex);
	return sock;
}

static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock,
			   struct socket **rx_sock)
{
	*tx_sock = vhost_net_stop_vq(n, n->vqs + VHOST_NET_VQ_TX);
	*rx_sock = vhost_net_stop_vq(n, n->vqs + VHOST_NET_VQ_RX);
}

static void vhost_net_flush_vq(struct vhost_net *n, int index)
{
	vhost_poll_flush(n->poll + index);
	vhost_poll_flush(&n->dev.vqs[index].poll);
}

static void vhost_net_flush(struct vhost_net *n)
{
	vhost_net_flush_vq(n, VHOST_NET_VQ_TX);
	vhost_net_flush_vq(n, VHOST_NET_VQ_RX);
}

static int vhost_net_release(struct inode *inode, struct file *f)
{
	struct vhost_net *n = f->private_data;
	struct socket *tx_sock;
	struct socket *rx_sock;

	vhost_net_stop(n, &tx_sock, &rx_sock);
	vhost_net_flush(n);
	vhost_dev_cleanup(&n->dev, false);
	if (tx_sock)
		fput(tx_sock->file);
	if (rx_sock)
		fput(rx_sock->file);
	/* We do an extra flush before freeing memory,
	 * since jobs can re-queue themselves. */
	vhost_net_flush(n);
	kfree(n);
	return 0;
}

static struct socket *get_raw_socket(int fd)
{
	struct {
		struct sockaddr_ll sa;
		char  buf[MAX_ADDR_LEN];
	} uaddr;
	int uaddr_len = sizeof uaddr, r;
	struct socket *sock = sockfd_lookup(fd, &r);

	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	/* Parameter checking */
	if (sock->sk->sk_type != SOCK_RAW) {
		r = -ESOCKTNOSUPPORT;
		goto err;
	}

	r = sock->ops->getname(sock, (struct sockaddr *)&uaddr.sa,
			       &uaddr_len, 0);
	if (r)
		goto err;

	if (uaddr.sa.sll_family != AF_PACKET) {
		r = -EPFNOSUPPORT;
		goto err;
	}
	return sock;
err:
	fput(sock->file);
	return ERR_PTR(r);
}

static struct socket *get_tap_socket(int fd)
{
	struct file *file = fget(fd);
	struct socket *sock;

	if (!file)
		return ERR_PTR(-EBADF);
	sock = tun_get_socket(file);
	if (!IS_ERR(sock))
		return sock;
	sock = macvtap_get_socket(file);
	if (IS_ERR(sock))
		fput(file);
	return sock;
}

static struct socket *get_socket(int fd)
{
	struct socket *sock;

	/* special case to disable backend */
	if (fd == -1)
		return NULL;
	sock = get_raw_socket(fd);
	if (!IS_ERR(sock))
		return sock;
	sock = get_tap_socket(fd);
	if (!IS_ERR(sock))
		return sock;
	return ERR_PTR(-ENOTSOCK);
}

static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
{
	struct socket *sock, *oldsock;
	struct vhost_virtqueue *vq;
	struct vhost_ubuf_ref *ubufs, *oldubufs = NULL;
	int r;

	mutex_lock(&n->dev.mutex);
	r = vhost_dev_check_owner(&n->dev);
	if (r)
		goto err;

	if (index >= VHOST_NET_VQ_MAX) {
		r = -ENOBUFS;
		goto err;
	}
	vq = n->vqs + index;
	mutex_lock(&vq->mutex);

	/* Verify that ring has been setup correctly. */
	if (!vhost_vq_access_ok(vq)) {
		r = -EFAULT;
		goto err_vq;
	}
	sock = get_socket(fd);
	if (IS_ERR(sock)) {
		r = PTR_ERR(sock);
		goto err_vq;
	}

	/* start polling new socket */
	oldsock = rcu_dereference_protected(vq->private_data,
					    lockdep_is_held(&vq->mutex));
	if (sock != oldsock) {
		ubufs = vhost_ubuf_alloc(vq, sock && vhost_sock_zcopy(sock));
		if (IS_ERR(ubufs)) {
			r = PTR_ERR(ubufs);
			goto err_ubufs;
		}
		oldubufs = vq->ubufs;
		vq->ubufs = ubufs;
		vhost_net_disable_vq(n, vq);
		rcu_assign_pointer(vq->private_data, sock);
		vhost_net_enable_vq(n, vq);

		r = vhost_init_used(vq);
		if (r)
			goto err_vq;
	}

	mutex_unlock(&vq->mutex);

	if (oldubufs) {
		vhost_ubuf_put_and_wait(oldubufs);
		mutex_lock(&vq->mutex);
		vhost_zerocopy_signal_used(vq);
		mutex_unlock(&vq->mutex);
	}

	if (oldsock) {
		vhost_net_flush_vq(n, index);
		fput(oldsock->file);
	}

	mutex_unlock(&n->dev.mutex);
	return 0;

err_ubufs:
	fput(sock->file);
err_vq:
	mutex_unlock(&vq->mutex);
err:
	mutex_unlock(&n->dev.mutex);
	return r;
}

static long vhost_net_reset_owner(struct vhost_net *n)
{
	struct socket *tx_sock = NULL;
	struct socket *rx_sock = NULL;
	long err;

	mutex_lock(&n->dev.mutex);
	err = vhost_dev_check_owner(&n->dev);
	if (err)
		goto done;
	vhost_net_stop(n, &tx_sock, &rx_sock);
	vhost_net_flush(n);
	err = vhost_dev_reset_owner(&n->dev);
done:
	mutex_unlock(&n->dev.mutex);
	if (tx_sock)
		fput(tx_sock->file);
	if (rx_sock)
		fput(rx_sock->file);
	return err;
}

static int vhost_net_set_features(struct vhost_net *n, u64 features)
{
	size_t vhost_hlen, sock_hlen, hdr_len;
	int i;

	hdr_len = (features & (1 << VIRTIO_NET_F_MRG_RXBUF)) ?
			sizeof(struct virtio_net_hdr_mrg_rxbuf) :
			sizeof(struct virtio_net_hdr);
	if (features & (1 << VHOST_NET_F_VIRTIO_NET_HDR)) {
		/* vhost provides vnet_hdr */
		vhost_hlen = hdr_len;
		sock_hlen = 0;
	} else {
		/* socket provides vnet_hdr */
		vhost_hlen = 0;
		sock_hlen = hdr_len;
	}
	mutex_lock(&n->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&n->dev)) {
		mutex_unlock(&n->dev.mutex);
		return -EFAULT;
	}
	n->dev.acked_features = features;
	smp_wmb();
	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
		mutex_lock(&n->vqs[i].mutex);
		n->vqs[i].vhost_hlen = vhost_hlen;
		n->vqs[i].sock_hlen = sock_hlen;
		mutex_unlock(&n->vqs[i].mutex);
	}
	vhost_net_flush(n);
	mutex_unlock(&n->dev.mutex);
	return 0;
}

static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
			    unsigned long arg)
{
	struct vhost_net *n = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	struct vhost_vring_file backend;
	u64 features;
	int r;

	switch (ioctl) {
	case VHOST_NET_SET_BACKEND:
		if (copy_from_user(&backend, argp, sizeof backend))
			return -EFAULT;
		return vhost_net_set_backend(n, backend.index, backend.fd);
	case VHOST_GET_FEATURES:
		features = VHOST_FEATURES;
		if (copy_to_user(featurep, &features, sizeof features))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, featurep, sizeof features))
			return -EFAULT;
		if (features & ~VHOST_FEATURES)
			return -EOPNOTSUPP;
		return vhost_net_set_features(n, features);
	case VHOST_RESET_OWNER:
		return vhost_net_reset_owner(n);
	default:
		mutex_lock(&n->dev.mutex);
		r = vhost_dev_ioctl(&n->dev, ioctl, arg);
		vhost_net_flush(n);
		mutex_unlock(&n->dev.mutex);
		return r;
	}
}
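
/*
 * Illustrative userspace sequence - a minimal sketch only, not a complete
 * setup: the vring memory layout and kick/call eventfds must also be
 * configured through the generic vhost ioctls handled by vhost_dev_ioctl()
 * above, and tap_fd is assumed to be an already-open tun/tap or macvtap fd.
 *
 *	int vhost = open("/dev/vhost-net", O_RDWR);
 *	ioctl(vhost, VHOST_SET_OWNER, NULL);
 *	uint64_t features;
 *	ioctl(vhost, VHOST_GET_FEATURES, &features);
 *	ioctl(vhost, VHOST_SET_FEATURES, &features);
 *	struct vhost_vring_file backend = { .index = 1, .fd = tap_fd };
 *	ioctl(vhost, VHOST_NET_SET_BACKEND, &backend);	// index 1 == TX vq
 */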

#ifdef CONFIG_COMPAT
static long vhost_net_compat_ioctl(struct file *f, unsigned int ioctl,
				   unsigned long arg)
{
	return vhost_net_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations vhost_net_fops = {
	.owner          = THIS_MODULE,
	.release        = vhost_net_release,
	.unlocked_ioctl = vhost_net_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = vhost_net_compat_ioctl,
#endif
	.open           = vhost_net_open,
	.llseek		= noop_llseek,
};

static struct miscdevice vhost_net_misc = {
	.minor = VHOST_NET_MINOR,
	.name = "vhost-net",
	.fops = &vhost_net_fops,
};

static int vhost_net_init(void)
{
	if (experimental_zcopytx)
		vhost_enable_zcopy(VHOST_NET_VQ_TX);
	return misc_register(&vhost_net_misc);
}
module_init(vhost_net_init);

static void vhost_net_exit(void)
{
	misc_deregister(&vhost_net_misc);
}
module_exit(vhost_net_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel accelerator for virtio net");
MODULE_ALIAS_MISCDEV(VHOST_NET_MINOR);
MODULE_ALIAS("devname:vhost-net");