xref: /openbmc/linux/drivers/vhost/net.c (revision 95e9fd10)
/* Copyright (C) 2009 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * virtio-net server in host kernel.
 */

#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/rcupdate.h>
#include <linux/file.h>
#include <linux/slab.h>

#include <linux/net.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/if_tun.h>
#include <linux/if_macvlan.h>
#include <linux/if_vlan.h>

#include <net/sock.h>

#include "vhost.h"

static int experimental_zcopytx;
module_param(experimental_zcopytx, int, 0444);
MODULE_PARM_DESC(experimental_zcopytx, "Enable Experimental Zero Copy TX");

/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_NET_WEIGHT 0x80000

/* Max number of TX used buffers for outstanding zerocopy */
#define VHOST_MAX_PEND 128
#define VHOST_GOODCOPY_LEN 256

enum {
	VHOST_NET_VQ_RX = 0,
	VHOST_NET_VQ_TX = 1,
	VHOST_NET_VQ_MAX = 2,
};

enum vhost_net_poll_state {
	VHOST_NET_POLL_DISABLED = 0,
	VHOST_NET_POLL_STARTED = 1,
	VHOST_NET_POLL_STOPPED = 2,
};

struct vhost_net {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[VHOST_NET_VQ_MAX];
	struct vhost_poll poll[VHOST_NET_VQ_MAX];
	/* Tells us whether we are polling a socket for TX.
	 * We only do this when socket buffer fills up.
	 * Protected by tx vq lock. */
	enum vhost_net_poll_state tx_poll_state;
};

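/* Zerocopy TX is used only when the module parameter asks for it and the
 * backend socket advertises SOCK_ZEROCOPY. */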
static bool vhost_sock_zcopy(struct socket *sock)
{
	return unlikely(experimental_zcopytx) &&
		sock_flag(sock->sk, SOCK_ZEROCOPY);
}

/* Pop first len bytes from iovec. Return number of segments used. */
static int move_iovec_hdr(struct iovec *from, struct iovec *to,
			  size_t len, int iov_count)
{
	int seg = 0;
	size_t size;

	while (len && seg < iov_count) {
		size = min(from->iov_len, len);
		to->iov_base = from->iov_base;
		to->iov_len = size;
		from->iov_len -= size;
		from->iov_base += size;
		len -= size;
		++from;
		++to;
		++seg;
	}
	return seg;
}

/* Copy iovec entries for len bytes from iovec. */
static void copy_iovec_hdr(const struct iovec *from, struct iovec *to,
			   size_t len, int iovcount)
{
	int seg = 0;
	size_t size;

	while (len && seg < iovcount) {
		size = min(from->iov_len, len);
		to->iov_base = from->iov_base;
		to->iov_len = size;
		len -= size;
		++from;
		++to;
		++seg;
	}
}

/* Caller must have TX VQ lock */
static void tx_poll_stop(struct vhost_net *net)
{
	if (likely(net->tx_poll_state != VHOST_NET_POLL_STARTED))
		return;
	vhost_poll_stop(net->poll + VHOST_NET_VQ_TX);
	net->tx_poll_state = VHOST_NET_POLL_STOPPED;
}

/* Caller must have TX VQ lock */
static void tx_poll_start(struct vhost_net *net, struct socket *sock)
{
	if (unlikely(net->tx_poll_state != VHOST_NET_POLL_STOPPED))
		return;
	vhost_poll_start(net->poll + VHOST_NET_VQ_TX, sock->file);
	net->tx_poll_state = VHOST_NET_POLL_STARTED;
}

/* Expects to always be run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_tx(struct vhost_net *net)
{
	struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_TX];
	unsigned out, in, s;
	int head;
	struct msghdr msg = {
		.msg_name = NULL,
		.msg_namelen = 0,
		.msg_control = NULL,
		.msg_controllen = 0,
		.msg_iov = vq->iov,
		.msg_flags = MSG_DONTWAIT,
	};
	size_t len, total_len = 0;
	int err, wmem;
	size_t hdr_size;
	struct socket *sock;
	struct vhost_ubuf_ref *uninitialized_var(ubufs);
	bool zcopy;

	/* TODO: check that we are running from vhost_worker? */
	sock = rcu_dereference_check(vq->private_data, 1);
	if (!sock)
		return;

	wmem = atomic_read(&sock->sk->sk_wmem_alloc);
	if (wmem >= sock->sk->sk_sndbuf) {
		mutex_lock(&vq->mutex);
		tx_poll_start(net, sock);
		mutex_unlock(&vq->mutex);
		return;
	}

	mutex_lock(&vq->mutex);
	vhost_disable_notify(&net->dev, vq);

	if (wmem < sock->sk->sk_sndbuf / 2)
		tx_poll_stop(net);
	hdr_size = vq->vhost_hlen;
	zcopy = vq->ubufs;

	for (;;) {
		/* Release buffers whose DMA is done first */
		if (zcopy)
			vhost_zerocopy_signal_used(vq);

		head = vhost_get_vq_desc(&net->dev, vq, vq->iov,
					 ARRAY_SIZE(vq->iov),
					 &out, &in,
					 NULL, NULL);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* Nothing new?  Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			int num_pends;

			wmem = atomic_read(&sock->sk->sk_wmem_alloc);
			if (wmem >= sock->sk->sk_sndbuf * 3 / 4) {
				tx_poll_start(net, sock);
				set_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
				break;
			}
			/* If more outstanding DMAs, queue the work.
			 * Handle upend_idx wrap around
			 */
			num_pends = likely(vq->upend_idx >= vq->done_idx) ?
				    (vq->upend_idx - vq->done_idx) :
				    (vq->upend_idx + UIO_MAXIOV - vq->done_idx);
			if (unlikely(num_pends > VHOST_MAX_PEND)) {
				tx_poll_start(net, sock);
				set_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
				break;
			}
			if (unlikely(vhost_enable_notify(&net->dev, vq))) {
				vhost_disable_notify(&net->dev, vq);
				continue;
			}
			break;
		}
		if (in) {
			vq_err(vq, "Unexpected descriptor format for TX: "
			       "out %d, in %d\n", out, in);
			break;
		}
		/* Skip header. TODO: support TSO. */
		s = move_iovec_hdr(vq->iov, vq->hdr, hdr_size, out);
		msg.msg_iovlen = out;
		len = iov_length(vq->iov, out);
		/* Sanity check */
		if (!len) {
			vq_err(vq, "Unexpected header len for TX: "
			       "%zd expected %zd\n",
			       iov_length(vq->hdr, s), hdr_size);
			break;
		}
		/* use msg_control to pass vhost zerocopy ubuf info to skb */
		if (zcopy) {
			vq->heads[vq->upend_idx].id = head;
			if (len < VHOST_GOODCOPY_LEN) {
				/* a copy doesn't need to wait for DMA done */
				vq->heads[vq->upend_idx].len =
							VHOST_DMA_DONE_LEN;
				msg.msg_control = NULL;
				msg.msg_controllen = 0;
				ubufs = NULL;
			} else {
				struct ubuf_info *ubuf = &vq->ubuf_info[head];

				vq->heads[vq->upend_idx].len = len;
				ubuf->callback = vhost_zerocopy_callback;
				ubuf->ctx = vq->ubufs;
				ubuf->desc = vq->upend_idx;
				msg.msg_control = ubuf;
				msg.msg_controllen = sizeof(ubuf);
				ubufs = vq->ubufs;
				kref_get(&ubufs->kref);
			}
			vq->upend_idx = (vq->upend_idx + 1) % UIO_MAXIOV;
		}
		/* TODO: Check specific error and bomb out unless ENOBUFS? */
		err = sock->ops->sendmsg(NULL, sock, &msg, len);
		if (unlikely(err < 0)) {
			if (zcopy) {
				if (ubufs)
					vhost_ubuf_put(ubufs);
				vq->upend_idx = ((unsigned)vq->upend_idx - 1) %
					UIO_MAXIOV;
			}
			vhost_discard_vq_desc(vq, 1);
			if (err == -EAGAIN || err == -ENOBUFS)
				tx_poll_start(net, sock);
			break;
		}
		if (err != len)
			pr_debug("Truncated TX packet: "
				 " len %d != %zd\n", err, len);
		if (!zcopy)
			vhost_add_used_and_signal(&net->dev, vq, head, 0);
		else
			vhost_zerocopy_signal_used(vq);
		total_len += len;
		if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
			vhost_poll_queue(&vq->poll);
			break;
		}
	}

	mutex_unlock(&vq->mutex);
}

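/* Peek at the length of the next packet queued on the socket, including
 * VLAN_HLEN if a VLAN tag still has to be inserted.  Returns 0 if the
 * receive queue is empty. */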
static int peek_head_len(struct sock *sk)
{
	struct sk_buff *head;
	int len = 0;
	unsigned long flags;

	spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
	head = skb_peek(&sk->sk_receive_queue);
	if (likely(head)) {
		len = head->len;
		if (vlan_tx_tag_present(head))
			len += VLAN_HLEN;
	}

	spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags);
	return len;
}

/* This is a multi-buffer version of vhost_get_vq_desc(), which works if
 *	vq has read descriptors only.
 * @vq		- the relevant virtqueue
 * @datalen	- data length we'll be reading
 * @iovcount	- returned count of io vectors we fill
 * @log		- vhost log
 * @log_num	- returned number of log entries filled
 * @quota	- headcount quota, 1 for big buffer
 *	returns number of buffer heads allocated, negative on error
 */
static int get_rx_bufs(struct vhost_virtqueue *vq,
		       struct vring_used_elem *heads,
		       int datalen,
		       unsigned *iovcount,
		       struct vhost_log *log,
		       unsigned *log_num,
		       unsigned int quota)
{
	unsigned int out, in;
	int seg = 0;
	int headcount = 0;
	unsigned d;
	int r, nlogs = 0;

	while (datalen > 0 && headcount < quota) {
		if (unlikely(seg >= UIO_MAXIOV)) {
			r = -ENOBUFS;
			goto err;
		}
		d = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg,
				      ARRAY_SIZE(vq->iov) - seg, &out,
				      &in, log, log_num);
		if (d == vq->num) {
			r = 0;
			goto err;
		}
		if (unlikely(out || in <= 0)) {
			vq_err(vq, "unexpected descriptor format for RX: "
				"out %d, in %d\n", out, in);
			r = -EINVAL;
			goto err;
		}
		if (unlikely(log)) {
			nlogs += *log_num;
			log += *log_num;
		}
		heads[headcount].id = d;
		heads[headcount].len = iov_length(vq->iov + seg, in);
		datalen -= heads[headcount].len;
		++headcount;
		seg += in;
	}
	heads[headcount - 1].len += datalen;
	*iovcount = seg;
	if (unlikely(log))
		*log_num = nlogs;
	return headcount;
err:
	vhost_discard_vq_desc(vq, headcount);
	return r;
}

/* Expects to always be run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_rx(struct vhost_net *net)
{
	struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_RX];
	unsigned uninitialized_var(in), log;
	struct vhost_log *vq_log;
	struct msghdr msg = {
		.msg_name = NULL,
		.msg_namelen = 0,
		.msg_control = NULL, /* FIXME: get and handle RX aux data. */
		.msg_controllen = 0,
		.msg_iov = vq->iov,
		.msg_flags = MSG_DONTWAIT,
	};
	struct virtio_net_hdr_mrg_rxbuf hdr = {
		.hdr.flags = 0,
		.hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE
	};
	size_t total_len = 0;
	int err, headcount, mergeable;
	size_t vhost_hlen, sock_hlen;
	size_t vhost_len, sock_len;
	/* TODO: check that we are running from vhost_worker? */
	struct socket *sock = rcu_dereference_check(vq->private_data, 1);

	if (!sock)
		return;

	mutex_lock(&vq->mutex);
	vhost_disable_notify(&net->dev, vq);
	vhost_hlen = vq->vhost_hlen;
	sock_hlen = vq->sock_hlen;

	vq_log = unlikely(vhost_has_feature(&net->dev, VHOST_F_LOG_ALL)) ?
		vq->log : NULL;
	mergeable = vhost_has_feature(&net->dev, VIRTIO_NET_F_MRG_RXBUF);

	while ((sock_len = peek_head_len(sock->sk))) {
		sock_len += sock_hlen;
		vhost_len = sock_len + vhost_hlen;
		headcount = get_rx_bufs(vq, vq->heads, vhost_len,
					&in, vq_log, &log,
					likely(mergeable) ? UIO_MAXIOV : 1);
		/* On error, stop handling until the next kick. */
		if (unlikely(headcount < 0))
			break;
		/* OK, now we need to know about added descriptors. */
		if (!headcount) {
			if (unlikely(vhost_enable_notify(&net->dev, vq))) {
				/* They have slipped one in as we were
				 * doing that: check again. */
				vhost_disable_notify(&net->dev, vq);
				continue;
			}
			/* Nothing new?  Wait for eventfd to tell us
			 * they refilled. */
			break;
		}
		/* We don't need to be notified again. */
		if (unlikely((vhost_hlen)))
			/* Skip header. TODO: support TSO. */
			move_iovec_hdr(vq->iov, vq->hdr, vhost_hlen, in);
		else
			/* Copy the header for use in VIRTIO_NET_F_MRG_RXBUF:
			 * needed because recvmsg can modify msg_iov. */
			copy_iovec_hdr(vq->iov, vq->hdr, sock_hlen, in);
		msg.msg_iovlen = in;
		err = sock->ops->recvmsg(NULL, sock, &msg,
					 sock_len, MSG_DONTWAIT | MSG_TRUNC);
		/* Userspace might have consumed the packet meanwhile:
		 * it's not supposed to do this usually, but might be hard
		 * to prevent. Discard data we got (if any) and keep going. */
		if (unlikely(err != sock_len)) {
			pr_debug("Discarded rx packet: "
				 " len %d, expected %zd\n", err, sock_len);
			vhost_discard_vq_desc(vq, headcount);
			continue;
		}
		if (unlikely(vhost_hlen) &&
		    memcpy_toiovecend(vq->hdr, (unsigned char *)&hdr, 0,
				      vhost_hlen)) {
			vq_err(vq, "Unable to write vnet_hdr at addr %p\n",
			       vq->iov->iov_base);
			break;
		}
		/* TODO: Should check and handle checksum. */
		if (likely(mergeable) &&
		    memcpy_toiovecend(vq->hdr, (unsigned char *)&headcount,
				      offsetof(typeof(hdr), num_buffers),
				      sizeof hdr.num_buffers)) {
			vq_err(vq, "Failed num_buffers write");
			vhost_discard_vq_desc(vq, headcount);
			break;
		}
		vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
					    headcount);
		if (unlikely(vq_log))
			vhost_log_write(vq, vq_log, log, vhost_len);
		total_len += vhost_len;
		if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
			vhost_poll_queue(&vq->poll);
			break;
		}
	}

	mutex_unlock(&vq->mutex);
}

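/* Guest kicked the TX virtqueue: run the TX path from the vhost worker. */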
static void handle_tx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);

	handle_tx(net);
}

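/* Guest kicked the RX virtqueue: run the RX path from the vhost worker. */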
static void handle_rx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);

	handle_rx(net);
}

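/* The backend socket became writable again (TX poll fired): resume TX. */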
static void handle_tx_net(struct vhost_work *work)
{
	struct vhost_net *net = container_of(work, struct vhost_net,
					     poll[VHOST_NET_VQ_TX].work);
	handle_tx(net);
}

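/* Data arrived on the backend socket (RX poll fired): receive into the guest. */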
static void handle_rx_net(struct vhost_work *work)
{
	struct vhost_net *net = container_of(work, struct vhost_net,
					     poll[VHOST_NET_VQ_RX].work);
	handle_rx(net);
}

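/* Allocate a vhost_net instance, initialize its device, virtqueues and
 * socket pollers, and stash it in the file's private data. */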
static int vhost_net_open(struct inode *inode, struct file *f)
{
	struct vhost_net *n = kmalloc(sizeof *n, GFP_KERNEL);
	struct vhost_dev *dev;
	int r;

	if (!n)
		return -ENOMEM;

	dev = &n->dev;
	n->vqs[VHOST_NET_VQ_TX].handle_kick = handle_tx_kick;
	n->vqs[VHOST_NET_VQ_RX].handle_kick = handle_rx_kick;
	r = vhost_dev_init(dev, n->vqs, VHOST_NET_VQ_MAX);
	if (r < 0) {
		kfree(n);
		return r;
	}

	vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, POLLOUT, dev);
	vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, POLLIN, dev);
	n->tx_poll_state = VHOST_NET_POLL_DISABLED;

	f->private_data = n;

	return 0;
}

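/* Stop polling the backend socket for this virtqueue.  Caller must hold
 * the vq mutex. */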
static void vhost_net_disable_vq(struct vhost_net *n,
				 struct vhost_virtqueue *vq)
{
	if (!vq->private_data)
		return;
	if (vq == n->vqs + VHOST_NET_VQ_TX) {
		tx_poll_stop(n);
		n->tx_poll_state = VHOST_NET_POLL_DISABLED;
	} else
		vhost_poll_stop(n->poll + VHOST_NET_VQ_RX);
}

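/* Resume polling the backend socket for this virtqueue, if one is set.
 * Caller must hold the vq mutex (private_data is dereferenced under it). */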
static void vhost_net_enable_vq(struct vhost_net *n,
				struct vhost_virtqueue *vq)
{
	struct socket *sock;

	sock = rcu_dereference_protected(vq->private_data,
					 lockdep_is_held(&vq->mutex));
	if (!sock)
		return;
	if (vq == n->vqs + VHOST_NET_VQ_TX) {
		n->tx_poll_state = VHOST_NET_POLL_STOPPED;
		tx_poll_start(n, sock);
	} else
		vhost_poll_start(n->poll + VHOST_NET_VQ_RX, sock->file);
}

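/* Detach the backend socket from a virtqueue and stop polling it.  The
 * socket is returned so the caller can drop its reference after flushing;
 * vhost_net_stop() below does this for both virtqueues. */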
static struct socket *vhost_net_stop_vq(struct vhost_net *n,
					struct vhost_virtqueue *vq)
{
	struct socket *sock;

	mutex_lock(&vq->mutex);
	sock = rcu_dereference_protected(vq->private_data,
					 lockdep_is_held(&vq->mutex));
	vhost_net_disable_vq(n, vq);
	rcu_assign_pointer(vq->private_data, NULL);
	mutex_unlock(&vq->mutex);
	return sock;
}

static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock,
			   struct socket **rx_sock)
{
	*tx_sock = vhost_net_stop_vq(n, n->vqs + VHOST_NET_VQ_TX);
	*rx_sock = vhost_net_stop_vq(n, n->vqs + VHOST_NET_VQ_RX);
}

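/* Wait for all pending work on a virtqueue (both the socket poller and
 * the kick handler) to complete. */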
static void vhost_net_flush_vq(struct vhost_net *n, int index)
{
	vhost_poll_flush(n->poll + index);
	vhost_poll_flush(&n->dev.vqs[index].poll);
}

static void vhost_net_flush(struct vhost_net *n)
{
	vhost_net_flush_vq(n, VHOST_NET_VQ_TX);
	vhost_net_flush_vq(n, VHOST_NET_VQ_RX);
}

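/* Release the character device: stop both virtqueues, flush outstanding
 * work, clean up the vhost device and drop the socket references. */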
static int vhost_net_release(struct inode *inode, struct file *f)
{
	struct vhost_net *n = f->private_data;
	struct socket *tx_sock;
	struct socket *rx_sock;

	vhost_net_stop(n, &tx_sock, &rx_sock);
	vhost_net_flush(n);
	vhost_dev_cleanup(&n->dev, false);
	if (tx_sock)
		fput(tx_sock->file);
	if (rx_sock)
		fput(rx_sock->file);
	/* We do an extra flush before freeing memory,
	 * since jobs can re-queue themselves. */
	vhost_net_flush(n);
	kfree(n);
	return 0;
}

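/* Look up a packet socket by fd.  Only AF_PACKET SOCK_RAW sockets are
 * accepted; anything else returns an ERR_PTR. */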
static struct socket *get_raw_socket(int fd)
{
	struct {
		struct sockaddr_ll sa;
		char  buf[MAX_ADDR_LEN];
	} uaddr;
	int uaddr_len = sizeof uaddr, r;
	struct socket *sock = sockfd_lookup(fd, &r);

	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	/* Parameter checking */
	if (sock->sk->sk_type != SOCK_RAW) {
		r = -ESOCKTNOSUPPORT;
		goto err;
	}

	r = sock->ops->getname(sock, (struct sockaddr *)&uaddr.sa,
			       &uaddr_len, 0);
	if (r)
		goto err;

	if (uaddr.sa.sll_family != AF_PACKET) {
		r = -EPFNOSUPPORT;
		goto err;
	}
	return sock;
err:
	fput(sock->file);
	return ERR_PTR(r);
}

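/* Look up the socket behind a tun/tap or macvtap file descriptor. */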
static struct socket *get_tap_socket(int fd)
{
	struct file *file = fget(fd);
	struct socket *sock;

	if (!file)
		return ERR_PTR(-EBADF);
	sock = tun_get_socket(file);
	if (!IS_ERR(sock))
		return sock;
	sock = macvtap_get_socket(file);
	if (IS_ERR(sock))
		fput(file);
	return sock;
}

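/* Resolve a backend fd to a socket: -1 disables the backend, otherwise
 * try raw packet sockets first, then tun/macvtap. */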
static struct socket *get_socket(int fd)
{
	struct socket *sock;

	/* special case to disable backend */
	if (fd == -1)
		return NULL;
	sock = get_raw_socket(fd);
	if (!IS_ERR(sock))
		return sock;
	sock = get_tap_socket(fd);
	if (!IS_ERR(sock))
		return sock;
	return ERR_PTR(-ENOTSOCK);
}

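/* VHOST_NET_SET_BACKEND: attach a new backend socket to a virtqueue,
 * swapping out the old one under the vq mutex and waiting for any
 * outstanding zerocopy buffers before dropping the old socket. */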
static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
{
	struct socket *sock, *oldsock;
	struct vhost_virtqueue *vq;
	struct vhost_ubuf_ref *ubufs, *oldubufs = NULL;
	int r;

	mutex_lock(&n->dev.mutex);
	r = vhost_dev_check_owner(&n->dev);
	if (r)
		goto err;

	if (index >= VHOST_NET_VQ_MAX) {
		r = -ENOBUFS;
		goto err;
	}
	vq = n->vqs + index;
	mutex_lock(&vq->mutex);

	/* Verify that ring has been setup correctly. */
	if (!vhost_vq_access_ok(vq)) {
		r = -EFAULT;
		goto err_vq;
	}
	sock = get_socket(fd);
	if (IS_ERR(sock)) {
		r = PTR_ERR(sock);
		goto err_vq;
	}

	/* start polling new socket */
	oldsock = rcu_dereference_protected(vq->private_data,
					    lockdep_is_held(&vq->mutex));
	if (sock != oldsock) {
		ubufs = vhost_ubuf_alloc(vq, sock && vhost_sock_zcopy(sock));
		if (IS_ERR(ubufs)) {
			r = PTR_ERR(ubufs);
			goto err_ubufs;
		}
		oldubufs = vq->ubufs;
		vq->ubufs = ubufs;
		vhost_net_disable_vq(n, vq);
		rcu_assign_pointer(vq->private_data, sock);
		vhost_net_enable_vq(n, vq);

		r = vhost_init_used(vq);
		if (r)
			goto err_vq;
	}

	mutex_unlock(&vq->mutex);

	if (oldubufs) {
		vhost_ubuf_put_and_wait(oldubufs);
		mutex_lock(&vq->mutex);
		vhost_zerocopy_signal_used(vq);
		mutex_unlock(&vq->mutex);
	}

	if (oldsock) {
		vhost_net_flush_vq(n, index);
		fput(oldsock->file);
	}

	mutex_unlock(&n->dev.mutex);
	return 0;

err_ubufs:
	fput(sock->file);
err_vq:
	mutex_unlock(&vq->mutex);
err:
	mutex_unlock(&n->dev.mutex);
	return r;
}

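/* VHOST_RESET_OWNER: detach both backends, flush pending work and reset
 * device ownership. */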
static long vhost_net_reset_owner(struct vhost_net *n)
{
	struct socket *tx_sock = NULL;
	struct socket *rx_sock = NULL;
	long err;

	mutex_lock(&n->dev.mutex);
	err = vhost_dev_check_owner(&n->dev);
	if (err)
		goto done;
	vhost_net_stop(n, &tx_sock, &rx_sock);
	vhost_net_flush(n);
	err = vhost_dev_reset_owner(&n->dev);
done:
	mutex_unlock(&n->dev.mutex);
	if (tx_sock)
		fput(tx_sock->file);
	if (rx_sock)
		fput(rx_sock->file);
	return err;
}

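/* VHOST_SET_FEATURES: pick the vnet header layout implied by the feature
 * bits and propagate the header sizes to both virtqueues. */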
static int vhost_net_set_features(struct vhost_net *n, u64 features)
{
	size_t vhost_hlen, sock_hlen, hdr_len;
	int i;

	hdr_len = (features & (1 << VIRTIO_NET_F_MRG_RXBUF)) ?
			sizeof(struct virtio_net_hdr_mrg_rxbuf) :
			sizeof(struct virtio_net_hdr);
	if (features & (1 << VHOST_NET_F_VIRTIO_NET_HDR)) {
		/* vhost provides vnet_hdr */
		vhost_hlen = hdr_len;
		sock_hlen = 0;
	} else {
		/* socket provides vnet_hdr */
		vhost_hlen = 0;
		sock_hlen = hdr_len;
	}
	mutex_lock(&n->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&n->dev)) {
		mutex_unlock(&n->dev.mutex);
		return -EFAULT;
	}
	n->dev.acked_features = features;
	smp_wmb();
	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
		mutex_lock(&n->vqs[i].mutex);
		n->vqs[i].vhost_hlen = vhost_hlen;
		n->vqs[i].sock_hlen = sock_hlen;
		mutex_unlock(&n->vqs[i].mutex);
	}
	vhost_net_flush(n);
	mutex_unlock(&n->dev.mutex);
	return 0;
}

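/* ioctl dispatch for /dev/vhost-net.  The net-specific requests are handled
 * here; everything else falls through to the generic vhost_dev_ioctl().
 *
 * A minimal userspace setup sequence looks roughly like the following
 * (illustrative sketch only; memory table and vring setup via the generic
 * VHOST_SET_* ioctls are elided, and tap_fd is assumed to be an already
 * configured tap device):
 *
 *	int vhost = open("/dev/vhost-net", O_RDWR);
 *	ioctl(vhost, VHOST_SET_OWNER);
 *	... VHOST_SET_MEM_TABLE, VHOST_SET_VRING_{NUM,ADDR,BASE,KICK,CALL} ...
 *	struct vhost_vring_file backend = { .index = VHOST_NET_VQ_RX,
 *					    .fd = tap_fd };
 *	ioctl(vhost, VHOST_NET_SET_BACKEND, &backend);
 */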
static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
			    unsigned long arg)
{
	struct vhost_net *n = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	struct vhost_vring_file backend;
	u64 features;
	int r;

	switch (ioctl) {
	case VHOST_NET_SET_BACKEND:
		if (copy_from_user(&backend, argp, sizeof backend))
			return -EFAULT;
		return vhost_net_set_backend(n, backend.index, backend.fd);
	case VHOST_GET_FEATURES:
		features = VHOST_NET_FEATURES;
		if (copy_to_user(featurep, &features, sizeof features))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, featurep, sizeof features))
			return -EFAULT;
		if (features & ~VHOST_NET_FEATURES)
			return -EOPNOTSUPP;
		return vhost_net_set_features(n, features);
	case VHOST_RESET_OWNER:
		return vhost_net_reset_owner(n);
	default:
		mutex_lock(&n->dev.mutex);
		r = vhost_dev_ioctl(&n->dev, ioctl, arg);
		vhost_net_flush(n);
		mutex_unlock(&n->dev.mutex);
		return r;
	}
}

#ifdef CONFIG_COMPAT
static long vhost_net_compat_ioctl(struct file *f, unsigned int ioctl,
				   unsigned long arg)
{
	return vhost_net_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations vhost_net_fops = {
	.owner          = THIS_MODULE,
	.release        = vhost_net_release,
	.unlocked_ioctl = vhost_net_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = vhost_net_compat_ioctl,
#endif
	.open           = vhost_net_open,
	.llseek		= noop_llseek,
};

static struct miscdevice vhost_net_misc = {
	.minor = VHOST_NET_MINOR,
	.name = "vhost-net",
	.fops = &vhost_net_fops,
};

static int vhost_net_init(void)
{
	if (experimental_zcopytx)
		vhost_enable_zcopy(VHOST_NET_VQ_TX);
	return misc_register(&vhost_net_misc);
}
module_init(vhost_net_init);

static void vhost_net_exit(void)
{
	misc_deregister(&vhost_net_misc);
}
module_exit(vhost_net_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel accelerator for virtio net");
MODULE_ALIAS_MISCDEV(VHOST_NET_MINOR);
MODULE_ALIAS("devname:vhost-net");