xref: /openbmc/linux/drivers/vhost/vsock.c (revision e5c86679)
/*
 * vhost transport for vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */
#include <linux/miscdevice.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <net/sock.h>
#include <linux/virtio_vsock.h>
#include <linux/vhost.h>

#include <net/af_vsock.h>
#include "vhost.h"

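/* The well-known host CID defined by the virtio-vsock spec (VMADDR_CID_HOST) */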
#define VHOST_VSOCK_DEFAULT_HOST_CID	2

enum {
	VHOST_VSOCK_FEATURES = VHOST_FEATURES,
};

/* Used to track all the vhost_vsock instances on the system. */
static DEFINE_SPINLOCK(vhost_vsock_lock);
static LIST_HEAD(vhost_vsock_list);

struct vhost_vsock {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[2];

	/* Link to global vhost_vsock_list, protected by vhost_vsock_lock */
	struct list_head list;

	struct vhost_work send_pkt_work;
	spinlock_t send_pkt_list_lock;
	struct list_head send_pkt_list;	/* host->guest pending packets */

	atomic_t queued_replies;

	u32 guest_cid;
};

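/* The host side of every vhost-vsock pairing uses the well-known host CID. */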
static u32 vhost_transport_get_local_cid(void)
{
	return VHOST_VSOCK_DEFAULT_HOST_CID;
}

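/* Must be called with vhost_vsock_lock held. */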
static struct vhost_vsock *__vhost_vsock_get(u32 guest_cid)
{
	struct vhost_vsock *vsock;

	list_for_each_entry(vsock, &vhost_vsock_list, list) {
		u32 other_cid = vsock->guest_cid;

		/* Skip instances that have no CID yet */
		if (other_cid == 0)
			continue;

		if (other_cid == guest_cid)
			return vsock;
	}

	return NULL;
}

static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
{
	struct vhost_vsock *vsock;

	spin_lock_bh(&vhost_vsock_lock);
	vsock = __vhost_vsock_get(guest_cid);
	spin_unlock_bh(&vhost_vsock_lock);

	return vsock;
}

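/*
 * Deliver packets queued on send_pkt_list to the guest: pop buffers off the
 * guest's RX virtqueue, copy the packet header and payload into them, and
 * signal the guest.  Runs from vhost worker context.  If completing replies
 * frees up reply quota, the TX virtqueue is re-polled so it can resume.
 */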
static void
vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
			    struct vhost_virtqueue *vq)
{
	struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
	bool added = false;
	bool restart_tx = false;

	mutex_lock(&vq->mutex);

	if (!vq->private_data)
		goto out;

	/* Avoid further vmexits, we're already processing the virtqueue */
	vhost_disable_notify(&vsock->dev, vq);

	for (;;) {
		struct virtio_vsock_pkt *pkt;
		struct iov_iter iov_iter;
		unsigned out, in;
		size_t nbytes;
		size_t len;
		int head;

		spin_lock_bh(&vsock->send_pkt_list_lock);
		if (list_empty(&vsock->send_pkt_list)) {
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			vhost_enable_notify(&vsock->dev, vq);
			break;
		}

		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		spin_unlock_bh(&vsock->send_pkt_list_lock);

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			break;
		}

		if (head == vq->num) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);

			/* We cannot finish yet if more buffers snuck in while
			 * re-enabling notify.
			 */
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		if (out) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Expected 0 output buffers, got %u\n", out);
			break;
		}

		len = iov_length(&vq->iov[out], in);
		iov_iter_init(&iov_iter, READ, &vq->iov[out], in, len);

		nbytes = copy_to_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
		if (nbytes != sizeof(pkt->hdr)) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Faulted on copying pkt hdr\n");
			break;
		}

		nbytes = copy_to_iter(pkt->buf, pkt->len, &iov_iter);
		if (nbytes != pkt->len) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Faulted on copying pkt buf\n");
			break;
		}

		vhost_add_used(vq, head, sizeof(pkt->hdr) + pkt->len);
		added = true;

		if (pkt->reply) {
			int val;

			val = atomic_dec_return(&vsock->queued_replies);

			/* Do we have resources to resume tx processing? */
			if (val + 1 == tx_vq->num)
				restart_tx = true;
		}

		virtio_transport_free_pkt(pkt);
	}
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);

	if (restart_tx)
		vhost_poll_queue(&tx_vq->poll);
}

static void vhost_transport_send_pkt_work(struct vhost_work *work)
{
	struct vhost_virtqueue *vq;
	struct vhost_vsock *vsock;

	vsock = container_of(work, struct vhost_vsock, send_pkt_work);
	vq = &vsock->vqs[VSOCK_VQ_RX];

	vhost_transport_do_send_pkt(vsock, vq);
}

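/*
 * Queue a packet for delivery to the guest and kick the send worker.
 * Returns the packet length on success so callers can report bytes sent.
 */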
static int
vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
{
	struct vhost_vsock *vsock;
	int len = pkt->len;

	/* Find the vhost_vsock according to the guest context ID */
	vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
	if (!vsock) {
		virtio_transport_free_pkt(pkt);
		return -ENODEV;
	}

	if (pkt->reply)
		atomic_inc(&vsock->queued_replies);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_add_tail(&pkt->list, &vsock->send_pkt_list);
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);
	return len;
}

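/*
 * Drop not-yet-delivered packets belonging to @vsk.  If enough queued
 * replies are discarded to cross back under the tx virtqueue size, tx
 * processing is re-queued so it can resume.
 */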
static int
vhost_transport_cancel_pkt(struct vsock_sock *vsk)
{
	struct vhost_vsock *vsock;
	struct virtio_vsock_pkt *pkt, *n;
	int cnt = 0;
	LIST_HEAD(freeme);

	/* Find the vhost_vsock according to the guest context ID */
	vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
	if (!vsock)
		return -ENODEV;

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
		if (pkt->vsk != vsk)
			continue;
		list_move(&pkt->list, &freeme);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	list_for_each_entry_safe(pkt, n, &freeme, list) {
		if (pkt->reply)
			cnt++;
		list_del(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}

	if (cnt) {
		struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
		int new_cnt;

		new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
		if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num)
			vhost_poll_queue(&tx_vq->poll);
	}

	return 0;
}

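/*
 * Build a virtio_vsock_pkt from the descriptor chain the guest placed on its
 * TX virtqueue: copy the header, validate the payload length, then copy the
 * payload.  Returns NULL on malformed descriptors or allocation failure.
 */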
static struct virtio_vsock_pkt *
vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
		      unsigned int out, unsigned int in)
{
	struct virtio_vsock_pkt *pkt;
	struct iov_iter iov_iter;
	size_t nbytes;
	size_t len;

	if (in != 0) {
		vq_err(vq, "Expected 0 input buffers, got %u\n", in);
		return NULL;
	}

	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return NULL;

	len = iov_length(vq->iov, out);
	iov_iter_init(&iov_iter, WRITE, vq->iov, out, len);

	nbytes = copy_from_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
	if (nbytes != sizeof(pkt->hdr)) {
		vq_err(vq, "Expected %zu bytes for pkt->hdr, got %zu bytes\n",
		       sizeof(pkt->hdr), nbytes);
		kfree(pkt);
		return NULL;
	}

	if (le16_to_cpu(pkt->hdr.type) == VIRTIO_VSOCK_TYPE_STREAM)
		pkt->len = le32_to_cpu(pkt->hdr.len);

	/* No payload */
	if (!pkt->len)
		return pkt;

	/* The pkt is too big */
	if (pkt->len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE) {
		kfree(pkt);
		return NULL;
	}

	pkt->buf = kmalloc(pkt->len, GFP_KERNEL);
	if (!pkt->buf) {
		kfree(pkt);
		return NULL;
	}

	nbytes = copy_from_iter(pkt->buf, pkt->len, &iov_iter);
	if (nbytes != pkt->len) {
		vq_err(vq, "Expected %u byte payload, got %zu bytes\n",
		       pkt->len, nbytes);
		virtio_transport_free_pkt(pkt);
		return NULL;
	}

	return pkt;
}

/* Is there space left for replies to rx packets? */
static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
{
	struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX];
	int val;

	smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
	val = atomic_read(&vsock->queued_replies);

	return val < vq->num;
}

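/*
 * Guest kicked its TX virtqueue: pull out each packet, verify the source CID
 * matches this instance, and hand it to the core virtio transport.  Delivery
 * pauses (with callbacks left disabled) while the reply quota is exhausted.
 */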
static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);
	struct virtio_vsock_pkt *pkt;
	int head;
	unsigned int out, in;
	bool added = false;

	mutex_lock(&vq->mutex);

	if (!vq->private_data)
		goto out;

	vhost_disable_notify(&vsock->dev, vq);
	for (;;) {
		u32 len;

		if (!vhost_vsock_more_replies(vsock)) {
			/* Stop tx until the device processes already
			 * pending replies.  Leave tx virtqueue
			 * callbacks disabled.
			 */
			goto no_more_replies;
		}

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0)
			break;

		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		pkt = vhost_vsock_alloc_pkt(vq, out, in);
		if (!pkt) {
			vq_err(vq, "Faulted on pkt\n");
			continue;
		}

		len = pkt->len;

		/* Only accept correctly addressed packets */
		if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid)
			virtio_transport_recv_pkt(pkt);
		else
			virtio_transport_free_pkt(pkt);

		vhost_add_used(vq, head, sizeof(pkt->hdr) + len);
		added = true;
	}

no_more_replies:
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);
}

static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);

	vhost_transport_do_send_pkt(vsock, vq);
}

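/*
 * VHOST_VSOCK_SET_RUNNING(1): verify virtqueue access and mark both
 * virtqueues live by pointing their private_data at this instance.
 */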
static int vhost_vsock_start(struct vhost_vsock *vsock)
{
	struct vhost_virtqueue *vq;
	size_t i;
	int ret;

	mutex_lock(&vsock->dev.mutex);

	ret = vhost_dev_check_owner(&vsock->dev);
	if (ret)
		goto err;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);

		if (!vhost_vq_access_ok(vq)) {
			ret = -EFAULT;
			goto err_vq;
		}

		if (!vq->private_data) {
			vq->private_data = vsock;
			ret = vhost_vq_init_access(vq);
			if (ret)
				goto err_vq;
		}

		mutex_unlock(&vq->mutex);
	}

	mutex_unlock(&vsock->dev.mutex);
	return 0;

err_vq:
	vq->private_data = NULL;
	mutex_unlock(&vq->mutex);

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vq->private_data = NULL;
		mutex_unlock(&vq->mutex);
	}
err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}

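/* VHOST_VSOCK_SET_RUNNING(0): clear private_data so the worker handlers bail out. */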
static int vhost_vsock_stop(struct vhost_vsock *vsock)
{
	size_t i;
	int ret;

	mutex_lock(&vsock->dev.mutex);

	ret = vhost_dev_check_owner(&vsock->dev);
	if (ret)
		goto err;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		struct vhost_virtqueue *vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vq->private_data = NULL;
		mutex_unlock(&vq->mutex);
	}

err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}

static void vhost_vsock_free(struct vhost_vsock *vsock)
{
	kvfree(vsock);
}

static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
{
	struct vhost_virtqueue **vqs;
	struct vhost_vsock *vsock;
	int ret;

	/* This struct is large and allocation could fail, fall back to vmalloc
	 * if there is no other way.
	 */
	vsock = kzalloc(sizeof(*vsock), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
	if (!vsock) {
		vsock = vmalloc(sizeof(*vsock));
		if (!vsock)
			return -ENOMEM;
	}

	vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		ret = -ENOMEM;
		goto out;
	}

	atomic_set(&vsock->queued_replies, 0);

	vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX];
	vqs[VSOCK_VQ_RX] = &vsock->vqs[VSOCK_VQ_RX];
	vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
	vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;

	vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs));

	file->private_data = vsock;
	spin_lock_init(&vsock->send_pkt_list_lock);
	INIT_LIST_HEAD(&vsock->send_pkt_list);
	vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);

	spin_lock_bh(&vhost_vsock_lock);
	list_add_tail(&vsock->list, &vhost_vsock_list);
	spin_unlock_bh(&vhost_vsock_lock);
	return 0;

out:
	vhost_vsock_free(vsock);
	return ret;
}

static void vhost_vsock_flush(struct vhost_vsock *vsock)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++)
		if (vsock->vqs[i].handle_kick)
			vhost_poll_flush(&vsock->vqs[i].poll);
	vhost_work_flush(&vsock->dev, &vsock->send_pkt_work);
}

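/*
 * Called for each connected socket when an instance goes away: sockets whose
 * peer CID no longer resolves to a vhost_vsock are marked done and reset.
 */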
static void vhost_vsock_reset_orphans(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	/* vmci_transport.c doesn't take sk_lock here either.  At least we're
	 * under vsock_table_lock so the sock cannot disappear while we're
	 * executing.
	 */

	if (!vhost_vsock_get(vsk->remote_addr.svm_cid)) {
		sock_set_flag(sk, SOCK_DONE);
		vsk->peer_shutdown = SHUTDOWN_MASK;
		sk->sk_state = SS_UNCONNECTED;
		sk->sk_err = ECONNRESET;
		sk->sk_error_report(sk);
	}
}

static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
{
	struct vhost_vsock *vsock = file->private_data;

	spin_lock_bh(&vhost_vsock_lock);
	list_del(&vsock->list);
	spin_unlock_bh(&vhost_vsock_lock);

	/* Iterating over all connections for all CIDs to find orphans is
	 * inefficient.  Room for improvement here.
	 */
	vsock_for_each_connected_socket(vhost_vsock_reset_orphans);

	vhost_vsock_stop(vsock);
	vhost_vsock_flush(vsock);
	vhost_dev_stop(&vsock->dev);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	while (!list_empty(&vsock->send_pkt_list)) {
		struct virtio_vsock_pkt *pkt;

		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	vhost_dev_cleanup(&vsock->dev, false);
	kfree(vsock->dev.vqs);
	vhost_vsock_free(vsock);
	return 0;
}

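/*
 * VHOST_VSOCK_SET_GUEST_CID: assign the guest CID for this instance, refusing
 * reserved or 64-bit values and CIDs already claimed by another instance.
 */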
static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
{
	struct vhost_vsock *other;

	/* Refuse reserved CIDs */
	if (guest_cid <= VMADDR_CID_HOST ||
	    guest_cid == U32_MAX)
		return -EINVAL;

	/* 64-bit CIDs are not yet supported */
	if (guest_cid > U32_MAX)
		return -EINVAL;

	/* Refuse if CID is already in use */
	spin_lock_bh(&vhost_vsock_lock);
	other = __vhost_vsock_get(guest_cid);
	if (other && other != vsock) {
		spin_unlock_bh(&vhost_vsock_lock);
		return -EADDRINUSE;
	}
	vsock->guest_cid = guest_cid;
	spin_unlock_bh(&vhost_vsock_lock);

	return 0;
}

static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
{
	struct vhost_virtqueue *vq;
	int i;

	if (features & ~VHOST_VSOCK_FEATURES)
		return -EOPNOTSUPP;

	mutex_lock(&vsock->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&vsock->dev)) {
		mutex_unlock(&vsock->dev.mutex);
		return -EFAULT;
	}

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];
		mutex_lock(&vq->mutex);
		vq->acked_features = features;
		mutex_unlock(&vq->mutex);
	}
	mutex_unlock(&vsock->dev.mutex);
	return 0;
}

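/*
 * ioctl entry point.  A rough sketch of the setup sequence a VMM performs
 * (hedged: guest memory and vring setup via the generic VHOST_SET_* ioctls
 * is elided and varies by VMM):
 *
 *	int fd = open("/dev/vhost-vsock", O_RDWR);
 *	__u64 cid = 3;			// any CID > VMADDR_CID_HOST
 *	int running = 1;
 *
 *	ioctl(fd, VHOST_SET_OWNER);
 *	ioctl(fd, VHOST_VSOCK_SET_GUEST_CID, &cid);
 *	// ... VHOST_SET_MEM_TABLE, VHOST_SET_VRING_* calls here ...
 *	ioctl(fd, VHOST_VSOCK_SET_RUNNING, &running);
 */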
static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
				  unsigned long arg)
{
	struct vhost_vsock *vsock = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 guest_cid;
	u64 features;
	int start;
	int r;

	switch (ioctl) {
	case VHOST_VSOCK_SET_GUEST_CID:
		if (copy_from_user(&guest_cid, argp, sizeof(guest_cid)))
			return -EFAULT;
		return vhost_vsock_set_cid(vsock, guest_cid);
	case VHOST_VSOCK_SET_RUNNING:
		if (copy_from_user(&start, argp, sizeof(start)))
			return -EFAULT;
		if (start)
			return vhost_vsock_start(vsock);
		else
			return vhost_vsock_stop(vsock);
	case VHOST_GET_FEATURES:
		features = VHOST_VSOCK_FEATURES;
		if (copy_to_user(argp, &features, sizeof(features)))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, argp, sizeof(features)))
			return -EFAULT;
		return vhost_vsock_set_features(vsock, features);
	default:
		mutex_lock(&vsock->dev.mutex);
		r = vhost_dev_ioctl(&vsock->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&vsock->dev, ioctl, argp);
		else
			vhost_vsock_flush(vsock);
		mutex_unlock(&vsock->dev.mutex);
		return r;
	}
}

static const struct file_operations vhost_vsock_fops = {
	.owner          = THIS_MODULE,
	.open           = vhost_vsock_dev_open,
	.release        = vhost_vsock_dev_release,
	.llseek		= noop_llseek,
	.unlocked_ioctl = vhost_vsock_dev_ioctl,
};

static struct miscdevice vhost_vsock_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "vhost-vsock",
	.fops = &vhost_vsock_fops,
};

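/*
 * Only CID lookup, packet transmission and cancellation are vhost-specific;
 * everything else is delegated to the common virtio transport code.
 */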
static struct virtio_transport vhost_transport = {
	.transport = {
		.get_local_cid            = vhost_transport_get_local_cid,

		.init                     = virtio_transport_do_socket_init,
		.destruct                 = virtio_transport_destruct,
		.release                  = virtio_transport_release,
		.connect                  = virtio_transport_connect,
		.shutdown                 = virtio_transport_shutdown,
		.cancel_pkt               = vhost_transport_cancel_pkt,

		.dgram_enqueue            = virtio_transport_dgram_enqueue,
		.dgram_dequeue            = virtio_transport_dgram_dequeue,
		.dgram_bind               = virtio_transport_dgram_bind,
		.dgram_allow              = virtio_transport_dgram_allow,

		.stream_enqueue           = virtio_transport_stream_enqueue,
		.stream_dequeue           = virtio_transport_stream_dequeue,
		.stream_has_data          = virtio_transport_stream_has_data,
		.stream_has_space         = virtio_transport_stream_has_space,
		.stream_rcvhiwat          = virtio_transport_stream_rcvhiwat,
		.stream_is_active         = virtio_transport_stream_is_active,
		.stream_allow             = virtio_transport_stream_allow,

		.notify_poll_in           = virtio_transport_notify_poll_in,
		.notify_poll_out          = virtio_transport_notify_poll_out,
		.notify_recv_init         = virtio_transport_notify_recv_init,
		.notify_recv_pre_block    = virtio_transport_notify_recv_pre_block,
		.notify_recv_pre_dequeue  = virtio_transport_notify_recv_pre_dequeue,
		.notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
		.notify_send_init         = virtio_transport_notify_send_init,
		.notify_send_pre_block    = virtio_transport_notify_send_pre_block,
		.notify_send_pre_enqueue  = virtio_transport_notify_send_pre_enqueue,
		.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,

		.set_buffer_size          = virtio_transport_set_buffer_size,
		.set_min_buffer_size      = virtio_transport_set_min_buffer_size,
		.set_max_buffer_size      = virtio_transport_set_max_buffer_size,
		.get_buffer_size          = virtio_transport_get_buffer_size,
		.get_min_buffer_size      = virtio_transport_get_min_buffer_size,
		.get_max_buffer_size      = virtio_transport_get_max_buffer_size,
	},

	.send_pkt = vhost_transport_send_pkt,
};

static int __init vhost_vsock_init(void)
{
	int ret;

	ret = vsock_core_init(&vhost_transport.transport);
	if (ret < 0)
		return ret;
	return misc_register(&vhost_vsock_misc);
}

static void __exit vhost_vsock_exit(void)
{
	misc_deregister(&vhost_vsock_misc);
	vsock_core_exit();
}

module_init(vhost_vsock_init);
module_exit(vhost_vsock_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("vhost transport for vsock");