/*
 * vhost transport for vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */
#include <linux/miscdevice.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <net/sock.h>
#include <linux/virtio_vsock.h>
#include <linux/vhost.h>
#include <linux/hashtable.h>

#include <net/af_vsock.h>
#include "vhost.h"

#define VHOST_VSOCK_DEFAULT_HOST_CID	2
/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_VSOCK_WEIGHT 0x80000
/* Max number of packets transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others with
 * small pkts.
 */
#define VHOST_VSOCK_PKT_WEIGHT 256

enum {
	VHOST_VSOCK_FEATURES = VHOST_FEATURES,
};

/* Used to track all the vhost_vsock instances on the system. */
static DEFINE_MUTEX(vhost_vsock_mutex);
static DEFINE_READ_MOSTLY_HASHTABLE(vhost_vsock_hash, 8);

struct vhost_vsock {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[2];

	/* Link to global vhost_vsock_hash, writes use vhost_vsock_mutex */
	struct hlist_node hash;

	struct vhost_work send_pkt_work;
	spinlock_t send_pkt_list_lock;
	struct list_head send_pkt_list;	/* host->guest pending packets */

	atomic_t queued_replies;

	u32 guest_cid;
};

static u32 vhost_transport_get_local_cid(void)
{
	return VHOST_VSOCK_DEFAULT_HOST_CID;
}

/* Callers that dereference the return value must hold vhost_vsock_mutex or the
 * RCU read lock.
 */
static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
{
	struct vhost_vsock *vsock;

	hash_for_each_possible_rcu(vhost_vsock_hash, vsock, hash, guest_cid) {
		u32 other_cid = vsock->guest_cid;

		/* Skip instances that have no CID yet */
		if (other_cid == 0)
			continue;

		if (other_cid == guest_cid)
			return vsock;
	}

	return NULL;
}

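/* Drain vsock->send_pkt_list (host->guest packets) into the guest's RX
 * virtqueue.  Runs with vq->mutex held; the weight limits bound each pass so
 * one virtqueue cannot starve the others.
 */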
static void
vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
			    struct vhost_virtqueue *vq)
{
	struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
	int pkts = 0, total_len = 0;
	bool added = false;
	bool restart_tx = false;

	mutex_lock(&vq->mutex);

	if (!vq->private_data)
		goto out;

	/* Avoid further vmexits, we're already processing the virtqueue */
	vhost_disable_notify(&vsock->dev, vq);

	do {
		struct virtio_vsock_pkt *pkt;
		struct iov_iter iov_iter;
		unsigned out, in;
		size_t nbytes;
		size_t len;
		int head;

		spin_lock_bh(&vsock->send_pkt_list_lock);
		if (list_empty(&vsock->send_pkt_list)) {
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			vhost_enable_notify(&vsock->dev, vq);
			break;
		}

		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		spin_unlock_bh(&vsock->send_pkt_list_lock);

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			break;
		}

		if (head == vq->num) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);

			/* We cannot finish yet if more buffers snuck in while
			 * re-enabling notify.
			 */
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		if (out) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Expected 0 output buffers, got %u\n", out);
			break;
		}

		len = iov_length(&vq->iov[out], in);
		iov_iter_init(&iov_iter, READ, &vq->iov[out], in, len);

		nbytes = copy_to_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
		if (nbytes != sizeof(pkt->hdr)) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Faulted on copying pkt hdr\n");
			break;
		}

		nbytes = copy_to_iter(pkt->buf, pkt->len, &iov_iter);
		if (nbytes != pkt->len) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Faulted on copying pkt buf\n");
			break;
		}

		vhost_add_used(vq, head, sizeof(pkt->hdr) + pkt->len);
		added = true;

		if (pkt->reply) {
			int val;

			val = atomic_dec_return(&vsock->queued_replies);

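			/* Replies consume guest TX buffers; once
			 * queued_replies reaches tx_vq->num,
			 * vhost_vsock_more_replies() stops TX processing, so
			 * detect when this decrement drops us back below the
			 * limit.
			 */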
			/* Do we have resources to resume tx processing? */
			if (val + 1 == tx_vq->num)
				restart_tx = true;
		}

		/* Deliver to monitoring devices all correctly transmitted
		 * packets.
		 */
		virtio_transport_deliver_tap_pkt(pkt);

		total_len += pkt->len;
		virtio_transport_free_pkt(pkt);
	} while (likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);

	if (restart_tx)
		vhost_poll_queue(&tx_vq->poll);
}

static void vhost_transport_send_pkt_work(struct vhost_work *work)
{
	struct vhost_virtqueue *vq;
	struct vhost_vsock *vsock;

	vsock = container_of(work, struct vhost_vsock, send_pkt_work);
	vq = &vsock->vqs[VSOCK_VQ_RX];

	vhost_transport_do_send_pkt(vsock, vq);
}

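/* Queue a host->guest packet: look up the destination instance by guest CID
 * under RCU, append the packet to send_pkt_list and kick the worker.
 * Returns the packet length on success or -ENODEV if no guest with that CID
 * is attached.
 */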
static int
vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
{
	struct vhost_vsock *vsock;
	int len = pkt->len;

	rcu_read_lock();

	/* Find the vhost_vsock according to guest context id */
	vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
	if (!vsock) {
		rcu_read_unlock();
		virtio_transport_free_pkt(pkt);
		return -ENODEV;
	}

	if (pkt->reply)
		atomic_inc(&vsock->queued_replies);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_add_tail(&pkt->list, &vsock->send_pkt_list);
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);

	rcu_read_unlock();
	return len;
}

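/* Drop any packets queued for @vsk that have not yet been handed to the
 * guest, e.g. because the socket is being torn down while data is pending.
 */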
static int
vhost_transport_cancel_pkt(struct vsock_sock *vsk)
{
	struct vhost_vsock *vsock;
	struct virtio_vsock_pkt *pkt, *n;
	int cnt = 0;
	int ret = -ENODEV;
	LIST_HEAD(freeme);

	rcu_read_lock();

	/* Find the vhost_vsock according to guest context id */
	vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
	if (!vsock)
		goto out;

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
		if (pkt->vsk != vsk)
			continue;
		list_move(&pkt->list, &freeme);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	list_for_each_entry_safe(pkt, n, &freeme, list) {
		if (pkt->reply)
			cnt++;
		list_del(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}

	if (cnt) {
		struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
		int new_cnt;

		new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
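		/* Restart TX only if removing these replies moved the counter
		 * from at-or-above the limit to below it; otherwise TX was
		 * never throttled or is still throttled.
		 */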
		if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num)
			vhost_poll_queue(&tx_vq->poll);
	}

	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

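/* Read one guest->host packet out of a TX virtqueue descriptor chain: copy
 * the header first, then a payload of at most VIRTIO_VSOCK_MAX_PKT_BUF_SIZE
 * bytes.  Returns NULL on malformed input or allocation failure.
 */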
static struct virtio_vsock_pkt *
vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
		      unsigned int out, unsigned int in)
{
	struct virtio_vsock_pkt *pkt;
	struct iov_iter iov_iter;
	size_t nbytes;
	size_t len;

	if (in != 0) {
		vq_err(vq, "Expected 0 input buffers, got %u\n", in);
		return NULL;
	}

	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return NULL;

	len = iov_length(vq->iov, out);
	iov_iter_init(&iov_iter, WRITE, vq->iov, out, len);

	nbytes = copy_from_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
	if (nbytes != sizeof(pkt->hdr)) {
		vq_err(vq, "Expected %zu bytes for pkt->hdr, got %zu bytes\n",
		       sizeof(pkt->hdr), nbytes);
		kfree(pkt);
		return NULL;
	}

	if (le16_to_cpu(pkt->hdr.type) == VIRTIO_VSOCK_TYPE_STREAM)
		pkt->len = le32_to_cpu(pkt->hdr.len);

	/* No payload */
	if (!pkt->len)
		return pkt;

	/* The pkt is too big */
	if (pkt->len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE) {
		kfree(pkt);
		return NULL;
	}

	pkt->buf = kmalloc(pkt->len, GFP_KERNEL);
	if (!pkt->buf) {
		kfree(pkt);
		return NULL;
	}

	nbytes = copy_from_iter(pkt->buf, pkt->len, &iov_iter);
	if (nbytes != pkt->len) {
		vq_err(vq, "Expected %u byte payload, got %zu bytes\n",
		       pkt->len, nbytes);
		virtio_transport_free_pkt(pkt);
		return NULL;
	}

	return pkt;
}

/* Is there space left for replies to rx packets? */
static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
{
	struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX];
	int val;

	smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
	val = atomic_read(&vsock->queued_replies);

	return val < vq->num;
}

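/* Guest->host path: the guest kicked the TX virtqueue, so pull packets out
 * of it and feed them to the core vsock receive code, throttling while too
 * many replies are outstanding.
 */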
static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);
	struct virtio_vsock_pkt *pkt;
	int head, pkts = 0, total_len = 0;
	unsigned int out, in;
	bool added = false;

	mutex_lock(&vq->mutex);

	if (!vq->private_data)
		goto out;

	vhost_disable_notify(&vsock->dev, vq);
	do {
		u32 len;

		if (!vhost_vsock_more_replies(vsock)) {
			/* Stop tx until the device processes already
			 * pending replies.  Leave tx virtqueue
			 * callbacks disabled.
			 */
			goto no_more_replies;
		}

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0)
			break;

		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		pkt = vhost_vsock_alloc_pkt(vq, out, in);
		if (!pkt) {
			vq_err(vq, "Faulted on pkt\n");
			continue;
		}

		len = pkt->len;

		/* Deliver to monitoring devices all received packets */
		virtio_transport_deliver_tap_pkt(pkt);

		/* Only accept correctly addressed packets */
		if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid)
			virtio_transport_recv_pkt(pkt);
		else
			virtio_transport_free_pkt(pkt);

		len += sizeof(pkt->hdr);
		vhost_add_used(vq, head, len);
		total_len += len;
		added = true;
	} while (likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));

no_more_replies:
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);
}

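/* The guest kicked the RX virtqueue, i.e. it made more receive buffers
 * available, so retry delivering any packets still on send_pkt_list.
 */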
static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);

	vhost_transport_do_send_pkt(vsock, vq);
}

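/* VHOST_VSOCK_SET_RUNNING(1): bring the virtqueues live by installing vsock
 * as their private_data.  Only the process that owns the vhost device may do
 * this, and every vring must already be accessible.
 */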
static int vhost_vsock_start(struct vhost_vsock *vsock)
{
	struct vhost_virtqueue *vq;
	size_t i;
	int ret;

	mutex_lock(&vsock->dev.mutex);

	ret = vhost_dev_check_owner(&vsock->dev);
	if (ret)
		goto err;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);

		if (!vhost_vq_access_ok(vq)) {
			ret = -EFAULT;
			goto err_vq;
		}

		if (!vq->private_data) {
			vq->private_data = vsock;
			ret = vhost_vq_init_access(vq);
			if (ret)
				goto err_vq;
		}

		mutex_unlock(&vq->mutex);
	}

	mutex_unlock(&vsock->dev.mutex);
	return 0;

err_vq:
	vq->private_data = NULL;
	mutex_unlock(&vq->mutex);

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vq->private_data = NULL;
		mutex_unlock(&vq->mutex);
	}
err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}

static int vhost_vsock_stop(struct vhost_vsock *vsock)
{
	size_t i;
	int ret;

	mutex_lock(&vsock->dev.mutex);

	ret = vhost_dev_check_owner(&vsock->dev);
	if (ret)
		goto err;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		struct vhost_virtqueue *vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vq->private_data = NULL;
		mutex_unlock(&vq->mutex);
	}

err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}

static void vhost_vsock_free(struct vhost_vsock *vsock)
{
	kvfree(vsock);
}

static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
{
	struct vhost_virtqueue **vqs;
	struct vhost_vsock *vsock;
	int ret;

	/* This struct is large and allocation could fail, fall back to vmalloc
	 * if there is no other way.
	 */
	vsock = kvmalloc(sizeof(*vsock), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!vsock)
		return -ENOMEM;

	vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		ret = -ENOMEM;
		goto out;
	}

	vsock->guest_cid = 0; /* no CID assigned yet */

	atomic_set(&vsock->queued_replies, 0);

	vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX];
	vqs[VSOCK_VQ_RX] = &vsock->vqs[VSOCK_VQ_RX];
	vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
	vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;

	vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs),
		       UIO_MAXIOV, VHOST_VSOCK_PKT_WEIGHT,
		       VHOST_VSOCK_WEIGHT);

	file->private_data = vsock;
	spin_lock_init(&vsock->send_pkt_list_lock);
	INIT_LIST_HEAD(&vsock->send_pkt_list);
	vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);
	return 0;

out:
	vhost_vsock_free(vsock);
	return ret;
}

static void vhost_vsock_flush(struct vhost_vsock *vsock)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++)
		if (vsock->vqs[i].handle_kick)
			vhost_poll_flush(&vsock->vqs[i].poll);
	vhost_work_flush(&vsock->dev, &vsock->send_pkt_work);
}

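/* Invoked via vsock_for_each_connected_socket() on device release: reset any
 * socket whose peer CID no longer maps to a live vhost_vsock instance.
 */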
static void vhost_vsock_reset_orphans(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	/* vmci_transport.c doesn't take sk_lock here either.  At least we're
	 * under vsock_table_lock so the sock cannot disappear while we're
	 * executing.
	 */

	/* If the peer is still valid, no need to reset connection */
	if (vhost_vsock_get(vsk->remote_addr.svm_cid))
		return;

	/* If the close timeout is pending, let it expire.  This avoids races
	 * with the timeout callback.
	 */
	if (vsk->close_work_scheduled)
		return;

	sock_set_flag(sk, SOCK_DONE);
	vsk->peer_shutdown = SHUTDOWN_MASK;
	sk->sk_state = SS_UNCONNECTED;
	sk->sk_err = ECONNRESET;
	sk->sk_error_report(sk);
}

static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
{
	struct vhost_vsock *vsock = file->private_data;

	mutex_lock(&vhost_vsock_mutex);
	if (vsock->guest_cid)
		hash_del_rcu(&vsock->hash);
	mutex_unlock(&vhost_vsock_mutex);

	/* Wait for other CPUs to finish using vsock */
	synchronize_rcu();

	/* Iterating over all connections for all CIDs to find orphans is
	 * inefficient.  Room for improvement here. */
	vsock_for_each_connected_socket(vhost_vsock_reset_orphans);

	vhost_vsock_stop(vsock);
	vhost_vsock_flush(vsock);
	vhost_dev_stop(&vsock->dev);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	while (!list_empty(&vsock->send_pkt_list)) {
		struct virtio_vsock_pkt *pkt;

		pkt = list_first_entry(&vsock->send_pkt_list,
				struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	vhost_dev_cleanup(&vsock->dev);
	kfree(vsock->dev.vqs);
	vhost_vsock_free(vsock);
	return 0;
}

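/* Assign the guest CID.  CIDs at or below VMADDR_CID_HOST are reserved,
 * U32_MAX is VMADDR_CID_ANY, and 64-bit CIDs are not yet supported; a CID
 * already claimed by another instance is refused with -EADDRINUSE.
 */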
static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
{
	struct vhost_vsock *other;

	/* Refuse reserved CIDs */
	if (guest_cid <= VMADDR_CID_HOST ||
	    guest_cid == U32_MAX)
		return -EINVAL;

	/* 64-bit CIDs are not yet supported */
	if (guest_cid > U32_MAX)
		return -EINVAL;

	/* Refuse if CID is already in use */
	mutex_lock(&vhost_vsock_mutex);
	other = vhost_vsock_get(guest_cid);
	if (other && other != vsock) {
		mutex_unlock(&vhost_vsock_mutex);
		return -EADDRINUSE;
	}

	if (vsock->guest_cid)
		hash_del_rcu(&vsock->hash);

	vsock->guest_cid = guest_cid;
	hash_add_rcu(vhost_vsock_hash, &vsock->hash, vsock->guest_cid);
	mutex_unlock(&vhost_vsock_mutex);

	return 0;
}

static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
{
	struct vhost_virtqueue *vq;
	int i;

	if (features & ~VHOST_VSOCK_FEATURES)
		return -EOPNOTSUPP;

	mutex_lock(&vsock->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&vsock->dev)) {
		mutex_unlock(&vsock->dev.mutex);
		return -EFAULT;
	}

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];
		mutex_lock(&vq->mutex);
		vq->acked_features = features;
		mutex_unlock(&vq->mutex);
	}
	mutex_unlock(&vsock->dev.mutex);
	return 0;
}

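/* Typical userspace setup, as a minimal sketch (the exact ordering and the
 * vring configuration details depend on the VMM):
 *
 *	fd = open("/dev/vhost-vsock", O_RDWR);
 *	ioctl(fd, VHOST_SET_OWNER);                 // generic vhost ioctl
 *	ioctl(fd, VHOST_SET_FEATURES, &features);
 *	ioctl(fd, VHOST_VSOCK_SET_GUEST_CID, &cid); // cid is a u64
 *	// ...VHOST_SET_VRING_NUM/ADDR/BASE/KICK/CALL for each virtqueue...
 *	ioctl(fd, VHOST_VSOCK_SET_RUNNING, &one);   // one is a non-zero int
 */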
static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
				  unsigned long arg)
{
	struct vhost_vsock *vsock = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 guest_cid;
	u64 features;
	int start;
	int r;

	switch (ioctl) {
	case VHOST_VSOCK_SET_GUEST_CID:
		if (copy_from_user(&guest_cid, argp, sizeof(guest_cid)))
			return -EFAULT;
		return vhost_vsock_set_cid(vsock, guest_cid);
	case VHOST_VSOCK_SET_RUNNING:
		if (copy_from_user(&start, argp, sizeof(start)))
			return -EFAULT;
		if (start)
			return vhost_vsock_start(vsock);
		else
			return vhost_vsock_stop(vsock);
	case VHOST_GET_FEATURES:
		features = VHOST_VSOCK_FEATURES;
		if (copy_to_user(argp, &features, sizeof(features)))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, argp, sizeof(features)))
			return -EFAULT;
		return vhost_vsock_set_features(vsock, features);
	default:
		mutex_lock(&vsock->dev.mutex);
		r = vhost_dev_ioctl(&vsock->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&vsock->dev, ioctl, argp);
		else
			vhost_vsock_flush(vsock);
		mutex_unlock(&vsock->dev.mutex);
		return r;
	}
}

#ifdef CONFIG_COMPAT
static long vhost_vsock_dev_compat_ioctl(struct file *f, unsigned int ioctl,
					 unsigned long arg)
{
	return vhost_vsock_dev_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations vhost_vsock_fops = {
	.owner          = THIS_MODULE,
	.open           = vhost_vsock_dev_open,
	.release        = vhost_vsock_dev_release,
	.llseek		= noop_llseek,
	.unlocked_ioctl = vhost_vsock_dev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = vhost_vsock_dev_compat_ioctl,
#endif
};

static struct miscdevice vhost_vsock_misc = {
	.minor = VHOST_VSOCK_MINOR,
	.name = "vhost-vsock",
	.fops = &vhost_vsock_fops,
};

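/* The transport ops handed to the vsock core: most callbacks are the shared
 * virtio transport implementations; only CID lookup, packet transmission and
 * cancellation are vhost-specific.
 */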
static struct virtio_transport vhost_transport = {
	.transport = {
		.get_local_cid            = vhost_transport_get_local_cid,

		.init                     = virtio_transport_do_socket_init,
		.destruct                 = virtio_transport_destruct,
		.release                  = virtio_transport_release,
		.connect                  = virtio_transport_connect,
		.shutdown                 = virtio_transport_shutdown,
		.cancel_pkt               = vhost_transport_cancel_pkt,

		.dgram_enqueue            = virtio_transport_dgram_enqueue,
		.dgram_dequeue            = virtio_transport_dgram_dequeue,
		.dgram_bind               = virtio_transport_dgram_bind,
		.dgram_allow              = virtio_transport_dgram_allow,

		.stream_enqueue           = virtio_transport_stream_enqueue,
		.stream_dequeue           = virtio_transport_stream_dequeue,
		.stream_has_data          = virtio_transport_stream_has_data,
		.stream_has_space         = virtio_transport_stream_has_space,
		.stream_rcvhiwat          = virtio_transport_stream_rcvhiwat,
		.stream_is_active         = virtio_transport_stream_is_active,
		.stream_allow             = virtio_transport_stream_allow,

		.notify_poll_in           = virtio_transport_notify_poll_in,
		.notify_poll_out          = virtio_transport_notify_poll_out,
		.notify_recv_init         = virtio_transport_notify_recv_init,
		.notify_recv_pre_block    = virtio_transport_notify_recv_pre_block,
		.notify_recv_pre_dequeue  = virtio_transport_notify_recv_pre_dequeue,
		.notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
		.notify_send_init         = virtio_transport_notify_send_init,
		.notify_send_pre_block    = virtio_transport_notify_send_pre_block,
		.notify_send_pre_enqueue  = virtio_transport_notify_send_pre_enqueue,
		.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,

		.set_buffer_size          = virtio_transport_set_buffer_size,
		.set_min_buffer_size      = virtio_transport_set_min_buffer_size,
		.set_max_buffer_size      = virtio_transport_set_max_buffer_size,
		.get_buffer_size          = virtio_transport_get_buffer_size,
		.get_min_buffer_size      = virtio_transport_get_min_buffer_size,
		.get_max_buffer_size      = virtio_transport_get_max_buffer_size,
	},

	.send_pkt = vhost_transport_send_pkt,
};

static int __init vhost_vsock_init(void)
{
	int ret;

	ret = vsock_core_init(&vhost_transport.transport);
	if (ret < 0)
		return ret;
	return misc_register(&vhost_vsock_misc);
}

static void __exit vhost_vsock_exit(void)
{
	misc_deregister(&vhost_vsock_misc);
	vsock_core_exit();
}

module_init(vhost_vsock_init);
module_exit(vhost_vsock_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("vhost transport for vsock");
MODULE_ALIAS_MISCDEV(VHOST_VSOCK_MINOR);
MODULE_ALIAS("devname:vhost-vsock");