// SPDX-License-Identifier: GPL-2.0-only
/*
 * vhost transport for vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 */
#include <linux/miscdevice.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <net/sock.h>
#include <linux/virtio_vsock.h>
#include <linux/vhost.h>
#include <linux/hashtable.h>

#include <net/af_vsock.h>
#include "vhost.h"

#define VHOST_VSOCK_DEFAULT_HOST_CID	2
/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others.
 */
#define VHOST_VSOCK_WEIGHT 0x80000
/* Max number of packets transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others with
 * small pkts.
 */
#define VHOST_VSOCK_PKT_WEIGHT 256

enum {
	VHOST_VSOCK_FEATURES = VHOST_FEATURES,
};

/* Used to track all the vhost_vsock instances on the system. */
static DEFINE_MUTEX(vhost_vsock_mutex);
static DEFINE_READ_MOSTLY_HASHTABLE(vhost_vsock_hash, 8);

struct vhost_vsock {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[2];

	/* Link to global vhost_vsock_hash, writes use vhost_vsock_mutex */
	struct hlist_node hash;

	struct vhost_work send_pkt_work;
	spinlock_t send_pkt_list_lock;
	struct list_head send_pkt_list;	/* host->guest pending packets */

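	/* Replies queued on send_pkt_list; tx kicks are throttled once this
	 * reaches the tx virtqueue size (see vhost_vsock_more_replies()).
	 */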
	atomic_t queued_replies;

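	/* Guest's context ID; zero until VHOST_VSOCK_SET_GUEST_CID assigns one */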
	u32 guest_cid;
};

static u32 vhost_transport_get_local_cid(void)
{
	return VHOST_VSOCK_DEFAULT_HOST_CID;
}

/* Callers that dereference the return value must hold vhost_vsock_mutex or the
 * RCU read lock.
 */
static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
{
	struct vhost_vsock *vsock;

	hash_for_each_possible_rcu(vhost_vsock_hash, vsock, hash, guest_cid) {
		u32 other_cid = vsock->guest_cid;

		/* Skip instances that have no CID yet */
		if (other_cid == 0)
			continue;

		if (other_cid == guest_cid)
			return vsock;
	}

	return NULL;
}

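/* Drain send_pkt_list into the guest's rx virtqueue.  Runs from the
 * send_pkt worker and from the rx kick handler.  Each packet is copied
 * out as a header followed by its payload, and the loop bounds its work
 * with vhost_exceeds_weight() so one virtqueue cannot monopolize the
 * worker.
 */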
static void
vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
			    struct vhost_virtqueue *vq)
{
	struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
	int pkts = 0, total_len = 0;
	bool added = false;
	bool restart_tx = false;

	mutex_lock(&vq->mutex);

	if (!vq->private_data)
		goto out;

	/* Avoid further vmexits, we're already processing the virtqueue */
	vhost_disable_notify(&vsock->dev, vq);

	do {
		struct virtio_vsock_pkt *pkt;
		struct iov_iter iov_iter;
		unsigned out, in;
		size_t nbytes;
		size_t len;
		int head;

		spin_lock_bh(&vsock->send_pkt_list_lock);
		if (list_empty(&vsock->send_pkt_list)) {
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			vhost_enable_notify(&vsock->dev, vq);
			break;
		}

		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		spin_unlock_bh(&vsock->send_pkt_list_lock);

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			break;
		}

		if (head == vq->num) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);

			/* We cannot finish yet if more buffers snuck in while
			 * re-enabling notify.
			 */
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		if (out) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Expected 0 output buffers, got %u\n", out);
			break;
		}

		len = iov_length(&vq->iov[out], in);
		iov_iter_init(&iov_iter, READ, &vq->iov[out], in, len);

		nbytes = copy_to_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
		if (nbytes != sizeof(pkt->hdr)) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Faulted on copying pkt hdr\n");
			break;
		}

		nbytes = copy_to_iter(pkt->buf, pkt->len, &iov_iter);
		if (nbytes != pkt->len) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Faulted on copying pkt buf\n");
			break;
		}

		vhost_add_used(vq, head, sizeof(pkt->hdr) + pkt->len);
		added = true;

		if (pkt->reply) {
			int val;

			val = atomic_dec_return(&vsock->queued_replies);

			/* Do we have resources to resume tx processing? */
			if (val + 1 == tx_vq->num)
				restart_tx = true;
		}

		/* Deliver to monitoring devices all correctly transmitted
		 * packets.
		 */
		virtio_transport_deliver_tap_pkt(pkt);

		total_len += pkt->len;
		virtio_transport_free_pkt(pkt);
	} while (likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);

	if (restart_tx)
		vhost_poll_queue(&tx_vq->poll);
}

static void vhost_transport_send_pkt_work(struct vhost_work *work)
{
	struct vhost_virtqueue *vq;
	struct vhost_vsock *vsock;

	vsock = container_of(work, struct vhost_vsock, send_pkt_work);
	vq = &vsock->vqs[VSOCK_VQ_RX];

	vhost_transport_do_send_pkt(vsock, vq);
}

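/* Queue a packet for the guest identified by hdr.dst_cid and kick the
 * send_pkt worker.  Runs under the RCU read lock so the vhost_vsock
 * instance cannot be freed while in use.
 */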
static int
vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
{
	struct vhost_vsock *vsock;
	int len = pkt->len;

	rcu_read_lock();

	/* Find the vhost_vsock according to guest context id */
	vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
	if (!vsock) {
		rcu_read_unlock();
		virtio_transport_free_pkt(pkt);
		return -ENODEV;
	}

	if (pkt->reply)
		atomic_inc(&vsock->queued_replies);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_add_tail(&pkt->list, &vsock->send_pkt_list);
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);

	rcu_read_unlock();
	return len;
}

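/* Drop all undelivered packets belonging to @vsk from send_pkt_list.
 * If discarding queued replies makes room in a previously full tx
 * virtqueue, tx processing is re-queued.
 */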
static int
vhost_transport_cancel_pkt(struct vsock_sock *vsk)
{
	struct vhost_vsock *vsock;
	struct virtio_vsock_pkt *pkt, *n;
	int cnt = 0;
	int ret = -ENODEV;
	LIST_HEAD(freeme);

	rcu_read_lock();

	/* Find the vhost_vsock according to guest context id */
	vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
	if (!vsock)
		goto out;

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
		if (pkt->vsk != vsk)
			continue;
		list_move(&pkt->list, &freeme);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	list_for_each_entry_safe(pkt, n, &freeme, list) {
		if (pkt->reply)
			cnt++;
		list_del(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}

	if (cnt) {
		struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
		int new_cnt;

		new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
		if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num)
			vhost_poll_queue(&tx_vq->poll);
	}

	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

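/* Build a virtio_vsock_pkt from the guest buffers of a tx descriptor:
 * the header is copied first, then a payload of hdr.len bytes (bounded
 * by VIRTIO_VSOCK_MAX_PKT_BUF_SIZE).  Returns NULL on malformed
 * descriptors or allocation failure.
 */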
static struct virtio_vsock_pkt *
vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
		      unsigned int out, unsigned int in)
{
	struct virtio_vsock_pkt *pkt;
	struct iov_iter iov_iter;
	size_t nbytes;
	size_t len;

	if (in != 0) {
		vq_err(vq, "Expected 0 input buffers, got %u\n", in);
		return NULL;
	}

	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return NULL;

	len = iov_length(vq->iov, out);
	iov_iter_init(&iov_iter, WRITE, vq->iov, out, len);

	nbytes = copy_from_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
	if (nbytes != sizeof(pkt->hdr)) {
		vq_err(vq, "Expected %zu bytes for pkt->hdr, got %zu bytes\n",
		       sizeof(pkt->hdr), nbytes);
		kfree(pkt);
		return NULL;
	}

	if (le16_to_cpu(pkt->hdr.type) == VIRTIO_VSOCK_TYPE_STREAM)
		pkt->len = le32_to_cpu(pkt->hdr.len);

	/* No payload */
	if (!pkt->len)
		return pkt;

	/* The pkt is too big */
	if (pkt->len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE) {
		kfree(pkt);
		return NULL;
	}

	pkt->buf = kmalloc(pkt->len, GFP_KERNEL);
	if (!pkt->buf) {
		kfree(pkt);
		return NULL;
	}

	nbytes = copy_from_iter(pkt->buf, pkt->len, &iov_iter);
	if (nbytes != pkt->len) {
		vq_err(vq, "Expected %u byte payload, got %zu bytes\n",
		       pkt->len, nbytes);
		virtio_transport_free_pkt(pkt);
		return NULL;
	}

	return pkt;
}

/* Is there space left for replies to rx packets? */
static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
{
	struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX];
	int val;

	smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
	val = atomic_read(&vsock->queued_replies);

	return val < vq->num;
}

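/* Guest->host path: pop descriptors off the tx virtqueue, reconstruct
 * the packets and hand them to the core virtio-vsock code.  Processing
 * stops early when the reply budget is exhausted (see
 * vhost_vsock_more_replies()) or the weight limit is hit.
 */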
static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);
	struct virtio_vsock_pkt *pkt;
	int head, pkts = 0, total_len = 0;
	unsigned int out, in;
	bool added = false;

	mutex_lock(&vq->mutex);

	if (!vq->private_data)
		goto out;

	vhost_disable_notify(&vsock->dev, vq);
	do {
		u32 len;

		if (!vhost_vsock_more_replies(vsock)) {
			/* Stop tx until the device processes already
			 * pending replies.  Leave tx virtqueue
			 * callbacks disabled.
			 */
			goto no_more_replies;
		}

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0)
			break;

		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		pkt = vhost_vsock_alloc_pkt(vq, out, in);
		if (!pkt) {
			vq_err(vq, "Faulted on pkt\n");
			continue;
		}

		len = pkt->len;

		/* Deliver to monitoring devices all received packets */
		virtio_transport_deliver_tap_pkt(pkt);

		/* Only accept correctly addressed packets */
		if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid)
			virtio_transport_recv_pkt(pkt);
		else
			virtio_transport_free_pkt(pkt);

		len += sizeof(pkt->hdr);
		vhost_add_used(vq, head, len);
		total_len += len;
		added = true;
	} while (likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));

no_more_replies:
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);
}

static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);

	vhost_transport_do_send_pkt(vsock, vq);
}

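/* Enable both virtqueues.  A non-NULL private_data is what marks a
 * virtqueue as live; the kick handlers check it before touching the
 * ring.
 */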
static int vhost_vsock_start(struct vhost_vsock *vsock)
{
	struct vhost_virtqueue *vq;
	size_t i;
	int ret;

	mutex_lock(&vsock->dev.mutex);

	ret = vhost_dev_check_owner(&vsock->dev);
	if (ret)
		goto err;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);

		if (!vhost_vq_access_ok(vq)) {
			ret = -EFAULT;
			goto err_vq;
		}

		if (!vq->private_data) {
			vq->private_data = vsock;
			ret = vhost_vq_init_access(vq);
			if (ret)
				goto err_vq;
		}

		mutex_unlock(&vq->mutex);
	}

	mutex_unlock(&vsock->dev.mutex);
	return 0;

err_vq:
	vq->private_data = NULL;
	mutex_unlock(&vq->mutex);

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vq->private_data = NULL;
		mutex_unlock(&vq->mutex);
	}
err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}

static int vhost_vsock_stop(struct vhost_vsock *vsock)
{
	size_t i;
	int ret;

	mutex_lock(&vsock->dev.mutex);

	ret = vhost_dev_check_owner(&vsock->dev);
	if (ret)
		goto err;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		struct vhost_virtqueue *vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vq->private_data = NULL;
		mutex_unlock(&vq->mutex);
	}

err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}

static void vhost_vsock_free(struct vhost_vsock *vsock)
{
	kvfree(vsock);
}

static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
{
	struct vhost_virtqueue **vqs;
	struct vhost_vsock *vsock;
	int ret;

	/* This struct is large and allocation could fail, fall back to vmalloc
	 * if there is no other way.
	 */
	vsock = kvmalloc(sizeof(*vsock), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!vsock)
		return -ENOMEM;

	vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		ret = -ENOMEM;
		goto out;
	}

	vsock->guest_cid = 0; /* no CID assigned yet */

	atomic_set(&vsock->queued_replies, 0);

	vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX];
	vqs[VSOCK_VQ_RX] = &vsock->vqs[VSOCK_VQ_RX];
	vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
	vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;

	vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs),
		       UIO_MAXIOV, VHOST_VSOCK_PKT_WEIGHT,
		       VHOST_VSOCK_WEIGHT);

	file->private_data = vsock;
	spin_lock_init(&vsock->send_pkt_list_lock);
	INIT_LIST_HEAD(&vsock->send_pkt_list);
	vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);
	return 0;

out:
	vhost_vsock_free(vsock);
	return ret;
}

static void vhost_vsock_flush(struct vhost_vsock *vsock)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++)
		if (vsock->vqs[i].handle_kick)
			vhost_poll_flush(&vsock->vqs[i].poll);
	vhost_work_flush(&vsock->dev, &vsock->send_pkt_work);
}

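/* Called for every connected socket when a vhost_vsock device goes
 * away: any socket whose peer CID no longer resolves is marked done and
 * gets ECONNRESET.
 */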
static void vhost_vsock_reset_orphans(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	/* vmci_transport.c doesn't take sk_lock here either.  At least we're
	 * under vsock_table_lock so the sock cannot disappear while we're
	 * executing.
	 */

	/* If the peer is still valid, no need to reset connection */
	if (vhost_vsock_get(vsk->remote_addr.svm_cid))
		return;

	/* If the close timeout is pending, let it expire.  This avoids races
	 * with the timeout callback.
	 */
	if (vsk->close_work_scheduled)
		return;

	sock_set_flag(sk, SOCK_DONE);
	vsk->peer_shutdown = SHUTDOWN_MASK;
	sk->sk_state = SS_UNCONNECTED;
	sk->sk_err = ECONNRESET;
	sk->sk_error_report(sk);
}

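/* Release path: unhash the device, wait out concurrent RCU users,
 * reset orphaned sockets, stop the virtqueues and free every packet
 * still sitting on send_pkt_list.
 */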
static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
{
	struct vhost_vsock *vsock = file->private_data;

	mutex_lock(&vhost_vsock_mutex);
	if (vsock->guest_cid)
		hash_del_rcu(&vsock->hash);
	mutex_unlock(&vhost_vsock_mutex);

	/* Wait for other CPUs to finish using vsock */
	synchronize_rcu();

	/* Iterating over all connections for all CIDs to find orphans is
	 * inefficient.  Room for improvement here.
	 */
	vsock_for_each_connected_socket(vhost_vsock_reset_orphans);

	vhost_vsock_stop(vsock);
	vhost_vsock_flush(vsock);
	vhost_dev_stop(&vsock->dev);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	while (!list_empty(&vsock->send_pkt_list)) {
		struct virtio_vsock_pkt *pkt;

		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	vhost_dev_cleanup(&vsock->dev);
	kfree(vsock->dev.vqs);
	vhost_vsock_free(vsock);
	return 0;
}

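/* Assign @guest_cid to this instance and (re)hash it so that
 * vhost_vsock_get() can find it.  CIDs at or below VMADDR_CID_HOST are
 * reserved, and 64-bit CIDs are not yet supported.
 */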
static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
{
	struct vhost_vsock *other;

	/* Refuse reserved CIDs */
	if (guest_cid <= VMADDR_CID_HOST ||
	    guest_cid == U32_MAX)
		return -EINVAL;

	/* 64-bit CIDs are not yet supported */
	if (guest_cid > U32_MAX)
		return -EINVAL;

	/* Refuse if CID is already in use */
	mutex_lock(&vhost_vsock_mutex);
	other = vhost_vsock_get(guest_cid);
	if (other && other != vsock) {
		mutex_unlock(&vhost_vsock_mutex);
		return -EADDRINUSE;
	}

	if (vsock->guest_cid)
		hash_del_rcu(&vsock->hash);

	vsock->guest_cid = guest_cid;
	hash_add_rcu(vhost_vsock_hash, &vsock->hash, vsock->guest_cid);
	mutex_unlock(&vhost_vsock_mutex);

	return 0;
}

static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
{
	struct vhost_virtqueue *vq;
	int i;

	if (features & ~VHOST_VSOCK_FEATURES)
		return -EOPNOTSUPP;

	mutex_lock(&vsock->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&vsock->dev)) {
		mutex_unlock(&vsock->dev.mutex);
		return -EFAULT;
	}

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];
		mutex_lock(&vq->mutex);
		vq->acked_features = features;
		mutex_unlock(&vq->mutex);
	}
	mutex_unlock(&vsock->dev.mutex);
	return 0;
}

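/* Typical userspace bring-up sequence (an illustrative sketch only, not
 * part of this file; the fd variable is hypothetical and the
 * VHOST_SET_VRING_* details are elided):
 *
 *	int fd = open("/dev/vhost-vsock", O_RDWR);
 *	uint64_t cid = 3;
 *	int running = 1;
 *
 *	ioctl(fd, VHOST_SET_OWNER, NULL);
 *	ioctl(fd, VHOST_VSOCK_SET_GUEST_CID, &cid);
 *	... VHOST_SET_VRING_* calls to configure the two virtqueues ...
 *	ioctl(fd, VHOST_VSOCK_SET_RUNNING, &running);
 *
 * VHOST_SET_OWNER and the vring ioctls reach vhost_dev_ioctl() and
 * vhost_vring_ioctl() through the default case below.
 */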
static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
				  unsigned long arg)
{
	struct vhost_vsock *vsock = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 guest_cid;
	u64 features;
	int start;
	int r;

	switch (ioctl) {
	case VHOST_VSOCK_SET_GUEST_CID:
		if (copy_from_user(&guest_cid, argp, sizeof(guest_cid)))
			return -EFAULT;
		return vhost_vsock_set_cid(vsock, guest_cid);
	case VHOST_VSOCK_SET_RUNNING:
		if (copy_from_user(&start, argp, sizeof(start)))
			return -EFAULT;
		if (start)
			return vhost_vsock_start(vsock);
		else
			return vhost_vsock_stop(vsock);
	case VHOST_GET_FEATURES:
		features = VHOST_VSOCK_FEATURES;
		if (copy_to_user(argp, &features, sizeof(features)))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, argp, sizeof(features)))
			return -EFAULT;
		return vhost_vsock_set_features(vsock, features);
	default:
		mutex_lock(&vsock->dev.mutex);
		r = vhost_dev_ioctl(&vsock->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&vsock->dev, ioctl, argp);
		else
			vhost_vsock_flush(vsock);
		mutex_unlock(&vsock->dev.mutex);
		return r;
	}
}

#ifdef CONFIG_COMPAT
static long vhost_vsock_dev_compat_ioctl(struct file *f, unsigned int ioctl,
					 unsigned long arg)
{
	return vhost_vsock_dev_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations vhost_vsock_fops = {
	.owner          = THIS_MODULE,
	.open           = vhost_vsock_dev_open,
	.release        = vhost_vsock_dev_release,
	.llseek		= noop_llseek,
	.unlocked_ioctl = vhost_vsock_dev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = vhost_vsock_dev_compat_ioctl,
#endif
};

static struct miscdevice vhost_vsock_misc = {
	.minor = VHOST_VSOCK_MINOR,
	.name = "vhost-vsock",
	.fops = &vhost_vsock_fops,
};

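/* vsock transport ops backed by this vhost device.  Everything except
 * CID lookup, packet submission and cancellation is delegated to the
 * common virtio transport code.
 */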
static struct virtio_transport vhost_transport = {
	.transport = {
		.get_local_cid            = vhost_transport_get_local_cid,

		.init                     = virtio_transport_do_socket_init,
		.destruct                 = virtio_transport_destruct,
		.release                  = virtio_transport_release,
		.connect                  = virtio_transport_connect,
		.shutdown                 = virtio_transport_shutdown,
		.cancel_pkt               = vhost_transport_cancel_pkt,

		.dgram_enqueue            = virtio_transport_dgram_enqueue,
		.dgram_dequeue            = virtio_transport_dgram_dequeue,
		.dgram_bind               = virtio_transport_dgram_bind,
		.dgram_allow              = virtio_transport_dgram_allow,

		.stream_enqueue           = virtio_transport_stream_enqueue,
		.stream_dequeue           = virtio_transport_stream_dequeue,
		.stream_has_data          = virtio_transport_stream_has_data,
		.stream_has_space         = virtio_transport_stream_has_space,
		.stream_rcvhiwat          = virtio_transport_stream_rcvhiwat,
		.stream_is_active         = virtio_transport_stream_is_active,
		.stream_allow             = virtio_transport_stream_allow,

		.notify_poll_in           = virtio_transport_notify_poll_in,
		.notify_poll_out          = virtio_transport_notify_poll_out,
		.notify_recv_init         = virtio_transport_notify_recv_init,
		.notify_recv_pre_block    = virtio_transport_notify_recv_pre_block,
		.notify_recv_pre_dequeue  = virtio_transport_notify_recv_pre_dequeue,
		.notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
		.notify_send_init         = virtio_transport_notify_send_init,
		.notify_send_pre_block    = virtio_transport_notify_send_pre_block,
		.notify_send_pre_enqueue  = virtio_transport_notify_send_pre_enqueue,
		.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,

		.set_buffer_size          = virtio_transport_set_buffer_size,
		.set_min_buffer_size      = virtio_transport_set_min_buffer_size,
		.set_max_buffer_size      = virtio_transport_set_max_buffer_size,
		.get_buffer_size          = virtio_transport_get_buffer_size,
		.get_min_buffer_size      = virtio_transport_get_min_buffer_size,
		.get_max_buffer_size      = virtio_transport_get_max_buffer_size,
	},

	.send_pkt = vhost_transport_send_pkt,
};

static int __init vhost_vsock_init(void)
{
	int ret;

	ret = vsock_core_init(&vhost_transport.transport);
	if (ret < 0)
		return ret;
	return misc_register(&vhost_vsock_misc);
}

static void __exit vhost_vsock_exit(void)
{
	misc_deregister(&vhost_vsock_misc);
	vsock_core_exit();
}

module_init(vhost_vsock_init);
module_exit(vhost_vsock_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("vhost transport for vsock");
MODULE_ALIAS_MISCDEV(VHOST_VSOCK_MINOR);
MODULE_ALIAS("devname:vhost-vsock");