// SPDX-License-Identifier: GPL-2.0-only
/*
 * vhost transport for vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 */
#include <linux/miscdevice.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <net/sock.h>
#include <linux/virtio_vsock.h>
#include <linux/vhost.h>
#include <linux/hashtable.h>

#include <net/af_vsock.h>
#include "vhost.h"

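/* The well-known CID of the host, i.e. VMADDR_CID_HOST. */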
#define VHOST_VSOCK_DEFAULT_HOST_CID	2
/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others.
 */
#define VHOST_VSOCK_WEIGHT 0x80000
/* Max number of packets transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others with
 * small packets.
 */
#define VHOST_VSOCK_PKT_WEIGHT 256

enum {
	VHOST_VSOCK_FEATURES = VHOST_FEATURES |
			       (1ULL << VIRTIO_F_ACCESS_PLATFORM) |
			       (1ULL << VIRTIO_VSOCK_F_SEQPACKET)
};

enum {
	VHOST_VSOCK_BACKEND_FEATURES = (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)
};

/* Used to track all the vhost_vsock instances on the system. */
static DEFINE_MUTEX(vhost_vsock_mutex);
static DEFINE_READ_MOSTLY_HASHTABLE(vhost_vsock_hash, 8);

struct vhost_vsock {
	struct vhost_dev dev;
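	/* Indexed by VSOCK_VQ_RX/VSOCK_VQ_TX.  The names follow the guest's
	 * point of view, so the host transmits on VSOCK_VQ_RX and receives
	 * on VSOCK_VQ_TX.
	 */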
	struct vhost_virtqueue vqs[2];

	/* Link to global vhost_vsock_hash, writes use vhost_vsock_mutex */
	struct hlist_node hash;

	struct vhost_work send_pkt_work;
	spinlock_t send_pkt_list_lock;
	struct list_head send_pkt_list;	/* host->guest pending packets */

	atomic_t queued_replies;

	u32 guest_cid;
	bool seqpacket_allow;
};

static u32 vhost_transport_get_local_cid(void)
{
	return VHOST_VSOCK_DEFAULT_HOST_CID;
}

/* Callers that dereference the return value must hold vhost_vsock_mutex or the
 * RCU read lock.
 */
static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
{
	struct vhost_vsock *vsock;

	hash_for_each_possible_rcu(vhost_vsock_hash, vsock, hash, guest_cid) {
		u32 other_cid = vsock->guest_cid;

		/* Skip instances that have no CID yet */
		if (other_cid == 0)
			continue;

		if (other_cid == guest_cid)
			return vsock;
	}

	return NULL;
}

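/* Drain vsock->send_pkt_list into the guest's RX virtqueue.  Runs from
 * send_pkt_work and from the RX kick handler; takes vq->mutex itself.
 */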
static void
vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
			    struct vhost_virtqueue *vq)
{
	struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
	int pkts = 0, total_len = 0;
	bool added = false;
	bool restart_tx = false;

	mutex_lock(&vq->mutex);

	if (!vhost_vq_get_backend(vq))
		goto out;

	if (!vq_meta_prefetch(vq))
		goto out;

	/* Avoid further vmexits, we're already processing the virtqueue */
	vhost_disable_notify(&vsock->dev, vq);

	do {
		struct virtio_vsock_pkt *pkt;
		struct iov_iter iov_iter;
		unsigned out, in;
		size_t nbytes;
		size_t iov_len, payload_len;
		int head;
		bool restore_flag = false;

		spin_lock_bh(&vsock->send_pkt_list_lock);
		if (list_empty(&vsock->send_pkt_list)) {
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			vhost_enable_notify(&vsock->dev, vq);
			break;
		}

		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		spin_unlock_bh(&vsock->send_pkt_list_lock);

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			break;
		}

		if (head == vq->num) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);

			/* We cannot finish yet if more buffers snuck in while
			 * re-enabling notify.
			 */
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		if (out) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Expected 0 output buffers, got %u\n", out);
			break;
		}

		iov_len = iov_length(&vq->iov[out], in);
		if (iov_len < sizeof(pkt->hdr)) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Buffer len [%zu] too small\n", iov_len);
			break;
		}

		iov_iter_init(&iov_iter, READ, &vq->iov[out], in, iov_len);
		payload_len = pkt->len - pkt->off;

		/* If the packet is greater than the space available in the
		 * buffer, we split it using multiple buffers.
		 */
		if (payload_len > iov_len - sizeof(pkt->hdr)) {
			payload_len = iov_len - sizeof(pkt->hdr);

			/* When a large packet is split across several small rx
			 * buffers, each fragment's header is derived from the
			 * current packet's header (with its own length).  For
			 * SOCK_SEQPACKET we must also clear the message
			 * delimiter bit (VIRTIO_VSOCK_SEQ_EOM) on all but the
			 * last fragment; otherwise every fragment would carry
			 * the end-of-message mark instead of just the final
			 * one.  The bit is restored below once this fragment's
			 * header has been copied to the rx buffer.
			 */
			if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOM) {
				pkt->hdr.flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM);
				restore_flag = true;
			}
		}

		/* Set the correct length in the header */
		pkt->hdr.len = cpu_to_le32(payload_len);

		nbytes = copy_to_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
		if (nbytes != sizeof(pkt->hdr)) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Faulted on copying pkt hdr\n");
			break;
		}

		nbytes = copy_to_iter(pkt->buf + pkt->off, payload_len,
				      &iov_iter);
		if (nbytes != payload_len) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Faulted on copying pkt buf\n");
			break;
		}

		/* Deliver to monitoring devices all packets that we
		 * will transmit.
		 */
		virtio_transport_deliver_tap_pkt(pkt);

		vhost_add_used(vq, head, sizeof(pkt->hdr) + payload_len);
		added = true;

		pkt->off += payload_len;
		total_len += payload_len;

		/* If we didn't send all the payload we can requeue the packet
		 * to send it with the next available buffer.
		 */
		if (pkt->off < pkt->len) {
			if (restore_flag)
				pkt->hdr.flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM);

			/* We are queueing the same virtio_vsock_pkt to handle
			 * the remaining bytes, and we want to deliver it
			 * to monitoring devices in the next iteration.
			 */
			pkt->tap_delivered = false;

			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);
		} else {
			if (pkt->reply) {
				int val;

				val = atomic_dec_return(&vsock->queued_replies);

				/* Do we have resources to resume tx
				 * processing?
				 */
				if (val + 1 == tx_vq->num)
					restart_tx = true;
			}

			virtio_transport_free_pkt(pkt);
		}
	} while (likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);

	if (restart_tx)
		vhost_poll_queue(&tx_vq->poll);
}

static void vhost_transport_send_pkt_work(struct vhost_work *work)
{
	struct vhost_virtqueue *vq;
	struct vhost_vsock *vsock;

	vsock = container_of(work, struct vhost_vsock, send_pkt_work);
	vq = &vsock->vqs[VSOCK_VQ_RX];

	vhost_transport_do_send_pkt(vsock, vq);
}

static int
vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
{
	struct vhost_vsock *vsock;
	int len = pkt->len;

	rcu_read_lock();

	/* Find the vhost_vsock according to guest context id */
	vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
	if (!vsock) {
		rcu_read_unlock();
		virtio_transport_free_pkt(pkt);
		return -ENODEV;
	}

	if (pkt->reply)
		atomic_inc(&vsock->queued_replies);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_add_tail(&pkt->list, &vsock->send_pkt_list);
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);

	rcu_read_unlock();
	return len;
}

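/* Remove all packets queued for @vsk from the host->guest list.  If freeing
 * reply packets brings queued_replies back below the TX virtqueue size, kick
 * the TX handler so it can resume processing.
 */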
static int
vhost_transport_cancel_pkt(struct vsock_sock *vsk)
{
	struct vhost_vsock *vsock;
	struct virtio_vsock_pkt *pkt, *n;
	int cnt = 0;
	int ret = -ENODEV;
	LIST_HEAD(freeme);

	rcu_read_lock();

	/* Find the vhost_vsock according to guest context id */
	vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
	if (!vsock)
		goto out;

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
		if (pkt->vsk != vsk)
			continue;
		list_move(&pkt->list, &freeme);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	list_for_each_entry_safe(pkt, n, &freeme, list) {
		if (pkt->reply)
			cnt++;
		list_del(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}

	if (cnt) {
		struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
		int new_cnt;

		new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
		if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num)
			vhost_poll_queue(&tx_vq->poll);
	}

	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

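/* Read one guest->host descriptor chain from the TX virtqueue into a freshly
 * allocated packet: the virtio_vsock header first, then the payload if any.
 * Returns NULL on malformed input or allocation failure.
 */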
static struct virtio_vsock_pkt *
vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
		      unsigned int out, unsigned int in)
{
	struct virtio_vsock_pkt *pkt;
	struct iov_iter iov_iter;
	size_t nbytes;
	size_t len;

	if (in != 0) {
		vq_err(vq, "Expected 0 input buffers, got %u\n", in);
		return NULL;
	}

	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return NULL;

	len = iov_length(vq->iov, out);
	iov_iter_init(&iov_iter, WRITE, vq->iov, out, len);

	nbytes = copy_from_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
	if (nbytes != sizeof(pkt->hdr)) {
		vq_err(vq, "Expected %zu bytes for pkt->hdr, got %zu bytes\n",
		       sizeof(pkt->hdr), nbytes);
		kfree(pkt);
		return NULL;
	}

	pkt->len = le32_to_cpu(pkt->hdr.len);

	/* No payload */
	if (!pkt->len)
		return pkt;

	/* The pkt is too big */
	if (pkt->len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE) {
		kfree(pkt);
		return NULL;
	}

	pkt->buf = kmalloc(pkt->len, GFP_KERNEL);
	if (!pkt->buf) {
		kfree(pkt);
		return NULL;
	}

	pkt->buf_len = pkt->len;

	nbytes = copy_from_iter(pkt->buf, pkt->len, &iov_iter);
	if (nbytes != pkt->len) {
		vq_err(vq, "Expected %u byte payload, got %zu bytes\n",
		       pkt->len, nbytes);
		virtio_transport_free_pkt(pkt);
		return NULL;
	}

	return pkt;
}

/* Is there space left for replies to rx packets? */
static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
{
	struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX];
	int val;

	smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
	val = atomic_read(&vsock->queued_replies);

	return val < vq->num;
}

static bool vhost_transport_seqpacket_allow(u32 remote_cid);

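/* Transport ops exposed to the core vsock code.  Most callbacks are the
 * generic virtio transport helpers; only CID lookup, packet cancellation,
 * seqpacket negotiation and the actual send path are vhost-specific.
 */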
static struct virtio_transport vhost_transport = {
	.transport = {
		.module                   = THIS_MODULE,

		.get_local_cid            = vhost_transport_get_local_cid,

		.init                     = virtio_transport_do_socket_init,
		.destruct                 = virtio_transport_destruct,
		.release                  = virtio_transport_release,
		.connect                  = virtio_transport_connect,
		.shutdown                 = virtio_transport_shutdown,
		.cancel_pkt               = vhost_transport_cancel_pkt,

		.dgram_enqueue            = virtio_transport_dgram_enqueue,
		.dgram_dequeue            = virtio_transport_dgram_dequeue,
		.dgram_bind               = virtio_transport_dgram_bind,
		.dgram_allow              = virtio_transport_dgram_allow,

		.stream_enqueue           = virtio_transport_stream_enqueue,
		.stream_dequeue           = virtio_transport_stream_dequeue,
		.stream_has_data          = virtio_transport_stream_has_data,
		.stream_has_space         = virtio_transport_stream_has_space,
		.stream_rcvhiwat          = virtio_transport_stream_rcvhiwat,
		.stream_is_active         = virtio_transport_stream_is_active,
		.stream_allow             = virtio_transport_stream_allow,

		.seqpacket_dequeue        = virtio_transport_seqpacket_dequeue,
		.seqpacket_enqueue        = virtio_transport_seqpacket_enqueue,
		.seqpacket_allow          = vhost_transport_seqpacket_allow,
		.seqpacket_has_data       = virtio_transport_seqpacket_has_data,

		.notify_poll_in           = virtio_transport_notify_poll_in,
		.notify_poll_out          = virtio_transport_notify_poll_out,
		.notify_recv_init         = virtio_transport_notify_recv_init,
		.notify_recv_pre_block    = virtio_transport_notify_recv_pre_block,
		.notify_recv_pre_dequeue  = virtio_transport_notify_recv_pre_dequeue,
		.notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
		.notify_send_init         = virtio_transport_notify_send_init,
		.notify_send_pre_block    = virtio_transport_notify_send_pre_block,
		.notify_send_pre_enqueue  = virtio_transport_notify_send_pre_enqueue,
		.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
		.notify_buffer_size       = virtio_transport_notify_buffer_size,
	},

	.send_pkt = vhost_transport_send_pkt,
};

static bool vhost_transport_seqpacket_allow(u32 remote_cid)
{
	struct vhost_vsock *vsock;
	bool seqpacket_allow = false;

	rcu_read_lock();
	vsock = vhost_vsock_get(remote_cid);

	if (vsock)
		seqpacket_allow = vsock->seqpacket_allow;

	rcu_read_unlock();

	return seqpacket_allow;
}

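/* Kick handler for the TX virtqueue: the guest has made packets available,
 * receive them and hand valid ones to the core vsock code.
 */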
static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);
	struct virtio_vsock_pkt *pkt;
	int head, pkts = 0, total_len = 0;
	unsigned int out, in;
	bool added = false;

	mutex_lock(&vq->mutex);

	if (!vhost_vq_get_backend(vq))
		goto out;

	if (!vq_meta_prefetch(vq))
		goto out;

	vhost_disable_notify(&vsock->dev, vq);
	do {
		u32 len;

		if (!vhost_vsock_more_replies(vsock)) {
			/* Stop tx until the device processes already
			 * pending replies.  Leave tx virtqueue
			 * callbacks disabled.
			 */
			goto no_more_replies;
		}

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0)
			break;

		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		pkt = vhost_vsock_alloc_pkt(vq, out, in);
		if (!pkt) {
			vq_err(vq, "Faulted on pkt\n");
			continue;
		}

		len = pkt->len;

		/* Deliver to monitoring devices all received packets */
		virtio_transport_deliver_tap_pkt(pkt);

		/* Only accept correctly addressed packets */
		if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid &&
		    le64_to_cpu(pkt->hdr.dst_cid) ==
		    vhost_transport_get_local_cid())
			virtio_transport_recv_pkt(&vhost_transport, pkt);
		else
			virtio_transport_free_pkt(pkt);

		len += sizeof(pkt->hdr);
		vhost_add_used(vq, head, len);
		total_len += len;
		added = true;
	} while (likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));

no_more_replies:
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);
}

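/* Kick handler for the RX virtqueue: the guest has refilled it with empty
 * buffers, so retry sending any queued host->guest packets.
 */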
static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);

	vhost_transport_do_send_pkt(vsock, vq);
}

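/* VHOST_VSOCK_SET_RUNNING(1): attach this device as the backend of both
 * virtqueues and kick the send worker for packets queued while stopped.
 */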
static int vhost_vsock_start(struct vhost_vsock *vsock)
{
	struct vhost_virtqueue *vq;
	size_t i;
	int ret;

	mutex_lock(&vsock->dev.mutex);

	ret = vhost_dev_check_owner(&vsock->dev);
	if (ret)
		goto err;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);

		if (!vhost_vq_access_ok(vq)) {
			ret = -EFAULT;
			goto err_vq;
		}

		if (!vhost_vq_get_backend(vq)) {
			vhost_vq_set_backend(vq, vsock);
			ret = vhost_vq_init_access(vq);
			if (ret)
				goto err_vq;
		}

		mutex_unlock(&vq->mutex);
	}

	/* Some packets may have been queued before the device was started,
	 * let's kick the send worker to send them.
	 */
	vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);

	mutex_unlock(&vsock->dev.mutex);
	return 0;

err_vq:
	vhost_vq_set_backend(vq, NULL);
	mutex_unlock(&vq->mutex);

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vhost_vq_set_backend(vq, NULL);
		mutex_unlock(&vq->mutex);
	}
err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}

static int vhost_vsock_stop(struct vhost_vsock *vsock)
{
	size_t i;
	int ret;

	mutex_lock(&vsock->dev.mutex);

	ret = vhost_dev_check_owner(&vsock->dev);
	if (ret)
		goto err;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		struct vhost_virtqueue *vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vhost_vq_set_backend(vq, NULL);
		mutex_unlock(&vq->mutex);
	}

err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}

static void vhost_vsock_free(struct vhost_vsock *vsock)
{
	kvfree(vsock);
}

static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
{
	struct vhost_virtqueue **vqs;
	struct vhost_vsock *vsock;
	int ret;

	/* This struct is large and allocation could fail, fall back to vmalloc
	 * if there is no other way.
	 */
	vsock = kvmalloc(sizeof(*vsock), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!vsock)
		return -ENOMEM;

	vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		ret = -ENOMEM;
		goto out;
	}

	vsock->guest_cid = 0; /* no CID assigned yet */

	atomic_set(&vsock->queued_replies, 0);

	vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX];
	vqs[VSOCK_VQ_RX] = &vsock->vqs[VSOCK_VQ_RX];
	vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
	vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;

	vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs),
		       UIO_MAXIOV, VHOST_VSOCK_PKT_WEIGHT,
		       VHOST_VSOCK_WEIGHT, true, NULL);

	file->private_data = vsock;
	spin_lock_init(&vsock->send_pkt_list_lock);
	INIT_LIST_HEAD(&vsock->send_pkt_list);
	vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);
	return 0;

out:
	vhost_vsock_free(vsock);
	return ret;
}

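/* Wait for all queued vhost work (kick handlers and send_pkt_work) to
 * complete.
 */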
static void vhost_vsock_flush(struct vhost_vsock *vsock)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++)
		if (vsock->vqs[i].handle_kick)
			vhost_poll_flush(&vsock->vqs[i].poll);
	vhost_work_dev_flush(&vsock->dev);
}

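/* Called via vsock_for_each_connected_socket() on device release: reset any
 * socket whose peer CID no longer resolves to a live vhost_vsock instance.
 */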
static void vhost_vsock_reset_orphans(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	/* vmci_transport.c doesn't take sk_lock here either.  At least we're
	 * under vsock_table_lock so the sock cannot disappear while we're
	 * executing.
	 */

	/* If the peer is still valid, no need to reset connection */
	if (vhost_vsock_get(vsk->remote_addr.svm_cid))
		return;

	/* If the close timeout is pending, let it expire.  This avoids races
	 * with the timeout callback.
	 */
	if (vsk->close_work_scheduled)
		return;

	sock_set_flag(sk, SOCK_DONE);
	vsk->peer_shutdown = SHUTDOWN_MASK;
	sk->sk_state = SS_UNCONNECTED;
	sk->sk_err = ECONNRESET;
	sk_error_report(sk);
}

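/* Teardown order matters: unhash first so new lookups fail, wait out an RCU
 * grace period, reset orphaned sockets, then stop the virtqueues and free
 * whatever is still queued.
 */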
static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
{
	struct vhost_vsock *vsock = file->private_data;

	mutex_lock(&vhost_vsock_mutex);
	if (vsock->guest_cid)
		hash_del_rcu(&vsock->hash);
	mutex_unlock(&vhost_vsock_mutex);

	/* Wait for other CPUs to finish using vsock */
	synchronize_rcu();

	/* Iterating over all connections for all CIDs to find orphans is
	 * inefficient.  Room for improvement here.
	 */
	vsock_for_each_connected_socket(vhost_vsock_reset_orphans);

	vhost_vsock_stop(vsock);
	vhost_vsock_flush(vsock);
	vhost_dev_stop(&vsock->dev);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	while (!list_empty(&vsock->send_pkt_list)) {
		struct virtio_vsock_pkt *pkt;

		pkt = list_first_entry(&vsock->send_pkt_list,
				struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	vhost_dev_cleanup(&vsock->dev);
	kfree(vsock->dev.vqs);
	vhost_vsock_free(vsock);
	return 0;
}

static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
{
	struct vhost_vsock *other;

	/* Refuse reserved CIDs */
	if (guest_cid <= VMADDR_CID_HOST ||
	    guest_cid == U32_MAX)
		return -EINVAL;

	/* 64-bit CIDs are not yet supported */
	if (guest_cid > U32_MAX)
		return -EINVAL;

	/* Refuse if CID is assigned to the guest->host transport (i.e. nested
	 * VM), to make the loopback work.
	 */
	if (vsock_find_cid(guest_cid))
		return -EADDRINUSE;

	/* Refuse if CID is already in use */
	mutex_lock(&vhost_vsock_mutex);
	other = vhost_vsock_get(guest_cid);
	if (other && other != vsock) {
		mutex_unlock(&vhost_vsock_mutex);
		return -EADDRINUSE;
	}

	if (vsock->guest_cid)
		hash_del_rcu(&vsock->hash);

	vsock->guest_cid = guest_cid;
	hash_add_rcu(vhost_vsock_hash, &vsock->hash, vsock->guest_cid);
	mutex_unlock(&vhost_vsock_mutex);

	return 0;
}

static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
{
	struct vhost_virtqueue *vq;
	int i;

	if (features & ~VHOST_VSOCK_FEATURES)
		return -EOPNOTSUPP;

	mutex_lock(&vsock->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&vsock->dev)) {
		goto err;
	}

	if ((features & (1ULL << VIRTIO_F_ACCESS_PLATFORM))) {
		if (vhost_init_device_iotlb(&vsock->dev, true))
			goto err;
	}

	if (features & (1ULL << VIRTIO_VSOCK_F_SEQPACKET))
		vsock->seqpacket_allow = true;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];
		mutex_lock(&vq->mutex);
		vq->acked_features = features;
		mutex_unlock(&vq->mutex);
	}
	mutex_unlock(&vsock->dev.mutex);
	return 0;

err:
	mutex_unlock(&vsock->dev.mutex);
	return -EFAULT;
}

static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
				  unsigned long arg)
{
	struct vhost_vsock *vsock = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 guest_cid;
	u64 features;
	int start;
	int r;

	switch (ioctl) {
	case VHOST_VSOCK_SET_GUEST_CID:
		if (copy_from_user(&guest_cid, argp, sizeof(guest_cid)))
			return -EFAULT;
		return vhost_vsock_set_cid(vsock, guest_cid);
	case VHOST_VSOCK_SET_RUNNING:
		if (copy_from_user(&start, argp, sizeof(start)))
			return -EFAULT;
		if (start)
			return vhost_vsock_start(vsock);
		else
			return vhost_vsock_stop(vsock);
	case VHOST_GET_FEATURES:
		features = VHOST_VSOCK_FEATURES;
		if (copy_to_user(argp, &features, sizeof(features)))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, argp, sizeof(features)))
			return -EFAULT;
		return vhost_vsock_set_features(vsock, features);
	case VHOST_GET_BACKEND_FEATURES:
		features = VHOST_VSOCK_BACKEND_FEATURES;
		if (copy_to_user(argp, &features, sizeof(features)))
			return -EFAULT;
		return 0;
	case VHOST_SET_BACKEND_FEATURES:
		if (copy_from_user(&features, argp, sizeof(features)))
			return -EFAULT;
		if (features & ~VHOST_VSOCK_BACKEND_FEATURES)
			return -EOPNOTSUPP;
		vhost_set_backend_features(&vsock->dev, features);
		return 0;
	default:
		mutex_lock(&vsock->dev.mutex);
		r = vhost_dev_ioctl(&vsock->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&vsock->dev, ioctl, argp);
		else
			vhost_vsock_flush(vsock);
		mutex_unlock(&vsock->dev.mutex);
		return r;
	}
}
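
/* A minimal userspace sketch of driving this device (hypothetical; error
 * handling and vring setup are omitted.  The generic VHOST_SET_VRING_*
 * ioctls are forwarded to vhost_dev_ioctl()/vhost_vring_ioctl() by the
 * default case above):
 *
 *	int fd = open("/dev/vhost-vsock", O_RDWR);
 *	uint64_t cid = 3;	// first CID above the reserved range
 *	int running = 1;
 *
 *	ioctl(fd, VHOST_SET_OWNER);
 *	ioctl(fd, VHOST_VSOCK_SET_GUEST_CID, &cid);
 *	// ...VHOST_SET_VRING_NUM/ADDR/BASE/KICK/CALL for both queues...
 *	ioctl(fd, VHOST_VSOCK_SET_RUNNING, &running);
 */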

static ssize_t vhost_vsock_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct vhost_vsock *vsock = file->private_data;
	struct vhost_dev *dev = &vsock->dev;
	int noblock = file->f_flags & O_NONBLOCK;

	return vhost_chr_read_iter(dev, to, noblock);
}

static ssize_t vhost_vsock_chr_write_iter(struct kiocb *iocb,
					  struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct vhost_vsock *vsock = file->private_data;
	struct vhost_dev *dev = &vsock->dev;

	return vhost_chr_write_iter(dev, from);
}

static __poll_t vhost_vsock_chr_poll(struct file *file, poll_table *wait)
{
	struct vhost_vsock *vsock = file->private_data;
	struct vhost_dev *dev = &vsock->dev;

	return vhost_chr_poll(file, dev, wait);
}

static const struct file_operations vhost_vsock_fops = {
	.owner          = THIS_MODULE,
	.open           = vhost_vsock_dev_open,
	.release        = vhost_vsock_dev_release,
	.llseek         = noop_llseek,
	.unlocked_ioctl = vhost_vsock_dev_ioctl,
	.compat_ioctl   = compat_ptr_ioctl,
	.read_iter      = vhost_vsock_chr_read_iter,
	.write_iter     = vhost_vsock_chr_write_iter,
	.poll           = vhost_vsock_chr_poll,
};


static struct miscdevice vhost_vsock_misc = {
	.minor = VHOST_VSOCK_MINOR,
	.name = "vhost-vsock",
	.fops = &vhost_vsock_fops,
};

static int __init vhost_vsock_init(void)
{
	int ret;

	ret = vsock_core_register(&vhost_transport.transport,
				  VSOCK_TRANSPORT_F_H2G);
	if (ret < 0)
		return ret;
	return misc_register(&vhost_vsock_misc);
}

static void __exit vhost_vsock_exit(void)
{
	misc_deregister(&vhost_vsock_misc);
	vsock_core_unregister(&vhost_transport.transport);
}

module_init(vhost_vsock_init);
module_exit(vhost_vsock_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("vhost transport for vsock");
MODULE_ALIAS_MISCDEV(VHOST_VSOCK_MINOR);
MODULE_ALIAS("devname:vhost-vsock");