// SPDX-License-Identifier: GPL-2.0-only
/*
 * common code for virtio vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 */
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/virtio_vsock.h>
#include <uapi/linux/vsockmon.h>

#include <net/sock.h>
#include <net/af_vsock.h>

#define CREATE_TRACE_POINTS
#include <trace/events/vsock_virtio_transport_common.h>

/* How long to wait for graceful shutdown of a connection */
#define VSOCK_CLOSE_TIMEOUT (8 * HZ)

/* Threshold for detecting small packets to copy */
#define GOOD_COPY_LEN  128

static const struct virtio_transport *
virtio_transport_get_ops(struct vsock_sock *vsk)
{
	const struct vsock_transport *t = vsock_core_get_transport(vsk);

	if (WARN_ON(!t))
		return NULL;

	return container_of(t, struct virtio_transport, transport);
}
/* Returns a new sk_buff on success, otherwise returns NULL if
 * allocation fails or the payload cannot be copied from info->msg.
 */
static struct sk_buff *
virtio_transport_alloc_skb(struct virtio_vsock_pkt_info *info,
			   size_t len,
			   u32 src_cid,
			   u32 src_port,
			   u32 dst_cid,
			   u32 dst_port)
{
	const size_t skb_len = VIRTIO_VSOCK_SKB_HEADROOM + len;
	struct virtio_vsock_hdr *hdr;
	struct sk_buff *skb;
	void *payload;
	int err;

	skb = virtio_vsock_alloc_skb(skb_len, GFP_KERNEL);
	if (!skb)
		return NULL;

	hdr = virtio_vsock_hdr(skb);
	hdr->type	= cpu_to_le16(info->type);
	hdr->op		= cpu_to_le16(info->op);
	hdr->src_cid	= cpu_to_le64(src_cid);
	hdr->dst_cid	= cpu_to_le64(dst_cid);
	hdr->src_port	= cpu_to_le32(src_port);
	hdr->dst_port	= cpu_to_le32(dst_port);
	hdr->flags	= cpu_to_le32(info->flags);
	hdr->len	= cpu_to_le32(len);

	if (info->msg && len > 0) {
		payload = skb_put(skb, len);
		err = memcpy_from_msg(payload, info->msg, len);
		if (err)
			goto out;

		if (msg_data_left(info->msg) == 0 &&
		    info->type == VIRTIO_VSOCK_TYPE_SEQPACKET) {
			hdr->flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM);

			if (info->msg->msg_flags & MSG_EOR)
				hdr->flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
		}
	}

	if (info->reply)
		virtio_vsock_skb_set_reply(skb);

	trace_virtio_transport_alloc_pkt(src_cid, src_port,
					 dst_cid, dst_port,
					 len,
					 info->type,
					 info->op,
					 info->flags);

	return skb;

out:
	kfree_skb(skb);
	return NULL;
}

/* Packet capture */
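/* Each packet delivered to a tap (e.g. the vsockmon device) is prefixed
 * with an af_vsockmon_hdr, followed by the raw little-endian
 * virtio_vsock_hdr and then the payload, which is the framing that
 * userspace capture tools expect.
 */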
static struct sk_buff *virtio_transport_build_skb(void *opaque)
{
	struct virtio_vsock_hdr *pkt_hdr;
	struct sk_buff *pkt = opaque;
	struct af_vsockmon_hdr *hdr;
	struct sk_buff *skb;
	size_t payload_len;
	void *payload_buf;
	/* A packet could have been split to fit the RX buffer, so use the
	 * skb's current length and data pointer: they already account for
	 * any offset into the original packet's payload.
	 */
	pkt_hdr = virtio_vsock_hdr(pkt);
	payload_len = pkt->len;
	payload_buf = pkt->data;

	skb = alloc_skb(sizeof(*hdr) + sizeof(*pkt_hdr) + payload_len,
			GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, sizeof(*hdr));

	/* pkt->hdr is little-endian so no need to byteswap here */
	hdr->src_cid = pkt_hdr->src_cid;
	hdr->src_port = pkt_hdr->src_port;
	hdr->dst_cid = pkt_hdr->dst_cid;
	hdr->dst_port = pkt_hdr->dst_port;

	hdr->transport = cpu_to_le16(AF_VSOCK_TRANSPORT_VIRTIO);
	hdr->len = cpu_to_le16(sizeof(*pkt_hdr));
	memset(hdr->reserved, 0, sizeof(hdr->reserved));

	switch (le16_to_cpu(pkt_hdr->op)) {
	case VIRTIO_VSOCK_OP_REQUEST:
	case VIRTIO_VSOCK_OP_RESPONSE:
		hdr->op = cpu_to_le16(AF_VSOCK_OP_CONNECT);
		break;
	case VIRTIO_VSOCK_OP_RST:
	case VIRTIO_VSOCK_OP_SHUTDOWN:
		hdr->op = cpu_to_le16(AF_VSOCK_OP_DISCONNECT);
		break;
	case VIRTIO_VSOCK_OP_RW:
		hdr->op = cpu_to_le16(AF_VSOCK_OP_PAYLOAD);
		break;
	case VIRTIO_VSOCK_OP_CREDIT_UPDATE:
	case VIRTIO_VSOCK_OP_CREDIT_REQUEST:
		hdr->op = cpu_to_le16(AF_VSOCK_OP_CONTROL);
		break;
	default:
		hdr->op = cpu_to_le16(AF_VSOCK_OP_UNKNOWN);
		break;
	}

	skb_put_data(skb, pkt_hdr, sizeof(*pkt_hdr));

	if (payload_len)
		skb_put_data(skb, payload_buf, payload_len);

	return skb;
}

void virtio_transport_deliver_tap_pkt(struct sk_buff *skb)
{
	if (virtio_vsock_skb_tap_delivered(skb))
		return;

	vsock_deliver_tap(virtio_transport_build_skb, skb);
	virtio_vsock_skb_set_tap_delivered(skb);
}
EXPORT_SYMBOL_GPL(virtio_transport_deliver_tap_pkt);

static u16 virtio_transport_get_type(struct sock *sk)
{
	if (sk->sk_type == SOCK_STREAM)
		return VIRTIO_VSOCK_TYPE_STREAM;
	else
		return VIRTIO_VSOCK_TYPE_SEQPACKET;
}

/* This function can only be used on connecting/connected sockets,
 * since a socket assigned to a transport is required.
 *
 * Do not use on listener sockets!
 */
static int virtio_transport_send_pkt_info(struct vsock_sock *vsk,
					  struct virtio_vsock_pkt_info *info)
{
	u32 src_cid, src_port, dst_cid, dst_port;
	const struct virtio_transport *t_ops;
	struct virtio_vsock_sock *vvs;
	u32 pkt_len = info->pkt_len;
	struct sk_buff *skb;

	info->type = virtio_transport_get_type(sk_vsock(vsk));

	t_ops = virtio_transport_get_ops(vsk);
	if (unlikely(!t_ops))
		return -EFAULT;

	src_cid = t_ops->transport.get_local_cid();
	src_port = vsk->local_addr.svm_port;
	if (!info->remote_cid) {
		dst_cid	= vsk->remote_addr.svm_cid;
		dst_port = vsk->remote_addr.svm_port;
	} else {
		dst_cid = info->remote_cid;
		dst_port = info->remote_port;
	}

	vvs = vsk->trans;

	/* we can send less than pkt_len bytes */
	if (pkt_len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE)
		pkt_len = VIRTIO_VSOCK_MAX_PKT_BUF_SIZE;

	/* virtio_transport_get_credit might return less than pkt_len credit */
	pkt_len = virtio_transport_get_credit(vvs, pkt_len);

	/* Do not send zero length OP_RW pkt */
	if (pkt_len == 0 && info->op == VIRTIO_VSOCK_OP_RW)
		return pkt_len;

	skb = virtio_transport_alloc_skb(info, pkt_len,
					 src_cid, src_port,
					 dst_cid, dst_port);
	if (!skb) {
		virtio_transport_put_credit(vvs, pkt_len);
		return -ENOMEM;
	}

	virtio_transport_inc_tx_pkt(vvs, skb);

	return t_ops->send_pkt(skb);
}

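/* RX credit accounting: rx_bytes tracks payload bytes currently sitting
 * in the rx_queue and is bounded by buf_alloc; fwd_cnt counts bytes
 * handed over to the user and is advertised back to the peer so it can
 * replenish its send credit.  The length is passed in explicitly
 * because a packet may be freed without its payload being fully copied
 * (e.g. a truncated SEQPACKET receive), so the skb's own state is not a
 * reliable source for the number of bytes to account.
 */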
static bool virtio_transport_inc_rx_pkt(struct virtio_vsock_sock *vvs,
					u32 len)
{
	if (vvs->rx_bytes + len > vvs->buf_alloc)
		return false;

	vvs->rx_bytes += len;
	return true;
}

static void virtio_transport_dec_rx_pkt(struct virtio_vsock_sock *vvs,
					u32 len)
{
	vvs->rx_bytes -= len;
	vvs->fwd_cnt += len;
}

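/* Every transmitted packet piggy-backs the receiver's current fwd_cnt
 * and buf_alloc, so the peer's view of our receive window is refreshed
 * without a dedicated control message.  last_fwd_cnt remembers what was
 * last advertised; the dequeue path compares it against fwd_cnt to
 * decide when an explicit CREDIT_UPDATE is still needed.
 */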
void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct sk_buff *skb)
{
	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);

	spin_lock_bh(&vvs->rx_lock);
	vvs->last_fwd_cnt = vvs->fwd_cnt;
	hdr->fwd_cnt = cpu_to_le32(vvs->fwd_cnt);
	hdr->buf_alloc = cpu_to_le32(vvs->buf_alloc);
	spin_unlock_bh(&vvs->rx_lock);
}
EXPORT_SYMBOL_GPL(virtio_transport_inc_tx_pkt);

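/* The peer can buffer at most peer_buf_alloc bytes, and tx_cnt -
 * peer_fwd_cnt of the bytes we sent are still unconsumed on its side,
 * so
 *
 *   credit = peer_buf_alloc - (tx_cnt - peer_fwd_cnt)
 *
 * more bytes can be sent safely.  The request is clamped to that value
 * and tx_cnt is advanced immediately, so a caller that ends up sending
 * less must return the excess via virtio_transport_put_credit().
 */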
u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 credit)
{
	u32 ret;

	spin_lock_bh(&vvs->tx_lock);
	ret = vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt);
	if (ret > credit)
		ret = credit;
	vvs->tx_cnt += ret;
	spin_unlock_bh(&vvs->tx_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(virtio_transport_get_credit);

void virtio_transport_put_credit(struct virtio_vsock_sock *vvs, u32 credit)
{
	spin_lock_bh(&vvs->tx_lock);
	vvs->tx_cnt -= credit;
	spin_unlock_bh(&vvs->tx_lock);
}
EXPORT_SYMBOL_GPL(virtio_transport_put_credit);

static int virtio_transport_send_credit_update(struct vsock_sock *vsk)
{
	struct virtio_vsock_pkt_info info = {
		.op = VIRTIO_VSOCK_OP_CREDIT_UPDATE,
		.vsk = vsk,
	};

	return virtio_transport_send_pkt_info(vsk, &info);
}

static ssize_t
virtio_transport_stream_do_peek(struct vsock_sock *vsk,
				struct msghdr *msg,
				size_t len)
{
	struct virtio_vsock_sock *vvs = vsk->trans;
	size_t bytes, total = 0, off;
	struct sk_buff *skb, *tmp;
	int err = -EFAULT;

	spin_lock_bh(&vvs->rx_lock);

	skb_queue_walk_safe(&vvs->rx_queue, skb, tmp) {
		off = 0;

		if (total == len)
			break;

		while (total < len && off < skb->len) {
			bytes = len - total;
			if (bytes > skb->len - off)
				bytes = skb->len - off;

			/* sk_lock is held by caller so no one else can dequeue.
			 * Unlock rx_lock since memcpy_to_msg() may sleep.
			 */
			spin_unlock_bh(&vvs->rx_lock);

			err = memcpy_to_msg(msg, skb->data + off, bytes);
			if (err)
				goto out;

			spin_lock_bh(&vvs->rx_lock);

			total += bytes;
			off += bytes;
		}
	}

	spin_unlock_bh(&vvs->rx_lock);

	return total;

out:
	if (total)
		err = total;
	return err;
}

static ssize_t
virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
				   struct msghdr *msg,
				   size_t len)
{
	struct virtio_vsock_sock *vvs = vsk->trans;
	size_t bytes, total = 0;
	struct sk_buff *skb;
	int err = -EFAULT;
	u32 free_space;

	spin_lock_bh(&vvs->rx_lock);
	while (total < len && !skb_queue_empty(&vvs->rx_queue)) {
		skb = __skb_dequeue(&vvs->rx_queue);

		bytes = len - total;
		if (bytes > skb->len)
			bytes = skb->len;

		/* sk_lock is held by caller so no one else can dequeue.
		 * Unlock rx_lock since memcpy_to_msg() may sleep.
		 */
		spin_unlock_bh(&vvs->rx_lock);

		err = memcpy_to_msg(msg, skb->data, bytes);
		if (err)
			goto out;

		spin_lock_bh(&vvs->rx_lock);

		total += bytes;
		skb_pull(skb, bytes);

		if (skb->len == 0) {
			u32 pkt_len = le32_to_cpu(virtio_vsock_hdr(skb)->len);

			virtio_transport_dec_rx_pkt(vvs, pkt_len);
			consume_skb(skb);
		} else {
			__skb_queue_head(&vvs->rx_queue, skb);
		}
	}

	free_space = vvs->buf_alloc - (vvs->fwd_cnt - vvs->last_fwd_cnt);

	spin_unlock_bh(&vvs->rx_lock);

	/* To reduce the number of credit update messages,
	 * don't update credits as long as lots of space is available.
	 * Note: the limit chosen here is arbitrary. Setting the limit
	 * too high causes extra messages. Too low causes transmitter
	 * stalls. As stalls are in theory more expensive than extra
	 * messages, we set the limit to a high value. TODO: experiment
	 * with different values.
	 */
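	/* As a rough worked example (assuming the usual 64 KiB
	 * VIRTIO_VSOCK_MAX_PKT_BUF_SIZE and a 256 KiB buf_alloc): after
	 * the reader has consumed 200 KiB that was never advertised,
	 * free_space is 56 KiB, below the threshold, so a credit update
	 * is sent; had it consumed only 100 KiB, free_space would be
	 * 156 KiB and the update would be deferred.
	 */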
	if (free_space < VIRTIO_VSOCK_MAX_PKT_BUF_SIZE)
		virtio_transport_send_credit_update(vsk);

	return total;

out:
	if (total)
		err = total;
	return err;
}

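/* A SEQPACKET message may span several RW packets; the last fragment
 * carries VIRTIO_VSOCK_SEQ_EOM in its header flags (and SEQ_EOR if the
 * sender passed MSG_EOR).  The whole message is consumed here even when
 * the user buffer is too small: excess fragments are dropped without
 * being copied, and the byte count of the full message is returned.
 */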
static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk,
						 struct msghdr *msg,
						 int flags)
{
	struct virtio_vsock_sock *vvs = vsk->trans;
	int dequeued_len = 0;
	size_t user_buf_len = msg_data_left(msg);
	bool msg_ready = false;
	struct sk_buff *skb;

	spin_lock_bh(&vvs->rx_lock);

	if (vvs->msg_count == 0) {
		spin_unlock_bh(&vvs->rx_lock);
		return 0;
	}

	while (!msg_ready) {
		struct virtio_vsock_hdr *hdr;
		size_t pkt_len;

		skb = __skb_dequeue(&vvs->rx_queue);
		if (!skb)
			break;
		hdr = virtio_vsock_hdr(skb);
		pkt_len = (size_t)le32_to_cpu(hdr->len);

		if (dequeued_len >= 0) {
			size_t bytes_to_copy;

			bytes_to_copy = min(user_buf_len, pkt_len);

			if (bytes_to_copy) {
				int err;

				/* sk_lock is held by caller so no one else can dequeue.
				 * Unlock rx_lock since memcpy_to_msg() may sleep.
				 */
				spin_unlock_bh(&vvs->rx_lock);

				err = memcpy_to_msg(msg, skb->data, bytes_to_copy);
				if (err) {
					/* Copy of message failed. Rest of
					 * fragments will be freed without copy.
					 */
					dequeued_len = err;
				} else {
					user_buf_len -= bytes_to_copy;
					skb_pull(skb, bytes_to_copy);
				}

				spin_lock_bh(&vvs->rx_lock);
			}

			if (dequeued_len >= 0)
				dequeued_len += pkt_len;
		}

		if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOM) {
			msg_ready = true;
			vvs->msg_count--;

			if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOR)
				msg->msg_flags |= MSG_EOR;
		}

		virtio_transport_dec_rx_pkt(vvs, pkt_len);
		kfree_skb(skb);
	}

	spin_unlock_bh(&vvs->rx_lock);

	virtio_transport_send_credit_update(vsk);

	return dequeued_len;
}

ssize_t
virtio_transport_stream_dequeue(struct vsock_sock *vsk,
				struct msghdr *msg,
				size_t len, int flags)
{
	if (flags & MSG_PEEK)
		return virtio_transport_stream_do_peek(vsk, msg, len);
	else
		return virtio_transport_stream_do_dequeue(vsk, msg, len);
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_dequeue);

ssize_t
virtio_transport_seqpacket_dequeue(struct vsock_sock *vsk,
				   struct msghdr *msg,
				   int flags)
{
	if (flags & MSG_PEEK)
		return -EOPNOTSUPP;

	return virtio_transport_seqpacket_do_dequeue(vsk, msg, flags);
}
EXPORT_SYMBOL_GPL(virtio_transport_seqpacket_dequeue);

int
virtio_transport_seqpacket_enqueue(struct vsock_sock *vsk,
				   struct msghdr *msg,
				   size_t len)
{
	struct virtio_vsock_sock *vvs = vsk->trans;

	spin_lock_bh(&vvs->tx_lock);

	if (len > vvs->peer_buf_alloc) {
		spin_unlock_bh(&vvs->tx_lock);
		return -EMSGSIZE;
	}

	spin_unlock_bh(&vvs->tx_lock);

	return virtio_transport_stream_enqueue(vsk, msg, len);
}
EXPORT_SYMBOL_GPL(virtio_transport_seqpacket_enqueue);

int
virtio_transport_dgram_dequeue(struct vsock_sock *vsk,
			       struct msghdr *msg,
			       size_t len, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL_GPL(virtio_transport_dgram_dequeue);

s64 virtio_transport_stream_has_data(struct vsock_sock *vsk)
{
	struct virtio_vsock_sock *vvs = vsk->trans;
	s64 bytes;

	spin_lock_bh(&vvs->rx_lock);
	bytes = vvs->rx_bytes;
	spin_unlock_bh(&vvs->rx_lock);

	return bytes;
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_has_data);

u32 virtio_transport_seqpacket_has_data(struct vsock_sock *vsk)
{
	struct virtio_vsock_sock *vvs = vsk->trans;
	u32 msg_count;

	spin_lock_bh(&vvs->rx_lock);
	msg_count = vvs->msg_count;
	spin_unlock_bh(&vvs->rx_lock);

	return msg_count;
}
EXPORT_SYMBOL_GPL(virtio_transport_seqpacket_has_data);

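/* Bytes the peer can still accept, computed the same way as
 * virtio_transport_get_credit() but without consuming any credit.
 * Caller must hold tx_lock.
 */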
static s64 virtio_transport_has_space(struct vsock_sock *vsk)
{
	struct virtio_vsock_sock *vvs = vsk->trans;
	s64 bytes;

	bytes = vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt);
	if (bytes < 0)
		bytes = 0;

	return bytes;
}

s64 virtio_transport_stream_has_space(struct vsock_sock *vsk)
{
	struct virtio_vsock_sock *vvs = vsk->trans;
	s64 bytes;

	spin_lock_bh(&vvs->tx_lock);
	bytes = virtio_transport_has_space(vsk);
	spin_unlock_bh(&vvs->tx_lock);

	return bytes;
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_has_space);

int virtio_transport_do_socket_init(struct vsock_sock *vsk,
				    struct vsock_sock *psk)
{
	struct virtio_vsock_sock *vvs;

	vvs = kzalloc(sizeof(*vvs), GFP_KERNEL);
	if (!vvs)
		return -ENOMEM;

	vsk->trans = vvs;
	vvs->vsk = vsk;
	if (psk && psk->trans) {
		struct virtio_vsock_sock *ptrans = psk->trans;

		vvs->peer_buf_alloc = ptrans->peer_buf_alloc;
	}

	if (vsk->buffer_size > VIRTIO_VSOCK_MAX_BUF_SIZE)
		vsk->buffer_size = VIRTIO_VSOCK_MAX_BUF_SIZE;

	vvs->buf_alloc = vsk->buffer_size;

	spin_lock_init(&vvs->rx_lock);
	spin_lock_init(&vvs->tx_lock);
	skb_queue_head_init(&vvs->rx_queue);

	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_do_socket_init);

/* sk_lock held by the caller */
void virtio_transport_notify_buffer_size(struct vsock_sock *vsk, u64 *val)
{
	struct virtio_vsock_sock *vvs = vsk->trans;

	if (*val > VIRTIO_VSOCK_MAX_BUF_SIZE)
		*val = VIRTIO_VSOCK_MAX_BUF_SIZE;

	vvs->buf_alloc = *val;

	virtio_transport_send_credit_update(vsk);
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_buffer_size);

int
virtio_transport_notify_poll_in(struct vsock_sock *vsk,
				size_t target,
				bool *data_ready_now)
{
	*data_ready_now = vsock_stream_has_data(vsk) >= target;

	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_poll_in);

int
virtio_transport_notify_poll_out(struct vsock_sock *vsk,
				 size_t target,
				 bool *space_avail_now)
{
	s64 free_space;

	free_space = vsock_stream_has_space(vsk);
	if (free_space > 0)
		*space_avail_now = true;
	else if (free_space == 0)
		*space_avail_now = false;

	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_poll_out);

int virtio_transport_notify_recv_init(struct vsock_sock *vsk,
	size_t target, struct vsock_transport_recv_notify_data *data)
{
	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_init);

int virtio_transport_notify_recv_pre_block(struct vsock_sock *vsk,
	size_t target, struct vsock_transport_recv_notify_data *data)
{
	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_pre_block);

int virtio_transport_notify_recv_pre_dequeue(struct vsock_sock *vsk,
	size_t target, struct vsock_transport_recv_notify_data *data)
{
	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_pre_dequeue);

int virtio_transport_notify_recv_post_dequeue(struct vsock_sock *vsk,
	size_t target, ssize_t copied, bool data_read,
	struct vsock_transport_recv_notify_data *data)
{
	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_post_dequeue);

int virtio_transport_notify_send_init(struct vsock_sock *vsk,
	struct vsock_transport_send_notify_data *data)
{
	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_send_init);

int virtio_transport_notify_send_pre_block(struct vsock_sock *vsk,
	struct vsock_transport_send_notify_data *data)
{
	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_send_pre_block);

int virtio_transport_notify_send_pre_enqueue(struct vsock_sock *vsk,
	struct vsock_transport_send_notify_data *data)
{
	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_send_pre_enqueue);

int virtio_transport_notify_send_post_enqueue(struct vsock_sock *vsk,
	ssize_t written, struct vsock_transport_send_notify_data *data)
{
	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_send_post_enqueue);

u64 virtio_transport_stream_rcvhiwat(struct vsock_sock *vsk)
{
	return vsk->buffer_size;
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_rcvhiwat);

bool virtio_transport_stream_is_active(struct vsock_sock *vsk)
{
	return true;
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_is_active);

bool virtio_transport_stream_allow(u32 cid, u32 port)
{
	return true;
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_allow);

int virtio_transport_dgram_bind(struct vsock_sock *vsk,
				struct sockaddr_vm *addr)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL_GPL(virtio_transport_dgram_bind);

bool virtio_transport_dgram_allow(u32 cid, u32 port)
{
	return false;
}
EXPORT_SYMBOL_GPL(virtio_transport_dgram_allow);

int virtio_transport_connect(struct vsock_sock *vsk)
{
	struct virtio_vsock_pkt_info info = {
		.op = VIRTIO_VSOCK_OP_REQUEST,
		.vsk = vsk,
	};

	return virtio_transport_send_pkt_info(vsk, &info);
}
EXPORT_SYMBOL_GPL(virtio_transport_connect);

int virtio_transport_shutdown(struct vsock_sock *vsk, int mode)
{
	struct virtio_vsock_pkt_info info = {
		.op = VIRTIO_VSOCK_OP_SHUTDOWN,
		.flags = (mode & RCV_SHUTDOWN ?
			  VIRTIO_VSOCK_SHUTDOWN_RCV : 0) |
			 (mode & SEND_SHUTDOWN ?
			  VIRTIO_VSOCK_SHUTDOWN_SEND : 0),
		.vsk = vsk,
	};

	return virtio_transport_send_pkt_info(vsk, &info);
}
EXPORT_SYMBOL_GPL(virtio_transport_shutdown);

int
virtio_transport_dgram_enqueue(struct vsock_sock *vsk,
			       struct sockaddr_vm *remote_addr,
			       struct msghdr *msg,
			       size_t dgram_len)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL_GPL(virtio_transport_dgram_enqueue);

ssize_t
virtio_transport_stream_enqueue(struct vsock_sock *vsk,
				struct msghdr *msg,
				size_t len)
{
	struct virtio_vsock_pkt_info info = {
		.op = VIRTIO_VSOCK_OP_RW,
		.msg = msg,
		.pkt_len = len,
		.vsk = vsk,
	};

	return virtio_transport_send_pkt_info(vsk, &info);
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_enqueue);

void virtio_transport_destruct(struct vsock_sock *vsk)
{
	struct virtio_vsock_sock *vvs = vsk->trans;

	kfree(vvs);
}
EXPORT_SYMBOL_GPL(virtio_transport_destruct);

static int virtio_transport_reset(struct vsock_sock *vsk,
				  struct sk_buff *skb)
{
	struct virtio_vsock_pkt_info info = {
		.op = VIRTIO_VSOCK_OP_RST,
		.reply = !!skb,
		.vsk = vsk,
	};

	/* Send RST only if the original pkt is not a RST pkt */
	if (skb && le16_to_cpu(virtio_vsock_hdr(skb)->op) == VIRTIO_VSOCK_OP_RST)
		return 0;

	return virtio_transport_send_pkt_info(vsk, &info);
}

/* Normally packets are associated with a socket.  There may be no socket if an
 * attempt was made to connect to a socket that does not exist.
 */
static int virtio_transport_reset_no_sock(const struct virtio_transport *t,
					  struct sk_buff *skb)
{
	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
	struct virtio_vsock_pkt_info info = {
		.op = VIRTIO_VSOCK_OP_RST,
		.type = le16_to_cpu(hdr->type),
		.reply = true,
	};
	struct sk_buff *reply;

	/* Send RST only if the original pkt is not a RST pkt */
	if (le16_to_cpu(hdr->op) == VIRTIO_VSOCK_OP_RST)
		return 0;

	reply = virtio_transport_alloc_skb(&info, 0,
					   le64_to_cpu(hdr->dst_cid),
					   le32_to_cpu(hdr->dst_port),
					   le64_to_cpu(hdr->src_cid),
					   le32_to_cpu(hdr->src_port));
	if (!reply)
		return -ENOMEM;

	if (!t) {
		kfree_skb(reply);
		return -ENOTCONN;
	}

	return t->send_pkt(reply);
}

/* This function should be called with sk_lock held and SOCK_DONE set */
static void virtio_transport_remove_sock(struct vsock_sock *vsk)
{
	struct virtio_vsock_sock *vvs = vsk->trans;

	/* We don't need to take rx_lock, as the socket is closing and we are
	 * removing it.
	 */
	__skb_queue_purge(&vvs->rx_queue);
	vsock_remove_sock(vsk);
}

static void virtio_transport_wait_close(struct sock *sk, long timeout)
{
	if (timeout) {
		DEFINE_WAIT_FUNC(wait, woken_wake_function);

		add_wait_queue(sk_sleep(sk), &wait);

		do {
			if (sk_wait_event(sk, &timeout,
					  sock_flag(sk, SOCK_DONE), &wait))
				break;
		} while (!signal_pending(current) && timeout);

		remove_wait_queue(sk_sleep(sk), &wait);
	}
}

static void virtio_transport_do_close(struct vsock_sock *vsk,
				      bool cancel_timeout)
{
	struct sock *sk = sk_vsock(vsk);

	sock_set_flag(sk, SOCK_DONE);
	vsk->peer_shutdown = SHUTDOWN_MASK;
	if (vsock_stream_has_data(vsk) <= 0)
		sk->sk_state = TCP_CLOSING;
	sk->sk_state_change(sk);

	if (vsk->close_work_scheduled &&
	    (!cancel_timeout || cancel_delayed_work(&vsk->close_work))) {
		vsk->close_work_scheduled = false;

		virtio_transport_remove_sock(vsk);

		/* Release refcnt obtained when we scheduled the timeout */
		sock_put(sk);
	}
}

static void virtio_transport_close_timeout(struct work_struct *work)
{
	struct vsock_sock *vsk =
		container_of(work, struct vsock_sock, close_work.work);
	struct sock *sk = sk_vsock(vsk);

	sock_hold(sk);
	lock_sock(sk);

	if (!sock_flag(sk, SOCK_DONE)) {
		(void)virtio_transport_reset(vsk, NULL);

		virtio_transport_do_close(vsk, false);
	}

	vsk->close_work_scheduled = false;

	release_sock(sk);
	sock_put(sk);
}

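/* Graceful close: send SHUTDOWN for both directions, optionally linger
 * until the peer acknowledges (SOCK_DONE), and otherwise arm a delayed
 * work so that a peer which never answers is reset and reaped after
 * VSOCK_CLOSE_TIMEOUT instead of pinning the socket forever.
 */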
/* User context, vsk->sk is locked */
static bool virtio_transport_close(struct vsock_sock *vsk)
{
	struct sock *sk = &vsk->sk;

	if (!(sk->sk_state == TCP_ESTABLISHED ||
	      sk->sk_state == TCP_CLOSING))
		return true;

	/* Already received SHUTDOWN from peer, reply with RST */
	if ((vsk->peer_shutdown & SHUTDOWN_MASK) == SHUTDOWN_MASK) {
		(void)virtio_transport_reset(vsk, NULL);
		return true;
	}

	if ((sk->sk_shutdown & SHUTDOWN_MASK) != SHUTDOWN_MASK)
		(void)virtio_transport_shutdown(vsk, SHUTDOWN_MASK);

	if (sock_flag(sk, SOCK_LINGER) && !(current->flags & PF_EXITING))
		virtio_transport_wait_close(sk, sk->sk_lingertime);

	if (sock_flag(sk, SOCK_DONE))
		return true;

	sock_hold(sk);
	INIT_DELAYED_WORK(&vsk->close_work,
			  virtio_transport_close_timeout);
	vsk->close_work_scheduled = true;
	schedule_delayed_work(&vsk->close_work, VSOCK_CLOSE_TIMEOUT);
	return false;
}

void virtio_transport_release(struct vsock_sock *vsk)
{
	struct sock *sk = &vsk->sk;
	bool remove_sock = true;

	if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)
		remove_sock = virtio_transport_close(vsk);

	if (remove_sock) {
		sock_set_flag(sk, SOCK_DONE);
		virtio_transport_remove_sock(vsk);
	}
}
EXPORT_SYMBOL_GPL(virtio_transport_release);

static int
virtio_transport_recv_connecting(struct sock *sk,
				 struct sk_buff *skb)
{
	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
	struct vsock_sock *vsk = vsock_sk(sk);
	int skerr;
	int err;

	switch (le16_to_cpu(hdr->op)) {
	case VIRTIO_VSOCK_OP_RESPONSE:
		sk->sk_state = TCP_ESTABLISHED;
		sk->sk_socket->state = SS_CONNECTED;
		vsock_insert_connected(vsk);
		sk->sk_state_change(sk);
		break;
	case VIRTIO_VSOCK_OP_INVALID:
		break;
	case VIRTIO_VSOCK_OP_RST:
		skerr = ECONNRESET;
		err = 0;
		goto destroy;
	default:
		skerr = EPROTO;
		err = -EINVAL;
		goto destroy;
	}
	return 0;

destroy:
	virtio_transport_reset(vsk, skb);
	sk->sk_state = TCP_CLOSE;
	sk->sk_err = skerr;
	sk_error_report(sk);
	return err;
}

static void
virtio_transport_recv_enqueue(struct vsock_sock *vsk,
			      struct sk_buff *skb)
{
	struct virtio_vsock_sock *vvs = vsk->trans;
	bool can_enqueue, free_pkt = false;
	struct virtio_vsock_hdr *hdr;
	u32 len;

	hdr = virtio_vsock_hdr(skb);
	len = le32_to_cpu(hdr->len);

	spin_lock_bh(&vvs->rx_lock);

	can_enqueue = virtio_transport_inc_rx_pkt(vvs, len);
	if (!can_enqueue) {
		free_pkt = true;
		goto out;
	}

	if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOM)
		vvs->msg_count++;

	/* Try to copy small packets into the buffer of last packet queued,
	 * to avoid wasting memory queueing the entire buffer with a small
	 * payload.
	 */
	if (len <= GOOD_COPY_LEN && !skb_queue_empty(&vvs->rx_queue)) {
		struct virtio_vsock_hdr *last_hdr;
		struct sk_buff *last_skb;

		last_skb = skb_peek_tail(&vvs->rx_queue);
		last_hdr = virtio_vsock_hdr(last_skb);

		/* If there is space in the last packet queued, we copy the
		 * new packet in its buffer. We avoid this if the last packet
		 * queued has VIRTIO_VSOCK_SEQ_EOM set, because this is the
		 * delimiter of a SEQPACKET message, so 'skb' is the first
		 * packet of a new message.
		 */
		if (skb->len < skb_tailroom(last_skb) &&
		    !(le32_to_cpu(last_hdr->flags) & VIRTIO_VSOCK_SEQ_EOM)) {
			memcpy(skb_put(last_skb, skb->len), skb->data, skb->len);
			free_pkt = true;
			last_hdr->flags |= hdr->flags;
			last_hdr->len = cpu_to_le32(last_skb->len);
			goto out;
		}
	}

	__skb_queue_tail(&vvs->rx_queue, skb);

out:
	spin_unlock_bh(&vvs->rx_lock);
	if (free_pkt)
		kfree_skb(skb);
}

static int
virtio_transport_recv_connected(struct sock *sk,
				struct sk_buff *skb)
{
	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
	struct vsock_sock *vsk = vsock_sk(sk);
	int err = 0;

	switch (le16_to_cpu(hdr->op)) {
	case VIRTIO_VSOCK_OP_RW:
		virtio_transport_recv_enqueue(vsk, skb);
		vsock_data_ready(sk);
		return err;
	case VIRTIO_VSOCK_OP_CREDIT_REQUEST:
		virtio_transport_send_credit_update(vsk);
		break;
	case VIRTIO_VSOCK_OP_CREDIT_UPDATE:
		sk->sk_write_space(sk);
		break;
	case VIRTIO_VSOCK_OP_SHUTDOWN:
		if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SHUTDOWN_RCV)
			vsk->peer_shutdown |= RCV_SHUTDOWN;
		if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SHUTDOWN_SEND)
			vsk->peer_shutdown |= SEND_SHUTDOWN;
		if (vsk->peer_shutdown == SHUTDOWN_MASK &&
		    vsock_stream_has_data(vsk) <= 0 &&
		    !sock_flag(sk, SOCK_DONE)) {
			(void)virtio_transport_reset(vsk, NULL);
			virtio_transport_do_close(vsk, true);
		}
		if (le32_to_cpu(hdr->flags))
			sk->sk_state_change(sk);
		break;
	case VIRTIO_VSOCK_OP_RST:
		virtio_transport_do_close(vsk, true);
		break;
	default:
		err = -EINVAL;
		break;
	}

	kfree_skb(skb);
	return err;
}

static void
virtio_transport_recv_disconnecting(struct sock *sk,
				    struct sk_buff *skb)
{
	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
	struct vsock_sock *vsk = vsock_sk(sk);

	if (le16_to_cpu(hdr->op) == VIRTIO_VSOCK_OP_RST)
		virtio_transport_do_close(vsk, true);
}

static int
virtio_transport_send_response(struct vsock_sock *vsk,
			       struct sk_buff *skb)
{
	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
	struct virtio_vsock_pkt_info info = {
		.op = VIRTIO_VSOCK_OP_RESPONSE,
		.remote_cid = le64_to_cpu(hdr->src_cid),
		.remote_port = le32_to_cpu(hdr->src_port),
		.reply = true,
		.vsk = vsk,
	};

	return virtio_transport_send_pkt_info(vsk, &info);
}

static bool virtio_transport_space_update(struct sock *sk,
					  struct sk_buff *skb)
{
	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
	struct vsock_sock *vsk = vsock_sk(sk);
	struct virtio_vsock_sock *vvs = vsk->trans;
	bool space_available;

	/* Listener sockets are not associated with any transport, so we are
	 * not able to take the state to see if there is space available in the
	 * remote peer, but since they are only used to receive requests, we
	 * can assume that there is always space available in the other peer.
	 */
	if (!vvs)
		return true;

	/* buf_alloc and fwd_cnt are always included in the hdr */
	spin_lock_bh(&vvs->tx_lock);
	vvs->peer_buf_alloc = le32_to_cpu(hdr->buf_alloc);
	vvs->peer_fwd_cnt = le32_to_cpu(hdr->fwd_cnt);
	space_available = virtio_transport_has_space(vsk);
	spin_unlock_bh(&vvs->tx_lock);
	return space_available;
}

/* Handle server socket */
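/* First step of the connect handshake on the listen side: a REQUEST
 * creates a connected child socket, which answers with RESPONSE; the
 * child is then queued for accept().  Anything other than REQUEST is
 * answered with a RST.
 */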
static int
virtio_transport_recv_listen(struct sock *sk, struct sk_buff *skb,
			     struct virtio_transport *t)
{
	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
	struct vsock_sock *vsk = vsock_sk(sk);
	struct vsock_sock *vchild;
	struct sock *child;
	int ret;

	if (le16_to_cpu(hdr->op) != VIRTIO_VSOCK_OP_REQUEST) {
		virtio_transport_reset_no_sock(t, skb);
		return -EINVAL;
	}

	if (sk_acceptq_is_full(sk)) {
		virtio_transport_reset_no_sock(t, skb);
		return -ENOMEM;
	}

	child = vsock_create_connected(sk);
	if (!child) {
		virtio_transport_reset_no_sock(t, skb);
		return -ENOMEM;
	}

	sk_acceptq_added(sk);

	lock_sock_nested(child, SINGLE_DEPTH_NESTING);

	child->sk_state = TCP_ESTABLISHED;

	vchild = vsock_sk(child);
	vsock_addr_init(&vchild->local_addr, le64_to_cpu(hdr->dst_cid),
			le32_to_cpu(hdr->dst_port));
	vsock_addr_init(&vchild->remote_addr, le64_to_cpu(hdr->src_cid),
			le32_to_cpu(hdr->src_port));

	ret = vsock_assign_transport(vchild, vsk);
	/* The transport assigned (based on remote_addr) must be the same
	 * transport on which we received the request.
	 */
	if (ret || vchild->transport != &t->transport) {
		release_sock(child);
		virtio_transport_reset_no_sock(t, skb);
		sock_put(child);
		return ret;
	}

	if (virtio_transport_space_update(child, skb))
		child->sk_write_space(child);

	vsock_insert_connected(vchild);
	vsock_enqueue_accept(sk, child);
	virtio_transport_send_response(vchild, skb);

	release_sock(child);

	sk->sk_data_ready(sk);
	return 0;
}

static bool virtio_transport_valid_type(u16 type)
{
	return (type == VIRTIO_VSOCK_TYPE_STREAM) ||
	       (type == VIRTIO_VSOCK_TYPE_SEQPACKET);
}

/* We are under the virtio-vsock's vsock->rx_lock or vhost-vsock's vq->mutex
 * lock.
 */
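/* Note on skb ownership: the TCP_ESTABLISHED path hands the skb to
 * virtio_transport_recv_connected(), which either queues it on the
 * socket or frees it; every other branch (and every error path) frees
 * the skb here.
 */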
void virtio_transport_recv_pkt(struct virtio_transport *t,
			       struct sk_buff *skb)
{
	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
	struct sockaddr_vm src, dst;
	struct vsock_sock *vsk;
	struct sock *sk;
	bool space_available;

	vsock_addr_init(&src, le64_to_cpu(hdr->src_cid),
			le32_to_cpu(hdr->src_port));
	vsock_addr_init(&dst, le64_to_cpu(hdr->dst_cid),
			le32_to_cpu(hdr->dst_port));

	trace_virtio_transport_recv_pkt(src.svm_cid, src.svm_port,
					dst.svm_cid, dst.svm_port,
					le32_to_cpu(hdr->len),
					le16_to_cpu(hdr->type),
					le16_to_cpu(hdr->op),
					le32_to_cpu(hdr->flags),
					le32_to_cpu(hdr->buf_alloc),
					le32_to_cpu(hdr->fwd_cnt));

	if (!virtio_transport_valid_type(le16_to_cpu(hdr->type))) {
		(void)virtio_transport_reset_no_sock(t, skb);
		goto free_pkt;
	}

	/* The socket must be in connected or bound table
	 * otherwise send reset back
	 */
	sk = vsock_find_connected_socket(&src, &dst);
	if (!sk) {
		sk = vsock_find_bound_socket(&dst);
		if (!sk) {
			(void)virtio_transport_reset_no_sock(t, skb);
			goto free_pkt;
		}
	}

	if (virtio_transport_get_type(sk) != le16_to_cpu(hdr->type)) {
		(void)virtio_transport_reset_no_sock(t, skb);
		sock_put(sk);
		goto free_pkt;
	}

	vsk = vsock_sk(sk);

	lock_sock(sk);

	/* Check if sk has been closed before lock_sock */
	if (sock_flag(sk, SOCK_DONE)) {
		(void)virtio_transport_reset_no_sock(t, skb);
		release_sock(sk);
		sock_put(sk);
		goto free_pkt;
	}

	space_available = virtio_transport_space_update(sk, skb);

	/* Update CID in case it has changed after a transport reset event */
	if (vsk->local_addr.svm_cid != VMADDR_CID_ANY)
		vsk->local_addr.svm_cid = dst.svm_cid;

	if (space_available)
		sk->sk_write_space(sk);

	switch (sk->sk_state) {
	case TCP_LISTEN:
		virtio_transport_recv_listen(sk, skb, t);
		kfree_skb(skb);
		break;
	case TCP_SYN_SENT:
		virtio_transport_recv_connecting(sk, skb);
		kfree_skb(skb);
		break;
	case TCP_ESTABLISHED:
		virtio_transport_recv_connected(sk, skb);
		break;
	case TCP_CLOSING:
		virtio_transport_recv_disconnecting(sk, skb);
		kfree_skb(skb);
		break;
	default:
		(void)virtio_transport_reset_no_sock(t, skb);
		kfree_skb(skb);
		break;
	}

	release_sock(sk);

	/* Release refcnt obtained when we fetched this socket out of the
	 * bound or connected list.
	 */
	sock_put(sk);
	return;

free_pkt:
	kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(virtio_transport_recv_pkt);

/* Remove skbs found in a queue whose vsk matches.
 *
 * Each skb is freed.
 *
 * Returns the count of skbs that were reply packets.
 */
int virtio_transport_purge_skbs(void *vsk, struct sk_buff_head *queue)
{
	struct sk_buff_head freeme;
	struct sk_buff *skb, *tmp;
	int cnt = 0;

	skb_queue_head_init(&freeme);

	spin_lock_bh(&queue->lock);
	skb_queue_walk_safe(queue, skb, tmp) {
		if (vsock_sk(skb->sk) != vsk)
			continue;

		__skb_unlink(skb, queue);
		__skb_queue_tail(&freeme, skb);

		if (virtio_vsock_skb_reply(skb))
			cnt++;
	}
	spin_unlock_bh(&queue->lock);

	__skb_queue_purge(&freeme);

	return cnt;
}
EXPORT_SYMBOL_GPL(virtio_transport_purge_skbs);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("common code for virtio vsock");