// SPDX-License-Identifier: GPL-2.0-only
/*
 * common code for virtio vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 */
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/virtio_vsock.h>
#include <uapi/linux/vsockmon.h>

#include <net/sock.h>
#include <net/af_vsock.h>

#define CREATE_TRACE_POINTS
#include <trace/events/vsock_virtio_transport_common.h>

/* How long to wait for graceful shutdown of a connection */
#define VSOCK_CLOSE_TIMEOUT (8 * HZ)

/* Threshold for detecting small packets to copy */
#define GOOD_COPY_LEN 128

static const struct virtio_transport *
virtio_transport_get_ops(struct vsock_sock *vsk)
{
	const struct vsock_transport *t = vsock_core_get_transport(vsk);

	if (WARN_ON(!t))
		return NULL;

	return container_of(t, struct virtio_transport, transport);
}

/* Returns a new packet on success, otherwise returns NULL.
 *
 * If NULL is returned, errp is set to a negative errno.
 */
static struct sk_buff *
virtio_transport_alloc_skb(struct virtio_vsock_pkt_info *info,
			   size_t len,
			   u32 src_cid,
			   u32 src_port,
			   u32 dst_cid,
			   u32 dst_port)
{
	const size_t skb_len = VIRTIO_VSOCK_SKB_HEADROOM + len;
	struct virtio_vsock_hdr *hdr;
	struct sk_buff *skb;
	void *payload;
	int err;

	skb = virtio_vsock_alloc_skb(skb_len, GFP_KERNEL);
	if (!skb)
		return NULL;

	hdr = virtio_vsock_hdr(skb);
	hdr->type = cpu_to_le16(info->type);
	hdr->op = cpu_to_le16(info->op);
	hdr->src_cid = cpu_to_le64(src_cid);
	hdr->dst_cid = cpu_to_le64(dst_cid);
	hdr->src_port = cpu_to_le32(src_port);
	hdr->dst_port = cpu_to_le32(dst_port);
	hdr->flags = cpu_to_le32(info->flags);
	hdr->len = cpu_to_le32(len);

	if (info->msg && len > 0) {
		payload = skb_put(skb, len);
		err = memcpy_from_msg(payload, info->msg, len);
		if (err)
			goto out;

		if (msg_data_left(info->msg) == 0 &&
		    info->type == VIRTIO_VSOCK_TYPE_SEQPACKET) {
			hdr->flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM);

			if (info->msg->msg_flags & MSG_EOR)
				hdr->flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
		}
	}

	if (info->reply)
		virtio_vsock_skb_set_reply(skb);

	trace_virtio_transport_alloc_pkt(src_cid, src_port,
					 dst_cid, dst_port,
					 len,
					 info->type,
					 info->op,
					 info->flags);

	if (info->vsk && !skb_set_owner_sk_safe(skb, sk_vsock(info->vsk))) {
		WARN_ONCE(1, "failed to allocate skb on vsock socket with sk_refcnt == 0\n");
		goto out;
	}

	return skb;

out:
	kfree_skb(skb);
	return NULL;
}
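/* Illustrative example (not part of the original file): for a 4 KiB
 * stream write from guest CID 3 to host CID 2, the header built above
 * carries op = VIRTIO_VSOCK_OP_RW, type = VIRTIO_VSOCK_TYPE_STREAM,
 * len = 4096 and the CID/port pairs, all stored little-endian as the
 * virtio-vsock device model expects.
 */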
/* Packet capture */
static struct sk_buff *virtio_transport_build_skb(void *opaque)
{
	struct virtio_vsock_hdr *pkt_hdr;
	struct sk_buff *pkt = opaque;
	struct af_vsockmon_hdr *hdr;
	struct sk_buff *skb;
	size_t payload_len;
	void *payload_buf;

	/* A packet could be split to fit the RX buffer, so we can retrieve
	 * the payload length from the header and the buffer pointer taking
	 * care of the offset in the original packet.
	 */
	pkt_hdr = virtio_vsock_hdr(pkt);
	payload_len = pkt->len;
	payload_buf = pkt->data;

	skb = alloc_skb(sizeof(*hdr) + sizeof(*pkt_hdr) + payload_len,
			GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, sizeof(*hdr));

	/* pkt->hdr is little-endian so no need to byteswap here */
	hdr->src_cid = pkt_hdr->src_cid;
	hdr->src_port = pkt_hdr->src_port;
	hdr->dst_cid = pkt_hdr->dst_cid;
	hdr->dst_port = pkt_hdr->dst_port;

	hdr->transport = cpu_to_le16(AF_VSOCK_TRANSPORT_VIRTIO);
	hdr->len = cpu_to_le16(sizeof(*pkt_hdr));
	memset(hdr->reserved, 0, sizeof(hdr->reserved));

	switch (le16_to_cpu(pkt_hdr->op)) {
	case VIRTIO_VSOCK_OP_REQUEST:
	case VIRTIO_VSOCK_OP_RESPONSE:
		hdr->op = cpu_to_le16(AF_VSOCK_OP_CONNECT);
		break;
	case VIRTIO_VSOCK_OP_RST:
	case VIRTIO_VSOCK_OP_SHUTDOWN:
		hdr->op = cpu_to_le16(AF_VSOCK_OP_DISCONNECT);
		break;
	case VIRTIO_VSOCK_OP_RW:
		hdr->op = cpu_to_le16(AF_VSOCK_OP_PAYLOAD);
		break;
	case VIRTIO_VSOCK_OP_CREDIT_UPDATE:
	case VIRTIO_VSOCK_OP_CREDIT_REQUEST:
		hdr->op = cpu_to_le16(AF_VSOCK_OP_CONTROL);
		break;
	default:
		hdr->op = cpu_to_le16(AF_VSOCK_OP_UNKNOWN);
		break;
	}

	skb_put_data(skb, pkt_hdr, sizeof(*pkt_hdr));

	if (payload_len)
		skb_put_data(skb, payload_buf, payload_len);

	return skb;
}

void virtio_transport_deliver_tap_pkt(struct sk_buff *skb)
{
	if (virtio_vsock_skb_tap_delivered(skb))
		return;

	vsock_deliver_tap(virtio_transport_build_skb, skb);
	virtio_vsock_skb_set_tap_delivered(skb);
}
EXPORT_SYMBOL_GPL(virtio_transport_deliver_tap_pkt);
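/* What the capture path above produces (illustrative summary, assuming
 * a vsockmon device is bound): each delivered skb is an af_vsockmon_hdr,
 * followed by the raw little-endian virtio_vsock_hdr, followed by the
 * payload, so userspace tooling can decode connect/disconnect/payload/
 * control events without knowing virtio internals.
 */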
static u16 virtio_transport_get_type(struct sock *sk)
{
	if (sk->sk_type == SOCK_STREAM)
		return VIRTIO_VSOCK_TYPE_STREAM;
	else
		return VIRTIO_VSOCK_TYPE_SEQPACKET;
}

/* This function can only be used on connecting/connected sockets,
 * since a socket assigned to a transport is required.
 *
 * Do not use on listener sockets!
 */
static int virtio_transport_send_pkt_info(struct vsock_sock *vsk,
					  struct virtio_vsock_pkt_info *info)
{
	u32 src_cid, src_port, dst_cid, dst_port;
	const struct virtio_transport *t_ops;
	struct virtio_vsock_sock *vvs;
	u32 pkt_len = info->pkt_len;
	struct sk_buff *skb;

	info->type = virtio_transport_get_type(sk_vsock(vsk));

	t_ops = virtio_transport_get_ops(vsk);
	if (unlikely(!t_ops))
		return -EFAULT;

	src_cid = t_ops->transport.get_local_cid();
	src_port = vsk->local_addr.svm_port;
	if (!info->remote_cid) {
		dst_cid = vsk->remote_addr.svm_cid;
		dst_port = vsk->remote_addr.svm_port;
	} else {
		dst_cid = info->remote_cid;
		dst_port = info->remote_port;
	}

	vvs = vsk->trans;

	/* we can send less than pkt_len bytes */
	if (pkt_len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE)
		pkt_len = VIRTIO_VSOCK_MAX_PKT_BUF_SIZE;

	/* virtio_transport_get_credit might return less than pkt_len credit */
	pkt_len = virtio_transport_get_credit(vvs, pkt_len);

	/* Do not send zero length OP_RW pkt */
	if (pkt_len == 0 && info->op == VIRTIO_VSOCK_OP_RW)
		return pkt_len;

	skb = virtio_transport_alloc_skb(info, pkt_len,
					 src_cid, src_port,
					 dst_cid, dst_port);
	if (!skb) {
		virtio_transport_put_credit(vvs, pkt_len);
		return -ENOMEM;
	}

	virtio_transport_inc_tx_pkt(vvs, skb);

	return t_ops->send_pkt(skb);
}

static bool virtio_transport_inc_rx_pkt(struct virtio_vsock_sock *vvs,
					u32 len)
{
	if (vvs->rx_bytes + len > vvs->buf_alloc)
		return false;

	vvs->rx_bytes += len;
	return true;
}

static void virtio_transport_dec_rx_pkt(struct virtio_vsock_sock *vvs,
					u32 len)
{
	vvs->rx_bytes -= len;
	vvs->fwd_cnt += len;
}

void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct sk_buff *skb)
{
	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);

	spin_lock_bh(&vvs->rx_lock);
	vvs->last_fwd_cnt = vvs->fwd_cnt;
	hdr->fwd_cnt = cpu_to_le32(vvs->fwd_cnt);
	hdr->buf_alloc = cpu_to_le32(vvs->buf_alloc);
	spin_unlock_bh(&vvs->rx_lock);
}
EXPORT_SYMBOL_GPL(virtio_transport_inc_tx_pkt);

u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 credit)
{
	u32 ret;

	spin_lock_bh(&vvs->tx_lock);
	ret = vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt);
	if (ret > credit)
		ret = credit;
	vvs->tx_cnt += ret;
	spin_unlock_bh(&vvs->tx_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(virtio_transport_get_credit);

void virtio_transport_put_credit(struct virtio_vsock_sock *vvs, u32 credit)
{
	spin_lock_bh(&vvs->tx_lock);
	vvs->tx_cnt -= credit;
	spin_unlock_bh(&vvs->tx_lock);
}
EXPORT_SYMBOL_GPL(virtio_transport_put_credit);

static int virtio_transport_send_credit_update(struct vsock_sock *vsk)
{
	struct virtio_vsock_pkt_info info = {
		.op = VIRTIO_VSOCK_OP_CREDIT_UPDATE,
		.vsk = vsk,
	};

	return virtio_transport_send_pkt_info(vsk, &info);
}
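/* Worked example of the credit accounting above (illustrative numbers):
 * with peer_buf_alloc = 64 KiB, tx_cnt = 48 KiB already sent and
 * peer_fwd_cnt = 16 KiB acknowledged as consumed, the bytes in flight
 * are 48 - 16 = 32 KiB, so virtio_transport_get_credit() grants at most
 * 64 - 32 = 32 KiB. If the skb allocation then fails, the grant is
 * handed back with virtio_transport_put_credit().
 */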
static ssize_t
virtio_transport_stream_do_peek(struct vsock_sock *vsk,
				struct msghdr *msg,
				size_t len)
{
	struct virtio_vsock_sock *vvs = vsk->trans;
	size_t bytes, total = 0, off;
	struct sk_buff *skb, *tmp;
	int err = -EFAULT;

	spin_lock_bh(&vvs->rx_lock);

	skb_queue_walk_safe(&vvs->rx_queue, skb, tmp) {
		off = 0;

		if (total == len)
			break;

		while (total < len && off < skb->len) {
			bytes = len - total;
			if (bytes > skb->len - off)
				bytes = skb->len - off;

			/* sk_lock is held by caller so no one else can dequeue.
			 * Unlock rx_lock since memcpy_to_msg() may sleep.
			 */
			spin_unlock_bh(&vvs->rx_lock);

			err = memcpy_to_msg(msg, skb->data + off, bytes);
			if (err)
				goto out;

			spin_lock_bh(&vvs->rx_lock);

			total += bytes;
			off += bytes;
		}
	}

	spin_unlock_bh(&vvs->rx_lock);

	return total;

out:
	if (total)
		err = total;
	return err;
}

static ssize_t
virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
				   struct msghdr *msg,
				   size_t len)
{
	struct virtio_vsock_sock *vvs = vsk->trans;
	size_t bytes, total = 0;
	struct sk_buff *skb;
	int err = -EFAULT;
	u32 free_space;

	spin_lock_bh(&vvs->rx_lock);

	if (WARN_ONCE(skb_queue_empty(&vvs->rx_queue) && vvs->rx_bytes,
		      "rx_queue is empty, but rx_bytes is non-zero\n")) {
		spin_unlock_bh(&vvs->rx_lock);
		return err;
	}

	while (total < len && !skb_queue_empty(&vvs->rx_queue)) {
		skb = skb_peek(&vvs->rx_queue);

		bytes = len - total;
		if (bytes > skb->len)
			bytes = skb->len;

		/* sk_lock is held by caller so no one else can dequeue.
		 * Unlock rx_lock since memcpy_to_msg() may sleep.
		 */
		spin_unlock_bh(&vvs->rx_lock);

		err = memcpy_to_msg(msg, skb->data, bytes);
		if (err)
			goto out;

		spin_lock_bh(&vvs->rx_lock);

		total += bytes;
		skb_pull(skb, bytes);

		if (skb->len == 0) {
			u32 pkt_len = le32_to_cpu(virtio_vsock_hdr(skb)->len);

			virtio_transport_dec_rx_pkt(vvs, pkt_len);
			__skb_unlink(skb, &vvs->rx_queue);
			consume_skb(skb);
		}
	}

	free_space = vvs->buf_alloc - (vvs->fwd_cnt - vvs->last_fwd_cnt);

	spin_unlock_bh(&vvs->rx_lock);

	/* To reduce the number of credit update messages,
	 * don't update credits as long as lots of space is available.
	 * Note: the limit chosen here is arbitrary. Setting the limit
	 * too high causes extra messages. Too low causes transmitter
	 * stalls. As stalls are in theory more expensive than extra
	 * messages, we set the limit to a high value. TODO: experiment
	 * with different values.
	 */
	if (free_space < VIRTIO_VSOCK_MAX_PKT_BUF_SIZE)
		virtio_transport_send_credit_update(vsk);

	return total;

out:
	if (total)
		err = total;
	return err;
}
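/* Illustrative reading of the threshold above: with buf_alloc = 256 KiB
 * and VIRTIO_VSOCK_MAX_PKT_BUF_SIZE = 64 KiB, a credit update is only
 * sent once more than 192 KiB have been consumed since the last
 * advertisement to the peer, i.e. once the remaining free space drops
 * below one maximum-sized packet.
 */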
static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk,
						 struct msghdr *msg,
						 int flags)
{
	struct virtio_vsock_sock *vvs = vsk->trans;
	int dequeued_len = 0;
	size_t user_buf_len = msg_data_left(msg);
	bool msg_ready = false;
	struct sk_buff *skb;

	spin_lock_bh(&vvs->rx_lock);

	if (vvs->msg_count == 0) {
		spin_unlock_bh(&vvs->rx_lock);
		return 0;
	}

	while (!msg_ready) {
		struct virtio_vsock_hdr *hdr;
		size_t pkt_len;

		skb = __skb_dequeue(&vvs->rx_queue);
		if (!skb)
			break;
		hdr = virtio_vsock_hdr(skb);
		pkt_len = (size_t)le32_to_cpu(hdr->len);

		if (dequeued_len >= 0) {
			size_t bytes_to_copy;

			bytes_to_copy = min(user_buf_len, pkt_len);

			if (bytes_to_copy) {
				int err;

				/* sk_lock is held by caller so no one else can dequeue.
				 * Unlock rx_lock since memcpy_to_msg() may sleep.
				 */
				spin_unlock_bh(&vvs->rx_lock);

				err = memcpy_to_msg(msg, skb->data, bytes_to_copy);
				if (err) {
					/* Copy of message failed. Rest of
					 * fragments will be freed without copy.
					 */
					dequeued_len = err;
				} else {
					user_buf_len -= bytes_to_copy;
				}

				spin_lock_bh(&vvs->rx_lock);
			}

			if (dequeued_len >= 0)
				dequeued_len += pkt_len;
		}

		if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOM) {
			msg_ready = true;
			vvs->msg_count--;

			if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOR)
				msg->msg_flags |= MSG_EOR;
		}

		virtio_transport_dec_rx_pkt(vvs, pkt_len);
		kfree_skb(skb);
	}

	spin_unlock_bh(&vvs->rx_lock);

	virtio_transport_send_credit_update(vsk);

	return dequeued_len;
}

ssize_t
virtio_transport_stream_dequeue(struct vsock_sock *vsk,
				struct msghdr *msg,
				size_t len, int flags)
{
	if (flags & MSG_PEEK)
		return virtio_transport_stream_do_peek(vsk, msg, len);
	else
		return virtio_transport_stream_do_dequeue(vsk, msg, len);
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_dequeue);

ssize_t
virtio_transport_seqpacket_dequeue(struct vsock_sock *vsk,
				   struct msghdr *msg,
				   int flags)
{
	if (flags & MSG_PEEK)
		return -EOPNOTSUPP;

	return virtio_transport_seqpacket_do_dequeue(vsk, msg, flags);
}
EXPORT_SYMBOL_GPL(virtio_transport_seqpacket_dequeue);

int
virtio_transport_seqpacket_enqueue(struct vsock_sock *vsk,
				   struct msghdr *msg,
				   size_t len)
{
	struct virtio_vsock_sock *vvs = vsk->trans;

	spin_lock_bh(&vvs->tx_lock);

	if (len > vvs->peer_buf_alloc) {
		spin_unlock_bh(&vvs->tx_lock);
		return -EMSGSIZE;
	}

	spin_unlock_bh(&vvs->tx_lock);

	return virtio_transport_stream_enqueue(vsk, msg, len);
}
EXPORT_SYMBOL_GPL(virtio_transport_seqpacket_enqueue);
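/* Message boundary example (illustrative): a 128 KiB SEQPACKET send is
 * fragmented into two 64 KiB RW packets; only the second carries
 * VIRTIO_VSOCK_SEQ_EOM (plus VIRTIO_VSOCK_SEQ_EOR if the sender passed
 * MSG_EOR), so the dequeue loop above keeps consuming fragments until
 * it sees EOM and reports the full message length, even when the user
 * buffer was smaller and the tail could not be copied.
 */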
int
virtio_transport_dgram_dequeue(struct vsock_sock *vsk,
			       struct msghdr *msg,
			       size_t len, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL_GPL(virtio_transport_dgram_dequeue);

s64 virtio_transport_stream_has_data(struct vsock_sock *vsk)
{
	struct virtio_vsock_sock *vvs = vsk->trans;
	s64 bytes;

	spin_lock_bh(&vvs->rx_lock);
	bytes = vvs->rx_bytes;
	spin_unlock_bh(&vvs->rx_lock);

	return bytes;
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_has_data);

u32 virtio_transport_seqpacket_has_data(struct vsock_sock *vsk)
{
	struct virtio_vsock_sock *vvs = vsk->trans;
	u32 msg_count;

	spin_lock_bh(&vvs->rx_lock);
	msg_count = vvs->msg_count;
	spin_unlock_bh(&vvs->rx_lock);

	return msg_count;
}
EXPORT_SYMBOL_GPL(virtio_transport_seqpacket_has_data);

static s64 virtio_transport_has_space(struct vsock_sock *vsk)
{
	struct virtio_vsock_sock *vvs = vsk->trans;
	s64 bytes;

	bytes = vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt);
	if (bytes < 0)
		bytes = 0;

	return bytes;
}

s64 virtio_transport_stream_has_space(struct vsock_sock *vsk)
{
	struct virtio_vsock_sock *vvs = vsk->trans;
	s64 bytes;

	spin_lock_bh(&vvs->tx_lock);
	bytes = virtio_transport_has_space(vsk);
	spin_unlock_bh(&vvs->tx_lock);

	return bytes;
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_has_space);

int virtio_transport_do_socket_init(struct vsock_sock *vsk,
				    struct vsock_sock *psk)
{
	struct virtio_vsock_sock *vvs;

	vvs = kzalloc(sizeof(*vvs), GFP_KERNEL);
	if (!vvs)
		return -ENOMEM;

	vsk->trans = vvs;
	vvs->vsk = vsk;
	if (psk && psk->trans) {
		struct virtio_vsock_sock *ptrans = psk->trans;

		vvs->peer_buf_alloc = ptrans->peer_buf_alloc;
	}

	if (vsk->buffer_size > VIRTIO_VSOCK_MAX_BUF_SIZE)
		vsk->buffer_size = VIRTIO_VSOCK_MAX_BUF_SIZE;

	vvs->buf_alloc = vsk->buffer_size;

	spin_lock_init(&vvs->rx_lock);
	spin_lock_init(&vvs->tx_lock);
	skb_queue_head_init(&vvs->rx_queue);

	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_do_socket_init);

/* sk_lock held by the caller */
void virtio_transport_notify_buffer_size(struct vsock_sock *vsk, u64 *val)
{
	struct virtio_vsock_sock *vvs = vsk->trans;

	if (*val > VIRTIO_VSOCK_MAX_BUF_SIZE)
		*val = VIRTIO_VSOCK_MAX_BUF_SIZE;

	vvs->buf_alloc = *val;

	virtio_transport_send_credit_update(vsk);
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_buffer_size);

int
virtio_transport_notify_poll_in(struct vsock_sock *vsk,
				size_t target,
				bool *data_ready_now)
{
	*data_ready_now = vsock_stream_has_data(vsk) >= target;

	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_poll_in);

int
virtio_transport_notify_poll_out(struct vsock_sock *vsk,
				 size_t target,
				 bool *space_avail_now)
{
	s64 free_space;

	free_space = vsock_stream_has_space(vsk);
	if (free_space > 0)
		*space_avail_now = true;
	else if (free_space == 0)
		*space_avail_now = false;

	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_poll_out);

int virtio_transport_notify_recv_init(struct vsock_sock *vsk,
	size_t target, struct vsock_transport_recv_notify_data *data)
{
	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_init);

int virtio_transport_notify_recv_pre_block(struct vsock_sock *vsk,
	size_t target, struct vsock_transport_recv_notify_data *data)
{
	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_pre_block);

int virtio_transport_notify_recv_pre_dequeue(struct vsock_sock *vsk,
	size_t target, struct vsock_transport_recv_notify_data *data)
{
	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_pre_dequeue);

int virtio_transport_notify_recv_post_dequeue(struct vsock_sock *vsk,
	size_t target, ssize_t copied, bool data_read,
	struct vsock_transport_recv_notify_data *data)
{
	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_post_dequeue);

int virtio_transport_notify_send_init(struct vsock_sock *vsk,
	struct vsock_transport_send_notify_data *data)
{
	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_send_init);

int virtio_transport_notify_send_pre_block(struct vsock_sock *vsk,
	struct vsock_transport_send_notify_data *data)
{
	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_send_pre_block);

int virtio_transport_notify_send_pre_enqueue(struct vsock_sock *vsk,
	struct vsock_transport_send_notify_data *data)
{
	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_send_pre_enqueue);

int virtio_transport_notify_send_post_enqueue(struct vsock_sock *vsk,
	ssize_t written, struct vsock_transport_send_notify_data *data)
{
	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_send_post_enqueue);
u64 virtio_transport_stream_rcvhiwat(struct vsock_sock *vsk)
{
	return vsk->buffer_size;
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_rcvhiwat);

bool virtio_transport_stream_is_active(struct vsock_sock *vsk)
{
	return true;
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_is_active);

bool virtio_transport_stream_allow(u32 cid, u32 port)
{
	return true;
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_allow);

int virtio_transport_dgram_bind(struct vsock_sock *vsk,
				struct sockaddr_vm *addr)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL_GPL(virtio_transport_dgram_bind);

bool virtio_transport_dgram_allow(u32 cid, u32 port)
{
	return false;
}
EXPORT_SYMBOL_GPL(virtio_transport_dgram_allow);

int virtio_transport_connect(struct vsock_sock *vsk)
{
	struct virtio_vsock_pkt_info info = {
		.op = VIRTIO_VSOCK_OP_REQUEST,
		.vsk = vsk,
	};

	return virtio_transport_send_pkt_info(vsk, &info);
}
EXPORT_SYMBOL_GPL(virtio_transport_connect);

int virtio_transport_shutdown(struct vsock_sock *vsk, int mode)
{
	struct virtio_vsock_pkt_info info = {
		.op = VIRTIO_VSOCK_OP_SHUTDOWN,
		.flags = (mode & RCV_SHUTDOWN ?
			  VIRTIO_VSOCK_SHUTDOWN_RCV : 0) |
			 (mode & SEND_SHUTDOWN ?
			  VIRTIO_VSOCK_SHUTDOWN_SEND : 0),
		.vsk = vsk,
	};

	return virtio_transport_send_pkt_info(vsk, &info);
}
EXPORT_SYMBOL_GPL(virtio_transport_shutdown);
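/* Illustrative mapping for the function above: a userspace
 * shutdown(fd, SHUT_RDWR) arrives with both RCV_SHUTDOWN and
 * SEND_SHUTDOWN set in 'mode', so the SHUTDOWN packet carries both
 * VIRTIO_VSOCK_SHUTDOWN_RCV and VIRTIO_VSOCK_SHUTDOWN_SEND; SHUT_RD
 * and SHUT_WR each set only the corresponding bit.
 */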
int
virtio_transport_dgram_enqueue(struct vsock_sock *vsk,
			       struct sockaddr_vm *remote_addr,
			       struct msghdr *msg,
			       size_t dgram_len)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL_GPL(virtio_transport_dgram_enqueue);

ssize_t
virtio_transport_stream_enqueue(struct vsock_sock *vsk,
				struct msghdr *msg,
				size_t len)
{
	struct virtio_vsock_pkt_info info = {
		.op = VIRTIO_VSOCK_OP_RW,
		.msg = msg,
		.pkt_len = len,
		.vsk = vsk,
	};

	return virtio_transport_send_pkt_info(vsk, &info);
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_enqueue);

void virtio_transport_destruct(struct vsock_sock *vsk)
{
	struct virtio_vsock_sock *vvs = vsk->trans;

	kfree(vvs);
}
EXPORT_SYMBOL_GPL(virtio_transport_destruct);

static int virtio_transport_reset(struct vsock_sock *vsk,
				  struct sk_buff *skb)
{
	struct virtio_vsock_pkt_info info = {
		.op = VIRTIO_VSOCK_OP_RST,
		.reply = !!skb,
		.vsk = vsk,
	};

	/* Send RST only if the original pkt is not a RST pkt */
	if (skb && le16_to_cpu(virtio_vsock_hdr(skb)->op) == VIRTIO_VSOCK_OP_RST)
		return 0;

	return virtio_transport_send_pkt_info(vsk, &info);
}

/* Normally packets are associated with a socket. There may be no socket if an
 * attempt was made to connect to a socket that does not exist.
 */
static int virtio_transport_reset_no_sock(const struct virtio_transport *t,
					  struct sk_buff *skb)
{
	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
	struct virtio_vsock_pkt_info info = {
		.op = VIRTIO_VSOCK_OP_RST,
		.type = le16_to_cpu(hdr->type),
		.reply = true,
	};
	struct sk_buff *reply;

	/* Send RST only if the original pkt is not a RST pkt */
	if (le16_to_cpu(hdr->op) == VIRTIO_VSOCK_OP_RST)
		return 0;

	reply = virtio_transport_alloc_skb(&info, 0,
					   le64_to_cpu(hdr->dst_cid),
					   le32_to_cpu(hdr->dst_port),
					   le64_to_cpu(hdr->src_cid),
					   le32_to_cpu(hdr->src_port));
	if (!reply)
		return -ENOMEM;

	if (!t) {
		kfree_skb(reply);
		return -ENOTCONN;
	}

	return t->send_pkt(reply);
}

/* This function should be called with sk_lock held and SOCK_DONE set */
static void virtio_transport_remove_sock(struct vsock_sock *vsk)
{
	struct virtio_vsock_sock *vvs = vsk->trans;

	/* We don't need to take rx_lock, as the socket is closing and we are
	 * removing it.
	 */
	__skb_queue_purge(&vvs->rx_queue);
	vsock_remove_sock(vsk);
}

static void virtio_transport_wait_close(struct sock *sk, long timeout)
{
	if (timeout) {
		DEFINE_WAIT_FUNC(wait, woken_wake_function);

		add_wait_queue(sk_sleep(sk), &wait);

		do {
			if (sk_wait_event(sk, &timeout,
					  sock_flag(sk, SOCK_DONE), &wait))
				break;
		} while (!signal_pending(current) && timeout);

		remove_wait_queue(sk_sleep(sk), &wait);
	}
}

static void virtio_transport_do_close(struct vsock_sock *vsk,
				      bool cancel_timeout)
{
	struct sock *sk = sk_vsock(vsk);

	sock_set_flag(sk, SOCK_DONE);
	vsk->peer_shutdown = SHUTDOWN_MASK;
	if (vsock_stream_has_data(vsk) <= 0)
		sk->sk_state = TCP_CLOSING;
	sk->sk_state_change(sk);

	if (vsk->close_work_scheduled &&
	    (!cancel_timeout || cancel_delayed_work(&vsk->close_work))) {
		vsk->close_work_scheduled = false;

		virtio_transport_remove_sock(vsk);

		/* Release refcnt obtained when we scheduled the timeout */
		sock_put(sk);
	}
}

static void virtio_transport_close_timeout(struct work_struct *work)
{
	struct vsock_sock *vsk =
		container_of(work, struct vsock_sock, close_work.work);
	struct sock *sk = sk_vsock(vsk);

	sock_hold(sk);
	lock_sock(sk);

	if (!sock_flag(sk, SOCK_DONE)) {
		(void)virtio_transport_reset(vsk, NULL);

		virtio_transport_do_close(vsk, false);
	}

	vsk->close_work_scheduled = false;

	release_sock(sk);
	sock_put(sk);
}
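/* Close path at a glance (summary of the helpers above): a graceful
 * close sends SHUTDOWN, optionally lingers waiting for SOCK_DONE, and
 * if the peer never answers, close_timeout fires after
 * VSOCK_CLOSE_TIMEOUT (8 * HZ, i.e. 8 seconds) and forces a RST plus
 * local cleanup.
 */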
/* User context, vsk->sk is locked */
static bool virtio_transport_close(struct vsock_sock *vsk)
{
	struct sock *sk = &vsk->sk;

	if (!(sk->sk_state == TCP_ESTABLISHED ||
	      sk->sk_state == TCP_CLOSING))
		return true;

	/* Already received SHUTDOWN from peer, reply with RST */
	if ((vsk->peer_shutdown & SHUTDOWN_MASK) == SHUTDOWN_MASK) {
		(void)virtio_transport_reset(vsk, NULL);
		return true;
	}

	if ((sk->sk_shutdown & SHUTDOWN_MASK) != SHUTDOWN_MASK)
		(void)virtio_transport_shutdown(vsk, SHUTDOWN_MASK);

	if (sock_flag(sk, SOCK_LINGER) && !(current->flags & PF_EXITING))
		virtio_transport_wait_close(sk, sk->sk_lingertime);

	if (sock_flag(sk, SOCK_DONE))
		return true;

	sock_hold(sk);
	INIT_DELAYED_WORK(&vsk->close_work,
			  virtio_transport_close_timeout);
	vsk->close_work_scheduled = true;
	schedule_delayed_work(&vsk->close_work, VSOCK_CLOSE_TIMEOUT);
	return false;
}

void virtio_transport_release(struct vsock_sock *vsk)
{
	struct sock *sk = &vsk->sk;
	bool remove_sock = true;

	if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)
		remove_sock = virtio_transport_close(vsk);

	if (remove_sock) {
		sock_set_flag(sk, SOCK_DONE);
		virtio_transport_remove_sock(vsk);
	}
}
EXPORT_SYMBOL_GPL(virtio_transport_release);

static int
virtio_transport_recv_connecting(struct sock *sk,
				 struct sk_buff *skb)
{
	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
	struct vsock_sock *vsk = vsock_sk(sk);
	int skerr;
	int err;

	switch (le16_to_cpu(hdr->op)) {
	case VIRTIO_VSOCK_OP_RESPONSE:
		sk->sk_state = TCP_ESTABLISHED;
		sk->sk_socket->state = SS_CONNECTED;
		vsock_insert_connected(vsk);
		sk->sk_state_change(sk);
		break;
	case VIRTIO_VSOCK_OP_INVALID:
		break;
	case VIRTIO_VSOCK_OP_RST:
		skerr = ECONNRESET;
		err = 0;
		goto destroy;
	default:
		skerr = EPROTO;
		err = -EINVAL;
		goto destroy;
	}
	return 0;

destroy:
	virtio_transport_reset(vsk, skb);
	sk->sk_state = TCP_CLOSE;
	sk->sk_err = skerr;
	sk_error_report(sk);
	return err;
}
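/* Coalescing example for the function below (illustrative): two 64-byte
 * RW packets arriving back to back are merged into the tailroom of the
 * already-queued skb instead of being queued separately, since
 * 64 <= GOOD_COPY_LEN; the queued header's len is bumped accordingly
 * via le32_add_cpu().
 */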
static void
virtio_transport_recv_enqueue(struct vsock_sock *vsk,
			      struct sk_buff *skb)
{
	struct virtio_vsock_sock *vvs = vsk->trans;
	bool can_enqueue, free_pkt = false;
	struct virtio_vsock_hdr *hdr;
	u32 len;

	hdr = virtio_vsock_hdr(skb);
	len = le32_to_cpu(hdr->len);

	spin_lock_bh(&vvs->rx_lock);

	can_enqueue = virtio_transport_inc_rx_pkt(vvs, len);
	if (!can_enqueue) {
		free_pkt = true;
		goto out;
	}

	if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOM)
		vvs->msg_count++;

	/* Try to copy small packets into the buffer of last packet queued,
	 * to avoid wasting memory queueing the entire buffer with a small
	 * payload.
	 */
	if (len <= GOOD_COPY_LEN && !skb_queue_empty(&vvs->rx_queue)) {
		struct virtio_vsock_hdr *last_hdr;
		struct sk_buff *last_skb;

		last_skb = skb_peek_tail(&vvs->rx_queue);
		last_hdr = virtio_vsock_hdr(last_skb);

		/* If there is space in the last packet queued, we copy the
		 * new packet in its buffer. We avoid this if the last packet
		 * queued has VIRTIO_VSOCK_SEQ_EOM set, because that marks
		 * the end of a SEQPACKET message, so this skb is the first
		 * packet of a new message.
		 */
		if (skb->len < skb_tailroom(last_skb) &&
		    !(le32_to_cpu(last_hdr->flags) & VIRTIO_VSOCK_SEQ_EOM)) {
			memcpy(skb_put(last_skb, skb->len), skb->data, skb->len);
			free_pkt = true;
			last_hdr->flags |= hdr->flags;
			le32_add_cpu(&last_hdr->len, len);
			goto out;
		}
	}

	__skb_queue_tail(&vvs->rx_queue, skb);

out:
	spin_unlock_bh(&vvs->rx_lock);
	if (free_pkt)
		kfree_skb(skb);
}

static int
virtio_transport_recv_connected(struct sock *sk,
				struct sk_buff *skb)
{
	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
	struct vsock_sock *vsk = vsock_sk(sk);
	int err = 0;

	switch (le16_to_cpu(hdr->op)) {
	case VIRTIO_VSOCK_OP_RW:
		virtio_transport_recv_enqueue(vsk, skb);
		vsock_data_ready(sk);
		return err;
	case VIRTIO_VSOCK_OP_CREDIT_REQUEST:
		virtio_transport_send_credit_update(vsk);
		break;
	case VIRTIO_VSOCK_OP_CREDIT_UPDATE:
		sk->sk_write_space(sk);
		break;
	case VIRTIO_VSOCK_OP_SHUTDOWN:
		if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SHUTDOWN_RCV)
			vsk->peer_shutdown |= RCV_SHUTDOWN;
		if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SHUTDOWN_SEND)
			vsk->peer_shutdown |= SEND_SHUTDOWN;
		if (vsk->peer_shutdown == SHUTDOWN_MASK &&
		    vsock_stream_has_data(vsk) <= 0 &&
		    !sock_flag(sk, SOCK_DONE)) {
			(void)virtio_transport_reset(vsk, NULL);
			virtio_transport_do_close(vsk, true);
		}
		if (le32_to_cpu(virtio_vsock_hdr(skb)->flags))
			sk->sk_state_change(sk);
		break;
	case VIRTIO_VSOCK_OP_RST:
		virtio_transport_do_close(vsk, true);
		break;
	default:
		err = -EINVAL;
		break;
	}

	kfree_skb(skb);
	return err;
}
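/* Shutdown handling example (illustrative): if the peer calls
 * shutdown(SHUT_WR), the SHUTDOWN packet arrives with only the SEND
 * flag, so just the SEND_SHUTDOWN bit of peer_shutdown is set and the
 * local side can keep transmitting. Only once both bits are set and no
 * received data is pending does the code above reset and close the
 * connection.
 */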
static void
virtio_transport_recv_disconnecting(struct sock *sk,
				    struct sk_buff *skb)
{
	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
	struct vsock_sock *vsk = vsock_sk(sk);

	if (le16_to_cpu(hdr->op) == VIRTIO_VSOCK_OP_RST)
		virtio_transport_do_close(vsk, true);
}

static int
virtio_transport_send_response(struct vsock_sock *vsk,
			       struct sk_buff *skb)
{
	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
	struct virtio_vsock_pkt_info info = {
		.op = VIRTIO_VSOCK_OP_RESPONSE,
		.remote_cid = le64_to_cpu(hdr->src_cid),
		.remote_port = le32_to_cpu(hdr->src_port),
		.reply = true,
		.vsk = vsk,
	};

	return virtio_transport_send_pkt_info(vsk, &info);
}

static bool virtio_transport_space_update(struct sock *sk,
					  struct sk_buff *skb)
{
	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
	struct vsock_sock *vsk = vsock_sk(sk);
	struct virtio_vsock_sock *vvs = vsk->trans;
	bool space_available;

	/* Listener sockets are not associated with any transport, so we
	 * cannot check their state to see whether there is space available
	 * in the remote peer, but since they are only used to receive
	 * requests, we can assume that there is always space available in
	 * the other peer.
	 */
	if (!vvs)
		return true;

	/* buf_alloc and fwd_cnt are always included in the hdr */
	spin_lock_bh(&vvs->tx_lock);
	vvs->peer_buf_alloc = le32_to_cpu(hdr->buf_alloc);
	vvs->peer_fwd_cnt = le32_to_cpu(hdr->fwd_cnt);
	space_available = virtio_transport_has_space(vsk);
	spin_unlock_bh(&vvs->tx_lock);
	return space_available;
}

/* Handle server socket */
static int
virtio_transport_recv_listen(struct sock *sk, struct sk_buff *skb,
			     struct virtio_transport *t)
{
	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
	struct vsock_sock *vsk = vsock_sk(sk);
	struct vsock_sock *vchild;
	struct sock *child;
	int ret;

	if (le16_to_cpu(hdr->op) != VIRTIO_VSOCK_OP_REQUEST) {
		virtio_transport_reset_no_sock(t, skb);
		return -EINVAL;
	}

	if (sk_acceptq_is_full(sk)) {
		virtio_transport_reset_no_sock(t, skb);
		return -ENOMEM;
	}

	child = vsock_create_connected(sk);
	if (!child) {
		virtio_transport_reset_no_sock(t, skb);
		return -ENOMEM;
	}

	sk_acceptq_added(sk);

	lock_sock_nested(child, SINGLE_DEPTH_NESTING);

	child->sk_state = TCP_ESTABLISHED;

	vchild = vsock_sk(child);
	vsock_addr_init(&vchild->local_addr, le64_to_cpu(hdr->dst_cid),
			le32_to_cpu(hdr->dst_port));
	vsock_addr_init(&vchild->remote_addr, le64_to_cpu(hdr->src_cid),
			le32_to_cpu(hdr->src_port));

	ret = vsock_assign_transport(vchild, vsk);
	/* Transport assigned (looking at remote_addr) must be the same as
	 * the one where we received the request.
	 */
	if (ret || vchild->transport != &t->transport) {
		release_sock(child);
		virtio_transport_reset_no_sock(t, skb);
		sock_put(child);
		return ret;
	}

	if (virtio_transport_space_update(child, skb))
		child->sk_write_space(child);

	vsock_insert_connected(vchild);
	vsock_enqueue_accept(sk, child);
	virtio_transport_send_response(vchild, skb);

	release_sock(child);

	sk->sk_data_ready(sk);
	return 0;
}
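/* Handshake sequence implemented above (illustrative trace):
 *
 *   peer                      listener
 *   OP_REQUEST  -->  create child socket, set TCP_ESTABLISHED
 *               <--  OP_RESPONSE
 *
 * accept() on the listener then returns the already-connected child.
 */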
static bool virtio_transport_valid_type(u16 type)
{
	return (type == VIRTIO_VSOCK_TYPE_STREAM) ||
	       (type == VIRTIO_VSOCK_TYPE_SEQPACKET);
}

/* We are under the virtio-vsock's vsock->rx_lock or vhost-vsock's vq->mutex
 * lock.
 */
void virtio_transport_recv_pkt(struct virtio_transport *t,
			       struct sk_buff *skb)
{
	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
	struct sockaddr_vm src, dst;
	struct vsock_sock *vsk;
	struct sock *sk;
	bool space_available;

	vsock_addr_init(&src, le64_to_cpu(hdr->src_cid),
			le32_to_cpu(hdr->src_port));
	vsock_addr_init(&dst, le64_to_cpu(hdr->dst_cid),
			le32_to_cpu(hdr->dst_port));

	trace_virtio_transport_recv_pkt(src.svm_cid, src.svm_port,
					dst.svm_cid, dst.svm_port,
					le32_to_cpu(hdr->len),
					le16_to_cpu(hdr->type),
					le16_to_cpu(hdr->op),
					le32_to_cpu(hdr->flags),
					le32_to_cpu(hdr->buf_alloc),
					le32_to_cpu(hdr->fwd_cnt));

	if (!virtio_transport_valid_type(le16_to_cpu(hdr->type))) {
		(void)virtio_transport_reset_no_sock(t, skb);
		goto free_pkt;
	}

	/* The socket must be in connected or bound table
	 * otherwise send reset back
	 */
	sk = vsock_find_connected_socket(&src, &dst);
	if (!sk) {
		sk = vsock_find_bound_socket(&dst);
		if (!sk) {
			(void)virtio_transport_reset_no_sock(t, skb);
			goto free_pkt;
		}
	}

	if (virtio_transport_get_type(sk) != le16_to_cpu(hdr->type)) {
		(void)virtio_transport_reset_no_sock(t, skb);
		sock_put(sk);
		goto free_pkt;
	}

	if (!skb_set_owner_sk_safe(skb, sk)) {
		WARN_ONCE(1, "receiving vsock socket has sk_refcnt == 0\n");
		goto free_pkt;
	}

	vsk = vsock_sk(sk);

	lock_sock(sk);

	/* Check if sk has been closed before lock_sock */
	if (sock_flag(sk, SOCK_DONE)) {
		(void)virtio_transport_reset_no_sock(t, skb);
		release_sock(sk);
		sock_put(sk);
		goto free_pkt;
	}

	space_available = virtio_transport_space_update(sk, skb);

	/* Update CID in case it has changed after a transport reset event */
	if (vsk->local_addr.svm_cid != VMADDR_CID_ANY)
		vsk->local_addr.svm_cid = dst.svm_cid;

	if (space_available)
		sk->sk_write_space(sk);

	switch (sk->sk_state) {
	case TCP_LISTEN:
		virtio_transport_recv_listen(sk, skb, t);
		kfree_skb(skb);
		break;
	case TCP_SYN_SENT:
		virtio_transport_recv_connecting(sk, skb);
		kfree_skb(skb);
		break;
	case TCP_ESTABLISHED:
		virtio_transport_recv_connected(sk, skb);
		break;
	case TCP_CLOSING:
		virtio_transport_recv_disconnecting(sk, skb);
		kfree_skb(skb);
		break;
	default:
		(void)virtio_transport_reset_no_sock(t, skb);
		kfree_skb(skb);
		break;
	}

	release_sock(sk);

	/* Release refcnt obtained when we fetched this socket out of the
	 * bound or connected list.
	 */
	sock_put(sk);
	return;

free_pkt:
	kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(virtio_transport_recv_pkt);
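/* skb ownership in the dispatch above (summary): only the
 * TCP_ESTABLISHED path hands the skb to virtio_transport_recv_connected(),
 * which either queues OP_RW packets on the rx_queue or frees them
 * itself; every other state handler only reads the skb, so it is freed
 * here after the call.
 */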
/* Remove skbs found in a queue that have a vsk that matches.
 *
 * Each skb is freed.
 *
 * Returns the count of skbs that were reply packets.
 */
int virtio_transport_purge_skbs(void *vsk, struct sk_buff_head *queue)
{
	struct sk_buff_head freeme;
	struct sk_buff *skb, *tmp;
	int cnt = 0;

	skb_queue_head_init(&freeme);

	spin_lock_bh(&queue->lock);
	skb_queue_walk_safe(queue, skb, tmp) {
		if (vsock_sk(skb->sk) != vsk)
			continue;

		__skb_unlink(skb, queue);
		__skb_queue_tail(&freeme, skb);

		if (virtio_vsock_skb_reply(skb))
			cnt++;
	}
	spin_unlock_bh(&queue->lock);

	__skb_queue_purge(&freeme);

	return cnt;
}
EXPORT_SYMBOL_GPL(virtio_transport_purge_skbs);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("common code for virtio vsock");