// SPDX-License-Identifier: GPL-2.0-only
/*
 * common code for virtio vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 */
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/virtio_vsock.h>
#include <uapi/linux/vsockmon.h>

#include <net/sock.h>
#include <net/af_vsock.h>

#define CREATE_TRACE_POINTS
#include <trace/events/vsock_virtio_transport_common.h>

/* How long to wait for graceful shutdown of a connection */
#define VSOCK_CLOSE_TIMEOUT (8 * HZ)

/* Threshold for detecting small packets to copy */
#define GOOD_COPY_LEN  128

static const struct virtio_transport *
virtio_transport_get_ops(struct vsock_sock *vsk)
{
	const struct vsock_transport *t = vsock_core_get_transport(vsk);

	if (WARN_ON(!t))
		return NULL;

	return container_of(t, struct virtio_transport, transport);
}

/* Returns a new packet on success, otherwise returns NULL.
 *
 * NULL is returned if the skb cannot be allocated or if copying the
 * payload from the msghdr fails.
 */
static struct sk_buff *
virtio_transport_alloc_skb(struct virtio_vsock_pkt_info *info,
			   size_t len,
			   u32 src_cid,
			   u32 src_port,
			   u32 dst_cid,
			   u32 dst_port)
{
	const size_t skb_len = VIRTIO_VSOCK_SKB_HEADROOM + len;
	struct virtio_vsock_hdr *hdr;
	struct sk_buff *skb;
	void *payload;
	int err;

	skb = virtio_vsock_alloc_skb(skb_len, GFP_KERNEL);
	if (!skb)
		return NULL;

	hdr = virtio_vsock_hdr(skb);
	hdr->type = cpu_to_le16(info->type);
	hdr->op = cpu_to_le16(info->op);
	hdr->src_cid = cpu_to_le64(src_cid);
	hdr->dst_cid = cpu_to_le64(dst_cid);
	hdr->src_port = cpu_to_le32(src_port);
	hdr->dst_port = cpu_to_le32(dst_port);
	hdr->flags = cpu_to_le32(info->flags);
	hdr->len = cpu_to_le32(len);

	if (info->msg && len > 0) {
		payload = skb_put(skb, len);
		err = memcpy_from_msg(payload, info->msg, len);
		if (err)
			goto out;

		if (msg_data_left(info->msg) == 0 &&
		    info->type == VIRTIO_VSOCK_TYPE_SEQPACKET) {
			hdr->flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM);

			if (info->msg->msg_flags & MSG_EOR)
				hdr->flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
		}
	}

	if (info->reply)
		virtio_vsock_skb_set_reply(skb);

	trace_virtio_transport_alloc_pkt(src_cid, src_port,
					 dst_cid, dst_port,
					 len,
					 info->type,
					 info->op,
					 info->flags);

	return skb;

out:
	kfree_skb(skb);
	return NULL;
}

/* Packet capture */
static struct sk_buff *virtio_transport_build_skb(void *opaque)
{
	struct virtio_vsock_hdr *pkt_hdr;
	struct sk_buff *pkt = opaque;
	struct af_vsockmon_hdr *hdr;
	struct sk_buff *skb;
	size_t payload_len;
	void *payload_buf;

	/* A packet could be split to fit the RX buffer, so we can retrieve
	 * the payload length from the header and the buffer pointer taking
	 * care of the offset in the original packet.
	 */
	pkt_hdr = virtio_vsock_hdr(pkt);
	payload_len = pkt->len;
	payload_buf = pkt->data;

	skb = alloc_skb(sizeof(*hdr) + sizeof(*pkt_hdr) + payload_len,
			GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, sizeof(*hdr));

	/* pkt->hdr is little-endian so no need to byteswap here */
	hdr->src_cid = pkt_hdr->src_cid;
	hdr->src_port = pkt_hdr->src_port;
	hdr->dst_cid = pkt_hdr->dst_cid;
	hdr->dst_port = pkt_hdr->dst_port;

	hdr->transport = cpu_to_le16(AF_VSOCK_TRANSPORT_VIRTIO);
	hdr->len = cpu_to_le16(sizeof(*pkt_hdr));
	memset(hdr->reserved, 0, sizeof(hdr->reserved));

	switch (le16_to_cpu(pkt_hdr->op)) {
	case VIRTIO_VSOCK_OP_REQUEST:
	case VIRTIO_VSOCK_OP_RESPONSE:
		hdr->op = cpu_to_le16(AF_VSOCK_OP_CONNECT);
		break;
	case VIRTIO_VSOCK_OP_RST:
	case VIRTIO_VSOCK_OP_SHUTDOWN:
		hdr->op = cpu_to_le16(AF_VSOCK_OP_DISCONNECT);
		break;
	case VIRTIO_VSOCK_OP_RW:
		hdr->op = cpu_to_le16(AF_VSOCK_OP_PAYLOAD);
		break;
	case VIRTIO_VSOCK_OP_CREDIT_UPDATE:
	case VIRTIO_VSOCK_OP_CREDIT_REQUEST:
		hdr->op = cpu_to_le16(AF_VSOCK_OP_CONTROL);
		break;
	default:
		hdr->op = cpu_to_le16(AF_VSOCK_OP_UNKNOWN);
		break;
	}

	skb_put_data(skb, pkt_hdr, sizeof(*pkt_hdr));

	if (payload_len) {
		skb_put_data(skb, payload_buf, payload_len);
	}

	return skb;
}

void virtio_transport_deliver_tap_pkt(struct sk_buff *skb)
{
	if (virtio_vsock_skb_tap_delivered(skb))
		return;

	vsock_deliver_tap(virtio_transport_build_skb, skb);
	virtio_vsock_skb_set_tap_delivered(skb);
}
EXPORT_SYMBOL_GPL(virtio_transport_deliver_tap_pkt);
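
/* Summary of the capture format built above (descriptive note, derived from
 * virtio_transport_build_skb()): each frame handed to vsockmon taps is laid
 * out as a struct af_vsockmon_hdr, followed by the raw little-endian
 * struct virtio_vsock_hdr of the captured packet, followed by the payload
 * bytes, which is why sizeof(*hdr) + sizeof(*pkt_hdr) + payload_len bytes
 * are reserved for the capture skb.
 */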

static u16 virtio_transport_get_type(struct sock *sk)
{
	if (sk->sk_type == SOCK_STREAM)
		return VIRTIO_VSOCK_TYPE_STREAM;
	else
		return VIRTIO_VSOCK_TYPE_SEQPACKET;
}

/* This function can only be used on connecting/connected sockets,
 * since a socket assigned to a transport is required.
 *
 * Do not use on listener sockets!
 */
static int virtio_transport_send_pkt_info(struct vsock_sock *vsk,
					  struct virtio_vsock_pkt_info *info)
{
	u32 src_cid, src_port, dst_cid, dst_port;
	const struct virtio_transport *t_ops;
	struct virtio_vsock_sock *vvs;
	u32 pkt_len = info->pkt_len;
	struct sk_buff *skb;

	info->type = virtio_transport_get_type(sk_vsock(vsk));

	t_ops = virtio_transport_get_ops(vsk);
	if (unlikely(!t_ops))
		return -EFAULT;

	src_cid = t_ops->transport.get_local_cid();
	src_port = vsk->local_addr.svm_port;
	if (!info->remote_cid) {
		dst_cid = vsk->remote_addr.svm_cid;
		dst_port = vsk->remote_addr.svm_port;
	} else {
		dst_cid = info->remote_cid;
		dst_port = info->remote_port;
	}

	vvs = vsk->trans;

	/* we can send less than pkt_len bytes */
	if (pkt_len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE)
		pkt_len = VIRTIO_VSOCK_MAX_PKT_BUF_SIZE;

	/* virtio_transport_get_credit might return less than pkt_len credit */
	pkt_len = virtio_transport_get_credit(vvs, pkt_len);

	/* Do not send zero length OP_RW pkt */
	if (pkt_len == 0 && info->op == VIRTIO_VSOCK_OP_RW)
		return pkt_len;

	skb = virtio_transport_alloc_skb(info, pkt_len,
					 src_cid, src_port,
					 dst_cid, dst_port);
	if (!skb) {
		virtio_transport_put_credit(vvs, pkt_len);
		return -ENOMEM;
	}

	virtio_transport_inc_tx_pkt(vvs, skb);

	return t_ops->send_pkt(skb);
}

static bool virtio_transport_inc_rx_pkt(struct virtio_vsock_sock *vvs,
					u32 len)
{
	if (vvs->rx_bytes + len > vvs->buf_alloc)
		return false;

	vvs->rx_bytes += len;
	return true;
}

static void virtio_transport_dec_rx_pkt(struct virtio_vsock_sock *vvs,
					u32 len)
{
	vvs->rx_bytes -= len;
	vvs->fwd_cnt += len;
}

void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct sk_buff *skb)
{
	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);

	spin_lock_bh(&vvs->rx_lock);
	vvs->last_fwd_cnt = vvs->fwd_cnt;
	hdr->fwd_cnt = cpu_to_le32(vvs->fwd_cnt);
	hdr->buf_alloc = cpu_to_le32(vvs->buf_alloc);
	spin_unlock_bh(&vvs->rx_lock);
}
EXPORT_SYMBOL_GPL(virtio_transport_inc_tx_pkt);

u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 credit)
{
	u32 ret;

	spin_lock_bh(&vvs->tx_lock);
	ret = vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt);
	if (ret > credit)
		ret = credit;
	vvs->tx_cnt += ret;
	spin_unlock_bh(&vvs->tx_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(virtio_transport_get_credit);

void virtio_transport_put_credit(struct virtio_vsock_sock *vvs, u32 credit)
{
	spin_lock_bh(&vvs->tx_lock);
	vvs->tx_cnt -= credit;
	spin_unlock_bh(&vvs->tx_lock);
}
EXPORT_SYMBOL_GPL(virtio_transport_put_credit);

static int virtio_transport_send_credit_update(struct vsock_sock *vsk)
{
	struct virtio_vsock_pkt_info info = {
		.op = VIRTIO_VSOCK_OP_CREDIT_UPDATE,
		.vsk = vsk,
	};

	return virtio_transport_send_pkt_info(vsk, &info);
}
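
/* Worked example of the credit accounting above (illustrative numbers, not
 * taken from the original source): if the peer advertised
 * peer_buf_alloc = 262144 bytes, we have sent tx_cnt = 4096 bytes so far
 * and the peer reports it has consumed peer_fwd_cnt = 1024 of them, then
 * virtio_transport_get_credit() allows at most
 * 262144 - (4096 - 1024) = 259072 more bytes to be sent before the sender
 * has to wait for a further CREDIT_UPDATE from the peer.
 */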

static ssize_t
virtio_transport_stream_do_peek(struct vsock_sock *vsk,
				struct msghdr *msg,
				size_t len)
{
	struct virtio_vsock_sock *vvs = vsk->trans;
	size_t bytes, total = 0, off;
	struct sk_buff *skb, *tmp;
	int err = -EFAULT;

	spin_lock_bh(&vvs->rx_lock);

	skb_queue_walk_safe(&vvs->rx_queue, skb, tmp) {
		off = 0;

		if (total == len)
			break;

		while (total < len && off < skb->len) {
			bytes = len - total;
			if (bytes > skb->len - off)
				bytes = skb->len - off;

			/* sk_lock is held by caller so no one else can dequeue.
			 * Unlock rx_lock since memcpy_to_msg() may sleep.
			 */
			spin_unlock_bh(&vvs->rx_lock);

			err = memcpy_to_msg(msg, skb->data + off, bytes);
			if (err)
				goto out;

			spin_lock_bh(&vvs->rx_lock);

			total += bytes;
			off += bytes;
		}
	}

	spin_unlock_bh(&vvs->rx_lock);

	return total;

out:
	if (total)
		err = total;
	return err;
}

static ssize_t
virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
				   struct msghdr *msg,
				   size_t len)
{
	struct virtio_vsock_sock *vvs = vsk->trans;
	size_t bytes, total = 0;
	struct sk_buff *skb;
	int err = -EFAULT;
	u32 free_space;

	spin_lock_bh(&vvs->rx_lock);
	while (total < len && !skb_queue_empty(&vvs->rx_queue)) {
		skb = skb_peek(&vvs->rx_queue);

		bytes = len - total;
		if (bytes > skb->len)
			bytes = skb->len;

		/* sk_lock is held by caller so no one else can dequeue.
		 * Unlock rx_lock since memcpy_to_msg() may sleep.
		 */
		spin_unlock_bh(&vvs->rx_lock);

		err = memcpy_to_msg(msg, skb->data, bytes);
		if (err)
			goto out;

		spin_lock_bh(&vvs->rx_lock);

		total += bytes;
		skb_pull(skb, bytes);

		if (skb->len == 0) {
			u32 pkt_len = le32_to_cpu(virtio_vsock_hdr(skb)->len);

			virtio_transport_dec_rx_pkt(vvs, pkt_len);
			__skb_unlink(skb, &vvs->rx_queue);
			consume_skb(skb);
		}
	}

	free_space = vvs->buf_alloc - (vvs->fwd_cnt - vvs->last_fwd_cnt);

	spin_unlock_bh(&vvs->rx_lock);

	/* To reduce the number of credit update messages,
	 * don't update credits as long as lots of space is available.
	 * Note: the limit chosen here is arbitrary. Setting the limit
	 * too high causes extra messages. Too low causes transmitter
	 * stalls. As stalls are in theory more expensive than extra
	 * messages, we set the limit to a high value. TODO: experiment
	 * with different values.
	 */
	if (free_space < VIRTIO_VSOCK_MAX_PKT_BUF_SIZE)
		virtio_transport_send_credit_update(vsk);

	return total;

out:
	if (total)
		err = total;
	return err;
}
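
/* For example (illustrative numbers; assuming VIRTIO_VSOCK_MAX_PKT_BUF_SIZE
 * is 64 KiB): with buf_alloc = 262144, the dequeue path above stays silent
 * while more than 64 KiB of receive buffer is still free, and only sends an
 * explicit CREDIT_UPDATE once free_space drops below that threshold (or when
 * the peer asks with a CREDIT_REQUEST, handled in
 * virtio_transport_recv_connected()).
 */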

static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk,
						 struct msghdr *msg,
						 int flags)
{
	struct virtio_vsock_sock *vvs = vsk->trans;
	int dequeued_len = 0;
	size_t user_buf_len = msg_data_left(msg);
	bool msg_ready = false;
	struct sk_buff *skb;

	spin_lock_bh(&vvs->rx_lock);

	if (vvs->msg_count == 0) {
		spin_unlock_bh(&vvs->rx_lock);
		return 0;
	}

	while (!msg_ready) {
		struct virtio_vsock_hdr *hdr;
		size_t pkt_len;

		skb = __skb_dequeue(&vvs->rx_queue);
		if (!skb)
			break;
		hdr = virtio_vsock_hdr(skb);
		pkt_len = (size_t)le32_to_cpu(hdr->len);

		if (dequeued_len >= 0) {
			size_t bytes_to_copy;

			bytes_to_copy = min(user_buf_len, pkt_len);

			if (bytes_to_copy) {
				int err;

				/* sk_lock is held by caller so no one else can dequeue.
				 * Unlock rx_lock since memcpy_to_msg() may sleep.
				 */
				spin_unlock_bh(&vvs->rx_lock);

				err = memcpy_to_msg(msg, skb->data, bytes_to_copy);
				if (err) {
					/* Copy of message failed. Rest of
					 * fragments will be freed without copy.
					 */
					dequeued_len = err;
				} else {
					user_buf_len -= bytes_to_copy;
				}

				spin_lock_bh(&vvs->rx_lock);
			}

			if (dequeued_len >= 0)
				dequeued_len += pkt_len;
		}

		if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOM) {
			msg_ready = true;
			vvs->msg_count--;

			if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOR)
				msg->msg_flags |= MSG_EOR;
		}

		virtio_transport_dec_rx_pkt(vvs, pkt_len);
		kfree_skb(skb);
	}

	spin_unlock_bh(&vvs->rx_lock);

	virtio_transport_send_credit_update(vsk);

	return dequeued_len;
}

ssize_t
virtio_transport_stream_dequeue(struct vsock_sock *vsk,
				struct msghdr *msg,
				size_t len, int flags)
{
	if (flags & MSG_PEEK)
		return virtio_transport_stream_do_peek(vsk, msg, len);
	else
		return virtio_transport_stream_do_dequeue(vsk, msg, len);
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_dequeue);

ssize_t
virtio_transport_seqpacket_dequeue(struct vsock_sock *vsk,
				   struct msghdr *msg,
				   int flags)
{
	if (flags & MSG_PEEK)
		return -EOPNOTSUPP;

	return virtio_transport_seqpacket_do_dequeue(vsk, msg, flags);
}
EXPORT_SYMBOL_GPL(virtio_transport_seqpacket_dequeue);
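
/* To illustrate the SEQPACKET framing handled above: a message is carried as
 * one or more RW packets, and only the last fragment carries
 * VIRTIO_VSOCK_SEQ_EOM (set on the TX side in virtio_transport_alloc_skb()
 * once msg_data_left() reaches zero); VIRTIO_VSOCK_SEQ_EOR additionally maps
 * the sender's MSG_EOR to the receiver's msg_flags. A fragment that does not
 * fit in the user buffer is truncated, but dequeued_len still accounts for
 * the full message length.
 */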

int
virtio_transport_seqpacket_enqueue(struct vsock_sock *vsk,
				   struct msghdr *msg,
				   size_t len)
{
	struct virtio_vsock_sock *vvs = vsk->trans;

	spin_lock_bh(&vvs->tx_lock);

	if (len > vvs->peer_buf_alloc) {
		spin_unlock_bh(&vvs->tx_lock);
		return -EMSGSIZE;
	}

	spin_unlock_bh(&vvs->tx_lock);

	return virtio_transport_stream_enqueue(vsk, msg, len);
}
EXPORT_SYMBOL_GPL(virtio_transport_seqpacket_enqueue);

int
virtio_transport_dgram_dequeue(struct vsock_sock *vsk,
			       struct msghdr *msg,
			       size_t len, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL_GPL(virtio_transport_dgram_dequeue);

s64 virtio_transport_stream_has_data(struct vsock_sock *vsk)
{
	struct virtio_vsock_sock *vvs = vsk->trans;
	s64 bytes;

	spin_lock_bh(&vvs->rx_lock);
	bytes = vvs->rx_bytes;
	spin_unlock_bh(&vvs->rx_lock);

	return bytes;
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_has_data);

u32 virtio_transport_seqpacket_has_data(struct vsock_sock *vsk)
{
	struct virtio_vsock_sock *vvs = vsk->trans;
	u32 msg_count;

	spin_lock_bh(&vvs->rx_lock);
	msg_count = vvs->msg_count;
	spin_unlock_bh(&vvs->rx_lock);

	return msg_count;
}
EXPORT_SYMBOL_GPL(virtio_transport_seqpacket_has_data);

static s64 virtio_transport_has_space(struct vsock_sock *vsk)
{
	struct virtio_vsock_sock *vvs = vsk->trans;
	s64 bytes;

	bytes = vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt);
	if (bytes < 0)
		bytes = 0;

	return bytes;
}

s64 virtio_transport_stream_has_space(struct vsock_sock *vsk)
{
	struct virtio_vsock_sock *vvs = vsk->trans;
	s64 bytes;

	spin_lock_bh(&vvs->tx_lock);
	bytes = virtio_transport_has_space(vsk);
	spin_unlock_bh(&vvs->tx_lock);

	return bytes;
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_has_space);

int virtio_transport_do_socket_init(struct vsock_sock *vsk,
				    struct vsock_sock *psk)
{
	struct virtio_vsock_sock *vvs;

	vvs = kzalloc(sizeof(*vvs), GFP_KERNEL);
	if (!vvs)
		return -ENOMEM;

	vsk->trans = vvs;
	vvs->vsk = vsk;
	if (psk && psk->trans) {
		struct virtio_vsock_sock *ptrans = psk->trans;

		vvs->peer_buf_alloc = ptrans->peer_buf_alloc;
	}

	if (vsk->buffer_size > VIRTIO_VSOCK_MAX_BUF_SIZE)
		vsk->buffer_size = VIRTIO_VSOCK_MAX_BUF_SIZE;

	vvs->buf_alloc = vsk->buffer_size;

	spin_lock_init(&vvs->rx_lock);
	spin_lock_init(&vvs->tx_lock);
	skb_queue_head_init(&vvs->rx_queue);

	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_do_socket_init);

/* sk_lock held by the caller */
void virtio_transport_notify_buffer_size(struct vsock_sock *vsk, u64 *val)
{
	struct virtio_vsock_sock *vvs = vsk->trans;

	if (*val > VIRTIO_VSOCK_MAX_BUF_SIZE)
		*val = VIRTIO_VSOCK_MAX_BUF_SIZE;

	vvs->buf_alloc = *val;

	virtio_transport_send_credit_update(vsk);
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_buffer_size);

int
virtio_transport_notify_poll_in(struct vsock_sock *vsk,
				size_t target,
				bool *data_ready_now)
{
	*data_ready_now = vsock_stream_has_data(vsk) >= target;

	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_poll_in);

int
virtio_transport_notify_poll_out(struct vsock_sock *vsk,
				 size_t target,
				 bool *space_avail_now)
{
	s64 free_space;

	free_space = vsock_stream_has_space(vsk);
	if (free_space > 0)
		*space_avail_now = true;
	else if (free_space == 0)
		*space_avail_now = false;

	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_poll_out);

int virtio_transport_notify_recv_init(struct vsock_sock *vsk,
	size_t target, struct vsock_transport_recv_notify_data *data)
{
	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_init);

int virtio_transport_notify_recv_pre_block(struct vsock_sock *vsk,
	size_t target, struct vsock_transport_recv_notify_data *data)
{
	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_pre_block);

int virtio_transport_notify_recv_pre_dequeue(struct vsock_sock *vsk,
	size_t target, struct vsock_transport_recv_notify_data *data)
{
	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_pre_dequeue);

int virtio_transport_notify_recv_post_dequeue(struct vsock_sock *vsk,
	size_t target, ssize_t copied, bool data_read,
	struct vsock_transport_recv_notify_data *data)
{
	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_post_dequeue);

int virtio_transport_notify_send_init(struct vsock_sock *vsk,
	struct vsock_transport_send_notify_data *data)
{
	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_send_init);

int virtio_transport_notify_send_pre_block(struct vsock_sock *vsk,
	struct vsock_transport_send_notify_data *data)
{
	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_send_pre_block);

int virtio_transport_notify_send_pre_enqueue(struct vsock_sock *vsk,
	struct vsock_transport_send_notify_data *data)
{
	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_send_pre_enqueue);

int virtio_transport_notify_send_post_enqueue(struct vsock_sock *vsk,
	ssize_t written, struct vsock_transport_send_notify_data *data)
{
	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_send_post_enqueue);

u64 virtio_transport_stream_rcvhiwat(struct vsock_sock *vsk)
{
	return vsk->buffer_size;
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_rcvhiwat);

bool virtio_transport_stream_is_active(struct vsock_sock *vsk)
{
	return true;
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_is_active);

bool virtio_transport_stream_allow(u32 cid, u32 port)
{
	return true;
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_allow);

int virtio_transport_dgram_bind(struct vsock_sock *vsk,
				struct sockaddr_vm *addr)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL_GPL(virtio_transport_dgram_bind);

bool virtio_transport_dgram_allow(u32 cid, u32 port)
{
	return false;
}
EXPORT_SYMBOL_GPL(virtio_transport_dgram_allow);

int virtio_transport_connect(struct vsock_sock *vsk)
{
	struct virtio_vsock_pkt_info info = {
		.op = VIRTIO_VSOCK_OP_REQUEST,
		.vsk = vsk,
	};

	return virtio_transport_send_pkt_info(vsk, &info);
}
EXPORT_SYMBOL_GPL(virtio_transport_connect);

int virtio_transport_shutdown(struct vsock_sock *vsk, int mode)
{
	struct virtio_vsock_pkt_info info = {
		.op = VIRTIO_VSOCK_OP_SHUTDOWN,
		.flags = (mode & RCV_SHUTDOWN ?
			  VIRTIO_VSOCK_SHUTDOWN_RCV : 0) |
			 (mode & SEND_SHUTDOWN ?
			  VIRTIO_VSOCK_SHUTDOWN_SEND : 0),
		.vsk = vsk,
	};

	return virtio_transport_send_pkt_info(vsk, &info);
}
EXPORT_SYMBOL_GPL(virtio_transport_shutdown);

int
virtio_transport_dgram_enqueue(struct vsock_sock *vsk,
			       struct sockaddr_vm *remote_addr,
			       struct msghdr *msg,
			       size_t dgram_len)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL_GPL(virtio_transport_dgram_enqueue);

ssize_t
virtio_transport_stream_enqueue(struct vsock_sock *vsk,
				struct msghdr *msg,
				size_t len)
{
	struct virtio_vsock_pkt_info info = {
		.op = VIRTIO_VSOCK_OP_RW,
		.msg = msg,
		.pkt_len = len,
		.vsk = vsk,
	};

	return virtio_transport_send_pkt_info(vsk, &info);
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_enqueue);

void virtio_transport_destruct(struct vsock_sock *vsk)
{
	struct virtio_vsock_sock *vvs = vsk->trans;

	kfree(vvs);
}
EXPORT_SYMBOL_GPL(virtio_transport_destruct);

static int virtio_transport_reset(struct vsock_sock *vsk,
				  struct sk_buff *skb)
{
	struct virtio_vsock_pkt_info info = {
		.op = VIRTIO_VSOCK_OP_RST,
		.reply = !!skb,
		.vsk = vsk,
	};

	/* Send RST only if the original pkt is not a RST pkt */
	if (skb && le16_to_cpu(virtio_vsock_hdr(skb)->op) == VIRTIO_VSOCK_OP_RST)
		return 0;

	return virtio_transport_send_pkt_info(vsk, &info);
}

/* Normally packets are associated with a socket. There may be no socket if an
 * attempt was made to connect to a socket that does not exist.
 */
static int virtio_transport_reset_no_sock(const struct virtio_transport *t,
					  struct sk_buff *skb)
{
	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
	struct virtio_vsock_pkt_info info = {
		.op = VIRTIO_VSOCK_OP_RST,
		.type = le16_to_cpu(hdr->type),
		.reply = true,
	};
	struct sk_buff *reply;

	/* Send RST only if the original pkt is not a RST pkt */
	if (le16_to_cpu(hdr->op) == VIRTIO_VSOCK_OP_RST)
		return 0;

	reply = virtio_transport_alloc_skb(&info, 0,
					   le64_to_cpu(hdr->dst_cid),
					   le32_to_cpu(hdr->dst_port),
					   le64_to_cpu(hdr->src_cid),
					   le32_to_cpu(hdr->src_port));
	if (!reply)
		return -ENOMEM;

	if (!t) {
		kfree_skb(reply);
		return -ENOTCONN;
	}

	return t->send_pkt(reply);
}

/* This function should be called with sk_lock held and SOCK_DONE set */
static void virtio_transport_remove_sock(struct vsock_sock *vsk)
{
	struct virtio_vsock_sock *vvs = vsk->trans;

	/* We don't need to take rx_lock, as the socket is closing and we are
	 * removing it.
	 */
	__skb_queue_purge(&vvs->rx_queue);
	vsock_remove_sock(vsk);
}

static void virtio_transport_wait_close(struct sock *sk, long timeout)
{
	if (timeout) {
		DEFINE_WAIT_FUNC(wait, woken_wake_function);

		add_wait_queue(sk_sleep(sk), &wait);

		do {
			if (sk_wait_event(sk, &timeout,
					  sock_flag(sk, SOCK_DONE), &wait))
				break;
		} while (!signal_pending(current) && timeout);

		remove_wait_queue(sk_sleep(sk), &wait);
	}
}

static void virtio_transport_do_close(struct vsock_sock *vsk,
				      bool cancel_timeout)
{
	struct sock *sk = sk_vsock(vsk);

	sock_set_flag(sk, SOCK_DONE);
	vsk->peer_shutdown = SHUTDOWN_MASK;
	if (vsock_stream_has_data(vsk) <= 0)
		sk->sk_state = TCP_CLOSING;
	sk->sk_state_change(sk);

	if (vsk->close_work_scheduled &&
	    (!cancel_timeout || cancel_delayed_work(&vsk->close_work))) {
		vsk->close_work_scheduled = false;

		virtio_transport_remove_sock(vsk);

		/* Release refcnt obtained when we scheduled the timeout */
		sock_put(sk);
	}
}

static void virtio_transport_close_timeout(struct work_struct *work)
{
	struct vsock_sock *vsk =
		container_of(work, struct vsock_sock, close_work.work);
	struct sock *sk = sk_vsock(vsk);

	sock_hold(sk);
	lock_sock(sk);

	if (!sock_flag(sk, SOCK_DONE)) {
		(void)virtio_transport_reset(vsk, NULL);

		virtio_transport_do_close(vsk, false);
	}

	vsk->close_work_scheduled = false;

	release_sock(sk);
	sock_put(sk);
}

/* User context, vsk->sk is locked */
static bool virtio_transport_close(struct vsock_sock *vsk)
{
	struct sock *sk = &vsk->sk;

	if (!(sk->sk_state == TCP_ESTABLISHED ||
	      sk->sk_state == TCP_CLOSING))
		return true;

	/* Already received SHUTDOWN from peer, reply with RST */
	if ((vsk->peer_shutdown & SHUTDOWN_MASK) == SHUTDOWN_MASK) {
		(void)virtio_transport_reset(vsk, NULL);
		return true;
	}

	if ((sk->sk_shutdown & SHUTDOWN_MASK) != SHUTDOWN_MASK)
		(void)virtio_transport_shutdown(vsk, SHUTDOWN_MASK);

	if (sock_flag(sk, SOCK_LINGER) && !(current->flags & PF_EXITING))
		virtio_transport_wait_close(sk, sk->sk_lingertime);

	if (sock_flag(sk, SOCK_DONE)) {
		return true;
	}

	sock_hold(sk);
	INIT_DELAYED_WORK(&vsk->close_work,
			  virtio_transport_close_timeout);
	vsk->close_work_scheduled = true;
	schedule_delayed_work(&vsk->close_work, VSOCK_CLOSE_TIMEOUT);
	return false;
}
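
/* In short (derived from the functions above): closing an established socket
 * sends SHUTDOWN, optionally lingers in virtio_transport_wait_close(), and if
 * the peer has not answered yet it arms close_work, so that after
 * VSOCK_CLOSE_TIMEOUT (8 * HZ) the connection is reset and torn down by
 * virtio_transport_close_timeout().
 */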

void virtio_transport_release(struct vsock_sock *vsk)
{
	struct sock *sk = &vsk->sk;
	bool remove_sock = true;

	if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)
		remove_sock = virtio_transport_close(vsk);

	if (remove_sock) {
		sock_set_flag(sk, SOCK_DONE);
		virtio_transport_remove_sock(vsk);
	}
}
EXPORT_SYMBOL_GPL(virtio_transport_release);

static int
virtio_transport_recv_connecting(struct sock *sk,
				 struct sk_buff *skb)
{
	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
	struct vsock_sock *vsk = vsock_sk(sk);
	int skerr;
	int err;

	switch (le16_to_cpu(hdr->op)) {
	case VIRTIO_VSOCK_OP_RESPONSE:
		sk->sk_state = TCP_ESTABLISHED;
		sk->sk_socket->state = SS_CONNECTED;
		vsock_insert_connected(vsk);
		sk->sk_state_change(sk);
		break;
	case VIRTIO_VSOCK_OP_INVALID:
		break;
	case VIRTIO_VSOCK_OP_RST:
		skerr = ECONNRESET;
		err = 0;
		goto destroy;
	default:
		skerr = EPROTO;
		err = -EINVAL;
		goto destroy;
	}
	return 0;

destroy:
	virtio_transport_reset(vsk, skb);
	sk->sk_state = TCP_CLOSE;
	sk->sk_err = skerr;
	sk_error_report(sk);
	return err;
}

static void
virtio_transport_recv_enqueue(struct vsock_sock *vsk,
			      struct sk_buff *skb)
{
	struct virtio_vsock_sock *vvs = vsk->trans;
	bool can_enqueue, free_pkt = false;
	struct virtio_vsock_hdr *hdr;
	u32 len;

	hdr = virtio_vsock_hdr(skb);
	len = le32_to_cpu(hdr->len);

	spin_lock_bh(&vvs->rx_lock);

	can_enqueue = virtio_transport_inc_rx_pkt(vvs, len);
	if (!can_enqueue) {
		free_pkt = true;
		goto out;
	}

	if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOM)
		vvs->msg_count++;

	/* Try to copy small packets into the buffer of last packet queued,
	 * to avoid wasting memory queueing the entire buffer with a small
	 * payload.
	 */
	if (len <= GOOD_COPY_LEN && !skb_queue_empty(&vvs->rx_queue)) {
		struct virtio_vsock_hdr *last_hdr;
		struct sk_buff *last_skb;

		last_skb = skb_peek_tail(&vvs->rx_queue);
		last_hdr = virtio_vsock_hdr(last_skb);

		/* If there is space in the last packet queued, we copy the
		 * new packet in its buffer. We avoid this if the last packet
		 * queued has VIRTIO_VSOCK_SEQ_EOM set, because this is the
		 * delimiter of a SEQPACKET message, so 'pkt' is the first
		 * packet of a new message.
		 */
		if (skb->len < skb_tailroom(last_skb) &&
		    !(le32_to_cpu(last_hdr->flags) & VIRTIO_VSOCK_SEQ_EOM)) {
			memcpy(skb_put(last_skb, skb->len), skb->data, skb->len);
			free_pkt = true;
			last_hdr->flags |= hdr->flags;
			last_hdr->len = cpu_to_le32(last_skb->len);
			goto out;
		}
	}

	__skb_queue_tail(&vvs->rx_queue, skb);

out:
	spin_unlock_bh(&vvs->rx_lock);
	if (free_pkt)
		kfree_skb(skb);
}
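
/* For example (illustrative sizes, not from the original source): a 32-byte
 * RW packet arriving while the tail skb of rx_queue still has more than 32
 * bytes of tailroom is memcpy()ed into that tail skb and freed, rather than
 * being queued on its own (unless the tail skb ends a SEQPACKET message);
 * packets larger than GOOD_COPY_LEN (128 bytes) are always queued as
 * separate skbs.
 */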

static int
virtio_transport_recv_connected(struct sock *sk,
				struct sk_buff *skb)
{
	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
	struct vsock_sock *vsk = vsock_sk(sk);
	int err = 0;

	switch (le16_to_cpu(hdr->op)) {
	case VIRTIO_VSOCK_OP_RW:
		virtio_transport_recv_enqueue(vsk, skb);
		vsock_data_ready(sk);
		return err;
	case VIRTIO_VSOCK_OP_CREDIT_REQUEST:
		virtio_transport_send_credit_update(vsk);
		break;
	case VIRTIO_VSOCK_OP_CREDIT_UPDATE:
		sk->sk_write_space(sk);
		break;
	case VIRTIO_VSOCK_OP_SHUTDOWN:
		if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SHUTDOWN_RCV)
			vsk->peer_shutdown |= RCV_SHUTDOWN;
		if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SHUTDOWN_SEND)
			vsk->peer_shutdown |= SEND_SHUTDOWN;
		if (vsk->peer_shutdown == SHUTDOWN_MASK &&
		    vsock_stream_has_data(vsk) <= 0 &&
		    !sock_flag(sk, SOCK_DONE)) {
			(void)virtio_transport_reset(vsk, NULL);
			virtio_transport_do_close(vsk, true);
		}
		if (le32_to_cpu(virtio_vsock_hdr(skb)->flags))
			sk->sk_state_change(sk);
		break;
	case VIRTIO_VSOCK_OP_RST:
		virtio_transport_do_close(vsk, true);
		break;
	default:
		err = -EINVAL;
		break;
	}

	kfree_skb(skb);
	return err;
}

static void
virtio_transport_recv_disconnecting(struct sock *sk,
				    struct sk_buff *skb)
{
	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
	struct vsock_sock *vsk = vsock_sk(sk);

	if (le16_to_cpu(hdr->op) == VIRTIO_VSOCK_OP_RST)
		virtio_transport_do_close(vsk, true);
}

static int
virtio_transport_send_response(struct vsock_sock *vsk,
			       struct sk_buff *skb)
{
	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
	struct virtio_vsock_pkt_info info = {
		.op = VIRTIO_VSOCK_OP_RESPONSE,
		.remote_cid = le64_to_cpu(hdr->src_cid),
		.remote_port = le32_to_cpu(hdr->src_port),
		.reply = true,
		.vsk = vsk,
	};

	return virtio_transport_send_pkt_info(vsk, &info);
}

static bool virtio_transport_space_update(struct sock *sk,
					  struct sk_buff *skb)
{
	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
	struct vsock_sock *vsk = vsock_sk(sk);
	struct virtio_vsock_sock *vvs = vsk->trans;
	bool space_available;

	/* Listener sockets are not associated with any transport, so we are
	 * not able to take the state to see if there is space available in the
	 * remote peer, but since they are only used to receive requests, we
	 * can assume that there is always space available in the other peer.
	 */
	if (!vvs)
		return true;

	/* buf_alloc and fwd_cnt are always included in the hdr */
	spin_lock_bh(&vvs->tx_lock);
	vvs->peer_buf_alloc = le32_to_cpu(hdr->buf_alloc);
	vvs->peer_fwd_cnt = le32_to_cpu(hdr->fwd_cnt);
	space_available = virtio_transport_has_space(vsk);
	spin_unlock_bh(&vvs->tx_lock);
	return space_available;
}

/* Handle server socket */
static int
virtio_transport_recv_listen(struct sock *sk, struct sk_buff *skb,
			     struct virtio_transport *t)
{
	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
	struct vsock_sock *vsk = vsock_sk(sk);
	struct vsock_sock *vchild;
	struct sock *child;
	int ret;

	if (le16_to_cpu(hdr->op) != VIRTIO_VSOCK_OP_REQUEST) {
		virtio_transport_reset_no_sock(t, skb);
		return -EINVAL;
	}

	if (sk_acceptq_is_full(sk)) {
		virtio_transport_reset_no_sock(t, skb);
		return -ENOMEM;
	}

	child = vsock_create_connected(sk);
	if (!child) {
		virtio_transport_reset_no_sock(t, skb);
		return -ENOMEM;
	}

	sk_acceptq_added(sk);

	lock_sock_nested(child, SINGLE_DEPTH_NESTING);

	child->sk_state = TCP_ESTABLISHED;

	vchild = vsock_sk(child);
	vsock_addr_init(&vchild->local_addr, le64_to_cpu(hdr->dst_cid),
			le32_to_cpu(hdr->dst_port));
	vsock_addr_init(&vchild->remote_addr, le64_to_cpu(hdr->src_cid),
			le32_to_cpu(hdr->src_port));

	ret = vsock_assign_transport(vchild, vsk);
	/* The transport assigned (based on remote_addr) must be the same
	 * one on which we received the request.
	 */
	if (ret || vchild->transport != &t->transport) {
		release_sock(child);
		virtio_transport_reset_no_sock(t, skb);
		sock_put(child);
		return ret;
	}

	if (virtio_transport_space_update(child, skb))
		child->sk_write_space(child);

	vsock_insert_connected(vchild);
	vsock_enqueue_accept(sk, child);
	virtio_transport_send_response(vchild, skb);

	release_sock(child);

	sk->sk_data_ready(sk);
	return 0;
}

static bool virtio_transport_valid_type(u16 type)
{
	return (type == VIRTIO_VSOCK_TYPE_STREAM) ||
	       (type == VIRTIO_VSOCK_TYPE_SEQPACKET);
}

/* We are under the virtio-vsock's vsock->rx_lock or vhost-vsock's vq->mutex
 * lock.
 */
void virtio_transport_recv_pkt(struct virtio_transport *t,
			       struct sk_buff *skb)
{
	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
	struct sockaddr_vm src, dst;
	struct vsock_sock *vsk;
	struct sock *sk;
	bool space_available;

	vsock_addr_init(&src, le64_to_cpu(hdr->src_cid),
			le32_to_cpu(hdr->src_port));
	vsock_addr_init(&dst, le64_to_cpu(hdr->dst_cid),
			le32_to_cpu(hdr->dst_port));

	trace_virtio_transport_recv_pkt(src.svm_cid, src.svm_port,
					dst.svm_cid, dst.svm_port,
					le32_to_cpu(hdr->len),
					le16_to_cpu(hdr->type),
					le16_to_cpu(hdr->op),
					le32_to_cpu(hdr->flags),
					le32_to_cpu(hdr->buf_alloc),
					le32_to_cpu(hdr->fwd_cnt));

	if (!virtio_transport_valid_type(le16_to_cpu(hdr->type))) {
		(void)virtio_transport_reset_no_sock(t, skb);
		goto free_pkt;
	}

	/* The socket must be in the connected or bound table,
	 * otherwise send a reset back.
	 */
	sk = vsock_find_connected_socket(&src, &dst);
	if (!sk) {
		sk = vsock_find_bound_socket(&dst);
		if (!sk) {
			(void)virtio_transport_reset_no_sock(t, skb);
			goto free_pkt;
		}
	}

	if (virtio_transport_get_type(sk) != le16_to_cpu(hdr->type)) {
		(void)virtio_transport_reset_no_sock(t, skb);
		sock_put(sk);
		goto free_pkt;
	}

	vsk = vsock_sk(sk);

	lock_sock(sk);

	/* Check if sk has been closed before lock_sock */
	if (sock_flag(sk, SOCK_DONE)) {
		(void)virtio_transport_reset_no_sock(t, skb);
		release_sock(sk);
		sock_put(sk);
		goto free_pkt;
	}

	space_available = virtio_transport_space_update(sk, skb);

	/* Update CID in case it has changed after a transport reset event */
	if (vsk->local_addr.svm_cid != VMADDR_CID_ANY)
		vsk->local_addr.svm_cid = dst.svm_cid;

	if (space_available)
		sk->sk_write_space(sk);

	switch (sk->sk_state) {
	case TCP_LISTEN:
		virtio_transport_recv_listen(sk, skb, t);
		kfree_skb(skb);
		break;
	case TCP_SYN_SENT:
		virtio_transport_recv_connecting(sk, skb);
		kfree_skb(skb);
		break;
	case TCP_ESTABLISHED:
		virtio_transport_recv_connected(sk, skb);
		break;
	case TCP_CLOSING:
		virtio_transport_recv_disconnecting(sk, skb);
		kfree_skb(skb);
		break;
	default:
		(void)virtio_transport_reset_no_sock(t, skb);
		kfree_skb(skb);
		break;
	}

	release_sock(sk);

	/* Release refcnt obtained when we fetched this socket out of the
	 * bound or connected list.
	 */
	sock_put(sk);
	return;

free_pkt:
	kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(virtio_transport_recv_pkt);

/* Remove skbs found in a queue that have a vsk that matches.
 *
 * Each skb is freed.
 *
 * Returns the count of skbs that were reply packets.
 */
int virtio_transport_purge_skbs(void *vsk, struct sk_buff_head *queue)
{
	struct sk_buff_head freeme;
	struct sk_buff *skb, *tmp;
	int cnt = 0;

	skb_queue_head_init(&freeme);

	spin_lock_bh(&queue->lock);
	skb_queue_walk_safe(queue, skb, tmp) {
		if (vsock_sk(skb->sk) != vsk)
			continue;

		__skb_unlink(skb, queue);
		__skb_queue_tail(&freeme, skb);

		if (virtio_vsock_skb_reply(skb))
			cnt++;
	}
	spin_unlock_bh(&queue->lock);

	__skb_queue_purge(&freeme);

	return cnt;
}
EXPORT_SYMBOL_GPL(virtio_transport_purge_skbs);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("common code for virtio vsock");