// SPDX-License-Identifier: GPL-2.0-only
/*
 * common code for virtio vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 */
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/virtio_vsock.h>
#include <uapi/linux/vsockmon.h>

#include <net/sock.h>
#include <net/af_vsock.h>

#define CREATE_TRACE_POINTS
#include <trace/events/vsock_virtio_transport_common.h>

/* How long to wait for graceful shutdown of a connection */
#define VSOCK_CLOSE_TIMEOUT (8 * HZ)

/* Threshold for detecting small packets to copy */
#define GOOD_COPY_LEN	128

static const struct virtio_transport *
virtio_transport_get_ops(struct vsock_sock *vsk)
{
	const struct vsock_transport *t = vsock_core_get_transport(vsk);

	if (WARN_ON(!t))
		return NULL;

	return container_of(t, struct virtio_transport, transport);
}

/* Returns a new packet on success, otherwise returns NULL. */
static struct sk_buff *
virtio_transport_alloc_skb(struct virtio_vsock_pkt_info *info,
			   size_t len,
			   u32 src_cid,
			   u32 src_port,
			   u32 dst_cid,
			   u32 dst_port)
{
	const size_t skb_len = VIRTIO_VSOCK_SKB_HEADROOM + len;
	struct virtio_vsock_hdr *hdr;
	struct sk_buff *skb;
	void *payload;
	int err;

	skb = virtio_vsock_alloc_skb(skb_len, GFP_KERNEL);
	if (!skb)
		return NULL;

	hdr = virtio_vsock_hdr(skb);
	hdr->type = cpu_to_le16(info->type);
	hdr->op = cpu_to_le16(info->op);
	hdr->src_cid = cpu_to_le64(src_cid);
	hdr->dst_cid = cpu_to_le64(dst_cid);
	hdr->src_port = cpu_to_le32(src_port);
	hdr->dst_port = cpu_to_le32(dst_port);
	hdr->flags = cpu_to_le32(info->flags);
	hdr->len = cpu_to_le32(len);

	if (info->msg && len > 0) {
		payload = skb_put(skb, len);
		err = memcpy_from_msg(payload, info->msg, len);
		if (err)
			goto out;

		if (msg_data_left(info->msg) == 0 &&
		    info->type == VIRTIO_VSOCK_TYPE_SEQPACKET) {
			hdr->flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM);

			if (info->msg->msg_flags & MSG_EOR)
				hdr->flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
		}
	}

	if (info->reply)
		virtio_vsock_skb_set_reply(skb);

	trace_virtio_transport_alloc_pkt(src_cid, src_port,
					 dst_cid, dst_port,
					 len,
					 info->type,
					 info->op,
					 info->flags);

	return skb;

out:
	kfree_skb(skb);
	return NULL;
}

/* Packet capture */
static struct sk_buff *virtio_transport_build_skb(void *opaque)
{
	struct virtio_vsock_hdr *pkt_hdr;
	struct sk_buff *pkt = opaque;
	struct af_vsockmon_hdr *hdr;
	struct sk_buff *skb;
	size_t payload_len;
	void *payload_buf;

	/* A packet could be split to fit the RX buffer, so we can retrieve
	 * the payload length from the header and the buffer pointer taking
	 * care of the offset in the original packet.
	 */
	pkt_hdr = virtio_vsock_hdr(pkt);
	payload_len = pkt->len;
	payload_buf = pkt->data;

	skb = alloc_skb(sizeof(*hdr) + sizeof(*pkt_hdr) + payload_len,
			GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, sizeof(*hdr));

	/* pkt->hdr is little-endian so no need to byteswap here */
	hdr->src_cid = pkt_hdr->src_cid;
	hdr->src_port = pkt_hdr->src_port;
	hdr->dst_cid = pkt_hdr->dst_cid;
	hdr->dst_port = pkt_hdr->dst_port;

	hdr->transport = cpu_to_le16(AF_VSOCK_TRANSPORT_VIRTIO);
	hdr->len = cpu_to_le16(sizeof(*pkt_hdr));
	memset(hdr->reserved, 0, sizeof(hdr->reserved));

	switch (le16_to_cpu(pkt_hdr->op)) {
	case VIRTIO_VSOCK_OP_REQUEST:
	case VIRTIO_VSOCK_OP_RESPONSE:
		hdr->op = cpu_to_le16(AF_VSOCK_OP_CONNECT);
		break;
	case VIRTIO_VSOCK_OP_RST:
	case VIRTIO_VSOCK_OP_SHUTDOWN:
		hdr->op = cpu_to_le16(AF_VSOCK_OP_DISCONNECT);
		break;
	case VIRTIO_VSOCK_OP_RW:
		hdr->op = cpu_to_le16(AF_VSOCK_OP_PAYLOAD);
		break;
	case VIRTIO_VSOCK_OP_CREDIT_UPDATE:
	case VIRTIO_VSOCK_OP_CREDIT_REQUEST:
		hdr->op = cpu_to_le16(AF_VSOCK_OP_CONTROL);
		break;
	default:
		hdr->op = cpu_to_le16(AF_VSOCK_OP_UNKNOWN);
		break;
	}

	skb_put_data(skb, pkt_hdr, sizeof(*pkt_hdr));

	if (payload_len) {
		skb_put_data(skb, payload_buf, payload_len);
	}

	return skb;
}

void virtio_transport_deliver_tap_pkt(struct sk_buff *skb)
{
	if (virtio_vsock_skb_tap_delivered(skb))
		return;

	vsock_deliver_tap(virtio_transport_build_skb, skb);
	virtio_vsock_skb_set_tap_delivered(skb);
}
EXPORT_SYMBOL_GPL(virtio_transport_deliver_tap_pkt);

static u16 virtio_transport_get_type(struct sock *sk)
{
	if (sk->sk_type == SOCK_STREAM)
		return VIRTIO_VSOCK_TYPE_STREAM;
	else
		return VIRTIO_VSOCK_TYPE_SEQPACKET;
}

/* This function can only be used on connecting/connected sockets,
 * since a socket assigned to a transport is required.
 *
 * Do not use on listener sockets!
 */
static int virtio_transport_send_pkt_info(struct vsock_sock *vsk,
					  struct virtio_vsock_pkt_info *info)
{
	u32 src_cid, src_port, dst_cid, dst_port;
	const struct virtio_transport *t_ops;
	struct virtio_vsock_sock *vvs;
	u32 pkt_len = info->pkt_len;
	struct sk_buff *skb;

	info->type = virtio_transport_get_type(sk_vsock(vsk));

	t_ops = virtio_transport_get_ops(vsk);
	if (unlikely(!t_ops))
		return -EFAULT;

	src_cid = t_ops->transport.get_local_cid();
	src_port = vsk->local_addr.svm_port;
	if (!info->remote_cid) {
		dst_cid = vsk->remote_addr.svm_cid;
		dst_port = vsk->remote_addr.svm_port;
	} else {
		dst_cid = info->remote_cid;
		dst_port = info->remote_port;
	}

	vvs = vsk->trans;

	/* we can send less than pkt_len bytes */
	if (pkt_len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE)
		pkt_len = VIRTIO_VSOCK_MAX_PKT_BUF_SIZE;

	/* virtio_transport_get_credit might return less than pkt_len credit */
	pkt_len = virtio_transport_get_credit(vvs, pkt_len);

	/* Do not send zero length OP_RW pkt */
	if (pkt_len == 0 && info->op == VIRTIO_VSOCK_OP_RW)
		return pkt_len;

	skb = virtio_transport_alloc_skb(info, pkt_len,
					 src_cid, src_port,
					 dst_cid, dst_port);
	if (!skb) {
		virtio_transport_put_credit(vvs, pkt_len);
		return -ENOMEM;
	}

	virtio_transport_inc_tx_pkt(vvs, skb);

	return t_ops->send_pkt(skb);
}

static bool virtio_transport_inc_rx_pkt(struct virtio_vsock_sock *vvs,
					u32 len)
{
	if (vvs->rx_bytes + len > vvs->buf_alloc)
		return false;

	vvs->rx_bytes += len;
	return true;
}

static void virtio_transport_dec_rx_pkt(struct virtio_vsock_sock *vvs,
					u32 len)
{
	vvs->rx_bytes -= len;
	vvs->fwd_cnt += len;
}

void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct sk_buff *skb)
{
	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);

	spin_lock_bh(&vvs->rx_lock);
	vvs->last_fwd_cnt = vvs->fwd_cnt;
	hdr->fwd_cnt = cpu_to_le32(vvs->fwd_cnt);
	hdr->buf_alloc = cpu_to_le32(vvs->buf_alloc);
	spin_unlock_bh(&vvs->rx_lock);
}
EXPORT_SYMBOL_GPL(virtio_transport_inc_tx_pkt);

u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 credit)
{
	u32 ret;

	spin_lock_bh(&vvs->tx_lock);
	ret = vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt);
	if (ret > credit)
		ret = credit;
	vvs->tx_cnt += ret;
	spin_unlock_bh(&vvs->tx_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(virtio_transport_get_credit);

void virtio_transport_put_credit(struct virtio_vsock_sock *vvs, u32 credit)
{
	spin_lock_bh(&vvs->tx_lock);
	vvs->tx_cnt -= credit;
	spin_unlock_bh(&vvs->tx_lock);
}
EXPORT_SYMBOL_GPL(virtio_transport_put_credit);

static int virtio_transport_send_credit_update(struct vsock_sock *vsk)
{
	struct virtio_vsock_pkt_info info = {
		.op = VIRTIO_VSOCK_OP_CREDIT_UPDATE,
		.vsk = vsk,
	};

	return virtio_transport_send_pkt_info(vsk, &info);
}

static ssize_t
virtio_transport_stream_do_peek(struct vsock_sock *vsk,
				struct msghdr *msg,
				size_t len)
{
	struct virtio_vsock_sock *vvs = vsk->trans;
	size_t bytes, total = 0, off;
	struct sk_buff *skb, *tmp;
	int err = -EFAULT;

	spin_lock_bh(&vvs->rx_lock);

	skb_queue_walk_safe(&vvs->rx_queue, skb, tmp) {
		off = 0;

		if (total == len)
			break;

		while (total < len && off < skb->len) {
			bytes = len - total;
			if (bytes > skb->len - off)
				bytes = skb->len - off;

			/* sk_lock is held by caller so no one else can dequeue.
			 * Unlock rx_lock since memcpy_to_msg() may sleep.
			 */
			spin_unlock_bh(&vvs->rx_lock);

			err = memcpy_to_msg(msg, skb->data + off, bytes);
			if (err)
				goto out;

			spin_lock_bh(&vvs->rx_lock);

			total += bytes;
			off += bytes;
		}
	}

	spin_unlock_bh(&vvs->rx_lock);

	return total;

out:
	if (total)
		err = total;
	return err;
}

static ssize_t
virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
				   struct msghdr *msg,
				   size_t len)
{
	struct virtio_vsock_sock *vvs = vsk->trans;
	size_t bytes, total = 0;
	struct sk_buff *skb;
	int err = -EFAULT;
	u32 free_space;

	spin_lock_bh(&vvs->rx_lock);
	while (total < len && !skb_queue_empty(&vvs->rx_queue)) {
		skb = __skb_dequeue(&vvs->rx_queue);

		bytes = len - total;
		if (bytes > skb->len)
			bytes = skb->len;

		/* sk_lock is held by caller so no one else can dequeue.
		 * Unlock rx_lock since memcpy_to_msg() may sleep.
		 */
		spin_unlock_bh(&vvs->rx_lock);

		err = memcpy_to_msg(msg, skb->data, bytes);
		if (err)
			goto out;

		spin_lock_bh(&vvs->rx_lock);

		total += bytes;
		skb_pull(skb, bytes);

		if (skb->len == 0) {
			u32 pkt_len = le32_to_cpu(virtio_vsock_hdr(skb)->len);

			virtio_transport_dec_rx_pkt(vvs, pkt_len);
			consume_skb(skb);
		} else {
			__skb_queue_head(&vvs->rx_queue, skb);
		}
	}

	free_space = vvs->buf_alloc - (vvs->fwd_cnt - vvs->last_fwd_cnt);

	spin_unlock_bh(&vvs->rx_lock);

	/* To reduce the number of credit update messages,
	 * don't update credits as long as lots of space is available.
	 * Note: the limit chosen here is arbitrary. Setting the limit
	 * too high causes extra messages. Too low causes transmitter
	 * stalls. As stalls are in theory more expensive than extra
	 * messages, we set the limit to a high value. TODO: experiment
	 * with different values.
	 */
	if (free_space < VIRTIO_VSOCK_MAX_PKT_BUF_SIZE)
		virtio_transport_send_credit_update(vsk);

	return total;

out:
	if (total)
		err = total;
	return err;
}

static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk,
						 struct msghdr *msg,
						 int flags)
{
	struct virtio_vsock_sock *vvs = vsk->trans;
	int dequeued_len = 0;
	size_t user_buf_len = msg_data_left(msg);
	bool msg_ready = false;
	struct sk_buff *skb;

	spin_lock_bh(&vvs->rx_lock);

	if (vvs->msg_count == 0) {
		spin_unlock_bh(&vvs->rx_lock);
		return 0;
	}

	while (!msg_ready) {
		struct virtio_vsock_hdr *hdr;
		size_t pkt_len;

		skb = __skb_dequeue(&vvs->rx_queue);
		if (!skb)
			break;
		hdr = virtio_vsock_hdr(skb);
		pkt_len = (size_t)le32_to_cpu(hdr->len);

		if (dequeued_len >= 0) {
			size_t bytes_to_copy;

			bytes_to_copy = min(user_buf_len, pkt_len);

			if (bytes_to_copy) {
				int err;

				/* sk_lock is held by caller so no one else can dequeue.
				 * Unlock rx_lock since memcpy_to_msg() may sleep.
				 */
				spin_unlock_bh(&vvs->rx_lock);

				err = memcpy_to_msg(msg, skb->data, bytes_to_copy);
				if (err) {
					/* Copy of message failed. Rest of
					 * fragments will be freed without copy.
					 */
					dequeued_len = err;
				} else {
					user_buf_len -= bytes_to_copy;
				}

				spin_lock_bh(&vvs->rx_lock);
			}

			if (dequeued_len >= 0)
				dequeued_len += pkt_len;
		}

		if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOM) {
			msg_ready = true;
			vvs->msg_count--;

			if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOR)
				msg->msg_flags |= MSG_EOR;
		}

		virtio_transport_dec_rx_pkt(vvs, pkt_len);
		kfree_skb(skb);
	}

	spin_unlock_bh(&vvs->rx_lock);

	virtio_transport_send_credit_update(vsk);

	return dequeued_len;
}

ssize_t
virtio_transport_stream_dequeue(struct vsock_sock *vsk,
				struct msghdr *msg,
				size_t len, int flags)
{
	if (flags & MSG_PEEK)
		return virtio_transport_stream_do_peek(vsk, msg, len);
	else
		return virtio_transport_stream_do_dequeue(vsk, msg, len);
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_dequeue);

ssize_t
virtio_transport_seqpacket_dequeue(struct vsock_sock *vsk,
				   struct msghdr *msg,
				   int flags)
{
	if (flags & MSG_PEEK)
		return -EOPNOTSUPP;

	return virtio_transport_seqpacket_do_dequeue(vsk, msg, flags);
}
EXPORT_SYMBOL_GPL(virtio_transport_seqpacket_dequeue);

int
virtio_transport_seqpacket_enqueue(struct vsock_sock *vsk,
				   struct msghdr *msg,
				   size_t len)
{
	struct virtio_vsock_sock *vvs = vsk->trans;

	spin_lock_bh(&vvs->tx_lock);

	if (len > vvs->peer_buf_alloc) {
		spin_unlock_bh(&vvs->tx_lock);
		return -EMSGSIZE;
	}

	spin_unlock_bh(&vvs->tx_lock);

	return virtio_transport_stream_enqueue(vsk, msg, len);
}
EXPORT_SYMBOL_GPL(virtio_transport_seqpacket_enqueue);

int
virtio_transport_dgram_dequeue(struct vsock_sock *vsk,
			       struct msghdr *msg,
			       size_t len, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL_GPL(virtio_transport_dgram_dequeue);

s64 virtio_transport_stream_has_data(struct vsock_sock *vsk)
{
	struct virtio_vsock_sock *vvs = vsk->trans;
	s64 bytes;

	spin_lock_bh(&vvs->rx_lock);
	bytes = vvs->rx_bytes;
	spin_unlock_bh(&vvs->rx_lock);

	return bytes;
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_has_data);

u32 virtio_transport_seqpacket_has_data(struct vsock_sock *vsk)
{
	struct virtio_vsock_sock *vvs = vsk->trans;
	u32 msg_count;

	spin_lock_bh(&vvs->rx_lock);
	msg_count = vvs->msg_count;
	spin_unlock_bh(&vvs->rx_lock);

	return msg_count;
}
EXPORT_SYMBOL_GPL(virtio_transport_seqpacket_has_data);

static s64 virtio_transport_has_space(struct vsock_sock *vsk)
{
	struct virtio_vsock_sock *vvs = vsk->trans;
	s64 bytes;

	bytes = vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt);
	if (bytes < 0)
		bytes = 0;

	return bytes;
}

s64 virtio_transport_stream_has_space(struct vsock_sock *vsk)
{
	struct virtio_vsock_sock *vvs = vsk->trans;
	s64 bytes;

	spin_lock_bh(&vvs->tx_lock);
	bytes = virtio_transport_has_space(vsk);
	spin_unlock_bh(&vvs->tx_lock);

	return bytes;
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_has_space);

int virtio_transport_do_socket_init(struct vsock_sock *vsk,
				    struct vsock_sock *psk)
{
	struct virtio_vsock_sock *vvs;

	vvs = kzalloc(sizeof(*vvs), GFP_KERNEL);
	if (!vvs)
		return -ENOMEM;

	vsk->trans = vvs;
	vvs->vsk = vsk;
	if (psk && psk->trans) {
		struct virtio_vsock_sock *ptrans = psk->trans;

		vvs->peer_buf_alloc = ptrans->peer_buf_alloc;
	}

	if (vsk->buffer_size > VIRTIO_VSOCK_MAX_BUF_SIZE)
		vsk->buffer_size = VIRTIO_VSOCK_MAX_BUF_SIZE;

	vvs->buf_alloc = vsk->buffer_size;

	spin_lock_init(&vvs->rx_lock);
	spin_lock_init(&vvs->tx_lock);
	skb_queue_head_init(&vvs->rx_queue);

	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_do_socket_init);

/* sk_lock held by the caller */
void virtio_transport_notify_buffer_size(struct vsock_sock *vsk, u64 *val)
{
	struct virtio_vsock_sock *vvs = vsk->trans;

	if (*val > VIRTIO_VSOCK_MAX_BUF_SIZE)
		*val = VIRTIO_VSOCK_MAX_BUF_SIZE;

	vvs->buf_alloc = *val;

	virtio_transport_send_credit_update(vsk);
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_buffer_size);

int
virtio_transport_notify_poll_in(struct vsock_sock *vsk,
				size_t target,
				bool *data_ready_now)
{
	*data_ready_now = vsock_stream_has_data(vsk) >= target;

	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_poll_in);

int
virtio_transport_notify_poll_out(struct vsock_sock *vsk,
				 size_t target,
				 bool *space_avail_now)
{
	s64 free_space;

	free_space = vsock_stream_has_space(vsk);
	if (free_space > 0)
		*space_avail_now = true;
	else if (free_space == 0)
		*space_avail_now = false;

	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_poll_out);

int virtio_transport_notify_recv_init(struct vsock_sock *vsk,
	size_t target, struct vsock_transport_recv_notify_data *data)
{
	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_init);

int virtio_transport_notify_recv_pre_block(struct vsock_sock *vsk,
	size_t target, struct vsock_transport_recv_notify_data *data)
{
	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_pre_block);

int virtio_transport_notify_recv_pre_dequeue(struct vsock_sock *vsk,
	size_t target, struct vsock_transport_recv_notify_data *data)
{
	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_pre_dequeue);

int virtio_transport_notify_recv_post_dequeue(struct vsock_sock *vsk,
	size_t target, ssize_t copied, bool data_read,
	struct vsock_transport_recv_notify_data *data)
{
	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_post_dequeue);

int virtio_transport_notify_send_init(struct vsock_sock *vsk,
	struct vsock_transport_send_notify_data *data)
{
	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_send_init);

int virtio_transport_notify_send_pre_block(struct vsock_sock *vsk,
	struct vsock_transport_send_notify_data *data)
{
	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_send_pre_block);

int virtio_transport_notify_send_pre_enqueue(struct vsock_sock *vsk,
	struct vsock_transport_send_notify_data *data)
{
	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_send_pre_enqueue);

int virtio_transport_notify_send_post_enqueue(struct vsock_sock *vsk,
	ssize_t written, struct vsock_transport_send_notify_data *data)
{
	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_send_post_enqueue);

u64 virtio_transport_stream_rcvhiwat(struct vsock_sock *vsk)
{
	return vsk->buffer_size;
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_rcvhiwat);

bool virtio_transport_stream_is_active(struct vsock_sock *vsk)
{
	return true;
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_is_active);

bool virtio_transport_stream_allow(u32 cid, u32 port)
{
	return true;
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_allow);

int virtio_transport_dgram_bind(struct vsock_sock *vsk,
				struct sockaddr_vm *addr)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL_GPL(virtio_transport_dgram_bind);

bool virtio_transport_dgram_allow(u32 cid, u32 port)
{
	return false;
}
EXPORT_SYMBOL_GPL(virtio_transport_dgram_allow);

int virtio_transport_connect(struct vsock_sock *vsk)
{
	struct virtio_vsock_pkt_info info = {
		.op = VIRTIO_VSOCK_OP_REQUEST,
		.vsk = vsk,
	};

	return virtio_transport_send_pkt_info(vsk, &info);
}
EXPORT_SYMBOL_GPL(virtio_transport_connect);

int virtio_transport_shutdown(struct vsock_sock *vsk, int mode)
{
	struct virtio_vsock_pkt_info info = {
		.op = VIRTIO_VSOCK_OP_SHUTDOWN,
		.flags = (mode & RCV_SHUTDOWN ?
			  VIRTIO_VSOCK_SHUTDOWN_RCV : 0) |
			 (mode & SEND_SHUTDOWN ?
			  VIRTIO_VSOCK_SHUTDOWN_SEND : 0),
		.vsk = vsk,
	};

	return virtio_transport_send_pkt_info(vsk, &info);
}
EXPORT_SYMBOL_GPL(virtio_transport_shutdown);

int
virtio_transport_dgram_enqueue(struct vsock_sock *vsk,
			       struct sockaddr_vm *remote_addr,
			       struct msghdr *msg,
			       size_t dgram_len)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL_GPL(virtio_transport_dgram_enqueue);

ssize_t
virtio_transport_stream_enqueue(struct vsock_sock *vsk,
				struct msghdr *msg,
				size_t len)
{
	struct virtio_vsock_pkt_info info = {
		.op = VIRTIO_VSOCK_OP_RW,
		.msg = msg,
		.pkt_len = len,
		.vsk = vsk,
	};

	return virtio_transport_send_pkt_info(vsk, &info);
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_enqueue);

void virtio_transport_destruct(struct vsock_sock *vsk)
{
	struct virtio_vsock_sock *vvs = vsk->trans;

	kfree(vvs);
}
EXPORT_SYMBOL_GPL(virtio_transport_destruct);

static int virtio_transport_reset(struct vsock_sock *vsk,
				  struct sk_buff *skb)
{
	struct virtio_vsock_pkt_info info = {
		.op = VIRTIO_VSOCK_OP_RST,
		.reply = !!skb,
		.vsk = vsk,
	};

	/* Send RST only if the original pkt is not a RST pkt */
	if (skb && le16_to_cpu(virtio_vsock_hdr(skb)->op) == VIRTIO_VSOCK_OP_RST)
		return 0;

	return virtio_transport_send_pkt_info(vsk, &info);
}

/* Normally packets are associated with a socket. There may be no socket if an
 * attempt was made to connect to a socket that does not exist.
 */
static int virtio_transport_reset_no_sock(const struct virtio_transport *t,
					  struct sk_buff *skb)
{
	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
	struct virtio_vsock_pkt_info info = {
		.op = VIRTIO_VSOCK_OP_RST,
		.type = le16_to_cpu(hdr->type),
		.reply = true,
	};
	struct sk_buff *reply;

	/* Send RST only if the original pkt is not a RST pkt */
	if (le16_to_cpu(hdr->op) == VIRTIO_VSOCK_OP_RST)
		return 0;

	reply = virtio_transport_alloc_skb(&info, 0,
					   le64_to_cpu(hdr->dst_cid),
					   le32_to_cpu(hdr->dst_port),
					   le64_to_cpu(hdr->src_cid),
					   le32_to_cpu(hdr->src_port));
	if (!reply)
		return -ENOMEM;

	if (!t) {
		kfree_skb(reply);
		return -ENOTCONN;
	}

	return t->send_pkt(reply);
}

/* This function should be called with sk_lock held and SOCK_DONE set */
static void virtio_transport_remove_sock(struct vsock_sock *vsk)
{
	struct virtio_vsock_sock *vvs = vsk->trans;

	/* We don't need to take rx_lock, as the socket is closing and we are
	 * removing it.
	 */
	__skb_queue_purge(&vvs->rx_queue);
	vsock_remove_sock(vsk);
}

static void virtio_transport_wait_close(struct sock *sk, long timeout)
{
	if (timeout) {
		DEFINE_WAIT_FUNC(wait, woken_wake_function);

		add_wait_queue(sk_sleep(sk), &wait);

		do {
			if (sk_wait_event(sk, &timeout,
					  sock_flag(sk, SOCK_DONE), &wait))
				break;
		} while (!signal_pending(current) && timeout);

		remove_wait_queue(sk_sleep(sk), &wait);
	}
}

static void virtio_transport_do_close(struct vsock_sock *vsk,
				      bool cancel_timeout)
{
	struct sock *sk = sk_vsock(vsk);

	sock_set_flag(sk, SOCK_DONE);
	vsk->peer_shutdown = SHUTDOWN_MASK;
	if (vsock_stream_has_data(vsk) <= 0)
		sk->sk_state = TCP_CLOSING;
	sk->sk_state_change(sk);

	if (vsk->close_work_scheduled &&
	    (!cancel_timeout || cancel_delayed_work(&vsk->close_work))) {
		vsk->close_work_scheduled = false;

		virtio_transport_remove_sock(vsk);

		/* Release refcnt obtained when we scheduled the timeout */
		sock_put(sk);
	}
}

static void virtio_transport_close_timeout(struct work_struct *work)
{
	struct vsock_sock *vsk =
		container_of(work, struct vsock_sock, close_work.work);
	struct sock *sk = sk_vsock(vsk);

	sock_hold(sk);
	lock_sock(sk);

	if (!sock_flag(sk, SOCK_DONE)) {
		(void)virtio_transport_reset(vsk, NULL);

		virtio_transport_do_close(vsk, false);
	}

	vsk->close_work_scheduled = false;

	release_sock(sk);
	sock_put(sk);
}

/* User context, vsk->sk is locked */
static bool virtio_transport_close(struct vsock_sock *vsk)
{
	struct sock *sk = &vsk->sk;

	if (!(sk->sk_state == TCP_ESTABLISHED ||
	      sk->sk_state == TCP_CLOSING))
		return true;

	/* Already received SHUTDOWN from peer, reply with RST */
	if ((vsk->peer_shutdown & SHUTDOWN_MASK) == SHUTDOWN_MASK) {
		(void)virtio_transport_reset(vsk, NULL);
		return true;
	}

	if ((sk->sk_shutdown & SHUTDOWN_MASK) != SHUTDOWN_MASK)
		(void)virtio_transport_shutdown(vsk, SHUTDOWN_MASK);

	if (sock_flag(sk, SOCK_LINGER) && !(current->flags & PF_EXITING))
		virtio_transport_wait_close(sk, sk->sk_lingertime);

	if (sock_flag(sk, SOCK_DONE)) {
		return true;
	}

	sock_hold(sk);
	INIT_DELAYED_WORK(&vsk->close_work,
			  virtio_transport_close_timeout);
	vsk->close_work_scheduled = true;
	schedule_delayed_work(&vsk->close_work, VSOCK_CLOSE_TIMEOUT);
	return false;
}

void virtio_transport_release(struct vsock_sock *vsk)
{
	struct sock *sk = &vsk->sk;
	bool remove_sock = true;

	if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)
		remove_sock = virtio_transport_close(vsk);

	if (remove_sock) {
		sock_set_flag(sk, SOCK_DONE);
		virtio_transport_remove_sock(vsk);
	}
}
EXPORT_SYMBOL_GPL(virtio_transport_release);

static int
virtio_transport_recv_connecting(struct sock *sk,
				 struct sk_buff *skb)
{
	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
	struct vsock_sock *vsk = vsock_sk(sk);
	int skerr;
	int err;

	switch (le16_to_cpu(hdr->op)) {
	case VIRTIO_VSOCK_OP_RESPONSE:
		sk->sk_state = TCP_ESTABLISHED;
		sk->sk_socket->state = SS_CONNECTED;
		vsock_insert_connected(vsk);
		sk->sk_state_change(sk);
		break;
	case VIRTIO_VSOCK_OP_INVALID:
		break;
	case VIRTIO_VSOCK_OP_RST:
		skerr = ECONNRESET;
		err = 0;
		goto destroy;
	default:
		skerr = EPROTO;
		err = -EINVAL;
		goto destroy;
	}
	return 0;

destroy:
	virtio_transport_reset(vsk, skb);
	sk->sk_state = TCP_CLOSE;
	sk->sk_err = skerr;
	sk_error_report(sk);
	return err;
}

static void
virtio_transport_recv_enqueue(struct vsock_sock *vsk,
			      struct sk_buff *skb)
{
	struct virtio_vsock_sock *vvs = vsk->trans;
	bool can_enqueue, free_pkt = false;
	struct virtio_vsock_hdr *hdr;
	u32 len;

	hdr = virtio_vsock_hdr(skb);
	len = le32_to_cpu(hdr->len);

	spin_lock_bh(&vvs->rx_lock);

	can_enqueue = virtio_transport_inc_rx_pkt(vvs, len);
	if (!can_enqueue) {
		free_pkt = true;
		goto out;
	}

	if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOM)
		vvs->msg_count++;

	/* Try to copy small packets into the buffer of last packet queued,
	 * to avoid wasting memory queueing the entire buffer with a small
	 * payload.
	 */
	if (len <= GOOD_COPY_LEN && !skb_queue_empty(&vvs->rx_queue)) {
		struct virtio_vsock_hdr *last_hdr;
		struct sk_buff *last_skb;

		last_skb = skb_peek_tail(&vvs->rx_queue);
		last_hdr = virtio_vsock_hdr(last_skb);

		/* If there is space in the last packet queued, we copy the
		 * new packet in its buffer. We avoid this if the last packet
		 * queued has VIRTIO_VSOCK_SEQ_EOM set, because this is
		 * delimiter of SEQPACKET message, so 'pkt' is the first packet
		 * of a new message.
		 */
		if (skb->len < skb_tailroom(last_skb) &&
		    !(le32_to_cpu(last_hdr->flags) & VIRTIO_VSOCK_SEQ_EOM)) {
			memcpy(skb_put(last_skb, skb->len), skb->data, skb->len);
			free_pkt = true;
			last_hdr->flags |= hdr->flags;
			last_hdr->len = cpu_to_le32(last_skb->len);
			goto out;
		}
	}

	__skb_queue_tail(&vvs->rx_queue, skb);

out:
	spin_unlock_bh(&vvs->rx_lock);
	if (free_pkt)
		kfree_skb(skb);
}

static int
virtio_transport_recv_connected(struct sock *sk,
				struct sk_buff *skb)
{
	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
	struct vsock_sock *vsk = vsock_sk(sk);
	int err = 0;

	switch (le16_to_cpu(hdr->op)) {
	case VIRTIO_VSOCK_OP_RW:
		virtio_transport_recv_enqueue(vsk, skb);
		vsock_data_ready(sk);
		return err;
	case VIRTIO_VSOCK_OP_CREDIT_REQUEST:
		virtio_transport_send_credit_update(vsk);
		break;
	case VIRTIO_VSOCK_OP_CREDIT_UPDATE:
		sk->sk_write_space(sk);
		break;
	case VIRTIO_VSOCK_OP_SHUTDOWN:
		if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SHUTDOWN_RCV)
			vsk->peer_shutdown |= RCV_SHUTDOWN;
		if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SHUTDOWN_SEND)
			vsk->peer_shutdown |= SEND_SHUTDOWN;
		if (vsk->peer_shutdown == SHUTDOWN_MASK &&
		    vsock_stream_has_data(vsk) <= 0 &&
		    !sock_flag(sk, SOCK_DONE)) {
			(void)virtio_transport_reset(vsk, NULL);
			virtio_transport_do_close(vsk, true);
		}
		if (le32_to_cpu(virtio_vsock_hdr(skb)->flags))
			sk->sk_state_change(sk);
		break;
	case VIRTIO_VSOCK_OP_RST:
		virtio_transport_do_close(vsk, true);
		break;
	default:
		err = -EINVAL;
		break;
	}

	kfree_skb(skb);
	return err;
}

static void
virtio_transport_recv_disconnecting(struct sock *sk,
				    struct sk_buff *skb)
{
	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
	struct vsock_sock *vsk = vsock_sk(sk);

	if (le16_to_cpu(hdr->op) == VIRTIO_VSOCK_OP_RST)
		virtio_transport_do_close(vsk, true);
}

static int
virtio_transport_send_response(struct vsock_sock *vsk,
			       struct sk_buff *skb)
{
	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
	struct virtio_vsock_pkt_info info = {
		.op = VIRTIO_VSOCK_OP_RESPONSE,
		.remote_cid = le64_to_cpu(hdr->src_cid),
		.remote_port = le32_to_cpu(hdr->src_port),
		.reply = true,
		.vsk = vsk,
	};

	return virtio_transport_send_pkt_info(vsk, &info);
}

static bool virtio_transport_space_update(struct sock *sk,
					  struct sk_buff *skb)
{
	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
	struct vsock_sock *vsk = vsock_sk(sk);
	struct virtio_vsock_sock *vvs = vsk->trans;
	bool space_available;

	/* Listener sockets are not associated with any transport, so we are
	 * not able to take the state to see if there is space available in the
	 * remote peer, but since they are only used to receive requests, we
	 * can assume that there is always space available in the other peer.
	 */
	if (!vvs)
		return true;

	/* buf_alloc and fwd_cnt is always included in the hdr */
	spin_lock_bh(&vvs->tx_lock);
	vvs->peer_buf_alloc = le32_to_cpu(hdr->buf_alloc);
	vvs->peer_fwd_cnt = le32_to_cpu(hdr->fwd_cnt);
	space_available = virtio_transport_has_space(vsk);
	spin_unlock_bh(&vvs->tx_lock);
	return space_available;
}

/* Handle server socket */
static int
virtio_transport_recv_listen(struct sock *sk, struct sk_buff *skb,
			     struct virtio_transport *t)
{
	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
	struct vsock_sock *vsk = vsock_sk(sk);
	struct vsock_sock *vchild;
	struct sock *child;
	int ret;

	if (le16_to_cpu(hdr->op) != VIRTIO_VSOCK_OP_REQUEST) {
		virtio_transport_reset_no_sock(t, skb);
		return -EINVAL;
	}

	if (sk_acceptq_is_full(sk)) {
		virtio_transport_reset_no_sock(t, skb);
		return -ENOMEM;
	}

	child = vsock_create_connected(sk);
	if (!child) {
		virtio_transport_reset_no_sock(t, skb);
		return -ENOMEM;
	}

	sk_acceptq_added(sk);

	lock_sock_nested(child, SINGLE_DEPTH_NESTING);

	child->sk_state = TCP_ESTABLISHED;

	vchild = vsock_sk(child);
	vsock_addr_init(&vchild->local_addr, le64_to_cpu(hdr->dst_cid),
			le32_to_cpu(hdr->dst_port));
	vsock_addr_init(&vchild->remote_addr, le64_to_cpu(hdr->src_cid),
			le32_to_cpu(hdr->src_port));

	ret = vsock_assign_transport(vchild, vsk);
	/* Transport assigned (looking at remote_addr) must be the same
	 * where we received the request.
	 */
	if (ret || vchild->transport != &t->transport) {
		release_sock(child);
		virtio_transport_reset_no_sock(t, skb);
		sock_put(child);
		return ret;
	}

	if (virtio_transport_space_update(child, skb))
		child->sk_write_space(child);

	vsock_insert_connected(vchild);
	vsock_enqueue_accept(sk, child);
	virtio_transport_send_response(vchild, skb);

	release_sock(child);

	sk->sk_data_ready(sk);
	return 0;
}

static bool virtio_transport_valid_type(u16 type)
{
	return (type == VIRTIO_VSOCK_TYPE_STREAM) ||
	       (type == VIRTIO_VSOCK_TYPE_SEQPACKET);
}

/* We are under the virtio-vsock's vsock->rx_lock or vhost-vsock's vq->mutex
 * lock.
 */
void virtio_transport_recv_pkt(struct virtio_transport *t,
			       struct sk_buff *skb)
{
	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
	struct sockaddr_vm src, dst;
	struct vsock_sock *vsk;
	struct sock *sk;
	bool space_available;

	vsock_addr_init(&src, le64_to_cpu(hdr->src_cid),
			le32_to_cpu(hdr->src_port));
	vsock_addr_init(&dst, le64_to_cpu(hdr->dst_cid),
			le32_to_cpu(hdr->dst_port));

	trace_virtio_transport_recv_pkt(src.svm_cid, src.svm_port,
					dst.svm_cid, dst.svm_port,
					le32_to_cpu(hdr->len),
					le16_to_cpu(hdr->type),
					le16_to_cpu(hdr->op),
					le32_to_cpu(hdr->flags),
					le32_to_cpu(hdr->buf_alloc),
					le32_to_cpu(hdr->fwd_cnt));

	if (!virtio_transport_valid_type(le16_to_cpu(hdr->type))) {
		(void)virtio_transport_reset_no_sock(t, skb);
		goto free_pkt;
	}

	/* The socket must be in connected or bound table
	 * otherwise send reset back
	 */
	sk = vsock_find_connected_socket(&src, &dst);
	if (!sk) {
		sk = vsock_find_bound_socket(&dst);
		if (!sk) {
			(void)virtio_transport_reset_no_sock(t, skb);
			goto free_pkt;
		}
	}

	if (virtio_transport_get_type(sk) != le16_to_cpu(hdr->type)) {
		(void)virtio_transport_reset_no_sock(t, skb);
		sock_put(sk);
		goto free_pkt;
	}

	vsk = vsock_sk(sk);

	lock_sock(sk);

	/* Check if sk has been closed before lock_sock */
	if (sock_flag(sk, SOCK_DONE)) {
		(void)virtio_transport_reset_no_sock(t, skb);
		release_sock(sk);
		sock_put(sk);
		goto free_pkt;
	}

	space_available = virtio_transport_space_update(sk, skb);

	/* Update CID in case it has changed after a transport reset event */
	if (vsk->local_addr.svm_cid != VMADDR_CID_ANY)
		vsk->local_addr.svm_cid = dst.svm_cid;

	if (space_available)
		sk->sk_write_space(sk);

	switch (sk->sk_state) {
	case TCP_LISTEN:
		virtio_transport_recv_listen(sk, skb, t);
		kfree_skb(skb);
		break;
	case TCP_SYN_SENT:
		virtio_transport_recv_connecting(sk, skb);
		kfree_skb(skb);
		break;
	case TCP_ESTABLISHED:
		virtio_transport_recv_connected(sk, skb);
		break;
	case TCP_CLOSING:
		virtio_transport_recv_disconnecting(sk, skb);
		kfree_skb(skb);
		break;
	default:
		(void)virtio_transport_reset_no_sock(t, skb);
		kfree_skb(skb);
		break;
	}

	release_sock(sk);

	/* Release refcnt obtained when we fetched this socket out of the
	 * bound or connected list.
	 */
	sock_put(sk);
	return;

free_pkt:
	kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(virtio_transport_recv_pkt);

/* Remove skbs found in a queue that have a vsk that matches.
 *
 * Each skb is freed.
 *
 * Returns the count of skbs that were reply packets.
 */
int virtio_transport_purge_skbs(void *vsk, struct sk_buff_head *queue)
{
	struct sk_buff_head freeme;
	struct sk_buff *skb, *tmp;
	int cnt = 0;

	skb_queue_head_init(&freeme);

	spin_lock_bh(&queue->lock);
	skb_queue_walk_safe(queue, skb, tmp) {
		if (vsock_sk(skb->sk) != vsk)
			continue;

		__skb_unlink(skb, queue);
		__skb_queue_tail(&freeme, skb);

		if (virtio_vsock_skb_reply(skb))
			cnt++;
	}
	spin_unlock_bh(&queue->lock);

	__skb_queue_purge(&freeme);

	return cnt;
}
EXPORT_SYMBOL_GPL(virtio_transport_purge_skbs);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("common code for virtio vsock");