// SPDX-License-Identifier: GPL-2.0-only
/*
 * common code for virtio vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 */
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/virtio_vsock.h>
#include <uapi/linux/vsockmon.h>

#include <net/sock.h>
#include <net/af_vsock.h>

#define CREATE_TRACE_POINTS
#include <trace/events/vsock_virtio_transport_common.h>

/* How long to wait for graceful shutdown of a connection */
#define VSOCK_CLOSE_TIMEOUT (8 * HZ)

/* Threshold for detecting small packets to copy */
#define GOOD_COPY_LEN  128

static const struct virtio_transport *
virtio_transport_get_ops(struct vsock_sock *vsk)
{
        const struct vsock_transport *t = vsock_core_get_transport(vsk);

        if (WARN_ON(!t))
                return NULL;

        return container_of(t, struct virtio_transport, transport);
}

static struct virtio_vsock_pkt *
virtio_transport_alloc_pkt(struct virtio_vsock_pkt_info *info,
                           size_t len,
                           u32 src_cid,
                           u32 src_port,
                           u32 dst_cid,
                           u32 dst_port)
{
        struct virtio_vsock_pkt *pkt;
        int err;

        pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
        if (!pkt)
                return NULL;

        pkt->hdr.type = cpu_to_le16(info->type);
        pkt->hdr.op = cpu_to_le16(info->op);
        pkt->hdr.src_cid = cpu_to_le64(src_cid);
        pkt->hdr.dst_cid = cpu_to_le64(dst_cid);
        pkt->hdr.src_port = cpu_to_le32(src_port);
        pkt->hdr.dst_port = cpu_to_le32(dst_port);
        pkt->hdr.flags = cpu_to_le32(info->flags);
        pkt->len = len;
        pkt->hdr.len = cpu_to_le32(len);
        pkt->reply = info->reply;
        pkt->vsk = info->vsk;

        if (info->msg && len > 0) {
                pkt->buf = kmalloc(len, GFP_KERNEL);
                if (!pkt->buf)
                        goto out_pkt;

                pkt->buf_len = len;

                err = memcpy_from_msg(pkt->buf, info->msg, len);
                if (err)
                        goto out;

                if (msg_data_left(info->msg) == 0 &&
                    info->type == VIRTIO_VSOCK_TYPE_SEQPACKET)
                        pkt->hdr.flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
        }

        trace_virtio_transport_alloc_pkt(src_cid, src_port,
                                         dst_cid, dst_port,
                                         len,
                                         info->type,
                                         info->op,
                                         info->flags);

        return pkt;

out:
        kfree(pkt->buf);
out_pkt:
        kfree(pkt);
        return NULL;
}
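/* Example (illustrative only): callers build a struct
 * virtio_vsock_pkt_info on the stack and let virtio_transport_alloc_pkt()
 * fill in the little-endian wire header.  A zero-length control packet,
 * such as the RST reply built later in this file by
 * virtio_transport_reset_no_sock(), looks roughly like:
 *
 *	struct virtio_vsock_pkt_info info = {
 *		.op    = VIRTIO_VSOCK_OP_RST,
 *		.type  = VIRTIO_VSOCK_TYPE_STREAM,
 *		.reply = true,
 *	};
 *	pkt = virtio_transport_alloc_pkt(&info, 0, src_cid, src_port,
 *					 dst_cid, dst_port);
 *
 * With len == 0 no payload buffer is allocated and pkt->buf stays NULL.
 */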
/* Packet capture */
static struct sk_buff *virtio_transport_build_skb(void *opaque)
{
        struct virtio_vsock_pkt *pkt = opaque;
        struct af_vsockmon_hdr *hdr;
        struct sk_buff *skb;
        size_t payload_len;
        void *payload_buf;

        /* A packet could be split to fit the RX buffer, so we can retrieve
         * the payload length from the header and the buffer pointer taking
         * care of the offset in the original packet.
         */
        payload_len = le32_to_cpu(pkt->hdr.len);
        payload_buf = pkt->buf + pkt->off;

        skb = alloc_skb(sizeof(*hdr) + sizeof(pkt->hdr) + payload_len,
                        GFP_ATOMIC);
        if (!skb)
                return NULL;

        hdr = skb_put(skb, sizeof(*hdr));

        /* pkt->hdr is little-endian so no need to byteswap here */
        hdr->src_cid = pkt->hdr.src_cid;
        hdr->src_port = pkt->hdr.src_port;
        hdr->dst_cid = pkt->hdr.dst_cid;
        hdr->dst_port = pkt->hdr.dst_port;

        hdr->transport = cpu_to_le16(AF_VSOCK_TRANSPORT_VIRTIO);
        hdr->len = cpu_to_le16(sizeof(pkt->hdr));
        memset(hdr->reserved, 0, sizeof(hdr->reserved));

        switch (le16_to_cpu(pkt->hdr.op)) {
        case VIRTIO_VSOCK_OP_REQUEST:
        case VIRTIO_VSOCK_OP_RESPONSE:
                hdr->op = cpu_to_le16(AF_VSOCK_OP_CONNECT);
                break;
        case VIRTIO_VSOCK_OP_RST:
        case VIRTIO_VSOCK_OP_SHUTDOWN:
                hdr->op = cpu_to_le16(AF_VSOCK_OP_DISCONNECT);
                break;
        case VIRTIO_VSOCK_OP_RW:
                hdr->op = cpu_to_le16(AF_VSOCK_OP_PAYLOAD);
                break;
        case VIRTIO_VSOCK_OP_CREDIT_UPDATE:
        case VIRTIO_VSOCK_OP_CREDIT_REQUEST:
                hdr->op = cpu_to_le16(AF_VSOCK_OP_CONTROL);
                break;
        default:
                hdr->op = cpu_to_le16(AF_VSOCK_OP_UNKNOWN);
                break;
        }

        skb_put_data(skb, &pkt->hdr, sizeof(pkt->hdr));

        if (payload_len) {
                skb_put_data(skb, payload_buf, payload_len);
        }

        return skb;
}

void virtio_transport_deliver_tap_pkt(struct virtio_vsock_pkt *pkt)
{
        if (pkt->tap_delivered)
                return;

        vsock_deliver_tap(virtio_transport_build_skb, pkt);
        pkt->tap_delivered = true;
}
EXPORT_SYMBOL_GPL(virtio_transport_deliver_tap_pkt);
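/* A note on the capture layout built above (restated for clarity): each
 * skb handed to a vsockmon tap is
 *
 *	[ struct af_vsockmon_hdr ][ struct virtio_vsock_hdr ][ payload ]
 *
 * where the middle part is the raw little-endian wire header, which is
 * why hdr->len is set to sizeof(pkt->hdr).  Userspace capture tools that
 * read a vsockmon device can then decode both the monitor header and the
 * embedded virtio header.
 */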
static u16 virtio_transport_get_type(struct sock *sk)
{
        if (sk->sk_type == SOCK_STREAM)
                return VIRTIO_VSOCK_TYPE_STREAM;
        else
                return VIRTIO_VSOCK_TYPE_SEQPACKET;
}

/* This function can only be used on connecting/connected sockets,
 * since a socket assigned to a transport is required.
 *
 * Do not use on listener sockets!
 */
static int virtio_transport_send_pkt_info(struct vsock_sock *vsk,
                                          struct virtio_vsock_pkt_info *info)
{
        u32 src_cid, src_port, dst_cid, dst_port;
        const struct virtio_transport *t_ops;
        struct virtio_vsock_sock *vvs;
        struct virtio_vsock_pkt *pkt;
        u32 pkt_len = info->pkt_len;

        info->type = virtio_transport_get_type(sk_vsock(vsk));

        t_ops = virtio_transport_get_ops(vsk);
        if (unlikely(!t_ops))
                return -EFAULT;

        src_cid = t_ops->transport.get_local_cid();
        src_port = vsk->local_addr.svm_port;
        if (!info->remote_cid) {
                dst_cid = vsk->remote_addr.svm_cid;
                dst_port = vsk->remote_addr.svm_port;
        } else {
                dst_cid = info->remote_cid;
                dst_port = info->remote_port;
        }

        vvs = vsk->trans;

        /* we can send less than pkt_len bytes */
        if (pkt_len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE)
                pkt_len = VIRTIO_VSOCK_MAX_PKT_BUF_SIZE;

        /* virtio_transport_get_credit might return less than pkt_len credit */
        pkt_len = virtio_transport_get_credit(vvs, pkt_len);

        /* Do not send zero length OP_RW pkt */
        if (pkt_len == 0 && info->op == VIRTIO_VSOCK_OP_RW)
                return pkt_len;

        pkt = virtio_transport_alloc_pkt(info, pkt_len,
                                         src_cid, src_port,
                                         dst_cid, dst_port);
        if (!pkt) {
                virtio_transport_put_credit(vvs, pkt_len);
                return -ENOMEM;
        }

        virtio_transport_inc_tx_pkt(vvs, pkt);

        return t_ops->send_pkt(pkt);
}

static bool virtio_transport_inc_rx_pkt(struct virtio_vsock_sock *vvs,
                                        struct virtio_vsock_pkt *pkt)
{
        if (vvs->rx_bytes + pkt->len > vvs->buf_alloc)
                return false;

        vvs->rx_bytes += pkt->len;
        return true;
}

static void virtio_transport_dec_rx_pkt(struct virtio_vsock_sock *vvs,
                                        struct virtio_vsock_pkt *pkt)
{
        vvs->rx_bytes -= pkt->len;
        vvs->fwd_cnt += pkt->len;
}

void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct virtio_vsock_pkt *pkt)
{
        spin_lock_bh(&vvs->rx_lock);
        vvs->last_fwd_cnt = vvs->fwd_cnt;
        pkt->hdr.fwd_cnt = cpu_to_le32(vvs->fwd_cnt);
        pkt->hdr.buf_alloc = cpu_to_le32(vvs->buf_alloc);
        spin_unlock_bh(&vvs->rx_lock);
}
EXPORT_SYMBOL_GPL(virtio_transport_inc_tx_pkt);

u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 credit)
{
        u32 ret;

        spin_lock_bh(&vvs->tx_lock);
        ret = vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt);
        if (ret > credit)
                ret = credit;
        vvs->tx_cnt += ret;
        spin_unlock_bh(&vvs->tx_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(virtio_transport_get_credit);

void virtio_transport_put_credit(struct virtio_vsock_sock *vvs, u32 credit)
{
        spin_lock_bh(&vvs->tx_lock);
        vvs->tx_cnt -= credit;
        spin_unlock_bh(&vvs->tx_lock);
}
EXPORT_SYMBOL_GPL(virtio_transport_put_credit);

static int virtio_transport_send_credit_update(struct vsock_sock *vsk)
{
        struct virtio_vsock_pkt_info info = {
                .op = VIRTIO_VSOCK_OP_CREDIT_UPDATE,
                .vsk = vsk,
        };

        return virtio_transport_send_pkt_info(vsk, &info);
}
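/* Worked example of the credit computation above (illustrative numbers):
 * suppose the peer advertised peer_buf_alloc = 64 KiB, we have sent
 * tx_cnt = 80 KiB in total, and the peer reports having consumed
 * peer_fwd_cnt = 40 KiB.  Bytes still in flight are
 *
 *	tx_cnt - peer_fwd_cnt = 80 KiB - 40 KiB = 40 KiB
 *
 * so virtio_transport_get_credit() grants at most
 *
 *	peer_buf_alloc - (tx_cnt - peer_fwd_cnt) = 64 KiB - 40 KiB = 24 KiB
 *
 * regardless of how much the caller asked for.  The u32 arithmetic stays
 * correct when the counters wrap, since both sides wrap consistently.
 */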
static ssize_t
virtio_transport_stream_do_peek(struct vsock_sock *vsk,
                                struct msghdr *msg,
                                size_t len)
{
        struct virtio_vsock_sock *vvs = vsk->trans;
        struct virtio_vsock_pkt *pkt;
        size_t bytes, total = 0, off;
        int err = -EFAULT;

        spin_lock_bh(&vvs->rx_lock);

        list_for_each_entry(pkt, &vvs->rx_queue, list) {
                off = pkt->off;

                if (total == len)
                        break;

                while (total < len && off < pkt->len) {
                        bytes = len - total;
                        if (bytes > pkt->len - off)
                                bytes = pkt->len - off;

                        /* sk_lock is held by caller so no one else can dequeue.
                         * Unlock rx_lock since memcpy_to_msg() may sleep.
                         */
                        spin_unlock_bh(&vvs->rx_lock);

                        err = memcpy_to_msg(msg, pkt->buf + off, bytes);
                        if (err)
                                goto out;

                        spin_lock_bh(&vvs->rx_lock);

                        total += bytes;
                        off += bytes;
                }
        }

        spin_unlock_bh(&vvs->rx_lock);

        return total;

out:
        if (total)
                err = total;
        return err;
}

static ssize_t
virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
                                   struct msghdr *msg,
                                   size_t len)
{
        struct virtio_vsock_sock *vvs = vsk->trans;
        struct virtio_vsock_pkt *pkt;
        size_t bytes, total = 0;
        u32 free_space;
        int err = -EFAULT;

        spin_lock_bh(&vvs->rx_lock);
        while (total < len && !list_empty(&vvs->rx_queue)) {
                pkt = list_first_entry(&vvs->rx_queue,
                                       struct virtio_vsock_pkt, list);

                bytes = len - total;
                if (bytes > pkt->len - pkt->off)
                        bytes = pkt->len - pkt->off;

                /* sk_lock is held by caller so no one else can dequeue.
                 * Unlock rx_lock since memcpy_to_msg() may sleep.
                 */
                spin_unlock_bh(&vvs->rx_lock);

                err = memcpy_to_msg(msg, pkt->buf + pkt->off, bytes);
                if (err)
                        goto out;

                spin_lock_bh(&vvs->rx_lock);

                total += bytes;
                pkt->off += bytes;
                if (pkt->off == pkt->len) {
                        virtio_transport_dec_rx_pkt(vvs, pkt);
                        list_del(&pkt->list);
                        virtio_transport_free_pkt(pkt);
                }
        }

        free_space = vvs->buf_alloc - (vvs->fwd_cnt - vvs->last_fwd_cnt);

        spin_unlock_bh(&vvs->rx_lock);

        /* To reduce the number of credit update messages,
         * don't update credits as long as lots of space is available.
         * Note: the limit chosen here is arbitrary. Setting the limit
         * too high causes extra messages. Too low causes transmitter
         * stalls. As stalls are in theory more expensive than extra
         * messages, we set the limit to a high value. TODO: experiment
         * with different values.
         */
        if (free_space < VIRTIO_VSOCK_MAX_PKT_BUF_SIZE)
                virtio_transport_send_credit_update(vsk);

        return total;

out:
        if (total)
                err = total;
        return err;
}
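/* A short illustration of the low-watermark check above (assumed numbers):
 * with buf_alloc = 256 KiB and VIRTIO_VSOCK_MAX_PKT_BUF_SIZE = 64 KiB, a
 * credit update is sent only once the bytes consumed since the last
 * advertised counter (fwd_cnt - last_fwd_cnt) exceed 192 KiB, i.e. once
 * free_space drops below one maximum-sized packet.  Until then the peer
 * keeps transmitting against its previously granted credit and no extra
 * control traffic is generated.
 */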
static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk,
                                                 struct msghdr *msg,
                                                 int flags)
{
        struct virtio_vsock_sock *vvs = vsk->trans;
        struct virtio_vsock_pkt *pkt;
        int dequeued_len = 0;
        size_t user_buf_len = msg_data_left(msg);
        bool copy_failed = false;
        bool msg_ready = false;

        spin_lock_bh(&vvs->rx_lock);

        if (vvs->msg_count == 0) {
                spin_unlock_bh(&vvs->rx_lock);
                return 0;
        }

        while (!msg_ready) {
                pkt = list_first_entry(&vvs->rx_queue, struct virtio_vsock_pkt, list);

                if (!copy_failed) {
                        size_t pkt_len;
                        size_t bytes_to_copy;

                        pkt_len = (size_t)le32_to_cpu(pkt->hdr.len);
                        bytes_to_copy = min(user_buf_len, pkt_len);

                        if (bytes_to_copy) {
                                int err;

                                /* sk_lock is held by caller so no one else can dequeue.
                                 * Unlock rx_lock since memcpy_to_msg() may sleep.
                                 */
                                spin_unlock_bh(&vvs->rx_lock);

                                err = memcpy_to_msg(msg, pkt->buf, bytes_to_copy);
                                if (err) {
                                        /* Copy of message failed, set flag to skip
                                         * copy path for rest of fragments. Rest of
                                         * fragments will be freed without copy.
                                         */
                                        copy_failed = true;
                                        dequeued_len = err;
                                } else {
                                        user_buf_len -= bytes_to_copy;
                                }

                                spin_lock_bh(&vvs->rx_lock);
                        }

                        if (dequeued_len >= 0)
                                dequeued_len += pkt_len;
                }

                if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOR) {
                        msg_ready = true;
                        vvs->msg_count--;
                }

                virtio_transport_dec_rx_pkt(vvs, pkt);
                list_del(&pkt->list);
                virtio_transport_free_pkt(pkt);
        }

        spin_unlock_bh(&vvs->rx_lock);

        virtio_transport_send_credit_update(vsk);

        return dequeued_len;
}

ssize_t
virtio_transport_stream_dequeue(struct vsock_sock *vsk,
                                struct msghdr *msg,
                                size_t len, int flags)
{
        if (flags & MSG_PEEK)
                return virtio_transport_stream_do_peek(vsk, msg, len);
        else
                return virtio_transport_stream_do_dequeue(vsk, msg, len);
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_dequeue);

ssize_t
virtio_transport_seqpacket_dequeue(struct vsock_sock *vsk,
                                   struct msghdr *msg,
                                   int flags)
{
        if (flags & MSG_PEEK)
                return -EOPNOTSUPP;

        return virtio_transport_seqpacket_do_dequeue(vsk, msg, flags);
}
EXPORT_SYMBOL_GPL(virtio_transport_seqpacket_dequeue);

int
virtio_transport_seqpacket_enqueue(struct vsock_sock *vsk,
                                   struct msghdr *msg,
                                   size_t len)
{
        struct virtio_vsock_sock *vvs = vsk->trans;

        spin_lock_bh(&vvs->tx_lock);

        if (len > vvs->peer_buf_alloc) {
                spin_unlock_bh(&vvs->tx_lock);
                return -EMSGSIZE;
        }

        spin_unlock_bh(&vvs->tx_lock);

        return virtio_transport_stream_enqueue(vsk, msg, len);
}
EXPORT_SYMBOL_GPL(virtio_transport_seqpacket_enqueue);
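/* SEQPACKET record framing in brief (restating the logic above; the sizes
 * are illustrative): a 150 KiB record is emitted as several OP_RW packets
 * because each one is clamped to VIRTIO_VSOCK_MAX_PKT_BUF_SIZE and to the
 * available credit; only the final fragment carries VIRTIO_VSOCK_SEQ_EOR,
 * set in virtio_transport_alloc_pkt() when msg_data_left() reaches zero.
 * On the receive side virtio_transport_seqpacket_do_dequeue() consumes
 * fragments until it sees EOR; a record larger than the caller's buffer
 * is truncated, while dequeued_len still reflects the full record length.
 */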
int
virtio_transport_dgram_dequeue(struct vsock_sock *vsk,
                               struct msghdr *msg,
                               size_t len, int flags)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL_GPL(virtio_transport_dgram_dequeue);

s64 virtio_transport_stream_has_data(struct vsock_sock *vsk)
{
        struct virtio_vsock_sock *vvs = vsk->trans;
        s64 bytes;

        spin_lock_bh(&vvs->rx_lock);
        bytes = vvs->rx_bytes;
        spin_unlock_bh(&vvs->rx_lock);

        return bytes;
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_has_data);

u32 virtio_transport_seqpacket_has_data(struct vsock_sock *vsk)
{
        struct virtio_vsock_sock *vvs = vsk->trans;
        u32 msg_count;

        spin_lock_bh(&vvs->rx_lock);
        msg_count = vvs->msg_count;
        spin_unlock_bh(&vvs->rx_lock);

        return msg_count;
}
EXPORT_SYMBOL_GPL(virtio_transport_seqpacket_has_data);

static s64 virtio_transport_has_space(struct vsock_sock *vsk)
{
        struct virtio_vsock_sock *vvs = vsk->trans;
        s64 bytes;

        bytes = vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt);
        if (bytes < 0)
                bytes = 0;

        return bytes;
}

s64 virtio_transport_stream_has_space(struct vsock_sock *vsk)
{
        struct virtio_vsock_sock *vvs = vsk->trans;
        s64 bytes;

        spin_lock_bh(&vvs->tx_lock);
        bytes = virtio_transport_has_space(vsk);
        spin_unlock_bh(&vvs->tx_lock);

        return bytes;
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_has_space);

int virtio_transport_do_socket_init(struct vsock_sock *vsk,
                                    struct vsock_sock *psk)
{
        struct virtio_vsock_sock *vvs;

        vvs = kzalloc(sizeof(*vvs), GFP_KERNEL);
        if (!vvs)
                return -ENOMEM;

        vsk->trans = vvs;
        vvs->vsk = vsk;
        if (psk && psk->trans) {
                struct virtio_vsock_sock *ptrans = psk->trans;

                vvs->peer_buf_alloc = ptrans->peer_buf_alloc;
        }

        if (vsk->buffer_size > VIRTIO_VSOCK_MAX_BUF_SIZE)
                vsk->buffer_size = VIRTIO_VSOCK_MAX_BUF_SIZE;

        vvs->buf_alloc = vsk->buffer_size;

        spin_lock_init(&vvs->rx_lock);
        spin_lock_init(&vvs->tx_lock);
        INIT_LIST_HEAD(&vvs->rx_queue);

        return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_do_socket_init);

/* sk_lock held by the caller */
void virtio_transport_notify_buffer_size(struct vsock_sock *vsk, u64 *val)
{
        struct virtio_vsock_sock *vvs = vsk->trans;

        if (*val > VIRTIO_VSOCK_MAX_BUF_SIZE)
                *val = VIRTIO_VSOCK_MAX_BUF_SIZE;

        vvs->buf_alloc = *val;

        virtio_transport_send_credit_update(vsk);
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_buffer_size);

int
virtio_transport_notify_poll_in(struct vsock_sock *vsk,
                                size_t target,
                                bool *data_ready_now)
{
        if (vsock_stream_has_data(vsk))
                *data_ready_now = true;
        else
                *data_ready_now = false;

        return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_poll_in);

int
virtio_transport_notify_poll_out(struct vsock_sock *vsk,
                                 size_t target,
                                 bool *space_avail_now)
{
        s64 free_space;

        free_space = vsock_stream_has_space(vsk);
        if (free_space > 0)
                *space_avail_now = true;
        else if (free_space == 0)
                *space_avail_now = false;

        return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_poll_out);

int virtio_transport_notify_recv_init(struct vsock_sock *vsk,
        size_t target, struct vsock_transport_recv_notify_data *data)
{
        return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_init);

int virtio_transport_notify_recv_pre_block(struct vsock_sock *vsk,
        size_t target, struct vsock_transport_recv_notify_data *data)
{
        return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_pre_block);

int virtio_transport_notify_recv_pre_dequeue(struct vsock_sock *vsk,
        size_t target, struct vsock_transport_recv_notify_data *data)
{
        return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_pre_dequeue);

int virtio_transport_notify_recv_post_dequeue(struct vsock_sock *vsk,
        size_t target, ssize_t copied, bool data_read,
        struct vsock_transport_recv_notify_data *data)
{
        return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_post_dequeue);

int virtio_transport_notify_send_init(struct vsock_sock *vsk,
        struct vsock_transport_send_notify_data *data)
{
        return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_send_init);

int virtio_transport_notify_send_pre_block(struct vsock_sock *vsk,
        struct vsock_transport_send_notify_data *data)
{
        return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_send_pre_block);

int virtio_transport_notify_send_pre_enqueue(struct vsock_sock *vsk,
        struct vsock_transport_send_notify_data *data)
{
        return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_send_pre_enqueue);

int virtio_transport_notify_send_post_enqueue(struct vsock_sock *vsk,
        ssize_t written, struct vsock_transport_send_notify_data *data)
{
        return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_send_post_enqueue);

u64 virtio_transport_stream_rcvhiwat(struct vsock_sock *vsk)
{
        return vsk->buffer_size;
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_rcvhiwat);
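/* Background note (hedged): the notify_* callbacks above exist to satisfy
 * the generic vsock_transport interface, whose hooks other transports use
 * for their own wakeup bookkeeping.  The virtio transport carries all of
 * its flow-control state in the packet headers (buf_alloc / fwd_cnt), so
 * these hooks have nothing to track and simply return 0.
 */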
bool virtio_transport_stream_is_active(struct vsock_sock *vsk)
{
        return true;
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_is_active);

bool virtio_transport_stream_allow(u32 cid, u32 port)
{
        return true;
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_allow);

int virtio_transport_dgram_bind(struct vsock_sock *vsk,
                                struct sockaddr_vm *addr)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL_GPL(virtio_transport_dgram_bind);

bool virtio_transport_dgram_allow(u32 cid, u32 port)
{
        return false;
}
EXPORT_SYMBOL_GPL(virtio_transport_dgram_allow);

int virtio_transport_connect(struct vsock_sock *vsk)
{
        struct virtio_vsock_pkt_info info = {
                .op = VIRTIO_VSOCK_OP_REQUEST,
                .vsk = vsk,
        };

        return virtio_transport_send_pkt_info(vsk, &info);
}
EXPORT_SYMBOL_GPL(virtio_transport_connect);

int virtio_transport_shutdown(struct vsock_sock *vsk, int mode)
{
        struct virtio_vsock_pkt_info info = {
                .op = VIRTIO_VSOCK_OP_SHUTDOWN,
                .flags = (mode & RCV_SHUTDOWN ?
                          VIRTIO_VSOCK_SHUTDOWN_RCV : 0) |
                         (mode & SEND_SHUTDOWN ?
                          VIRTIO_VSOCK_SHUTDOWN_SEND : 0),
                .vsk = vsk,
        };

        return virtio_transport_send_pkt_info(vsk, &info);
}
EXPORT_SYMBOL_GPL(virtio_transport_shutdown);

int
virtio_transport_dgram_enqueue(struct vsock_sock *vsk,
                               struct sockaddr_vm *remote_addr,
                               struct msghdr *msg,
                               size_t dgram_len)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL_GPL(virtio_transport_dgram_enqueue);

ssize_t
virtio_transport_stream_enqueue(struct vsock_sock *vsk,
                                struct msghdr *msg,
                                size_t len)
{
        struct virtio_vsock_pkt_info info = {
                .op = VIRTIO_VSOCK_OP_RW,
                .msg = msg,
                .pkt_len = len,
                .vsk = vsk,
        };

        return virtio_transport_send_pkt_info(vsk, &info);
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_enqueue);
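/* Note on short writes (behaviour restated for clarity): because
 * virtio_transport_send_pkt_info() clamps each packet to
 * VIRTIO_VSOCK_MAX_PKT_BUF_SIZE and to the currently available credit,
 * virtio_transport_stream_enqueue() may return fewer bytes than len, or 0
 * when the peer's buffer is full.  The af_vsock sendmsg path is expected
 * to loop, blocking until sk_write_space() is signalled by an incoming
 * credit update.
 */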
void virtio_transport_destruct(struct vsock_sock *vsk)
{
        struct virtio_vsock_sock *vvs = vsk->trans;

        kfree(vvs);
}
EXPORT_SYMBOL_GPL(virtio_transport_destruct);

static int virtio_transport_reset(struct vsock_sock *vsk,
                                  struct virtio_vsock_pkt *pkt)
{
        struct virtio_vsock_pkt_info info = {
                .op = VIRTIO_VSOCK_OP_RST,
                .reply = !!pkt,
                .vsk = vsk,
        };

        /* Send RST only if the original pkt is not a RST pkt */
        if (pkt && le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
                return 0;

        return virtio_transport_send_pkt_info(vsk, &info);
}

/* Normally packets are associated with a socket. There may be no socket if an
 * attempt was made to connect to a socket that does not exist.
 */
static int virtio_transport_reset_no_sock(const struct virtio_transport *t,
                                          struct virtio_vsock_pkt *pkt)
{
        struct virtio_vsock_pkt *reply;
        struct virtio_vsock_pkt_info info = {
                .op = VIRTIO_VSOCK_OP_RST,
                .type = le16_to_cpu(pkt->hdr.type),
                .reply = true,
        };

        /* Send RST only if the original pkt is not a RST pkt */
        if (le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
                return 0;

        reply = virtio_transport_alloc_pkt(&info, 0,
                                           le64_to_cpu(pkt->hdr.dst_cid),
                                           le32_to_cpu(pkt->hdr.dst_port),
                                           le64_to_cpu(pkt->hdr.src_cid),
                                           le32_to_cpu(pkt->hdr.src_port));
        if (!reply)
                return -ENOMEM;

        if (!t) {
                virtio_transport_free_pkt(reply);
                return -ENOTCONN;
        }

        return t->send_pkt(reply);
}

/* This function should be called with sk_lock held and SOCK_DONE set */
static void virtio_transport_remove_sock(struct vsock_sock *vsk)
{
        struct virtio_vsock_sock *vvs = vsk->trans;
        struct virtio_vsock_pkt *pkt, *tmp;

        /* We don't need to take rx_lock, as the socket is closing and we are
         * removing it.
         */
        list_for_each_entry_safe(pkt, tmp, &vvs->rx_queue, list) {
                list_del(&pkt->list);
                virtio_transport_free_pkt(pkt);
        }

        vsock_remove_sock(vsk);
}

static void virtio_transport_wait_close(struct sock *sk, long timeout)
{
        if (timeout) {
                DEFINE_WAIT_FUNC(wait, woken_wake_function);

                add_wait_queue(sk_sleep(sk), &wait);

                do {
                        if (sk_wait_event(sk, &timeout,
                                          sock_flag(sk, SOCK_DONE), &wait))
                                break;
                } while (!signal_pending(current) && timeout);

                remove_wait_queue(sk_sleep(sk), &wait);
        }
}

static void virtio_transport_do_close(struct vsock_sock *vsk,
                                      bool cancel_timeout)
{
        struct sock *sk = sk_vsock(vsk);

        sock_set_flag(sk, SOCK_DONE);
        vsk->peer_shutdown = SHUTDOWN_MASK;
        if (vsock_stream_has_data(vsk) <= 0)
                sk->sk_state = TCP_CLOSING;
        sk->sk_state_change(sk);

        if (vsk->close_work_scheduled &&
            (!cancel_timeout || cancel_delayed_work(&vsk->close_work))) {
                vsk->close_work_scheduled = false;

                virtio_transport_remove_sock(vsk);

                /* Release refcnt obtained when we scheduled the timeout */
                sock_put(sk);
        }
}

static void virtio_transport_close_timeout(struct work_struct *work)
{
        struct vsock_sock *vsk =
                container_of(work, struct vsock_sock, close_work.work);
        struct sock *sk = sk_vsock(vsk);

        sock_hold(sk);
        lock_sock(sk);

        if (!sock_flag(sk, SOCK_DONE)) {
                (void)virtio_transport_reset(vsk, NULL);

                virtio_transport_do_close(vsk, false);
        }

        vsk->close_work_scheduled = false;

        release_sock(sk);
        sock_put(sk);
}
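/* Close sequence at a glance (a summary of the helpers above and the
 * function that follows): close() on an established socket sends
 * OP_SHUTDOWN, optionally lingers in virtio_transport_wait_close(), and
 * then arms close_work with VSOCK_CLOSE_TIMEOUT (8 * HZ).  If the peer
 * answers with OP_RST before the timer fires,
 * virtio_transport_do_close() cancels the work and releases the socket;
 * otherwise virtio_transport_close_timeout() forces an RST and tears the
 * socket down after roughly eight seconds.
 */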
/* User context, vsk->sk is locked */
static bool virtio_transport_close(struct vsock_sock *vsk)
{
        struct sock *sk = &vsk->sk;

        if (!(sk->sk_state == TCP_ESTABLISHED ||
              sk->sk_state == TCP_CLOSING))
                return true;

        /* Already received SHUTDOWN from peer, reply with RST */
        if ((vsk->peer_shutdown & SHUTDOWN_MASK) == SHUTDOWN_MASK) {
                (void)virtio_transport_reset(vsk, NULL);
                return true;
        }

        if ((sk->sk_shutdown & SHUTDOWN_MASK) != SHUTDOWN_MASK)
                (void)virtio_transport_shutdown(vsk, SHUTDOWN_MASK);

        if (sock_flag(sk, SOCK_LINGER) && !(current->flags & PF_EXITING))
                virtio_transport_wait_close(sk, sk->sk_lingertime);

        if (sock_flag(sk, SOCK_DONE)) {
                return true;
        }

        sock_hold(sk);
        INIT_DELAYED_WORK(&vsk->close_work,
                          virtio_transport_close_timeout);
        vsk->close_work_scheduled = true;
        schedule_delayed_work(&vsk->close_work, VSOCK_CLOSE_TIMEOUT);
        return false;
}

void virtio_transport_release(struct vsock_sock *vsk)
{
        struct sock *sk = &vsk->sk;
        bool remove_sock = true;

        if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)
                remove_sock = virtio_transport_close(vsk);

        if (remove_sock) {
                sock_set_flag(sk, SOCK_DONE);
                virtio_transport_remove_sock(vsk);
        }
}
EXPORT_SYMBOL_GPL(virtio_transport_release);

static int
virtio_transport_recv_connecting(struct sock *sk,
                                 struct virtio_vsock_pkt *pkt)
{
        struct vsock_sock *vsk = vsock_sk(sk);
        int err;
        int skerr;

        switch (le16_to_cpu(pkt->hdr.op)) {
        case VIRTIO_VSOCK_OP_RESPONSE:
                sk->sk_state = TCP_ESTABLISHED;
                sk->sk_socket->state = SS_CONNECTED;
                vsock_insert_connected(vsk);
                sk->sk_state_change(sk);
                break;
        case VIRTIO_VSOCK_OP_INVALID:
                break;
        case VIRTIO_VSOCK_OP_RST:
                skerr = ECONNRESET;
                err = 0;
                goto destroy;
        default:
                skerr = EPROTO;
                err = -EINVAL;
                goto destroy;
        }
        return 0;

destroy:
        virtio_transport_reset(vsk, pkt);
        sk->sk_state = TCP_CLOSE;
        sk->sk_err = skerr;
        sk->sk_error_report(sk);
        return err;
}
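/* Connection establishment in brief (the two-packet exchange implied by
 * the handlers in this file): the client's connect() sends OP_REQUEST and
 * moves to TCP_SYN_SENT; the server's listener answers with OP_RESPONSE
 * from virtio_transport_recv_listen(); receipt of that response in
 * virtio_transport_recv_connecting() above flips the client to
 * TCP_ESTABLISHED.  There is no third ACK-like packet, so data may flow
 * immediately after the response.
 */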
static void
virtio_transport_recv_enqueue(struct vsock_sock *vsk,
                              struct virtio_vsock_pkt *pkt)
{
        struct virtio_vsock_sock *vvs = vsk->trans;
        bool can_enqueue, free_pkt = false;

        pkt->len = le32_to_cpu(pkt->hdr.len);
        pkt->off = 0;

        spin_lock_bh(&vvs->rx_lock);

        can_enqueue = virtio_transport_inc_rx_pkt(vvs, pkt);
        if (!can_enqueue) {
                free_pkt = true;
                goto out;
        }

        if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOR)
                vvs->msg_count++;

        /* Try to copy small packets into the buffer of last packet queued,
         * to avoid wasting memory queueing the entire buffer with a small
         * payload.
         */
        if (pkt->len <= GOOD_COPY_LEN && !list_empty(&vvs->rx_queue)) {
                struct virtio_vsock_pkt *last_pkt;

                last_pkt = list_last_entry(&vvs->rx_queue,
                                           struct virtio_vsock_pkt, list);

                /* If there is space in the last packet queued, we copy the
                 * new packet in its buffer. We avoid this if the last packet
                 * queued has VIRTIO_VSOCK_SEQ_EOR set, because this is
                 * delimiter of SEQPACKET record, so 'pkt' is the first packet
                 * of a new record.
                 */
                if ((pkt->len <= last_pkt->buf_len - last_pkt->len) &&
                    !(le32_to_cpu(last_pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOR)) {
                        memcpy(last_pkt->buf + last_pkt->len, pkt->buf,
                               pkt->len);
                        last_pkt->len += pkt->len;
                        free_pkt = true;
                        last_pkt->hdr.flags |= pkt->hdr.flags;
                        goto out;
                }
        }

        list_add_tail(&pkt->list, &vvs->rx_queue);

out:
        spin_unlock_bh(&vvs->rx_lock);
        if (free_pkt)
                virtio_transport_free_pkt(pkt);
}
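/* Coalescing example with assumed sizes: if the tail of the rx_queue is a
 * packet with buf_len = 4096 and len = 1000, an incoming 100-byte packet
 * (<= GOOD_COPY_LEN) is memcpy'd to offset 1000 of that buffer and its
 * own allocation freed, leaving one queued packet of len = 1100 instead
 * of two.  For SEQPACKET the EOR flag is OR-ed into the tail packet so
 * the record boundary survives the merge.
 */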
static int
virtio_transport_recv_connected(struct sock *sk,
                                struct virtio_vsock_pkt *pkt)
{
        struct vsock_sock *vsk = vsock_sk(sk);
        int err = 0;

        switch (le16_to_cpu(pkt->hdr.op)) {
        case VIRTIO_VSOCK_OP_RW:
                virtio_transport_recv_enqueue(vsk, pkt);
                sk->sk_data_ready(sk);
                return err;
        case VIRTIO_VSOCK_OP_CREDIT_UPDATE:
                sk->sk_write_space(sk);
                break;
        case VIRTIO_VSOCK_OP_SHUTDOWN:
                if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SHUTDOWN_RCV)
                        vsk->peer_shutdown |= RCV_SHUTDOWN;
                if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SHUTDOWN_SEND)
                        vsk->peer_shutdown |= SEND_SHUTDOWN;
                if (vsk->peer_shutdown == SHUTDOWN_MASK &&
                    vsock_stream_has_data(vsk) <= 0 &&
                    !sock_flag(sk, SOCK_DONE)) {
                        (void)virtio_transport_reset(vsk, NULL);

                        virtio_transport_do_close(vsk, true);
                }
                if (le32_to_cpu(pkt->hdr.flags))
                        sk->sk_state_change(sk);
                break;
        case VIRTIO_VSOCK_OP_RST:
                virtio_transport_do_close(vsk, true);
                break;
        default:
                err = -EINVAL;
                break;
        }

        virtio_transport_free_pkt(pkt);
        return err;
}

static void
virtio_transport_recv_disconnecting(struct sock *sk,
                                    struct virtio_vsock_pkt *pkt)
{
        struct vsock_sock *vsk = vsock_sk(sk);

        if (le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
                virtio_transport_do_close(vsk, true);
}

static int
virtio_transport_send_response(struct vsock_sock *vsk,
                               struct virtio_vsock_pkt *pkt)
{
        struct virtio_vsock_pkt_info info = {
                .op = VIRTIO_VSOCK_OP_RESPONSE,
                .remote_cid = le64_to_cpu(pkt->hdr.src_cid),
                .remote_port = le32_to_cpu(pkt->hdr.src_port),
                .reply = true,
                .vsk = vsk,
        };

        return virtio_transport_send_pkt_info(vsk, &info);
}

static bool virtio_transport_space_update(struct sock *sk,
                                          struct virtio_vsock_pkt *pkt)
{
        struct vsock_sock *vsk = vsock_sk(sk);
        struct virtio_vsock_sock *vvs = vsk->trans;
        bool space_available;

        /* Listener sockets are not associated with any transport, so we are
         * not able to take the state to see if there is space available in the
         * remote peer, but since they are only used to receive requests, we
         * can assume that there is always space available in the other peer.
         */
        if (!vvs)
                return true;

        /* buf_alloc and fwd_cnt is always included in the hdr */
        spin_lock_bh(&vvs->tx_lock);
        vvs->peer_buf_alloc = le32_to_cpu(pkt->hdr.buf_alloc);
        vvs->peer_fwd_cnt = le32_to_cpu(pkt->hdr.fwd_cnt);
        space_available = virtio_transport_has_space(vsk);
        spin_unlock_bh(&vvs->tx_lock);
        return space_available;
}

/* Handle server socket */
static int
virtio_transport_recv_listen(struct sock *sk, struct virtio_vsock_pkt *pkt,
                             struct virtio_transport *t)
{
        struct vsock_sock *vsk = vsock_sk(sk);
        struct vsock_sock *vchild;
        struct sock *child;
        int ret;

        if (le16_to_cpu(pkt->hdr.op) != VIRTIO_VSOCK_OP_REQUEST) {
                virtio_transport_reset_no_sock(t, pkt);
                return -EINVAL;
        }

        if (sk_acceptq_is_full(sk)) {
                virtio_transport_reset_no_sock(t, pkt);
                return -ENOMEM;
        }

        child = vsock_create_connected(sk);
        if (!child) {
                virtio_transport_reset_no_sock(t, pkt);
                return -ENOMEM;
        }

        sk_acceptq_added(sk);

        lock_sock_nested(child, SINGLE_DEPTH_NESTING);

        child->sk_state = TCP_ESTABLISHED;

        vchild = vsock_sk(child);
        vsock_addr_init(&vchild->local_addr, le64_to_cpu(pkt->hdr.dst_cid),
                        le32_to_cpu(pkt->hdr.dst_port));
        vsock_addr_init(&vchild->remote_addr, le64_to_cpu(pkt->hdr.src_cid),
                        le32_to_cpu(pkt->hdr.src_port));

        ret = vsock_assign_transport(vchild, vsk);
        /* Transport assigned (looking at remote_addr) must be the same
         * where we received the request.
         */
        if (ret || vchild->transport != &t->transport) {
                release_sock(child);
                virtio_transport_reset_no_sock(t, pkt);
                sock_put(child);
                return ret;
        }

        if (virtio_transport_space_update(child, pkt))
                child->sk_write_space(child);

        vsock_insert_connected(vchild);
        vsock_enqueue_accept(sk, child);
        virtio_transport_send_response(vchild, pkt);

        release_sock(child);

        sk->sk_data_ready(sk);
        return 0;
}
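/* Every transmitted header piggybacks buf_alloc and fwd_cnt (stamped on
 * the TX side by virtio_transport_inc_tx_pkt()), so a sketch of the
 * credit refresh with assumed numbers: a peer that allocated 256 KiB and
 * has consumed 100 KiB stamps buf_alloc = 262144, fwd_cnt = 102400 into
 * each outgoing packet.  virtio_transport_space_update() copies those
 * into peer_buf_alloc / peer_fwd_cnt on every received packet, and
 * virtio_transport_recv_pkt() below wakes blocked senders through
 * sk_write_space() whenever space has become available again.
 */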
static bool virtio_transport_valid_type(u16 type)
{
        return (type == VIRTIO_VSOCK_TYPE_STREAM) ||
               (type == VIRTIO_VSOCK_TYPE_SEQPACKET);
}

/* We are under the virtio-vsock's vsock->rx_lock or vhost-vsock's vq->mutex
 * lock.
 */
void virtio_transport_recv_pkt(struct virtio_transport *t,
                               struct virtio_vsock_pkt *pkt)
{
        struct sockaddr_vm src, dst;
        struct vsock_sock *vsk;
        struct sock *sk;
        bool space_available;

        vsock_addr_init(&src, le64_to_cpu(pkt->hdr.src_cid),
                        le32_to_cpu(pkt->hdr.src_port));
        vsock_addr_init(&dst, le64_to_cpu(pkt->hdr.dst_cid),
                        le32_to_cpu(pkt->hdr.dst_port));

        trace_virtio_transport_recv_pkt(src.svm_cid, src.svm_port,
                                        dst.svm_cid, dst.svm_port,
                                        le32_to_cpu(pkt->hdr.len),
                                        le16_to_cpu(pkt->hdr.type),
                                        le16_to_cpu(pkt->hdr.op),
                                        le32_to_cpu(pkt->hdr.flags),
                                        le32_to_cpu(pkt->hdr.buf_alloc),
                                        le32_to_cpu(pkt->hdr.fwd_cnt));

        if (!virtio_transport_valid_type(le16_to_cpu(pkt->hdr.type))) {
                (void)virtio_transport_reset_no_sock(t, pkt);
                goto free_pkt;
        }

        /* The socket must be in connected or bound table
         * otherwise send reset back
         */
        sk = vsock_find_connected_socket(&src, &dst);
        if (!sk) {
                sk = vsock_find_bound_socket(&dst);
                if (!sk) {
                        (void)virtio_transport_reset_no_sock(t, pkt);
                        goto free_pkt;
                }
        }

        if (virtio_transport_get_type(sk) != le16_to_cpu(pkt->hdr.type)) {
                (void)virtio_transport_reset_no_sock(t, pkt);
                sock_put(sk);
                goto free_pkt;
        }

        vsk = vsock_sk(sk);

        lock_sock(sk);

        /* Check if sk has been closed before lock_sock */
        if (sock_flag(sk, SOCK_DONE)) {
                (void)virtio_transport_reset_no_sock(t, pkt);
                release_sock(sk);
                sock_put(sk);
                goto free_pkt;
        }

        space_available = virtio_transport_space_update(sk, pkt);

        /* Update CID in case it has changed after a transport reset event */
        vsk->local_addr.svm_cid = dst.svm_cid;

        if (space_available)
                sk->sk_write_space(sk);

        switch (sk->sk_state) {
        case TCP_LISTEN:
                virtio_transport_recv_listen(sk, pkt, t);
                virtio_transport_free_pkt(pkt);
                break;
        case TCP_SYN_SENT:
                virtio_transport_recv_connecting(sk, pkt);
                virtio_transport_free_pkt(pkt);
                break;
        case TCP_ESTABLISHED:
                virtio_transport_recv_connected(sk, pkt);
                break;
        case TCP_CLOSING:
                virtio_transport_recv_disconnecting(sk, pkt);
                virtio_transport_free_pkt(pkt);
                break;
        default:
                (void)virtio_transport_reset_no_sock(t, pkt);
                virtio_transport_free_pkt(pkt);
                break;
        }

        release_sock(sk);

        /* Release refcnt obtained when we fetched this socket out of the
         * bound or connected list.
         */
        sock_put(sk);
        return;

free_pkt:
        virtio_transport_free_pkt(pkt);
}
EXPORT_SYMBOL_GPL(virtio_transport_recv_pkt);

void virtio_transport_free_pkt(struct virtio_vsock_pkt *pkt)
{
        kfree(pkt->buf);
        kfree(pkt);
}
EXPORT_SYMBOL_GPL(virtio_transport_free_pkt);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("common code for virtio vsock");