/*
 * vhost transport for vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */
#include <linux/miscdevice.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <net/sock.h>
#include <linux/virtio_vsock.h>
#include <linux/vhost.h>

#include <net/af_vsock.h>
#include "vhost.h"

#define VHOST_VSOCK_DEFAULT_HOST_CID	2

enum {
	VHOST_VSOCK_FEATURES = VHOST_FEATURES,
};

/* Used to track all the vhost_vsock instances on the system. */
static DEFINE_SPINLOCK(vhost_vsock_lock);
static LIST_HEAD(vhost_vsock_list);

struct vhost_vsock {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[2];

	/* Link to global vhost_vsock_list, protected by vhost_vsock_lock */
	struct list_head list;

	struct vhost_work send_pkt_work;
	spinlock_t send_pkt_list_lock;
	struct list_head send_pkt_list;	/* host->guest pending packets */

	atomic_t queued_replies;

	u32 guest_cid;
};

static u32 vhost_transport_get_local_cid(void)
{
	return VHOST_VSOCK_DEFAULT_HOST_CID;
}

static struct vhost_vsock *__vhost_vsock_get(u32 guest_cid)
{
	struct vhost_vsock *vsock;

	list_for_each_entry(vsock, &vhost_vsock_list, list) {
		u32 other_cid = vsock->guest_cid;

		/* Skip instances that have no CID yet */
		if (other_cid == 0)
			continue;

		if (other_cid == guest_cid)
			return vsock;
	}

	return NULL;
}

static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
{
	struct vhost_vsock *vsock;

	spin_lock_bh(&vhost_vsock_lock);
	vsock = __vhost_vsock_get(guest_cid);
	spin_unlock_bh(&vhost_vsock_lock);

	return vsock;
}

static void
vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
			    struct vhost_virtqueue *vq)
{
	struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
	bool added = false;
	bool restart_tx = false;

	mutex_lock(&vq->mutex);

	if (!vq->private_data)
		goto out;

	/* Avoid further vmexits, we're already processing the virtqueue */
	vhost_disable_notify(&vsock->dev, vq);

	for (;;) {
		struct virtio_vsock_pkt *pkt;
		struct iov_iter iov_iter;
		unsigned out, in;
		size_t nbytes;
		size_t len;
		int head;

		spin_lock_bh(&vsock->send_pkt_list_lock);
		if (list_empty(&vsock->send_pkt_list)) {
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			vhost_enable_notify(&vsock->dev, vq);
			break;
		}

		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		spin_unlock_bh(&vsock->send_pkt_list_lock);

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			break;
		}

		if (head == vq->num) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);

			/* We cannot finish yet if more buffers snuck in while
			 * re-enabling notify.
			 */
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		if (out) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Expected 0 output buffers, got %u\n", out);
			break;
		}

		len = iov_length(&vq->iov[out], in);
		iov_iter_init(&iov_iter, READ, &vq->iov[out], in, len);

		nbytes = copy_to_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
		if (nbytes != sizeof(pkt->hdr)) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Faulted on copying pkt hdr\n");
			break;
		}

		nbytes = copy_to_iter(pkt->buf, pkt->len, &iov_iter);
		if (nbytes != pkt->len) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Faulted on copying pkt buf\n");
			break;
		}

		vhost_add_used(vq, head, sizeof(pkt->hdr) + pkt->len);
		added = true;

		if (pkt->reply) {
			int val;

			val = atomic_dec_return(&vsock->queued_replies);

			/* Do we have resources to resume tx processing? */
			if (val + 1 == tx_vq->num)
				restart_tx = true;
		}

		/* Deliver to monitoring devices all correctly transmitted
		 * packets.
		 */
		virtio_transport_deliver_tap_pkt(pkt);

		virtio_transport_free_pkt(pkt);
	}
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);

	if (restart_tx)
		vhost_poll_queue(&tx_vq->poll);
}

static void vhost_transport_send_pkt_work(struct vhost_work *work)
{
	struct vhost_virtqueue *vq;
	struct vhost_vsock *vsock;

	vsock = container_of(work, struct vhost_vsock, send_pkt_work);
	vq = &vsock->vqs[VSOCK_VQ_RX];

	vhost_transport_do_send_pkt(vsock, vq);
}
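/* Queue a packet for delivery to the guest.  Called by the core virtio
 * transport; the packet is appended to send_pkt_list and the actual copy
 * into the guest's RX virtqueue happens asynchronously in
 * vhost_transport_send_pkt_work().  Returns the number of bytes queued,
 * or -ENODEV if no guest with the destination CID is attached.
 */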
static int
vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
{
	struct vhost_vsock *vsock;
	int len = pkt->len;

	/* Find the vhost_vsock according to guest context id */
	vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
	if (!vsock) {
		virtio_transport_free_pkt(pkt);
		return -ENODEV;
	}

	if (pkt->reply)
		atomic_inc(&vsock->queued_replies);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_add_tail(&pkt->list, &vsock->send_pkt_list);
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);
	return len;
}

static int
vhost_transport_cancel_pkt(struct vsock_sock *vsk)
{
	struct vhost_vsock *vsock;
	struct virtio_vsock_pkt *pkt, *n;
	int cnt = 0;
	LIST_HEAD(freeme);

	/* Find the vhost_vsock according to guest context id */
	vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
	if (!vsock)
		return -ENODEV;

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
		if (pkt->vsk != vsk)
			continue;
		list_move(&pkt->list, &freeme);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	list_for_each_entry_safe(pkt, n, &freeme, list) {
		if (pkt->reply)
			cnt++;
		list_del(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}

	if (cnt) {
		struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
		int new_cnt;

		new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
		if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num)
			vhost_poll_queue(&tx_vq->poll);
	}

	return 0;
}
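/* Allocate a packet and fill it from a TX virtqueue descriptor chain.
 * The chain must consist only of readable (out) buffers carrying the
 * virtio_vsock header, optionally followed by a payload of at most
 * VIRTIO_VSOCK_MAX_PKT_BUF_SIZE bytes.  Returns NULL on malformed
 * descriptors or allocation failure.
 */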
static struct virtio_vsock_pkt *
vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
		      unsigned int out, unsigned int in)
{
	struct virtio_vsock_pkt *pkt;
	struct iov_iter iov_iter;
	size_t nbytes;
	size_t len;

	if (in != 0) {
		vq_err(vq, "Expected 0 input buffers, got %u\n", in);
		return NULL;
	}

	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return NULL;

	len = iov_length(vq->iov, out);
	iov_iter_init(&iov_iter, WRITE, vq->iov, out, len);

	nbytes = copy_from_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
	if (nbytes != sizeof(pkt->hdr)) {
		vq_err(vq, "Expected %zu bytes for pkt->hdr, got %zu bytes\n",
		       sizeof(pkt->hdr), nbytes);
		kfree(pkt);
		return NULL;
	}

	if (le16_to_cpu(pkt->hdr.type) == VIRTIO_VSOCK_TYPE_STREAM)
		pkt->len = le32_to_cpu(pkt->hdr.len);

	/* No payload */
	if (!pkt->len)
		return pkt;

	/* The pkt is too big */
	if (pkt->len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE) {
		kfree(pkt);
		return NULL;
	}

	pkt->buf = kmalloc(pkt->len, GFP_KERNEL);
	if (!pkt->buf) {
		kfree(pkt);
		return NULL;
	}

	nbytes = copy_from_iter(pkt->buf, pkt->len, &iov_iter);
	if (nbytes != pkt->len) {
		vq_err(vq, "Expected %u byte payload, got %zu bytes\n",
		       pkt->len, nbytes);
		virtio_transport_free_pkt(pkt);
		return NULL;
	}

	return pkt;
}

/* Is there space left for replies to rx packets? */
static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
{
	struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX];
	int val;

	smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
	val = atomic_read(&vsock->queued_replies);

	return val < vq->num;
}

static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);
	struct virtio_vsock_pkt *pkt;
	int head;
	unsigned int out, in;
	bool added = false;

	mutex_lock(&vq->mutex);

	if (!vq->private_data)
		goto out;

	vhost_disable_notify(&vsock->dev, vq);
	for (;;) {
		u32 len;

		if (!vhost_vsock_more_replies(vsock)) {
			/* Stop tx until the device processes already
			 * pending replies.  Leave tx virtqueue
			 * callbacks disabled.
			 */
			goto no_more_replies;
		}

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0)
			break;

		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		pkt = vhost_vsock_alloc_pkt(vq, out, in);
		if (!pkt) {
			vq_err(vq, "Faulted on pkt\n");
			continue;
		}

		len = pkt->len;

		/* Deliver to monitoring devices all received packets */
		virtio_transport_deliver_tap_pkt(pkt);

		/* Only accept correctly addressed packets */
		if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid)
			virtio_transport_recv_pkt(pkt);
		else
			virtio_transport_free_pkt(pkt);

		vhost_add_used(vq, head, sizeof(pkt->hdr) + len);
		added = true;
	}

no_more_replies:
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);
}
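/* Virtqueues are named from the guest's point of view: the guest receives
 * on VSOCK_VQ_RX, so the host transmits into it.  A kick on the RX queue
 * therefore means the guest has replenished buffers, and any packets still
 * sitting on send_pkt_list can be retried.
 */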
static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);

	vhost_transport_do_send_pkt(vsock, vq);
}

static int vhost_vsock_start(struct vhost_vsock *vsock)
{
	struct vhost_virtqueue *vq;
	size_t i;
	int ret;

	mutex_lock(&vsock->dev.mutex);

	ret = vhost_dev_check_owner(&vsock->dev);
	if (ret)
		goto err;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);

		if (!vhost_vq_access_ok(vq)) {
			ret = -EFAULT;
			goto err_vq;
		}

		if (!vq->private_data) {
			vq->private_data = vsock;
			ret = vhost_vq_init_access(vq);
			if (ret)
				goto err_vq;
		}

		mutex_unlock(&vq->mutex);
	}

	mutex_unlock(&vsock->dev.mutex);
	return 0;

err_vq:
	vq->private_data = NULL;
	mutex_unlock(&vq->mutex);

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vq->private_data = NULL;
		mutex_unlock(&vq->mutex);
	}
err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}

static int vhost_vsock_stop(struct vhost_vsock *vsock)
{
	size_t i;
	int ret;

	mutex_lock(&vsock->dev.mutex);

	ret = vhost_dev_check_owner(&vsock->dev);
	if (ret)
		goto err;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		struct vhost_virtqueue *vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vq->private_data = NULL;
		mutex_unlock(&vq->mutex);
	}

err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}

static void vhost_vsock_free(struct vhost_vsock *vsock)
{
	kvfree(vsock);
}
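/* Each open() of /dev/vhost-vsock creates an independent instance.  The
 * new instance has no guest CID yet; userspace must assign one with the
 * VHOST_VSOCK_SET_GUEST_CID ioctl before the instance is reachable,
 * since __vhost_vsock_get() skips instances whose CID is still zero.
 */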
static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
{
	struct vhost_virtqueue **vqs;
	struct vhost_vsock *vsock;
	int ret;

	/* This struct is large and allocation could fail, fall back to vmalloc
	 * if there is no other way.
	 */
	vsock = kzalloc(sizeof(*vsock), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
	if (!vsock) {
		vsock = vmalloc(sizeof(*vsock));
		if (!vsock)
			return -ENOMEM;
	}

	vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		ret = -ENOMEM;
		goto out;
	}

	atomic_set(&vsock->queued_replies, 0);

	vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX];
	vqs[VSOCK_VQ_RX] = &vsock->vqs[VSOCK_VQ_RX];
	vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
	vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;

	vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs));

	file->private_data = vsock;
	spin_lock_init(&vsock->send_pkt_list_lock);
	INIT_LIST_HEAD(&vsock->send_pkt_list);
	vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);

	spin_lock_bh(&vhost_vsock_lock);
	list_add_tail(&vsock->list, &vhost_vsock_list);
	spin_unlock_bh(&vhost_vsock_lock);
	return 0;

out:
	vhost_vsock_free(vsock);
	return ret;
}

static void vhost_vsock_flush(struct vhost_vsock *vsock)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++)
		if (vsock->vqs[i].handle_kick)
			vhost_poll_flush(&vsock->vqs[i].poll);
	vhost_work_flush(&vsock->dev, &vsock->send_pkt_work);
}

static void vhost_vsock_reset_orphans(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	/* vmci_transport.c doesn't take sk_lock here either.  At least we're
	 * under vsock_table_lock so the sock cannot disappear while we're
	 * executing.
	 */

	if (!vhost_vsock_get(vsk->remote_addr.svm_cid)) {
		sock_set_flag(sk, SOCK_DONE);
		vsk->peer_shutdown = SHUTDOWN_MASK;
		sk->sk_state = SS_UNCONNECTED;
		sk->sk_err = ECONNRESET;
		sk->sk_error_report(sk);
	}
}
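/* Teardown order matters on release: the instance is unlinked from
 * vhost_vsock_list first so no new packets can target it, orphaned
 * sockets are reset, the virtqueues are stopped and flushed, and only
 * then are any packets still queued on send_pkt_list freed.
 */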
static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
{
	struct vhost_vsock *vsock = file->private_data;

	spin_lock_bh(&vhost_vsock_lock);
	list_del(&vsock->list);
	spin_unlock_bh(&vhost_vsock_lock);

	/* Iterating over all connections for all CIDs to find orphans is
	 * inefficient.  Room for improvement here. */
	vsock_for_each_connected_socket(vhost_vsock_reset_orphans);

	vhost_vsock_stop(vsock);
	vhost_vsock_flush(vsock);
	vhost_dev_stop(&vsock->dev);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	while (!list_empty(&vsock->send_pkt_list)) {
		struct virtio_vsock_pkt *pkt;

		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	vhost_dev_cleanup(&vsock->dev, false);
	kfree(vsock->dev.vqs);
	vhost_vsock_free(vsock);
	return 0;
}

static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
{
	struct vhost_vsock *other;

	/* Refuse reserved CIDs */
	if (guest_cid <= VMADDR_CID_HOST ||
	    guest_cid == U32_MAX)
		return -EINVAL;

	/* 64-bit CIDs are not yet supported */
	if (guest_cid > U32_MAX)
		return -EINVAL;

	/* Refuse if CID is already in use */
	spin_lock_bh(&vhost_vsock_lock);
	other = __vhost_vsock_get(guest_cid);
	if (other && other != vsock) {
		spin_unlock_bh(&vhost_vsock_lock);
		return -EADDRINUSE;
	}
	vsock->guest_cid = guest_cid;
	spin_unlock_bh(&vhost_vsock_lock);

	return 0;
}

static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
{
	struct vhost_virtqueue *vq;
	int i;

	if (features & ~VHOST_VSOCK_FEATURES)
		return -EOPNOTSUPP;

	mutex_lock(&vsock->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&vsock->dev)) {
		mutex_unlock(&vsock->dev.mutex);
		return -EFAULT;
	}

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];
		mutex_lock(&vq->mutex);
		vq->acked_features = features;
		mutex_unlock(&vq->mutex);
	}
	mutex_unlock(&vsock->dev.mutex);
	return 0;
}

static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
				  unsigned long arg)
{
	struct vhost_vsock *vsock = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 guest_cid;
	u64 features;
	int start;
	int r;

	switch (ioctl) {
	case VHOST_VSOCK_SET_GUEST_CID:
		if (copy_from_user(&guest_cid, argp, sizeof(guest_cid)))
			return -EFAULT;
		return vhost_vsock_set_cid(vsock, guest_cid);
	case VHOST_VSOCK_SET_RUNNING:
		if (copy_from_user(&start, argp, sizeof(start)))
			return -EFAULT;
		if (start)
			return vhost_vsock_start(vsock);
		else
			return vhost_vsock_stop(vsock);
	case VHOST_GET_FEATURES:
		features = VHOST_VSOCK_FEATURES;
		if (copy_to_user(argp, &features, sizeof(features)))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, argp, sizeof(features)))
			return -EFAULT;
		return vhost_vsock_set_features(vsock, features);
	default:
		mutex_lock(&vsock->dev.mutex);
		r = vhost_dev_ioctl(&vsock->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&vsock->dev, ioctl, argp);
		else
			vhost_vsock_flush(vsock);
		mutex_unlock(&vsock->dev.mutex);
		return r;
	}
}

static const struct file_operations vhost_vsock_fops = {
	.owner          = THIS_MODULE,
	.open           = vhost_vsock_dev_open,
	.release        = vhost_vsock_dev_release,
	.llseek		= noop_llseek,
	.unlocked_ioctl = vhost_vsock_dev_ioctl,
};

static struct miscdevice vhost_vsock_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "vhost-vsock",
	.fops = &vhost_vsock_fops,
};
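/* A minimal userspace setup sketch (illustrative only; error handling and
 * the memory-table/vring configuration are elided, and in practice a VMM
 * such as QEMU drives this device).  The guest CID value 3 is an arbitrary
 * example:
 *
 *	int fd = open("/dev/vhost-vsock", O_RDWR);
 *	uint64_t guest_cid = 3;
 *	int running = 1;
 *
 *	ioctl(fd, VHOST_SET_OWNER);
 *	ioctl(fd, VHOST_VSOCK_SET_GUEST_CID, &guest_cid);
 *	... VHOST_SET_MEM_TABLE and per-queue VHOST_SET_VRING_* calls ...
 *	ioctl(fd, VHOST_VSOCK_SET_RUNNING, &running);
 */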
static struct virtio_transport vhost_transport = {
	.transport = {
		.get_local_cid            = vhost_transport_get_local_cid,

		.init                     = virtio_transport_do_socket_init,
		.destruct                 = virtio_transport_destruct,
		.release                  = virtio_transport_release,
		.connect                  = virtio_transport_connect,
		.shutdown                 = virtio_transport_shutdown,
		.cancel_pkt               = vhost_transport_cancel_pkt,

		.dgram_enqueue            = virtio_transport_dgram_enqueue,
		.dgram_dequeue            = virtio_transport_dgram_dequeue,
		.dgram_bind               = virtio_transport_dgram_bind,
		.dgram_allow              = virtio_transport_dgram_allow,

		.stream_enqueue           = virtio_transport_stream_enqueue,
		.stream_dequeue           = virtio_transport_stream_dequeue,
		.stream_has_data          = virtio_transport_stream_has_data,
		.stream_has_space         = virtio_transport_stream_has_space,
		.stream_rcvhiwat          = virtio_transport_stream_rcvhiwat,
		.stream_is_active         = virtio_transport_stream_is_active,
		.stream_allow             = virtio_transport_stream_allow,

		.notify_poll_in           = virtio_transport_notify_poll_in,
		.notify_poll_out          = virtio_transport_notify_poll_out,
		.notify_recv_init         = virtio_transport_notify_recv_init,
		.notify_recv_pre_block    = virtio_transport_notify_recv_pre_block,
		.notify_recv_pre_dequeue  = virtio_transport_notify_recv_pre_dequeue,
		.notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
		.notify_send_init         = virtio_transport_notify_send_init,
		.notify_send_pre_block    = virtio_transport_notify_send_pre_block,
		.notify_send_pre_enqueue  = virtio_transport_notify_send_pre_enqueue,
		.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,

		.set_buffer_size          = virtio_transport_set_buffer_size,
		.set_min_buffer_size      = virtio_transport_set_min_buffer_size,
		.set_max_buffer_size      = virtio_transport_set_max_buffer_size,
		.get_buffer_size          = virtio_transport_get_buffer_size,
		.get_min_buffer_size      = virtio_transport_get_min_buffer_size,
		.get_max_buffer_size      = virtio_transport_get_max_buffer_size,
	},

	.send_pkt = vhost_transport_send_pkt,
};

static int __init vhost_vsock_init(void)
{
	int ret;

	ret = vsock_core_init(&vhost_transport.transport);
	if (ret < 0)
		return ret;
	return misc_register(&vhost_vsock_misc);
}

static void __exit vhost_vsock_exit(void)
{
	misc_deregister(&vhost_vsock_misc);
	vsock_core_exit();
}

module_init(vhost_vsock_init);
module_exit(vhost_vsock_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("vhost transport for vsock");
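/* Note: loading this module provides only the host side of the transport;
 * the guest runs the virtio_transport driver.  With QEMU the device is
 * typically instantiated as, e.g., "-device vhost-vsock-pci,guest-cid=3"
 * (illustrative invocation; exact options depend on the VMM).
 */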