// SPDX-License-Identifier: GPL-2.0-only
/*
 * virtio transport for vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 *
 * Some of the code is taken from Gerd Hoffmann <kraxel@redhat.com>'s
 * early virtio-vsock proof-of-concept bits.
 */
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/atomic.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
#include <linux/virtio_vsock.h>
#include <net/sock.h>
#include <linux/mutex.h>
#include <net/af_vsock.h>

static struct workqueue_struct *virtio_vsock_workqueue;
static struct virtio_vsock *the_virtio_vsock;
static DEFINE_MUTEX(the_virtio_vsock_mutex); /* protects the_virtio_vsock */

struct virtio_vsock {
	struct virtio_device *vdev;
	struct virtqueue *vqs[VSOCK_VQ_MAX];

	/* Virtqueue processing is deferred to a workqueue */
	struct work_struct tx_work;
	struct work_struct rx_work;
	struct work_struct event_work;

	/* The following fields are protected by tx_lock.  vqs[VSOCK_VQ_TX]
	 * must be accessed with tx_lock held.
	 */
	struct mutex tx_lock;
	bool tx_run;

	struct work_struct send_pkt_work;
	spinlock_t send_pkt_list_lock;
	struct list_head send_pkt_list;

	struct work_struct loopback_work;
	spinlock_t loopback_list_lock; /* protects loopback_list */
	struct list_head loopback_list;

	atomic_t queued_replies;

	/* The following fields are protected by rx_lock.  vqs[VSOCK_VQ_RX]
	 * must be accessed with rx_lock held.
	 */
	struct mutex rx_lock;
	bool rx_run;
	int rx_buf_nr;
	int rx_buf_max_nr;

	/* The following fields are protected by event_lock.
	 * vqs[VSOCK_VQ_EVENT] must be accessed with event_lock held.
	 */
	struct mutex event_lock;
	bool event_run;
	struct virtio_vsock_event event_list[8];

	u32 guest_cid;
};

static u32 virtio_transport_get_local_cid(void)
{
	struct virtio_vsock *vsock;
	u32 ret;

	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (!vsock) {
		ret = VMADDR_CID_ANY;
		goto out_rcu;
	}

	ret = vsock->guest_cid;
out_rcu:
	rcu_read_unlock();
	return ret;
}

static int virtio_transport_send_pkt_loopback(struct virtio_vsock *vsock,
					      struct virtio_vsock_pkt *pkt)
{
	int len = pkt->len;

	spin_lock_bh(&vsock->loopback_list_lock);
	list_add_tail(&pkt->list, &vsock->loopback_list);
	spin_unlock_bh(&vsock->loopback_list_lock);

	queue_work(virtio_vsock_workqueue, &vsock->loopback_work);

	return len;
}

static void
virtio_transport_send_pkt_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, send_pkt_work);
	struct virtqueue *vq;
	bool added = false;
	bool restart_rx = false;

	mutex_lock(&vsock->tx_lock);

	if (!vsock->tx_run)
		goto out;

	vq = vsock->vqs[VSOCK_VQ_TX];

	for (;;) {
		struct virtio_vsock_pkt *pkt;
		struct scatterlist hdr, buf, *sgs[2];
		int ret, in_sg = 0, out_sg = 0;
		bool reply;

		spin_lock_bh(&vsock->send_pkt_list_lock);
		if (list_empty(&vsock->send_pkt_list)) {
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			break;
		}

		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		spin_unlock_bh(&vsock->send_pkt_list_lock);

		virtio_transport_deliver_tap_pkt(pkt);

		reply = pkt->reply;

		sg_init_one(&hdr, &pkt->hdr, sizeof(pkt->hdr));
		sgs[out_sg++] = &hdr;
		if (pkt->buf) {
			sg_init_one(&buf, pkt->buf, pkt->len);
			sgs[out_sg++] = &buf;
		}

		ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, pkt, GFP_KERNEL);
		/* Usually this means that there is no more space available in
		 * the vq
		 */
		if (ret < 0) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			break;
		}

		if (reply) {
			struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
			int val;

			val = atomic_dec_return(&vsock->queued_replies);

			/* Do we now have resources to resume rx processing? */
			if (val + 1 == virtqueue_get_vring_size(rx_vq))
				restart_rx = true;
		}

		added = true;
	}

	if (added)
		virtqueue_kick(vq);

out:
	mutex_unlock(&vsock->tx_lock);

	if (restart_rx)
		queue_work(virtio_vsock_workqueue, &vsock->rx_work);
}

static int
virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt)
{
	struct virtio_vsock *vsock;
	int len = pkt->len;

	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (!vsock) {
		virtio_transport_free_pkt(pkt);
		len = -ENODEV;
		goto out_rcu;
	}

	if (le64_to_cpu(pkt->hdr.dst_cid) == vsock->guest_cid) {
		len = virtio_transport_send_pkt_loopback(vsock, pkt);
		goto out_rcu;
	}

	if (pkt->reply)
		atomic_inc(&vsock->queued_replies);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_add_tail(&pkt->list, &vsock->send_pkt_list);
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);

out_rcu:
	rcu_read_unlock();
	return len;
}

static int
virtio_transport_cancel_pkt(struct vsock_sock *vsk)
{
	struct virtio_vsock *vsock;
	struct virtio_vsock_pkt *pkt, *n;
	int cnt = 0, ret;
	LIST_HEAD(freeme);

	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (!vsock) {
		ret = -ENODEV;
		goto out_rcu;
	}

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
		if (pkt->vsk != vsk)
			continue;
		list_move(&pkt->list, &freeme);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	list_for_each_entry_safe(pkt, n, &freeme, list) {
		if (pkt->reply)
			cnt++;
		list_del(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}

	if (cnt) {
		struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
		int new_cnt;

		new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
		if (new_cnt + cnt >= virtqueue_get_vring_size(rx_vq) &&
		    new_cnt < virtqueue_get_vring_size(rx_vq))
			queue_work(virtio_vsock_workqueue, &vsock->rx_work);
	}

	ret = 0;

out_rcu:
	rcu_read_unlock();
	return ret;
}

static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
{
	int buf_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE;
	struct virtio_vsock_pkt *pkt;
	struct scatterlist hdr, buf, *sgs[2];
	struct virtqueue *vq;
	int ret;

	vq = vsock->vqs[VSOCK_VQ_RX];

	do {
		pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
		if (!pkt)
			break;

		pkt->buf = kmalloc(buf_len, GFP_KERNEL);
		if (!pkt->buf) {
			virtio_transport_free_pkt(pkt);
			break;
		}

		pkt->buf_len = buf_len;
		pkt->len = buf_len;

		sg_init_one(&hdr, &pkt->hdr, sizeof(pkt->hdr));
		sgs[0] = &hdr;

		sg_init_one(&buf, pkt->buf, buf_len);
		sgs[1] = &buf;
		ret = virtqueue_add_sgs(vq, sgs, 0, 2, pkt, GFP_KERNEL);
		if (ret) {
			virtio_transport_free_pkt(pkt);
			break;
		}
		vsock->rx_buf_nr++;
	} while (vq->num_free);
	if (vsock->rx_buf_nr > vsock->rx_buf_max_nr)
		vsock->rx_buf_max_nr = vsock->rx_buf_nr;
	virtqueue_kick(vq);
}

static void virtio_transport_tx_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, tx_work);
	struct virtqueue *vq;
	bool added = false;

	vq = vsock->vqs[VSOCK_VQ_TX];
	mutex_lock(&vsock->tx_lock);

	if (!vsock->tx_run)
		goto out;

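	/* Reclaim buffers the device has marked as used on the TX virtqueue
	 * and free the corresponding packets.  Callbacks stay disabled while
	 * draining and are re-enabled once the queue is empty.
	 */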
	do {
		struct virtio_vsock_pkt *pkt;
		unsigned int len;

		virtqueue_disable_cb(vq);
		while ((pkt = virtqueue_get_buf(vq, &len)) != NULL) {
			virtio_transport_free_pkt(pkt);
			added = true;
		}
	} while (!virtqueue_enable_cb(vq));

out:
	mutex_unlock(&vsock->tx_lock);

	if (added)
		queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
}

/* Is there space left for replies to rx packets? */
static bool virtio_transport_more_replies(struct virtio_vsock *vsock)
{
	struct virtqueue *vq = vsock->vqs[VSOCK_VQ_RX];
	int val;

	smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
	val = atomic_read(&vsock->queued_replies);

	return val < virtqueue_get_vring_size(vq);
}

/* event_lock must be held */
static int virtio_vsock_event_fill_one(struct virtio_vsock *vsock,
				       struct virtio_vsock_event *event)
{
	struct scatterlist sg;
	struct virtqueue *vq;

	vq = vsock->vqs[VSOCK_VQ_EVENT];

	sg_init_one(&sg, event, sizeof(*event));

	return virtqueue_add_inbuf(vq, &sg, 1, event, GFP_KERNEL);
}

/* event_lock must be held */
static void virtio_vsock_event_fill(struct virtio_vsock *vsock)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(vsock->event_list); i++) {
		struct virtio_vsock_event *event = &vsock->event_list[i];

		virtio_vsock_event_fill_one(vsock, event);
	}

	virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]);
}

static void virtio_vsock_reset_sock(struct sock *sk)
{
	lock_sock(sk);
	sk->sk_state = TCP_CLOSE;
	sk->sk_err = ECONNRESET;
	sk->sk_error_report(sk);
	release_sock(sk);
}

static void virtio_vsock_update_guest_cid(struct virtio_vsock *vsock)
{
	struct virtio_device *vdev = vsock->vdev;
	__le64 guest_cid;

	vdev->config->get(vdev, offsetof(struct virtio_vsock_config, guest_cid),
			  &guest_cid, sizeof(guest_cid));
	vsock->guest_cid = le64_to_cpu(guest_cid);
}

/* event_lock must be held */
static void virtio_vsock_event_handle(struct virtio_vsock *vsock,
				      struct virtio_vsock_event *event)
{
	switch (le32_to_cpu(event->id)) {
	case VIRTIO_VSOCK_EVENT_TRANSPORT_RESET:
		virtio_vsock_update_guest_cid(vsock);
		vsock_for_each_connected_socket(virtio_vsock_reset_sock);
		break;
	}
}

static void virtio_transport_event_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, event_work);
	struct virtqueue *vq;

	vq = vsock->vqs[VSOCK_VQ_EVENT];

	mutex_lock(&vsock->event_lock);

	if (!vsock->event_run)
		goto out;

	do {
		struct virtio_vsock_event *event;
		unsigned int len;

		virtqueue_disable_cb(vq);
		while ((event = virtqueue_get_buf(vq, &len)) != NULL) {
			if (len == sizeof(*event))
				virtio_vsock_event_handle(vsock, event);

			virtio_vsock_event_fill_one(vsock, event);
		}
	} while (!virtqueue_enable_cb(vq));

	virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]);
out:
	mutex_unlock(&vsock->event_lock);
}

static void virtio_vsock_event_done(struct virtqueue *vq)
{
	struct virtio_vsock *vsock = vq->vdev->priv;

	if (!vsock)
		return;
	queue_work(virtio_vsock_workqueue, &vsock->event_work);
}

static void virtio_vsock_tx_done(struct virtqueue *vq)
{
	struct virtio_vsock *vsock = vq->vdev->priv;

	if (!vsock)
		return;
	queue_work(virtio_vsock_workqueue, &vsock->tx_work);
}

static void virtio_vsock_rx_done(struct virtqueue *vq)
{
	struct virtio_vsock *vsock = vq->vdev->priv;

	if (!vsock)
		return;
	queue_work(virtio_vsock_workqueue, &vsock->rx_work);
}

static struct virtio_transport virtio_transport = {
	.transport = {
		.module = THIS_MODULE,

		.get_local_cid = virtio_transport_get_local_cid,

		.init = virtio_transport_do_socket_init,
		.destruct = virtio_transport_destruct,
		.release = virtio_transport_release,
		.connect = virtio_transport_connect,
		.shutdown = virtio_transport_shutdown,
		.cancel_pkt = virtio_transport_cancel_pkt,

		.dgram_bind = virtio_transport_dgram_bind,
		.dgram_dequeue = virtio_transport_dgram_dequeue,
		.dgram_enqueue = virtio_transport_dgram_enqueue,
		.dgram_allow = virtio_transport_dgram_allow,

		.stream_dequeue = virtio_transport_stream_dequeue,
		.stream_enqueue = virtio_transport_stream_enqueue,
		.stream_has_data = virtio_transport_stream_has_data,
		.stream_has_space = virtio_transport_stream_has_space,
		.stream_rcvhiwat = virtio_transport_stream_rcvhiwat,
		.stream_is_active = virtio_transport_stream_is_active,
		.stream_allow = virtio_transport_stream_allow,

		.notify_poll_in = virtio_transport_notify_poll_in,
		.notify_poll_out = virtio_transport_notify_poll_out,
		.notify_recv_init = virtio_transport_notify_recv_init,
		.notify_recv_pre_block = virtio_transport_notify_recv_pre_block,
		.notify_recv_pre_dequeue = virtio_transport_notify_recv_pre_dequeue,
		.notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
		.notify_send_init = virtio_transport_notify_send_init,
		.notify_send_pre_block = virtio_transport_notify_send_pre_block,
		.notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue,
		.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
		.notify_buffer_size = virtio_transport_notify_buffer_size,
	},

	.send_pkt = virtio_transport_send_pkt,
};

static void virtio_transport_loopback_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, loopback_work);
	LIST_HEAD(pkts);

	spin_lock_bh(&vsock->loopback_list_lock);
	list_splice_init(&vsock->loopback_list, &pkts);
	spin_unlock_bh(&vsock->loopback_list_lock);

	mutex_lock(&vsock->rx_lock);

	if (!vsock->rx_run)
		goto out;

	while (!list_empty(&pkts)) {
		struct virtio_vsock_pkt *pkt;

		pkt = list_first_entry(&pkts, struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);

		virtio_transport_recv_pkt(&virtio_transport, pkt);
	}
out:
	mutex_unlock(&vsock->rx_lock);
}

static void virtio_transport_rx_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, rx_work);
	struct virtqueue *vq;

	vq = vsock->vqs[VSOCK_VQ_RX];

	mutex_lock(&vsock->rx_lock);

	if (!vsock->rx_run)
		goto out;

	do {
		virtqueue_disable_cb(vq);
		for (;;) {
			struct virtio_vsock_pkt *pkt;
			unsigned int len;

			if (!virtio_transport_more_replies(vsock)) {
				/* Stop rx until the device processes already
				 * pending replies.  Leave rx virtqueue
				 * callbacks disabled.
				 */
				goto out;
			}

			pkt = virtqueue_get_buf(vq, &len);
			if (!pkt)
				break;

			vsock->rx_buf_nr--;

			/* Drop short/long packets */
			if (unlikely(len < sizeof(pkt->hdr) ||
				     len > sizeof(pkt->hdr) + pkt->len)) {
				virtio_transport_free_pkt(pkt);
				continue;
			}

			pkt->len = len - sizeof(pkt->hdr);
			virtio_transport_deliver_tap_pkt(pkt);
			virtio_transport_recv_pkt(&virtio_transport, pkt);
		}
	} while (!virtqueue_enable_cb(vq));

out:
	if (vsock->rx_buf_nr < vsock->rx_buf_max_nr / 2)
		virtio_vsock_rx_fill(vsock);
	mutex_unlock(&vsock->rx_lock);
}

static int virtio_vsock_probe(struct virtio_device *vdev)
{
	vq_callback_t *callbacks[] = {
		virtio_vsock_rx_done,
		virtio_vsock_tx_done,
		virtio_vsock_event_done,
	};
	static const char * const names[] = {
		"rx",
		"tx",
		"event",
	};
	struct virtio_vsock *vsock = NULL;
	int ret;

	ret = mutex_lock_interruptible(&the_virtio_vsock_mutex);
	if (ret)
		return ret;

	/* Only one virtio-vsock device per guest is supported */
	if (rcu_dereference_protected(the_virtio_vsock,
				      lockdep_is_held(&the_virtio_vsock_mutex))) {
		ret = -EBUSY;
		goto out;
	}

	vsock = kzalloc(sizeof(*vsock), GFP_KERNEL);
	if (!vsock) {
		ret = -ENOMEM;
		goto out;
	}

	vsock->vdev = vdev;

	ret = virtio_find_vqs(vsock->vdev, VSOCK_VQ_MAX,
			      vsock->vqs, callbacks, names,
			      NULL);
	if (ret < 0)
		goto out;

	virtio_vsock_update_guest_cid(vsock);

	vsock->rx_buf_nr = 0;
	vsock->rx_buf_max_nr = 0;
	atomic_set(&vsock->queued_replies, 0);

	mutex_init(&vsock->tx_lock);
	mutex_init(&vsock->rx_lock);
	mutex_init(&vsock->event_lock);
	spin_lock_init(&vsock->send_pkt_list_lock);
	INIT_LIST_HEAD(&vsock->send_pkt_list);
	spin_lock_init(&vsock->loopback_list_lock);
	INIT_LIST_HEAD(&vsock->loopback_list);
	INIT_WORK(&vsock->rx_work, virtio_transport_rx_work);
	INIT_WORK(&vsock->tx_work, virtio_transport_tx_work);
	INIT_WORK(&vsock->event_work, virtio_transport_event_work);
	INIT_WORK(&vsock->send_pkt_work, virtio_transport_send_pkt_work);
	INIT_WORK(&vsock->loopback_work, virtio_transport_loopback_work);

	mutex_lock(&vsock->tx_lock);
	vsock->tx_run = true;
	mutex_unlock(&vsock->tx_lock);

	mutex_lock(&vsock->rx_lock);
	virtio_vsock_rx_fill(vsock);
	vsock->rx_run = true;
	mutex_unlock(&vsock->rx_lock);

	mutex_lock(&vsock->event_lock);
	virtio_vsock_event_fill(vsock);
	vsock->event_run = true;
	mutex_unlock(&vsock->event_lock);

	vdev->priv = vsock;
	rcu_assign_pointer(the_virtio_vsock, vsock);

	mutex_unlock(&the_virtio_vsock_mutex);
	return 0;

out:
	kfree(vsock);
	mutex_unlock(&the_virtio_vsock_mutex);
	return ret;
}

static void virtio_vsock_remove(struct virtio_device *vdev)
{
	struct virtio_vsock *vsock = vdev->priv;
	struct virtio_vsock_pkt *pkt;

	mutex_lock(&the_virtio_vsock_mutex);

	vdev->priv = NULL;
	rcu_assign_pointer(the_virtio_vsock, NULL);
	synchronize_rcu();

	/* Reset all connected sockets when the device disappears */
	vsock_for_each_connected_socket(virtio_vsock_reset_sock);

	/* Stop all work handlers to make sure no one is accessing the device,
	 * so we can safely call vdev->config->reset().
	 */
	mutex_lock(&vsock->rx_lock);
	vsock->rx_run = false;
	mutex_unlock(&vsock->rx_lock);

	mutex_lock(&vsock->tx_lock);
	vsock->tx_run = false;
	mutex_unlock(&vsock->tx_lock);

	mutex_lock(&vsock->event_lock);
	vsock->event_run = false;
	mutex_unlock(&vsock->event_lock);

	/* Flush all device writes and interrupts, device will not use any
	 * more buffers.
	 */
	vdev->config->reset(vdev);

	mutex_lock(&vsock->rx_lock);
	while ((pkt = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_RX])))
		virtio_transport_free_pkt(pkt);
	mutex_unlock(&vsock->rx_lock);

	mutex_lock(&vsock->tx_lock);
	while ((pkt = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_TX])))
		virtio_transport_free_pkt(pkt);
	mutex_unlock(&vsock->tx_lock);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	while (!list_empty(&vsock->send_pkt_list)) {
		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	spin_lock_bh(&vsock->loopback_list_lock);
	while (!list_empty(&vsock->loopback_list)) {
		pkt = list_first_entry(&vsock->loopback_list,
				       struct virtio_vsock_pkt, list);
		list_del(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}
	spin_unlock_bh(&vsock->loopback_list_lock);

	/* Delete virtqueues and flush outstanding callbacks if any */
	vdev->config->del_vqs(vdev);

	/* Other works can be queued before 'config->del_vqs()', so flush all
	 * works before freeing the vsock object to avoid use after free.
	 */
	flush_work(&vsock->loopback_work);
	flush_work(&vsock->rx_work);
	flush_work(&vsock->tx_work);
	flush_work(&vsock->event_work);
	flush_work(&vsock->send_pkt_work);

	mutex_unlock(&the_virtio_vsock_mutex);

	kfree(vsock);
}

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_VSOCK, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
};

static struct virtio_driver virtio_vsock_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = virtio_vsock_probe,
	.remove = virtio_vsock_remove,
};

static int __init virtio_vsock_init(void)
{
	int ret;

	virtio_vsock_workqueue = alloc_workqueue("virtio_vsock", 0, 0);
	if (!virtio_vsock_workqueue)
		return -ENOMEM;

	ret = vsock_core_register(&virtio_transport.transport,
				  VSOCK_TRANSPORT_F_G2H);
	if (ret)
		goto out_wq;

	ret = register_virtio_driver(&virtio_vsock_driver);
	if (ret)
		goto out_vci;

	return 0;

out_vci:
	vsock_core_unregister(&virtio_transport.transport);
out_wq:
	destroy_workqueue(virtio_vsock_workqueue);
	return ret;
}

static void __exit virtio_vsock_exit(void)
{
	unregister_virtio_driver(&virtio_vsock_driver);
	vsock_core_unregister(&virtio_transport.transport);
	destroy_workqueue(virtio_vsock_workqueue);
}

module_init(virtio_vsock_init);
module_exit(virtio_vsock_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("virtio transport for vsock");
MODULE_DEVICE_TABLE(virtio, id_table);