/* Copyright (C) 2009 Red Hat, Inc.
 * Copyright (C) 2006 Rusty Russell IBM Corporation
 *
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * Inspiration, some code, and most witty comments come from
 * Documentation/virtual/lguest/lguest.c, by Rusty Russell
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * Generic code for virtio server in host kernel.
 */

#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/uio.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/kthread.h>
#include <linux/cgroup.h>
#include <linux/module.h>
#include <linux/sort.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/interval_tree_generic.h>

#include "vhost.h"

static ushort max_mem_regions = 64;
module_param(max_mem_regions, ushort, 0444);
MODULE_PARM_DESC(max_mem_regions,
	"Maximum number of memory regions in memory map. (default: 64)");
static int max_iotlb_entries = 2048;
module_param(max_iotlb_entries, int, 0444);
MODULE_PARM_DESC(max_iotlb_entries,
	"Maximum number of iotlb entries. (default: 2048)");

enum {
	VHOST_MEMORY_F_LOG = 0x1,
};

#define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num])
#define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])

INTERVAL_TREE_DEFINE(struct vhost_umem_node,
		     rb, __u64, __subtree_last,
		     START, LAST, static inline, vhost_umem_interval_tree);

#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
{
	vq->user_be = !virtio_legacy_is_little_endian();
}

static void vhost_enable_cross_endian_big(struct vhost_virtqueue *vq)
{
	vq->user_be = true;
}

static void vhost_enable_cross_endian_little(struct vhost_virtqueue *vq)
{
	vq->user_be = false;
}
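/* Example (sketch, hypothetical userspace): the cross-endian ioctls below
 * are driven with a struct vhost_vring_state, and must be issued while the
 * backend is stopped, since vhost_set_vring_endian() rejects a vq whose
 * private_data is set:
 *
 *	struct vhost_vring_state s = {
 *		.index = 0,			// vq index
 *		.num = VHOST_VRING_BIG_ENDIAN,	// or VHOST_VRING_LITTLE_ENDIAN
 *	};
 *	ioctl(vhost_fd, VHOST_SET_VRING_ENDIAN, &s);
 */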
static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
{
	struct vhost_vring_state s;

	if (vq->private_data)
		return -EBUSY;

	if (copy_from_user(&s, argp, sizeof(s)))
		return -EFAULT;

	if (s.num != VHOST_VRING_LITTLE_ENDIAN &&
	    s.num != VHOST_VRING_BIG_ENDIAN)
		return -EINVAL;

	if (s.num == VHOST_VRING_BIG_ENDIAN)
		vhost_enable_cross_endian_big(vq);
	else
		vhost_enable_cross_endian_little(vq);

	return 0;
}

static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
				   int __user *argp)
{
	struct vhost_vring_state s = {
		.index = idx,
		.num = vq->user_be
	};

	if (copy_to_user(argp, &s, sizeof(s)))
		return -EFAULT;

	return 0;
}

static void vhost_init_is_le(struct vhost_virtqueue *vq)
{
	/* Note for legacy virtio: user_be is initialized at reset time
	 * according to the host endianness. If userspace does not set an
	 * explicit endianness, the default behavior is native endian, as
	 * expected by legacy virtio.
	 */
	vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) || !vq->user_be;
}
#else
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
{
}

static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
{
	return -ENOIOCTLCMD;
}

static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
				   int __user *argp)
{
	return -ENOIOCTLCMD;
}

static void vhost_init_is_le(struct vhost_virtqueue *vq)
{
	vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1)
		|| virtio_legacy_is_little_endian();
}
#endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */

static void vhost_reset_is_le(struct vhost_virtqueue *vq)
{
	vhost_init_is_le(vq);
}

struct vhost_flush_struct {
	struct vhost_work work;
	struct completion wait_event;
};

static void vhost_flush_work(struct vhost_work *work)
{
	struct vhost_flush_struct *s;

	s = container_of(work, struct vhost_flush_struct, work);
	complete(&s->wait_event);
}

static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
			    poll_table *pt)
{
	struct vhost_poll *poll;

	poll = container_of(pt, struct vhost_poll, table);
	poll->wqh = wqh;
	add_wait_queue(wqh, &poll->wait);
}

static int vhost_poll_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync,
			     void *key)
{
	struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);

	if (!(key_to_poll(key) & poll->mask))
		return 0;

	vhost_poll_queue(poll);
	return 0;
}

void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
{
	clear_bit(VHOST_WORK_QUEUED, &work->flags);
	work->fn = fn;
}
EXPORT_SYMBOL_GPL(vhost_work_init);

/* Init poll structure */
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
		     __poll_t mask, struct vhost_dev *dev)
{
	init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
	init_poll_funcptr(&poll->table, vhost_poll_func);
	poll->mask = mask;
	poll->dev = dev;
	poll->wqh = NULL;

	vhost_work_init(&poll->work, fn);
}
EXPORT_SYMBOL_GPL(vhost_poll_init);

/* Start polling a file. We add ourselves to file's wait queue. The caller must
 * keep a reference to a file until after vhost_poll_stop is called. */
int vhost_poll_start(struct vhost_poll *poll, struct file *file)
{
	__poll_t mask;
	int ret = 0;

	if (poll->wqh)
		return 0;

	mask = vfs_poll(file, &poll->table);
	if (mask)
		vhost_poll_wakeup(&poll->wait, 0, 0, poll_to_key(mask));
	if (mask & EPOLLERR) {
		vhost_poll_stop(poll);
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(vhost_poll_start);

/* Stop polling a file. After this function returns, it becomes safe to drop
 * the file reference. You must also flush afterwards. */
void vhost_poll_stop(struct vhost_poll *poll)
{
	if (poll->wqh) {
		remove_wait_queue(poll->wqh, &poll->wait);
		poll->wqh = NULL;
	}
}
EXPORT_SYMBOL_GPL(vhost_poll_stop);
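/* Flush semantics: vhost_work_flush() does not cancel anything. It queues
 * a sentinel work item and sleeps on a completion that the worker fires
 * from vhost_flush_work(); because the worker runs items in queueing
 * order, once the sentinel completes every work item queued before it has
 * finished as well.
 */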
void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
{
	struct vhost_flush_struct flush;

	if (dev->worker) {
		init_completion(&flush.wait_event);
		vhost_work_init(&flush.work, vhost_flush_work);

		vhost_work_queue(dev, &flush.work);
		wait_for_completion(&flush.wait_event);
	}
}
EXPORT_SYMBOL_GPL(vhost_work_flush);

/* Flush any work that has been scheduled. When calling this, don't hold any
 * locks that are also used by the callback. */
void vhost_poll_flush(struct vhost_poll *poll)
{
	vhost_work_flush(poll->dev, &poll->work);
}
EXPORT_SYMBOL_GPL(vhost_poll_flush);

void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
{
	if (!dev->worker)
		return;

	if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
		/* We can only add the work to the list after we're
		 * sure it was not in the list.
		 * test_and_set_bit() implies a memory barrier.
		 */
		llist_add(&work->node, &dev->work_list);
		wake_up_process(dev->worker);
	}
}
EXPORT_SYMBOL_GPL(vhost_work_queue);

/* A lockless hint for busy polling code to exit the loop */
bool vhost_has_work(struct vhost_dev *dev)
{
	return !llist_empty(&dev->work_list);
}
EXPORT_SYMBOL_GPL(vhost_has_work);

void vhost_poll_queue(struct vhost_poll *poll)
{
	vhost_work_queue(poll->dev, &poll->work);
}
EXPORT_SYMBOL_GPL(vhost_poll_queue);

static void __vhost_vq_meta_reset(struct vhost_virtqueue *vq)
{
	int j;

	for (j = 0; j < VHOST_NUM_ADDRS; j++)
		vq->meta_iotlb[j] = NULL;
}

static void vhost_vq_meta_reset(struct vhost_dev *d)
{
	int i;

	for (i = 0; i < d->nvqs; ++i)
		__vhost_vq_meta_reset(d->vqs[i]);
}

static void vhost_vq_reset(struct vhost_dev *dev,
			   struct vhost_virtqueue *vq)
{
	vq->num = 1;
	vq->desc = NULL;
	vq->avail = NULL;
	vq->used = NULL;
	vq->last_avail_idx = 0;
	vq->avail_idx = 0;
	vq->last_used_idx = 0;
	vq->signalled_used = 0;
	vq->signalled_used_valid = false;
	vq->used_flags = 0;
	vq->log_used = false;
	vq->log_addr = -1ull;
	vq->private_data = NULL;
	vq->acked_features = 0;
	vq->log_base = NULL;
	vq->error_ctx = NULL;
	vq->kick = NULL;
	vq->call_ctx = NULL;
	vq->log_ctx = NULL;
	vhost_reset_is_le(vq);
	vhost_disable_cross_endian(vq);
	vq->busyloop_timeout = 0;
	vq->umem = NULL;
	vq->iotlb = NULL;
	__vhost_vq_meta_reset(vq);
}
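/* The worker kthread: runs with the owner's mm made current via use_mm()
 * so that userspace accesses on ring addresses hit the owner's address
 * space. It drains dev->work_list in FIFO order (llist_del_all() hands
 * back items newest-first, hence the llist_reverse_order()) and sleeps
 * when the list is empty.
 */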
static int vhost_worker(void *data)
{
	struct vhost_dev *dev = data;
	struct vhost_work *work, *work_next;
	struct llist_node *node;
	mm_segment_t oldfs = get_fs();

	set_fs(USER_DS);
	use_mm(dev->mm);

	for (;;) {
		/* mb paired w/ kthread_stop */
		set_current_state(TASK_INTERRUPTIBLE);

		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			break;
		}

		node = llist_del_all(&dev->work_list);
		if (!node)
			schedule();

		node = llist_reverse_order(node);
		/* make sure flag is seen after deletion */
		smp_wmb();
		llist_for_each_entry_safe(work, work_next, node, node) {
			clear_bit(VHOST_WORK_QUEUED, &work->flags);
			__set_current_state(TASK_RUNNING);
			work->fn(work);
			if (need_resched())
				schedule();
		}
	}
	unuse_mm(dev->mm);
	set_fs(oldfs);
	return 0;
}

static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
{
	kfree(vq->indirect);
	vq->indirect = NULL;
	kfree(vq->log);
	vq->log = NULL;
	kfree(vq->heads);
	vq->heads = NULL;
}

/* Helper to allocate iovec buffers for all vqs. */
static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
{
	struct vhost_virtqueue *vq;
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		vq = dev->vqs[i];
		vq->indirect = kmalloc_array(UIO_MAXIOV,
					     sizeof(*vq->indirect),
					     GFP_KERNEL);
		vq->log = kmalloc_array(UIO_MAXIOV, sizeof(*vq->log),
					GFP_KERNEL);
		vq->heads = kmalloc_array(UIO_MAXIOV, sizeof(*vq->heads),
					  GFP_KERNEL);
		if (!vq->indirect || !vq->log || !vq->heads)
			goto err_nomem;
	}
	return 0;

err_nomem:
	for (; i >= 0; --i)
		vhost_vq_free_iovecs(dev->vqs[i]);
	return -ENOMEM;
}

static void vhost_dev_free_iovecs(struct vhost_dev *dev)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i)
		vhost_vq_free_iovecs(dev->vqs[i]);
}

void vhost_dev_init(struct vhost_dev *dev,
		    struct vhost_virtqueue **vqs, int nvqs)
{
	struct vhost_virtqueue *vq;
	int i;

	dev->vqs = vqs;
	dev->nvqs = nvqs;
	mutex_init(&dev->mutex);
	dev->log_ctx = NULL;
	dev->umem = NULL;
	dev->iotlb = NULL;
	dev->mm = NULL;
	dev->worker = NULL;
	init_llist_head(&dev->work_list);
	init_waitqueue_head(&dev->wait);
	INIT_LIST_HEAD(&dev->read_list);
	INIT_LIST_HEAD(&dev->pending_list);
	spin_lock_init(&dev->iotlb_lock);

	for (i = 0; i < dev->nvqs; ++i) {
		vq = dev->vqs[i];
		vq->log = NULL;
		vq->indirect = NULL;
		vq->heads = NULL;
		vq->dev = dev;
		mutex_init(&vq->mutex);
		vhost_vq_reset(dev, vq);
		if (vq->handle_kick)
			vhost_poll_init(&vq->poll, vq->handle_kick,
					EPOLLIN, dev);
	}
}
EXPORT_SYMBOL_GPL(vhost_dev_init);

/* Caller should have device mutex */
long vhost_dev_check_owner(struct vhost_dev *dev)
{
	/* Are you the owner? If not, I don't think you mean to do that */
	return dev->mm == current->mm ? 0 : -EPERM;
}
EXPORT_SYMBOL_GPL(vhost_dev_check_owner);

struct vhost_attach_cgroups_struct {
	struct vhost_work work;
	struct task_struct *owner;
	int ret;
};

static void vhost_attach_cgroups_work(struct vhost_work *work)
{
	struct vhost_attach_cgroups_struct *s;

	s = container_of(work, struct vhost_attach_cgroups_struct, work);
	s->ret = cgroup_attach_task_all(s->owner, current);
}

static int vhost_attach_cgroups(struct vhost_dev *dev)
{
	struct vhost_attach_cgroups_struct attach;

	attach.owner = current;
	vhost_work_init(&attach.work, vhost_attach_cgroups_work);
	vhost_work_queue(dev, &attach.work);
	vhost_work_flush(dev, &attach.work);
	return attach.ret;
}

/* Caller should have device mutex */
bool vhost_dev_has_owner(struct vhost_dev *dev)
{
	return dev->mm;
}
EXPORT_SYMBOL_GPL(vhost_dev_has_owner);
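/* Ownership: the first process to issue VHOST_SET_OWNER (which lands
 * here) becomes the owner. We take a reference on its mm, spawn the
 * per-device worker kthread, and attach that kthread to the owner's
 * cgroups (via the work item above) so its CPU time is accounted to the
 * owner.
 */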
/* Caller should have device mutex */
long vhost_dev_set_owner(struct vhost_dev *dev)
{
	struct task_struct *worker;
	int err;

	/* Is there an owner already? */
	if (vhost_dev_has_owner(dev)) {
		err = -EBUSY;
		goto err_mm;
	}

	/* No owner, become one */
	dev->mm = get_task_mm(current);
	worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid);
	if (IS_ERR(worker)) {
		err = PTR_ERR(worker);
		goto err_worker;
	}

	dev->worker = worker;
	wake_up_process(worker);	/* avoid contributing to loadavg */

	err = vhost_attach_cgroups(dev);
	if (err)
		goto err_cgroup;

	err = vhost_dev_alloc_iovecs(dev);
	if (err)
		goto err_cgroup;

	return 0;
err_cgroup:
	kthread_stop(worker);
	dev->worker = NULL;
err_worker:
	if (dev->mm)
		mmput(dev->mm);
	dev->mm = NULL;
err_mm:
	return err;
}
EXPORT_SYMBOL_GPL(vhost_dev_set_owner);

struct vhost_umem *vhost_dev_reset_owner_prepare(void)
{
	return kvzalloc(sizeof(struct vhost_umem), GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare);

/* Caller should have device mutex */
void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_umem *umem)
{
	int i;

	vhost_dev_cleanup(dev);

	/* Restore memory to default empty mapping. */
	INIT_LIST_HEAD(&umem->umem_list);
	dev->umem = umem;
	/* We don't need VQ locks below since vhost_dev_cleanup makes sure
	 * VQs aren't running.
	 */
	for (i = 0; i < dev->nvqs; ++i)
		dev->vqs[i]->umem = umem;
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner);

void vhost_dev_stop(struct vhost_dev *dev)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick) {
			vhost_poll_stop(&dev->vqs[i]->poll);
			vhost_poll_flush(&dev->vqs[i]->poll);
		}
	}
}
EXPORT_SYMBOL_GPL(vhost_dev_stop);

static void vhost_umem_free(struct vhost_umem *umem,
			    struct vhost_umem_node *node)
{
	vhost_umem_interval_tree_remove(node, &umem->umem_tree);
	list_del(&node->link);
	kfree(node);
	umem->numem--;
}

static void vhost_umem_clean(struct vhost_umem *umem)
{
	struct vhost_umem_node *node, *tmp;

	if (!umem)
		return;

	list_for_each_entry_safe(node, tmp, &umem->umem_list, link)
		vhost_umem_free(umem, node);

	kvfree(umem);
}

static void vhost_clear_msg(struct vhost_dev *dev)
{
	struct vhost_msg_node *node, *n;

	spin_lock(&dev->iotlb_lock);

	list_for_each_entry_safe(node, n, &dev->read_list, node) {
		list_del(&node->node);
		kfree(node);
	}

	list_for_each_entry_safe(node, n, &dev->pending_list, node) {
		list_del(&node->node);
		kfree(node);
	}

	spin_unlock(&dev->iotlb_lock);
}
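/* Teardown: callers are expected to stop the rings first (vhost_dev_stop()
 * plus device-specific backend removal), so by the time we get here
 * nothing can queue new work. We drop eventfds and kick files, free the
 * memory maps, wake any IOTLB readers, and only then stop the worker and
 * release the owner's mm.
 */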
void vhost_dev_cleanup(struct vhost_dev *dev)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		if (dev->vqs[i]->error_ctx)
			eventfd_ctx_put(dev->vqs[i]->error_ctx);
		if (dev->vqs[i]->kick)
			fput(dev->vqs[i]->kick);
		if (dev->vqs[i]->call_ctx)
			eventfd_ctx_put(dev->vqs[i]->call_ctx);
		vhost_vq_reset(dev, dev->vqs[i]);
	}
	vhost_dev_free_iovecs(dev);
	if (dev->log_ctx)
		eventfd_ctx_put(dev->log_ctx);
	dev->log_ctx = NULL;
	/* No one will access memory at this point */
	vhost_umem_clean(dev->umem);
	dev->umem = NULL;
	vhost_umem_clean(dev->iotlb);
	dev->iotlb = NULL;
	vhost_clear_msg(dev);
	wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM);
	WARN_ON(!llist_empty(&dev->work_list));
	if (dev->worker) {
		kthread_stop(dev->worker);
		dev->worker = NULL;
	}
	if (dev->mm)
		mmput(dev->mm);
	dev->mm = NULL;
}
EXPORT_SYMBOL_GPL(vhost_dev_cleanup);

static bool log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
{
	u64 a = addr / VHOST_PAGE_SIZE / 8;

	/* Make sure 64 bit math will not overflow. */
	if (a > ULONG_MAX - (unsigned long)log_base ||
	    a + (unsigned long)log_base > ULONG_MAX)
		return false;

	return access_ok(VERIFY_WRITE, log_base + a,
			 (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
}

static bool vhost_overflow(u64 uaddr, u64 size)
{
	/* Make sure 64 bit math will not overflow. */
	return uaddr > ULONG_MAX || size > ULONG_MAX || uaddr > ULONG_MAX - size;
}

/* Caller should have vq mutex and device mutex. */
static bool vq_memory_access_ok(void __user *log_base, struct vhost_umem *umem,
				int log_all)
{
	struct vhost_umem_node *node;

	if (!umem)
		return false;

	list_for_each_entry(node, &umem->umem_list, link) {
		unsigned long a = node->userspace_addr;

		if (vhost_overflow(node->userspace_addr, node->size))
			return false;

		if (!access_ok(VERIFY_WRITE, (void __user *)a,
			       node->size))
			return false;
		else if (log_all && !log_access_ok(log_base,
						   node->start,
						   node->size))
			return false;
	}
	return true;
}

static inline void __user *vhost_vq_meta_fetch(struct vhost_virtqueue *vq,
					       u64 addr, unsigned int size,
					       int type)
{
	const struct vhost_umem_node *node = vq->meta_iotlb[type];

	if (!node)
		return NULL;

	return (void *)(uintptr_t)(node->userspace_addr + addr - node->start);
}

/* Can we switch to this memory table? */
/* Caller should have device mutex but not vq mutex */
static bool memory_access_ok(struct vhost_dev *d, struct vhost_umem *umem,
			     int log_all)
{
	int i;

	for (i = 0; i < d->nvqs; ++i) {
		bool ok;
		bool log;

		mutex_lock(&d->vqs[i]->mutex);
		log = log_all || vhost_has_feature(d->vqs[i], VHOST_F_LOG_ALL);
		/* If ring is inactive, will check when it's enabled. */
		if (d->vqs[i]->private_data)
			ok = vq_memory_access_ok(d->vqs[i]->log_base,
						 umem, log);
		else
			ok = true;
		mutex_unlock(&d->vqs[i]->mutex);
		if (!ok)
			return false;
	}
	return true;
}

static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
			  struct iovec iov[], int iov_size, int access);
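/* The helpers below are the IOTLB-aware analogues of copy_{to,from}_user().
 * Fast path: no IOTLB, or the range is covered by the cached metadata
 * translation (vhost_vq_meta_fetch), in which case one plain
 * __copy_{to,from}_user() suffices. Slow path: translate through the
 * IOTLB into iotlb_iov and copy via an iov_iter.
 */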
static int vhost_copy_to_user(struct vhost_virtqueue *vq, void __user *to,
			      const void *from, unsigned size)
{
	int ret;

	if (!vq->iotlb)
		return __copy_to_user(to, from, size);
	else {
		/* This function should be called after iotlb
		 * prefetch, which means we're sure that all of the vq
		 * could be accessed through the iotlb. So -EAGAIN should
		 * not happen in this case.
		 */
		struct iov_iter t;
		void __user *uaddr = vhost_vq_meta_fetch(vq,
				     (u64)(uintptr_t)to, size,
				     VHOST_ADDR_USED);

		if (uaddr)
			return __copy_to_user(uaddr, from, size);

		ret = translate_desc(vq, (u64)(uintptr_t)to, size, vq->iotlb_iov,
				     ARRAY_SIZE(vq->iotlb_iov),
				     VHOST_ACCESS_WO);
		if (ret < 0)
			goto out;
		iov_iter_init(&t, WRITE, vq->iotlb_iov, ret, size);
		ret = copy_to_iter(from, size, &t);
		if (ret == size)
			ret = 0;
	}
out:
	return ret;
}

static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to,
				void __user *from, unsigned size)
{
	int ret;

	if (!vq->iotlb)
		return __copy_from_user(to, from, size);
	else {
		/* This function should be called after iotlb
		 * prefetch, which means we're sure that the vq
		 * could be accessed through the iotlb. So -EAGAIN should
		 * not happen in this case.
		 */
		void __user *uaddr = vhost_vq_meta_fetch(vq,
				     (u64)(uintptr_t)from, size,
				     VHOST_ADDR_DESC);
		struct iov_iter f;

		if (uaddr)
			return __copy_from_user(to, uaddr, size);

		ret = translate_desc(vq, (u64)(uintptr_t)from, size, vq->iotlb_iov,
				     ARRAY_SIZE(vq->iotlb_iov),
				     VHOST_ACCESS_RO);
		if (ret < 0) {
			vq_err(vq, "IOTLB translation failure: uaddr "
			       "%p size 0x%llx\n", from,
			       (unsigned long long) size);
			goto out;
		}
		iov_iter_init(&f, READ, vq->iotlb_iov, ret, size);
		ret = copy_from_iter(to, size, &f);
		if (ret == size)
			ret = 0;
	}

out:
	return ret;
}

static void __user *__vhost_get_user_slow(struct vhost_virtqueue *vq,
					  void __user *addr, unsigned int size,
					  int type)
{
	int ret;

	ret = translate_desc(vq, (u64)(uintptr_t)addr, size, vq->iotlb_iov,
			     ARRAY_SIZE(vq->iotlb_iov),
			     VHOST_ACCESS_RO);
	if (ret < 0) {
		vq_err(vq, "IOTLB translation failure: uaddr "
		       "%p size 0x%llx\n", addr,
		       (unsigned long long) size);
		return NULL;
	}

	if (ret != 1 || vq->iotlb_iov[0].iov_len != size) {
		vq_err(vq, "Non atomic userspace memory access: uaddr "
		       "%p size 0x%llx\n", addr,
		       (unsigned long long) size);
		return NULL;
	}

	return vq->iotlb_iov[0].iov_base;
}

/* This function should be called after iotlb
 * prefetch, which means we're sure that the vq
 * could be accessed through the iotlb. So -EAGAIN should
 * not happen in this case.
 */
static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
					    void *addr, unsigned int size,
					    int type)
{
	void __user *uaddr = vhost_vq_meta_fetch(vq,
			     (u64)(uintptr_t)addr, size, type);
	if (uaddr)
		return uaddr;

	return __vhost_get_user_slow(vq, addr, size, type);
}
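/* vhost_put_user()/vhost_get_user() are typed single-value accessors for
 * ring fields, mirroring __put_user()/__get_user(). With an IOTLB they
 * translate the address first; the type argument selects which cached
 * metadata translation (desc/avail/used) may satisfy the lookup.
 */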
#define vhost_put_user(vq, x, ptr)		\
({ \
	int ret = -EFAULT; \
	if (!vq->iotlb) { \
		ret = __put_user(x, ptr); \
	} else { \
		__typeof__(ptr) to = \
			(__typeof__(ptr)) __vhost_get_user(vq, ptr, \
					  sizeof(*ptr), VHOST_ADDR_USED); \
		if (to != NULL) \
			ret = __put_user(x, to); \
		else \
			ret = -EFAULT; \
	} \
	ret; \
})

#define vhost_get_user(vq, x, ptr, type)		\
({ \
	int ret; \
	if (!vq->iotlb) { \
		ret = __get_user(x, ptr); \
	} else { \
		__typeof__(ptr) from = \
			(__typeof__(ptr)) __vhost_get_user(vq, ptr, \
							   sizeof(*ptr), \
							   type); \
		if (from != NULL) \
			ret = __get_user(x, from); \
		else \
			ret = -EFAULT; \
	} \
	ret; \
})

#define vhost_get_avail(vq, x, ptr) \
	vhost_get_user(vq, x, ptr, VHOST_ADDR_AVAIL)

#define vhost_get_used(vq, x, ptr) \
	vhost_get_user(vq, x, ptr, VHOST_ADDR_USED)

static void vhost_dev_lock_vqs(struct vhost_dev *d)
{
	int i = 0;
	for (i = 0; i < d->nvqs; ++i)
		mutex_lock_nested(&d->vqs[i]->mutex, i);
}

static void vhost_dev_unlock_vqs(struct vhost_dev *d)
{
	int i = 0;
	for (i = 0; i < d->nvqs; ++i)
		mutex_unlock(&d->vqs[i]->mutex);
}

static int vhost_new_umem_range(struct vhost_umem *umem,
				u64 start, u64 size, u64 end,
				u64 userspace_addr, int perm)
{
	struct vhost_umem_node *tmp, *node = kmalloc(sizeof(*node), GFP_ATOMIC);

	if (!node)
		return -ENOMEM;

	if (umem->numem == max_iotlb_entries) {
		tmp = list_first_entry(&umem->umem_list, typeof(*tmp), link);
		vhost_umem_free(umem, tmp);
	}

	node->start = start;
	node->size = size;
	node->last = end;
	node->userspace_addr = userspace_addr;
	node->perm = perm;
	INIT_LIST_HEAD(&node->link);
	list_add_tail(&node->link, &umem->umem_list);
	vhost_umem_interval_tree_insert(node, &umem->umem_tree);
	umem->numem++;

	return 0;
}

static void vhost_del_umem_range(struct vhost_umem *umem,
				 u64 start, u64 end)
{
	struct vhost_umem_node *node;

	while ((node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
							   start, end)))
		vhost_umem_free(umem, node);
}
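/* When userspace inserts a new IOTLB mapping we scan pending_list: any vq
 * that reported a miss inside the newly mapped range is re-queued so it
 * can retry the translation.
 */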
static void vhost_iotlb_notify_vq(struct vhost_dev *d,
				  struct vhost_iotlb_msg *msg)
{
	struct vhost_msg_node *node, *n;

	spin_lock(&d->iotlb_lock);

	list_for_each_entry_safe(node, n, &d->pending_list, node) {
		struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb;
		if (msg->iova <= vq_msg->iova &&
		    msg->iova + msg->size - 1 >= vq_msg->iova &&
		    vq_msg->type == VHOST_IOTLB_MISS) {
			vhost_poll_queue(&node->vq->poll);
			list_del(&node->node);
			kfree(node);
		}
	}

	spin_unlock(&d->iotlb_lock);
}

static bool umem_access_ok(u64 uaddr, u64 size, int access)
{
	unsigned long a = uaddr;

	/* Make sure 64 bit math will not overflow. */
	if (vhost_overflow(uaddr, size))
		return false;

	if ((access & VHOST_ACCESS_RO) &&
	    !access_ok(VERIFY_READ, (void __user *)a, size))
		return false;
	if ((access & VHOST_ACCESS_WO) &&
	    !access_ok(VERIFY_WRITE, (void __user *)a, size))
		return false;
	return true;
}

static int vhost_process_iotlb_msg(struct vhost_dev *dev,
				   struct vhost_iotlb_msg *msg)
{
	int ret = 0;

	mutex_lock(&dev->mutex);
	vhost_dev_lock_vqs(dev);
	switch (msg->type) {
	case VHOST_IOTLB_UPDATE:
		if (!dev->iotlb) {
			ret = -EFAULT;
			break;
		}
		if (!umem_access_ok(msg->uaddr, msg->size, msg->perm)) {
			ret = -EFAULT;
			break;
		}
		vhost_vq_meta_reset(dev);
		if (vhost_new_umem_range(dev->iotlb, msg->iova, msg->size,
					 msg->iova + msg->size - 1,
					 msg->uaddr, msg->perm)) {
			ret = -ENOMEM;
			break;
		}
		vhost_iotlb_notify_vq(dev, msg);
		break;
	case VHOST_IOTLB_INVALIDATE:
		if (!dev->iotlb) {
			ret = -EFAULT;
			break;
		}
		vhost_vq_meta_reset(dev);
		vhost_del_umem_range(dev->iotlb, msg->iova,
				     msg->iova + msg->size - 1);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	vhost_dev_unlock_vqs(dev);
	mutex_unlock(&dev->mutex);

	return ret;
}

ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
			     struct iov_iter *from)
{
	struct vhost_msg_node node;
	unsigned size = sizeof(struct vhost_msg);
	size_t ret;
	int err;

	if (iov_iter_count(from) < size)
		return 0;
	ret = copy_from_iter(&node.msg, size, from);
	if (ret != size)
		goto done;

	switch (node.msg.type) {
	case VHOST_IOTLB_MSG:
		err = vhost_process_iotlb_msg(dev, &node.msg.iotlb);
		if (err)
			ret = err;
		break;
	default:
		ret = -EINVAL;
		break;
	}

done:
	return ret;
}
EXPORT_SYMBOL(vhost_chr_write_iter);

__poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
			poll_table *wait)
{
	__poll_t mask = 0;

	poll_wait(file, &dev->wait, wait);

	if (!list_empty(&dev->read_list))
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}
EXPORT_SYMBOL(vhost_chr_poll);

ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
			    int noblock)
{
	DEFINE_WAIT(wait);
	struct vhost_msg_node *node;
	ssize_t ret = 0;
	unsigned size = sizeof(struct vhost_msg);

	if (iov_iter_count(to) < size)
		return 0;

	while (1) {
		if (!noblock)
			prepare_to_wait(&dev->wait, &wait,
					TASK_INTERRUPTIBLE);

		node = vhost_dequeue_msg(dev, &dev->read_list);
		if (node)
			break;
		if (noblock) {
			ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		if (!dev->iotlb) {
			ret = -EBADFD;
			break;
		}

		schedule();
	}

	if (!noblock)
		finish_wait(&dev->wait, &wait);

	if (node) {
		ret = copy_to_iter(&node->msg, size, to);

		if (ret != size || node->msg.type != VHOST_IOTLB_MISS) {
			kfree(node);
			return ret;
		}

		vhost_enqueue_msg(dev, &dev->pending_list, node);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(vhost_chr_read_iter);
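/* A translation miss is reported to userspace as a VHOST_IOTLB_MISS
 * message on read_list; the device fd then becomes readable (see
 * vhost_chr_poll above) and the vq is re-polled once userspace answers
 * with a VHOST_IOTLB_UPDATE covering the faulting iova.
 */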
static int vhost_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, int access)
{
	struct vhost_dev *dev = vq->dev;
	struct vhost_msg_node *node;
	struct vhost_iotlb_msg *msg;

	node = vhost_new_msg(vq, VHOST_IOTLB_MISS);
	if (!node)
		return -ENOMEM;

	msg = &node->msg.iotlb;
	msg->type = VHOST_IOTLB_MISS;
	msg->iova = iova;
	msg->perm = access;

	vhost_enqueue_msg(dev, &dev->read_list, node);

	return 0;
}

static bool vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
			 struct vring_desc __user *desc,
			 struct vring_avail __user *avail,
			 struct vring_used __user *used)
{
	size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;

	return access_ok(VERIFY_READ, desc, num * sizeof *desc) &&
	       access_ok(VERIFY_READ, avail,
			 sizeof *avail + num * sizeof *avail->ring + s) &&
	       access_ok(VERIFY_WRITE, used,
			 sizeof *used + num * sizeof *used->ring + s);
}

static void vhost_vq_meta_update(struct vhost_virtqueue *vq,
				 const struct vhost_umem_node *node,
				 int type)
{
	int access = (type == VHOST_ADDR_USED) ?
		     VHOST_ACCESS_WO : VHOST_ACCESS_RO;

	if (likely(node->perm & access))
		vq->meta_iotlb[type] = node;
}

static bool iotlb_access_ok(struct vhost_virtqueue *vq,
			    int access, u64 addr, u64 len, int type)
{
	const struct vhost_umem_node *node;
	struct vhost_umem *umem = vq->iotlb;
	u64 s = 0, size, orig_addr = addr, last = addr + len - 1;

	if (vhost_vq_meta_fetch(vq, addr, len, type))
		return true;

	while (len > s) {
		node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
							   addr,
							   last);
		if (node == NULL || node->start > addr) {
			vhost_iotlb_miss(vq, addr, access);
			return false;
		} else if (!(node->perm & access)) {
			/* Report the possible access violation by
			 * requesting another translation from userspace.
			 */
			return false;
		}

		size = node->size - addr + node->start;

		if (orig_addr == addr && size >= len)
			vhost_vq_meta_update(vq, node, type);

		s += size;
		addr += size;
	}

	return true;
}

int vq_iotlb_prefetch(struct vhost_virtqueue *vq)
{
	size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
	unsigned int num = vq->num;

	if (!vq->iotlb)
		return 1;

	return iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->desc,
			       num * sizeof(*vq->desc), VHOST_ADDR_DESC) &&
	       iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->avail,
			       sizeof *vq->avail +
			       num * sizeof(*vq->avail->ring) + s,
			       VHOST_ADDR_AVAIL) &&
	       iotlb_access_ok(vq, VHOST_ACCESS_WO, (u64)(uintptr_t)vq->used,
			       sizeof *vq->used +
			       num * sizeof(*vq->used->ring) + s,
			       VHOST_ADDR_USED);
}
EXPORT_SYMBOL_GPL(vq_iotlb_prefetch);
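/* Example (sketch, not from this file): a device's handle_kick path would
 * typically gate ring processing on vq_iotlb_prefetch(), retrying after
 * the miss is served:
 *
 *	mutex_lock(&vq->mutex);
 *	if (!vq_iotlb_prefetch(vq))
 *		goto out;	// miss queued; we get re-polled later
 *	... vhost_get_vq_desc() / process / vhost_add_used() ...
 * out:
 *	mutex_unlock(&vq->mutex);
 */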
/* Can we log writes? */
/* Caller should have device mutex but not vq mutex */
bool vhost_log_access_ok(struct vhost_dev *dev)
{
	return memory_access_ok(dev, dev->umem, 1);
}
EXPORT_SYMBOL_GPL(vhost_log_access_ok);

/* Verify access for write logging. */
/* Caller should have vq mutex and device mutex */
static bool vq_log_access_ok(struct vhost_virtqueue *vq,
			     void __user *log_base)
{
	size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;

	return vq_memory_access_ok(log_base, vq->umem,
				   vhost_has_feature(vq, VHOST_F_LOG_ALL)) &&
		(!vq->log_used || log_access_ok(log_base, vq->log_addr,
					sizeof *vq->used +
					vq->num * sizeof *vq->used->ring + s));
}

/* Can we start vq? */
/* Caller should have vq mutex and device mutex */
bool vhost_vq_access_ok(struct vhost_virtqueue *vq)
{
	if (!vq_log_access_ok(vq, vq->log_base))
		return false;

	/* Access validation occurs at prefetch time with IOTLB */
	if (vq->iotlb)
		return true;

	return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used);
}
EXPORT_SYMBOL_GPL(vhost_vq_access_ok);

static struct vhost_umem *vhost_umem_alloc(void)
{
	struct vhost_umem *umem = kvzalloc(sizeof(*umem), GFP_KERNEL);

	if (!umem)
		return NULL;

	umem->umem_tree = RB_ROOT_CACHED;
	umem->numem = 0;
	INIT_LIST_HEAD(&umem->umem_list);

	return umem;
}

static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
{
	struct vhost_memory mem, *newmem;
	struct vhost_memory_region *region;
	struct vhost_umem *newumem, *oldumem;
	unsigned long size = offsetof(struct vhost_memory, regions);
	int i;

	if (copy_from_user(&mem, m, size))
		return -EFAULT;
	if (mem.padding)
		return -EOPNOTSUPP;
	if (mem.nregions > max_mem_regions)
		return -E2BIG;
	newmem = kvzalloc(struct_size(newmem, regions, mem.nregions),
			  GFP_KERNEL);
	if (!newmem)
		return -ENOMEM;

	memcpy(newmem, &mem, size);
	if (copy_from_user(newmem->regions, m->regions,
			   mem.nregions * sizeof *m->regions)) {
		kvfree(newmem);
		return -EFAULT;
	}

	newumem = vhost_umem_alloc();
	if (!newumem) {
		kvfree(newmem);
		return -ENOMEM;
	}

	for (region = newmem->regions;
	     region < newmem->regions + mem.nregions;
	     region++) {
		if (vhost_new_umem_range(newumem,
					 region->guest_phys_addr,
					 region->memory_size,
					 region->guest_phys_addr +
					 region->memory_size - 1,
					 region->userspace_addr,
					 VHOST_ACCESS_RW))
			goto err;
	}

	if (!memory_access_ok(d, newumem, 0))
		goto err;

	oldumem = d->umem;
	d->umem = newumem;

	/* All memory accesses are done under some VQ mutex. */
	for (i = 0; i < d->nvqs; ++i) {
		mutex_lock(&d->vqs[i]->mutex);
		d->vqs[i]->umem = newumem;
		mutex_unlock(&d->vqs[i]->mutex);
	}

	kvfree(newmem);
	vhost_umem_clean(oldumem);
	return 0;

err:
	vhost_umem_clean(newumem);
	kvfree(newmem);
	return -EFAULT;
}
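/* Example (sketch, hypothetical userspace values): a minimal memory table
 * for vhost_set_memory() above maps one region of guest physical address
 * space onto a buffer in the caller:
 *
 *	struct vhost_memory *mem =
 *		calloc(1, sizeof(*mem) + sizeof(mem->regions[0]));
 *	mem->nregions = 1;
 *	mem->regions[0].guest_phys_addr = 0;
 *	mem->regions[0].memory_size = guest_ram_size;
 *	mem->regions[0].userspace_addr = (uint64_t)guest_ram_mmap;
 *	ioctl(vhost_fd, VHOST_SET_MEM_TABLE, mem);
 */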
long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
{
	struct file *eventfp, *filep = NULL;
	bool pollstart = false, pollstop = false;
	struct eventfd_ctx *ctx = NULL;
	u32 __user *idxp = argp;
	struct vhost_virtqueue *vq;
	struct vhost_vring_state s;
	struct vhost_vring_file f;
	struct vhost_vring_addr a;
	u32 idx;
	long r;

	r = get_user(idx, idxp);
	if (r < 0)
		return r;
	if (idx >= d->nvqs)
		return -ENOBUFS;

	vq = d->vqs[idx];

	mutex_lock(&vq->mutex);

	switch (ioctl) {
	case VHOST_SET_VRING_NUM:
		/* Resizing ring with an active backend?
		 * You don't want to do that. */
		if (vq->private_data) {
			r = -EBUSY;
			break;
		}
		if (copy_from_user(&s, argp, sizeof s)) {
			r = -EFAULT;
			break;
		}
		if (!s.num || s.num > 0xffff || (s.num & (s.num - 1))) {
			r = -EINVAL;
			break;
		}
		vq->num = s.num;
		break;
	case VHOST_SET_VRING_BASE:
		/* Moving base with an active backend?
		 * You don't want to do that. */
		if (vq->private_data) {
			r = -EBUSY;
			break;
		}
		if (copy_from_user(&s, argp, sizeof s)) {
			r = -EFAULT;
			break;
		}
		if (s.num > 0xffff) {
			r = -EINVAL;
			break;
		}
		vq->last_avail_idx = s.num;
		/* Forget the cached index value. */
		vq->avail_idx = vq->last_avail_idx;
		break;
	case VHOST_GET_VRING_BASE:
		s.index = idx;
		s.num = vq->last_avail_idx;
		if (copy_to_user(argp, &s, sizeof s))
			r = -EFAULT;
		break;
	case VHOST_SET_VRING_ADDR:
		if (copy_from_user(&a, argp, sizeof a)) {
			r = -EFAULT;
			break;
		}
		if (a.flags & ~(0x1 << VHOST_VRING_F_LOG)) {
			r = -EOPNOTSUPP;
			break;
		}
		/* For 32bit, verify that the top 32bits of the user
		   data are set to zero. */
		if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr ||
		    (u64)(unsigned long)a.used_user_addr != a.used_user_addr ||
		    (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr) {
			r = -EFAULT;
			break;
		}

		/* Make sure it's safe to cast pointers to vring types. */
		BUILD_BUG_ON(__alignof__ *vq->avail > VRING_AVAIL_ALIGN_SIZE);
		BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE);
		if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) ||
		    (a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) ||
		    (a.log_guest_addr & (VRING_USED_ALIGN_SIZE - 1))) {
			r = -EINVAL;
			break;
		}

		/* We only verify access here if backend is configured.
		 * If it is not, we don't as size might not have been setup.
		 * We will verify when backend is configured. */
		if (vq->private_data) {
			if (!vq_access_ok(vq, vq->num,
				(void __user *)(unsigned long)a.desc_user_addr,
				(void __user *)(unsigned long)a.avail_user_addr,
				(void __user *)(unsigned long)a.used_user_addr)) {
				r = -EINVAL;
				break;
			}

			/* Also validate log access for used ring if enabled. */
			if ((a.flags & (0x1 << VHOST_VRING_F_LOG)) &&
			    !log_access_ok(vq->log_base, a.log_guest_addr,
					   sizeof *vq->used +
					   vq->num * sizeof *vq->used->ring)) {
				r = -EINVAL;
				break;
			}
		}

		vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG));
		vq->desc = (void __user *)(unsigned long)a.desc_user_addr;
		vq->avail = (void __user *)(unsigned long)a.avail_user_addr;
		vq->log_addr = a.log_guest_addr;
		vq->used = (void __user *)(unsigned long)a.used_user_addr;
		break;
	case VHOST_SET_VRING_KICK:
		if (copy_from_user(&f, argp, sizeof f)) {
			r = -EFAULT;
			break;
		}
		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
		if (IS_ERR(eventfp)) {
			r = PTR_ERR(eventfp);
			break;
		}
		if (eventfp != vq->kick) {
			pollstop = (filep = vq->kick) != NULL;
			pollstart = (vq->kick = eventfp) != NULL;
		} else
			filep = eventfp;
		break;
	case VHOST_SET_VRING_CALL:
		if (copy_from_user(&f, argp, sizeof f)) {
			r = -EFAULT;
			break;
		}
		ctx = f.fd == -1 ? NULL : eventfd_ctx_fdget(f.fd);
		if (IS_ERR(ctx)) {
			r = PTR_ERR(ctx);
			break;
		}
		swap(ctx, vq->call_ctx);
		break;
	case VHOST_SET_VRING_ERR:
		if (copy_from_user(&f, argp, sizeof f)) {
			r = -EFAULT;
			break;
		}
		ctx = f.fd == -1 ? NULL : eventfd_ctx_fdget(f.fd);
		if (IS_ERR(ctx)) {
			r = PTR_ERR(ctx);
			break;
		}
		swap(ctx, vq->error_ctx);
		break;
	case VHOST_SET_VRING_ENDIAN:
		r = vhost_set_vring_endian(vq, argp);
		break;
	case VHOST_GET_VRING_ENDIAN:
		r = vhost_get_vring_endian(vq, idx, argp);
		break;
	case VHOST_SET_VRING_BUSYLOOP_TIMEOUT:
		if (copy_from_user(&s, argp, sizeof(s))) {
			r = -EFAULT;
			break;
		}
		vq->busyloop_timeout = s.num;
		break;
	case VHOST_GET_VRING_BUSYLOOP_TIMEOUT:
		s.index = idx;
		s.num = vq->busyloop_timeout;
		if (copy_to_user(argp, &s, sizeof(s)))
			r = -EFAULT;
		break;
	default:
		r = -ENOIOCTLCMD;
	}

	if (pollstop && vq->handle_kick)
		vhost_poll_stop(&vq->poll);

	if (!IS_ERR_OR_NULL(ctx))
		eventfd_ctx_put(ctx);
	if (filep)
		fput(filep);

	if (pollstart && vq->handle_kick)
		r = vhost_poll_start(&vq->poll, vq->kick);

	mutex_unlock(&vq->mutex);

	if (pollstop && vq->handle_kick)
		vhost_poll_flush(&vq->poll);
	return r;
}
EXPORT_SYMBOL_GPL(vhost_vring_ioctl);
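/* Example (sketch, hypothetical fds): a plausible per-ring setup order a
 * userspace driver would follow through vhost_vring_ioctl() above, before
 * attaching a backend:
 *
 *	struct vhost_vring_state num  = { .index = 0, .num = 256 };
 *	struct vhost_vring_state base = { .index = 0, .num = 0 };
 *	struct vhost_vring_file kick  = { .index = 0, .fd = kick_efd };
 *	struct vhost_vring_file call  = { .index = 0, .fd = call_efd };
 *	ioctl(vhost_fd, VHOST_SET_VRING_NUM,  &num);
 *	ioctl(vhost_fd, VHOST_SET_VRING_BASE, &base);
 *	ioctl(vhost_fd, VHOST_SET_VRING_ADDR, &addr);
 *	ioctl(vhost_fd, VHOST_SET_VRING_KICK, &kick);
 *	ioctl(vhost_fd, VHOST_SET_VRING_CALL, &call);
 */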
int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled)
{
	struct vhost_umem *niotlb, *oiotlb;
	int i;

	niotlb = vhost_umem_alloc();
	if (!niotlb)
		return -ENOMEM;

	oiotlb = d->iotlb;
	d->iotlb = niotlb;

	for (i = 0; i < d->nvqs; ++i) {
		mutex_lock(&d->vqs[i]->mutex);
		d->vqs[i]->iotlb = niotlb;
		mutex_unlock(&d->vqs[i]->mutex);
	}

	vhost_umem_clean(oiotlb);

	return 0;
}
EXPORT_SYMBOL_GPL(vhost_init_device_iotlb);

/* Caller must have device mutex */
long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
{
	struct eventfd_ctx *ctx;
	u64 p;
	long r;
	int i, fd;

	/* If you are not the owner, you can become one */
	if (ioctl == VHOST_SET_OWNER) {
		r = vhost_dev_set_owner(d);
		goto done;
	}

	/* You must be the owner to do anything else */
	r = vhost_dev_check_owner(d);
	if (r)
		goto done;

	switch (ioctl) {
	case VHOST_SET_MEM_TABLE:
		r = vhost_set_memory(d, argp);
		break;
	case VHOST_SET_LOG_BASE:
		if (copy_from_user(&p, argp, sizeof p)) {
			r = -EFAULT;
			break;
		}
		if ((u64)(unsigned long)p != p) {
			r = -EFAULT;
			break;
		}
		for (i = 0; i < d->nvqs; ++i) {
			struct vhost_virtqueue *vq;
			void __user *base = (void __user *)(unsigned long)p;
			vq = d->vqs[i];
			mutex_lock(&vq->mutex);
			/* If ring is inactive, will check when it's enabled. */
			if (vq->private_data && !vq_log_access_ok(vq, base))
				r = -EFAULT;
			else
				vq->log_base = base;
			mutex_unlock(&vq->mutex);
		}
		break;
	case VHOST_SET_LOG_FD:
		r = get_user(fd, (int __user *)argp);
		if (r < 0)
			break;
		ctx = fd == -1 ? NULL : eventfd_ctx_fdget(fd);
		if (IS_ERR(ctx)) {
			r = PTR_ERR(ctx);
			break;
		}
		swap(ctx, d->log_ctx);
		for (i = 0; i < d->nvqs; ++i) {
			mutex_lock(&d->vqs[i]->mutex);
			d->vqs[i]->log_ctx = d->log_ctx;
			mutex_unlock(&d->vqs[i]->mutex);
		}
		if (ctx)
			eventfd_ctx_put(ctx);
		break;
	default:
		r = -ENOIOCTLCMD;
		break;
	}
done:
	return r;
}
EXPORT_SYMBOL_GPL(vhost_dev_ioctl);

/* TODO: This is really inefficient. We need something like get_user()
 * (instruction directly accesses the data, with an exception table entry
 * returning -EFAULT). See Documentation/x86/exception-tables.txt.
 */
static int set_bit_to_user(int nr, void __user *addr)
{
	unsigned long log = (unsigned long)addr;
	struct page *page;
	void *base;
	int bit = nr + (log % PAGE_SIZE) * 8;
	int r;

	r = get_user_pages_fast(log, 1, 1, &page);
	if (r < 0)
		return r;
	BUG_ON(r != 1);
	base = kmap_atomic(page);
	set_bit(bit, base);
	kunmap_atomic(base);
	set_page_dirty_lock(page);
	put_page(page);
	return 0;
}

static int log_write(void __user *log_base,
		     u64 write_address, u64 write_length)
{
	u64 write_page = write_address / VHOST_PAGE_SIZE;
	int r;

	if (!write_length)
		return 0;
	write_length += write_address % VHOST_PAGE_SIZE;
	for (;;) {
		u64 base = (u64)(unsigned long)log_base;
		u64 log = base + write_page / 8;
		int bit = write_page % 8;
		if ((u64)(unsigned long)log != log)
			return -EFAULT;
		r = set_bit_to_user(bit, (void __user *)(unsigned long)log);
		if (r < 0)
			return r;
		if (write_length <= VHOST_PAGE_SIZE)
			break;
		write_length -= VHOST_PAGE_SIZE;
		write_page += 1;
	}
	return r;
}
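/* Dirty log layout: the log is a userspace bitmap with one bit per
 * VHOST_PAGE_SIZE page of guest memory. For a write at address A,
 * log_write() above sets bit (A / VHOST_PAGE_SIZE) % 8 in byte
 * (A / VHOST_PAGE_SIZE) / 8 of the bitmap, one page at a time.
 */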
int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
		    unsigned int log_num, u64 len)
{
	int i, r;

	/* Make sure data written is seen before log. */
	smp_wmb();
	for (i = 0; i < log_num; ++i) {
		u64 l = min(log[i].len, len);
		r = log_write(vq->log_base, log[i].addr, l);
		if (r < 0)
			return r;
		len -= l;
		if (!len) {
			if (vq->log_ctx)
				eventfd_signal(vq->log_ctx, 1);
			return 0;
		}
	}
	/* Length written exceeds what we have stored. This is a bug. */
	BUG();
	return 0;
}
EXPORT_SYMBOL_GPL(vhost_log_write);

static int vhost_update_used_flags(struct vhost_virtqueue *vq)
{
	void __user *used;
	if (vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags),
			   &vq->used->flags) < 0)
		return -EFAULT;
	if (unlikely(vq->log_used)) {
		/* Make sure the flag is seen before log. */
		smp_wmb();
		/* Log used flag write. */
		used = &vq->used->flags;
		log_write(vq->log_base, vq->log_addr +
			  (used - (void __user *)vq->used),
			  sizeof vq->used->flags);
		if (vq->log_ctx)
			eventfd_signal(vq->log_ctx, 1);
	}
	return 0;
}

static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
{
	if (vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
			   vhost_avail_event(vq)))
		return -EFAULT;
	if (unlikely(vq->log_used)) {
		void __user *used;
		/* Make sure the event is seen before log. */
		smp_wmb();
		/* Log avail event write */
		used = vhost_avail_event(vq);
		log_write(vq->log_base, vq->log_addr +
			  (used - (void __user *)vq->used),
			  sizeof *vhost_avail_event(vq));
		if (vq->log_ctx)
			eventfd_signal(vq->log_ctx, 1);
	}
	return 0;
}

int vhost_vq_init_access(struct vhost_virtqueue *vq)
{
	__virtio16 last_used_idx;
	int r;
	bool is_le = vq->is_le;

	if (!vq->private_data)
		return 0;

	vhost_init_is_le(vq);

	r = vhost_update_used_flags(vq);
	if (r)
		goto err;
	vq->signalled_used_valid = false;
	if (!vq->iotlb &&
	    !access_ok(VERIFY_READ, &vq->used->idx, sizeof vq->used->idx)) {
		r = -EFAULT;
		goto err;
	}
	r = vhost_get_used(vq, last_used_idx, &vq->used->idx);
	if (r) {
		vq_err(vq, "Can't access used idx at %p\n",
		       &vq->used->idx);
		goto err;
	}
	vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx);
	return 0;

err:
	vq->is_le = is_le;
	return r;
}
EXPORT_SYMBOL_GPL(vhost_vq_init_access);
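/* translate_desc(): maps a guest/IOVA range onto the owner's address
 * space, one iovec per contiguous region. Returns the number of iovecs
 * used, -ENOBUFS if iov_size is too small, -EPERM on a permission
 * mismatch, and (only when an IOTLB is in use) -EAGAIN after queueing a
 * miss report for the unmapped address.
 */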
static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
			  struct iovec iov[], int iov_size, int access)
{
	const struct vhost_umem_node *node;
	struct vhost_dev *dev = vq->dev;
	struct vhost_umem *umem = dev->iotlb ? dev->iotlb : dev->umem;
	struct iovec *_iov;
	u64 s = 0;
	int ret = 0;

	while ((u64)len > s) {
		u64 size;
		if (unlikely(ret >= iov_size)) {
			ret = -ENOBUFS;
			break;
		}

		node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
							   addr, addr + len - 1);
		if (node == NULL || node->start > addr) {
			if (umem != dev->iotlb) {
				ret = -EFAULT;
				break;
			}
			ret = -EAGAIN;
			break;
		} else if (!(node->perm & access)) {
			ret = -EPERM;
			break;
		}

		_iov = iov + ret;
		size = node->size - addr + node->start;
		_iov->iov_len = min((u64)len - s, size);
		_iov->iov_base = (void __user *)(unsigned long)
			(node->userspace_addr + addr - node->start);
		s += size;
		addr += size;
		++ret;
	}

	if (ret == -EAGAIN)
		vhost_iotlb_miss(vq, addr, access);
	return ret;
}

/* Each buffer in the virtqueues is actually a chain of descriptors. This
 * function returns the next descriptor in the chain,
 * or -1U if we're at the end. */
static unsigned next_desc(struct vhost_virtqueue *vq, struct vring_desc *desc)
{
	unsigned int next;

	/* If this descriptor says it doesn't chain, we're done. */
	if (!(desc->flags & cpu_to_vhost16(vq, VRING_DESC_F_NEXT)))
		return -1U;

	/* Check they're not leading us off the end of the descriptors. */
	next = vhost16_to_cpu(vq, READ_ONCE(desc->next));
	return next;
}
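/* Indirect descriptors, for reference: a descriptor with
 * VRING_DESC_F_INDIRECT points at a separate table of descriptors in
 * guest memory. get_indirect() translates the table, then walks it with
 * the same out-before-in rules as the main ring; nesting is not allowed.
 */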
static int get_indirect(struct vhost_virtqueue *vq,
			struct iovec iov[], unsigned int iov_size,
			unsigned int *out_num, unsigned int *in_num,
			struct vhost_log *log, unsigned int *log_num,
			struct vring_desc *indirect)
{
	struct vring_desc desc;
	unsigned int i = 0, count, found = 0;
	u32 len = vhost32_to_cpu(vq, indirect->len);
	struct iov_iter from;
	int ret, access;

	/* Sanity check */
	if (unlikely(len % sizeof desc)) {
		vq_err(vq, "Invalid length in indirect descriptor: "
		       "len 0x%llx not multiple of 0x%zx\n",
		       (unsigned long long)len,
		       sizeof desc);
		return -EINVAL;
	}

	ret = translate_desc(vq, vhost64_to_cpu(vq, indirect->addr), len, vq->indirect,
			     UIO_MAXIOV, VHOST_ACCESS_RO);
	if (unlikely(ret < 0)) {
		if (ret != -EAGAIN)
			vq_err(vq, "Translation failure %d in indirect.\n", ret);
		return ret;
	}
	iov_iter_init(&from, READ, vq->indirect, ret, len);

	/* We will use the result as an address to read from, so most
	 * architectures only need a compiler barrier here. */
	read_barrier_depends();

	count = len / sizeof desc;
	/* Buffers are chained via a 16 bit next field, so
	 * we can have at most 2^16 of these. */
	if (unlikely(count > USHRT_MAX + 1)) {
		vq_err(vq, "Indirect buffer length too big: %d\n",
		       indirect->len);
		return -E2BIG;
	}

	do {
		unsigned iov_count = *in_num + *out_num;
		if (unlikely(++found > count)) {
			vq_err(vq, "Loop detected: last one at %u "
			       "indirect size %u\n",
			       i, count);
			return -EINVAL;
		}
		if (unlikely(!copy_from_iter_full(&desc, sizeof(desc), &from))) {
			vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
			       i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
			return -EINVAL;
		}
		if (unlikely(desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT))) {
			vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
			       i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
			return -EINVAL;
		}

		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
			access = VHOST_ACCESS_WO;
		else
			access = VHOST_ACCESS_RO;

		ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
				     vhost32_to_cpu(vq, desc.len), iov + iov_count,
				     iov_size - iov_count, access);
		if (unlikely(ret < 0)) {
			if (ret != -EAGAIN)
				vq_err(vq, "Translation failure %d indirect idx %d\n",
				       ret, i);
			return ret;
		}
		/* If this is an input descriptor, increment that count. */
		if (access == VHOST_ACCESS_WO) {
			*in_num += ret;
			if (unlikely(log)) {
				log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
				log[*log_num].len = vhost32_to_cpu(vq, desc.len);
				++*log_num;
			}
		} else {
			/* If it's an output descriptor, they're all supposed
			 * to come before any input descriptors. */
			if (unlikely(*in_num)) {
				vq_err(vq, "Indirect descriptor "
				       "has out after in: idx %d\n", i);
				return -EINVAL;
			}
			*out_num += ret;
		}
	} while ((i = next_desc(vq, &desc)) != -1);
	return 0;
}
/* This looks in the virtqueue for the first available buffer, and converts
 * it to an iovec for convenient access. Since descriptors consist of some
 * number of output then some number of input descriptors, it's actually two
 * iovecs, but we pack them into one and note how many of each there were.
 *
 * This function returns the descriptor number found, or vq->num (which is
 * never a valid descriptor number) if none was found. A negative code is
 * returned on error. */
int vhost_get_vq_desc(struct vhost_virtqueue *vq,
		      struct iovec iov[], unsigned int iov_size,
		      unsigned int *out_num, unsigned int *in_num,
		      struct vhost_log *log, unsigned int *log_num)
{
	struct vring_desc desc;
	unsigned int i, head, found = 0;
	u16 last_avail_idx;
	__virtio16 avail_idx;
	__virtio16 ring_head;
	int ret, access;

	/* Check it isn't doing very strange things with descriptor numbers. */
	last_avail_idx = vq->last_avail_idx;

	if (vq->avail_idx == vq->last_avail_idx) {
		if (unlikely(vhost_get_avail(vq, avail_idx, &vq->avail->idx))) {
			vq_err(vq, "Failed to access avail idx at %p\n",
			       &vq->avail->idx);
			return -EFAULT;
		}
		vq->avail_idx = vhost16_to_cpu(vq, avail_idx);

		if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
			vq_err(vq, "Guest moved used index from %u to %u",
			       last_avail_idx, vq->avail_idx);
			return -EFAULT;
		}

		/* If there's nothing new since last we looked, return
		 * invalid.
		 */
		if (vq->avail_idx == last_avail_idx)
			return vq->num;

		/* Only get avail ring entries after they have been
		 * exposed by guest.
		 */
		smp_rmb();
	}

	/* Grab the next descriptor number they're advertising, and increment
	 * the index we've seen. */
	if (unlikely(vhost_get_avail(vq, ring_head,
		     &vq->avail->ring[last_avail_idx & (vq->num - 1)]))) {
		vq_err(vq, "Failed to read head: idx %d address %p\n",
		       last_avail_idx,
		       &vq->avail->ring[last_avail_idx % vq->num]);
		return -EFAULT;
	}

	head = vhost16_to_cpu(vq, ring_head);

	/* If their number is silly, that's an error. */
	if (unlikely(head >= vq->num)) {
		vq_err(vq, "Guest says index %u > %u is available",
		       head, vq->num);
		return -EINVAL;
	}

	/* When we start there are neither input nor output descriptors. */
	*out_num = *in_num = 0;
	if (unlikely(log))
		*log_num = 0;

	i = head;
	do {
		unsigned iov_count = *in_num + *out_num;
		if (unlikely(i >= vq->num)) {
			vq_err(vq, "Desc index is %u > %u, head = %u",
			       i, vq->num, head);
			return -EINVAL;
		}
		if (unlikely(++found > vq->num)) {
			vq_err(vq, "Loop detected: last one at %u "
			       "vq size %u head %u\n",
			       i, vq->num, head);
			return -EINVAL;
		}
		ret = vhost_copy_from_user(vq, &desc, vq->desc + i,
					   sizeof desc);
		if (unlikely(ret)) {
			vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
			       i, vq->desc + i);
			return -EFAULT;
		}
		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT)) {
			ret = get_indirect(vq, iov, iov_size,
					   out_num, in_num,
					   log, log_num, &desc);
			if (unlikely(ret < 0)) {
				if (ret != -EAGAIN)
					vq_err(vq, "Failure detected "
					       "in indirect descriptor at idx %d\n", i);
				return ret;
			}
			continue;
		}

		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
			access = VHOST_ACCESS_WO;
		else
			access = VHOST_ACCESS_RO;
		ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
				     vhost32_to_cpu(vq, desc.len), iov + iov_count,
				     iov_size - iov_count, access);
		if (unlikely(ret < 0)) {
			if (ret != -EAGAIN)
				vq_err(vq, "Translation failure %d descriptor idx %d\n",
				       ret, i);
			return ret;
		}
		if (access == VHOST_ACCESS_WO) {
			/* If this is an input descriptor,
			 * increment that count. */
			*in_num += ret;
			if (unlikely(log)) {
				log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
				log[*log_num].len = vhost32_to_cpu(vq, desc.len);
				++*log_num;
			}
		} else {
			/* If it's an output descriptor, they're all supposed
			 * to come before any input descriptors. */
			if (unlikely(*in_num)) {
				vq_err(vq, "Descriptor has out after in: "
				       "idx %d\n", i);
				return -EINVAL;
			}
			*out_num += ret;
		}
	} while ((i = next_desc(vq, &desc)) != -1);

	/* On success, increment avail index. */
	vq->last_avail_idx++;

	/* Assume notifications from guest are disabled at this point,
	 * if they aren't we would need to update avail_event index. */
	BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
	return head;
}
EXPORT_SYMBOL_GPL(vhost_get_vq_desc);

/* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
{
	vq->last_avail_idx -= n;
}
EXPORT_SYMBOL_GPL(vhost_discard_vq_desc);
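/* Example (sketch, not from this file): how a device's work function
 * would typically combine the helpers above and below, per buffer:
 *
 *	head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
 *				 &out, &in, NULL, NULL);
 *	if (head < 0 || head == vq->num)
 *		break;			// error, or ring empty
 *	... consume vq->iov[0 .. out+in) ...
 *	vhost_add_used_and_signal(&dev, vq, head, len);
 *
 * with vhost_discard_vq_desc() to back out on a mid-stream failure.
 */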
/* After we've used one of their buffers, we tell them about it. We'll then
 * want to notify the guest, using eventfd. */
int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
{
	struct vring_used_elem heads = {
		cpu_to_vhost32(vq, head),
		cpu_to_vhost32(vq, len)
	};

	return vhost_add_used_n(vq, &heads, 1);
}
EXPORT_SYMBOL_GPL(vhost_add_used);

static int __vhost_add_used_n(struct vhost_virtqueue *vq,
			      struct vring_used_elem *heads,
			      unsigned count)
{
	struct vring_used_elem __user *used;
	u16 old, new;
	int start;

	start = vq->last_used_idx & (vq->num - 1);
	used = vq->used->ring + start;
	if (count == 1) {
		if (vhost_put_user(vq, heads[0].id, &used->id)) {
			vq_err(vq, "Failed to write used id");
			return -EFAULT;
		}
		if (vhost_put_user(vq, heads[0].len, &used->len)) {
			vq_err(vq, "Failed to write used len");
			return -EFAULT;
		}
	} else if (vhost_copy_to_user(vq, used, heads, count * sizeof *used)) {
		vq_err(vq, "Failed to write used");
		return -EFAULT;
	}
	if (unlikely(vq->log_used)) {
		/* Make sure data is seen before log. */
		smp_wmb();
		/* Log used ring entry write. */
		log_write(vq->log_base,
			  vq->log_addr +
			   ((void __user *)used - (void __user *)vq->used),
			  count * sizeof *used);
	}
	old = vq->last_used_idx;
	new = (vq->last_used_idx += count);
	/* If the driver never bothers to signal in a very long while,
	 * used index might wrap around. If that happens, invalidate
	 * signalled_used index we stored. TODO: make sure driver
	 * signals at least once in 2^16 and remove this. */
	if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old)))
		vq->signalled_used_valid = false;
	return 0;
}

/* After we've used one of their buffers, we tell them about it. We'll then
 * want to notify the guest, using eventfd. */
int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
		     unsigned count)
{
	int start, n, r;

	start = vq->last_used_idx & (vq->num - 1);
	n = vq->num - start;
	if (n < count) {
		r = __vhost_add_used_n(vq, heads, n);
		if (r < 0)
			return r;
		heads += n;
		count -= n;
	}
	r = __vhost_add_used_n(vq, heads, count);

	/* Make sure buffer is written before we update index. */
	smp_wmb();
	if (vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx),
			   &vq->used->idx)) {
		vq_err(vq, "Failed to increment used idx");
		return -EFAULT;
	}
	if (unlikely(vq->log_used)) {
		/* Log used index update. */
		log_write(vq->log_base,
			  vq->log_addr + offsetof(struct vring_used, idx),
			  sizeof vq->used->idx);
		if (vq->log_ctx)
			eventfd_signal(vq->log_ctx, 1);
	}
	return r;
}
EXPORT_SYMBOL_GPL(vhost_add_used_n);
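/*
 * Editorial note: vhost_add_used_n() splits a batch that would cross the
 * end of the used ring into two contiguous copies. Worked example: with
 * vq->num == 256 and last_used_idx == 510, start == (510 & 255) == 254
 * and n == 2, so a batch of count == 4 is written as two elements at
 * slots 254-255 followed by two at slots 0-1, before the single used->idx
 * update makes all of them visible to the guest at once.
 */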
static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	__u16 old, new;
	__virtio16 event;
	bool v;

	/* Flush out used index updates. This is paired
	 * with the barrier that the Guest executes when enabling
	 * interrupts. */
	smp_mb();

	if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) &&
	    unlikely(vq->avail_idx == vq->last_avail_idx))
		return true;

	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
		__virtio16 flags;
		if (vhost_get_avail(vq, flags, &vq->avail->flags)) {
			vq_err(vq, "Failed to get flags");
			return true;
		}
		return !(flags & cpu_to_vhost16(vq, VRING_AVAIL_F_NO_INTERRUPT));
	}
	old = vq->signalled_used;
	v = vq->signalled_used_valid;
	new = vq->signalled_used = vq->last_used_idx;
	vq->signalled_used_valid = true;

	if (unlikely(!v))
		return true;

	if (vhost_get_avail(vq, event, vhost_used_event(vq))) {
		vq_err(vq, "Failed to get used event idx");
		return true;
	}
	return vring_need_event(vhost16_to_cpu(vq, event), new, old);
}

/* This actually signals the guest, using eventfd. */
void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	/* Signal the Guest, tell them we used something up. */
	if (vq->call_ctx && vhost_notify(dev, vq))
		eventfd_signal(vq->call_ctx, 1);
}
EXPORT_SYMBOL_GPL(vhost_signal);

/* And here's the combo meal deal. Supersize me! */
void vhost_add_used_and_signal(struct vhost_dev *dev,
			       struct vhost_virtqueue *vq,
			       unsigned int head, int len)
{
	vhost_add_used(vq, head, len);
	vhost_signal(dev, vq);
}
EXPORT_SYMBOL_GPL(vhost_add_used_and_signal);

/* multi-buffer version of vhost_add_used_and_signal */
void vhost_add_used_and_signal_n(struct vhost_dev *dev,
				 struct vhost_virtqueue *vq,
				 struct vring_used_elem *heads, unsigned count)
{
	vhost_add_used_n(vq, heads, count);
	vhost_signal(dev, vq);
}
EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n);

/* Return true if we're sure that the available ring is empty. */
bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	__virtio16 avail_idx;
	int r;

	if (vq->avail_idx != vq->last_avail_idx)
		return false;

	r = vhost_get_avail(vq, avail_idx, &vq->avail->idx);
	if (unlikely(r))
		return false;
	vq->avail_idx = vhost16_to_cpu(vq, avail_idx);

	return vq->avail_idx == vq->last_avail_idx;
}
EXPORT_SYMBOL_GPL(vhost_vq_avail_empty);
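/*
 * Editorial note on the EVENT_IDX path in vhost_notify() above:
 * vring_need_event() (from include/uapi/linux/virtio_ring.h) boils down
 * to
 *
 *	(__u16)(new_idx - event_idx - 1) < (__u16)(new_idx - old);
 *
 * i.e. signal iff the guest's used_event index falls within the range of
 * entries added since the last signal. Worked example: with old == 9,
 * new == 12 and the guest's used_event == 10, this is 1 < 3, so the
 * guest is signalled; later, with old == 12, new == 14 and used_event
 * still 10, it is 3 < 2, which is false, so no further interrupt is
 * raised until the guest moves used_event forward.
 */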
/* OK, now we need to know about added descriptors. */
bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	__virtio16 avail_idx;
	int r;

	if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
		return false;
	vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
		r = vhost_update_used_flags(vq);
		if (r) {
			vq_err(vq, "Failed to enable notification at %p: %d\n",
			       &vq->used->flags, r);
			return false;
		}
	} else {
		r = vhost_update_avail_event(vq, vq->avail_idx);
		if (r) {
			vq_err(vq, "Failed to update avail event index at %p: %d\n",
			       vhost_avail_event(vq), r);
			return false;
		}
	}
	/* They could have slipped one in as we were doing that: make
	 * sure it's written, then check again. */
	smp_mb();
	r = vhost_get_avail(vq, avail_idx, &vq->avail->idx);
	if (r) {
		vq_err(vq, "Failed to check avail idx at %p: %d\n",
		       &vq->avail->idx, r);
		return false;
	}

	return vhost16_to_cpu(vq, avail_idx) != vq->avail_idx;
}
EXPORT_SYMBOL_GPL(vhost_enable_notify);

/* We don't need to be notified again. */
void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	int r;

	if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
		return;
	vq->used_flags |= VRING_USED_F_NO_NOTIFY;
	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
		r = vhost_update_used_flags(vq);
		if (r)
			vq_err(vq, "Failed to disable notification at %p: %d\n",
			       &vq->used->flags, r);
	}
}
EXPORT_SYMBOL_GPL(vhost_disable_notify);

/* Create a new message. */
struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type)
{
	struct vhost_msg_node *node = kmalloc(sizeof *node, GFP_KERNEL);
	if (!node)
		return NULL;

	/* Make sure all padding within the structure is initialized. */
	memset(&node->msg, 0, sizeof node->msg);
	node->vq = vq;
	node->msg.type = type;
	return node;
}
EXPORT_SYMBOL_GPL(vhost_new_msg);

void vhost_enqueue_msg(struct vhost_dev *dev, struct list_head *head,
		       struct vhost_msg_node *node)
{
	spin_lock(&dev->iotlb_lock);
	list_add_tail(&node->node, head);
	spin_unlock(&dev->iotlb_lock);

	wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM);
}
EXPORT_SYMBOL_GPL(vhost_enqueue_msg);

struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
					 struct list_head *head)
{
	struct vhost_msg_node *node = NULL;

	spin_lock(&dev->iotlb_lock);
	if (!list_empty(head)) {
		node = list_first_entry(head, struct vhost_msg_node,
					node);
		list_del(&node->node);
	}
	spin_unlock(&dev->iotlb_lock);

	return node;
}
EXPORT_SYMBOL_GPL(vhost_dequeue_msg);


static int __init vhost_init(void)
{
	return 0;
}

static void __exit vhost_exit(void)
{
}

module_init(vhost_init);
module_exit(vhost_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel accelerator for virtio");
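/*
 * Editorial illustration (not part of the original file): the canonical
 * race-free polling pattern built from vhost_disable_notify() and
 * vhost_enable_notify() above, in the style of backends such as
 * vhost-net. All names besides the exported helpers are hypothetical.
 *
 *	vhost_disable_notify(dev, vq);
 *	for (;;) {
 *		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
 *					 &out, &in, NULL, NULL);
 *		if (head == vq->num) {
 *			// Ring looks empty: re-enable guest kicks, then
 *			// re-check, since a buffer may have been added
 *			// between our last poll and the flag/used_event
 *			// update becoming visible to the guest.
 *			if (unlikely(vhost_enable_notify(dev, vq))) {
 *				vhost_disable_notify(dev, vq);
 *				continue;
 *			}
 *			break;
 *		}
 *		// ...consume the buffer, compute len...
 *		vhost_add_used_and_signal(dev, vq, head, len);
 *	}
 *
 * vhost_enable_notify() returning true means the avail index moved while
 * notifications were off, so the caller must poll once more rather than
 * wait for the next kick.
 */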