// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO: IOMMU DMA mapping support for Type1 IOMMU
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 *
 * We arbitrarily define a Type1 IOMMU as one matching the below code.
 * It could be called the x86 IOMMU as it's designed for AMD-Vi & Intel
 * VT-d, but that makes it harder to re-use as theoretically anyone
 * implementing a similar IOMMU could make use of this.  We expect the
 * IOMMU to support the IOMMU API and have few to no restrictions around
 * the IOVA range that can be mapped.  The Type1 IOMMU is currently
 * optimized for relatively static mappings of a userspace process with
 * userspace pages pinned into memory.  We also assume devices and IOMMU
 * domains are PCI based as the IOMMU API is still centered around a
 * device/bus interface rather than a group interface.
 */

#include <linux/compat.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/workqueue.h>
#include <linux/mdev.h>
#include <linux/notifier.h>
#include <linux/dma-iommu.h>
#include <linux/irqdomain.h>

#define DRIVER_VERSION  "0.2"
#define DRIVER_AUTHOR   "Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC     "Type1 IOMMU driver for VFIO"

static bool allow_unsafe_interrupts;
module_param_named(allow_unsafe_interrupts,
                   allow_unsafe_interrupts, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(allow_unsafe_interrupts,
                 "Enable VFIO IOMMU support on platforms without interrupt remapping support.");

static bool disable_hugepages;
module_param_named(disable_hugepages,
                   disable_hugepages, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(disable_hugepages,
                 "Disable VFIO IOMMU support for IOMMU hugepages.");

static unsigned int dma_entry_limit __read_mostly = U16_MAX;
module_param_named(dma_entry_limit, dma_entry_limit, uint, 0644);
MODULE_PARM_DESC(dma_entry_limit,
                 "Maximum number of user DMA mappings per container (65535).");

struct vfio_iommu {
        struct list_head domain_list;
        struct list_head iova_list;
        struct vfio_domain *external_domain; /* domain for external user */
        struct mutex lock;
        struct rb_root dma_list;
        struct blocking_notifier_head notifier;
        unsigned int dma_avail;
        unsigned int vaddr_invalid_count;
        uint64_t pgsize_bitmap;
        uint64_t num_non_pinned_groups;
        wait_queue_head_t vaddr_wait;
        bool v2;
        bool nesting;
        bool dirty_page_tracking;
        bool pinned_page_dirty_scope;
        bool container_open;
};

struct vfio_domain {
        struct iommu_domain *domain;
        struct list_head next;
        struct list_head group_list;
        int prot;               /* IOMMU_CACHE */
        bool fgsp;              /* Fine-grained super pages */
};

struct vfio_dma {
        struct rb_node node;
        dma_addr_t iova;        /* Device address */
        unsigned long vaddr;    /* Process virtual addr */
        size_t size;            /* Map size (bytes) */
        int prot;               /* IOMMU_READ/WRITE */
        bool iommu_mapped;
        bool lock_cap;          /* capable(CAP_IPC_LOCK) */
        bool
vaddr_invalid; 101 struct task_struct *task; 102 struct rb_root pfn_list; /* Ex-user pinned pfn list */ 103 unsigned long *bitmap; 104 }; 105 106 struct vfio_batch { 107 struct page **pages; /* for pin_user_pages_remote */ 108 struct page *fallback_page; /* if pages alloc fails */ 109 int capacity; /* length of pages array */ 110 int size; /* of batch currently */ 111 int offset; /* of next entry in pages */ 112 }; 113 114 struct vfio_group { 115 struct iommu_group *iommu_group; 116 struct list_head next; 117 bool mdev_group; /* An mdev group */ 118 bool pinned_page_dirty_scope; 119 }; 120 121 struct vfio_iova { 122 struct list_head list; 123 dma_addr_t start; 124 dma_addr_t end; 125 }; 126 127 /* 128 * Guest RAM pinning working set or DMA target 129 */ 130 struct vfio_pfn { 131 struct rb_node node; 132 dma_addr_t iova; /* Device address */ 133 unsigned long pfn; /* Host pfn */ 134 unsigned int ref_count; 135 }; 136 137 struct vfio_regions { 138 struct list_head list; 139 dma_addr_t iova; 140 phys_addr_t phys; 141 size_t len; 142 }; 143 144 #define IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu) \ 145 (!list_empty(&iommu->domain_list)) 146 147 #define DIRTY_BITMAP_BYTES(n) (ALIGN(n, BITS_PER_TYPE(u64)) / BITS_PER_BYTE) 148 149 /* 150 * Input argument of number of bits to bitmap_set() is unsigned integer, which 151 * further casts to signed integer for unaligned multi-bit operation, 152 * __bitmap_set(). 153 * Then maximum bitmap size supported is 2^31 bits divided by 2^3 bits/byte, 154 * that is 2^28 (256 MB) which maps to 2^31 * 2^12 = 2^43 (8TB) on 4K page 155 * system. 156 */ 157 #define DIRTY_BITMAP_PAGES_MAX ((u64)INT_MAX) 158 #define DIRTY_BITMAP_SIZE_MAX DIRTY_BITMAP_BYTES(DIRTY_BITMAP_PAGES_MAX) 159 160 #define WAITED 1 161 162 static int put_pfn(unsigned long pfn, int prot); 163 164 static struct vfio_group *vfio_iommu_find_iommu_group(struct vfio_iommu *iommu, 165 struct iommu_group *iommu_group); 166 167 /* 168 * This code handles mapping and unmapping of user data buffers 169 * into DMA'ble space using the IOMMU 170 */ 171 172 static struct vfio_dma *vfio_find_dma(struct vfio_iommu *iommu, 173 dma_addr_t start, size_t size) 174 { 175 struct rb_node *node = iommu->dma_list.rb_node; 176 177 while (node) { 178 struct vfio_dma *dma = rb_entry(node, struct vfio_dma, node); 179 180 if (start + size <= dma->iova) 181 node = node->rb_left; 182 else if (start >= dma->iova + dma->size) 183 node = node->rb_right; 184 else 185 return dma; 186 } 187 188 return NULL; 189 } 190 191 static struct rb_node *vfio_find_dma_first_node(struct vfio_iommu *iommu, 192 dma_addr_t start, size_t size) 193 { 194 struct rb_node *res = NULL; 195 struct rb_node *node = iommu->dma_list.rb_node; 196 struct vfio_dma *dma_res = NULL; 197 198 while (node) { 199 struct vfio_dma *dma = rb_entry(node, struct vfio_dma, node); 200 201 if (start < dma->iova + dma->size) { 202 res = node; 203 dma_res = dma; 204 if (start >= dma->iova) 205 break; 206 node = node->rb_left; 207 } else { 208 node = node->rb_right; 209 } 210 } 211 if (res && size && dma_res->iova >= start + size) 212 res = NULL; 213 return res; 214 } 215 216 static void vfio_link_dma(struct vfio_iommu *iommu, struct vfio_dma *new) 217 { 218 struct rb_node **link = &iommu->dma_list.rb_node, *parent = NULL; 219 struct vfio_dma *dma; 220 221 while (*link) { 222 parent = *link; 223 dma = rb_entry(parent, struct vfio_dma, node); 224 225 if (new->iova + new->size <= dma->iova) 226 link = &(*link)->rb_left; 227 else 228 link = &(*link)->rb_right; 229 } 230 231 
        rb_link_node(&new->node, parent, link);
        rb_insert_color(&new->node, &iommu->dma_list);
}

static void vfio_unlink_dma(struct vfio_iommu *iommu, struct vfio_dma *old)
{
        rb_erase(&old->node, &iommu->dma_list);
}


static int vfio_dma_bitmap_alloc(struct vfio_dma *dma, size_t pgsize)
{
        uint64_t npages = dma->size / pgsize;

        if (npages > DIRTY_BITMAP_PAGES_MAX)
                return -EINVAL;

        /*
         * Allocate extra 64 bits that are used to calculate shift required for
         * bitmap_shift_left() to manipulate and club unaligned number of pages
         * in adjacent vfio_dma ranges.
         */
        dma->bitmap = kvzalloc(DIRTY_BITMAP_BYTES(npages) + sizeof(u64),
                               GFP_KERNEL);
        if (!dma->bitmap)
                return -ENOMEM;

        return 0;
}

static void vfio_dma_bitmap_free(struct vfio_dma *dma)
{
        kvfree(dma->bitmap);
        dma->bitmap = NULL;
}

static void vfio_dma_populate_bitmap(struct vfio_dma *dma, size_t pgsize)
{
        struct rb_node *p;
        unsigned long pgshift = __ffs(pgsize);

        for (p = rb_first(&dma->pfn_list); p; p = rb_next(p)) {
                struct vfio_pfn *vpfn = rb_entry(p, struct vfio_pfn, node);

                bitmap_set(dma->bitmap, (vpfn->iova - dma->iova) >> pgshift, 1);
        }
}

static void vfio_iommu_populate_bitmap_full(struct vfio_iommu *iommu)
{
        struct rb_node *n;
        unsigned long pgshift = __ffs(iommu->pgsize_bitmap);

        for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
                struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);

                bitmap_set(dma->bitmap, 0, dma->size >> pgshift);
        }
}

static int vfio_dma_bitmap_alloc_all(struct vfio_iommu *iommu, size_t pgsize)
{
        struct rb_node *n;

        for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
                struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
                int ret;

                ret = vfio_dma_bitmap_alloc(dma, pgsize);
                if (ret) {
                        struct rb_node *p;

                        for (p = rb_prev(n); p; p = rb_prev(p)) {
                                struct vfio_dma *dma = rb_entry(p,
                                                        struct vfio_dma, node);

                                vfio_dma_bitmap_free(dma);
                        }
                        return ret;
                }
                vfio_dma_populate_bitmap(dma, pgsize);
        }
        return 0;
}

static void vfio_dma_bitmap_free_all(struct vfio_iommu *iommu)
{
        struct rb_node *n;

        for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
                struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);

                vfio_dma_bitmap_free(dma);
        }
}

/*
 * Helper Functions for host iova-pfn list
 */
static struct vfio_pfn *vfio_find_vpfn(struct vfio_dma *dma, dma_addr_t iova)
{
        struct vfio_pfn *vpfn;
        struct rb_node *node = dma->pfn_list.rb_node;

        while (node) {
                vpfn = rb_entry(node, struct vfio_pfn, node);

                if (iova < vpfn->iova)
                        node = node->rb_left;
                else if (iova > vpfn->iova)
                        node = node->rb_right;
                else
                        return vpfn;
        }
        return NULL;
}

static void vfio_link_pfn(struct vfio_dma *dma,
                          struct vfio_pfn *new)
{
        struct rb_node **link, *parent = NULL;
        struct vfio_pfn *vpfn;

        link = &dma->pfn_list.rb_node;
        while (*link) {
                parent = *link;
                vpfn = rb_entry(parent, struct vfio_pfn, node);

                if (new->iova < vpfn->iova)
                        link = &(*link)->rb_left;
                else
                        link = &(*link)->rb_right;
        }

        rb_link_node(&new->node, parent, link);
        rb_insert_color(&new->node, &dma->pfn_list);
}

static void vfio_unlink_pfn(struct vfio_dma *dma, struct
vfio_pfn *old) 370 { 371 rb_erase(&old->node, &dma->pfn_list); 372 } 373 374 static int vfio_add_to_pfn_list(struct vfio_dma *dma, dma_addr_t iova, 375 unsigned long pfn) 376 { 377 struct vfio_pfn *vpfn; 378 379 vpfn = kzalloc(sizeof(*vpfn), GFP_KERNEL); 380 if (!vpfn) 381 return -ENOMEM; 382 383 vpfn->iova = iova; 384 vpfn->pfn = pfn; 385 vpfn->ref_count = 1; 386 vfio_link_pfn(dma, vpfn); 387 return 0; 388 } 389 390 static void vfio_remove_from_pfn_list(struct vfio_dma *dma, 391 struct vfio_pfn *vpfn) 392 { 393 vfio_unlink_pfn(dma, vpfn); 394 kfree(vpfn); 395 } 396 397 static struct vfio_pfn *vfio_iova_get_vfio_pfn(struct vfio_dma *dma, 398 unsigned long iova) 399 { 400 struct vfio_pfn *vpfn = vfio_find_vpfn(dma, iova); 401 402 if (vpfn) 403 vpfn->ref_count++; 404 return vpfn; 405 } 406 407 static int vfio_iova_put_vfio_pfn(struct vfio_dma *dma, struct vfio_pfn *vpfn) 408 { 409 int ret = 0; 410 411 vpfn->ref_count--; 412 if (!vpfn->ref_count) { 413 ret = put_pfn(vpfn->pfn, dma->prot); 414 vfio_remove_from_pfn_list(dma, vpfn); 415 } 416 return ret; 417 } 418 419 static int vfio_lock_acct(struct vfio_dma *dma, long npage, bool async) 420 { 421 struct mm_struct *mm; 422 int ret; 423 424 if (!npage) 425 return 0; 426 427 mm = async ? get_task_mm(dma->task) : dma->task->mm; 428 if (!mm) 429 return -ESRCH; /* process exited */ 430 431 ret = mmap_write_lock_killable(mm); 432 if (!ret) { 433 ret = __account_locked_vm(mm, abs(npage), npage > 0, dma->task, 434 dma->lock_cap); 435 mmap_write_unlock(mm); 436 } 437 438 if (async) 439 mmput(mm); 440 441 return ret; 442 } 443 444 /* 445 * Some mappings aren't backed by a struct page, for example an mmap'd 446 * MMIO range for our own or another device. These use a different 447 * pfn conversion and shouldn't be tracked as locked pages. 448 * For compound pages, any driver that sets the reserved bit in head 449 * page needs to set the reserved bit in all subpages to be safe. 
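 *
 * Concretely, a pfn is treated as invalid/reserved when it either has no
 * struct page at all (!pfn_valid()) or its page is marked PageReserved().
 * put_pfn() skips such pfns entirely, so they are never unpinned and are
 * never counted against the task's RLIMIT_MEMLOCK.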
450 */ 451 static bool is_invalid_reserved_pfn(unsigned long pfn) 452 { 453 if (pfn_valid(pfn)) 454 return PageReserved(pfn_to_page(pfn)); 455 456 return true; 457 } 458 459 static int put_pfn(unsigned long pfn, int prot) 460 { 461 if (!is_invalid_reserved_pfn(pfn)) { 462 struct page *page = pfn_to_page(pfn); 463 464 unpin_user_pages_dirty_lock(&page, 1, prot & IOMMU_WRITE); 465 return 1; 466 } 467 return 0; 468 } 469 470 #define VFIO_BATCH_MAX_CAPACITY (PAGE_SIZE / sizeof(struct page *)) 471 472 static void vfio_batch_init(struct vfio_batch *batch) 473 { 474 batch->size = 0; 475 batch->offset = 0; 476 477 if (unlikely(disable_hugepages)) 478 goto fallback; 479 480 batch->pages = (struct page **) __get_free_page(GFP_KERNEL); 481 if (!batch->pages) 482 goto fallback; 483 484 batch->capacity = VFIO_BATCH_MAX_CAPACITY; 485 return; 486 487 fallback: 488 batch->pages = &batch->fallback_page; 489 batch->capacity = 1; 490 } 491 492 static void vfio_batch_unpin(struct vfio_batch *batch, struct vfio_dma *dma) 493 { 494 while (batch->size) { 495 unsigned long pfn = page_to_pfn(batch->pages[batch->offset]); 496 497 put_pfn(pfn, dma->prot); 498 batch->offset++; 499 batch->size--; 500 } 501 } 502 503 static void vfio_batch_fini(struct vfio_batch *batch) 504 { 505 if (batch->capacity == VFIO_BATCH_MAX_CAPACITY) 506 free_page((unsigned long)batch->pages); 507 } 508 509 static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm, 510 unsigned long vaddr, unsigned long *pfn, 511 bool write_fault) 512 { 513 pte_t *ptep; 514 spinlock_t *ptl; 515 int ret; 516 517 ret = follow_pte(vma->vm_mm, vaddr, &ptep, &ptl); 518 if (ret) { 519 bool unlocked = false; 520 521 ret = fixup_user_fault(mm, vaddr, 522 FAULT_FLAG_REMOTE | 523 (write_fault ? FAULT_FLAG_WRITE : 0), 524 &unlocked); 525 if (unlocked) 526 return -EAGAIN; 527 528 if (ret) 529 return ret; 530 531 ret = follow_pte(vma->vm_mm, vaddr, &ptep, &ptl); 532 if (ret) 533 return ret; 534 } 535 536 if (write_fault && !pte_write(*ptep)) 537 ret = -EFAULT; 538 else 539 *pfn = pte_pfn(*ptep); 540 541 pte_unmap_unlock(ptep, ptl); 542 return ret; 543 } 544 545 /* 546 * Returns the positive number of pfns successfully obtained or a negative 547 * error code. 
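 *
 * Pinning is first attempted with pin_user_pages_remote() using
 * FOLL_LONGTERM (plus FOLL_WRITE for writable mappings).  If that fails
 * and vaddr lies in a VM_PFNMAP vma, the pfn is instead resolved by
 * faulting and walking the page table, in which case at most one pfn is
 * returned and no page reference is held.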
548 */ 549 static int vaddr_get_pfns(struct mm_struct *mm, unsigned long vaddr, 550 long npages, int prot, unsigned long *pfn, 551 struct page **pages) 552 { 553 struct vm_area_struct *vma; 554 unsigned int flags = 0; 555 int ret; 556 557 if (prot & IOMMU_WRITE) 558 flags |= FOLL_WRITE; 559 560 mmap_read_lock(mm); 561 ret = pin_user_pages_remote(mm, vaddr, npages, flags | FOLL_LONGTERM, 562 pages, NULL, NULL); 563 if (ret > 0) { 564 *pfn = page_to_pfn(pages[0]); 565 goto done; 566 } 567 568 vaddr = untagged_addr(vaddr); 569 570 retry: 571 vma = find_vma_intersection(mm, vaddr, vaddr + 1); 572 573 if (vma && vma->vm_flags & VM_PFNMAP) { 574 ret = follow_fault_pfn(vma, mm, vaddr, pfn, prot & IOMMU_WRITE); 575 if (ret == -EAGAIN) 576 goto retry; 577 578 if (!ret) { 579 if (is_invalid_reserved_pfn(*pfn)) 580 ret = 1; 581 else 582 ret = -EFAULT; 583 } 584 } 585 done: 586 mmap_read_unlock(mm); 587 return ret; 588 } 589 590 static int vfio_wait(struct vfio_iommu *iommu) 591 { 592 DEFINE_WAIT(wait); 593 594 prepare_to_wait(&iommu->vaddr_wait, &wait, TASK_KILLABLE); 595 mutex_unlock(&iommu->lock); 596 schedule(); 597 mutex_lock(&iommu->lock); 598 finish_wait(&iommu->vaddr_wait, &wait); 599 if (kthread_should_stop() || !iommu->container_open || 600 fatal_signal_pending(current)) { 601 return -EFAULT; 602 } 603 return WAITED; 604 } 605 606 /* 607 * Find dma struct and wait for its vaddr to be valid. iommu lock is dropped 608 * if the task waits, but is re-locked on return. Return result in *dma_p. 609 * Return 0 on success with no waiting, WAITED on success if waited, and -errno 610 * on error. 611 */ 612 static int vfio_find_dma_valid(struct vfio_iommu *iommu, dma_addr_t start, 613 size_t size, struct vfio_dma **dma_p) 614 { 615 int ret; 616 617 do { 618 *dma_p = vfio_find_dma(iommu, start, size); 619 if (!*dma_p) 620 ret = -EINVAL; 621 else if (!(*dma_p)->vaddr_invalid) 622 ret = 0; 623 else 624 ret = vfio_wait(iommu); 625 } while (ret > 0); 626 627 return ret; 628 } 629 630 /* 631 * Wait for all vaddr in the dma_list to become valid. iommu lock is dropped 632 * if the task waits, but is re-locked on return. Return 0 on success with no 633 * waiting, WAITED on success if waited, and -errno on error. 634 */ 635 static int vfio_wait_all_valid(struct vfio_iommu *iommu) 636 { 637 int ret = 0; 638 639 while (iommu->vaddr_invalid_count && ret >= 0) 640 ret = vfio_wait(iommu); 641 642 return ret; 643 } 644 645 /* 646 * Attempt to pin pages. We really don't want to track all the pfns and 647 * the iommu can only map chunks of consecutive pfns anyway, so get the 648 * first page and all consecutive pages with the same locking. 649 */ 650 static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, 651 long npage, unsigned long *pfn_base, 652 unsigned long limit, struct vfio_batch *batch) 653 { 654 unsigned long pfn; 655 struct mm_struct *mm = current->mm; 656 long ret, pinned = 0, lock_acct = 0; 657 bool rsvd; 658 dma_addr_t iova = vaddr - dma->vaddr + dma->iova; 659 660 /* This code path is only user initiated */ 661 if (!mm) 662 return -ENODEV; 663 664 if (batch->size) { 665 /* Leftover pages in batch from an earlier call. */ 666 *pfn_base = page_to_pfn(batch->pages[batch->offset]); 667 pfn = *pfn_base; 668 rsvd = is_invalid_reserved_pfn(*pfn_base); 669 } else { 670 *pfn_base = 0; 671 } 672 673 while (npage) { 674 if (!batch->size) { 675 /* Empty batch, so refill it. 
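                         * We ask vaddr_get_pfns() for at most
                         * batch->capacity pages, starting at the current
                         * vaddr.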
*/ 676 long req_pages = min_t(long, npage, batch->capacity); 677 678 ret = vaddr_get_pfns(mm, vaddr, req_pages, dma->prot, 679 &pfn, batch->pages); 680 if (ret < 0) 681 goto unpin_out; 682 683 batch->size = ret; 684 batch->offset = 0; 685 686 if (!*pfn_base) { 687 *pfn_base = pfn; 688 rsvd = is_invalid_reserved_pfn(*pfn_base); 689 } 690 } 691 692 /* 693 * pfn is preset for the first iteration of this inner loop and 694 * updated at the end to handle a VM_PFNMAP pfn. In that case, 695 * batch->pages isn't valid (there's no struct page), so allow 696 * batch->pages to be touched only when there's more than one 697 * pfn to check, which guarantees the pfns are from a 698 * !VM_PFNMAP vma. 699 */ 700 while (true) { 701 if (pfn != *pfn_base + pinned || 702 rsvd != is_invalid_reserved_pfn(pfn)) 703 goto out; 704 705 /* 706 * Reserved pages aren't counted against the user, 707 * externally pinned pages are already counted against 708 * the user. 709 */ 710 if (!rsvd && !vfio_find_vpfn(dma, iova)) { 711 if (!dma->lock_cap && 712 mm->locked_vm + lock_acct + 1 > limit) { 713 pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", 714 __func__, limit << PAGE_SHIFT); 715 ret = -ENOMEM; 716 goto unpin_out; 717 } 718 lock_acct++; 719 } 720 721 pinned++; 722 npage--; 723 vaddr += PAGE_SIZE; 724 iova += PAGE_SIZE; 725 batch->offset++; 726 batch->size--; 727 728 if (!batch->size) 729 break; 730 731 pfn = page_to_pfn(batch->pages[batch->offset]); 732 } 733 734 if (unlikely(disable_hugepages)) 735 break; 736 } 737 738 out: 739 ret = vfio_lock_acct(dma, lock_acct, false); 740 741 unpin_out: 742 if (ret < 0) { 743 if (pinned && !rsvd) { 744 for (pfn = *pfn_base ; pinned ; pfn++, pinned--) 745 put_pfn(pfn, dma->prot); 746 } 747 vfio_batch_unpin(batch, dma); 748 749 return ret; 750 } 751 752 return pinned; 753 } 754 755 static long vfio_unpin_pages_remote(struct vfio_dma *dma, dma_addr_t iova, 756 unsigned long pfn, long npage, 757 bool do_accounting) 758 { 759 long unlocked = 0, locked = 0; 760 long i; 761 762 for (i = 0; i < npage; i++, iova += PAGE_SIZE) { 763 if (put_pfn(pfn++, dma->prot)) { 764 unlocked++; 765 if (vfio_find_vpfn(dma, iova)) 766 locked++; 767 } 768 } 769 770 if (do_accounting) 771 vfio_lock_acct(dma, locked - unlocked, true); 772 773 return unlocked; 774 } 775 776 static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr, 777 unsigned long *pfn_base, bool do_accounting) 778 { 779 struct page *pages[1]; 780 struct mm_struct *mm; 781 int ret; 782 783 mm = get_task_mm(dma->task); 784 if (!mm) 785 return -ENODEV; 786 787 ret = vaddr_get_pfns(mm, vaddr, 1, dma->prot, pfn_base, pages); 788 if (ret == 1 && do_accounting && !is_invalid_reserved_pfn(*pfn_base)) { 789 ret = vfio_lock_acct(dma, 1, true); 790 if (ret) { 791 put_pfn(*pfn_base, dma->prot); 792 if (ret == -ENOMEM) 793 pr_warn("%s: Task %s (%d) RLIMIT_MEMLOCK " 794 "(%ld) exceeded\n", __func__, 795 dma->task->comm, task_pid_nr(dma->task), 796 task_rlimit(dma->task, RLIMIT_MEMLOCK)); 797 } 798 } 799 800 mmput(mm); 801 return ret; 802 } 803 804 static int vfio_unpin_page_external(struct vfio_dma *dma, dma_addr_t iova, 805 bool do_accounting) 806 { 807 int unlocked; 808 struct vfio_pfn *vpfn = vfio_find_vpfn(dma, iova); 809 810 if (!vpfn) 811 return 0; 812 813 unlocked = vfio_iova_put_vfio_pfn(dma, vpfn); 814 815 if (do_accounting) 816 vfio_lock_acct(dma, -unlocked, true); 817 818 return unlocked; 819 } 820 821 static int vfio_iommu_type1_pin_pages(void *iommu_data, 822 struct iommu_group *iommu_group, 823 unsigned long *user_pfn, 824 
                                      int npage, int prot,
                                      unsigned long *phys_pfn)
{
        struct vfio_iommu *iommu = iommu_data;
        struct vfio_group *group;
        int i, j, ret;
        unsigned long remote_vaddr;
        struct vfio_dma *dma;
        bool do_accounting;
        dma_addr_t iova;

        if (!iommu || !user_pfn || !phys_pfn)
                return -EINVAL;

        /* Supported for v2 version only */
        if (!iommu->v2)
                return -EACCES;

        mutex_lock(&iommu->lock);

        /*
         * Wait for all necessary vaddr's to be valid so they can be used in
         * the main loop without dropping the lock, to avoid racing vs unmap.
         */
again:
        if (iommu->vaddr_invalid_count) {
                for (i = 0; i < npage; i++) {
                        iova = user_pfn[i] << PAGE_SHIFT;
                        ret = vfio_find_dma_valid(iommu, iova, PAGE_SIZE, &dma);
                        if (ret < 0)
                                goto pin_done;
                        if (ret == WAITED)
                                goto again;
                }
        }

        /* Fail if notifier list is empty */
        if (!iommu->notifier.head) {
                ret = -EINVAL;
                goto pin_done;
        }

        /*
         * If an iommu capable domain exists in the container then all pages
         * are already pinned and accounted.  Accounting should only be done
         * if there is no iommu capable domain in the container.
         */
        do_accounting = !IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu);

        for (i = 0; i < npage; i++) {
                struct vfio_pfn *vpfn;

                iova = user_pfn[i] << PAGE_SHIFT;
                dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
                if (!dma) {
                        ret = -EINVAL;
                        goto pin_unwind;
                }

                if ((dma->prot & prot) != prot) {
                        ret = -EPERM;
                        goto pin_unwind;
                }

                vpfn = vfio_iova_get_vfio_pfn(dma, iova);
                if (vpfn) {
                        phys_pfn[i] = vpfn->pfn;
                        continue;
                }

                remote_vaddr = dma->vaddr + (iova - dma->iova);
                ret = vfio_pin_page_external(dma, remote_vaddr, &phys_pfn[i],
                                             do_accounting);
                if (ret)
                        goto pin_unwind;

                ret = vfio_add_to_pfn_list(dma, iova, phys_pfn[i]);
                if (ret) {
                        if (put_pfn(phys_pfn[i], dma->prot) && do_accounting)
                                vfio_lock_acct(dma, -1, true);
                        goto pin_unwind;
                }

                if (iommu->dirty_page_tracking) {
                        unsigned long pgshift = __ffs(iommu->pgsize_bitmap);

                        /*
                         * Bitmap populated with the smallest supported page
                         * size
                         */
                        bitmap_set(dma->bitmap,
                                   (iova - dma->iova) >> pgshift, 1);
                }
        }
        ret = i;

        group = vfio_iommu_find_iommu_group(iommu, iommu_group);
        if (!group->pinned_page_dirty_scope) {
                group->pinned_page_dirty_scope = true;
                iommu->num_non_pinned_groups--;
        }

        goto pin_done;

pin_unwind:
        phys_pfn[i] = 0;
        for (j = 0; j < i; j++) {
                dma_addr_t iova;

                iova = user_pfn[j] << PAGE_SHIFT;
                dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
                vfio_unpin_page_external(dma, iova, do_accounting);
                phys_pfn[j] = 0;
        }
pin_done:
        mutex_unlock(&iommu->lock);
        return ret;
}

static int vfio_iommu_type1_unpin_pages(void *iommu_data,
                                        unsigned long *user_pfn,
                                        int npage)
{
        struct vfio_iommu *iommu = iommu_data;
        bool do_accounting;
        int i;

        if (!iommu || !user_pfn)
                return -EINVAL;

        /* Supported for v2 version only */
        if (!iommu->v2)
                return -EACCES;

        mutex_lock(&iommu->lock);

        do_accounting = !IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu);
        for (i = 0; i < npage; i++) {
                struct vfio_dma *dma;
                dma_addr_t iova;

                iova = user_pfn[i] << PAGE_SHIFT;
                dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
                if (!dma)
                        goto unpin_exit;
                vfio_unpin_page_external(dma,
iova, do_accounting); 970 } 971 972 unpin_exit: 973 mutex_unlock(&iommu->lock); 974 return i > npage ? npage : (i > 0 ? i : -EINVAL); 975 } 976 977 static long vfio_sync_unpin(struct vfio_dma *dma, struct vfio_domain *domain, 978 struct list_head *regions, 979 struct iommu_iotlb_gather *iotlb_gather) 980 { 981 long unlocked = 0; 982 struct vfio_regions *entry, *next; 983 984 iommu_iotlb_sync(domain->domain, iotlb_gather); 985 986 list_for_each_entry_safe(entry, next, regions, list) { 987 unlocked += vfio_unpin_pages_remote(dma, 988 entry->iova, 989 entry->phys >> PAGE_SHIFT, 990 entry->len >> PAGE_SHIFT, 991 false); 992 list_del(&entry->list); 993 kfree(entry); 994 } 995 996 cond_resched(); 997 998 return unlocked; 999 } 1000 1001 /* 1002 * Generally, VFIO needs to unpin remote pages after each IOTLB flush. 1003 * Therefore, when using IOTLB flush sync interface, VFIO need to keep track 1004 * of these regions (currently using a list). 1005 * 1006 * This value specifies maximum number of regions for each IOTLB flush sync. 1007 */ 1008 #define VFIO_IOMMU_TLB_SYNC_MAX 512 1009 1010 static size_t unmap_unpin_fast(struct vfio_domain *domain, 1011 struct vfio_dma *dma, dma_addr_t *iova, 1012 size_t len, phys_addr_t phys, long *unlocked, 1013 struct list_head *unmapped_list, 1014 int *unmapped_cnt, 1015 struct iommu_iotlb_gather *iotlb_gather) 1016 { 1017 size_t unmapped = 0; 1018 struct vfio_regions *entry = kzalloc(sizeof(*entry), GFP_KERNEL); 1019 1020 if (entry) { 1021 unmapped = iommu_unmap_fast(domain->domain, *iova, len, 1022 iotlb_gather); 1023 1024 if (!unmapped) { 1025 kfree(entry); 1026 } else { 1027 entry->iova = *iova; 1028 entry->phys = phys; 1029 entry->len = unmapped; 1030 list_add_tail(&entry->list, unmapped_list); 1031 1032 *iova += unmapped; 1033 (*unmapped_cnt)++; 1034 } 1035 } 1036 1037 /* 1038 * Sync if the number of fast-unmap regions hits the limit 1039 * or in case of errors. 1040 */ 1041 if (*unmapped_cnt >= VFIO_IOMMU_TLB_SYNC_MAX || !unmapped) { 1042 *unlocked += vfio_sync_unpin(dma, domain, unmapped_list, 1043 iotlb_gather); 1044 *unmapped_cnt = 0; 1045 } 1046 1047 return unmapped; 1048 } 1049 1050 static size_t unmap_unpin_slow(struct vfio_domain *domain, 1051 struct vfio_dma *dma, dma_addr_t *iova, 1052 size_t len, phys_addr_t phys, 1053 long *unlocked) 1054 { 1055 size_t unmapped = iommu_unmap(domain->domain, *iova, len); 1056 1057 if (unmapped) { 1058 *unlocked += vfio_unpin_pages_remote(dma, *iova, 1059 phys >> PAGE_SHIFT, 1060 unmapped >> PAGE_SHIFT, 1061 false); 1062 *iova += unmapped; 1063 cond_resched(); 1064 } 1065 return unmapped; 1066 } 1067 1068 static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma, 1069 bool do_accounting) 1070 { 1071 dma_addr_t iova = dma->iova, end = dma->iova + dma->size; 1072 struct vfio_domain *domain, *d; 1073 LIST_HEAD(unmapped_region_list); 1074 struct iommu_iotlb_gather iotlb_gather; 1075 int unmapped_region_cnt = 0; 1076 long unlocked = 0; 1077 1078 if (!dma->size) 1079 return 0; 1080 1081 if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu)) 1082 return 0; 1083 1084 /* 1085 * We use the IOMMU to track the physical addresses, otherwise we'd 1086 * need a much more complicated tracking system. Unfortunately that 1087 * means we need to use one of the iommu domains to figure out the 1088 * pfns to unpin. The rest need to be unmapped in advance so we have 1089 * no iommu translations remaining when the pages are unpinned. 
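         *
         * Therefore the first domain on domain_list is kept mapped and used
         * for iommu_iova_to_phys() lookups, while every other domain is
         * unmapped up front; the lookup domain itself is then unmapped chunk
         * by chunk in the loop below as the pages are unpinned.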
1090 */ 1091 domain = d = list_first_entry(&iommu->domain_list, 1092 struct vfio_domain, next); 1093 1094 list_for_each_entry_continue(d, &iommu->domain_list, next) { 1095 iommu_unmap(d->domain, dma->iova, dma->size); 1096 cond_resched(); 1097 } 1098 1099 iommu_iotlb_gather_init(&iotlb_gather); 1100 while (iova < end) { 1101 size_t unmapped, len; 1102 phys_addr_t phys, next; 1103 1104 phys = iommu_iova_to_phys(domain->domain, iova); 1105 if (WARN_ON(!phys)) { 1106 iova += PAGE_SIZE; 1107 continue; 1108 } 1109 1110 /* 1111 * To optimize for fewer iommu_unmap() calls, each of which 1112 * may require hardware cache flushing, try to find the 1113 * largest contiguous physical memory chunk to unmap. 1114 */ 1115 for (len = PAGE_SIZE; 1116 !domain->fgsp && iova + len < end; len += PAGE_SIZE) { 1117 next = iommu_iova_to_phys(domain->domain, iova + len); 1118 if (next != phys + len) 1119 break; 1120 } 1121 1122 /* 1123 * First, try to use fast unmap/unpin. In case of failure, 1124 * switch to slow unmap/unpin path. 1125 */ 1126 unmapped = unmap_unpin_fast(domain, dma, &iova, len, phys, 1127 &unlocked, &unmapped_region_list, 1128 &unmapped_region_cnt, 1129 &iotlb_gather); 1130 if (!unmapped) { 1131 unmapped = unmap_unpin_slow(domain, dma, &iova, len, 1132 phys, &unlocked); 1133 if (WARN_ON(!unmapped)) 1134 break; 1135 } 1136 } 1137 1138 dma->iommu_mapped = false; 1139 1140 if (unmapped_region_cnt) { 1141 unlocked += vfio_sync_unpin(dma, domain, &unmapped_region_list, 1142 &iotlb_gather); 1143 } 1144 1145 if (do_accounting) { 1146 vfio_lock_acct(dma, -unlocked, true); 1147 return 0; 1148 } 1149 return unlocked; 1150 } 1151 1152 static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma) 1153 { 1154 WARN_ON(!RB_EMPTY_ROOT(&dma->pfn_list)); 1155 vfio_unmap_unpin(iommu, dma, true); 1156 vfio_unlink_dma(iommu, dma); 1157 put_task_struct(dma->task); 1158 vfio_dma_bitmap_free(dma); 1159 if (dma->vaddr_invalid) { 1160 iommu->vaddr_invalid_count--; 1161 wake_up_all(&iommu->vaddr_wait); 1162 } 1163 kfree(dma); 1164 iommu->dma_avail++; 1165 } 1166 1167 static void vfio_update_pgsize_bitmap(struct vfio_iommu *iommu) 1168 { 1169 struct vfio_domain *domain; 1170 1171 iommu->pgsize_bitmap = ULONG_MAX; 1172 1173 list_for_each_entry(domain, &iommu->domain_list, next) 1174 iommu->pgsize_bitmap &= domain->domain->pgsize_bitmap; 1175 1176 /* 1177 * In case the IOMMU supports page sizes smaller than PAGE_SIZE 1178 * we pretend PAGE_SIZE is supported and hide sub-PAGE_SIZE sizes. 1179 * That way the user will be able to map/unmap buffers whose size/ 1180 * start address is aligned with PAGE_SIZE. Pinning code uses that 1181 * granularity while iommu driver can use the sub-PAGE_SIZE size 1182 * to map the buffer. 1183 */ 1184 if (iommu->pgsize_bitmap & ~PAGE_MASK) { 1185 iommu->pgsize_bitmap &= PAGE_MASK; 1186 iommu->pgsize_bitmap |= PAGE_SIZE; 1187 } 1188 } 1189 1190 static int update_user_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu, 1191 struct vfio_dma *dma, dma_addr_t base_iova, 1192 size_t pgsize) 1193 { 1194 unsigned long pgshift = __ffs(pgsize); 1195 unsigned long nbits = dma->size >> pgshift; 1196 unsigned long bit_offset = (dma->iova - base_iova) >> pgshift; 1197 unsigned long copy_offset = bit_offset / BITS_PER_LONG; 1198 unsigned long shift = bit_offset % BITS_PER_LONG; 1199 unsigned long leftover; 1200 1201 /* 1202 * mark all pages dirty if any IOMMU capable device is not able 1203 * to report dirty pages and all pages are pinned and mapped. 
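         * The value of iommu->num_non_pinned_groups tells us whether such a
         * device (one outside pinned-page dirty scope) is attached, and
         * dma->iommu_mapped tells us this range is mapped through the IOMMU,
         * so the combination forces us to assume the whole range is dirty.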
1204 */ 1205 if (iommu->num_non_pinned_groups && dma->iommu_mapped) 1206 bitmap_set(dma->bitmap, 0, nbits); 1207 1208 if (shift) { 1209 bitmap_shift_left(dma->bitmap, dma->bitmap, shift, 1210 nbits + shift); 1211 1212 if (copy_from_user(&leftover, 1213 (void __user *)(bitmap + copy_offset), 1214 sizeof(leftover))) 1215 return -EFAULT; 1216 1217 bitmap_or(dma->bitmap, dma->bitmap, &leftover, shift); 1218 } 1219 1220 if (copy_to_user((void __user *)(bitmap + copy_offset), dma->bitmap, 1221 DIRTY_BITMAP_BYTES(nbits + shift))) 1222 return -EFAULT; 1223 1224 return 0; 1225 } 1226 1227 static int vfio_iova_dirty_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu, 1228 dma_addr_t iova, size_t size, size_t pgsize) 1229 { 1230 struct vfio_dma *dma; 1231 struct rb_node *n; 1232 unsigned long pgshift = __ffs(pgsize); 1233 int ret; 1234 1235 /* 1236 * GET_BITMAP request must fully cover vfio_dma mappings. Multiple 1237 * vfio_dma mappings may be clubbed by specifying large ranges, but 1238 * there must not be any previous mappings bisected by the range. 1239 * An error will be returned if these conditions are not met. 1240 */ 1241 dma = vfio_find_dma(iommu, iova, 1); 1242 if (dma && dma->iova != iova) 1243 return -EINVAL; 1244 1245 dma = vfio_find_dma(iommu, iova + size - 1, 0); 1246 if (dma && dma->iova + dma->size != iova + size) 1247 return -EINVAL; 1248 1249 for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) { 1250 struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node); 1251 1252 if (dma->iova < iova) 1253 continue; 1254 1255 if (dma->iova > iova + size - 1) 1256 break; 1257 1258 ret = update_user_bitmap(bitmap, iommu, dma, iova, pgsize); 1259 if (ret) 1260 return ret; 1261 1262 /* 1263 * Re-populate bitmap to include all pinned pages which are 1264 * considered as dirty but exclude pages which are unpinned and 1265 * pages which are marked dirty by vfio_dma_rw() 1266 */ 1267 bitmap_clear(dma->bitmap, 0, dma->size >> pgshift); 1268 vfio_dma_populate_bitmap(dma, pgsize); 1269 } 1270 return 0; 1271 } 1272 1273 static int verify_bitmap_size(uint64_t npages, uint64_t bitmap_size) 1274 { 1275 if (!npages || !bitmap_size || (bitmap_size > DIRTY_BITMAP_SIZE_MAX) || 1276 (bitmap_size < DIRTY_BITMAP_BYTES(npages))) 1277 return -EINVAL; 1278 1279 return 0; 1280 } 1281 1282 static int vfio_dma_do_unmap(struct vfio_iommu *iommu, 1283 struct vfio_iommu_type1_dma_unmap *unmap, 1284 struct vfio_bitmap *bitmap) 1285 { 1286 struct vfio_dma *dma, *dma_last = NULL; 1287 size_t unmapped = 0, pgsize; 1288 int ret = -EINVAL, retries = 0; 1289 unsigned long pgshift; 1290 dma_addr_t iova = unmap->iova; 1291 unsigned long size = unmap->size; 1292 bool unmap_all = unmap->flags & VFIO_DMA_UNMAP_FLAG_ALL; 1293 bool invalidate_vaddr = unmap->flags & VFIO_DMA_UNMAP_FLAG_VADDR; 1294 struct rb_node *n, *first_n; 1295 1296 mutex_lock(&iommu->lock); 1297 1298 pgshift = __ffs(iommu->pgsize_bitmap); 1299 pgsize = (size_t)1 << pgshift; 1300 1301 if (iova & (pgsize - 1)) 1302 goto unlock; 1303 1304 if (unmap_all) { 1305 if (iova || size) 1306 goto unlock; 1307 size = SIZE_MAX; 1308 } else if (!size || size & (pgsize - 1)) { 1309 goto unlock; 1310 } 1311 1312 if (iova + size - 1 < iova || size > SIZE_MAX) 1313 goto unlock; 1314 1315 /* When dirty tracking is enabled, allow only min supported pgsize */ 1316 if ((unmap->flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) && 1317 (!iommu->dirty_page_tracking || (bitmap->pgsize != pgsize))) { 1318 goto unlock; 1319 } 1320 1321 WARN_ON((pgsize - 1) & PAGE_MASK); 1322 again: 1323 /* 1324 
         * vfio-iommu-type1 (v1) - User mappings were coalesced together to
         * avoid tracking individual mappings.  This means that the granularity
         * of the original mapping was lost and the user was allowed to attempt
         * to unmap any range.  Depending on the contiguousness of physical
         * memory and page sizes supported by the IOMMU, arbitrary unmaps may
         * or may not have worked.  We only guaranteed unmap granularity
         * matching the original mapping; even though it was untracked here,
         * the original mappings are reflected in IOMMU mappings.  This
         * resulted in a couple unusual behaviors.  First, if a range is not
         * able to be unmapped, ex. a set of 4k pages that was mapped as a
         * 2M hugepage into the IOMMU, the unmap ioctl returns success but with
         * a zero sized unmap.  Also, if an unmap request overlaps the first
         * address of a hugepage, the IOMMU will unmap the entire hugepage.
         * This also returns success and the returned unmap size reflects the
         * actual size unmapped.
         *
         * We attempt to maintain compatibility with this "v1" interface, but
         * we take control out of the hands of the IOMMU.  Therefore, an unmap
         * request offset from the beginning of the original mapping will
         * return success with zero sized unmap.  And an unmap request covering
         * the first iova of mapping will unmap the entire range.
         *
         * The v2 version of this interface intends to be more deterministic.
         * Unmap requests must fully cover previous mappings.  Multiple
         * mappings may still be unmapped by specifying large ranges, but there
         * must not be any previous mappings bisected by the range.  An error
         * will be returned if these conditions are not met.  The v2 interface
         * will only return success and a size of zero if there were no
         * mappings within the range.
         */
        if (iommu->v2 && !unmap_all) {
                dma = vfio_find_dma(iommu, iova, 1);
                if (dma && dma->iova != iova)
                        goto unlock;

                dma = vfio_find_dma(iommu, iova + size - 1, 0);
                if (dma && dma->iova + dma->size != iova + size)
                        goto unlock;
        }

        ret = 0;
        n = first_n = vfio_find_dma_first_node(iommu, iova, size);

        while (n) {
                dma = rb_entry(n, struct vfio_dma, node);
                if (dma->iova >= iova + size)
                        break;

                if (!iommu->v2 && iova > dma->iova)
                        break;
                /*
                 * Only a task with the same address space as the one that
                 * mapped this iova range is allowed to unmap it.
                 */
                if (dma->task->mm != current->mm)
                        break;

                if (invalidate_vaddr) {
                        if (dma->vaddr_invalid) {
                                struct rb_node *last_n = n;

                                for (n = first_n; n != last_n; n = rb_next(n)) {
                                        dma = rb_entry(n,
                                                       struct vfio_dma, node);
                                        dma->vaddr_invalid = false;
                                        iommu->vaddr_invalid_count--;
                                }
                                ret = -EINVAL;
                                unmapped = 0;
                                break;
                        }
                        dma->vaddr_invalid = true;
                        iommu->vaddr_invalid_count++;
                        unmapped += dma->size;
                        n = rb_next(n);
                        continue;
                }

                if (!RB_EMPTY_ROOT(&dma->pfn_list)) {
                        struct vfio_iommu_type1_dma_unmap nb_unmap;

                        if (dma_last == dma) {
                                BUG_ON(++retries > 10);
                        } else {
                                dma_last = dma;
                                retries = 0;
                        }

                        nb_unmap.iova = dma->iova;
                        nb_unmap.size = dma->size;

                        /*
                         * Notify anyone (mdev vendor drivers) to invalidate and
                         * unmap iovas within the range we're about to unmap.
                         * Vendor drivers MUST unpin pages in response to an
                         * invalidation.
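                         * The notifier runs without iommu->lock held, so once
                         * it returns we retake the lock and restart the walk
                         * from the top; the dma_last/retries BUG_ON above
                         * guards against a vendor driver that never unpins.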
1420 */ 1421 mutex_unlock(&iommu->lock); 1422 blocking_notifier_call_chain(&iommu->notifier, 1423 VFIO_IOMMU_NOTIFY_DMA_UNMAP, 1424 &nb_unmap); 1425 mutex_lock(&iommu->lock); 1426 goto again; 1427 } 1428 1429 if (unmap->flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) { 1430 ret = update_user_bitmap(bitmap->data, iommu, dma, 1431 iova, pgsize); 1432 if (ret) 1433 break; 1434 } 1435 1436 unmapped += dma->size; 1437 n = rb_next(n); 1438 vfio_remove_dma(iommu, dma); 1439 } 1440 1441 unlock: 1442 mutex_unlock(&iommu->lock); 1443 1444 /* Report how much was unmapped */ 1445 unmap->size = unmapped; 1446 1447 return ret; 1448 } 1449 1450 static int vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova, 1451 unsigned long pfn, long npage, int prot) 1452 { 1453 struct vfio_domain *d; 1454 int ret; 1455 1456 list_for_each_entry(d, &iommu->domain_list, next) { 1457 ret = iommu_map(d->domain, iova, (phys_addr_t)pfn << PAGE_SHIFT, 1458 npage << PAGE_SHIFT, prot | d->prot); 1459 if (ret) 1460 goto unwind; 1461 1462 cond_resched(); 1463 } 1464 1465 return 0; 1466 1467 unwind: 1468 list_for_each_entry_continue_reverse(d, &iommu->domain_list, next) { 1469 iommu_unmap(d->domain, iova, npage << PAGE_SHIFT); 1470 cond_resched(); 1471 } 1472 1473 return ret; 1474 } 1475 1476 static int vfio_pin_map_dma(struct vfio_iommu *iommu, struct vfio_dma *dma, 1477 size_t map_size) 1478 { 1479 dma_addr_t iova = dma->iova; 1480 unsigned long vaddr = dma->vaddr; 1481 struct vfio_batch batch; 1482 size_t size = map_size; 1483 long npage; 1484 unsigned long pfn, limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; 1485 int ret = 0; 1486 1487 vfio_batch_init(&batch); 1488 1489 while (size) { 1490 /* Pin a contiguous chunk of memory */ 1491 npage = vfio_pin_pages_remote(dma, vaddr + dma->size, 1492 size >> PAGE_SHIFT, &pfn, limit, 1493 &batch); 1494 if (npage <= 0) { 1495 WARN_ON(!npage); 1496 ret = (int)npage; 1497 break; 1498 } 1499 1500 /* Map it! */ 1501 ret = vfio_iommu_map(iommu, iova + dma->size, pfn, npage, 1502 dma->prot); 1503 if (ret) { 1504 vfio_unpin_pages_remote(dma, iova + dma->size, pfn, 1505 npage, true); 1506 vfio_batch_unpin(&batch, dma); 1507 break; 1508 } 1509 1510 size -= npage << PAGE_SHIFT; 1511 dma->size += npage << PAGE_SHIFT; 1512 } 1513 1514 vfio_batch_fini(&batch); 1515 dma->iommu_mapped = true; 1516 1517 if (ret) 1518 vfio_remove_dma(iommu, dma); 1519 1520 return ret; 1521 } 1522 1523 /* 1524 * Check dma map request is within a valid iova range 1525 */ 1526 static bool vfio_iommu_iova_dma_valid(struct vfio_iommu *iommu, 1527 dma_addr_t start, dma_addr_t end) 1528 { 1529 struct list_head *iova = &iommu->iova_list; 1530 struct vfio_iova *node; 1531 1532 list_for_each_entry(node, iova, list) { 1533 if (start >= node->start && end <= node->end) 1534 return true; 1535 } 1536 1537 /* 1538 * Check for list_empty() as well since a container with 1539 * a single mdev device will have an empty list. 
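         * In that case there is no IOMMU-imposed aperture to enforce, so any
         * iova the user asks for is accepted.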
1540 */ 1541 return list_empty(iova); 1542 } 1543 1544 static int vfio_dma_do_map(struct vfio_iommu *iommu, 1545 struct vfio_iommu_type1_dma_map *map) 1546 { 1547 bool set_vaddr = map->flags & VFIO_DMA_MAP_FLAG_VADDR; 1548 dma_addr_t iova = map->iova; 1549 unsigned long vaddr = map->vaddr; 1550 size_t size = map->size; 1551 int ret = 0, prot = 0; 1552 size_t pgsize; 1553 struct vfio_dma *dma; 1554 1555 /* Verify that none of our __u64 fields overflow */ 1556 if (map->size != size || map->vaddr != vaddr || map->iova != iova) 1557 return -EINVAL; 1558 1559 /* READ/WRITE from device perspective */ 1560 if (map->flags & VFIO_DMA_MAP_FLAG_WRITE) 1561 prot |= IOMMU_WRITE; 1562 if (map->flags & VFIO_DMA_MAP_FLAG_READ) 1563 prot |= IOMMU_READ; 1564 1565 if ((prot && set_vaddr) || (!prot && !set_vaddr)) 1566 return -EINVAL; 1567 1568 mutex_lock(&iommu->lock); 1569 1570 pgsize = (size_t)1 << __ffs(iommu->pgsize_bitmap); 1571 1572 WARN_ON((pgsize - 1) & PAGE_MASK); 1573 1574 if (!size || (size | iova | vaddr) & (pgsize - 1)) { 1575 ret = -EINVAL; 1576 goto out_unlock; 1577 } 1578 1579 /* Don't allow IOVA or virtual address wrap */ 1580 if (iova + size - 1 < iova || vaddr + size - 1 < vaddr) { 1581 ret = -EINVAL; 1582 goto out_unlock; 1583 } 1584 1585 dma = vfio_find_dma(iommu, iova, size); 1586 if (set_vaddr) { 1587 if (!dma) { 1588 ret = -ENOENT; 1589 } else if (!dma->vaddr_invalid || dma->iova != iova || 1590 dma->size != size) { 1591 ret = -EINVAL; 1592 } else { 1593 dma->vaddr = vaddr; 1594 dma->vaddr_invalid = false; 1595 iommu->vaddr_invalid_count--; 1596 wake_up_all(&iommu->vaddr_wait); 1597 } 1598 goto out_unlock; 1599 } else if (dma) { 1600 ret = -EEXIST; 1601 goto out_unlock; 1602 } 1603 1604 if (!iommu->dma_avail) { 1605 ret = -ENOSPC; 1606 goto out_unlock; 1607 } 1608 1609 if (!vfio_iommu_iova_dma_valid(iommu, iova, iova + size - 1)) { 1610 ret = -EINVAL; 1611 goto out_unlock; 1612 } 1613 1614 dma = kzalloc(sizeof(*dma), GFP_KERNEL); 1615 if (!dma) { 1616 ret = -ENOMEM; 1617 goto out_unlock; 1618 } 1619 1620 iommu->dma_avail--; 1621 dma->iova = iova; 1622 dma->vaddr = vaddr; 1623 dma->prot = prot; 1624 1625 /* 1626 * We need to be able to both add to a task's locked memory and test 1627 * against the locked memory limit and we need to be able to do both 1628 * outside of this call path as pinning can be asynchronous via the 1629 * external interfaces for mdev devices. RLIMIT_MEMLOCK requires a 1630 * task_struct and VM locked pages requires an mm_struct, however 1631 * holding an indefinite mm reference is not recommended, therefore we 1632 * only hold a reference to a task. We could hold a reference to 1633 * current, however QEMU uses this call path through vCPU threads, 1634 * which can be killed resulting in a NULL mm and failure in the unmap 1635 * path when called via a different thread. Avoid this problem by 1636 * using the group_leader as threads within the same group require 1637 * both CLONE_THREAD and CLONE_VM and will therefore use the same 1638 * mm_struct. 1639 * 1640 * Previously we also used the task for testing CAP_IPC_LOCK at the 1641 * time of pinning and accounting, however has_capability() makes use 1642 * of real_cred, a copy-on-write field, so we can't guarantee that it 1643 * matches group_leader, or in fact that it might not change by the 1644 * time it's evaluated. 
If a process were to call MAP_DMA with 1645 * CAP_IPC_LOCK but later drop it, it doesn't make sense that they 1646 * possibly see different results for an iommu_mapped vfio_dma vs 1647 * externally mapped. Therefore track CAP_IPC_LOCK in vfio_dma at the 1648 * time of calling MAP_DMA. 1649 */ 1650 get_task_struct(current->group_leader); 1651 dma->task = current->group_leader; 1652 dma->lock_cap = capable(CAP_IPC_LOCK); 1653 1654 dma->pfn_list = RB_ROOT; 1655 1656 /* Insert zero-sized and grow as we map chunks of it */ 1657 vfio_link_dma(iommu, dma); 1658 1659 /* Don't pin and map if container doesn't contain IOMMU capable domain*/ 1660 if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu)) 1661 dma->size = size; 1662 else 1663 ret = vfio_pin_map_dma(iommu, dma, size); 1664 1665 if (!ret && iommu->dirty_page_tracking) { 1666 ret = vfio_dma_bitmap_alloc(dma, pgsize); 1667 if (ret) 1668 vfio_remove_dma(iommu, dma); 1669 } 1670 1671 out_unlock: 1672 mutex_unlock(&iommu->lock); 1673 return ret; 1674 } 1675 1676 static int vfio_bus_type(struct device *dev, void *data) 1677 { 1678 struct bus_type **bus = data; 1679 1680 if (*bus && *bus != dev->bus) 1681 return -EINVAL; 1682 1683 *bus = dev->bus; 1684 1685 return 0; 1686 } 1687 1688 static int vfio_iommu_replay(struct vfio_iommu *iommu, 1689 struct vfio_domain *domain) 1690 { 1691 struct vfio_batch batch; 1692 struct vfio_domain *d = NULL; 1693 struct rb_node *n; 1694 unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; 1695 int ret; 1696 1697 ret = vfio_wait_all_valid(iommu); 1698 if (ret < 0) 1699 return ret; 1700 1701 /* Arbitrarily pick the first domain in the list for lookups */ 1702 if (!list_empty(&iommu->domain_list)) 1703 d = list_first_entry(&iommu->domain_list, 1704 struct vfio_domain, next); 1705 1706 vfio_batch_init(&batch); 1707 1708 n = rb_first(&iommu->dma_list); 1709 1710 for (; n; n = rb_next(n)) { 1711 struct vfio_dma *dma; 1712 dma_addr_t iova; 1713 1714 dma = rb_entry(n, struct vfio_dma, node); 1715 iova = dma->iova; 1716 1717 while (iova < dma->iova + dma->size) { 1718 phys_addr_t phys; 1719 size_t size; 1720 1721 if (dma->iommu_mapped) { 1722 phys_addr_t p; 1723 dma_addr_t i; 1724 1725 if (WARN_ON(!d)) { /* mapped w/o a domain?! 
*/ 1726 ret = -EINVAL; 1727 goto unwind; 1728 } 1729 1730 phys = iommu_iova_to_phys(d->domain, iova); 1731 1732 if (WARN_ON(!phys)) { 1733 iova += PAGE_SIZE; 1734 continue; 1735 } 1736 1737 size = PAGE_SIZE; 1738 p = phys + size; 1739 i = iova + size; 1740 while (i < dma->iova + dma->size && 1741 p == iommu_iova_to_phys(d->domain, i)) { 1742 size += PAGE_SIZE; 1743 p += PAGE_SIZE; 1744 i += PAGE_SIZE; 1745 } 1746 } else { 1747 unsigned long pfn; 1748 unsigned long vaddr = dma->vaddr + 1749 (iova - dma->iova); 1750 size_t n = dma->iova + dma->size - iova; 1751 long npage; 1752 1753 npage = vfio_pin_pages_remote(dma, vaddr, 1754 n >> PAGE_SHIFT, 1755 &pfn, limit, 1756 &batch); 1757 if (npage <= 0) { 1758 WARN_ON(!npage); 1759 ret = (int)npage; 1760 goto unwind; 1761 } 1762 1763 phys = pfn << PAGE_SHIFT; 1764 size = npage << PAGE_SHIFT; 1765 } 1766 1767 ret = iommu_map(domain->domain, iova, phys, 1768 size, dma->prot | domain->prot); 1769 if (ret) { 1770 if (!dma->iommu_mapped) { 1771 vfio_unpin_pages_remote(dma, iova, 1772 phys >> PAGE_SHIFT, 1773 size >> PAGE_SHIFT, 1774 true); 1775 vfio_batch_unpin(&batch, dma); 1776 } 1777 goto unwind; 1778 } 1779 1780 iova += size; 1781 } 1782 } 1783 1784 /* All dmas are now mapped, defer to second tree walk for unwind */ 1785 for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) { 1786 struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node); 1787 1788 dma->iommu_mapped = true; 1789 } 1790 1791 vfio_batch_fini(&batch); 1792 return 0; 1793 1794 unwind: 1795 for (; n; n = rb_prev(n)) { 1796 struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node); 1797 dma_addr_t iova; 1798 1799 if (dma->iommu_mapped) { 1800 iommu_unmap(domain->domain, dma->iova, dma->size); 1801 continue; 1802 } 1803 1804 iova = dma->iova; 1805 while (iova < dma->iova + dma->size) { 1806 phys_addr_t phys, p; 1807 size_t size; 1808 dma_addr_t i; 1809 1810 phys = iommu_iova_to_phys(domain->domain, iova); 1811 if (!phys) { 1812 iova += PAGE_SIZE; 1813 continue; 1814 } 1815 1816 size = PAGE_SIZE; 1817 p = phys + size; 1818 i = iova + size; 1819 while (i < dma->iova + dma->size && 1820 p == iommu_iova_to_phys(domain->domain, i)) { 1821 size += PAGE_SIZE; 1822 p += PAGE_SIZE; 1823 i += PAGE_SIZE; 1824 } 1825 1826 iommu_unmap(domain->domain, iova, size); 1827 vfio_unpin_pages_remote(dma, iova, phys >> PAGE_SHIFT, 1828 size >> PAGE_SHIFT, true); 1829 } 1830 } 1831 1832 vfio_batch_fini(&batch); 1833 return ret; 1834 } 1835 1836 /* 1837 * We change our unmap behavior slightly depending on whether the IOMMU 1838 * supports fine-grained superpages. IOMMUs like AMD-Vi will use a superpage 1839 * for practically any contiguous power-of-two mapping we give it. This means 1840 * we don't need to look for contiguous chunks ourselves to make unmapping 1841 * more efficient. On IOMMUs with coarse-grained super pages, like Intel VT-d 1842 * with discrete 2M/1G/512G/1T superpages, identifying contiguous chunks 1843 * significantly boosts non-hugetlbfs mappings and doesn't seem to hurt when 1844 * hugetlbfs is in use. 
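 *
 * The probe below maps two contiguous pages as a single 2-page mapping and
 * then asks to unmap only the first PAGE_SIZE of it.  If the unmap does not
 * come back as exactly PAGE_SIZE, the IOMMU is assumed to have used a
 * superpage for the mapping and fgsp is set, which lets vfio_unmap_unpin()
 * skip its own search for physically contiguous chunks.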
1845 */ 1846 static void vfio_test_domain_fgsp(struct vfio_domain *domain) 1847 { 1848 struct page *pages; 1849 int ret, order = get_order(PAGE_SIZE * 2); 1850 1851 pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order); 1852 if (!pages) 1853 return; 1854 1855 ret = iommu_map(domain->domain, 0, page_to_phys(pages), PAGE_SIZE * 2, 1856 IOMMU_READ | IOMMU_WRITE | domain->prot); 1857 if (!ret) { 1858 size_t unmapped = iommu_unmap(domain->domain, 0, PAGE_SIZE); 1859 1860 if (unmapped == PAGE_SIZE) 1861 iommu_unmap(domain->domain, PAGE_SIZE, PAGE_SIZE); 1862 else 1863 domain->fgsp = true; 1864 } 1865 1866 __free_pages(pages, order); 1867 } 1868 1869 static struct vfio_group *find_iommu_group(struct vfio_domain *domain, 1870 struct iommu_group *iommu_group) 1871 { 1872 struct vfio_group *g; 1873 1874 list_for_each_entry(g, &domain->group_list, next) { 1875 if (g->iommu_group == iommu_group) 1876 return g; 1877 } 1878 1879 return NULL; 1880 } 1881 1882 static struct vfio_group *vfio_iommu_find_iommu_group(struct vfio_iommu *iommu, 1883 struct iommu_group *iommu_group) 1884 { 1885 struct vfio_domain *domain; 1886 struct vfio_group *group = NULL; 1887 1888 list_for_each_entry(domain, &iommu->domain_list, next) { 1889 group = find_iommu_group(domain, iommu_group); 1890 if (group) 1891 return group; 1892 } 1893 1894 if (iommu->external_domain) 1895 group = find_iommu_group(iommu->external_domain, iommu_group); 1896 1897 return group; 1898 } 1899 1900 static bool vfio_iommu_has_sw_msi(struct list_head *group_resv_regions, 1901 phys_addr_t *base) 1902 { 1903 struct iommu_resv_region *region; 1904 bool ret = false; 1905 1906 list_for_each_entry(region, group_resv_regions, list) { 1907 /* 1908 * The presence of any 'real' MSI regions should take 1909 * precedence over the software-managed one if the 1910 * IOMMU driver happens to advertise both types. 
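                 * A hardware-managed (IOMMU_RESV_MSI) region therefore makes
                 * us return false immediately, while an IOMMU_RESV_SW_MSI
                 * region only records its base address and keeps scanning in
                 * case a real MSI region follows.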
1911 */ 1912 if (region->type == IOMMU_RESV_MSI) { 1913 ret = false; 1914 break; 1915 } 1916 1917 if (region->type == IOMMU_RESV_SW_MSI) { 1918 *base = region->start; 1919 ret = true; 1920 } 1921 } 1922 1923 return ret; 1924 } 1925 1926 static struct device *vfio_mdev_get_iommu_device(struct device *dev) 1927 { 1928 struct device *(*fn)(struct device *dev); 1929 struct device *iommu_device; 1930 1931 fn = symbol_get(mdev_get_iommu_device); 1932 if (fn) { 1933 iommu_device = fn(dev); 1934 symbol_put(mdev_get_iommu_device); 1935 1936 return iommu_device; 1937 } 1938 1939 return NULL; 1940 } 1941 1942 static int vfio_mdev_attach_domain(struct device *dev, void *data) 1943 { 1944 struct iommu_domain *domain = data; 1945 struct device *iommu_device; 1946 1947 iommu_device = vfio_mdev_get_iommu_device(dev); 1948 if (iommu_device) { 1949 if (iommu_dev_feature_enabled(iommu_device, IOMMU_DEV_FEAT_AUX)) 1950 return iommu_aux_attach_device(domain, iommu_device); 1951 else 1952 return iommu_attach_device(domain, iommu_device); 1953 } 1954 1955 return -EINVAL; 1956 } 1957 1958 static int vfio_mdev_detach_domain(struct device *dev, void *data) 1959 { 1960 struct iommu_domain *domain = data; 1961 struct device *iommu_device; 1962 1963 iommu_device = vfio_mdev_get_iommu_device(dev); 1964 if (iommu_device) { 1965 if (iommu_dev_feature_enabled(iommu_device, IOMMU_DEV_FEAT_AUX)) 1966 iommu_aux_detach_device(domain, iommu_device); 1967 else 1968 iommu_detach_device(domain, iommu_device); 1969 } 1970 1971 return 0; 1972 } 1973 1974 static int vfio_iommu_attach_group(struct vfio_domain *domain, 1975 struct vfio_group *group) 1976 { 1977 if (group->mdev_group) 1978 return iommu_group_for_each_dev(group->iommu_group, 1979 domain->domain, 1980 vfio_mdev_attach_domain); 1981 else 1982 return iommu_attach_group(domain->domain, group->iommu_group); 1983 } 1984 1985 static void vfio_iommu_detach_group(struct vfio_domain *domain, 1986 struct vfio_group *group) 1987 { 1988 if (group->mdev_group) 1989 iommu_group_for_each_dev(group->iommu_group, domain->domain, 1990 vfio_mdev_detach_domain); 1991 else 1992 iommu_detach_group(domain->domain, group->iommu_group); 1993 } 1994 1995 static bool vfio_bus_is_mdev(struct bus_type *bus) 1996 { 1997 struct bus_type *mdev_bus; 1998 bool ret = false; 1999 2000 mdev_bus = symbol_get(mdev_bus_type); 2001 if (mdev_bus) { 2002 ret = (bus == mdev_bus); 2003 symbol_put(mdev_bus_type); 2004 } 2005 2006 return ret; 2007 } 2008 2009 static int vfio_mdev_iommu_device(struct device *dev, void *data) 2010 { 2011 struct device **old = data, *new; 2012 2013 new = vfio_mdev_get_iommu_device(dev); 2014 if (!new || (*old && *old != new)) 2015 return -EINVAL; 2016 2017 *old = new; 2018 2019 return 0; 2020 } 2021 2022 /* 2023 * This is a helper function to insert an address range to iova list. 2024 * The list is initially created with a single entry corresponding to 2025 * the IOMMU domain geometry to which the device group is attached. 2026 * The list aperture gets modified when a new domain is added to the 2027 * container if the new aperture doesn't conflict with the current one 2028 * or with any existing dma mappings. The list is also modified to 2029 * exclude any reserved regions associated with the device group. 
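 *
 * Entries are kept sorted by address: callers either append to the tail
 * (the initial aperture and list copies) or insert in front of the node
 * being split when a reserved region punches a hole in an existing range.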
2030 */ 2031 static int vfio_iommu_iova_insert(struct list_head *head, 2032 dma_addr_t start, dma_addr_t end) 2033 { 2034 struct vfio_iova *region; 2035 2036 region = kmalloc(sizeof(*region), GFP_KERNEL); 2037 if (!region) 2038 return -ENOMEM; 2039 2040 INIT_LIST_HEAD(&region->list); 2041 region->start = start; 2042 region->end = end; 2043 2044 list_add_tail(&region->list, head); 2045 return 0; 2046 } 2047
2048 /* 2049 * Check whether the new iommu aperture conflicts with the existing 2050 * aperture or with any existing dma mappings. 2051 */ 2052 static bool vfio_iommu_aper_conflict(struct vfio_iommu *iommu, 2053 dma_addr_t start, dma_addr_t end) 2054 { 2055 struct vfio_iova *first, *last; 2056 struct list_head *iova = &iommu->iova_list; 2057 2058 if (list_empty(iova)) 2059 return false; 2060 2061 /* Disjoint sets, return conflict */ 2062 first = list_first_entry(iova, struct vfio_iova, list); 2063 last = list_last_entry(iova, struct vfio_iova, list); 2064 if (start > last->end || end < first->start) 2065 return true; 2066 2067 /* Check for any existing dma mappings below the new start */ 2068 if (start > first->start) { 2069 if (vfio_find_dma(iommu, first->start, start - first->start)) 2070 return true; 2071 } 2072 2073 /* Check for any existing dma mappings beyond the new end */ 2074 if (end < last->end) { 2075 if (vfio_find_dma(iommu, end + 1, last->end - end)) 2076 return true; 2077 } 2078 2079 return false; 2080 } 2081
2082 /* 2083 * Resize iommu iova aperture window. This is called only if the new 2084 * aperture has no conflict with existing aperture and dma mappings. 2085 */ 2086 static int vfio_iommu_aper_resize(struct list_head *iova, 2087 dma_addr_t start, dma_addr_t end) 2088 { 2089 struct vfio_iova *node, *next; 2090 2091 if (list_empty(iova)) 2092 return vfio_iommu_iova_insert(iova, start, end); 2093 2094 /* Adjust iova list start */ 2095 list_for_each_entry_safe(node, next, iova, list) { 2096 if (start < node->start) 2097 break; 2098 if (start >= node->start && start < node->end) { 2099 node->start = start; 2100 break; 2101 } 2102 /* Delete nodes before new start */ 2103 list_del(&node->list); 2104 kfree(node); 2105 } 2106 2107 /* Adjust iova list end */ 2108 list_for_each_entry_safe(node, next, iova, list) { 2109 if (end > node->end) 2110 continue; 2111 if (end > node->start && end <= node->end) { 2112 node->end = end; 2113 continue; 2114 } 2115 /* Delete nodes after new end */ 2116 list_del(&node->list); 2117 kfree(node); 2118 } 2119 2120 return 0; 2121 } 2122
2123 /* 2124 * Check whether reserved regions conflict with existing dma mappings 2125 */ 2126 static bool vfio_iommu_resv_conflict(struct vfio_iommu *iommu, 2127 struct list_head *resv_regions) 2128 { 2129 struct iommu_resv_region *region; 2130 2131 /* Check for conflict with existing dma mappings */ 2132 list_for_each_entry(region, resv_regions, list) { 2133 if (region->type == IOMMU_RESV_DIRECT_RELAXABLE) 2134 continue; 2135 2136 if (vfio_find_dma(iommu, region->start, region->length)) 2137 return true; 2138 } 2139 2140 return false; 2141 } 2142
2143 /* 2144 * Check iova regions for overlap with reserved regions and 2145 * exclude the overlapping ranges from the iommu iova range 2146 */ 2147 static int vfio_iommu_resv_exclude(struct list_head *iova, 2148 struct list_head *resv_regions) 2149 { 2150 struct iommu_resv_region *resv; 2151 struct vfio_iova *n, *next; 2152 2153 list_for_each_entry(resv, resv_regions, list) { 2154 phys_addr_t start, end; 2155 2156 if (resv->type == IOMMU_RESV_DIRECT_RELAXABLE) 2157 continue; 2158 2159 start = resv->start; 2160 end = resv->start + resv->length - 1; 2161 2162 list_for_each_entry_safe(n, next, iova, list) { 2163 int ret = 0; 2164 2165 /* No overlap */ 2166 if (start > n->end || end < n->start) 2167 continue; 2168 /* 2169 * Insert a new node if the current node overlaps with the 2170 * reserved region, to exclude that range from the valid iova 2171 * range. Note that the new node is inserted before the current 2172 * node and finally the current node is deleted, keeping 2173 * the list updated and sorted. 2174 */ 2175 if (start > n->start) 2176 ret = vfio_iommu_iova_insert(&n->list, n->start, 2177 start - 1); 2178 if (!ret && end < n->end) 2179 ret = vfio_iommu_iova_insert(&n->list, end + 1, 2180 n->end); 2181 if (ret) 2182 return ret; 2183 2184 list_del(&n->list); 2185 kfree(n); 2186 } 2187 } 2188 2189 if (list_empty(iova)) 2190 return -EINVAL; 2191 2192 return 0; 2193 } 2194
2195 static void vfio_iommu_resv_free(struct list_head *resv_regions) 2196 { 2197 struct iommu_resv_region *n, *next; 2198 2199 list_for_each_entry_safe(n, next, resv_regions, list) { 2200 list_del(&n->list); 2201 kfree(n); 2202 } 2203 } 2204
2205 static void vfio_iommu_iova_free(struct list_head *iova) 2206 { 2207 struct vfio_iova *n, *next; 2208 2209 list_for_each_entry_safe(n, next, iova, list) { 2210 list_del(&n->list); 2211 kfree(n); 2212 } 2213 } 2214
2215 static int vfio_iommu_iova_get_copy(struct vfio_iommu *iommu, 2216 struct list_head *iova_copy) 2217 { 2218 struct list_head *iova = &iommu->iova_list; 2219 struct vfio_iova *n; 2220 int ret; 2221 2222 list_for_each_entry(n, iova, list) { 2223 ret = vfio_iommu_iova_insert(iova_copy, n->start, n->end); 2224 if (ret) 2225 goto out_free; 2226 } 2227 2228 return 0; 2229 2230 out_free: 2231 vfio_iommu_iova_free(iova_copy); 2232 return ret; 2233 } 2234
2235 static void vfio_iommu_iova_insert_copy(struct vfio_iommu *iommu, 2236 struct list_head *iova_copy) 2237 { 2238 struct list_head *iova = &iommu->iova_list; 2239 2240 vfio_iommu_iova_free(iova); 2241 2242 list_splice_tail(iova_copy, iova); 2243 } 2244
2245 static int vfio_iommu_type1_attach_group(void *iommu_data, 2246 struct iommu_group *iommu_group) 2247 { 2248 struct vfio_iommu *iommu = iommu_data; 2249 struct vfio_group *group; 2250 struct vfio_domain *domain, *d; 2251 struct bus_type *bus = NULL; 2252 int ret; 2253 bool resv_msi, msi_remap; 2254 phys_addr_t resv_msi_base = 0; 2255 struct iommu_domain_geometry geo; 2256 LIST_HEAD(iova_copy); 2257 LIST_HEAD(group_resv_regions); 2258 2259 mutex_lock(&iommu->lock); 2260 2261 /* Check for duplicates */ 2262 if (vfio_iommu_find_iommu_group(iommu, iommu_group)) { 2263 mutex_unlock(&iommu->lock); 2264 return -EINVAL; 2265 } 2266 2267 group = kzalloc(sizeof(*group), GFP_KERNEL); 2268 domain = kzalloc(sizeof(*domain), GFP_KERNEL); 2269 if (!group || !domain) { 2270 ret = -ENOMEM; 2271 goto out_free; 2272 } 2273 2274 group->iommu_group = iommu_group; 2275 2276 /* Determine bus_type in order to allocate a domain */ 2277 ret = iommu_group_for_each_dev(iommu_group, &bus, vfio_bus_type); 2278 if (ret) 2279 goto out_free; 2280 2281 if (vfio_bus_is_mdev(bus)) { 2282 struct device *iommu_device = NULL; 2283 2284 group->mdev_group = true; 2285 2286 /* Determine the isolation type */ 2287 ret = iommu_group_for_each_dev(iommu_group, &iommu_device, 2288 vfio_mdev_iommu_device); 2289 if (ret || !iommu_device) { 2290 if (!iommu->external_domain) { 2291 INIT_LIST_HEAD(&domain->group_list); 2292 iommu->external_domain = domain; 2293 vfio_update_pgsize_bitmap(iommu); 2294 } else { 2295 kfree(domain); 2296 } 2297 2298
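/*
 * No iommu-backing device was found for this mdev group (or the
 * isolation type could not be determined), so track it under the
 * external domain: its pages are only pinned and unpinned on demand
 * through the pin_pages interface and are never IOMMU-mapped here.
 */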
list_add(&group->next, 2299 &iommu->external_domain->group_list); 2300 /* 2301 * Non-iommu backed group cannot dirty memory directly, 2302 * it can only use interfaces that provide dirty 2303 * tracking. 2304 * The iommu scope can only be promoted with the 2305 * addition of a dirty tracking group. 2306 */ 2307 group->pinned_page_dirty_scope = true; 2308 mutex_unlock(&iommu->lock); 2309 2310 return 0; 2311 } 2312 2313 bus = iommu_device->bus; 2314 } 2315 2316 domain->domain = iommu_domain_alloc(bus); 2317 if (!domain->domain) { 2318 ret = -EIO; 2319 goto out_free; 2320 } 2321 2322 if (iommu->nesting) { 2323 int attr = 1; 2324 2325 ret = iommu_domain_set_attr(domain->domain, DOMAIN_ATTR_NESTING, 2326 &attr); 2327 if (ret) 2328 goto out_domain; 2329 } 2330 2331 ret = vfio_iommu_attach_group(domain, group); 2332 if (ret) 2333 goto out_domain; 2334 2335 /* Get aperture info */ 2336 iommu_domain_get_attr(domain->domain, DOMAIN_ATTR_GEOMETRY, &geo); 2337 2338 if (vfio_iommu_aper_conflict(iommu, geo.aperture_start, 2339 geo.aperture_end)) { 2340 ret = -EINVAL; 2341 goto out_detach; 2342 } 2343 2344 ret = iommu_get_group_resv_regions(iommu_group, &group_resv_regions); 2345 if (ret) 2346 goto out_detach; 2347 2348 if (vfio_iommu_resv_conflict(iommu, &group_resv_regions)) { 2349 ret = -EINVAL; 2350 goto out_detach; 2351 } 2352 2353 /* 2354 * We don't want to work on the original iova list as the list 2355 * gets modified and in case of failure we have to retain the 2356 * original list. Get a copy here. 2357 */ 2358 ret = vfio_iommu_iova_get_copy(iommu, &iova_copy); 2359 if (ret) 2360 goto out_detach; 2361 2362 ret = vfio_iommu_aper_resize(&iova_copy, geo.aperture_start, 2363 geo.aperture_end); 2364 if (ret) 2365 goto out_detach; 2366 2367 ret = vfio_iommu_resv_exclude(&iova_copy, &group_resv_regions); 2368 if (ret) 2369 goto out_detach; 2370 2371 resv_msi = vfio_iommu_has_sw_msi(&group_resv_regions, &resv_msi_base); 2372 2373 INIT_LIST_HEAD(&domain->group_list); 2374 list_add(&group->next, &domain->group_list); 2375 2376 msi_remap = irq_domain_check_msi_remap() || 2377 iommu_capable(bus, IOMMU_CAP_INTR_REMAP); 2378 2379 if (!allow_unsafe_interrupts && !msi_remap) { 2380 pr_warn("%s: No interrupt remapping support. Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n", 2381 __func__); 2382 ret = -EPERM; 2383 goto out_detach; 2384 } 2385 2386 if (iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY)) 2387 domain->prot |= IOMMU_CACHE; 2388 2389 /* 2390 * Try to match an existing compatible domain. We don't want to 2391 * preclude an IOMMU driver supporting multiple bus_types and being 2392 * able to include different bus_types in the same IOMMU domain, so 2393 * we test whether the domains use the same iommu_ops rather than 2394 * testing if they're on the same bus_type. 
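 * As an illustrative example: two groups behind different IOMMU instances
 * that expose the same iommu_ops and the same IOMMU_CACHE setting are
 * placed in a single vfio_domain here, so existing mappings are replayed
 * once per compatible domain rather than once per group.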
2395 */ 2396 list_for_each_entry(d, &iommu->domain_list, next) { 2397 if (d->domain->ops == domain->domain->ops && 2398 d->prot == domain->prot) { 2399 vfio_iommu_detach_group(domain, group); 2400 if (!vfio_iommu_attach_group(d, group)) { 2401 list_add(&group->next, &d->group_list); 2402 iommu_domain_free(domain->domain); 2403 kfree(domain); 2404 goto done; 2405 } 2406 2407 ret = vfio_iommu_attach_group(domain, group); 2408 if (ret) 2409 goto out_domain; 2410 } 2411 } 2412 2413 vfio_test_domain_fgsp(domain); 2414 2415 /* replay mappings on new domains */ 2416 ret = vfio_iommu_replay(iommu, domain); 2417 if (ret) 2418 goto out_detach; 2419 2420 if (resv_msi) { 2421 ret = iommu_get_msi_cookie(domain->domain, resv_msi_base); 2422 if (ret && ret != -ENODEV) 2423 goto out_detach; 2424 } 2425 2426 list_add(&domain->next, &iommu->domain_list); 2427 vfio_update_pgsize_bitmap(iommu); 2428 done: 2429 /* Delete the old one and insert new iova list */ 2430 vfio_iommu_iova_insert_copy(iommu, &iova_copy); 2431 2432 /* 2433 * An iommu backed group can dirty memory directly and therefore 2434 * demotes the iommu scope until it declares itself dirty tracking 2435 * capable via the page pinning interface. 2436 */ 2437 iommu->num_non_pinned_groups++; 2438 mutex_unlock(&iommu->lock); 2439 vfio_iommu_resv_free(&group_resv_regions); 2440 2441 return 0; 2442 2443 out_detach: 2444 vfio_iommu_detach_group(domain, group); 2445 out_domain: 2446 iommu_domain_free(domain->domain); 2447 vfio_iommu_iova_free(&iova_copy); 2448 vfio_iommu_resv_free(&group_resv_regions); 2449 out_free: 2450 kfree(domain); 2451 kfree(group); 2452 mutex_unlock(&iommu->lock); 2453 return ret; 2454 } 2455 2456 static void vfio_iommu_unmap_unpin_all(struct vfio_iommu *iommu) 2457 { 2458 struct rb_node *node; 2459 2460 while ((node = rb_first(&iommu->dma_list))) 2461 vfio_remove_dma(iommu, rb_entry(node, struct vfio_dma, node)); 2462 } 2463 2464 static void vfio_iommu_unmap_unpin_reaccount(struct vfio_iommu *iommu) 2465 { 2466 struct rb_node *n, *p; 2467 2468 n = rb_first(&iommu->dma_list); 2469 for (; n; n = rb_next(n)) { 2470 struct vfio_dma *dma; 2471 long locked = 0, unlocked = 0; 2472 2473 dma = rb_entry(n, struct vfio_dma, node); 2474 unlocked += vfio_unmap_unpin(iommu, dma, false); 2475 p = rb_first(&dma->pfn_list); 2476 for (; p; p = rb_next(p)) { 2477 struct vfio_pfn *vpfn = rb_entry(p, struct vfio_pfn, 2478 node); 2479 2480 if (!is_invalid_reserved_pfn(vpfn->pfn)) 2481 locked++; 2482 } 2483 vfio_lock_acct(dma, locked - unlocked, true); 2484 } 2485 } 2486 2487 /* 2488 * Called when a domain is removed in detach. It is possible that 2489 * the removed domain decided the iova aperture window. Modify the 2490 * iova aperture with the smallest window among existing domains. 2491 */ 2492 static void vfio_iommu_aper_expand(struct vfio_iommu *iommu, 2493 struct list_head *iova_copy) 2494 { 2495 struct vfio_domain *domain; 2496 struct iommu_domain_geometry geo; 2497 struct vfio_iova *node; 2498 dma_addr_t start = 0; 2499 dma_addr_t end = (dma_addr_t)~0; 2500 2501 if (list_empty(iova_copy)) 2502 return; 2503 2504 list_for_each_entry(domain, &iommu->domain_list, next) { 2505 iommu_domain_get_attr(domain->domain, DOMAIN_ATTR_GEOMETRY, 2506 &geo); 2507 if (geo.aperture_start > start) 2508 start = geo.aperture_start; 2509 if (geo.aperture_end < end) 2510 end = geo.aperture_end; 2511 } 2512 2513 /* Modify aperture limits. 
The new aper is either same or bigger */ 2514 node = list_first_entry(iova_copy, struct vfio_iova, list); 2515 node->start = start; 2516 node = list_last_entry(iova_copy, struct vfio_iova, list); 2517 node->end = end; 2518 } 2519 2520 /* 2521 * Called when a group is detached. The reserved regions for that 2522 * group can be part of valid iova now. But since reserved regions 2523 * may be duplicated among groups, populate the iova valid regions 2524 * list again. 2525 */ 2526 static int vfio_iommu_resv_refresh(struct vfio_iommu *iommu, 2527 struct list_head *iova_copy) 2528 { 2529 struct vfio_domain *d; 2530 struct vfio_group *g; 2531 struct vfio_iova *node; 2532 dma_addr_t start, end; 2533 LIST_HEAD(resv_regions); 2534 int ret; 2535 2536 if (list_empty(iova_copy)) 2537 return -EINVAL; 2538 2539 list_for_each_entry(d, &iommu->domain_list, next) { 2540 list_for_each_entry(g, &d->group_list, next) { 2541 ret = iommu_get_group_resv_regions(g->iommu_group, 2542 &resv_regions); 2543 if (ret) 2544 goto done; 2545 } 2546 } 2547 2548 node = list_first_entry(iova_copy, struct vfio_iova, list); 2549 start = node->start; 2550 node = list_last_entry(iova_copy, struct vfio_iova, list); 2551 end = node->end; 2552 2553 /* purge the iova list and create new one */ 2554 vfio_iommu_iova_free(iova_copy); 2555 2556 ret = vfio_iommu_aper_resize(iova_copy, start, end); 2557 if (ret) 2558 goto done; 2559 2560 /* Exclude current reserved regions from iova ranges */ 2561 ret = vfio_iommu_resv_exclude(iova_copy, &resv_regions); 2562 done: 2563 vfio_iommu_resv_free(&resv_regions); 2564 return ret; 2565 } 2566 2567 static void vfio_iommu_type1_detach_group(void *iommu_data, 2568 struct iommu_group *iommu_group) 2569 { 2570 struct vfio_iommu *iommu = iommu_data; 2571 struct vfio_domain *domain; 2572 struct vfio_group *group; 2573 bool update_dirty_scope = false; 2574 LIST_HEAD(iova_copy); 2575 2576 mutex_lock(&iommu->lock); 2577 2578 if (iommu->external_domain) { 2579 group = find_iommu_group(iommu->external_domain, iommu_group); 2580 if (group) { 2581 update_dirty_scope = !group->pinned_page_dirty_scope; 2582 list_del(&group->next); 2583 kfree(group); 2584 2585 if (list_empty(&iommu->external_domain->group_list)) { 2586 if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu)) { 2587 WARN_ON(iommu->notifier.head); 2588 vfio_iommu_unmap_unpin_all(iommu); 2589 } 2590 2591 kfree(iommu->external_domain); 2592 iommu->external_domain = NULL; 2593 } 2594 goto detach_group_done; 2595 } 2596 } 2597 2598 /* 2599 * Get a copy of iova list. This will be used to update 2600 * and to replace the current one later. Please note that 2601 * we will leave the original list as it is if update fails. 2602 */ 2603 vfio_iommu_iova_get_copy(iommu, &iova_copy); 2604 2605 list_for_each_entry(domain, &iommu->domain_list, next) { 2606 group = find_iommu_group(domain, iommu_group); 2607 if (!group) 2608 continue; 2609 2610 vfio_iommu_detach_group(domain, group); 2611 update_dirty_scope = !group->pinned_page_dirty_scope; 2612 list_del(&group->next); 2613 kfree(group); 2614 /* 2615 * Group ownership provides privilege, if the group list is 2616 * empty, the domain goes away. If it's the last domain with 2617 * iommu and external domain doesn't exist, then all the 2618 * mappings go away too. 
If it's the last domain with iommu and 2619 * external domain exist, update accounting 2620 */ 2621 if (list_empty(&domain->group_list)) { 2622 if (list_is_singular(&iommu->domain_list)) { 2623 if (!iommu->external_domain) { 2624 WARN_ON(iommu->notifier.head); 2625 vfio_iommu_unmap_unpin_all(iommu); 2626 } else { 2627 vfio_iommu_unmap_unpin_reaccount(iommu); 2628 } 2629 } 2630 iommu_domain_free(domain->domain); 2631 list_del(&domain->next); 2632 kfree(domain); 2633 vfio_iommu_aper_expand(iommu, &iova_copy); 2634 vfio_update_pgsize_bitmap(iommu); 2635 } 2636 break; 2637 } 2638 2639 if (!vfio_iommu_resv_refresh(iommu, &iova_copy)) 2640 vfio_iommu_iova_insert_copy(iommu, &iova_copy); 2641 else 2642 vfio_iommu_iova_free(&iova_copy); 2643 2644 detach_group_done: 2645 /* 2646 * Removal of a group without dirty tracking may allow the iommu scope 2647 * to be promoted. 2648 */ 2649 if (update_dirty_scope) { 2650 iommu->num_non_pinned_groups--; 2651 if (iommu->dirty_page_tracking) 2652 vfio_iommu_populate_bitmap_full(iommu); 2653 } 2654 mutex_unlock(&iommu->lock); 2655 } 2656 2657 static void *vfio_iommu_type1_open(unsigned long arg) 2658 { 2659 struct vfio_iommu *iommu; 2660 2661 iommu = kzalloc(sizeof(*iommu), GFP_KERNEL); 2662 if (!iommu) 2663 return ERR_PTR(-ENOMEM); 2664 2665 switch (arg) { 2666 case VFIO_TYPE1_IOMMU: 2667 break; 2668 case VFIO_TYPE1_NESTING_IOMMU: 2669 iommu->nesting = true; 2670 fallthrough; 2671 case VFIO_TYPE1v2_IOMMU: 2672 iommu->v2 = true; 2673 break; 2674 default: 2675 kfree(iommu); 2676 return ERR_PTR(-EINVAL); 2677 } 2678 2679 INIT_LIST_HEAD(&iommu->domain_list); 2680 INIT_LIST_HEAD(&iommu->iova_list); 2681 iommu->dma_list = RB_ROOT; 2682 iommu->dma_avail = dma_entry_limit; 2683 iommu->container_open = true; 2684 mutex_init(&iommu->lock); 2685 BLOCKING_INIT_NOTIFIER_HEAD(&iommu->notifier); 2686 init_waitqueue_head(&iommu->vaddr_wait); 2687 2688 return iommu; 2689 } 2690 2691 static void vfio_release_domain(struct vfio_domain *domain, bool external) 2692 { 2693 struct vfio_group *group, *group_tmp; 2694 2695 list_for_each_entry_safe(group, group_tmp, 2696 &domain->group_list, next) { 2697 if (!external) 2698 vfio_iommu_detach_group(domain, group); 2699 list_del(&group->next); 2700 kfree(group); 2701 } 2702 2703 if (!external) 2704 iommu_domain_free(domain->domain); 2705 } 2706 2707 static void vfio_iommu_type1_release(void *iommu_data) 2708 { 2709 struct vfio_iommu *iommu = iommu_data; 2710 struct vfio_domain *domain, *domain_tmp; 2711 2712 if (iommu->external_domain) { 2713 vfio_release_domain(iommu->external_domain, true); 2714 kfree(iommu->external_domain); 2715 } 2716 2717 vfio_iommu_unmap_unpin_all(iommu); 2718 2719 list_for_each_entry_safe(domain, domain_tmp, 2720 &iommu->domain_list, next) { 2721 vfio_release_domain(domain, false); 2722 list_del(&domain->next); 2723 kfree(domain); 2724 } 2725 2726 vfio_iommu_iova_free(&iommu->iova_list); 2727 2728 kfree(iommu); 2729 } 2730 2731 static int vfio_domains_have_iommu_cache(struct vfio_iommu *iommu) 2732 { 2733 struct vfio_domain *domain; 2734 int ret = 1; 2735 2736 mutex_lock(&iommu->lock); 2737 list_for_each_entry(domain, &iommu->domain_list, next) { 2738 if (!(domain->prot & IOMMU_CACHE)) { 2739 ret = 0; 2740 break; 2741 } 2742 } 2743 mutex_unlock(&iommu->lock); 2744 2745 return ret; 2746 } 2747 2748 static int vfio_iommu_type1_check_extension(struct vfio_iommu *iommu, 2749 unsigned long arg) 2750 { 2751 switch (arg) { 2752 case VFIO_TYPE1_IOMMU: 2753 case VFIO_TYPE1v2_IOMMU: 2754 case VFIO_TYPE1_NESTING_IOMMU: 
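/*
 * VFIO_UNMAP_ALL and VFIO_UPDATE_VADDR below are feature extensions
 * rather than container types; this driver always reports them as
 * supported.
 */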
2755 case VFIO_UNMAP_ALL: 2756 case VFIO_UPDATE_VADDR: 2757 return 1; 2758 case VFIO_DMA_CC_IOMMU: 2759 if (!iommu) 2760 return 0; 2761 return vfio_domains_have_iommu_cache(iommu); 2762 default: 2763 return 0; 2764 } 2765 } 2766 2767 static int vfio_iommu_iova_add_cap(struct vfio_info_cap *caps, 2768 struct vfio_iommu_type1_info_cap_iova_range *cap_iovas, 2769 size_t size) 2770 { 2771 struct vfio_info_cap_header *header; 2772 struct vfio_iommu_type1_info_cap_iova_range *iova_cap; 2773 2774 header = vfio_info_cap_add(caps, size, 2775 VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE, 1); 2776 if (IS_ERR(header)) 2777 return PTR_ERR(header); 2778 2779 iova_cap = container_of(header, 2780 struct vfio_iommu_type1_info_cap_iova_range, 2781 header); 2782 iova_cap->nr_iovas = cap_iovas->nr_iovas; 2783 memcpy(iova_cap->iova_ranges, cap_iovas->iova_ranges, 2784 cap_iovas->nr_iovas * sizeof(*cap_iovas->iova_ranges)); 2785 return 0; 2786 } 2787 2788 static int vfio_iommu_iova_build_caps(struct vfio_iommu *iommu, 2789 struct vfio_info_cap *caps) 2790 { 2791 struct vfio_iommu_type1_info_cap_iova_range *cap_iovas; 2792 struct vfio_iova *iova; 2793 size_t size; 2794 int iovas = 0, i = 0, ret; 2795 2796 list_for_each_entry(iova, &iommu->iova_list, list) 2797 iovas++; 2798 2799 if (!iovas) { 2800 /* 2801 * Return 0 as a container with a single mdev device 2802 * will have an empty list 2803 */ 2804 return 0; 2805 } 2806 2807 size = sizeof(*cap_iovas) + (iovas * sizeof(*cap_iovas->iova_ranges)); 2808 2809 cap_iovas = kzalloc(size, GFP_KERNEL); 2810 if (!cap_iovas) 2811 return -ENOMEM; 2812 2813 cap_iovas->nr_iovas = iovas; 2814 2815 list_for_each_entry(iova, &iommu->iova_list, list) { 2816 cap_iovas->iova_ranges[i].start = iova->start; 2817 cap_iovas->iova_ranges[i].end = iova->end; 2818 i++; 2819 } 2820 2821 ret = vfio_iommu_iova_add_cap(caps, cap_iovas, size); 2822 2823 kfree(cap_iovas); 2824 return ret; 2825 } 2826 2827 static int vfio_iommu_migration_build_caps(struct vfio_iommu *iommu, 2828 struct vfio_info_cap *caps) 2829 { 2830 struct vfio_iommu_type1_info_cap_migration cap_mig; 2831 2832 cap_mig.header.id = VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION; 2833 cap_mig.header.version = 1; 2834 2835 cap_mig.flags = 0; 2836 /* support minimum pgsize */ 2837 cap_mig.pgsize_bitmap = (size_t)1 << __ffs(iommu->pgsize_bitmap); 2838 cap_mig.max_dirty_bitmap_size = DIRTY_BITMAP_SIZE_MAX; 2839 2840 return vfio_info_add_capability(caps, &cap_mig.header, sizeof(cap_mig)); 2841 } 2842 2843 static int vfio_iommu_dma_avail_build_caps(struct vfio_iommu *iommu, 2844 struct vfio_info_cap *caps) 2845 { 2846 struct vfio_iommu_type1_info_dma_avail cap_dma_avail; 2847 2848 cap_dma_avail.header.id = VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL; 2849 cap_dma_avail.header.version = 1; 2850 2851 cap_dma_avail.avail = iommu->dma_avail; 2852 2853 return vfio_info_add_capability(caps, &cap_dma_avail.header, 2854 sizeof(cap_dma_avail)); 2855 } 2856 2857 static int vfio_iommu_type1_get_info(struct vfio_iommu *iommu, 2858 unsigned long arg) 2859 { 2860 struct vfio_iommu_type1_info info; 2861 unsigned long minsz; 2862 struct vfio_info_cap caps = { .buf = NULL, .size = 0 }; 2863 unsigned long capsz; 2864 int ret; 2865 2866 minsz = offsetofend(struct vfio_iommu_type1_info, iova_pgsizes); 2867 2868 /* For backward compatibility, cannot require this */ 2869 capsz = offsetofend(struct vfio_iommu_type1_info, cap_offset); 2870 2871 if (copy_from_user(&info, (void __user *)arg, minsz)) 2872 return -EFAULT; 2873 2874 if (info.argsz < minsz) 2875 return -EINVAL; 2876 2877 if 
(info.argsz >= capsz) { 2878 minsz = capsz; 2879 info.cap_offset = 0; /* output, no-recopy necessary */ 2880 } 2881 2882 mutex_lock(&iommu->lock); 2883 info.flags = VFIO_IOMMU_INFO_PGSIZES; 2884 2885 info.iova_pgsizes = iommu->pgsize_bitmap; 2886 2887 ret = vfio_iommu_migration_build_caps(iommu, &caps); 2888 2889 if (!ret) 2890 ret = vfio_iommu_dma_avail_build_caps(iommu, &caps); 2891 2892 if (!ret) 2893 ret = vfio_iommu_iova_build_caps(iommu, &caps); 2894 2895 mutex_unlock(&iommu->lock); 2896 2897 if (ret) 2898 return ret; 2899 2900 if (caps.size) { 2901 info.flags |= VFIO_IOMMU_INFO_CAPS; 2902 2903 if (info.argsz < sizeof(info) + caps.size) { 2904 info.argsz = sizeof(info) + caps.size; 2905 } else { 2906 vfio_info_cap_shift(&caps, sizeof(info)); 2907 if (copy_to_user((void __user *)arg + 2908 sizeof(info), caps.buf, 2909 caps.size)) { 2910 kfree(caps.buf); 2911 return -EFAULT; 2912 } 2913 info.cap_offset = sizeof(info); 2914 } 2915 2916 kfree(caps.buf); 2917 } 2918 2919 return copy_to_user((void __user *)arg, &info, minsz) ? 2920 -EFAULT : 0; 2921 } 2922 2923 static int vfio_iommu_type1_map_dma(struct vfio_iommu *iommu, 2924 unsigned long arg) 2925 { 2926 struct vfio_iommu_type1_dma_map map; 2927 unsigned long minsz; 2928 uint32_t mask = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE | 2929 VFIO_DMA_MAP_FLAG_VADDR; 2930 2931 minsz = offsetofend(struct vfio_iommu_type1_dma_map, size); 2932 2933 if (copy_from_user(&map, (void __user *)arg, minsz)) 2934 return -EFAULT; 2935 2936 if (map.argsz < minsz || map.flags & ~mask) 2937 return -EINVAL; 2938 2939 return vfio_dma_do_map(iommu, &map); 2940 } 2941 2942 static int vfio_iommu_type1_unmap_dma(struct vfio_iommu *iommu, 2943 unsigned long arg) 2944 { 2945 struct vfio_iommu_type1_dma_unmap unmap; 2946 struct vfio_bitmap bitmap = { 0 }; 2947 uint32_t mask = VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP | 2948 VFIO_DMA_UNMAP_FLAG_VADDR | 2949 VFIO_DMA_UNMAP_FLAG_ALL; 2950 unsigned long minsz; 2951 int ret; 2952 2953 minsz = offsetofend(struct vfio_iommu_type1_dma_unmap, size); 2954 2955 if (copy_from_user(&unmap, (void __user *)arg, minsz)) 2956 return -EFAULT; 2957 2958 if (unmap.argsz < minsz || unmap.flags & ~mask) 2959 return -EINVAL; 2960 2961 if ((unmap.flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) && 2962 (unmap.flags & (VFIO_DMA_UNMAP_FLAG_ALL | 2963 VFIO_DMA_UNMAP_FLAG_VADDR))) 2964 return -EINVAL; 2965 2966 if (unmap.flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) { 2967 unsigned long pgshift; 2968 2969 if (unmap.argsz < (minsz + sizeof(bitmap))) 2970 return -EINVAL; 2971 2972 if (copy_from_user(&bitmap, 2973 (void __user *)(arg + minsz), 2974 sizeof(bitmap))) 2975 return -EFAULT; 2976 2977 if (!access_ok((void __user *)bitmap.data, bitmap.size)) 2978 return -EINVAL; 2979 2980 pgshift = __ffs(bitmap.pgsize); 2981 ret = verify_bitmap_size(unmap.size >> pgshift, 2982 bitmap.size); 2983 if (ret) 2984 return ret; 2985 } 2986 2987 ret = vfio_dma_do_unmap(iommu, &unmap, &bitmap); 2988 if (ret) 2989 return ret; 2990 2991 return copy_to_user((void __user *)arg, &unmap, minsz) ? 
2992 -EFAULT : 0; 2993 } 2994 2995 static int vfio_iommu_type1_dirty_pages(struct vfio_iommu *iommu, 2996 unsigned long arg) 2997 { 2998 struct vfio_iommu_type1_dirty_bitmap dirty; 2999 uint32_t mask = VFIO_IOMMU_DIRTY_PAGES_FLAG_START | 3000 VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP | 3001 VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP; 3002 unsigned long minsz; 3003 int ret = 0; 3004 3005 if (!iommu->v2) 3006 return -EACCES; 3007 3008 minsz = offsetofend(struct vfio_iommu_type1_dirty_bitmap, flags); 3009 3010 if (copy_from_user(&dirty, (void __user *)arg, minsz)) 3011 return -EFAULT; 3012 3013 if (dirty.argsz < minsz || dirty.flags & ~mask) 3014 return -EINVAL; 3015 3016 /* only one flag should be set at a time */ 3017 if (__ffs(dirty.flags) != __fls(dirty.flags)) 3018 return -EINVAL; 3019 3020 if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_START) { 3021 size_t pgsize; 3022 3023 mutex_lock(&iommu->lock); 3024 pgsize = 1 << __ffs(iommu->pgsize_bitmap); 3025 if (!iommu->dirty_page_tracking) { 3026 ret = vfio_dma_bitmap_alloc_all(iommu, pgsize); 3027 if (!ret) 3028 iommu->dirty_page_tracking = true; 3029 } 3030 mutex_unlock(&iommu->lock); 3031 return ret; 3032 } else if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP) { 3033 mutex_lock(&iommu->lock); 3034 if (iommu->dirty_page_tracking) { 3035 iommu->dirty_page_tracking = false; 3036 vfio_dma_bitmap_free_all(iommu); 3037 } 3038 mutex_unlock(&iommu->lock); 3039 return 0; 3040 } else if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP) { 3041 struct vfio_iommu_type1_dirty_bitmap_get range; 3042 unsigned long pgshift; 3043 size_t data_size = dirty.argsz - minsz; 3044 size_t iommu_pgsize; 3045 3046 if (!data_size || data_size < sizeof(range)) 3047 return -EINVAL; 3048 3049 if (copy_from_user(&range, (void __user *)(arg + minsz), 3050 sizeof(range))) 3051 return -EFAULT; 3052 3053 if (range.iova + range.size < range.iova) 3054 return -EINVAL; 3055 if (!access_ok((void __user *)range.bitmap.data, 3056 range.bitmap.size)) 3057 return -EINVAL; 3058 3059 pgshift = __ffs(range.bitmap.pgsize); 3060 ret = verify_bitmap_size(range.size >> pgshift, 3061 range.bitmap.size); 3062 if (ret) 3063 return ret; 3064 3065 mutex_lock(&iommu->lock); 3066 3067 iommu_pgsize = (size_t)1 << __ffs(iommu->pgsize_bitmap); 3068 3069 /* allow only smallest supported pgsize */ 3070 if (range.bitmap.pgsize != iommu_pgsize) { 3071 ret = -EINVAL; 3072 goto out_unlock; 3073 } 3074 if (range.iova & (iommu_pgsize - 1)) { 3075 ret = -EINVAL; 3076 goto out_unlock; 3077 } 3078 if (!range.size || range.size & (iommu_pgsize - 1)) { 3079 ret = -EINVAL; 3080 goto out_unlock; 3081 } 3082 3083 if (iommu->dirty_page_tracking) 3084 ret = vfio_iova_dirty_bitmap(range.bitmap.data, 3085 iommu, range.iova, 3086 range.size, 3087 range.bitmap.pgsize); 3088 else 3089 ret = -EINVAL; 3090 out_unlock: 3091 mutex_unlock(&iommu->lock); 3092 3093 return ret; 3094 } 3095 3096 return -EINVAL; 3097 } 3098 3099 static long vfio_iommu_type1_ioctl(void *iommu_data, 3100 unsigned int cmd, unsigned long arg) 3101 { 3102 struct vfio_iommu *iommu = iommu_data; 3103 3104 switch (cmd) { 3105 case VFIO_CHECK_EXTENSION: 3106 return vfio_iommu_type1_check_extension(iommu, arg); 3107 case VFIO_IOMMU_GET_INFO: 3108 return vfio_iommu_type1_get_info(iommu, arg); 3109 case VFIO_IOMMU_MAP_DMA: 3110 return vfio_iommu_type1_map_dma(iommu, arg); 3111 case VFIO_IOMMU_UNMAP_DMA: 3112 return vfio_iommu_type1_unmap_dma(iommu, arg); 3113 case VFIO_IOMMU_DIRTY_PAGES: 3114 return vfio_iommu_type1_dirty_pages(iommu, arg); 3115 default: 3116 return 
-ENOTTY; 3117 } 3118 } 3119 3120 static int vfio_iommu_type1_register_notifier(void *iommu_data, 3121 unsigned long *events, 3122 struct notifier_block *nb) 3123 { 3124 struct vfio_iommu *iommu = iommu_data; 3125 3126 /* clear known events */ 3127 *events &= ~VFIO_IOMMU_NOTIFY_DMA_UNMAP; 3128 3129 /* refuse to register if still events remaining */ 3130 if (*events) 3131 return -EINVAL; 3132 3133 return blocking_notifier_chain_register(&iommu->notifier, nb); 3134 } 3135 3136 static int vfio_iommu_type1_unregister_notifier(void *iommu_data, 3137 struct notifier_block *nb) 3138 { 3139 struct vfio_iommu *iommu = iommu_data; 3140 3141 return blocking_notifier_chain_unregister(&iommu->notifier, nb); 3142 } 3143 3144 static int vfio_iommu_type1_dma_rw_chunk(struct vfio_iommu *iommu, 3145 dma_addr_t user_iova, void *data, 3146 size_t count, bool write, 3147 size_t *copied) 3148 { 3149 struct mm_struct *mm; 3150 unsigned long vaddr; 3151 struct vfio_dma *dma; 3152 bool kthread = current->mm == NULL; 3153 size_t offset; 3154 int ret; 3155 3156 *copied = 0; 3157 3158 ret = vfio_find_dma_valid(iommu, user_iova, 1, &dma); 3159 if (ret < 0) 3160 return ret; 3161 3162 if ((write && !(dma->prot & IOMMU_WRITE)) || 3163 !(dma->prot & IOMMU_READ)) 3164 return -EPERM; 3165 3166 mm = get_task_mm(dma->task); 3167 3168 if (!mm) 3169 return -EPERM; 3170 3171 if (kthread) 3172 kthread_use_mm(mm); 3173 else if (current->mm != mm) 3174 goto out; 3175 3176 offset = user_iova - dma->iova; 3177 3178 if (count > dma->size - offset) 3179 count = dma->size - offset; 3180 3181 vaddr = dma->vaddr + offset; 3182 3183 if (write) { 3184 *copied = copy_to_user((void __user *)vaddr, data, 3185 count) ? 0 : count; 3186 if (*copied && iommu->dirty_page_tracking) { 3187 unsigned long pgshift = __ffs(iommu->pgsize_bitmap); 3188 /* 3189 * Bitmap populated with the smallest supported page 3190 * size 3191 */ 3192 bitmap_set(dma->bitmap, offset >> pgshift, 3193 ((offset + *copied - 1) >> pgshift) - 3194 (offset >> pgshift) + 1); 3195 } 3196 } else 3197 *copied = copy_from_user(data, (void __user *)vaddr, 3198 count) ? 0 : count; 3199 if (kthread) 3200 kthread_unuse_mm(mm); 3201 out: 3202 mmput(mm); 3203 return *copied ? 
0 : -EFAULT; 3204 } 3205 3206 static int vfio_iommu_type1_dma_rw(void *iommu_data, dma_addr_t user_iova, 3207 void *data, size_t count, bool write) 3208 { 3209 struct vfio_iommu *iommu = iommu_data; 3210 int ret = 0; 3211 size_t done; 3212 3213 mutex_lock(&iommu->lock); 3214 while (count > 0) { 3215 ret = vfio_iommu_type1_dma_rw_chunk(iommu, user_iova, data, 3216 count, write, &done); 3217 if (ret) 3218 break; 3219 3220 count -= done; 3221 data += done; 3222 user_iova += done; 3223 } 3224 3225 mutex_unlock(&iommu->lock); 3226 return ret; 3227 } 3228 3229 static struct iommu_domain * 3230 vfio_iommu_type1_group_iommu_domain(void *iommu_data, 3231 struct iommu_group *iommu_group) 3232 { 3233 struct iommu_domain *domain = ERR_PTR(-ENODEV); 3234 struct vfio_iommu *iommu = iommu_data; 3235 struct vfio_domain *d; 3236 3237 if (!iommu || !iommu_group) 3238 return ERR_PTR(-EINVAL); 3239 3240 mutex_lock(&iommu->lock); 3241 list_for_each_entry(d, &iommu->domain_list, next) { 3242 if (find_iommu_group(d, iommu_group)) { 3243 domain = d->domain; 3244 break; 3245 } 3246 } 3247 mutex_unlock(&iommu->lock); 3248 3249 return domain; 3250 } 3251 3252 static void vfio_iommu_type1_notify(void *iommu_data, 3253 enum vfio_iommu_notify_type event) 3254 { 3255 struct vfio_iommu *iommu = iommu_data; 3256 3257 if (event != VFIO_IOMMU_CONTAINER_CLOSE) 3258 return; 3259 mutex_lock(&iommu->lock); 3260 iommu->container_open = false; 3261 mutex_unlock(&iommu->lock); 3262 wake_up_all(&iommu->vaddr_wait); 3263 } 3264 3265 static const struct vfio_iommu_driver_ops vfio_iommu_driver_ops_type1 = { 3266 .name = "vfio-iommu-type1", 3267 .owner = THIS_MODULE, 3268 .open = vfio_iommu_type1_open, 3269 .release = vfio_iommu_type1_release, 3270 .ioctl = vfio_iommu_type1_ioctl, 3271 .attach_group = vfio_iommu_type1_attach_group, 3272 .detach_group = vfio_iommu_type1_detach_group, 3273 .pin_pages = vfio_iommu_type1_pin_pages, 3274 .unpin_pages = vfio_iommu_type1_unpin_pages, 3275 .register_notifier = vfio_iommu_type1_register_notifier, 3276 .unregister_notifier = vfio_iommu_type1_unregister_notifier, 3277 .dma_rw = vfio_iommu_type1_dma_rw, 3278 .group_iommu_domain = vfio_iommu_type1_group_iommu_domain, 3279 .notify = vfio_iommu_type1_notify, 3280 }; 3281 3282 static int __init vfio_iommu_type1_init(void) 3283 { 3284 return vfio_register_iommu_driver(&vfio_iommu_driver_ops_type1); 3285 } 3286 3287 static void __exit vfio_iommu_type1_cleanup(void) 3288 { 3289 vfio_unregister_iommu_driver(&vfio_iommu_driver_ops_type1); 3290 } 3291 3292 module_init(vfio_iommu_type1_init); 3293 module_exit(vfio_iommu_type1_cleanup); 3294 3295 MODULE_VERSION(DRIVER_VERSION); 3296 MODULE_LICENSE("GPL v2"); 3297 MODULE_AUTHOR(DRIVER_AUTHOR); 3298 MODULE_DESCRIPTION(DRIVER_DESC); 3299
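
/*
 * Illustrative userspace usage of the uapi handled by the ioctls above.
 * This is a sketch only, not part of the driver: error handling is
 * omitted, "group_fd" and the page-aligned buffer "buf" are assumed to
 * exist, and the group must already be bound to the container via
 * VFIO_GROUP_SET_CONTAINER before VFIO_SET_IOMMU is issued.
 *
 *	int container = open("/dev/vfio/vfio", O_RDWR);
 *
 *	ioctl(group_fd, VFIO_GROUP_SET_CONTAINER, &container);
 *	ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1v2_IOMMU);
 *
 *	struct vfio_iommu_type1_dma_map map = {
 *		.argsz = sizeof(map),
 *		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
 *		.vaddr = (__u64)(uintptr_t)buf,
 *		.iova = 0x100000,
 *		.size = 0x100000,
 *	};
 *	ioctl(container, VFIO_IOMMU_MAP_DMA, &map);
 *
 *	struct vfio_iommu_type1_dma_unmap unmap = {
 *		.argsz = sizeof(unmap),
 *		.iova = 0x100000,
 *		.size = 0x100000,
 *	};
 *	ioctl(container, VFIO_IOMMU_UNMAP_DMA, &unmap);
 */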