// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO: IOMMU DMA mapping support for Type1 IOMMU
 *
 * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc. All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 *
 * We arbitrarily define a Type1 IOMMU as one matching the below code.
 * It could be called the x86 IOMMU as it's designed for AMD-Vi & Intel
 * VT-d, but that makes it harder to re-use as theoretically anyone
 * implementing a similar IOMMU could make use of this. We expect the
 * IOMMU to support the IOMMU API and have few to no restrictions around
 * the IOVA range that can be mapped. The Type1 IOMMU is currently
 * optimized for relatively static mappings of a userspace process with
 * userspace pages pinned into memory. We also assume devices and IOMMU
 * domains are PCI based as the IOMMU API is still centered around a
 * device/bus interface rather than a group interface.
 */

#include <linux/compat.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include "vfio.h"

#define DRIVER_VERSION  "0.2"
#define DRIVER_AUTHOR   "Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC     "Type1 IOMMU driver for VFIO"

static bool allow_unsafe_interrupts;
module_param_named(allow_unsafe_interrupts,
		   allow_unsafe_interrupts, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(allow_unsafe_interrupts,
		 "Enable VFIO IOMMU support on platforms without interrupt remapping support.");

static bool disable_hugepages;
module_param_named(disable_hugepages,
		   disable_hugepages, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(disable_hugepages,
		 "Disable VFIO IOMMU support for IOMMU hugepages.");

static unsigned int dma_entry_limit __read_mostly = U16_MAX;
module_param_named(dma_entry_limit, dma_entry_limit, uint, 0644);
MODULE_PARM_DESC(dma_entry_limit,
		 "Maximum number of user DMA mappings per container (65535).");

struct vfio_iommu {
	struct list_head	domain_list;
	struct list_head	iova_list;
	struct mutex		lock;
	struct rb_root		dma_list;
	struct list_head	device_list;
	struct mutex		device_list_lock;
	unsigned int		dma_avail;
	unsigned int		vaddr_invalid_count;
	uint64_t		pgsize_bitmap;
	uint64_t		num_non_pinned_groups;
	bool			v2;
	bool			nesting;
	bool			dirty_page_tracking;
	struct list_head	emulated_iommu_groups;
};

struct vfio_domain {
	struct iommu_domain	*domain;
	struct list_head	next;
	struct list_head	group_list;
	bool			fgsp : 1;	/* Fine-grained super pages */
	bool			enforce_cache_coherency : 1;
};

struct vfio_dma {
	struct rb_node		node;
	dma_addr_t		iova;		/* Device address */
	unsigned long		vaddr;		/* Process virtual addr */
	size_t			size;		/* Map size (bytes) */
	int			prot;		/* IOMMU_READ/WRITE */
	bool			iommu_mapped;
	bool			lock_cap;	/* capable(CAP_IPC_LOCK) */
	bool			vaddr_invalid;
	struct task_struct	*task;
	struct rb_root		pfn_list;	/* Ex-user pinned pfn list */
	unsigned long		*bitmap;
	struct mm_struct	*mm;
101 size_t locked_vm; 102 }; 103 104 struct vfio_batch { 105 struct page **pages; /* for pin_user_pages_remote */ 106 struct page *fallback_page; /* if pages alloc fails */ 107 int capacity; /* length of pages array */ 108 int size; /* of batch currently */ 109 int offset; /* of next entry in pages */ 110 }; 111 112 struct vfio_iommu_group { 113 struct iommu_group *iommu_group; 114 struct list_head next; 115 bool pinned_page_dirty_scope; 116 }; 117 118 struct vfio_iova { 119 struct list_head list; 120 dma_addr_t start; 121 dma_addr_t end; 122 }; 123 124 /* 125 * Guest RAM pinning working set or DMA target 126 */ 127 struct vfio_pfn { 128 struct rb_node node; 129 dma_addr_t iova; /* Device address */ 130 unsigned long pfn; /* Host pfn */ 131 unsigned int ref_count; 132 }; 133 134 struct vfio_regions { 135 struct list_head list; 136 dma_addr_t iova; 137 phys_addr_t phys; 138 size_t len; 139 }; 140 141 #define DIRTY_BITMAP_BYTES(n) (ALIGN(n, BITS_PER_TYPE(u64)) / BITS_PER_BYTE) 142 143 /* 144 * Input argument of number of bits to bitmap_set() is unsigned integer, which 145 * further casts to signed integer for unaligned multi-bit operation, 146 * __bitmap_set(). 147 * Then maximum bitmap size supported is 2^31 bits divided by 2^3 bits/byte, 148 * that is 2^28 (256 MB) which maps to 2^31 * 2^12 = 2^43 (8TB) on 4K page 149 * system. 150 */ 151 #define DIRTY_BITMAP_PAGES_MAX ((u64)INT_MAX) 152 #define DIRTY_BITMAP_SIZE_MAX DIRTY_BITMAP_BYTES(DIRTY_BITMAP_PAGES_MAX) 153 154 static int put_pfn(unsigned long pfn, int prot); 155 156 static struct vfio_iommu_group* 157 vfio_iommu_find_iommu_group(struct vfio_iommu *iommu, 158 struct iommu_group *iommu_group); 159 160 /* 161 * This code handles mapping and unmapping of user data buffers 162 * into DMA'ble space using the IOMMU 163 */ 164 165 static struct vfio_dma *vfio_find_dma(struct vfio_iommu *iommu, 166 dma_addr_t start, size_t size) 167 { 168 struct rb_node *node = iommu->dma_list.rb_node; 169 170 while (node) { 171 struct vfio_dma *dma = rb_entry(node, struct vfio_dma, node); 172 173 if (start + size <= dma->iova) 174 node = node->rb_left; 175 else if (start >= dma->iova + dma->size) 176 node = node->rb_right; 177 else 178 return dma; 179 } 180 181 return NULL; 182 } 183 184 static struct rb_node *vfio_find_dma_first_node(struct vfio_iommu *iommu, 185 dma_addr_t start, u64 size) 186 { 187 struct rb_node *res = NULL; 188 struct rb_node *node = iommu->dma_list.rb_node; 189 struct vfio_dma *dma_res = NULL; 190 191 while (node) { 192 struct vfio_dma *dma = rb_entry(node, struct vfio_dma, node); 193 194 if (start < dma->iova + dma->size) { 195 res = node; 196 dma_res = dma; 197 if (start >= dma->iova) 198 break; 199 node = node->rb_left; 200 } else { 201 node = node->rb_right; 202 } 203 } 204 if (res && size && dma_res->iova >= start + size) 205 res = NULL; 206 return res; 207 } 208 209 static void vfio_link_dma(struct vfio_iommu *iommu, struct vfio_dma *new) 210 { 211 struct rb_node **link = &iommu->dma_list.rb_node, *parent = NULL; 212 struct vfio_dma *dma; 213 214 while (*link) { 215 parent = *link; 216 dma = rb_entry(parent, struct vfio_dma, node); 217 218 if (new->iova + new->size <= dma->iova) 219 link = &(*link)->rb_left; 220 else 221 link = &(*link)->rb_right; 222 } 223 224 rb_link_node(&new->node, parent, link); 225 rb_insert_color(&new->node, &iommu->dma_list); 226 } 227 228 static void vfio_unlink_dma(struct vfio_iommu *iommu, struct vfio_dma *old) 229 { 230 rb_erase(&old->node, &iommu->dma_list); 231 } 232 233 234 static int 
vfio_dma_bitmap_alloc(struct vfio_dma *dma, size_t pgsize)
{
	uint64_t npages = dma->size / pgsize;

	if (npages > DIRTY_BITMAP_PAGES_MAX)
		return -EINVAL;

	/*
	 * Allocate extra 64 bits that are used to calculate shift required for
	 * bitmap_shift_left() to manipulate and club unaligned number of pages
	 * in adjacent vfio_dma ranges.
	 */
	dma->bitmap = kvzalloc(DIRTY_BITMAP_BYTES(npages) + sizeof(u64),
			       GFP_KERNEL);
	if (!dma->bitmap)
		return -ENOMEM;

	return 0;
}

static void vfio_dma_bitmap_free(struct vfio_dma *dma)
{
	kvfree(dma->bitmap);
	dma->bitmap = NULL;
}

static void vfio_dma_populate_bitmap(struct vfio_dma *dma, size_t pgsize)
{
	struct rb_node *p;
	unsigned long pgshift = __ffs(pgsize);

	for (p = rb_first(&dma->pfn_list); p; p = rb_next(p)) {
		struct vfio_pfn *vpfn = rb_entry(p, struct vfio_pfn, node);

		bitmap_set(dma->bitmap, (vpfn->iova - dma->iova) >> pgshift, 1);
	}
}

static void vfio_iommu_populate_bitmap_full(struct vfio_iommu *iommu)
{
	struct rb_node *n;
	unsigned long pgshift = __ffs(iommu->pgsize_bitmap);

	for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
		struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);

		bitmap_set(dma->bitmap, 0, dma->size >> pgshift);
	}
}

static int vfio_dma_bitmap_alloc_all(struct vfio_iommu *iommu, size_t pgsize)
{
	struct rb_node *n;

	for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
		struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
		int ret;

		ret = vfio_dma_bitmap_alloc(dma, pgsize);
		if (ret) {
			struct rb_node *p;

			/* Unwind: free the bitmaps allocated so far */
			for (p = rb_prev(n); p; p = rb_prev(p)) {
				struct vfio_dma *dma = rb_entry(p,
							struct vfio_dma, node);

				vfio_dma_bitmap_free(dma);
			}
			return ret;
		}
		vfio_dma_populate_bitmap(dma, pgsize);
	}
	return 0;
}

static void vfio_dma_bitmap_free_all(struct vfio_iommu *iommu)
{
	struct rb_node *n;

	for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
		struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);

		vfio_dma_bitmap_free(dma);
	}
}

/*
 * Helper Functions for host iova-pfn list
 */
static struct vfio_pfn *vfio_find_vpfn(struct vfio_dma *dma, dma_addr_t iova)
{
	struct vfio_pfn *vpfn;
	struct rb_node *node = dma->pfn_list.rb_node;

	while (node) {
		vpfn = rb_entry(node, struct vfio_pfn, node);

		if (iova < vpfn->iova)
			node = node->rb_left;
		else if (iova > vpfn->iova)
			node = node->rb_right;
		else
			return vpfn;
	}
	return NULL;
}

static void vfio_link_pfn(struct vfio_dma *dma,
			  struct vfio_pfn *new)
{
	struct rb_node **link, *parent = NULL;
	struct vfio_pfn *vpfn;

	link = &dma->pfn_list.rb_node;
	while (*link) {
		parent = *link;
		vpfn = rb_entry(parent, struct vfio_pfn, node);

		if (new->iova < vpfn->iova)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, &dma->pfn_list);
}

static void vfio_unlink_pfn(struct vfio_dma *dma, struct vfio_pfn *old)
{
	rb_erase(&old->node, &dma->pfn_list);
}

static int vfio_add_to_pfn_list(struct vfio_dma *dma, dma_addr_t iova,
				unsigned long pfn)
{
	struct vfio_pfn *vpfn;

	vpfn = kzalloc(sizeof(*vpfn), GFP_KERNEL);
	if
(!vpfn) 374 return -ENOMEM; 375 376 vpfn->iova = iova; 377 vpfn->pfn = pfn; 378 vpfn->ref_count = 1; 379 vfio_link_pfn(dma, vpfn); 380 return 0; 381 } 382 383 static void vfio_remove_from_pfn_list(struct vfio_dma *dma, 384 struct vfio_pfn *vpfn) 385 { 386 vfio_unlink_pfn(dma, vpfn); 387 kfree(vpfn); 388 } 389 390 static struct vfio_pfn *vfio_iova_get_vfio_pfn(struct vfio_dma *dma, 391 unsigned long iova) 392 { 393 struct vfio_pfn *vpfn = vfio_find_vpfn(dma, iova); 394 395 if (vpfn) 396 vpfn->ref_count++; 397 return vpfn; 398 } 399 400 static int vfio_iova_put_vfio_pfn(struct vfio_dma *dma, struct vfio_pfn *vpfn) 401 { 402 int ret = 0; 403 404 vpfn->ref_count--; 405 if (!vpfn->ref_count) { 406 ret = put_pfn(vpfn->pfn, dma->prot); 407 vfio_remove_from_pfn_list(dma, vpfn); 408 } 409 return ret; 410 } 411 412 static int mm_lock_acct(struct task_struct *task, struct mm_struct *mm, 413 bool lock_cap, long npage) 414 { 415 int ret = mmap_write_lock_killable(mm); 416 417 if (ret) 418 return ret; 419 420 ret = __account_locked_vm(mm, abs(npage), npage > 0, task, lock_cap); 421 mmap_write_unlock(mm); 422 return ret; 423 } 424 425 static int vfio_lock_acct(struct vfio_dma *dma, long npage, bool async) 426 { 427 struct mm_struct *mm; 428 int ret; 429 430 if (!npage) 431 return 0; 432 433 mm = dma->mm; 434 if (async && !mmget_not_zero(mm)) 435 return -ESRCH; /* process exited */ 436 437 ret = mm_lock_acct(dma->task, mm, dma->lock_cap, npage); 438 if (!ret) 439 dma->locked_vm += npage; 440 441 if (async) 442 mmput(mm); 443 444 return ret; 445 } 446 447 /* 448 * Some mappings aren't backed by a struct page, for example an mmap'd 449 * MMIO range for our own or another device. These use a different 450 * pfn conversion and shouldn't be tracked as locked pages. 451 * For compound pages, any driver that sets the reserved bit in head 452 * page needs to set the reserved bit in all subpages to be safe. 
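 *
 * A typical example (an illustrative note, based on the code in this file)
 * is a PCI BAR that userspace mapped with mmap(): the vma is VM_PFNMAP,
 * vaddr_get_pfns() resolves it through follow_fault_pfn() rather than
 * pin_user_pages_remote(), and the resulting pfn fails pfn_valid() (or is
 * PageReserved), so put_pfn() and the locked_vm accounting leave it alone.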
453 */ 454 static bool is_invalid_reserved_pfn(unsigned long pfn) 455 { 456 if (pfn_valid(pfn)) 457 return PageReserved(pfn_to_page(pfn)); 458 459 return true; 460 } 461 462 static int put_pfn(unsigned long pfn, int prot) 463 { 464 if (!is_invalid_reserved_pfn(pfn)) { 465 struct page *page = pfn_to_page(pfn); 466 467 unpin_user_pages_dirty_lock(&page, 1, prot & IOMMU_WRITE); 468 return 1; 469 } 470 return 0; 471 } 472 473 #define VFIO_BATCH_MAX_CAPACITY (PAGE_SIZE / sizeof(struct page *)) 474 475 static void vfio_batch_init(struct vfio_batch *batch) 476 { 477 batch->size = 0; 478 batch->offset = 0; 479 480 if (unlikely(disable_hugepages)) 481 goto fallback; 482 483 batch->pages = (struct page **) __get_free_page(GFP_KERNEL); 484 if (!batch->pages) 485 goto fallback; 486 487 batch->capacity = VFIO_BATCH_MAX_CAPACITY; 488 return; 489 490 fallback: 491 batch->pages = &batch->fallback_page; 492 batch->capacity = 1; 493 } 494 495 static void vfio_batch_unpin(struct vfio_batch *batch, struct vfio_dma *dma) 496 { 497 while (batch->size) { 498 unsigned long pfn = page_to_pfn(batch->pages[batch->offset]); 499 500 put_pfn(pfn, dma->prot); 501 batch->offset++; 502 batch->size--; 503 } 504 } 505 506 static void vfio_batch_fini(struct vfio_batch *batch) 507 { 508 if (batch->capacity == VFIO_BATCH_MAX_CAPACITY) 509 free_page((unsigned long)batch->pages); 510 } 511 512 static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm, 513 unsigned long vaddr, unsigned long *pfn, 514 bool write_fault) 515 { 516 pte_t *ptep; 517 spinlock_t *ptl; 518 int ret; 519 520 ret = follow_pte(vma->vm_mm, vaddr, &ptep, &ptl); 521 if (ret) { 522 bool unlocked = false; 523 524 ret = fixup_user_fault(mm, vaddr, 525 FAULT_FLAG_REMOTE | 526 (write_fault ? FAULT_FLAG_WRITE : 0), 527 &unlocked); 528 if (unlocked) 529 return -EAGAIN; 530 531 if (ret) 532 return ret; 533 534 ret = follow_pte(vma->vm_mm, vaddr, &ptep, &ptl); 535 if (ret) 536 return ret; 537 } 538 539 if (write_fault && !pte_write(*ptep)) 540 ret = -EFAULT; 541 else 542 *pfn = pte_pfn(*ptep); 543 544 pte_unmap_unlock(ptep, ptl); 545 return ret; 546 } 547 548 /* 549 * Returns the positive number of pfns successfully obtained or a negative 550 * error code. 551 */ 552 static int vaddr_get_pfns(struct mm_struct *mm, unsigned long vaddr, 553 long npages, int prot, unsigned long *pfn, 554 struct page **pages) 555 { 556 struct vm_area_struct *vma; 557 unsigned int flags = 0; 558 int ret; 559 560 if (prot & IOMMU_WRITE) 561 flags |= FOLL_WRITE; 562 563 mmap_read_lock(mm); 564 ret = pin_user_pages_remote(mm, vaddr, npages, flags | FOLL_LONGTERM, 565 pages, NULL, NULL); 566 if (ret > 0) { 567 int i; 568 569 /* 570 * The zero page is always resident, we don't need to pin it 571 * and it falls into our invalid/reserved test so we don't 572 * unpin in put_pfn(). Unpin all zero pages in the batch here. 573 */ 574 for (i = 0 ; i < ret; i++) { 575 if (unlikely(is_zero_pfn(page_to_pfn(pages[i])))) 576 unpin_user_page(pages[i]); 577 } 578 579 *pfn = page_to_pfn(pages[0]); 580 goto done; 581 } 582 583 vaddr = untagged_addr(vaddr); 584 585 retry: 586 vma = vma_lookup(mm, vaddr); 587 588 if (vma && vma->vm_flags & VM_PFNMAP) { 589 ret = follow_fault_pfn(vma, mm, vaddr, pfn, prot & IOMMU_WRITE); 590 if (ret == -EAGAIN) 591 goto retry; 592 593 if (!ret) { 594 if (is_invalid_reserved_pfn(*pfn)) 595 ret = 1; 596 else 597 ret = -EFAULT; 598 } 599 } 600 done: 601 mmap_read_unlock(mm); 602 return ret; 603 } 604 605 /* 606 * Attempt to pin pages. 
We really don't want to track all the pfns and 607 * the iommu can only map chunks of consecutive pfns anyway, so get the 608 * first page and all consecutive pages with the same locking. 609 */ 610 static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, 611 long npage, unsigned long *pfn_base, 612 unsigned long limit, struct vfio_batch *batch) 613 { 614 unsigned long pfn; 615 struct mm_struct *mm = current->mm; 616 long ret, pinned = 0, lock_acct = 0; 617 bool rsvd; 618 dma_addr_t iova = vaddr - dma->vaddr + dma->iova; 619 620 /* This code path is only user initiated */ 621 if (!mm) 622 return -ENODEV; 623 624 if (batch->size) { 625 /* Leftover pages in batch from an earlier call. */ 626 *pfn_base = page_to_pfn(batch->pages[batch->offset]); 627 pfn = *pfn_base; 628 rsvd = is_invalid_reserved_pfn(*pfn_base); 629 } else { 630 *pfn_base = 0; 631 } 632 633 while (npage) { 634 if (!batch->size) { 635 /* Empty batch, so refill it. */ 636 long req_pages = min_t(long, npage, batch->capacity); 637 638 ret = vaddr_get_pfns(mm, vaddr, req_pages, dma->prot, 639 &pfn, batch->pages); 640 if (ret < 0) 641 goto unpin_out; 642 643 batch->size = ret; 644 batch->offset = 0; 645 646 if (!*pfn_base) { 647 *pfn_base = pfn; 648 rsvd = is_invalid_reserved_pfn(*pfn_base); 649 } 650 } 651 652 /* 653 * pfn is preset for the first iteration of this inner loop and 654 * updated at the end to handle a VM_PFNMAP pfn. In that case, 655 * batch->pages isn't valid (there's no struct page), so allow 656 * batch->pages to be touched only when there's more than one 657 * pfn to check, which guarantees the pfns are from a 658 * !VM_PFNMAP vma. 659 */ 660 while (true) { 661 if (pfn != *pfn_base + pinned || 662 rsvd != is_invalid_reserved_pfn(pfn)) 663 goto out; 664 665 /* 666 * Reserved pages aren't counted against the user, 667 * externally pinned pages are already counted against 668 * the user. 669 */ 670 if (!rsvd && !vfio_find_vpfn(dma, iova)) { 671 if (!dma->lock_cap && 672 mm->locked_vm + lock_acct + 1 > limit) { 673 pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", 674 __func__, limit << PAGE_SHIFT); 675 ret = -ENOMEM; 676 goto unpin_out; 677 } 678 lock_acct++; 679 } 680 681 pinned++; 682 npage--; 683 vaddr += PAGE_SIZE; 684 iova += PAGE_SIZE; 685 batch->offset++; 686 batch->size--; 687 688 if (!batch->size) 689 break; 690 691 pfn = page_to_pfn(batch->pages[batch->offset]); 692 } 693 694 if (unlikely(disable_hugepages)) 695 break; 696 } 697 698 out: 699 ret = vfio_lock_acct(dma, lock_acct, false); 700 701 unpin_out: 702 if (batch->size == 1 && !batch->offset) { 703 /* May be a VM_PFNMAP pfn, which the batch can't remember. 
*/ 704 put_pfn(pfn, dma->prot); 705 batch->size = 0; 706 } 707 708 if (ret < 0) { 709 if (pinned && !rsvd) { 710 for (pfn = *pfn_base ; pinned ; pfn++, pinned--) 711 put_pfn(pfn, dma->prot); 712 } 713 vfio_batch_unpin(batch, dma); 714 715 return ret; 716 } 717 718 return pinned; 719 } 720 721 static long vfio_unpin_pages_remote(struct vfio_dma *dma, dma_addr_t iova, 722 unsigned long pfn, long npage, 723 bool do_accounting) 724 { 725 long unlocked = 0, locked = 0; 726 long i; 727 728 for (i = 0; i < npage; i++, iova += PAGE_SIZE) { 729 if (put_pfn(pfn++, dma->prot)) { 730 unlocked++; 731 if (vfio_find_vpfn(dma, iova)) 732 locked++; 733 } 734 } 735 736 if (do_accounting) 737 vfio_lock_acct(dma, locked - unlocked, true); 738 739 return unlocked; 740 } 741 742 static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr, 743 unsigned long *pfn_base, bool do_accounting) 744 { 745 struct page *pages[1]; 746 struct mm_struct *mm; 747 int ret; 748 749 mm = dma->mm; 750 if (!mmget_not_zero(mm)) 751 return -ENODEV; 752 753 ret = vaddr_get_pfns(mm, vaddr, 1, dma->prot, pfn_base, pages); 754 if (ret != 1) 755 goto out; 756 757 ret = 0; 758 759 if (do_accounting && !is_invalid_reserved_pfn(*pfn_base)) { 760 ret = vfio_lock_acct(dma, 1, false); 761 if (ret) { 762 put_pfn(*pfn_base, dma->prot); 763 if (ret == -ENOMEM) 764 pr_warn("%s: Task %s (%d) RLIMIT_MEMLOCK " 765 "(%ld) exceeded\n", __func__, 766 dma->task->comm, task_pid_nr(dma->task), 767 task_rlimit(dma->task, RLIMIT_MEMLOCK)); 768 } 769 } 770 771 out: 772 mmput(mm); 773 return ret; 774 } 775 776 static int vfio_unpin_page_external(struct vfio_dma *dma, dma_addr_t iova, 777 bool do_accounting) 778 { 779 int unlocked; 780 struct vfio_pfn *vpfn = vfio_find_vpfn(dma, iova); 781 782 if (!vpfn) 783 return 0; 784 785 unlocked = vfio_iova_put_vfio_pfn(dma, vpfn); 786 787 if (do_accounting) 788 vfio_lock_acct(dma, -unlocked, true); 789 790 return unlocked; 791 } 792 793 static int vfio_iommu_type1_pin_pages(void *iommu_data, 794 struct iommu_group *iommu_group, 795 dma_addr_t user_iova, 796 int npage, int prot, 797 struct page **pages) 798 { 799 struct vfio_iommu *iommu = iommu_data; 800 struct vfio_iommu_group *group; 801 int i, j, ret; 802 unsigned long remote_vaddr; 803 struct vfio_dma *dma; 804 bool do_accounting; 805 806 if (!iommu || !pages) 807 return -EINVAL; 808 809 /* Supported for v2 version only */ 810 if (!iommu->v2) 811 return -EACCES; 812 813 mutex_lock(&iommu->lock); 814 815 if (WARN_ONCE(iommu->vaddr_invalid_count, 816 "vfio_pin_pages not allowed with VFIO_UPDATE_VADDR\n")) { 817 ret = -EBUSY; 818 goto pin_done; 819 } 820 821 /* Fail if no dma_umap notifier is registered */ 822 if (list_empty(&iommu->device_list)) { 823 ret = -EINVAL; 824 goto pin_done; 825 } 826 827 /* 828 * If iommu capable domain exist in the container then all pages are 829 * already pinned and accounted. Accounting should be done if there is no 830 * iommu capable domain in the container. 
831 */ 832 do_accounting = list_empty(&iommu->domain_list); 833 834 for (i = 0; i < npage; i++) { 835 unsigned long phys_pfn; 836 dma_addr_t iova; 837 struct vfio_pfn *vpfn; 838 839 iova = user_iova + PAGE_SIZE * i; 840 dma = vfio_find_dma(iommu, iova, PAGE_SIZE); 841 if (!dma) { 842 ret = -EINVAL; 843 goto pin_unwind; 844 } 845 846 if ((dma->prot & prot) != prot) { 847 ret = -EPERM; 848 goto pin_unwind; 849 } 850 851 vpfn = vfio_iova_get_vfio_pfn(dma, iova); 852 if (vpfn) { 853 pages[i] = pfn_to_page(vpfn->pfn); 854 continue; 855 } 856 857 remote_vaddr = dma->vaddr + (iova - dma->iova); 858 ret = vfio_pin_page_external(dma, remote_vaddr, &phys_pfn, 859 do_accounting); 860 if (ret) 861 goto pin_unwind; 862 863 ret = vfio_add_to_pfn_list(dma, iova, phys_pfn); 864 if (ret) { 865 if (put_pfn(phys_pfn, dma->prot) && do_accounting) 866 vfio_lock_acct(dma, -1, true); 867 goto pin_unwind; 868 } 869 870 pages[i] = pfn_to_page(phys_pfn); 871 872 if (iommu->dirty_page_tracking) { 873 unsigned long pgshift = __ffs(iommu->pgsize_bitmap); 874 875 /* 876 * Bitmap populated with the smallest supported page 877 * size 878 */ 879 bitmap_set(dma->bitmap, 880 (iova - dma->iova) >> pgshift, 1); 881 } 882 } 883 ret = i; 884 885 group = vfio_iommu_find_iommu_group(iommu, iommu_group); 886 if (!group->pinned_page_dirty_scope) { 887 group->pinned_page_dirty_scope = true; 888 iommu->num_non_pinned_groups--; 889 } 890 891 goto pin_done; 892 893 pin_unwind: 894 pages[i] = NULL; 895 for (j = 0; j < i; j++) { 896 dma_addr_t iova; 897 898 iova = user_iova + PAGE_SIZE * j; 899 dma = vfio_find_dma(iommu, iova, PAGE_SIZE); 900 vfio_unpin_page_external(dma, iova, do_accounting); 901 pages[j] = NULL; 902 } 903 pin_done: 904 mutex_unlock(&iommu->lock); 905 return ret; 906 } 907 908 static void vfio_iommu_type1_unpin_pages(void *iommu_data, 909 dma_addr_t user_iova, int npage) 910 { 911 struct vfio_iommu *iommu = iommu_data; 912 bool do_accounting; 913 int i; 914 915 /* Supported for v2 version only */ 916 if (WARN_ON(!iommu->v2)) 917 return; 918 919 mutex_lock(&iommu->lock); 920 921 do_accounting = list_empty(&iommu->domain_list); 922 for (i = 0; i < npage; i++) { 923 dma_addr_t iova = user_iova + PAGE_SIZE * i; 924 struct vfio_dma *dma; 925 926 dma = vfio_find_dma(iommu, iova, PAGE_SIZE); 927 if (!dma) 928 break; 929 930 vfio_unpin_page_external(dma, iova, do_accounting); 931 } 932 933 mutex_unlock(&iommu->lock); 934 935 WARN_ON(i != npage); 936 } 937 938 static long vfio_sync_unpin(struct vfio_dma *dma, struct vfio_domain *domain, 939 struct list_head *regions, 940 struct iommu_iotlb_gather *iotlb_gather) 941 { 942 long unlocked = 0; 943 struct vfio_regions *entry, *next; 944 945 iommu_iotlb_sync(domain->domain, iotlb_gather); 946 947 list_for_each_entry_safe(entry, next, regions, list) { 948 unlocked += vfio_unpin_pages_remote(dma, 949 entry->iova, 950 entry->phys >> PAGE_SHIFT, 951 entry->len >> PAGE_SHIFT, 952 false); 953 list_del(&entry->list); 954 kfree(entry); 955 } 956 957 cond_resched(); 958 959 return unlocked; 960 } 961 962 /* 963 * Generally, VFIO needs to unpin remote pages after each IOTLB flush. 964 * Therefore, when using IOTLB flush sync interface, VFIO need to keep track 965 * of these regions (currently using a list). 966 * 967 * This value specifies maximum number of regions for each IOTLB flush sync. 
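 *
 * The exact value is a batching trade-off (an explanatory note, not a
 * hard requirement): a larger batch amortizes the iommu_iotlb_sync() in
 * vfio_sync_unpin() over more unmapped ranges, but also keeps more
 * vfio_regions entries, and therefore more still-pinned pages, queued up
 * until the next sync.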
968 */ 969 #define VFIO_IOMMU_TLB_SYNC_MAX 512 970 971 static size_t unmap_unpin_fast(struct vfio_domain *domain, 972 struct vfio_dma *dma, dma_addr_t *iova, 973 size_t len, phys_addr_t phys, long *unlocked, 974 struct list_head *unmapped_list, 975 int *unmapped_cnt, 976 struct iommu_iotlb_gather *iotlb_gather) 977 { 978 size_t unmapped = 0; 979 struct vfio_regions *entry = kzalloc(sizeof(*entry), GFP_KERNEL); 980 981 if (entry) { 982 unmapped = iommu_unmap_fast(domain->domain, *iova, len, 983 iotlb_gather); 984 985 if (!unmapped) { 986 kfree(entry); 987 } else { 988 entry->iova = *iova; 989 entry->phys = phys; 990 entry->len = unmapped; 991 list_add_tail(&entry->list, unmapped_list); 992 993 *iova += unmapped; 994 (*unmapped_cnt)++; 995 } 996 } 997 998 /* 999 * Sync if the number of fast-unmap regions hits the limit 1000 * or in case of errors. 1001 */ 1002 if (*unmapped_cnt >= VFIO_IOMMU_TLB_SYNC_MAX || !unmapped) { 1003 *unlocked += vfio_sync_unpin(dma, domain, unmapped_list, 1004 iotlb_gather); 1005 *unmapped_cnt = 0; 1006 } 1007 1008 return unmapped; 1009 } 1010 1011 static size_t unmap_unpin_slow(struct vfio_domain *domain, 1012 struct vfio_dma *dma, dma_addr_t *iova, 1013 size_t len, phys_addr_t phys, 1014 long *unlocked) 1015 { 1016 size_t unmapped = iommu_unmap(domain->domain, *iova, len); 1017 1018 if (unmapped) { 1019 *unlocked += vfio_unpin_pages_remote(dma, *iova, 1020 phys >> PAGE_SHIFT, 1021 unmapped >> PAGE_SHIFT, 1022 false); 1023 *iova += unmapped; 1024 cond_resched(); 1025 } 1026 return unmapped; 1027 } 1028 1029 static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma, 1030 bool do_accounting) 1031 { 1032 dma_addr_t iova = dma->iova, end = dma->iova + dma->size; 1033 struct vfio_domain *domain, *d; 1034 LIST_HEAD(unmapped_region_list); 1035 struct iommu_iotlb_gather iotlb_gather; 1036 int unmapped_region_cnt = 0; 1037 long unlocked = 0; 1038 1039 if (!dma->size) 1040 return 0; 1041 1042 if (list_empty(&iommu->domain_list)) 1043 return 0; 1044 1045 /* 1046 * We use the IOMMU to track the physical addresses, otherwise we'd 1047 * need a much more complicated tracking system. Unfortunately that 1048 * means we need to use one of the iommu domains to figure out the 1049 * pfns to unpin. The rest need to be unmapped in advance so we have 1050 * no iommu translations remaining when the pages are unpinned. 1051 */ 1052 domain = d = list_first_entry(&iommu->domain_list, 1053 struct vfio_domain, next); 1054 1055 list_for_each_entry_continue(d, &iommu->domain_list, next) { 1056 iommu_unmap(d->domain, dma->iova, dma->size); 1057 cond_resched(); 1058 } 1059 1060 iommu_iotlb_gather_init(&iotlb_gather); 1061 while (iova < end) { 1062 size_t unmapped, len; 1063 phys_addr_t phys, next; 1064 1065 phys = iommu_iova_to_phys(domain->domain, iova); 1066 if (WARN_ON(!phys)) { 1067 iova += PAGE_SIZE; 1068 continue; 1069 } 1070 1071 /* 1072 * To optimize for fewer iommu_unmap() calls, each of which 1073 * may require hardware cache flushing, try to find the 1074 * largest contiguous physical memory chunk to unmap. 1075 */ 1076 for (len = PAGE_SIZE; 1077 !domain->fgsp && iova + len < end; len += PAGE_SIZE) { 1078 next = iommu_iova_to_phys(domain->domain, iova + len); 1079 if (next != phys + len) 1080 break; 1081 } 1082 1083 /* 1084 * First, try to use fast unmap/unpin. In case of failure, 1085 * switch to slow unmap/unpin path. 
1086 */ 1087 unmapped = unmap_unpin_fast(domain, dma, &iova, len, phys, 1088 &unlocked, &unmapped_region_list, 1089 &unmapped_region_cnt, 1090 &iotlb_gather); 1091 if (!unmapped) { 1092 unmapped = unmap_unpin_slow(domain, dma, &iova, len, 1093 phys, &unlocked); 1094 if (WARN_ON(!unmapped)) 1095 break; 1096 } 1097 } 1098 1099 dma->iommu_mapped = false; 1100 1101 if (unmapped_region_cnt) { 1102 unlocked += vfio_sync_unpin(dma, domain, &unmapped_region_list, 1103 &iotlb_gather); 1104 } 1105 1106 if (do_accounting) { 1107 vfio_lock_acct(dma, -unlocked, true); 1108 return 0; 1109 } 1110 return unlocked; 1111 } 1112 1113 static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma) 1114 { 1115 WARN_ON(!RB_EMPTY_ROOT(&dma->pfn_list)); 1116 vfio_unmap_unpin(iommu, dma, true); 1117 vfio_unlink_dma(iommu, dma); 1118 put_task_struct(dma->task); 1119 mmdrop(dma->mm); 1120 vfio_dma_bitmap_free(dma); 1121 if (dma->vaddr_invalid) 1122 iommu->vaddr_invalid_count--; 1123 kfree(dma); 1124 iommu->dma_avail++; 1125 } 1126 1127 static void vfio_update_pgsize_bitmap(struct vfio_iommu *iommu) 1128 { 1129 struct vfio_domain *domain; 1130 1131 iommu->pgsize_bitmap = ULONG_MAX; 1132 1133 list_for_each_entry(domain, &iommu->domain_list, next) 1134 iommu->pgsize_bitmap &= domain->domain->pgsize_bitmap; 1135 1136 /* 1137 * In case the IOMMU supports page sizes smaller than PAGE_SIZE 1138 * we pretend PAGE_SIZE is supported and hide sub-PAGE_SIZE sizes. 1139 * That way the user will be able to map/unmap buffers whose size/ 1140 * start address is aligned with PAGE_SIZE. Pinning code uses that 1141 * granularity while iommu driver can use the sub-PAGE_SIZE size 1142 * to map the buffer. 1143 */ 1144 if (iommu->pgsize_bitmap & ~PAGE_MASK) { 1145 iommu->pgsize_bitmap &= PAGE_MASK; 1146 iommu->pgsize_bitmap |= PAGE_SIZE; 1147 } 1148 } 1149 1150 static int update_user_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu, 1151 struct vfio_dma *dma, dma_addr_t base_iova, 1152 size_t pgsize) 1153 { 1154 unsigned long pgshift = __ffs(pgsize); 1155 unsigned long nbits = dma->size >> pgshift; 1156 unsigned long bit_offset = (dma->iova - base_iova) >> pgshift; 1157 unsigned long copy_offset = bit_offset / BITS_PER_LONG; 1158 unsigned long shift = bit_offset % BITS_PER_LONG; 1159 unsigned long leftover; 1160 1161 /* 1162 * mark all pages dirty if any IOMMU capable device is not able 1163 * to report dirty pages and all pages are pinned and mapped. 1164 */ 1165 if (iommu->num_non_pinned_groups && dma->iommu_mapped) 1166 bitmap_set(dma->bitmap, 0, nbits); 1167 1168 if (shift) { 1169 bitmap_shift_left(dma->bitmap, dma->bitmap, shift, 1170 nbits + shift); 1171 1172 if (copy_from_user(&leftover, 1173 (void __user *)(bitmap + copy_offset), 1174 sizeof(leftover))) 1175 return -EFAULT; 1176 1177 bitmap_or(dma->bitmap, dma->bitmap, &leftover, shift); 1178 } 1179 1180 if (copy_to_user((void __user *)(bitmap + copy_offset), dma->bitmap, 1181 DIRTY_BITMAP_BYTES(nbits + shift))) 1182 return -EFAULT; 1183 1184 return 0; 1185 } 1186 1187 static int vfio_iova_dirty_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu, 1188 dma_addr_t iova, size_t size, size_t pgsize) 1189 { 1190 struct vfio_dma *dma; 1191 struct rb_node *n; 1192 unsigned long pgshift = __ffs(pgsize); 1193 int ret; 1194 1195 /* 1196 * GET_BITMAP request must fully cover vfio_dma mappings. Multiple 1197 * vfio_dma mappings may be clubbed by specifying large ranges, but 1198 * there must not be any previous mappings bisected by the range. 
1199 * An error will be returned if these conditions are not met. 1200 */ 1201 dma = vfio_find_dma(iommu, iova, 1); 1202 if (dma && dma->iova != iova) 1203 return -EINVAL; 1204 1205 dma = vfio_find_dma(iommu, iova + size - 1, 0); 1206 if (dma && dma->iova + dma->size != iova + size) 1207 return -EINVAL; 1208 1209 for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) { 1210 struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node); 1211 1212 if (dma->iova < iova) 1213 continue; 1214 1215 if (dma->iova > iova + size - 1) 1216 break; 1217 1218 ret = update_user_bitmap(bitmap, iommu, dma, iova, pgsize); 1219 if (ret) 1220 return ret; 1221 1222 /* 1223 * Re-populate bitmap to include all pinned pages which are 1224 * considered as dirty but exclude pages which are unpinned and 1225 * pages which are marked dirty by vfio_dma_rw() 1226 */ 1227 bitmap_clear(dma->bitmap, 0, dma->size >> pgshift); 1228 vfio_dma_populate_bitmap(dma, pgsize); 1229 } 1230 return 0; 1231 } 1232 1233 static int verify_bitmap_size(uint64_t npages, uint64_t bitmap_size) 1234 { 1235 if (!npages || !bitmap_size || (bitmap_size > DIRTY_BITMAP_SIZE_MAX) || 1236 (bitmap_size < DIRTY_BITMAP_BYTES(npages))) 1237 return -EINVAL; 1238 1239 return 0; 1240 } 1241 1242 /* 1243 * Notify VFIO drivers using vfio_register_emulated_iommu_dev() to invalidate 1244 * and unmap iovas within the range we're about to unmap. Drivers MUST unpin 1245 * pages in response to an invalidation. 1246 */ 1247 static void vfio_notify_dma_unmap(struct vfio_iommu *iommu, 1248 struct vfio_dma *dma) 1249 { 1250 struct vfio_device *device; 1251 1252 if (list_empty(&iommu->device_list)) 1253 return; 1254 1255 /* 1256 * The device is expected to call vfio_unpin_pages() for any IOVA it has 1257 * pinned within the range. Since vfio_unpin_pages() will eventually 1258 * call back down to this code and try to obtain the iommu->lock we must 1259 * drop it. 1260 */ 1261 mutex_lock(&iommu->device_list_lock); 1262 mutex_unlock(&iommu->lock); 1263 1264 list_for_each_entry(device, &iommu->device_list, iommu_entry) 1265 device->ops->dma_unmap(device, dma->iova, dma->size); 1266 1267 mutex_unlock(&iommu->device_list_lock); 1268 mutex_lock(&iommu->lock); 1269 } 1270 1271 static int vfio_dma_do_unmap(struct vfio_iommu *iommu, 1272 struct vfio_iommu_type1_dma_unmap *unmap, 1273 struct vfio_bitmap *bitmap) 1274 { 1275 struct vfio_dma *dma, *dma_last = NULL; 1276 size_t unmapped = 0, pgsize; 1277 int ret = -EINVAL, retries = 0; 1278 unsigned long pgshift; 1279 dma_addr_t iova = unmap->iova; 1280 u64 size = unmap->size; 1281 bool unmap_all = unmap->flags & VFIO_DMA_UNMAP_FLAG_ALL; 1282 bool invalidate_vaddr = unmap->flags & VFIO_DMA_UNMAP_FLAG_VADDR; 1283 struct rb_node *n, *first_n; 1284 1285 mutex_lock(&iommu->lock); 1286 1287 /* Cannot update vaddr if mdev is present. 
 */
	if (invalidate_vaddr && !list_empty(&iommu->emulated_iommu_groups)) {
		ret = -EBUSY;
		goto unlock;
	}

	pgshift = __ffs(iommu->pgsize_bitmap);
	pgsize = (size_t)1 << pgshift;

	if (iova & (pgsize - 1))
		goto unlock;

	if (unmap_all) {
		if (iova || size)
			goto unlock;
		size = U64_MAX;
	} else if (!size || size & (pgsize - 1) ||
		   iova + size - 1 < iova || size > SIZE_MAX) {
		goto unlock;
	}

	/* When dirty tracking is enabled, allow only min supported pgsize */
	if ((unmap->flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) &&
	    (!iommu->dirty_page_tracking || (bitmap->pgsize != pgsize))) {
		goto unlock;
	}

	WARN_ON((pgsize - 1) & PAGE_MASK);
again:
	/*
	 * vfio-iommu-type1 (v1) - User mappings were coalesced together to
	 * avoid tracking individual mappings. This means that the granularity
	 * of the original mapping was lost and the user was allowed to attempt
	 * to unmap any range. Depending on the contiguousness of physical
	 * memory and page sizes supported by the IOMMU, arbitrary unmaps may
	 * or may not have worked. We only guaranteed unmap granularity
	 * matching the original mapping; even though it was untracked here,
	 * the original mappings are reflected in IOMMU mappings. This
	 * resulted in a couple of unusual behaviors. First, if a range is not
	 * able to be unmapped, e.g. a set of 4k pages that was mapped as a
	 * 2M hugepage into the IOMMU, the unmap ioctl returns success but with
	 * a zero sized unmap. Also, if an unmap request overlaps the first
	 * address of a hugepage, the IOMMU will unmap the entire hugepage.
	 * This also returns success and the returned unmap size reflects the
	 * actual size unmapped.
	 *
	 * We attempt to maintain compatibility with this "v1" interface, but
	 * we take control out of the hands of the IOMMU. Therefore, an unmap
	 * request offset from the beginning of the original mapping will
	 * return success with zero sized unmap. And an unmap request covering
	 * the first iova of a mapping will unmap the entire range.
	 *
	 * The v2 version of this interface intends to be more deterministic.
	 * Unmap requests must fully cover previous mappings. Multiple
	 * mappings may still be unmapped by specifying large ranges, but there
	 * must not be any previous mappings bisected by the range. An error
	 * will be returned if these conditions are not met. The v2 interface
	 * will only return success and a size of zero if there were no
	 * mappings within the range.
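 *
 * As a rough userspace illustration of the v2 contract (a sketch only, not
 * part of this driver; the structure and ioctl come from the VFIO uapi in
 * <linux/vfio.h>, while container_fd, map_iova and map_size are placeholder
 * names for whatever was used at VFIO_IOMMU_MAP_DMA time):
 *
 *	struct vfio_iommu_type1_dma_unmap unmap = {
 *		.argsz = sizeof(unmap),
 *		.iova  = map_iova,
 *		.size  = map_size,
 *	};
 *	ioctl(container_fd, VFIO_IOMMU_UNMAP_DMA, &unmap);
 *
 * Unmapping only part of that mapping would bisect it and is rejected under
 * v2, whereas v1 might have unmapped more or less than requested.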
1346 */ 1347 if (iommu->v2 && !unmap_all) { 1348 dma = vfio_find_dma(iommu, iova, 1); 1349 if (dma && dma->iova != iova) 1350 goto unlock; 1351 1352 dma = vfio_find_dma(iommu, iova + size - 1, 0); 1353 if (dma && dma->iova + dma->size != iova + size) 1354 goto unlock; 1355 } 1356 1357 ret = 0; 1358 n = first_n = vfio_find_dma_first_node(iommu, iova, size); 1359 1360 while (n) { 1361 dma = rb_entry(n, struct vfio_dma, node); 1362 if (dma->iova >= iova + size) 1363 break; 1364 1365 if (!iommu->v2 && iova > dma->iova) 1366 break; 1367 1368 if (invalidate_vaddr) { 1369 if (dma->vaddr_invalid) { 1370 struct rb_node *last_n = n; 1371 1372 for (n = first_n; n != last_n; n = rb_next(n)) { 1373 dma = rb_entry(n, 1374 struct vfio_dma, node); 1375 dma->vaddr_invalid = false; 1376 iommu->vaddr_invalid_count--; 1377 } 1378 ret = -EINVAL; 1379 unmapped = 0; 1380 break; 1381 } 1382 dma->vaddr_invalid = true; 1383 iommu->vaddr_invalid_count++; 1384 unmapped += dma->size; 1385 n = rb_next(n); 1386 continue; 1387 } 1388 1389 if (!RB_EMPTY_ROOT(&dma->pfn_list)) { 1390 if (dma_last == dma) { 1391 BUG_ON(++retries > 10); 1392 } else { 1393 dma_last = dma; 1394 retries = 0; 1395 } 1396 1397 vfio_notify_dma_unmap(iommu, dma); 1398 goto again; 1399 } 1400 1401 if (unmap->flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) { 1402 ret = update_user_bitmap(bitmap->data, iommu, dma, 1403 iova, pgsize); 1404 if (ret) 1405 break; 1406 } 1407 1408 unmapped += dma->size; 1409 n = rb_next(n); 1410 vfio_remove_dma(iommu, dma); 1411 } 1412 1413 unlock: 1414 mutex_unlock(&iommu->lock); 1415 1416 /* Report how much was unmapped */ 1417 unmap->size = unmapped; 1418 1419 return ret; 1420 } 1421 1422 static int vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova, 1423 unsigned long pfn, long npage, int prot) 1424 { 1425 struct vfio_domain *d; 1426 int ret; 1427 1428 list_for_each_entry(d, &iommu->domain_list, next) { 1429 ret = iommu_map(d->domain, iova, (phys_addr_t)pfn << PAGE_SHIFT, 1430 npage << PAGE_SHIFT, prot | IOMMU_CACHE, 1431 GFP_KERNEL); 1432 if (ret) 1433 goto unwind; 1434 1435 cond_resched(); 1436 } 1437 1438 return 0; 1439 1440 unwind: 1441 list_for_each_entry_continue_reverse(d, &iommu->domain_list, next) { 1442 iommu_unmap(d->domain, iova, npage << PAGE_SHIFT); 1443 cond_resched(); 1444 } 1445 1446 return ret; 1447 } 1448 1449 static int vfio_pin_map_dma(struct vfio_iommu *iommu, struct vfio_dma *dma, 1450 size_t map_size) 1451 { 1452 dma_addr_t iova = dma->iova; 1453 unsigned long vaddr = dma->vaddr; 1454 struct vfio_batch batch; 1455 size_t size = map_size; 1456 long npage; 1457 unsigned long pfn, limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; 1458 int ret = 0; 1459 1460 vfio_batch_init(&batch); 1461 1462 while (size) { 1463 /* Pin a contiguous chunk of memory */ 1464 npage = vfio_pin_pages_remote(dma, vaddr + dma->size, 1465 size >> PAGE_SHIFT, &pfn, limit, 1466 &batch); 1467 if (npage <= 0) { 1468 WARN_ON(!npage); 1469 ret = (int)npage; 1470 break; 1471 } 1472 1473 /* Map it! 
*/ 1474 ret = vfio_iommu_map(iommu, iova + dma->size, pfn, npage, 1475 dma->prot); 1476 if (ret) { 1477 vfio_unpin_pages_remote(dma, iova + dma->size, pfn, 1478 npage, true); 1479 vfio_batch_unpin(&batch, dma); 1480 break; 1481 } 1482 1483 size -= npage << PAGE_SHIFT; 1484 dma->size += npage << PAGE_SHIFT; 1485 } 1486 1487 vfio_batch_fini(&batch); 1488 dma->iommu_mapped = true; 1489 1490 if (ret) 1491 vfio_remove_dma(iommu, dma); 1492 1493 return ret; 1494 } 1495 1496 /* 1497 * Check dma map request is within a valid iova range 1498 */ 1499 static bool vfio_iommu_iova_dma_valid(struct vfio_iommu *iommu, 1500 dma_addr_t start, dma_addr_t end) 1501 { 1502 struct list_head *iova = &iommu->iova_list; 1503 struct vfio_iova *node; 1504 1505 list_for_each_entry(node, iova, list) { 1506 if (start >= node->start && end <= node->end) 1507 return true; 1508 } 1509 1510 /* 1511 * Check for list_empty() as well since a container with 1512 * a single mdev device will have an empty list. 1513 */ 1514 return list_empty(iova); 1515 } 1516 1517 static int vfio_change_dma_owner(struct vfio_dma *dma) 1518 { 1519 struct task_struct *task = current->group_leader; 1520 struct mm_struct *mm = current->mm; 1521 long npage = dma->locked_vm; 1522 bool lock_cap; 1523 int ret; 1524 1525 if (mm == dma->mm) 1526 return 0; 1527 1528 lock_cap = capable(CAP_IPC_LOCK); 1529 ret = mm_lock_acct(task, mm, lock_cap, npage); 1530 if (ret) 1531 return ret; 1532 1533 if (mmget_not_zero(dma->mm)) { 1534 mm_lock_acct(dma->task, dma->mm, dma->lock_cap, -npage); 1535 mmput(dma->mm); 1536 } 1537 1538 if (dma->task != task) { 1539 put_task_struct(dma->task); 1540 dma->task = get_task_struct(task); 1541 } 1542 mmdrop(dma->mm); 1543 dma->mm = mm; 1544 mmgrab(dma->mm); 1545 dma->lock_cap = lock_cap; 1546 return 0; 1547 } 1548 1549 static int vfio_dma_do_map(struct vfio_iommu *iommu, 1550 struct vfio_iommu_type1_dma_map *map) 1551 { 1552 bool set_vaddr = map->flags & VFIO_DMA_MAP_FLAG_VADDR; 1553 dma_addr_t iova = map->iova; 1554 unsigned long vaddr = map->vaddr; 1555 size_t size = map->size; 1556 int ret = 0, prot = 0; 1557 size_t pgsize; 1558 struct vfio_dma *dma; 1559 1560 /* Verify that none of our __u64 fields overflow */ 1561 if (map->size != size || map->vaddr != vaddr || map->iova != iova) 1562 return -EINVAL; 1563 1564 /* READ/WRITE from device perspective */ 1565 if (map->flags & VFIO_DMA_MAP_FLAG_WRITE) 1566 prot |= IOMMU_WRITE; 1567 if (map->flags & VFIO_DMA_MAP_FLAG_READ) 1568 prot |= IOMMU_READ; 1569 1570 if ((prot && set_vaddr) || (!prot && !set_vaddr)) 1571 return -EINVAL; 1572 1573 mutex_lock(&iommu->lock); 1574 1575 pgsize = (size_t)1 << __ffs(iommu->pgsize_bitmap); 1576 1577 WARN_ON((pgsize - 1) & PAGE_MASK); 1578 1579 if (!size || (size | iova | vaddr) & (pgsize - 1)) { 1580 ret = -EINVAL; 1581 goto out_unlock; 1582 } 1583 1584 /* Don't allow IOVA or virtual address wrap */ 1585 if (iova + size - 1 < iova || vaddr + size - 1 < vaddr) { 1586 ret = -EINVAL; 1587 goto out_unlock; 1588 } 1589 1590 dma = vfio_find_dma(iommu, iova, size); 1591 if (set_vaddr) { 1592 if (!dma) { 1593 ret = -ENOENT; 1594 } else if (!dma->vaddr_invalid || dma->iova != iova || 1595 dma->size != size) { 1596 ret = -EINVAL; 1597 } else { 1598 ret = vfio_change_dma_owner(dma); 1599 if (ret) 1600 goto out_unlock; 1601 dma->vaddr = vaddr; 1602 dma->vaddr_invalid = false; 1603 iommu->vaddr_invalid_count--; 1604 } 1605 goto out_unlock; 1606 } else if (dma) { 1607 ret = -EEXIST; 1608 goto out_unlock; 1609 } 1610 1611 if (!iommu->dma_avail) { 1612 ret = 
-ENOSPC; 1613 goto out_unlock; 1614 } 1615 1616 if (!vfio_iommu_iova_dma_valid(iommu, iova, iova + size - 1)) { 1617 ret = -EINVAL; 1618 goto out_unlock; 1619 } 1620 1621 dma = kzalloc(sizeof(*dma), GFP_KERNEL); 1622 if (!dma) { 1623 ret = -ENOMEM; 1624 goto out_unlock; 1625 } 1626 1627 iommu->dma_avail--; 1628 dma->iova = iova; 1629 dma->vaddr = vaddr; 1630 dma->prot = prot; 1631 1632 /* 1633 * We need to be able to both add to a task's locked memory and test 1634 * against the locked memory limit and we need to be able to do both 1635 * outside of this call path as pinning can be asynchronous via the 1636 * external interfaces for mdev devices. RLIMIT_MEMLOCK requires a 1637 * task_struct. Save the group_leader so that all DMA tracking uses 1638 * the same task, to make debugging easier. VM locked pages requires 1639 * an mm_struct, so grab the mm in case the task dies. 1640 */ 1641 get_task_struct(current->group_leader); 1642 dma->task = current->group_leader; 1643 dma->lock_cap = capable(CAP_IPC_LOCK); 1644 dma->mm = current->mm; 1645 mmgrab(dma->mm); 1646 1647 dma->pfn_list = RB_ROOT; 1648 1649 /* Insert zero-sized and grow as we map chunks of it */ 1650 vfio_link_dma(iommu, dma); 1651 1652 /* Don't pin and map if container doesn't contain IOMMU capable domain*/ 1653 if (list_empty(&iommu->domain_list)) 1654 dma->size = size; 1655 else 1656 ret = vfio_pin_map_dma(iommu, dma, size); 1657 1658 if (!ret && iommu->dirty_page_tracking) { 1659 ret = vfio_dma_bitmap_alloc(dma, pgsize); 1660 if (ret) 1661 vfio_remove_dma(iommu, dma); 1662 } 1663 1664 out_unlock: 1665 mutex_unlock(&iommu->lock); 1666 return ret; 1667 } 1668 1669 static int vfio_iommu_replay(struct vfio_iommu *iommu, 1670 struct vfio_domain *domain) 1671 { 1672 struct vfio_batch batch; 1673 struct vfio_domain *d = NULL; 1674 struct rb_node *n; 1675 unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; 1676 int ret; 1677 1678 /* Arbitrarily pick the first domain in the list for lookups */ 1679 if (!list_empty(&iommu->domain_list)) 1680 d = list_first_entry(&iommu->domain_list, 1681 struct vfio_domain, next); 1682 1683 vfio_batch_init(&batch); 1684 1685 n = rb_first(&iommu->dma_list); 1686 1687 for (; n; n = rb_next(n)) { 1688 struct vfio_dma *dma; 1689 dma_addr_t iova; 1690 1691 dma = rb_entry(n, struct vfio_dma, node); 1692 iova = dma->iova; 1693 1694 while (iova < dma->iova + dma->size) { 1695 phys_addr_t phys; 1696 size_t size; 1697 1698 if (dma->iommu_mapped) { 1699 phys_addr_t p; 1700 dma_addr_t i; 1701 1702 if (WARN_ON(!d)) { /* mapped w/o a domain?! 
*/ 1703 ret = -EINVAL; 1704 goto unwind; 1705 } 1706 1707 phys = iommu_iova_to_phys(d->domain, iova); 1708 1709 if (WARN_ON(!phys)) { 1710 iova += PAGE_SIZE; 1711 continue; 1712 } 1713 1714 size = PAGE_SIZE; 1715 p = phys + size; 1716 i = iova + size; 1717 while (i < dma->iova + dma->size && 1718 p == iommu_iova_to_phys(d->domain, i)) { 1719 size += PAGE_SIZE; 1720 p += PAGE_SIZE; 1721 i += PAGE_SIZE; 1722 } 1723 } else { 1724 unsigned long pfn; 1725 unsigned long vaddr = dma->vaddr + 1726 (iova - dma->iova); 1727 size_t n = dma->iova + dma->size - iova; 1728 long npage; 1729 1730 npage = vfio_pin_pages_remote(dma, vaddr, 1731 n >> PAGE_SHIFT, 1732 &pfn, limit, 1733 &batch); 1734 if (npage <= 0) { 1735 WARN_ON(!npage); 1736 ret = (int)npage; 1737 goto unwind; 1738 } 1739 1740 phys = pfn << PAGE_SHIFT; 1741 size = npage << PAGE_SHIFT; 1742 } 1743 1744 ret = iommu_map(domain->domain, iova, phys, size, 1745 dma->prot | IOMMU_CACHE, GFP_KERNEL); 1746 if (ret) { 1747 if (!dma->iommu_mapped) { 1748 vfio_unpin_pages_remote(dma, iova, 1749 phys >> PAGE_SHIFT, 1750 size >> PAGE_SHIFT, 1751 true); 1752 vfio_batch_unpin(&batch, dma); 1753 } 1754 goto unwind; 1755 } 1756 1757 iova += size; 1758 } 1759 } 1760 1761 /* All dmas are now mapped, defer to second tree walk for unwind */ 1762 for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) { 1763 struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node); 1764 1765 dma->iommu_mapped = true; 1766 } 1767 1768 vfio_batch_fini(&batch); 1769 return 0; 1770 1771 unwind: 1772 for (; n; n = rb_prev(n)) { 1773 struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node); 1774 dma_addr_t iova; 1775 1776 if (dma->iommu_mapped) { 1777 iommu_unmap(domain->domain, dma->iova, dma->size); 1778 continue; 1779 } 1780 1781 iova = dma->iova; 1782 while (iova < dma->iova + dma->size) { 1783 phys_addr_t phys, p; 1784 size_t size; 1785 dma_addr_t i; 1786 1787 phys = iommu_iova_to_phys(domain->domain, iova); 1788 if (!phys) { 1789 iova += PAGE_SIZE; 1790 continue; 1791 } 1792 1793 size = PAGE_SIZE; 1794 p = phys + size; 1795 i = iova + size; 1796 while (i < dma->iova + dma->size && 1797 p == iommu_iova_to_phys(domain->domain, i)) { 1798 size += PAGE_SIZE; 1799 p += PAGE_SIZE; 1800 i += PAGE_SIZE; 1801 } 1802 1803 iommu_unmap(domain->domain, iova, size); 1804 vfio_unpin_pages_remote(dma, iova, phys >> PAGE_SHIFT, 1805 size >> PAGE_SHIFT, true); 1806 } 1807 } 1808 1809 vfio_batch_fini(&batch); 1810 return ret; 1811 } 1812 1813 /* 1814 * We change our unmap behavior slightly depending on whether the IOMMU 1815 * supports fine-grained superpages. IOMMUs like AMD-Vi will use a superpage 1816 * for practically any contiguous power-of-two mapping we give it. This means 1817 * we don't need to look for contiguous chunks ourselves to make unmapping 1818 * more efficient. On IOMMUs with coarse-grained super pages, like Intel VT-d 1819 * with discrete 2M/1G/512G/1T superpages, identifying contiguous chunks 1820 * significantly boosts non-hugetlbfs mappings and doesn't seem to hurt when 1821 * hugetlbfs is in use. 
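 *
 * Concretely (a descriptive note on the probe below), vfio_test_domain_fgsp()
 * maps two contiguous pages and then unmaps only the first: if the IOMMU
 * reports that more than PAGE_SIZE came out, i.e. the pair was a superpage
 * torn down as a unit, fgsp is set and vfio_unmap_unpin() skips its own
 * search for physically contiguous chunks.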
1822 */ 1823 static void vfio_test_domain_fgsp(struct vfio_domain *domain, struct list_head *regions) 1824 { 1825 int ret, order = get_order(PAGE_SIZE * 2); 1826 struct vfio_iova *region; 1827 struct page *pages; 1828 dma_addr_t start; 1829 1830 pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order); 1831 if (!pages) 1832 return; 1833 1834 list_for_each_entry(region, regions, list) { 1835 start = ALIGN(region->start, PAGE_SIZE * 2); 1836 if (start >= region->end || (region->end - start < PAGE_SIZE * 2)) 1837 continue; 1838 1839 ret = iommu_map(domain->domain, start, page_to_phys(pages), PAGE_SIZE * 2, 1840 IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE, GFP_KERNEL); 1841 if (!ret) { 1842 size_t unmapped = iommu_unmap(domain->domain, start, PAGE_SIZE); 1843 1844 if (unmapped == PAGE_SIZE) 1845 iommu_unmap(domain->domain, start + PAGE_SIZE, PAGE_SIZE); 1846 else 1847 domain->fgsp = true; 1848 } 1849 break; 1850 } 1851 1852 __free_pages(pages, order); 1853 } 1854 1855 static struct vfio_iommu_group *find_iommu_group(struct vfio_domain *domain, 1856 struct iommu_group *iommu_group) 1857 { 1858 struct vfio_iommu_group *g; 1859 1860 list_for_each_entry(g, &domain->group_list, next) { 1861 if (g->iommu_group == iommu_group) 1862 return g; 1863 } 1864 1865 return NULL; 1866 } 1867 1868 static struct vfio_iommu_group* 1869 vfio_iommu_find_iommu_group(struct vfio_iommu *iommu, 1870 struct iommu_group *iommu_group) 1871 { 1872 struct vfio_iommu_group *group; 1873 struct vfio_domain *domain; 1874 1875 list_for_each_entry(domain, &iommu->domain_list, next) { 1876 group = find_iommu_group(domain, iommu_group); 1877 if (group) 1878 return group; 1879 } 1880 1881 list_for_each_entry(group, &iommu->emulated_iommu_groups, next) 1882 if (group->iommu_group == iommu_group) 1883 return group; 1884 return NULL; 1885 } 1886 1887 static bool vfio_iommu_has_sw_msi(struct list_head *group_resv_regions, 1888 phys_addr_t *base) 1889 { 1890 struct iommu_resv_region *region; 1891 bool ret = false; 1892 1893 list_for_each_entry(region, group_resv_regions, list) { 1894 /* 1895 * The presence of any 'real' MSI regions should take 1896 * precedence over the software-managed one if the 1897 * IOMMU driver happens to advertise both types. 1898 */ 1899 if (region->type == IOMMU_RESV_MSI) { 1900 ret = false; 1901 break; 1902 } 1903 1904 if (region->type == IOMMU_RESV_SW_MSI) { 1905 *base = region->start; 1906 ret = true; 1907 } 1908 } 1909 1910 return ret; 1911 } 1912 1913 /* 1914 * This is a helper function to insert an address range to iova list. 1915 * The list is initially created with a single entry corresponding to 1916 * the IOMMU domain geometry to which the device group is attached. 1917 * The list aperture gets modified when a new domain is added to the 1918 * container if the new aperture doesn't conflict with the current one 1919 * or with any existing dma mappings. The list is also modified to 1920 * exclude any reserved regions associated with the device group. 1921 */ 1922 static int vfio_iommu_iova_insert(struct list_head *head, 1923 dma_addr_t start, dma_addr_t end) 1924 { 1925 struct vfio_iova *region; 1926 1927 region = kmalloc(sizeof(*region), GFP_KERNEL); 1928 if (!region) 1929 return -ENOMEM; 1930 1931 INIT_LIST_HEAD(®ion->list); 1932 region->start = start; 1933 region->end = end; 1934 1935 list_add_tail(®ion->list, head); 1936 return 0; 1937 } 1938 1939 /* 1940 * Check the new iommu aperture conflicts with existing aper or with any 1941 * existing dma mappings. 
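 *
 * For example, if the container already holds a mapping at iova 0x80000000
 * and a newly attached group's domain only offers an aperture of
 * [0x0, 0x7fffffff], the attach is refused rather than leaving an existing
 * mapping outside the usable range.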
1942 */ 1943 static bool vfio_iommu_aper_conflict(struct vfio_iommu *iommu, 1944 dma_addr_t start, dma_addr_t end) 1945 { 1946 struct vfio_iova *first, *last; 1947 struct list_head *iova = &iommu->iova_list; 1948 1949 if (list_empty(iova)) 1950 return false; 1951 1952 /* Disjoint sets, return conflict */ 1953 first = list_first_entry(iova, struct vfio_iova, list); 1954 last = list_last_entry(iova, struct vfio_iova, list); 1955 if (start > last->end || end < first->start) 1956 return true; 1957 1958 /* Check for any existing dma mappings below the new start */ 1959 if (start > first->start) { 1960 if (vfio_find_dma(iommu, first->start, start - first->start)) 1961 return true; 1962 } 1963 1964 /* Check for any existing dma mappings beyond the new end */ 1965 if (end < last->end) { 1966 if (vfio_find_dma(iommu, end + 1, last->end - end)) 1967 return true; 1968 } 1969 1970 return false; 1971 } 1972 1973 /* 1974 * Resize iommu iova aperture window. This is called only if the new 1975 * aperture has no conflict with existing aperture and dma mappings. 1976 */ 1977 static int vfio_iommu_aper_resize(struct list_head *iova, 1978 dma_addr_t start, dma_addr_t end) 1979 { 1980 struct vfio_iova *node, *next; 1981 1982 if (list_empty(iova)) 1983 return vfio_iommu_iova_insert(iova, start, end); 1984 1985 /* Adjust iova list start */ 1986 list_for_each_entry_safe(node, next, iova, list) { 1987 if (start < node->start) 1988 break; 1989 if (start >= node->start && start < node->end) { 1990 node->start = start; 1991 break; 1992 } 1993 /* Delete nodes before new start */ 1994 list_del(&node->list); 1995 kfree(node); 1996 } 1997 1998 /* Adjust iova list end */ 1999 list_for_each_entry_safe(node, next, iova, list) { 2000 if (end > node->end) 2001 continue; 2002 if (end > node->start && end <= node->end) { 2003 node->end = end; 2004 continue; 2005 } 2006 /* Delete nodes after new end */ 2007 list_del(&node->list); 2008 kfree(node); 2009 } 2010 2011 return 0; 2012 } 2013 2014 /* 2015 * Check reserved region conflicts with existing dma mappings 2016 */ 2017 static bool vfio_iommu_resv_conflict(struct vfio_iommu *iommu, 2018 struct list_head *resv_regions) 2019 { 2020 struct iommu_resv_region *region; 2021 2022 /* Check for conflict with existing dma mappings */ 2023 list_for_each_entry(region, resv_regions, list) { 2024 if (region->type == IOMMU_RESV_DIRECT_RELAXABLE) 2025 continue; 2026 2027 if (vfio_find_dma(iommu, region->start, region->length)) 2028 return true; 2029 } 2030 2031 return false; 2032 } 2033 2034 /* 2035 * Check iova region overlap with reserved regions and 2036 * exclude them from the iommu iova range 2037 */ 2038 static int vfio_iommu_resv_exclude(struct list_head *iova, 2039 struct list_head *resv_regions) 2040 { 2041 struct iommu_resv_region *resv; 2042 struct vfio_iova *n, *next; 2043 2044 list_for_each_entry(resv, resv_regions, list) { 2045 phys_addr_t start, end; 2046 2047 if (resv->type == IOMMU_RESV_DIRECT_RELAXABLE) 2048 continue; 2049 2050 start = resv->start; 2051 end = resv->start + resv->length - 1; 2052 2053 list_for_each_entry_safe(n, next, iova, list) { 2054 int ret = 0; 2055 2056 /* No overlap */ 2057 if (start > n->end || end < n->start) 2058 continue; 2059 /* 2060 * Insert a new node if current node overlaps with the 2061 * reserve region to exclude that from valid iova range. 2062 * Note that, new node is inserted before the current 2063 * node and finally the current node is deleted keeping 2064 * the list updated and sorted. 
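 *
 * For example, a node covering [0x0, 0xffffffff] with a reserved MSI window
 * of [0xfee00000, 0xfeefffff] is replaced by two nodes, [0x0, 0xfedfffff]
 * and [0xfef00000, 0xffffffff].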
2065 */ 2066 if (start > n->start) 2067 ret = vfio_iommu_iova_insert(&n->list, n->start, 2068 start - 1); 2069 if (!ret && end < n->end) 2070 ret = vfio_iommu_iova_insert(&n->list, end + 1, 2071 n->end); 2072 if (ret) 2073 return ret; 2074 2075 list_del(&n->list); 2076 kfree(n); 2077 } 2078 } 2079 2080 if (list_empty(iova)) 2081 return -EINVAL; 2082 2083 return 0; 2084 } 2085 2086 static void vfio_iommu_resv_free(struct list_head *resv_regions) 2087 { 2088 struct iommu_resv_region *n, *next; 2089 2090 list_for_each_entry_safe(n, next, resv_regions, list) { 2091 list_del(&n->list); 2092 kfree(n); 2093 } 2094 } 2095 2096 static void vfio_iommu_iova_free(struct list_head *iova) 2097 { 2098 struct vfio_iova *n, *next; 2099 2100 list_for_each_entry_safe(n, next, iova, list) { 2101 list_del(&n->list); 2102 kfree(n); 2103 } 2104 } 2105 2106 static int vfio_iommu_iova_get_copy(struct vfio_iommu *iommu, 2107 struct list_head *iova_copy) 2108 { 2109 struct list_head *iova = &iommu->iova_list; 2110 struct vfio_iova *n; 2111 int ret; 2112 2113 list_for_each_entry(n, iova, list) { 2114 ret = vfio_iommu_iova_insert(iova_copy, n->start, n->end); 2115 if (ret) 2116 goto out_free; 2117 } 2118 2119 return 0; 2120 2121 out_free: 2122 vfio_iommu_iova_free(iova_copy); 2123 return ret; 2124 } 2125 2126 static void vfio_iommu_iova_insert_copy(struct vfio_iommu *iommu, 2127 struct list_head *iova_copy) 2128 { 2129 struct list_head *iova = &iommu->iova_list; 2130 2131 vfio_iommu_iova_free(iova); 2132 2133 list_splice_tail(iova_copy, iova); 2134 } 2135 2136 static int vfio_iommu_domain_alloc(struct device *dev, void *data) 2137 { 2138 struct iommu_domain **domain = data; 2139 2140 *domain = iommu_domain_alloc(dev->bus); 2141 return 1; /* Don't iterate */ 2142 } 2143 2144 static int vfio_iommu_type1_attach_group(void *iommu_data, 2145 struct iommu_group *iommu_group, enum vfio_group_type type) 2146 { 2147 struct vfio_iommu *iommu = iommu_data; 2148 struct vfio_iommu_group *group; 2149 struct vfio_domain *domain, *d; 2150 bool resv_msi; 2151 phys_addr_t resv_msi_base = 0; 2152 struct iommu_domain_geometry *geo; 2153 LIST_HEAD(iova_copy); 2154 LIST_HEAD(group_resv_regions); 2155 int ret = -EBUSY; 2156 2157 mutex_lock(&iommu->lock); 2158 2159 /* Attach could require pinning, so disallow while vaddr is invalid. */ 2160 if (iommu->vaddr_invalid_count) 2161 goto out_unlock; 2162 2163 /* Check for duplicates */ 2164 ret = -EINVAL; 2165 if (vfio_iommu_find_iommu_group(iommu, iommu_group)) 2166 goto out_unlock; 2167 2168 ret = -ENOMEM; 2169 group = kzalloc(sizeof(*group), GFP_KERNEL); 2170 if (!group) 2171 goto out_unlock; 2172 group->iommu_group = iommu_group; 2173 2174 if (type == VFIO_EMULATED_IOMMU) { 2175 list_add(&group->next, &iommu->emulated_iommu_groups); 2176 /* 2177 * An emulated IOMMU group cannot dirty memory directly, it can 2178 * only use interfaces that provide dirty tracking. 2179 * The iommu scope can only be promoted with the addition of a 2180 * dirty tracking group. 2181 */ 2182 group->pinned_page_dirty_scope = true; 2183 ret = 0; 2184 goto out_unlock; 2185 } 2186 2187 ret = -ENOMEM; 2188 domain = kzalloc(sizeof(*domain), GFP_KERNEL); 2189 if (!domain) 2190 goto out_free_group; 2191 2192 /* 2193 * Going via the iommu_group iterator avoids races, and trivially gives 2194 * us a representative device for the IOMMU API call. We don't actually 2195 * want to iterate beyond the first device (if any). 
	 */
	ret = -EIO;
	iommu_group_for_each_dev(iommu_group, &domain->domain,
				 vfio_iommu_domain_alloc);
	if (!domain->domain)
		goto out_free_domain;

	if (iommu->nesting) {
		ret = iommu_enable_nesting(domain->domain);
		if (ret)
			goto out_domain;
	}

	ret = iommu_attach_group(domain->domain, group->iommu_group);
	if (ret)
		goto out_domain;

	/* Get aperture info */
	geo = &domain->domain->geometry;
	if (vfio_iommu_aper_conflict(iommu, geo->aperture_start,
				     geo->aperture_end)) {
		ret = -EINVAL;
		goto out_detach;
	}

	ret = iommu_get_group_resv_regions(iommu_group, &group_resv_regions);
	if (ret)
		goto out_detach;

	if (vfio_iommu_resv_conflict(iommu, &group_resv_regions)) {
		ret = -EINVAL;
		goto out_detach;
	}

	/*
	 * We don't want to work on the original iova list as the list
	 * gets modified and in case of failure we have to retain the
	 * original list. Get a copy here.
	 */
	ret = vfio_iommu_iova_get_copy(iommu, &iova_copy);
	if (ret)
		goto out_detach;

	ret = vfio_iommu_aper_resize(&iova_copy, geo->aperture_start,
				     geo->aperture_end);
	if (ret)
		goto out_detach;

	ret = vfio_iommu_resv_exclude(&iova_copy, &group_resv_regions);
	if (ret)
		goto out_detach;

	resv_msi = vfio_iommu_has_sw_msi(&group_resv_regions, &resv_msi_base);

	INIT_LIST_HEAD(&domain->group_list);
	list_add(&group->next, &domain->group_list);

	if (!allow_unsafe_interrupts &&
	    !iommu_group_has_isolated_msi(iommu_group)) {
		pr_warn("%s: No interrupt remapping support. Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n",
			__func__);
		ret = -EPERM;
		goto out_detach;
	}

	/*
	 * If the IOMMU can block non-coherent operations (i.e. PCIe TLPs with
	 * no-snoop set) then VFIO always turns this feature on because on Intel
	 * platforms it optimizes KVM to disable wbinvd emulation.
	 */
	if (domain->domain->ops->enforce_cache_coherency)
		domain->enforce_cache_coherency =
			domain->domain->ops->enforce_cache_coherency(
				domain->domain);

	/*
	 * Try to match an existing compatible domain. We don't want to
	 * preclude an IOMMU driver supporting multiple bus_types and being
	 * able to include different bus_types in the same IOMMU domain, so
	 * we test whether the domains use the same iommu_ops rather than
	 * testing if they're on the same bus_type.
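	 * If a compatible domain is found, the group is moved onto it and the
	 * newly allocated domain is freed; otherwise the new domain is kept
	 * and the existing mappings are replayed into it below.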
	 */
	list_for_each_entry(d, &iommu->domain_list, next) {
		if (d->domain->ops == domain->domain->ops &&
		    d->enforce_cache_coherency ==
			    domain->enforce_cache_coherency) {
			iommu_detach_group(domain->domain, group->iommu_group);
			if (!iommu_attach_group(d->domain,
						group->iommu_group)) {
				list_add(&group->next, &d->group_list);
				iommu_domain_free(domain->domain);
				kfree(domain);
				goto done;
			}

			ret = iommu_attach_group(domain->domain,
						 group->iommu_group);
			if (ret)
				goto out_domain;
		}
	}

	vfio_test_domain_fgsp(domain, &iova_copy);

	/* replay mappings on new domains */
	ret = vfio_iommu_replay(iommu, domain);
	if (ret)
		goto out_detach;

	if (resv_msi) {
		ret = iommu_get_msi_cookie(domain->domain, resv_msi_base);
		if (ret && ret != -ENODEV)
			goto out_detach;
	}

	list_add(&domain->next, &iommu->domain_list);
	vfio_update_pgsize_bitmap(iommu);
done:
	/* Delete the old one and insert new iova list */
	vfio_iommu_iova_insert_copy(iommu, &iova_copy);

	/*
	 * An iommu backed group can dirty memory directly and therefore
	 * demotes the iommu scope until it declares itself dirty tracking
	 * capable via the page pinning interface.
	 */
	iommu->num_non_pinned_groups++;
	mutex_unlock(&iommu->lock);
	vfio_iommu_resv_free(&group_resv_regions);

	return 0;

out_detach:
	iommu_detach_group(domain->domain, group->iommu_group);
out_domain:
	iommu_domain_free(domain->domain);
	vfio_iommu_iova_free(&iova_copy);
	vfio_iommu_resv_free(&group_resv_regions);
out_free_domain:
	kfree(domain);
out_free_group:
	kfree(group);
out_unlock:
	mutex_unlock(&iommu->lock);
	return ret;
}

static void vfio_iommu_unmap_unpin_all(struct vfio_iommu *iommu)
{
	struct rb_node *node;

	while ((node = rb_first(&iommu->dma_list)))
		vfio_remove_dma(iommu, rb_entry(node, struct vfio_dma, node));
}

static void vfio_iommu_unmap_unpin_reaccount(struct vfio_iommu *iommu)
{
	struct rb_node *n, *p;

	n = rb_first(&iommu->dma_list);
	for (; n; n = rb_next(n)) {
		struct vfio_dma *dma;
		long locked = 0, unlocked = 0;

		dma = rb_entry(n, struct vfio_dma, node);
		unlocked += vfio_unmap_unpin(iommu, dma, false);
		p = rb_first(&dma->pfn_list);
		for (; p; p = rb_next(p)) {
			struct vfio_pfn *vpfn = rb_entry(p, struct vfio_pfn,
							 node);

			if (!is_invalid_reserved_pfn(vpfn->pfn))
				locked++;
		}
		vfio_lock_acct(dma, locked - unlocked, true);
	}
}

/*
 * Called when a domain is removed in detach. It is possible that
 * the removed domain decided the iova aperture window. Modify the
 * iova aperture with the smallest window among existing domains.
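 * For example (illustrative widths only): if the remaining domains report
 * apertures [0, 1ULL << 39) and [0, 1ULL << 48), the copied list's first node
 * starts at 0 and its last node ends at the narrowest limit, (1ULL << 39) - 1.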
 */
static void vfio_iommu_aper_expand(struct vfio_iommu *iommu,
				   struct list_head *iova_copy)
{
	struct vfio_domain *domain;
	struct vfio_iova *node;
	dma_addr_t start = 0;
	dma_addr_t end = (dma_addr_t)~0;

	if (list_empty(iova_copy))
		return;

	list_for_each_entry(domain, &iommu->domain_list, next) {
		struct iommu_domain_geometry *geo = &domain->domain->geometry;

		if (geo->aperture_start > start)
			start = geo->aperture_start;
		if (geo->aperture_end < end)
			end = geo->aperture_end;
	}

	/* Modify aperture limits. The new aperture is either the same or bigger */
	node = list_first_entry(iova_copy, struct vfio_iova, list);
	node->start = start;
	node = list_last_entry(iova_copy, struct vfio_iova, list);
	node->end = end;
}

/*
 * Called when a group is detached. The reserved regions for that
 * group can be part of valid iova now. But since reserved regions
 * may be duplicated among groups, populate the iova valid regions
 * list again.
 */
static int vfio_iommu_resv_refresh(struct vfio_iommu *iommu,
				   struct list_head *iova_copy)
{
	struct vfio_domain *d;
	struct vfio_iommu_group *g;
	struct vfio_iova *node;
	dma_addr_t start, end;
	LIST_HEAD(resv_regions);
	int ret;

	if (list_empty(iova_copy))
		return -EINVAL;

	list_for_each_entry(d, &iommu->domain_list, next) {
		list_for_each_entry(g, &d->group_list, next) {
			ret = iommu_get_group_resv_regions(g->iommu_group,
							   &resv_regions);
			if (ret)
				goto done;
		}
	}

	node = list_first_entry(iova_copy, struct vfio_iova, list);
	start = node->start;
	node = list_last_entry(iova_copy, struct vfio_iova, list);
	end = node->end;

	/* purge the iova list and create a new one */
	vfio_iommu_iova_free(iova_copy);

	ret = vfio_iommu_aper_resize(iova_copy, start, end);
	if (ret)
		goto done;

	/* Exclude current reserved regions from iova ranges */
	ret = vfio_iommu_resv_exclude(iova_copy, &resv_regions);
done:
	vfio_iommu_resv_free(&resv_regions);
	return ret;
}

static void vfio_iommu_type1_detach_group(void *iommu_data,
					  struct iommu_group *iommu_group)
{
	struct vfio_iommu *iommu = iommu_data;
	struct vfio_domain *domain;
	struct vfio_iommu_group *group;
	bool update_dirty_scope = false;
	LIST_HEAD(iova_copy);

	mutex_lock(&iommu->lock);
	list_for_each_entry(group, &iommu->emulated_iommu_groups, next) {
		if (group->iommu_group != iommu_group)
			continue;
		update_dirty_scope = !group->pinned_page_dirty_scope;
		list_del(&group->next);
		kfree(group);

		if (list_empty(&iommu->emulated_iommu_groups) &&
		    list_empty(&iommu->domain_list)) {
			WARN_ON(!list_empty(&iommu->device_list));
			vfio_iommu_unmap_unpin_all(iommu);
		}
		goto detach_group_done;
	}

	/*
	 * Get a copy of the iova list. This will be used to update
	 * and to replace the current one later. Please note that
	 * we will leave the original list as it is if the update fails.
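	 * The copy is adjusted by vfio_iommu_aper_expand() and
	 * vfio_iommu_resv_refresh() below and is only spliced back in on
	 * success.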
	 */
	vfio_iommu_iova_get_copy(iommu, &iova_copy);

	list_for_each_entry(domain, &iommu->domain_list, next) {
		group = find_iommu_group(domain, iommu_group);
		if (!group)
			continue;

		iommu_detach_group(domain->domain, group->iommu_group);
		update_dirty_scope = !group->pinned_page_dirty_scope;
		list_del(&group->next);
		kfree(group);
		/*
		 * Group ownership provides privilege, so if the group list is
		 * empty, the domain goes away. If it's the last domain with
		 * iommu and no external domain exists, then all the mappings
		 * go away too. If it's the last domain with iommu and an
		 * external domain exists, update accounting.
		 */
		if (list_empty(&domain->group_list)) {
			if (list_is_singular(&iommu->domain_list)) {
				if (list_empty(&iommu->emulated_iommu_groups)) {
					WARN_ON(!list_empty(
						&iommu->device_list));
					vfio_iommu_unmap_unpin_all(iommu);
				} else {
					vfio_iommu_unmap_unpin_reaccount(iommu);
				}
			}
			iommu_domain_free(domain->domain);
			list_del(&domain->next);
			kfree(domain);
			vfio_iommu_aper_expand(iommu, &iova_copy);
			vfio_update_pgsize_bitmap(iommu);
		}
		break;
	}

	if (!vfio_iommu_resv_refresh(iommu, &iova_copy))
		vfio_iommu_iova_insert_copy(iommu, &iova_copy);
	else
		vfio_iommu_iova_free(&iova_copy);

detach_group_done:
	/*
	 * Removal of a group without dirty tracking may allow the iommu scope
	 * to be promoted.
	 */
	if (update_dirty_scope) {
		iommu->num_non_pinned_groups--;
		if (iommu->dirty_page_tracking)
			vfio_iommu_populate_bitmap_full(iommu);
	}
	mutex_unlock(&iommu->lock);
}

static void *vfio_iommu_type1_open(unsigned long arg)
{
	struct vfio_iommu *iommu;

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return ERR_PTR(-ENOMEM);

	switch (arg) {
	case VFIO_TYPE1_IOMMU:
		break;
	case VFIO_TYPE1_NESTING_IOMMU:
		iommu->nesting = true;
		fallthrough;
	case VFIO_TYPE1v2_IOMMU:
		iommu->v2 = true;
		break;
	default:
		kfree(iommu);
		return ERR_PTR(-EINVAL);
	}

	INIT_LIST_HEAD(&iommu->domain_list);
	INIT_LIST_HEAD(&iommu->iova_list);
	iommu->dma_list = RB_ROOT;
	iommu->dma_avail = dma_entry_limit;
	mutex_init(&iommu->lock);
	mutex_init(&iommu->device_list_lock);
	INIT_LIST_HEAD(&iommu->device_list);
	iommu->pgsize_bitmap = PAGE_MASK;
	INIT_LIST_HEAD(&iommu->emulated_iommu_groups);

	return iommu;
}

static void vfio_release_domain(struct vfio_domain *domain)
{
	struct vfio_iommu_group *group, *group_tmp;

	list_for_each_entry_safe(group, group_tmp,
				 &domain->group_list, next) {
		iommu_detach_group(domain->domain, group->iommu_group);
		list_del(&group->next);
		kfree(group);
	}

	iommu_domain_free(domain->domain);
}

static void vfio_iommu_type1_release(void *iommu_data)
{
	struct vfio_iommu *iommu = iommu_data;
	struct vfio_domain *domain, *domain_tmp;
	struct vfio_iommu_group *group, *next_group;

	list_for_each_entry_safe(group, next_group,
				 &iommu->emulated_iommu_groups, next) {
		list_del(&group->next);
		kfree(group);
	}

	vfio_iommu_unmap_unpin_all(iommu);

	list_for_each_entry_safe(domain, domain_tmp,
				 &iommu->domain_list, next) {
		vfio_release_domain(domain);
		list_del(&domain->next);
		kfree(domain);
	}

	vfio_iommu_iova_free(&iommu->iova_list);

	kfree(iommu);
}

static int vfio_domains_have_enforce_cache_coherency(struct vfio_iommu *iommu)
{
	struct vfio_domain *domain;
	int ret = 1;

	mutex_lock(&iommu->lock);
	list_for_each_entry(domain, &iommu->domain_list, next) {
		if (!(domain->enforce_cache_coherency)) {
			ret = 0;
			break;
		}
	}
	mutex_unlock(&iommu->lock);

	return ret;
}

static bool vfio_iommu_has_emulated(struct vfio_iommu *iommu)
{
	bool ret;

	mutex_lock(&iommu->lock);
	ret = !list_empty(&iommu->emulated_iommu_groups);
	mutex_unlock(&iommu->lock);
	return ret;
}

static int vfio_iommu_type1_check_extension(struct vfio_iommu *iommu,
					    unsigned long arg)
{
	switch (arg) {
	case VFIO_TYPE1_IOMMU:
	case VFIO_TYPE1v2_IOMMU:
	case VFIO_TYPE1_NESTING_IOMMU:
	case VFIO_UNMAP_ALL:
		return 1;
	case VFIO_UPDATE_VADDR:
		/*
		 * Disable this feature if mdevs are present. They cannot
		 * safely pin/unpin/rw while vaddrs are being updated.
		 */
		return iommu && !vfio_iommu_has_emulated(iommu);
	case VFIO_DMA_CC_IOMMU:
		if (!iommu)
			return 0;
		return vfio_domains_have_enforce_cache_coherency(iommu);
	default:
		return 0;
	}
}

static int vfio_iommu_iova_add_cap(struct vfio_info_cap *caps,
		struct vfio_iommu_type1_info_cap_iova_range *cap_iovas,
		size_t size)
{
	struct vfio_info_cap_header *header;
	struct vfio_iommu_type1_info_cap_iova_range *iova_cap;

	header = vfio_info_cap_add(caps, size,
				   VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE, 1);
	if (IS_ERR(header))
		return PTR_ERR(header);

	iova_cap = container_of(header,
				struct vfio_iommu_type1_info_cap_iova_range,
				header);
	iova_cap->nr_iovas = cap_iovas->nr_iovas;
	memcpy(iova_cap->iova_ranges, cap_iovas->iova_ranges,
	       cap_iovas->nr_iovas * sizeof(*cap_iovas->iova_ranges));
	return 0;
}

static int vfio_iommu_iova_build_caps(struct vfio_iommu *iommu,
				      struct vfio_info_cap *caps)
{
	struct vfio_iommu_type1_info_cap_iova_range *cap_iovas;
	struct vfio_iova *iova;
	size_t size;
	int iovas = 0, i = 0, ret;

	list_for_each_entry(iova, &iommu->iova_list, list)
		iovas++;

	if (!iovas) {
		/*
		 * Return 0 as a container with a single mdev device
		 * will have an empty list
		 */
		return 0;
	}

	size = struct_size(cap_iovas, iova_ranges, iovas);

	cap_iovas = kzalloc(size, GFP_KERNEL);
	if (!cap_iovas)
		return -ENOMEM;

	cap_iovas->nr_iovas = iovas;

	list_for_each_entry(iova, &iommu->iova_list, list) {
		cap_iovas->iova_ranges[i].start = iova->start;
		cap_iovas->iova_ranges[i].end = iova->end;
		i++;
	}

	ret = vfio_iommu_iova_add_cap(caps, cap_iovas, size);

	kfree(cap_iovas);
	return ret;
}

static int vfio_iommu_migration_build_caps(struct vfio_iommu *iommu,
					   struct vfio_info_cap *caps)
{
	struct vfio_iommu_type1_info_cap_migration cap_mig;

	cap_mig.header.id = VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION;
	cap_mig.header.version = 1;

	cap_mig.flags = 0;
	/* support minimum pgsize */
	cap_mig.pgsize_bitmap = (size_t)1 << __ffs(iommu->pgsize_bitmap);
	cap_mig.max_dirty_bitmap_size = DIRTY_BITMAP_SIZE_MAX;

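	/*
	 * Exposed to userspace through the VFIO_IOMMU_GET_INFO capability
	 * chain so that dirty bitmaps can be sized from pgsize_bitmap and
	 * max_dirty_bitmap_size reported here.
	 */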
	return vfio_info_add_capability(caps, &cap_mig.header, sizeof(cap_mig));
}

static int vfio_iommu_dma_avail_build_caps(struct vfio_iommu *iommu,
					   struct vfio_info_cap *caps)
{
	struct vfio_iommu_type1_info_dma_avail cap_dma_avail;

	cap_dma_avail.header.id = VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL;
	cap_dma_avail.header.version = 1;

	cap_dma_avail.avail = iommu->dma_avail;

	return vfio_info_add_capability(caps, &cap_dma_avail.header,
					sizeof(cap_dma_avail));
}

static int vfio_iommu_type1_get_info(struct vfio_iommu *iommu,
				     unsigned long arg)
{
	struct vfio_iommu_type1_info info;
	unsigned long minsz;
	struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
	unsigned long capsz;
	int ret;

	minsz = offsetofend(struct vfio_iommu_type1_info, iova_pgsizes);

	/* For backward compatibility, cannot require this */
	capsz = offsetofend(struct vfio_iommu_type1_info, cap_offset);

	if (copy_from_user(&info, (void __user *)arg, minsz))
		return -EFAULT;

	if (info.argsz < minsz)
		return -EINVAL;

	if (info.argsz >= capsz) {
		minsz = capsz;
		info.cap_offset = 0; /* output, no-recopy necessary */
	}

	mutex_lock(&iommu->lock);
	info.flags = VFIO_IOMMU_INFO_PGSIZES;

	info.iova_pgsizes = iommu->pgsize_bitmap;

	ret = vfio_iommu_migration_build_caps(iommu, &caps);

	if (!ret)
		ret = vfio_iommu_dma_avail_build_caps(iommu, &caps);

	if (!ret)
		ret = vfio_iommu_iova_build_caps(iommu, &caps);

	mutex_unlock(&iommu->lock);

	if (ret)
		return ret;

	if (caps.size) {
		info.flags |= VFIO_IOMMU_INFO_CAPS;

		if (info.argsz < sizeof(info) + caps.size) {
			info.argsz = sizeof(info) + caps.size;
		} else {
			vfio_info_cap_shift(&caps, sizeof(info));
			if (copy_to_user((void __user *)arg +
					sizeof(info), caps.buf,
					caps.size)) {
				kfree(caps.buf);
				return -EFAULT;
			}
			info.cap_offset = sizeof(info);
		}

		kfree(caps.buf);
	}

	return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
}
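
/*
 * Illustrative userspace sketch for the ioctl above (not part of this driver;
 * "container_fd" is an assumed, already-configured container file descriptor):
 *
 *	struct vfio_iommu_type1_info info = { .argsz = sizeof(info) };
 *
 *	if (!ioctl(container_fd, VFIO_IOMMU_GET_INFO, &info)) {
 *		// info.iova_pgsizes holds the supported page size bitmap.
 *		// If VFIO_IOMMU_INFO_CAPS is set, capabilities start at
 *		// info.cap_offset; a too-small buffer is signalled by the
 *		// kernel raising info.argsz to the size actually needed.
 *	}
 */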

static int vfio_iommu_type1_map_dma(struct vfio_iommu *iommu,
				    unsigned long arg)
{
	struct vfio_iommu_type1_dma_map map;
	unsigned long minsz;
	uint32_t mask = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE |
			VFIO_DMA_MAP_FLAG_VADDR;

	minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);

	if (copy_from_user(&map, (void __user *)arg, minsz))
		return -EFAULT;

	if (map.argsz < minsz || map.flags & ~mask)
		return -EINVAL;

	return vfio_dma_do_map(iommu, &map);
}

static int vfio_iommu_type1_unmap_dma(struct vfio_iommu *iommu,
				      unsigned long arg)
{
	struct vfio_iommu_type1_dma_unmap unmap;
	struct vfio_bitmap bitmap = { 0 };
	uint32_t mask = VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP |
			VFIO_DMA_UNMAP_FLAG_VADDR |
			VFIO_DMA_UNMAP_FLAG_ALL;
	unsigned long minsz;
	int ret;

	minsz = offsetofend(struct vfio_iommu_type1_dma_unmap, size);

	if (copy_from_user(&unmap, (void __user *)arg, minsz))
		return -EFAULT;

	if (unmap.argsz < minsz || unmap.flags & ~mask)
		return -EINVAL;

	if ((unmap.flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) &&
	    (unmap.flags & (VFIO_DMA_UNMAP_FLAG_ALL |
			    VFIO_DMA_UNMAP_FLAG_VADDR)))
		return -EINVAL;

	if (unmap.flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) {
		unsigned long pgshift;

		if (unmap.argsz < (minsz + sizeof(bitmap)))
			return -EINVAL;

		if (copy_from_user(&bitmap,
				   (void __user *)(arg + minsz),
				   sizeof(bitmap)))
			return -EFAULT;

		if (!access_ok((void __user *)bitmap.data, bitmap.size))
			return -EINVAL;

		pgshift = __ffs(bitmap.pgsize);
		ret = verify_bitmap_size(unmap.size >> pgshift,
					 bitmap.size);
		if (ret)
			return ret;
	}

	ret = vfio_dma_do_unmap(iommu, &unmap, &bitmap);
	if (ret)
		return ret;

	return copy_to_user((void __user *)arg, &unmap, minsz) ?
			-EFAULT : 0;
}
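
/*
 * Illustrative userspace sketch of the map/unmap path (not part of this
 * driver; "container_fd", "buf" and the addresses/sizes are assumptions):
 *
 *	struct vfio_iommu_type1_dma_map map = {
 *		.argsz = sizeof(map),
 *		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
 *		.vaddr = (__u64)(uintptr_t)buf,	// page-aligned user buffer
 *		.iova = 0x100000,
 *		.size = 0x100000,
 *	};
 *	struct vfio_iommu_type1_dma_unmap unmap = {
 *		.argsz = sizeof(unmap),
 *		.iova = 0x100000,
 *		.size = 0x100000,
 *	};
 *
 *	ioctl(container_fd, VFIO_IOMMU_MAP_DMA, &map);
 *	// ... device DMA to/from the iova range ...
 *	ioctl(container_fd, VFIO_IOMMU_UNMAP_DMA, &unmap);
 *	// on return, unmap.size reports the number of bytes unmapped
 */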

static int vfio_iommu_type1_dirty_pages(struct vfio_iommu *iommu,
					unsigned long arg)
{
	struct vfio_iommu_type1_dirty_bitmap dirty;
	uint32_t mask = VFIO_IOMMU_DIRTY_PAGES_FLAG_START |
			VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP |
			VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;
	unsigned long minsz;
	int ret = 0;

	if (!iommu->v2)
		return -EACCES;

	minsz = offsetofend(struct vfio_iommu_type1_dirty_bitmap, flags);

	if (copy_from_user(&dirty, (void __user *)arg, minsz))
		return -EFAULT;

	if (dirty.argsz < minsz || dirty.flags & ~mask)
		return -EINVAL;

	/* only one flag should be set at a time */
	if (__ffs(dirty.flags) != __fls(dirty.flags))
		return -EINVAL;

	if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_START) {
		size_t pgsize;

		mutex_lock(&iommu->lock);
		pgsize = 1 << __ffs(iommu->pgsize_bitmap);
		if (!iommu->dirty_page_tracking) {
			ret = vfio_dma_bitmap_alloc_all(iommu, pgsize);
			if (!ret)
				iommu->dirty_page_tracking = true;
		}
		mutex_unlock(&iommu->lock);
		return ret;
	} else if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP) {
		mutex_lock(&iommu->lock);
		if (iommu->dirty_page_tracking) {
			iommu->dirty_page_tracking = false;
			vfio_dma_bitmap_free_all(iommu);
		}
		mutex_unlock(&iommu->lock);
		return 0;
	} else if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP) {
		struct vfio_iommu_type1_dirty_bitmap_get range;
		unsigned long pgshift;
		size_t data_size = dirty.argsz - minsz;
		size_t iommu_pgsize;

		if (!data_size || data_size < sizeof(range))
			return -EINVAL;

		if (copy_from_user(&range, (void __user *)(arg + minsz),
				   sizeof(range)))
			return -EFAULT;

		if (range.iova + range.size < range.iova)
			return -EINVAL;
		if (!access_ok((void __user *)range.bitmap.data,
			       range.bitmap.size))
			return -EINVAL;

		pgshift = __ffs(range.bitmap.pgsize);
		ret = verify_bitmap_size(range.size >> pgshift,
					 range.bitmap.size);
		if (ret)
			return ret;

		mutex_lock(&iommu->lock);

		iommu_pgsize = (size_t)1 << __ffs(iommu->pgsize_bitmap);

		/* allow only smallest supported pgsize */
		if (range.bitmap.pgsize != iommu_pgsize) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (range.iova & (iommu_pgsize - 1)) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (!range.size || range.size & (iommu_pgsize - 1)) {
			ret = -EINVAL;
			goto out_unlock;
		}

		if (iommu->dirty_page_tracking)
			ret = vfio_iova_dirty_bitmap(range.bitmap.data,
						     iommu, range.iova,
						     range.size,
						     range.bitmap.pgsize);
		else
			ret = -EINVAL;
out_unlock:
		mutex_unlock(&iommu->lock);

		return ret;
	}

	return -EINVAL;
}

static long vfio_iommu_type1_ioctl(void *iommu_data,
				   unsigned int cmd, unsigned long arg)
{
	struct vfio_iommu *iommu = iommu_data;

	switch (cmd) {
	case VFIO_CHECK_EXTENSION:
		return vfio_iommu_type1_check_extension(iommu, arg);
	case VFIO_IOMMU_GET_INFO:
		return vfio_iommu_type1_get_info(iommu, arg);
	case VFIO_IOMMU_MAP_DMA:
		return vfio_iommu_type1_map_dma(iommu, arg);
	case VFIO_IOMMU_UNMAP_DMA:
		return vfio_iommu_type1_unmap_dma(iommu, arg);
	case VFIO_IOMMU_DIRTY_PAGES:
		return vfio_iommu_type1_dirty_pages(iommu, arg);
	default:
		return -ENOTTY;
	}
}
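
/*
 * Illustrative dirty-tracking sequence from userspace (assumptions: a
 * container fd and an existing mapping at "iova"/"size"; bitmap.pgsize must
 * be the minimum page size reported in iova_pgsizes):
 *
 *	VFIO_IOMMU_DIRTY_PAGES with VFIO_IOMMU_DIRTY_PAGES_FLAG_START
 *	VFIO_IOMMU_DIRTY_PAGES with VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP
 *	    (a struct vfio_iommu_type1_dirty_bitmap_get follows the header,
 *	     with bitmap.data pointing at a user buffer of bitmap.size bytes)
 *	VFIO_IOMMU_DIRTY_PAGES with VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP
 */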

static void vfio_iommu_type1_register_device(void *iommu_data,
					     struct vfio_device *vdev)
{
	struct vfio_iommu *iommu = iommu_data;

	if (!vdev->ops->dma_unmap)
		return;

	/*
	 * list_empty(&iommu->device_list) is tested under the iommu->lock while
	 * iteration for dma_unmap must be done under the device_list_lock.
	 * Holding both locks here allows avoiding the device_list_lock in
	 * several fast paths. See vfio_notify_dma_unmap()
	 */
	mutex_lock(&iommu->lock);
	mutex_lock(&iommu->device_list_lock);
	list_add(&vdev->iommu_entry, &iommu->device_list);
	mutex_unlock(&iommu->device_list_lock);
	mutex_unlock(&iommu->lock);
}

static void vfio_iommu_type1_unregister_device(void *iommu_data,
					       struct vfio_device *vdev)
{
	struct vfio_iommu *iommu = iommu_data;

	if (!vdev->ops->dma_unmap)
		return;

	mutex_lock(&iommu->lock);
	mutex_lock(&iommu->device_list_lock);
	list_del(&vdev->iommu_entry);
	mutex_unlock(&iommu->device_list_lock);
	mutex_unlock(&iommu->lock);
}

static int vfio_iommu_type1_dma_rw_chunk(struct vfio_iommu *iommu,
					 dma_addr_t user_iova, void *data,
					 size_t count, bool write,
					 size_t *copied)
{
	struct mm_struct *mm;
	unsigned long vaddr;
	struct vfio_dma *dma;
	bool kthread = current->mm == NULL;
	size_t offset;

	*copied = 0;

	dma = vfio_find_dma(iommu, user_iova, 1);
	if (!dma)
		return -EINVAL;

	if ((write && !(dma->prot & IOMMU_WRITE)) ||
	    !(dma->prot & IOMMU_READ))
		return -EPERM;

	mm = dma->mm;
	if (!mmget_not_zero(mm))
		return -EPERM;

	if (kthread)
		kthread_use_mm(mm);
	else if (current->mm != mm)
		goto out;

	offset = user_iova - dma->iova;

	if (count > dma->size - offset)
		count = dma->size - offset;

	vaddr = dma->vaddr + offset;

	if (write) {
		*copied = copy_to_user((void __user *)vaddr, data,
				       count) ? 0 : count;
		if (*copied && iommu->dirty_page_tracking) {
			unsigned long pgshift = __ffs(iommu->pgsize_bitmap);
			/*
			 * Bitmap populated with the smallest supported page
			 * size
			 */
			bitmap_set(dma->bitmap, offset >> pgshift,
				   ((offset + *copied - 1) >> pgshift) -
				   (offset >> pgshift) + 1);
		}
	} else
		*copied = copy_from_user(data, (void __user *)vaddr,
					 count) ? 0 : count;
	if (kthread)
		kthread_unuse_mm(mm);
out:
	mmput(mm);
	return *copied ? 0 : -EFAULT;
}
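
/*
 * vfio_iommu_type1_dma_rw() below walks the request one vfio_dma entry at a
 * time, calling the chunk helper above until the full count is copied or an
 * error (including a gap with no mapping) stops it.
 */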
static int vfio_iommu_type1_dma_rw(void *iommu_data, dma_addr_t user_iova,
				   void *data, size_t count, bool write)
{
	struct vfio_iommu *iommu = iommu_data;
	int ret = 0;
	size_t done;

	mutex_lock(&iommu->lock);

	if (WARN_ONCE(iommu->vaddr_invalid_count,
		      "vfio_dma_rw not allowed with VFIO_UPDATE_VADDR\n")) {
		ret = -EBUSY;
		goto out;
	}

	while (count > 0) {
		ret = vfio_iommu_type1_dma_rw_chunk(iommu, user_iova, data,
						    count, write, &done);
		if (ret)
			break;

		count -= done;
		data += done;
		user_iova += done;
	}

out:
	mutex_unlock(&iommu->lock);
	return ret;
}

static struct iommu_domain *
vfio_iommu_type1_group_iommu_domain(void *iommu_data,
				    struct iommu_group *iommu_group)
{
	struct iommu_domain *domain = ERR_PTR(-ENODEV);
	struct vfio_iommu *iommu = iommu_data;
	struct vfio_domain *d;

	if (!iommu || !iommu_group)
		return ERR_PTR(-EINVAL);

	mutex_lock(&iommu->lock);
	list_for_each_entry(d, &iommu->domain_list, next) {
		if (find_iommu_group(d, iommu_group)) {
			domain = d->domain;
			break;
		}
	}
	mutex_unlock(&iommu->lock);

	return domain;
}

static const struct vfio_iommu_driver_ops vfio_iommu_driver_ops_type1 = {
	.name			= "vfio-iommu-type1",
	.owner			= THIS_MODULE,
	.open			= vfio_iommu_type1_open,
	.release		= vfio_iommu_type1_release,
	.ioctl			= vfio_iommu_type1_ioctl,
	.attach_group		= vfio_iommu_type1_attach_group,
	.detach_group		= vfio_iommu_type1_detach_group,
	.pin_pages		= vfio_iommu_type1_pin_pages,
	.unpin_pages		= vfio_iommu_type1_unpin_pages,
	.register_device	= vfio_iommu_type1_register_device,
	.unregister_device	= vfio_iommu_type1_unregister_device,
	.dma_rw			= vfio_iommu_type1_dma_rw,
	.group_iommu_domain	= vfio_iommu_type1_group_iommu_domain,
};

static int __init vfio_iommu_type1_init(void)
{
	return vfio_register_iommu_driver(&vfio_iommu_driver_ops_type1);
}

static void __exit vfio_iommu_type1_cleanup(void)
{
	vfio_unregister_iommu_driver(&vfio_iommu_driver_ops_type1);
}

module_init(vfio_iommu_type1_init);
module_exit(vfio_iommu_type1_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);