// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES.
 *
 * Kernel side components to support tools/testing/selftests/iommu
 */
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/xarray.h>
#include <linux/file.h>
#include <linux/anon_inodes.h>
#include <linux/fault-inject.h>
#include <uapi/linux/iommufd.h>

#include "io_pagetable.h"
#include "iommufd_private.h"
#include "iommufd_test.h"

static DECLARE_FAULT_ATTR(fail_iommufd);
static struct dentry *dbgfs_root;

size_t iommufd_test_memory_limit = 65536;

enum {
	MOCK_IO_PAGE_SIZE = PAGE_SIZE / 2,

	/*
	 * Like a real page table, alignment requires the low bits of the
	 * address to be zero. xarray also requires the high bit to be zero,
	 * so we store the pfns shifted. The upper bits are used for metadata.
	 */
	MOCK_PFN_MASK = ULONG_MAX / MOCK_IO_PAGE_SIZE,

	_MOCK_PFN_START = MOCK_PFN_MASK + 1,
	MOCK_PFN_START_IOVA = _MOCK_PFN_START,
	MOCK_PFN_LAST_IOVA = _MOCK_PFN_START,
};

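/*
 * Illustrative example (not used by the code): with a 4KiB PAGE_SIZE,
 * MOCK_IO_PAGE_SIZE is 2KiB, so a physical address of 0x3000 mapped as the
 * first IO page of a map call would be stored as
 *	xa_mk_value((0x3000 / MOCK_IO_PAGE_SIZE) | MOCK_PFN_START_IOVA)
 * and decoded back with
 *	(xa_to_value(ent) & MOCK_PFN_MASK) * MOCK_IO_PAGE_SIZE == 0x3000
 * The START/LAST flags only record where a map_pages() call began and ended so
 * that unmap_pages() can sanity check the ranges it is later given.
 */
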
/*
 * Syzkaller has trouble randomizing the correct iova to use since it is linked
 * to the map ioctl's output, and it has no idea about that. So, simplify
 * things. In syzkaller mode the 64 bit IOVA is converted into an nth area and
 * offset value. This has a much smaller randomization space and syzkaller can
 * hit it.
 */
static unsigned long iommufd_test_syz_conv_iova(struct io_pagetable *iopt,
						u64 *iova)
{
	struct syz_layout {
		__u32 nth_area;
		__u32 offset;
	};
	struct syz_layout *syz = (void *)iova;
	unsigned int nth = syz->nth_area;
	struct iopt_area *area;

	down_read(&iopt->iova_rwsem);
	for (area = iopt_area_iter_first(iopt, 0, ULONG_MAX); area;
	     area = iopt_area_iter_next(area, 0, ULONG_MAX)) {
		if (nth == 0) {
			up_read(&iopt->iova_rwsem);
			return iopt_area_iova(area) + syz->offset;
		}
		nth--;
	}
	up_read(&iopt->iova_rwsem);

	return 0;
}

void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd,
				   unsigned int ioas_id, u64 *iova, u32 *flags)
{
	struct iommufd_ioas *ioas;

	if (!(*flags & MOCK_FLAGS_ACCESS_SYZ))
		return;
	*flags &= ~(u32)MOCK_FLAGS_ACCESS_SYZ;

	ioas = iommufd_get_ioas(ucmd, ioas_id);
	if (IS_ERR(ioas))
		return;
	*iova = iommufd_test_syz_conv_iova(&ioas->iopt, iova);
	iommufd_put_object(&ioas->obj);
}

struct mock_iommu_domain {
	struct iommu_domain domain;
	struct xarray pfns;
};

enum selftest_obj_type {
	TYPE_IDEV,
};

struct selftest_obj {
	struct iommufd_object obj;
	enum selftest_obj_type type;

	union {
		struct {
			struct iommufd_hw_pagetable *hwpt;
			struct iommufd_ctx *ictx;
			struct device mock_dev;
		} idev;
	};
};

static struct iommu_domain *mock_domain_alloc(unsigned int iommu_domain_type)
{
	struct mock_iommu_domain *mock;

	if (WARN_ON(iommu_domain_type != IOMMU_DOMAIN_UNMANAGED))
		return NULL;

	mock = kzalloc(sizeof(*mock), GFP_KERNEL);
	if (!mock)
		return NULL;
	mock->domain.geometry.aperture_start = MOCK_APERTURE_START;
	mock->domain.geometry.aperture_end = MOCK_APERTURE_LAST;
	mock->domain.pgsize_bitmap = MOCK_IO_PAGE_SIZE;
	xa_init(&mock->pfns);
	return &mock->domain;
}

static void mock_domain_free(struct iommu_domain *domain)
{
	struct mock_iommu_domain *mock =
		container_of(domain, struct mock_iommu_domain, domain);

	WARN_ON(!xa_empty(&mock->pfns));
	kfree(mock);
}

static int mock_domain_map_pages(struct iommu_domain *domain,
				 unsigned long iova, phys_addr_t paddr,
				 size_t pgsize, size_t pgcount, int prot,
				 gfp_t gfp, size_t *mapped)
{
	struct mock_iommu_domain *mock =
		container_of(domain, struct mock_iommu_domain, domain);
	unsigned long flags = MOCK_PFN_START_IOVA;
	unsigned long start_iova = iova;

	/*
	 * xarray does not reliably work with fault injection because it does a
	 * retry allocation, so put our own failure point.
	 */
	if (iommufd_should_fail())
		return -ENOENT;

	WARN_ON(iova % MOCK_IO_PAGE_SIZE);
	WARN_ON(pgsize % MOCK_IO_PAGE_SIZE);
	for (; pgcount; pgcount--) {
		size_t cur;

		for (cur = 0; cur != pgsize; cur += MOCK_IO_PAGE_SIZE) {
			void *old;

			if (pgcount == 1 && cur + MOCK_IO_PAGE_SIZE == pgsize)
				flags = MOCK_PFN_LAST_IOVA;
			old = xa_store(&mock->pfns, iova / MOCK_IO_PAGE_SIZE,
				       xa_mk_value((paddr / MOCK_IO_PAGE_SIZE) |
						   flags),
				       gfp);
			if (xa_is_err(old)) {
				for (; start_iova != iova;
				     start_iova += MOCK_IO_PAGE_SIZE)
					xa_erase(&mock->pfns,
						 start_iova /
							 MOCK_IO_PAGE_SIZE);
				return xa_err(old);
			}
			WARN_ON(old);
			iova += MOCK_IO_PAGE_SIZE;
			paddr += MOCK_IO_PAGE_SIZE;
			*mapped += MOCK_IO_PAGE_SIZE;
			flags = 0;
		}
	}
	return 0;
}

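/*
 * Worked example (illustrative only): a call with iova = 0x10000,
 * paddr = 0x8000, pgsize = PAGE_SIZE and pgcount = 1 on a 4KiB kernel stores
 * two 2KiB entries:
 *	index 0x10000 / 2048 -> pfn for 0x8000, flagged MOCK_PFN_START_IOVA
 *	index 0x10800 / 2048 -> pfn for 0x8800, flagged MOCK_PFN_LAST_IOVA
 * mock_domain_unmap_pages() below checks those flags to verify that unmap
 * ranges line up with the original map calls.
 */
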
static size_t mock_domain_unmap_pages(struct iommu_domain *domain,
				      unsigned long iova, size_t pgsize,
				      size_t pgcount,
				      struct iommu_iotlb_gather *iotlb_gather)
{
	struct mock_iommu_domain *mock =
		container_of(domain, struct mock_iommu_domain, domain);
	bool first = true;
	size_t ret = 0;
	void *ent;

	WARN_ON(iova % MOCK_IO_PAGE_SIZE);
	WARN_ON(pgsize % MOCK_IO_PAGE_SIZE);

	for (; pgcount; pgcount--) {
		size_t cur;

		for (cur = 0; cur != pgsize; cur += MOCK_IO_PAGE_SIZE) {
			ent = xa_erase(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
			WARN_ON(!ent);
			/*
			 * iommufd generates unmaps that must be a strict
			 * superset of the maps performed. So every starting
			 * IOVA should have been an IOVA passed to map, and the
			 * first IOVA must be present and must have been the
			 * first IOVA passed to map_pages.
			 */
			if (first) {
				WARN_ON(!(xa_to_value(ent) &
					  MOCK_PFN_START_IOVA));
				first = false;
			}
			if (pgcount == 1 && cur + MOCK_IO_PAGE_SIZE == pgsize)
				WARN_ON(!(xa_to_value(ent) &
					  MOCK_PFN_LAST_IOVA));

			iova += MOCK_IO_PAGE_SIZE;
			ret += MOCK_IO_PAGE_SIZE;
		}
	}
	return ret;
}

static phys_addr_t mock_domain_iova_to_phys(struct iommu_domain *domain,
					    dma_addr_t iova)
{
	struct mock_iommu_domain *mock =
		container_of(domain, struct mock_iommu_domain, domain);
	void *ent;

	WARN_ON(iova % MOCK_IO_PAGE_SIZE);
	ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
	WARN_ON(!ent);
	return (xa_to_value(ent) & MOCK_PFN_MASK) * MOCK_IO_PAGE_SIZE;
}

static const struct iommu_ops mock_ops = {
	.owner = THIS_MODULE,
	.pgsize_bitmap = MOCK_IO_PAGE_SIZE,
	.domain_alloc = mock_domain_alloc,
	.default_domain_ops =
		&(struct iommu_domain_ops){
			.free = mock_domain_free,
			.map_pages = mock_domain_map_pages,
			.unmap_pages = mock_domain_unmap_pages,
			.iova_to_phys = mock_domain_iova_to_phys,
		},
};

static inline struct iommufd_hw_pagetable *
get_md_pagetable(struct iommufd_ucmd *ucmd, u32 mockpt_id,
		 struct mock_iommu_domain **mock)
{
	struct iommufd_hw_pagetable *hwpt;
	struct iommufd_object *obj;

	obj = iommufd_get_object(ucmd->ictx, mockpt_id,
				 IOMMUFD_OBJ_HW_PAGETABLE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);
	hwpt = container_of(obj, struct iommufd_hw_pagetable, obj);
	if (hwpt->domain->ops != mock_ops.default_domain_ops) {
		iommufd_put_object(&hwpt->obj);
		return ERR_PTR(-EINVAL);
	}
	*mock = container_of(hwpt->domain, struct mock_iommu_domain, domain);
	return hwpt;
}

/* Create a hw_pagetable with the mock domain so we can test the domain ops */
static int iommufd_test_mock_domain(struct iommufd_ucmd *ucmd,
				    struct iommu_test_cmd *cmd)
{
	static struct bus_type mock_bus = { .iommu_ops = &mock_ops };
	struct iommufd_hw_pagetable *hwpt;
	struct selftest_obj *sobj;
	struct iommufd_ioas *ioas;
	int rc;

	ioas = iommufd_get_ioas(ucmd, cmd->id);
	if (IS_ERR(ioas))
		return PTR_ERR(ioas);

	sobj = iommufd_object_alloc(ucmd->ictx, sobj, IOMMUFD_OBJ_SELFTEST);
	if (IS_ERR(sobj)) {
		rc = PTR_ERR(sobj);
		goto out_ioas;
	}
	sobj->idev.ictx = ucmd->ictx;
	sobj->type = TYPE_IDEV;
	sobj->idev.mock_dev.bus = &mock_bus;

	hwpt = iommufd_device_selftest_attach(ucmd->ictx, ioas,
					      &sobj->idev.mock_dev);
	if (IS_ERR(hwpt)) {
		rc = PTR_ERR(hwpt);
		goto out_sobj;
	}
	sobj->idev.hwpt = hwpt;

	/* Userspace must destroy both of these IDs to destroy the object */
	cmd->mock_domain.out_hwpt_id = hwpt->obj.id;
	cmd->mock_domain.out_device_id = sobj->obj.id;
	iommufd_object_finalize(ucmd->ictx, &sobj->obj);
	iommufd_put_object(&ioas->obj);
	return iommufd_ucmd_respond(ucmd, sizeof(*cmd));

out_sobj:
	iommufd_object_abort(ucmd->ictx, &sobj->obj);
out_ioas:
	iommufd_put_object(&ioas->obj);
	return rc;
}

/* Add an additional reserved IOVA to the IOAS */
static int iommufd_test_add_reserved(struct iommufd_ucmd *ucmd,
				     unsigned int mockpt_id,
				     unsigned long start, size_t length)
{
	struct iommufd_ioas *ioas;
	int rc;

	ioas = iommufd_get_ioas(ucmd, mockpt_id);
	if (IS_ERR(ioas))
		return PTR_ERR(ioas);
	down_write(&ioas->iopt.iova_rwsem);
	rc = iopt_reserve_iova(&ioas->iopt, start, start + length - 1, NULL);
	up_write(&ioas->iopt.iova_rwsem);
	iommufd_put_object(&ioas->obj);
	return rc;
}

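/*
 * Illustrative note: reserving, say, [0x0, PAGE_SIZE - 1] above removes that
 * range from the IOAS allocator, so later automatic IOVA allocation skips it
 * and a fixed-IOVA map overlapping it is expected to fail. The selftests use
 * this to exercise the io_pagetable hole-finding logic.
 */
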
/* Check that every pfn under each iova matches the pfn under a user VA */
static int iommufd_test_md_check_pa(struct iommufd_ucmd *ucmd,
				    unsigned int mockpt_id, unsigned long iova,
				    size_t length, void __user *uptr)
{
	struct iommufd_hw_pagetable *hwpt;
	struct mock_iommu_domain *mock;
	int rc;

	if (iova % MOCK_IO_PAGE_SIZE || length % MOCK_IO_PAGE_SIZE ||
	    (uintptr_t)uptr % MOCK_IO_PAGE_SIZE)
		return -EINVAL;

	hwpt = get_md_pagetable(ucmd, mockpt_id, &mock);
	if (IS_ERR(hwpt))
		return PTR_ERR(hwpt);

	for (; length; length -= MOCK_IO_PAGE_SIZE) {
		struct page *pages[1];
		unsigned long pfn;
		long npages;
		void *ent;

		npages = get_user_pages_fast((uintptr_t)uptr & PAGE_MASK, 1, 0,
					     pages);
		if (npages < 0) {
			rc = npages;
			goto out_put;
		}
		if (WARN_ON(npages != 1)) {
			rc = -EFAULT;
			goto out_put;
		}
		pfn = page_to_pfn(pages[0]);
		put_page(pages[0]);

		ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
		if (!ent ||
		    (xa_to_value(ent) & MOCK_PFN_MASK) * MOCK_IO_PAGE_SIZE !=
			    pfn * PAGE_SIZE + ((uintptr_t)uptr % PAGE_SIZE)) {
			rc = -EINVAL;
			goto out_put;
		}
		iova += MOCK_IO_PAGE_SIZE;
		uptr += MOCK_IO_PAGE_SIZE;
	}
	rc = 0;

out_put:
	iommufd_put_object(&hwpt->obj);
	return rc;
}

/* Check that the page ref count matches, to look for missing pin/unpins */
static int iommufd_test_md_check_refs(struct iommufd_ucmd *ucmd,
				      void __user *uptr, size_t length,
				      unsigned int refs)
{
	if (length % PAGE_SIZE || (uintptr_t)uptr % PAGE_SIZE)
		return -EINVAL;

	for (; length; length -= PAGE_SIZE) {
		struct page *pages[1];
		long npages;

		npages = get_user_pages_fast((uintptr_t)uptr, 1, 0, pages);
		if (npages < 0)
			return npages;
		if (WARN_ON(npages != 1))
			return -EFAULT;
		if (!PageCompound(pages[0])) {
			unsigned int count;

			count = page_ref_count(pages[0]);
			if (count / GUP_PIN_COUNTING_BIAS != refs) {
				put_page(pages[0]);
				return -EIO;
			}
		}
		put_page(pages[0]);
		uptr += PAGE_SIZE;
	}
	return 0;
}

struct selftest_access {
	struct iommufd_access *access;
	struct file *file;
	struct mutex lock;
	struct list_head items;
	unsigned int next_id;
	bool destroying;
};

struct selftest_access_item {
	struct list_head items_elm;
	unsigned long iova;
	size_t length;
	unsigned int id;
};

static const struct file_operations iommfd_test_staccess_fops;

static struct selftest_access *iommufd_access_get(int fd)
{
	struct file *file;

	file = fget(fd);
	if (!file)
		return ERR_PTR(-EBADFD);

	if (file->f_op != &iommfd_test_staccess_fops) {
		fput(file);
		return ERR_PTR(-EBADFD);
	}
	return file->private_data;
}

static void iommufd_test_access_unmap(void *data, unsigned long iova,
				      unsigned long length)
{
	unsigned long iova_last = iova + length - 1;
	struct selftest_access *staccess = data;
	struct selftest_access_item *item;
	struct selftest_access_item *tmp;

	mutex_lock(&staccess->lock);
	list_for_each_entry_safe(item, tmp, &staccess->items, items_elm) {
		if (iova > item->iova + item->length - 1 ||
		    iova_last < item->iova)
			continue;
		list_del(&item->items_elm);
		iommufd_access_unpin_pages(staccess->access, item->iova,
					   item->length);
		kfree(item);
	}
	mutex_unlock(&staccess->lock);
}

static int iommufd_test_access_item_destroy(struct iommufd_ucmd *ucmd,
					    unsigned int access_id,
					    unsigned int item_id)
{
	struct selftest_access_item *item;
	struct selftest_access *staccess;

	staccess = iommufd_access_get(access_id);
	if (IS_ERR(staccess))
		return PTR_ERR(staccess);

	mutex_lock(&staccess->lock);
	list_for_each_entry(item, &staccess->items, items_elm) {
		if (item->id == item_id) {
			list_del(&item->items_elm);
			iommufd_access_unpin_pages(staccess->access, item->iova,
						   item->length);
			mutex_unlock(&staccess->lock);
			kfree(item);
			fput(staccess->file);
			return 0;
		}
	}
	mutex_unlock(&staccess->lock);
	fput(staccess->file);
	return -ENOENT;
}

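/*
 * Illustrative lifecycle (not enforced here): IOMMU_TEST_OP_ACCESS_PAGES pins
 * a range and records it as a selftest_access_item, returning its id to
 * userspace; IOMMU_TEST_OP_DESTROY_ACCESS_PAGES with that id, or an unmap
 * callback covering the range, unpins the pages again and frees the item.
 */
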
static int iommufd_test_staccess_release(struct inode *inode,
					 struct file *filep)
{
	struct selftest_access *staccess = filep->private_data;

	if (staccess->access) {
		iommufd_test_access_unmap(staccess, 0, ULONG_MAX);
		iommufd_access_destroy(staccess->access);
	}
	mutex_destroy(&staccess->lock);
	kfree(staccess);
	return 0;
}

static const struct iommufd_access_ops selftest_access_ops_pin = {
	.needs_pin_pages = 1,
	.unmap = iommufd_test_access_unmap,
};

static const struct iommufd_access_ops selftest_access_ops = {
	.unmap = iommufd_test_access_unmap,
};

static const struct file_operations iommfd_test_staccess_fops = {
	.release = iommufd_test_staccess_release,
};

static struct selftest_access *iommufd_test_alloc_access(void)
{
	struct selftest_access *staccess;
	struct file *filep;

	staccess = kzalloc(sizeof(*staccess), GFP_KERNEL_ACCOUNT);
	if (!staccess)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&staccess->items);
	mutex_init(&staccess->lock);

	filep = anon_inode_getfile("[iommufd_test_staccess]",
				   &iommfd_test_staccess_fops, staccess,
				   O_RDWR);
	if (IS_ERR(filep)) {
		kfree(staccess);
		return ERR_CAST(filep);
	}
	staccess->file = filep;
	return staccess;
}

static int iommufd_test_create_access(struct iommufd_ucmd *ucmd,
				      unsigned int ioas_id, unsigned int flags)
{
	struct iommu_test_cmd *cmd = ucmd->cmd;
	struct selftest_access *staccess;
	struct iommufd_access *access;
	int fdno;
	int rc;

	if (flags & ~MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES)
		return -EOPNOTSUPP;

	staccess = iommufd_test_alloc_access();
	if (IS_ERR(staccess))
		return PTR_ERR(staccess);

	fdno = get_unused_fd_flags(O_CLOEXEC);
	if (fdno < 0) {
		rc = -ENOMEM;
		goto out_free_staccess;
	}

	access = iommufd_access_create(
		ucmd->ictx, ioas_id,
		(flags & MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES) ?
			&selftest_access_ops_pin :
			&selftest_access_ops,
		staccess);
	if (IS_ERR(access)) {
		rc = PTR_ERR(access);
		goto out_put_fdno;
	}
	cmd->create_access.out_access_fd = fdno;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
	if (rc)
		goto out_destroy;

	staccess->access = access;
	fd_install(fdno, staccess->file);
	return 0;

out_destroy:
	iommufd_access_destroy(access);
out_put_fdno:
	put_unused_fd(fdno);
out_free_staccess:
	fput(staccess->file);
	return rc;
}

/* Check that the pages in a page array match the pages in the user VA */
static int iommufd_test_check_pages(void __user *uptr, struct page **pages,
				    size_t npages)
{
	for (; npages; npages--) {
		struct page *tmp_pages[1];
		long rc;

		rc = get_user_pages_fast((uintptr_t)uptr, 1, 0, tmp_pages);
		if (rc < 0)
			return rc;
		if (WARN_ON(rc != 1))
			return -EFAULT;
		put_page(tmp_pages[0]);
		if (tmp_pages[0] != *pages)
			return -EBADE;
		pages++;
		uptr += PAGE_SIZE;
	}
	return 0;
}

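/*
 * Worked example for the pin below (illustrative only): with PAGE_SIZE = 4KiB,
 * a request for iova = 0x1800 and length = 0x1000 touches bytes up to 0x27ff,
 * so the page-aligned window is [0x1000, 0x3000) and
 *	npages = (ALIGN(0x2800, 4096) - ALIGN_DOWN(0x1800, 4096)) / 4096 = 2
 * which is the size of the pages[] array handed to iommufd_access_pin_pages().
 */
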
static int iommufd_test_access_pages(struct iommufd_ucmd *ucmd,
				     unsigned int access_id, unsigned long iova,
				     size_t length, void __user *uptr,
				     u32 flags)
{
	struct iommu_test_cmd *cmd = ucmd->cmd;
	struct selftest_access_item *item;
	struct selftest_access *staccess;
	struct page **pages;
	size_t npages;
	int rc;

	/* Prevent syzkaller from triggering a WARN_ON in kvzalloc() */
	if (length > 16*1024*1024)
		return -ENOMEM;

	if (flags & ~(MOCK_FLAGS_ACCESS_WRITE | MOCK_FLAGS_ACCESS_SYZ))
		return -EOPNOTSUPP;

	staccess = iommufd_access_get(access_id);
	if (IS_ERR(staccess))
		return PTR_ERR(staccess);

	if (staccess->access->ops != &selftest_access_ops_pin) {
		rc = -EOPNOTSUPP;
		goto out_put;
	}

	if (flags & MOCK_FLAGS_ACCESS_SYZ)
		iova = iommufd_test_syz_conv_iova(&staccess->access->ioas->iopt,
						  &cmd->access_pages.iova);

	npages = (ALIGN(iova + length, PAGE_SIZE) -
		  ALIGN_DOWN(iova, PAGE_SIZE)) /
		 PAGE_SIZE;
	pages = kvcalloc(npages, sizeof(*pages), GFP_KERNEL_ACCOUNT);
	if (!pages) {
		rc = -ENOMEM;
		goto out_put;
	}

	/*
	 * Drivers will need to think very carefully about this locking. The
	 * core code can issue multiple unmaps concurrently as soon as
	 * iommufd_access_pin_pages() returns, and none of those unmaps may
	 * return until the range is unpinned. This simple implementation puts
	 * a global lock around the pin, which may not suit drivers that want
	 * this to be a performance path. Drivers that get this wrong will
	 * trigger WARN_ON races and cause EDEADLOCK failures to userspace.
	 */
	mutex_lock(&staccess->lock);
	rc = iommufd_access_pin_pages(staccess->access, iova, length, pages,
				      flags & MOCK_FLAGS_ACCESS_WRITE);
	if (rc)
		goto out_unlock;

	/* For syzkaller allow uptr to be NULL to skip this check */
	if (uptr) {
		rc = iommufd_test_check_pages(
			uptr - (iova - ALIGN_DOWN(iova, PAGE_SIZE)), pages,
			npages);
		if (rc)
			goto out_unaccess;
	}

	item = kzalloc(sizeof(*item), GFP_KERNEL_ACCOUNT);
	if (!item) {
		rc = -ENOMEM;
		goto out_unaccess;
	}

	item->iova = iova;
	item->length = length;
	item->id = staccess->next_id++;
	list_add_tail(&item->items_elm, &staccess->items);

	cmd->access_pages.out_access_pages_id = item->id;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
	if (rc)
		goto out_free_item;
	goto out_unlock;

out_free_item:
	list_del(&item->items_elm);
	kfree(item);
out_unaccess:
	iommufd_access_unpin_pages(staccess->access, iova, length);
out_unlock:
	mutex_unlock(&staccess->lock);
	kvfree(pages);
out_put:
	fput(staccess->file);
	return rc;
}

static int iommufd_test_access_rw(struct iommufd_ucmd *ucmd,
				  unsigned int access_id, unsigned long iova,
				  size_t length, void __user *ubuf,
				  unsigned int flags)
{
	struct iommu_test_cmd *cmd = ucmd->cmd;
	struct selftest_access *staccess;
	void *tmp;
	int rc;

	/* Prevent syzkaller from triggering a WARN_ON in kvzalloc() */
	if (length > 16*1024*1024)
		return -ENOMEM;

	if (flags & ~(MOCK_ACCESS_RW_WRITE | MOCK_ACCESS_RW_SLOW_PATH |
		      MOCK_FLAGS_ACCESS_SYZ))
		return -EOPNOTSUPP;

	staccess = iommufd_access_get(access_id);
	if (IS_ERR(staccess))
		return PTR_ERR(staccess);

	tmp = kvzalloc(length, GFP_KERNEL_ACCOUNT);
	if (!tmp) {
		rc = -ENOMEM;
		goto out_put;
	}

	if (flags & MOCK_ACCESS_RW_WRITE) {
		if (copy_from_user(tmp, ubuf, length)) {
			rc = -EFAULT;
			goto out_free;
		}
	}

	if (flags & MOCK_FLAGS_ACCESS_SYZ)
		iova = iommufd_test_syz_conv_iova(&staccess->access->ioas->iopt,
						  &cmd->access_rw.iova);

	rc = iommufd_access_rw(staccess->access, iova, tmp, length, flags);
	if (rc)
		goto out_free;
	if (!(flags & MOCK_ACCESS_RW_WRITE)) {
		if (copy_to_user(ubuf, tmp, length)) {
			rc = -EFAULT;
			goto out_free;
		}
	}

out_free:
	kvfree(tmp);
out_put:
	fput(staccess->file);
	return rc;
}
static_assert((unsigned int)MOCK_ACCESS_RW_WRITE == IOMMUFD_ACCESS_RW_WRITE);
static_assert((unsigned int)MOCK_ACCESS_RW_SLOW_PATH ==
	      __IOMMUFD_ACCESS_RW_SLOW_PATH);

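/*
 * Illustrative note: the static_asserts above are what let the MOCK_ACCESS_RW_*
 * flags be passed straight through to iommufd_access_rw(); e.g. a selftest
 * asking for MOCK_ACCESS_RW_WRITE | MOCK_ACCESS_RW_SLOW_PATH requests a write
 * that exercises the core code's slower copy path rather than the default one.
 */
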
void iommufd_selftest_destroy(struct iommufd_object *obj)
{
	struct selftest_obj *sobj = container_of(obj, struct selftest_obj, obj);

	switch (sobj->type) {
	case TYPE_IDEV:
		iommufd_device_selftest_detach(sobj->idev.ictx,
					       sobj->idev.hwpt);
		break;
	}
}

int iommufd_test(struct iommufd_ucmd *ucmd)
{
	struct iommu_test_cmd *cmd = ucmd->cmd;

	switch (cmd->op) {
	case IOMMU_TEST_OP_ADD_RESERVED:
		return iommufd_test_add_reserved(ucmd, cmd->id,
						 cmd->add_reserved.start,
						 cmd->add_reserved.length);
	case IOMMU_TEST_OP_MOCK_DOMAIN:
		return iommufd_test_mock_domain(ucmd, cmd);
	case IOMMU_TEST_OP_MD_CHECK_MAP:
		return iommufd_test_md_check_pa(
			ucmd, cmd->id, cmd->check_map.iova,
			cmd->check_map.length,
			u64_to_user_ptr(cmd->check_map.uptr));
	case IOMMU_TEST_OP_MD_CHECK_REFS:
		return iommufd_test_md_check_refs(
			ucmd, u64_to_user_ptr(cmd->check_refs.uptr),
			cmd->check_refs.length, cmd->check_refs.refs);
	case IOMMU_TEST_OP_CREATE_ACCESS:
		return iommufd_test_create_access(ucmd, cmd->id,
						  cmd->create_access.flags);
	case IOMMU_TEST_OP_ACCESS_PAGES:
		return iommufd_test_access_pages(
			ucmd, cmd->id, cmd->access_pages.iova,
			cmd->access_pages.length,
			u64_to_user_ptr(cmd->access_pages.uptr),
			cmd->access_pages.flags);
	case IOMMU_TEST_OP_ACCESS_RW:
		return iommufd_test_access_rw(
			ucmd, cmd->id, cmd->access_rw.iova,
			cmd->access_rw.length,
			u64_to_user_ptr(cmd->access_rw.uptr),
			cmd->access_rw.flags);
	case IOMMU_TEST_OP_DESTROY_ACCESS_PAGES:
		return iommufd_test_access_item_destroy(
			ucmd, cmd->id,
			cmd->destroy_access_pages.access_pages_id);
	case IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT:
		/* Protect _batch_init(); the limit cannot be less than elmsz */
		if (cmd->memory_limit.limit <
		    sizeof(unsigned long) + sizeof(u32))
			return -EINVAL;
		iommufd_test_memory_limit = cmd->memory_limit.limit;
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

bool iommufd_should_fail(void)
{
	return should_fail(&fail_iommufd, 1);
}

void __init iommufd_test_init(void)
{
	dbgfs_root =
		fault_create_debugfs_attr("fail_iommufd", NULL, &fail_iommufd);
}

void iommufd_test_exit(void)
{
	debugfs_remove_recursive(dbgfs_root);
}

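/*
 * Rough usage sketch (assumed, mirroring tools/testing/selftests/iommu): a
 * test typically creates an IOAS, issues IOMMU_TEST_OP_MOCK_DOMAIN against it
 * to get a hwpt/device id pair, maps memory with IOMMU_IOAS_MAP, and then uses
 * IOMMU_TEST_OP_MD_CHECK_MAP / IOMMU_TEST_OP_MD_CHECK_REFS to verify that the
 * mock domain's xarray and the page refcounts match what the map should have
 * produced. The fail_iommufd fault-injection attribute created in
 * iommufd_test_init() is driven through debugfs to exercise error paths.
 */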