// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES.
 *
 * Kernel side components to support tools/testing/selftests/iommu
 */
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/xarray.h>
#include <linux/file.h>
#include <linux/anon_inodes.h>
#include <linux/fault-inject.h>
#include <uapi/linux/iommufd.h>

#include "io_pagetable.h"
#include "iommufd_private.h"
#include "iommufd_test.h"

static DECLARE_FAULT_ATTR(fail_iommufd);
static struct dentry *dbgfs_root;

size_t iommufd_test_memory_limit = 65536;

enum {
	MOCK_IO_PAGE_SIZE = PAGE_SIZE / 2,

	/*
	 * Like a real page table, alignment requires the low bits of the
	 * address to be zero. The xarray also requires the high bit to be
	 * zero, so we store the pfns shifted. The upper bits are used for
	 * metadata.
	 */
	MOCK_PFN_MASK = ULONG_MAX / MOCK_IO_PAGE_SIZE,

	_MOCK_PFN_START = MOCK_PFN_MASK + 1,
	MOCK_PFN_START_IOVA = _MOCK_PFN_START,
	MOCK_PFN_LAST_IOVA = _MOCK_PFN_START,
};

/*
 * Syzkaller has trouble randomizing the correct iova to use since it is linked
 * to the map ioctl's output, and it has no idea about that relationship. So,
 * simplify things. In syzkaller mode the 64 bit IOVA is converted into an
 * nth area and offset value. This has a much smaller randomization space and
 * syzkaller can hit it.
 */
static unsigned long iommufd_test_syz_conv_iova(struct io_pagetable *iopt,
						u64 *iova)
{
	struct syz_layout {
		__u32 nth_area;
		__u32 offset;
	};
	struct syz_layout *syz = (void *)iova;
	unsigned int nth = syz->nth_area;
	struct iopt_area *area;

	down_read(&iopt->iova_rwsem);
	for (area = iopt_area_iter_first(iopt, 0, ULONG_MAX); area;
	     area = iopt_area_iter_next(area, 0, ULONG_MAX)) {
		if (nth == 0) {
			up_read(&iopt->iova_rwsem);
			return iopt_area_iova(area) + syz->offset;
		}
		nth--;
	}
	up_read(&iopt->iova_rwsem);

	return 0;
}

void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd,
				   unsigned int ioas_id, u64 *iova, u32 *flags)
{
	struct iommufd_ioas *ioas;

	if (!(*flags & MOCK_FLAGS_ACCESS_SYZ))
		return;
	*flags &= ~(u32)MOCK_FLAGS_ACCESS_SYZ;

	ioas = iommufd_get_ioas(ucmd->ictx, ioas_id);
	if (IS_ERR(ioas))
		return;
	*iova = iommufd_test_syz_conv_iova(&ioas->iopt, iova);
	iommufd_put_object(&ioas->obj);
}
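/*
 * For illustration only (not used by the driver): on a little-endian machine
 * the u64 passed with MOCK_FLAGS_ACCESS_SYZ decodes with nth_area in the low
 * 32 bits and offset in the high 32 bits of struct syz_layout above. So a
 * hypothetical caller wanting an address 0x10 bytes into the third mapped
 * area would pass:
 *
 *	u64 syz_iova = 2 | ((u64)0x10 << 32);	// nth_area == 2, offset == 0x10
 */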
struct mock_iommu_domain {
	struct iommu_domain domain;
	struct xarray pfns;
};

enum selftest_obj_type {
	TYPE_IDEV,
};

struct mock_dev {
	struct device dev;
};

struct selftest_obj {
	struct iommufd_object obj;
	enum selftest_obj_type type;

	union {
		struct {
			struct iommufd_device *idev;
			struct iommufd_ctx *ictx;
			struct mock_dev *mock_dev;
		} idev;
	};
};

static void mock_domain_blocking_free(struct iommu_domain *domain)
{
}

static int mock_domain_nop_attach(struct iommu_domain *domain,
				  struct device *dev)
{
	return 0;
}

static const struct iommu_domain_ops mock_blocking_ops = {
	.free = mock_domain_blocking_free,
	.attach_dev = mock_domain_nop_attach,
};

static struct iommu_domain mock_blocking_domain = {
	.type = IOMMU_DOMAIN_BLOCKED,
	.ops = &mock_blocking_ops,
};

static struct iommu_domain *mock_domain_alloc(unsigned int iommu_domain_type)
{
	struct mock_iommu_domain *mock;

	if (iommu_domain_type == IOMMU_DOMAIN_BLOCKED)
		return &mock_blocking_domain;

	if (WARN_ON(iommu_domain_type != IOMMU_DOMAIN_UNMANAGED))
		return NULL;

	mock = kzalloc(sizeof(*mock), GFP_KERNEL);
	if (!mock)
		return NULL;
	mock->domain.geometry.aperture_start = MOCK_APERTURE_START;
	mock->domain.geometry.aperture_end = MOCK_APERTURE_LAST;
	mock->domain.pgsize_bitmap = MOCK_IO_PAGE_SIZE;
	xa_init(&mock->pfns);
	return &mock->domain;
}

static void mock_domain_free(struct iommu_domain *domain)
{
	struct mock_iommu_domain *mock =
		container_of(domain, struct mock_iommu_domain, domain);

	WARN_ON(!xa_empty(&mock->pfns));
	kfree(mock);
}

static int mock_domain_map_pages(struct iommu_domain *domain,
				 unsigned long iova, phys_addr_t paddr,
				 size_t pgsize, size_t pgcount, int prot,
				 gfp_t gfp, size_t *mapped)
{
	struct mock_iommu_domain *mock =
		container_of(domain, struct mock_iommu_domain, domain);
	unsigned long flags = MOCK_PFN_START_IOVA;
	unsigned long start_iova = iova;

	/*
	 * xarray does not reliably work with fault injection because it does
	 * a retry allocation, so put our own failure point.
	 */
	if (iommufd_should_fail())
		return -ENOENT;

	WARN_ON(iova % MOCK_IO_PAGE_SIZE);
	WARN_ON(pgsize % MOCK_IO_PAGE_SIZE);
	for (; pgcount; pgcount--) {
		size_t cur;

		for (cur = 0; cur != pgsize; cur += MOCK_IO_PAGE_SIZE) {
			void *old;

			if (pgcount == 1 && cur + MOCK_IO_PAGE_SIZE == pgsize)
				flags = MOCK_PFN_LAST_IOVA;
			old = xa_store(&mock->pfns, iova / MOCK_IO_PAGE_SIZE,
				       xa_mk_value((paddr / MOCK_IO_PAGE_SIZE) |
						   flags),
				       gfp);
			if (xa_is_err(old)) {
				/* Unwind any entries already stored */
				for (; start_iova != iova;
				     start_iova += MOCK_IO_PAGE_SIZE)
					xa_erase(&mock->pfns,
						 start_iova /
							 MOCK_IO_PAGE_SIZE);
				return xa_err(old);
			}
			WARN_ON(old);
			iova += MOCK_IO_PAGE_SIZE;
			paddr += MOCK_IO_PAGE_SIZE;
			*mapped += MOCK_IO_PAGE_SIZE;
			flags = 0;
		}
	}
	return 0;
}

static size_t mock_domain_unmap_pages(struct iommu_domain *domain,
				      unsigned long iova, size_t pgsize,
				      size_t pgcount,
				      struct iommu_iotlb_gather *iotlb_gather)
{
	struct mock_iommu_domain *mock =
		container_of(domain, struct mock_iommu_domain, domain);
	bool first = true;
	size_t ret = 0;
	void *ent;

	WARN_ON(iova % MOCK_IO_PAGE_SIZE);
	WARN_ON(pgsize % MOCK_IO_PAGE_SIZE);

	for (; pgcount; pgcount--) {
		size_t cur;

		for (cur = 0; cur != pgsize; cur += MOCK_IO_PAGE_SIZE) {
			ent = xa_erase(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
			WARN_ON(!ent);
			/*
			 * iommufd generates unmaps that must be a strict
			 * superset of the maps performed. So every starting
			 * IOVA must have been the first IOVA passed to a
			 * map_pages call and carry MOCK_PFN_START_IOVA, and
			 * every ending IOVA must carry MOCK_PFN_LAST_IOVA.
			 */
			if (first) {
				WARN_ON(!(xa_to_value(ent) &
					  MOCK_PFN_START_IOVA));
				first = false;
			}
			if (pgcount == 1 && cur + MOCK_IO_PAGE_SIZE == pgsize)
				WARN_ON(!(xa_to_value(ent) &
					  MOCK_PFN_LAST_IOVA));

			iova += MOCK_IO_PAGE_SIZE;
			ret += MOCK_IO_PAGE_SIZE;
		}
	}
	return ret;
}
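/*
 * A minimal worked example of the xarray encoding above, for illustration
 * only and assuming PAGE_SIZE == 4096 so MOCK_IO_PAGE_SIZE == 2048. Mapping
 * paddr == 0x3000 stores the value
 *
 *	(0x3000 / 2048) | flags == 0x6 | flags
 *
 * where flags is MOCK_PFN_START_IOVA for the first entry of a map_pages
 * call, MOCK_PFN_LAST_IOVA for the last, and 0 otherwise.
 * mock_domain_iova_to_phys() below reverses this with
 * (value & MOCK_PFN_MASK) * MOCK_IO_PAGE_SIZE.
 */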
static phys_addr_t mock_domain_iova_to_phys(struct iommu_domain *domain,
					    dma_addr_t iova)
{
	struct mock_iommu_domain *mock =
		container_of(domain, struct mock_iommu_domain, domain);
	void *ent;

	WARN_ON(iova % MOCK_IO_PAGE_SIZE);
	ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
	WARN_ON(!ent);
	return (xa_to_value(ent) & MOCK_PFN_MASK) * MOCK_IO_PAGE_SIZE;
}

static bool mock_domain_capable(struct device *dev, enum iommu_cap cap)
{
	return cap == IOMMU_CAP_CACHE_COHERENCY;
}

static void mock_domain_set_platform_dma_ops(struct device *dev)
{
	/*
	 * mock doesn't set up default domains because we can't hook into the
	 * normal probe path
	 */
}

static const struct iommu_ops mock_ops = {
	.owner = THIS_MODULE,
	.pgsize_bitmap = MOCK_IO_PAGE_SIZE,
	.domain_alloc = mock_domain_alloc,
	.capable = mock_domain_capable,
	.set_platform_dma_ops = mock_domain_set_platform_dma_ops,
	.default_domain_ops =
		&(struct iommu_domain_ops){
			.free = mock_domain_free,
			.attach_dev = mock_domain_nop_attach,
			.map_pages = mock_domain_map_pages,
			.unmap_pages = mock_domain_unmap_pages,
			.iova_to_phys = mock_domain_iova_to_phys,
		},
};

static struct iommu_device mock_iommu_device = {
	.ops = &mock_ops,
};

static inline struct iommufd_hw_pagetable *
get_md_pagetable(struct iommufd_ucmd *ucmd, u32 mockpt_id,
		 struct mock_iommu_domain **mock)
{
	struct iommufd_hw_pagetable *hwpt;
	struct iommufd_object *obj;

	obj = iommufd_get_object(ucmd->ictx, mockpt_id,
				 IOMMUFD_OBJ_HW_PAGETABLE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);
	hwpt = container_of(obj, struct iommufd_hw_pagetable, obj);
	/* The ops pointer doubles as a type check that this is a mock domain */
	if (hwpt->domain->ops != mock_ops.default_domain_ops) {
		iommufd_put_object(&hwpt->obj);
		return ERR_PTR(-EINVAL);
	}
	*mock = container_of(hwpt->domain, struct mock_iommu_domain, domain);
	return hwpt;
}
static struct bus_type iommufd_mock_bus_type = {
	.name = "iommufd_mock",
	.iommu_ops = &mock_ops,
};

static void mock_dev_release(struct device *dev)
{
	struct mock_dev *mdev = container_of(dev, struct mock_dev, dev);

	kfree(mdev);
}

static struct mock_dev *mock_dev_create(void)
{
	struct iommu_group *iommu_group;
	struct dev_iommu *dev_iommu;
	struct mock_dev *mdev;
	int rc;

	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
	if (!mdev)
		return ERR_PTR(-ENOMEM);

	device_initialize(&mdev->dev);
	mdev->dev.release = mock_dev_release;
	mdev->dev.bus = &iommufd_mock_bus_type;

	iommu_group = iommu_group_alloc();
	if (IS_ERR(iommu_group)) {
		rc = PTR_ERR(iommu_group);
		goto err_put;
	}

	rc = dev_set_name(&mdev->dev, "iommufd_mock%u",
			  iommu_group_id(iommu_group));
	if (rc)
		goto err_group;

	/*
	 * The iommu core has no way to associate a single device with an
	 * iommu driver (indeed it currently can't even support two iommu
	 * drivers registering). Hack it together with an open coded
	 * dev_iommu_get(). Notice that the normal notifier-triggered iommu
	 * release process also does not work here because this bus is not in
	 * iommu_buses.
	 */
	mdev->dev.iommu = kzalloc(sizeof(*dev_iommu), GFP_KERNEL);
	if (!mdev->dev.iommu) {
		rc = -ENOMEM;
		goto err_group;
	}
	mutex_init(&mdev->dev.iommu->lock);
	mdev->dev.iommu->iommu_dev = &mock_iommu_device;

	rc = device_add(&mdev->dev);
	if (rc)
		goto err_dev_iommu;

	rc = iommu_group_add_device(iommu_group, &mdev->dev);
	if (rc)
		goto err_del;
	iommu_group_put(iommu_group);
	return mdev;

err_del:
	device_del(&mdev->dev);
err_dev_iommu:
	kfree(mdev->dev.iommu);
	mdev->dev.iommu = NULL;
err_group:
	iommu_group_put(iommu_group);
err_put:
	put_device(&mdev->dev);
	return ERR_PTR(rc);
}

static void mock_dev_destroy(struct mock_dev *mdev)
{
	iommu_group_remove_device(&mdev->dev);
	device_del(&mdev->dev);
	kfree(mdev->dev.iommu);
	mdev->dev.iommu = NULL;
	put_device(&mdev->dev);
}

bool iommufd_selftest_is_mock_dev(struct device *dev)
{
	return dev->release == mock_dev_release;
}

/* Create an hw_pagetable with the mock domain so we can test the domain ops */
static int iommufd_test_mock_domain(struct iommufd_ucmd *ucmd,
				    struct iommu_test_cmd *cmd)
{
	struct iommufd_device *idev;
	struct selftest_obj *sobj;
	u32 pt_id = cmd->id;
	u32 idev_id;
	int rc;

	sobj = iommufd_object_alloc(ucmd->ictx, sobj, IOMMUFD_OBJ_SELFTEST);
	if (IS_ERR(sobj))
		return PTR_ERR(sobj);

	sobj->idev.ictx = ucmd->ictx;
	sobj->type = TYPE_IDEV;

	sobj->idev.mock_dev = mock_dev_create();
	if (IS_ERR(sobj->idev.mock_dev)) {
		rc = PTR_ERR(sobj->idev.mock_dev);
		goto out_sobj;
	}

	idev = iommufd_device_bind(ucmd->ictx, &sobj->idev.mock_dev->dev,
				   &idev_id);
	if (IS_ERR(idev)) {
		rc = PTR_ERR(idev);
		goto out_mdev;
	}
	sobj->idev.idev = idev;

	rc = iommufd_device_attach(idev, &pt_id);
	if (rc)
		goto out_unbind;

	/* Userspace must destroy the device_id to destroy the object */
	cmd->mock_domain.out_hwpt_id = pt_id;
	cmd->mock_domain.out_stdev_id = sobj->obj.id;
	iommufd_object_finalize(ucmd->ictx, &sobj->obj);
	return iommufd_ucmd_respond(ucmd, sizeof(*cmd));

out_unbind:
	iommufd_device_unbind(idev);
out_mdev:
	mock_dev_destroy(sobj->idev.mock_dev);
out_sobj:
	iommufd_object_abort(ucmd->ictx, &sobj->obj);
	return rc;
}
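/*
 * For illustration only, a minimal sketch of how userspace might drive the op
 * above, assuming the IOMMU_TEST_CMD ioctl number from iommufd_test.h (the
 * output field names are the ones set by iommufd_test_mock_domain(); error
 * handling omitted):
 *
 *	struct iommu_test_cmd cmd = {
 *		.size = sizeof(cmd),
 *		.op = IOMMU_TEST_OP_MOCK_DOMAIN,
 *		.id = ioas_id,		// IOAS to attach the mock device to
 *	};
 *	ioctl(iommufd, IOMMU_TEST_CMD, &cmd);
 *	// cmd.mock_domain.out_stdev_id / out_hwpt_id now name the new objects
 */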
/* Add an additional reserved IOVA to the IOAS */
static int iommufd_test_add_reserved(struct iommufd_ucmd *ucmd,
				     unsigned int mockpt_id,
				     unsigned long start, size_t length)
{
	struct iommufd_ioas *ioas;
	int rc;

	ioas = iommufd_get_ioas(ucmd->ictx, mockpt_id);
	if (IS_ERR(ioas))
		return PTR_ERR(ioas);
	down_write(&ioas->iopt.iova_rwsem);
	rc = iopt_reserve_iova(&ioas->iopt, start, start + length - 1, NULL);
	up_write(&ioas->iopt.iova_rwsem);
	iommufd_put_object(&ioas->obj);
	return rc;
}

/* Check that every pfn under each iova matches the pfn under a user VA */
static int iommufd_test_md_check_pa(struct iommufd_ucmd *ucmd,
				    unsigned int mockpt_id, unsigned long iova,
				    size_t length, void __user *uptr)
{
	struct iommufd_hw_pagetable *hwpt;
	struct mock_iommu_domain *mock;
	uintptr_t end;
	int rc;

	if (iova % MOCK_IO_PAGE_SIZE || length % MOCK_IO_PAGE_SIZE ||
	    (uintptr_t)uptr % MOCK_IO_PAGE_SIZE ||
	    check_add_overflow((uintptr_t)uptr, (uintptr_t)length, &end))
		return -EINVAL;

	hwpt = get_md_pagetable(ucmd, mockpt_id, &mock);
	if (IS_ERR(hwpt))
		return PTR_ERR(hwpt);

	for (; length; length -= MOCK_IO_PAGE_SIZE) {
		struct page *pages[1];
		unsigned long pfn;
		long npages;
		void *ent;

		npages = get_user_pages_fast((uintptr_t)uptr & PAGE_MASK, 1,
					     0, pages);
		if (npages < 0) {
			rc = npages;
			goto out_put;
		}
		if (WARN_ON(npages != 1)) {
			rc = -EFAULT;
			goto out_put;
		}
		pfn = page_to_pfn(pages[0]);
		put_page(pages[0]);

		ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
		if (!ent ||
		    (xa_to_value(ent) & MOCK_PFN_MASK) * MOCK_IO_PAGE_SIZE !=
			    pfn * PAGE_SIZE + ((uintptr_t)uptr % PAGE_SIZE)) {
			rc = -EINVAL;
			goto out_put;
		}
		iova += MOCK_IO_PAGE_SIZE;
		uptr += MOCK_IO_PAGE_SIZE;
	}
	rc = 0;

out_put:
	iommufd_put_object(&hwpt->obj);
	return rc;
}

/* Check that the page ref count matches, to look for missing pin/unpins */
static int iommufd_test_md_check_refs(struct iommufd_ucmd *ucmd,
				      void __user *uptr, size_t length,
				      unsigned int refs)
{
	uintptr_t end;

	if (length % PAGE_SIZE || (uintptr_t)uptr % PAGE_SIZE ||
	    check_add_overflow((uintptr_t)uptr, (uintptr_t)length, &end))
		return -EINVAL;

	for (; length; length -= PAGE_SIZE) {
		struct page *pages[1];
		long npages;

		npages = get_user_pages_fast((uintptr_t)uptr, 1, 0, pages);
		if (npages < 0)
			return npages;
		if (WARN_ON(npages != 1))
			return -EFAULT;
		if (!PageCompound(pages[0])) {
			unsigned int count;

			count = page_ref_count(pages[0]);
			if (count / GUP_PIN_COUNTING_BIAS != refs) {
				put_page(pages[0]);
				return -EIO;
			}
		}
		put_page(pages[0]);
		uptr += PAGE_SIZE;
	}
	return 0;
}
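/*
 * For illustration only: the refs check above relies on pin_user_pages()
 * raising a page's refcount by GUP_PIN_COUNTING_BIAS (1024) per pin. A page
 * pinned twice, plus our transient get_user_pages_fast() reference, shows
 * page_ref_count() == 2 * 1024 + 1, and the integer division
 * 2049 / 1024 == 2 recovers the pin count. Compound pages are skipped since
 * their refcounting works differently.
 */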
struct selftest_access {
	struct iommufd_access *access;
	struct file *file;
	struct mutex lock;
	struct list_head items;
	unsigned int next_id;
	bool destroying;
};

struct selftest_access_item {
	struct list_head items_elm;
	unsigned long iova;
	size_t length;
	unsigned int id;
};

static const struct file_operations iommfd_test_staccess_fops;

static struct selftest_access *iommufd_access_get(int fd)
{
	struct file *file;

	file = fget(fd);
	if (!file)
		return ERR_PTR(-EBADFD);

	if (file->f_op != &iommfd_test_staccess_fops) {
		fput(file);
		return ERR_PTR(-EBADFD);
	}
	return file->private_data;
}

static void iommufd_test_access_unmap(void *data, unsigned long iova,
				      unsigned long length)
{
	unsigned long iova_last = iova + length - 1;
	struct selftest_access *staccess = data;
	struct selftest_access_item *item;
	struct selftest_access_item *tmp;

	mutex_lock(&staccess->lock);
	list_for_each_entry_safe(item, tmp, &staccess->items, items_elm) {
		if (iova > item->iova + item->length - 1 ||
		    iova_last < item->iova)
			continue;
		list_del(&item->items_elm);
		iommufd_access_unpin_pages(staccess->access, item->iova,
					   item->length);
		kfree(item);
	}
	mutex_unlock(&staccess->lock);
}

static int iommufd_test_access_item_destroy(struct iommufd_ucmd *ucmd,
					    unsigned int access_id,
					    unsigned int item_id)
{
	struct selftest_access_item *item;
	struct selftest_access *staccess;

	staccess = iommufd_access_get(access_id);
	if (IS_ERR(staccess))
		return PTR_ERR(staccess);

	mutex_lock(&staccess->lock);
	list_for_each_entry(item, &staccess->items, items_elm) {
		if (item->id == item_id) {
			list_del(&item->items_elm);
			iommufd_access_unpin_pages(staccess->access,
						   item->iova, item->length);
			mutex_unlock(&staccess->lock);
			kfree(item);
			/* Drop the reference taken by iommufd_access_get() */
			fput(staccess->file);
			return 0;
		}
	}
	mutex_unlock(&staccess->lock);
	fput(staccess->file);
	return -ENOENT;
}

static int iommufd_test_staccess_release(struct inode *inode,
					 struct file *filep)
{
	struct selftest_access *staccess = filep->private_data;

	if (staccess->access) {
		iommufd_test_access_unmap(staccess, 0, ULONG_MAX);
		iommufd_access_destroy(staccess->access);
	}
	mutex_destroy(&staccess->lock);
	kfree(staccess);
	return 0;
}

static const struct iommufd_access_ops selftest_access_ops_pin = {
	.needs_pin_pages = 1,
	.unmap = iommufd_test_access_unmap,
};

static const struct iommufd_access_ops selftest_access_ops = {
	.unmap = iommufd_test_access_unmap,
};

static const struct file_operations iommfd_test_staccess_fops = {
	.release = iommufd_test_staccess_release,
};

static struct selftest_access *iommufd_test_alloc_access(void)
{
	struct selftest_access *staccess;
	struct file *filep;

	staccess = kzalloc(sizeof(*staccess), GFP_KERNEL_ACCOUNT);
	if (!staccess)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&staccess->items);
	mutex_init(&staccess->lock);

	filep = anon_inode_getfile("[iommufd_test_staccess]",
				   &iommfd_test_staccess_fops, staccess,
				   O_RDWR);
	if (IS_ERR(filep)) {
		kfree(staccess);
		return ERR_CAST(filep);
	}
	staccess->file = filep;
	return staccess;
}
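/*
 * Note on lifetime, for illustration only: the anon inode file owns the
 * selftest_access, so a hypothetical userspace sequence is simply:
 *
 *	// cmd.op = IOMMU_TEST_OP_CREATE_ACCESS; cmd.id = ioas_id;
 *	ioctl(iommufd, IOMMU_TEST_CMD, &cmd);
 *	int access_fd = cmd.create_access.out_access_fd;
 *	...
 *	close(access_fd);  // last ref gone -> iommufd_test_staccess_release()
 */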
static int iommufd_test_create_access(struct iommufd_ucmd *ucmd,
				      unsigned int ioas_id, unsigned int flags)
{
	struct iommu_test_cmd *cmd = ucmd->cmd;
	struct selftest_access *staccess;
	struct iommufd_access *access;
	u32 id;
	int fdno;
	int rc;

	if (flags & ~MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES)
		return -EOPNOTSUPP;

	staccess = iommufd_test_alloc_access();
	if (IS_ERR(staccess))
		return PTR_ERR(staccess);

	fdno = get_unused_fd_flags(O_CLOEXEC);
	if (fdno < 0) {
		rc = fdno;
		goto out_free_staccess;
	}

	access = iommufd_access_create(
		ucmd->ictx,
		(flags & MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES) ?
			&selftest_access_ops_pin :
			&selftest_access_ops,
		staccess, &id);
	if (IS_ERR(access)) {
		rc = PTR_ERR(access);
		goto out_put_fdno;
	}
	rc = iommufd_access_attach(access, ioas_id);
	if (rc)
		goto out_destroy;
	cmd->create_access.out_access_fd = fdno;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
	if (rc)
		goto out_destroy;

	staccess->access = access;
	fd_install(fdno, staccess->file);
	return 0;

out_destroy:
	iommufd_access_destroy(access);
out_put_fdno:
	put_unused_fd(fdno);
out_free_staccess:
	fput(staccess->file);
	return rc;
}

/* Check that the pages in a page array match the pages in the user VA */
static int iommufd_test_check_pages(void __user *uptr, struct page **pages,
				    size_t npages)
{
	for (; npages; npages--) {
		struct page *tmp_pages[1];
		long rc;

		rc = get_user_pages_fast((uintptr_t)uptr, 1, 0, tmp_pages);
		if (rc < 0)
			return rc;
		if (WARN_ON(rc != 1))
			return -EFAULT;
		put_page(tmp_pages[0]);
		if (tmp_pages[0] != *pages)
			return -EBADE;
		pages++;
		uptr += PAGE_SIZE;
	}
	return 0;
}
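/*
 * For illustration only: the page-array sizing in iommufd_test_access_pages()
 * below rounds the range out to full pages. E.g. with PAGE_SIZE == 4096,
 * iova == 0x1f00 and length == 0x300 spans two pages:
 *
 *	(ALIGN(0x1f00 + 0x300, 4096) - ALIGN_DOWN(0x1f00, 4096)) / 4096
 *	== (0x3000 - 0x1000) / 4096 == 2
 */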
static int iommufd_test_access_pages(struct iommufd_ucmd *ucmd,
				     unsigned int access_id,
				     unsigned long iova, size_t length,
				     void __user *uptr, u32 flags)
{
	struct iommu_test_cmd *cmd = ucmd->cmd;
	struct selftest_access_item *item;
	struct selftest_access *staccess;
	struct page **pages;
	size_t npages;
	int rc;

	/* Prevent syzkaller from triggering a WARN_ON in kvzalloc() */
	if (length > 16 * 1024 * 1024)
		return -ENOMEM;

	if (flags & ~(MOCK_FLAGS_ACCESS_WRITE | MOCK_FLAGS_ACCESS_SYZ))
		return -EOPNOTSUPP;

	staccess = iommufd_access_get(access_id);
	if (IS_ERR(staccess))
		return PTR_ERR(staccess);

	if (staccess->access->ops != &selftest_access_ops_pin) {
		rc = -EOPNOTSUPP;
		goto out_put;
	}

	if (flags & MOCK_FLAGS_ACCESS_SYZ)
		iova = iommufd_test_syz_conv_iova(&staccess->access->ioas->iopt,
						  &cmd->access_pages.iova);

	npages = (ALIGN(iova + length, PAGE_SIZE) -
		  ALIGN_DOWN(iova, PAGE_SIZE)) /
		 PAGE_SIZE;
	pages = kvcalloc(npages, sizeof(*pages), GFP_KERNEL_ACCOUNT);
	if (!pages) {
		rc = -ENOMEM;
		goto out_put;
	}

	/*
	 * Drivers will need to think very carefully about this locking. The
	 * core code can do multiple unmaps instantaneously after
	 * iommufd_access_pin_pages() and *all* the unmaps must not return
	 * until the range is unpinned. This simple implementation puts a
	 * global lock around the pin, which may not suit drivers that want
	 * this to be a performance path. Drivers that get this wrong will
	 * trigger WARN_ON races and cause EDEADLOCK failures to userspace.
	 */
	mutex_lock(&staccess->lock);
	rc = iommufd_access_pin_pages(staccess->access, iova, length, pages,
				      flags & MOCK_FLAGS_ACCESS_WRITE);
	if (rc)
		goto out_unlock;

	/* For syzkaller allow uptr to be NULL to skip this check */
	if (uptr) {
		rc = iommufd_test_check_pages(
			uptr - (iova - ALIGN_DOWN(iova, PAGE_SIZE)), pages,
			npages);
		if (rc)
			goto out_unaccess;
	}

	item = kzalloc(sizeof(*item), GFP_KERNEL_ACCOUNT);
	if (!item) {
		rc = -ENOMEM;
		goto out_unaccess;
	}

	item->iova = iova;
	item->length = length;
	item->id = staccess->next_id++;
	list_add_tail(&item->items_elm, &staccess->items);

	cmd->access_pages.out_access_pages_id = item->id;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
	if (rc)
		goto out_free_item;
	goto out_unlock;

out_free_item:
	list_del(&item->items_elm);
	kfree(item);
out_unaccess:
	iommufd_access_unpin_pages(staccess->access, iova, length);
out_unlock:
	mutex_unlock(&staccess->lock);
	kvfree(pages);
out_put:
	fput(staccess->file);
	return rc;
}

static int iommufd_test_access_rw(struct iommufd_ucmd *ucmd,
				  unsigned int access_id, unsigned long iova,
				  size_t length, void __user *ubuf,
				  unsigned int flags)
{
	struct iommu_test_cmd *cmd = ucmd->cmd;
	struct selftest_access *staccess;
	void *tmp;
	int rc;

	/* Prevent syzkaller from triggering a WARN_ON in kvzalloc() */
	if (length > 16 * 1024 * 1024)
		return -ENOMEM;

	if (flags & ~(MOCK_ACCESS_RW_WRITE | MOCK_ACCESS_RW_SLOW_PATH |
		      MOCK_FLAGS_ACCESS_SYZ))
		return -EOPNOTSUPP;

	staccess = iommufd_access_get(access_id);
	if (IS_ERR(staccess))
		return PTR_ERR(staccess);

	tmp = kvzalloc(length, GFP_KERNEL_ACCOUNT);
	if (!tmp) {
		rc = -ENOMEM;
		goto out_put;
	}

	if (flags & MOCK_ACCESS_RW_WRITE) {
		if (copy_from_user(tmp, ubuf, length)) {
			rc = -EFAULT;
			goto out_free;
		}
	}

	if (flags & MOCK_FLAGS_ACCESS_SYZ)
		iova = iommufd_test_syz_conv_iova(&staccess->access->ioas->iopt,
						  &cmd->access_rw.iova);

	rc = iommufd_access_rw(staccess->access, iova, tmp, length, flags);
	if (rc)
		goto out_free;
	if (!(flags & MOCK_ACCESS_RW_WRITE)) {
		if (copy_to_user(ubuf, tmp, length)) {
			rc = -EFAULT;
			goto out_free;
		}
	}

out_free:
	kvfree(tmp);
out_put:
	fput(staccess->file);
	return rc;
}
static_assert((unsigned int)MOCK_ACCESS_RW_WRITE == IOMMUFD_ACCESS_RW_WRITE);
static_assert((unsigned int)MOCK_ACCESS_RW_SLOW_PATH ==
	      __IOMMUFD_ACCESS_RW_SLOW_PATH);

void iommufd_selftest_destroy(struct iommufd_object *obj)
{
	struct selftest_obj *sobj = container_of(obj, struct selftest_obj, obj);

	switch (sobj->type) {
	case TYPE_IDEV:
		iommufd_device_detach(sobj->idev.idev);
		iommufd_device_unbind(sobj->idev.idev);
		mock_dev_destroy(sobj->idev.mock_dev);
		break;
	}
}
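/*
 * For illustration only, a hypothetical userspace read through an access via
 * the dispatcher below (fields per struct iommu_test_cmd; error handling
 * omitted). Note cmd.id here is the access *file descriptor* returned by
 * IOMMU_TEST_OP_CREATE_ACCESS, since iommufd_access_get() resolves it with
 * fget():
 *
 *	char buf[256];
 *	struct iommu_test_cmd cmd = {
 *		.size = sizeof(cmd),
 *		.op = IOMMU_TEST_OP_ACCESS_RW,
 *		.id = access_fd,
 *		.access_rw = { .iova = iova, .length = sizeof(buf),
 *			       .uptr = (uintptr_t)buf },
 *	};
 *	ioctl(iommufd, IOMMU_TEST_CMD, &cmd);	// reads the IOVA range into buf
 */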
int iommufd_test(struct iommufd_ucmd *ucmd)
{
	struct iommu_test_cmd *cmd = ucmd->cmd;

	switch (cmd->op) {
	case IOMMU_TEST_OP_ADD_RESERVED:
		return iommufd_test_add_reserved(ucmd, cmd->id,
						 cmd->add_reserved.start,
						 cmd->add_reserved.length);
	case IOMMU_TEST_OP_MOCK_DOMAIN:
		return iommufd_test_mock_domain(ucmd, cmd);
	case IOMMU_TEST_OP_MD_CHECK_MAP:
		return iommufd_test_md_check_pa(
			ucmd, cmd->id, cmd->check_map.iova,
			cmd->check_map.length,
			u64_to_user_ptr(cmd->check_map.uptr));
	case IOMMU_TEST_OP_MD_CHECK_REFS:
		return iommufd_test_md_check_refs(
			ucmd, u64_to_user_ptr(cmd->check_refs.uptr),
			cmd->check_refs.length, cmd->check_refs.refs);
	case IOMMU_TEST_OP_CREATE_ACCESS:
		return iommufd_test_create_access(ucmd, cmd->id,
						  cmd->create_access.flags);
	case IOMMU_TEST_OP_ACCESS_PAGES:
		return iommufd_test_access_pages(
			ucmd, cmd->id, cmd->access_pages.iova,
			cmd->access_pages.length,
			u64_to_user_ptr(cmd->access_pages.uptr),
			cmd->access_pages.flags);
	case IOMMU_TEST_OP_ACCESS_RW:
		return iommufd_test_access_rw(
			ucmd, cmd->id, cmd->access_rw.iova,
			cmd->access_rw.length,
			u64_to_user_ptr(cmd->access_rw.uptr),
			cmd->access_rw.flags);
	case IOMMU_TEST_OP_DESTROY_ACCESS_PAGES:
		return iommufd_test_access_item_destroy(
			ucmd, cmd->id,
			cmd->destroy_access_pages.access_pages_id);
	case IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT:
		/* Protect _batch_init(), cannot be less than elmsz */
		if (cmd->memory_limit.limit <
		    sizeof(unsigned long) + sizeof(u32))
			return -EINVAL;
		iommufd_test_memory_limit = cmd->memory_limit.limit;
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

bool iommufd_should_fail(void)
{
	return should_fail(&fail_iommufd, 1);
}

void __init iommufd_test_init(void)
{
	dbgfs_root =
		fault_create_debugfs_attr("fail_iommufd", NULL, &fail_iommufd);
	WARN_ON(bus_register(&iommufd_mock_bus_type));
}

void iommufd_test_exit(void)
{
	debugfs_remove_recursive(dbgfs_root);
	bus_unregister(&iommufd_mock_bus_type);
}