1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES.
3 *
4 * Kernel side components to support tools/testing/selftests/iommu
5 */
6 #include <linux/slab.h>
7 #include <linux/iommu.h>
8 #include <linux/xarray.h>
9 #include <linux/file.h>
10 #include <linux/anon_inodes.h>
11 #include <linux/fault-inject.h>
12 #include <linux/platform_device.h>
13 #include <uapi/linux/iommufd.h>
14
15 #include "../iommu-priv.h"
16 #include "io_pagetable.h"
17 #include "iommufd_private.h"
18 #include "iommufd_test.h"
19
20 static DECLARE_FAULT_ATTR(fail_iommufd);
21 static struct dentry *dbgfs_root;
22 static struct platform_device *selftest_iommu_dev;
23
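/* Runtime-adjustable via IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT (see iommufd_test() below) */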
24 size_t iommufd_test_memory_limit = 65536;
25
26 enum {
27 MOCK_IO_PAGE_SIZE = PAGE_SIZE / 2,
28
29 /*
30 * Like a real page table, alignment requires the low bits of the address
31 * to be zero. xarray also requires the high bit to be zero, so we store
32 * the pfns shifted. The upper bits are used for metadata.
33 */
34 MOCK_PFN_MASK = ULONG_MAX / MOCK_IO_PAGE_SIZE,
35
36 _MOCK_PFN_START = MOCK_PFN_MASK + 1,
37 MOCK_PFN_START_IOVA = _MOCK_PFN_START,
38 MOCK_PFN_LAST_IOVA = _MOCK_PFN_START,
39 };
40
41 /*
42 * Syzkaller has trouble randomizing the correct iova to use since it is linked
43 to the map ioctl's output, and it has no idea about that. So, simplify things.
44 In syzkaller mode the 64-bit IOVA is converted into an nth area and offset
45 * value. This has a much smaller randomization space and syzkaller can hit it.
46 */
47 static unsigned long __iommufd_test_syz_conv_iova(struct io_pagetable *iopt,
48 u64 *iova)
49 {
50 struct syz_layout {
51 __u32 nth_area;
52 __u32 offset;
53 };
54 struct syz_layout *syz = (void *)iova;
55 unsigned int nth = syz->nth_area;
56 struct iopt_area *area;
57
58 down_read(&iopt->iova_rwsem);
59 for (area = iopt_area_iter_first(iopt, 0, ULONG_MAX); area;
60 area = iopt_area_iter_next(area, 0, ULONG_MAX)) {
61 if (nth == 0) {
62 up_read(&iopt->iova_rwsem);
63 return iopt_area_iova(area) + syz->offset;
64 }
65 nth--;
66 }
67 up_read(&iopt->iova_rwsem);
68
69 return 0;
70 }
71
72 static unsigned long iommufd_test_syz_conv_iova(struct iommufd_access *access,
73 u64 *iova)
74 {
75 unsigned long ret;
76
77 mutex_lock(&access->ioas_lock);
78 if (!access->ioas) {
79 mutex_unlock(&access->ioas_lock);
80 return 0;
81 }
82 ret = __iommufd_test_syz_conv_iova(&access->ioas->iopt, iova);
83 mutex_unlock(&access->ioas_lock);
84 return ret;
85 }
86
87 void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd,
88 unsigned int ioas_id, u64 *iova, u32 *flags)
89 {
90 struct iommufd_ioas *ioas;
91
92 if (!(*flags & MOCK_FLAGS_ACCESS_SYZ))
93 return;
94 *flags &= ~(u32)MOCK_FLAGS_ACCESS_SYZ;
95
96 ioas = iommufd_get_ioas(ucmd->ictx, ioas_id);
97 if (IS_ERR(ioas))
98 return;
99 *iova = __iommufd_test_syz_conv_iova(&ioas->iopt, iova);
100 iommufd_put_object(&ioas->obj);
101 }
102
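/*
 * The pfns xarray maps iova / MOCK_IO_PAGE_SIZE to an xa_mk_value() holding
 * the shifted pfn plus the MOCK_PFN_* flag bits (see mock_domain_map_pages()).
 */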
103 struct mock_iommu_domain {
104 struct iommu_domain domain;
105 struct xarray pfns;
106 };
107
108 enum selftest_obj_type {
109 TYPE_IDEV,
110 };
111
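/* A minimal fake device placed on the iommufd_mock bus by mock_dev_create() */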
112 struct mock_dev {
113 struct device dev;
114 };
115
116 struct selftest_obj {
117 struct iommufd_object obj;
118 enum selftest_obj_type type;
119
120 union {
121 struct {
122 struct iommufd_device *idev;
123 struct iommufd_ctx *ictx;
124 struct mock_dev *mock_dev;
125 } idev;
126 };
127 };
128
129 static void mock_domain_blocking_free(struct iommu_domain *domain)
130 {
131 }
132
133 static int mock_domain_nop_attach(struct iommu_domain *domain,
134 struct device *dev)
135 {
136 return 0;
137 }
138
139 static const struct iommu_domain_ops mock_blocking_ops = {
140 .free = mock_domain_blocking_free,
141 .attach_dev = mock_domain_nop_attach,
142 };
143
144 static struct iommu_domain mock_blocking_domain = {
145 .type = IOMMU_DOMAIN_BLOCKED,
146 .ops = &mock_blocking_ops,
147 };
148
149 static void *mock_domain_hw_info(struct device *dev, u32 *length, u32 *type)
150 {
151 struct iommu_test_hw_info *info;
152
153 info = kzalloc(sizeof(*info), GFP_KERNEL);
154 if (!info)
155 return ERR_PTR(-ENOMEM);
156
157 info->test_reg = IOMMU_HW_INFO_SELFTEST_REGVAL;
158 *length = sizeof(*info);
159 *type = IOMMU_HW_INFO_TYPE_SELFTEST;
160
161 return info;
162 }
163
164 static struct iommu_domain *mock_domain_alloc(unsigned int iommu_domain_type)
165 {
166 struct mock_iommu_domain *mock;
167
168 if (iommu_domain_type == IOMMU_DOMAIN_BLOCKED)
169 return &mock_blocking_domain;
170
171 if (iommu_domain_type != IOMMU_DOMAIN_UNMANAGED)
172 return NULL;
173
174 mock = kzalloc(sizeof(*mock), GFP_KERNEL);
175 if (!mock)
176 return NULL;
177 mock->domain.geometry.aperture_start = MOCK_APERTURE_START;
178 mock->domain.geometry.aperture_end = MOCK_APERTURE_LAST;
179 mock->domain.pgsize_bitmap = MOCK_IO_PAGE_SIZE;
180 xa_init(&mock->pfns);
181 return &mock->domain;
182 }
183
184 static void mock_domain_free(struct iommu_domain *domain)
185 {
186 struct mock_iommu_domain *mock =
187 container_of(domain, struct mock_iommu_domain, domain);
188
189 WARN_ON(!xa_empty(&mock->pfns));
190 kfree(mock);
191 }
192
193 static int mock_domain_map_pages(struct iommu_domain *domain,
194 unsigned long iova, phys_addr_t paddr,
195 size_t pgsize, size_t pgcount, int prot,
196 gfp_t gfp, size_t *mapped)
197 {
198 struct mock_iommu_domain *mock =
199 container_of(domain, struct mock_iommu_domain, domain);
200 unsigned long flags = MOCK_PFN_START_IOVA;
201 unsigned long start_iova = iova;
202
203 /*
204 * xarray does not reliably work with fault injection because it does a
205 * retry allocation, so put our own failure point.
206 */
207 if (iommufd_should_fail())
208 return -ENOENT;
209
210 WARN_ON(iova % MOCK_IO_PAGE_SIZE);
211 WARN_ON(pgsize % MOCK_IO_PAGE_SIZE);
212 for (; pgcount; pgcount--) {
213 size_t cur;
214
215 for (cur = 0; cur != pgsize; cur += MOCK_IO_PAGE_SIZE) {
216 void *old;
217
218 if (pgcount == 1 && cur + MOCK_IO_PAGE_SIZE == pgsize)
219 flags = MOCK_PFN_LAST_IOVA;
220 old = xa_store(&mock->pfns, iova / MOCK_IO_PAGE_SIZE,
221 xa_mk_value((paddr / MOCK_IO_PAGE_SIZE) |
222 flags),
223 gfp);
224 if (xa_is_err(old)) {
225 for (; start_iova != iova;
226 start_iova += MOCK_IO_PAGE_SIZE)
227 xa_erase(&mock->pfns,
228 start_iova /
229 MOCK_IO_PAGE_SIZE);
230 return xa_err(old);
231 }
232 WARN_ON(old);
233 iova += MOCK_IO_PAGE_SIZE;
234 paddr += MOCK_IO_PAGE_SIZE;
235 *mapped += MOCK_IO_PAGE_SIZE;
236 flags = 0;
237 }
238 }
239 return 0;
240 }
241
242 static size_t mock_domain_unmap_pages(struct iommu_domain *domain,
243 unsigned long iova, size_t pgsize,
244 size_t pgcount,
245 struct iommu_iotlb_gather *iotlb_gather)
246 {
247 struct mock_iommu_domain *mock =
248 container_of(domain, struct mock_iommu_domain, domain);
249 bool first = true;
250 size_t ret = 0;
251 void *ent;
252
253 WARN_ON(iova % MOCK_IO_PAGE_SIZE);
254 WARN_ON(pgsize % MOCK_IO_PAGE_SIZE);
255
256 for (; pgcount; pgcount--) {
257 size_t cur;
258
259 for (cur = 0; cur != pgsize; cur += MOCK_IO_PAGE_SIZE) {
260 ent = xa_erase(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
261 WARN_ON(!ent);
262 /*
263 * iommufd generates unmaps that must be a strict
264 * superset of the maps performed. So every starting
265 * IOVA should have been an IOVA passed to map.
266 *
267 * The first IOVA must be present and have been a first
268 * IOVA passed to map_pages.
269 */
270 if (first) {
271 WARN_ON(!(xa_to_value(ent) &
272 MOCK_PFN_START_IOVA));
273 first = false;
274 }
275 if (pgcount == 1 && cur + MOCK_IO_PAGE_SIZE == pgsize)
276 WARN_ON(!(xa_to_value(ent) &
277 MOCK_PFN_LAST_IOVA));
278
279 iova += MOCK_IO_PAGE_SIZE;
280 ret += MOCK_IO_PAGE_SIZE;
281 }
282 }
283 return ret;
284 }
285
286 static phys_addr_t mock_domain_iova_to_phys(struct iommu_domain *domain,
287 dma_addr_t iova)
288 {
289 struct mock_iommu_domain *mock =
290 container_of(domain, struct mock_iommu_domain, domain);
291 void *ent;
292
293 WARN_ON(iova % MOCK_IO_PAGE_SIZE);
294 ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
295 WARN_ON(!ent);
296 return (xa_to_value(ent) & MOCK_PFN_MASK) * MOCK_IO_PAGE_SIZE;
297 }
298
299 static bool mock_domain_capable(struct device *dev, enum iommu_cap cap)
300 {
301 return cap == IOMMU_CAP_CACHE_COHERENCY;
302 }
303
304 static void mock_domain_set_platform_dma_ops(struct device *dev)
305 {
306 /*
307 * mock doesn't set up default domains because we can't hook into the
308 * normal probe path
309 */
310 }
311
312 static struct iommu_device mock_iommu_device = {
313 };
314
315 static struct iommu_device *mock_probe_device(struct device *dev)
316 {
317 return &mock_iommu_device;
318 }
319
320 static const struct iommu_ops mock_ops = {
321 .owner = THIS_MODULE,
322 .pgsize_bitmap = MOCK_IO_PAGE_SIZE,
323 .hw_info = mock_domain_hw_info,
324 .domain_alloc = mock_domain_alloc,
325 .capable = mock_domain_capable,
326 .set_platform_dma_ops = mock_domain_set_platform_dma_ops,
327 .device_group = generic_device_group,
328 .probe_device = mock_probe_device,
329 .default_domain_ops =
330 &(struct iommu_domain_ops){
331 .free = mock_domain_free,
332 .attach_dev = mock_domain_nop_attach,
333 .map_pages = mock_domain_map_pages,
334 .unmap_pages = mock_domain_unmap_pages,
335 .iova_to_phys = mock_domain_iova_to_phys,
336 },
337 };
338
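/* Look up a hw_pagetable by ID, checking that it is backed by the mock domain */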
339 static inline struct iommufd_hw_pagetable *
340 get_md_pagetable(struct iommufd_ucmd *ucmd, u32 mockpt_id,
341 struct mock_iommu_domain **mock)
342 {
343 struct iommufd_hw_pagetable *hwpt;
344 struct iommufd_object *obj;
345
346 obj = iommufd_get_object(ucmd->ictx, mockpt_id,
347 IOMMUFD_OBJ_HW_PAGETABLE);
348 if (IS_ERR(obj))
349 return ERR_CAST(obj);
350 hwpt = container_of(obj, struct iommufd_hw_pagetable, obj);
351 if (hwpt->domain->ops != mock_ops.default_domain_ops) {
352 iommufd_put_object(&hwpt->obj);
353 return ERR_PTR(-EINVAL);
354 }
355 *mock = container_of(hwpt->domain, struct mock_iommu_domain, domain);
356 return hwpt;
357 }
358
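/*
 * A private bus keeps the mock devices away from real drivers; the notifier
 * is handed to iommu_device_register_bus() during iommufd_test_init().
 */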
359 struct mock_bus_type {
360 struct bus_type bus;
361 struct notifier_block nb;
362 };
363
364 static struct mock_bus_type iommufd_mock_bus_type = {
365 .bus = {
366 .name = "iommufd_mock",
367 },
368 };
369
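/* Gives each mock device a unique name; decremented again in mock_dev_release() */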
370 static atomic_t mock_dev_num;
371
372 static void mock_dev_release(struct device *dev)
373 {
374 struct mock_dev *mdev = container_of(dev, struct mock_dev, dev);
375
376 atomic_dec(&mock_dev_num);
377 kfree(mdev);
378 }
379
380 static struct mock_dev *mock_dev_create(void)
381 {
382 struct mock_dev *mdev;
383 int rc;
384
385 mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
386 if (!mdev)
387 return ERR_PTR(-ENOMEM);
388
389 device_initialize(&mdev->dev);
390 mdev->dev.release = mock_dev_release;
391 mdev->dev.bus = &iommufd_mock_bus_type.bus;
392
393 rc = dev_set_name(&mdev->dev, "iommufd_mock%u",
394 atomic_inc_return(&mock_dev_num));
395 if (rc)
396 goto err_put;
397
398 rc = device_add(&mdev->dev);
399 if (rc)
400 goto err_put;
401 return mdev;
402
403 err_put:
404 put_device(&mdev->dev);
405 return ERR_PTR(rc);
406 }
407
408 static void mock_dev_destroy(struct mock_dev *mdev)
409 {
410 device_unregister(&mdev->dev);
411 }
412
413 bool iommufd_selftest_is_mock_dev(struct device *dev)
414 {
415 return dev->release == mock_dev_release;
416 }
417
418 /* Create an hw_pagetable with the mock domain so we can test the domain ops */
419 static int iommufd_test_mock_domain(struct iommufd_ucmd *ucmd,
420 struct iommu_test_cmd *cmd)
421 {
422 struct iommufd_device *idev;
423 struct selftest_obj *sobj;
424 u32 pt_id = cmd->id;
425 u32 idev_id;
426 int rc;
427
428 sobj = iommufd_object_alloc(ucmd->ictx, sobj, IOMMUFD_OBJ_SELFTEST);
429 if (IS_ERR(sobj))
430 return PTR_ERR(sobj);
431
432 sobj->idev.ictx = ucmd->ictx;
433 sobj->type = TYPE_IDEV;
434
435 sobj->idev.mock_dev = mock_dev_create();
436 if (IS_ERR(sobj->idev.mock_dev)) {
437 rc = PTR_ERR(sobj->idev.mock_dev);
438 goto out_sobj;
439 }
440
441 idev = iommufd_device_bind(ucmd->ictx, &sobj->idev.mock_dev->dev,
442 &idev_id);
443 if (IS_ERR(idev)) {
444 rc = PTR_ERR(idev);
445 goto out_mdev;
446 }
447 sobj->idev.idev = idev;
448
449 rc = iommufd_device_attach(idev, &pt_id);
450 if (rc)
451 goto out_unbind;
452
453 /* Userspace must destroy the device_id to destroy the object */
454 cmd->mock_domain.out_hwpt_id = pt_id;
455 cmd->mock_domain.out_stdev_id = sobj->obj.id;
456 cmd->mock_domain.out_idev_id = idev_id;
457 rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
458 if (rc)
459 goto out_detach;
460 iommufd_object_finalize(ucmd->ictx, &sobj->obj);
461 return 0;
462
463 out_detach:
464 iommufd_device_detach(idev);
465 out_unbind:
466 iommufd_device_unbind(idev);
467 out_mdev:
468 mock_dev_destroy(sobj->idev.mock_dev);
469 out_sobj:
470 iommufd_object_abort(ucmd->ictx, &sobj->obj);
471 return rc;
472 }
473
474 /* Replace the mock domain with a manually allocated hw_pagetable */
475 static int iommufd_test_mock_domain_replace(struct iommufd_ucmd *ucmd,
476 unsigned int device_id, u32 pt_id,
477 struct iommu_test_cmd *cmd)
478 {
479 struct iommufd_object *dev_obj;
480 struct selftest_obj *sobj;
481 int rc;
482
483 /*
484 * Prefer to use the OBJ_SELFTEST because the destroy_rwsem will ensure
485 * it doesn't race with detach, which is not allowed.
486 */
487 dev_obj =
488 iommufd_get_object(ucmd->ictx, device_id, IOMMUFD_OBJ_SELFTEST);
489 if (IS_ERR(dev_obj))
490 return PTR_ERR(dev_obj);
491
492 sobj = container_of(dev_obj, struct selftest_obj, obj);
493 if (sobj->type != TYPE_IDEV) {
494 rc = -EINVAL;
495 goto out_dev_obj;
496 }
497
498 rc = iommufd_device_replace(sobj->idev.idev, &pt_id);
499 if (rc)
500 goto out_dev_obj;
501
502 cmd->mock_domain_replace.pt_id = pt_id;
503 rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
504
505 out_dev_obj:
506 iommufd_put_object(dev_obj);
507 return rc;
508 }
509
510 /* Add an additional reserved IOVA to the IOAS */
511 static int iommufd_test_add_reserved(struct iommufd_ucmd *ucmd,
512 unsigned int mockpt_id,
513 unsigned long start, size_t length)
514 {
515 struct iommufd_ioas *ioas;
516 int rc;
517
518 ioas = iommufd_get_ioas(ucmd->ictx, mockpt_id);
519 if (IS_ERR(ioas))
520 return PTR_ERR(ioas);
521 down_write(&ioas->iopt.iova_rwsem);
522 rc = iopt_reserve_iova(&ioas->iopt, start, start + length - 1, NULL);
523 up_write(&ioas->iopt.iova_rwsem);
524 iommufd_put_object(&ioas->obj);
525 return rc;
526 }
527
528 /* Check that every pfn under each iova matches the pfn under a user VA */
529 static int iommufd_test_md_check_pa(struct iommufd_ucmd *ucmd,
530 unsigned int mockpt_id, unsigned long iova,
531 size_t length, void __user *uptr)
532 {
533 struct iommufd_hw_pagetable *hwpt;
534 struct mock_iommu_domain *mock;
535 uintptr_t end;
536 int rc;
537
538 if (iova % MOCK_IO_PAGE_SIZE || length % MOCK_IO_PAGE_SIZE ||
539 (uintptr_t)uptr % MOCK_IO_PAGE_SIZE ||
540 check_add_overflow((uintptr_t)uptr, (uintptr_t)length, &end))
541 return -EINVAL;
542
543 hwpt = get_md_pagetable(ucmd, mockpt_id, &mock);
544 if (IS_ERR(hwpt))
545 return PTR_ERR(hwpt);
546
547 for (; length; length -= MOCK_IO_PAGE_SIZE) {
548 struct page *pages[1];
549 unsigned long pfn;
550 long npages;
551 void *ent;
552
553 npages = get_user_pages_fast((uintptr_t)uptr & PAGE_MASK, 1, 0,
554 pages);
555 if (npages < 0) {
556 rc = npages;
557 goto out_put;
558 }
559 if (WARN_ON(npages != 1)) {
560 rc = -EFAULT;
561 goto out_put;
562 }
563 pfn = page_to_pfn(pages[0]);
564 put_page(pages[0]);
565
566 ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
567 if (!ent ||
568 (xa_to_value(ent) & MOCK_PFN_MASK) * MOCK_IO_PAGE_SIZE !=
569 pfn * PAGE_SIZE + ((uintptr_t)uptr % PAGE_SIZE)) {
570 rc = -EINVAL;
571 goto out_put;
572 }
573 iova += MOCK_IO_PAGE_SIZE;
574 uptr += MOCK_IO_PAGE_SIZE;
575 }
576 rc = 0;
577
578 out_put:
579 iommufd_put_object(&hwpt->obj);
580 return rc;
581 }
582
583 /* Check that the page ref count matches, to look for missing pin/unpins */
584 static int iommufd_test_md_check_refs(struct iommufd_ucmd *ucmd,
585 void __user *uptr, size_t length,
586 unsigned int refs)
587 {
588 uintptr_t end;
589
590 if (length % PAGE_SIZE || (uintptr_t)uptr % PAGE_SIZE ||
591 check_add_overflow((uintptr_t)uptr, (uintptr_t)length, &end))
592 return -EINVAL;
593
594 for (; length; length -= PAGE_SIZE) {
595 struct page *pages[1];
596 long npages;
597
598 npages = get_user_pages_fast((uintptr_t)uptr, 1, 0, pages);
599 if (npages < 0)
600 return npages;
601 if (WARN_ON(npages != 1))
602 return -EFAULT;
603 if (!PageCompound(pages[0])) {
604 unsigned int count;
605
606 count = page_ref_count(pages[0]);
607 if (count / GUP_PIN_COUNTING_BIAS != refs) {
608 put_page(pages[0]);
609 return -EIO;
610 }
611 }
612 put_page(pages[0]);
613 uptr += PAGE_SIZE;
614 }
615 return 0;
616 }
617
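/*
 * State behind the file returned by IOMMU_TEST_OP_CREATE_ACCESS. items tracks
 * the ranges pinned through IOMMU_TEST_OP_ACCESS_PAGES so they can be unpinned
 * from the unmap callback or on destroy.
 */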
618 struct selftest_access {
619 struct iommufd_access *access;
620 struct file *file;
621 struct mutex lock;
622 struct list_head items;
623 unsigned int next_id;
624 bool destroying;
625 };
626
627 struct selftest_access_item {
628 struct list_head items_elm;
629 unsigned long iova;
630 size_t length;
631 unsigned int id;
632 };
633
634 static const struct file_operations iommufd_test_staccess_fops;
635
636 static struct selftest_access *iommufd_access_get(int fd)
637 {
638 struct file *file;
639
640 file = fget(fd);
641 if (!file)
642 return ERR_PTR(-EBADFD);
643
644 if (file->f_op != &iommufd_test_staccess_fops) {
645 fput(file);
646 return ERR_PTR(-EBADFD);
647 }
648 return file->private_data;
649 }
650
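/* The access ops' unmap callback: unpin every pinned range overlapping the unmapped window */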
651 static void iommufd_test_access_unmap(void *data, unsigned long iova,
652 unsigned long length)
653 {
654 unsigned long iova_last = iova + length - 1;
655 struct selftest_access *staccess = data;
656 struct selftest_access_item *item;
657 struct selftest_access_item *tmp;
658
659 mutex_lock(&staccess->lock);
660 list_for_each_entry_safe(item, tmp, &staccess->items, items_elm) {
661 if (iova > item->iova + item->length - 1 ||
662 iova_last < item->iova)
663 continue;
664 list_del(&item->items_elm);
665 iommufd_access_unpin_pages(staccess->access, item->iova,
666 item->length);
667 kfree(item);
668 }
669 mutex_unlock(&staccess->lock);
670 }
671
672 static int iommufd_test_access_item_destroy(struct iommufd_ucmd *ucmd,
673 unsigned int access_id,
674 unsigned int item_id)
675 {
676 struct selftest_access_item *item;
677 struct selftest_access *staccess;
678
679 staccess = iommufd_access_get(access_id);
680 if (IS_ERR(staccess))
681 return PTR_ERR(staccess);
682
683 mutex_lock(&staccess->lock);
684 list_for_each_entry(item, &staccess->items, items_elm) {
685 if (item->id == item_id) {
686 list_del(&item->items_elm);
687 iommufd_access_unpin_pages(staccess->access, item->iova,
688 item->length);
689 mutex_unlock(&staccess->lock);
690 kfree(item);
691 fput(staccess->file);
692 return 0;
693 }
694 }
695 mutex_unlock(&staccess->lock);
696 fput(staccess->file);
697 return -ENOENT;
698 }
699
700 static int iommufd_test_staccess_release(struct inode *inode,
701 struct file *filep)
702 {
703 struct selftest_access *staccess = filep->private_data;
704
705 if (staccess->access) {
706 iommufd_test_access_unmap(staccess, 0, ULONG_MAX);
707 iommufd_access_destroy(staccess->access);
708 }
709 mutex_destroy(&staccess->lock);
710 kfree(staccess);
711 return 0;
712 }
713
714 static const struct iommufd_access_ops selftest_access_ops_pin = {
715 .needs_pin_pages = 1,
716 .unmap = iommufd_test_access_unmap,
717 };
718
719 static const struct iommufd_access_ops selftest_access_ops = {
720 .unmap = iommufd_test_access_unmap,
721 };
722
723 static const struct file_operations iommufd_test_staccess_fops = {
724 .release = iommufd_test_staccess_release,
725 };
726
727 static struct selftest_access *iommufd_test_alloc_access(void)
728 {
729 struct selftest_access *staccess;
730 struct file *filep;
731
732 staccess = kzalloc(sizeof(*staccess), GFP_KERNEL_ACCOUNT);
733 if (!staccess)
734 return ERR_PTR(-ENOMEM);
735 INIT_LIST_HEAD(&staccess->items);
736 mutex_init(&staccess->lock);
737
738 filep = anon_inode_getfile("[iommufd_test_staccess]",
739 &iommufd_test_staccess_fops, staccess,
740 O_RDWR);
741 if (IS_ERR(filep)) {
742 kfree(staccess);
743 return ERR_CAST(filep);
744 }
745 staccess->file = filep;
746 return staccess;
747 }
748
749 static int iommufd_test_create_access(struct iommufd_ucmd *ucmd,
750 unsigned int ioas_id, unsigned int flags)
751 {
752 struct iommu_test_cmd *cmd = ucmd->cmd;
753 struct selftest_access *staccess;
754 struct iommufd_access *access;
755 u32 id;
756 int fdno;
757 int rc;
758
759 if (flags & ~MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES)
760 return -EOPNOTSUPP;
761
762 staccess = iommufd_test_alloc_access();
763 if (IS_ERR(staccess))
764 return PTR_ERR(staccess);
765
766 fdno = get_unused_fd_flags(O_CLOEXEC);
767 if (fdno < 0) {
768 rc = -ENOMEM;
769 goto out_free_staccess;
770 }
771
772 access = iommufd_access_create(
773 ucmd->ictx,
774 (flags & MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES) ?
775 &selftest_access_ops_pin :
776 &selftest_access_ops,
777 staccess, &id);
778 if (IS_ERR(access)) {
779 rc = PTR_ERR(access);
780 goto out_put_fdno;
781 }
782 rc = iommufd_access_attach(access, ioas_id);
783 if (rc)
784 goto out_destroy;
785 cmd->create_access.out_access_fd = fdno;
786 rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
787 if (rc)
788 goto out_destroy;
789
790 staccess->access = access;
791 fd_install(fdno, staccess->file);
792 return 0;
793
794 out_destroy:
795 iommufd_access_destroy(access);
796 out_put_fdno:
797 put_unused_fd(fdno);
798 out_free_staccess:
799 fput(staccess->file);
800 return rc;
801 }
802
803 static int iommufd_test_access_replace_ioas(struct iommufd_ucmd *ucmd,
804 unsigned int access_id,
805 unsigned int ioas_id)
806 {
807 struct selftest_access *staccess;
808 int rc;
809
810 staccess = iommufd_access_get(access_id);
811 if (IS_ERR(staccess))
812 return PTR_ERR(staccess);
813
814 rc = iommufd_access_replace(staccess->access, ioas_id);
815 fput(staccess->file);
816 return rc;
817 }
818
819 /* Check that the pages in a page array match the pages in the user VA */
820 static int iommufd_test_check_pages(void __user *uptr, struct page **pages,
821 size_t npages)
822 {
823 for (; npages; npages--) {
824 struct page *tmp_pages[1];
825 long rc;
826
827 rc = get_user_pages_fast((uintptr_t)uptr, 1, 0, tmp_pages);
828 if (rc < 0)
829 return rc;
830 if (WARN_ON(rc != 1))
831 return -EFAULT;
832 put_page(tmp_pages[0]);
833 if (tmp_pages[0] != *pages)
834 return -EBADE;
835 pages++;
836 uptr += PAGE_SIZE;
837 }
838 return 0;
839 }
840
841 static int iommufd_test_access_pages(struct iommufd_ucmd *ucmd,
842 unsigned int access_id, unsigned long iova,
843 size_t length, void __user *uptr,
844 u32 flags)
845 {
846 struct iommu_test_cmd *cmd = ucmd->cmd;
847 struct selftest_access_item *item;
848 struct selftest_access *staccess;
849 struct page **pages;
850 size_t npages;
851 int rc;
852
853 /* Prevent syzkaller from triggering a WARN_ON in kvzalloc() */
854 if (length > 16*1024*1024)
855 return -ENOMEM;
856
857 if (flags & ~(MOCK_FLAGS_ACCESS_WRITE | MOCK_FLAGS_ACCESS_SYZ))
858 return -EOPNOTSUPP;
859
860 staccess = iommufd_access_get(access_id);
861 if (IS_ERR(staccess))
862 return PTR_ERR(staccess);
863
864 if (staccess->access->ops != &selftest_access_ops_pin) {
865 rc = -EOPNOTSUPP;
866 goto out_put;
867 }
868
869 if (flags & MOCK_FLAGS_ACCESS_SYZ)
870 iova = iommufd_test_syz_conv_iova(staccess->access,
871 &cmd->access_pages.iova);
872
873 npages = (ALIGN(iova + length, PAGE_SIZE) -
874 ALIGN_DOWN(iova, PAGE_SIZE)) /
875 PAGE_SIZE;
876 pages = kvcalloc(npages, sizeof(*pages), GFP_KERNEL_ACCOUNT);
877 if (!pages) {
878 rc = -ENOMEM;
879 goto out_put;
880 }
881
882 /*
883 * Drivers will need to think very carefully about this locking. The
884 * core code can do multiple unmaps instantaneously after
885 * iommufd_access_pin_pages() and *all* the unmaps must not return until
886 * the range is unpinned. This simple implementation puts a global lock
887 * around the pin, which may not suit drivers that want this to be a
888 * performance path. Drivers that get this wrong will trigger WARN_ON
889 * races and cause EDEADLOCK failures to userspace.
890 */
891 mutex_lock(&staccess->lock);
892 rc = iommufd_access_pin_pages(staccess->access, iova, length, pages,
893 flags & MOCK_FLAGS_ACCESS_WRITE);
894 if (rc)
895 goto out_unlock;
896
897 /* For syzkaller allow uptr to be NULL to skip this check */
898 if (uptr) {
899 rc = iommufd_test_check_pages(
900 uptr - (iova - ALIGN_DOWN(iova, PAGE_SIZE)), pages,
901 npages);
902 if (rc)
903 goto out_unaccess;
904 }
905
906 item = kzalloc(sizeof(*item), GFP_KERNEL_ACCOUNT);
907 if (!item) {
908 rc = -ENOMEM;
909 goto out_unaccess;
910 }
911
912 item->iova = iova;
913 item->length = length;
914 item->id = staccess->next_id++;
915 list_add_tail(&item->items_elm, &staccess->items);
916
917 cmd->access_pages.out_access_pages_id = item->id;
918 rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
919 if (rc)
920 goto out_free_item;
921 goto out_unlock;
922
923 out_free_item:
924 list_del(&item->items_elm);
925 kfree(item);
926 out_unaccess:
927 iommufd_access_unpin_pages(staccess->access, iova, length);
928 out_unlock:
929 mutex_unlock(&staccess->lock);
930 kvfree(pages);
931 out_put:
932 fput(staccess->file);
933 return rc;
934 }
935
936 static int iommufd_test_access_rw(struct iommufd_ucmd *ucmd,
937 unsigned int access_id, unsigned long iova,
938 size_t length, void __user *ubuf,
939 unsigned int flags)
940 {
941 struct iommu_test_cmd *cmd = ucmd->cmd;
942 struct selftest_access *staccess;
943 void *tmp;
944 int rc;
945
946 /* Prevent syzkaller from triggering a WARN_ON in kvzalloc() */
947 if (length > 16*1024*1024)
948 return -ENOMEM;
949
950 if (flags & ~(MOCK_ACCESS_RW_WRITE | MOCK_ACCESS_RW_SLOW_PATH |
951 MOCK_FLAGS_ACCESS_SYZ))
952 return -EOPNOTSUPP;
953
954 staccess = iommufd_access_get(access_id);
955 if (IS_ERR(staccess))
956 return PTR_ERR(staccess);
957
958 tmp = kvzalloc(length, GFP_KERNEL_ACCOUNT);
959 if (!tmp) {
960 rc = -ENOMEM;
961 goto out_put;
962 }
963
964 if (flags & MOCK_ACCESS_RW_WRITE) {
965 if (copy_from_user(tmp, ubuf, length)) {
966 rc = -EFAULT;
967 goto out_free;
968 }
969 }
970
971 if (flags & MOCK_FLAGS_ACCESS_SYZ)
972 iova = iommufd_test_syz_conv_iova(staccess->access,
973 &cmd->access_rw.iova);
974
975 rc = iommufd_access_rw(staccess->access, iova, tmp, length, flags);
976 if (rc)
977 goto out_free;
978 if (!(flags & MOCK_ACCESS_RW_WRITE)) {
979 if (copy_to_user(ubuf, tmp, length)) {
980 rc = -EFAULT;
981 goto out_free;
982 }
983 }
984
985 out_free:
986 kvfree(tmp);
987 out_put:
988 fput(staccess->file);
989 return rc;
990 }
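/* The MOCK_ACCESS_RW_* flags are passed straight through to iommufd_access_rw(), so they must stay equal to the IOMMUFD_ACCESS_RW_* values */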
991 static_assert((unsigned int)MOCK_ACCESS_RW_WRITE == IOMMUFD_ACCESS_RW_WRITE);
992 static_assert((unsigned int)MOCK_ACCESS_RW_SLOW_PATH ==
993 __IOMMUFD_ACCESS_RW_SLOW_PATH);
994
995 void iommufd_selftest_destroy(struct iommufd_object *obj)
996 {
997 struct selftest_obj *sobj = container_of(obj, struct selftest_obj, obj);
998
999 switch (sobj->type) {
1000 case TYPE_IDEV:
1001 iommufd_device_detach(sobj->idev.idev);
1002 iommufd_device_unbind(sobj->idev.idev);
1003 mock_dev_destroy(sobj->idev.mock_dev);
1004 break;
1005 }
1006 }
1007
1008 int iommufd_test(struct iommufd_ucmd *ucmd)
1009 {
1010 struct iommu_test_cmd *cmd = ucmd->cmd;
1011
1012 switch (cmd->op) {
1013 case IOMMU_TEST_OP_ADD_RESERVED:
1014 return iommufd_test_add_reserved(ucmd, cmd->id,
1015 cmd->add_reserved.start,
1016 cmd->add_reserved.length);
1017 case IOMMU_TEST_OP_MOCK_DOMAIN:
1018 return iommufd_test_mock_domain(ucmd, cmd);
1019 case IOMMU_TEST_OP_MOCK_DOMAIN_REPLACE:
1020 return iommufd_test_mock_domain_replace(
1021 ucmd, cmd->id, cmd->mock_domain_replace.pt_id, cmd);
1022 case IOMMU_TEST_OP_MD_CHECK_MAP:
1023 return iommufd_test_md_check_pa(
1024 ucmd, cmd->id, cmd->check_map.iova,
1025 cmd->check_map.length,
1026 u64_to_user_ptr(cmd->check_map.uptr));
1027 case IOMMU_TEST_OP_MD_CHECK_REFS:
1028 return iommufd_test_md_check_refs(
1029 ucmd, u64_to_user_ptr(cmd->check_refs.uptr),
1030 cmd->check_refs.length, cmd->check_refs.refs);
1031 case IOMMU_TEST_OP_CREATE_ACCESS:
1032 return iommufd_test_create_access(ucmd, cmd->id,
1033 cmd->create_access.flags);
1034 case IOMMU_TEST_OP_ACCESS_REPLACE_IOAS:
1035 return iommufd_test_access_replace_ioas(
1036 ucmd, cmd->id, cmd->access_replace_ioas.ioas_id);
1037 case IOMMU_TEST_OP_ACCESS_PAGES:
1038 return iommufd_test_access_pages(
1039 ucmd, cmd->id, cmd->access_pages.iova,
1040 cmd->access_pages.length,
1041 u64_to_user_ptr(cmd->access_pages.uptr),
1042 cmd->access_pages.flags);
1043 case IOMMU_TEST_OP_ACCESS_RW:
1044 return iommufd_test_access_rw(
1045 ucmd, cmd->id, cmd->access_rw.iova,
1046 cmd->access_rw.length,
1047 u64_to_user_ptr(cmd->access_rw.uptr),
1048 cmd->access_rw.flags);
1049 case IOMMU_TEST_OP_DESTROY_ACCESS_PAGES:
1050 return iommufd_test_access_item_destroy(
1051 ucmd, cmd->id, cmd->destroy_access_pages.access_pages_id);
1052 case IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT:
1053 /* Protect _batch_init(); the limit cannot be less than elmsz */
1054 if (cmd->memory_limit.limit <
1055 sizeof(unsigned long) + sizeof(u32))
1056 return -EINVAL;
1057 iommufd_test_memory_limit = cmd->memory_limit.limit;
1058 return 0;
1059 default:
1060 return -EOPNOTSUPP;
1061 }
1062 }
1063
1064 bool iommufd_should_fail(void)
1065 {
1066 return should_fail(&fail_iommufd, 1);
1067 }
1068
1069 int __init iommufd_test_init(void)
1070 {
1071 struct platform_device_info pdevinfo = {
1072 .name = "iommufd_selftest_iommu",
1073 };
1074 int rc;
1075
1076 dbgfs_root =
1077 fault_create_debugfs_attr("fail_iommufd", NULL, &fail_iommufd);
1078
1079 selftest_iommu_dev = platform_device_register_full(&pdevinfo);
1080 if (IS_ERR(selftest_iommu_dev)) {
1081 rc = PTR_ERR(selftest_iommu_dev);
1082 goto err_dbgfs;
1083 }
1084
1085 rc = bus_register(&iommufd_mock_bus_type.bus);
1086 if (rc)
1087 goto err_platform;
1088
1089 rc = iommu_device_sysfs_add(&mock_iommu_device,
1090 &selftest_iommu_dev->dev, NULL, "%s",
1091 dev_name(&selftest_iommu_dev->dev));
1092 if (rc)
1093 goto err_bus;
1094
1095 rc = iommu_device_register_bus(&mock_iommu_device, &mock_ops,
1096 &iommufd_mock_bus_type.bus,
1097 &iommufd_mock_bus_type.nb);
1098 if (rc)
1099 goto err_sysfs;
1100 return 0;
1101
1102 err_sysfs:
1103 iommu_device_sysfs_remove(&mock_iommu_device);
1104 err_bus:
1105 bus_unregister(&iommufd_mock_bus_type.bus);
1106 err_platform:
1107 platform_device_unregister(selftest_iommu_dev);
1108 err_dbgfs:
1109 debugfs_remove_recursive(dbgfs_root);
1110 return rc;
1111 }
1112
1113 void iommufd_test_exit(void)
1114 {
1115 iommu_device_sysfs_remove(&mock_iommu_device);
1116 iommu_device_unregister_bus(&mock_iommu_device,
1117 &iommufd_mock_bus_type.bus,
1118 &iommufd_mock_bus_type.nb);
1119 bus_unregister(&iommufd_mock_bus_type.bus);
1120 platform_device_unregister(selftest_iommu_dev);
1121 debugfs_remove_recursive(dbgfs_root);
1122 }
1123