1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES */
3 #include <stdlib.h>
4 #include <sys/mman.h>
5 #include <sys/eventfd.h>
6 
7 #define __EXPORTED_HEADERS__
8 #include <linux/vfio.h>
9 
10 #include "iommufd_utils.h"
11 
/* Hugepage-aligned scratch buffer shared by all tests; created in setup_sizes() */
static void *buffer;

/* Runtime-detected sizes, filled in by the setup_sizes() constructor */
static unsigned long PAGE_SIZE;
static unsigned long HUGEPAGE_SIZE;

/* Half the CPU page size — presumably the mock iommu driver's page size; confirm in the mock driver */
#define MOCK_PAGE_SIZE (PAGE_SIZE / 2)
18 
/*
 * Return the transparent hugepage PMD size advertised by sysfs, falling
 * back to 2MiB when the file is missing or unreadable.
 */
static unsigned long get_huge_page_size(void)
{
	const unsigned long fallback = 2 * 1024 * 1024;
	char size_buf[80];
	int sysfs_fd;
	int nread;

	sysfs_fd = open("/sys/kernel/mm/transparent_hugepage/hpage_pmd_size",
			O_RDONLY);
	if (sysfs_fd < 0)
		return fallback;

	nread = read(sysfs_fd, size_buf, sizeof(size_buf));
	close(sysfs_fd);
	/* Reject errors, empty reads, and a possibly-truncated value */
	if (nread <= 0 || nread == sizeof(size_buf))
		return fallback;

	size_buf[nread] = 0;
	return strtoul(size_buf, NULL, 10);
}
37 
/*
 * Constructor: runs before main(). Detects the page and hugepage sizes and
 * builds the global test buffer: a hugepage-aligned range remapped in place
 * so it is backed by MAP_SHARED anonymous memory instead of heap pages.
 */
static __attribute__((constructor)) void setup_sizes(void)
{
	void *vrc;
	int rc;

	PAGE_SIZE = sysconf(_SC_PAGE_SIZE);
	HUGEPAGE_SIZE = get_huge_page_size();

	BUFFER_SIZE = PAGE_SIZE * 16;
	/* posix_memalign only reserves a suitably aligned address range... */
	rc = posix_memalign(&buffer, HUGEPAGE_SIZE, BUFFER_SIZE);
	assert(!rc);
	assert(buffer);
	assert((uintptr_t)buffer % HUGEPAGE_SIZE == 0);
	/* ...which MAP_FIXED then replaces with fresh anonymous shared pages */
	vrc = mmap(buffer, BUFFER_SIZE, PROT_READ | PROT_WRITE,
		   MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
	assert(vrc == buffer);
}
55 
/* Minimal fixture: just an open /dev/iommu file descriptor */
FIXTURE(iommufd)
{
	int fd;
};
60 
/* Open the iommufd char device for each test */
FIXTURE_SETUP(iommufd)
{
	self->fd = open("/dev/iommu", O_RDWR);
	ASSERT_NE(-1, self->fd);
}
66 
/* Close the fd and let the shared helper check for leaked objects */
FIXTURE_TEARDOWN(iommufd)
{
	teardown_iommufd(self->fd, _metadata);
}
71 
/* Opening and immediately closing /dev/iommu must not leak or crash */
TEST_F(iommufd, simple_close)
{
}
75 
/* Basic ioctl error paths: bad object id, bad pointer, unknown command */
TEST_F(iommufd, cmd_fail)
{
	struct iommu_destroy cmd = { .size = sizeof(cmd), .id = 0 };

	/* object id is invalid */
	EXPECT_ERRNO(ENOENT, _test_ioctl_destroy(self->fd, 0));
	/* Bad pointer */
	EXPECT_ERRNO(EFAULT, ioctl(self->fd, IOMMU_DESTROY, NULL));
	/* Unknown ioctl */
	EXPECT_ERRNO(ENOTTY,
		     ioctl(self->fd, _IO(IOMMUFD_TYPE, IOMMUFD_CMD_BASE - 1),
			   &cmd));
}
89 
/*
 * Every command must reject a size smaller than its struct (EINVAL), reject
 * a larger size carrying non-zero trailing bytes (E2BIG), and treat a larger
 * size with a zeroed tail the same as the exact size — the forward
 * compatibility contract for extended commands.
 */
TEST_F(iommufd, cmd_length)
{
#define TEST_LENGTH(_struct, _ioctl)                                     \
	{                                                                \
		struct {                                                 \
			struct _struct cmd;                              \
			uint8_t extra;                                   \
		} cmd = { .cmd = { .size = sizeof(struct _struct) - 1 }, \
			  .extra = UINT8_MAX };                          \
		int old_errno;                                           \
		int rc;                                                  \
									 \
		EXPECT_ERRNO(EINVAL, ioctl(self->fd, _ioctl, &cmd));     \
		cmd.cmd.size = sizeof(struct _struct) + 1;               \
		EXPECT_ERRNO(E2BIG, ioctl(self->fd, _ioctl, &cmd));      \
		cmd.cmd.size = sizeof(struct _struct);                   \
		rc = ioctl(self->fd, _ioctl, &cmd);                      \
		old_errno = errno;                                       \
		cmd.cmd.size = sizeof(struct _struct) + 1;               \
		cmd.extra = 0;                                           \
		if (rc) {                                                \
			EXPECT_ERRNO(old_errno,                          \
				     ioctl(self->fd, _ioctl, &cmd));     \
		} else {                                                 \
			ASSERT_EQ(0, ioctl(self->fd, _ioctl, &cmd));     \
		}                                                        \
	}

	/* Apply the size contract to every current iommufd command */
	TEST_LENGTH(iommu_destroy, IOMMU_DESTROY);
	TEST_LENGTH(iommu_ioas_alloc, IOMMU_IOAS_ALLOC);
	TEST_LENGTH(iommu_ioas_iova_ranges, IOMMU_IOAS_IOVA_RANGES);
	TEST_LENGTH(iommu_ioas_allow_iovas, IOMMU_IOAS_ALLOW_IOVAS);
	TEST_LENGTH(iommu_ioas_map, IOMMU_IOAS_MAP);
	TEST_LENGTH(iommu_ioas_copy, IOMMU_IOAS_COPY);
	TEST_LENGTH(iommu_ioas_unmap, IOMMU_IOAS_UNMAP);
	TEST_LENGTH(iommu_option, IOMMU_OPTION);
	TEST_LENGTH(iommu_vfio_ioas, IOMMU_VFIO_IOAS);
#undef TEST_LENGTH
}
129 
/* Size handling for a command extended with a trailing "future" field */
TEST_F(iommufd, cmd_ex_fail)
{
	struct {
		struct iommu_destroy cmd;
		__u64 future;
	} cmd = { .cmd = { .size = sizeof(cmd), .id = 0 } };

	/* object id is invalid and command is longer */
	EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_DESTROY, &cmd));
	/* future area is non-zero */
	cmd.future = 1;
	EXPECT_ERRNO(E2BIG, ioctl(self->fd, IOMMU_DESTROY, &cmd));
	/* Original command "works" */
	cmd.cmd.size = sizeof(cmd.cmd);
	EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_DESTROY, &cmd));
	/* Short command fails */
	cmd.cmd.size = sizeof(cmd.cmd) - 1;
	EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_DESTROY, &cmd));
}
149 
150 TEST_F(iommufd, global_options)
151 {
152 	struct iommu_option cmd = {
153 		.size = sizeof(cmd),
154 		.option_id = IOMMU_OPTION_RLIMIT_MODE,
155 		.op = IOMMU_OPTION_OP_GET,
156 		.val64 = 1,
157 	};
158 
159 	cmd.option_id = IOMMU_OPTION_RLIMIT_MODE;
160 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
161 	ASSERT_EQ(0, cmd.val64);
162 
163 	/* This requires root */
164 	cmd.op = IOMMU_OPTION_OP_SET;
165 	cmd.val64 = 1;
166 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
167 	cmd.val64 = 2;
168 	EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_OPTION, &cmd));
169 
170 	cmd.op = IOMMU_OPTION_OP_GET;
171 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
172 	ASSERT_EQ(1, cmd.val64);
173 
174 	cmd.op = IOMMU_OPTION_OP_SET;
175 	cmd.val64 = 0;
176 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
177 
178 	cmd.op = IOMMU_OPTION_OP_GET;
179 	cmd.option_id = IOMMU_OPTION_HUGE_PAGES;
180 	EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_OPTION, &cmd));
181 	cmd.op = IOMMU_OPTION_OP_SET;
182 	EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_OPTION, &cmd));
183 }
184 
/* Fixture with an allocated IOAS and, per-variant, attached mock domains */
FIXTURE(iommufd_ioas)
{
	int fd;
	uint32_t ioas_id;
	/* id of the last mock device created, 0 if no domains in this variant */
	uint32_t stdev_id;
	/* hw pagetable of the last mock device */
	uint32_t hwpt_id;
	/* MOCK_APERTURE_START when a domain restricts the IOVA space, else 0 */
	uint64_t base_iova;
};
193 
/* Variant knobs: number of mock domains to attach and optional pin limit */
FIXTURE_VARIANT(iommufd_ioas)
{
	unsigned int mock_domains;
	/* non-zero: temporary page-pinning limit to apply, in pages */
	unsigned int memory_limit;
};
199 
200 FIXTURE_SETUP(iommufd_ioas)
201 {
202 	unsigned int i;
203 
204 
205 	self->fd = open("/dev/iommu", O_RDWR);
206 	ASSERT_NE(-1, self->fd);
207 	test_ioctl_ioas_alloc(&self->ioas_id);
208 
209 	if (!variant->memory_limit) {
210 		test_ioctl_set_default_memory_limit();
211 	} else {
212 		test_ioctl_set_temp_memory_limit(variant->memory_limit);
213 	}
214 
215 	for (i = 0; i != variant->mock_domains; i++) {
216 		test_cmd_mock_domain(self->ioas_id, &self->stdev_id,
217 				     &self->hwpt_id);
218 		self->base_iova = MOCK_APERTURE_START;
219 	}
220 }
221 
/* Undo any temporary memory limit before the leak-checking teardown */
FIXTURE_TEARDOWN(iommufd_ioas)
{
	test_ioctl_set_default_memory_limit();
	teardown_iommufd(self->fd, _metadata);
}
227 
/* IOAS with no attached domains */
FIXTURE_VARIANT_ADD(iommufd_ioas, no_domain)
{
};
231 
/* IOAS with a single mock domain attached */
FIXTURE_VARIANT_ADD(iommufd_ioas, mock_domain)
{
	.mock_domains = 1,
};
236 
/* IOAS with two mock domains attached */
FIXTURE_VARIANT_ADD(iommufd_ioas, two_mock_domain)
{
	.mock_domains = 2,
};
241 
/* One mock domain plus a tight 16-page pinning limit */
FIXTURE_VARIANT_ADD(iommufd_ioas, mock_domain_limit)
{
	.mock_domains = 1,
	.memory_limit = 16,
};
247 
/* Fixture teardown alone must clean up the IOAS and any domains */
TEST_F(iommufd_ioas, ioas_auto_destroy)
{
}
251 
/* Manual IOAS destruction: blocked while in use, allowed otherwise */
TEST_F(iommufd_ioas, ioas_destroy)
{
	if (self->stdev_id) {
		/* IOAS cannot be freed while a device has a HWPT using it */
		EXPECT_ERRNO(EBUSY,
			     _test_ioctl_destroy(self->fd, self->ioas_id));
	} else {
		/* Can allocate and manually free an IOAS table */
		test_ioctl_destroy(self->ioas_id);
	}
}
263 
264 TEST_F(iommufd_ioas, hwpt_attach)
265 {
266 	/* Create a device attached directly to a hwpt */
267 	if (self->stdev_id) {
268 		test_cmd_mock_domain(self->hwpt_id, NULL, NULL);
269 	} else {
270 		test_err_mock_domain(ENOENT, self->hwpt_id, NULL, NULL);
271 	}
272 }
273 
/* A mapped area does not change whether the IOAS can be destroyed */
TEST_F(iommufd_ioas, ioas_area_destroy)
{
	/* Adding an area does not change ability to destroy */
	test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, self->base_iova);
	if (self->stdev_id)
		EXPECT_ERRNO(EBUSY,
			     _test_ioctl_destroy(self->fd, self->ioas_id));
	else
		test_ioctl_destroy(self->ioas_id);
}
284 
/* Teardown must clean up an IOAS holding multiple mapped areas */
TEST_F(iommufd_ioas, ioas_area_auto_destroy)
{
	int i;

	/* Can allocate and automatically free an IOAS table with many areas */
	for (i = 0; i != 10; i++) {
		test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE,
					  self->base_iova + i * PAGE_SIZE);
	}
}
295 
/*
 * Core map/unmap semantics: unmap of empty space fails, exact unmap works,
 * partial unmap of an area is rejected, overlapping maps are rejected, and
 * a full-range unmap clears everything (and is a no-op when already empty).
 */
TEST_F(iommufd_ioas, area)
{
	int i;

	/* Unmap fails if nothing is mapped */
	for (i = 0; i != 10; i++)
		test_err_ioctl_ioas_unmap(ENOENT, i * PAGE_SIZE, PAGE_SIZE);

	/* Unmap works */
	for (i = 0; i != 10; i++)
		test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE,
					  self->base_iova + i * PAGE_SIZE);
	for (i = 0; i != 10; i++)
		test_ioctl_ioas_unmap(self->base_iova + i * PAGE_SIZE,
				      PAGE_SIZE);

	/* Split fails */
	test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE * 2,
				  self->base_iova + 16 * PAGE_SIZE);
	test_err_ioctl_ioas_unmap(ENOENT, self->base_iova + 16 * PAGE_SIZE,
				  PAGE_SIZE);
	test_err_ioctl_ioas_unmap(ENOENT, self->base_iova + 17 * PAGE_SIZE,
				  PAGE_SIZE);

	/* Over map fails */
	test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 2,
				      self->base_iova + 16 * PAGE_SIZE);
	test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE,
				      self->base_iova + 16 * PAGE_SIZE);
	test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE,
				      self->base_iova + 17 * PAGE_SIZE);
	test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 2,
				      self->base_iova + 15 * PAGE_SIZE);
	test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 3,
				      self->base_iova + 15 * PAGE_SIZE);

	/* unmap all works */
	test_ioctl_ioas_unmap(0, UINT64_MAX);

	/* Unmap all succeeds on an empty IOAS */
	test_ioctl_ioas_unmap(0, UINT64_MAX);
}
338 
/*
 * A ranged unmap only removes areas wholly inside the range: ranges that
 * clip an area fail, while a range covering whole areas removes them and
 * reports the total bytes unmapped.
 */
TEST_F(iommufd_ioas, unmap_fully_contained_areas)
{
	uint64_t unmap_len;
	int i;

	/* Give no_domain some space to rewind base_iova */
	self->base_iova += 4 * PAGE_SIZE;

	/* Four 8-page areas spaced 16 pages apart */
	for (i = 0; i != 4; i++)
		test_ioctl_ioas_map_fixed(buffer, 8 * PAGE_SIZE,
					  self->base_iova + i * 16 * PAGE_SIZE);

	/* Unmap not fully contained area doesn't work */
	test_err_ioctl_ioas_unmap(ENOENT, self->base_iova - 4 * PAGE_SIZE,
				  8 * PAGE_SIZE);
	test_err_ioctl_ioas_unmap(ENOENT,
				  self->base_iova + 3 * 16 * PAGE_SIZE +
					  8 * PAGE_SIZE - 4 * PAGE_SIZE,
				  8 * PAGE_SIZE);

	/* Unmap fully contained areas works */
	ASSERT_EQ(0, _test_ioctl_ioas_unmap(self->fd, self->ioas_id,
					    self->base_iova - 4 * PAGE_SIZE,
					    3 * 16 * PAGE_SIZE + 8 * PAGE_SIZE +
						    4 * PAGE_SIZE,
					    &unmap_len));
	/* 4 areas * 8 pages each were removed */
	ASSERT_EQ(32 * PAGE_SIZE, unmap_len);
}
367 
/*
 * Automatic IOVA allocation: the kernel picks aligned IOVAs, steers around
 * reserved regions, and honors the allowed-IOVA list set via
 * IOMMU_IOAS_ALLOW_IOVAS.
 */
TEST_F(iommufd_ioas, area_auto_iova)
{
	struct iommu_test_cmd test_cmd = {
		.size = sizeof(test_cmd),
		.op = IOMMU_TEST_OP_ADD_RESERVED,
		.id = self->ioas_id,
		.add_reserved = { .start = PAGE_SIZE * 4,
				  .length = PAGE_SIZE * 100 },
	};
	struct iommu_iova_range ranges[1] = {};
	struct iommu_ioas_allow_iovas allow_cmd = {
		.size = sizeof(allow_cmd),
		.ioas_id = self->ioas_id,
		.num_iovas = 1,
		.allowed_iovas = (uintptr_t)ranges,
	};
	__u64 iovas[10];
	int i;

	/* Simple 4k pages */
	for (i = 0; i != 10; i++)
		test_ioctl_ioas_map(buffer, PAGE_SIZE, &iovas[i]);
	for (i = 0; i != 10; i++)
		test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE);

	/* Kernel automatically aligns IOVAs properly */
	for (i = 0; i != 10; i++) {
		size_t length = PAGE_SIZE * (i + 1);

		if (self->stdev_id) {
			test_ioctl_ioas_map(buffer, length, &iovas[i]);
		} else {
			/* Misaligned uptr forces the kernel to pick alignment */
			test_ioctl_ioas_map((void *)(1UL << 31), length,
					    &iovas[i]);
		}
		/* IOVA is aligned to the lowest set bit of the length */
		EXPECT_EQ(0, iovas[i] % (1UL << (ffs(length) - 1)));
	}
	for (i = 0; i != 10; i++)
		test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));

	/* Avoids a reserved region */
	ASSERT_EQ(0,
		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
			&test_cmd));
	for (i = 0; i != 10; i++) {
		size_t length = PAGE_SIZE * (i + 1);

		test_ioctl_ioas_map(buffer, length, &iovas[i]);
		EXPECT_EQ(0, iovas[i] % (1UL << (ffs(length) - 1)));
		/* Allocation never lands inside the reserved window */
		EXPECT_EQ(false,
			  iovas[i] > test_cmd.add_reserved.start &&
				  iovas[i] <
					  test_cmd.add_reserved.start +
						  test_cmd.add_reserved.length);
	}
	for (i = 0; i != 10; i++)
		test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));

	/* Allowed region intersects with a reserved region */
	ranges[0].start = PAGE_SIZE;
	ranges[0].last = PAGE_SIZE * 600;
	EXPECT_ERRNO(EADDRINUSE,
		     ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));

	/* Allocate from an allowed region */
	if (self->stdev_id) {
		ranges[0].start = MOCK_APERTURE_START + PAGE_SIZE;
		ranges[0].last = MOCK_APERTURE_START + PAGE_SIZE * 600 - 1;
	} else {
		ranges[0].start = PAGE_SIZE * 200;
		ranges[0].last = PAGE_SIZE * 600 - 1;
	}
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
	for (i = 0; i != 10; i++) {
		size_t length = PAGE_SIZE * (i + 1);

		test_ioctl_ioas_map(buffer, length, &iovas[i]);
		EXPECT_EQ(0, iovas[i] % (1UL << (ffs(length) - 1)));
		/* Each allocation falls entirely within the allowed range */
		EXPECT_EQ(true, iovas[i] >= ranges[0].start);
		EXPECT_EQ(true, iovas[i] <= ranges[0].last);
		EXPECT_EQ(true, iovas[i] + length > ranges[0].start);
		EXPECT_EQ(true, iovas[i] + length <= ranges[0].last + 1);
	}
	for (i = 0; i != 10; i++)
		test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));
}
454 
/*
 * Allowed and reserved IOVA ranges are mutually exclusive: adding either
 * one that intersects the other must fail with EADDRINUSE.
 */
TEST_F(iommufd_ioas, area_allowed)
{
	struct iommu_test_cmd test_cmd = {
		.size = sizeof(test_cmd),
		.op = IOMMU_TEST_OP_ADD_RESERVED,
		.id = self->ioas_id,
		.add_reserved = { .start = PAGE_SIZE * 4,
				  .length = PAGE_SIZE * 100 },
	};
	struct iommu_iova_range ranges[1] = {};
	struct iommu_ioas_allow_iovas allow_cmd = {
		.size = sizeof(allow_cmd),
		.ioas_id = self->ioas_id,
		.num_iovas = 1,
		.allowed_iovas = (uintptr_t)ranges,
	};

	/* Reserved intersects an allowed */
	allow_cmd.num_iovas = 1;
	ranges[0].start = self->base_iova;
	ranges[0].last = ranges[0].start + PAGE_SIZE * 600;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
	test_cmd.add_reserved.start = ranges[0].start + PAGE_SIZE;
	test_cmd.add_reserved.length = PAGE_SIZE;
	EXPECT_ERRNO(EADDRINUSE,
		     ioctl(self->fd,
			   _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
			   &test_cmd));
	/* num_iovas == 0 clears the allowed list */
	allow_cmd.num_iovas = 0;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));

	/* Allowed intersects a reserved */
	ASSERT_EQ(0,
		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
			&test_cmd));
	allow_cmd.num_iovas = 1;
	ranges[0].start = self->base_iova;
	ranges[0].last = ranges[0].start + PAGE_SIZE * 600;
	EXPECT_ERRNO(EADDRINUSE,
		     ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
}
496 
/* IOMMU_IOAS_COPY works both within one IOAS and between two IOAS's */
TEST_F(iommufd_ioas, copy_area)
{
	struct iommu_ioas_copy copy_cmd = {
		.size = sizeof(copy_cmd),
		.flags = IOMMU_IOAS_MAP_FIXED_IOVA,
		.dst_ioas_id = self->ioas_id,
		.src_ioas_id = self->ioas_id,
		.length = PAGE_SIZE,
	};

	test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, self->base_iova);

	/* Copy inside a single IOAS */
	copy_cmd.src_iova = self->base_iova;
	copy_cmd.dst_iova = self->base_iova + PAGE_SIZE;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));

	/* Copy between IOAS's */
	copy_cmd.src_iova = self->base_iova;
	copy_cmd.dst_iova = 0;
	test_ioctl_ioas_alloc(&copy_cmd.dst_ioas_id);
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
}
520 
/*
 * IOMMU_IOAS_IOVA_RANGES reporting: the full range (or mock aperture), the
 * EMSGSIZE path when the caller's array is too small, and range splitting
 * after a reserved region is added.
 */
TEST_F(iommufd_ioas, iova_ranges)
{
	struct iommu_test_cmd test_cmd = {
		.size = sizeof(test_cmd),
		.op = IOMMU_TEST_OP_ADD_RESERVED,
		.id = self->ioas_id,
		.add_reserved = { .start = PAGE_SIZE, .length = PAGE_SIZE },
	};
	struct iommu_iova_range *ranges = buffer;
	struct iommu_ioas_iova_ranges ranges_cmd = {
		.size = sizeof(ranges_cmd),
		.ioas_id = self->ioas_id,
		.num_iovas = BUFFER_SIZE / sizeof(*ranges),
		.allowed_iovas = (uintptr_t)ranges,
	};

	/* Range can be read */
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
	EXPECT_EQ(1, ranges_cmd.num_iovas);
	if (!self->stdev_id) {
		/* No domain: the whole 64-bit space, byte alignment */
		EXPECT_EQ(0, ranges[0].start);
		EXPECT_EQ(SIZE_MAX, ranges[0].last);
		EXPECT_EQ(1, ranges_cmd.out_iova_alignment);
	} else {
		/* Domain attached: limited to the mock aperture */
		EXPECT_EQ(MOCK_APERTURE_START, ranges[0].start);
		EXPECT_EQ(MOCK_APERTURE_LAST, ranges[0].last);
		EXPECT_EQ(MOCK_PAGE_SIZE, ranges_cmd.out_iova_alignment);
	}

	/* Buffer too small */
	memset(ranges, 0, BUFFER_SIZE);
	ranges_cmd.num_iovas = 0;
	EXPECT_ERRNO(EMSGSIZE,
		     ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
	/* num_iovas reports the required count; nothing was written */
	EXPECT_EQ(1, ranges_cmd.num_iovas);
	EXPECT_EQ(0, ranges[0].start);
	EXPECT_EQ(0, ranges[0].last);

	/* 2 ranges */
	ASSERT_EQ(0,
		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
			&test_cmd));
	ranges_cmd.num_iovas = BUFFER_SIZE / sizeof(*ranges);
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
	if (!self->stdev_id) {
		/* The reserved page splits the range in two */
		EXPECT_EQ(2, ranges_cmd.num_iovas);
		EXPECT_EQ(0, ranges[0].start);
		EXPECT_EQ(PAGE_SIZE - 1, ranges[0].last);
		EXPECT_EQ(PAGE_SIZE * 2, ranges[1].start);
		EXPECT_EQ(SIZE_MAX, ranges[1].last);
	} else {
		/* Reserved page is outside the mock aperture: no split */
		EXPECT_EQ(1, ranges_cmd.num_iovas);
		EXPECT_EQ(MOCK_APERTURE_START, ranges[0].start);
		EXPECT_EQ(MOCK_APERTURE_LAST, ranges[0].last);
	}

	/* Buffer too small */
	memset(ranges, 0, BUFFER_SIZE);
	ranges_cmd.num_iovas = 1;
	if (!self->stdev_id) {
		/* Only the first of the two ranges fits */
		EXPECT_ERRNO(EMSGSIZE, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES,
					     &ranges_cmd));
		EXPECT_EQ(2, ranges_cmd.num_iovas);
		EXPECT_EQ(0, ranges[0].start);
		EXPECT_EQ(PAGE_SIZE - 1, ranges[0].last);
	} else {
		ASSERT_EQ(0,
			  ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
		EXPECT_EQ(1, ranges_cmd.num_iovas);
		EXPECT_EQ(MOCK_APERTURE_START, ranges[0].start);
		EXPECT_EQ(MOCK_APERTURE_LAST, ranges[0].last);
	}
	/* The second slot was never written */
	EXPECT_EQ(0, ranges[1].start);
	EXPECT_EQ(0, ranges[1].last);
}
596 
/*
 * Destroying a device whose domain shares pinned hugepages with an access
 * must correctly unpin across the huge page boundary.
 * NOTE(review): "destory" is a typo, but the test name is its identifier —
 * renaming would change test listings, so it is left as-is.
 */
TEST_F(iommufd_ioas, access_domain_destory)
{
	struct iommu_test_cmd access_cmd = {
		.size = sizeof(access_cmd),
		.op = IOMMU_TEST_OP_ACCESS_PAGES,
		.access_pages = { .iova = self->base_iova + PAGE_SIZE,
				  .length = PAGE_SIZE},
	};
	size_t buf_size = 2 * HUGEPAGE_SIZE;
	uint8_t *buf;

	/* Needs real hugetlb pages; MAP_POPULATE makes mmap fail if absent */
	buf = mmap(0, buf_size, PROT_READ | PROT_WRITE,
		   MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB | MAP_POPULATE, -1,
		   0);
	ASSERT_NE(MAP_FAILED, buf);
	test_ioctl_ioas_map_fixed(buf, buf_size, self->base_iova);

	test_cmd_create_access(self->ioas_id, &access_cmd.id,
			       MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
	/* Pin one CPU page inside the first hugepage */
	access_cmd.access_pages.uptr = (uintptr_t)buf + PAGE_SIZE;
	ASSERT_EQ(0,
		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
			&access_cmd));

	/* Causes a complicated unpin across a huge page boundary */
	if (self->stdev_id)
		test_ioctl_destroy(self->stdev_id);

	test_cmd_destroy_access_pages(
		access_cmd.id, access_cmd.access_pages.out_access_pages_id);
	test_cmd_destroy_access(access_cmd.id);
	ASSERT_EQ(0, munmap(buf, buf_size));
}
630 
/*
 * Pin pages through an access object at every length, exercising single
 * pin/unpin, two overlapping pins, and attaching/detaching a domain while
 * a pin is held (verifying the domain sees the mapping).
 */
TEST_F(iommufd_ioas, access_pin)
{
	struct iommu_test_cmd access_cmd = {
		.size = sizeof(access_cmd),
		.op = IOMMU_TEST_OP_ACCESS_PAGES,
		.access_pages = { .iova = MOCK_APERTURE_START,
				  .length = BUFFER_SIZE,
				  .uptr = (uintptr_t)buffer },
	};
	struct iommu_test_cmd check_map_cmd = {
		.size = sizeof(check_map_cmd),
		.op = IOMMU_TEST_OP_MD_CHECK_MAP,
		.check_map = { .iova = MOCK_APERTURE_START,
			       .length = BUFFER_SIZE,
			       .uptr = (uintptr_t)buffer },
	};
	uint32_t access_pages_id;
	unsigned int npages;

	test_cmd_create_access(self->ioas_id, &access_cmd.id,
			       MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);

	for (npages = 1; npages < BUFFER_SIZE / PAGE_SIZE; npages++) {
		uint32_t mock_stdev_id;
		uint32_t mock_hwpt_id;

		access_cmd.access_pages.length = npages * PAGE_SIZE;

		/* Single map/unmap */
		test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE,
					  MOCK_APERTURE_START);
		ASSERT_EQ(0, ioctl(self->fd,
				   _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
				   &access_cmd));
		test_cmd_destroy_access_pages(
			access_cmd.id,
			access_cmd.access_pages.out_access_pages_id);

		/* Double user */
		ASSERT_EQ(0, ioctl(self->fd,
				   _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
				   &access_cmd));
		access_pages_id = access_cmd.access_pages.out_access_pages_id;
		ASSERT_EQ(0, ioctl(self->fd,
				   _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
				   &access_cmd));
		test_cmd_destroy_access_pages(
			access_cmd.id,
			access_cmd.access_pages.out_access_pages_id);
		test_cmd_destroy_access_pages(access_cmd.id, access_pages_id);

		/* Add/remove a domain with a user */
		ASSERT_EQ(0, ioctl(self->fd,
				   _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
				   &access_cmd));
		test_cmd_mock_domain(self->ioas_id, &mock_stdev_id,
				     &mock_hwpt_id);
		check_map_cmd.id = mock_hwpt_id;
		ASSERT_EQ(0, ioctl(self->fd,
				   _IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_MAP),
				   &check_map_cmd));

		test_ioctl_destroy(mock_stdev_id);
		test_cmd_destroy_access_pages(
			access_cmd.id,
			access_cmd.access_pages.out_access_pages_id);

		test_ioctl_ioas_unmap(MOCK_APERTURE_START, BUFFER_SIZE);
	}
	test_cmd_destroy_access(access_cmd.id);
}
702 
/*
 * Unmapping pinned pages invokes the access's unmap callback, which must
 * remove the pin on the kernel side (a later manual destroy sees ENOENT).
 */
TEST_F(iommufd_ioas, access_pin_unmap)
{
	struct iommu_test_cmd access_pages_cmd = {
		.size = sizeof(access_pages_cmd),
		.op = IOMMU_TEST_OP_ACCESS_PAGES,
		.access_pages = { .iova = MOCK_APERTURE_START,
				  .length = BUFFER_SIZE,
				  .uptr = (uintptr_t)buffer },
	};

	test_cmd_create_access(self->ioas_id, &access_pages_cmd.id,
			       MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
	test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE, MOCK_APERTURE_START);
	ASSERT_EQ(0,
		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
			&access_pages_cmd));

	/* Trigger the unmap op */
	test_ioctl_ioas_unmap(MOCK_APERTURE_START, BUFFER_SIZE);

	/* kernel removed the item for us */
	test_err_destroy_access_pages(
		ENOENT, access_pages_cmd.id,
		access_pages_cmd.access_pages.out_access_pages_id);
}
728 
/*
 * Drive IOMMU_TEST_OP_ACCESS_RW through an access object: read and write
 * every offset/length combination in a +/-50 byte window around a page
 * boundary at @iova (comparing against the randomized global buffer), then
 * perform one large multi-page read. @def_flags is OR'd into each op
 * (e.g. MOCK_ACCESS_RW_SLOW_PATH).
 */
static void check_access_rw(struct __test_metadata *_metadata, int fd,
			    unsigned int access_id, uint64_t iova,
			    unsigned int def_flags)
{
	uint16_t tmp[32];
	struct iommu_test_cmd access_cmd = {
		.size = sizeof(access_cmd),
		.op = IOMMU_TEST_OP_ACCESS_RW,
		.id = access_id,
		.access_rw = { .uptr = (uintptr_t)tmp },
	};
	uint16_t *buffer16 = buffer;
	unsigned int i;
	void *tmp2;

	/* Randomize the buffer so reads can be checked for exact content */
	for (i = 0; i != BUFFER_SIZE / sizeof(*buffer16); i++)
		buffer16[i] = rand();

	for (access_cmd.access_rw.iova = iova + PAGE_SIZE - 50;
	     access_cmd.access_rw.iova < iova + PAGE_SIZE + 50;
	     access_cmd.access_rw.iova++) {
		for (access_cmd.access_rw.length = 1;
		     access_cmd.access_rw.length < sizeof(tmp);
		     access_cmd.access_rw.length++) {
			/* Read from the IOAS into tmp and compare */
			access_cmd.access_rw.flags = def_flags;
			ASSERT_EQ(0, ioctl(fd,
					   _IOMMU_TEST_CMD(
						   IOMMU_TEST_OP_ACCESS_RW),
					   &access_cmd));
			ASSERT_EQ(0,
				  memcmp(buffer + (access_cmd.access_rw.iova -
						   iova),
					 tmp, access_cmd.access_rw.length));

			/* Write fresh random data and verify it landed */
			for (i = 0; i != ARRAY_SIZE(tmp); i++)
				tmp[i] = rand();
			access_cmd.access_rw.flags = def_flags |
						     MOCK_ACCESS_RW_WRITE;
			ASSERT_EQ(0, ioctl(fd,
					   _IOMMU_TEST_CMD(
						   IOMMU_TEST_OP_ACCESS_RW),
					   &access_cmd));
			ASSERT_EQ(0,
				  memcmp(buffer + (access_cmd.access_rw.iova -
						   iova),
					 tmp, access_cmd.access_rw.length));
		}
	}

	/* Multi-page test */
	tmp2 = malloc(BUFFER_SIZE);
	ASSERT_NE(NULL, tmp2);
	access_cmd.access_rw.iova = iova;
	access_cmd.access_rw.length = BUFFER_SIZE;
	access_cmd.access_rw.flags = def_flags;
	access_cmd.access_rw.uptr = (uintptr_t)tmp2;
	ASSERT_EQ(0, ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
			   &access_cmd));
	ASSERT_EQ(0, memcmp(buffer, tmp2, access_cmd.access_rw.length));
	free(tmp2);
}
790 
/* access read/write over an aligned mapping, fast path and slow path */
TEST_F(iommufd_ioas, access_rw)
{
	__u32 access_id;
	__u64 iova;

	test_cmd_create_access(self->ioas_id, &access_id, 0);
	test_ioctl_ioas_map(buffer, BUFFER_SIZE, &iova);
	check_access_rw(_metadata, self->fd, access_id, iova, 0);
	check_access_rw(_metadata, self->fd, access_id, iova,
			MOCK_ACCESS_RW_SLOW_PATH);
	test_ioctl_ioas_unmap(iova, BUFFER_SIZE);
	test_cmd_destroy_access(access_id);
}
804 
/* access read/write when the mapping starts at a sub-page offset */
TEST_F(iommufd_ioas, access_rw_unaligned)
{
	__u32 access_id;
	__u64 iova;

	test_cmd_create_access(self->ioas_id, &access_id, 0);

	/* Unaligned pages */
	iova = self->base_iova + MOCK_PAGE_SIZE;
	test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE, iova);
	check_access_rw(_metadata, self->fd, access_id, iova, 0);
	test_ioctl_ioas_unmap(iova, BUFFER_SIZE);
	test_cmd_destroy_access(access_id);
}
819 
/*
 * Behavior when the mm that created a mapping has exited: already-pinned
 * pages remain usable, but new pinning fails because the originating mm is
 * gone.
 */
TEST_F(iommufd_ioas, fork_gone)
{
	__u32 access_id;
	pid_t child;

	test_cmd_create_access(self->ioas_id, &access_id, 0);

	/* Create a mapping with a different mm */
	child = fork();
	if (!child) {
		test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE,
					  MOCK_APERTURE_START);
		exit(0);
	}
	ASSERT_NE(-1, child);
	ASSERT_EQ(child, waitpid(child, NULL, 0));

	if (self->stdev_id) {
		/*
		 * If a domain already existed then everything was pinned within
		 * the fork, so this copies from one domain to another.
		 */
		test_cmd_mock_domain(self->ioas_id, NULL, NULL);
		check_access_rw(_metadata, self->fd, access_id,
				MOCK_APERTURE_START, 0);

	} else {
		/*
		 * Otherwise we need to actually pin pages which can't happen
		 * since the fork is gone.
		 */
		test_err_mock_domain(EFAULT, self->ioas_id, NULL, NULL);
	}

	test_cmd_destroy_access(access_id);
}
856 
/*
 * Pin and access pages from a child's mm while the child is still alive.
 * The eventfd signals "mapping done"; the pipe keeps the child blocked in
 * read() until the parent closes the write end.
 */
TEST_F(iommufd_ioas, fork_present)
{
	__u32 access_id;
	int pipefds[2];
	uint64_t tmp;
	pid_t child;
	int efd;

	test_cmd_create_access(self->ioas_id, &access_id, 0);

	ASSERT_EQ(0, pipe2(pipefds, O_CLOEXEC));
	efd = eventfd(0, EFD_CLOEXEC);
	ASSERT_NE(-1, efd);

	/* Create a mapping with a different mm */
	child = fork();
	if (!child) {
		__u64 iova;
		uint64_t one = 1;

		close(pipefds[1]);
		test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE,
					  MOCK_APERTURE_START);
		if (write(efd, &one, sizeof(one)) != sizeof(one))
			exit(100);
		/* Blocks until the parent closes its write end (EOF) */
		if (read(pipefds[0], &iova, 1) != 1)
			exit(100);
		exit(0);
	}
	close(pipefds[0]);
	ASSERT_NE(-1, child);
	/* Wait for the child to report the mapping is in place */
	ASSERT_EQ(8, read(efd, &tmp, sizeof(tmp)));

	/* Read pages from the remote process */
	test_cmd_mock_domain(self->ioas_id, NULL, NULL);
	check_access_rw(_metadata, self->fd, access_id, MOCK_APERTURE_START, 0);

	/* Release the child; its read() returns EOF and it exits */
	ASSERT_EQ(0, close(pipefds[1]));
	ASSERT_EQ(child, waitpid(child, NULL, 0));

	test_cmd_destroy_access(access_id);
}
899 
/* Per-IOAS IOMMU_OPTION_HUGE_PAGES: defaults to 1, accepts only 0 or 1 */
TEST_F(iommufd_ioas, ioas_option_huge_pages)
{
	struct iommu_option cmd = {
		.size = sizeof(cmd),
		.option_id = IOMMU_OPTION_HUGE_PAGES,
		.op = IOMMU_OPTION_OP_GET,
		/* Poison value to prove the GET overwrites val64 */
		.val64 = 3,
		.object_id = self->ioas_id,
	};

	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
	ASSERT_EQ(1, cmd.val64);

	cmd.op = IOMMU_OPTION_OP_SET;
	cmd.val64 = 0;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));

	cmd.op = IOMMU_OPTION_OP_GET;
	cmd.val64 = 3;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
	ASSERT_EQ(0, cmd.val64);

	/* Only 0 and 1 are valid */
	cmd.op = IOMMU_OPTION_OP_SET;
	cmd.val64 = 2;
	EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_OPTION, &cmd));

	/* Restore the default */
	cmd.op = IOMMU_OPTION_OP_SET;
	cmd.val64 = 1;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
}
930 
/*
 * Sweep every map length up to two pages: with a mock domain attached,
 * lengths not a multiple of MOCK_PAGE_SIZE must be rejected.
 */
TEST_F(iommufd_ioas, ioas_iova_alloc)
{
	unsigned int length;
	__u64 iova;

	for (length = 1; length != PAGE_SIZE * 2; length++) {
		if (variant->mock_domains && (length % MOCK_PAGE_SIZE)) {
			test_err_ioctl_ioas_map(EINVAL, buffer, length, &iova);
		} else {
			test_ioctl_ioas_map(buffer, length, &iova);
			test_ioctl_ioas_unmap(iova, length);
		}
	}
}
945 
/*
 * Changing alignment via IOMMU_OPTION_HUGE_PAGES: an upgrade succeeds when
 * existing maps are aligned, is enforced on subsequent maps, and fails with
 * EADDRINUSE when a misaligned map already exists.
 */
TEST_F(iommufd_ioas, ioas_align_change)
{
	struct iommu_option cmd = {
		.size = sizeof(cmd),
		.option_id = IOMMU_OPTION_HUGE_PAGES,
		.op = IOMMU_OPTION_OP_SET,
		.object_id = self->ioas_id,
		/* 0 means everything must be aligned to PAGE_SIZE */
		.val64 = 0,
	};

	/*
	 * We cannot upgrade the alignment using OPTION_HUGE_PAGES when a domain
	 * and map are present.
	 */
	if (variant->mock_domains)
		return;

	/*
	 * We can upgrade to PAGE_SIZE alignment when things are aligned right
	 */
	test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, MOCK_APERTURE_START);
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));

	/* Misalignment is rejected at map time */
	test_err_ioctl_ioas_map_fixed(EINVAL, buffer + MOCK_PAGE_SIZE,
				      PAGE_SIZE,
				      MOCK_APERTURE_START + PAGE_SIZE);
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));

	/* Reduce alignment */
	cmd.val64 = 1;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));

	/* Confirm misalignment is rejected during alignment upgrade */
	test_ioctl_ioas_map_fixed(buffer + MOCK_PAGE_SIZE, PAGE_SIZE,
				  MOCK_APERTURE_START + PAGE_SIZE);
	cmd.val64 = 0;
	EXPECT_ERRNO(EADDRINUSE, ioctl(self->fd, IOMMU_OPTION, &cmd));

	test_ioctl_ioas_unmap(MOCK_APERTURE_START + PAGE_SIZE, PAGE_SIZE);
	test_ioctl_ioas_unmap(MOCK_APERTURE_START, PAGE_SIZE);
}
989 
/*
 * Sweep IOMMU_IOAS_COPY source IOVAs across (and slightly beyond) a mapped
 * region: any source window not fully covered by the mapping must fail with
 * ENOENT, every fully-covered window must copy successfully.
 */
TEST_F(iommufd_ioas, copy_sweep)
{
	struct iommu_ioas_copy copy_cmd = {
		.size = sizeof(copy_cmd),
		.flags = IOMMU_IOAS_MAP_FIXED_IOVA,
		.src_ioas_id = self->ioas_id,
		.dst_iova = MOCK_APERTURE_START,
		.length = MOCK_PAGE_SIZE,
	};
	unsigned int dst_ioas_id;
	uint64_t last_iova;
	uint64_t iova;

	test_ioctl_ioas_alloc(&dst_ioas_id);
	copy_cmd.dst_ioas_id = dst_ioas_id;

	/* Without a domain the mapping can end on an odd byte boundary */
	if (variant->mock_domains)
		last_iova = MOCK_APERTURE_START + BUFFER_SIZE - 1;
	else
		last_iova = MOCK_APERTURE_START + BUFFER_SIZE - 2;

	test_ioctl_ioas_map_fixed(buffer, last_iova - MOCK_APERTURE_START + 1,
				  MOCK_APERTURE_START);

	/* Step 511 so the window lands at many different sub-page offsets */
	for (iova = MOCK_APERTURE_START - PAGE_SIZE; iova <= last_iova;
	     iova += 511) {
		copy_cmd.src_iova = iova;
		if (iova < MOCK_APERTURE_START ||
		    iova + copy_cmd.length - 1 > last_iova) {
			EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_IOAS_COPY,
						   &copy_cmd));
		} else {
			ASSERT_EQ(0,
				  ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
			test_ioctl_ioas_unmap_id(dst_ioas_id, copy_cmd.dst_iova,
						 copy_cmd.length);
		}
	}

	test_ioctl_destroy(dst_ioas_id);
}
1031 
FIXTURE(iommufd_mock_domain)
{
	int fd;			/* /dev/iommu */
	uint32_t ioas_id;
	uint32_t hwpt_id;	/* == hwpt_ids[0], used by check_mock_iova() */
	uint32_t hwpt_ids[2];	/* one per mock domain in the variant */
	int mmap_flags;
	size_t mmap_buf_size;
};
1041 
FIXTURE_VARIANT(iommufd_mock_domain)
{
	unsigned int mock_domains;	/* number of mock domains to attach */
	bool hugepages;			/* back test mmaps with MAP_HUGETLB */
};
1047 
FIXTURE_SETUP(iommufd_mock_domain)
{
	unsigned int i;

	self->fd = open("/dev/iommu", O_RDWR);
	ASSERT_NE(-1, self->fd);
	test_ioctl_ioas_alloc(&self->ioas_id);

	/* hwpt_ids[] must be able to hold every requested domain */
	ASSERT_GE(ARRAY_SIZE(self->hwpt_ids), variant->mock_domains);

	for (i = 0; i != variant->mock_domains; i++)
		test_cmd_mock_domain(self->ioas_id, NULL, &self->hwpt_ids[i]);
	self->hwpt_id = self->hwpt_ids[0];

	self->mmap_flags = MAP_SHARED | MAP_ANONYMOUS;
	self->mmap_buf_size = PAGE_SIZE * 8;
	if (variant->hugepages) {
		/*
		 * MAP_POPULATE will cause the kernel to fail mmap if THPs are
		 * not available.
		 */
		self->mmap_flags |= MAP_HUGETLB | MAP_POPULATE;
		self->mmap_buf_size = HUGEPAGE_SIZE * 2;
	}
}
1073 
FIXTURE_TEARDOWN(iommufd_mock_domain)
{
	/* Common teardown helper from iommufd_utils.h */
	teardown_iommufd(self->fd, _metadata);
}
1078 
/* Cover 1 and 2 attached domains, with and without hugepage backing */
FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain)
{
	.mock_domains = 1,
	.hugepages = false,
};

FIXTURE_VARIANT_ADD(iommufd_mock_domain, two_domains)
{
	.mock_domains = 2,
	.hugepages = false,
};

FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain_hugepage)
{
	.mock_domains = 1,
	.hugepages = true,
};

FIXTURE_VARIANT_ADD(iommufd_mock_domain, two_domains_hugepage)
{
	.mock_domains = 2,
	.hugepages = true,
};
1102 
/*
 * Have the kernel check that the user pages made it to the iommu_domain.
 * Checks against self->hwpt_id and, when a second domain exists
 * (hwpt_ids[1] != 0), against that domain as well.
 */
#define check_mock_iova(_ptr, _iova, _length)                                \
	({                                                                   \
		struct iommu_test_cmd check_map_cmd = {                      \
			.size = sizeof(check_map_cmd),                       \
			.op = IOMMU_TEST_OP_MD_CHECK_MAP,                    \
			.id = self->hwpt_id,                                 \
			.check_map = { .iova = _iova,                        \
				       .length = _length,                    \
				       .uptr = (uintptr_t)(_ptr) },          \
		};                                                           \
		ASSERT_EQ(0,                                                 \
			  ioctl(self->fd,                                    \
				_IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_MAP), \
				&check_map_cmd));                            \
		if (self->hwpt_ids[1]) {                                     \
			check_map_cmd.id = self->hwpt_ids[1];                \
			ASSERT_EQ(0,                                         \
				  ioctl(self->fd,                            \
					_IOMMU_TEST_CMD(                     \
						IOMMU_TEST_OP_MD_CHECK_MAP), \
					&check_map_cmd));                    \
		}                                                            \
	})
1127 
/*
 * Map/verify a single page, then check that mapping a VA range with
 * unmapped holes fails with EFAULT.
 */
TEST_F(iommufd_mock_domain, basic)
{
	size_t buf_size = self->mmap_buf_size;
	uint8_t *buf;
	__u64 iova;

	/* Simple one page map */
	test_ioctl_ioas_map(buffer, PAGE_SIZE, &iova);
	check_mock_iova(buffer, iova, PAGE_SIZE);

	buf = mmap(0, buf_size, PROT_READ | PROT_WRITE, self->mmap_flags, -1,
		   0);
	ASSERT_NE(MAP_FAILED, buf);

	/* EFAULT half way through mapping */
	ASSERT_EQ(0, munmap(buf + buf_size / 2, buf_size / 2));
	test_err_ioctl_ioas_map(EFAULT, buf, buf_size, &iova);

	/* EFAULT on first page */
	ASSERT_EQ(0, munmap(buf, buf_size / 2));
	test_err_ioctl_ioas_map(EFAULT, buf, buf_size, &iova);
}
1150 
/*
 * Pin a page from a file-backed MAP_PRIVATE mapping for read-only access,
 * then write through the mapping and verify the pinned PFN is unchanged.
 */
TEST_F(iommufd_mock_domain, ro_unshare)
{
	uint8_t *buf;
	__u64 iova;
	int fd;

	fd = open("/proc/self/exe", O_RDONLY);
	ASSERT_NE(-1, fd);

	buf = mmap(0, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
	ASSERT_NE(MAP_FAILED, buf);
	close(fd);

	/*
	 * There have been lots of changes to the "unshare" mechanism in
	 * get_user_pages(), make sure it works right. The write to the page
	 * after we map it for reading should not change the assigned PFN.
	 */
	ASSERT_EQ(0,
		  _test_ioctl_ioas_map(self->fd, self->ioas_id, buf, PAGE_SIZE,
				       &iova, IOMMU_IOAS_MAP_READABLE));
	check_mock_iova(buf, iova, PAGE_SIZE);
	memset(buf, 1, PAGE_SIZE);
	check_mock_iova(buf, iova, PAGE_SIZE);
	ASSERT_EQ(0, munmap(buf, PAGE_SIZE));
}
1177 
1178 TEST_F(iommufd_mock_domain, all_aligns)
1179 {
1180 	size_t test_step = variant->hugepages ? (self->mmap_buf_size / 16) :
1181 						MOCK_PAGE_SIZE;
1182 	size_t buf_size = self->mmap_buf_size;
1183 	unsigned int start;
1184 	unsigned int end;
1185 	uint8_t *buf;
1186 
1187 	buf = mmap(0, buf_size, PROT_READ | PROT_WRITE, self->mmap_flags, -1,
1188 		   0);
1189 	ASSERT_NE(MAP_FAILED, buf);
1190 	check_refs(buf, buf_size, 0);
1191 
1192 	/*
1193 	 * Map every combination of page size and alignment within a big region,
1194 	 * less for hugepage case as it takes so long to finish.
1195 	 */
1196 	for (start = 0; start < buf_size; start += test_step) {
1197 		if (variant->hugepages)
1198 			end = buf_size;
1199 		else
1200 			end = start + MOCK_PAGE_SIZE;
1201 		for (; end < buf_size; end += MOCK_PAGE_SIZE) {
1202 			size_t length = end - start;
1203 			__u64 iova;
1204 
1205 			test_ioctl_ioas_map(buf + start, length, &iova);
1206 			check_mock_iova(buf + start, iova, length);
1207 			check_refs(buf + start / PAGE_SIZE * PAGE_SIZE,
1208 				   end / PAGE_SIZE * PAGE_SIZE -
1209 					   start / PAGE_SIZE * PAGE_SIZE,
1210 				   1);
1211 
1212 			test_ioctl_ioas_unmap(iova, length);
1213 		}
1214 	}
1215 	check_refs(buf, buf_size, 0);
1216 	ASSERT_EQ(0, munmap(buf, buf_size));
1217 }
1218 
1219 TEST_F(iommufd_mock_domain, all_aligns_copy)
1220 {
1221 	size_t test_step = variant->hugepages ? self->mmap_buf_size / 16 :
1222 						MOCK_PAGE_SIZE;
1223 	size_t buf_size = self->mmap_buf_size;
1224 	unsigned int start;
1225 	unsigned int end;
1226 	uint8_t *buf;
1227 
1228 	buf = mmap(0, buf_size, PROT_READ | PROT_WRITE, self->mmap_flags, -1,
1229 		   0);
1230 	ASSERT_NE(MAP_FAILED, buf);
1231 	check_refs(buf, buf_size, 0);
1232 
1233 	/*
1234 	 * Map every combination of page size and alignment within a big region,
1235 	 * less for hugepage case as it takes so long to finish.
1236 	 */
1237 	for (start = 0; start < buf_size; start += test_step) {
1238 		if (variant->hugepages)
1239 			end = buf_size;
1240 		else
1241 			end = start + MOCK_PAGE_SIZE;
1242 		for (; end < buf_size; end += MOCK_PAGE_SIZE) {
1243 			size_t length = end - start;
1244 			unsigned int old_id;
1245 			uint32_t mock_stdev_id;
1246 			__u64 iova;
1247 
1248 			test_ioctl_ioas_map(buf + start, length, &iova);
1249 
1250 			/* Add and destroy a domain while the area exists */
1251 			old_id = self->hwpt_ids[1];
1252 			test_cmd_mock_domain(self->ioas_id, &mock_stdev_id,
1253 					     &self->hwpt_ids[1]);
1254 
1255 			check_mock_iova(buf + start, iova, length);
1256 			check_refs(buf + start / PAGE_SIZE * PAGE_SIZE,
1257 				   end / PAGE_SIZE * PAGE_SIZE -
1258 					   start / PAGE_SIZE * PAGE_SIZE,
1259 				   1);
1260 
1261 			test_ioctl_destroy(mock_stdev_id);
1262 			self->hwpt_ids[1] = old_id;
1263 
1264 			test_ioctl_ioas_unmap(iova, length);
1265 		}
1266 	}
1267 	check_refs(buf, buf_size, 0);
1268 	ASSERT_EQ(0, munmap(buf, buf_size));
1269 }
1270 
/*
 * Pin pages via an access in a domain-less IOAS, then IOMMU_IOAS_COPY them
 * into an IOAS that has domains attached and verify they reach the domain.
 */
TEST_F(iommufd_mock_domain, user_copy)
{
	struct iommu_test_cmd access_cmd = {
		.size = sizeof(access_cmd),
		.op = IOMMU_TEST_OP_ACCESS_PAGES,
		.access_pages = { .length = BUFFER_SIZE,
				  .uptr = (uintptr_t)buffer },
	};
	struct iommu_ioas_copy copy_cmd = {
		.size = sizeof(copy_cmd),
		.flags = IOMMU_IOAS_MAP_FIXED_IOVA,
		.dst_ioas_id = self->ioas_id,
		.dst_iova = MOCK_APERTURE_START,
		.length = BUFFER_SIZE,
	};
	unsigned int ioas_id;

	/* Pin the pages in an IOAS with no domains then copy to an IOAS with domains */
	test_ioctl_ioas_alloc(&ioas_id);
	test_ioctl_ioas_map_id(ioas_id, buffer, BUFFER_SIZE,
			       &copy_cmd.src_iova);

	test_cmd_create_access(ioas_id, &access_cmd.id,
			       MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);

	access_cmd.access_pages.iova = copy_cmd.src_iova;
	ASSERT_EQ(0,
		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
			&access_cmd));
	copy_cmd.src_ioas_id = ioas_id;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
	check_mock_iova(buffer, MOCK_APERTURE_START, BUFFER_SIZE);

	/* Release the pin before destroying the source IOAS */
	test_cmd_destroy_access_pages(
		access_cmd.id, access_cmd.access_pages.out_access_pages_id);
	test_cmd_destroy_access(access_cmd.id);

	test_ioctl_destroy(ioas_id);
}
1310 
1311 /* VFIO compatibility IOCTLs */
1312 
/* Version/extension probing works on a bare iommufd, no container needed */
TEST_F(iommufd, simple_ioctls)
{
	ASSERT_EQ(VFIO_API_VERSION, ioctl(self->fd, VFIO_GET_API_VERSION));
	ASSERT_EQ(1, ioctl(self->fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU));
}
1318 
/* argsz/flags validation of VFIO_IOMMU_UNMAP_DMA without a compat ioas */
TEST_F(iommufd, unmap_cmd)
{
	struct vfio_iommu_type1_dma_unmap unmap_cmd = {
		.iova = MOCK_APERTURE_START,
		.size = PAGE_SIZE,
	};

	/* argsz too small for the struct */
	unmap_cmd.argsz = 1;
	EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));

	/* Unknown flag bit */
	unmap_cmd.argsz = sizeof(unmap_cmd);
	unmap_cmd.flags = 1 << 31;
	EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));

	/* No compat ioas is configured on this fd */
	unmap_cmd.flags = 0;
	EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
}
1336 
/* argsz/flags validation of VFIO_IOMMU_MAP_DMA without a compat ioas */
TEST_F(iommufd, map_cmd)
{
	struct vfio_iommu_type1_dma_map map_cmd = {
		.iova = MOCK_APERTURE_START,
		.size = PAGE_SIZE,
		.vaddr = (__u64)buffer,
	};

	/* argsz too small for the struct */
	map_cmd.argsz = 1;
	EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));

	/* Unknown flag bit */
	map_cmd.argsz = sizeof(map_cmd);
	map_cmd.flags = 1 << 31;
	EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));

	/* Requires a domain to be attached */
	map_cmd.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
	EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
}
1356 
/* VFIO_IOMMU_GET_INFO validation without a compat ioas */
TEST_F(iommufd, info_cmd)
{
	struct vfio_iommu_type1_info info_cmd = {};

	/* Invalid argsz */
	info_cmd.argsz = 1;
	EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_GET_INFO, &info_cmd));

	/* No compat ioas is configured on this fd */
	info_cmd.argsz = sizeof(info_cmd);
	EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_IOMMU_GET_INFO, &info_cmd));
}
1368 
/* VFIO_SET_IOMMU fails until a compat ioas with a domain exists */
TEST_F(iommufd, set_iommu_cmd)
{
	/* Requires a domain to be attached */
	EXPECT_ERRNO(ENODEV,
		     ioctl(self->fd, VFIO_SET_IOMMU, VFIO_TYPE1v2_IOMMU));
	EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU));
}
1376 
/* IOMMU_VFIO_IOAS get/set/clear of the compat ioas */
TEST_F(iommufd, vfio_ioas)
{
	struct iommu_vfio_ioas vfio_ioas_cmd = {
		.size = sizeof(vfio_ioas_cmd),
		.op = IOMMU_VFIO_IOAS_GET,
	};
	__u32 ioas_id;

	/* ENODEV if there is no compat ioas */
	EXPECT_ERRNO(ENODEV, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));

	/* Invalid id for set */
	vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_SET;
	EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));

	/* Valid id for set */
	test_ioctl_ioas_alloc(&ioas_id);
	vfio_ioas_cmd.ioas_id = ioas_id;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));

	/* Same id comes back from get */
	vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_GET;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
	ASSERT_EQ(ioas_id, vfio_ioas_cmd.ioas_id);

	/* Clear works */
	vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_CLEAR;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
	vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_GET;
	EXPECT_ERRNO(ENODEV, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
}
1408 
FIXTURE(vfio_compat_mock_domain)
{
	int fd;			/* /dev/iommu */
	uint32_t ioas_id;	/* IOAS attached as the VFIO compat ioas */
};
1414 
FIXTURE_VARIANT(vfio_compat_mock_domain)
{
	unsigned int version;	/* VFIO_TYPE1_IOMMU or VFIO_TYPE1v2_IOMMU */
};
1419 
FIXTURE_SETUP(vfio_compat_mock_domain)
{
	struct iommu_vfio_ioas vfio_ioas_cmd = {
		.size = sizeof(vfio_ioas_cmd),
		.op = IOMMU_VFIO_IOAS_SET,
	};

	self->fd = open("/dev/iommu", O_RDWR);
	ASSERT_NE(-1, self->fd);

	/* Create what VFIO would consider a group */
	test_ioctl_ioas_alloc(&self->ioas_id);
	test_cmd_mock_domain(self->ioas_id, NULL, NULL);

	/* Attach it to the vfio compat */
	vfio_ioas_cmd.ioas_id = self->ioas_id;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
	ASSERT_EQ(0, ioctl(self->fd, VFIO_SET_IOMMU, variant->version));
}
1439 
FIXTURE_TEARDOWN(vfio_compat_mock_domain)
{
	/* Common teardown helper from iommufd_utils.h */
	teardown_iommufd(self->fd, _metadata);
}
1444 
/* Run the compat tests against both the type1v2 and legacy type1 ABIs */
FIXTURE_VARIANT_ADD(vfio_compat_mock_domain, Ver1v2)
{
	.version = VFIO_TYPE1v2_IOMMU,
};

FIXTURE_VARIANT_ADD(vfio_compat_mock_domain, Ver1v0)
{
	.version = VFIO_TYPE1_IOMMU,
};
1454 
/* Fixture setup/teardown alone must succeed */
TEST_F(vfio_compat_mock_domain, simple_close)
{
}
1458 
/*
 * Legacy type1 compat reports IOMMU_OPTION_HUGE_PAGES disabled (0),
 * type1v2 reports it enabled (1).
 */
TEST_F(vfio_compat_mock_domain, option_huge_pages)
{
	struct iommu_option cmd = {
		.size = sizeof(cmd),
		.option_id = IOMMU_OPTION_HUGE_PAGES,
		.op = IOMMU_OPTION_OP_GET,
		/* Poison val64 so the GET result is clearly kernel-written */
		.val64 = 3,
		.object_id = self->ioas_id,
	};

	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
	if (variant->version == VFIO_TYPE1_IOMMU) {
		ASSERT_EQ(0, cmd.val64);
	} else {
		ASSERT_EQ(1, cmd.val64);
	}
}
1476 
1477 /*
1478  * Execute an ioctl command stored in buffer and check that the result does not
1479  * overflow memory.
1480  */
/* Return true if all len bytes at buf equal c */
static bool is_filled(const void *buf, uint8_t c, size_t len)
{
	const uint8_t *bytes = buf;
	size_t i;

	for (i = 0; i != len; i++) {
		if (bytes[i] != c)
			return false;
	}
	return true;
}
1490 
/*
 * Issue the ioctl whose argument struct (with its length as the leading
 * __u32) is already at the start of the global buffer, then verify the
 * kernel wrote nothing past that declared length (the 0xAA fill survives).
 */
#define ioctl_check_buf(fd, cmd)                                         \
	({                                                               \
		size_t _cmd_len = *(__u32 *)buffer;                      \
									 \
		memset(buffer + _cmd_len, 0xAA, BUFFER_SIZE - _cmd_len); \
		ASSERT_EQ(0, ioctl(fd, cmd, buffer));                    \
		ASSERT_EQ(true, is_filled(buffer + _cmd_len, 0xAA,       \
					  BUFFER_SIZE - _cmd_len));      \
	})
1500 
/*
 * Walk the VFIO capability chain returned in the global buffer by
 * VFIO_IOMMU_GET_INFO and validate each capability's contents. cap_offset
 * and each cap->next are byte offsets from the start of the buffer.
 */
static void check_vfio_info_cap_chain(struct __test_metadata *_metadata,
				      struct vfio_iommu_type1_info *info_cmd)
{
	const struct vfio_info_cap_header *cap;

	ASSERT_GE(info_cmd->argsz, info_cmd->cap_offset + sizeof(*cap));
	cap = buffer + info_cmd->cap_offset;
	while (true) {
		size_t cap_size;

		/* Size of this cap runs to the next cap, or to argsz */
		if (cap->next)
			cap_size = (buffer + cap->next) - (void *)cap;
		else
			cap_size = (buffer + info_cmd->argsz) - (void *)cap;

		switch (cap->id) {
		case VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE: {
			struct vfio_iommu_type1_info_cap_iova_range *data =
				(void *)cap;

			ASSERT_EQ(1, data->header.version);
			ASSERT_EQ(1, data->nr_iovas);
			EXPECT_EQ(MOCK_APERTURE_START,
				  data->iova_ranges[0].start);
			EXPECT_EQ(MOCK_APERTURE_LAST, data->iova_ranges[0].end);
			break;
		}
		case VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL: {
			struct vfio_iommu_type1_info_dma_avail *data =
				(void *)cap;

			ASSERT_EQ(1, data->header.version);
			ASSERT_EQ(sizeof(*data), cap_size);
			break;
		}
		default:
			/* Unknown caps must never be emitted */
			ASSERT_EQ(false, true);
			break;
		}
		if (!cap->next)
			break;

		/* next must stay inside argsz and move forward */
		ASSERT_GE(info_cmd->argsz, cap->next + sizeof(*cap));
		ASSERT_GE(buffer + cap->next, (void *)cap);
		cap = buffer + cap->next;
	}
}
1548 
/*
 * VFIO_IOMMU_GET_INFO across every possible argsz: pre-cap ABI, full
 * struct, and every truncated cap-chain size; the kernel must never write
 * past argsz or emit a corrupted cap chain.
 */
TEST_F(vfio_compat_mock_domain, get_info)
{
	struct vfio_iommu_type1_info *info_cmd = buffer;
	unsigned int i;
	size_t caplen;

	/* Pre-cap ABI */
	*info_cmd = (struct vfio_iommu_type1_info){
		.argsz = offsetof(struct vfio_iommu_type1_info, cap_offset),
	};
	ioctl_check_buf(self->fd, VFIO_IOMMU_GET_INFO);
	ASSERT_NE(0, info_cmd->iova_pgsizes);
	ASSERT_EQ(VFIO_IOMMU_INFO_PGSIZES | VFIO_IOMMU_INFO_CAPS,
		  info_cmd->flags);

	/* Read the cap chain size */
	*info_cmd = (struct vfio_iommu_type1_info){
		.argsz = sizeof(*info_cmd),
	};
	ioctl_check_buf(self->fd, VFIO_IOMMU_GET_INFO);
	ASSERT_NE(0, info_cmd->iova_pgsizes);
	ASSERT_EQ(VFIO_IOMMU_INFO_PGSIZES | VFIO_IOMMU_INFO_CAPS,
		  info_cmd->flags);
	ASSERT_EQ(0, info_cmd->cap_offset);
	/* argsz is bumped to the size needed for the caps */
	ASSERT_LT(sizeof(*info_cmd), info_cmd->argsz);

	/* Read the caps, kernel should never create a corrupted caps */
	caplen = info_cmd->argsz;
	for (i = sizeof(*info_cmd); i < caplen; i++) {
		*info_cmd = (struct vfio_iommu_type1_info){
			.argsz = i,
		};
		ioctl_check_buf(self->fd, VFIO_IOMMU_GET_INFO);
		ASSERT_EQ(VFIO_IOMMU_INFO_PGSIZES | VFIO_IOMMU_INFO_CAPS,
			  info_cmd->flags);
		if (!info_cmd->cap_offset)
			continue;
		check_vfio_info_cap_chain(_metadata, info_cmd);
	}
}
1589 
/*
 * In-place Fisher-Yates shuffle: on iteration i, the element placed at
 * slot i is chosen uniformly from the remaining slots [i, nelms).
 *
 * The previous code chose from [0, nelms - i), which is not a fair
 * shuffle (it re-swaps already-placed elements with a shrinking, front
 * biased range; the final iteration always swapped the last element
 * with slot 0).
 */
static void shuffle_array(unsigned long *array, size_t nelms)
{
	unsigned int i;

	/* Shuffle */
	for (i = 0; i != nelms; i++) {
		unsigned long tmp = array[i];
		unsigned int other = i + rand() % (nelms - i);

		array[i] = array[other];
		array[other] = tmp;
	}
}
1603 
/* VFIO compat map/unmap: whole-buffer, UNMAP_FLAG_ALL, and per-page in random order */
TEST_F(vfio_compat_mock_domain, map)
{
	struct vfio_iommu_type1_dma_map map_cmd = {
		.argsz = sizeof(map_cmd),
		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
		.vaddr = (uintptr_t)buffer,
		.size = BUFFER_SIZE,
		.iova = MOCK_APERTURE_START,
	};
	struct vfio_iommu_type1_dma_unmap unmap_cmd = {
		.argsz = sizeof(unmap_cmd),
		.size = BUFFER_SIZE,
		.iova = MOCK_APERTURE_START,
	};
	unsigned long pages_iova[BUFFER_SIZE / PAGE_SIZE];
	unsigned int i;

	/* Simple map/unmap */
	ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
	ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
	/* The kernel reports back how many bytes were unmapped */
	ASSERT_EQ(BUFFER_SIZE, unmap_cmd.size);

	/* UNMAP_FLAG_ALL requires 0 iova/size */
	ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
	unmap_cmd.flags = VFIO_DMA_UNMAP_FLAG_ALL;
	EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));

	unmap_cmd.iova = 0;
	unmap_cmd.size = 0;
	ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
	ASSERT_EQ(BUFFER_SIZE, unmap_cmd.size);

	/* Small pages */
	for (i = 0; i != ARRAY_SIZE(pages_iova); i++) {
		map_cmd.iova = pages_iova[i] =
			MOCK_APERTURE_START + i * PAGE_SIZE;
		map_cmd.vaddr = (uintptr_t)buffer + i * PAGE_SIZE;
		map_cmd.size = PAGE_SIZE;
		ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
	}
	/* Unmap in a random order to stress the interval bookkeeping */
	shuffle_array(pages_iova, ARRAY_SIZE(pages_iova));

	unmap_cmd.flags = 0;
	unmap_cmd.size = PAGE_SIZE;
	for (i = 0; i != ARRAY_SIZE(pages_iova); i++) {
		unmap_cmd.iova = pages_iova[i];
		ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
	}
}
1653 
/*
 * Map a hugepage-backed region as one large mapping and try to unmap it in
 * smaller chunks: type1 allows cutting a mapping up, type1v2 does not.
 */
TEST_F(vfio_compat_mock_domain, huge_map)
{
	size_t buf_size = HUGEPAGE_SIZE * 2;
	struct vfio_iommu_type1_dma_map map_cmd = {
		.argsz = sizeof(map_cmd),
		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
		.size = buf_size,
		.iova = MOCK_APERTURE_START,
	};
	struct vfio_iommu_type1_dma_unmap unmap_cmd = {
		.argsz = sizeof(unmap_cmd),
	};
	unsigned long pages_iova[16];
	unsigned int i;
	void *buf;

	/* Test huge pages and splitting */
	buf = mmap(0, buf_size, PROT_READ | PROT_WRITE,
		   MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB | MAP_POPULATE, -1,
		   0);
	ASSERT_NE(MAP_FAILED, buf);
	map_cmd.vaddr = (uintptr_t)buf;
	ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));

	/* Split the mapping into equal chunks, unmapped in random order */
	unmap_cmd.size = buf_size / ARRAY_SIZE(pages_iova);
	for (i = 0; i != ARRAY_SIZE(pages_iova); i++)
		pages_iova[i] = MOCK_APERTURE_START + (i * unmap_cmd.size);
	shuffle_array(pages_iova, ARRAY_SIZE(pages_iova));

	/* type1 mode can cut up larger mappings, type1v2 always fails */
	for (i = 0; i != ARRAY_SIZE(pages_iova); i++) {
		unmap_cmd.iova = pages_iova[i];
		unmap_cmd.size = buf_size / ARRAY_SIZE(pages_iova);
		if (variant->version == VFIO_TYPE1_IOMMU) {
			ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA,
					   &unmap_cmd));
		} else {
			EXPECT_ERRNO(ENOENT,
				     ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA,
					   &unmap_cmd));
		}
	}
}
1697 
/* Expands to main() for the kselftest harness */
TEST_HARNESS_MAIN
1699