1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES */
3 #include <stdlib.h>
4 #include <sys/mman.h>
5 #include <sys/eventfd.h>
6
7 #define __EXPORTED_HEADERS__
8 #include <linux/vfio.h>
9
10 #include "iommufd_utils.h"
11
12 static unsigned long HUGEPAGE_SIZE;
13
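/* The mock IOMMU domain uses an IO page size of half the CPU page size */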
14 #define MOCK_PAGE_SIZE (PAGE_SIZE / 2)
15
static unsigned long get_huge_page_size(void)
17 {
18 char buf[80];
19 int ret;
20 int fd;
21
22 fd = open("/sys/kernel/mm/transparent_hugepage/hpage_pmd_size",
23 O_RDONLY);
24 if (fd < 0)
25 return 2 * 1024 * 1024;
26
27 ret = read(fd, buf, sizeof(buf));
28 close(fd);
29 if (ret <= 0 || ret == sizeof(buf))
30 return 2 * 1024 * 1024;
31 buf[ret] = 0;
32 return strtoul(buf, NULL, 10);
33 }
34
static __attribute__((constructor)) void setup_sizes(void)
36 {
37 void *vrc;
38 int rc;
39
40 PAGE_SIZE = sysconf(_SC_PAGE_SIZE);
41 HUGEPAGE_SIZE = get_huge_page_size();
42
43 BUFFER_SIZE = PAGE_SIZE * 16;
44 rc = posix_memalign(&buffer, HUGEPAGE_SIZE, BUFFER_SIZE);
45 assert(!rc);
46 assert(buffer);
47 assert((uintptr_t)buffer % HUGEPAGE_SIZE == 0);
48 vrc = mmap(buffer, BUFFER_SIZE, PROT_READ | PROT_WRITE,
49 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
50 assert(vrc == buffer);
51 }
52
FIXTURE(iommufd)
54 {
55 int fd;
56 };
57
FIXTURE_SETUP(iommufd)
59 {
60 self->fd = open("/dev/iommu", O_RDWR);
61 ASSERT_NE(-1, self->fd);
62 }
63
FIXTURE_TEARDOWN(iommufd)
65 {
66 teardown_iommufd(self->fd, _metadata);
67 }
68
TEST_F(iommufd, simple_close)
70 {
71 }
72
TEST_F(iommufd, cmd_fail)
74 {
75 struct iommu_destroy cmd = { .size = sizeof(cmd), .id = 0 };
76
77 /* object id is invalid */
78 EXPECT_ERRNO(ENOENT, _test_ioctl_destroy(self->fd, 0));
79 /* Bad pointer */
80 EXPECT_ERRNO(EFAULT, ioctl(self->fd, IOMMU_DESTROY, NULL));
81 /* Unknown ioctl */
82 EXPECT_ERRNO(ENOTTY,
83 ioctl(self->fd, _IO(IOMMUFD_TYPE, IOMMUFD_CMD_BASE - 1),
84 &cmd));
85 }
86
TEST_F(iommufd, cmd_length)
88 {
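/*
 * For each ioctl: a size shorter than the struct must fail with EINVAL, a
 * larger size with non-zero trailing bytes must fail with E2BIG, and a
 * larger size with zeroed trailing bytes must behave like the exact size.
 */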
89 #define TEST_LENGTH(_struct, _ioctl) \
90 { \
91 struct { \
92 struct _struct cmd; \
93 uint8_t extra; \
94 } cmd = { .cmd = { .size = sizeof(struct _struct) - 1 }, \
95 .extra = UINT8_MAX }; \
96 int old_errno; \
97 int rc; \
98 \
99 EXPECT_ERRNO(EINVAL, ioctl(self->fd, _ioctl, &cmd)); \
100 cmd.cmd.size = sizeof(struct _struct) + 1; \
101 EXPECT_ERRNO(E2BIG, ioctl(self->fd, _ioctl, &cmd)); \
102 cmd.cmd.size = sizeof(struct _struct); \
103 rc = ioctl(self->fd, _ioctl, &cmd); \
104 old_errno = errno; \
105 cmd.cmd.size = sizeof(struct _struct) + 1; \
106 cmd.extra = 0; \
107 if (rc) { \
108 EXPECT_ERRNO(old_errno, \
109 ioctl(self->fd, _ioctl, &cmd)); \
110 } else { \
111 ASSERT_EQ(0, ioctl(self->fd, _ioctl, &cmd)); \
112 } \
113 }
114
115 TEST_LENGTH(iommu_destroy, IOMMU_DESTROY);
116 TEST_LENGTH(iommu_hw_info, IOMMU_GET_HW_INFO);
117 TEST_LENGTH(iommu_ioas_alloc, IOMMU_IOAS_ALLOC);
118 TEST_LENGTH(iommu_ioas_iova_ranges, IOMMU_IOAS_IOVA_RANGES);
119 TEST_LENGTH(iommu_ioas_allow_iovas, IOMMU_IOAS_ALLOW_IOVAS);
120 TEST_LENGTH(iommu_ioas_map, IOMMU_IOAS_MAP);
121 TEST_LENGTH(iommu_ioas_copy, IOMMU_IOAS_COPY);
122 TEST_LENGTH(iommu_ioas_unmap, IOMMU_IOAS_UNMAP);
123 TEST_LENGTH(iommu_option, IOMMU_OPTION);
124 TEST_LENGTH(iommu_vfio_ioas, IOMMU_VFIO_IOAS);
125 #undef TEST_LENGTH
126 }
127
TEST_F(iommufd, cmd_ex_fail)
129 {
130 struct {
131 struct iommu_destroy cmd;
132 __u64 future;
133 } cmd = { .cmd = { .size = sizeof(cmd), .id = 0 } };
134
135 /* object id is invalid and command is longer */
136 EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_DESTROY, &cmd));
137 /* future area is non-zero */
138 cmd.future = 1;
139 EXPECT_ERRNO(E2BIG, ioctl(self->fd, IOMMU_DESTROY, &cmd));
140 /* Original command "works" */
141 cmd.cmd.size = sizeof(cmd.cmd);
142 EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_DESTROY, &cmd));
143 /* Short command fails */
144 cmd.cmd.size = sizeof(cmd.cmd) - 1;
145 EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_DESTROY, &cmd));
146 }
147
TEST_F(iommufd, global_options)
149 {
150 struct iommu_option cmd = {
151 .size = sizeof(cmd),
152 .option_id = IOMMU_OPTION_RLIMIT_MODE,
153 .op = IOMMU_OPTION_OP_GET,
154 .val64 = 1,
155 };
156
157 cmd.option_id = IOMMU_OPTION_RLIMIT_MODE;
158 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
159 ASSERT_EQ(0, cmd.val64);
160
161 /* This requires root */
162 cmd.op = IOMMU_OPTION_OP_SET;
163 cmd.val64 = 1;
164 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
165 cmd.val64 = 2;
166 EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_OPTION, &cmd));
167
168 cmd.op = IOMMU_OPTION_OP_GET;
169 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
170 ASSERT_EQ(1, cmd.val64);
171
172 cmd.op = IOMMU_OPTION_OP_SET;
173 cmd.val64 = 0;
174 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
175
176 cmd.op = IOMMU_OPTION_OP_GET;
177 cmd.option_id = IOMMU_OPTION_HUGE_PAGES;
178 EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_OPTION, &cmd));
179 cmd.op = IOMMU_OPTION_OP_SET;
180 EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_OPTION, &cmd));
181 }
182
FIXTURE(iommufd_ioas)
184 {
185 int fd;
186 uint32_t ioas_id;
187 uint32_t stdev_id;
188 uint32_t hwpt_id;
189 uint32_t device_id;
190 uint64_t base_iova;
191 };
192
FIXTURE_VARIANT(iommufd_ioas)
194 {
195 unsigned int mock_domains;
196 unsigned int memory_limit;
197 };
198
FIXTURE_SETUP(iommufd_ioas)
200 {
201 unsigned int i;
202
203
204 self->fd = open("/dev/iommu", O_RDWR);
205 ASSERT_NE(-1, self->fd);
206 test_ioctl_ioas_alloc(&self->ioas_id);
207
208 if (!variant->memory_limit) {
209 test_ioctl_set_default_memory_limit();
210 } else {
211 test_ioctl_set_temp_memory_limit(variant->memory_limit);
212 }
213
214 for (i = 0; i != variant->mock_domains; i++) {
215 test_cmd_mock_domain(self->ioas_id, &self->stdev_id,
216 &self->hwpt_id, &self->device_id);
217 self->base_iova = MOCK_APERTURE_START;
218 }
219 }
220
FIXTURE_TEARDOWN(iommufd_ioas)
222 {
223 test_ioctl_set_default_memory_limit();
224 teardown_iommufd(self->fd, _metadata);
225 }
226
FIXTURE_VARIANT_ADD(iommufd_ioas, no_domain)
228 {
229 };
230
FIXTURE_VARIANT_ADD(iommufd_ioas, mock_domain)
232 {
233 .mock_domains = 1,
234 };
235
FIXTURE_VARIANT_ADD(iommufd_ioas, two_mock_domain)
237 {
238 .mock_domains = 2,
239 };
240
FIXTURE_VARIANT_ADD(iommufd_ioas, mock_domain_limit)
242 {
243 .mock_domains = 1,
244 .memory_limit = 16,
245 };
246
TEST_F(iommufd_ioas, ioas_auto_destroy)
248 {
249 }
250
TEST_F(iommufd_ioas, ioas_destroy)
252 {
253 if (self->stdev_id) {
254 /* IOAS cannot be freed while a device has a HWPT using it */
255 EXPECT_ERRNO(EBUSY,
256 _test_ioctl_destroy(self->fd, self->ioas_id));
257 } else {
258 /* Can allocate and manually free an IOAS table */
259 test_ioctl_destroy(self->ioas_id);
260 }
261 }
262
TEST_F(iommufd_ioas, hwpt_attach)
264 {
265 /* Create a device attached directly to a hwpt */
266 if (self->stdev_id) {
267 test_cmd_mock_domain(self->hwpt_id, NULL, NULL, NULL);
268 } else {
269 test_err_mock_domain(ENOENT, self->hwpt_id, NULL, NULL);
270 }
271 }
272
TEST_F(iommufd_ioas, ioas_area_destroy)
274 {
275 /* Adding an area does not change ability to destroy */
276 test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, self->base_iova);
277 if (self->stdev_id)
278 EXPECT_ERRNO(EBUSY,
279 _test_ioctl_destroy(self->fd, self->ioas_id));
280 else
281 test_ioctl_destroy(self->ioas_id);
282 }
283
TEST_F(iommufd_ioas, ioas_area_auto_destroy)
285 {
286 int i;
287
288 /* Can allocate and automatically free an IOAS table with many areas */
289 for (i = 0; i != 10; i++) {
290 test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE,
291 self->base_iova + i * PAGE_SIZE);
292 }
293 }
294
TEST_F(iommufd_ioas, get_hw_info)
296 {
297 struct iommu_test_hw_info buffer_exact;
298 struct iommu_test_hw_info_buffer_larger {
299 struct iommu_test_hw_info info;
300 uint64_t trailing_bytes;
301 } buffer_larger;
302 struct iommu_test_hw_info_buffer_smaller {
303 __u32 flags;
304 } buffer_smaller;
305
306 if (self->device_id) {
307 /* Provide a zero-size user_buffer */
308 test_cmd_get_hw_info(self->device_id, NULL, 0);
309 /* Provide a user_buffer with exact size */
310 test_cmd_get_hw_info(self->device_id, &buffer_exact, sizeof(buffer_exact));
311 /*
* Provide a user_buffer with size larger than the exact size to check
* that the kernel zeroes the trailing bytes.
314 */
315 test_cmd_get_hw_info(self->device_id, &buffer_larger, sizeof(buffer_larger));
316 /*
* Provide a user_buffer with size smaller than the exact size to check
* that the fields within the size range still get updated.
319 */
320 test_cmd_get_hw_info(self->device_id, &buffer_smaller, sizeof(buffer_smaller));
321 } else {
322 test_err_get_hw_info(ENOENT, self->device_id,
323 &buffer_exact, sizeof(buffer_exact));
324 test_err_get_hw_info(ENOENT, self->device_id,
325 &buffer_larger, sizeof(buffer_larger));
326 }
327 }
328
TEST_F(iommufd_ioas, area)
330 {
331 int i;
332
333 /* Unmap fails if nothing is mapped */
334 for (i = 0; i != 10; i++)
335 test_err_ioctl_ioas_unmap(ENOENT, i * PAGE_SIZE, PAGE_SIZE);
336
337 /* Unmap works */
338 for (i = 0; i != 10; i++)
339 test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE,
340 self->base_iova + i * PAGE_SIZE);
341 for (i = 0; i != 10; i++)
342 test_ioctl_ioas_unmap(self->base_iova + i * PAGE_SIZE,
343 PAGE_SIZE);
344
345 /* Split fails */
346 test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE * 2,
347 self->base_iova + 16 * PAGE_SIZE);
348 test_err_ioctl_ioas_unmap(ENOENT, self->base_iova + 16 * PAGE_SIZE,
349 PAGE_SIZE);
350 test_err_ioctl_ioas_unmap(ENOENT, self->base_iova + 17 * PAGE_SIZE,
351 PAGE_SIZE);
352
353 /* Over map fails */
354 test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 2,
355 self->base_iova + 16 * PAGE_SIZE);
356 test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE,
357 self->base_iova + 16 * PAGE_SIZE);
358 test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE,
359 self->base_iova + 17 * PAGE_SIZE);
360 test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 2,
361 self->base_iova + 15 * PAGE_SIZE);
362 test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 3,
363 self->base_iova + 15 * PAGE_SIZE);
364
365 /* unmap all works */
366 test_ioctl_ioas_unmap(0, UINT64_MAX);
367
368 /* Unmap all succeeds on an empty IOAS */
369 test_ioctl_ioas_unmap(0, UINT64_MAX);
370 }
371
TEST_F(iommufd_ioas, unmap_fully_contained_areas)
373 {
374 uint64_t unmap_len;
375 int i;
376
377 /* Give no_domain some space to rewind base_iova */
378 self->base_iova += 4 * PAGE_SIZE;
379
380 for (i = 0; i != 4; i++)
381 test_ioctl_ioas_map_fixed(buffer, 8 * PAGE_SIZE,
382 self->base_iova + i * 16 * PAGE_SIZE);
383
384 /* Unmap not fully contained area doesn't work */
385 test_err_ioctl_ioas_unmap(ENOENT, self->base_iova - 4 * PAGE_SIZE,
386 8 * PAGE_SIZE);
387 test_err_ioctl_ioas_unmap(ENOENT,
388 self->base_iova + 3 * 16 * PAGE_SIZE +
389 8 * PAGE_SIZE - 4 * PAGE_SIZE,
390 8 * PAGE_SIZE);
391
392 /* Unmap fully contained areas works */
393 ASSERT_EQ(0, _test_ioctl_ioas_unmap(self->fd, self->ioas_id,
394 self->base_iova - 4 * PAGE_SIZE,
395 3 * 16 * PAGE_SIZE + 8 * PAGE_SIZE +
396 4 * PAGE_SIZE,
397 &unmap_len));
398 ASSERT_EQ(32 * PAGE_SIZE, unmap_len);
399 }
400
TEST_F(iommufd_ioas, area_auto_iova)
402 {
403 struct iommu_test_cmd test_cmd = {
404 .size = sizeof(test_cmd),
405 .op = IOMMU_TEST_OP_ADD_RESERVED,
406 .id = self->ioas_id,
407 .add_reserved = { .start = PAGE_SIZE * 4,
408 .length = PAGE_SIZE * 100 },
409 };
410 struct iommu_iova_range ranges[1] = {};
411 struct iommu_ioas_allow_iovas allow_cmd = {
412 .size = sizeof(allow_cmd),
413 .ioas_id = self->ioas_id,
414 .num_iovas = 1,
415 .allowed_iovas = (uintptr_t)ranges,
416 };
417 __u64 iovas[10];
418 int i;
419
420 /* Simple 4k pages */
421 for (i = 0; i != 10; i++)
422 test_ioctl_ioas_map(buffer, PAGE_SIZE, &iovas[i]);
423 for (i = 0; i != 10; i++)
424 test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE);
425
426 /* Kernel automatically aligns IOVAs properly */
427 for (i = 0; i != 10; i++) {
428 size_t length = PAGE_SIZE * (i + 1);
429
430 if (self->stdev_id) {
431 test_ioctl_ioas_map(buffer, length, &iovas[i]);
432 } else {
433 test_ioctl_ioas_map((void *)(1UL << 31), length,
434 &iovas[i]);
435 }
436 EXPECT_EQ(0, iovas[i] % (1UL << (ffs(length) - 1)));
437 }
438 for (i = 0; i != 10; i++)
439 test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));
440
441 /* Avoids a reserved region */
442 ASSERT_EQ(0,
443 ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
444 &test_cmd));
445 for (i = 0; i != 10; i++) {
446 size_t length = PAGE_SIZE * (i + 1);
447
448 test_ioctl_ioas_map(buffer, length, &iovas[i]);
449 EXPECT_EQ(0, iovas[i] % (1UL << (ffs(length) - 1)));
450 EXPECT_EQ(false,
451 iovas[i] > test_cmd.add_reserved.start &&
452 iovas[i] <
453 test_cmd.add_reserved.start +
454 test_cmd.add_reserved.length);
455 }
456 for (i = 0; i != 10; i++)
457 test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));
458
459 /* Allowed region intersects with a reserved region */
460 ranges[0].start = PAGE_SIZE;
461 ranges[0].last = PAGE_SIZE * 600;
462 EXPECT_ERRNO(EADDRINUSE,
463 ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
464
465 /* Allocate from an allowed region */
466 if (self->stdev_id) {
467 ranges[0].start = MOCK_APERTURE_START + PAGE_SIZE;
468 ranges[0].last = MOCK_APERTURE_START + PAGE_SIZE * 600 - 1;
469 } else {
470 ranges[0].start = PAGE_SIZE * 200;
471 ranges[0].last = PAGE_SIZE * 600 - 1;
472 }
473 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
474 for (i = 0; i != 10; i++) {
475 size_t length = PAGE_SIZE * (i + 1);
476
477 test_ioctl_ioas_map(buffer, length, &iovas[i]);
478 EXPECT_EQ(0, iovas[i] % (1UL << (ffs(length) - 1)));
479 EXPECT_EQ(true, iovas[i] >= ranges[0].start);
480 EXPECT_EQ(true, iovas[i] <= ranges[0].last);
481 EXPECT_EQ(true, iovas[i] + length > ranges[0].start);
482 EXPECT_EQ(true, iovas[i] + length <= ranges[0].last + 1);
483 }
484 for (i = 0; i != 10; i++)
485 test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));
486 }
487
TEST_F(iommufd_ioas, area_allowed)
489 {
490 struct iommu_test_cmd test_cmd = {
491 .size = sizeof(test_cmd),
492 .op = IOMMU_TEST_OP_ADD_RESERVED,
493 .id = self->ioas_id,
494 .add_reserved = { .start = PAGE_SIZE * 4,
495 .length = PAGE_SIZE * 100 },
496 };
497 struct iommu_iova_range ranges[1] = {};
498 struct iommu_ioas_allow_iovas allow_cmd = {
499 .size = sizeof(allow_cmd),
500 .ioas_id = self->ioas_id,
501 .num_iovas = 1,
502 .allowed_iovas = (uintptr_t)ranges,
503 };
504
505 /* Reserved intersects an allowed */
506 allow_cmd.num_iovas = 1;
507 ranges[0].start = self->base_iova;
508 ranges[0].last = ranges[0].start + PAGE_SIZE * 600;
509 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
510 test_cmd.add_reserved.start = ranges[0].start + PAGE_SIZE;
511 test_cmd.add_reserved.length = PAGE_SIZE;
512 EXPECT_ERRNO(EADDRINUSE,
513 ioctl(self->fd,
514 _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
515 &test_cmd));
516 allow_cmd.num_iovas = 0;
517 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
518
519 /* Allowed intersects a reserved */
520 ASSERT_EQ(0,
521 ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
522 &test_cmd));
523 allow_cmd.num_iovas = 1;
524 ranges[0].start = self->base_iova;
525 ranges[0].last = ranges[0].start + PAGE_SIZE * 600;
526 EXPECT_ERRNO(EADDRINUSE,
527 ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
528 }
529
TEST_F(iommufd_ioas, copy_area)
531 {
532 struct iommu_ioas_copy copy_cmd = {
533 .size = sizeof(copy_cmd),
534 .flags = IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE,
535 .dst_ioas_id = self->ioas_id,
536 .src_ioas_id = self->ioas_id,
537 .length = PAGE_SIZE,
538 };
539
540 test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, self->base_iova);
541
542 /* Copy inside a single IOAS */
543 copy_cmd.src_iova = self->base_iova;
544 copy_cmd.dst_iova = self->base_iova + PAGE_SIZE;
ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
546
547 /* Copy between IOAS's */
548 copy_cmd.src_iova = self->base_iova;
549 copy_cmd.dst_iova = 0;
test_ioctl_ioas_alloc(&copy_cmd.dst_ioas_id);
ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
552 }
553
TEST_F(iommufd_ioas, iova_ranges)
555 {
556 struct iommu_test_cmd test_cmd = {
557 .size = sizeof(test_cmd),
558 .op = IOMMU_TEST_OP_ADD_RESERVED,
559 .id = self->ioas_id,
560 .add_reserved = { .start = PAGE_SIZE, .length = PAGE_SIZE },
561 };
562 struct iommu_iova_range *ranges = buffer;
563 struct iommu_ioas_iova_ranges ranges_cmd = {
564 .size = sizeof(ranges_cmd),
565 .ioas_id = self->ioas_id,
566 .num_iovas = BUFFER_SIZE / sizeof(*ranges),
567 .allowed_iovas = (uintptr_t)ranges,
568 };
569
570 /* Range can be read */
571 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
572 EXPECT_EQ(1, ranges_cmd.num_iovas);
573 if (!self->stdev_id) {
574 EXPECT_EQ(0, ranges[0].start);
575 EXPECT_EQ(SIZE_MAX, ranges[0].last);
576 EXPECT_EQ(1, ranges_cmd.out_iova_alignment);
577 } else {
578 EXPECT_EQ(MOCK_APERTURE_START, ranges[0].start);
579 EXPECT_EQ(MOCK_APERTURE_LAST, ranges[0].last);
580 EXPECT_EQ(MOCK_PAGE_SIZE, ranges_cmd.out_iova_alignment);
581 }
582
583 /* Buffer too small */
584 memset(ranges, 0, BUFFER_SIZE);
585 ranges_cmd.num_iovas = 0;
586 EXPECT_ERRNO(EMSGSIZE,
587 ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
588 EXPECT_EQ(1, ranges_cmd.num_iovas);
589 EXPECT_EQ(0, ranges[0].start);
590 EXPECT_EQ(0, ranges[0].last);
591
592 /* 2 ranges */
593 ASSERT_EQ(0,
594 ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
595 &test_cmd));
596 ranges_cmd.num_iovas = BUFFER_SIZE / sizeof(*ranges);
597 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
598 if (!self->stdev_id) {
599 EXPECT_EQ(2, ranges_cmd.num_iovas);
600 EXPECT_EQ(0, ranges[0].start);
601 EXPECT_EQ(PAGE_SIZE - 1, ranges[0].last);
602 EXPECT_EQ(PAGE_SIZE * 2, ranges[1].start);
603 EXPECT_EQ(SIZE_MAX, ranges[1].last);
604 } else {
605 EXPECT_EQ(1, ranges_cmd.num_iovas);
606 EXPECT_EQ(MOCK_APERTURE_START, ranges[0].start);
607 EXPECT_EQ(MOCK_APERTURE_LAST, ranges[0].last);
608 }
609
610 /* Buffer too small */
611 memset(ranges, 0, BUFFER_SIZE);
612 ranges_cmd.num_iovas = 1;
613 if (!self->stdev_id) {
614 EXPECT_ERRNO(EMSGSIZE, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES,
615 &ranges_cmd));
616 EXPECT_EQ(2, ranges_cmd.num_iovas);
617 EXPECT_EQ(0, ranges[0].start);
618 EXPECT_EQ(PAGE_SIZE - 1, ranges[0].last);
619 } else {
620 ASSERT_EQ(0,
621 ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
622 EXPECT_EQ(1, ranges_cmd.num_iovas);
623 EXPECT_EQ(MOCK_APERTURE_START, ranges[0].start);
624 EXPECT_EQ(MOCK_APERTURE_LAST, ranges[0].last);
625 }
626 EXPECT_EQ(0, ranges[1].start);
627 EXPECT_EQ(0, ranges[1].last);
628 }
629
TEST_F(iommufd_ioas, access_domain_destory)
631 {
632 struct iommu_test_cmd access_cmd = {
633 .size = sizeof(access_cmd),
634 .op = IOMMU_TEST_OP_ACCESS_PAGES,
635 .access_pages = { .iova = self->base_iova + PAGE_SIZE,
636 .length = PAGE_SIZE},
637 };
638 size_t buf_size = 2 * HUGEPAGE_SIZE;
639 uint8_t *buf;
640
641 buf = mmap(0, buf_size, PROT_READ | PROT_WRITE,
642 MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB | MAP_POPULATE, -1,
643 0);
644 ASSERT_NE(MAP_FAILED, buf);
645 test_ioctl_ioas_map_fixed(buf, buf_size, self->base_iova);
646
647 test_cmd_create_access(self->ioas_id, &access_cmd.id,
648 MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
649 access_cmd.access_pages.uptr = (uintptr_t)buf + PAGE_SIZE;
650 ASSERT_EQ(0,
651 ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
652 &access_cmd));
653
654 /* Causes a complicated unpin across a huge page boundary */
655 if (self->stdev_id)
656 test_ioctl_destroy(self->stdev_id);
657
658 test_cmd_destroy_access_pages(
659 access_cmd.id, access_cmd.access_pages.out_access_pages_id);
660 test_cmd_destroy_access(access_cmd.id);
661 ASSERT_EQ(0, munmap(buf, buf_size));
662 }
663
TEST_F(iommufd_ioas, access_pin)
665 {
666 struct iommu_test_cmd access_cmd = {
667 .size = sizeof(access_cmd),
668 .op = IOMMU_TEST_OP_ACCESS_PAGES,
669 .access_pages = { .iova = MOCK_APERTURE_START,
670 .length = BUFFER_SIZE,
671 .uptr = (uintptr_t)buffer },
672 };
673 struct iommu_test_cmd check_map_cmd = {
674 .size = sizeof(check_map_cmd),
675 .op = IOMMU_TEST_OP_MD_CHECK_MAP,
676 .check_map = { .iova = MOCK_APERTURE_START,
677 .length = BUFFER_SIZE,
678 .uptr = (uintptr_t)buffer },
679 };
680 uint32_t access_pages_id;
681 unsigned int npages;
682
683 test_cmd_create_access(self->ioas_id, &access_cmd.id,
684 MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
685
686 for (npages = 1; npages < BUFFER_SIZE / PAGE_SIZE; npages++) {
687 uint32_t mock_stdev_id;
688 uint32_t mock_hwpt_id;
689
690 access_cmd.access_pages.length = npages * PAGE_SIZE;
691
692 /* Single map/unmap */
693 test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE,
694 MOCK_APERTURE_START);
695 ASSERT_EQ(0, ioctl(self->fd,
696 _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
697 &access_cmd));
698 test_cmd_destroy_access_pages(
699 access_cmd.id,
700 access_cmd.access_pages.out_access_pages_id);
701
702 /* Double user */
703 ASSERT_EQ(0, ioctl(self->fd,
704 _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
705 &access_cmd));
706 access_pages_id = access_cmd.access_pages.out_access_pages_id;
707 ASSERT_EQ(0, ioctl(self->fd,
708 _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
709 &access_cmd));
710 test_cmd_destroy_access_pages(
711 access_cmd.id,
712 access_cmd.access_pages.out_access_pages_id);
713 test_cmd_destroy_access_pages(access_cmd.id, access_pages_id);
714
715 /* Add/remove a domain with a user */
716 ASSERT_EQ(0, ioctl(self->fd,
717 _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
718 &access_cmd));
719 test_cmd_mock_domain(self->ioas_id, &mock_stdev_id,
720 &mock_hwpt_id, NULL);
721 check_map_cmd.id = mock_hwpt_id;
722 ASSERT_EQ(0, ioctl(self->fd,
723 _IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_MAP),
724 &check_map_cmd));
725
726 test_ioctl_destroy(mock_stdev_id);
727 test_cmd_destroy_access_pages(
728 access_cmd.id,
729 access_cmd.access_pages.out_access_pages_id);
730
731 test_ioctl_ioas_unmap(MOCK_APERTURE_START, BUFFER_SIZE);
732 }
733 test_cmd_destroy_access(access_cmd.id);
734 }
735
TEST_F(iommufd_ioas, access_pin_unmap)
737 {
738 struct iommu_test_cmd access_pages_cmd = {
739 .size = sizeof(access_pages_cmd),
740 .op = IOMMU_TEST_OP_ACCESS_PAGES,
741 .access_pages = { .iova = MOCK_APERTURE_START,
742 .length = BUFFER_SIZE,
743 .uptr = (uintptr_t)buffer },
744 };
745
746 test_cmd_create_access(self->ioas_id, &access_pages_cmd.id,
747 MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
748 test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE, MOCK_APERTURE_START);
749 ASSERT_EQ(0,
750 ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
751 &access_pages_cmd));
752
753 /* Trigger the unmap op */
754 test_ioctl_ioas_unmap(MOCK_APERTURE_START, BUFFER_SIZE);
755
756 /* kernel removed the item for us */
757 test_err_destroy_access_pages(
758 ENOENT, access_pages_cmd.id,
759 access_pages_cmd.access_pages.out_access_pages_id);
760 }
761
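/*
 * Sweep emulated reads and writes across a page boundary at many offsets
 * and lengths, then do a single multi-page read, checking the data against
 * the source buffer each time.
 */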
static void check_access_rw(struct __test_metadata *_metadata, int fd,
763 unsigned int access_id, uint64_t iova,
764 unsigned int def_flags)
765 {
766 uint16_t tmp[32];
767 struct iommu_test_cmd access_cmd = {
768 .size = sizeof(access_cmd),
769 .op = IOMMU_TEST_OP_ACCESS_RW,
770 .id = access_id,
771 .access_rw = { .uptr = (uintptr_t)tmp },
772 };
773 uint16_t *buffer16 = buffer;
774 unsigned int i;
775 void *tmp2;
776
777 for (i = 0; i != BUFFER_SIZE / sizeof(*buffer16); i++)
778 buffer16[i] = rand();
779
780 for (access_cmd.access_rw.iova = iova + PAGE_SIZE - 50;
781 access_cmd.access_rw.iova < iova + PAGE_SIZE + 50;
782 access_cmd.access_rw.iova++) {
783 for (access_cmd.access_rw.length = 1;
784 access_cmd.access_rw.length < sizeof(tmp);
785 access_cmd.access_rw.length++) {
786 access_cmd.access_rw.flags = def_flags;
787 ASSERT_EQ(0, ioctl(fd,
788 _IOMMU_TEST_CMD(
789 IOMMU_TEST_OP_ACCESS_RW),
790 &access_cmd));
791 ASSERT_EQ(0,
792 memcmp(buffer + (access_cmd.access_rw.iova -
793 iova),
794 tmp, access_cmd.access_rw.length));
795
796 for (i = 0; i != ARRAY_SIZE(tmp); i++)
797 tmp[i] = rand();
798 access_cmd.access_rw.flags = def_flags |
799 MOCK_ACCESS_RW_WRITE;
800 ASSERT_EQ(0, ioctl(fd,
801 _IOMMU_TEST_CMD(
802 IOMMU_TEST_OP_ACCESS_RW),
803 &access_cmd));
804 ASSERT_EQ(0,
805 memcmp(buffer + (access_cmd.access_rw.iova -
806 iova),
807 tmp, access_cmd.access_rw.length));
808 }
809 }
810
811 /* Multi-page test */
812 tmp2 = malloc(BUFFER_SIZE);
813 ASSERT_NE(NULL, tmp2);
814 access_cmd.access_rw.iova = iova;
815 access_cmd.access_rw.length = BUFFER_SIZE;
816 access_cmd.access_rw.flags = def_flags;
817 access_cmd.access_rw.uptr = (uintptr_t)tmp2;
818 ASSERT_EQ(0, ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
819 &access_cmd));
820 ASSERT_EQ(0, memcmp(buffer, tmp2, access_cmd.access_rw.length));
821 free(tmp2);
822 }
823
TEST_F(iommufd_ioas, access_rw)
825 {
826 __u32 access_id;
827 __u64 iova;
828
829 test_cmd_create_access(self->ioas_id, &access_id, 0);
830 test_ioctl_ioas_map(buffer, BUFFER_SIZE, &iova);
831 check_access_rw(_metadata, self->fd, access_id, iova, 0);
832 check_access_rw(_metadata, self->fd, access_id, iova,
833 MOCK_ACCESS_RW_SLOW_PATH);
834 test_ioctl_ioas_unmap(iova, BUFFER_SIZE);
835 test_cmd_destroy_access(access_id);
836 }
837
TEST_F(iommufd_ioas, access_rw_unaligned)
839 {
840 __u32 access_id;
841 __u64 iova;
842
843 test_cmd_create_access(self->ioas_id, &access_id, 0);
844
845 /* Unaligned pages */
846 iova = self->base_iova + MOCK_PAGE_SIZE;
847 test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE, iova);
848 check_access_rw(_metadata, self->fd, access_id, iova, 0);
849 test_ioctl_ioas_unmap(iova, BUFFER_SIZE);
850 test_cmd_destroy_access(access_id);
851 }
852
TEST_F(iommufd_ioas, fork_gone)
854 {
855 __u32 access_id;
856 pid_t child;
857
858 test_cmd_create_access(self->ioas_id, &access_id, 0);
859
860 /* Create a mapping with a different mm */
861 child = fork();
862 if (!child) {
863 test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE,
864 MOCK_APERTURE_START);
865 exit(0);
866 }
867 ASSERT_NE(-1, child);
868 ASSERT_EQ(child, waitpid(child, NULL, 0));
869
870 if (self->stdev_id) {
871 /*
872 * If a domain already existed then everything was pinned within
873 * the fork, so this copies from one domain to another.
874 */
875 test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);
876 check_access_rw(_metadata, self->fd, access_id,
877 MOCK_APERTURE_START, 0);
878
879 } else {
880 /*
881 * Otherwise we need to actually pin pages which can't happen
882 * since the fork is gone.
883 */
884 test_err_mock_domain(EFAULT, self->ioas_id, NULL, NULL);
885 }
886
887 test_cmd_destroy_access(access_id);
888 }
889
TEST_F(iommufd_ioas, fork_present)
891 {
892 __u32 access_id;
893 int pipefds[2];
894 uint64_t tmp;
895 pid_t child;
896 int efd;
897
898 test_cmd_create_access(self->ioas_id, &access_id, 0);
899
900 ASSERT_EQ(0, pipe2(pipefds, O_CLOEXEC));
901 efd = eventfd(0, EFD_CLOEXEC);
902 ASSERT_NE(-1, efd);
903
904 /* Create a mapping with a different mm */
905 child = fork();
906 if (!child) {
907 __u64 iova;
908 uint64_t one = 1;
909
910 close(pipefds[1]);
911 test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE,
912 MOCK_APERTURE_START);
913 if (write(efd, &one, sizeof(one)) != sizeof(one))
914 exit(100);
915 if (read(pipefds[0], &iova, 1) != 1)
916 exit(100);
917 exit(0);
918 }
919 close(pipefds[0]);
920 ASSERT_NE(-1, child);
921 ASSERT_EQ(8, read(efd, &tmp, sizeof(tmp)));
922
923 /* Read pages from the remote process */
924 test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);
925 check_access_rw(_metadata, self->fd, access_id, MOCK_APERTURE_START, 0);
926
927 ASSERT_EQ(0, close(pipefds[1]));
928 ASSERT_EQ(child, waitpid(child, NULL, 0));
929
930 test_cmd_destroy_access(access_id);
931 }
932
TEST_F(iommufd_ioas, ioas_option_huge_pages)
934 {
935 struct iommu_option cmd = {
936 .size = sizeof(cmd),
937 .option_id = IOMMU_OPTION_HUGE_PAGES,
938 .op = IOMMU_OPTION_OP_GET,
939 .val64 = 3,
940 .object_id = self->ioas_id,
941 };
942
943 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
944 ASSERT_EQ(1, cmd.val64);
945
946 cmd.op = IOMMU_OPTION_OP_SET;
947 cmd.val64 = 0;
948 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
949
950 cmd.op = IOMMU_OPTION_OP_GET;
951 cmd.val64 = 3;
952 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
953 ASSERT_EQ(0, cmd.val64);
954
955 cmd.op = IOMMU_OPTION_OP_SET;
956 cmd.val64 = 2;
957 EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_OPTION, &cmd));
958
959 cmd.op = IOMMU_OPTION_OP_SET;
960 cmd.val64 = 1;
961 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
962 }
963
TEST_F(iommufd_ioas, ioas_iova_alloc)
965 {
966 unsigned int length;
967 __u64 iova;
968
969 for (length = 1; length != PAGE_SIZE * 2; length++) {
970 if (variant->mock_domains && (length % MOCK_PAGE_SIZE)) {
971 test_err_ioctl_ioas_map(EINVAL, buffer, length, &iova);
972 } else {
973 test_ioctl_ioas_map(buffer, length, &iova);
974 test_ioctl_ioas_unmap(iova, length);
975 }
976 }
977 }
978
TEST_F(iommufd_ioas, ioas_align_change)
980 {
981 struct iommu_option cmd = {
982 .size = sizeof(cmd),
983 .option_id = IOMMU_OPTION_HUGE_PAGES,
984 .op = IOMMU_OPTION_OP_SET,
985 .object_id = self->ioas_id,
986 /* 0 means everything must be aligned to PAGE_SIZE */
987 .val64 = 0,
988 };
989
990 /*
991 * We cannot upgrade the alignment using OPTION_HUGE_PAGES when a domain
992 * and map are present.
993 */
994 if (variant->mock_domains)
995 return;
996
997 /*
998 * We can upgrade to PAGE_SIZE alignment when things are aligned right
999 */
1000 test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, MOCK_APERTURE_START);
1001 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1002
1003 /* Misalignment is rejected at map time */
1004 test_err_ioctl_ioas_map_fixed(EINVAL, buffer + MOCK_PAGE_SIZE,
1005 PAGE_SIZE,
1006 MOCK_APERTURE_START + PAGE_SIZE);
1007 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1008
1009 /* Reduce alignment */
1010 cmd.val64 = 1;
1011 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1012
1013 /* Confirm misalignment is rejected during alignment upgrade */
1014 test_ioctl_ioas_map_fixed(buffer + MOCK_PAGE_SIZE, PAGE_SIZE,
1015 MOCK_APERTURE_START + PAGE_SIZE);
1016 cmd.val64 = 0;
1017 EXPECT_ERRNO(EADDRINUSE, ioctl(self->fd, IOMMU_OPTION, &cmd));
1018
1019 test_ioctl_ioas_unmap(MOCK_APERTURE_START + PAGE_SIZE, PAGE_SIZE);
1020 test_ioctl_ioas_unmap(MOCK_APERTURE_START, PAGE_SIZE);
1021 }
1022
TEST_F(iommufd_ioas, copy_sweep)
1024 {
1025 struct iommu_ioas_copy copy_cmd = {
1026 .size = sizeof(copy_cmd),
1027 .flags = IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE,
1028 .src_ioas_id = self->ioas_id,
1029 .dst_iova = MOCK_APERTURE_START,
1030 .length = MOCK_PAGE_SIZE,
1031 };
1032 unsigned int dst_ioas_id;
1033 uint64_t last_iova;
1034 uint64_t iova;
1035
1036 test_ioctl_ioas_alloc(&dst_ioas_id);
1037 copy_cmd.dst_ioas_id = dst_ioas_id;
1038
1039 if (variant->mock_domains)
1040 last_iova = MOCK_APERTURE_START + BUFFER_SIZE - 1;
1041 else
1042 last_iova = MOCK_APERTURE_START + BUFFER_SIZE - 2;
1043
1044 test_ioctl_ioas_map_fixed(buffer, last_iova - MOCK_APERTURE_START + 1,
1045 MOCK_APERTURE_START);
1046
1047 for (iova = MOCK_APERTURE_START - PAGE_SIZE; iova <= last_iova;
1048 iova += 511) {
1049 copy_cmd.src_iova = iova;
1050 if (iova < MOCK_APERTURE_START ||
1051 iova + copy_cmd.length - 1 > last_iova) {
1052 EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_IOAS_COPY,
&copy_cmd));
1054 } else {
1055 ASSERT_EQ(0,
ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
1057 test_ioctl_ioas_unmap_id(dst_ioas_id, copy_cmd.dst_iova,
1058 copy_cmd.length);
1059 }
1060 }
1061
1062 test_ioctl_destroy(dst_ioas_id);
1063 }
1064
FIXTURE(iommufd_mock_domain)
1066 {
1067 int fd;
1068 uint32_t ioas_id;
1069 uint32_t hwpt_id;
1070 uint32_t hwpt_ids[2];
1071 uint32_t stdev_ids[2];
1072 uint32_t idev_ids[2];
1073 int mmap_flags;
1074 size_t mmap_buf_size;
1075 };
1076
FIXTURE_VARIANT(iommufd_mock_domain)
1078 {
1079 unsigned int mock_domains;
1080 bool hugepages;
1081 };
1082
FIXTURE_SETUP(iommufd_mock_domain)
1084 {
1085 unsigned int i;
1086
1087 self->fd = open("/dev/iommu", O_RDWR);
1088 ASSERT_NE(-1, self->fd);
1089 test_ioctl_ioas_alloc(&self->ioas_id);
1090
1091 ASSERT_GE(ARRAY_SIZE(self->hwpt_ids), variant->mock_domains);
1092
1093 for (i = 0; i != variant->mock_domains; i++)
1094 test_cmd_mock_domain(self->ioas_id, &self->stdev_ids[i],
1095 &self->hwpt_ids[i], &self->idev_ids[i]);
1096 self->hwpt_id = self->hwpt_ids[0];
1097
1098 self->mmap_flags = MAP_SHARED | MAP_ANONYMOUS;
1099 self->mmap_buf_size = PAGE_SIZE * 8;
1100 if (variant->hugepages) {
1101 /*
1102 * MAP_POPULATE will cause the kernel to fail mmap if THPs are
1103 * not available.
1104 */
1105 self->mmap_flags |= MAP_HUGETLB | MAP_POPULATE;
1106 self->mmap_buf_size = HUGEPAGE_SIZE * 2;
1107 }
1108 }
1109
FIXTURE_TEARDOWN(iommufd_mock_domain)
1111 {
1112 teardown_iommufd(self->fd, _metadata);
1113 }
1114
FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain)
1116 {
1117 .mock_domains = 1,
1118 .hugepages = false,
1119 };
1120
FIXTURE_VARIANT_ADD(iommufd_mock_domain, two_domains)
1122 {
1123 .mock_domains = 2,
1124 .hugepages = false,
1125 };
1126
FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain_hugepage)
1128 {
1129 .mock_domains = 1,
1130 .hugepages = true,
1131 };
1132
FIXTURE_VARIANT_ADD(iommufd_mock_domain, two_domains_hugepage)
1134 {
1135 .mock_domains = 2,
1136 .hugepages = true,
1137 };
1138
1139 /* Have the kernel check that the user pages made it to the iommu_domain */
1140 #define check_mock_iova(_ptr, _iova, _length) \
1141 ({ \
1142 struct iommu_test_cmd check_map_cmd = { \
1143 .size = sizeof(check_map_cmd), \
1144 .op = IOMMU_TEST_OP_MD_CHECK_MAP, \
1145 .id = self->hwpt_id, \
1146 .check_map = { .iova = _iova, \
1147 .length = _length, \
1148 .uptr = (uintptr_t)(_ptr) }, \
1149 }; \
1150 ASSERT_EQ(0, \
1151 ioctl(self->fd, \
1152 _IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_MAP), \
1153 &check_map_cmd)); \
1154 if (self->hwpt_ids[1]) { \
1155 check_map_cmd.id = self->hwpt_ids[1]; \
1156 ASSERT_EQ(0, \
1157 ioctl(self->fd, \
1158 _IOMMU_TEST_CMD( \
1159 IOMMU_TEST_OP_MD_CHECK_MAP), \
1160 &check_map_cmd)); \
1161 } \
1162 })
1163
TEST_F(iommufd_mock_domain, basic)
1165 {
1166 size_t buf_size = self->mmap_buf_size;
1167 uint8_t *buf;
1168 __u64 iova;
1169
1170 /* Simple one page map */
1171 test_ioctl_ioas_map(buffer, PAGE_SIZE, &iova);
1172 check_mock_iova(buffer, iova, PAGE_SIZE);
1173
1174 buf = mmap(0, buf_size, PROT_READ | PROT_WRITE, self->mmap_flags, -1,
1175 0);
1176 ASSERT_NE(MAP_FAILED, buf);
1177
1178 /* EFAULT half way through mapping */
1179 ASSERT_EQ(0, munmap(buf + buf_size / 2, buf_size / 2));
1180 test_err_ioctl_ioas_map(EFAULT, buf, buf_size, &iova);
1181
1182 /* EFAULT on first page */
1183 ASSERT_EQ(0, munmap(buf, buf_size / 2));
1184 test_err_ioctl_ioas_map(EFAULT, buf, buf_size, &iova);
1185 }
1186
TEST_F(iommufd_mock_domain, ro_unshare)
1188 {
1189 uint8_t *buf;
1190 __u64 iova;
1191 int fd;
1192
1193 fd = open("/proc/self/exe", O_RDONLY);
1194 ASSERT_NE(-1, fd);
1195
1196 buf = mmap(0, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
1197 ASSERT_NE(MAP_FAILED, buf);
1198 close(fd);
1199
1200 /*
1201 * There have been lots of changes to the "unshare" mechanism in
* get_user_pages(); make sure it works right. The write to the page
1203 * after we map it for reading should not change the assigned PFN.
1204 */
1205 ASSERT_EQ(0,
1206 _test_ioctl_ioas_map(self->fd, self->ioas_id, buf, PAGE_SIZE,
1207 &iova, IOMMU_IOAS_MAP_READABLE));
1208 check_mock_iova(buf, iova, PAGE_SIZE);
1209 memset(buf, 1, PAGE_SIZE);
1210 check_mock_iova(buf, iova, PAGE_SIZE);
1211 ASSERT_EQ(0, munmap(buf, PAGE_SIZE));
1212 }
1213
TEST_F(iommufd_mock_domain, all_aligns)
1215 {
1216 size_t test_step = variant->hugepages ? (self->mmap_buf_size / 16) :
1217 MOCK_PAGE_SIZE;
1218 size_t buf_size = self->mmap_buf_size;
1219 unsigned int start;
1220 unsigned int end;
1221 uint8_t *buf;
1222
1223 buf = mmap(0, buf_size, PROT_READ | PROT_WRITE, self->mmap_flags, -1,
1224 0);
1225 ASSERT_NE(MAP_FAILED, buf);
1226 check_refs(buf, buf_size, 0);
1227
1228 /*
* Map every combination of page size and alignment within a big region,
* using a coarser step for the hugepage case since it takes so long to
* finish.
1231 */
1232 for (start = 0; start < buf_size; start += test_step) {
1233 if (variant->hugepages)
1234 end = buf_size;
1235 else
1236 end = start + MOCK_PAGE_SIZE;
1237 for (; end < buf_size; end += MOCK_PAGE_SIZE) {
1238 size_t length = end - start;
1239 __u64 iova;
1240
1241 test_ioctl_ioas_map(buf + start, length, &iova);
1242 check_mock_iova(buf + start, iova, length);
1243 check_refs(buf + start / PAGE_SIZE * PAGE_SIZE,
1244 end / PAGE_SIZE * PAGE_SIZE -
1245 start / PAGE_SIZE * PAGE_SIZE,
1246 1);
1247
1248 test_ioctl_ioas_unmap(iova, length);
1249 }
1250 }
1251 check_refs(buf, buf_size, 0);
1252 ASSERT_EQ(0, munmap(buf, buf_size));
1253 }
1254
TEST_F(iommufd_mock_domain, all_aligns_copy)
1256 {
1257 size_t test_step = variant->hugepages ? self->mmap_buf_size / 16 :
1258 MOCK_PAGE_SIZE;
1259 size_t buf_size = self->mmap_buf_size;
1260 unsigned int start;
1261 unsigned int end;
1262 uint8_t *buf;
1263
1264 buf = mmap(0, buf_size, PROT_READ | PROT_WRITE, self->mmap_flags, -1,
1265 0);
1266 ASSERT_NE(MAP_FAILED, buf);
1267 check_refs(buf, buf_size, 0);
1268
1269 /*
* Map every combination of page size and alignment within a big region,
* using a coarser step for the hugepage case since it takes so long to
* finish.
1272 */
1273 for (start = 0; start < buf_size; start += test_step) {
1274 if (variant->hugepages)
1275 end = buf_size;
1276 else
1277 end = start + MOCK_PAGE_SIZE;
1278 for (; end < buf_size; end += MOCK_PAGE_SIZE) {
1279 size_t length = end - start;
1280 unsigned int old_id;
1281 uint32_t mock_stdev_id;
1282 __u64 iova;
1283
1284 test_ioctl_ioas_map(buf + start, length, &iova);
1285
1286 /* Add and destroy a domain while the area exists */
1287 old_id = self->hwpt_ids[1];
1288 test_cmd_mock_domain(self->ioas_id, &mock_stdev_id,
1289 &self->hwpt_ids[1], NULL);
1290
1291 check_mock_iova(buf + start, iova, length);
1292 check_refs(buf + start / PAGE_SIZE * PAGE_SIZE,
1293 end / PAGE_SIZE * PAGE_SIZE -
1294 start / PAGE_SIZE * PAGE_SIZE,
1295 1);
1296
1297 test_ioctl_destroy(mock_stdev_id);
1298 self->hwpt_ids[1] = old_id;
1299
1300 test_ioctl_ioas_unmap(iova, length);
1301 }
1302 }
1303 check_refs(buf, buf_size, 0);
1304 ASSERT_EQ(0, munmap(buf, buf_size));
1305 }
1306
TEST_F(iommufd_mock_domain, user_copy)
1308 {
1309 struct iommu_test_cmd access_cmd = {
1310 .size = sizeof(access_cmd),
1311 .op = IOMMU_TEST_OP_ACCESS_PAGES,
1312 .access_pages = { .length = BUFFER_SIZE,
1313 .uptr = (uintptr_t)buffer },
1314 };
1315 struct iommu_ioas_copy copy_cmd = {
1316 .size = sizeof(copy_cmd),
1317 .flags = IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE,
1318 .dst_ioas_id = self->ioas_id,
1319 .dst_iova = MOCK_APERTURE_START,
1320 .length = BUFFER_SIZE,
1321 };
1322 struct iommu_ioas_unmap unmap_cmd = {
1323 .size = sizeof(unmap_cmd),
1324 .ioas_id = self->ioas_id,
1325 .iova = MOCK_APERTURE_START,
1326 .length = BUFFER_SIZE,
1327 };
1328 unsigned int new_ioas_id, ioas_id;
1329
1330 /* Pin the pages in an IOAS with no domains then copy to an IOAS with domains */
1331 test_ioctl_ioas_alloc(&ioas_id);
1332 test_ioctl_ioas_map_id(ioas_id, buffer, BUFFER_SIZE,
&copy_cmd.src_iova);
1334
1335 test_cmd_create_access(ioas_id, &access_cmd.id,
1336 MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
1337
1338 access_cmd.access_pages.iova = copy_cmd.src_iova;
1339 ASSERT_EQ(0,
1340 ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1341 &access_cmd));
1342 copy_cmd.src_ioas_id = ioas_id;
ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
1344 check_mock_iova(buffer, MOCK_APERTURE_START, BUFFER_SIZE);
1345
1346 /* Now replace the ioas with a new one */
1347 test_ioctl_ioas_alloc(&new_ioas_id);
1348 test_ioctl_ioas_map_id(new_ioas_id, buffer, BUFFER_SIZE,
&copy_cmd.src_iova);
1350 test_cmd_access_replace_ioas(access_cmd.id, new_ioas_id);
1351
1352 /* Destroy the old ioas and cleanup copied mapping */
1353 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_UNMAP, &unmap_cmd));
1354 test_ioctl_destroy(ioas_id);
1355
1356 /* Then run the same test again with the new ioas */
1357 access_cmd.access_pages.iova = copy_cmd.src_iova;
1358 ASSERT_EQ(0,
1359 ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1360 &access_cmd));
1361 copy_cmd.src_ioas_id = new_ioas_id;
ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
1363 check_mock_iova(buffer, MOCK_APERTURE_START, BUFFER_SIZE);
1364
1365 test_cmd_destroy_access_pages(
1366 access_cmd.id, access_cmd.access_pages.out_access_pages_id);
1367 test_cmd_destroy_access(access_cmd.id);
1368
1369 test_ioctl_destroy(new_ioas_id);
1370 }
1371
TEST_F(iommufd_mock_domain, replace)
1373 {
1374 uint32_t ioas_id;
1375
1376 test_ioctl_ioas_alloc(&ioas_id);
1377
1378 test_cmd_mock_domain_replace(self->stdev_ids[0], ioas_id);
1379
1380 /*
1381 * Replacing the IOAS causes the prior HWPT to be deallocated, thus we
* should get ENOENT when we try to use it.
1383 */
1384 if (variant->mock_domains == 1)
1385 test_err_mock_domain_replace(ENOENT, self->stdev_ids[0],
1386 self->hwpt_ids[0]);
1387
1388 test_cmd_mock_domain_replace(self->stdev_ids[0], ioas_id);
1389 if (variant->mock_domains >= 2) {
1390 test_cmd_mock_domain_replace(self->stdev_ids[0],
1391 self->hwpt_ids[1]);
1392 test_cmd_mock_domain_replace(self->stdev_ids[0],
1393 self->hwpt_ids[1]);
1394 test_cmd_mock_domain_replace(self->stdev_ids[0],
1395 self->hwpt_ids[0]);
1396 }
1397
1398 test_cmd_mock_domain_replace(self->stdev_ids[0], self->ioas_id);
1399 test_ioctl_destroy(ioas_id);
1400 }
1401
TEST_F(iommufd_mock_domain, alloc_hwpt)
1403 {
1404 int i;
1405
1406 for (i = 0; i != variant->mock_domains; i++) {
1407 uint32_t stddev_id;
1408 uint32_t hwpt_id;
1409
1410 test_cmd_hwpt_alloc(self->idev_ids[0], self->ioas_id, &hwpt_id);
1411 test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL);
1412 test_ioctl_destroy(stddev_id);
1413 test_ioctl_destroy(hwpt_id);
1414 }
1415 }
1416
1417 /* VFIO compatibility IOCTLs */
1418
TEST_F(iommufd, simple_ioctls)
1420 {
1421 ASSERT_EQ(VFIO_API_VERSION, ioctl(self->fd, VFIO_GET_API_VERSION));
1422 ASSERT_EQ(1, ioctl(self->fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU));
1423 }
1424
TEST_F(iommufd, unmap_cmd)
1426 {
1427 struct vfio_iommu_type1_dma_unmap unmap_cmd = {
1428 .iova = MOCK_APERTURE_START,
1429 .size = PAGE_SIZE,
1430 };
1431
1432 unmap_cmd.argsz = 1;
1433 EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
1434
1435 unmap_cmd.argsz = sizeof(unmap_cmd);
1436 unmap_cmd.flags = 1 << 31;
1437 EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
1438
1439 unmap_cmd.flags = 0;
1440 EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
1441 }
1442
TEST_F(iommufd, map_cmd)
1444 {
1445 struct vfio_iommu_type1_dma_map map_cmd = {
1446 .iova = MOCK_APERTURE_START,
1447 .size = PAGE_SIZE,
1448 .vaddr = (__u64)buffer,
1449 };
1450
1451 map_cmd.argsz = 1;
1452 EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
1453
1454 map_cmd.argsz = sizeof(map_cmd);
1455 map_cmd.flags = 1 << 31;
1456 EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
1457
1458 /* Requires a domain to be attached */
1459 map_cmd.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
1460 EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
1461 }
1462
TEST_F(iommufd, info_cmd)
1464 {
1465 struct vfio_iommu_type1_info info_cmd = {};
1466
1467 /* Invalid argsz */
1468 info_cmd.argsz = 1;
1469 EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_GET_INFO, &info_cmd));
1470
1471 info_cmd.argsz = sizeof(info_cmd);
1472 EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_IOMMU_GET_INFO, &info_cmd));
1473 }
1474
TEST_F(iommufd, set_iommu_cmd)
1476 {
1477 /* Requires a domain to be attached */
1478 EXPECT_ERRNO(ENODEV,
1479 ioctl(self->fd, VFIO_SET_IOMMU, VFIO_TYPE1v2_IOMMU));
1480 EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU));
1481 }
1482
TEST_F(iommufd, vfio_ioas)
1484 {
1485 struct iommu_vfio_ioas vfio_ioas_cmd = {
1486 .size = sizeof(vfio_ioas_cmd),
1487 .op = IOMMU_VFIO_IOAS_GET,
1488 };
1489 __u32 ioas_id;
1490
1491 /* ENODEV if there is no compat ioas */
1492 EXPECT_ERRNO(ENODEV, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
1493
1494 /* Invalid id for set */
1495 vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_SET;
1496 EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
1497
/* Valid id for set */
1499 test_ioctl_ioas_alloc(&ioas_id);
1500 vfio_ioas_cmd.ioas_id = ioas_id;
1501 ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
1502
1503 /* Same id comes back from get */
1504 vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_GET;
1505 ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
1506 ASSERT_EQ(ioas_id, vfio_ioas_cmd.ioas_id);
1507
1508 /* Clear works */
1509 vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_CLEAR;
1510 ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
1511 vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_GET;
1512 EXPECT_ERRNO(ENODEV, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
1513 }
1514
FIXTURE(vfio_compat_mock_domain)
1516 {
1517 int fd;
1518 uint32_t ioas_id;
1519 };
1520
FIXTURE_VARIANT(vfio_compat_mock_domain)
1522 {
1523 unsigned int version;
1524 };
1525
FIXTURE_SETUP(vfio_compat_mock_domain)
1527 {
1528 struct iommu_vfio_ioas vfio_ioas_cmd = {
1529 .size = sizeof(vfio_ioas_cmd),
1530 .op = IOMMU_VFIO_IOAS_SET,
1531 };
1532
1533 self->fd = open("/dev/iommu", O_RDWR);
1534 ASSERT_NE(-1, self->fd);
1535
1536 /* Create what VFIO would consider a group */
1537 test_ioctl_ioas_alloc(&self->ioas_id);
1538 test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);
1539
1540 /* Attach it to the vfio compat */
1541 vfio_ioas_cmd.ioas_id = self->ioas_id;
1542 ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
1543 ASSERT_EQ(0, ioctl(self->fd, VFIO_SET_IOMMU, variant->version));
1544 }
1545
FIXTURE_TEARDOWN(vfio_compat_mock_domain)
1547 {
1548 teardown_iommufd(self->fd, _metadata);
1549 }
1550
FIXTURE_VARIANT_ADD(vfio_compat_mock_domain, Ver1v2)
1552 {
1553 .version = VFIO_TYPE1v2_IOMMU,
1554 };
1555
FIXTURE_VARIANT_ADD(vfio_compat_mock_domain, Ver1v0)
1557 {
1558 .version = VFIO_TYPE1_IOMMU,
1559 };
1560
TEST_F(vfio_compat_mock_domain, simple_close)
1562 {
1563 }
1564
TEST_F(vfio_compat_mock_domain, option_huge_pages)
1566 {
1567 struct iommu_option cmd = {
1568 .size = sizeof(cmd),
1569 .option_id = IOMMU_OPTION_HUGE_PAGES,
1570 .op = IOMMU_OPTION_OP_GET,
1571 .val64 = 3,
1572 .object_id = self->ioas_id,
1573 };
1574
1575 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1576 if (variant->version == VFIO_TYPE1_IOMMU) {
1577 ASSERT_EQ(0, cmd.val64);
1578 } else {
1579 ASSERT_EQ(1, cmd.val64);
1580 }
1581 }
1582
1583 /*
1584 * Execute an ioctl command stored in buffer and check that the result does not
1585 * overflow memory.
1586 */
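/* Check that every byte in a buffer holds the expected fill value */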
static bool is_filled(const void *buf, uint8_t c, size_t len)
1588 {
1589 const uint8_t *cbuf = buf;
1590
1591 for (; len; cbuf++, len--)
1592 if (*cbuf != c)
1593 return false;
1594 return true;
1595 }
1596
1597 #define ioctl_check_buf(fd, cmd) \
1598 ({ \
1599 size_t _cmd_len = *(__u32 *)buffer; \
1600 \
1601 memset(buffer + _cmd_len, 0xAA, BUFFER_SIZE - _cmd_len); \
1602 ASSERT_EQ(0, ioctl(fd, cmd, buffer)); \
1603 ASSERT_EQ(true, is_filled(buffer + _cmd_len, 0xAA, \
1604 BUFFER_SIZE - _cmd_len)); \
1605 })
1606
static void check_vfio_info_cap_chain(struct __test_metadata *_metadata,
1608 struct vfio_iommu_type1_info *info_cmd)
1609 {
1610 const struct vfio_info_cap_header *cap;
1611
1612 ASSERT_GE(info_cmd->argsz, info_cmd->cap_offset + sizeof(*cap));
1613 cap = buffer + info_cmd->cap_offset;
1614 while (true) {
1615 size_t cap_size;
1616
1617 if (cap->next)
1618 cap_size = (buffer + cap->next) - (void *)cap;
1619 else
1620 cap_size = (buffer + info_cmd->argsz) - (void *)cap;
1621
1622 switch (cap->id) {
1623 case VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE: {
1624 struct vfio_iommu_type1_info_cap_iova_range *data =
1625 (void *)cap;
1626
1627 ASSERT_EQ(1, data->header.version);
1628 ASSERT_EQ(1, data->nr_iovas);
1629 EXPECT_EQ(MOCK_APERTURE_START,
1630 data->iova_ranges[0].start);
1631 EXPECT_EQ(MOCK_APERTURE_LAST, data->iova_ranges[0].end);
1632 break;
1633 }
1634 case VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL: {
1635 struct vfio_iommu_type1_info_dma_avail *data =
1636 (void *)cap;
1637
1638 ASSERT_EQ(1, data->header.version);
1639 ASSERT_EQ(sizeof(*data), cap_size);
1640 break;
1641 }
1642 default:
1643 ASSERT_EQ(false, true);
1644 break;
1645 }
1646 if (!cap->next)
1647 break;
1648
1649 ASSERT_GE(info_cmd->argsz, cap->next + sizeof(*cap));
1650 ASSERT_GE(buffer + cap->next, (void *)cap);
1651 cap = buffer + cap->next;
1652 }
1653 }
1654
TEST_F(vfio_compat_mock_domain, get_info)
1656 {
1657 struct vfio_iommu_type1_info *info_cmd = buffer;
1658 unsigned int i;
1659 size_t caplen;
1660
1661 /* Pre-cap ABI */
1662 *info_cmd = (struct vfio_iommu_type1_info){
1663 .argsz = offsetof(struct vfio_iommu_type1_info, cap_offset),
1664 };
1665 ioctl_check_buf(self->fd, VFIO_IOMMU_GET_INFO);
1666 ASSERT_NE(0, info_cmd->iova_pgsizes);
1667 ASSERT_EQ(VFIO_IOMMU_INFO_PGSIZES | VFIO_IOMMU_INFO_CAPS,
1668 info_cmd->flags);
1669
1670 /* Read the cap chain size */
1671 *info_cmd = (struct vfio_iommu_type1_info){
1672 .argsz = sizeof(*info_cmd),
1673 };
1674 ioctl_check_buf(self->fd, VFIO_IOMMU_GET_INFO);
1675 ASSERT_NE(0, info_cmd->iova_pgsizes);
1676 ASSERT_EQ(VFIO_IOMMU_INFO_PGSIZES | VFIO_IOMMU_INFO_CAPS,
1677 info_cmd->flags);
1678 ASSERT_EQ(0, info_cmd->cap_offset);
1679 ASSERT_LT(sizeof(*info_cmd), info_cmd->argsz);
1680
/* Read the caps; the kernel should never create corrupted caps */
1682 caplen = info_cmd->argsz;
1683 for (i = sizeof(*info_cmd); i < caplen; i++) {
1684 *info_cmd = (struct vfio_iommu_type1_info){
1685 .argsz = i,
1686 };
1687 ioctl_check_buf(self->fd, VFIO_IOMMU_GET_INFO);
1688 ASSERT_EQ(VFIO_IOMMU_INFO_PGSIZES | VFIO_IOMMU_INFO_CAPS,
1689 info_cmd->flags);
1690 if (!info_cmd->cap_offset)
1691 continue;
1692 check_vfio_info_cap_chain(_metadata, info_cmd);
1693 }
1694 }
1695
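/* Randomly permute the IOVA array so unmaps are issued out of order */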
static void shuffle_array(unsigned long *array, size_t nelms)
1697 {
1698 unsigned int i;
1699
1700 /* Shuffle */
1701 for (i = 0; i != nelms; i++) {
1702 unsigned long tmp = array[i];
1703 unsigned int other = rand() % (nelms - i);
1704
1705 array[i] = array[other];
1706 array[other] = tmp;
1707 }
1708 }
1709
TEST_F(vfio_compat_mock_domain, map)
1711 {
1712 struct vfio_iommu_type1_dma_map map_cmd = {
1713 .argsz = sizeof(map_cmd),
1714 .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
1715 .vaddr = (uintptr_t)buffer,
1716 .size = BUFFER_SIZE,
1717 .iova = MOCK_APERTURE_START,
1718 };
1719 struct vfio_iommu_type1_dma_unmap unmap_cmd = {
1720 .argsz = sizeof(unmap_cmd),
1721 .size = BUFFER_SIZE,
1722 .iova = MOCK_APERTURE_START,
1723 };
1724 unsigned long pages_iova[BUFFER_SIZE / PAGE_SIZE];
1725 unsigned int i;
1726
1727 /* Simple map/unmap */
1728 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
1729 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
1730 ASSERT_EQ(BUFFER_SIZE, unmap_cmd.size);
1731
/* UNMAP_FLAG_ALL requires iova/size of 0 */
1733 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
1734 unmap_cmd.flags = VFIO_DMA_UNMAP_FLAG_ALL;
1735 EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
1736
1737 unmap_cmd.iova = 0;
1738 unmap_cmd.size = 0;
1739 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
1740 ASSERT_EQ(BUFFER_SIZE, unmap_cmd.size);
1741
1742 /* Small pages */
1743 for (i = 0; i != ARRAY_SIZE(pages_iova); i++) {
1744 map_cmd.iova = pages_iova[i] =
1745 MOCK_APERTURE_START + i * PAGE_SIZE;
1746 map_cmd.vaddr = (uintptr_t)buffer + i * PAGE_SIZE;
1747 map_cmd.size = PAGE_SIZE;
1748 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
1749 }
1750 shuffle_array(pages_iova, ARRAY_SIZE(pages_iova));
1751
1752 unmap_cmd.flags = 0;
1753 unmap_cmd.size = PAGE_SIZE;
1754 for (i = 0; i != ARRAY_SIZE(pages_iova); i++) {
1755 unmap_cmd.iova = pages_iova[i];
1756 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
1757 }
1758 }
1759
TEST_F(vfio_compat_mock_domain, huge_map)
1761 {
1762 size_t buf_size = HUGEPAGE_SIZE * 2;
1763 struct vfio_iommu_type1_dma_map map_cmd = {
1764 .argsz = sizeof(map_cmd),
1765 .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
1766 .size = buf_size,
1767 .iova = MOCK_APERTURE_START,
1768 };
1769 struct vfio_iommu_type1_dma_unmap unmap_cmd = {
1770 .argsz = sizeof(unmap_cmd),
1771 };
1772 unsigned long pages_iova[16];
1773 unsigned int i;
1774 void *buf;
1775
1776 /* Test huge pages and splitting */
1777 buf = mmap(0, buf_size, PROT_READ | PROT_WRITE,
1778 MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB | MAP_POPULATE, -1,
1779 0);
1780 ASSERT_NE(MAP_FAILED, buf);
1781 map_cmd.vaddr = (uintptr_t)buf;
1782 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
1783
1784 unmap_cmd.size = buf_size / ARRAY_SIZE(pages_iova);
1785 for (i = 0; i != ARRAY_SIZE(pages_iova); i++)
1786 pages_iova[i] = MOCK_APERTURE_START + (i * unmap_cmd.size);
1787 shuffle_array(pages_iova, ARRAY_SIZE(pages_iova));
1788
/* type1 mode can cut up larger mappings; type1v2 always fails */
1790 for (i = 0; i != ARRAY_SIZE(pages_iova); i++) {
1791 unmap_cmd.iova = pages_iova[i];
1792 unmap_cmd.size = buf_size / ARRAY_SIZE(pages_iova);
1793 if (variant->version == VFIO_TYPE1_IOMMU) {
1794 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA,
1795 &unmap_cmd));
1796 } else {
1797 EXPECT_ERRNO(ENOENT,
1798 ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA,
1799 &unmap_cmd));
1800 }
1801 }
1802 }
1803
1804 TEST_HARNESS_MAIN
1805