// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
 *
 * These tests are "kernel integrity" tests. They are looking for kernel
 * WARN/OOPS/KASAN/etc splats triggered by kernel sanitizers & debugging
 * features. They do not attempt to verify that the system calls are doing what
 * they are supposed to do.
 *
 * The basic philosophy is to run a sequence of calls that will succeed and then
 * sweep every failure injection point on that call chain to look for
 * interesting things in error handling.
 *
 * This test is best run with:
 *  echo 1 > /proc/sys/kernel/panic_on_warn
 * so that the run stops immediately if something actually goes wrong.
 */
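
/*
 * Typical invocation, as a sketch (the binary name below assumes the usual
 * kselftest build output for this file):
 *
 *   echo 1 > /proc/sys/kernel/panic_on_warn
 *   ./iommufd_fail_nth
 */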
#include <fcntl.h>
#include <dirent.h>

#define __EXPORTED_HEADERS__
#include <linux/vfio.h>

#include "iommufd_utils.h"

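/*
 * Set by setup_fault_injection() once the debugfs fault injection knobs have
 * been configured. The fail-nth tests are skipped if this remains false.
 */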
static bool have_fault_injection;

static int writeat(int dfd, const char *fn, const char *val)
{
	size_t val_len = strlen(val);
	ssize_t res;
	int fd;

	fd = openat(dfd, fn, O_WRONLY);
	if (fd == -1)
		return -1;
	res = write(fd, val, val_len);
	assert(res == val_len);
	close(fd);
	return 0;
}

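/*
 * Constructors run before the harness's main(), so the global test buffer is
 * sized and mapped here for all of the tests below.
 */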
static __attribute__((constructor)) void setup_buffer(void)
{
	BUFFER_SIZE = 2 * 1024 * 1024;

	buffer = mmap(0, BUFFER_SIZE, PROT_READ | PROT_WRITE,
		      MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	/* None of the tests can run without the shared buffer */
	assert(buffer != MAP_FAILED);
}

/*
 * This sets up fault injection in a way that is useful for this test.
 * It does not attempt to restore things back to how they were.
 */
static __attribute__((constructor)) void setup_fault_injection(void)
{
	DIR *debugfs = opendir("/sys/kernel/debug/");
	struct dirent *dent;

	if (!debugfs)
		return;

	/* Allow any allocation call to be fault injected */
	if (writeat(dirfd(debugfs), "failslab/ignore-gfp-wait", "N"))
		return;
	writeat(dirfd(debugfs), "fail_page_alloc/ignore-gfp-wait", "N");
	writeat(dirfd(debugfs), "fail_page_alloc/ignore-gfp-highmem", "N");

	while ((dent = readdir(debugfs))) {
		char fn[300];

		if (strncmp(dent->d_name, "fail", 4) != 0)
			continue;

		/* We are looking for kernel splats, quiet down the log */
		snprintf(fn, sizeof(fn), "%s/verbose", dent->d_name);
		writeat(dirfd(debugfs), fn, "0");
	}
	closedir(debugfs);
	have_fault_injection = true;
}

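/*
 * State for one fail-nth sweep: proc_fd is this task's
 * /proc/self/task/<tid>/fail-nth file and iteration is the next fault
 * injection point to arm.
 */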
struct fail_nth_state {
	int proc_fd;
	unsigned int iteration;
};

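/* Open the calling task's fail-nth control file before the sweep starts */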
static void fail_nth_first(struct __test_metadata *_metadata,
			   struct fail_nth_state *nth_state)
{
	char buf[300];

	snprintf(buf, sizeof(buf), "/proc/self/task/%u/fail-nth", getpid());
	nth_state->proc_fd = open(buf, O_RDWR);
	ASSERT_NE(-1, nth_state->proc_fd);
}

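/*
 * Decide whether to run another iteration. Reading fail-nth back gives "0"
 * once the armed failure was injected, so the sweep continues with the next
 * site; a non-zero value means the whole test ran without reaching site N,
 * so the sweep is complete. Returns true to keep iterating.
 */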
static bool fail_nth_next(struct __test_metadata *_metadata,
			  struct fail_nth_state *nth_state,
			  int test_result)
{
	static const char disable_nth[] = "0";
	char buf[300];

	/*
	 * This is just an arbitrary limit based on the current kernel
	 * situation. Changes in the kernel can dramatically change the number
	 * of required fault injection sites, so if this hits it doesn't
	 * necessarily mean a test failure, just that the limit has to be made
	 * bigger.
	 */
	ASSERT_GT(400, nth_state->iteration);
	if (nth_state->iteration != 0) {
		ssize_t res;
		ssize_t res2;

		buf[0] = 0;
		/*
		 * Annoyingly, disabling the nth can also fail. This means
		 * the test passed without triggering a failure.
		 */
		res = pread(nth_state->proc_fd, buf, sizeof(buf), 0);
		if (res == -1 && errno == EFAULT) {
			buf[0] = '1';
			buf[1] = '\n';
			res = 2;
		}

		res2 = pwrite(nth_state->proc_fd, disable_nth,
			      ARRAY_SIZE(disable_nth) - 1, 0);
		if (res2 == -1 && errno == EFAULT) {
			res2 = pwrite(nth_state->proc_fd, disable_nth,
				      ARRAY_SIZE(disable_nth) - 1, 0);
			buf[0] = '1';
			buf[1] = '\n';
		}
		ASSERT_EQ(ARRAY_SIZE(disable_nth) - 1, res2);

		/* printf("  nth %u result=%d nth=%u\n", nth_state->iteration,
		       test_result, atoi(buf)); */
		fflush(stdout);
		ASSERT_LT(1, res);
		if (res != 2 || buf[0] != '0' || buf[1] != '\n')
			return false;
	} else {
		/* printf("  nth %u result=%d\n", nth_state->iteration,
		       test_result); */
	}
	nth_state->iteration++;
	return true;
}

/*
 * This is called during the test to start failure injection. It allows the
 * test to do some setup that has already been swept and thus reduces the
 * number of required iterations.
 */
void __fail_nth_enable(struct __test_metadata *_metadata,
		       struct fail_nth_state *nth_state)
{
	char buf[300];
	size_t len;

	if (!nth_state->iteration)
		return;

	len = snprintf(buf, sizeof(buf), "%u", nth_state->iteration);
	ASSERT_EQ(len, pwrite(nth_state->proc_fd, buf, len, 0));
}
#define fail_nth_enable() __fail_nth_enable(_metadata, _nth_state)

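/*
 * Each TEST_FAIL_NTH body is run once with no injection to prime the sweep,
 * then the fixture is torn down and rebuilt and the body re-run with each
 * successive fault site armed, until a run completes without the injection
 * being consumed.
 */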
#define TEST_FAIL_NTH(fixture_name, name)                                           \
	static int test_nth_##name(struct __test_metadata *_metadata,               \
				   FIXTURE_DATA(fixture_name) *self,                \
				   const FIXTURE_VARIANT(fixture_name)              \
					   *variant,                                \
				   struct fail_nth_state *_nth_state);              \
	TEST_F(fixture_name, name)                                                  \
	{                                                                           \
		struct fail_nth_state nth_state = {};                               \
		int test_result = 0;                                                \
										    \
		if (!have_fault_injection)                                          \
			SKIP(return,                                                \
				   "fault injection is not enabled in the kernel"); \
		fail_nth_first(_metadata, &nth_state);                              \
		ASSERT_EQ(0, test_nth_##name(_metadata, self, variant,              \
					     &nth_state));                          \
		while (fail_nth_next(_metadata, &nth_state, test_result)) {         \
			fixture_name##_teardown(_metadata, self, variant);          \
			fixture_name##_setup(_metadata, self, variant);             \
			test_result = test_nth_##name(_metadata, self,              \
						      variant, &nth_state);         \
		};                                                                  \
		ASSERT_EQ(0, test_result);                                          \
	}                                                                           \
	static int test_nth_##name(                                                 \
		struct __test_metadata __attribute__((unused)) *_metadata,          \
		FIXTURE_DATA(fixture_name) __attribute__((unused)) *self,           \
		const FIXTURE_VARIANT(fixture_name) __attribute__((unused))         \
			*variant,                                                   \
		struct fail_nth_state *_nth_state)

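/*
 * Minimal fixture: the iommufd file descriptor and an optional access object
 * that teardown must destroy before closing the fd.
 */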
FIXTURE(basic_fail_nth)
{
	int fd;
	uint32_t access_id;
};

FIXTURE_SETUP(basic_fail_nth)
{
	self->fd = -1;
	self->access_id = 0;
}

FIXTURE_TEARDOWN(basic_fail_nth)
{
	int rc;

	if (self->access_id) {
		/* The access FD holds the iommufd open until it closes */
		rc = _test_cmd_destroy_access(self->access_id);
		assert(rc == 0);
	}
	teardown_iommufd(self->fd, _metadata);
}

/* Cover ioas.c */
TEST_FAIL_NTH(basic_fail_nth, basic)
{
	struct iommu_iova_range ranges[10];
	uint32_t ioas_id;
	__u64 iova;

	fail_nth_enable();

	self->fd = open("/dev/iommu", O_RDWR);
	if (self->fd == -1)
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
		return -1;

	{
		struct iommu_ioas_iova_ranges ranges_cmd = {
			.size = sizeof(ranges_cmd),
			.num_iovas = ARRAY_SIZE(ranges),
			.ioas_id = ioas_id,
			.allowed_iovas = (uintptr_t)ranges,
		};
		if (ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd))
			return -1;
	}

	{
		struct iommu_ioas_allow_iovas allow_cmd = {
			.size = sizeof(allow_cmd),
			.ioas_id = ioas_id,
			.num_iovas = 1,
			.allowed_iovas = (uintptr_t)ranges,
		};

		ranges[0].start = 16 * 1024;
		ranges[0].last = BUFFER_SIZE + 16 * 1024 * 600 - 1;
		if (ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd))
			return -1;
	}

	if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, BUFFER_SIZE, &iova,
				 IOMMU_IOAS_MAP_WRITEABLE |
					 IOMMU_IOAS_MAP_READABLE))
		return -1;

	{
		struct iommu_ioas_copy copy_cmd = {
			.size = sizeof(copy_cmd),
			.flags = IOMMU_IOAS_MAP_WRITEABLE |
				 IOMMU_IOAS_MAP_READABLE,
			.dst_ioas_id = ioas_id,
			.src_ioas_id = ioas_id,
			.src_iova = iova,
			.length = sizeof(ranges),
		};

		if (ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd))
			return -1;
	}

	if (_test_ioctl_ioas_unmap(self->fd, ioas_id, iova, BUFFER_SIZE,
				   NULL))
		return -1;
	/* Failure path of no IOVA to unmap */
	_test_ioctl_ioas_unmap(self->fd, ioas_id, iova, BUFFER_SIZE, NULL);
	return 0;
}

/* iopt_area_fill_domains() and iopt_area_fill_domain() */
TEST_FAIL_NTH(basic_fail_nth, map_domain)
{
	uint32_t ioas_id;
	__u32 device_id;
	__u32 hwpt_id;
	__u64 iova;

	self->fd = open("/dev/iommu", O_RDWR);
	if (self->fd == -1)
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
		return -1;

	if (_test_ioctl_set_temp_memory_limit(self->fd, 32))
		return -1;

	fail_nth_enable();

	if (_test_cmd_mock_domain(self->fd, ioas_id, &device_id, &hwpt_id))
		return -1;

	if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, 262144, &iova,
				 IOMMU_IOAS_MAP_WRITEABLE |
					 IOMMU_IOAS_MAP_READABLE))
		return -1;

	if (_test_ioctl_destroy(self->fd, device_id))
		return -1;
	if (_test_ioctl_destroy(self->fd, hwpt_id))
		return -1;

	if (_test_cmd_mock_domain(self->fd, ioas_id, &device_id, &hwpt_id))
		return -1;
	return 0;
}

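/* Like map_domain, but with two mock domains attached to the same IOAS */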
TEST_FAIL_NTH(basic_fail_nth, map_two_domains)
{
	uint32_t ioas_id;
	__u32 device_id2;
	__u32 device_id;
	__u32 hwpt_id2;
	__u32 hwpt_id;
	__u64 iova;

	self->fd = open("/dev/iommu", O_RDWR);
	if (self->fd == -1)
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
		return -1;

	if (_test_ioctl_set_temp_memory_limit(self->fd, 32))
		return -1;

	if (_test_cmd_mock_domain(self->fd, ioas_id, &device_id, &hwpt_id))
		return -1;

	fail_nth_enable();

	if (_test_cmd_mock_domain(self->fd, ioas_id, &device_id2, &hwpt_id2))
		return -1;

	if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, 262144, &iova,
				 IOMMU_IOAS_MAP_WRITEABLE |
					 IOMMU_IOAS_MAP_READABLE))
		return -1;

	if (_test_ioctl_destroy(self->fd, device_id))
		return -1;
	if (_test_ioctl_destroy(self->fd, hwpt_id))
		return -1;

	if (_test_ioctl_destroy(self->fd, device_id2))
		return -1;
	if (_test_ioctl_destroy(self->fd, hwpt_id2))
		return -1;

	if (_test_cmd_mock_domain(self->fd, ioas_id, &device_id, &hwpt_id))
		return -1;
	if (_test_cmd_mock_domain(self->fd, ioas_id, &device_id2, &hwpt_id2))
		return -1;
	return 0;
}

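/* Covers the access read/write paths exercised through the mock access object */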
TEST_FAIL_NTH(basic_fail_nth, access_rw)
{
	uint64_t tmp_big[4096];
	uint32_t ioas_id;
	uint16_t tmp[32];
	__u64 iova;

	self->fd = open("/dev/iommu", O_RDWR);
	if (self->fd == -1)
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
		return -1;

	if (_test_ioctl_set_temp_memory_limit(self->fd, 32))
		return -1;

	if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, 262144, &iova,
				 IOMMU_IOAS_MAP_WRITEABLE |
					 IOMMU_IOAS_MAP_READABLE))
		return -1;

	fail_nth_enable();

	if (_test_cmd_create_access(self->fd, ioas_id, &self->access_id, 0))
		return -1;

	{
		struct iommu_test_cmd access_cmd = {
			.size = sizeof(access_cmd),
			.op = IOMMU_TEST_OP_ACCESS_RW,
			.id = self->access_id,
			.access_rw = { .iova = iova,
				       .length = sizeof(tmp),
				       .uptr = (uintptr_t)tmp },
		};

		/* Read */
		if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
			  &access_cmd))
			return -1;

		access_cmd.access_rw.flags = MOCK_ACCESS_RW_WRITE;
		if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
			  &access_cmd))
			return -1;

		access_cmd.access_rw.flags = MOCK_ACCESS_RW_SLOW_PATH;
		if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
			  &access_cmd))
			return -1;
		access_cmd.access_rw.flags = MOCK_ACCESS_RW_SLOW_PATH |
					     MOCK_ACCESS_RW_WRITE;
		if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
			  &access_cmd))
			return -1;
	}

	{
		struct iommu_test_cmd access_cmd = {
			.size = sizeof(access_cmd),
			.op = IOMMU_TEST_OP_ACCESS_RW,
			.id = self->access_id,
			.access_rw = { .iova = iova,
				       .flags = MOCK_ACCESS_RW_SLOW_PATH,
				       .length = sizeof(tmp_big),
				       .uptr = (uintptr_t)tmp_big },
		};

		if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
			  &access_cmd))
			return -1;
	}
	if (_test_cmd_destroy_access(self->access_id))
		return -1;
	self->access_id = 0;
	return 0;
}

/* pages.c access functions */
TEST_FAIL_NTH(basic_fail_nth, access_pin)
{
	uint32_t access_pages_id;
	uint32_t ioas_id;
	__u64 iova;

	self->fd = open("/dev/iommu", O_RDWR);
	if (self->fd == -1)
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
		return -1;

	if (_test_ioctl_set_temp_memory_limit(self->fd, 32))
		return -1;

	if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, BUFFER_SIZE, &iova,
				 IOMMU_IOAS_MAP_WRITEABLE |
					 IOMMU_IOAS_MAP_READABLE))
		return -1;

	if (_test_cmd_create_access(self->fd, ioas_id, &self->access_id,
				    MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES))
		return -1;

	fail_nth_enable();

	{
		struct iommu_test_cmd access_cmd = {
			.size = sizeof(access_cmd),
			.op = IOMMU_TEST_OP_ACCESS_PAGES,
			.id = self->access_id,
			.access_pages = { .iova = iova,
					  .length = BUFFER_SIZE,
					  .uptr = (uintptr_t)buffer },
		};

		if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
			  &access_cmd))
			return -1;
		access_pages_id = access_cmd.access_pages.out_access_pages_id;
	}

	if (_test_cmd_destroy_access_pages(self->fd, self->access_id,
					   access_pages_id))
		return -1;

	if (_test_cmd_destroy_access(self->access_id))
		return -1;
	self->access_id = 0;
	return 0;
}

/* iopt_pages_fill_xarray() */
TEST_FAIL_NTH(basic_fail_nth, access_pin_domain)
{
	uint32_t access_pages_id;
	uint32_t ioas_id;
	__u32 device_id;
	__u32 hwpt_id;
	__u64 iova;

	self->fd = open("/dev/iommu", O_RDWR);
	if (self->fd == -1)
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
		return -1;

	if (_test_ioctl_set_temp_memory_limit(self->fd, 32))
		return -1;

	if (_test_cmd_mock_domain(self->fd, ioas_id, &device_id, &hwpt_id))
		return -1;

	if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, BUFFER_SIZE, &iova,
				 IOMMU_IOAS_MAP_WRITEABLE |
					 IOMMU_IOAS_MAP_READABLE))
		return -1;

	if (_test_cmd_create_access(self->fd, ioas_id, &self->access_id,
				    MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES))
		return -1;

	fail_nth_enable();

	{
		struct iommu_test_cmd access_cmd = {
			.size = sizeof(access_cmd),
			.op = IOMMU_TEST_OP_ACCESS_PAGES,
			.id = self->access_id,
			.access_pages = { .iova = iova,
					  .length = BUFFER_SIZE,
					  .uptr = (uintptr_t)buffer },
		};

		if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
			  &access_cmd))
			return -1;
		access_pages_id = access_cmd.access_pages.out_access_pages_id;
	}

	if (_test_cmd_destroy_access_pages(self->fd, self->access_id,
					   access_pages_id))
		return -1;

	if (_test_cmd_destroy_access(self->access_id))
		return -1;
	self->access_id = 0;

	if (_test_ioctl_destroy(self->fd, device_id))
		return -1;
	if (_test_ioctl_destroy(self->fd, hwpt_id))
		return -1;
	return 0;
}

TEST_HARNESS_MAIN