// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
 *
 * These tests are "kernel integrity" tests. They look for kernel
 * WARN/OOPS/kasan/etc splats triggered by kernel sanitizers & debugging
 * features. They do not attempt to verify that the system calls are doing what
 * they are supposed to do.
 *
 * The basic philosophy is to run a sequence of calls that will succeed and then
 * sweep every failure injection point on that call chain to look for
 * interesting things in error handling.
 *
 * If something actually goes wrong, this test is best run with:
 *  echo 1 > /proc/sys/kernel/panic_on_warn
 */
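/*
 * The sweep is driven by the per-task fault-injection control file
 * /proc/self/task/<tid>/fail-nth: writing N arms the Nth fault-injection
 * site hit by the task to fail, and reading the file back returns 0 once
 * that failure has actually been injected. Each iteration re-runs the test
 * body with the next N until a run completes without consuming the armed
 * failure, at which point every injection point on the call chain has been
 * swept.
 */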
#include <fcntl.h>
#include <dirent.h>

#define __EXPORTED_HEADERS__
#include <linux/vfio.h>

#include "iommufd_utils.h"

static bool have_fault_injection;

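/* Write a string value to a file relative to dfd; used to poke debugfs knobs */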
static int writeat(int dfd, const char *fn, const char *val)
{
	size_t val_len = strlen(val);
	ssize_t res;
	int fd;

	fd = openat(dfd, fn, O_WRONLY);
	if (fd == -1)
		return -1;
	res = write(fd, val, val_len);
	assert(res == val_len);
	close(fd);
	return 0;
}

static __attribute__((constructor)) void setup_buffer(void)
{
	BUFFER_SIZE = 2 * 1024 * 1024;

	buffer = mmap(0, BUFFER_SIZE, PROT_READ | PROT_WRITE,
		      MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	/* Every test relies on this mapping; fail early if it could not be created */
	assert(buffer != MAP_FAILED);
}

/*
 * This sets up fault injection in a way that is useful for this test.
 * It does not attempt to restore things back to how they were.
 */
static __attribute__((constructor)) void setup_fault_injection(void)
{
	DIR *debugfs = opendir("/sys/kernel/debug/");
	struct dirent *dent;

	if (!debugfs)
		return;

	/* Allow any allocation call to be fault injected */
	if (writeat(dirfd(debugfs), "failslab/ignore-gfp-wait", "N"))
		return;
	writeat(dirfd(debugfs), "fail_page_alloc/ignore-gfp-wait", "N");
	writeat(dirfd(debugfs), "fail_page_alloc/ignore-gfp-highmem", "N");

	while ((dent = readdir(debugfs))) {
		char fn[300];

		if (strncmp(dent->d_name, "fail", 4) != 0)
			continue;

		/* We are looking for kernel splats, quiet down the log */
		snprintf(fn, sizeof(fn), "%s/verbose", dent->d_name);
		writeat(dirfd(debugfs), fn, "0");
	}
	closedir(debugfs);
	have_fault_injection = true;
}

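/*
 * Per-test sweep state: proc_fd is the open fail-nth control file for this
 * task and iteration counts how many fault-injection sites have been tried.
 */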
struct fail_nth_state {
	int proc_fd;
	unsigned int iteration;
};

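/* Open this task's fail-nth control file so the sweep can arm it */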
static void fail_nth_first(struct __test_metadata *_metadata,
			   struct fail_nth_state *nth_state)
{
	char buf[300];

	snprintf(buf, sizeof(buf), "/proc/self/task/%u/fail-nth", getpid());
	nth_state->proc_fd = open(buf, O_RDWR);
	ASSERT_NE(-1, nth_state->proc_fd);
}

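/*
 * Advance the sweep. For all but the first iteration this checks whether the
 * armed failure was actually consumed (fail-nth reads back "0") and then
 * disarms it; if the previous run finished without reaching the armed site
 * the sweep is complete and this returns false.
 */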
static bool fail_nth_next(struct __test_metadata *_metadata,
			  struct fail_nth_state *nth_state,
			  int test_result)
{
	static const char disable_nth[] = "0";
	char buf[300];

	/*
	 * This is just an arbitrary limit based on the current kernel
	 * situation. Changes in the kernel can dramatically change the number
	 * of required fault injection sites, so if this hits it doesn't
	 * necessarily mean a test failure, just that the limit has to be made
	 * bigger.
	 */
	ASSERT_GT(400, nth_state->iteration);
	if (nth_state->iteration != 0) {
		ssize_t res;
		ssize_t res2;

		buf[0] = 0;
		/*
		 * Annoyingly, disabling the nth can also fail. This means
		 * the test passed without triggering failure.
		 */
		res = pread(nth_state->proc_fd, buf, sizeof(buf), 0);
		if (res == -1 && errno == EFAULT) {
			buf[0] = '1';
			buf[1] = '\n';
			res = 2;
		}

		res2 = pwrite(nth_state->proc_fd, disable_nth,
			      ARRAY_SIZE(disable_nth) - 1, 0);
		if (res2 == -1 && errno == EFAULT) {
			res2 = pwrite(nth_state->proc_fd, disable_nth,
				      ARRAY_SIZE(disable_nth) - 1, 0);
			buf[0] = '1';
			buf[1] = '\n';
		}
		ASSERT_EQ(ARRAY_SIZE(disable_nth) - 1, res2);

		/* printf("  nth %u result=%d nth=%u\n", nth_state->iteration,
		       test_result, atoi(buf)); */
		fflush(stdout);
		ASSERT_LT(1, res);
		if (res != 2 || buf[0] != '0' || buf[1] != '\n')
			return false;
	} else {
		/* printf("  nth %u result=%d\n", nth_state->iteration,
		       test_result); */
	}
	nth_state->iteration++;
	return true;
}

/*
 * This is called during the test to start failure injection. It lets the test
 * perform setup that has already been swept, reducing the number of required
 * iterations.
 */
void __fail_nth_enable(struct __test_metadata *_metadata,
		       struct fail_nth_state *nth_state)
{
	char buf[300];
	size_t len;

	if (!nth_state->iteration)
		return;

	len = snprintf(buf, sizeof(buf), "%u", nth_state->iteration);
	ASSERT_EQ(len, pwrite(nth_state->proc_fd, buf, len, 0));
}
#define fail_nth_enable() __fail_nth_enable(_metadata, _nth_state)

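/*
 * Defines a fixture test that first runs the body with no failure armed, then
 * repeatedly tears down and re-creates the fixture and re-runs the body while
 * fail_nth_next() arms each successive fault-injection site. The final run,
 * which never reaches the armed site, must succeed.
 */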
#define TEST_FAIL_NTH(fixture_name, name)                                           \
	static int test_nth_##name(struct __test_metadata *_metadata,               \
				   FIXTURE_DATA(fixture_name) *self,                \
				   const FIXTURE_VARIANT(fixture_name)              \
					   *variant,                                \
				   struct fail_nth_state *_nth_state);              \
	TEST_F(fixture_name, name)                                                  \
	{                                                                           \
		struct fail_nth_state nth_state = {};                               \
		int test_result = 0;                                                \
										    \
		if (!have_fault_injection)                                          \
			SKIP(return,                                                \
				   "fault injection is not enabled in the kernel"); \
		fail_nth_first(_metadata, &nth_state);                              \
		ASSERT_EQ(0, test_nth_##name(_metadata, self, variant,              \
					     &nth_state));                          \
		while (fail_nth_next(_metadata, &nth_state, test_result)) {         \
			fixture_name##_teardown(_metadata, self, variant);          \
			fixture_name##_setup(_metadata, self, variant);             \
			test_result = test_nth_##name(_metadata, self,              \
						      variant, &nth_state);         \
		};                                                                  \
		ASSERT_EQ(0, test_result);                                          \
	}                                                                           \
	static int test_nth_##name(                                                 \
		struct __test_metadata __attribute__((unused)) *_metadata,          \
		FIXTURE_DATA(fixture_name) __attribute__((unused)) *self,           \
		const FIXTURE_VARIANT(fixture_name) __attribute__((unused))         \
			*variant,                                                   \
		struct fail_nth_state *_nth_state)

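/*
 * The fixture only tracks the iommufd file descriptor and any outstanding
 * access object so each sweep iteration can be torn down cleanly.
 */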
FIXTURE(basic_fail_nth)
{
	int fd;
	uint32_t access_id;
};

FIXTURE_SETUP(basic_fail_nth)
{
	self->fd = -1;
	self->access_id = 0;
}

FIXTURE_TEARDOWN(basic_fail_nth)
{
	int rc;

	if (self->access_id) {
		/* The access FD holds the iommufd open until it closes */
		rc = _test_cmd_destroy_access(self->access_id);
		assert(rc == 0);
	}
	teardown_iommufd(self->fd, _metadata);
}

/* Cover ioas.c */
TEST_FAIL_NTH(basic_fail_nth, basic)
{
	struct iommu_iova_range ranges[10];
	uint32_t ioas_id;
	__u64 iova;

	fail_nth_enable();

	self->fd = open("/dev/iommu", O_RDWR);
	if (self->fd == -1)
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
		return -1;

	{
		struct iommu_ioas_iova_ranges ranges_cmd = {
			.size = sizeof(ranges_cmd),
			.num_iovas = ARRAY_SIZE(ranges),
			.ioas_id = ioas_id,
			.allowed_iovas = (uintptr_t)ranges,
		};
		if (ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd))
			return -1;
	}

	{
		struct iommu_ioas_allow_iovas allow_cmd = {
			.size = sizeof(allow_cmd),
			.ioas_id = ioas_id,
			.num_iovas = 1,
			.allowed_iovas = (uintptr_t)ranges,
		};

		ranges[0].start = 16 * 1024;
		ranges[0].last = BUFFER_SIZE + 16 * 1024 * 600 - 1;
		if (ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd))
			return -1;
	}

	if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, BUFFER_SIZE, &iova,
				 IOMMU_IOAS_MAP_WRITEABLE |
					 IOMMU_IOAS_MAP_READABLE))
		return -1;

	{
		struct iommu_ioas_copy copy_cmd = {
			.size = sizeof(copy_cmd),
			.flags = IOMMU_IOAS_MAP_WRITEABLE |
				 IOMMU_IOAS_MAP_READABLE,
			.dst_ioas_id = ioas_id,
			.src_ioas_id = ioas_id,
			.src_iova = iova,
			.length = sizeof(ranges),
		};

		if (ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd))
			return -1;
	}

	if (_test_ioctl_ioas_unmap(self->fd, ioas_id, iova, BUFFER_SIZE,
				   NULL))
		return -1;
	/* Failure path of no IOVA to unmap */
	_test_ioctl_ioas_unmap(self->fd, ioas_id, iova, BUFFER_SIZE, NULL);
	return 0;
}

/* iopt_area_fill_domains() and iopt_area_fill_domain() */
TEST_FAIL_NTH(basic_fail_nth, map_domain)
{
	uint32_t ioas_id;
	__u32 stdev_id;
	__u32 hwpt_id;
	__u64 iova;

	self->fd = open("/dev/iommu", O_RDWR);
	if (self->fd == -1)
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
		return -1;

	if (_test_ioctl_set_temp_memory_limit(self->fd, 32))
		return -1;

	fail_nth_enable();

	if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id))
		return -1;

	if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, 262144, &iova,
				 IOMMU_IOAS_MAP_WRITEABLE |
					 IOMMU_IOAS_MAP_READABLE))
		return -1;

	if (_test_ioctl_destroy(self->fd, stdev_id))
		return -1;

	if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id))
		return -1;
	return 0;
}

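/* As map_domain, but with a second mock domain so mappings are filled into both domains */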
TEST_FAIL_NTH(basic_fail_nth, map_two_domains)
{
	uint32_t ioas_id;
	__u32 stdev_id2;
	__u32 stdev_id;
	__u32 hwpt_id2;
	__u32 hwpt_id;
	__u64 iova;

	self->fd = open("/dev/iommu", O_RDWR);
	if (self->fd == -1)
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
		return -1;

	if (_test_ioctl_set_temp_memory_limit(self->fd, 32))
		return -1;

	if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id))
		return -1;

	fail_nth_enable();

	if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id2, &hwpt_id2))
		return -1;

	if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, 262144, &iova,
				 IOMMU_IOAS_MAP_WRITEABLE |
					 IOMMU_IOAS_MAP_READABLE))
		return -1;

	if (_test_ioctl_destroy(self->fd, stdev_id))
		return -1;

	if (_test_ioctl_destroy(self->fd, stdev_id2))
		return -1;

	if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id))
		return -1;
	if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id2, &hwpt_id2))
		return -1;
	return 0;
}

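/* Emulated access read/write paths, in both the default and MOCK_ACCESS_RW_SLOW_PATH variants */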
TEST_FAIL_NTH(basic_fail_nth, access_rw)
{
	uint64_t tmp_big[4096];
	uint32_t ioas_id;
	uint16_t tmp[32];
	__u64 iova;

	self->fd = open("/dev/iommu", O_RDWR);
	if (self->fd == -1)
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
		return -1;

	if (_test_ioctl_set_temp_memory_limit(self->fd, 32))
		return -1;

	if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, 262144, &iova,
				 IOMMU_IOAS_MAP_WRITEABLE |
					 IOMMU_IOAS_MAP_READABLE))
		return -1;

	fail_nth_enable();

	if (_test_cmd_create_access(self->fd, ioas_id, &self->access_id, 0))
		return -1;

	{
		struct iommu_test_cmd access_cmd = {
			.size = sizeof(access_cmd),
			.op = IOMMU_TEST_OP_ACCESS_RW,
			.id = self->access_id,
			.access_rw = { .iova = iova,
				       .length = sizeof(tmp),
				       .uptr = (uintptr_t)tmp },
		};

		/* READ */
		if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
			  &access_cmd))
			return -1;

		access_cmd.access_rw.flags = MOCK_ACCESS_RW_WRITE;
		if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
			  &access_cmd))
			return -1;

		access_cmd.access_rw.flags = MOCK_ACCESS_RW_SLOW_PATH;
		if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
			  &access_cmd))
			return -1;
		access_cmd.access_rw.flags = MOCK_ACCESS_RW_SLOW_PATH |
					     MOCK_ACCESS_RW_WRITE;
		if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
			  &access_cmd))
			return -1;
	}

	{
		struct iommu_test_cmd access_cmd = {
			.size = sizeof(access_cmd),
			.op = IOMMU_TEST_OP_ACCESS_RW,
			.id = self->access_id,
			.access_rw = { .iova = iova,
				       .flags = MOCK_ACCESS_RW_SLOW_PATH,
				       .length = sizeof(tmp_big),
				       .uptr = (uintptr_t)tmp_big },
		};

		if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
			  &access_cmd))
			return -1;
	}
	if (_test_cmd_destroy_access(self->access_id))
		return -1;
	self->access_id = 0;
	return 0;
}

/* pages.c access functions */
TEST_FAIL_NTH(basic_fail_nth, access_pin)
{
	uint32_t access_pages_id;
	uint32_t ioas_id;
	__u64 iova;

	self->fd = open("/dev/iommu", O_RDWR);
	if (self->fd == -1)
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
		return -1;

	if (_test_ioctl_set_temp_memory_limit(self->fd, 32))
		return -1;

	if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, BUFFER_SIZE, &iova,
				 IOMMU_IOAS_MAP_WRITEABLE |
					 IOMMU_IOAS_MAP_READABLE))
		return -1;

	if (_test_cmd_create_access(self->fd, ioas_id, &self->access_id,
				    MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES))
		return -1;

	fail_nth_enable();

	{
		struct iommu_test_cmd access_cmd = {
			.size = sizeof(access_cmd),
			.op = IOMMU_TEST_OP_ACCESS_PAGES,
			.id = self->access_id,
			.access_pages = { .iova = iova,
					  .length = BUFFER_SIZE,
					  .uptr = (uintptr_t)buffer },
		};

		if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
			  &access_cmd))
			return -1;
		access_pages_id = access_cmd.access_pages.out_access_pages_id;
	}

	if (_test_cmd_destroy_access_pages(self->fd, self->access_id,
					   access_pages_id))
		return -1;

	if (_test_cmd_destroy_access(self->access_id))
		return -1;
	self->access_id = 0;
	return 0;
}

/* iopt_pages_fill_xarray() */
TEST_FAIL_NTH(basic_fail_nth, access_pin_domain)
{
	uint32_t access_pages_id;
	uint32_t ioas_id;
	__u32 stdev_id;
	__u32 hwpt_id;
	__u64 iova;

	self->fd = open("/dev/iommu", O_RDWR);
	if (self->fd == -1)
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
		return -1;

	if (_test_ioctl_set_temp_memory_limit(self->fd, 32))
		return -1;

	if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id))
		return -1;

	if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, BUFFER_SIZE, &iova,
				 IOMMU_IOAS_MAP_WRITEABLE |
					 IOMMU_IOAS_MAP_READABLE))
		return -1;

	if (_test_cmd_create_access(self->fd, ioas_id, &self->access_id,
				    MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES))
		return -1;

	fail_nth_enable();

	{
		struct iommu_test_cmd access_cmd = {
			.size = sizeof(access_cmd),
			.op = IOMMU_TEST_OP_ACCESS_PAGES,
			.id = self->access_id,
			.access_pages = { .iova = iova,
					  .length = BUFFER_SIZE,
					  .uptr = (uintptr_t)buffer },
		};

		if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
			  &access_cmd))
			return -1;
		access_pages_id = access_cmd.access_pages.out_access_pages_id;
	}

	if (_test_cmd_destroy_access_pages(self->fd, self->access_id,
					   access_pages_id))
		return -1;

	if (_test_cmd_destroy_access(self->access_id))
		return -1;
	self->access_id = 0;

	if (_test_ioctl_destroy(self->fd, stdev_id))
		return -1;
	return 0;
}

TEST_HARNESS_MAIN