// SPDX-License-Identifier: GPL-2.0
/*
 * HMM stands for Heterogeneous Memory Management; it is a helper layer inside
 * the Linux kernel that helps device drivers mirror a process address space in
 * the device. This allows the device to use the same address space, which
 * makes communication and data exchange a lot easier.
 *
 * This framework's sole purpose is to exercise various code paths inside
 * the kernel to make sure that HMM performs as expected and to flush out any
 * bugs.
 */

#include "../kselftest_harness.h"

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <unistd.h>
#include <string.h>
#include <strings.h>
#include <signal.h>
#include <time.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <sys/mman.h>
#include <sys/ioctl.h>

/*
 * This is a private UAPI to the kernel test module so it isn't exported
 * in the usual include/uapi/... directory.
 */
#include <lib/test_hmm_uapi.h>
#include <mm/gup_test.h>

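/*
 * Per-test buffer descriptor: "ptr" is the CPU mapping under test and
 * "mirror" is a same-sized staging buffer holding the dmirror driver's
 * view of that range. "cpages" and "faults" are filled in by the driver
 * to report how many pages it processed and how many device faults it
 * serviced.
 */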
struct hmm_buffer {
	void		*ptr;
	void		*mirror;
	unsigned long	size;
	int		fd;
	uint64_t	cpages;
	uint64_t	faults;
};

enum {
	HMM_PRIVATE_DEVICE_ONE,
	HMM_PRIVATE_DEVICE_TWO,
	HMM_COHERENCE_DEVICE_ONE,
	HMM_COHERENCE_DEVICE_TWO,
};

#define TWOMEG		(1 << 21)
#define HMM_BUFFER_SIZE (1024 << 12)
#define HMM_PATH_MAX    64
#define NTIMES		10

#define ALIGN(x, a) (((x) + ((a) - 1)) & (~((a) - 1)))
/* Just the flags we need, copied from mm.h: */
#define FOLL_WRITE	0x01	/* check pte is writable */
#define FOLL_LONGTERM   0x10000 /* mapping lifetime is indefinite */
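
/*
 * Illustrative compile-time check (added for clarity, not required by the
 * tests): ALIGN() rounds x up to the next multiple of a power-of-two
 * alignment, e.g. one byte past 2MB rounds up to 4MB.
 */
_Static_assert(ALIGN(TWOMEG + 1, TWOMEG) == 2 * TWOMEG,
	       "ALIGN rounds up to the next aligned boundary");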

FIXTURE(hmm)
{
	int		fd;
	unsigned int	page_size;
	unsigned int	page_shift;
};

FIXTURE_VARIANT(hmm)
{
	int     device_number;
};

FIXTURE_VARIANT_ADD(hmm, hmm_device_private)
{
	.device_number = HMM_PRIVATE_DEVICE_ONE,
};

FIXTURE_VARIANT_ADD(hmm, hmm_device_coherent)
{
	.device_number = HMM_COHERENCE_DEVICE_ONE,
};

FIXTURE(hmm2)
{
	int		fd0;
	int		fd1;
	unsigned int	page_size;
	unsigned int	page_shift;
};

FIXTURE_VARIANT(hmm2)
{
	int     device_number0;
	int     device_number1;
};

FIXTURE_VARIANT_ADD(hmm2, hmm2_device_private)
{
	.device_number0 = HMM_PRIVATE_DEVICE_ONE,
	.device_number1 = HMM_PRIVATE_DEVICE_TWO,
};

FIXTURE_VARIANT_ADD(hmm2, hmm2_device_coherent)
{
	.device_number0 = HMM_COHERENCE_DEVICE_ONE,
	.device_number1 = HMM_COHERENCE_DEVICE_TWO,
};

static int hmm_open(int unit)
{
	char pathname[HMM_PATH_MAX];
	int fd;

	snprintf(pathname, sizeof(pathname), "/dev/hmm_dmirror%d", unit);
	fd = open(pathname, O_RDWR, 0);
	if (fd < 0)
		fprintf(stderr, "could not open hmm dmirror driver (%s)\n",
			pathname);
	return fd;
}

static bool hmm_is_coherent_type(int dev_num)
{
	return (dev_num >= HMM_COHERENCE_DEVICE_ONE);
}

FIXTURE_SETUP(hmm)
{
	self->page_size = sysconf(_SC_PAGE_SIZE);
	self->page_shift = ffs(self->page_size) - 1;

	self->fd = hmm_open(variant->device_number);
	if (self->fd < 0 && hmm_is_coherent_type(variant->device_number))
		SKIP(exit(0), "DEVICE_COHERENT not available");
	ASSERT_GE(self->fd, 0);
}

FIXTURE_SETUP(hmm2)
{
	self->page_size = sysconf(_SC_PAGE_SIZE);
	self->page_shift = ffs(self->page_size) - 1;

	self->fd0 = hmm_open(variant->device_number0);
	if (self->fd0 < 0 && hmm_is_coherent_type(variant->device_number0))
		SKIP(exit(0), "DEVICE_COHERENT not available");
	ASSERT_GE(self->fd0, 0);
	self->fd1 = hmm_open(variant->device_number1);
	ASSERT_GE(self->fd1, 0);
}

FIXTURE_TEARDOWN(hmm)
{
	int ret = close(self->fd);

	ASSERT_EQ(ret, 0);
	self->fd = -1;
}

FIXTURE_TEARDOWN(hmm2)
{
	int ret = close(self->fd0);

	ASSERT_EQ(ret, 0);
	self->fd0 = -1;

	ret = close(self->fd1);
	ASSERT_EQ(ret, 0);
	self->fd1 = -1;
}

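/*
 * Issue one request to the dmirror driver, retrying if the ioctl is
 * interrupted. On success, the driver reports back through "cpages" and
 * "faults" how many pages it processed and how many device faults it
 * serviced.
 */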
static int hmm_dmirror_cmd(int fd,
			   unsigned long request,
			   struct hmm_buffer *buffer,
			   unsigned long npages)
{
	struct hmm_dmirror_cmd cmd;
	int ret;

	/* Set up the dmirror command for the requested operation. */
	cmd.addr = (__u64)buffer->ptr;
	cmd.ptr = (__u64)buffer->mirror;
	cmd.npages = npages;

	for (;;) {
		ret = ioctl(fd, request, &cmd);
		if (ret == 0)
			break;
		if (errno == EINTR)
			continue;
		return -errno;
	}
	buffer->cpages = cmd.cpages;
	buffer->faults = cmd.faults;

	return 0;
}

static void hmm_buffer_free(struct hmm_buffer *buffer)
{
	if (buffer == NULL)
		return;

	if (buffer->ptr)
		munmap(buffer->ptr, buffer->size);
	free(buffer->mirror);
	free(buffer);
}

/*
 * Create a temporary file that will be deleted on close.
 */
static int hmm_create_file(unsigned long size)
{
	char path[HMM_PATH_MAX];
	int fd;

	strcpy(path, "/tmp");
	fd = open(path, O_TMPFILE | O_EXCL | O_RDWR, 0600);
	if (fd >= 0) {
		int r;

		do {
			r = ftruncate(fd, size);
		} while (r == -1 && errno == EINTR);
		if (!r)
			return fd;
		close(fd);
	}
	return -1;
}
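
/*
 * A minimal fallback sketch (illustrative and currently unused): not every
 * filesystem supports O_TMPFILE, and an unlinked mkstemp() file gives the
 * same delete-on-close behavior on those that don't.
 */
static __attribute__((unused)) int hmm_create_file_fallback(unsigned long size)
{
	char path[] = "/tmp/hmm-XXXXXX";
	int fd = mkstemp(path);

	if (fd < 0)
		return -1;
	unlink(path);	/* the file is deleted once the last fd is closed */
	if (ftruncate(fd, size)) {
		close(fd);
		return -1;
	}
	return fd;
}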

/*
 * Return a random unsigned number.
 */
static unsigned int hmm_random(void)
{
	static int fd = -1;
	unsigned int r;

	if (fd < 0) {
		fd = open("/dev/urandom", O_RDONLY);
		if (fd < 0) {
			fprintf(stderr, "%s:%d failed to open /dev/urandom\n",
					__FILE__, __LINE__);
			return ~0U;
		}
	}
	if (read(fd, &r, sizeof(r)) != sizeof(r))
		return ~0U;
	return r;
}

static void hmm_nanosleep(unsigned int n)
{
	struct timespec t;

	t.tv_sec = 0;
	t.tv_nsec = n;
	nanosleep(&t, NULL);
}

static int hmm_migrate_sys_to_dev(int fd,
				   struct hmm_buffer *buffer,
				   unsigned long npages)
{
	return hmm_dmirror_cmd(fd, HMM_DMIRROR_MIGRATE_TO_DEV, buffer, npages);
}

static int hmm_migrate_dev_to_sys(int fd,
				   struct hmm_buffer *buffer,
				   unsigned long npages)
{
	return hmm_dmirror_cmd(fd, HMM_DMIRROR_MIGRATE_TO_SYS, buffer, npages);
}

/*
 * Simple NULL test of device open/close.
 */
TEST_F(hmm, open_close)
{
}

/*
 * Read private anonymous memory.
 */
TEST_F(hmm, anon_read)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;
	int val;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/*
	 * Initialize buffer in system memory but leave the first two pages
	 * zero (pte_none and pfn_zero).
	 */
	i = 2 * self->page_size / sizeof(*ptr);
	for (ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Set buffer permission to read-only. */
	ret = mprotect(buffer->ptr, size, PROT_READ);
	ASSERT_EQ(ret, 0);

	/* Populate the CPU page table with a special zero page. */
	val = *(int *)(buffer->ptr + self->page_size);
	ASSERT_EQ(val, 0);

	/* Simulate a device reading system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device read. */
	ptr = buffer->mirror;
	for (i = 0; i < 2 * self->page_size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], 0);
	for (; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}

/*
 * Read private anonymous memory which has been protected with
 * mprotect() PROT_NONE.
 */
TEST_F(hmm, anon_read_prot)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Initialize mirror buffer so we can verify it isn't written. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = -i;

	/* Protect buffer from reading. */
	ret = mprotect(buffer->ptr, size, PROT_NONE);
	ASSERT_EQ(ret, 0);

	/* Simulate a device reading system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, npages);
	ASSERT_EQ(ret, -EFAULT);

	/* Allow CPU to read the buffer so we can check it. */
	ret = mprotect(buffer->ptr, size, PROT_READ);
	ASSERT_EQ(ret, 0);
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	/* Check what the device read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], -i);

	hmm_buffer_free(buffer);
}

/*
 * Write private anonymous memory.
 */
TEST_F(hmm, anon_write)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize data that the device will write to buffer->ptr. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}

/*
 * Write private anonymous memory which has been protected with
 * mprotect() PROT_READ.
 */
TEST_F(hmm, anon_write_prot)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Simulate a device reading a zero page of memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, 1);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, 1);
	ASSERT_EQ(buffer->faults, 1);

	/* Initialize data that the device will write to buffer->ptr. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, -EPERM);

	/* Check that the device did not write anything. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], 0);

	/* Now allow writing and see that the zero page is replaced. */
	ret = mprotect(buffer->ptr, size, PROT_WRITE | PROT_READ);
	ASSERT_EQ(ret, 0);

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}

/*
 * Check that a device writing an anonymous private mapping
 * will copy-on-write if a child process inherits the mapping.
 */
TEST_F(hmm, anon_write_child)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	pid_t pid;
	int child_fd;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer->ptr so we can tell if it is written. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Initialize data that the device will write to buffer->ptr. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = -i;

	pid = fork();
	ASSERT_NE(pid, -1);
	if (pid != 0) {
		waitpid(pid, &ret, 0);
		ASSERT_EQ(WIFEXITED(ret), 1);

		/* Check that the parent's buffer did not change. */
		for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
			ASSERT_EQ(ptr[i], i);
		return;
	}

	/* Check that we see the parent's values. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], -i);

	/* The child process needs its own mirror to its own mm. */
	child_fd = hmm_open(0);
	ASSERT_GE(child_fd, 0);

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(child_fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], -i);

	close(child_fd);
	exit(0);
}

/*
 * Check that a device writing an anonymous shared mapping
 * will not copy-on-write if a child process inherits the mapping.
 */
TEST_F(hmm, anon_write_child_shared)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	pid_t pid;
	int child_fd;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_SHARED | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer->ptr so we can tell if it is written. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Initialize data that the device will write to buffer->ptr. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = -i;

	pid = fork();
	ASSERT_NE(pid, -1);
	if (pid != 0) {
		waitpid(pid, &ret, 0);
		ASSERT_EQ(WIFEXITED(ret), 1);

		/* Check that the parent's buffer did change. */
		for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
			ASSERT_EQ(ptr[i], -i);
		return;
	}

	/* Check that we see the parent's values. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], -i);

	/* The child process needs its own mirror to its own mm. */
	child_fd = hmm_open(0);
	ASSERT_GE(child_fd, 0);

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(child_fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], -i);

	close(child_fd);
	exit(0);
}

/*
 * Write private anonymous huge page.
 */
TEST_F(hmm, anon_write_huge)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	void *old_ptr;
	void *map;
	int *ptr;
	int ret;

	size = 2 * TWOMEG;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	size = TWOMEG;
	npages = size >> self->page_shift;
	map = (void *)ALIGN((uintptr_t)buffer->ptr, size);
	ret = madvise(map, size, MADV_HUGEPAGE);
	ASSERT_EQ(ret, 0);
	old_ptr = buffer->ptr;
	buffer->ptr = map;

	/* Initialize data that the device will write to buffer->ptr. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	buffer->ptr = old_ptr;
	hmm_buffer_free(buffer);
}

/*
 * Read numeric data from raw and tagged kernel status files.  Used to read
 * /proc and /sys data (without a tag) and from /proc/meminfo (with a tag).
 */
static long file_read_ulong(char *file, const char *tag)
{
	int fd;
	char buf[2048];
	int len;
	char *p, *q;
	long val;

	fd = open(file, O_RDONLY);
	if (fd < 0) {
		/* Error opening the file */
		return -1;
	}

	len = read(fd, buf, sizeof(buf));
	close(fd);
	if (len < 0) {
		/* Error in reading the file */
		return -1;
	}
	if (len == sizeof(buf)) {
		/* Error file is too large */
		return -1;
	}
	buf[len] = '\0';

	/* Search for a tag if provided */
	if (tag) {
		p = strstr(buf, tag);
		if (!p)
			return -1; /* looks like the line we want isn't there */
		p += strlen(tag);
	} else
		p = buf;

	val = strtol(p, &q, 0);
	if (*q != ' ') {
		/* Error parsing the file */
		return -1;
	}

	return val;
}
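
/*
 * For example, file_read_ulong("/proc/meminfo", "Hugepagesize:") returns
 * the default huge page size in kB; the hugetlbfs tests below scale it
 * to bytes.
 */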

/*
 * Write a huge TLBFS page.
 */
TEST_F(hmm, anon_write_hugetlbfs)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long default_hsize;
	unsigned long i;
	int *ptr;
	int ret;

	default_hsize = file_read_ulong("/proc/meminfo", "Hugepagesize:");
	if ((long)default_hsize < 0 || default_hsize*1024 < default_hsize)
		SKIP(return, "Huge page size could not be determined");
	default_hsize = default_hsize*1024; /* KB to B */

	size = ALIGN(TWOMEG, default_hsize);
	npages = size >> self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->ptr = mmap(NULL, size,
				   PROT_READ | PROT_WRITE,
				   MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
				   -1, 0);
	if (buffer->ptr == MAP_FAILED) {
		free(buffer);
		SKIP(return, "Huge page could not be allocated");
	}

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	/* Initialize data that the device will write to buffer->ptr. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	munmap(buffer->ptr, buffer->size);
	buffer->ptr = NULL;
	hmm_buffer_free(buffer);
}

/*
 * Read mmap'ed file memory.
 */
TEST_F(hmm, file_read)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;
	int fd;
	ssize_t len;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	fd = hmm_create_file(size);
	ASSERT_GE(fd, 0);

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = fd;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	/* Write initial contents of the file. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;
	len = pwrite(fd, buffer->mirror, size, 0);
	ASSERT_EQ(len, size);
	memset(buffer->mirror, 0, size);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ,
			   MAP_SHARED,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Simulate a device reading system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}

/*
 * Write mmap'ed file memory.
 */
TEST_F(hmm, file_write)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;
	int fd;
	ssize_t len;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	fd = hmm_create_file(size);
	ASSERT_GE(fd, 0);

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = fd;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_SHARED,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize data that the device will write to buffer->ptr. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	/* Check that the device also wrote the file. */
	len = pread(fd, buffer->mirror, size, 0);
	ASSERT_EQ(len, size);
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}

/*
 * Migrate anonymous memory to device private memory.
 */
TEST_F(hmm, migrate)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Migrate memory to device. */
	ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Check what the device read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}

/*
 * Migrate anonymous memory to device private memory and fault some of it back
 * to system memory, then try migrating the resulting mix of system and device
 * private memory to the device.
 */
TEST_F(hmm, migrate_fault)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Migrate memory to device. */
	ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Check what the device read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	/* Fault half the pages back to system memory and check them. */
	for (i = 0, ptr = buffer->ptr; i < size / (2 * sizeof(*ptr)); ++i)
		ASSERT_EQ(ptr[i], i);

	/* Migrate memory to the device again. */
	ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Check what the device read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}

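/*
 * Migrate anonymous memory to device private memory, release the device
 * memory, and check that pages can still be faulted back to system
 * memory afterwards.
 */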
TEST_F(hmm, migrate_release)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS, buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Migrate memory to device. */
	ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Check what the device read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	/* Release device memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_RELEASE, buffer, npages);
	ASSERT_EQ(ret, 0);

	/* Fault half the pages back to system memory and check them. */
	for (i = 0, ptr = buffer->ptr; i < size / (2 * sizeof(*ptr)); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}

/*
 * Migrate anonymous shared memory to device private memory.
 */
TEST_F(hmm, migrate_shared)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_SHARED | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/*
	 * Try to migrate memory to the device; shared anonymous memory
	 * cannot be migrated to device private memory, so expect failure.
	 */
	ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
	ASSERT_EQ(ret, -ENOENT);

	hmm_buffer_free(buffer);
}

/*
 * Try to migrate various memory types to device private memory.
 */
TEST_F(hmm2, migrate_mixed)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	int *ptr;
	unsigned char *p;
	int ret;
	int val;

	npages = 6;
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	/* Reserve a range of addresses. */
	buffer->ptr = mmap(NULL, size,
			   PROT_NONE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);
	p = buffer->ptr;

	/* Migrating a protected area should be an error. */
	ret = hmm_migrate_sys_to_dev(self->fd1, buffer, npages);
	ASSERT_EQ(ret, -EINVAL);

	/* Punch a hole after the first page address. */
	ret = munmap(buffer->ptr + self->page_size, self->page_size);
	ASSERT_EQ(ret, 0);

	/* We expect an error if the vma doesn't cover the range. */
	ret = hmm_migrate_sys_to_dev(self->fd1, buffer, 3);
	ASSERT_EQ(ret, -EINVAL);

	/* Page 2 will be a read-only zero page. */
	ret = mprotect(buffer->ptr + 2 * self->page_size, self->page_size,
				PROT_READ);
	ASSERT_EQ(ret, 0);
	ptr = (int *)(buffer->ptr + 2 * self->page_size);
	val = *ptr + 3;
	ASSERT_EQ(val, 3);

	/* Page 3 will be read-only. */
	ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
				PROT_READ | PROT_WRITE);
	ASSERT_EQ(ret, 0);
	ptr = (int *)(buffer->ptr + 3 * self->page_size);
	*ptr = val;
	ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
				PROT_READ);
	ASSERT_EQ(ret, 0);

	/* Pages 4-5 will be read-write. */
	ret = mprotect(buffer->ptr + 4 * self->page_size, 2 * self->page_size,
				PROT_READ | PROT_WRITE);
	ASSERT_EQ(ret, 0);
	ptr = (int *)(buffer->ptr + 4 * self->page_size);
	*ptr = val;
	ptr = (int *)(buffer->ptr + 5 * self->page_size);
	*ptr = val;

	/* Now try to migrate pages 2-5 to device 1. */
	buffer->ptr = p + 2 * self->page_size;
	ret = hmm_migrate_sys_to_dev(self->fd1, buffer, 4);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, 4);

	/* Page 5 won't be migrated to device 0 because it's on device 1. */
	buffer->ptr = p + 5 * self->page_size;
	ret = hmm_migrate_sys_to_dev(self->fd0, buffer, 1);
	ASSERT_EQ(ret, -ENOENT);
	buffer->ptr = p;
	hmm_buffer_free(buffer);
}

/*
 * Migrate anonymous memory to device memory and back to system memory
 * multiple times. With a device private configuration, the pages come
 * back via CPU page faults. With a device coherent configuration, the
 * pages must be explicitly migrated back to system memory, because the
 * CPU has coherent access to coherent device memory and therefore never
 * takes a page fault on it.
 */
TEST_F(hmm, migrate_multiple)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	unsigned long c;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	for (c = 0; c < NTIMES; c++) {
		buffer = malloc(sizeof(*buffer));
		ASSERT_NE(buffer, NULL);

		buffer->fd = -1;
		buffer->size = size;
		buffer->mirror = malloc(size);
		ASSERT_NE(buffer->mirror, NULL);

		buffer->ptr = mmap(NULL, size,
				   PROT_READ | PROT_WRITE,
				   MAP_PRIVATE | MAP_ANONYMOUS,
				   buffer->fd, 0);
		ASSERT_NE(buffer->ptr, MAP_FAILED);

		/* Initialize buffer in system memory. */
		for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
			ptr[i] = i;

		/* Migrate memory to device. */
		ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
		ASSERT_EQ(ret, 0);
		ASSERT_EQ(buffer->cpages, npages);

		/* Check what the device read. */
		for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
			ASSERT_EQ(ptr[i], i);

		/* Migrate back to system memory and check them. */
		if (hmm_is_coherent_type(variant->device_number)) {
			ret = hmm_migrate_dev_to_sys(self->fd, buffer, npages);
			ASSERT_EQ(ret, 0);
			ASSERT_EQ(buffer->cpages, npages);
		}

		for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
			ASSERT_EQ(ptr[i], i);

		hmm_buffer_free(buffer);
	}
}

/*
 * Read anonymous memory multiple times.
 */
TEST_F(hmm, anon_read_multiple)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	unsigned long c;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	for (c = 0; c < NTIMES; c++) {
		buffer = malloc(sizeof(*buffer));
		ASSERT_NE(buffer, NULL);

		buffer->fd = -1;
		buffer->size = size;
		buffer->mirror = malloc(size);
		ASSERT_NE(buffer->mirror, NULL);

		buffer->ptr = mmap(NULL, size,
				   PROT_READ | PROT_WRITE,
				   MAP_PRIVATE | MAP_ANONYMOUS,
				   buffer->fd, 0);
		ASSERT_NE(buffer->ptr, MAP_FAILED);

		/* Initialize buffer in system memory. */
		for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
			ptr[i] = i + c;

		/* Simulate a device reading system memory. */
		ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer,
				      npages);
		ASSERT_EQ(ret, 0);
		ASSERT_EQ(buffer->cpages, npages);
		ASSERT_EQ(buffer->faults, 1);

		/* Check what the device read. */
		for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
			ASSERT_EQ(ptr[i], i + c);

		hmm_buffer_free(buffer);
	}
}

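/*
 * Thread entry point: after a short random delay, unmap the second half
 * of the buffer out from under a concurrent device read.
 */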
void *unmap_buffer(void *p)
{
	struct hmm_buffer *buffer = p;

	/* Delay for a bit and then unmap buffer while it is being read. */
	hmm_nanosleep(hmm_random() % 32000);
	munmap(buffer->ptr + buffer->size / 2, buffer->size / 2);
	buffer->ptr = NULL;

	return NULL;
}

/*
 * Try reading anonymous memory while it is being unmapped.
 */
TEST_F(hmm, anon_teardown)
{
	unsigned long npages;
	unsigned long size;
	unsigned long c;
	void *ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	for (c = 0; c < NTIMES; ++c) {
		pthread_t thread;
		struct hmm_buffer *buffer;
		unsigned long i;
		int *ptr;
		int rc;

		buffer = malloc(sizeof(*buffer));
		ASSERT_NE(buffer, NULL);

		buffer->fd = -1;
		buffer->size = size;
		buffer->mirror = malloc(size);
		ASSERT_NE(buffer->mirror, NULL);

		buffer->ptr = mmap(NULL, size,
				   PROT_READ | PROT_WRITE,
				   MAP_PRIVATE | MAP_ANONYMOUS,
				   buffer->fd, 0);
		ASSERT_NE(buffer->ptr, MAP_FAILED);

		/* Initialize buffer in system memory. */
		for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
			ptr[i] = i + c;

		rc = pthread_create(&thread, NULL, unmap_buffer, buffer);
		ASSERT_EQ(rc, 0);

		/* Simulate a device reading system memory. */
		rc = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer,
				     npages);
		if (rc == 0) {
			ASSERT_EQ(buffer->cpages, npages);
			ASSERT_EQ(buffer->faults, 1);

			/* Check what the device read. */
			for (i = 0, ptr = buffer->mirror;
			     i < size / sizeof(*ptr);
			     ++i)
				ASSERT_EQ(ptr[i], i + c);
		}

		pthread_join(thread, &ret);
		hmm_buffer_free(buffer);
	}
}

/*
 * Test memory snapshot of a mixed map (a private mapping of the dmirror
 * device file) without faulting in the pages accessed by the device.
 */
TEST_F(hmm, mixedmap)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned char *m;
	int ret;

	npages = 1;
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(npages);
	ASSERT_NE(buffer->mirror, NULL);

	/* Map the dmirror device itself rather than anonymous memory. */
	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE,
			   self->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Simulate a device snapshotting CPU pagetables. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Check what the device saw. */
	m = buffer->mirror;
	ASSERT_EQ(m[0], HMM_DMIRROR_PROT_READ);

	hmm_buffer_free(buffer);
}

/*
 * Test memory snapshot without faulting in pages accessed by the device.
 */
TEST_F(hmm2, snapshot)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	int *ptr;
	unsigned char *p;
	unsigned char *m;
	int ret;
	int val;

	npages = 7;
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(npages);
	ASSERT_NE(buffer->mirror, NULL);

	/* Reserve a range of addresses. */
	buffer->ptr = mmap(NULL, size,
			   PROT_NONE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);
	p = buffer->ptr;

	/* Punch a hole after the first page address. */
	ret = munmap(buffer->ptr + self->page_size, self->page_size);
	ASSERT_EQ(ret, 0);

	/* Page 2 will be a read-only zero page. */
	ret = mprotect(buffer->ptr + 2 * self->page_size, self->page_size,
				PROT_READ);
	ASSERT_EQ(ret, 0);
	ptr = (int *)(buffer->ptr + 2 * self->page_size);
	val = *ptr + 3;
	ASSERT_EQ(val, 3);

	/* Page 3 will be read-only. */
	ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
				PROT_READ | PROT_WRITE);
	ASSERT_EQ(ret, 0);
	ptr = (int *)(buffer->ptr + 3 * self->page_size);
	*ptr = val;
	ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
				PROT_READ);
	ASSERT_EQ(ret, 0);

	/* Pages 4-6 will be read-write. */
	ret = mprotect(buffer->ptr + 4 * self->page_size, 3 * self->page_size,
				PROT_READ | PROT_WRITE);
	ASSERT_EQ(ret, 0);
	ptr = (int *)(buffer->ptr + 4 * self->page_size);
	*ptr = val;

	/* Page 5 will be migrated to device 0. */
	buffer->ptr = p + 5 * self->page_size;
	ret = hmm_migrate_sys_to_dev(self->fd0, buffer, 1);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, 1);

	/* Page 6 will be migrated to device 1. */
	buffer->ptr = p + 6 * self->page_size;
	ret = hmm_migrate_sys_to_dev(self->fd1, buffer, 1);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, 1);

	/* Simulate a device snapshotting CPU pagetables. */
	buffer->ptr = p;
	ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_SNAPSHOT, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Check what the device saw. */
	m = buffer->mirror;
	ASSERT_EQ(m[0], HMM_DMIRROR_PROT_ERROR);
	ASSERT_EQ(m[1], HMM_DMIRROR_PROT_ERROR);
	ASSERT_EQ(m[2], HMM_DMIRROR_PROT_ZERO | HMM_DMIRROR_PROT_READ);
	ASSERT_EQ(m[3], HMM_DMIRROR_PROT_READ);
	ASSERT_EQ(m[4], HMM_DMIRROR_PROT_WRITE);
	if (!hmm_is_coherent_type(variant->device_number0)) {
		ASSERT_EQ(m[5], HMM_DMIRROR_PROT_DEV_PRIVATE_LOCAL |
				HMM_DMIRROR_PROT_WRITE);
		ASSERT_EQ(m[6], HMM_DMIRROR_PROT_NONE);
	} else {
		ASSERT_EQ(m[5], HMM_DMIRROR_PROT_DEV_COHERENT_LOCAL |
				HMM_DMIRROR_PROT_WRITE);
		ASSERT_EQ(m[6], HMM_DMIRROR_PROT_DEV_COHERENT_REMOTE |
				HMM_DMIRROR_PROT_WRITE);
	}

	hmm_buffer_free(buffer);
}

/*
 * Test the hmm_range_fault() HMM_PFN_PMD flag for large pages that
 * should be mapped by a large page table entry.
 */
TEST_F(hmm, compound)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long default_hsize;
	int *ptr;
	unsigned char *m;
	int ret;
	unsigned long i;

	/* Skip test if we can't allocate a hugetlbfs page. */

	default_hsize = file_read_ulong("/proc/meminfo", "Hugepagesize:");
	if ((long)default_hsize < 0 || default_hsize*1024 < default_hsize)
		SKIP(return, "Huge page size could not be determined");
	default_hsize = default_hsize*1024; /* KB to B */

	size = ALIGN(TWOMEG, default_hsize);
	npages = size >> self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->ptr = mmap(NULL, size,
				   PROT_READ | PROT_WRITE,
				   MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
				   -1, 0);
	if (buffer->ptr == MAP_FAILED) {
		free(buffer);
		SKIP(return, "Huge page could not be allocated");
	}

	buffer->size = size;
	buffer->mirror = malloc(npages);
	ASSERT_NE(buffer->mirror, NULL);

	/* Initialize the pages the device will snapshot in buffer->ptr. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Simulate a device snapshotting CPU pagetables. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Check what the device saw. */
	m = buffer->mirror;
	for (i = 0; i < npages; ++i)
		ASSERT_EQ(m[i], HMM_DMIRROR_PROT_WRITE |
				HMM_DMIRROR_PROT_PMD);

	/* Make the region read-only. */
	ret = mprotect(buffer->ptr, size, PROT_READ);
	ASSERT_EQ(ret, 0);

	/* Simulate a device snapshotting CPU pagetables. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Check what the device saw. */
	m = buffer->mirror;
	for (i = 0; i < npages; ++i)
		ASSERT_EQ(m[i], HMM_DMIRROR_PROT_READ |
				HMM_DMIRROR_PROT_PMD);

	munmap(buffer->ptr, buffer->size);
	buffer->ptr = NULL;
	hmm_buffer_free(buffer);
}

/*
 * Test two devices reading the same memory (double mapped).
 */
TEST_F(hmm2, double_map)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = 6;
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	/* Reserve a range of addresses. */
	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Make region read-only. */
	ret = mprotect(buffer->ptr, size, PROT_READ);
	ASSERT_EQ(ret, 0);

	/* Simulate device 0 reading system memory. */
	ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_READ, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	/* Simulate device 1 reading system memory. */
	ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_READ, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	/* Migrate pages to device 1 and try to read from device 0. */
	ret = hmm_migrate_sys_to_dev(self->fd1, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_READ, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what device 0 read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}

/*
 * Basic check of exclusive faulting.
 */
TEST_F(hmm, exclusive)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Map memory exclusively for device access. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_EXCLUSIVE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Check what the device read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	/* Fault pages back to system memory and check them. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i]++, i);

	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i+1);

	/* Check that atomic (exclusive) access has been revoked. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_CHECK_EXCLUSIVE, buffer, npages);
	ASSERT_EQ(ret, 0);

	hmm_buffer_free(buffer);
}

TEST_F(hmm, exclusive_mprotect)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Map memory exclusively for device access. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_EXCLUSIVE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Check what the device read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	ret = mprotect(buffer->ptr, size, PROT_READ);
	ASSERT_EQ(ret, 0);

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, -EPERM);

	hmm_buffer_free(buffer);
}

/*
 * Check that copy-on-write works.
 */
TEST_F(hmm, exclusive_cow)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Map memory exclusively for device access. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_EXCLUSIVE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

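	/*
	 * Fork without checking the pid: both the parent and the child
	 * run the loops below, so each process must fault the exclusive
	 * pages back and take its own copy-on-write copy.
	 */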
	fork();

	/* Fault pages back to system memory and check them. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i]++, i);

	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i+1);

	hmm_buffer_free(buffer);
}

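/*
 * Issue a single gup_test ioctl. FOLL_WRITE is always set so that GUP
 * faults the pages for writing; extra flags such as FOLL_LONGTERM come
 * from the caller.
 */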
static int gup_test_exec(int gup_fd, unsigned long addr, int cmd,
			 int npages, int size, int flags)
{
	struct gup_test gup = {
		.nr_pages_per_call	= npages,
		.addr			= addr,
		.gup_flags		= FOLL_WRITE | flags,
		.size			= size,
	};

	if (ioctl(gup_fd, cmd, &gup)) {
		perror("gup_test ioctl failed");
		return errno;
	}

	return 0;
}

/*
 * Test getting user device pages through gup_test with the PIN_LONGTERM
 * flag set. This should trigger a migration back to system memory for
 * both private and coherent type pages.
 * This test makes use of the gup_test module. Make sure CONFIG_GUP_TEST
 * is enabled in your kernel configuration before running it.
 */
TEST_F(hmm, hmm_gup_test)
{
	struct hmm_buffer *buffer;
	int gup_fd;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;
	unsigned char *m;

	gup_fd = open("/sys/kernel/debug/gup_test", O_RDWR);
	if (gup_fd == -1)
		SKIP(return, "Skipping test, could not find gup_test driver");

	npages = 4;
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Migrate memory to device. */
	ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	/* Check what the device read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	ASSERT_EQ(gup_test_exec(gup_fd,
				(unsigned long)buffer->ptr,
				GUP_BASIC_TEST, 1, self->page_size, 0), 0);
	ASSERT_EQ(gup_test_exec(gup_fd,
				(unsigned long)buffer->ptr + 1 * self->page_size,
				GUP_FAST_BENCHMARK, 1, self->page_size, 0), 0);
	ASSERT_EQ(gup_test_exec(gup_fd,
				(unsigned long)buffer->ptr + 2 * self->page_size,
				PIN_FAST_BENCHMARK, 1, self->page_size, FOLL_LONGTERM), 0);
	ASSERT_EQ(gup_test_exec(gup_fd,
				(unsigned long)buffer->ptr + 3 * self->page_size,
				PIN_LONGTERM_BENCHMARK, 1, self->page_size, 0), 0);
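
	/*
	 * The PIN_* calls above take longterm pins, which are expected to
	 * migrate device pages back to system memory; the snapshot below
	 * checks where each page ended up.
	 */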

	/* Take a snapshot of the CPU pagetables. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	m = buffer->mirror;
	if (hmm_is_coherent_type(variant->device_number)) {
		ASSERT_EQ(HMM_DMIRROR_PROT_DEV_COHERENT_LOCAL | HMM_DMIRROR_PROT_WRITE, m[0]);
		ASSERT_EQ(HMM_DMIRROR_PROT_DEV_COHERENT_LOCAL | HMM_DMIRROR_PROT_WRITE, m[1]);
	} else {
		ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[0]);
		ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[1]);
	}
	ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[2]);
	ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[3]);
	/*
	 * Check the page contents again to make sure no data was
	 * corrupted.
	 */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	close(gup_fd);
	hmm_buffer_free(buffer);
}

/*
 * Test copy-on-write of device pages.
 * When the CPU writes to a COW device private page, a page fault first
 * migrates the page back to system memory, and the write then duplicates
 * it. For COW device coherent pages, the duplication is done directly
 * from device memory.
 */
TEST_F(hmm, hmm_cow_in_device)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;
	unsigned char *m;
	pid_t pid;
	int status;

	npages = 4;
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Migrate memory to device. */
	ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	pid = fork();
	ASSERT_NE(pid, -1);
	if (!pid) {
		/* The child process waits for SIGTERM from the parent. */
		while (1) {
		}
		fprintf(stderr, "Should not reach this\n");
		exit(0);
	}

	/*
	 * The parent process writes to the COW page(s) and gets a new
	 * copy in system memory. For device private pages, this write
	 * first causes a migration back to system memory.
	 */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Terminate the child and wait for it. */
	EXPECT_EQ(0, kill(pid, SIGTERM));
	EXPECT_EQ(pid, waitpid(pid, &status, 0));
	EXPECT_NE(0, WIFSIGNALED(status));
	EXPECT_EQ(SIGTERM, WTERMSIG(status));

	/* Take a snapshot of the CPU pagetables. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	m = buffer->mirror;
	for (i = 0; i < npages; i++)
		ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[i]);

	hmm_buffer_free(buffer);
}

TEST_HARNESS_MAIN