xref: /openbmc/linux/tools/testing/selftests/mm/hmm-tests.c (revision baa489fabd01596d5426d6e112b34ba5fb59ab82)
1*baa489faSSeongJae Park // SPDX-License-Identifier: GPL-2.0
2*baa489faSSeongJae Park /*
3*baa489faSSeongJae Park  * HMM stands for Heterogeneous Memory Management, it is a helper layer inside
4*baa489faSSeongJae Park  * the linux kernel to help device drivers mirror a process address space in
5*baa489faSSeongJae Park  * the device. This allows the device to use the same address space which
6*baa489faSSeongJae Park  * makes communication and data exchange a lot easier.
7*baa489faSSeongJae Park  *
8*baa489faSSeongJae Park  * This framework's sole purpose is to exercise various code paths inside
9*baa489faSSeongJae Park  * the kernel to make sure that HMM performs as expected and to flush out any
10*baa489faSSeongJae Park  * bugs.
11*baa489faSSeongJae Park  */
12*baa489faSSeongJae Park 
13*baa489faSSeongJae Park #include "../kselftest_harness.h"
14*baa489faSSeongJae Park 
15*baa489faSSeongJae Park #include <errno.h>
16*baa489faSSeongJae Park #include <fcntl.h>
17*baa489faSSeongJae Park #include <stdio.h>
18*baa489faSSeongJae Park #include <stdlib.h>
19*baa489faSSeongJae Park #include <stdint.h>
20*baa489faSSeongJae Park #include <unistd.h>
21*baa489faSSeongJae Park #include <strings.h>
22*baa489faSSeongJae Park #include <time.h>
23*baa489faSSeongJae Park #include <pthread.h>
24*baa489faSSeongJae Park #include <sys/types.h>
25*baa489faSSeongJae Park #include <sys/stat.h>
26*baa489faSSeongJae Park #include <sys/mman.h>
27*baa489faSSeongJae Park #include <sys/ioctl.h>
28*baa489faSSeongJae Park 
29*baa489faSSeongJae Park 
30*baa489faSSeongJae Park /*
31*baa489faSSeongJae Park  * This is a private UAPI to the kernel test module so it isn't exported
32*baa489faSSeongJae Park  * in the usual include/uapi/... directory.
33*baa489faSSeongJae Park  */
34*baa489faSSeongJae Park #include <lib/test_hmm_uapi.h>
35*baa489faSSeongJae Park #include <mm/gup_test.h>
36*baa489faSSeongJae Park 
/*
 * Describes one test allocation: a CPU mapping plus a "mirror" buffer
 * that stands in for the device's view of the same memory.
 */
struct hmm_buffer {
	void		*ptr;		/* CPU address of the mmap()ed region */
	void		*mirror;	/* malloc()ed buffer used as the device side */
	unsigned long	size;		/* size of both regions, in bytes */
	int		fd;		/* backing file descriptor, -1 for anonymous */
	uint64_t	cpages;		/* pages touched, reported by the last ioctl */
	uint64_t	faults;		/* faults taken, reported by the last ioctl */
};
45*baa489faSSeongJae Park 
/* Unit numbers passed to hmm_open() to select a dmirror test device. */
enum {
	HMM_PRIVATE_DEVICE_ONE,
	HMM_PRIVATE_DEVICE_TWO,
	HMM_COHERENCE_DEVICE_ONE,
	HMM_COHERENCE_DEVICE_TWO,
};
52*baa489faSSeongJae Park 
#define TWOMEG		(1 << 21)
#define HMM_BUFFER_SIZE (1024 << 12)
#define HMM_PATH_MAX    64
#define NTIMES		10

/*
 * Round @x up to the next multiple of @a (@a is expected to be a power
 * of two).  Both uses of @a are fully parenthesized; the original
 * expanded the first occurrence as (a - 1), which mis-associates when
 * @a is a low-precedence expression such as a bitwise OR.
 */
#define ALIGN(x, a) (((x) + ((a) - 1)) & (~((a) - 1)))
/* Just the flags we need, copied from mm.h: */
#define FOLL_WRITE	0x01	/* check pte is writable */
#define FOLL_LONGTERM   0x10000 /* mapping lifetime is indefinite */
62*baa489faSSeongJae Park 
/* Per-test state for the single-device tests: one open dmirror device. */
FIXTURE(hmm)
{
	int		fd;		/* fd of /dev/hmm_dmirror<N> */
	unsigned int	page_size;	/* system page size from sysconf() */
	unsigned int	page_shift;	/* log2(page_size) */
};
69*baa489faSSeongJae Park 
/* Each variant selects which dmirror device the hmm fixture opens. */
FIXTURE_VARIANT(hmm)
{
	int     device_number;
};

/* Run the hmm tests against a device-private-memory device. */
FIXTURE_VARIANT_ADD(hmm, hmm_device_private)
{
	.device_number = HMM_PRIVATE_DEVICE_ONE,
};

/* Run the hmm tests against a coherent (DEVICE_COHERENT) device. */
FIXTURE_VARIANT_ADD(hmm, hmm_device_coherent)
{
	.device_number = HMM_COHERENCE_DEVICE_ONE,
};
84*baa489faSSeongJae Park 
/* Per-test state for the two-device tests: two open dmirror devices. */
FIXTURE(hmm2)
{
	int		fd0;		/* fd of the first dmirror device */
	int		fd1;		/* fd of the second dmirror device */
	unsigned int	page_size;	/* system page size from sysconf() */
	unsigned int	page_shift;	/* log2(page_size) */
};
92*baa489faSSeongJae Park 
/* Each variant selects the pair of dmirror devices the hmm2 fixture opens. */
FIXTURE_VARIANT(hmm2)
{
	int     device_number0;
	int     device_number1;
};

/* Run the hmm2 tests against the two device-private-memory devices. */
FIXTURE_VARIANT_ADD(hmm2, hmm2_device_private)
{
	.device_number0 = HMM_PRIVATE_DEVICE_ONE,
	.device_number1 = HMM_PRIVATE_DEVICE_TWO,
};

/* Run the hmm2 tests against the two coherent (DEVICE_COHERENT) devices. */
FIXTURE_VARIANT_ADD(hmm2, hmm2_device_coherent)
{
	.device_number0 = HMM_COHERENCE_DEVICE_ONE,
	.device_number1 = HMM_COHERENCE_DEVICE_TWO,
};
110*baa489faSSeongJae Park 
111*baa489faSSeongJae Park static int hmm_open(int unit)
112*baa489faSSeongJae Park {
113*baa489faSSeongJae Park 	char pathname[HMM_PATH_MAX];
114*baa489faSSeongJae Park 	int fd;
115*baa489faSSeongJae Park 
116*baa489faSSeongJae Park 	snprintf(pathname, sizeof(pathname), "/dev/hmm_dmirror%d", unit);
117*baa489faSSeongJae Park 	fd = open(pathname, O_RDWR, 0);
118*baa489faSSeongJae Park 	if (fd < 0)
119*baa489faSSeongJae Park 		fprintf(stderr, "could not open hmm dmirror driver (%s)\n",
120*baa489faSSeongJae Park 			pathname);
121*baa489faSSeongJae Park 	return fd;
122*baa489faSSeongJae Park }
123*baa489faSSeongJae Park 
124*baa489faSSeongJae Park static bool hmm_is_coherent_type(int dev_num)
125*baa489faSSeongJae Park {
126*baa489faSSeongJae Park 	return (dev_num >= HMM_COHERENCE_DEVICE_ONE);
127*baa489faSSeongJae Park }
128*baa489faSSeongJae Park 
/*
 * Open the variant's dmirror device before each hmm test.  A missing
 * coherent device means the kernel lacks DEVICE_COHERENT support, so
 * those variants are skipped rather than failed.
 */
FIXTURE_SETUP(hmm)
{
	self->page_size = sysconf(_SC_PAGE_SIZE);
	/* ffs() returns the 1-based index of the lowest set bit. */
	self->page_shift = ffs(self->page_size) - 1;

	self->fd = hmm_open(variant->device_number);
	if (self->fd < 0 && hmm_is_coherent_type(variant->device_number))
		SKIP(exit(0), "DEVICE_COHERENT not available");
	ASSERT_GE(self->fd, 0);
}
139*baa489faSSeongJae Park 
/*
 * Open both of the variant's dmirror devices before each hmm2 test.
 * As with the hmm fixture, a missing coherent device means the kernel
 * lacks DEVICE_COHERENT support and the variant is skipped.
 */
FIXTURE_SETUP(hmm2)
{
	self->page_size = sysconf(_SC_PAGE_SIZE);
	/* ffs() returns the 1-based index of the lowest set bit. */
	self->page_shift = ffs(self->page_size) - 1;

	self->fd0 = hmm_open(variant->device_number0);
	if (self->fd0 < 0 && hmm_is_coherent_type(variant->device_number0))
		SKIP(exit(0), "DEVICE_COHERENT not available");
	ASSERT_GE(self->fd0, 0);
	self->fd1 = hmm_open(variant->device_number1);
	ASSERT_GE(self->fd1, 0);
}
152*baa489faSSeongJae Park 
/* Close the dmirror device after each hmm test. */
FIXTURE_TEARDOWN(hmm)
{
	int ret = close(self->fd);

	ASSERT_EQ(ret, 0);
	self->fd = -1;
}
160*baa489faSSeongJae Park 
/* Close both dmirror devices after each hmm2 test. */
FIXTURE_TEARDOWN(hmm2)
{
	int ret = close(self->fd0);

	ASSERT_EQ(ret, 0);
	self->fd0 = -1;

	ret = close(self->fd1);
	ASSERT_EQ(ret, 0);
	self->fd1 = -1;
}
172*baa489faSSeongJae Park 
173*baa489faSSeongJae Park static int hmm_dmirror_cmd(int fd,
174*baa489faSSeongJae Park 			   unsigned long request,
175*baa489faSSeongJae Park 			   struct hmm_buffer *buffer,
176*baa489faSSeongJae Park 			   unsigned long npages)
177*baa489faSSeongJae Park {
178*baa489faSSeongJae Park 	struct hmm_dmirror_cmd cmd;
179*baa489faSSeongJae Park 	int ret;
180*baa489faSSeongJae Park 
181*baa489faSSeongJae Park 	/* Simulate a device reading system memory. */
182*baa489faSSeongJae Park 	cmd.addr = (__u64)buffer->ptr;
183*baa489faSSeongJae Park 	cmd.ptr = (__u64)buffer->mirror;
184*baa489faSSeongJae Park 	cmd.npages = npages;
185*baa489faSSeongJae Park 
186*baa489faSSeongJae Park 	for (;;) {
187*baa489faSSeongJae Park 		ret = ioctl(fd, request, &cmd);
188*baa489faSSeongJae Park 		if (ret == 0)
189*baa489faSSeongJae Park 			break;
190*baa489faSSeongJae Park 		if (errno == EINTR)
191*baa489faSSeongJae Park 			continue;
192*baa489faSSeongJae Park 		return -errno;
193*baa489faSSeongJae Park 	}
194*baa489faSSeongJae Park 	buffer->cpages = cmd.cpages;
195*baa489faSSeongJae Park 	buffer->faults = cmd.faults;
196*baa489faSSeongJae Park 
197*baa489faSSeongJae Park 	return 0;
198*baa489faSSeongJae Park }
199*baa489faSSeongJae Park 
200*baa489faSSeongJae Park static void hmm_buffer_free(struct hmm_buffer *buffer)
201*baa489faSSeongJae Park {
202*baa489faSSeongJae Park 	if (buffer == NULL)
203*baa489faSSeongJae Park 		return;
204*baa489faSSeongJae Park 
205*baa489faSSeongJae Park 	if (buffer->ptr)
206*baa489faSSeongJae Park 		munmap(buffer->ptr, buffer->size);
207*baa489faSSeongJae Park 	free(buffer->mirror);
208*baa489faSSeongJae Park 	free(buffer);
209*baa489faSSeongJae Park }
210*baa489faSSeongJae Park 
211*baa489faSSeongJae Park /*
212*baa489faSSeongJae Park  * Create a temporary file that will be deleted on close.
213*baa489faSSeongJae Park  */
214*baa489faSSeongJae Park static int hmm_create_file(unsigned long size)
215*baa489faSSeongJae Park {
216*baa489faSSeongJae Park 	char path[HMM_PATH_MAX];
217*baa489faSSeongJae Park 	int fd;
218*baa489faSSeongJae Park 
219*baa489faSSeongJae Park 	strcpy(path, "/tmp");
220*baa489faSSeongJae Park 	fd = open(path, O_TMPFILE | O_EXCL | O_RDWR, 0600);
221*baa489faSSeongJae Park 	if (fd >= 0) {
222*baa489faSSeongJae Park 		int r;
223*baa489faSSeongJae Park 
224*baa489faSSeongJae Park 		do {
225*baa489faSSeongJae Park 			r = ftruncate(fd, size);
226*baa489faSSeongJae Park 		} while (r == -1 && errno == EINTR);
227*baa489faSSeongJae Park 		if (!r)
228*baa489faSSeongJae Park 			return fd;
229*baa489faSSeongJae Park 		close(fd);
230*baa489faSSeongJae Park 	}
231*baa489faSSeongJae Park 	return -1;
232*baa489faSSeongJae Park }
233*baa489faSSeongJae Park 
/*
 * Return a random unsigned number.
 *
 * Reads from /dev/urandom through a cached file descriptor.  Returns
 * ~0U when the device cannot be opened or a full read cannot be
 * obtained; the original ignored the read() result and could return an
 * uninitialized value (undefined behavior) on a short or failed read.
 */
static unsigned int hmm_random(void)
{
	static int fd = -1;
	unsigned int r;

	if (fd < 0) {
		fd = open("/dev/urandom", O_RDONLY);
		if (fd < 0) {
			fprintf(stderr, "%s:%d failed to open /dev/urandom\n",
					__FILE__, __LINE__);
			return ~0U;
		}
	}
	if (read(fd, &r, sizeof(r)) != sizeof(r))
		return ~0U;
	return r;
}
253*baa489faSSeongJae Park 
/*
 * Sleep for @n nanoseconds.
 *
 * nanosleep() rejects tv_nsec values of one second or more with
 * EINVAL; the original passed @n straight into tv_nsec and silently
 * slept not at all for n >= 1e9.  Split @n into whole seconds and the
 * nanosecond remainder instead (identical behavior for n < 1e9).
 */
static void hmm_nanosleep(unsigned int n)
{
	struct timespec t;

	t.tv_sec = n / 1000000000U;
	t.tv_nsec = n % 1000000000U;
	nanosleep(&t, NULL);
}
262*baa489faSSeongJae Park 
/* Ask the dmirror device to migrate @npages of @buffer into device memory. */
static int hmm_migrate_sys_to_dev(int fd,
				   struct hmm_buffer *buffer,
				   unsigned long npages)
{
	return hmm_dmirror_cmd(fd, HMM_DMIRROR_MIGRATE_TO_DEV, buffer, npages);
}
269*baa489faSSeongJae Park 
/* Ask the dmirror device to migrate @npages of @buffer back to system memory. */
static int hmm_migrate_dev_to_sys(int fd,
				   struct hmm_buffer *buffer,
				   unsigned long npages)
{
	return hmm_dmirror_cmd(fd, HMM_DMIRROR_MIGRATE_TO_SYS, buffer, npages);
}
276*baa489faSSeongJae Park 
/*
 * Simple NULL test of device open/close.
 */
TEST_F(hmm, open_close)
{
	/* Fixture setup/teardown do all the work: open and close the device. */
}
283*baa489faSSeongJae Park 
/*
 * Read private anonymous memory.
 *
 * The first two pages are deliberately left untouched so the device
 * read also covers an empty PTE (page 0) and the shared zero page
 * (page 1, faulted in read-only by the CPU below); the rest of the
 * buffer holds an initialized pattern.
 */
TEST_F(hmm, anon_read)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;
	int val;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/*
	 * Initialize buffer in system memory but leave the first two pages
	 * zero (pte_none and pfn_zero).
	 */
	i = 2 * self->page_size / sizeof(*ptr);
	for (ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Set buffer permission to read-only. */
	ret = mprotect(buffer->ptr, size, PROT_READ);
	ASSERT_EQ(ret, 0);

	/* Populate the CPU page table with a special zero page. */
	val = *(int *)(buffer->ptr + self->page_size);
	ASSERT_EQ(val, 0);

	/* Simulate a device reading system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device read: zeroes first, then the pattern. */
	ptr = buffer->mirror;
	for (i = 0; i < 2 * self->page_size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], 0);
	for (; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}
346*baa489faSSeongJae Park 
/*
 * Read private anonymous memory which has been protected with
 * mprotect() PROT_NONE.
 *
 * The device read must fail with -EFAULT and must leave the mirror
 * buffer untouched.
 */
TEST_F(hmm, anon_read_prot)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Initialize mirror buffer so we can verify it isn't written. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = -i;

	/* Protect buffer from reading. */
	ret = mprotect(buffer->ptr, size, PROT_NONE);
	ASSERT_EQ(ret, 0);

	/* Simulate a device reading system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, npages);
	ASSERT_EQ(ret, -EFAULT);

	/* Allow CPU to read the buffer so we can check it. */
	ret = mprotect(buffer->ptr, size, PROT_READ);
	ASSERT_EQ(ret, 0);
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	/* Check what the device read: the mirror must still hold -i. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], -i);

	hmm_buffer_free(buffer);
}
406*baa489faSSeongJae Park 
407*baa489faSSeongJae Park /*
408*baa489faSSeongJae Park  * Write private anonymous memory.
409*baa489faSSeongJae Park  */
410*baa489faSSeongJae Park TEST_F(hmm, anon_write)
411*baa489faSSeongJae Park {
412*baa489faSSeongJae Park 	struct hmm_buffer *buffer;
413*baa489faSSeongJae Park 	unsigned long npages;
414*baa489faSSeongJae Park 	unsigned long size;
415*baa489faSSeongJae Park 	unsigned long i;
416*baa489faSSeongJae Park 	int *ptr;
417*baa489faSSeongJae Park 	int ret;
418*baa489faSSeongJae Park 
419*baa489faSSeongJae Park 	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
420*baa489faSSeongJae Park 	ASSERT_NE(npages, 0);
421*baa489faSSeongJae Park 	size = npages << self->page_shift;
422*baa489faSSeongJae Park 
423*baa489faSSeongJae Park 	buffer = malloc(sizeof(*buffer));
424*baa489faSSeongJae Park 	ASSERT_NE(buffer, NULL);
425*baa489faSSeongJae Park 
426*baa489faSSeongJae Park 	buffer->fd = -1;
427*baa489faSSeongJae Park 	buffer->size = size;
428*baa489faSSeongJae Park 	buffer->mirror = malloc(size);
429*baa489faSSeongJae Park 	ASSERT_NE(buffer->mirror, NULL);
430*baa489faSSeongJae Park 
431*baa489faSSeongJae Park 	buffer->ptr = mmap(NULL, size,
432*baa489faSSeongJae Park 			   PROT_READ | PROT_WRITE,
433*baa489faSSeongJae Park 			   MAP_PRIVATE | MAP_ANONYMOUS,
434*baa489faSSeongJae Park 			   buffer->fd, 0);
435*baa489faSSeongJae Park 	ASSERT_NE(buffer->ptr, MAP_FAILED);
436*baa489faSSeongJae Park 
437*baa489faSSeongJae Park 	/* Initialize data that the device will write to buffer->ptr. */
438*baa489faSSeongJae Park 	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
439*baa489faSSeongJae Park 		ptr[i] = i;
440*baa489faSSeongJae Park 
441*baa489faSSeongJae Park 	/* Simulate a device writing system memory. */
442*baa489faSSeongJae Park 	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
443*baa489faSSeongJae Park 	ASSERT_EQ(ret, 0);
444*baa489faSSeongJae Park 	ASSERT_EQ(buffer->cpages, npages);
445*baa489faSSeongJae Park 	ASSERT_EQ(buffer->faults, 1);
446*baa489faSSeongJae Park 
447*baa489faSSeongJae Park 	/* Check what the device wrote. */
448*baa489faSSeongJae Park 	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
449*baa489faSSeongJae Park 		ASSERT_EQ(ptr[i], i);
450*baa489faSSeongJae Park 
451*baa489faSSeongJae Park 	hmm_buffer_free(buffer);
452*baa489faSSeongJae Park }
453*baa489faSSeongJae Park 
/*
 * Write private anonymous memory which has been protected with
 * mprotect() PROT_READ.
 *
 * A device read of page 0 first maps in the zero page; the device
 * write must then fail with -EPERM and leave the buffer zeroed.  Once
 * PROT_WRITE is restored, the same write must succeed.
 */
TEST_F(hmm, anon_write_prot)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	/* Note: mapped read-only from the start. */
	buffer->ptr = mmap(NULL, size,
			   PROT_READ,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Simulate a device reading a zero page of memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, 1);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, 1);
	ASSERT_EQ(buffer->faults, 1);

	/* Initialize data that the device will write to buffer->ptr. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, -EPERM);

	/* Check what the device wrote: nothing, the buffer is still zero. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], 0);

	/* Now allow writing and see that the zero page is replaced. */
	ret = mprotect(buffer->ptr, size, PROT_WRITE | PROT_READ);
	ASSERT_EQ(ret, 0);

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}
519*baa489faSSeongJae Park 
/*
 * Check that a device writing an anonymous private mapping
 * will copy-on-write if a child process inherits the mapping.
 *
 * The child performs the device write via its own dmirror fd; because
 * the mapping is MAP_PRIVATE, the write must COW in the child and the
 * parent's copy must be unchanged.
 */
TEST_F(hmm, anon_write_child)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	pid_t pid;
	int child_fd;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer->ptr so we can tell if it is written. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Initialize data that the device will write to buffer->ptr. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = -i;

	pid = fork();
	if (pid == -1)
		ASSERT_EQ(pid, 0);	/* fork failed: force a test failure */
	if (pid != 0) {
		/* Parent: wait for the child, then verify COW protected us. */
		waitpid(pid, &ret, 0);
		ASSERT_EQ(WIFEXITED(ret), 1);

		/* Check that the parent's buffer did not change. */
		for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
			ASSERT_EQ(ptr[i], i);
		return;
	}

	/* Child: check that we see the parent's values. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], -i);

	/*
	 * The child process needs its own mirror to its own mm.
	 * NOTE(review): this always opens unit 0, not
	 * variant->device_number — confirm that is intentional for the
	 * coherent variants.
	 */
	child_fd = hmm_open(0);
	ASSERT_GE(child_fd, 0);

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(child_fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], -i);

	close(child_fd);
	exit(0);
}
597*baa489faSSeongJae Park 
/*
 * Check that a device writing an anonymous shared mapping
 * will not copy-on-write if a child process inherits the mapping.
 *
 * Same structure as anon_write_child, but with MAP_SHARED: the child's
 * device write must be visible in the parent's mapping.
 */
TEST_F(hmm, anon_write_child_shared)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	pid_t pid;
	int child_fd;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_SHARED | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer->ptr so we can tell if it is written. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Initialize data that the device will write to buffer->ptr. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = -i;

	pid = fork();
	if (pid == -1)
		ASSERT_EQ(pid, 0);	/* fork failed: force a test failure */
	if (pid != 0) {
		/* Parent: wait for the child, then verify the write is shared. */
		waitpid(pid, &ret, 0);
		ASSERT_EQ(WIFEXITED(ret), 1);

		/* Check that the parent's buffer did change. */
		for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
			ASSERT_EQ(ptr[i], -i);
		return;
	}

	/* Child: check that we see the parent's values. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], -i);

	/*
	 * The child process needs its own mirror to its own mm.
	 * NOTE(review): this always opens unit 0, not
	 * variant->device_number — confirm that is intentional for the
	 * coherent variants.
	 */
	child_fd = hmm_open(0);
	ASSERT_GE(child_fd, 0);

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(child_fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], -i);

	close(child_fd);
	exit(0);
}
675*baa489faSSeongJae Park 
676*baa489faSSeongJae Park /*
677*baa489faSSeongJae Park  * Write private anonymous huge page.
678*baa489faSSeongJae Park  */
679*baa489faSSeongJae Park TEST_F(hmm, anon_write_huge)
680*baa489faSSeongJae Park {
681*baa489faSSeongJae Park 	struct hmm_buffer *buffer;
682*baa489faSSeongJae Park 	unsigned long npages;
683*baa489faSSeongJae Park 	unsigned long size;
684*baa489faSSeongJae Park 	unsigned long i;
685*baa489faSSeongJae Park 	void *old_ptr;
686*baa489faSSeongJae Park 	void *map;
687*baa489faSSeongJae Park 	int *ptr;
688*baa489faSSeongJae Park 	int ret;
689*baa489faSSeongJae Park 
690*baa489faSSeongJae Park 	size = 2 * TWOMEG;
691*baa489faSSeongJae Park 
692*baa489faSSeongJae Park 	buffer = malloc(sizeof(*buffer));
693*baa489faSSeongJae Park 	ASSERT_NE(buffer, NULL);
694*baa489faSSeongJae Park 
695*baa489faSSeongJae Park 	buffer->fd = -1;
696*baa489faSSeongJae Park 	buffer->size = size;
697*baa489faSSeongJae Park 	buffer->mirror = malloc(size);
698*baa489faSSeongJae Park 	ASSERT_NE(buffer->mirror, NULL);
699*baa489faSSeongJae Park 
700*baa489faSSeongJae Park 	buffer->ptr = mmap(NULL, size,
701*baa489faSSeongJae Park 			   PROT_READ | PROT_WRITE,
702*baa489faSSeongJae Park 			   MAP_PRIVATE | MAP_ANONYMOUS,
703*baa489faSSeongJae Park 			   buffer->fd, 0);
704*baa489faSSeongJae Park 	ASSERT_NE(buffer->ptr, MAP_FAILED);
705*baa489faSSeongJae Park 
706*baa489faSSeongJae Park 	size = TWOMEG;
707*baa489faSSeongJae Park 	npages = size >> self->page_shift;
708*baa489faSSeongJae Park 	map = (void *)ALIGN((uintptr_t)buffer->ptr, size);
709*baa489faSSeongJae Park 	ret = madvise(map, size, MADV_HUGEPAGE);
710*baa489faSSeongJae Park 	ASSERT_EQ(ret, 0);
711*baa489faSSeongJae Park 	old_ptr = buffer->ptr;
712*baa489faSSeongJae Park 	buffer->ptr = map;
713*baa489faSSeongJae Park 
714*baa489faSSeongJae Park 	/* Initialize data that the device will write to buffer->ptr. */
715*baa489faSSeongJae Park 	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
716*baa489faSSeongJae Park 		ptr[i] = i;
717*baa489faSSeongJae Park 
718*baa489faSSeongJae Park 	/* Simulate a device writing system memory. */
719*baa489faSSeongJae Park 	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
720*baa489faSSeongJae Park 	ASSERT_EQ(ret, 0);
721*baa489faSSeongJae Park 	ASSERT_EQ(buffer->cpages, npages);
722*baa489faSSeongJae Park 	ASSERT_EQ(buffer->faults, 1);
723*baa489faSSeongJae Park 
724*baa489faSSeongJae Park 	/* Check what the device wrote. */
725*baa489faSSeongJae Park 	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
726*baa489faSSeongJae Park 		ASSERT_EQ(ptr[i], i);
727*baa489faSSeongJae Park 
728*baa489faSSeongJae Park 	buffer->ptr = old_ptr;
729*baa489faSSeongJae Park 	hmm_buffer_free(buffer);
730*baa489faSSeongJae Park }
731*baa489faSSeongJae Park 
732*baa489faSSeongJae Park /*
733*baa489faSSeongJae Park  * Read numeric data from raw and tagged kernel status files.  Used to read
734*baa489faSSeongJae Park  * /proc and /sys data (without a tag) and from /proc/meminfo (with a tag).
735*baa489faSSeongJae Park  */
736*baa489faSSeongJae Park static long file_read_ulong(char *file, const char *tag)
737*baa489faSSeongJae Park {
738*baa489faSSeongJae Park 	int fd;
739*baa489faSSeongJae Park 	char buf[2048];
740*baa489faSSeongJae Park 	int len;
741*baa489faSSeongJae Park 	char *p, *q;
742*baa489faSSeongJae Park 	long val;
743*baa489faSSeongJae Park 
744*baa489faSSeongJae Park 	fd = open(file, O_RDONLY);
745*baa489faSSeongJae Park 	if (fd < 0) {
746*baa489faSSeongJae Park 		/* Error opening the file */
747*baa489faSSeongJae Park 		return -1;
748*baa489faSSeongJae Park 	}
749*baa489faSSeongJae Park 
750*baa489faSSeongJae Park 	len = read(fd, buf, sizeof(buf));
751*baa489faSSeongJae Park 	close(fd);
752*baa489faSSeongJae Park 	if (len < 0) {
753*baa489faSSeongJae Park 		/* Error in reading the file */
754*baa489faSSeongJae Park 		return -1;
755*baa489faSSeongJae Park 	}
756*baa489faSSeongJae Park 	if (len == sizeof(buf)) {
757*baa489faSSeongJae Park 		/* Error file is too large */
758*baa489faSSeongJae Park 		return -1;
759*baa489faSSeongJae Park 	}
760*baa489faSSeongJae Park 	buf[len] = '\0';
761*baa489faSSeongJae Park 
762*baa489faSSeongJae Park 	/* Search for a tag if provided */
763*baa489faSSeongJae Park 	if (tag) {
764*baa489faSSeongJae Park 		p = strstr(buf, tag);
765*baa489faSSeongJae Park 		if (!p)
766*baa489faSSeongJae Park 			return -1; /* looks like the line we want isn't there */
767*baa489faSSeongJae Park 		p += strlen(tag);
768*baa489faSSeongJae Park 	} else
769*baa489faSSeongJae Park 		p = buf;
770*baa489faSSeongJae Park 
771*baa489faSSeongJae Park 	val = strtol(p, &q, 0);
772*baa489faSSeongJae Park 	if (*q != ' ') {
773*baa489faSSeongJae Park 		/* Error parsing the file */
774*baa489faSSeongJae Park 		return -1;
775*baa489faSSeongJae Park 	}
776*baa489faSSeongJae Park 
777*baa489faSSeongJae Park 	return val;
778*baa489faSSeongJae Park }
779*baa489faSSeongJae Park 
780*baa489faSSeongJae Park /*
781*baa489faSSeongJae Park  * Write huge TLBFS page.
782*baa489faSSeongJae Park  */
783*baa489faSSeongJae Park TEST_F(hmm, anon_write_hugetlbfs)
784*baa489faSSeongJae Park {
785*baa489faSSeongJae Park 	struct hmm_buffer *buffer;
786*baa489faSSeongJae Park 	unsigned long npages;
787*baa489faSSeongJae Park 	unsigned long size;
788*baa489faSSeongJae Park 	unsigned long default_hsize;
789*baa489faSSeongJae Park 	unsigned long i;
790*baa489faSSeongJae Park 	int *ptr;
791*baa489faSSeongJae Park 	int ret;
792*baa489faSSeongJae Park 
793*baa489faSSeongJae Park 	default_hsize = file_read_ulong("/proc/meminfo", "Hugepagesize:");
794*baa489faSSeongJae Park 	if (default_hsize < 0 || default_hsize*1024 < default_hsize)
795*baa489faSSeongJae Park 		SKIP(return, "Huge page size could not be determined");
796*baa489faSSeongJae Park 	default_hsize = default_hsize*1024; /* KB to B */
797*baa489faSSeongJae Park 
798*baa489faSSeongJae Park 	size = ALIGN(TWOMEG, default_hsize);
799*baa489faSSeongJae Park 	npages = size >> self->page_shift;
800*baa489faSSeongJae Park 
801*baa489faSSeongJae Park 	buffer = malloc(sizeof(*buffer));
802*baa489faSSeongJae Park 	ASSERT_NE(buffer, NULL);
803*baa489faSSeongJae Park 
804*baa489faSSeongJae Park 	buffer->ptr = mmap(NULL, size,
805*baa489faSSeongJae Park 				   PROT_READ | PROT_WRITE,
806*baa489faSSeongJae Park 				   MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
807*baa489faSSeongJae Park 				   -1, 0);
808*baa489faSSeongJae Park 	if (buffer->ptr == MAP_FAILED) {
809*baa489faSSeongJae Park 		free(buffer);
810*baa489faSSeongJae Park 		SKIP(return, "Huge page could not be allocated");
811*baa489faSSeongJae Park 	}
812*baa489faSSeongJae Park 
813*baa489faSSeongJae Park 	buffer->fd = -1;
814*baa489faSSeongJae Park 	buffer->size = size;
815*baa489faSSeongJae Park 	buffer->mirror = malloc(size);
816*baa489faSSeongJae Park 	ASSERT_NE(buffer->mirror, NULL);
817*baa489faSSeongJae Park 
818*baa489faSSeongJae Park 	/* Initialize data that the device will write to buffer->ptr. */
819*baa489faSSeongJae Park 	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
820*baa489faSSeongJae Park 		ptr[i] = i;
821*baa489faSSeongJae Park 
822*baa489faSSeongJae Park 	/* Simulate a device writing system memory. */
823*baa489faSSeongJae Park 	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
824*baa489faSSeongJae Park 	ASSERT_EQ(ret, 0);
825*baa489faSSeongJae Park 	ASSERT_EQ(buffer->cpages, npages);
826*baa489faSSeongJae Park 	ASSERT_EQ(buffer->faults, 1);
827*baa489faSSeongJae Park 
828*baa489faSSeongJae Park 	/* Check what the device wrote. */
829*baa489faSSeongJae Park 	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
830*baa489faSSeongJae Park 		ASSERT_EQ(ptr[i], i);
831*baa489faSSeongJae Park 
832*baa489faSSeongJae Park 	munmap(buffer->ptr, buffer->size);
833*baa489faSSeongJae Park 	buffer->ptr = NULL;
834*baa489faSSeongJae Park 	hmm_buffer_free(buffer);
835*baa489faSSeongJae Park }
836*baa489faSSeongJae Park 
837*baa489faSSeongJae Park /*
838*baa489faSSeongJae Park  * Read mmap'ed file memory.
839*baa489faSSeongJae Park  */
840*baa489faSSeongJae Park TEST_F(hmm, file_read)
841*baa489faSSeongJae Park {
842*baa489faSSeongJae Park 	struct hmm_buffer *buffer;
843*baa489faSSeongJae Park 	unsigned long npages;
844*baa489faSSeongJae Park 	unsigned long size;
845*baa489faSSeongJae Park 	unsigned long i;
846*baa489faSSeongJae Park 	int *ptr;
847*baa489faSSeongJae Park 	int ret;
848*baa489faSSeongJae Park 	int fd;
849*baa489faSSeongJae Park 	ssize_t len;
850*baa489faSSeongJae Park 
851*baa489faSSeongJae Park 	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
852*baa489faSSeongJae Park 	ASSERT_NE(npages, 0);
853*baa489faSSeongJae Park 	size = npages << self->page_shift;
854*baa489faSSeongJae Park 
855*baa489faSSeongJae Park 	fd = hmm_create_file(size);
856*baa489faSSeongJae Park 	ASSERT_GE(fd, 0);
857*baa489faSSeongJae Park 
858*baa489faSSeongJae Park 	buffer = malloc(sizeof(*buffer));
859*baa489faSSeongJae Park 	ASSERT_NE(buffer, NULL);
860*baa489faSSeongJae Park 
861*baa489faSSeongJae Park 	buffer->fd = fd;
862*baa489faSSeongJae Park 	buffer->size = size;
863*baa489faSSeongJae Park 	buffer->mirror = malloc(size);
864*baa489faSSeongJae Park 	ASSERT_NE(buffer->mirror, NULL);
865*baa489faSSeongJae Park 
866*baa489faSSeongJae Park 	/* Write initial contents of the file. */
867*baa489faSSeongJae Park 	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
868*baa489faSSeongJae Park 		ptr[i] = i;
869*baa489faSSeongJae Park 	len = pwrite(fd, buffer->mirror, size, 0);
870*baa489faSSeongJae Park 	ASSERT_EQ(len, size);
871*baa489faSSeongJae Park 	memset(buffer->mirror, 0, size);
872*baa489faSSeongJae Park 
873*baa489faSSeongJae Park 	buffer->ptr = mmap(NULL, size,
874*baa489faSSeongJae Park 			   PROT_READ,
875*baa489faSSeongJae Park 			   MAP_SHARED,
876*baa489faSSeongJae Park 			   buffer->fd, 0);
877*baa489faSSeongJae Park 	ASSERT_NE(buffer->ptr, MAP_FAILED);
878*baa489faSSeongJae Park 
879*baa489faSSeongJae Park 	/* Simulate a device reading system memory. */
880*baa489faSSeongJae Park 	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, npages);
881*baa489faSSeongJae Park 	ASSERT_EQ(ret, 0);
882*baa489faSSeongJae Park 	ASSERT_EQ(buffer->cpages, npages);
883*baa489faSSeongJae Park 	ASSERT_EQ(buffer->faults, 1);
884*baa489faSSeongJae Park 
885*baa489faSSeongJae Park 	/* Check what the device read. */
886*baa489faSSeongJae Park 	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
887*baa489faSSeongJae Park 		ASSERT_EQ(ptr[i], i);
888*baa489faSSeongJae Park 
889*baa489faSSeongJae Park 	hmm_buffer_free(buffer);
890*baa489faSSeongJae Park }
891*baa489faSSeongJae Park 
892*baa489faSSeongJae Park /*
893*baa489faSSeongJae Park  * Write mmap'ed file memory.
894*baa489faSSeongJae Park  */
895*baa489faSSeongJae Park TEST_F(hmm, file_write)
896*baa489faSSeongJae Park {
897*baa489faSSeongJae Park 	struct hmm_buffer *buffer;
898*baa489faSSeongJae Park 	unsigned long npages;
899*baa489faSSeongJae Park 	unsigned long size;
900*baa489faSSeongJae Park 	unsigned long i;
901*baa489faSSeongJae Park 	int *ptr;
902*baa489faSSeongJae Park 	int ret;
903*baa489faSSeongJae Park 	int fd;
904*baa489faSSeongJae Park 	ssize_t len;
905*baa489faSSeongJae Park 
906*baa489faSSeongJae Park 	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
907*baa489faSSeongJae Park 	ASSERT_NE(npages, 0);
908*baa489faSSeongJae Park 	size = npages << self->page_shift;
909*baa489faSSeongJae Park 
910*baa489faSSeongJae Park 	fd = hmm_create_file(size);
911*baa489faSSeongJae Park 	ASSERT_GE(fd, 0);
912*baa489faSSeongJae Park 
913*baa489faSSeongJae Park 	buffer = malloc(sizeof(*buffer));
914*baa489faSSeongJae Park 	ASSERT_NE(buffer, NULL);
915*baa489faSSeongJae Park 
916*baa489faSSeongJae Park 	buffer->fd = fd;
917*baa489faSSeongJae Park 	buffer->size = size;
918*baa489faSSeongJae Park 	buffer->mirror = malloc(size);
919*baa489faSSeongJae Park 	ASSERT_NE(buffer->mirror, NULL);
920*baa489faSSeongJae Park 
921*baa489faSSeongJae Park 	buffer->ptr = mmap(NULL, size,
922*baa489faSSeongJae Park 			   PROT_READ | PROT_WRITE,
923*baa489faSSeongJae Park 			   MAP_SHARED,
924*baa489faSSeongJae Park 			   buffer->fd, 0);
925*baa489faSSeongJae Park 	ASSERT_NE(buffer->ptr, MAP_FAILED);
926*baa489faSSeongJae Park 
927*baa489faSSeongJae Park 	/* Initialize data that the device will write to buffer->ptr. */
928*baa489faSSeongJae Park 	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
929*baa489faSSeongJae Park 		ptr[i] = i;
930*baa489faSSeongJae Park 
931*baa489faSSeongJae Park 	/* Simulate a device writing system memory. */
932*baa489faSSeongJae Park 	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
933*baa489faSSeongJae Park 	ASSERT_EQ(ret, 0);
934*baa489faSSeongJae Park 	ASSERT_EQ(buffer->cpages, npages);
935*baa489faSSeongJae Park 	ASSERT_EQ(buffer->faults, 1);
936*baa489faSSeongJae Park 
937*baa489faSSeongJae Park 	/* Check what the device wrote. */
938*baa489faSSeongJae Park 	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
939*baa489faSSeongJae Park 		ASSERT_EQ(ptr[i], i);
940*baa489faSSeongJae Park 
941*baa489faSSeongJae Park 	/* Check that the device also wrote the file. */
942*baa489faSSeongJae Park 	len = pread(fd, buffer->mirror, size, 0);
943*baa489faSSeongJae Park 	ASSERT_EQ(len, size);
944*baa489faSSeongJae Park 	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
945*baa489faSSeongJae Park 		ASSERT_EQ(ptr[i], i);
946*baa489faSSeongJae Park 
947*baa489faSSeongJae Park 	hmm_buffer_free(buffer);
948*baa489faSSeongJae Park }
949*baa489faSSeongJae Park 
950*baa489faSSeongJae Park /*
951*baa489faSSeongJae Park  * Migrate anonymous memory to device private memory.
952*baa489faSSeongJae Park  */
953*baa489faSSeongJae Park TEST_F(hmm, migrate)
954*baa489faSSeongJae Park {
955*baa489faSSeongJae Park 	struct hmm_buffer *buffer;
956*baa489faSSeongJae Park 	unsigned long npages;
957*baa489faSSeongJae Park 	unsigned long size;
958*baa489faSSeongJae Park 	unsigned long i;
959*baa489faSSeongJae Park 	int *ptr;
960*baa489faSSeongJae Park 	int ret;
961*baa489faSSeongJae Park 
962*baa489faSSeongJae Park 	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
963*baa489faSSeongJae Park 	ASSERT_NE(npages, 0);
964*baa489faSSeongJae Park 	size = npages << self->page_shift;
965*baa489faSSeongJae Park 
966*baa489faSSeongJae Park 	buffer = malloc(sizeof(*buffer));
967*baa489faSSeongJae Park 	ASSERT_NE(buffer, NULL);
968*baa489faSSeongJae Park 
969*baa489faSSeongJae Park 	buffer->fd = -1;
970*baa489faSSeongJae Park 	buffer->size = size;
971*baa489faSSeongJae Park 	buffer->mirror = malloc(size);
972*baa489faSSeongJae Park 	ASSERT_NE(buffer->mirror, NULL);
973*baa489faSSeongJae Park 
974*baa489faSSeongJae Park 	buffer->ptr = mmap(NULL, size,
975*baa489faSSeongJae Park 			   PROT_READ | PROT_WRITE,
976*baa489faSSeongJae Park 			   MAP_PRIVATE | MAP_ANONYMOUS,
977*baa489faSSeongJae Park 			   buffer->fd, 0);
978*baa489faSSeongJae Park 	ASSERT_NE(buffer->ptr, MAP_FAILED);
979*baa489faSSeongJae Park 
980*baa489faSSeongJae Park 	/* Initialize buffer in system memory. */
981*baa489faSSeongJae Park 	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
982*baa489faSSeongJae Park 		ptr[i] = i;
983*baa489faSSeongJae Park 
984*baa489faSSeongJae Park 	/* Migrate memory to device. */
985*baa489faSSeongJae Park 	ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
986*baa489faSSeongJae Park 	ASSERT_EQ(ret, 0);
987*baa489faSSeongJae Park 	ASSERT_EQ(buffer->cpages, npages);
988*baa489faSSeongJae Park 
989*baa489faSSeongJae Park 	/* Check what the device read. */
990*baa489faSSeongJae Park 	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
991*baa489faSSeongJae Park 		ASSERT_EQ(ptr[i], i);
992*baa489faSSeongJae Park 
993*baa489faSSeongJae Park 	hmm_buffer_free(buffer);
994*baa489faSSeongJae Park }
995*baa489faSSeongJae Park 
996*baa489faSSeongJae Park /*
997*baa489faSSeongJae Park  * Migrate anonymous memory to device private memory and fault some of it back
998*baa489faSSeongJae Park  * to system memory, then try migrating the resulting mix of system and device
999*baa489faSSeongJae Park  * private memory to the device.
1000*baa489faSSeongJae Park  */
1001*baa489faSSeongJae Park TEST_F(hmm, migrate_fault)
1002*baa489faSSeongJae Park {
1003*baa489faSSeongJae Park 	struct hmm_buffer *buffer;
1004*baa489faSSeongJae Park 	unsigned long npages;
1005*baa489faSSeongJae Park 	unsigned long size;
1006*baa489faSSeongJae Park 	unsigned long i;
1007*baa489faSSeongJae Park 	int *ptr;
1008*baa489faSSeongJae Park 	int ret;
1009*baa489faSSeongJae Park 
1010*baa489faSSeongJae Park 	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
1011*baa489faSSeongJae Park 	ASSERT_NE(npages, 0);
1012*baa489faSSeongJae Park 	size = npages << self->page_shift;
1013*baa489faSSeongJae Park 
1014*baa489faSSeongJae Park 	buffer = malloc(sizeof(*buffer));
1015*baa489faSSeongJae Park 	ASSERT_NE(buffer, NULL);
1016*baa489faSSeongJae Park 
1017*baa489faSSeongJae Park 	buffer->fd = -1;
1018*baa489faSSeongJae Park 	buffer->size = size;
1019*baa489faSSeongJae Park 	buffer->mirror = malloc(size);
1020*baa489faSSeongJae Park 	ASSERT_NE(buffer->mirror, NULL);
1021*baa489faSSeongJae Park 
1022*baa489faSSeongJae Park 	buffer->ptr = mmap(NULL, size,
1023*baa489faSSeongJae Park 			   PROT_READ | PROT_WRITE,
1024*baa489faSSeongJae Park 			   MAP_PRIVATE | MAP_ANONYMOUS,
1025*baa489faSSeongJae Park 			   buffer->fd, 0);
1026*baa489faSSeongJae Park 	ASSERT_NE(buffer->ptr, MAP_FAILED);
1027*baa489faSSeongJae Park 
1028*baa489faSSeongJae Park 	/* Initialize buffer in system memory. */
1029*baa489faSSeongJae Park 	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1030*baa489faSSeongJae Park 		ptr[i] = i;
1031*baa489faSSeongJae Park 
1032*baa489faSSeongJae Park 	/* Migrate memory to device. */
1033*baa489faSSeongJae Park 	ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
1034*baa489faSSeongJae Park 	ASSERT_EQ(ret, 0);
1035*baa489faSSeongJae Park 	ASSERT_EQ(buffer->cpages, npages);
1036*baa489faSSeongJae Park 
1037*baa489faSSeongJae Park 	/* Check what the device read. */
1038*baa489faSSeongJae Park 	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
1039*baa489faSSeongJae Park 		ASSERT_EQ(ptr[i], i);
1040*baa489faSSeongJae Park 
1041*baa489faSSeongJae Park 	/* Fault half the pages back to system memory and check them. */
1042*baa489faSSeongJae Park 	for (i = 0, ptr = buffer->ptr; i < size / (2 * sizeof(*ptr)); ++i)
1043*baa489faSSeongJae Park 		ASSERT_EQ(ptr[i], i);
1044*baa489faSSeongJae Park 
1045*baa489faSSeongJae Park 	/* Migrate memory to the device again. */
1046*baa489faSSeongJae Park 	ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
1047*baa489faSSeongJae Park 	ASSERT_EQ(ret, 0);
1048*baa489faSSeongJae Park 	ASSERT_EQ(buffer->cpages, npages);
1049*baa489faSSeongJae Park 
1050*baa489faSSeongJae Park 	/* Check what the device read. */
1051*baa489faSSeongJae Park 	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
1052*baa489faSSeongJae Park 		ASSERT_EQ(ptr[i], i);
1053*baa489faSSeongJae Park 
1054*baa489faSSeongJae Park 	hmm_buffer_free(buffer);
1055*baa489faSSeongJae Park }
1056*baa489faSSeongJae Park 
1057*baa489faSSeongJae Park TEST_F(hmm, migrate_release)
1058*baa489faSSeongJae Park {
1059*baa489faSSeongJae Park 	struct hmm_buffer *buffer;
1060*baa489faSSeongJae Park 	unsigned long npages;
1061*baa489faSSeongJae Park 	unsigned long size;
1062*baa489faSSeongJae Park 	unsigned long i;
1063*baa489faSSeongJae Park 	int *ptr;
1064*baa489faSSeongJae Park 	int ret;
1065*baa489faSSeongJae Park 
1066*baa489faSSeongJae Park 	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
1067*baa489faSSeongJae Park 	ASSERT_NE(npages, 0);
1068*baa489faSSeongJae Park 	size = npages << self->page_shift;
1069*baa489faSSeongJae Park 
1070*baa489faSSeongJae Park 	buffer = malloc(sizeof(*buffer));
1071*baa489faSSeongJae Park 	ASSERT_NE(buffer, NULL);
1072*baa489faSSeongJae Park 
1073*baa489faSSeongJae Park 	buffer->fd = -1;
1074*baa489faSSeongJae Park 	buffer->size = size;
1075*baa489faSSeongJae Park 	buffer->mirror = malloc(size);
1076*baa489faSSeongJae Park 	ASSERT_NE(buffer->mirror, NULL);
1077*baa489faSSeongJae Park 
1078*baa489faSSeongJae Park 	buffer->ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
1079*baa489faSSeongJae Park 			   MAP_PRIVATE | MAP_ANONYMOUS, buffer->fd, 0);
1080*baa489faSSeongJae Park 	ASSERT_NE(buffer->ptr, MAP_FAILED);
1081*baa489faSSeongJae Park 
1082*baa489faSSeongJae Park 	/* Initialize buffer in system memory. */
1083*baa489faSSeongJae Park 	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1084*baa489faSSeongJae Park 		ptr[i] = i;
1085*baa489faSSeongJae Park 
1086*baa489faSSeongJae Park 	/* Migrate memory to device. */
1087*baa489faSSeongJae Park 	ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
1088*baa489faSSeongJae Park 	ASSERT_EQ(ret, 0);
1089*baa489faSSeongJae Park 	ASSERT_EQ(buffer->cpages, npages);
1090*baa489faSSeongJae Park 
1091*baa489faSSeongJae Park 	/* Check what the device read. */
1092*baa489faSSeongJae Park 	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
1093*baa489faSSeongJae Park 		ASSERT_EQ(ptr[i], i);
1094*baa489faSSeongJae Park 
1095*baa489faSSeongJae Park 	/* Release device memory. */
1096*baa489faSSeongJae Park 	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_RELEASE, buffer, npages);
1097*baa489faSSeongJae Park 	ASSERT_EQ(ret, 0);
1098*baa489faSSeongJae Park 
1099*baa489faSSeongJae Park 	/* Fault pages back to system memory and check them. */
1100*baa489faSSeongJae Park 	for (i = 0, ptr = buffer->ptr; i < size / (2 * sizeof(*ptr)); ++i)
1101*baa489faSSeongJae Park 		ASSERT_EQ(ptr[i], i);
1102*baa489faSSeongJae Park 
1103*baa489faSSeongJae Park 	hmm_buffer_free(buffer);
1104*baa489faSSeongJae Park }
1105*baa489faSSeongJae Park 
1106*baa489faSSeongJae Park /*
1107*baa489faSSeongJae Park  * Migrate anonymous shared memory to device private memory.
1108*baa489faSSeongJae Park  */
1109*baa489faSSeongJae Park TEST_F(hmm, migrate_shared)
1110*baa489faSSeongJae Park {
1111*baa489faSSeongJae Park 	struct hmm_buffer *buffer;
1112*baa489faSSeongJae Park 	unsigned long npages;
1113*baa489faSSeongJae Park 	unsigned long size;
1114*baa489faSSeongJae Park 	int ret;
1115*baa489faSSeongJae Park 
1116*baa489faSSeongJae Park 	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
1117*baa489faSSeongJae Park 	ASSERT_NE(npages, 0);
1118*baa489faSSeongJae Park 	size = npages << self->page_shift;
1119*baa489faSSeongJae Park 
1120*baa489faSSeongJae Park 	buffer = malloc(sizeof(*buffer));
1121*baa489faSSeongJae Park 	ASSERT_NE(buffer, NULL);
1122*baa489faSSeongJae Park 
1123*baa489faSSeongJae Park 	buffer->fd = -1;
1124*baa489faSSeongJae Park 	buffer->size = size;
1125*baa489faSSeongJae Park 	buffer->mirror = malloc(size);
1126*baa489faSSeongJae Park 	ASSERT_NE(buffer->mirror, NULL);
1127*baa489faSSeongJae Park 
1128*baa489faSSeongJae Park 	buffer->ptr = mmap(NULL, size,
1129*baa489faSSeongJae Park 			   PROT_READ | PROT_WRITE,
1130*baa489faSSeongJae Park 			   MAP_SHARED | MAP_ANONYMOUS,
1131*baa489faSSeongJae Park 			   buffer->fd, 0);
1132*baa489faSSeongJae Park 	ASSERT_NE(buffer->ptr, MAP_FAILED);
1133*baa489faSSeongJae Park 
1134*baa489faSSeongJae Park 	/* Migrate memory to device. */
1135*baa489faSSeongJae Park 	ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
1136*baa489faSSeongJae Park 	ASSERT_EQ(ret, -ENOENT);
1137*baa489faSSeongJae Park 
1138*baa489faSSeongJae Park 	hmm_buffer_free(buffer);
1139*baa489faSSeongJae Park }
1140*baa489faSSeongJae Park 
1141*baa489faSSeongJae Park /*
1142*baa489faSSeongJae Park  * Try to migrate various memory types to device private memory.
1143*baa489faSSeongJae Park  */
1144*baa489faSSeongJae Park TEST_F(hmm2, migrate_mixed)
1145*baa489faSSeongJae Park {
1146*baa489faSSeongJae Park 	struct hmm_buffer *buffer;
1147*baa489faSSeongJae Park 	unsigned long npages;
1148*baa489faSSeongJae Park 	unsigned long size;
1149*baa489faSSeongJae Park 	int *ptr;
1150*baa489faSSeongJae Park 	unsigned char *p;
1151*baa489faSSeongJae Park 	int ret;
1152*baa489faSSeongJae Park 	int val;
1153*baa489faSSeongJae Park 
1154*baa489faSSeongJae Park 	npages = 6;
1155*baa489faSSeongJae Park 	size = npages << self->page_shift;
1156*baa489faSSeongJae Park 
1157*baa489faSSeongJae Park 	buffer = malloc(sizeof(*buffer));
1158*baa489faSSeongJae Park 	ASSERT_NE(buffer, NULL);
1159*baa489faSSeongJae Park 
1160*baa489faSSeongJae Park 	buffer->fd = -1;
1161*baa489faSSeongJae Park 	buffer->size = size;
1162*baa489faSSeongJae Park 	buffer->mirror = malloc(size);
1163*baa489faSSeongJae Park 	ASSERT_NE(buffer->mirror, NULL);
1164*baa489faSSeongJae Park 
1165*baa489faSSeongJae Park 	/* Reserve a range of addresses. */
1166*baa489faSSeongJae Park 	buffer->ptr = mmap(NULL, size,
1167*baa489faSSeongJae Park 			   PROT_NONE,
1168*baa489faSSeongJae Park 			   MAP_PRIVATE | MAP_ANONYMOUS,
1169*baa489faSSeongJae Park 			   buffer->fd, 0);
1170*baa489faSSeongJae Park 	ASSERT_NE(buffer->ptr, MAP_FAILED);
1171*baa489faSSeongJae Park 	p = buffer->ptr;
1172*baa489faSSeongJae Park 
1173*baa489faSSeongJae Park 	/* Migrating a protected area should be an error. */
1174*baa489faSSeongJae Park 	ret = hmm_migrate_sys_to_dev(self->fd1, buffer, npages);
1175*baa489faSSeongJae Park 	ASSERT_EQ(ret, -EINVAL);
1176*baa489faSSeongJae Park 
1177*baa489faSSeongJae Park 	/* Punch a hole after the first page address. */
1178*baa489faSSeongJae Park 	ret = munmap(buffer->ptr + self->page_size, self->page_size);
1179*baa489faSSeongJae Park 	ASSERT_EQ(ret, 0);
1180*baa489faSSeongJae Park 
1181*baa489faSSeongJae Park 	/* We expect an error if the vma doesn't cover the range. */
1182*baa489faSSeongJae Park 	ret = hmm_migrate_sys_to_dev(self->fd1, buffer, 3);
1183*baa489faSSeongJae Park 	ASSERT_EQ(ret, -EINVAL);
1184*baa489faSSeongJae Park 
1185*baa489faSSeongJae Park 	/* Page 2 will be a read-only zero page. */
1186*baa489faSSeongJae Park 	ret = mprotect(buffer->ptr + 2 * self->page_size, self->page_size,
1187*baa489faSSeongJae Park 				PROT_READ);
1188*baa489faSSeongJae Park 	ASSERT_EQ(ret, 0);
1189*baa489faSSeongJae Park 	ptr = (int *)(buffer->ptr + 2 * self->page_size);
1190*baa489faSSeongJae Park 	val = *ptr + 3;
1191*baa489faSSeongJae Park 	ASSERT_EQ(val, 3);
1192*baa489faSSeongJae Park 
1193*baa489faSSeongJae Park 	/* Page 3 will be read-only. */
1194*baa489faSSeongJae Park 	ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
1195*baa489faSSeongJae Park 				PROT_READ | PROT_WRITE);
1196*baa489faSSeongJae Park 	ASSERT_EQ(ret, 0);
1197*baa489faSSeongJae Park 	ptr = (int *)(buffer->ptr + 3 * self->page_size);
1198*baa489faSSeongJae Park 	*ptr = val;
1199*baa489faSSeongJae Park 	ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
1200*baa489faSSeongJae Park 				PROT_READ);
1201*baa489faSSeongJae Park 	ASSERT_EQ(ret, 0);
1202*baa489faSSeongJae Park 
1203*baa489faSSeongJae Park 	/* Page 4-5 will be read-write. */
1204*baa489faSSeongJae Park 	ret = mprotect(buffer->ptr + 4 * self->page_size, 2 * self->page_size,
1205*baa489faSSeongJae Park 				PROT_READ | PROT_WRITE);
1206*baa489faSSeongJae Park 	ASSERT_EQ(ret, 0);
1207*baa489faSSeongJae Park 	ptr = (int *)(buffer->ptr + 4 * self->page_size);
1208*baa489faSSeongJae Park 	*ptr = val;
1209*baa489faSSeongJae Park 	ptr = (int *)(buffer->ptr + 5 * self->page_size);
1210*baa489faSSeongJae Park 	*ptr = val;
1211*baa489faSSeongJae Park 
1212*baa489faSSeongJae Park 	/* Now try to migrate pages 2-5 to device 1. */
1213*baa489faSSeongJae Park 	buffer->ptr = p + 2 * self->page_size;
1214*baa489faSSeongJae Park 	ret = hmm_migrate_sys_to_dev(self->fd1, buffer, 4);
1215*baa489faSSeongJae Park 	ASSERT_EQ(ret, 0);
1216*baa489faSSeongJae Park 	ASSERT_EQ(buffer->cpages, 4);
1217*baa489faSSeongJae Park 
1218*baa489faSSeongJae Park 	/* Page 5 won't be migrated to device 0 because it's on device 1. */
1219*baa489faSSeongJae Park 	buffer->ptr = p + 5 * self->page_size;
1220*baa489faSSeongJae Park 	ret = hmm_migrate_sys_to_dev(self->fd0, buffer, 1);
1221*baa489faSSeongJae Park 	ASSERT_EQ(ret, -ENOENT);
1222*baa489faSSeongJae Park 	buffer->ptr = p;
1223*baa489faSSeongJae Park 
1224*baa489faSSeongJae Park 	buffer->ptr = p;
1225*baa489faSSeongJae Park 	hmm_buffer_free(buffer);
1226*baa489faSSeongJae Park }
1227*baa489faSSeongJae Park 
1228*baa489faSSeongJae Park /*
1229*baa489faSSeongJae Park  * Migrate anonymous memory to device memory and back to system memory
1230*baa489faSSeongJae Park  * multiple times. In case of private zone configuration, this is done
1231*baa489faSSeongJae Park  * through fault pages accessed by CPU. In case of coherent zone configuration,
1232*baa489faSSeongJae Park  * the pages from the device should be explicitly migrated back to system memory.
1233*baa489faSSeongJae Park  * The reason is Coherent device zone has coherent access by CPU, therefore
1234*baa489faSSeongJae Park  * it will not generate any page fault.
1235*baa489faSSeongJae Park  */
1236*baa489faSSeongJae Park TEST_F(hmm, migrate_multiple)
1237*baa489faSSeongJae Park {
1238*baa489faSSeongJae Park 	struct hmm_buffer *buffer;
1239*baa489faSSeongJae Park 	unsigned long npages;
1240*baa489faSSeongJae Park 	unsigned long size;
1241*baa489faSSeongJae Park 	unsigned long i;
1242*baa489faSSeongJae Park 	unsigned long c;
1243*baa489faSSeongJae Park 	int *ptr;
1244*baa489faSSeongJae Park 	int ret;
1245*baa489faSSeongJae Park 
1246*baa489faSSeongJae Park 	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
1247*baa489faSSeongJae Park 	ASSERT_NE(npages, 0);
1248*baa489faSSeongJae Park 	size = npages << self->page_shift;
1249*baa489faSSeongJae Park 
1250*baa489faSSeongJae Park 	for (c = 0; c < NTIMES; c++) {
1251*baa489faSSeongJae Park 		buffer = malloc(sizeof(*buffer));
1252*baa489faSSeongJae Park 		ASSERT_NE(buffer, NULL);
1253*baa489faSSeongJae Park 
1254*baa489faSSeongJae Park 		buffer->fd = -1;
1255*baa489faSSeongJae Park 		buffer->size = size;
1256*baa489faSSeongJae Park 		buffer->mirror = malloc(size);
1257*baa489faSSeongJae Park 		ASSERT_NE(buffer->mirror, NULL);
1258*baa489faSSeongJae Park 
1259*baa489faSSeongJae Park 		buffer->ptr = mmap(NULL, size,
1260*baa489faSSeongJae Park 				   PROT_READ | PROT_WRITE,
1261*baa489faSSeongJae Park 				   MAP_PRIVATE | MAP_ANONYMOUS,
1262*baa489faSSeongJae Park 				   buffer->fd, 0);
1263*baa489faSSeongJae Park 		ASSERT_NE(buffer->ptr, MAP_FAILED);
1264*baa489faSSeongJae Park 
1265*baa489faSSeongJae Park 		/* Initialize buffer in system memory. */
1266*baa489faSSeongJae Park 		for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1267*baa489faSSeongJae Park 			ptr[i] = i;
1268*baa489faSSeongJae Park 
1269*baa489faSSeongJae Park 		/* Migrate memory to device. */
1270*baa489faSSeongJae Park 		ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
1271*baa489faSSeongJae Park 		ASSERT_EQ(ret, 0);
1272*baa489faSSeongJae Park 		ASSERT_EQ(buffer->cpages, npages);
1273*baa489faSSeongJae Park 
1274*baa489faSSeongJae Park 		/* Check what the device read. */
1275*baa489faSSeongJae Park 		for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
1276*baa489faSSeongJae Park 			ASSERT_EQ(ptr[i], i);
1277*baa489faSSeongJae Park 
1278*baa489faSSeongJae Park 		/* Migrate back to system memory and check them. */
1279*baa489faSSeongJae Park 		if (hmm_is_coherent_type(variant->device_number)) {
1280*baa489faSSeongJae Park 			ret = hmm_migrate_dev_to_sys(self->fd, buffer, npages);
1281*baa489faSSeongJae Park 			ASSERT_EQ(ret, 0);
1282*baa489faSSeongJae Park 			ASSERT_EQ(buffer->cpages, npages);
1283*baa489faSSeongJae Park 		}
1284*baa489faSSeongJae Park 
1285*baa489faSSeongJae Park 		for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1286*baa489faSSeongJae Park 			ASSERT_EQ(ptr[i], i);
1287*baa489faSSeongJae Park 
1288*baa489faSSeongJae Park 		hmm_buffer_free(buffer);
1289*baa489faSSeongJae Park 	}
1290*baa489faSSeongJae Park }
1291*baa489faSSeongJae Park 
1292*baa489faSSeongJae Park /*
1293*baa489faSSeongJae Park  * Read anonymous memory multiple times.
1294*baa489faSSeongJae Park  */
1295*baa489faSSeongJae Park TEST_F(hmm, anon_read_multiple)
1296*baa489faSSeongJae Park {
1297*baa489faSSeongJae Park 	struct hmm_buffer *buffer;
1298*baa489faSSeongJae Park 	unsigned long npages;
1299*baa489faSSeongJae Park 	unsigned long size;
1300*baa489faSSeongJae Park 	unsigned long i;
1301*baa489faSSeongJae Park 	unsigned long c;
1302*baa489faSSeongJae Park 	int *ptr;
1303*baa489faSSeongJae Park 	int ret;
1304*baa489faSSeongJae Park 
1305*baa489faSSeongJae Park 	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
1306*baa489faSSeongJae Park 	ASSERT_NE(npages, 0);
1307*baa489faSSeongJae Park 	size = npages << self->page_shift;
1308*baa489faSSeongJae Park 
1309*baa489faSSeongJae Park 	for (c = 0; c < NTIMES; c++) {
1310*baa489faSSeongJae Park 		buffer = malloc(sizeof(*buffer));
1311*baa489faSSeongJae Park 		ASSERT_NE(buffer, NULL);
1312*baa489faSSeongJae Park 
1313*baa489faSSeongJae Park 		buffer->fd = -1;
1314*baa489faSSeongJae Park 		buffer->size = size;
1315*baa489faSSeongJae Park 		buffer->mirror = malloc(size);
1316*baa489faSSeongJae Park 		ASSERT_NE(buffer->mirror, NULL);
1317*baa489faSSeongJae Park 
1318*baa489faSSeongJae Park 		buffer->ptr = mmap(NULL, size,
1319*baa489faSSeongJae Park 				   PROT_READ | PROT_WRITE,
1320*baa489faSSeongJae Park 				   MAP_PRIVATE | MAP_ANONYMOUS,
1321*baa489faSSeongJae Park 				   buffer->fd, 0);
1322*baa489faSSeongJae Park 		ASSERT_NE(buffer->ptr, MAP_FAILED);
1323*baa489faSSeongJae Park 
1324*baa489faSSeongJae Park 		/* Initialize buffer in system memory. */
1325*baa489faSSeongJae Park 		for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1326*baa489faSSeongJae Park 			ptr[i] = i + c;
1327*baa489faSSeongJae Park 
1328*baa489faSSeongJae Park 		/* Simulate a device reading system memory. */
1329*baa489faSSeongJae Park 		ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer,
1330*baa489faSSeongJae Park 				      npages);
1331*baa489faSSeongJae Park 		ASSERT_EQ(ret, 0);
1332*baa489faSSeongJae Park 		ASSERT_EQ(buffer->cpages, npages);
1333*baa489faSSeongJae Park 		ASSERT_EQ(buffer->faults, 1);
1334*baa489faSSeongJae Park 
1335*baa489faSSeongJae Park 		/* Check what the device read. */
1336*baa489faSSeongJae Park 		for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
1337*baa489faSSeongJae Park 			ASSERT_EQ(ptr[i], i + c);
1338*baa489faSSeongJae Park 
1339*baa489faSSeongJae Park 		hmm_buffer_free(buffer);
1340*baa489faSSeongJae Park 	}
1341*baa489faSSeongJae Park }
1342*baa489faSSeongJae Park 
1343*baa489faSSeongJae Park void *unmap_buffer(void *p)
1344*baa489faSSeongJae Park {
1345*baa489faSSeongJae Park 	struct hmm_buffer *buffer = p;
1346*baa489faSSeongJae Park 
1347*baa489faSSeongJae Park 	/* Delay for a bit and then unmap buffer while it is being read. */
1348*baa489faSSeongJae Park 	hmm_nanosleep(hmm_random() % 32000);
1349*baa489faSSeongJae Park 	munmap(buffer->ptr + buffer->size / 2, buffer->size / 2);
1350*baa489faSSeongJae Park 	buffer->ptr = NULL;
1351*baa489faSSeongJae Park 
1352*baa489faSSeongJae Park 	return NULL;
1353*baa489faSSeongJae Park }
1354*baa489faSSeongJae Park 
1355*baa489faSSeongJae Park /*
1356*baa489faSSeongJae Park  * Try reading anonymous memory while it is being unmapped.
1357*baa489faSSeongJae Park  */
1358*baa489faSSeongJae Park TEST_F(hmm, anon_teardown)
1359*baa489faSSeongJae Park {
1360*baa489faSSeongJae Park 	unsigned long npages;
1361*baa489faSSeongJae Park 	unsigned long size;
1362*baa489faSSeongJae Park 	unsigned long c;
1363*baa489faSSeongJae Park 	void *ret;
1364*baa489faSSeongJae Park 
1365*baa489faSSeongJae Park 	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
1366*baa489faSSeongJae Park 	ASSERT_NE(npages, 0);
1367*baa489faSSeongJae Park 	size = npages << self->page_shift;
1368*baa489faSSeongJae Park 
1369*baa489faSSeongJae Park 	for (c = 0; c < NTIMES; ++c) {
1370*baa489faSSeongJae Park 		pthread_t thread;
1371*baa489faSSeongJae Park 		struct hmm_buffer *buffer;
1372*baa489faSSeongJae Park 		unsigned long i;
1373*baa489faSSeongJae Park 		int *ptr;
1374*baa489faSSeongJae Park 		int rc;
1375*baa489faSSeongJae Park 
1376*baa489faSSeongJae Park 		buffer = malloc(sizeof(*buffer));
1377*baa489faSSeongJae Park 		ASSERT_NE(buffer, NULL);
1378*baa489faSSeongJae Park 
1379*baa489faSSeongJae Park 		buffer->fd = -1;
1380*baa489faSSeongJae Park 		buffer->size = size;
1381*baa489faSSeongJae Park 		buffer->mirror = malloc(size);
1382*baa489faSSeongJae Park 		ASSERT_NE(buffer->mirror, NULL);
1383*baa489faSSeongJae Park 
1384*baa489faSSeongJae Park 		buffer->ptr = mmap(NULL, size,
1385*baa489faSSeongJae Park 				   PROT_READ | PROT_WRITE,
1386*baa489faSSeongJae Park 				   MAP_PRIVATE | MAP_ANONYMOUS,
1387*baa489faSSeongJae Park 				   buffer->fd, 0);
1388*baa489faSSeongJae Park 		ASSERT_NE(buffer->ptr, MAP_FAILED);
1389*baa489faSSeongJae Park 
1390*baa489faSSeongJae Park 		/* Initialize buffer in system memory. */
1391*baa489faSSeongJae Park 		for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1392*baa489faSSeongJae Park 			ptr[i] = i + c;
1393*baa489faSSeongJae Park 
1394*baa489faSSeongJae Park 		rc = pthread_create(&thread, NULL, unmap_buffer, buffer);
1395*baa489faSSeongJae Park 		ASSERT_EQ(rc, 0);
1396*baa489faSSeongJae Park 
1397*baa489faSSeongJae Park 		/* Simulate a device reading system memory. */
1398*baa489faSSeongJae Park 		rc = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer,
1399*baa489faSSeongJae Park 				     npages);
1400*baa489faSSeongJae Park 		if (rc == 0) {
1401*baa489faSSeongJae Park 			ASSERT_EQ(buffer->cpages, npages);
1402*baa489faSSeongJae Park 			ASSERT_EQ(buffer->faults, 1);
1403*baa489faSSeongJae Park 
1404*baa489faSSeongJae Park 			/* Check what the device read. */
1405*baa489faSSeongJae Park 			for (i = 0, ptr = buffer->mirror;
1406*baa489faSSeongJae Park 			     i < size / sizeof(*ptr);
1407*baa489faSSeongJae Park 			     ++i)
1408*baa489faSSeongJae Park 				ASSERT_EQ(ptr[i], i + c);
1409*baa489faSSeongJae Park 		}
1410*baa489faSSeongJae Park 
1411*baa489faSSeongJae Park 		pthread_join(thread, &ret);
1412*baa489faSSeongJae Park 		hmm_buffer_free(buffer);
1413*baa489faSSeongJae Park 	}
1414*baa489faSSeongJae Park }
1415*baa489faSSeongJae Park 
1416*baa489faSSeongJae Park /*
1417*baa489faSSeongJae Park  * Test memory snapshot without faulting in pages accessed by the device.
1418*baa489faSSeongJae Park  */
1419*baa489faSSeongJae Park TEST_F(hmm, mixedmap)
1420*baa489faSSeongJae Park {
1421*baa489faSSeongJae Park 	struct hmm_buffer *buffer;
1422*baa489faSSeongJae Park 	unsigned long npages;
1423*baa489faSSeongJae Park 	unsigned long size;
1424*baa489faSSeongJae Park 	unsigned char *m;
1425*baa489faSSeongJae Park 	int ret;
1426*baa489faSSeongJae Park 
1427*baa489faSSeongJae Park 	npages = 1;
1428*baa489faSSeongJae Park 	size = npages << self->page_shift;
1429*baa489faSSeongJae Park 
1430*baa489faSSeongJae Park 	buffer = malloc(sizeof(*buffer));
1431*baa489faSSeongJae Park 	ASSERT_NE(buffer, NULL);
1432*baa489faSSeongJae Park 
1433*baa489faSSeongJae Park 	buffer->fd = -1;
1434*baa489faSSeongJae Park 	buffer->size = size;
1435*baa489faSSeongJae Park 	buffer->mirror = malloc(npages);
1436*baa489faSSeongJae Park 	ASSERT_NE(buffer->mirror, NULL);
1437*baa489faSSeongJae Park 
1438*baa489faSSeongJae Park 
1439*baa489faSSeongJae Park 	/* Reserve a range of addresses. */
1440*baa489faSSeongJae Park 	buffer->ptr = mmap(NULL, size,
1441*baa489faSSeongJae Park 			   PROT_READ | PROT_WRITE,
1442*baa489faSSeongJae Park 			   MAP_PRIVATE,
1443*baa489faSSeongJae Park 			   self->fd, 0);
1444*baa489faSSeongJae Park 	ASSERT_NE(buffer->ptr, MAP_FAILED);
1445*baa489faSSeongJae Park 
1446*baa489faSSeongJae Park 	/* Simulate a device snapshotting CPU pagetables. */
1447*baa489faSSeongJae Park 	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
1448*baa489faSSeongJae Park 	ASSERT_EQ(ret, 0);
1449*baa489faSSeongJae Park 	ASSERT_EQ(buffer->cpages, npages);
1450*baa489faSSeongJae Park 
1451*baa489faSSeongJae Park 	/* Check what the device saw. */
1452*baa489faSSeongJae Park 	m = buffer->mirror;
1453*baa489faSSeongJae Park 	ASSERT_EQ(m[0], HMM_DMIRROR_PROT_READ);
1454*baa489faSSeongJae Park 
1455*baa489faSSeongJae Park 	hmm_buffer_free(buffer);
1456*baa489faSSeongJae Park }
1457*baa489faSSeongJae Park 
/*
 * Test memory snapshot without faulting in pages accessed by the device.
 * Builds a 7-page range where each page has a different state (unmapped,
 * zero page, read-only, read-write, on device 0, on device 1) and checks
 * the per-page snapshot bytes reported through device 0.
 */
TEST_F(hmm2, snapshot)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	int *ptr;
	unsigned char *p;
	unsigned char *m;
	int ret;
	int val;

	npages = 7;
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	/* The snapshot produces one byte per page. */
	buffer->mirror = malloc(npages);
	ASSERT_NE(buffer->mirror, NULL);

	/* Reserve a range of addresses. */
	/* PROT_NONE: page 0 is left inaccessible for the whole test. */
	buffer->ptr = mmap(NULL, size,
			   PROT_NONE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);
	/* Keep the range start; buffer->ptr is repointed per page below. */
	p = buffer->ptr;

	/* Punch a hole after the first page address. */
	ret = munmap(buffer->ptr + self->page_size, self->page_size);
	ASSERT_EQ(ret, 0);

	/* Page 2 will be read-only zero page. */
	ret = mprotect(buffer->ptr + 2 * self->page_size, self->page_size,
				PROT_READ);
	ASSERT_EQ(ret, 0);
	/* A read fault here maps the shared zero page (reads back 0). */
	ptr = (int *)(buffer->ptr + 2 * self->page_size);
	val = *ptr + 3;
	ASSERT_EQ(val, 3);

	/* Page 3 will be read-only. */
	/* Briefly writable so a real (non-zero) page gets populated. */
	ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
				PROT_READ | PROT_WRITE);
	ASSERT_EQ(ret, 0);
	ptr = (int *)(buffer->ptr + 3 * self->page_size);
	*ptr = val;
	ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
				PROT_READ);
	ASSERT_EQ(ret, 0);

	/* Page 4-6 will be read-write. */
	ret = mprotect(buffer->ptr + 4 * self->page_size, 3 * self->page_size,
				PROT_READ | PROT_WRITE);
	ASSERT_EQ(ret, 0);
	ptr = (int *)(buffer->ptr + 4 * self->page_size);
	*ptr = val;

	/* Page 5 will be migrated to device 0. */
	buffer->ptr = p + 5 * self->page_size;
	ret = hmm_migrate_sys_to_dev(self->fd0, buffer, 1);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, 1);

	/* Page 6 will be migrated to device 1. */
	buffer->ptr = p + 6 * self->page_size;
	ret = hmm_migrate_sys_to_dev(self->fd1, buffer, 1);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, 1);

	/* Simulate a device snapshotting CPU pagetables. */
	buffer->ptr = p;
	ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_SNAPSHOT, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Check what the device saw. */
	m = buffer->mirror;
	/* Page 0 is PROT_NONE, page 1 was unmapped above. */
	ASSERT_EQ(m[0], HMM_DMIRROR_PROT_ERROR);
	ASSERT_EQ(m[1], HMM_DMIRROR_PROT_ERROR);
	ASSERT_EQ(m[2], HMM_DMIRROR_PROT_ZERO | HMM_DMIRROR_PROT_READ);
	ASSERT_EQ(m[3], HMM_DMIRROR_PROT_READ);
	ASSERT_EQ(m[4], HMM_DMIRROR_PROT_WRITE);
	/* Pages 5/6 report differently for private vs coherent devices. */
	if (!hmm_is_coherent_type(variant->device_number0)) {
		ASSERT_EQ(m[5], HMM_DMIRROR_PROT_DEV_PRIVATE_LOCAL |
				HMM_DMIRROR_PROT_WRITE);
		ASSERT_EQ(m[6], HMM_DMIRROR_PROT_NONE);
	} else {
		ASSERT_EQ(m[5], HMM_DMIRROR_PROT_DEV_COHERENT_LOCAL |
				HMM_DMIRROR_PROT_WRITE);
		ASSERT_EQ(m[6], HMM_DMIRROR_PROT_DEV_COHERENT_REMOTE |
				HMM_DMIRROR_PROT_WRITE);
	}

	hmm_buffer_free(buffer);
}
1558*baa489faSSeongJae Park 
1559*baa489faSSeongJae Park /*
1560*baa489faSSeongJae Park  * Test the hmm_range_fault() HMM_PFN_PMD flag for large pages that
1561*baa489faSSeongJae Park  * should be mapped by a large page table entry.
1562*baa489faSSeongJae Park  */
1563*baa489faSSeongJae Park TEST_F(hmm, compound)
1564*baa489faSSeongJae Park {
1565*baa489faSSeongJae Park 	struct hmm_buffer *buffer;
1566*baa489faSSeongJae Park 	unsigned long npages;
1567*baa489faSSeongJae Park 	unsigned long size;
1568*baa489faSSeongJae Park 	unsigned long default_hsize;
1569*baa489faSSeongJae Park 	int *ptr;
1570*baa489faSSeongJae Park 	unsigned char *m;
1571*baa489faSSeongJae Park 	int ret;
1572*baa489faSSeongJae Park 	unsigned long i;
1573*baa489faSSeongJae Park 
1574*baa489faSSeongJae Park 	/* Skip test if we can't allocate a hugetlbfs page. */
1575*baa489faSSeongJae Park 
1576*baa489faSSeongJae Park 	default_hsize = file_read_ulong("/proc/meminfo", "Hugepagesize:");
1577*baa489faSSeongJae Park 	if (default_hsize < 0 || default_hsize*1024 < default_hsize)
1578*baa489faSSeongJae Park 		SKIP(return, "Huge page size could not be determined");
1579*baa489faSSeongJae Park 	default_hsize = default_hsize*1024; /* KB to B */
1580*baa489faSSeongJae Park 
1581*baa489faSSeongJae Park 	size = ALIGN(TWOMEG, default_hsize);
1582*baa489faSSeongJae Park 	npages = size >> self->page_shift;
1583*baa489faSSeongJae Park 
1584*baa489faSSeongJae Park 	buffer = malloc(sizeof(*buffer));
1585*baa489faSSeongJae Park 	ASSERT_NE(buffer, NULL);
1586*baa489faSSeongJae Park 
1587*baa489faSSeongJae Park 	buffer->ptr = mmap(NULL, size,
1588*baa489faSSeongJae Park 				   PROT_READ | PROT_WRITE,
1589*baa489faSSeongJae Park 				   MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
1590*baa489faSSeongJae Park 				   -1, 0);
1591*baa489faSSeongJae Park 	if (buffer->ptr == MAP_FAILED) {
1592*baa489faSSeongJae Park 		free(buffer);
1593*baa489faSSeongJae Park 		return;
1594*baa489faSSeongJae Park 	}
1595*baa489faSSeongJae Park 
1596*baa489faSSeongJae Park 	buffer->size = size;
1597*baa489faSSeongJae Park 	buffer->mirror = malloc(npages);
1598*baa489faSSeongJae Park 	ASSERT_NE(buffer->mirror, NULL);
1599*baa489faSSeongJae Park 
1600*baa489faSSeongJae Park 	/* Initialize the pages the device will snapshot in buffer->ptr. */
1601*baa489faSSeongJae Park 	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1602*baa489faSSeongJae Park 		ptr[i] = i;
1603*baa489faSSeongJae Park 
1604*baa489faSSeongJae Park 	/* Simulate a device snapshotting CPU pagetables. */
1605*baa489faSSeongJae Park 	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
1606*baa489faSSeongJae Park 	ASSERT_EQ(ret, 0);
1607*baa489faSSeongJae Park 	ASSERT_EQ(buffer->cpages, npages);
1608*baa489faSSeongJae Park 
1609*baa489faSSeongJae Park 	/* Check what the device saw. */
1610*baa489faSSeongJae Park 	m = buffer->mirror;
1611*baa489faSSeongJae Park 	for (i = 0; i < npages; ++i)
1612*baa489faSSeongJae Park 		ASSERT_EQ(m[i], HMM_DMIRROR_PROT_WRITE |
1613*baa489faSSeongJae Park 				HMM_DMIRROR_PROT_PMD);
1614*baa489faSSeongJae Park 
1615*baa489faSSeongJae Park 	/* Make the region read-only. */
1616*baa489faSSeongJae Park 	ret = mprotect(buffer->ptr, size, PROT_READ);
1617*baa489faSSeongJae Park 	ASSERT_EQ(ret, 0);
1618*baa489faSSeongJae Park 
1619*baa489faSSeongJae Park 	/* Simulate a device snapshotting CPU pagetables. */
1620*baa489faSSeongJae Park 	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
1621*baa489faSSeongJae Park 	ASSERT_EQ(ret, 0);
1622*baa489faSSeongJae Park 	ASSERT_EQ(buffer->cpages, npages);
1623*baa489faSSeongJae Park 
1624*baa489faSSeongJae Park 	/* Check what the device saw. */
1625*baa489faSSeongJae Park 	m = buffer->mirror;
1626*baa489faSSeongJae Park 	for (i = 0; i < npages; ++i)
1627*baa489faSSeongJae Park 		ASSERT_EQ(m[i], HMM_DMIRROR_PROT_READ |
1628*baa489faSSeongJae Park 				HMM_DMIRROR_PROT_PMD);
1629*baa489faSSeongJae Park 
1630*baa489faSSeongJae Park 	munmap(buffer->ptr, buffer->size);
1631*baa489faSSeongJae Park 	buffer->ptr = NULL;
1632*baa489faSSeongJae Park 	hmm_buffer_free(buffer);
1633*baa489faSSeongJae Park }
1634*baa489faSSeongJae Park 
1635*baa489faSSeongJae Park /*
1636*baa489faSSeongJae Park  * Test two devices reading the same memory (double mapped).
1637*baa489faSSeongJae Park  */
1638*baa489faSSeongJae Park TEST_F(hmm2, double_map)
1639*baa489faSSeongJae Park {
1640*baa489faSSeongJae Park 	struct hmm_buffer *buffer;
1641*baa489faSSeongJae Park 	unsigned long npages;
1642*baa489faSSeongJae Park 	unsigned long size;
1643*baa489faSSeongJae Park 	unsigned long i;
1644*baa489faSSeongJae Park 	int *ptr;
1645*baa489faSSeongJae Park 	int ret;
1646*baa489faSSeongJae Park 
1647*baa489faSSeongJae Park 	npages = 6;
1648*baa489faSSeongJae Park 	size = npages << self->page_shift;
1649*baa489faSSeongJae Park 
1650*baa489faSSeongJae Park 	buffer = malloc(sizeof(*buffer));
1651*baa489faSSeongJae Park 	ASSERT_NE(buffer, NULL);
1652*baa489faSSeongJae Park 
1653*baa489faSSeongJae Park 	buffer->fd = -1;
1654*baa489faSSeongJae Park 	buffer->size = size;
1655*baa489faSSeongJae Park 	buffer->mirror = malloc(npages);
1656*baa489faSSeongJae Park 	ASSERT_NE(buffer->mirror, NULL);
1657*baa489faSSeongJae Park 
1658*baa489faSSeongJae Park 	/* Reserve a range of addresses. */
1659*baa489faSSeongJae Park 	buffer->ptr = mmap(NULL, size,
1660*baa489faSSeongJae Park 			   PROT_READ | PROT_WRITE,
1661*baa489faSSeongJae Park 			   MAP_PRIVATE | MAP_ANONYMOUS,
1662*baa489faSSeongJae Park 			   buffer->fd, 0);
1663*baa489faSSeongJae Park 	ASSERT_NE(buffer->ptr, MAP_FAILED);
1664*baa489faSSeongJae Park 
1665*baa489faSSeongJae Park 	/* Initialize buffer in system memory. */
1666*baa489faSSeongJae Park 	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1667*baa489faSSeongJae Park 		ptr[i] = i;
1668*baa489faSSeongJae Park 
1669*baa489faSSeongJae Park 	/* Make region read-only. */
1670*baa489faSSeongJae Park 	ret = mprotect(buffer->ptr, size, PROT_READ);
1671*baa489faSSeongJae Park 	ASSERT_EQ(ret, 0);
1672*baa489faSSeongJae Park 
1673*baa489faSSeongJae Park 	/* Simulate device 0 reading system memory. */
1674*baa489faSSeongJae Park 	ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_READ, buffer, npages);
1675*baa489faSSeongJae Park 	ASSERT_EQ(ret, 0);
1676*baa489faSSeongJae Park 	ASSERT_EQ(buffer->cpages, npages);
1677*baa489faSSeongJae Park 	ASSERT_EQ(buffer->faults, 1);
1678*baa489faSSeongJae Park 
1679*baa489faSSeongJae Park 	/* Check what the device read. */
1680*baa489faSSeongJae Park 	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
1681*baa489faSSeongJae Park 		ASSERT_EQ(ptr[i], i);
1682*baa489faSSeongJae Park 
1683*baa489faSSeongJae Park 	/* Simulate device 1 reading system memory. */
1684*baa489faSSeongJae Park 	ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_READ, buffer, npages);
1685*baa489faSSeongJae Park 	ASSERT_EQ(ret, 0);
1686*baa489faSSeongJae Park 	ASSERT_EQ(buffer->cpages, npages);
1687*baa489faSSeongJae Park 	ASSERT_EQ(buffer->faults, 1);
1688*baa489faSSeongJae Park 
1689*baa489faSSeongJae Park 	/* Check what the device read. */
1690*baa489faSSeongJae Park 	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
1691*baa489faSSeongJae Park 		ASSERT_EQ(ptr[i], i);
1692*baa489faSSeongJae Park 
1693*baa489faSSeongJae Park 	/* Migrate pages to device 1 and try to read from device 0. */
1694*baa489faSSeongJae Park 	ret = hmm_migrate_sys_to_dev(self->fd1, buffer, npages);
1695*baa489faSSeongJae Park 	ASSERT_EQ(ret, 0);
1696*baa489faSSeongJae Park 	ASSERT_EQ(buffer->cpages, npages);
1697*baa489faSSeongJae Park 
1698*baa489faSSeongJae Park 	ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_READ, buffer, npages);
1699*baa489faSSeongJae Park 	ASSERT_EQ(ret, 0);
1700*baa489faSSeongJae Park 	ASSERT_EQ(buffer->cpages, npages);
1701*baa489faSSeongJae Park 	ASSERT_EQ(buffer->faults, 1);
1702*baa489faSSeongJae Park 
1703*baa489faSSeongJae Park 	/* Check what device 0 read. */
1704*baa489faSSeongJae Park 	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
1705*baa489faSSeongJae Park 		ASSERT_EQ(ptr[i], i);
1706*baa489faSSeongJae Park 
1707*baa489faSSeongJae Park 	hmm_buffer_free(buffer);
1708*baa489faSSeongJae Park }
1709*baa489faSSeongJae Park 
1710*baa489faSSeongJae Park /*
1711*baa489faSSeongJae Park  * Basic check of exclusive faulting.
1712*baa489faSSeongJae Park  */
1713*baa489faSSeongJae Park TEST_F(hmm, exclusive)
1714*baa489faSSeongJae Park {
1715*baa489faSSeongJae Park 	struct hmm_buffer *buffer;
1716*baa489faSSeongJae Park 	unsigned long npages;
1717*baa489faSSeongJae Park 	unsigned long size;
1718*baa489faSSeongJae Park 	unsigned long i;
1719*baa489faSSeongJae Park 	int *ptr;
1720*baa489faSSeongJae Park 	int ret;
1721*baa489faSSeongJae Park 
1722*baa489faSSeongJae Park 	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
1723*baa489faSSeongJae Park 	ASSERT_NE(npages, 0);
1724*baa489faSSeongJae Park 	size = npages << self->page_shift;
1725*baa489faSSeongJae Park 
1726*baa489faSSeongJae Park 	buffer = malloc(sizeof(*buffer));
1727*baa489faSSeongJae Park 	ASSERT_NE(buffer, NULL);
1728*baa489faSSeongJae Park 
1729*baa489faSSeongJae Park 	buffer->fd = -1;
1730*baa489faSSeongJae Park 	buffer->size = size;
1731*baa489faSSeongJae Park 	buffer->mirror = malloc(size);
1732*baa489faSSeongJae Park 	ASSERT_NE(buffer->mirror, NULL);
1733*baa489faSSeongJae Park 
1734*baa489faSSeongJae Park 	buffer->ptr = mmap(NULL, size,
1735*baa489faSSeongJae Park 			   PROT_READ | PROT_WRITE,
1736*baa489faSSeongJae Park 			   MAP_PRIVATE | MAP_ANONYMOUS,
1737*baa489faSSeongJae Park 			   buffer->fd, 0);
1738*baa489faSSeongJae Park 	ASSERT_NE(buffer->ptr, MAP_FAILED);
1739*baa489faSSeongJae Park 
1740*baa489faSSeongJae Park 	/* Initialize buffer in system memory. */
1741*baa489faSSeongJae Park 	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1742*baa489faSSeongJae Park 		ptr[i] = i;
1743*baa489faSSeongJae Park 
1744*baa489faSSeongJae Park 	/* Map memory exclusively for device access. */
1745*baa489faSSeongJae Park 	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_EXCLUSIVE, buffer, npages);
1746*baa489faSSeongJae Park 	ASSERT_EQ(ret, 0);
1747*baa489faSSeongJae Park 	ASSERT_EQ(buffer->cpages, npages);
1748*baa489faSSeongJae Park 
1749*baa489faSSeongJae Park 	/* Check what the device read. */
1750*baa489faSSeongJae Park 	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
1751*baa489faSSeongJae Park 		ASSERT_EQ(ptr[i], i);
1752*baa489faSSeongJae Park 
1753*baa489faSSeongJae Park 	/* Fault pages back to system memory and check them. */
1754*baa489faSSeongJae Park 	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1755*baa489faSSeongJae Park 		ASSERT_EQ(ptr[i]++, i);
1756*baa489faSSeongJae Park 
1757*baa489faSSeongJae Park 	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1758*baa489faSSeongJae Park 		ASSERT_EQ(ptr[i], i+1);
1759*baa489faSSeongJae Park 
1760*baa489faSSeongJae Park 	/* Check atomic access revoked */
1761*baa489faSSeongJae Park 	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_CHECK_EXCLUSIVE, buffer, npages);
1762*baa489faSSeongJae Park 	ASSERT_EQ(ret, 0);
1763*baa489faSSeongJae Park 
1764*baa489faSSeongJae Park 	hmm_buffer_free(buffer);
1765*baa489faSSeongJae Park }
1766*baa489faSSeongJae Park 
1767*baa489faSSeongJae Park TEST_F(hmm, exclusive_mprotect)
1768*baa489faSSeongJae Park {
1769*baa489faSSeongJae Park 	struct hmm_buffer *buffer;
1770*baa489faSSeongJae Park 	unsigned long npages;
1771*baa489faSSeongJae Park 	unsigned long size;
1772*baa489faSSeongJae Park 	unsigned long i;
1773*baa489faSSeongJae Park 	int *ptr;
1774*baa489faSSeongJae Park 	int ret;
1775*baa489faSSeongJae Park 
1776*baa489faSSeongJae Park 	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
1777*baa489faSSeongJae Park 	ASSERT_NE(npages, 0);
1778*baa489faSSeongJae Park 	size = npages << self->page_shift;
1779*baa489faSSeongJae Park 
1780*baa489faSSeongJae Park 	buffer = malloc(sizeof(*buffer));
1781*baa489faSSeongJae Park 	ASSERT_NE(buffer, NULL);
1782*baa489faSSeongJae Park 
1783*baa489faSSeongJae Park 	buffer->fd = -1;
1784*baa489faSSeongJae Park 	buffer->size = size;
1785*baa489faSSeongJae Park 	buffer->mirror = malloc(size);
1786*baa489faSSeongJae Park 	ASSERT_NE(buffer->mirror, NULL);
1787*baa489faSSeongJae Park 
1788*baa489faSSeongJae Park 	buffer->ptr = mmap(NULL, size,
1789*baa489faSSeongJae Park 			   PROT_READ | PROT_WRITE,
1790*baa489faSSeongJae Park 			   MAP_PRIVATE | MAP_ANONYMOUS,
1791*baa489faSSeongJae Park 			   buffer->fd, 0);
1792*baa489faSSeongJae Park 	ASSERT_NE(buffer->ptr, MAP_FAILED);
1793*baa489faSSeongJae Park 
1794*baa489faSSeongJae Park 	/* Initialize buffer in system memory. */
1795*baa489faSSeongJae Park 	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1796*baa489faSSeongJae Park 		ptr[i] = i;
1797*baa489faSSeongJae Park 
1798*baa489faSSeongJae Park 	/* Map memory exclusively for device access. */
1799*baa489faSSeongJae Park 	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_EXCLUSIVE, buffer, npages);
1800*baa489faSSeongJae Park 	ASSERT_EQ(ret, 0);
1801*baa489faSSeongJae Park 	ASSERT_EQ(buffer->cpages, npages);
1802*baa489faSSeongJae Park 
1803*baa489faSSeongJae Park 	/* Check what the device read. */
1804*baa489faSSeongJae Park 	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
1805*baa489faSSeongJae Park 		ASSERT_EQ(ptr[i], i);
1806*baa489faSSeongJae Park 
1807*baa489faSSeongJae Park 	ret = mprotect(buffer->ptr, size, PROT_READ);
1808*baa489faSSeongJae Park 	ASSERT_EQ(ret, 0);
1809*baa489faSSeongJae Park 
1810*baa489faSSeongJae Park 	/* Simulate a device writing system memory. */
1811*baa489faSSeongJae Park 	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
1812*baa489faSSeongJae Park 	ASSERT_EQ(ret, -EPERM);
1813*baa489faSSeongJae Park 
1814*baa489faSSeongJae Park 	hmm_buffer_free(buffer);
1815*baa489faSSeongJae Park }
1816*baa489faSSeongJae Park 
/*
 * Check copy-on-write works: pages mapped for exclusive device access
 * are then shared with a forked child, and CPU writes must fault in
 * private copies with the expected contents.
 */
TEST_F(hmm, exclusive_cow)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Map memory exclusively for device access. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_EXCLUSIVE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/*
	 * Fork so the anonymous pages become copy-on-write.  Both parent
	 * and child run the checks below against their own copies.
	 * NOTE(review): the fork() return value is ignored and the child
	 * is never waited for — presumably intentional for this test, but
	 * confirm the harness tolerates the child also exiting through it.
	 */
	fork();

	/* Fault pages back to system memory and check them. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i]++, i);

	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i+1);

	hmm_buffer_free(buffer);
}
1867*baa489faSSeongJae Park 
1868*baa489faSSeongJae Park static int gup_test_exec(int gup_fd, unsigned long addr, int cmd,
1869*baa489faSSeongJae Park 			 int npages, int size, int flags)
1870*baa489faSSeongJae Park {
1871*baa489faSSeongJae Park 	struct gup_test gup = {
1872*baa489faSSeongJae Park 		.nr_pages_per_call	= npages,
1873*baa489faSSeongJae Park 		.addr			= addr,
1874*baa489faSSeongJae Park 		.gup_flags		= FOLL_WRITE | flags,
1875*baa489faSSeongJae Park 		.size			= size,
1876*baa489faSSeongJae Park 	};
1877*baa489faSSeongJae Park 
1878*baa489faSSeongJae Park 	if (ioctl(gup_fd, cmd, &gup)) {
1879*baa489faSSeongJae Park 		perror("ioctl on error\n");
1880*baa489faSSeongJae Park 		return errno;
1881*baa489faSSeongJae Park 	}
1882*baa489faSSeongJae Park 
1883*baa489faSSeongJae Park 	return 0;
1884*baa489faSSeongJae Park }
1885*baa489faSSeongJae Park 
/*
 * Test get user device pages through gup_test. Setting PIN_LONGTERM flag.
 * This should trigger a migration back to system memory for both, private
 * and coherent type pages.
 * This test makes use of gup_test module. Make sure GUP_TEST_CONFIG is added
 * to your configuration before you run it.
 */
TEST_F(hmm, hmm_gup_test)
{
	struct hmm_buffer *buffer;
	int gup_fd;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;
	unsigned char *m;

	/* The gup_test debugfs file only exists when CONFIG_GUP_TEST is set. */
	gup_fd = open("/sys/kernel/debug/gup_test", O_RDWR);
	if (gup_fd == -1)
		SKIP(return, "Skipping test, could not find gup_test driver");

	/* Four pages: one per GUP/PIN variant exercised below. */
	npages = 4;
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Migrate memory to device. */
	ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	/* Check what the device read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	/*
	 * Hit each of the four pages through a different GUP path:
	 * plain get, fast get, fast pin with FOLL_LONGTERM, and the
	 * long-term pin benchmark. Per the comment above, the long-term
	 * pins are the ones expected to migrate device pages back.
	 */
	ASSERT_EQ(gup_test_exec(gup_fd,
				(unsigned long)buffer->ptr,
				GUP_BASIC_TEST, 1, self->page_size, 0), 0);
	ASSERT_EQ(gup_test_exec(gup_fd,
				(unsigned long)buffer->ptr + 1 * self->page_size,
				GUP_FAST_BENCHMARK, 1, self->page_size, 0), 0);
	ASSERT_EQ(gup_test_exec(gup_fd,
				(unsigned long)buffer->ptr + 2 * self->page_size,
				PIN_FAST_BENCHMARK, 1, self->page_size, FOLL_LONGTERM), 0);
	ASSERT_EQ(gup_test_exec(gup_fd,
				(unsigned long)buffer->ptr + 3 * self->page_size,
				PIN_LONGTERM_BENCHMARK, 1, self->page_size, 0), 0);

	/* Take snapshot to CPU pagetables */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	m = buffer->mirror;
	/*
	 * Pages 0-1 (non-longterm GUP): coherent devices may keep them in
	 * device memory, private devices must have them back in system
	 * memory. Pages 2-3 (FOLL_LONGTERM pins) are expected to be plain
	 * writable system pages either way.
	 */
	if (hmm_is_coherent_type(variant->device_number)) {
		ASSERT_EQ(HMM_DMIRROR_PROT_DEV_COHERENT_LOCAL | HMM_DMIRROR_PROT_WRITE, m[0]);
		ASSERT_EQ(HMM_DMIRROR_PROT_DEV_COHERENT_LOCAL | HMM_DMIRROR_PROT_WRITE, m[1]);
	} else {
		ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[0]);
		ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[1]);
	}
	ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[2]);
	ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[3]);
	/*
	 * Check again the content on the pages. Make sure there's no
	 * corrupted data.
	 */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	close(gup_fd);
	hmm_buffer_free(buffer);
}
1974*baa489faSSeongJae Park 
1975*baa489faSSeongJae Park /*
1976*baa489faSSeongJae Park  * Test copy-on-write in device pages.
1977*baa489faSSeongJae Park  * In case of writing to COW private page(s), a page fault will migrate pages
1978*baa489faSSeongJae Park  * back to system memory first. Then, these pages will be duplicated. In case
1979*baa489faSSeongJae Park  * of COW device coherent type, pages are duplicated directly from device
1980*baa489faSSeongJae Park  * memory.
1981*baa489faSSeongJae Park  */
1982*baa489faSSeongJae Park TEST_F(hmm, hmm_cow_in_device)
1983*baa489faSSeongJae Park {
1984*baa489faSSeongJae Park 	struct hmm_buffer *buffer;
1985*baa489faSSeongJae Park 	unsigned long npages;
1986*baa489faSSeongJae Park 	unsigned long size;
1987*baa489faSSeongJae Park 	unsigned long i;
1988*baa489faSSeongJae Park 	int *ptr;
1989*baa489faSSeongJae Park 	int ret;
1990*baa489faSSeongJae Park 	unsigned char *m;
1991*baa489faSSeongJae Park 	pid_t pid;
1992*baa489faSSeongJae Park 	int status;
1993*baa489faSSeongJae Park 
1994*baa489faSSeongJae Park 	npages = 4;
1995*baa489faSSeongJae Park 	size = npages << self->page_shift;
1996*baa489faSSeongJae Park 
1997*baa489faSSeongJae Park 	buffer = malloc(sizeof(*buffer));
1998*baa489faSSeongJae Park 	ASSERT_NE(buffer, NULL);
1999*baa489faSSeongJae Park 
2000*baa489faSSeongJae Park 	buffer->fd = -1;
2001*baa489faSSeongJae Park 	buffer->size = size;
2002*baa489faSSeongJae Park 	buffer->mirror = malloc(size);
2003*baa489faSSeongJae Park 	ASSERT_NE(buffer->mirror, NULL);
2004*baa489faSSeongJae Park 
2005*baa489faSSeongJae Park 	buffer->ptr = mmap(NULL, size,
2006*baa489faSSeongJae Park 			   PROT_READ | PROT_WRITE,
2007*baa489faSSeongJae Park 			   MAP_PRIVATE | MAP_ANONYMOUS,
2008*baa489faSSeongJae Park 			   buffer->fd, 0);
2009*baa489faSSeongJae Park 	ASSERT_NE(buffer->ptr, MAP_FAILED);
2010*baa489faSSeongJae Park 
2011*baa489faSSeongJae Park 	/* Initialize buffer in system memory. */
2012*baa489faSSeongJae Park 	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
2013*baa489faSSeongJae Park 		ptr[i] = i;
2014*baa489faSSeongJae Park 
2015*baa489faSSeongJae Park 	/* Migrate memory to device. */
2016*baa489faSSeongJae Park 
2017*baa489faSSeongJae Park 	ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
2018*baa489faSSeongJae Park 	ASSERT_EQ(ret, 0);
2019*baa489faSSeongJae Park 	ASSERT_EQ(buffer->cpages, npages);
2020*baa489faSSeongJae Park 
2021*baa489faSSeongJae Park 	pid = fork();
2022*baa489faSSeongJae Park 	if (pid == -1)
2023*baa489faSSeongJae Park 		ASSERT_EQ(pid, 0);
2024*baa489faSSeongJae Park 	if (!pid) {
2025*baa489faSSeongJae Park 		/* Child process waitd for SIGTERM from the parent. */
2026*baa489faSSeongJae Park 		while (1) {
2027*baa489faSSeongJae Park 		}
2028*baa489faSSeongJae Park 		perror("Should not reach this\n");
2029*baa489faSSeongJae Park 		exit(0);
2030*baa489faSSeongJae Park 	}
2031*baa489faSSeongJae Park 	/* Parent process writes to COW pages(s) and gets a
2032*baa489faSSeongJae Park 	 * new copy in system. In case of device private pages,
2033*baa489faSSeongJae Park 	 * this write causes a migration to system mem first.
2034*baa489faSSeongJae Park 	 */
2035*baa489faSSeongJae Park 	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
2036*baa489faSSeongJae Park 		ptr[i] = i;
2037*baa489faSSeongJae Park 
2038*baa489faSSeongJae Park 	/* Terminate child and wait */
2039*baa489faSSeongJae Park 	EXPECT_EQ(0, kill(pid, SIGTERM));
2040*baa489faSSeongJae Park 	EXPECT_EQ(pid, waitpid(pid, &status, 0));
2041*baa489faSSeongJae Park 	EXPECT_NE(0, WIFSIGNALED(status));
2042*baa489faSSeongJae Park 	EXPECT_EQ(SIGTERM, WTERMSIG(status));
2043*baa489faSSeongJae Park 
2044*baa489faSSeongJae Park 	/* Take snapshot to CPU pagetables */
2045*baa489faSSeongJae Park 	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
2046*baa489faSSeongJae Park 	ASSERT_EQ(ret, 0);
2047*baa489faSSeongJae Park 	ASSERT_EQ(buffer->cpages, npages);
2048*baa489faSSeongJae Park 	m = buffer->mirror;
2049*baa489faSSeongJae Park 	for (i = 0; i < npages; i++)
2050*baa489faSSeongJae Park 		ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[i]);
2051*baa489faSSeongJae Park 
2052*baa489faSSeongJae Park 	hmm_buffer_free(buffer);
2053*baa489faSSeongJae Park }
/* Expands to main(), running every TEST_F() case registered above. */
TEST_HARNESS_MAIN
2055