// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stress userfaultfd syscall.
 *
 * Copyright (C) 2015  Red Hat, Inc.
 *
 * This test allocates two virtual areas and bounces the physical
 * memory across the two virtual areas (from area_src to area_dst)
 * using userfaultfd.
 *
 * There are three threads running per CPU:
 *
 * 1) one per-CPU thread takes a per-page pthread_mutex in a random
 *    page of the area_dst (while the physical page may still be in
 *    area_src), and increments a per-page counter in the same page,
 *    and checks its value against a verification region.
 *
 * 2) another per-CPU thread handles the userfaults generated by
 *    thread 1 above. userfaultfd blocking reads or poll() modes are
 *    exercised interleaved.
 *
 * 3) one last per-CPU thread transfers the memory in the background
 *    at maximum bandwidth (if not already transferred by thread
 *    2). Each cpu thread takes care of transferring a portion of the
 *    area.
 *
 * When all threads of type 3 completed the transfer, one bounce is
 * complete. area_src and area_dst are then swapped. All threads are
 * respawned and so the bounce is immediately restarted in the
 * opposite direction.
 *
 * The per-CPU threads of type 1, by triggering userfaults inside
 * pthread_mutex_lock, will also verify the atomicity of the memory
 * transfer (UFFDIO_COPY).
 */
36686a8bb7SPeter Xu
37686a8bb7SPeter Xu #include "uffd-common.h"
38686a8bb7SPeter Xu
39686a8bb7SPeter Xu #ifdef __NR_userfaultfd
40686a8bb7SPeter Xu
/*
 * Bounce-mode flag bits: the low bits of the remaining bounce count
 * select the behavior of each pass (see userfaultfd_stress()).
 */
#define BOUNCE_RANDOM (1<<0)
#define BOUNCE_RACINGFAULTS (1<<1)
#define BOUNCE_VERIFY (1<<2)
#define BOUNCE_POLL (1<<3)
/* Remaining bounce passes; also doubles as the mode selector above. */
static int bounces;

/* exercise the test_uffdio_*_eexist every ALARM_INTERVAL_SECS */
#define ALARM_INTERVAL_SECS 10
/* One page-aligned zero-filled page, allocated in userfaultfd_stress(). */
static char *zeropage;
/* Shared pthread attributes (16MiB stacks) for all worker threads. */
pthread_attr_t attr;

/* Swap via temporary; used to flip the src/dst areas between bounces. */
#define swap(a, b) \
	do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
54686a8bb7SPeter Xu
/* Usage examples appended verbatim to the usage() output. */
const char *examples =
	"# Run anonymous memory test on 100MiB region with 99999 bounces:\n"
	"./uffd-stress anon 100 99999\n\n"
	"# Run share memory test on 1GiB region with 99 bounces:\n"
	"./uffd-stress shmem 1000 99\n\n"
	"# Run hugetlb memory test on 256MiB region with 50 bounces:\n"
	"./uffd-stress hugetlb 256 50\n\n"
	"# Run the same hugetlb test but using private file:\n"
	"./uffd-stress hugetlb-private 256 50\n\n"
	"# 10MiB-~6GiB 999 bounces anonymous test, "
	"continue forever unless an error triggers\n"
	"while ./uffd-stress anon $[RANDOM % 6000 + 10] 999; do true; done\n\n";
67686a8bb7SPeter Xu
usage(void)68686a8bb7SPeter Xu static void usage(void)
69686a8bb7SPeter Xu {
70*708879a1SRong Tao fprintf(stderr, "\nUsage: ./uffd-stress <test type> <MiB> <bounces>\n\n");
71686a8bb7SPeter Xu fprintf(stderr, "Supported <test type>: anon, hugetlb, "
725aec236fSPeter Xu "hugetlb-private, shmem, shmem-private\n\n");
73686a8bb7SPeter Xu fprintf(stderr, "Examples:\n\n");
74686a8bb7SPeter Xu fprintf(stderr, "%s", examples);
75686a8bb7SPeter Xu exit(1);
76686a8bb7SPeter Xu }
77686a8bb7SPeter Xu
uffd_stats_reset(struct uffd_args * args,unsigned long n_cpus)7850834084SPeter Xu static void uffd_stats_reset(struct uffd_args *args, unsigned long n_cpus)
79686a8bb7SPeter Xu {
80686a8bb7SPeter Xu int i;
81686a8bb7SPeter Xu
82686a8bb7SPeter Xu for (i = 0; i < n_cpus; i++) {
8350834084SPeter Xu args[i].cpu = i;
840210c43eSPeter Xu args[i].apply_wp = test_uffdio_wp;
8550834084SPeter Xu args[i].missing_faults = 0;
8650834084SPeter Xu args[i].wp_faults = 0;
8750834084SPeter Xu args[i].minor_faults = 0;
88686a8bb7SPeter Xu }
89686a8bb7SPeter Xu }
90686a8bb7SPeter Xu
locking_thread(void * arg)91686a8bb7SPeter Xu static void *locking_thread(void *arg)
92686a8bb7SPeter Xu {
93686a8bb7SPeter Xu unsigned long cpu = (unsigned long) arg;
94686a8bb7SPeter Xu unsigned long page_nr;
95686a8bb7SPeter Xu unsigned long long count;
96686a8bb7SPeter Xu
97686a8bb7SPeter Xu if (!(bounces & BOUNCE_RANDOM)) {
98686a8bb7SPeter Xu page_nr = -bounces;
99686a8bb7SPeter Xu if (!(bounces & BOUNCE_RACINGFAULTS))
100686a8bb7SPeter Xu page_nr += cpu * nr_pages_per_cpu;
101686a8bb7SPeter Xu }
102686a8bb7SPeter Xu
103686a8bb7SPeter Xu while (!finished) {
104686a8bb7SPeter Xu if (bounces & BOUNCE_RANDOM) {
105686a8bb7SPeter Xu if (getrandom(&page_nr, sizeof(page_nr), 0) != sizeof(page_nr))
106686a8bb7SPeter Xu err("getrandom failed");
107686a8bb7SPeter Xu } else
108686a8bb7SPeter Xu page_nr += 1;
109686a8bb7SPeter Xu page_nr %= nr_pages;
110686a8bb7SPeter Xu pthread_mutex_lock(area_mutex(area_dst, page_nr));
111686a8bb7SPeter Xu count = *area_count(area_dst, page_nr);
112686a8bb7SPeter Xu if (count != count_verify[page_nr])
113686a8bb7SPeter Xu err("page_nr %lu memory corruption %llu %llu",
114686a8bb7SPeter Xu page_nr, count, count_verify[page_nr]);
115686a8bb7SPeter Xu count++;
116686a8bb7SPeter Xu *area_count(area_dst, page_nr) = count_verify[page_nr] = count;
117686a8bb7SPeter Xu pthread_mutex_unlock(area_mutex(area_dst, page_nr));
118686a8bb7SPeter Xu }
119686a8bb7SPeter Xu
120686a8bb7SPeter Xu return NULL;
121686a8bb7SPeter Xu }
122686a8bb7SPeter Xu
/*
 * Copy one page src->dst at @offset via UFFDIO_COPY, applying
 * write-protection when uffd-wp is being tested.  The 'true' flag
 * presumably enables retry-on-contention — see __copy_page() in
 * uffd-common for the exact semantics.
 */
static int copy_page_retry(int ufd, unsigned long offset)
{
	return __copy_page(ufd, offset, true, test_uffdio_wp);
}
127686a8bb7SPeter Xu
128686a8bb7SPeter Xu pthread_mutex_t uffd_read_mutex = PTHREAD_MUTEX_INITIALIZER;
129686a8bb7SPeter Xu
/*
 * Thread type 2 (blocking-read mode, i.e. no BOUNCE_POLL): read uffd
 * messages and resolve the faults until pthread_cancel()ed by stress().
 */
static void *uffd_read_thread(void *arg)
{
	struct uffd_args *args = (struct uffd_args *)arg;
	struct uffd_msg msg;

	/* Handshake: the creator holds uffd_read_mutex until we run. */
	pthread_mutex_unlock(&uffd_read_mutex);
	/* from here cancellation is ok */

	for (;;) {
		/* uffd_read_msg() is a cancellation point while blocked. */
		if (uffd_read_msg(uffd, &msg))
			continue;
		uffd_handle_page_fault(&msg, args);
	}

	/* Unreachable: the thread exits via pthread_cancel() in stress(). */
	return NULL;
}
146686a8bb7SPeter Xu
background_thread(void * arg)147686a8bb7SPeter Xu static void *background_thread(void *arg)
148686a8bb7SPeter Xu {
149686a8bb7SPeter Xu unsigned long cpu = (unsigned long) arg;
150686a8bb7SPeter Xu unsigned long page_nr, start_nr, mid_nr, end_nr;
151686a8bb7SPeter Xu
152686a8bb7SPeter Xu start_nr = cpu * nr_pages_per_cpu;
153686a8bb7SPeter Xu end_nr = (cpu+1) * nr_pages_per_cpu;
154686a8bb7SPeter Xu mid_nr = (start_nr + end_nr) / 2;
155686a8bb7SPeter Xu
156686a8bb7SPeter Xu /* Copy the first half of the pages */
157686a8bb7SPeter Xu for (page_nr = start_nr; page_nr < mid_nr; page_nr++)
158686a8bb7SPeter Xu copy_page_retry(uffd, page_nr * page_size);
159686a8bb7SPeter Xu
160686a8bb7SPeter Xu /*
161686a8bb7SPeter Xu * If we need to test uffd-wp, set it up now. Then we'll have
162686a8bb7SPeter Xu * at least the first half of the pages mapped already which
163686a8bb7SPeter Xu * can be write-protected for testing
164686a8bb7SPeter Xu */
165686a8bb7SPeter Xu if (test_uffdio_wp)
166686a8bb7SPeter Xu wp_range(uffd, (unsigned long)area_dst + start_nr * page_size,
167686a8bb7SPeter Xu nr_pages_per_cpu * page_size, true);
168686a8bb7SPeter Xu
169686a8bb7SPeter Xu /*
170686a8bb7SPeter Xu * Continue the 2nd half of the page copying, handling write
171686a8bb7SPeter Xu * protection faults if any
172686a8bb7SPeter Xu */
173686a8bb7SPeter Xu for (page_nr = mid_nr; page_nr < end_nr; page_nr++)
174686a8bb7SPeter Xu copy_page_retry(uffd, page_nr * page_size);
175686a8bb7SPeter Xu
176686a8bb7SPeter Xu return NULL;
177686a8bb7SPeter Xu }
178686a8bb7SPeter Xu
/*
 * Run one bounce pass: spawn the three thread types on every CPU, wait
 * for the background copy to complete, zap area_src, then tear all the
 * threads down.  Returns 0 on success, 1 on any pthread failure.
 */
static int stress(struct uffd_args *args)
{
	unsigned long cpu;
	pthread_t locking_threads[nr_cpus];
	pthread_t uffd_threads[nr_cpus];
	pthread_t background_threads[nr_cpus];

	finished = 0;
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		if (pthread_create(&locking_threads[cpu], &attr,
				   locking_thread, (void *)cpu))
			return 1;
		if (bounces & BOUNCE_POLL) {
			if (pthread_create(&uffd_threads[cpu], &attr, uffd_poll_thread, &args[cpu]))
				err("uffd_poll_thread create");
		} else {
			if (pthread_create(&uffd_threads[cpu], &attr,
					   uffd_read_thread,
					   (void *)&args[cpu]))
				return 1;
			/* Wait until the reader runs: it unlocks the mutex. */
			pthread_mutex_lock(&uffd_read_mutex);
		}
		if (pthread_create(&background_threads[cpu], &attr,
				   background_thread, (void *)cpu))
			return 1;
	}
	for (cpu = 0; cpu < nr_cpus; cpu++)
		if (pthread_join(background_threads[cpu], NULL))
			return 1;

	/*
	 * Be strict and immediately zap area_src, the whole area has
	 * been transferred already by the background threads. The
	 * area_src could then be faulted in a racy way by still
	 * running uffdio_threads reading zeropages after we zapped
	 * area_src (but they're guaranteed to get -EEXIST from
	 * UFFDIO_COPY without writing zero pages into area_dst
	 * because the background threads already completed).
	 */
	uffd_test_ops->release_pages(area_src);

	/* Tell the locking threads to wind down, then reap them. */
	finished = 1;
	for (cpu = 0; cpu < nr_cpus; cpu++)
		if (pthread_join(locking_threads[cpu], NULL))
			return 1;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		char c;
		if (bounces & BOUNCE_POLL) {
			/* Wake the poll()ing thread through its pipe. */
			if (write(pipefd[cpu*2+1], &c, 1) != 1)
				err("pipefd write error");
			if (pthread_join(uffd_threads[cpu],
					 (void *)&args[cpu]))
				return 1;
		} else {
			/* Blocking readers never return; cancel them. */
			if (pthread_cancel(uffd_threads[cpu]))
				return 1;
			if (pthread_join(uffd_threads[cpu], NULL))
				return 1;
		}
	}

	return 0;
}
243686a8bb7SPeter Xu
/*
 * Main test loop: initialize the uffd context once, then run 'bounces'
 * passes of stress(), swapping area_src/area_dst after each pass so the
 * memory bounces back and forth.  Returns 0 on success, 1 if a pass
 * failed.
 */
static int userfaultfd_stress(void)
{
	void *area;
	unsigned long nr;
	struct uffd_args args[nr_cpus];
	uint64_t mem_size = nr_pages * page_size;

	memset(args, 0, sizeof(struct uffd_args) * nr_cpus);

	if (uffd_test_ctx_init(UFFD_FEATURE_WP_UNPOPULATED, NULL))
		err("context init failed");

	/* Scratch page-aligned zero page. */
	if (posix_memalign(&area, page_size, page_size))
		err("out of memory");
	zeropage = area;
	bzero(zeropage, page_size);

	/* Held until the first uffd_read_thread starts and releases it. */
	pthread_mutex_lock(&uffd_read_mutex);

	pthread_attr_init(&attr);
	pthread_attr_setstacksize(&attr, 16*1024*1024);

	/* The low bits of the remaining count select this pass's modes. */
	while (bounces--) {
		printf("bounces: %d, mode:", bounces);
		if (bounces & BOUNCE_RANDOM)
			printf(" rnd");
		if (bounces & BOUNCE_RACINGFAULTS)
			printf(" racing");
		if (bounces & BOUNCE_VERIFY)
			printf(" ver");
		if (bounces & BOUNCE_POLL)
			printf(" poll");
		else
			printf(" read");
		printf(", ");
		fflush(stdout);

		/* Non-blocking uffd reads only make sense for poll mode. */
		if (bounces & BOUNCE_POLL)
			fcntl(uffd, F_SETFL, uffd_flags | O_NONBLOCK);
		else
			fcntl(uffd, F_SETFL, uffd_flags & ~O_NONBLOCK);

		/* register */
		if (uffd_register(uffd, area_dst, mem_size,
				  true, test_uffdio_wp, false))
			err("register failure");

		if (area_dst_alias) {
			if (uffd_register(uffd, area_dst_alias, mem_size,
					  true, test_uffdio_wp, false))
				err("register failure alias");
		}

		/*
		 * The madvise done previously isn't enough: some
		 * uffd_thread could have read userfaults (one of
		 * those already resolved by the background thread)
		 * and it may be in the process of calling
		 * UFFDIO_COPY. UFFDIO_COPY will read the zapped
		 * area_src and it would map a zero page in it (of
		 * course such a UFFDIO_COPY is perfectly safe as it'd
		 * return -EEXIST). The problem comes at the next
		 * bounce though: that racing UFFDIO_COPY would
		 * generate zeropages in the area_src, so invalidating
		 * the previous MADV_DONTNEED. Without this additional
		 * MADV_DONTNEED those zeropages leftovers in the
		 * area_src would lead to -EEXIST failure during the
		 * next bounce, effectively leaving a zeropage in the
		 * area_dst.
		 *
		 * Try to comment this out madvise to see the memory
		 * corruption being caught pretty quick.
		 *
		 * khugepaged is also inhibited to collapse THP after
		 * MADV_DONTNEED only after the UFFDIO_REGISTER, so it's
		 * required to MADV_DONTNEED here.
		 */
		uffd_test_ops->release_pages(area_dst);

		uffd_stats_reset(args, nr_cpus);

		/* bounce pass */
		if (stress(args))
			return 1;

		/* Clear all the write protections if there is any */
		if (test_uffdio_wp)
			wp_range(uffd, (unsigned long)area_dst,
				 nr_pages * page_size, false);

		/* unregister */
		if (uffd_unregister(uffd, area_dst, mem_size))
			err("unregister failure");
		if (area_dst_alias) {
			if (uffd_unregister(uffd, area_dst_alias, mem_size))
				err("unregister failure alias");
		}

		/* verification */
		if (bounces & BOUNCE_VERIFY)
			for (nr = 0; nr < nr_pages; nr++)
				if (*area_count(area_dst, nr) != count_verify[nr])
					err("error area_count %llu %llu %lu\n",
					    *area_count(area_src, nr),
					    count_verify[nr], nr);

		/* prepare next bounce */
		swap(area_src, area_dst);

		swap(area_src_alias, area_dst_alias);

		uffd_stats_report(args, nr_cpus);
	}

	return 0;
}
360686a8bb7SPeter Xu
set_test_type(const char * type)361686a8bb7SPeter Xu static void set_test_type(const char *type)
362686a8bb7SPeter Xu {
363686a8bb7SPeter Xu if (!strcmp(type, "anon")) {
364686a8bb7SPeter Xu test_type = TEST_ANON;
365686a8bb7SPeter Xu uffd_test_ops = &anon_uffd_test_ops;
366686a8bb7SPeter Xu } else if (!strcmp(type, "hugetlb")) {
367686a8bb7SPeter Xu test_type = TEST_HUGETLB;
368686a8bb7SPeter Xu uffd_test_ops = &hugetlb_uffd_test_ops;
369686a8bb7SPeter Xu map_shared = true;
3705aec236fSPeter Xu } else if (!strcmp(type, "hugetlb-private")) {
371686a8bb7SPeter Xu test_type = TEST_HUGETLB;
372686a8bb7SPeter Xu uffd_test_ops = &hugetlb_uffd_test_ops;
373686a8bb7SPeter Xu } else if (!strcmp(type, "shmem")) {
374686a8bb7SPeter Xu map_shared = true;
375686a8bb7SPeter Xu test_type = TEST_SHMEM;
376686a8bb7SPeter Xu uffd_test_ops = &shmem_uffd_test_ops;
3775aec236fSPeter Xu } else if (!strcmp(type, "shmem-private")) {
3785aec236fSPeter Xu test_type = TEST_SHMEM;
3795aec236fSPeter Xu uffd_test_ops = &shmem_uffd_test_ops;
380686a8bb7SPeter Xu }
381686a8bb7SPeter Xu }
382686a8bb7SPeter Xu
/*
 * Resolve the <test type> command-line argument: set the test-type
 * globals, derive page_size, and probe the kernel's uffd feature set to
 * decide whether uffd-wp can be exercised.  Exits via err() on an
 * unknown type or if the per-page metadata cannot fit in one page.
 */
static void parse_test_type_arg(const char *raw_type)
{
	uint64_t features = UFFD_API_FEATURES;

	set_test_type(raw_type);

	if (!test_type)
		err("failed to parse test type argument: '%s'", raw_type);

	if (test_type == TEST_HUGETLB)
		page_size = default_huge_page_size();
	else
		page_size = sysconf(_SC_PAGE_SIZE);

	if (!page_size)
		err("Unable to determine page size");
	/*
	 * The mutex plus the two counters (live + verify) must fit in a
	 * single page; area_count(NULL, 0) yields the counter offset.
	 */
	if ((unsigned long) area_count(NULL, 0) + sizeof(unsigned long long) * 2
	    > page_size)
		err("Impossible to run this test");

	/*
	 * Whether we can test certain features depends not just on test type,
	 * but also on whether or not this particular kernel supports the
	 * feature.
	 */

	if (userfaultfd_open(&features))
		err("Userfaultfd open failed");

	/* Only keep uffd-wp enabled if the kernel reports support for it. */
	test_uffdio_wp = test_uffdio_wp &&
		(features & UFFD_FEATURE_PAGEFAULT_FLAG_WP);

	/*
	 * This fd was only opened to probe features; close it here —
	 * presumably uffd_test_ctx_init() reopens one later (confirm in
	 * uffd-common).
	 */
	close(uffd);
	uffd = -1;
}
418686a8bb7SPeter Xu
/*
 * SIGALRM handler: periodically enable the UFFDIO_COPY -EEXIST
 * exercise and re-arm the alarm (fires every ALARM_INTERVAL_SECS).
 */
static void sigalrm(int sig)
{
	if (sig != SIGALRM)
		abort();
	test_uffdio_copy_eexist = true;
	alarm(ALARM_INTERVAL_SECS);
}
426686a8bb7SPeter Xu
main(int argc,char ** argv)427686a8bb7SPeter Xu int main(int argc, char **argv)
428686a8bb7SPeter Xu {
429686a8bb7SPeter Xu size_t bytes;
430686a8bb7SPeter Xu
431686a8bb7SPeter Xu if (argc < 4)
432686a8bb7SPeter Xu usage();
433686a8bb7SPeter Xu
434686a8bb7SPeter Xu if (signal(SIGALRM, sigalrm) == SIG_ERR)
435686a8bb7SPeter Xu err("failed to arm SIGALRM");
436686a8bb7SPeter Xu alarm(ALARM_INTERVAL_SECS);
437686a8bb7SPeter Xu
438686a8bb7SPeter Xu parse_test_type_arg(argv[1]);
439686a8bb7SPeter Xu bytes = atol(argv[2]) * 1024 * 1024;
440686a8bb7SPeter Xu
441686a8bb7SPeter Xu nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
442686a8bb7SPeter Xu
443686a8bb7SPeter Xu nr_pages_per_cpu = bytes / page_size / nr_cpus;
444686a8bb7SPeter Xu if (!nr_pages_per_cpu) {
445686a8bb7SPeter Xu _err("invalid MiB");
446686a8bb7SPeter Xu usage();
447686a8bb7SPeter Xu }
448686a8bb7SPeter Xu
449686a8bb7SPeter Xu bounces = atoi(argv[3]);
450686a8bb7SPeter Xu if (bounces <= 0) {
451686a8bb7SPeter Xu _err("invalid bounces");
452686a8bb7SPeter Xu usage();
453686a8bb7SPeter Xu }
454686a8bb7SPeter Xu nr_pages = nr_pages_per_cpu * nr_cpus;
455686a8bb7SPeter Xu
456686a8bb7SPeter Xu printf("nr_pages: %lu, nr_pages_per_cpu: %lu\n",
457686a8bb7SPeter Xu nr_pages, nr_pages_per_cpu);
458686a8bb7SPeter Xu return userfaultfd_stress();
459686a8bb7SPeter Xu }
460686a8bb7SPeter Xu
461686a8bb7SPeter Xu #else /* __NR_userfaultfd */
462686a8bb7SPeter Xu
463686a8bb7SPeter Xu #warning "missing __NR_userfaultfd definition"
464686a8bb7SPeter Xu
/* Built without userfaultfd syscall support: report the test as skipped. */
int main(void)
{
	printf("skip: Skipping userfaultfd test (missing __NR_userfaultfd)\n");
	return KSFT_SKIP;
}
470686a8bb7SPeter Xu
471686a8bb7SPeter Xu #endif /* __NR_userfaultfd */
472