// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Authors: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
 * Authors: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 */
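
/*
 * This test exercises mmap() address-hint handling around the "high
 * address switch" boundary (ADDR_SWITCH_HINT): without MAP_FIXED, the
 * kernel only places a mapping above the boundary when the caller
 * passes a hint address beyond it. Run with --run-hugetlb to also
 * exercise the hugetlb test cases.
 */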

#include <stdio.h>
#include <sys/mman.h>
#include <string.h>
#include <unistd.h>	/* getpagesize() */

#include "../kselftest.h"

#ifdef __powerpc64__
#define PAGE_SIZE	(64 << 10)
/*
 * This will work with 16M and 2M hugepage size
 */
#define HUGETLB_SIZE	(16 << 20)
#elif __aarch64__
/*
 * The default hugepage size for 64k base pagesize
 * is 512MB.
 */
#define PAGE_SIZE	(64 << 10)
#define HUGETLB_SIZE	(512 << 20)
#else
#define PAGE_SIZE	(4 << 10)
#define HUGETLB_SIZE	(2 << 20)
#endif


/*
 * The hint addr value is used to allocate addresses
 * beyond the high address switch boundary.
 */

#define ADDR_MARK_128TB	(1UL << 47)
#define ADDR_MARK_256TB	(1UL << 48)

#define HIGH_ADDR_128TB	((void *) (1UL << 48))
#define HIGH_ADDR_256TB	((void *) (1UL << 49))

#define LOW_ADDR	((void *) (1UL << 30))

#ifdef __aarch64__
#define ADDR_SWITCH_HINT ADDR_MARK_256TB
#define HIGH_ADDR	 HIGH_ADDR_256TB
#else
#define ADDR_SWITCH_HINT ADDR_MARK_128TB
#define HIGH_ADDR	 HIGH_ADDR_128TB
#endif
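
/*
 * ADDR_SWITCH_HINT marks the default upper limit of the mmap address
 * space: 128TB (47-bit) on x86_64 and powerpc64, 256TB (48-bit) on
 * arm64 with 64k base pages. The kernel only hands out mappings above
 * this mark when userspace explicitly hints at an address beyond it,
 * which is what the HIGH_ADDR test cases below rely on.
 */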

struct testcase {
	void *addr;
	unsigned long size;
	unsigned long flags;
	const char *msg;
	unsigned int low_addr_required:1;	/* mapping must land below ADDR_SWITCH_HINT */
	unsigned int keep_mapped:1;		/* skip munmap() so later cases see this mapping */
};
static struct testcase testcases[] = {
	{
		/*
		 * If stack is moved, we could possibly allocate
		 * this at the requested address.
		 */
		.addr = ((void *)(ADDR_SWITCH_HINT - PAGE_SIZE)),
		.size = PAGE_SIZE,
		.flags = MAP_PRIVATE | MAP_ANONYMOUS,
		.msg = "mmap(ADDR_SWITCH_HINT - PAGE_SIZE, PAGE_SIZE)",
		.low_addr_required = 1,
	},
	{
		/*
		 * Unless MAP_FIXED is specified, allocation based on hint
		 * addr is never at requested address or above it, which is
		 * beyond high address switch boundary in this case. Instead,
		 * a suitable allocation is found in lower address space.
		 */
		.addr = ((void *)(ADDR_SWITCH_HINT - PAGE_SIZE)),
		.size = 2 * PAGE_SIZE,
		.flags = MAP_PRIVATE | MAP_ANONYMOUS,
		.msg = "mmap(ADDR_SWITCH_HINT - PAGE_SIZE, (2 * PAGE_SIZE))",
		.low_addr_required = 1,
	},
	{
		/*
		 * Exact mapping at high address switch boundary, should
		 * be obtained even without MAP_FIXED as area is free.
		 */
		.addr = ((void *)(ADDR_SWITCH_HINT)),
		.size = PAGE_SIZE,
		.flags = MAP_PRIVATE | MAP_ANONYMOUS,
		.msg = "mmap(ADDR_SWITCH_HINT, PAGE_SIZE)",
		.keep_mapped = 1,
	},
	{
		.addr = (void *)(ADDR_SWITCH_HINT),
		.size = 2 * PAGE_SIZE,
		.flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
		.msg = "mmap(ADDR_SWITCH_HINT, 2 * PAGE_SIZE, MAP_FIXED)",
	},
	{
		.addr = NULL,
		.size = 2 * PAGE_SIZE,
		.flags = MAP_PRIVATE | MAP_ANONYMOUS,
		.msg = "mmap(NULL)",
		.low_addr_required = 1,
	},
	{
		.addr = LOW_ADDR,
		.size = 2 * PAGE_SIZE,
		.flags = MAP_PRIVATE | MAP_ANONYMOUS,
		.msg = "mmap(LOW_ADDR)",
		.low_addr_required = 1,
	},
	{
		.addr = HIGH_ADDR,
		.size = 2 * PAGE_SIZE,
		.flags = MAP_PRIVATE | MAP_ANONYMOUS,
		.msg = "mmap(HIGH_ADDR)",
		.keep_mapped = 1,
	},
	{
		.addr = HIGH_ADDR,
		.size = 2 * PAGE_SIZE,
		.flags = MAP_PRIVATE | MAP_ANONYMOUS,
		.msg = "mmap(HIGH_ADDR) again",
		.keep_mapped = 1,
	},
	{
		.addr = HIGH_ADDR,
		.size = 2 * PAGE_SIZE,
		.flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
		.msg = "mmap(HIGH_ADDR, MAP_FIXED)",
	},
	{
		.addr = (void *) -1,
		.size = 2 * PAGE_SIZE,
		.flags = MAP_PRIVATE | MAP_ANONYMOUS,
		.msg = "mmap(-1)",
		.keep_mapped = 1,
	},
	{
		.addr = (void *) -1,
		.size = 2 * PAGE_SIZE,
		.flags = MAP_PRIVATE | MAP_ANONYMOUS,
		.msg = "mmap(-1) again",
	},
	{
		.addr = ((void *)(ADDR_SWITCH_HINT - PAGE_SIZE)),
		.size = PAGE_SIZE,
		.flags = MAP_PRIVATE | MAP_ANONYMOUS,
		.msg = "mmap(ADDR_SWITCH_HINT - PAGE_SIZE, PAGE_SIZE)",
		.low_addr_required = 1,
	},
	{
		.addr = (void *)(ADDR_SWITCH_HINT - PAGE_SIZE),
		.size = 2 * PAGE_SIZE,
		.flags = MAP_PRIVATE | MAP_ANONYMOUS,
		.msg = "mmap(ADDR_SWITCH_HINT - PAGE_SIZE, 2 * PAGE_SIZE)",
		.low_addr_required = 1,
		.keep_mapped = 1,
	},
	{
		.addr = (void *)(ADDR_SWITCH_HINT - PAGE_SIZE / 2),
		.size = 2 * PAGE_SIZE,
		.flags = MAP_PRIVATE | MAP_ANONYMOUS,
		.msg = "mmap(ADDR_SWITCH_HINT - PAGE_SIZE/2, 2 * PAGE_SIZE)",
		.low_addr_required = 1,
		.keep_mapped = 1,
	},
	{
		.addr = ((void *)(ADDR_SWITCH_HINT)),
		.size = PAGE_SIZE,
		.flags = MAP_PRIVATE | MAP_ANONYMOUS,
		.msg = "mmap(ADDR_SWITCH_HINT, PAGE_SIZE)",
	},
	{
		.addr = (void *)(ADDR_SWITCH_HINT),
		.size = 2 * PAGE_SIZE,
		.flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
		.msg = "mmap(ADDR_SWITCH_HINT, 2 * PAGE_SIZE, MAP_FIXED)",
	},
};

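/*
 * The hugetlb cases assume hugepages of the default size have already
 * been allocated to the hugepage pool (e.g. via /proc/sys/vm/nr_hugepages);
 * without that, the MAP_HUGETLB mmap() calls below fail with ENOMEM.
 */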
static struct testcase hugetlb_testcases[] = {
	{
		.addr = NULL,
		.size = HUGETLB_SIZE,
		.flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
		.msg = "mmap(NULL, MAP_HUGETLB)",
		.low_addr_required = 1,
	},
	{
		.addr = LOW_ADDR,
		.size = HUGETLB_SIZE,
		.flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
		.msg = "mmap(LOW_ADDR, MAP_HUGETLB)",
		.low_addr_required = 1,
	},
	{
		.addr = HIGH_ADDR,
		.size = HUGETLB_SIZE,
		.flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
		.msg = "mmap(HIGH_ADDR, MAP_HUGETLB)",
		.keep_mapped = 1,
	},
	{
		.addr = HIGH_ADDR,
		.size = HUGETLB_SIZE,
		.flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
		.msg = "mmap(HIGH_ADDR, MAP_HUGETLB) again",
		.keep_mapped = 1,
	},
	{
		.addr = HIGH_ADDR,
		.size = HUGETLB_SIZE,
		.flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
		.msg = "mmap(HIGH_ADDR, MAP_FIXED | MAP_HUGETLB)",
	},
	{
		.addr = (void *) -1,
		.size = HUGETLB_SIZE,
		.flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
		.msg = "mmap(-1, MAP_HUGETLB)",
		.keep_mapped = 1,
	},
	{
		.addr = (void *) -1,
		.size = HUGETLB_SIZE,
		.flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
		.msg = "mmap(-1, MAP_HUGETLB) again",
	},
	{
		.addr = (void *)(ADDR_SWITCH_HINT - PAGE_SIZE),
		.size = 2 * HUGETLB_SIZE,
		.flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
		.msg = "mmap(ADDR_SWITCH_HINT - PAGE_SIZE, 2*HUGETLB_SIZE, MAP_HUGETLB)",
		.low_addr_required = 1,
		.keep_mapped = 1,
	},
	{
		.addr = (void *)(ADDR_SWITCH_HINT),
		.size = 2 * HUGETLB_SIZE,
		.flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
		.msg = "mmap(ADDR_SWITCH_HINT, 2*HUGETLB_SIZE, MAP_FIXED | MAP_HUGETLB)",
	},
};

static int run_test(struct testcase *test, int count)
{
	void *p;
	int i, ret = KSFT_PASS;

	for (i = 0; i < count; i++) {
		struct testcase *t = test + i;

		p = mmap(t->addr, t->size, PROT_READ | PROT_WRITE, t->flags, -1, 0);

		printf("%s: %p - ", t->msg, p);

		if (p == MAP_FAILED) {
			printf("FAILED\n");
			ret = KSFT_FAIL;
			continue;
		}

		if (t->low_addr_required && p >= (void *)(ADDR_SWITCH_HINT)) {
			printf("FAILED\n");
			ret = KSFT_FAIL;
		} else {
			/*
			 * Dereference the returned address so that we catch
			 * bugs in page fault handling.
			 */
			memset(p, 0, t->size);
			printf("OK\n");
		}
		if (!t->keep_mapped)
			munmap(p, t->size);
	}

	return ret;
}

static int supported_arch(void)
{
#if defined(__powerpc64__)
	return 1;
#elif defined(__x86_64__)
	return 1;
#elif defined(__aarch64__)
	/* The constants above assume a 64k base page size on arm64 */
	return getpagesize() == PAGE_SIZE;
#else
	return 0;
#endif
}

int main(int argc, char **argv)
{
	int ret;

	if (!supported_arch())
		return KSFT_SKIP;

	ret = run_test(testcases, ARRAY_SIZE(testcases));
	if (argc == 2 && !strcmp(argv[1], "--run-hugetlb"))
		ret = run_test(hugetlb_testcases, ARRAY_SIZE(hugetlb_testcases));
	return ret;
}