xref: /openbmc/linux/arch/arm64/kernel/vdso.c (revision 453431a5)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDSO implementations.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/cache.h>
#include <linux/clocksource.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/time_namespace.h>
#include <linux/timekeeper_internal.h>
#include <linux/vmalloc.h>
#include <vdso/datapage.h>
#include <vdso/helpers.h>
#include <vdso/vsyscall.h>

#include <asm/cacheflush.h>
#include <asm/signal32.h>
#include <asm/vdso.h>

extern char vdso_start[], vdso_end[];
#ifdef CONFIG_COMPAT_VDSO
extern char vdso32_start[], vdso32_end[];
#endif /* CONFIG_COMPAT_VDSO */

enum vdso_abi {
	VDSO_ABI_AA64,
#ifdef CONFIG_COMPAT_VDSO
	VDSO_ABI_AA32,
#endif /* CONFIG_COMPAT_VDSO */
};

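/*
 * Layout of the vvar region, in PAGE_SIZE units. Userspace normally sees
 * the vDSO data page at offset 0; for a task in a time namespace, the
 * namespace-specific page is mapped there instead and the real data page
 * moves to the timens offset (see vvar_fault() below).
 */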
enum vvar_pages {
	VVAR_DATA_PAGE_OFFSET,
	VVAR_TIMENS_PAGE_OFFSET,
	VVAR_NR_PAGES,
};

struct vdso_abi_info {
	const char *name;
	const char *vdso_code_start;
	const char *vdso_code_end;
	unsigned long vdso_pages;
	/* Data Mapping */
	struct vm_special_mapping *dm;
	/* Code Mapping */
	struct vm_special_mapping *cm;
};

static struct vdso_abi_info vdso_info[] __ro_after_init = {
	[VDSO_ABI_AA64] = {
		.name = "vdso",
		.vdso_code_start = vdso_start,
		.vdso_code_end = vdso_end,
	},
#ifdef CONFIG_COMPAT_VDSO
	[VDSO_ABI_AA32] = {
		.name = "vdso32",
		.vdso_code_start = vdso32_start,
		.vdso_code_end = vdso32_end,
	},
#endif /* CONFIG_COMPAT_VDSO */
};

/*
 * The vDSO data page: one struct vdso_data per clocksource base, packed
 * into a single page that is shared read-only with userspace.
 */
static union {
	struct vdso_data	data[CS_BASES];
	u8			page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = vdso_data_store.data;

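/*
 * mremap() callback for the code mapping: the vDSO text may move but must
 * not be resized. On success, record the new base in mm->context.vdso,
 * which is also used to locate the signal return trampoline.
 */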
static int __vdso_remap(enum vdso_abi abi,
			const struct vm_special_mapping *sm,
			struct vm_area_struct *new_vma)
{
	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
	unsigned long vdso_size = vdso_info[abi].vdso_code_end -
				  vdso_info[abi].vdso_code_start;

	if (vdso_size != new_size)
		return -EINVAL;

	current->mm->context.vdso = (void *)new_vma->vm_start;

	return 0;
}

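/*
 * One-time initialisation for an ABI: sanity-check the ELF magic of the
 * embedded vDSO image, then build the page array backing the code
 * mapping. Called via arch_initcall() for each ABI at boot.
 */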
static int __vdso_init(enum vdso_abi abi)
{
	int i;
	struct page **vdso_pagelist;
	unsigned long pfn;

	if (memcmp(vdso_info[abi].vdso_code_start, "\177ELF", 4)) {
		pr_err("vDSO is not a valid ELF object!\n");
		return -EINVAL;
	}

	vdso_info[abi].vdso_pages = (
			vdso_info[abi].vdso_code_end -
			vdso_info[abi].vdso_code_start) >>
			PAGE_SHIFT;

	vdso_pagelist = kcalloc(vdso_info[abi].vdso_pages,
				sizeof(struct page *),
				GFP_KERNEL);
	if (vdso_pagelist == NULL)
		return -ENOMEM;

	/* Grab the vDSO code pages. */
	pfn = sym_to_pfn(vdso_info[abi].vdso_code_start);

	for (i = 0; i < vdso_info[abi].vdso_pages; i++)
		vdso_pagelist[i] = pfn_to_page(pfn + i);

	vdso_info[abi].cm->pages = vdso_pagelist;

	return 0;
}

#ifdef CONFIG_TIME_NS
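/*
 * On arm64 the vvar data page holds the vdso_data directly, so the
 * generic time namespace code can use the page address as-is.
 */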
struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
	return (struct vdso_data *)(vvar_page);
}

/*
 * The vvar mapping contains data for a specific time namespace, so when a task
 * changes namespace we must unmap its vvar data for the old namespace.
 * Subsequent faults will map in data for the new namespace.
 *
 * For more details see timens_setup_vdso_data().
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma;

	mmap_read_lock(mm);

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		unsigned long size = vma->vm_end - vma->vm_start;

		if (vma_is_special_mapping(vma, vdso_info[VDSO_ABI_AA64].dm))
			zap_page_range(vma, vma->vm_start, size);
#ifdef CONFIG_COMPAT_VDSO
		if (vma_is_special_mapping(vma, vdso_info[VDSO_ABI_AA32].dm))
			zap_page_range(vma, vma->vm_start, size);
#endif
	}

	mmap_read_unlock(mm);
	return 0;
}

static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
	if (likely(vma->vm_mm == current->mm))
		return current->nsproxy->time_ns->vvar_page;

	/*
	 * VM_PFNMAP | VM_IO protect the .fault() handler from being called
	 * through interfaces like /proc/$pid/mem or
	 * process_vm_{readv,writev}() as long as there's no .access()
	 * in special_mapping_vmops.
	 * For more details, see check_vma_flags() and __access_remote_vm().
	 */
	WARN(1, "vvar_page accessed remotely");

	return NULL;
}
#else
static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
	return NULL;
}
#endif

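/*
 * Fault handler for the data mapping. For a task in the init time
 * namespace only the data page offset is valid; for a task in another
 * time namespace, the namespace page and the real data page swap places
 * (see the comment above enum vvar_pages).
 */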
static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *timens_page = find_timens_vvar_page(vma);
	unsigned long pfn;

	switch (vmf->pgoff) {
	case VVAR_DATA_PAGE_OFFSET:
		if (timens_page)
			pfn = page_to_pfn(timens_page);
		else
			pfn = sym_to_pfn(vdso_data);
		break;
#ifdef CONFIG_TIME_NS
	case VVAR_TIMENS_PAGE_OFFSET:
		/*
		 * If a task belongs to a time namespace then a
		 * namespace-specific VVAR page is mapped at the
		 * VVAR_DATA_PAGE_OFFSET and the real VVAR page is
		 * mapped at the VVAR_TIMENS_PAGE_OFFSET.
		 * See also the comment near timens_setup_vdso_data().
		 */
		if (!timens_page)
			return VM_FAULT_SIGBUS;
		pfn = sym_to_pfn(vdso_data);
		break;
#endif /* CONFIG_TIME_NS */
	default:
		return VM_FAULT_SIGBUS;
	}

	return vmf_insert_pfn(vma, vmf->address, pfn);
}

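/*
 * mremap() callback for the data mapping: like the code mapping, the
 * vvar area may move but must keep its exact size.
 */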
static int vvar_mremap(const struct vm_special_mapping *sm,
		       struct vm_area_struct *new_vma)
{
	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;

	if (new_size != VVAR_NR_PAGES * PAGE_SIZE)
		return -EINVAL;

	return 0;
}

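/*
 * Map the vvar pages and the vDSO text into a new mm as one contiguous
 * block: the vvar pages come first, immediately followed by the vDSO
 * text, with mm->context.vdso pointing at the start of the text.
 */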
static int __setup_additional_pages(enum vdso_abi abi,
				    struct mm_struct *mm,
				    struct linux_binprm *bprm,
				    int uses_interp)
{
	unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
	unsigned long gp_flags = 0;
	void *ret;

	BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);

	vdso_text_len = vdso_info[abi].vdso_pages << PAGE_SHIFT;
	/* Be sure to map the vvar pages (data and timens) as well */
	vdso_mapping_len = vdso_text_len + VVAR_NR_PAGES * PAGE_SIZE;

	vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		ret = ERR_PTR(vdso_base);
		goto up_fail;
	}

	ret = _install_special_mapping(mm, vdso_base, VVAR_NR_PAGES * PAGE_SIZE,
				       VM_READ|VM_MAYREAD|VM_PFNMAP,
				       vdso_info[abi].dm);
	if (IS_ERR(ret))
		goto up_fail;

	if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) && system_supports_bti())
		gp_flags = VM_ARM64_BTI;

	vdso_base += VVAR_NR_PAGES * PAGE_SIZE;
	mm->context.vdso = (void *)vdso_base;
	ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
				       VM_READ|VM_EXEC|gp_flags|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       vdso_info[abi].cm);
	if (IS_ERR(ret))
		goto up_fail;

	return 0;

up_fail:
	mm->context.vdso = NULL;
	return PTR_ERR(ret);
}

#ifdef CONFIG_COMPAT
/*
 * Create and map the vectors page for AArch32 tasks.
 */
#ifdef CONFIG_COMPAT_VDSO
static int aarch32_vdso_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	return __vdso_remap(VDSO_ABI_AA32, sm, new_vma);
}
#endif /* CONFIG_COMPAT_VDSO */

enum aarch32_map {
	AA32_MAP_VECTORS, /* kuser helpers */
#ifdef CONFIG_COMPAT_VDSO
	AA32_MAP_VVAR,
	AA32_MAP_VDSO,
#endif
	AA32_MAP_SIGPAGE
};

static struct page *aarch32_vectors_page __ro_after_init;
static struct page *aarch32_sig_page __ro_after_init;

static struct vm_special_mapping aarch32_vdso_maps[] = {
	[AA32_MAP_VECTORS] = {
		.name	= "[vectors]", /* ABI */
		.pages	= &aarch32_vectors_page,
	},
#ifdef CONFIG_COMPAT_VDSO
	[AA32_MAP_VVAR] = {
		.name = "[vvar]",
		.fault = vvar_fault,
		.mremap = vvar_mremap,
	},
	[AA32_MAP_VDSO] = {
		.name = "[vdso]",
		.mremap = aarch32_vdso_mremap,
	},
#endif /* CONFIG_COMPAT_VDSO */
	[AA32_MAP_SIGPAGE] = {
		.name	= "[sigpage]", /* ABI */
		.pages	= &aarch32_sig_page,
	},
};

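/*
 * The kuser helpers are copied to the top of the page so that they end at
 * the fixed addresses the AArch32 ABI promises (the page is later mapped
 * at AARCH32_VECTORS_BASE by aarch32_kuser_helpers_setup()).
 */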
static int aarch32_alloc_kuser_vdso_page(void)
{
	extern char __kuser_helper_start[], __kuser_helper_end[];
	int kuser_sz = __kuser_helper_end - __kuser_helper_start;
	unsigned long vdso_page;

	if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
		return 0;

	vdso_page = get_zeroed_page(GFP_ATOMIC);
	if (!vdso_page)
		return -ENOMEM;

	memcpy((void *)(vdso_page + 0x1000 - kuser_sz), __kuser_helper_start,
	       kuser_sz);
	aarch32_vectors_page = virt_to_page(vdso_page);
	flush_dcache_page(aarch32_vectors_page);
	return 0;
}

static int aarch32_alloc_sigpage(void)
{
	extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
	int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
	unsigned long sigpage;

	sigpage = get_zeroed_page(GFP_ATOMIC);
	if (!sigpage)
		return -ENOMEM;

	memcpy((void *)sigpage, __aarch32_sigret_code_start, sigret_sz);
	aarch32_sig_page = virt_to_page(sigpage);
	flush_dcache_page(aarch32_sig_page);
	return 0;
}

#ifdef CONFIG_COMPAT_VDSO
static int __aarch32_alloc_vdso_pages(void)
{
	vdso_info[VDSO_ABI_AA32].dm = &aarch32_vdso_maps[AA32_MAP_VVAR];
	vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_maps[AA32_MAP_VDSO];

	return __vdso_init(VDSO_ABI_AA32);
}
#endif /* CONFIG_COMPAT_VDSO */

static int __init aarch32_alloc_vdso_pages(void)
{
	int ret;

#ifdef CONFIG_COMPAT_VDSO
	ret = __aarch32_alloc_vdso_pages();
	if (ret)
		return ret;
#endif

	ret = aarch32_alloc_sigpage();
	if (ret)
		return ret;

	return aarch32_alloc_kuser_vdso_page();
}
arch_initcall(aarch32_alloc_vdso_pages);

static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
{
	void *ret;

	if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
		return 0;

	/*
	 * Avoid VM_MAYWRITE for compatibility with arch/arm/, where it's
	 * not safe to CoW the page containing the CPU exception vectors.
	 */
	ret = _install_special_mapping(mm, AARCH32_VECTORS_BASE, PAGE_SIZE,
				       VM_READ | VM_EXEC |
				       VM_MAYREAD | VM_MAYEXEC,
				       &aarch32_vdso_maps[AA32_MAP_VECTORS]);

	return PTR_ERR_OR_ZERO(ret);
}

static int aarch32_sigreturn_setup(struct mm_struct *mm)
{
	unsigned long addr;
	void *ret;

	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = ERR_PTR(addr);
		goto out;
	}

	/*
	 * VM_MAYWRITE is required to allow gdb to Copy-on-Write the
	 * sigpage and set breakpoints in it.
	 */
	ret = _install_special_mapping(mm, addr, PAGE_SIZE,
				       VM_READ | VM_EXEC | VM_MAYREAD |
				       VM_MAYWRITE | VM_MAYEXEC,
				       &aarch32_vdso_maps[AA32_MAP_SIGPAGE]);
	if (IS_ERR(ret))
		goto out;

	mm->context.sigpage = (void *)addr;

out:
	return PTR_ERR_OR_ZERO(ret);
}

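/*
 * exec-time setup for 32-bit tasks: the kuser helpers page, the compat
 * vDSO (when built), and the sigreturn trampoline page, all installed
 * under the mmap write lock.
 */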
int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = aarch32_kuser_helpers_setup(mm);
	if (ret)
		goto out;

#ifdef CONFIG_COMPAT_VDSO
	ret = __setup_additional_pages(VDSO_ABI_AA32,
				       mm,
				       bprm,
				       uses_interp);
	if (ret)
		goto out;
#endif /* CONFIG_COMPAT_VDSO */

	ret = aarch32_sigreturn_setup(mm);
out:
	mmap_write_unlock(mm);
	return ret;
}
#endif /* CONFIG_COMPAT */

static int vdso_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	return __vdso_remap(VDSO_ABI_AA64, sm, new_vma);
}

enum aarch64_map {
	AA64_MAP_VVAR,
	AA64_MAP_VDSO,
};

static struct vm_special_mapping aarch64_vdso_maps[] __ro_after_init = {
	[AA64_MAP_VVAR] = {
		.name	= "[vvar]",
		.fault = vvar_fault,
		.mremap = vvar_mremap,
	},
	[AA64_MAP_VDSO] = {
		.name	= "[vdso]",
		.mremap = vdso_mremap,
	},
};

static int __init vdso_init(void)
{
	vdso_info[VDSO_ABI_AA64].dm = &aarch64_vdso_maps[AA64_MAP_VVAR];
	vdso_info[VDSO_ABI_AA64].cm = &aarch64_vdso_maps[AA64_MAP_VDSO];

	return __vdso_init(VDSO_ABI_AA64);
}
arch_initcall(vdso_init);

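/*
 * Called by the ELF loader at exec time to map the vDSO for a 64-bit
 * task. Userspace finds the resulting mapping via the AT_SYSINFO_EHDR
 * auxiliary vector entry.
 */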
int arch_setup_additional_pages(struct linux_binprm *bprm,
				int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = __setup_additional_pages(VDSO_ABI_AA64,
				       mm,
				       bprm,
				       uses_interp);

	mmap_write_unlock(mm);

	return ret;
}
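
/*
 * A minimal userspace sketch (not part of this file) showing how a
 * program can find the vDSO mapped above; AT_SYSINFO_EHDR is the
 * standard auxiliary vector entry pointing at the vDSO's ELF header:
 *
 *	#include <stdio.h>
 *	#include <sys/auxv.h>
 *
 *	int main(void)
 *	{
 *		unsigned long base = getauxval(AT_SYSINFO_EHDR);
 *
 *		if (base)
 *			printf("[vdso] mapped at 0x%lx\n", base);
 *		return 0;
 *	}
 *
 * The same address shows up as the "[vdso]" line in /proc/self/maps,
 * with the "[vvar]" mapping immediately below it.
 */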