xref: /openbmc/linux/arch/arm64/kernel/vdso.c (revision be2b81b5)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDSO implementations.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/cache.h>
#include <linux/clocksource.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/time_namespace.h>
#include <linux/timekeeper_internal.h>
#include <linux/vmalloc.h>
#include <vdso/datapage.h>
#include <vdso/helpers.h>
#include <vdso/vsyscall.h>

#include <asm/cacheflush.h>
#include <asm/signal32.h>
#include <asm/vdso.h>

enum vdso_abi {
	VDSO_ABI_AA64,
	VDSO_ABI_AA32,
};

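/*
 * Page offsets within the [vvar] mapping. The ordering here must match
 * the pgoff values handled in vvar_fault() below: the vdso_data page
 * sits at offset 0 and the time namespace page, when in use, at offset 1.
 */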
enum vvar_pages {
	VVAR_DATA_PAGE_OFFSET,
	VVAR_TIMENS_PAGE_OFFSET,
	VVAR_NR_PAGES,
};

struct vdso_abi_info {
	const char *name;
	const char *vdso_code_start;
	const char *vdso_code_end;
	unsigned long vdso_pages;
	/* Data Mapping */
	struct vm_special_mapping *dm;
	/* Code Mapping */
	struct vm_special_mapping *cm;
};

static struct vdso_abi_info vdso_info[] __ro_after_init = {
	[VDSO_ABI_AA64] = {
		.name = "vdso",
		.vdso_code_start = vdso_start,
		.vdso_code_end = vdso_end,
	},
#ifdef CONFIG_COMPAT_VDSO
	[VDSO_ABI_AA32] = {
		.name = "vdso32",
		.vdso_code_start = vdso32_start,
		.vdso_code_end = vdso32_end,
	},
#endif /* CONFIG_COMPAT_VDSO */
};

/*
 * The vDSO data page, shared read-only with userspace through the [vvar]
 * mapping. The union pads the vdso_data array (one element per clocksource
 * base) out to exactly one page.
 */
static union {
	struct vdso_data	data[CS_BASES];
	u8			page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = vdso_data_store.data;

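/*
 * Userspace (e.g. checkpoint/restore tools such as CRIU) may move the
 * vDSO with mremap(). Track the new location in mm->context.vdso so that
 * anything resolving vDSO symbols for this task (such as the signal
 * trampoline) keeps working.
 */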
static int vdso_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	current->mm->context.vdso = (void *)new_vma->vm_start;

	return 0;
}

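/*
 * Validate the linked-in vDSO image for the given ABI and build the page
 * array backing its code mapping. Called once per ABI at boot, before any
 * userspace task can map the vDSO.
 */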
static int __init __vdso_init(enum vdso_abi abi)
{
	int i;
	struct page **vdso_pagelist;
	unsigned long pfn;

	if (memcmp(vdso_info[abi].vdso_code_start, "\177ELF", 4)) {
		pr_err("vDSO is not a valid ELF object!\n");
		return -EINVAL;
	}

	vdso_info[abi].vdso_pages = (
			vdso_info[abi].vdso_code_end -
			vdso_info[abi].vdso_code_start) >>
			PAGE_SHIFT;

	vdso_pagelist = kcalloc(vdso_info[abi].vdso_pages,
				sizeof(struct page *),
				GFP_KERNEL);
	if (vdso_pagelist == NULL)
		return -ENOMEM;

	/* Grab the vDSO code pages. */
	pfn = sym_to_pfn(vdso_info[abi].vdso_code_start);

	for (i = 0; i < vdso_info[abi].vdso_pages; i++)
		vdso_pagelist[i] = pfn_to_page(pfn + i);

	vdso_info[abi].cm->pages = vdso_pagelist;

	return 0;
}

#ifdef CONFIG_TIME_NS
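/*
 * Hook used by the generic time namespace code to locate the vdso_data
 * inside a vvar page when it prepares the namespace-adjusted copy.
 */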
struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
	return (struct vdso_data *)(vvar_page);
}

/*
 * The vvar mapping contains data for a specific time namespace, so when a task
 * changes namespace we must unmap its vvar data for the old namespace.
 * Subsequent faults will map in data for the new namespace.
 *
 * For more details see timens_setup_vdso_data().
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_read_lock(mm);

	for_each_vma(vmi, vma) {
		unsigned long size = vma->vm_end - vma->vm_start;

		if (vma_is_special_mapping(vma, vdso_info[VDSO_ABI_AA64].dm))
			zap_page_range(vma, vma->vm_start, size);
#ifdef CONFIG_COMPAT_VDSO
		if (vma_is_special_mapping(vma, vdso_info[VDSO_ABI_AA32].dm))
			zap_page_range(vma, vma->vm_start, size);
#endif
	}

	mmap_read_unlock(mm);
	return 0;
}
#endif

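/*
 * The [vvar] area is mapped VM_PFNMAP with no pages installed up front;
 * this fault handler populates it on demand. For tasks in a time
 * namespace the two pages are swapped: the namespace-specific page is
 * installed at the offset userspace reads (VVAR_DATA_PAGE_OFFSET), while
 * the real vdso_data moves to VVAR_TIMENS_PAGE_OFFSET. Zapping the range
 * in vdso_join_timens() is therefore enough to switch namespaces - the
 * next fault repopulates with the right pages.
 */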
static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *timens_page = find_timens_vvar_page(vma);
	unsigned long pfn;

	switch (vmf->pgoff) {
	case VVAR_DATA_PAGE_OFFSET:
		if (timens_page)
			pfn = page_to_pfn(timens_page);
		else
			pfn = sym_to_pfn(vdso_data);
		break;
#ifdef CONFIG_TIME_NS
	case VVAR_TIMENS_PAGE_OFFSET:
		/*
		 * If a task belongs to a time namespace then a namespace
		 * specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and
		 * the real VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET
		 * offset.
		 * See also the comment near timens_setup_vdso_data().
		 */
		if (!timens_page)
			return VM_FAULT_SIGBUS;
		pfn = sym_to_pfn(vdso_data);
		break;
#endif /* CONFIG_TIME_NS */
	default:
		return VM_FAULT_SIGBUS;
	}

	return vmf_insert_pfn(vma, vmf->address, pfn);
}

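/*
 * Map the vvar pages followed immediately by the vDSO text:
 *
 *   vdso_base:                       [vvar] (VVAR_NR_PAGES pages, read-only)
 *   vdso_base + VVAR_NR_PAGES pages: [vdso] (vdso_pages pages, read/exec)
 *
 * mm->context.vdso points at the start of the text mapping. The text is
 * mapped with BTI guarded-page semantics when the CPU supports it.
 */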
static int __setup_additional_pages(enum vdso_abi abi,
				    struct mm_struct *mm,
				    struct linux_binprm *bprm,
				    int uses_interp)
{
	unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
	unsigned long gp_flags = 0;
	void *ret;

	BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);

	vdso_text_len = vdso_info[abi].vdso_pages << PAGE_SHIFT;
	/* Reserve room for the vvar pages in front of the vDSO text. */
	vdso_mapping_len = vdso_text_len + VVAR_NR_PAGES * PAGE_SIZE;

	vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		ret = ERR_PTR(vdso_base);
		goto up_fail;
	}

	ret = _install_special_mapping(mm, vdso_base, VVAR_NR_PAGES * PAGE_SIZE,
				       VM_READ|VM_MAYREAD|VM_PFNMAP,
				       vdso_info[abi].dm);
	if (IS_ERR(ret))
		goto up_fail;

	if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) && system_supports_bti())
		gp_flags = VM_ARM64_BTI;

	vdso_base += VVAR_NR_PAGES * PAGE_SIZE;
	mm->context.vdso = (void *)vdso_base;
	ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
				       VM_READ|VM_EXEC|gp_flags|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       vdso_info[abi].cm);
	if (IS_ERR(ret))
		goto up_fail;

	return 0;

up_fail:
	mm->context.vdso = NULL;
	return PTR_ERR(ret);
}


#ifdef CONFIG_COMPAT
/*
 * Create and map the special pages for AArch32 tasks: the kuser helper
 * vectors page, the signal return trampoline page and, when enabled, the
 * compat vDSO.
 */
enum aarch32_map {
	AA32_MAP_VECTORS, /* kuser helpers */
	AA32_MAP_SIGPAGE,
	AA32_MAP_VVAR,
	AA32_MAP_VDSO,
};

static struct page *aarch32_vectors_page __ro_after_init;
static struct page *aarch32_sig_page __ro_after_init;

static int aarch32_sigpage_mremap(const struct vm_special_mapping *sm,
				  struct vm_area_struct *new_vma)
{
	current->mm->context.sigpage = (void *)new_vma->vm_start;

	return 0;
}

static struct vm_special_mapping aarch32_vdso_maps[] = {
	[AA32_MAP_VECTORS] = {
		.name	= "[vectors]", /* ABI */
		.pages	= &aarch32_vectors_page,
	},
	[AA32_MAP_SIGPAGE] = {
		.name	= "[sigpage]", /* ABI */
		.pages	= &aarch32_sig_page,
		.mremap	= aarch32_sigpage_mremap,
	},
	[AA32_MAP_VVAR] = {
		.name = "[vvar]",
		.fault = vvar_fault,
	},
	[AA32_MAP_VDSO] = {
		.name = "[vdso]",
		.mremap = vdso_mremap,
	},
};

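/*
 * The kuser helpers are copied to the *end* of the vectors page so that,
 * once the page is mapped at AARCH32_VECTORS_BASE (0xffff0000), they land
 * at the fixed addresses just below 0xffff1000 that the AArch32 kuser
 * ABI promises (e.g. the cmpxchg helper at 0xffff0fc0).
 */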
static int aarch32_alloc_kuser_vdso_page(void)
{
	extern char __kuser_helper_start[], __kuser_helper_end[];
	int kuser_sz = __kuser_helper_end - __kuser_helper_start;
	unsigned long vdso_page;

	if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
		return 0;

	vdso_page = get_zeroed_page(GFP_KERNEL);
	if (!vdso_page)
		return -ENOMEM;

	memcpy((void *)(vdso_page + 0x1000 - kuser_sz), __kuser_helper_start,
	       kuser_sz);
	aarch32_vectors_page = virt_to_page(vdso_page);
	return 0;
}

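/*
 * Fill pattern for the unused part of the sigpage. 0xe7fddef1 lies in the
 * AArch32 permanently-undefined (UDF) instruction space, so a stray jump
 * into the poisoned area faults instead of executing whatever happens to
 * be there.
 */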
#define COMPAT_SIGPAGE_POISON_WORD	0xe7fddef1
static int aarch32_alloc_sigpage(void)
{
	extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
	int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
	__le32 poison = cpu_to_le32(COMPAT_SIGPAGE_POISON_WORD);
	void *sigpage;

	sigpage = (void *)__get_free_page(GFP_KERNEL);
	if (!sigpage)
		return -ENOMEM;

	memset32(sigpage, (__force u32)poison, PAGE_SIZE / sizeof(poison));
	memcpy(sigpage, __aarch32_sigret_code_start, sigret_sz);
	aarch32_sig_page = virt_to_page(sigpage);
	return 0;
}

static int __init __aarch32_alloc_vdso_pages(void)
{
	if (!IS_ENABLED(CONFIG_COMPAT_VDSO))
		return 0;

	vdso_info[VDSO_ABI_AA32].dm = &aarch32_vdso_maps[AA32_MAP_VVAR];
	vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_maps[AA32_MAP_VDSO];

	return __vdso_init(VDSO_ABI_AA32);
}

static int __init aarch32_alloc_vdso_pages(void)
{
	int ret;

	ret = __aarch32_alloc_vdso_pages();
	if (ret)
		return ret;

	ret = aarch32_alloc_sigpage();
	if (ret)
		return ret;

	return aarch32_alloc_kuser_vdso_page();
}
arch_initcall(aarch32_alloc_vdso_pages);

static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
{
	void *ret;

	if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
		return 0;

	/*
	 * Avoid VM_MAYWRITE for compatibility with arch/arm/, where it's
	 * not safe to CoW the page containing the CPU exception vectors.
	 */
	ret = _install_special_mapping(mm, AARCH32_VECTORS_BASE, PAGE_SIZE,
				       VM_READ | VM_EXEC |
				       VM_MAYREAD | VM_MAYEXEC,
				       &aarch32_vdso_maps[AA32_MAP_VECTORS]);

	return PTR_ERR_OR_ZERO(ret);
}

static int aarch32_sigreturn_setup(struct mm_struct *mm)
{
	unsigned long addr;
	void *ret;

	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = ERR_PTR(addr);
		goto out;
	}

	/*
	 * VM_MAYWRITE is required to allow gdb to Copy-on-Write and
	 * set breakpoints.
	 */
	ret = _install_special_mapping(mm, addr, PAGE_SIZE,
				       VM_READ | VM_EXEC | VM_MAYREAD |
				       VM_MAYWRITE | VM_MAYEXEC,
				       &aarch32_vdso_maps[AA32_MAP_SIGPAGE]);
	if (IS_ERR(ret))
		goto out;

	mm->context.sigpage = (void *)addr;

out:
	return PTR_ERR_OR_ZERO(ret);
}

int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = aarch32_kuser_helpers_setup(mm);
	if (ret)
		goto out;

	if (IS_ENABLED(CONFIG_COMPAT_VDSO)) {
		ret = __setup_additional_pages(VDSO_ABI_AA32, mm, bprm,
					       uses_interp);
		if (ret)
			goto out;
	}

	ret = aarch32_sigreturn_setup(mm);
out:
	mmap_write_unlock(mm);
	return ret;
}
#endif /* CONFIG_COMPAT */

enum aarch64_map {
	AA64_MAP_VVAR,
	AA64_MAP_VDSO,
};

static struct vm_special_mapping aarch64_vdso_maps[] __ro_after_init = {
	[AA64_MAP_VVAR] = {
		.name	= "[vvar]",
		.fault = vvar_fault,
	},
	[AA64_MAP_VDSO] = {
		.name	= "[vdso]",
		.mremap = vdso_mremap,
	},
};

static int __init vdso_init(void)
{
	vdso_info[VDSO_ABI_AA64].dm = &aarch64_vdso_maps[AA64_MAP_VVAR];
	vdso_info[VDSO_ABI_AA64].cm = &aarch64_vdso_maps[AA64_MAP_VDSO];

	return __vdso_init(VDSO_ABI_AA64);
}
arch_initcall(vdso_init);

int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = __setup_additional_pages(VDSO_ABI_AA64, mm, bprm, uses_interp);
	mmap_write_unlock(mm);

	return ret;
}
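
/*
 * For reference, after a successful exec a 64-bit task typically shows
 * the two special mappings back to back in /proc/<pid>/maps, along the
 * lines of (addresses are illustrative only):
 *
 *   ffff81a60000-ffff81a62000 r--p 00000000 00:00 0    [vvar]
 *   ffff81a62000-ffff81a64000 r-xp 00000000 00:00 0    [vdso]
 */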