xref: /openbmc/linux/arch/arm64/kernel/vdso.c (revision 6e554abd)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDSO implementations.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/cache.h>
#include <linux/clocksource.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/time_namespace.h>
#include <linux/timekeeper_internal.h>
#include <linux/vmalloc.h>
#include <vdso/datapage.h>
#include <vdso/helpers.h>
#include <vdso/vsyscall.h>

#include <asm/cacheflush.h>
#include <asm/signal32.h>
#include <asm/vdso.h>

extern char vdso_start[], vdso_end[];
extern char vdso32_start[], vdso32_end[];

enum vdso_abi {
	VDSO_ABI_AA64,
	VDSO_ABI_AA32,
};

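/*
 * Page offsets within the vvar mapping. The vDSO data page normally sits at
 * offset zero; for tasks in a non-root time namespace, vvar_fault() maps the
 * namespace page at offset zero and the real data page at
 * VVAR_TIMENS_PAGE_OFFSET instead.
 */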
enum vvar_pages {
	VVAR_DATA_PAGE_OFFSET,
	VVAR_TIMENS_PAGE_OFFSET,
	VVAR_NR_PAGES,
};

struct vdso_abi_info {
	const char *name;
	const char *vdso_code_start;
	const char *vdso_code_end;
	unsigned long vdso_pages;
	/* Data Mapping */
	struct vm_special_mapping *dm;
	/* Code Mapping */
	struct vm_special_mapping *cm;
};

static struct vdso_abi_info vdso_info[] __ro_after_init = {
	[VDSO_ABI_AA64] = {
		.name = "vdso",
		.vdso_code_start = vdso_start,
		.vdso_code_end = vdso_end,
	},
#ifdef CONFIG_COMPAT_VDSO
	[VDSO_ABI_AA32] = {
		.name = "vdso32",
		.vdso_code_start = vdso32_start,
		.vdso_code_end = vdso32_end,
	},
#endif /* CONFIG_COMPAT_VDSO */
};

/*
 * The vDSO data page.
 */
static union {
	struct vdso_data	data[CS_BASES];
	u8			page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = vdso_data_store.data;

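/*
 * Userspace (e.g. a checkpoint/restore tool) may move the vDSO text with
 * mremap(); track the new base address so the signal trampoline inside the
 * vDSO can still be located via mm->context.vdso.
 */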
static int vdso_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	current->mm->context.vdso = (void *)new_vma->vm_start;

	return 0;
}

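/*
 * Validate the vDSO image for the given ABI and build the page array that
 * backs its code mapping.
 */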
static int __vdso_init(enum vdso_abi abi)
{
	int i;
	struct page **vdso_pagelist;
	unsigned long pfn;

	if (memcmp(vdso_info[abi].vdso_code_start, "\177ELF", 4)) {
		pr_err("vDSO is not a valid ELF object!\n");
		return -EINVAL;
	}

	vdso_info[abi].vdso_pages = (
			vdso_info[abi].vdso_code_end -
			vdso_info[abi].vdso_code_start) >>
			PAGE_SHIFT;

	vdso_pagelist = kcalloc(vdso_info[abi].vdso_pages,
				sizeof(struct page *),
				GFP_KERNEL);
	if (vdso_pagelist == NULL)
		return -ENOMEM;

	/* Grab the vDSO code pages. */
	pfn = sym_to_pfn(vdso_info[abi].vdso_code_start);

	for (i = 0; i < vdso_info[abi].vdso_pages; i++)
		vdso_pagelist[i] = pfn_to_page(pfn + i);

	vdso_info[abi].cm->pages = vdso_pagelist;

	return 0;
}

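/*
 * Time namespace support: arch_get_vdso_data() lets the generic code locate
 * the vdso_data copy inside a vvar page, and vdso_join_timens() is called
 * when a task changes time namespace.
 */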
#ifdef CONFIG_TIME_NS
struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
	return (struct vdso_data *)(vvar_page);
}

/*
 * The vvar mapping contains data for a specific time namespace, so when a task
 * changes namespace we must unmap its vvar data for the old namespace.
 * Subsequent faults will map in data for the new namespace.
 *
 * For more details see timens_setup_vdso_data().
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma;

	mmap_read_lock(mm);

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		unsigned long size = vma->vm_end - vma->vm_start;

		if (vma_is_special_mapping(vma, vdso_info[VDSO_ABI_AA64].dm))
			zap_page_range(vma, vma->vm_start, size);
#ifdef CONFIG_COMPAT_VDSO
		if (vma_is_special_mapping(vma, vdso_info[VDSO_ABI_AA32].dm))
			zap_page_range(vma, vma->vm_start, size);
#endif
	}

	mmap_read_unlock(mm);
	return 0;
}

static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
	if (likely(vma->vm_mm == current->mm))
		return current->nsproxy->time_ns->vvar_page;

	/*
	 * VM_PFNMAP | VM_IO protect the .fault() handler from being called
	 * through interfaces like /proc/$pid/mem or
	 * process_vm_{readv,writev}() as long as there's no .access()
	 * in special_mapping_vmops.
	 * For more details see check_vma_flags() and __access_remote_vm().
	 */
	WARN(1, "vvar_page accessed remotely");

	return NULL;
}
#else
static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
	return NULL;
}
#endif

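/*
 * Fault handler for the "[vvar]" special mappings: install the page that
 * corresponds to the faulting offset, taking the task's time namespace
 * into account.
 */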
static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *timens_page = find_timens_vvar_page(vma);
	unsigned long pfn;

	switch (vmf->pgoff) {
	case VVAR_DATA_PAGE_OFFSET:
		if (timens_page)
			pfn = page_to_pfn(timens_page);
		else
			pfn = sym_to_pfn(vdso_data);
		break;
#ifdef CONFIG_TIME_NS
	case VVAR_TIMENS_PAGE_OFFSET:
		/*
		 * If a task belongs to a time namespace then a namespace-
		 * specific VVAR page is mapped at the VVAR_DATA_PAGE_OFFSET
		 * and the real VVAR page is mapped at the
		 * VVAR_TIMENS_PAGE_OFFSET offset.
		 * See also the comment near timens_setup_vdso_data().
		 */
		if (!timens_page)
			return VM_FAULT_SIGBUS;
		pfn = sym_to_pfn(vdso_data);
		break;
#endif /* CONFIG_TIME_NS */
	default:
		return VM_FAULT_SIGBUS;
	}

	return vmf_insert_pfn(vma, vmf->address, pfn);
}

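/*
 * Map the vvar pages and the vDSO text for the given ABI into @mm as one
 * contiguous VA range: the vvar pages come first, immediately followed by
 * the vDSO text.
 */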
static int __setup_additional_pages(enum vdso_abi abi,
				    struct mm_struct *mm,
				    struct linux_binprm *bprm,
				    int uses_interp)
{
	unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
	unsigned long gp_flags = 0;
	void *ret;

	BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);

	vdso_text_len = vdso_info[abi].vdso_pages << PAGE_SHIFT;
	/* Be sure to map the data page */
	vdso_mapping_len = vdso_text_len + VVAR_NR_PAGES * PAGE_SIZE;

	vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		ret = ERR_PTR(vdso_base);
		goto up_fail;
	}

	ret = _install_special_mapping(mm, vdso_base, VVAR_NR_PAGES * PAGE_SIZE,
				       VM_READ|VM_MAYREAD|VM_PFNMAP,
				       vdso_info[abi].dm);
	if (IS_ERR(ret))
		goto up_fail;

	if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) && system_supports_bti())
		gp_flags = VM_ARM64_BTI;

	vdso_base += VVAR_NR_PAGES * PAGE_SIZE;
	mm->context.vdso = (void *)vdso_base;
	ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
				       VM_READ|VM_EXEC|gp_flags|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       vdso_info[abi].cm);
	if (IS_ERR(ret))
		goto up_fail;

	return 0;

up_fail:
	mm->context.vdso = NULL;
	return PTR_ERR(ret);
}

#ifdef CONFIG_COMPAT
/*
 * Create and map the vectors page for AArch32 tasks.
 */
enum aarch32_map {
	AA32_MAP_VECTORS, /* kuser helpers */
	AA32_MAP_SIGPAGE,
	AA32_MAP_VVAR,
	AA32_MAP_VDSO,
};

static struct page *aarch32_vectors_page __ro_after_init;
static struct page *aarch32_sig_page __ro_after_init;

static int aarch32_sigpage_mremap(const struct vm_special_mapping *sm,
				  struct vm_area_struct *new_vma)
{
	current->mm->context.sigpage = (void *)new_vma->vm_start;

	return 0;
}

static struct vm_special_mapping aarch32_vdso_maps[] = {
	[AA32_MAP_VECTORS] = {
		.name	= "[vectors]", /* ABI */
		.pages	= &aarch32_vectors_page,
	},
	[AA32_MAP_SIGPAGE] = {
		.name	= "[sigpage]", /* ABI */
		.pages	= &aarch32_sig_page,
		.mremap	= aarch32_sigpage_mremap,
	},
	[AA32_MAP_VVAR] = {
		.name = "[vvar]",
		.fault = vvar_fault,
	},
	[AA32_MAP_VDSO] = {
		.name = "[vdso]",
		.mremap = vdso_mremap,
	},
};

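/*
 * Allocate the kuser helpers page and copy the helper code to its end, so
 * that the helpers appear at their traditional addresses once the page is
 * mapped at AARCH32_VECTORS_BASE.
 */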
static int aarch32_alloc_kuser_vdso_page(void)
{
	extern char __kuser_helper_start[], __kuser_helper_end[];
	int kuser_sz = __kuser_helper_end - __kuser_helper_start;
	unsigned long vdso_page;

	if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
		return 0;

	vdso_page = get_zeroed_page(GFP_KERNEL);
	if (!vdso_page)
		return -ENOMEM;

	memcpy((void *)(vdso_page + 0x1000 - kuser_sz), __kuser_helper_start,
	       kuser_sz);
	aarch32_vectors_page = virt_to_page(vdso_page);
	return 0;
}

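/*
 * Fill the compat sigpage with a poison word (an A32 encoding from the
 * permanently undefined space) before copying in the sigreturn trampolines,
 * so that stray execution within the page faults instead of running
 * leftover data.
 */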
#define COMPAT_SIGPAGE_POISON_WORD	0xe7fddef1
static int aarch32_alloc_sigpage(void)
{
	extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
	int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
	__le32 poison = cpu_to_le32(COMPAT_SIGPAGE_POISON_WORD);
	void *sigpage;

	sigpage = (void *)__get_free_page(GFP_KERNEL);
	if (!sigpage)
		return -ENOMEM;

	memset32(sigpage, (__force u32)poison, PAGE_SIZE / sizeof(poison));
	memcpy(sigpage, __aarch32_sigret_code_start, sigret_sz);
	aarch32_sig_page = virt_to_page(sigpage);
	return 0;
}

static int __aarch32_alloc_vdso_pages(void)
{
	if (!IS_ENABLED(CONFIG_COMPAT_VDSO))
		return 0;

	vdso_info[VDSO_ABI_AA32].dm = &aarch32_vdso_maps[AA32_MAP_VVAR];
	vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_maps[AA32_MAP_VDSO];

	return __vdso_init(VDSO_ABI_AA32);
}

static int __init aarch32_alloc_vdso_pages(void)
{
	int ret;

	ret = __aarch32_alloc_vdso_pages();
	if (ret)
		return ret;

	ret = aarch32_alloc_sigpage();
	if (ret)
		return ret;

	return aarch32_alloc_kuser_vdso_page();
}
arch_initcall(aarch32_alloc_vdso_pages);

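/*
 * Map the kuser helpers page at the fixed AArch32 vectors address for this
 * task.
 */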
static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
{
	void *ret;

	if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
		return 0;

	/*
	 * Avoid VM_MAYWRITE for compatibility with arch/arm/, where it's
	 * not safe to CoW the page containing the CPU exception vectors.
	 */
	ret = _install_special_mapping(mm, AARCH32_VECTORS_BASE, PAGE_SIZE,
				       VM_READ | VM_EXEC |
				       VM_MAYREAD | VM_MAYEXEC,
				       &aarch32_vdso_maps[AA32_MAP_VECTORS]);

	return PTR_ERR_OR_ZERO(ret);
}

static int aarch32_sigreturn_setup(struct mm_struct *mm)
{
	unsigned long addr;
	void *ret;

	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = ERR_PTR(addr);
		goto out;
	}

	/*
	 * VM_MAYWRITE is required to allow gdb to Copy-on-Write and
	 * set breakpoints.
	 */
	ret = _install_special_mapping(mm, addr, PAGE_SIZE,
				       VM_READ | VM_EXEC | VM_MAYREAD |
				       VM_MAYWRITE | VM_MAYEXEC,
				       &aarch32_vdso_maps[AA32_MAP_SIGPAGE]);
	if (IS_ERR(ret))
		goto out;

	mm->context.sigpage = (void *)addr;

out:
	return PTR_ERR_OR_ZERO(ret);
}

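/*
 * Called from the compat ELF loader at exec time: map the kuser helpers,
 * the compat vDSO (when built in) and the sigpage into the new mm.
 */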
int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = aarch32_kuser_helpers_setup(mm);
	if (ret)
		goto out;

	if (IS_ENABLED(CONFIG_COMPAT_VDSO)) {
		ret = __setup_additional_pages(VDSO_ABI_AA32, mm, bprm,
					       uses_interp);
		if (ret)
			goto out;
	}

	ret = aarch32_sigreturn_setup(mm);
out:
	mmap_write_unlock(mm);
	return ret;
}
#endif /* CONFIG_COMPAT */

enum aarch64_map {
	AA64_MAP_VVAR,
	AA64_MAP_VDSO,
};

static struct vm_special_mapping aarch64_vdso_maps[] __ro_after_init = {
	[AA64_MAP_VVAR] = {
		.name	= "[vvar]",
		.fault = vvar_fault,
	},
	[AA64_MAP_VDSO] = {
		.name	= "[vdso]",
		.mremap = vdso_mremap,
	},
};

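/*
 * Wire up the native (AArch64) vvar/vDSO special mappings and validate the
 * vDSO image at boot.
 */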
static int __init vdso_init(void)
{
	vdso_info[VDSO_ABI_AA64].dm = &aarch64_vdso_maps[AA64_MAP_VVAR];
	vdso_info[VDSO_ABI_AA64].cm = &aarch64_vdso_maps[AA64_MAP_VDSO];

	return __vdso_init(VDSO_ABI_AA64);
}
arch_initcall(vdso_init);

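/*
 * Called from the ELF loader at exec time: map the native vvar pages and
 * vDSO into the new mm.
 */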
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = __setup_additional_pages(VDSO_ABI_AA64, mm, bprm, uses_interp);
	mmap_write_unlock(mm);

	return ret;
}