xref: /openbmc/linux/arch/arm64/kernel/vdso.c (revision e3d786a3)
/*
 * VDSO implementation for AArch64 and vector page setup for AArch32.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/cache.h>
#include <linux/clocksource.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/timekeeper_internal.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/signal32.h>
#include <asm/vdso.h>
#include <asm/vdso_datapage.h>

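/*
 * vdso_start/vdso_end bracket the vDSO ELF image that is built into the
 * kernel. vdso_init() below validates the ELF magic and records the pages
 * so they can be mapped into each user process.
 */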
extern char vdso_start[], vdso_end[];
static unsigned long vdso_pages __ro_after_init;

/*
 * The vDSO data page. The union pads vdso_data out to exactly one page so
 * that it can be mapped read-only into user space as the [vvar] page.
 */
static union {
	struct vdso_data	data;
	u8			page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = &vdso_data_store.data;

#ifdef CONFIG_COMPAT
/*
 * Create and map the vectors page for AArch32 tasks.
 */
static struct page *vectors_page[1] __ro_after_init;

static int __init alloc_vectors_page(void)
{
	extern char __kuser_helper_start[], __kuser_helper_end[];
	extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];

	int kuser_sz = __kuser_helper_end - __kuser_helper_start;
	int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
	unsigned long vpage;

	vpage = get_zeroed_page(GFP_ATOMIC);

	if (!vpage)
		return -ENOMEM;

	/*
	 * kuser helpers: copied to the top of the page so that their entry
	 * points sit at the fixed addresses the AArch32 kuser ABI expects.
	 */
	memcpy((void *)vpage + 0x1000 - kuser_sz, __kuser_helper_start,
		kuser_sz);

	/* sigreturn code */
	memcpy((void *)vpage + AARCH32_KERN_SIGRET_CODE_OFFSET,
	       __aarch32_sigret_code_start, sigret_sz);

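	/* Make the copied instructions visible to the I-side before they run. */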
	flush_icache_range(vpage, vpage + PAGE_SIZE);
	vectors_page[0] = virt_to_page(vpage);

	return 0;
}
arch_initcall(alloc_vectors_page);

int aarch32_setup_vectors_page(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr = AARCH32_VECTORS_BASE;
	static const struct vm_special_mapping spec = {
		.name	= "[vectors]",
		.pages	= vectors_page,
	};
	void *ret;

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;
	current->mm->context.vdso = (void *)addr;

	/* Map vectors page at the high address. */
	ret = _install_special_mapping(mm, addr, PAGE_SIZE,
				       VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC,
				       &spec);

	up_write(&mm->mmap_sem);

	return PTR_ERR_OR_ZERO(ret);
}
#endif /* CONFIG_COMPAT */

static int vdso_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
	unsigned long vdso_size = vdso_end - vdso_start;

	if (vdso_size != new_size)
		return -EINVAL;

	current->mm->context.vdso = (void *)new_vma->vm_start;

	return 0;
}

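/*
 * Special mappings installed for each process: index 0 is the [vvar] data
 * page, index 1 the [vdso] code pages. The .pages arrays are filled in by
 * vdso_init().
 */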
static struct vm_special_mapping vdso_spec[2] __ro_after_init = {
	{
		.name	= "[vvar]",
	},
	{
		.name	= "[vdso]",
		.mremap = vdso_mremap,
	},
};

static int __init vdso_init(void)
{
	int i;
	struct page **vdso_pagelist;
	unsigned long pfn;

	if (memcmp(vdso_start, "\177ELF", 4)) {
		pr_err("vDSO is not a valid ELF object!\n");
		return -EINVAL;
	}

	vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
	pr_info("vdso: %ld pages (%ld code @ %p, %ld data @ %p)\n",
		vdso_pages + 1, vdso_pages, vdso_start, 1L, vdso_data);

	/* Allocate the vDSO pagelist, plus a page for the data. */
	vdso_pagelist = kcalloc(vdso_pages + 1, sizeof(struct page *),
				GFP_KERNEL);
	if (vdso_pagelist == NULL)
		return -ENOMEM;

	/* Grab the vDSO data page. */
	vdso_pagelist[0] = phys_to_page(__pa_symbol(vdso_data));

	/* Grab the vDSO code pages. */
	pfn = sym_to_pfn(vdso_start);

	for (i = 0; i < vdso_pages; i++)
		vdso_pagelist[i + 1] = pfn_to_page(pfn + i);

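	/* [vvar] maps the single data page, [vdso] the code pages after it. */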
	vdso_spec[0].pages = &vdso_pagelist[0];
	vdso_spec[1].pages = &vdso_pagelist[1];

	return 0;
}
arch_initcall(vdso_init);

int arch_setup_additional_pages(struct linux_binprm *bprm,
				int uses_interp)
{
	struct mm_struct *mm = current->mm;
	unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
	void *ret;

	vdso_text_len = vdso_pages << PAGE_SHIFT;
	/* Be sure to map the data page */
	vdso_mapping_len = vdso_text_len + PAGE_SIZE;

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;
	vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		ret = ERR_PTR(vdso_base);
		goto up_fail;
	}
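	/*
	 * Map the read-only data page first, then the vDSO text immediately
	 * above it, so the vDSO code can find vdso_data at a fixed offset
	 * below its own start address.
	 */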
	ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
				       VM_READ|VM_MAYREAD,
				       &vdso_spec[0]);
	if (IS_ERR(ret))
		goto up_fail;

	vdso_base += PAGE_SIZE;
	mm->context.vdso = (void *)vdso_base;
	ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &vdso_spec[1]);
	if (IS_ERR(ret))
		goto up_fail;

	up_write(&mm->mmap_sem);
	return 0;

up_fail:
	mm->context.vdso = NULL;
	up_write(&mm->mmap_sem);
	return PTR_ERR(ret);
}

/*
 * Update the vDSO data page to keep in sync with kernel timekeeping.
 */
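/*
 * Illustration only (the real reader lives in the vDSO itself): user space
 * pairs with the tb_seq_count updates below roughly as
 *
 *	do {
 *		seq = vdso_data->tb_seq_count;	// retry while odd
 *		smp_rmb();
 *		... copy the snapshot ...
 *		smp_rmb();
 *	} while ((seq & 1) || seq != vdso_data->tb_seq_count);
 */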
void update_vsyscall(struct timekeeper *tk)
{
	u32 use_syscall = !tk->tkr_mono.clock->archdata.vdso_direct;

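	/*
	 * Writer side of the sequence count: bump it before and after the
	 * update, with write barriers in between, so readers retry if they
	 * see an odd or changed count.
	 */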
	++vdso_data->tb_seq_count;
	smp_wmb();

	vdso_data->use_syscall			= use_syscall;
	vdso_data->xtime_coarse_sec		= tk->xtime_sec;
	vdso_data->xtime_coarse_nsec		= tk->tkr_mono.xtime_nsec >>
							tk->tkr_mono.shift;
	vdso_data->wtm_clock_sec		= tk->wall_to_monotonic.tv_sec;
	vdso_data->wtm_clock_nsec		= tk->wall_to_monotonic.tv_nsec;

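	/*
	 * Only publish the fields needed for a userspace clock read when the
	 * clocksource can be read directly from the vDSO; otherwise the vDSO
	 * falls back to the clock_gettime() syscall.
	 */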
	if (!use_syscall) {
		/* tkr_mono.cycle_last == tkr_raw.cycle_last */
		vdso_data->cs_cycle_last	= tk->tkr_mono.cycle_last;
		vdso_data->raw_time_sec		= tk->raw_sec;
		vdso_data->raw_time_nsec	= tk->tkr_raw.xtime_nsec;
		vdso_data->xtime_clock_sec	= tk->xtime_sec;
		vdso_data->xtime_clock_nsec	= tk->tkr_mono.xtime_nsec;
		vdso_data->cs_mono_mult		= tk->tkr_mono.mult;
		vdso_data->cs_raw_mult		= tk->tkr_raw.mult;
		/* tkr_mono.shift == tkr_raw.shift */
		vdso_data->cs_shift		= tk->tkr_mono.shift;
	}

	smp_wmb();
	++vdso_data->tb_seq_count;
}

void update_vsyscall_tz(void)
{
	vdso_data->tz_minuteswest	= sys_tz.tz_minuteswest;
	vdso_data->tz_dsttime		= sys_tz.tz_dsttime;
}