xref: /openbmc/linux/arch/x86/entry/vdso/vma.c (revision 82003e04)
/*
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 *
 * This contains most of the x86 vDSO kernel-side code.
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/cpu.h>
#include <linux/ptrace.h>
#include <asm/pvclock.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/vvar.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/cpufeature.h>

#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;
#endif

void __init init_vdso_image(const struct vdso_image *image)
{
	BUG_ON(image->size % PAGE_SIZE != 0);

	apply_alternatives((struct alt_instr *)(image->data + image->alt),
			   (struct alt_instr *)(image->data + image->alt +
						image->alt_len));
}

struct linux_binprm;

/*
 * The vDSO text pages are backed directly by the kernel-internal image
 * blob: resolve the faulting offset to the corresponding page of the
 * image and take a reference on it.
 */
static int vdso_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;

	if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size)
		return VM_FAULT_SIGBUS;

	vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT));
	get_page(vmf->page);
	return 0;
}

static void vdso_fix_landing(const struct vdso_image *image,
		struct vm_area_struct *new_vma)
{
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	if (in_ia32_syscall() && image == &vdso_image_32) {
		struct pt_regs *regs = current_pt_regs();
		unsigned long vdso_land = image->sym_int80_landing_pad;
		unsigned long old_land_addr = vdso_land +
			(unsigned long)current->mm->context.vdso;

		/* Fix up the userspace landing address - see do_fast_syscall_32() */
		if (regs->ip == old_land_addr)
			regs->ip = new_vma->vm_start + vdso_land;
	}
#endif
}

/*
 * Called when userspace moves the vDSO mapping with mremap(): keep
 * mm->context.vdso in sync and, for the 32-bit vDSO, repoint a pending
 * fast-syscall return address into the relocated image.
 */
static int vdso_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
	const struct vdso_image *image = current->mm->context.vdso_image;

	if (image->size != new_size)
		return -EINVAL;

	if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
		return -EFAULT;

	vdso_fix_landing(image, new_vma);
	current->mm->context.vdso = (void __user *)new_vma->vm_start;

	return 0;
}

static int vvar_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;
	long sym_offset;
	int ret = -EFAULT;

	if (!image)
		return VM_FAULT_SIGBUS;

	sym_offset = (long)(vmf->pgoff << PAGE_SHIFT) +
		image->sym_vvar_start;

	/*
	 * Sanity check: a symbol offset of zero means that the page
	 * does not exist for this vdso image, not that the page is at
	 * offset zero relative to the text mapping.  This should be
	 * impossible here, because sym_offset should only be zero for
	 * the page past the end of the vvar mapping.
	 */
	if (sym_offset == 0)
		return VM_FAULT_SIGBUS;

	if (sym_offset == image->sym_vvar_page) {
		ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address,
				    __pa_symbol(&__vvar_page) >> PAGE_SHIFT);
	} else if (sym_offset == image->sym_pvclock_page) {
		struct pvclock_vsyscall_time_info *pvti =
			pvclock_pvti_cpu0_va();
		if (pvti && vclock_was_used(VCLOCK_PVCLOCK)) {
			ret = vm_insert_pfn(
				vma,
				(unsigned long)vmf->virtual_address,
				__pa(pvti) >> PAGE_SHIFT);
		}
	}

	if (ret == 0 || ret == -EBUSY)
		return VM_FAULT_NOPAGE;

	return VM_FAULT_SIGBUS;
}
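
/*
 * Illustrative sketch, not part of the original file: how sym_offset in
 * vvar_fault() maps a fault offset back to a named page.  The symbol
 * values below are hypothetical; the real ones come from the linked
 * vdso image.
 *
 *   Assume sym_vvar_start   == -2 * PAGE_SIZE  (vvar area sits below the text)
 *          sym_vvar_page    == -2 * PAGE_SIZE
 *          sym_pvclock_page == -1 * PAGE_SIZE
 *
 *   Fault on 1st [vvar] page: pgoff == 0, so
 *          sym_offset == 0 + sym_vvar_start == -2 * PAGE_SIZE  -> __vvar_page
 *   Fault on 2nd [vvar] page: pgoff == 1, so
 *          sym_offset == PAGE_SIZE + sym_vvar_start == -1 * PAGE_SIZE -> pvclock page
 *
 *   Any other offset leaves ret == -EFAULT and the fault returns
 *   VM_FAULT_SIGBUS.
 */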

static const struct vm_special_mapping vdso_mapping = {
	.name = "[vdso]",
	.fault = vdso_fault,
	.mremap = vdso_mremap,
};
static const struct vm_special_mapping vvar_mapping = {
	.name = "[vvar]",
	.fault = vvar_fault,
};

/*
 * Add vdso and vvar mappings to current process.
 * @image          - blob to map
 * @addr           - request a specific address (zero to map at free addr)
 */
static int map_vdso(const struct vdso_image *image, unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long text_start;
	int ret = 0;

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;

	addr = get_unmapped_area(NULL, addr,
				 image->size - image->sym_vvar_start, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	text_start = addr - image->sym_vvar_start;
	current->mm->context.vdso = (void __user *)text_start;
	current->mm->context.vdso_image = image;

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	vma = _install_special_mapping(mm,
				       text_start,
				       image->size,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &vdso_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	vma = _install_special_mapping(mm,
				       addr,
				       -image->sym_vvar_start,
				       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
				       VM_PFNMAP,
				       &vvar_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		do_munmap(mm, text_start, image->size);
	}

up_fail:
	if (ret) {
		current->mm->context.vdso = NULL;
		current->mm->context.vdso_image = NULL;
	}

	up_write(&mm->mmap_sem);
	return ret;
}
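
/*
 * Rough sketch of the resulting layout (illustrative only): map_vdso()
 * creates two adjacent special mappings, with the data pages placed
 * immediately below the text so the vDSO code can reach them at fixed
 * negative offsets.
 *
 *   addr                      text_start = addr - sym_vvar_start
 *   |                         |
 *   v                         v
 *   +-------------------------+---------------------------+
 *   |         [vvar]          |          [vdso]           |
 *   |  -sym_vvar_start bytes  |     image->size bytes     |
 *   +-------------------------+---------------------------+
 *
 * sym_vvar_start is negative, so text_start lies above addr.
 */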

#ifdef CONFIG_X86_64
/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset.  This way there is no hole in the middle of address space.
 * To save memory, make sure it is still covered by the same page table
 * (PMD) as the stack top.  This doesn't give that many random bits.
 *
 * Note that this algorithm is imperfect: the distribution of the vdso
 * start address within a PMD is biased toward the end.
 *
 * Only used for the 64-bit and x32 vdsos.
 */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
	unsigned long addr, end;
	unsigned offset;

	/*
	 * Round up the start address.  It can start out unaligned as a result
	 * of stack start randomization.
	 */
	start = PAGE_ALIGN(start);

	/* Round the lowest possible end address up to a PMD boundary. */
	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;

	if (end > start) {
		offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
		addr = start + (offset << PAGE_SHIFT);
	} else {
		addr = start;
	}

	/*
	 * Forcibly align the final address in case we have a hardware
	 * issue that requires alignment for performance reasons.
	 */
	addr = align_vdso_addr(addr);

	return addr;
}
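
/*
 * Worked example for vdso_addr(), with made-up numbers (PAGE_SIZE = 4 KiB,
 * PMD_SIZE = 2 MiB):
 *
 *   start = 0x7ffc00001234, len = 0x3000
 *   start = PAGE_ALIGN(start)                        = 0x7ffc00002000
 *   end   = (start + len + PMD_SIZE - 1) & PMD_MASK  = 0x7ffc00200000
 *   end  -= len                                      = 0x7ffc001fd000
 *
 *   end > start, so the vdso start is picked uniformly from the
 *   ((end - start) >> PAGE_SHIFT) + 1 = 508 page-aligned slots in
 *   [start, end], keeping the whole mapping below the first PMD
 *   boundary above the stack top.
 */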

static int map_vdso_randomized(const struct vdso_image *image)
{
	unsigned long addr = vdso_addr(current->mm->start_stack, image->size-image->sym_vvar_start);

	return map_vdso(image, addr);
}
#endif

int map_vdso_once(const struct vdso_image *image, unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	/*
	 * Check if we have already mapped the vdso blob - fail to prevent
	 * abusing install_special_mapping() from userspace, which may
	 * not do accounting and rlimits right.
	 * We could search the VMAs near context.vdso, but this is a
	 * slowpath, so explicitly check all of them to be completely sure.
	 */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma_is_special_mapping(vma, &vdso_mapping) ||
				vma_is_special_mapping(vma, &vvar_mapping)) {
			up_write(&mm->mmap_sem);
			return -EEXIST;
		}
	}
	up_write(&mm->mmap_sem);

	return map_vdso(image, addr);
}
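
/*
 * Illustrative userspace usage, a sketch rather than part of this file:
 * map_vdso_once() is reached via arch_prctl(), e.g. by checkpoint/restore
 * tools that want the vDSO at a known address.  Assumes the
 * ARCH_MAP_VDSO_64 request code from <asm/prctl.h> is available:
 *
 *   #include <unistd.h>
 *   #include <sys/syscall.h>
 *   #include <asm/prctl.h>
 *
 *   // Ask the kernel to map the 64-bit vDSO image at 'addr'.
 *   // Fails with EEXIST if a [vdso]/[vvar] mapping already exists.
 *   long ret = syscall(SYS_arch_prctl, ARCH_MAP_VDSO_64, addr);
 */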

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static int load_vdso32(void)
{
	if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
		return 0;

	return map_vdso(&vdso_image_32, 0);
}
#endif

#ifdef CONFIG_X86_64
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	if (!vdso64_enabled)
		return 0;

	return map_vdso_randomized(&vdso_image_64);
}

#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp)
{
#ifdef CONFIG_X86_X32_ABI
	if (test_thread_flag(TIF_X32)) {
		if (!vdso64_enabled)
			return 0;
		return map_vdso_randomized(&vdso_image_x32);
	}
#endif
#ifdef CONFIG_IA32_EMULATION
	return load_vdso32();
#else
	return 0;
#endif
}
#endif
#else
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return load_vdso32();
}
#endif

#ifdef CONFIG_X86_64
static __init int vdso_setup(char *s)
{
	vdso64_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("vdso=", vdso_setup);
#endif

#ifdef CONFIG_X86_64
static void vgetcpu_cpu_init(void *arg)
{
	int cpu = smp_processor_id();
	struct desc_struct d = { };
	unsigned long node = 0;
#ifdef CONFIG_NUMA
	node = cpu_to_node(cpu);
#endif
	if (static_cpu_has(X86_FEATURE_RDTSCP))
		write_rdtscp_aux((node << 12) | cpu);

	/*
	 * Store cpu number in limit so that it can be loaded
	 * quickly in user space in vgetcpu. (12 bits for the CPU
	 * and 8 bits for the node)
	 */
	d.limit0 = cpu | ((node & 0xf) << 12);
	d.limit = node >> 4;
	d.type = 5;		/* RO data, expand down, accessed */
	d.dpl = 3;		/* Visible to user code */
	d.s = 1;		/* Not a system segment */
	d.p = 1;		/* Present */
	d.d = 1;		/* 32-bit */

	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
}
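
/*
 * Sketch of the consumer side (illustrative; the in-tree user is the
 * vDSO's __vdso_getcpu()): userspace reads the segment limit of
 * GDT_ENTRY_PER_CPU with LSL and splits it into CPU and node numbers:
 *
 *   unsigned long p;
 *   asm("lsl %1,%0" : "=r" (p) : "r" (GDT_ENTRY_PER_CPU * 8 + 3));
 *   unsigned int cpu  = p & 0xfff;	// low 12 bits: CPU number
 *   unsigned int node = p >> 12;	// remaining bits: node number
 */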

static int vgetcpu_online(unsigned int cpu)
{
	return smp_call_function_single(cpu, vgetcpu_cpu_init, NULL, 1);
}

static int __init init_vdso(void)
{
	init_vdso_image(&vdso_image_64);

#ifdef CONFIG_X86_X32_ABI
	init_vdso_image(&vdso_image_x32);
#endif

	/* notifier priority > KVM */
	return cpuhp_setup_state(CPUHP_AP_X86_VDSO_VMA_ONLINE,
				 "AP_X86_VDSO_VMA_ONLINE", vgetcpu_online, NULL);
}
subsys_initcall(init_vdso);
#endif /* CONFIG_X86_64 */