xref: /openbmc/linux/arch/x86/entry/vdso/vma.c (revision 82df5b73)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2007 Andi Kleen, SUSE Labs.
 *
 * This contains most of the x86 vDSO kernel-side code.
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/cpu.h>
#include <linux/ptrace.h>
#include <linux/time_namespace.h>

#include <asm/pvclock.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/vvar.h>
#include <asm/tlb.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/cpufeature.h>
#include <clocksource/hyperv_timer.h>

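/*
 * asm/vvar.h describes the vvar page contents through EMIT_VVAR()
 * invocations.  Re-including it below with our own EMIT_VVAR() turns
 * each entry into a named offset constant.  Illustrative expansion
 * (the actual names and offsets are authoritative in asm/vvar.h):
 *
 *	DECLARE_VVAR(128, struct vdso_data, _vdso_data)
 * becomes
 *	const size_t _vdso_data_offset = 128;
 *
 * which is what arch_get_vdso_data() uses to find the generic vdso
 * data within a vvar page.
 */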
#undef _ASM_X86_VVAR_H
#define EMIT_VVAR(name, offset)	\
	const size_t name ## _offset = offset;
#include <asm/vvar.h>

struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
	return (struct vdso_data *)(vvar_page + _vdso_data_offset);
}
#undef EMIT_VVAR

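/* Bitmask of VDSO_CLOCKMODE_* values in use; tested via vclock_was_used(). */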
unsigned int vclocks_used __read_mostly;

#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;
#endif

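/*
 * One-time boot setup for a vdso image: the blob carries its own
 * alternatives section (image->alt .. image->alt + image->alt_len), so
 * patching it here means every process maps already-patched vdso text.
 */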
void __init init_vdso_image(const struct vdso_image *image)
{
	BUG_ON(image->size % PAGE_SIZE != 0);

	apply_alternatives((struct alt_instr *)(image->data + image->alt),
			   (struct alt_instr *)(image->data + image->alt +
						image->alt_len));
}

static const struct vm_special_mapping vvar_mapping;
struct linux_binprm;

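/*
 * The vdso text pages are not copied per process: faults are satisfied
 * directly from the kernel image's copy of the blob, so all processes
 * share the same physical pages.
 */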
static vm_fault_t vdso_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;

	if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size)
		return VM_FAULT_SIGBUS;

	vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT));
	get_page(vmf->page);
	return 0;
}

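/*
 * If a 32-bit task is inside a fast syscall while its vdso is moved by
 * mremap(), the saved user IP still points at the int80 landing pad in
 * the old mapping.  Rewrite it to the matching address in the new
 * mapping so the return to userspace lands in valid vdso text.
 */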
static void vdso_fix_landing(const struct vdso_image *image,
		struct vm_area_struct *new_vma)
{
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	if (in_ia32_syscall() && image == &vdso_image_32) {
		struct pt_regs *regs = current_pt_regs();
		unsigned long vdso_land = image->sym_int80_landing_pad;
		unsigned long old_land_addr = vdso_land +
			(unsigned long)current->mm->context.vdso;

		/* Fix up the userspace landing address; see do_fast_syscall_32(). */
		if (regs->ip == old_land_addr)
			regs->ip = new_vma->vm_start + vdso_land;
	}
#endif
}

static int vdso_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
	const struct vdso_image *image = current->mm->context.vdso_image;

	if (image->size != new_size)
		return -EINVAL;

	vdso_fix_landing(image, new_vma);
	current->mm->context.vdso = (void __user *)new_vma->vm_start;

	return 0;
}

static int vvar_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	const struct vdso_image *image = new_vma->vm_mm->context.vdso_image;
	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;

	if (new_size != -image->sym_vvar_start)
		return -EINVAL;

	return 0;
}

#ifdef CONFIG_TIME_NS
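/*
 * Look up the vvar page of current's time namespace.  This is only
 * meaningful for faults on current->mm: a remote accessor (e.g.
 * ptrace() or /proc/$pid/mem) would need the target task's namespace,
 * which cannot be derived from the vma alone, hence the WARN() below.
 */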
static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
	if (likely(vma->vm_mm == current->mm))
		return current->nsproxy->time_ns->vvar_page;

	/*
	 * VM_PFNMAP | VM_IO protect the .fault() handler from being
	 * called through interfaces like /proc/$pid/mem or
	 * process_vm_{readv,writev}() as long as there's no .access()
	 * in special_mapping_vmops().
	 * For more details, see check_vma_flags() and __access_remote_vm().
	 */

	WARN(1, "vvar_page accessed remotely");

	return NULL;
}

/*
 * The vvar page layout depends on whether a task belongs to the root or
 * a non-root time namespace.  Whenever a task changes its namespace, the
 * VVAR page tables are cleared, and then they will be re-faulted with a
 * corresponding layout.
 * See also the comment near timens_setup_vdso_data() for details.
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		unsigned long size = vma->vm_end - vma->vm_start;

		if (vma_is_special_mapping(vma, &vvar_mapping))
			zap_page_range(vma, vma->vm_start, size);
	}

	mmap_write_unlock(mm);
	return 0;
}
#else
static inline struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
	return NULL;
}
#endif

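/*
 * Rough sketch of the vvar area handled below, relative to the vdso
 * text start (the sym_* values come from the vdso linker script, which
 * is authoritative; offsets within the area are negative since the
 * vvar pages sit just below the text):
 *
 *	text + sym_vvar_start    first page of the vvar area
 *	text + sym_vvar_page     shared kernel/user data page
 *	text + sym_pvclock_page  KVM paravirt clock page
 *	text + sym_hvclock_page  Hyper-V TSC reference page
 *	text + sym_timens_page   time namespace clock-offset page
 *	text + 0                 the vdso text itself
 */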
static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;
	unsigned long pfn;
	long sym_offset;

	if (!image)
		return VM_FAULT_SIGBUS;

	sym_offset = (long)(vmf->pgoff << PAGE_SHIFT) +
		image->sym_vvar_start;

	/*
	 * Sanity check: a symbol offset of zero means that the page
	 * does not exist for this vdso image, not that the page is at
	 * offset zero relative to the text mapping.  This should be
	 * impossible here, because sym_offset should only be zero for
	 * the page past the end of the vvar mapping.
	 */
	if (sym_offset == 0)
		return VM_FAULT_SIGBUS;

	if (sym_offset == image->sym_vvar_page) {
		struct page *timens_page = find_timens_vvar_page(vma);

		pfn = __pa_symbol(&__vvar_page) >> PAGE_SHIFT;

		/*
		 * If a task belongs to a time namespace then a
		 * namespace-specific VVAR is mapped with the sym_vvar_page
		 * offset and the real VVAR page is mapped with the
		 * sym_timens_page offset.
		 * See also the comment near timens_setup_vdso_data().
		 */
		if (timens_page) {
			unsigned long addr;
			vm_fault_t err;

			/*
			 * Optimization: inside a time namespace, pre-fault
			 * the real VVAR page too.  The timens page carries
			 * only clock offsets relative to VVAR, so the VVAR
			 * page would be faulted in by the vDSO code shortly
			 * anyway.
			 */
			addr = vmf->address + (image->sym_timens_page - sym_offset);
			err = vmf_insert_pfn(vma, addr, pfn);
			if (unlikely(err & VM_FAULT_ERROR))
				return err;

			pfn = page_to_pfn(timens_page);
		}

		return vmf_insert_pfn(vma, vmf->address, pfn);
	} else if (sym_offset == image->sym_pvclock_page) {
		struct pvclock_vsyscall_time_info *pvti =
			pvclock_get_pvti_cpu0_va();
		if (pvti && vclock_was_used(VDSO_CLOCKMODE_PVCLOCK)) {
			return vmf_insert_pfn_prot(vma, vmf->address,
					__pa(pvti) >> PAGE_SHIFT,
					pgprot_decrypted(vma->vm_page_prot));
		}
	} else if (sym_offset == image->sym_hvclock_page) {
		struct ms_hyperv_tsc_page *tsc_pg = hv_get_tsc_page();

		if (tsc_pg && vclock_was_used(VDSO_CLOCKMODE_HVCLOCK))
			return vmf_insert_pfn(vma, vmf->address,
					virt_to_phys(tsc_pg) >> PAGE_SHIFT);
	} else if (sym_offset == image->sym_timens_page) {
		struct page *timens_page = find_timens_vvar_page(vma);

		if (!timens_page)
			return VM_FAULT_SIGBUS;

		pfn = __pa_symbol(&__vvar_page) >> PAGE_SHIFT;
		return vmf_insert_pfn(vma, vmf->address, pfn);
	}

	return VM_FAULT_SIGBUS;
}

static const struct vm_special_mapping vdso_mapping = {
	.name = "[vdso]",
	.fault = vdso_fault,
	.mremap = vdso_mremap,
};
static const struct vm_special_mapping vvar_mapping = {
	.name = "[vvar]",
	.fault = vvar_fault,
	.mremap = vvar_mremap,
};

/*
 * Add vdso and vvar mappings to current process.
 * @image          - blob to map
 * @addr           - request a specific address (zero to map at free addr)
 */
static int map_vdso(const struct vdso_image *image, unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long text_start;
	int ret = 0;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	addr = get_unmapped_area(NULL, addr,
				 image->size - image->sym_vvar_start, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	text_start = addr - image->sym_vvar_start;

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	vma = _install_special_mapping(mm,
				       text_start,
				       image->size,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &vdso_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	vma = _install_special_mapping(mm,
				       addr,
				       -image->sym_vvar_start,
				       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
				       VM_PFNMAP,
				       &vvar_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		do_munmap(mm, text_start, image->size, NULL);
	} else {
		current->mm->context.vdso = (void __user *)text_start;
		current->mm->context.vdso_image = image;
	}

up_fail:
	mmap_write_unlock(mm);
	return ret;
}

#ifdef CONFIG_X86_64
/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset.  This way there is no hole in the middle of the address space.
 * To save memory, make sure it is still in the same page table (PMD) as
 * the stack top.  This doesn't give that many random bits.
 *
 * Note that this algorithm is imperfect: the distribution of the vdso
 * start address within a PMD is biased toward the end.
 *
 * Only used for the 64-bit and x32 vdsos.
 */
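/*
 * Worked example with made-up numbers: for start = 0x7ffc00001000 and
 * len = 6 pages, end rounds up to the 2MiB boundary 0x7ffc00200000 and
 * then backs off by len, leaving roughly 500 page-aligned candidate
 * addresses, i.e. slightly under 9 bits of entropy on top of the stack
 * randomization itself.
 */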
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
	unsigned long addr, end;
	unsigned offset;

	/*
	 * Round up the start address.  It can start out unaligned as a result
	 * of stack start randomization.
	 */
	start = PAGE_ALIGN(start);

	/* Round the lowest possible end address up to a PMD boundary. */
	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;

	if (end > start) {
		offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
		addr = start + (offset << PAGE_SHIFT);
	} else {
		addr = start;
	}

	/*
	 * Forcibly align the final address in case we have a hardware
	 * issue that requires alignment for performance reasons.
	 */
	addr = align_vdso_addr(addr);

	return addr;
}

static int map_vdso_randomized(const struct vdso_image *image)
{
	unsigned long addr = vdso_addr(current->mm->start_stack,
				       image->size - image->sym_vvar_start);

	return map_vdso(image, addr);
}
#endif

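/*
 * Map the vdso into an mm at most once.  This is the backend for
 * explicit remap requests (e.g. the ARCH_MAP_VDSO_* arch_prctl()
 * commands), as opposed to the automatic mapping done at execve() time
 * via arch_setup_additional_pages().
 */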
int map_vdso_once(const struct vdso_image *image, unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	mmap_write_lock(mm);
	/*
	 * Check if we have already mapped the vdso blob - fail to prevent
	 * userspace from abusing install_special_mapping(), which may not
	 * do accounting and rlimits right.
	 * We could search the vma near context.vdso, but it's a slowpath,
	 * so let's explicitly check all VMAs to be completely sure.
	 */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma_is_special_mapping(vma, &vdso_mapping) ||
				vma_is_special_mapping(vma, &vvar_mapping)) {
			mmap_write_unlock(mm);
			return -EEXIST;
		}
	}
	mmap_write_unlock(mm);

	return map_vdso(image, addr);
}

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static int load_vdso32(void)
{
	if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
		return 0;

	return map_vdso(&vdso_image_32, 0);
}
#endif

#ifdef CONFIG_X86_64
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	if (!vdso64_enabled)
		return 0;

	return map_vdso_randomized(&vdso_image_64);
}

#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp)
{
#ifdef CONFIG_X86_X32_ABI
	if (test_thread_flag(TIF_X32)) {
		if (!vdso64_enabled)
			return 0;
		return map_vdso_randomized(&vdso_image_x32);
	}
#endif
#ifdef CONFIG_IA32_EMULATION
	return load_vdso32();
#else
	return 0;
#endif
}
#endif
#else
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return load_vdso32();
}
#endif

#ifdef CONFIG_X86_64
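/*
 * "vdso=" boot parameter: vdso=0 disables the 64-bit vdso; any other
 * numeric value leaves it enabled (the default).
 */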
static __init int vdso_setup(char *s)
{
	vdso64_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("vdso=", vdso_setup);

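/*
 * vclocks_used is a 32-bit mask indexed by VDSO_CLOCKMODE_*, so make
 * sure the clock-mode space still fits, then patch alternatives into
 * the 64-bit (and x32) images once at boot.
 */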
static int __init init_vdso(void)
{
	BUILD_BUG_ON(VDSO_CLOCKMODE_MAX >= 32);

	init_vdso_image(&vdso_image_64);

#ifdef CONFIG_X86_X32_ABI
	init_vdso_image(&vdso_image_x32);
#endif

	return 0;
}
subsys_initcall(init_vdso);
#endif /* CONFIG_X86_64 */
462