xref: /openbmc/linux/arch/x86/entry/vdso/vma.c (revision f125e2d4)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2007 Andi Kleen, SUSE Labs.
 *
 * This contains most of the x86 vDSO kernel-side code.
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/cpu.h>
#include <linux/ptrace.h>
#include <linux/time_namespace.h>

#include <asm/pvclock.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/vvar.h>
#include <asm/tlb.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/cpufeature.h>
#include <clocksource/hyperv_timer.h>

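/*
 * Re-include asm/vvar.h with EMIT_VVAR() redefined so that every vvar
 * symbol gets a <name>_offset constant holding its offset within the
 * vvar page.
 */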
#undef _ASM_X86_VVAR_H
#define EMIT_VVAR(name, offset)	\
	const size_t name ## _offset = offset;
#include <asm/vvar.h>

struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
	return (struct vdso_data *)(vvar_page + _vdso_data_offset);
}
#undef EMIT_VVAR

#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;
#endif

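/* Validate the blob size and apply alternative instructions to the image. */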
void __init init_vdso_image(const struct vdso_image *image)
{
	BUG_ON(image->size % PAGE_SIZE != 0);

	apply_alternatives((struct alt_instr *)(image->data + image->alt),
			   (struct alt_instr *)(image->data + image->alt +
						image->alt_len));
}

static const struct vm_special_mapping vvar_mapping;
struct linux_binprm;

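/*
 * Fault handler for the [vdso] mapping: back the faulting offset with
 * the matching page of the kernel-side vdso image blob.
 */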
static vm_fault_t vdso_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;

	if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size)
		return VM_FAULT_SIGBUS;

	vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT));
	get_page(vmf->page);
	return 0;
}

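/*
 * If a 32-bit task is sitting on the int80 landing pad while its vdso
 * is moved, point the saved IP at the pad in the new mapping.
 */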
static void vdso_fix_landing(const struct vdso_image *image,
		struct vm_area_struct *new_vma)
{
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	if (in_ia32_syscall() && image == &vdso_image_32) {
		struct pt_regs *regs = current_pt_regs();
		unsigned long vdso_land = image->sym_int80_landing_pad;
		unsigned long old_land_addr = vdso_land +
			(unsigned long)current->mm->context.vdso;

		/* Fix up the userspace landing address - see do_fast_syscall_32() */
		if (regs->ip == old_land_addr)
			regs->ip = new_vma->vm_start + vdso_land;
	}
#endif
}

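/*
 * Userspace may mremap() the vdso as long as its size is unchanged;
 * record the new location and fix up any in-progress syscall landing pad.
 */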
static int vdso_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
	const struct vdso_image *image = current->mm->context.vdso_image;

	if (image->size != new_size)
		return -EINVAL;

	vdso_fix_landing(image, new_vma);
	current->mm->context.vdso = (void __user *)new_vma->vm_start;

	return 0;
}

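/* The vvar area may be moved, but never resized. */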
static int vvar_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	const struct vdso_image *image = new_vma->vm_mm->context.vdso_image;
	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;

	if (new_size != -image->sym_vvar_start)
		return -EINVAL;

	return 0;
}

#ifdef CONFIG_TIME_NS
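/*
 * The time namespace vvar page can only be resolved for the current
 * task's own mm; remote accesses return NULL (see the comment below).
 */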
static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
	if (likely(vma->vm_mm == current->mm))
		return current->nsproxy->time_ns->vvar_page;

	/*
	 * VM_PFNMAP | VM_IO protect .fault() handler from being called
	 * through interfaces like /proc/$pid/mem or
	 * process_vm_{readv,writev}() as long as there's no .access()
	 * in special_mapping_vmops().
	 * For more details see check_vma_flags() and __access_remote_vm().
	 */

	WARN(1, "vvar_page accessed remotely");

	return NULL;
}

/*
 * The vvar page layout depends on whether a task belongs to the root or
 * non-root time namespace. Whenever a task changes its namespace, the VVAR
 * page tables are cleared and then they will be re-faulted with the
 * corresponding layout.
 * See also the comment near timens_setup_vdso_data() for details.
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma;

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		unsigned long size = vma->vm_end - vma->vm_start;

		if (vma_is_special_mapping(vma, &vvar_mapping))
			zap_page_range(vma, vma->vm_start, size);
	}

	up_write(&mm->mmap_sem);
	return 0;
}
#else
static inline struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
	return NULL;
}
#endif

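/*
 * Fault handler for the [vvar] area: depending on which page of the
 * area faulted, insert (by PFN) the vvar data page, the pvclock page,
 * the Hyper-V TSC page or the time namespace page.
 */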
static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;
	unsigned long pfn;
	long sym_offset;

	if (!image)
		return VM_FAULT_SIGBUS;

	sym_offset = (long)(vmf->pgoff << PAGE_SHIFT) +
		image->sym_vvar_start;

	/*
	 * Sanity check: a symbol offset of zero means that the page
	 * does not exist for this vdso image, not that the page is at
	 * offset zero relative to the text mapping.  This should be
	 * impossible here, because sym_offset should only be zero for
	 * the page past the end of the vvar mapping.
	 */
	if (sym_offset == 0)
		return VM_FAULT_SIGBUS;

	if (sym_offset == image->sym_vvar_page) {
		struct page *timens_page = find_timens_vvar_page(vma);

		pfn = __pa_symbol(&__vvar_page) >> PAGE_SHIFT;

		/*
		 * If a task belongs to a time namespace then a namespace
		 * specific VVAR is mapped with the sym_vvar_page offset and
		 * the real VVAR page is mapped with the sym_timens_page
		 * offset.
		 * See also the comment near timens_setup_vdso_data().
		 */
		if (timens_page) {
			unsigned long addr;
			vm_fault_t err;

			/*
			 * Optimization: inside a time namespace, pre-fault
			 * the real VVAR page too. The timens page holds only
			 * per-clock offsets applied on top of the VVAR data,
			 * so the VVAR page will be faulted in shortly by the
			 * vDSO code anyway.
			 */
			addr = vmf->address + (image->sym_timens_page - sym_offset);
			err = vmf_insert_pfn(vma, addr, pfn);
			if (unlikely(err & VM_FAULT_ERROR))
				return err;

			pfn = page_to_pfn(timens_page);
		}

		return vmf_insert_pfn(vma, vmf->address, pfn);
	} else if (sym_offset == image->sym_pvclock_page) {
		struct pvclock_vsyscall_time_info *pvti =
			pvclock_get_pvti_cpu0_va();
		if (pvti && vclock_was_used(VCLOCK_PVCLOCK)) {
			return vmf_insert_pfn_prot(vma, vmf->address,
					__pa(pvti) >> PAGE_SHIFT,
					pgprot_decrypted(vma->vm_page_prot));
		}
	} else if (sym_offset == image->sym_hvclock_page) {
		struct ms_hyperv_tsc_page *tsc_pg = hv_get_tsc_page();

		if (tsc_pg && vclock_was_used(VCLOCK_HVCLOCK))
			return vmf_insert_pfn(vma, vmf->address,
					virt_to_phys(tsc_pg) >> PAGE_SHIFT);
	} else if (sym_offset == image->sym_timens_page) {
		struct page *timens_page = find_timens_vvar_page(vma);

		if (!timens_page)
			return VM_FAULT_SIGBUS;

		pfn = __pa_symbol(&__vvar_page) >> PAGE_SHIFT;
		return vmf_insert_pfn(vma, vmf->address, pfn);
	}

	return VM_FAULT_SIGBUS;
}

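/* Special mapping descriptors backing the [vdso] text and [vvar] data areas. */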
static const struct vm_special_mapping vdso_mapping = {
	.name = "[vdso]",
	.fault = vdso_fault,
	.mremap = vdso_mremap,
};
static const struct vm_special_mapping vvar_mapping = {
	.name = "[vvar]",
	.fault = vvar_fault,
	.mremap = vvar_mremap,
};

/*
 * Add vdso and vvar mappings to current process.
 * @image          - blob to map
 * @addr           - request a specific address (zero to map at free addr)
 */
static int map_vdso(const struct vdso_image *image, unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long text_start;
	int ret = 0;

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;

	addr = get_unmapped_area(NULL, addr,
				 image->size - image->sym_vvar_start, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	text_start = addr - image->sym_vvar_start;

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	vma = _install_special_mapping(mm,
				       text_start,
				       image->size,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &vdso_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	vma = _install_special_mapping(mm,
				       addr,
				       -image->sym_vvar_start,
				       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
				       VM_PFNMAP,
				       &vvar_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		do_munmap(mm, text_start, image->size, NULL);
	} else {
		current->mm->context.vdso = (void __user *)text_start;
		current->mm->context.vdso_image = image;
	}

up_fail:
	up_write(&mm->mmap_sem);
	return ret;
}

#ifdef CONFIG_X86_64
/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset.  This way there is no hole in the middle of address space.
 * To save memory make sure it is still in the same PTE as the stack
 * top.  This doesn't give that many random bits.
 *
 * Note that this algorithm is imperfect: the distribution of the vdso
 * start address within a PMD is biased toward the end.
 *
 * Only used for the 64-bit and x32 vdsos.
 */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
	unsigned long addr, end;
	unsigned offset;

	/*
	 * Round up the start address.  It can start out unaligned as a result
	 * of stack start randomization.
	 */
	start = PAGE_ALIGN(start);

	/* Round the lowest possible end address up to a PMD boundary. */
	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;

	if (end > start) {
		offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
		addr = start + (offset << PAGE_SHIFT);
	} else {
		addr = start;
	}

	/*
	 * Forcibly align the final address in case we have a hardware
	 * issue that requires alignment for performance reasons.
	 */
	addr = align_vdso_addr(addr);

	return addr;
}

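/* Map the 64-bit or x32 vdso at a randomized address above the process stack. */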
static int map_vdso_randomized(const struct vdso_image *image)
{
	unsigned long addr = vdso_addr(current->mm->start_stack, image->size-image->sym_vvar_start);

	return map_vdso(image, addr);
}
#endif

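/*
 * Map the given vdso image at a fixed address; fails with -EEXIST if a
 * vdso or vvar mapping is already present in the mm.
 */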
int map_vdso_once(const struct vdso_image *image, unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	/*
	 * Check if we have already mapped the vdso blob - fail to prevent
	 * userspace from abusing install_special_mapping(), which may not
	 * do accounting and rlimits right.
	 * We could search the VMA near context.vdso, but it's a slowpath,
	 * so let's explicitly check all VMAs to be completely sure.
	 */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma_is_special_mapping(vma, &vdso_mapping) ||
				vma_is_special_mapping(vma, &vvar_mapping)) {
			up_write(&mm->mmap_sem);
			return -EEXIST;
		}
	}
	up_write(&mm->mmap_sem);

	return map_vdso(image, addr);
}

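/* Map the 32-bit vdso when it is enabled. */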
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static int load_vdso32(void)
{
	if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
		return 0;

	return map_vdso(&vdso_image_32, 0);
}
#endif

#ifdef CONFIG_X86_64
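/* Called at exec time to map the 64-bit vdso into a new process's address space. */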
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	if (!vdso64_enabled)
		return 0;

	return map_vdso_randomized(&vdso_image_64);
}

#ifdef CONFIG_COMPAT
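/* Compat exec path: x32 tasks get the randomized x32 vdso, ia32 tasks the 32-bit one. */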
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp)
{
#ifdef CONFIG_X86_X32_ABI
	if (test_thread_flag(TIF_X32)) {
		if (!vdso64_enabled)
			return 0;
		return map_vdso_randomized(&vdso_image_x32);
	}
#endif
#ifdef CONFIG_IA32_EMULATION
	return load_vdso32();
#else
	return 0;
#endif
}
#endif
#else
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return load_vdso32();
}
#endif

#ifdef CONFIG_X86_64
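/* Parse the "vdso=" boot parameter to enable or disable the 64-bit vdso. */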
static __init int vdso_setup(char *s)
{
	vdso64_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("vdso=", vdso_setup);

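/* Apply alternatives to the 64-bit (and, if configured, x32) vdso images at boot. */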
static int __init init_vdso(void)
{
	init_vdso_image(&vdso_image_64);

#ifdef CONFIG_X86_X32_ABI
	init_vdso_image(&vdso_image_x32);
#endif

	return 0;
}
subsys_initcall(init_vdso);
#endif /* CONFIG_X86_64 */