xref: /openbmc/linux/arch/x86/entry/vdso/vma.c (revision 1c2dd16a)
/*
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 *
 * This contains most of the x86 vDSO kernel-side code.
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/cpu.h>
#include <linux/ptrace.h>
#include <asm/pvclock.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/vvar.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/cpufeature.h>
#include <asm/mshyperv.h>

#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;
#endif

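/*
 * Patch the image's alternatives once at boot so that every process maps
 * vdso text that has already been fixed up for the running CPU.
 */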
void __init init_vdso_image(const struct vdso_image *image)
{
	BUG_ON(image->size % PAGE_SIZE != 0);

	apply_alternatives((struct alt_instr *)(image->data + image->alt),
			   (struct alt_instr *)(image->data + image->alt +
						image->alt_len));
}

struct linux_binprm;

static int vdso_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;

	if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size)
		return VM_FAULT_SIGBUS;

	vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT));
	get_page(vmf->page);
	return 0;
}

static void vdso_fix_landing(const struct vdso_image *image,
		struct vm_area_struct *new_vma)
{
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	if (in_ia32_syscall() && image == &vdso_image_32) {
		struct pt_regs *regs = current_pt_regs();
		unsigned long vdso_land = image->sym_int80_landing_pad;
		unsigned long old_land_addr = vdso_land +
			(unsigned long)current->mm->context.vdso;

		/* Fix up the userspace landing pad - see do_fast_syscall_32() */
		if (regs->ip == old_land_addr)
			regs->ip = new_vma->vm_start + vdso_land;
	}
#endif
}

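/*
 * Called when the vdso mapping is moved with mremap(2) (e.g. by
 * checkpoint/restore tools): keep context.vdso pointing at the new
 * location and, for a 32-bit task, fix up a return IP parked on the
 * old int80 landing pad.
 */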
static int vdso_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
	const struct vdso_image *image = current->mm->context.vdso_image;

	if (image->size != new_size)
		return -EINVAL;

	if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
		return -EFAULT;

	vdso_fix_landing(image, new_vma);
	current->mm->context.vdso = (void __user *)new_vma->vm_start;

	return 0;
}

static int vvar_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;
	long sym_offset;
	int ret = -EFAULT;

	if (!image)
		return VM_FAULT_SIGBUS;

	sym_offset = (long)(vmf->pgoff << PAGE_SHIFT) +
		image->sym_vvar_start;

	/*
	 * Sanity check: a symbol offset of zero means that the page
	 * does not exist for this vdso image, not that the page is at
	 * offset zero relative to the text mapping.  This should be
	 * impossible here, because sym_offset should only be zero for
	 * the page past the end of the vvar mapping.
	 */
	if (sym_offset == 0)
		return VM_FAULT_SIGBUS;

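	/*
	 * Each known vvar symbol is backed by a kernel page that is
	 * inserted by PFN below; the vvar area is a VM_PFNMAP mapping,
	 * so no struct page refcounting happens here.
	 */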
	if (sym_offset == image->sym_vvar_page) {
		ret = vm_insert_pfn(vma, vmf->address,
				    __pa_symbol(&__vvar_page) >> PAGE_SHIFT);
	} else if (sym_offset == image->sym_pvclock_page) {
		struct pvclock_vsyscall_time_info *pvti =
			pvclock_pvti_cpu0_va();
		if (pvti && vclock_was_used(VCLOCK_PVCLOCK)) {
			ret = vm_insert_pfn(
				vma,
				vmf->address,
				__pa(pvti) >> PAGE_SHIFT);
		}
	} else if (sym_offset == image->sym_hvclock_page) {
		struct ms_hyperv_tsc_page *tsc_pg = hv_get_tsc_page();

		if (tsc_pg && vclock_was_used(VCLOCK_HVCLOCK))
			ret = vm_insert_pfn(vma, vmf->address,
					    vmalloc_to_pfn(tsc_pg));
	}

	if (ret == 0 || ret == -EBUSY)
		return VM_FAULT_NOPAGE;

	return VM_FAULT_SIGBUS;
}

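/*
 * The .name strings below are what these areas show up as in
 * /proc/<pid>/maps.
 */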
static const struct vm_special_mapping vdso_mapping = {
	.name = "[vdso]",
	.fault = vdso_fault,
	.mremap = vdso_mremap,
};
static const struct vm_special_mapping vvar_mapping = {
	.name = "[vvar]",
	.fault = vvar_fault,
};

/*
 * Add the vdso and vvar mappings to the current process.
 * @image          - blob to map
 * @addr           - request a specific address (zero to map at a free address)
 */
static int map_vdso(const struct vdso_image *image, unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long text_start;
	int ret = 0;

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;

	addr = get_unmapped_area(NULL, addr,
				 image->size - image->sym_vvar_start, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

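	/*
	 * sym_vvar_start is negative: the vvar area sits below the vdso
	 * text, so @addr is the base of the combined mapping and the text
	 * starts -sym_vvar_start bytes above it.
	 */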
	text_start = addr - image->sym_vvar_start;

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	vma = _install_special_mapping(mm,
				       text_start,
				       image->size,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &vdso_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	vma = _install_special_mapping(mm,
				       addr,
				       -image->sym_vvar_start,
				       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
				       VM_PFNMAP,
				       &vvar_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		do_munmap(mm, text_start, image->size, NULL);
	} else {
		current->mm->context.vdso = (void __user *)text_start;
		current->mm->context.vdso_image = image;
	}

up_fail:
	up_write(&mm->mmap_sem);
	return ret;
}

#ifdef CONFIG_X86_64
/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset.  This way there is no hole in the middle of address space.
 * To save memory, make sure it is still covered by the same page table
 * (PMD) as the stack top.  This doesn't give that many random bits.
 *
 * Note that this algorithm is imperfect: the distribution of the vdso
 * start address within a PMD is biased toward the end.
 *
 * Only used for the 64-bit and x32 vdsos.
 */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
	unsigned long addr, end;
	unsigned offset;

	/*
	 * Round up the start address.  It can start out unaligned as a result
	 * of stack start randomization.
	 */
	start = PAGE_ALIGN(start);

	/* Round the lowest possible end address up to a PMD boundary. */
	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;

	if (end > start) {
		offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
		addr = start + (offset << PAGE_SHIFT);
	} else {
		addr = start;
	}

	/*
	 * Forcibly align the final address in case we have a hardware
	 * issue that requires alignment for performance reasons.
	 */
	addr = align_vdso_addr(addr);

	return addr;
}

static int map_vdso_randomized(const struct vdso_image *image)
{
	unsigned long addr = vdso_addr(current->mm->start_stack,
				       image->size - image->sym_vvar_start);

	return map_vdso(image, addr);
}
#endif

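/*
 * Map a vdso blob at most once per mm; used by the ARCH_MAP_VDSO_*
 * arch_prctl() path to let userspace (re)insert a vdso image.
 */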
int map_vdso_once(const struct vdso_image *image, unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	/*
	 * Check if we have already mapped the vdso blob - fail to prevent
	 * userspace from abusing install_special_mapping, which may
	 * not do accounting and rlimit right.
	 * We could search the vma near context.vdso, but it's a slow path,
	 * so let's explicitly check all VMAs to be completely sure.
	 */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma_is_special_mapping(vma, &vdso_mapping) ||
				vma_is_special_mapping(vma, &vvar_mapping)) {
			up_write(&mm->mmap_sem);
			return -EEXIST;
		}
	}
	up_write(&mm->mmap_sem);

	return map_vdso(image, addr);
}

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static int load_vdso32(void)
{
	if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
		return 0;

	return map_vdso(&vdso_image_32, 0);
}
#endif

#ifdef CONFIG_X86_64
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	if (!vdso64_enabled)
		return 0;

	return map_vdso_randomized(&vdso_image_64);
}

#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp)
{
#ifdef CONFIG_X86_X32_ABI
	if (test_thread_flag(TIF_X32)) {
		if (!vdso64_enabled)
			return 0;
		return map_vdso_randomized(&vdso_image_x32);
	}
#endif
#ifdef CONFIG_IA32_EMULATION
	return load_vdso32();
#else
	return 0;
#endif
}
#endif
#else
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return load_vdso32();
}
#endif

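/* "vdso=0" on the kernel command line disables the 64-bit vdso. */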
#ifdef CONFIG_X86_64
static __init int vdso_setup(char *s)
{
	vdso64_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("vdso=", vdso_setup);
#endif

#ifdef CONFIG_X86_64
static void vgetcpu_cpu_init(void *arg)
{
	int cpu = smp_processor_id();
	struct desc_struct d = { };
	unsigned long node = 0;
#ifdef CONFIG_NUMA
	node = cpu_to_node(cpu);
#endif
	if (static_cpu_has(X86_FEATURE_RDTSCP))
		write_rdtscp_aux((node << 12) | cpu);

	/*
	 * Store cpu number in limit so that it can be loaded
	 * quickly in user space in vgetcpu. (12 bits for the CPU
	 * and 8 bits for the node)
	 */
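	/*
	 * The vdso's getcpu reads this back with LSL on this segment (or
	 * from the TSC_AUX MSR written above, via RDTSCP):
	 * cpu = value & 0xfff, node = value >> 12.
	 */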
	d.limit0 = cpu | ((node & 0xf) << 12);
	d.limit = node >> 4;
	d.type = 5;		/* RO data, expand down, accessed */
	d.dpl = 3;		/* Visible to user code */
	d.s = 1;		/* Not a system segment */
	d.p = 1;		/* Present */
	d.d = 1;		/* 32-bit */

	write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
}

static int vgetcpu_online(unsigned int cpu)
{
	return smp_call_function_single(cpu, vgetcpu_cpu_init, NULL, 1);
}

static int __init init_vdso(void)
{
	init_vdso_image(&vdso_image_64);

#ifdef CONFIG_X86_X32_ABI
	init_vdso_image(&vdso_image_x32);
#endif

	/* notifier priority > KVM */
	return cpuhp_setup_state(CPUHP_AP_X86_VDSO_VMA_ONLINE,
				 "x86/vdso/vma:online", vgetcpu_online, NULL);
}
subsys_initcall(init_vdso);
#endif /* CONFIG_X86_64 */