xref: /openbmc/linux/arch/x86/entry/vdso/vma.c (revision 4f139972b489f8bc2c821aa25ac65018d92af3f7)
/*
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 *
 * This contains most of the x86 vDSO kernel-side code.
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/cpu.h>
#include <linux/ptrace.h>
#include <asm/pvclock.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/vvar.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/cpufeature.h>

#if defined(CONFIG_X86_64)
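/*
 * Non-zero if the 64-bit vDSO should be mapped into new processes.
 * Can be changed with the "vdso=" boot parameter; see vdso_setup() below.
 */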
unsigned int __read_mostly vdso64_enabled = 1;
#endif

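/*
 * One-time setup for a vDSO image: the image must be a whole number of
 * pages, and its recorded alternative instructions (image->alt ..
 * image->alt + image->alt_len) are patched for the running CPU.
 */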
void __init init_vdso_image(const struct vdso_image *image)
{
	BUG_ON(image->size % PAGE_SIZE != 0);

	apply_alternatives((struct alt_instr *)(image->data + image->alt),
			   (struct alt_instr *)(image->data + image->alt +
						image->alt_len));
}

struct linux_binprm;

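/*
 * Fault handler for the [vdso] special mapping: pages are served directly
 * from the kernel's copy of the vDSO image, so all processes share the same
 * physical text pages.
 */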
static int vdso_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;

	if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size)
		return VM_FAULT_SIGBUS;

	vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT));
	get_page(vmf->page);
	return 0;
}

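/*
 * If a 32-bit task is sitting on the int80 landing pad (see
 * do_fast_syscall_32()) while its vDSO is moved by mremap(), rewrite the
 * saved instruction pointer so the task resumes at the pad's new address.
 */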
static void vdso_fix_landing(const struct vdso_image *image,
		struct vm_area_struct *new_vma)
{
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	if (in_ia32_syscall() && image == &vdso_image_32) {
		struct pt_regs *regs = current_pt_regs();
		unsigned long vdso_land = image->sym_int80_landing_pad;
		unsigned long old_land_addr = vdso_land +
			(unsigned long)current->mm->context.vdso;

		/* Fix up the userspace landing address; see do_fast_syscall_32(). */
		if (regs->ip == old_land_addr)
			regs->ip = new_vma->vm_start + vdso_land;
	}
#endif
}

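/*
 * mremap() callback for the [vdso] VMA: the mapping may move but must keep
 * its size.  On a successful move, refresh the cached base address (and the
 * 32-bit landing pad, above) so syscall return keeps working.
 */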
static int vdso_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
	const struct vdso_image *image = current->mm->context.vdso_image;

	if (image->size != new_size)
		return -EINVAL;

	if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
		return -EFAULT;

	vdso_fix_landing(image, new_vma);
	current->mm->context.vdso = (void __user *)new_vma->vm_start;

	return 0;
}

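/*
 * Fault handler for the [vvar] special mapping.  The faulting page offset is
 * converted to a symbol offset within the image, and the matching physical
 * page (the vvar page, or the pvclock page when that clock source is in use)
 * is inserted as a raw PFN mapping.
 */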
static int vvar_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;
	long sym_offset;
	int ret = -EFAULT;

	if (!image)
		return VM_FAULT_SIGBUS;

	sym_offset = (long)(vmf->pgoff << PAGE_SHIFT) +
		image->sym_vvar_start;

	/*
	 * Sanity check: a symbol offset of zero means that the page
	 * does not exist for this vdso image, not that the page is at
	 * offset zero relative to the text mapping.  This should be
	 * impossible here, because sym_offset should only be zero for
	 * the page past the end of the vvar mapping.
	 */
	if (sym_offset == 0)
		return VM_FAULT_SIGBUS;

	if (sym_offset == image->sym_vvar_page) {
		ret = vm_insert_pfn(vma, vmf->address,
				    __pa_symbol(&__vvar_page) >> PAGE_SHIFT);
	} else if (sym_offset == image->sym_pvclock_page) {
		struct pvclock_vsyscall_time_info *pvti =
			pvclock_pvti_cpu0_va();
		if (pvti && vclock_was_used(VCLOCK_PVCLOCK)) {
			ret = vm_insert_pfn(
				vma,
				vmf->address,
				__pa(pvti) >> PAGE_SHIFT);
		}
	}

	if (ret == 0 || ret == -EBUSY)
		return VM_FAULT_NOPAGE;

	return VM_FAULT_SIGBUS;
}

static const struct vm_special_mapping vdso_mapping = {
	.name = "[vdso]",
	.fault = vdso_fault,
	.mremap = vdso_mremap,
};
static const struct vm_special_mapping vvar_mapping = {
	.name = "[vvar]",
	.fault = vvar_fault,
};

/*
 * Add vdso and vvar mappings to the current process.
 * @image          - blob to map
 * @addr           - request a specific address (zero means map at any free address)
 */
static int map_vdso(const struct vdso_image *image, unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long text_start;
	int ret = 0;

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;

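	/*
	 * image->sym_vvar_start is a negative offset: the [vvar] area sits
	 * immediately below the vDSO text.  Reserve one contiguous range big
	 * enough for both mappings, then split it below.
	 */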
	addr = get_unmapped_area(NULL, addr,
				 image->size - image->sym_vvar_start, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	text_start = addr - image->sym_vvar_start;

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	vma = _install_special_mapping(mm,
				       text_start,
				       image->size,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &vdso_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	vma = _install_special_mapping(mm,
				       addr,
				       -image->sym_vvar_start,
				       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
				       VM_PFNMAP,
				       &vvar_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		do_munmap(mm, text_start, image->size, NULL);
	} else {
		current->mm->context.vdso = (void __user *)text_start;
		current->mm->context.vdso_image = image;
	}

up_fail:
	up_write(&mm->mmap_sem);
	return ret;
}

#ifdef CONFIG_X86_64
/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset.  This way there is no hole in the middle of address space.
 * To save memory make sure it is still in the same PTE as the stack
 * top.  This doesn't give that many random bits.
 *
 * Note that this algorithm is imperfect: the distribution of the vdso
 * start address within a PMD is biased toward the end.
 *
 * Only used for the 64-bit and x32 vdsos.
 */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
	unsigned long addr, end;
	unsigned offset;

	/*
	 * Round up the start address.  It can start out unaligned as a result
	 * of stack start randomization.
	 */
	start = PAGE_ALIGN(start);

	/* Round the lowest possible end address up to a PMD boundary. */
	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;

	if (end > start) {
		offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
		addr = start + (offset << PAGE_SHIFT);
	} else {
		addr = start;
	}

	/*
	 * Forcibly align the final address in case we have a hardware
	 * issue that requires alignment for performance reasons.
	 */
	addr = align_vdso_addr(addr);

	return addr;
}

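/*
 * Map the image at a randomized address above the process stack (see
 * vdso_addr()).  The address is only a hint: map_vdso() passes it to
 * get_unmapped_area(), which may pick a different range if that one is
 * unavailable.
 */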
static int map_vdso_randomized(const struct vdso_image *image)
{
	unsigned long addr = vdso_addr(current->mm->start_stack,
				       image->size - image->sym_vvar_start);

	return map_vdso(image, addr);
}
#endif

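/*
 * Map a given vDSO image at a caller-supplied address, at most once per
 * process.  A second attempt fails with -EEXIST, so userspace cannot use
 * this path to pile up special mappings that bypass normal accounting.
 */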
int map_vdso_once(const struct vdso_image *image, unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	/*
	 * Check if we have already mapped the vdso blob - fail to prevent
	 * userspace from abusing install_special_mapping, which may not
	 * do accounting and rlimits right.
	 * We could search the VMA near context.vdso, but it's a slowpath,
	 * so let's explicitly check all VMAs to be completely sure.
	 */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma_is_special_mapping(vma, &vdso_mapping) ||
				vma_is_special_mapping(vma, &vvar_mapping)) {
			up_write(&mm->mmap_sem);
			return -EEXIST;
		}
	}
	up_write(&mm->mmap_sem);

	return map_vdso(image, addr);
}

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static int load_vdso32(void)
{
	if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
		return 0;

	return map_vdso(&vdso_image_32, 0);
}
#endif

#ifdef CONFIG_X86_64
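/*
 * Called during exec() (from the ELF loader) to map the 64-bit vDSO into a
 * new process image, unless it has been disabled on the command line.
 */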
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	if (!vdso64_enabled)
		return 0;

	return map_vdso_randomized(&vdso_image_64);
}

#ifdef CONFIG_COMPAT
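/*
 * Compat exec path: x32 tasks get the randomized x32 image (subject to
 * vdso64_enabled), while ia32 tasks go through load_vdso32().
 */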
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp)
{
#ifdef CONFIG_X86_X32_ABI
	if (test_thread_flag(TIF_X32)) {
		if (!vdso64_enabled)
			return 0;
		return map_vdso_randomized(&vdso_image_x32);
	}
#endif
#ifdef CONFIG_IA32_EMULATION
	return load_vdso32();
#else
	return 0;
#endif
}
#endif
#else
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return load_vdso32();
}
#endif

#ifdef CONFIG_X86_64
static __init int vdso_setup(char *s)
{
	vdso64_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("vdso=", vdso_setup);
#endif

#ifdef CONFIG_X86_64
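/*
 * Per-CPU setup for the vgetcpu() fast path: publish the CPU and node
 * numbers both in MSR_TSC_AUX (read with RDTSCP) and in the limit field of
 * the per-CPU GDT entry, so user space can recover them without a syscall.
 */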
static void vgetcpu_cpu_init(void *arg)
{
	int cpu = smp_processor_id();
	struct desc_struct d = { };
	unsigned long node = 0;
#ifdef CONFIG_NUMA
	node = cpu_to_node(cpu);
#endif
	if (static_cpu_has(X86_FEATURE_RDTSCP))
		write_rdtscp_aux((node << 12) | cpu);

	/*
	 * Store cpu number in limit so that it can be loaded
	 * quickly in user space in vgetcpu. (12 bits for the CPU
	 * and 8 bits for the node)
	 */
	d.limit0 = cpu | ((node & 0xf) << 12);
	d.limit = node >> 4;
	d.type = 5;		/* RO data, expand down, accessed */
	d.dpl = 3;		/* Visible to user code */
	d.s = 1;		/* Not a system segment */
	d.p = 1;		/* Present */
	d.d = 1;		/* 32-bit */

	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
}

static int vgetcpu_online(unsigned int cpu)
{
	return smp_call_function_single(cpu, vgetcpu_cpu_init, NULL, 1);
}

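/*
 * Boot-time initialization: patch alternatives into the 64-bit (and, when
 * enabled, x32) vDSO images, and register a CPU hotplug callback so every
 * CPU that comes online gets its vgetcpu GDT entry set up.
 */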
static int __init init_vdso(void)
{
	init_vdso_image(&vdso_image_64);

#ifdef CONFIG_X86_X32_ABI
	init_vdso_image(&vdso_image_x32);
#endif

	/* notifier priority > KVM */
	return cpuhp_setup_state(CPUHP_AP_X86_VDSO_VMA_ONLINE,
				 "x86/vdso/vma:online", vgetcpu_online, NULL);
}
subsys_initcall(init_vdso);
#endif /* CONFIG_X86_64 */