xref: /openbmc/linux/arch/x86/entry/vdso/vma.c (revision 293d5b43)
/*
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 *
 * This contains most of the x86 vDSO kernel-side code.
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/cpu.h>
#include <linux/ptrace.h>
#include <asm/pvclock.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/vvar.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/cpufeature.h>

#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;
#endif

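/*
 * Called once per vdso image at boot: check that the image is a whole
 * number of pages and patch its alternative-instruction sites for the
 * CPU we are actually running on.
 */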
void __init init_vdso_image(const struct vdso_image *image)
{
	BUG_ON(image->size % PAGE_SIZE != 0);

	apply_alternatives((struct alt_instr *)(image->data + image->alt),
			   (struct alt_instr *)(image->data + image->alt +
						image->alt_len));
}

struct linux_binprm;

/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset.  This way there is no hole in the middle of the address
 * space.  To save memory, keep it within the same PMD as the stack
 * top, so that both are covered by a single page-table page.  This
 * doesn't give that many random bits.
 *
 * Note that this algorithm is imperfect: the distribution of the vdso
 * start address within a PMD is biased toward the end.
 *
 * Only used for the 64-bit and x32 vdsos.
 */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
#ifdef CONFIG_X86_32
	return 0;
#else
	unsigned long addr, end;
	unsigned offset;

	/*
	 * Round up the start address.  It can start out unaligned as a result
	 * of stack start randomization.
	 */
	start = PAGE_ALIGN(start);

	/* Round the lowest possible end address up to a PMD boundary. */
	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;

	if (end > start) {
		offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
		addr = start + (offset << PAGE_SHIFT);
	} else {
		addr = start;
	}

	/*
	 * Forcibly align the final address in case we have a hardware
	 * issue that requires alignment for performance reasons.
	 */
	addr = align_vdso_addr(addr);

	return addr;
#endif
}

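/*
 * Fault handler for the [vdso] mapping: each page is backed directly
 * by the kernel's copy of the vdso image, so just look up the page
 * for the faulting offset and take a reference on it.
 */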
static int vdso_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;

	if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size)
		return VM_FAULT_SIGBUS;

	vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT));
	get_page(vmf->page);
	return 0;
}

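/*
 * A 32-bit task may mremap() its vdso while still inside the int80
 * landing pad (CRIU does this when restoring a task).  If the saved
 * user IP points at the landing pad in the old mapping, rewrite it to
 * the same symbol in the new one.
 */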
static void vdso_fix_landing(const struct vdso_image *image,
		struct vm_area_struct *new_vma)
{
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	if (in_ia32_syscall() && image == &vdso_image_32) {
		struct pt_regs *regs = current_pt_regs();
		unsigned long vdso_land = image->sym_int80_landing_pad;
		unsigned long old_land_addr = vdso_land +
			(unsigned long)current->mm->context.vdso;

		/* Fix up the userspace landing address; see do_fast_syscall_32(). */
		if (regs->ip == old_land_addr)
			regs->ip = new_vma->vm_start + vdso_land;
	}
#endif
}

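/*
 * mremap() hook for the [vdso] mapping: only a plain move of the
 * whole vdso is supported, so the sizes must match, and the cached
 * context.vdso base (and a possibly in-flight int80 return address)
 * must follow the mapping to its new location.
 */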
static int vdso_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
	const struct vdso_image *image = current->mm->context.vdso_image;

	if (image->size != new_size)
		return -EINVAL;

	if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
		return -EFAULT;

	vdso_fix_landing(image, new_vma);
	current->mm->context.vdso = (void __user *)new_vma->vm_start;

	return 0;
}

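/*
 * Fault handler for the [vvar] mapping.  The area is VM_PFNMAP, so
 * pages are inserted by PFN rather than via struct page: the shared
 * vvar page always, the pvclock page only when a paravirt clock is
 * actually in use.
 */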
static int vvar_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;
	long sym_offset;
	int ret = -EFAULT;

	if (!image)
		return VM_FAULT_SIGBUS;

	sym_offset = (long)(vmf->pgoff << PAGE_SHIFT) +
		image->sym_vvar_start;

	/*
	 * Sanity check: a symbol offset of zero means that the page
	 * does not exist for this vdso image, not that the page is at
	 * offset zero relative to the text mapping.  This should be
	 * impossible here, because sym_offset should only be zero for
	 * the page past the end of the vvar mapping.
	 */
	if (sym_offset == 0)
		return VM_FAULT_SIGBUS;

	if (sym_offset == image->sym_vvar_page) {
		ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address,
				    __pa_symbol(&__vvar_page) >> PAGE_SHIFT);
	} else if (sym_offset == image->sym_pvclock_page) {
		struct pvclock_vsyscall_time_info *pvti =
			pvclock_pvti_cpu0_va();
		if (pvti && vclock_was_used(VCLOCK_PVCLOCK)) {
			ret = vm_insert_pfn(
				vma,
				(unsigned long)vmf->virtual_address,
				__pa(pvti) >> PAGE_SHIFT);
		}
	}

	if (ret == 0 || ret == -EBUSY)
		return VM_FAULT_NOPAGE;

	return VM_FAULT_SIGBUS;
}

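/*
 * Map the vvar area and the vdso text as one contiguous block.
 * sym_vvar_start is negative, so the vvar pages sit immediately below
 * the vdso text: one get_unmapped_area() call reserves both, addr is
 * the start of the vvar area and text_start the start of the image.
 * Resulting layout (sizes hypothetical):
 *
 *	addr       -> [vvar]  (-sym_vvar_start bytes, VM_PFNMAP)
 *	text_start -> [vdso]  (image->size bytes, VM_READ|VM_EXEC)
 */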
static int map_vdso(const struct vdso_image *image, bool calculate_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr, text_start;
	int ret = 0;

	static const struct vm_special_mapping vdso_mapping = {
		.name = "[vdso]",
		.fault = vdso_fault,
		.mremap = vdso_mremap,
	};
	static const struct vm_special_mapping vvar_mapping = {
		.name = "[vvar]",
		.fault = vvar_fault,
	};

	if (calculate_addr) {
		addr = vdso_addr(current->mm->start_stack,
				 image->size - image->sym_vvar_start);
	} else {
		addr = 0;
	}

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;

	addr = get_unmapped_area(NULL, addr,
				 image->size - image->sym_vvar_start, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	text_start = addr - image->sym_vvar_start;
	current->mm->context.vdso = (void __user *)text_start;
	current->mm->context.vdso_image = image;

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	vma = _install_special_mapping(mm,
				       text_start,
				       image->size,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &vdso_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	vma = _install_special_mapping(mm,
				       addr,
				       -image->sym_vvar_start,
				       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
				       VM_PFNMAP,
				       &vvar_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

up_fail:
	if (ret)
		current->mm->context.vdso = NULL;

	up_write(&mm->mmap_sem);
	return ret;
}

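/*
 * For the 32-bit vdso, no stack-relative placement is attempted:
 * map_vdso() is called with calculate_addr == false, so the kernel's
 * default get_unmapped_area() policy picks the address.
 */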
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static int load_vdso32(void)
{
	if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
		return 0;

	return map_vdso(&vdso_image_32, false);
}
#endif

#ifdef CONFIG_X86_64
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	if (!vdso64_enabled)
		return 0;

	return map_vdso(&vdso_image_64, true);
}

#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp)
{
#ifdef CONFIG_X86_X32_ABI
	if (test_thread_flag(TIF_X32)) {
		if (!vdso64_enabled)
			return 0;

		return map_vdso(&vdso_image_x32, true);
	}
#endif
#ifdef CONFIG_IA32_EMULATION
	return load_vdso32();
#else
	return 0;
#endif
}
#endif
#else
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return load_vdso32();
}
#endif

#ifdef CONFIG_X86_64
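/*
 * "vdso=" boot parameter for the 64-bit vdso: "vdso=0" disables it,
 * any nonzero value (the default is 1) leaves it enabled.
 */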
static __init int vdso_setup(char *s)
{
	vdso64_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("vdso=", vdso_setup);
#endif

#ifdef CONFIG_X86_64
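/*
 * Each CPU gets a per-CPU GDT descriptor whose segment limit encodes
 * that CPU's number in bits 0-11 and its node in bits 12-19; RDTSCP's
 * TSC_AUX MSR gets the same encoding.  User space can then recover
 * both values without a syscall, roughly as the vdso's __getcpu()
 * helper does.  A minimal sketch of the LSL side:
 *
 *	unsigned int p;
 *	asm volatile ("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
 *	cpu  = p & 0xfff;
 *	node = p >> 12;
 */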
static void vgetcpu_cpu_init(void *arg)
{
	int cpu = smp_processor_id();
	struct desc_struct d = { };
	unsigned long node = 0;
#ifdef CONFIG_NUMA
	node = cpu_to_node(cpu);
#endif
	if (static_cpu_has(X86_FEATURE_RDTSCP))
		write_rdtscp_aux((node << 12) | cpu);

	/*
	 * Store the CPU number in the segment limit so that user space
	 * can load it quickly in vgetcpu(): 12 bits for the CPU and 8
	 * bits for the node.
	 */
	d.limit0 = cpu | ((node & 0xf) << 12);
	d.limit = node >> 4;
	d.type = 5;		/* RO data, expand down, accessed */
	d.dpl = 3;		/* Visible to user code */
	d.s = 1;		/* Not a system segment */
	d.p = 1;		/* Present */
	d.d = 1;		/* 32-bit */

	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
}

static int vgetcpu_online(unsigned int cpu)
{
	return smp_call_function_single(cpu, vgetcpu_cpu_init, NULL, 1);
}

static int __init init_vdso(void)
{
	init_vdso_image(&vdso_image_64);

#ifdef CONFIG_X86_X32_ABI
	init_vdso_image(&vdso_image_x32);
#endif

	/* The notifier must run with higher priority than KVM's. */
	return cpuhp_setup_state(CPUHP_AP_X86_VDSO_VMA_ONLINE,
				 "AP_X86_VDSO_VMA_ONLINE", vgetcpu_online, NULL);
}
subsys_initcall(init_vdso);
#endif /* CONFIG_X86_64 */