xref: /openbmc/linux/arch/x86/entry/vdso/vma.c (revision 3e26a691)
/*
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 *
 * This contains most of the x86 vDSO kernel-side code.
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/cpu.h>
#include <asm/pvclock.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/vvar.h>
#include <asm/page.h>
#include <asm/hpet.h>
#include <asm/desc.h>
#include <asm/cpufeature.h>

#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;
#endif

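/*
 * Patch each vDSO image once at boot: apply_alternatives() rewrites the
 * image's alternative-instruction sites in place, so the pages later
 * mapped into user space already carry the CPU-appropriate sequences.
 */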
void __init init_vdso_image(const struct vdso_image *image)
{
	BUG_ON(image->size % PAGE_SIZE != 0);

	apply_alternatives((struct alt_instr *)(image->data + image->alt),
			   (struct alt_instr *)(image->data + image->alt +
						image->alt_len));
}

struct linux_binprm;

/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset.  This way there is no hole in the middle of address space.
 * To save memory make sure it is still in the same PTE as the stack
 * top.  This doesn't give that many random bits.
 *
 * Note that this algorithm is imperfect: the distribution of the vdso
 * start address within a PMD is biased toward the end.
 *
 * Only used for the 64-bit and x32 vdsos.
 */
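/*
 * Illustrative walk-through of vdso_addr() with hypothetical values
 * (4 KB pages, 2 MB PMDs): for start = 0x7ffd12345678 and len = 3 pages,
 * PAGE_ALIGN() gives 0x7ffd12346000, end is rounded up to the PMD
 * boundary 0x7ffd12400000 and then reduced by len to 0x7ffd123fd000,
 * and a page-granular random offset picks an address in [start, end]
 * before align_vdso_addr() is applied.
 */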
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
#ifdef CONFIG_X86_32
	return 0;
#else
	unsigned long addr, end;
	unsigned offset;

	/*
	 * Round up the start address.  It can start out unaligned as a result
	 * of stack start randomization.
	 */
	start = PAGE_ALIGN(start);

	/* Round the lowest possible end address up to a PMD boundary. */
	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;

	if (end > start) {
		offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
		addr = start + (offset << PAGE_SHIFT);
	} else {
		addr = start;
	}

	/*
	 * Forcibly align the final address in case we have a hardware
	 * issue that requires alignment for performance reasons.
	 */
	addr = align_vdso_addr(addr);

	return addr;
#endif
}

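/*
 * Fault handler for the "[vdso]" text mapping: the backing pages are the
 * kernel's own copy of the image (image->data), so a fault just looks up
 * the page for the faulting offset and takes a reference on it.
 */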
static int vdso_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;

	if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size)
		return VM_FAULT_SIGBUS;

	vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT));
	get_page(vmf->page);
	return 0;
}

static const struct vm_special_mapping text_mapping = {
	.name = "[vdso]",
	.fault = vdso_fault,
};

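/*
 * Fault handler for the "[vvar]" mapping.  Unlike the text mapping, the
 * pages here (the vvar data page, the HPET MMIO page and the pvclock
 * page) are inserted as raw PFN mappings, which is why map_vdso() marks
 * this VMA VM_PFNMAP | VM_IO | VM_DONTDUMP.
 */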
static int vvar_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;
	long sym_offset;
	int ret = -EFAULT;

	if (!image)
		return VM_FAULT_SIGBUS;

	sym_offset = (long)(vmf->pgoff << PAGE_SHIFT) +
		image->sym_vvar_start;

	/*
	 * Sanity check: a symbol offset of zero means that the page
	 * does not exist for this vdso image, not that the page is at
	 * offset zero relative to the text mapping.  This should be
	 * impossible here, because sym_offset should only be zero for
	 * the page past the end of the vvar mapping.
	 */
	if (sym_offset == 0)
		return VM_FAULT_SIGBUS;

	if (sym_offset == image->sym_vvar_page) {
		ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address,
				    __pa_symbol(&__vvar_page) >> PAGE_SHIFT);
	} else if (sym_offset == image->sym_hpet_page) {
#ifdef CONFIG_HPET_TIMER
		if (hpet_address && vclock_was_used(VCLOCK_HPET)) {
			ret = vm_insert_pfn_prot(
				vma,
				(unsigned long)vmf->virtual_address,
				hpet_address >> PAGE_SHIFT,
				pgprot_noncached(PAGE_READONLY));
		}
#endif
	} else if (sym_offset == image->sym_pvclock_page) {
		struct pvclock_vsyscall_time_info *pvti =
			pvclock_pvti_cpu0_va();
		if (pvti && vclock_was_used(VCLOCK_PVCLOCK)) {
			ret = vm_insert_pfn(
				vma,
				(unsigned long)vmf->virtual_address,
				__pa(pvti) >> PAGE_SHIFT);
		}
	}

	if (ret == 0 || ret == -EBUSY)
		return VM_FAULT_NOPAGE;

	return VM_FAULT_SIGBUS;
}

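/*
 * Map one vDSO image into the current process.  sym_vvar_start is a
 * negative offset from the start of the vDSO text, so a single
 * get_unmapped_area() call of image->size - sym_vvar_start bytes covers
 * both areas: the vvar pages at the low end and the text at
 * text_start = addr - sym_vvar_start.
 */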
static int map_vdso(const struct vdso_image *image, bool calculate_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr, text_start;
	int ret = 0;
	static const struct vm_special_mapping vvar_mapping = {
		.name = "[vvar]",
		.fault = vvar_fault,
	};

	if (calculate_addr) {
		addr = vdso_addr(current->mm->start_stack,
				 image->size - image->sym_vvar_start);
	} else {
		addr = 0;
	}

	down_write(&mm->mmap_sem);

	addr = get_unmapped_area(NULL, addr,
				 image->size - image->sym_vvar_start, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	text_start = addr - image->sym_vvar_start;
	current->mm->context.vdso = (void __user *)text_start;
	current->mm->context.vdso_image = image;

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	vma = _install_special_mapping(mm,
				       text_start,
				       image->size,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &text_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	vma = _install_special_mapping(mm,
				       addr,
				       -image->sym_vvar_start,
				       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
				       VM_PFNMAP,
				       &vvar_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

up_fail:
	if (ret)
		current->mm->context.vdso = NULL;

	up_write(&mm->mmap_sem);
	return ret;
}

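/*
 * The 32-bit vDSO is mapped only when vdso32_enabled is exactly 1, and
 * with calculate_addr == false, so its placement is left to
 * get_unmapped_area() rather than vdso_addr().
 */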
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static int load_vdso32(void)
{
	if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
		return 0;

	return map_vdso(&vdso_image_32, false);
}
#endif

#ifdef CONFIG_X86_64
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	if (!vdso64_enabled)
		return 0;

	return map_vdso(&vdso_image_64, true);
}

#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp)
{
#ifdef CONFIG_X86_X32_ABI
	if (test_thread_flag(TIF_X32)) {
		if (!vdso64_enabled)
			return 0;

		return map_vdso(&vdso_image_x32, true);
	}
#endif
#ifdef CONFIG_IA32_EMULATION
	return load_vdso32();
#else
	return 0;
#endif
}
#endif
#else
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return load_vdso32();
}
#endif

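/*
 * "vdso=" on the kernel command line sets vdso64_enabled; booting with
 * "vdso=0", for example, makes arch_setup_additional_pages() skip the
 * vDSO mapping for 64-bit tasks.
 */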
#ifdef CONFIG_X86_64
static __init int vdso_setup(char *s)
{
	vdso64_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("vdso=", vdso_setup);
#endif

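/*
 * vgetcpu_cpu_init() publishes each CPU's number and NUMA node to user
 * space in two places: the TSC_AUX MSR read by RDTSCP, and the limit
 * field of the GDT_ENTRY_PER_CPU segment descriptor, which the vDSO can
 * read with LSL when RDTSCP is not available.
 */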
#ifdef CONFIG_X86_64
static void vgetcpu_cpu_init(void *arg)
{
	int cpu = smp_processor_id();
	struct desc_struct d = { };
	unsigned long node = 0;
#ifdef CONFIG_NUMA
	node = cpu_to_node(cpu);
#endif
	if (static_cpu_has(X86_FEATURE_RDTSCP))
		write_rdtscp_aux((node << 12) | cpu);

	/*
	 * Store cpu number in limit so that it can be loaded
	 * quickly in user space in vgetcpu. (12 bits for the CPU
	 * and 8 bits for the node)
	 */
	d.limit0 = cpu | ((node & 0xf) << 12);
	d.limit = node >> 4;
	d.type = 5;		/* RO data, expand down, accessed */
	d.dpl = 3;		/* Visible to user code */
	d.s = 1;		/* Not a system segment */
	d.p = 1;		/* Present */
	d.d = 1;		/* 32-bit */

	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
}
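/*
 * Illustrative encoding from vgetcpu_cpu_init() for a hypothetical
 * cpu = 5 on node = 2: TSC_AUX becomes (2 << 12) | 5 = 0x2005, limit0 =
 * 5 | ((2 & 0xf) << 12) = 0x2005 and limit = 2 >> 4 = 0, i.e. the low
 * 12 limit bits hold the CPU and the next 8 bits hold the node.
 */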

static int
vgetcpu_cpu_notifier(struct notifier_block *n, unsigned long action, void *arg)
{
	long cpu = (long)arg;

	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
		smp_call_function_single(cpu, vgetcpu_cpu_init, NULL, 1);

	return NOTIFY_DONE;
}

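/*
 * Boot-time setup: patch the vDSO images, initialize vgetcpu state on
 * every online CPU, and register a hotplug notifier so CPUs brought
 * online later are initialized as well.
 */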
static int __init init_vdso(void)
{
	init_vdso_image(&vdso_image_64);

#ifdef CONFIG_X86_X32_ABI
	init_vdso_image(&vdso_image_x32);
#endif

	cpu_notifier_register_begin();

	on_each_cpu(vgetcpu_cpu_init, NULL, 1);
	/* notifier priority > KVM */
	__hotcpu_notifier(vgetcpu_cpu_notifier, 30);

	cpu_notifier_register_done();

	return 0;
}
subsys_initcall(init_vdso);
#endif /* CONFIG_X86_64 */