xref: /openbmc/linux/arch/x86/entry/vdso/vma.c (revision a8da474e)
/*
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 *
 * This contains most of the x86 vDSO kernel-side code.
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/cpu.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/vvar.h>
#include <asm/page.h>
#include <asm/hpet.h>
#include <asm/desc.h>

#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;
#endif

void __init init_vdso_image(const struct vdso_image *image)
{
	int i;
	int npages = (image->size) / PAGE_SIZE;

	BUG_ON(image->size % PAGE_SIZE != 0);
	for (i = 0; i < npages; i++)
		image->text_mapping.pages[i] =
			virt_to_page(image->data + i*PAGE_SIZE);

	apply_alternatives((struct alt_instr *)(image->data + image->alt),
			   (struct alt_instr *)(image->data + image->alt +
						image->alt_len));
}

struct linux_binprm;

/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset.  This way there is no hole in the middle of address space.
 * To save memory make sure it is still in the same PTE as the stack
 * top.  This doesn't give that many random bits.
 *
 * Note that this algorithm is imperfect: the distribution of the vdso
 * start address within a PMD is biased toward the end.
 *
 * Only used for the 64-bit and x32 vdsos.
 */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
#ifdef CONFIG_X86_32
	return 0;
#else
	unsigned long addr, end;
	unsigned offset;

	/*
	 * Round up the start address.  It can start out unaligned as a result
	 * of stack start randomization.
	 */
	start = PAGE_ALIGN(start);

	/* Round the lowest possible end address up to a PMD boundary. */
	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;

	if (end > start) {
		offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
		addr = start + (offset << PAGE_SHIFT);
	} else {
		addr = start;
	}

	/*
	 * Forcibly align the final address in case we have a hardware
	 * issue that requires alignment for performance reasons.
	 */
	addr = align_vdso_addr(addr);

	return addr;
#endif
}

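/*
 * Map the vDSO text and the vvar area into the current process.
 *
 * image->sym_vvar_start is negative: the vvar area sits immediately
 * below the vDSO text, so a single get_unmapped_area() call below
 * reserves room for both, and text_start ends up -sym_vvar_start
 * bytes above the start of the reservation.
 */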
static int map_vdso(const struct vdso_image *image, bool calculate_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr, text_start;
	int ret = 0;
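	/*
	 * The "[vvar]" mapping has no backing struct pages of its own;
	 * the vvar data page and (if present) the HPET page are wired
	 * up further down with remap_pfn_range()/io_remap_pfn_range().
	 */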
	static struct page *no_pages[] = {NULL};
	static struct vm_special_mapping vvar_mapping = {
		.name = "[vvar]",
		.pages = no_pages,
	};

	if (calculate_addr) {
		addr = vdso_addr(current->mm->start_stack,
				 image->size - image->sym_vvar_start);
	} else {
		addr = 0;
	}

	down_write(&mm->mmap_sem);

	addr = get_unmapped_area(NULL, addr,
				 image->size - image->sym_vvar_start, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	text_start = addr - image->sym_vvar_start;
	current->mm->context.vdso = (void __user *)text_start;

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	vma = _install_special_mapping(mm,
				       text_start,
				       image->size,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &image->text_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	vma = _install_special_mapping(mm,
				       addr,
				       -image->sym_vvar_start,
				       VM_READ|VM_MAYREAD,
				       &vvar_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

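	/*
	 * sym_vvar_page is a (negative) offset from the vDSO text, so
	 * this places the shared vvar data page inside the [vvar] vma
	 * installed above, read-only for userspace.
	 */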
	if (image->sym_vvar_page)
		ret = remap_pfn_range(vma,
				      text_start + image->sym_vvar_page,
				      __pa_symbol(&__vvar_page) >> PAGE_SHIFT,
				      PAGE_SIZE,
				      PAGE_READONLY);

	if (ret)
		goto up_fail;

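	/*
	 * If the platform has an HPET and this image exports a slot for
	 * it, expose the HPET MMIO page read-only and uncached so the
	 * vDSO time code can read the counter without a syscall.
	 */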
#ifdef CONFIG_HPET_TIMER
	if (hpet_address && image->sym_hpet_page) {
		ret = io_remap_pfn_range(vma,
			text_start + image->sym_hpet_page,
			hpet_address >> PAGE_SHIFT,
			PAGE_SIZE,
			pgprot_noncached(PAGE_READONLY));

		if (ret)
			goto up_fail;
	}
#endif

up_fail:
	if (ret)
		current->mm->context.vdso = NULL;

	up_write(&mm->mmap_sem);
	return ret;
}

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static int load_vdso32(void)
{
	if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
		return 0;

	return map_vdso(&vdso_image_32, false);
}
#endif

#ifdef CONFIG_X86_64
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	if (!vdso64_enabled)
		return 0;

	return map_vdso(&vdso_image_64, true);
}

#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp)
{
#ifdef CONFIG_X86_X32_ABI
	if (test_thread_flag(TIF_X32)) {
		if (!vdso64_enabled)
			return 0;

		return map_vdso(&vdso_image_x32, true);
	}
#endif
#ifdef CONFIG_IA32_EMULATION
	return load_vdso32();
#else
	return 0;
#endif
}
#endif
#else
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return load_vdso32();
}
#endif

#ifdef CONFIG_X86_64
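/*
 * "vdso=0" on the kernel command line disables the 64-bit (and x32)
 * vDSO for subsequently exec'd processes; any non-zero value keeps it
 * enabled.
 */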
static __init int vdso_setup(char *s)
{
	vdso64_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("vdso=", vdso_setup);
#endif

#ifdef CONFIG_X86_64
static void vgetcpu_cpu_init(void *arg)
{
	int cpu = smp_processor_id();
	struct desc_struct d = { };
	unsigned long node = 0;
#ifdef CONFIG_NUMA
	node = cpu_to_node(cpu);
#endif
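	/*
	 * On RDTSCP-capable CPUs also stash (node << 12) | cpu in the
	 * TSC_AUX MSR, so the vDSO getcpu path can recover both with a
	 * single RDTSCP instruction.
	 */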
	if (cpu_has(&cpu_data(cpu), X86_FEATURE_RDTSCP))
		write_rdtscp_aux((node << 12) | cpu);

	/*
	 * Store cpu number in limit so that it can be loaded
	 * quickly in user space in vgetcpu. (12 bits for the CPU
	 * and 8 bits for the node)
	 */
	d.limit0 = cpu | ((node & 0xf) << 12);
	d.limit = node >> 4;
	d.type = 5;		/* RO data, expand down, accessed */
	d.dpl = 3;		/* Visible to user code */
	d.s = 1;		/* Not a system segment */
	d.p = 1;		/* Present */
	d.d = 1;		/* 32-bit */

	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
}

static int
vgetcpu_cpu_notifier(struct notifier_block *n, unsigned long action, void *arg)
{
	long cpu = (long)arg;

	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
		smp_call_function_single(cpu, vgetcpu_cpu_init, NULL, 1);

	return NOTIFY_DONE;
}

static int __init init_vdso(void)
{
	init_vdso_image(&vdso_image_64);

#ifdef CONFIG_X86_X32_ABI
	init_vdso_image(&vdso_image_x32);
#endif

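	/*
	 * Set up the per-CPU GDT segment on all CPUs that are already
	 * online, and register a hotplug notifier so CPUs brought up
	 * later get the same treatment.
	 */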
	cpu_notifier_register_begin();

	on_each_cpu(vgetcpu_cpu_init, NULL, 1);
	/* notifier priority > KVM */
	__hotcpu_notifier(vgetcpu_cpu_notifier, 30);

	cpu_notifier_register_done();

	return 0;
}
subsys_initcall(init_vdso);
#endif /* CONFIG_X86_64 */