// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2007 Andi Kleen, SUSE Labs.
 *
 * This contains most of the x86 vDSO kernel-side code.
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/cpu.h>
#include <linux/ptrace.h>
#include <linux/time_namespace.h>

#include <asm/pvclock.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/vvar.h>
#include <asm/tlb.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/cpufeature.h>
#include <clocksource/hyperv_timer.h>

#undef _ASM_X86_VVAR_H
#define EMIT_VVAR(name, offset)	\
	const size_t name ## _offset = offset;
#include <asm/vvar.h>

struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
	return (struct vdso_data *)(vvar_page + _vdso_data_offset);
}
#undef EMIT_VVAR
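/*
 * Illustrative expansion (the real offsets live in asm/vvar.h; 128 is a
 * made-up value): EMIT_VVAR(vdso_data, 128) above expands to
 *
 *	const size_t vdso_data_offset = 128;
 *
 * which is how arch_get_vdso_data() locates vdso_data inside the vvar page.
 */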

unsigned int vclocks_used __read_mostly;

#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;
#endif

int __init init_vdso_image(const struct vdso_image *image)
{
	BUILD_BUG_ON(VDSO_CLOCKMODE_MAX >= 32);
	BUG_ON(image->size % PAGE_SIZE != 0);

	apply_alternatives((struct alt_instr *)(image->data + image->alt),
			   (struct alt_instr *)(image->data + image->alt +
						image->alt_len));

	return 0;
}
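/*
 * Note: the BUILD_BUG_ON() above exists because vclocks_used is an
 * unsigned int used as a bitmask with one bit per VDSO_CLOCKMODE_*
 * value, so all clock-mode values must fit in 32 bits.
 */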

static const struct vm_special_mapping vvar_mapping;
struct linux_binprm;

static vm_fault_t vdso_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;

	if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size)
		return VM_FAULT_SIGBUS;

	vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT));
	get_page(vmf->page);
	return 0;
}

static void vdso_fix_landing(const struct vdso_image *image,
			     struct vm_area_struct *new_vma)
{
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	if (in_ia32_syscall() && image == &vdso_image_32) {
		struct pt_regs *regs = current_pt_regs();
		unsigned long vdso_land = image->sym_int80_landing_pad;
		unsigned long old_land_addr = vdso_land +
			(unsigned long)current->mm->context.vdso;

		/* Fix up the userspace landing address; see do_fast_syscall_32(). */
		if (regs->ip == old_land_addr)
			regs->ip = new_vma->vm_start + vdso_land;
	}
#endif
}

static int vdso_mremap(const struct vm_special_mapping *sm,
		       struct vm_area_struct *new_vma)
{
	const struct vdso_image *image = current->mm->context.vdso_image;

	vdso_fix_landing(image, new_vma);
	current->mm->context.vdso = (void __user *)new_vma->vm_start;

	return 0;
}
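/*
 * Userspace is allowed to move the vDSO with mremap(2); checkpoint/restore
 * relies on this to put a restored task's vDSO back at its old address.
 * A hypothetical userspace sketch:
 *
 *	void *new = mremap(old_vdso, len, len,
 *			   MREMAP_MAYMOVE | MREMAP_FIXED, target);
 *
 * The ->mremap() callback above then keeps mm->context.vdso (and, via
 * vdso_fix_landing(), an in-flight int80 return address) coherent.
 */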

#ifdef CONFIG_TIME_NS
/*
 * The vvar page layout depends on whether a task belongs to the root or
 * a non-root time namespace. Whenever a task changes its namespace, the
 * VVAR page tables are cleared and then re-faulted with the
 * corresponding layout.
 * See also the comment near timens_setup_vdso_data() for details.
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_read_lock(mm);
	for_each_vma(vmi, vma) {
		if (vma_is_special_mapping(vma, &vvar_mapping))
			zap_vma_pages(vma);
	}
	mmap_read_unlock(mm);

	return 0;
}
#endif

static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;
	unsigned long pfn;
	long sym_offset;

	if (!image)
		return VM_FAULT_SIGBUS;

	sym_offset = (long)(vmf->pgoff << PAGE_SHIFT) +
		image->sym_vvar_start;

	/*
	 * Sanity check: a symbol offset of zero means that the page
	 * does not exist for this vdso image, not that the page is at
	 * offset zero relative to the text mapping. This should be
	 * impossible here, because sym_offset should only be zero for
	 * the page past the end of the vvar mapping.
	 */
	if (sym_offset == 0)
		return VM_FAULT_SIGBUS;

	if (sym_offset == image->sym_vvar_page) {
		struct page *timens_page = find_timens_vvar_page(vma);

		pfn = __pa_symbol(&__vvar_page) >> PAGE_SHIFT;

		/*
		 * If a task belongs to a time namespace then a namespace-
		 * specific VVAR page is mapped at the sym_vvar_page offset
		 * and the real VVAR page is mapped at the sym_timens_page
		 * offset.
		 * See also the comment near timens_setup_vdso_data().
		 */
		if (timens_page) {
			unsigned long addr;
			vm_fault_t err;

			/*
			 * Optimization: inside a time namespace, pre-fault
			 * the real VVAR page too. The timens page holds only
			 * clock offsets relative to the VVAR data, so vDSO
			 * code will fault the VVAR page in shortly anyway.
			 */
			addr = vmf->address + (image->sym_timens_page - sym_offset);
			err = vmf_insert_pfn(vma, addr, pfn);
			if (unlikely(err & VM_FAULT_ERROR))
				return err;

			pfn = page_to_pfn(timens_page);
		}

		return vmf_insert_pfn(vma, vmf->address, pfn);
	} else if (sym_offset == image->sym_pvclock_page) {
		struct pvclock_vsyscall_time_info *pvti =
			pvclock_get_pvti_cpu0_va();
		if (pvti && vclock_was_used(VDSO_CLOCKMODE_PVCLOCK)) {
			return vmf_insert_pfn_prot(vma, vmf->address,
					__pa(pvti) >> PAGE_SHIFT,
					pgprot_decrypted(vma->vm_page_prot));
		}
	} else if (sym_offset == image->sym_hvclock_page) {
		pfn = hv_get_tsc_pfn();

		if (pfn && vclock_was_used(VDSO_CLOCKMODE_HVCLOCK))
			return vmf_insert_pfn(vma, vmf->address, pfn);
	} else if (sym_offset == image->sym_timens_page) {
		struct page *timens_page = find_timens_vvar_page(vma);

		if (!timens_page)
			return VM_FAULT_SIGBUS;

		pfn = __pa_symbol(&__vvar_page) >> PAGE_SHIFT;
		return vmf_insert_pfn(vma, vmf->address, pfn);
	}

	return VM_FAULT_SIGBUS;
}
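/*
 * Illustrative picture of the special pages vvar_fault() can serve (the
 * sym_* values are per-image link-time offsets, all negative, i.e. the
 * vvar area sits just below the vdso text):
 *
 *	sym_vvar_page    -> kernel's __vvar_page, or the timens page when
 *	                    the task is in a non-root time namespace
 *	sym_pvclock_page -> KVM paravirt clock data, if in use
 *	sym_hvclock_page -> Hyper-V TSC reference page, if in use
 *	sym_timens_page  -> the real __vvar_page, timens tasks only
 */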

static const struct vm_special_mapping vdso_mapping = {
	.name = "[vdso]",
	.fault = vdso_fault,
	.mremap = vdso_mremap,
};
static const struct vm_special_mapping vvar_mapping = {
	.name = "[vvar]",
	.fault = vvar_fault,
};

/*
 * Add vdso and vvar mappings to the current process.
 * @image - blob to map
 * @addr - request a specific address (zero to map at a free address)
 */
static int map_vdso(const struct vdso_image *image, unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long text_start;
	int ret = 0;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	addr = get_unmapped_area(NULL, addr,
				 image->size - image->sym_vvar_start, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	text_start = addr - image->sym_vvar_start;

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	vma = _install_special_mapping(mm,
				       text_start,
				       image->size,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &vdso_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	vma = _install_special_mapping(mm,
				       addr,
				       -image->sym_vvar_start,
				       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
				       VM_PFNMAP,
				       &vvar_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		do_munmap(mm, text_start, image->size, NULL);
	} else {
		current->mm->context.vdso = (void __user *)text_start;
		current->mm->context.vdso_image = image;
	}

up_fail:
	mmap_write_unlock(mm);
	return ret;
}
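/*
 * Resulting layout (sym_vvar_start is negative, so the PFN-mapped vvar
 * area directly precedes the vdso text):
 *
 *	addr       = text_start + sym_vvar_start	[vvar area]
 *	text_start					[vdso text, image->size bytes]
 */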

#ifdef CONFIG_X86_64
/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset. This way there is no hole in the middle of the address space.
 * To save memory, make sure it is still covered by the same page table
 * (a single PMD entry's range) as the stack top. This doesn't give that
 * many random bits.
 *
 * Note that this algorithm is imperfect: the distribution of the vdso
 * start address within a PMD is biased toward the end.
 *
 * Only used for the 64-bit and x32 vdsos.
 */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
	unsigned long addr, end;
	unsigned offset;

	/*
	 * Round up the start address. It can start out unaligned as a result
	 * of stack start randomization.
	 */
	start = PAGE_ALIGN(start);

	/* Round the lowest possible end address up to a PMD boundary. */
	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
	if (end >= DEFAULT_MAP_WINDOW)
		end = DEFAULT_MAP_WINDOW;
	end -= len;

	if (end > start) {
		offset = get_random_u32_below(((end - start) >> PAGE_SHIFT) + 1);
		addr = start + (offset << PAGE_SHIFT);
	} else {
		addr = start;
	}

	/*
	 * Forcibly align the final address in case we have a hardware
	 * issue that requires alignment for performance reasons.
	 */
	addr = align_vdso_addr(addr);

	return addr;
}
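/*
 * Worked example (made-up numbers): start = 0x7ffd80002000 after
 * PAGE_ALIGN(), len = 0x5000. Then:
 *
 *	end  = (0x7ffd80007000 + 0x1fffff) & PMD_MASK = 0x7ffd80200000
 *	end -= len                                    = 0x7ffd801fb000
 *
 * giving ((end - start) >> PAGE_SHIFT) + 1 = 506 candidate page slots,
 * i.e. only about 9 bits of randomness, as the comment above warns.
 */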

static int map_vdso_randomized(const struct vdso_image *image)
{
	unsigned long addr = vdso_addr(current->mm->start_stack, image->size - image->sym_vvar_start);

	return map_vdso(image, addr);
}
#endif

int map_vdso_once(const struct vdso_image *image, unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_write_lock(mm);
	/*
	 * Check if we have already mapped the vdso blob - fail to prevent
	 * abuse of install_special_mapping() from userspace, which may
	 * not do accounting and rlimits right.
	 * We could search the VMA near context.vdso, but it's a slowpath,
	 * so let's explicitly check all VMAs to be completely sure.
	 */
	for_each_vma(vmi, vma) {
		if (vma_is_special_mapping(vma, &vdso_mapping) ||
		    vma_is_special_mapping(vma, &vvar_mapping)) {
			mmap_write_unlock(mm);
			return -EEXIST;
		}
	}
	mmap_write_unlock(mm);

	return map_vdso(image, addr);
}
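/*
 * map_vdso_once() backs the ARCH_MAP_VDSO_{32,64,X32} arch_prctl()
 * commands, which checkpoint/restore uses to map a vdso image into a
 * restored task at a chosen address.
 */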

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static int load_vdso32(void)
{
	if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
		return 0;

	return map_vdso(&vdso_image_32, 0);
}
#endif

#ifdef CONFIG_X86_64
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	if (!vdso64_enabled)
		return 0;

	return map_vdso_randomized(&vdso_image_64);
}

#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp, bool x32)
{
#ifdef CONFIG_X86_X32_ABI
	if (x32) {
		if (!vdso64_enabled)
			return 0;
		return map_vdso_randomized(&vdso_image_x32);
	}
#endif
#ifdef CONFIG_IA32_EMULATION
	return load_vdso32();
#else
	return 0;
#endif
}
#endif
#else
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return load_vdso32();
}
#endif

bool arch_syscall_is_vdso_sigreturn(struct pt_regs *regs)
{
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
	const struct vdso_image *image = current->mm->context.vdso_image;
	unsigned long vdso = (unsigned long) current->mm->context.vdso;

	if (in_ia32_syscall() && image == &vdso_image_32) {
		if (regs->ip == vdso + image->sym_vdso32_sigreturn_landing_pad ||
		    regs->ip == vdso + image->sym_vdso32_rt_sigreturn_landing_pad)
			return true;
	}
#endif
	return false;
}
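/*
 * arch_syscall_is_vdso_sigreturn() lets the syscall user dispatch
 * machinery always permit sigreturn issued from the vDSO trampolines,
 * since intercepting it would make signal delivery unrecoverable.
 */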

#ifdef CONFIG_X86_64
static __init int vdso_setup(char *s)
{
	vdso64_enabled = simple_strtoul(s, NULL, 0);
	return 1;
}
__setup("vdso=", vdso_setup);
#endif /* CONFIG_X86_64 */
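/*
 * Example: booting with "vdso=0" clears vdso64_enabled, so no 64-bit
 * vDSO is mapped into new processes. The 32-bit "vdso32=" parameter is
 * handled separately in vdso32-setup.c.
 */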