// SPDX-License-Identifier: GPL-2.0-only
/*
 * vDSO implementation for Hexagon
 *
 * Copyright (c) 2011, The Linux Foundation. All rights reserved.
 */

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/binfmts.h>

#include <asm/vdso.h>

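/*
 * Single page backing the vDSO; allocated once at boot and mapped
 * into each user process by arch_setup_additional_pages() below.
 */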
static struct page *vdso_page;

/* Create a vDSO page holding the signal trampoline.
 * We want this for a non-executable stack.
 */
static int __init vdso_init(void)
{
	struct hexagon_vdso *vdso;

	vdso_page = alloc_page(GFP_KERNEL);
	if (!vdso_page)
		panic("Cannot allocate vdso");

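	/*
	 * Temporarily map the page into kernel virtual space so the
	 * trampoline can be copied in; the mapping is torn down again
	 * with vunmap() once the copy is done.
	 */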
	vdso = vmap(&vdso_page, 1, 0, PAGE_KERNEL);
	if (!vdso)
		panic("Cannot map vdso");
	clear_page(vdso);

	/* Install the signal trampoline; currently looks like this:
	 * r6 = #__NR_rt_sigreturn;
	 * trap0(#1);
	 */
	vdso->rt_signal_trampoline[0] = __rt_sigtramp_template[0];
	vdso->rt_signal_trampoline[1] = __rt_sigtramp_template[1];

	vunmap(vdso);

	return 0;
}
arch_initcall(vdso_init);

/*
 * Called from binfmt_elf. Create a VMA for the vDSO page.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	int ret;
	unsigned long vdso_base;
	struct mm_struct *mm = current->mm;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	/* Try to get it loaded right near ld.so/glibc. */
	vdso_base = STACK_TOP;

	vdso_base = get_unmapped_area(NULL, vdso_base, PAGE_SIZE, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		ret = vdso_base;
		goto up_fail;
	}

	/* MAYWRITE to allow gdb to COW and set breakpoints. */
	ret = install_special_mapping(mm, vdso_base, PAGE_SIZE,
				      VM_READ|VM_EXEC|
				      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				      &vdso_page);

	if (ret)
		goto up_fail;

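	/*
	 * Remember where the page landed; the signal-delivery path uses
	 * context.vdso to point the return address at the trampoline.
	 */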
	mm->context.vdso = (void *)vdso_base;

up_fail:
	mmap_write_unlock(mm);
	return ret;
}

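/* Reported so the mapping shows up as "[vdso]" in /proc/<pid>/maps. */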
const char *arch_vma_name(struct vm_area_struct *vma)
{
	if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
		return "[vdso]";
	return NULL;
}