// SPDX-License-Identifier: GPL-2.0
/*
 * vdso setup for s390
 *
 * Copyright IBM Corp. 2008
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/memblock.h>
#include <linux/compat.h>
#include <linux/binfmts.h>
#include <vdso/datapage.h>
#include <asm/asm-offsets.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/vdso.h>
#include <asm/facility.h>
#include <asm/timex.h>

extern char vdso64_start, vdso64_end;
static void *vdso64_kbase = &vdso64_start;
static unsigned int vdso64_pages;
static struct page **vdso64_pagelist;

/*
 * Should the kernel map a VDSO page into processes and pass its
 * address down to glibc upon exec()?
 */
unsigned int __read_mostly vdso_enabled = 1;

static vm_fault_t vdso_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page **vdso_pagelist;
	unsigned long vdso_pages;

	vdso_pagelist = vdso64_pagelist;
	vdso_pages = vdso64_pages;

	if (vmf->pgoff >= vdso_pages)
		return VM_FAULT_SIGBUS;

	vmf->page = vdso_pagelist[vmf->pgoff];
	get_page(vmf->page);
	return 0;
}

static int vdso_mremap(const struct vm_special_mapping *sm,
		       struct vm_area_struct *vma)
{
	current->mm->context.vdso_base = vma->vm_start;

	return 0;
}

static const struct vm_special_mapping vdso_mapping = {
	.name = "[vdso]",
	.fault = vdso_fault,
	.mremap = vdso_mremap,
};

static int __init vdso_setup(char *str)
{
	bool enabled;

	if (!kstrtobool(str, &enabled))
		vdso_enabled = enabled;
	return 1;
}
__setup("vdso=", vdso_setup);

/*
 * The vdso data page
 */
static union {
	struct vdso_data data;
	u8 page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = (struct vdso_data *)&vdso_data_store.data;

void vdso_getcpu_init(void)
{
	set_tod_programmable_field(smp_processor_id());
}

/*
 * This is called from binfmt_elf; we create the special vma for the
 * vDSO and insert it into the mm struct tree.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long vdso_pages;
	unsigned long vdso_base;
	int rc;

	if (!vdso_enabled)
		return 0;

	if (is_compat_task())
		return 0;

	vdso_pages = vdso64_pages;
	/*
	 * The vDSO had a problem and was disabled; just don't "enable"
	 * it for this process.
	 */
	if (vdso_pages == 0)
		return 0;

	/*
	 * Pick a base address for the vDSO in process space. We try to put
	 * it at vdso_base, which is the "natural" base for it, but we might
	 * fail and end up putting it elsewhere.
	 */
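	/*
	 * Hold the mmap write lock across both the address search and
	 * _install_special_mapping() below, so that no other thread can
	 * claim the chosen range in between.
	 */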
	if (mmap_write_lock_killable(mm))
		return -EINTR;
	vdso_base = get_unmapped_area(NULL, 0, vdso_pages << PAGE_SHIFT, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		rc = vdso_base;
		goto out_up;
	}

	/*
	 * Our vma flags don't have VM_WRITE, so by default the process
	 * isn't allowed to write those pages.
	 * gdb can break that with the ptrace interface, and thus trigger
	 * COW on those pages, but it's then your responsibility to never
	 * do that on the "data" page of the vDSO or you'll stop getting
	 * kernel updates and your nice userland gettimeofday will be
	 * totally dead. It's fine to use that for setting breakpoints in
	 * the vDSO code pages though.
	 */
	vma = _install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &vdso_mapping);
	if (IS_ERR(vma)) {
		rc = PTR_ERR(vma);
		goto out_up;
	}

	current->mm->context.vdso_base = vdso_base;
	rc = 0;

out_up:
	mmap_write_unlock(mm);
	return rc;
}

static int __init vdso_init(void)
{
	int i;

	vdso_getcpu_init();
	/* Calculate the size of the 64 bit vDSO; one extra page holds vdso_data */
	vdso64_pages = ((&vdso64_end - &vdso64_start
			 + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;

	/* Make sure pages are in the correct state */
	vdso64_pagelist = kcalloc(vdso64_pages + 1, sizeof(struct page *),
				  GFP_KERNEL);
	BUG_ON(vdso64_pagelist == NULL);
	for (i = 0; i < vdso64_pages - 1; i++) {
		struct page *pg = virt_to_page(vdso64_kbase + i * PAGE_SIZE);

		get_page(pg);
		vdso64_pagelist[i] = pg;
	}
	/* The last slot before the NULL terminator is the shared data page */
	vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data);
	vdso64_pagelist[vdso64_pages] = NULL;

	get_page(virt_to_page(vdso_data));

	return 0;
}
early_initcall(vdso_init);
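
/*
 * For illustration only (not part of this file's build): userspace finds
 * the mapping installed by arch_setup_additional_pages() through the
 * auxiliary vector rather than through any fixed address. A minimal
 * sketch, assuming a glibc environment:
 *
 *	#include <sys/auxv.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		unsigned long vdso = getauxval(AT_SYSINFO_EHDR);
 *
 *		if (!vdso)
 *			puts("no vDSO (e.g. booted with vdso=0)");
 *		else
 *			printf("[vdso] ELF header at %#lx\n", vdso);
 *		return 0;
 *	}
 *
 * When vdso_enabled is cleared via the "vdso=" parameter handled by
 * vdso_setup() above, the AT_SYSINFO_EHDR entry is not passed to the
 * process and getauxval() returns 0.
 */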