// SPDX-License-Identifier: GPL-2.0
/*
 * vdso setup for s390
 *
 * Copyright IBM Corp. 2008
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/memblock.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/vdso.h>
#include <asm/facility.h>

#ifdef CONFIG_COMPAT_VDSO
extern char vdso32_start, vdso32_end;
static void *vdso32_kbase = &vdso32_start;
static unsigned int vdso32_pages;
static struct page **vdso32_pagelist;
#endif

extern char vdso64_start, vdso64_end;
static void *vdso64_kbase = &vdso64_start;
static unsigned int vdso64_pages;
static struct page **vdso64_pagelist;

/*
 * Should the kernel map a VDSO page into processes and pass its
 * address down to glibc upon exec()?
 */
unsigned int __read_mostly vdso_enabled = 1;

static vm_fault_t vdso_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page **vdso_pagelist;
	unsigned long vdso_pages;

	vdso_pagelist = vdso64_pagelist;
	vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT_VDSO
	if (vma->vm_mm->context.compat_mm) {
		vdso_pagelist = vdso32_pagelist;
		vdso_pages = vdso32_pages;
	}
#endif

	if (vmf->pgoff >= vdso_pages)
		return VM_FAULT_SIGBUS;

	vmf->page = vdso_pagelist[vmf->pgoff];
	get_page(vmf->page);
	return 0;
}

static int vdso_mremap(const struct vm_special_mapping *sm,
		       struct vm_area_struct *vma)
{
	unsigned long vdso_pages;

	vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT_VDSO
	if (vma->vm_mm->context.compat_mm)
		vdso_pages = vdso32_pages;
#endif

	if ((vdso_pages << PAGE_SHIFT) != vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (WARN_ON_ONCE(current->mm != vma->vm_mm))
		return -EFAULT;

	current->mm->context.vdso_base = vma->vm_start;
	return 0;
}

static const struct vm_special_mapping vdso_mapping = {
	.name = "[vdso]",
	.fault = vdso_fault,
	.mremap = vdso_mremap,
};

static int __init vdso_setup(char *s)
{
	unsigned long val;
	int rc;

	rc = 0;
	if (strncmp(s, "on", 3) == 0)
		vdso_enabled = 1;
	else if (strncmp(s, "off", 4) == 0)
		vdso_enabled = 0;
	else {
		rc = kstrtoul(s, 0, &val);
		vdso_enabled = rc ? 0 : !!val;
	}
	return !rc;
}
__setup("vdso=", vdso_setup);
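
/*
 * Usage sketch for the parser above: the following kernel command line
 * values are accepted; anything other than "on"/"off" goes through the
 * kstrtoul() path and is interpreted numerically.
 *
 *	vdso=on    vdso=1	enable the vDSO mapping (the default)
 *	vdso=off   vdso=0	disable it; dynamically linked binaries
 *				then fall back to regular system calls
 */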

/*
 * The vdso data page
 */
static union {
	struct vdso_data	data;
	u8			page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = &vdso_data_store.data;

/*
 * Setup vdso data page.
 */
static void __init vdso_init_data(struct vdso_data *vd)
{
	vd->ectg_available = test_facility(31);
}

/*
 * Allocate/free per cpu vdso data.
 */
#define SEGMENT_ORDER	2

/*
 * The initial vdso_data structure for the boot CPU. Eventually
 * it is replaced with a properly allocated structure in vdso_init.
 * This is necessary because a valid S390_lowcore.vdso_per_cpu_data
 * pointer is required to be able to return from an interrupt or
 * program check. See the exit paths in entry.S.
 */
struct vdso_data boot_vdso_data __initdata;

void __init vdso_alloc_boot_cpu(struct lowcore *lowcore)
{
	lowcore->vdso_per_cpu_data = (unsigned long) &boot_vdso_data;
}

int vdso_alloc_per_cpu(struct lowcore *lowcore)
{
	unsigned long segment_table, page_table, page_frame;
	struct vdso_per_cpu_data *vd;

	segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER);
	page_table = get_zeroed_page(GFP_KERNEL);
	page_frame = get_zeroed_page(GFP_KERNEL);
	if (!segment_table || !page_table || !page_frame)
		goto out;
	arch_set_page_dat(virt_to_page(segment_table), SEGMENT_ORDER);
	arch_set_page_dat(virt_to_page(page_table), 0);

	/* Initialize per-cpu vdso data page */
	vd = (struct vdso_per_cpu_data *) page_frame;
	vd->cpu_nr = lowcore->cpu_nr;
	vd->node_id = cpu_to_node(vd->cpu_nr);

	/* Set up page table for the vdso address space */
	memset64((u64 *)segment_table, _SEGMENT_ENTRY_EMPTY, _CRST_ENTRIES);
	memset64((u64 *)page_table, _PAGE_INVALID, PTRS_PER_PTE);

	*(unsigned long *) segment_table = _SEGMENT_ENTRY + page_table;
	*(unsigned long *) page_table = _PAGE_PROTECT + page_frame;

	lowcore->vdso_asce = segment_table +
		_ASCE_TABLE_LENGTH + _ASCE_USER_BITS + _ASCE_TYPE_SEGMENT;
	lowcore->vdso_per_cpu_data = page_frame;

	return 0;

out:
	free_page(page_frame);
	free_page(page_table);
	free_pages(segment_table, SEGMENT_ORDER);
	return -ENOMEM;
}

void vdso_free_per_cpu(struct lowcore *lowcore)
{
	unsigned long segment_table, page_table, page_frame;

	segment_table = lowcore->vdso_asce & PAGE_MASK;
	page_table = *(unsigned long *) segment_table;
	page_frame = *(unsigned long *) page_table;

	free_page(page_frame);
	free_page(page_table);
	free_pages(segment_table, SEGMENT_ORDER);
}
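
/*
 * Sketch of the per-cpu translation structure built by
 * vdso_alloc_per_cpu() above, one instance per CPU, anchored in the
 * lowcore:
 *
 *	lowcore->vdso_asce ──> segment table (4 pages, SEGMENT_ORDER = 2)
 *				 entry 0 ──> page table (1 page)
 *					       entry 0 ──> page frame holding
 *							   struct vdso_per_cpu_data
 *
 * Only the first segment table entry and the first page table entry are
 * populated; the data page frame is mapped with _PAGE_PROTECT, i.e.
 * write-protected, so user space can read but never modify it.
 */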

/*
 * This is called from binfmt_elf; we create the special vma for the
 * vDSO and insert it into the mm struct tree.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long vdso_pages;
	unsigned long vdso_base;
	int rc;

	if (!vdso_enabled)
		return 0;
	/*
	 * Only map the vdso for dynamically linked elf binaries.
	 */
	if (!uses_interp)
		return 0;

	vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT_VDSO
	mm->context.compat_mm = is_compat_task();
	if (mm->context.compat_mm)
		vdso_pages = vdso32_pages;
#endif
	/*
	 * If the vDSO had a problem and was disabled, just don't "enable"
	 * it for the process.
	 */
	if (vdso_pages == 0)
		return 0;

	/*
	 * Pick a base address for the vDSO in the process address space.
	 * We let get_unmapped_area() choose the spot, so the vDSO may end
	 * up anywhere in the task's unmapped address range.
	 */
	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;
	vdso_base = get_unmapped_area(NULL, 0, vdso_pages << PAGE_SHIFT, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		rc = vdso_base;
		goto out_up;
	}

	/*
	 * Our vma flags don't have VM_WRITE, so by default the process
	 * isn't allowed to write to those pages.
	 * gdb can break that with the ptrace interface and thus trigger
	 * COW on those pages, but it is then your responsibility never to
	 * do that on the "data" page of the vDSO, or you'll stop getting
	 * kernel updates and your nice userland gettimeofday() will be
	 * totally dead. It's fine to use ptrace for setting breakpoints
	 * in the vDSO code pages, though.
	 */
	vma = _install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &vdso_mapping);
	if (IS_ERR(vma)) {
		rc = PTR_ERR(vma);
		goto out_up;
	}

	current->mm->context.vdso_base = vdso_base;
	rc = 0;

out_up:
	up_write(&mm->mmap_sem);
	return rc;
}

static int __init vdso_init(void)
{
	int i;

	vdso_init_data(vdso_data);
#ifdef CONFIG_COMPAT_VDSO
	/* Calculate the size of the 32 bit vDSO */
	vdso32_pages = ((&vdso32_end - &vdso32_start
			 + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;

	/* Make sure pages are in the correct state */
	vdso32_pagelist = kcalloc(vdso32_pages + 1, sizeof(struct page *),
				  GFP_KERNEL);
	BUG_ON(vdso32_pagelist == NULL);
	for (i = 0; i < vdso32_pages - 1; i++) {
		struct page *pg = virt_to_page(vdso32_kbase + i * PAGE_SIZE);

		get_page(pg);
		vdso32_pagelist[i] = pg;
	}
	vdso32_pagelist[vdso32_pages - 1] = virt_to_page(vdso_data);
	vdso32_pagelist[vdso32_pages] = NULL;
#endif

	/* Calculate the size of the 64 bit vDSO */
	vdso64_pages = ((&vdso64_end - &vdso64_start
			 + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;

	/* Make sure pages are in the correct state */
	vdso64_pagelist = kcalloc(vdso64_pages + 1, sizeof(struct page *),
				  GFP_KERNEL);
	BUG_ON(vdso64_pagelist == NULL);
	for (i = 0; i < vdso64_pages - 1; i++) {
		struct page *pg = virt_to_page(vdso64_kbase + i * PAGE_SIZE);

		get_page(pg);
		vdso64_pagelist[i] = pg;
	}
	vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data);
	vdso64_pagelist[vdso64_pages] = NULL;
	if (vdso_alloc_per_cpu(&S390_lowcore))
		BUG();

	get_page(virt_to_page(vdso_data));

	return 0;
}
early_initcall(vdso_init);
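
/*
 * A minimal userspace sketch (compiled separately, not part of this
 * file; assumes glibc's getauxval()) of how the mapping installed by
 * arch_setup_additional_pages() is found: the ELF loader publishes the
 * vDSO base address via the AT_SYSINFO_EHDR auxiliary vector entry.
 *
 *	#include <stdio.h>
 *	#include <sys/auxv.h>
 *
 *	int main(void)
 *	{
 *		unsigned long base = getauxval(AT_SYSINFO_EHDR);
 *
 *		if (base)
 *			printf("vDSO mapped at 0x%lx\n", base);
 *		else
 *			printf("no vDSO (e.g. booted with vdso=off)\n");
 *		return 0;
 *	}
 */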