/*
 * vdso setup for s390
 *
 *  Copyright IBM Corp. 2008
 *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/bootmem.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/vdso.h>
#include <asm/facility.h>

#ifdef CONFIG_COMPAT
extern char vdso32_start, vdso32_end;
static void *vdso32_kbase = &vdso32_start;
static unsigned int vdso32_pages;
static struct page **vdso32_pagelist;
#endif

extern char vdso64_start, vdso64_end;
static void *vdso64_kbase = &vdso64_start;
static unsigned int vdso64_pages;
static struct page **vdso64_pagelist;

/*
 * Should the kernel map a VDSO page into processes and pass its
 * address down to glibc upon exec()?
 */
unsigned int __read_mostly vdso_enabled = 1;

static int vdso_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page **vdso_pagelist;
	unsigned long vdso_pages;

	vdso_pagelist = vdso64_pagelist;
	vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT
	if (is_compat_task()) {
		vdso_pagelist = vdso32_pagelist;
		vdso_pages = vdso32_pages;
	}
#endif

	if (vmf->pgoff >= vdso_pages)
		return VM_FAULT_SIGBUS;

	vmf->page = vdso_pagelist[vmf->pgoff];
	get_page(vmf->page);
	return 0;
}

static int vdso_mremap(const struct vm_special_mapping *sm,
		       struct vm_area_struct *vma)
{
	unsigned long vdso_pages;

	vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT
	if (is_compat_task())
		vdso_pages = vdso32_pages;
#endif

	if ((vdso_pages << PAGE_SHIFT) != vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (WARN_ON_ONCE(current->mm != vma->vm_mm))
		return -EFAULT;

	current->mm->context.vdso_base = vma->vm_start;
	return 0;
}

static const struct vm_special_mapping vdso_mapping = {
	.name = "[vdso]",
	.fault = vdso_fault,
	.mremap = vdso_mremap,
};

static int __init vdso_setup(char *s)
{
	unsigned long val;
	int rc;

	rc = 0;
	if (strncmp(s, "on", 3) == 0)
		vdso_enabled = 1;
	else if (strncmp(s, "off", 4) == 0)
		vdso_enabled = 0;
	else {
		rc = kstrtoul(s, 0, &val);
		vdso_enabled = rc ? 0 : !!val;
	}
	return !rc;
}
__setup("vdso=", vdso_setup);

/*
 * The vdso data page
 */
static union {
	struct vdso_data data;
	u8 page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = &vdso_data_store.data;

/*
 * Set up the vdso data page.
 */
static void __init vdso_init_data(struct vdso_data *vd)
{
	vd->ectg_available = test_facility(31);
}
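
/*
 * A rough map of what vdso_alloc_per_cpu() below builds, derived from
 * the code itself rather than from an architecture reference (the
 * meaning of the individual PSAL/ASTE words is simplified here):
 *
 *	lowcore.paste[4] -> psal
 *	psal[2]          -> aste
 *	aste[2..3]       -> segment table origin (plus ASCE bits)
 *	segment table[0] -> page table (only entry 0 is used)
 *	page table[0]    -> per-cpu data page, mapped read-only
 *
 * The psal and aste words live in the otherwise unused second half of
 * the page table page, right behind the 256 page table entries; the
 * segment table spans 1 << SEGMENT_ORDER pages. vdso_init_cr5() then
 * points control register 5 at the lowcore paste array so that the
 * vdso code can reach this per-cpu page in access register mode.
 */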
/*
 * Allocate/free per cpu vdso data.
 */
#define SEGMENT_ORDER	2

int vdso_alloc_per_cpu(struct lowcore *lowcore)
{
	unsigned long segment_table, page_table, page_frame;
	struct vdso_per_cpu_data *vd;
	u32 *psal, *aste;
	int i;

	lowcore->vdso_per_cpu_data = __LC_PASTE;

	if (!vdso_enabled)
		return 0;

	segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER);
	page_table = get_zeroed_page(GFP_KERNEL | GFP_DMA);
	page_frame = get_zeroed_page(GFP_KERNEL);
	if (!segment_table || !page_table || !page_frame)
		goto out;

	/* Initialize per-cpu vdso data page */
	vd = (struct vdso_per_cpu_data *) page_frame;
	vd->cpu_nr = lowcore->cpu_nr;
	vd->node_id = cpu_to_node(vd->cpu_nr);

	/* Set up access register mode page table */
	clear_table((unsigned long *) segment_table, _SEGMENT_ENTRY_EMPTY,
		    PAGE_SIZE << SEGMENT_ORDER);
	clear_table((unsigned long *) page_table, _PAGE_INVALID,
		    256*sizeof(unsigned long));

	*(unsigned long *) segment_table = _SEGMENT_ENTRY + page_table;
	*(unsigned long *) page_table = _PAGE_PROTECT + page_frame;

	psal = (u32 *) (page_table + 256*sizeof(unsigned long));
	aste = psal + 32;

	for (i = 4; i < 32; i += 4)
		psal[i] = 0x80000000;

	lowcore->paste[4] = (u32)(addr_t) psal;
	psal[0] = 0x02000000;
	psal[2] = (u32)(addr_t) aste;
	*(unsigned long *) (aste + 2) = segment_table +
		_ASCE_TABLE_LENGTH + _ASCE_USER_BITS + _ASCE_TYPE_SEGMENT;
	aste[4] = (u32)(addr_t) psal;
	lowcore->vdso_per_cpu_data = page_frame;

	return 0;

out:
	free_page(page_frame);
	free_page(page_table);
	free_pages(segment_table, SEGMENT_ORDER);
	return -ENOMEM;
}

void vdso_free_per_cpu(struct lowcore *lowcore)
{
	unsigned long segment_table, page_table, page_frame;
	u32 *psal, *aste;

	if (!vdso_enabled)
		return;

	psal = (u32 *)(addr_t) lowcore->paste[4];
	aste = (u32 *)(addr_t) psal[2];
	segment_table = *(unsigned long *)(aste + 2) & PAGE_MASK;
	page_table = *(unsigned long *) segment_table;
	page_frame = *(unsigned long *) page_table;

	free_page(page_frame);
	free_page(page_table);
	free_pages(segment_table, SEGMENT_ORDER);
}

static void vdso_init_cr5(void)
{
	unsigned long cr5;

	if (!vdso_enabled)
		return;
	cr5 = offsetof(struct lowcore, paste);
	__ctl_load(cr5, 5, 5);
}
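
/*
 * Note on how userspace learns about the mapping created below: the
 * base address stored in mm->context.vdso_base is exported to the new
 * process as the AT_SYSINFO_EHDR entry of the auxiliary vector (on
 * s390 via ARCH_DLINFO in asm/elf.h); the C library uses that entry
 * to locate the vDSO's ELF header and resolve its symbols.
 */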
/*
 * This is called from binfmt_elf; we create the special vma for the
 * vDSO and insert it into the mm struct tree.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long vdso_pages;
	unsigned long vdso_base;
	int rc;

	if (!vdso_enabled)
		return 0;
	/*
	 * Only map the vdso for dynamically linked elf binaries.
	 */
	if (!uses_interp)
		return 0;

	vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT
	if (is_compat_task())
		vdso_pages = vdso32_pages;
#endif
	/*
	 * The vDSO has a problem and was disabled; just don't "enable"
	 * it for the process.
	 */
	if (vdso_pages == 0)
		return 0;

	/*
	 * Pick a base address for the vDSO in process space. We try to
	 * put it at vdso_base, which is the "natural" base for it, but
	 * we might fail and end up putting it elsewhere.
	 */
	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;
	vdso_base = get_unmapped_area(NULL, 0, vdso_pages << PAGE_SHIFT, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		rc = vdso_base;
		goto out_up;
	}

	/*
	 * Our vma flags don't have VM_WRITE, so by default the process
	 * isn't allowed to write those pages.
	 * gdb can break that with the ptrace interface, and thus trigger
	 * COW on those pages, but it's then your responsibility never to
	 * do that on the "data" page of the vDSO, or you'll stop getting
	 * kernel updates and your nice userland gettimeofday will be
	 * totally dead. It's fine to use that for setting breakpoints in
	 * the vDSO code pages though.
	 */
	vma = _install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &vdso_mapping);
	if (IS_ERR(vma)) {
		rc = PTR_ERR(vma);
		goto out_up;
	}

	current->mm->context.vdso_base = vdso_base;
	rc = 0;

out_up:
	up_write(&mm->mmap_sem);
	return rc;
}

static int __init vdso_init(void)
{
	int i;

	if (!vdso_enabled)
		return 0;
	vdso_init_data(vdso_data);
#ifdef CONFIG_COMPAT
	/* Calculate the size of the 32 bit vDSO */
	vdso32_pages = ((&vdso32_end - &vdso32_start
			 + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;

	/* Make sure pages are in the correct state */
	vdso32_pagelist = kzalloc(sizeof(struct page *) * (vdso32_pages + 1),
				  GFP_KERNEL);
	BUG_ON(vdso32_pagelist == NULL);
	for (i = 0; i < vdso32_pages - 1; i++) {
		struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE);
		ClearPageReserved(pg);
		get_page(pg);
		vdso32_pagelist[i] = pg;
	}
	vdso32_pagelist[vdso32_pages - 1] = virt_to_page(vdso_data);
	vdso32_pagelist[vdso32_pages] = NULL;
#endif

	/* Calculate the size of the 64 bit vDSO */
	vdso64_pages = ((&vdso64_end - &vdso64_start
			 + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;

	/* Make sure pages are in the correct state */
	vdso64_pagelist = kzalloc(sizeof(struct page *) * (vdso64_pages + 1),
				  GFP_KERNEL);
	BUG_ON(vdso64_pagelist == NULL);
	for (i = 0; i < vdso64_pages - 1; i++) {
		struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);
		ClearPageReserved(pg);
		get_page(pg);
		vdso64_pagelist[i] = pg;
	}
	vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data);
	vdso64_pagelist[vdso64_pages] = NULL;
	if (vdso_alloc_per_cpu(&S390_lowcore))
		BUG();
	vdso_init_cr5();

	get_page(virt_to_page(vdso_data));

	return 0;
}
early_initcall(vdso_init);
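
/*
 * Illustration only, not part of this file: a minimal userspace sketch
 * of how the mapping installed above is found, assuming glibc's
 * getauxval():
 *
 *	#include <stdio.h>
 *	#include <sys/auxv.h>
 *
 *	int main(void)
 *	{
 *		unsigned long vdso = getauxval(AT_SYSINFO_EHDR);
 *
 *		if (vdso)
 *			printf("[vdso] mapped at 0x%lx\n", vdso);
 *		return 0;
 *	}
 *
 * The address printed matches the "[vdso]" line in /proc/self/maps and
 * the vdso_base recorded in mm->context by arch_setup_additional_pages().
 */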