/*
 * vdso setup for s390
 *
 *  Copyright IBM Corp. 2008
 *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/bootmem.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/vdso.h>
#include <asm/facility.h>

#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT)
extern char vdso32_start, vdso32_end;
static void *vdso32_kbase = &vdso32_start;
static unsigned int vdso32_pages;
static struct page **vdso32_pagelist;
#endif

#ifdef CONFIG_64BIT
extern char vdso64_start, vdso64_end;
static void *vdso64_kbase = &vdso64_start;
static unsigned int vdso64_pages;
static struct page **vdso64_pagelist;
#endif /* CONFIG_64BIT */

/*
 * Should the kernel map a VDSO page into processes and pass its
 * address down to glibc upon exec()?
 */
unsigned int __read_mostly vdso_enabled = 1;

static int __init vdso_setup(char *s)
{
	unsigned long val;
	int rc;

	rc = 0;
	if (strncmp(s, "on", 3) == 0)
		vdso_enabled = 1;
	else if (strncmp(s, "off", 4) == 0)
		vdso_enabled = 0;
	else {
		rc = kstrtoul(s, 0, &val);
		vdso_enabled = rc ? 0 : !!val;
	}
	return !rc;
}
__setup("vdso=", vdso_setup);
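
/*
 * Usage examples for the "vdso=" parameter parsed above: "vdso=on" and
 * "vdso=off" are matched literally; anything else is handed to
 * kstrtoul(), so e.g. "vdso=0" on the kernel command line disables the
 * mapping and "vdso=1" enables it.
 */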

/*
 * The vdso data page
 */
static union {
	struct vdso_data data;
	u8 page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = &vdso_data_store.data;

/*
 * Set up the vdso data page.
 */
static void vdso_init_data(struct vdso_data *vd)
{
	vd->ectg_available =
		s390_user_mode != HOME_SPACE_MODE && test_facility(31);
}

#ifdef CONFIG_64BIT
/*
 * Allocate/free per cpu vdso data.
 */
#define SEGMENT_ORDER	2
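
/*
 * Rough sketch of what vdso_alloc_per_cpu() below wires up for each cpu,
 * all reachable from the lowcore:
 *
 *	CR5 -> lowcore paste array, paste[4] -> PSAL
 *	PSAL word 2 -> ASTE
 *	ASTE words 2-3 -> segment table (order SEGMENT_ORDER)
 *	segment table entry 0 -> page table entry 0 -> per-cpu page frame
 *
 * The page frame is mapped read-only (_PAGE_PROTECT) and its address is
 * also stored in lowcore->vdso_per_cpu_data so the kernel can update it
 * directly, while the vdso code is expected to reach it in access
 * register mode through the CR5 based path loaded by vdso_init_cr5().
 */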
int vdso_alloc_per_cpu(struct _lowcore *lowcore)
{
	unsigned long segment_table, page_table, page_frame;
	u32 *psal, *aste;
	int i;

	lowcore->vdso_per_cpu_data = __LC_PASTE;

	if (s390_user_mode == HOME_SPACE_MODE || !vdso_enabled)
		return 0;

	segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER);
	page_table = get_zeroed_page(GFP_KERNEL | GFP_DMA);
	page_frame = get_zeroed_page(GFP_KERNEL);
	if (!segment_table || !page_table || !page_frame)
		goto out;

	clear_table((unsigned long *) segment_table, _SEGMENT_ENTRY_EMPTY,
		    PAGE_SIZE << SEGMENT_ORDER);
	clear_table((unsigned long *) page_table, _PAGE_INVALID,
		    256*sizeof(unsigned long));

	*(unsigned long *) segment_table = _SEGMENT_ENTRY + page_table;
	*(unsigned long *) page_table = _PAGE_PROTECT + page_frame;

	psal = (u32 *) (page_table + 256*sizeof(unsigned long));
	aste = psal + 32;

	for (i = 4; i < 32; i += 4)
		psal[i] = 0x80000000;

	lowcore->paste[4] = (u32)(addr_t) psal;
	psal[0] = 0x20000000;
	psal[2] = (u32)(addr_t) aste;
	*(unsigned long *) (aste + 2) = segment_table +
		_ASCE_TABLE_LENGTH + _ASCE_USER_BITS + _ASCE_TYPE_SEGMENT;
	aste[4] = (u32)(addr_t) psal;
	lowcore->vdso_per_cpu_data = page_frame;

	return 0;

out:
	free_page(page_frame);
	free_page(page_table);
	free_pages(segment_table, SEGMENT_ORDER);
	return -ENOMEM;
}

void vdso_free_per_cpu(struct _lowcore *lowcore)
{
	unsigned long segment_table, page_table, page_frame;
	u32 *psal, *aste;

	if (s390_user_mode == HOME_SPACE_MODE || !vdso_enabled)
		return;

	psal = (u32 *)(addr_t) lowcore->paste[4];
	aste = (u32 *)(addr_t) psal[2];
	segment_table = *(unsigned long *)(aste + 2) & PAGE_MASK;
	page_table = *(unsigned long *) segment_table;
	page_frame = *(unsigned long *) page_table;

	free_page(page_frame);
	free_page(page_table);
	free_pages(segment_table, SEGMENT_ORDER);
}

static void vdso_init_cr5(void)
{
	unsigned long cr5;

	if (s390_user_mode == HOME_SPACE_MODE || !vdso_enabled)
		return;
	cr5 = offsetof(struct _lowcore, paste);
	__ctl_load(cr5, 5, 5);
}
#endif /* CONFIG_64BIT */

/*
 * This is called from binfmt_elf; we create the special vma for the
 * vDSO and insert it into the mm struct tree.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	struct page **vdso_pagelist;
	unsigned long vdso_pages;
	unsigned long vdso_base;
	int rc;

	if (!vdso_enabled)
		return 0;
	/*
	 * Only map the vdso for dynamically linked elf binaries.
	 */
	if (!uses_interp)
		return 0;

#ifdef CONFIG_64BIT
	vdso_pagelist = vdso64_pagelist;
	vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT
	if (is_compat_task()) {
		vdso_pagelist = vdso32_pagelist;
		vdso_pages = vdso32_pages;
	}
#endif
#else
	vdso_pagelist = vdso32_pagelist;
	vdso_pages = vdso32_pages;
#endif

	/*
	 * The vDSO had a problem and was disabled; just don't "enable" it
	 * for this process.
	 */
	if (vdso_pages == 0)
		return 0;

	current->mm->context.vdso_base = 0;

	/*
	 * Pick a base address for the vDSO in process space; we let
	 * get_unmapped_area() choose a suitable spot for the mapping.
	 */
	down_write(&mm->mmap_sem);
	vdso_base = get_unmapped_area(NULL, 0, vdso_pages << PAGE_SHIFT, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		rc = vdso_base;
		goto out_up;
	}

	/*
	 * Put the vDSO base into the mm struct. We need to do this before
	 * calling install_special_mapping() or the perf counter mmap
	 * tracking code will fail to recognise it as a vDSO (since
	 * arch_vma_name fails).
	 */
	current->mm->context.vdso_base = vdso_base;

	/*
	 * Our vma flags don't have VM_WRITE, so by default the process
	 * isn't allowed to write to those pages.
	 * gdb can break that via the ptrace interface and thus trigger COW
	 * on those pages, but it is then your responsibility never to do
	 * that on the "data" page of the vDSO, or you'll stop getting
	 * kernel updates and your nice userland gettimeofday will be
	 * totally dead. It's fine to use that for setting breakpoints in
	 * the vDSO code pages though.
	 */
	rc = install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
				     VM_READ|VM_EXEC|
				     VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				     vdso_pagelist);
	if (rc)
		current->mm->context.vdso_base = 0;
out_up:
	up_write(&mm->mmap_sem);
	return rc;
}

const char *arch_vma_name(struct vm_area_struct *vma)
{
	if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso_base)
		return "[vdso]";
	return NULL;
}

static int __init vdso_init(void)
{
	int i;

	if (!vdso_enabled)
		return 0;
	vdso_init_data(vdso_data);
#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT)
	/* Calculate the size of the 32 bit vDSO */
	vdso32_pages = ((&vdso32_end - &vdso32_start
			 + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;

	/* Make sure pages are in the correct state */
	vdso32_pagelist = kzalloc(sizeof(struct page *) * (vdso32_pages + 1),
				  GFP_KERNEL);
	BUG_ON(vdso32_pagelist == NULL);
	for (i = 0; i < vdso32_pages - 1; i++) {
		struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE);
		ClearPageReserved(pg);
		get_page(pg);
		vdso32_pagelist[i] = pg;
	}
	vdso32_pagelist[vdso32_pages - 1] = virt_to_page(vdso_data);
	vdso32_pagelist[vdso32_pages] = NULL;
#endif

#ifdef CONFIG_64BIT
	/* Calculate the size of the 64 bit vDSO */
	vdso64_pages = ((&vdso64_end - &vdso64_start
			 + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;

	/* Make sure pages are in the correct state */
	vdso64_pagelist = kzalloc(sizeof(struct page *) * (vdso64_pages + 1),
				  GFP_KERNEL);
	BUG_ON(vdso64_pagelist == NULL);
	for (i = 0; i < vdso64_pages - 1; i++) {
		struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);
		ClearPageReserved(pg);
		get_page(pg);
		vdso64_pagelist[i] = pg;
	}
	vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data);
	vdso64_pagelist[vdso64_pages] = NULL;
	if (vdso_alloc_per_cpu(&S390_lowcore))
		BUG();
	vdso_init_cr5();
#endif /* CONFIG_64BIT */

	get_page(virt_to_page(vdso_data));

	smp_wmb();

	return 0;
}
early_initcall(vdso_init);

int in_gate_area_no_mm(unsigned long addr)
{
	return 0;
}

int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	return 0;
}

struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
	return NULL;
}
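
/*
 * For reference only (userland side, not part of this file): once
 * arch_setup_additional_pages() has mapped the vDSO, the ELF loader
 * publishes its address through the AT_SYSINFO_EHDR auxiliary vector
 * entry, which a process with a sufficiently recent glibc could read
 * like this:
 *
 *	#include <stdio.h>
 *	#include <sys/auxv.h>
 *
 *	int main(void)
 *	{
 *		unsigned long vdso = getauxval(AT_SYSINFO_EHDR);
 *
 *		printf("[vdso] mapped at %#lx\n", vdso);
 *		return 0;
 *	}
 *
 * glibc uses the same auxv entry to resolve the __kernel_* symbols
 * exported by the vDSO ELF image.
 */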