/*
 * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
 *                    <benh@kernel.crashing.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/bootmem.h>

#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/lmb.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>
#include <asm/vdso.h>
#include <asm/vdso_datapage.h>

#include "setup.h"

#undef DEBUG

#ifdef DEBUG
#define DBG(fmt...) printk(fmt)
#else
#define DBG(fmt...)
#endif

/* Max supported size for symbol names */
#define MAX_SYMNAME	64

extern char vdso32_start, vdso32_end;
static void *vdso32_kbase = &vdso32_start;
static unsigned int vdso32_pages;
static struct page **vdso32_pagelist;
unsigned long vdso32_sigtramp;
unsigned long vdso32_rt_sigtramp;

#ifdef CONFIG_PPC64
extern char vdso64_start, vdso64_end;
static void *vdso64_kbase = &vdso64_start;
static unsigned int vdso64_pages;
static struct page **vdso64_pagelist;
unsigned long vdso64_rt_sigtramp;
#endif /* CONFIG_PPC64 */

static int vdso_ready;

/*
 * The vdso data page (aka. systemcfg for old ppc64 fans) is here.
 * Once the early boot kernel code no longer needs to muck around
 * with it, it will become dynamically allocated.
 */
static union {
	struct vdso_data	data;
	u8			page[PAGE_SIZE];
} vdso_data_store __attribute__((__section__(".data.page_aligned")));
struct vdso_data *vdso_data = &vdso_data_store.data;

/* Format of the patch table */
struct vdso_patch_def
{
	unsigned long	ftr_mask, ftr_value;
	const char	*gen_name;
	const char	*fix_name;
};

/* Table of functions to patch based on the CPU type/revision
 *
 * Currently, we only change sync_dicache to do nothing on processors
 * with a coherent icache
 */
static struct vdso_patch_def vdso_patches[] = {
	{
		CPU_FTR_COHERENT_ICACHE, CPU_FTR_COHERENT_ICACHE,
		"__kernel_sync_dicache", "__kernel_sync_dicache_p5"
	},
	{
		CPU_FTR_USE_TB, 0,
		"__kernel_gettimeofday", NULL
	},
};
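/*
 * A patch entry applies when (cur_cpu_spec->cpu_features & ftr_mask)
 * == ftr_value (see vdso_fixup_alt_funcs() below).  If fix_name is set,
 * the generic symbol is redirected to the fixed variant; if fix_name is
 * NULL, the generic symbol is hidden instead.  So with a coherent icache,
 * __kernel_sync_dicache resolves to __kernel_sync_dicache_p5, and without
 * a usable timebase __kernel_gettimeofday is not offered by the vDSO at
 * all.
 */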
/*
 * Some information carried around for each of the vDSO images during
 * parsing at boot time.
 */
struct lib32_elfinfo
{
	Elf32_Ehdr	*hdr;		/* ptr to ELF */
	Elf32_Sym	*dynsym;	/* ptr to .dynsym section */
	unsigned long	dynsymsize;	/* size of .dynsym section */
	char		*dynstr;	/* ptr to .dynstr section */
	unsigned long	text;		/* offset of .text section in .so */
};

struct lib64_elfinfo
{
	Elf64_Ehdr	*hdr;
	Elf64_Sym	*dynsym;
	unsigned long	dynsymsize;
	char		*dynstr;
	unsigned long	text;
};


#ifdef __DEBUG
static void dump_one_vdso_page(struct page *pg, struct page *upg)
{
	printk("kpg: %p (c:%d,f:%08lx)", __va(page_to_pfn(pg) << PAGE_SHIFT),
	       page_count(pg),
	       pg->flags);
	if (upg/* && pg != upg*/) {
		printk(" upg: %p (c:%d,f:%08lx)", __va(page_to_pfn(upg)
						       << PAGE_SHIFT),
		       page_count(upg),
		       upg->flags);
	}
	printk("\n");
}

static void dump_vdso_pages(struct vm_area_struct * vma)
{
	int i;

	if (!vma || test_thread_flag(TIF_32BIT)) {
		printk("vDSO32 @ %016lx:\n", (unsigned long)vdso32_kbase);
		for (i=0; i<vdso32_pages; i++) {
			struct page *pg = virt_to_page(vdso32_kbase +
						       i*PAGE_SIZE);
			struct page *upg = (vma && vma->vm_mm) ?
				follow_page(vma, vma->vm_start + i*PAGE_SIZE, 0)
				: NULL;
			dump_one_vdso_page(pg, upg);
		}
	}
	if (!vma || !test_thread_flag(TIF_32BIT)) {
		printk("vDSO64 @ %016lx:\n", (unsigned long)vdso64_kbase);
		for (i=0; i<vdso64_pages; i++) {
			struct page *pg = virt_to_page(vdso64_kbase +
						       i*PAGE_SIZE);
			struct page *upg = (vma && vma->vm_mm) ?
				follow_page(vma, vma->vm_start + i*PAGE_SIZE, 0)
				: NULL;
			dump_one_vdso_page(pg, upg);
		}
	}
}
#endif /* __DEBUG */

/*
 * This is called from binfmt_elf; we create the special vma for the
 * vDSO and insert it into the mm struct tree.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm,
				int executable_stack)
{
	struct mm_struct *mm = current->mm;
	struct page **vdso_pagelist;
	unsigned long vdso_pages;
	unsigned long vdso_base;
	int rc;

	if (!vdso_ready)
		return 0;

#ifdef CONFIG_PPC64
	if (test_thread_flag(TIF_32BIT)) {
		vdso_pagelist = vdso32_pagelist;
		vdso_pages = vdso32_pages;
		vdso_base = VDSO32_MBASE;
	} else {
		vdso_pagelist = vdso64_pagelist;
		vdso_pages = vdso64_pages;
		vdso_base = VDSO64_MBASE;
	}
#else
	vdso_pagelist = vdso32_pagelist;
	vdso_pages = vdso32_pages;
	vdso_base = VDSO32_MBASE;
#endif

	current->mm->context.vdso_base = 0;

	/* The vDSO had a problem and was disabled; just don't "enable"
	 * it for this process.
	 */
	if (vdso_pages == 0)
		return 0;
	/* Add a page to the vdso size for the data page */
	vdso_pages++;

	/*
	 * Pick a base address for the vDSO in process space.  We try to
	 * put it at vdso_base, which is the "natural" base for it, but
	 * we might fail and end up putting it elsewhere.
	 */
	down_write(&mm->mmap_sem);
	vdso_base = get_unmapped_area(NULL, vdso_base,
				      vdso_pages << PAGE_SHIFT, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		rc = vdso_base;
		goto fail_mmapsem;
	}

	/*
	 * Our vma flags don't include VM_WRITE, so by default the process
	 * isn't allowed to write to those pages.
	 * gdb can break that via the ptrace interface and thus trigger COW
	 * on those pages, but it's then your responsibility never to do
	 * that on the "data" page of the vDSO, or you'll stop getting
	 * kernel updates and your nice userland gettimeofday will be
	 * completely dead.  It's fine to use it for setting breakpoints
	 * in the vDSO code pages, though.
	 *
	 * Make sure the vDSO gets into every core dump.
	 * Dumping its contents makes post-mortem fully interpretable later
	 * without matching up the same kernel and hardware config to see
	 * what PC values meant.
	 */
	rc = install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
				     VM_READ|VM_EXEC|
				     VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
				     VM_ALWAYSDUMP,
				     vdso_pagelist);
	if (rc)
		goto fail_mmapsem;

	/* Put vDSO base into mm struct */
	current->mm->context.vdso_base = vdso_base;

	up_write(&mm->mmap_sem);
	return 0;

 fail_mmapsem:
	up_write(&mm->mmap_sem);
	return rc;
}
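/*
 * The resulting mapping (sketch): vdso_pages was bumped by one above, so
 * the VMA at vdso_base covers the vDSO text pages followed by one extra
 * page backed by vdso_data (the last entry of the page list built in
 * vdso_init()).  vdso_base is remembered in mm->context.vdso_base, which
 * the ELF loader is expected to hand to userland in the aux vector
 * (AT_SYSINFO_EHDR), and which arch_vma_name() below uses to label the
 * mapping "[vdso]".
 */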
const char *arch_vma_name(struct vm_area_struct *vma)
{
	if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso_base)
		return "[vdso]";
	return NULL;
}



static void * __init find_section32(Elf32_Ehdr *ehdr, const char *secname,
				    unsigned long *size)
{
	Elf32_Shdr *sechdrs;
	unsigned int i;
	char *secnames;

	/* Grab section headers and strings so we can tell who is who */
	sechdrs = (void *)ehdr + ehdr->e_shoff;
	secnames = (void *)ehdr + sechdrs[ehdr->e_shstrndx].sh_offset;

	/* Find the section they want */
	for (i = 1; i < ehdr->e_shnum; i++) {
		if (strcmp(secnames+sechdrs[i].sh_name, secname) == 0) {
			if (size)
				*size = sechdrs[i].sh_size;
			return (void *)ehdr + sechdrs[i].sh_offset;
		}
	}
	if (size)
		*size = 0;
	return NULL;
}

static Elf32_Sym * __init find_symbol32(struct lib32_elfinfo *lib,
					const char *symname)
{
	unsigned int i;
	char name[MAX_SYMNAME], *c;

	for (i = 0; i < (lib->dynsymsize / sizeof(Elf32_Sym)); i++) {
		if (lib->dynsym[i].st_name == 0)
			continue;
		strlcpy(name, lib->dynstr + lib->dynsym[i].st_name,
			MAX_SYMNAME);
		c = strchr(name, '@');
		if (c)
			*c = 0;
		if (strcmp(symname, name) == 0)
			return &lib->dynsym[i];
	}
	return NULL;
}

/* Note that we assume the section is .text and the symbol is relative to
 * the library base
 */
static unsigned long __init find_function32(struct lib32_elfinfo *lib,
					    const char *symname)
{
	Elf32_Sym *sym = find_symbol32(lib, symname);

	if (sym == NULL) {
		printk(KERN_WARNING "vDSO32: function %s not found !\n",
		       symname);
		return 0;
	}
	return sym->st_value - VDSO32_LBASE;
}
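/*
 * Patch one function in the 32-bit image: the generic symbol's .dynsym
 * entry is overwritten with the fixed variant's value/size/info/shndx,
 * so a lookup of "orig" resolves to the code of "fix".  When fix is NULL,
 * the generic symbol's st_name is cleared instead, which hides it from
 * name lookups altogether.
 */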
static int vdso_do_func_patch32(struct lib32_elfinfo *v32,
				struct lib64_elfinfo *v64,
				const char *orig, const char *fix)
{
	Elf32_Sym *sym32_gen, *sym32_fix;

	sym32_gen = find_symbol32(v32, orig);
	if (sym32_gen == NULL) {
		printk(KERN_ERR "vDSO32: Can't find symbol %s !\n", orig);
		return -1;
	}
	if (fix == NULL) {
		sym32_gen->st_name = 0;
		return 0;
	}
	sym32_fix = find_symbol32(v32, fix);
	if (sym32_fix == NULL) {
		printk(KERN_ERR "vDSO32: Can't find symbol %s !\n", fix);
		return -1;
	}
	sym32_gen->st_value = sym32_fix->st_value;
	sym32_gen->st_size = sym32_fix->st_size;
	sym32_gen->st_info = sym32_fix->st_info;
	sym32_gen->st_other = sym32_fix->st_other;
	sym32_gen->st_shndx = sym32_fix->st_shndx;

	return 0;
}


#ifdef CONFIG_PPC64

static void * __init find_section64(Elf64_Ehdr *ehdr, const char *secname,
				    unsigned long *size)
{
	Elf64_Shdr *sechdrs;
	unsigned int i;
	char *secnames;

	/* Grab section headers and strings so we can tell who is who */
	sechdrs = (void *)ehdr + ehdr->e_shoff;
	secnames = (void *)ehdr + sechdrs[ehdr->e_shstrndx].sh_offset;

	/* Find the section they want */
	for (i = 1; i < ehdr->e_shnum; i++) {
		if (strcmp(secnames+sechdrs[i].sh_name, secname) == 0) {
			if (size)
				*size = sechdrs[i].sh_size;
			return (void *)ehdr + sechdrs[i].sh_offset;
		}
	}
	if (size)
		*size = 0;
	return NULL;
}

static Elf64_Sym * __init find_symbol64(struct lib64_elfinfo *lib,
					const char *symname)
{
	unsigned int i;
	char name[MAX_SYMNAME], *c;

	for (i = 0; i < (lib->dynsymsize / sizeof(Elf64_Sym)); i++) {
		if (lib->dynsym[i].st_name == 0)
			continue;
		strlcpy(name, lib->dynstr + lib->dynsym[i].st_name,
			MAX_SYMNAME);
		c = strchr(name, '@');
		if (c)
			*c = 0;
		if (strcmp(symname, name) == 0)
			return &lib->dynsym[i];
	}
	return NULL;
}

/* Note that we assume the section is .text and the symbol is relative to
 * the library base
 */
static unsigned long __init find_function64(struct lib64_elfinfo *lib,
					    const char *symname)
{
	Elf64_Sym *sym = find_symbol64(lib, symname);

	if (sym == NULL) {
		printk(KERN_WARNING "vDSO64: function %s not found !\n",
		       symname);
		return 0;
	}
#ifdef VDS64_HAS_DESCRIPTORS
	return *((u64 *)(vdso64_kbase + sym->st_value - VDSO64_LBASE)) -
		VDSO64_LBASE;
#else
	return sym->st_value - VDSO64_LBASE;
#endif
}

static int vdso_do_func_patch64(struct lib32_elfinfo *v32,
				struct lib64_elfinfo *v64,
				const char *orig, const char *fix)
{
	Elf64_Sym *sym64_gen, *sym64_fix;

	sym64_gen = find_symbol64(v64, orig);
	if (sym64_gen == NULL) {
		printk(KERN_ERR "vDSO64: Can't find symbol %s !\n", orig);
		return -1;
	}
	if (fix == NULL) {
		sym64_gen->st_name = 0;
		return 0;
	}
	sym64_fix = find_symbol64(v64, fix);
	if (sym64_fix == NULL) {
		printk(KERN_ERR "vDSO64: Can't find symbol %s !\n", fix);
		return -1;
	}
	sym64_gen->st_value = sym64_fix->st_value;
	sym64_gen->st_size = sym64_fix->st_size;
	sym64_gen->st_info = sym64_fix->st_info;
	sym64_gen->st_other = sym64_fix->st_other;
	sym64_gen->st_shndx = sym64_fix->st_shndx;

	return 0;
}

#endif /* CONFIG_PPC64 */

static __init int vdso_do_find_sections(struct lib32_elfinfo *v32,
					struct lib64_elfinfo *v64)
{
	void *sect;

	/*
	 * Locate symbol tables & text section
	 */

	v32->dynsym = find_section32(v32->hdr, ".dynsym", &v32->dynsymsize);
	v32->dynstr = find_section32(v32->hdr, ".dynstr", NULL);
	if (v32->dynsym == NULL || v32->dynstr == NULL) {
		printk(KERN_ERR "vDSO32: required symbol section not found\n");
		return -1;
	}
	sect = find_section32(v32->hdr, ".text", NULL);
	if (sect == NULL) {
		printk(KERN_ERR "vDSO32: the .text section was not found\n");
		return -1;
	}
	v32->text = sect - vdso32_kbase;

#ifdef CONFIG_PPC64
	v64->dynsym = find_section64(v64->hdr, ".dynsym", &v64->dynsymsize);
	v64->dynstr = find_section64(v64->hdr, ".dynstr", NULL);
	if (v64->dynsym == NULL || v64->dynstr == NULL) {
		printk(KERN_ERR "vDSO64: required symbol section not found\n");
		return -1;
	}
	sect = find_section64(v64->hdr, ".text", NULL);
	if (sect == NULL) {
		printk(KERN_ERR "vDSO64: the .text section was not found\n");
		return -1;
	}
	v64->text = sect - vdso64_kbase;
#endif /* CONFIG_PPC64 */

	return 0;
}

static __init void vdso_setup_trampolines(struct lib32_elfinfo *v32,
					  struct lib64_elfinfo *v64)
{
	/*
	 * Find signal trampolines
	 */

#ifdef CONFIG_PPC64
	vdso64_rt_sigtramp = find_function64(v64, "__kernel_sigtramp_rt64");
#endif
	vdso32_sigtramp	   = find_function32(v32, "__kernel_sigtramp32");
	vdso32_rt_sigtramp = find_function32(v32, "__kernel_sigtramp_rt32");
}
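/*
 * __kernel_datapage_offset is a word inside each vDSO image; the vDSO
 * code adds its value to the word's own address to locate the data page,
 * which is mapped right after the last text page.  We therefore store
 * (vdso_pages << PAGE_SHIFT) minus the word's offset into the image.
 *
 * Worked example with made-up numbers: for a one-page 32-bit vDSO
 * (vdso32_pages == 1, 4k pages) with the word at offset 0x80, we store
 * 0x1000 - 0x80 = 0xf80, and 0x80 + 0xf80 indeed lands on the page that
 * follows the text page.
 */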
static __init int vdso_fixup_datapage(struct lib32_elfinfo *v32,
				      struct lib64_elfinfo *v64)
{
	Elf32_Sym *sym32;
#ifdef CONFIG_PPC64
	Elf64_Sym *sym64;

	sym64 = find_symbol64(v64, "__kernel_datapage_offset");
	if (sym64 == NULL) {
		printk(KERN_ERR "vDSO64: Can't find symbol "
		       "__kernel_datapage_offset !\n");
		return -1;
	}
	*((int *)(vdso64_kbase + sym64->st_value - VDSO64_LBASE)) =
		(vdso64_pages << PAGE_SHIFT) -
		(sym64->st_value - VDSO64_LBASE);
#endif /* CONFIG_PPC64 */

	sym32 = find_symbol32(v32, "__kernel_datapage_offset");
	if (sym32 == NULL) {
		printk(KERN_ERR "vDSO32: Can't find symbol "
		       "__kernel_datapage_offset !\n");
		return -1;
	}
	*((int *)(vdso32_kbase + (sym32->st_value - VDSO32_LBASE))) =
		(vdso32_pages << PAGE_SHIFT) -
		(sym32->st_value - VDSO32_LBASE);

	return 0;
}


static __init int vdso_fixup_features(struct lib32_elfinfo *v32,
				      struct lib64_elfinfo *v64)
{
	void *start32;
	unsigned long size32;

#ifdef CONFIG_PPC64
	void *start64;
	unsigned long size64;

	start64 = find_section64(v64->hdr, "__ftr_fixup", &size64);
	if (start64)
		do_feature_fixups(cur_cpu_spec->cpu_features,
				  start64, start64 + size64);

	start64 = find_section64(v64->hdr, "__fw_ftr_fixup", &size64);
	if (start64)
		do_feature_fixups(powerpc_firmware_features,
				  start64, start64 + size64);
#endif /* CONFIG_PPC64 */

	start32 = find_section32(v32->hdr, "__ftr_fixup", &size32);
	if (start32)
		do_feature_fixups(cur_cpu_spec->cpu_features,
				  start32, start32 + size32);

#ifdef CONFIG_PPC64
	start32 = find_section32(v32->hdr, "__fw_ftr_fixup", &size32);
	if (start32)
		do_feature_fixups(powerpc_firmware_features,
				  start32, start32 + size32);
#endif /* CONFIG_PPC64 */

	return 0;
}

static __init int vdso_fixup_alt_funcs(struct lib32_elfinfo *v32,
				       struct lib64_elfinfo *v64)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vdso_patches); i++) {
		struct vdso_patch_def *patch = &vdso_patches[i];
		int match = (cur_cpu_spec->cpu_features & patch->ftr_mask)
			== patch->ftr_value;
		if (!match)
			continue;

		DBG("replacing %s with %s...\n", patch->gen_name,
		    patch->fix_name ? patch->fix_name : "NONE");

		/*
		 * Patch the 32-bit and 64-bit symbols.  Note that we do not
		 * patch the "." symbol on 64-bit.  It would be easy to do,
		 * but doesn't seem to be necessary: patching the OPD symbol
		 * is enough.
		 */
		vdso_do_func_patch32(v32, v64, patch->gen_name,
				     patch->fix_name);
#ifdef CONFIG_PPC64
		vdso_do_func_patch64(v32, v64, patch->gen_name,
				     patch->fix_name);
#endif /* CONFIG_PPC64 */
	}

	return 0;
}


static __init int vdso_setup(void)
{
	struct lib32_elfinfo	v32;
	struct lib64_elfinfo	v64;

	v32.hdr = vdso32_kbase;
#ifdef CONFIG_PPC64
	v64.hdr = vdso64_kbase;
#endif
	if (vdso_do_find_sections(&v32, &v64))
		return -1;

	if (vdso_fixup_datapage(&v32, &v64))
		return -1;

	if (vdso_fixup_features(&v32, &v64))
		return -1;

	if (vdso_fixup_alt_funcs(&v32, &v64))
		return -1;

	vdso_setup_trampolines(&v32, &v64);

	return 0;
}

/*
 * Called from vdso_init() to initialize the bitmap of available
 * syscalls in the systemcfg page.
 */
static void __init vdso_setup_syscall_map(void)
{
	unsigned int i;
	extern unsigned long *sys_call_table;
	extern unsigned long sys_ni_syscall;


	for (i = 0; i < __NR_syscalls; i++) {
#ifdef CONFIG_PPC64
		if (sys_call_table[i*2] != (unsigned long)&sys_ni_syscall)
			vdso_data->syscall_map_64[i >> 5] |=
				0x80000000UL >> (i & 0x1f);
		if (sys_call_table[i*2+1] != (unsigned long)&sys_ni_syscall)
			vdso_data->syscall_map_32[i >> 5] |=
				0x80000000UL >> (i & 0x1f);
#else /* CONFIG_PPC64 */
		if (sys_call_table[i] != (unsigned long)&sys_ni_syscall)
			vdso_data->syscall_map_32[i >> 5] |=
				0x80000000UL >> (i & 0x1f);
#endif /* CONFIG_PPC64 */
	}
}
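/*
 * Map encoding used above: syscall number N is marked present by setting
 * bit (0x80000000UL >> (N & 0x1f)) in syscall_map_XX[N >> 5], i.e. one
 * 32-bit word per 32 syscalls with big-endian bit numbering.  A user of
 * the map tests it the same way:
 *
 *	present = map[N >> 5] & (0x80000000UL >> (N & 0x1f));
 *
 * On PPC64 the syscall table interleaves 64-bit and 32-bit entries, hence
 * the sys_call_table[i*2] / [i*2+1] indexing.
 */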
static int __init vdso_init(void)
{
	int i;

#ifdef CONFIG_PPC64
	/*
	 * Fill up the "systemcfg" stuff for backward compatibility
	 */
	strcpy((char *)vdso_data->eye_catcher, "SYSTEMCFG:PPC64");
	vdso_data->version.major = SYSTEMCFG_MAJOR;
	vdso_data->version.minor = SYSTEMCFG_MINOR;
	vdso_data->processor = mfspr(SPRN_PVR);
	/*
	 * Fake the old platform number for pSeries and iSeries and add
	 * in the LPAR bit if necessary
	 */
	vdso_data->platform = machine_is(iseries) ? 0x200 : 0x100;
	if (firmware_has_feature(FW_FEATURE_LPAR))
		vdso_data->platform |= 1;
	vdso_data->physicalMemorySize = lmb_phys_mem_size();
	vdso_data->dcache_size = ppc64_caches.dsize;
	vdso_data->dcache_line_size = ppc64_caches.dline_size;
	vdso_data->icache_size = ppc64_caches.isize;
	vdso_data->icache_line_size = ppc64_caches.iline_size;

	/*
	 * Calculate the size of the 64-bit vDSO
	 */
	vdso64_pages = (&vdso64_end - &vdso64_start) >> PAGE_SHIFT;
	DBG("vdso64_kbase: %p, 0x%x pages\n", vdso64_kbase, vdso64_pages);
#endif /* CONFIG_PPC64 */


	/*
	 * Calculate the size of the 32-bit vDSO
	 */
	vdso32_pages = (&vdso32_end - &vdso32_start) >> PAGE_SHIFT;
	DBG("vdso32_kbase: %p, 0x%x pages\n", vdso32_kbase, vdso32_pages);


	/*
	 * Setup the syscall map in the vDSO
	 */
	vdso_setup_syscall_map();

	/*
	 * Initialize the vDSO images in memory, that is do necessary
	 * fixups of vDSO symbols, locate trampolines, etc...
	 */
	if (vdso_setup()) {
		printk(KERN_ERR "vDSO setup failure, not enabled !\n");
		vdso32_pages = 0;
#ifdef CONFIG_PPC64
		vdso64_pages = 0;
#endif
		return 0;
	}

	/* Make sure pages are in the correct state */
	vdso32_pagelist = kzalloc(sizeof(struct page *) * (vdso32_pages + 2),
				  GFP_KERNEL);
	BUG_ON(vdso32_pagelist == NULL);
	for (i = 0; i < vdso32_pages; i++) {
		struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE);
		ClearPageReserved(pg);
		get_page(pg);
		vdso32_pagelist[i] = pg;
	}
	vdso32_pagelist[i++] = virt_to_page(vdso_data);
	vdso32_pagelist[i] = NULL;

#ifdef CONFIG_PPC64
	vdso64_pagelist = kzalloc(sizeof(struct page *) * (vdso64_pages + 2),
				  GFP_KERNEL);
	BUG_ON(vdso64_pagelist == NULL);
	for (i = 0; i < vdso64_pages; i++) {
		struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);
		ClearPageReserved(pg);
		get_page(pg);
		vdso64_pagelist[i] = pg;
	}
	vdso64_pagelist[i++] = virt_to_page(vdso_data);
	vdso64_pagelist[i] = NULL;
#endif /* CONFIG_PPC64 */

	get_page(virt_to_page(vdso_data));

	smp_wmb();
	vdso_ready = 1;

	return 0;
}
arch_initcall(vdso_init);

/*
 * The vDSO is an ordinary per-process mapping (named via arch_vma_name()
 * above), not a fixed gate area, so the gate area hooks have nothing to
 * report.
 */
int in_gate_area_no_task(unsigned long addr)
{
	return 0;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
	return 0;
}

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
	return NULL;
}