1 2 /* 3 * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp. 4 * <benh@kernel.crashing.org> 5 * 6 * This program is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU General Public License 8 * as published by the Free Software Foundation; either version 9 * 2 of the License, or (at your option) any later version. 10 */ 11 12 #include <linux/errno.h> 13 #include <linux/sched.h> 14 #include <linux/kernel.h> 15 #include <linux/mm.h> 16 #include <linux/smp.h> 17 #include <linux/stddef.h> 18 #include <linux/unistd.h> 19 #include <linux/slab.h> 20 #include <linux/user.h> 21 #include <linux/elf.h> 22 #include <linux/security.h> 23 #include <linux/memblock.h> 24 25 #include <asm/cpu_has_feature.h> 26 #include <asm/pgtable.h> 27 #include <asm/processor.h> 28 #include <asm/mmu.h> 29 #include <asm/mmu_context.h> 30 #include <asm/prom.h> 31 #include <asm/machdep.h> 32 #include <asm/cputable.h> 33 #include <asm/sections.h> 34 #include <asm/firmware.h> 35 #include <asm/vdso.h> 36 #include <asm/vdso_datapage.h> 37 #include <asm/setup.h> 38 39 #undef DEBUG 40 41 #ifdef DEBUG 42 #define DBG(fmt...) printk(fmt) 43 #else 44 #define DBG(fmt...) 45 #endif 46 47 /* Max supported size for symbol names */ 48 #define MAX_SYMNAME 64 49 50 /* The alignment of the vDSO */ 51 #define VDSO_ALIGNMENT (1 << 16) 52 53 static unsigned int vdso32_pages; 54 static void *vdso32_kbase; 55 static struct page **vdso32_pagelist; 56 unsigned long vdso32_sigtramp; 57 unsigned long vdso32_rt_sigtramp; 58 59 #ifdef CONFIG_VDSO32 60 extern char vdso32_start, vdso32_end; 61 #endif 62 63 #ifdef CONFIG_PPC64 64 extern char vdso64_start, vdso64_end; 65 static void *vdso64_kbase = &vdso64_start; 66 static unsigned int vdso64_pages; 67 static struct page **vdso64_pagelist; 68 unsigned long vdso64_rt_sigtramp; 69 #endif /* CONFIG_PPC64 */ 70 71 static int vdso_ready; 72 73 /* 74 * The vdso data page (aka. systemcfg for old ppc64 fans) is here. 75 * Once the early boot kernel code no longer needs to muck around 76 * with it, it will become dynamically allocated 77 */ 78 static union { 79 struct vdso_data data; 80 u8 page[PAGE_SIZE]; 81 } vdso_data_store __page_aligned_data; 82 struct vdso_data *vdso_data = &vdso_data_store.data; 83 84 /* Format of the patch table */ 85 struct vdso_patch_def 86 { 87 unsigned long ftr_mask, ftr_value; 88 const char *gen_name; 89 const char *fix_name; 90 }; 91 92 /* Table of functions to patch based on the CPU type/revision 93 * 94 * Currently, we only change sync_dicache to do nothing on processors 95 * with a coherent icache 96 */ 97 static struct vdso_patch_def vdso_patches[] = { 98 { 99 CPU_FTR_COHERENT_ICACHE, CPU_FTR_COHERENT_ICACHE, 100 "__kernel_sync_dicache", "__kernel_sync_dicache_p5" 101 }, 102 #ifdef CONFIG_PPC32 103 { 104 CPU_FTR_USE_RTC, CPU_FTR_USE_RTC, 105 "__kernel_gettimeofday", NULL 106 }, 107 { 108 CPU_FTR_USE_RTC, CPU_FTR_USE_RTC, 109 "__kernel_clock_gettime", NULL 110 }, 111 { 112 CPU_FTR_USE_RTC, CPU_FTR_USE_RTC, 113 "__kernel_clock_getres", NULL 114 }, 115 { 116 CPU_FTR_USE_RTC, CPU_FTR_USE_RTC, 117 "__kernel_get_tbfreq", NULL 118 }, 119 { 120 CPU_FTR_USE_RTC, CPU_FTR_USE_RTC, 121 "__kernel_time", NULL 122 }, 123 #endif 124 }; 125 126 /* 127 * Some infos carried around for each of them during parsing at 128 * boot time. 129 */ 130 struct lib32_elfinfo 131 { 132 Elf32_Ehdr *hdr; /* ptr to ELF */ 133 Elf32_Sym *dynsym; /* ptr to .dynsym section */ 134 unsigned long dynsymsize; /* size of .dynsym section */ 135 char *dynstr; /* ptr to .dynstr section */ 136 unsigned long text; /* offset of .text section in .so */ 137 }; 138 139 struct lib64_elfinfo 140 { 141 Elf64_Ehdr *hdr; 142 Elf64_Sym *dynsym; 143 unsigned long dynsymsize; 144 char *dynstr; 145 unsigned long text; 146 }; 147 148 149 /* 150 * This is called from binfmt_elf, we create the special vma for the 151 * vDSO and insert it into the mm struct tree 152 */ 153 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) 154 { 155 struct mm_struct *mm = current->mm; 156 struct page **vdso_pagelist; 157 unsigned long vdso_pages; 158 unsigned long vdso_base; 159 int rc; 160 161 if (!vdso_ready) 162 return 0; 163 164 #ifdef CONFIG_PPC64 165 if (is_32bit_task()) { 166 vdso_pagelist = vdso32_pagelist; 167 vdso_pages = vdso32_pages; 168 vdso_base = VDSO32_MBASE; 169 } else { 170 vdso_pagelist = vdso64_pagelist; 171 vdso_pages = vdso64_pages; 172 /* 173 * On 64bit we don't have a preferred map address. This 174 * allows get_unmapped_area to find an area near other mmaps 175 * and most likely share a SLB entry. 176 */ 177 vdso_base = 0; 178 } 179 #else 180 vdso_pagelist = vdso32_pagelist; 181 vdso_pages = vdso32_pages; 182 vdso_base = VDSO32_MBASE; 183 #endif 184 185 current->mm->context.vdso_base = 0; 186 187 /* vDSO has a problem and was disabled, just don't "enable" it for the 188 * process 189 */ 190 if (vdso_pages == 0) 191 return 0; 192 /* Add a page to the vdso size for the data page */ 193 vdso_pages ++; 194 195 /* 196 * pick a base address for the vDSO in process space. We try to put it 197 * at vdso_base which is the "natural" base for it, but we might fail 198 * and end up putting it elsewhere. 199 * Add enough to the size so that the result can be aligned. 200 */ 201 if (down_write_killable(&mm->mmap_sem)) 202 return -EINTR; 203 vdso_base = get_unmapped_area(NULL, vdso_base, 204 (vdso_pages << PAGE_SHIFT) + 205 ((VDSO_ALIGNMENT - 1) & PAGE_MASK), 206 0, 0); 207 if (IS_ERR_VALUE(vdso_base)) { 208 rc = vdso_base; 209 goto fail_mmapsem; 210 } 211 212 /* Add required alignment. */ 213 vdso_base = ALIGN(vdso_base, VDSO_ALIGNMENT); 214 215 /* 216 * Put vDSO base into mm struct. We need to do this before calling 217 * install_special_mapping or the perf counter mmap tracking code 218 * will fail to recognise it as a vDSO (since arch_vma_name fails). 219 */ 220 current->mm->context.vdso_base = vdso_base; 221 222 /* 223 * our vma flags don't have VM_WRITE so by default, the process isn't 224 * allowed to write those pages. 225 * gdb can break that with ptrace interface, and thus trigger COW on 226 * those pages but it's then your responsibility to never do that on 227 * the "data" page of the vDSO or you'll stop getting kernel updates 228 * and your nice userland gettimeofday will be totally dead. 229 * It's fine to use that for setting breakpoints in the vDSO code 230 * pages though. 231 */ 232 rc = install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT, 233 VM_READ|VM_EXEC| 234 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, 235 vdso_pagelist); 236 if (rc) { 237 current->mm->context.vdso_base = 0; 238 goto fail_mmapsem; 239 } 240 241 up_write(&mm->mmap_sem); 242 return 0; 243 244 fail_mmapsem: 245 up_write(&mm->mmap_sem); 246 return rc; 247 } 248 249 const char *arch_vma_name(struct vm_area_struct *vma) 250 { 251 if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso_base) 252 return "[vdso]"; 253 return NULL; 254 } 255 256 257 258 #ifdef CONFIG_VDSO32 259 static void * __init find_section32(Elf32_Ehdr *ehdr, const char *secname, 260 unsigned long *size) 261 { 262 Elf32_Shdr *sechdrs; 263 unsigned int i; 264 char *secnames; 265 266 /* Grab section headers and strings so we can tell who is who */ 267 sechdrs = (void *)ehdr + ehdr->e_shoff; 268 secnames = (void *)ehdr + sechdrs[ehdr->e_shstrndx].sh_offset; 269 270 /* Find the section they want */ 271 for (i = 1; i < ehdr->e_shnum; i++) { 272 if (strcmp(secnames+sechdrs[i].sh_name, secname) == 0) { 273 if (size) 274 *size = sechdrs[i].sh_size; 275 return (void *)ehdr + sechdrs[i].sh_offset; 276 } 277 } 278 *size = 0; 279 return NULL; 280 } 281 282 static Elf32_Sym * __init find_symbol32(struct lib32_elfinfo *lib, 283 const char *symname) 284 { 285 unsigned int i; 286 char name[MAX_SYMNAME], *c; 287 288 for (i = 0; i < (lib->dynsymsize / sizeof(Elf32_Sym)); i++) { 289 if (lib->dynsym[i].st_name == 0) 290 continue; 291 strlcpy(name, lib->dynstr + lib->dynsym[i].st_name, 292 MAX_SYMNAME); 293 c = strchr(name, '@'); 294 if (c) 295 *c = 0; 296 if (strcmp(symname, name) == 0) 297 return &lib->dynsym[i]; 298 } 299 return NULL; 300 } 301 302 /* Note that we assume the section is .text and the symbol is relative to 303 * the library base 304 */ 305 static unsigned long __init find_function32(struct lib32_elfinfo *lib, 306 const char *symname) 307 { 308 Elf32_Sym *sym = find_symbol32(lib, symname); 309 310 if (sym == NULL) { 311 printk(KERN_WARNING "vDSO32: function %s not found !\n", 312 symname); 313 return 0; 314 } 315 return sym->st_value - VDSO32_LBASE; 316 } 317 318 static int __init vdso_do_func_patch32(struct lib32_elfinfo *v32, 319 struct lib64_elfinfo *v64, 320 const char *orig, const char *fix) 321 { 322 Elf32_Sym *sym32_gen, *sym32_fix; 323 324 sym32_gen = find_symbol32(v32, orig); 325 if (sym32_gen == NULL) { 326 printk(KERN_ERR "vDSO32: Can't find symbol %s !\n", orig); 327 return -1; 328 } 329 if (fix == NULL) { 330 sym32_gen->st_name = 0; 331 return 0; 332 } 333 sym32_fix = find_symbol32(v32, fix); 334 if (sym32_fix == NULL) { 335 printk(KERN_ERR "vDSO32: Can't find symbol %s !\n", fix); 336 return -1; 337 } 338 sym32_gen->st_value = sym32_fix->st_value; 339 sym32_gen->st_size = sym32_fix->st_size; 340 sym32_gen->st_info = sym32_fix->st_info; 341 sym32_gen->st_other = sym32_fix->st_other; 342 sym32_gen->st_shndx = sym32_fix->st_shndx; 343 344 return 0; 345 } 346 #else /* !CONFIG_VDSO32 */ 347 static unsigned long __init find_function32(struct lib32_elfinfo *lib, 348 const char *symname) 349 { 350 return 0; 351 } 352 353 static int __init vdso_do_func_patch32(struct lib32_elfinfo *v32, 354 struct lib64_elfinfo *v64, 355 const char *orig, const char *fix) 356 { 357 return 0; 358 } 359 #endif /* CONFIG_VDSO32 */ 360 361 362 #ifdef CONFIG_PPC64 363 364 static void * __init find_section64(Elf64_Ehdr *ehdr, const char *secname, 365 unsigned long *size) 366 { 367 Elf64_Shdr *sechdrs; 368 unsigned int i; 369 char *secnames; 370 371 /* Grab section headers and strings so we can tell who is who */ 372 sechdrs = (void *)ehdr + ehdr->e_shoff; 373 secnames = (void *)ehdr + sechdrs[ehdr->e_shstrndx].sh_offset; 374 375 /* Find the section they want */ 376 for (i = 1; i < ehdr->e_shnum; i++) { 377 if (strcmp(secnames+sechdrs[i].sh_name, secname) == 0) { 378 if (size) 379 *size = sechdrs[i].sh_size; 380 return (void *)ehdr + sechdrs[i].sh_offset; 381 } 382 } 383 if (size) 384 *size = 0; 385 return NULL; 386 } 387 388 static Elf64_Sym * __init find_symbol64(struct lib64_elfinfo *lib, 389 const char *symname) 390 { 391 unsigned int i; 392 char name[MAX_SYMNAME], *c; 393 394 for (i = 0; i < (lib->dynsymsize / sizeof(Elf64_Sym)); i++) { 395 if (lib->dynsym[i].st_name == 0) 396 continue; 397 strlcpy(name, lib->dynstr + lib->dynsym[i].st_name, 398 MAX_SYMNAME); 399 c = strchr(name, '@'); 400 if (c) 401 *c = 0; 402 if (strcmp(symname, name) == 0) 403 return &lib->dynsym[i]; 404 } 405 return NULL; 406 } 407 408 /* Note that we assume the section is .text and the symbol is relative to 409 * the library base 410 */ 411 static unsigned long __init find_function64(struct lib64_elfinfo *lib, 412 const char *symname) 413 { 414 Elf64_Sym *sym = find_symbol64(lib, symname); 415 416 if (sym == NULL) { 417 printk(KERN_WARNING "vDSO64: function %s not found !\n", 418 symname); 419 return 0; 420 } 421 #ifdef VDS64_HAS_DESCRIPTORS 422 return *((u64 *)(vdso64_kbase + sym->st_value - VDSO64_LBASE)) - 423 VDSO64_LBASE; 424 #else 425 return sym->st_value - VDSO64_LBASE; 426 #endif 427 } 428 429 static int __init vdso_do_func_patch64(struct lib32_elfinfo *v32, 430 struct lib64_elfinfo *v64, 431 const char *orig, const char *fix) 432 { 433 Elf64_Sym *sym64_gen, *sym64_fix; 434 435 sym64_gen = find_symbol64(v64, orig); 436 if (sym64_gen == NULL) { 437 printk(KERN_ERR "vDSO64: Can't find symbol %s !\n", orig); 438 return -1; 439 } 440 if (fix == NULL) { 441 sym64_gen->st_name = 0; 442 return 0; 443 } 444 sym64_fix = find_symbol64(v64, fix); 445 if (sym64_fix == NULL) { 446 printk(KERN_ERR "vDSO64: Can't find symbol %s !\n", fix); 447 return -1; 448 } 449 sym64_gen->st_value = sym64_fix->st_value; 450 sym64_gen->st_size = sym64_fix->st_size; 451 sym64_gen->st_info = sym64_fix->st_info; 452 sym64_gen->st_other = sym64_fix->st_other; 453 sym64_gen->st_shndx = sym64_fix->st_shndx; 454 455 return 0; 456 } 457 458 #endif /* CONFIG_PPC64 */ 459 460 461 static __init int vdso_do_find_sections(struct lib32_elfinfo *v32, 462 struct lib64_elfinfo *v64) 463 { 464 void *sect; 465 466 /* 467 * Locate symbol tables & text section 468 */ 469 470 #ifdef CONFIG_VDSO32 471 v32->dynsym = find_section32(v32->hdr, ".dynsym", &v32->dynsymsize); 472 v32->dynstr = find_section32(v32->hdr, ".dynstr", NULL); 473 if (v32->dynsym == NULL || v32->dynstr == NULL) { 474 printk(KERN_ERR "vDSO32: required symbol section not found\n"); 475 return -1; 476 } 477 sect = find_section32(v32->hdr, ".text", NULL); 478 if (sect == NULL) { 479 printk(KERN_ERR "vDSO32: the .text section was not found\n"); 480 return -1; 481 } 482 v32->text = sect - vdso32_kbase; 483 #endif 484 485 #ifdef CONFIG_PPC64 486 v64->dynsym = find_section64(v64->hdr, ".dynsym", &v64->dynsymsize); 487 v64->dynstr = find_section64(v64->hdr, ".dynstr", NULL); 488 if (v64->dynsym == NULL || v64->dynstr == NULL) { 489 printk(KERN_ERR "vDSO64: required symbol section not found\n"); 490 return -1; 491 } 492 sect = find_section64(v64->hdr, ".text", NULL); 493 if (sect == NULL) { 494 printk(KERN_ERR "vDSO64: the .text section was not found\n"); 495 return -1; 496 } 497 v64->text = sect - vdso64_kbase; 498 #endif /* CONFIG_PPC64 */ 499 500 return 0; 501 } 502 503 static __init void vdso_setup_trampolines(struct lib32_elfinfo *v32, 504 struct lib64_elfinfo *v64) 505 { 506 /* 507 * Find signal trampolines 508 */ 509 510 #ifdef CONFIG_PPC64 511 vdso64_rt_sigtramp = find_function64(v64, "__kernel_sigtramp_rt64"); 512 #endif 513 vdso32_sigtramp = find_function32(v32, "__kernel_sigtramp32"); 514 vdso32_rt_sigtramp = find_function32(v32, "__kernel_sigtramp_rt32"); 515 } 516 517 static __init int vdso_fixup_datapage(struct lib32_elfinfo *v32, 518 struct lib64_elfinfo *v64) 519 { 520 #ifdef CONFIG_VDSO32 521 Elf32_Sym *sym32; 522 #endif 523 #ifdef CONFIG_PPC64 524 Elf64_Sym *sym64; 525 526 sym64 = find_symbol64(v64, "__kernel_datapage_offset"); 527 if (sym64 == NULL) { 528 printk(KERN_ERR "vDSO64: Can't find symbol " 529 "__kernel_datapage_offset !\n"); 530 return -1; 531 } 532 *((int *)(vdso64_kbase + sym64->st_value - VDSO64_LBASE)) = 533 (vdso64_pages << PAGE_SHIFT) - 534 (sym64->st_value - VDSO64_LBASE); 535 #endif /* CONFIG_PPC64 */ 536 537 #ifdef CONFIG_VDSO32 538 sym32 = find_symbol32(v32, "__kernel_datapage_offset"); 539 if (sym32 == NULL) { 540 printk(KERN_ERR "vDSO32: Can't find symbol " 541 "__kernel_datapage_offset !\n"); 542 return -1; 543 } 544 *((int *)(vdso32_kbase + (sym32->st_value - VDSO32_LBASE))) = 545 (vdso32_pages << PAGE_SHIFT) - 546 (sym32->st_value - VDSO32_LBASE); 547 #endif 548 549 return 0; 550 } 551 552 553 static __init int vdso_fixup_features(struct lib32_elfinfo *v32, 554 struct lib64_elfinfo *v64) 555 { 556 unsigned long size; 557 void *start; 558 559 #ifdef CONFIG_PPC64 560 start = find_section64(v64->hdr, "__ftr_fixup", &size); 561 if (start) 562 do_feature_fixups(cur_cpu_spec->cpu_features, 563 start, start + size); 564 565 start = find_section64(v64->hdr, "__mmu_ftr_fixup", &size); 566 if (start) 567 do_feature_fixups(cur_cpu_spec->mmu_features, 568 start, start + size); 569 570 start = find_section64(v64->hdr, "__fw_ftr_fixup", &size); 571 if (start) 572 do_feature_fixups(powerpc_firmware_features, 573 start, start + size); 574 575 start = find_section64(v64->hdr, "__lwsync_fixup", &size); 576 if (start) 577 do_lwsync_fixups(cur_cpu_spec->cpu_features, 578 start, start + size); 579 #endif /* CONFIG_PPC64 */ 580 581 #ifdef CONFIG_VDSO32 582 start = find_section32(v32->hdr, "__ftr_fixup", &size); 583 if (start) 584 do_feature_fixups(cur_cpu_spec->cpu_features, 585 start, start + size); 586 587 start = find_section32(v32->hdr, "__mmu_ftr_fixup", &size); 588 if (start) 589 do_feature_fixups(cur_cpu_spec->mmu_features, 590 start, start + size); 591 592 #ifdef CONFIG_PPC64 593 start = find_section32(v32->hdr, "__fw_ftr_fixup", &size); 594 if (start) 595 do_feature_fixups(powerpc_firmware_features, 596 start, start + size); 597 #endif /* CONFIG_PPC64 */ 598 599 start = find_section32(v32->hdr, "__lwsync_fixup", &size); 600 if (start) 601 do_lwsync_fixups(cur_cpu_spec->cpu_features, 602 start, start + size); 603 #endif 604 605 return 0; 606 } 607 608 static __init int vdso_fixup_alt_funcs(struct lib32_elfinfo *v32, 609 struct lib64_elfinfo *v64) 610 { 611 int i; 612 613 for (i = 0; i < ARRAY_SIZE(vdso_patches); i++) { 614 struct vdso_patch_def *patch = &vdso_patches[i]; 615 int match = (cur_cpu_spec->cpu_features & patch->ftr_mask) 616 == patch->ftr_value; 617 if (!match) 618 continue; 619 620 DBG("replacing %s with %s...\n", patch->gen_name, 621 patch->fix_name ? "NONE" : patch->fix_name); 622 623 /* 624 * Patch the 32 bits and 64 bits symbols. Note that we do not 625 * patch the "." symbol on 64 bits. 626 * It would be easy to do, but doesn't seem to be necessary, 627 * patching the OPD symbol is enough. 628 */ 629 vdso_do_func_patch32(v32, v64, patch->gen_name, 630 patch->fix_name); 631 #ifdef CONFIG_PPC64 632 vdso_do_func_patch64(v32, v64, patch->gen_name, 633 patch->fix_name); 634 #endif /* CONFIG_PPC64 */ 635 } 636 637 return 0; 638 } 639 640 641 static __init int vdso_setup(void) 642 { 643 struct lib32_elfinfo v32; 644 struct lib64_elfinfo v64; 645 646 v32.hdr = vdso32_kbase; 647 #ifdef CONFIG_PPC64 648 v64.hdr = vdso64_kbase; 649 #endif 650 if (vdso_do_find_sections(&v32, &v64)) 651 return -1; 652 653 if (vdso_fixup_datapage(&v32, &v64)) 654 return -1; 655 656 if (vdso_fixup_features(&v32, &v64)) 657 return -1; 658 659 if (vdso_fixup_alt_funcs(&v32, &v64)) 660 return -1; 661 662 vdso_setup_trampolines(&v32, &v64); 663 664 return 0; 665 } 666 667 /* 668 * Called from setup_arch to initialize the bitmap of available 669 * syscalls in the systemcfg page 670 */ 671 static void __init vdso_setup_syscall_map(void) 672 { 673 unsigned int i; 674 extern unsigned long *sys_call_table; 675 extern unsigned long sys_ni_syscall; 676 677 678 for (i = 0; i < NR_syscalls; i++) { 679 #ifdef CONFIG_PPC64 680 if (sys_call_table[i*2] != sys_ni_syscall) 681 vdso_data->syscall_map_64[i >> 5] |= 682 0x80000000UL >> (i & 0x1f); 683 if (sys_call_table[i*2+1] != sys_ni_syscall) 684 vdso_data->syscall_map_32[i >> 5] |= 685 0x80000000UL >> (i & 0x1f); 686 #else /* CONFIG_PPC64 */ 687 if (sys_call_table[i] != sys_ni_syscall) 688 vdso_data->syscall_map_32[i >> 5] |= 689 0x80000000UL >> (i & 0x1f); 690 #endif /* CONFIG_PPC64 */ 691 } 692 } 693 694 #ifdef CONFIG_PPC64 695 int vdso_getcpu_init(void) 696 { 697 unsigned long cpu, node, val; 698 699 /* 700 * SPRG_VDSO contains the CPU in the bottom 16 bits and the NUMA node 701 * in the next 16 bits. The VDSO uses this to implement getcpu(). 702 */ 703 cpu = get_cpu(); 704 WARN_ON_ONCE(cpu > 0xffff); 705 706 node = cpu_to_node(cpu); 707 WARN_ON_ONCE(node > 0xffff); 708 709 val = (cpu & 0xfff) | ((node & 0xffff) << 16); 710 mtspr(SPRN_SPRG_VDSO_WRITE, val); 711 get_paca()->sprg_vdso = val; 712 713 put_cpu(); 714 715 return 0; 716 } 717 /* We need to call this before SMP init */ 718 early_initcall(vdso_getcpu_init); 719 #endif 720 721 static int __init vdso_init(void) 722 { 723 int i; 724 725 #ifdef CONFIG_PPC64 726 /* 727 * Fill up the "systemcfg" stuff for backward compatibility 728 */ 729 strcpy((char *)vdso_data->eye_catcher, "SYSTEMCFG:PPC64"); 730 vdso_data->version.major = SYSTEMCFG_MAJOR; 731 vdso_data->version.minor = SYSTEMCFG_MINOR; 732 vdso_data->processor = mfspr(SPRN_PVR); 733 /* 734 * Fake the old platform number for pSeries and add 735 * in LPAR bit if necessary 736 */ 737 vdso_data->platform = 0x100; 738 if (firmware_has_feature(FW_FEATURE_LPAR)) 739 vdso_data->platform |= 1; 740 vdso_data->physicalMemorySize = memblock_phys_mem_size(); 741 vdso_data->dcache_size = ppc64_caches.l1d.size; 742 vdso_data->dcache_line_size = ppc64_caches.l1d.line_size; 743 vdso_data->icache_size = ppc64_caches.l1i.size; 744 vdso_data->icache_line_size = ppc64_caches.l1i.line_size; 745 vdso_data->dcache_block_size = ppc64_caches.l1d.block_size; 746 vdso_data->icache_block_size = ppc64_caches.l1i.block_size; 747 vdso_data->dcache_log_block_size = ppc64_caches.l1d.log_block_size; 748 vdso_data->icache_log_block_size = ppc64_caches.l1i.log_block_size; 749 750 /* 751 * Calculate the size of the 64 bits vDSO 752 */ 753 vdso64_pages = (&vdso64_end - &vdso64_start) >> PAGE_SHIFT; 754 DBG("vdso64_kbase: %p, 0x%x pages\n", vdso64_kbase, vdso64_pages); 755 #else 756 vdso_data->dcache_block_size = L1_CACHE_BYTES; 757 vdso_data->dcache_log_block_size = L1_CACHE_SHIFT; 758 vdso_data->icache_block_size = L1_CACHE_BYTES; 759 vdso_data->icache_log_block_size = L1_CACHE_SHIFT; 760 #endif /* CONFIG_PPC64 */ 761 762 763 #ifdef CONFIG_VDSO32 764 vdso32_kbase = &vdso32_start; 765 766 /* 767 * Calculate the size of the 32 bits vDSO 768 */ 769 vdso32_pages = (&vdso32_end - &vdso32_start) >> PAGE_SHIFT; 770 DBG("vdso32_kbase: %p, 0x%x pages\n", vdso32_kbase, vdso32_pages); 771 #endif 772 773 774 /* 775 * Setup the syscall map in the vDOS 776 */ 777 vdso_setup_syscall_map(); 778 779 /* 780 * Initialize the vDSO images in memory, that is do necessary 781 * fixups of vDSO symbols, locate trampolines, etc... 782 */ 783 if (vdso_setup()) { 784 printk(KERN_ERR "vDSO setup failure, not enabled !\n"); 785 vdso32_pages = 0; 786 #ifdef CONFIG_PPC64 787 vdso64_pages = 0; 788 #endif 789 return 0; 790 } 791 792 #ifdef CONFIG_VDSO32 793 /* Make sure pages are in the correct state */ 794 vdso32_pagelist = kzalloc(sizeof(struct page *) * (vdso32_pages + 2), 795 GFP_KERNEL); 796 BUG_ON(vdso32_pagelist == NULL); 797 for (i = 0; i < vdso32_pages; i++) { 798 struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE); 799 ClearPageReserved(pg); 800 get_page(pg); 801 vdso32_pagelist[i] = pg; 802 } 803 vdso32_pagelist[i++] = virt_to_page(vdso_data); 804 vdso32_pagelist[i] = NULL; 805 #endif 806 807 #ifdef CONFIG_PPC64 808 vdso64_pagelist = kzalloc(sizeof(struct page *) * (vdso64_pages + 2), 809 GFP_KERNEL); 810 BUG_ON(vdso64_pagelist == NULL); 811 for (i = 0; i < vdso64_pages; i++) { 812 struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE); 813 ClearPageReserved(pg); 814 get_page(pg); 815 vdso64_pagelist[i] = pg; 816 } 817 vdso64_pagelist[i++] = virt_to_page(vdso_data); 818 vdso64_pagelist[i] = NULL; 819 #endif /* CONFIG_PPC64 */ 820 821 get_page(virt_to_page(vdso_data)); 822 823 smp_wmb(); 824 vdso_ready = 1; 825 826 return 0; 827 } 828 arch_initcall(vdso_init); 829