/*  Kernel module help for PPC64.
    Copyright (C) 2001, 2003 Rusty Russell IBM Corporation.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/
#include <linux/module.h>
#include <linux/elf.h>
#include <linux/moduleloader.h>
#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/bug.h>
#include <linux/uaccess.h>
#include <asm/module.h>
#include <asm/firmware.h>
#include <asm/code-patching.h>
#include <linux/sort.h>
#include <asm/setup.h>

/* FIXME: We don't do .init separately.  To do this, we'd need to have
   a separate r2 value in the init and core section, and stub between
   them, too.

   Using a magic allocator which places modules within 32MB solves
   this, and makes other things simpler.  Anton?
   --RR. */
#if 0
#define DEBUGP printk
#else
#define DEBUGP(fmt, ...)
#endif

#if defined(_CALL_ELF) && _CALL_ELF == 2
#define R2_STACK_OFFSET 24

/* An address is simply the address of the function. */
typedef unsigned long func_desc_t;

static func_desc_t func_desc(unsigned long addr)
{
	return addr;
}
static unsigned long func_addr(unsigned long addr)
{
	return addr;
}
static unsigned long stub_func_addr(func_desc_t func)
{
	return func;
}

/* PowerPC64 specific values for the Elf64_Sym st_other field. */
#define STO_PPC64_LOCAL_BIT	5
#define STO_PPC64_LOCAL_MASK	(7 << STO_PPC64_LOCAL_BIT)
#define PPC64_LOCAL_ENTRY_OFFSET(other)					\
 (((1 << (((other) & STO_PPC64_LOCAL_MASK) >> STO_PPC64_LOCAL_BIT)) >> 2) << 2)

static unsigned int local_entry_offset(const Elf64_Sym *sym)
{
	/* sym->st_other indicates offset to local entry point
	 * (otherwise it will assume r12 is the address of the start
	 * of function and try to derive r2 from it). */
	return PPC64_LOCAL_ENTRY_OFFSET(sym->st_other);
}
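/*
 * Example: the usual ELFv2 global entry prologue is two instructions
 * (addis r2,r12,...; addi r2,r2,...), so such a symbol carries 3 in the
 * three-bit st_other field, giving a local entry offset of
 * ((1 << 3) >> 2) << 2 = 8 bytes.  Field values 0-7 map to offsets
 * 0, 0, 4, 8, 16, 32, 64 and 128 bytes respectively.
 */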
#else
#define R2_STACK_OFFSET 40

/* An address is the address of the OPD entry, which contains the address
   of the function. */
typedef struct ppc64_opd_entry func_desc_t;

static func_desc_t func_desc(unsigned long addr)
{
	return *(struct ppc64_opd_entry *)addr;
}
static unsigned long func_addr(unsigned long addr)
{
	return func_desc(addr).funcaddr;
}
static unsigned long stub_func_addr(func_desc_t func)
{
	return func.funcaddr;
}
static unsigned int local_entry_offset(const Elf64_Sym *sym)
{
	return 0;
}
#endif

/* Like PPC32, we need little trampolines to do > 24-bit jumps (into
   the kernel itself).  But on PPC64, these need to be used for every
   jump, actually, to reset r2 (TOC+0x8000). */
struct ppc64_stub_entry
{
	/* 28 byte jump instruction sequence (7 instructions). We only
	 * need 6 instructions on ABIv2 but we always allocate 7 so
	 * we don't have to modify the trampoline load instruction. */
	u32 jump[7];

	u32 unused;

	/* Data for the above code */
	func_desc_t funcdata;
};
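/*
 * Resulting layout: the seven instructions occupy bytes 0-27, the unused
 * word bytes 28-31, and funcdata starts at byte 32.  That is why the stub
 * code below loads the target address with "ld r12,32(r11)" and, on ELFv1,
 * the new TOC value with "ld r2,40(r11)", and why module_trampoline_target()
 * looks 32 bytes into the stub.
 */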
/*
 * PPC64 uses 24 bit jumps, but we need to jump into other modules or
 * the kernel which may be further.  So we jump to a stub.
 *
 * For ELFv1 we need to use this to set up the new r2 value (aka TOC
 * pointer).  For ELFv2 it's the callee's responsibility to set up the
 * new r2, but for both we need to save the old r2.
 *
 * We could simply patch the new r2 value and function pointer into
 * the stub, but it's significantly shorter to put these values at the
 * end of the stub code, and patch the stub address (32-bits relative
 * to the TOC ptr, r2) into the stub.
 */

static u32 ppc64_stub_insns[] = {
	0x3d620000,			/* addis r11,r2, <high> */
	0x396b0000,			/* addi r11,r11, <low> */
	/* Save current r2 value in magic place on the stack. */
	0xf8410000|R2_STACK_OFFSET,	/* std r2,R2_STACK_OFFSET(r1) */
	0xe98b0020,			/* ld r12,32(r11) */
#if !defined(_CALL_ELF) || _CALL_ELF != 2
	/* Set up new r2 from function descriptor */
	0xe84b0028,			/* ld r2,40(r11) */
#endif
	0x7d8903a6,			/* mtctr r12 */
	0x4e800420			/* bctr */
};

#ifdef CONFIG_DYNAMIC_FTRACE

static u32 ppc64_stub_mask[] = {
	0xffff0000,
	0xffff0000,
	0xffffffff,
	0xffffffff,
#if !defined(_CALL_ELF) || _CALL_ELF != 2
	0xffffffff,
#endif
	0xffffffff,
	0xffffffff
};

bool is_module_trampoline(u32 *p)
{
	unsigned int i;
	u32 insns[ARRAY_SIZE(ppc64_stub_insns)];

	BUILD_BUG_ON(sizeof(ppc64_stub_insns) != sizeof(ppc64_stub_mask));

	if (probe_kernel_read(insns, p, sizeof(insns)))
		return false;

	for (i = 0; i < ARRAY_SIZE(ppc64_stub_insns); i++) {
		u32 insna = insns[i];
		u32 insnb = ppc64_stub_insns[i];
		u32 mask = ppc64_stub_mask[i];

		if ((insna & mask) != (insnb & mask))
			return false;
	}

	return true;
}

int module_trampoline_target(struct module *mod, u32 *trampoline,
			     unsigned long *target)
{
	u32 buf[2];
	u16 upper, lower;
	long offset;
	void *toc_entry;

	if (probe_kernel_read(buf, trampoline, sizeof(buf)))
		return -EFAULT;

	upper = buf[0] & 0xffff;
	lower = buf[1] & 0xffff;

	/* perform the addis/addi, both signed */
	offset = ((short)upper << 16) + (short)lower;

	/*
	 * Now get the address this trampoline jumps to.  This
	 * is always 32 bytes into our trampoline stub.
	 */
	toc_entry = (void *)mod->arch.toc + offset + 32;

	if (probe_kernel_read(target, toc_entry, sizeof(*target)))
		return -EFAULT;

	return 0;
}

#endif

/* Count how many different 24-bit relocations (different symbol,
   different addend). */
static unsigned int count_relocs(const Elf64_Rela *rela, unsigned int num)
{
	unsigned int i, r_info, r_addend, _count_relocs;

	/* FIXME: Only count external ones --RR */
	_count_relocs = 0;
	r_info = 0;
	r_addend = 0;
	for (i = 0; i < num; i++)
		/* Only count 24-bit relocs, others don't need stubs */
		if (ELF64_R_TYPE(rela[i].r_info) == R_PPC_REL24 &&
		    (r_info != ELF64_R_SYM(rela[i].r_info) ||
		     r_addend != rela[i].r_addend)) {
			_count_relocs++;
			r_info = ELF64_R_SYM(rela[i].r_info);
			r_addend = rela[i].r_addend;
		}

	return _count_relocs;
}

static int relacmp(const void *_x, const void *_y)
{
	const Elf64_Rela *x, *y;

	y = (Elf64_Rela *)_x;
	x = (Elf64_Rela *)_y;

	/* Compare the entire r_info (as opposed to ELF64_R_SYM(r_info) only) to
	 * make the comparison cheaper/faster. It won't affect the sorting or
	 * the counting algorithms' performance.
	 */
	if (x->r_info < y->r_info)
		return -1;
	else if (x->r_info > y->r_info)
		return 1;
	else if (x->r_addend < y->r_addend)
		return -1;
	else if (x->r_addend > y->r_addend)
		return 1;
	else
		return 0;
}

static void relaswap(void *_x, void *_y, int size)
{
	uint64_t *x, *y, tmp;
	int i;

	y = (uint64_t *)_x;
	x = (uint64_t *)_y;

	for (i = 0; i < sizeof(Elf64_Rela) / sizeof(uint64_t); i++) {
		tmp = x[i];
		x[i] = y[i];
		y[i] = tmp;
	}
}

/* Get size of potential trampolines required. */
static unsigned long get_stubs_size(const Elf64_Ehdr *hdr,
				    const Elf64_Shdr *sechdrs)
{
	/* One extra reloc so it's always 0-funcaddr terminated */
	unsigned long relocs = 1;
	unsigned i;

	/* Every relocated section... */
	for (i = 1; i < hdr->e_shnum; i++) {
		if (sechdrs[i].sh_type == SHT_RELA) {
			DEBUGP("Found relocations in section %u\n", i);
			DEBUGP("Ptr: %p.  Number: %lu\n",
			       (void *)sechdrs[i].sh_addr,
			       sechdrs[i].sh_size / sizeof(Elf64_Rela));

			/* Sort the relocation information based on a symbol and
			 * addend key. This is a stable O(n*log n) complexity
			 * algorithm but it will reduce the complexity of
			 * count_relocs() to linear complexity O(n).
			 */
			sort((void *)sechdrs[i].sh_addr,
			     sechdrs[i].sh_size / sizeof(Elf64_Rela),
			     sizeof(Elf64_Rela), relacmp, relaswap);

			relocs += count_relocs((void *)sechdrs[i].sh_addr,
					       sechdrs[i].sh_size
					       / sizeof(Elf64_Rela));
		}
	}

#ifdef CONFIG_DYNAMIC_FTRACE
	/* make the trampoline to the ftrace_caller */
	relocs++;
#endif

	DEBUGP("Looks like a total of %lu stubs, max\n", relocs);
	return relocs * sizeof(struct ppc64_stub_entry);
}

/* Still needed for ELFv2, for .TOC. */
static void dedotify_versions(struct modversion_info *vers,
			      unsigned long size)
{
	struct modversion_info *end;

	for (end = (void *)vers + size; vers < end; vers++)
		if (vers->name[0] == '.')
			memmove(vers->name, vers->name+1, strlen(vers->name));
}

/* Undefined symbols which refer to .funcname, hack to funcname (or .TOC.) */
static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab)
{
	unsigned int i;

	for (i = 1; i < numsyms; i++) {
		if (syms[i].st_shndx == SHN_UNDEF) {
			char *name = strtab + syms[i].st_name;
			if (name[0] == '.')
				memmove(name, name+1, strlen(name));
		}
	}
}
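/*
 * Note: the module's undefined ".TOC." symbol has already had its leading
 * dot stripped by dedotify() by the time the relocations are applied, which
 * is why find_dot_toc() below matches the name "TOC.".
 */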
static Elf64_Sym *find_dot_toc(Elf64_Shdr *sechdrs,
			       const char *strtab,
			       unsigned int symindex)
{
	unsigned int i, numsyms;
	Elf64_Sym *syms;

	syms = (Elf64_Sym *)sechdrs[symindex].sh_addr;
	numsyms = sechdrs[symindex].sh_size / sizeof(Elf64_Sym);

	for (i = 1; i < numsyms; i++) {
		if (syms[i].st_shndx == SHN_UNDEF
		    && strcmp(strtab + syms[i].st_name, "TOC.") == 0)
			return &syms[i];
	}
	return NULL;
}

int module_frob_arch_sections(Elf64_Ehdr *hdr,
			      Elf64_Shdr *sechdrs,
			      char *secstrings,
			      struct module *me)
{
	unsigned int i;

	/* Find .toc and .stubs sections, symtab and strtab */
	for (i = 1; i < hdr->e_shnum; i++) {
		char *p;
		if (strcmp(secstrings + sechdrs[i].sh_name, ".stubs") == 0)
			me->arch.stubs_section = i;
		else if (strcmp(secstrings + sechdrs[i].sh_name, ".toc") == 0)
			me->arch.toc_section = i;
		else if (strcmp(secstrings + sechdrs[i].sh_name, "__versions") == 0)
			dedotify_versions((void *)hdr + sechdrs[i].sh_offset,
					  sechdrs[i].sh_size);

		/* We don't handle .init for the moment: rename to _init */
		while ((p = strstr(secstrings + sechdrs[i].sh_name, ".init")))
			p[0] = '_';

		if (sechdrs[i].sh_type == SHT_SYMTAB)
			dedotify((void *)hdr + sechdrs[i].sh_offset,
				 sechdrs[i].sh_size / sizeof(Elf64_Sym),
				 (void *)hdr
				 + sechdrs[sechdrs[i].sh_link].sh_offset);
	}

	if (!me->arch.stubs_section) {
		printk("%s: doesn't contain .stubs.\n", me->name);
		return -ENOEXEC;
	}

	/* If we don't have a .toc, just use .stubs.  We need to set r2
	   to some reasonable value in case the module calls out to
	   other functions via a stub, or if a function pointer escapes
	   the module by some means. */
	if (!me->arch.toc_section)
		me->arch.toc_section = me->arch.stubs_section;

	/* Override the stubs size */
	sechdrs[me->arch.stubs_section].sh_size = get_stubs_size(hdr, sechdrs);
	return 0;
}

/* r2 is the TOC pointer: it actually points 0x8000 into the TOC (this
   gives the TOC value its maximum span in an instruction which uses a
   signed 16-bit offset). */
static inline unsigned long my_r2(Elf64_Shdr *sechdrs, struct module *me)
{
	return sechdrs[me->arch.toc_section].sh_addr + 0x8000;
}

/* Both low and high 16 bits are added as SIGNED additions, so if the low
   16 bits have the high bit set, the high 16 bits must be adjusted.  These
   macros do that (stolen from binutils). */
#define PPC_LO(v) ((v) & 0xffff)
#define PPC_HI(v) (((v) >> 16) & 0xffff)
#define PPC_HA(v) PPC_HI ((v) + 0x8000)
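/*
 * Example: for v = 0x12348001, PPC_LO(v) = 0x8001, which an addi will
 * sign-extend to -0x7fff, so PPC_HA(v) = 0x1235 rather than PPC_HI(v) = 0x1234:
 * (0x1235 << 16) + (short)0x8001 == 0x12348001.
 */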
/* Patch stub to reference function and correct r2 value. */
static inline int create_stub(Elf64_Shdr *sechdrs,
			      struct ppc64_stub_entry *entry,
			      unsigned long addr,
			      struct module *me)
{
	long reladdr;

	memcpy(entry->jump, ppc64_stub_insns, sizeof(ppc64_stub_insns));

	/* Stub uses address relative to r2. */
	reladdr = (unsigned long)entry - my_r2(sechdrs, me);
	if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
		printk("%s: Address %p of stub out of range of %p.\n",
		       me->name, (void *)reladdr,
		       (void *)my_r2(sechdrs, me));
		return 0;
	}
	DEBUGP("Stub %p gets data from reladdr %li\n", entry, reladdr);

	entry->jump[0] |= PPC_HA(reladdr);
	entry->jump[1] |= PPC_LO(reladdr);
	entry->funcdata = func_desc(addr);
	return 1;
}

/* Create stub to jump to function described in this OPD/ptr: we need the
   stub to set up the TOC ptr (r2) for the function. */
static unsigned long stub_for_addr(Elf64_Shdr *sechdrs,
				   unsigned long addr,
				   struct module *me)
{
	struct ppc64_stub_entry *stubs;
	unsigned int i, num_stubs;

	num_stubs = sechdrs[me->arch.stubs_section].sh_size / sizeof(*stubs);

	/* Find this stub, or if that fails, the next avail. entry */
	stubs = (void *)sechdrs[me->arch.stubs_section].sh_addr;
	for (i = 0; stub_func_addr(stubs[i].funcdata); i++) {
		BUG_ON(i >= num_stubs);

		if (stub_func_addr(stubs[i].funcdata) == func_addr(addr))
			return (unsigned long)&stubs[i];
	}

	if (!create_stub(sechdrs, &stubs[i], addr, me))
		return 0;

	return (unsigned long)&stubs[i];
}
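/*
 * A call redirected through a stub clobbers r2, so the caller must restore
 * it afterwards.  The compiler normally leaves a nop in the slot after an
 * external call ("bl foo; nop"); restore_r2() below patches that nop into
 * "ld r2,R2_STACK_OFFSET(r1)", matching the save done by the stub itself.
 */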
/* We expect a noop next: if it is, replace it with an instruction to
   restore r2. */
static int restore_r2(u32 *instruction, struct module *me)
{
	if (*instruction != PPC_INST_NOP) {
		printk("%s: Expect noop after relocate, got %08x\n",
		       me->name, *instruction);
		return 0;
	}
	/* ld r2,R2_STACK_OFFSET(r1) */
	*instruction = 0xe8410000 | R2_STACK_OFFSET;
	return 1;
}

int apply_relocate_add(Elf64_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i;
	Elf64_Rela *rela = (void *)sechdrs[relsec].sh_addr;
	Elf64_Sym *sym;
	unsigned long *location;
	unsigned long value;

	DEBUGP("Applying ADD relocate section %u to %u\n", relsec,
	       sechdrs[relsec].sh_info);

	/* First time we're called, we can fix up .TOC. */
	if (!me->arch.toc_fixed) {
		sym = find_dot_toc(sechdrs, strtab, symindex);
		/* It's theoretically possible that a module doesn't want a
		 * .TOC. so don't fail it just for that. */
		if (sym)
			sym->st_value = my_r2(sechdrs, me);
		me->arch.toc_fixed = true;
	}

	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) {
		/* This is where to make the change */
		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rela[i].r_offset;
		/* This is the symbol it is referring to */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rela[i].r_info);

		DEBUGP("RELOC at %p: %li-type as %s (%lu) + %li\n",
		       location, (long)ELF64_R_TYPE(rela[i].r_info),
		       strtab + sym->st_name, (unsigned long)sym->st_value,
		       (long)rela[i].r_addend);

		/* `Everything is relative'. */
		value = sym->st_value + rela[i].r_addend;

		switch (ELF64_R_TYPE(rela[i].r_info)) {
		case R_PPC64_ADDR32:
			/* Simply set it */
			*(u32 *)location = value;
			break;

		case R_PPC64_ADDR64:
			/* Simply set it */
			*(unsigned long *)location = value;
			break;

		case R_PPC64_TOC:
			*(unsigned long *)location = my_r2(sechdrs, me);
			break;

		case R_PPC64_TOC16:
			/* Subtract TOC pointer */
			value -= my_r2(sechdrs, me);
			if (value + 0x8000 > 0xffff) {
				printk("%s: bad TOC16 relocation (%lu)\n",
				       me->name, value);
				return -ENOEXEC;
			}
			*((uint16_t *) location)
				= (*((uint16_t *) location) & ~0xffff)
				| (value & 0xffff);
			break;

		case R_PPC64_TOC16_LO:
			/* Subtract TOC pointer */
			value -= my_r2(sechdrs, me);
			*((uint16_t *) location)
				= (*((uint16_t *) location) & ~0xffff)
				| (value & 0xffff);
			break;

		case R_PPC64_TOC16_DS:
			/* Subtract TOC pointer */
			value -= my_r2(sechdrs, me);
			if ((value & 3) != 0 || value + 0x8000 > 0xffff) {
				printk("%s: bad TOC16_DS relocation (%lu)\n",
				       me->name, value);
				return -ENOEXEC;
			}
			*((uint16_t *) location)
				= (*((uint16_t *) location) & ~0xfffc)
				| (value & 0xfffc);
			break;

		case R_PPC64_TOC16_LO_DS:
			/* Subtract TOC pointer */
			value -= my_r2(sechdrs, me);
			if ((value & 3) != 0) {
				printk("%s: bad TOC16_LO_DS relocation (%lu)\n",
				       me->name, value);
				return -ENOEXEC;
			}
			*((uint16_t *) location)
				= (*((uint16_t *) location) & ~0xfffc)
				| (value & 0xfffc);
			break;

		case R_PPC64_TOC16_HA:
			/* Subtract TOC pointer */
			value -= my_r2(sechdrs, me);
			value = ((value + 0x8000) >> 16);
			*((uint16_t *) location)
				= (*((uint16_t *) location) & ~0xffff)
				| (value & 0xffff);
			break;

		case R_PPC_REL24:
			/* FIXME: Handle weak symbols here --RR */
			if (sym->st_shndx == SHN_UNDEF) {
				/* External: go via stub */
				value = stub_for_addr(sechdrs, value, me);
				if (!value)
					return -ENOENT;
				if (!restore_r2((u32 *)location + 1, me))
					return -ENOEXEC;
			} else
				value += local_entry_offset(sym);

			/* Convert value to relative */
			value -= (unsigned long)location;
			if (value + 0x2000000 > 0x3ffffff || (value & 3) != 0) {
				printk("%s: REL24 %li out of range!\n",
				       me->name, (long int)value);
				return -ENOEXEC;
			}

			/* Only replace bits 2 through 26 */
			*(uint32_t *)location
				= (*(uint32_t *)location & ~0x03fffffc)
				| (value & 0x03fffffc);
			break;

		case R_PPC64_REL64:
			/* 64 bits relative (used by features fixups) */
			*location = value - (unsigned long)location;
			break;

		case R_PPC64_TOCSAVE:
			/*
			 * Marker reloc indicates we don't have to save r2.
			 * That would only save us one instruction, so ignore
			 * it.
			 */
			break;

		case R_PPC64_REL16_HA:
			/* Subtract location pointer */
			value -= (unsigned long)location;
			value = ((value + 0x8000) >> 16);
			*((uint16_t *) location)
				= (*((uint16_t *) location) & ~0xffff)
				| (value & 0xffff);
			break;

		case R_PPC64_REL16_LO:
			/* Subtract location pointer */
			value -= (unsigned long)location;
			*((uint16_t *) location)
				= (*((uint16_t *) location) & ~0xffff)
				| (value & 0xffff);
			break;

		default:
			printk("%s: Unknown ADD relocation: %lu\n",
			       me->name,
			       (unsigned long)ELF64_R_TYPE(rela[i].r_info));
			return -ENOEXEC;
		}
	}
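	/*
	 * Record the TOC pointer and build a stub for ftrace_caller while we
	 * still have the section headers: module_trampoline_target() above
	 * decodes stubs relative to me->arch.toc, and the dynamic ftrace code
	 * patches mcount call sites to branch via me->arch.tramp.
	 */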
#ifdef CONFIG_DYNAMIC_FTRACE
	me->arch.toc = my_r2(sechdrs, me);
	me->arch.tramp = stub_for_addr(sechdrs,
				       (unsigned long)ftrace_caller,
				       me);
#endif

	return 0;
}