/* Kernel module help for PPC64.
   Copyright (C) 2001, 2003 Rusty Russell IBM Corporation.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/elf.h>
#include <linux/moduleloader.h>
#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/bug.h>
#include <linux/uaccess.h>
#include <asm/module.h>
#include <asm/firmware.h>
#include <asm/code-patching.h>
#include <linux/sort.h>
#include <asm/setup.h>
#include <asm/sections.h>

/* FIXME: We don't do .init separately.  To do this, we'd need to have
   a separate r2 value in the init and core section, and stub between
   them, too.

   Using a magic allocator which places modules within 32MB solves
   this, and makes other things simpler.  Anton?
   --RR. */

#ifdef PPC64_ELF_ABI_v2

/* An address is simply the address of the function. */
typedef unsigned long func_desc_t;

static func_desc_t func_desc(unsigned long addr)
{
	return addr;
}
static unsigned long func_addr(unsigned long addr)
{
	return addr;
}
static unsigned long stub_func_addr(func_desc_t func)
{
	return func;
}

/* PowerPC64 specific values for the Elf64_Sym st_other field. */
#define STO_PPC64_LOCAL_BIT	5
#define STO_PPC64_LOCAL_MASK	(7 << STO_PPC64_LOCAL_BIT)
#define PPC64_LOCAL_ENTRY_OFFSET(other)					\
	(((1 << (((other) & STO_PPC64_LOCAL_MASK) >> STO_PPC64_LOCAL_BIT)) >> 2) << 2)

static unsigned int local_entry_offset(const Elf64_Sym *sym)
{
	/* sym->st_other indicates offset to local entry point
	 * (otherwise it will assume r12 is the address of the start
	 * of function and try to derive r2 from it). */
	return PPC64_LOCAL_ENTRY_OFFSET(sym->st_other);
}
#else

/* An address is address of the OPD entry, which contains address of fn. */
typedef struct ppc64_opd_entry func_desc_t;

static func_desc_t func_desc(unsigned long addr)
{
	return *(struct ppc64_opd_entry *)addr;
}
static unsigned long func_addr(unsigned long addr)
{
	return func_desc(addr).funcaddr;
}
static unsigned long stub_func_addr(func_desc_t func)
{
	return func.funcaddr;
}
static unsigned int local_entry_offset(const Elf64_Sym *sym)
{
	return 0;
}
#endif

#define STUB_MAGIC 0x73747562 /* stub */

/* Like PPC32, we need little trampolines to do > 24-bit jumps (into
   the kernel itself).  But on PPC64, these need to be used for every
   jump, actually, to reset r2 (TOC+0x8000). */
struct ppc64_stub_entry
{
	/* 28 byte jump instruction sequence (7 instructions). We only
	 * need 6 instructions on ABIv2 but we always allocate 7 so
	 * we don't have to modify the trampoline load instruction.
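	 *
	 * For reference, the resulting layout is: jump[] occupies bytes
	 * 0-27, magic bytes 28-31, and funcdata starts at byte 32, which
	 * is why the trampoline loads the target with ld r12,32(r11)
	 * (and, for ELFv1, the new TOC with ld r2,40(r11) from the
	 * function descriptor).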
	 */
	u32 jump[7];
	/* Used by ftrace to identify stubs */
	u32 magic;
	/* Data for the above code */
	func_desc_t funcdata;
};

/*
 * PPC64 uses 24 bit jumps, but we need to jump into other modules or
 * the kernel which may be further.  So we jump to a stub.
 *
 * For ELFv1 we need to use this to set up the new r2 value (aka TOC
 * pointer).  For ELFv2 it's the callee's responsibility to set up the
 * new r2, but for both we need to save the old r2.
 *
 * We could simply patch the new r2 value and function pointer into
 * the stub, but it's significantly shorter to put these values at the
 * end of the stub code, and patch the stub address (32-bits relative
 * to the TOC ptr, r2) into the stub.
 */

static u32 ppc64_stub_insns[] = {
	0x3d620000,			/* addis r11,r2, <high> */
	0x396b0000,			/* addi  r11,r11, <low> */
	/* Save current r2 value in magic place on the stack. */
	0xf8410000|R2_STACK_OFFSET,	/* std   r2,R2_STACK_OFFSET(r1) */
	0xe98b0020,			/* ld    r12,32(r11) */
#ifdef PPC64_ELF_ABI_v1
	/* Set up new r2 from function descriptor */
	0xe84b0028,			/* ld    r2,40(r11) */
#endif
	0x7d8903a6,			/* mtctr r12 */
	0x4e800420			/* bctr */
};

#ifdef CONFIG_DYNAMIC_FTRACE
int module_trampoline_target(struct module *mod, unsigned long addr,
			     unsigned long *target)
{
	struct ppc64_stub_entry *stub;
	func_desc_t funcdata;
	u32 magic;

	if (!within_module_core(addr, mod)) {
		pr_err("%s: stub %lx not in module %s\n", __func__, addr, mod->name);
		return -EFAULT;
	}

	stub = (struct ppc64_stub_entry *)addr;

	if (probe_kernel_read(&magic, &stub->magic, sizeof(magic))) {
		pr_err("%s: fault reading magic for stub %lx for %s\n", __func__, addr, mod->name);
		return -EFAULT;
	}

	if (magic != STUB_MAGIC) {
		pr_err("%s: bad magic for stub %lx for %s\n", __func__, addr, mod->name);
		return -EFAULT;
	}

	if (probe_kernel_read(&funcdata, &stub->funcdata, sizeof(funcdata))) {
		pr_err("%s: fault reading funcdata for stub %lx for %s\n", __func__, addr, mod->name);
		return -EFAULT;
	}

	*target = stub_func_addr(funcdata);

	return 0;
}
#endif

/* Count how many different 24-bit relocations (different symbol,
   different addend) there are. */
static unsigned int count_relocs(const Elf64_Rela *rela, unsigned int num)
{
	unsigned int i, r_info, r_addend, _count_relocs;

	/* FIXME: Only count external ones --RR */
	_count_relocs = 0;
	r_info = 0;
	r_addend = 0;
	for (i = 0; i < num; i++)
		/* Only count 24-bit relocs, others don't need stubs */
		if (ELF64_R_TYPE(rela[i].r_info) == R_PPC_REL24 &&
		    (r_info != ELF64_R_SYM(rela[i].r_info) ||
		     r_addend != rela[i].r_addend)) {
			_count_relocs++;
			r_info = ELF64_R_SYM(rela[i].r_info);
			r_addend = rela[i].r_addend;
		}

	return _count_relocs;
}

static int relacmp(const void *_x, const void *_y)
{
	const Elf64_Rela *x, *y;

	y = (Elf64_Rela *)_x;
	x = (Elf64_Rela *)_y;

	/* Compare the entire r_info (as opposed to ELF64_R_SYM(r_info) only) to
	 * make the comparison cheaper/faster.
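	 * Since the symbol index lives in the high 32 bits of r_info, a
	 * single 64-bit compare still groups entries for the same symbol
	 * together, which is all count_relocs() relies on.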
	 * It won't affect the sorting or the counting algorithms' performance.
	 */
	if (x->r_info < y->r_info)
		return -1;
	else if (x->r_info > y->r_info)
		return 1;
	else if (x->r_addend < y->r_addend)
		return -1;
	else if (x->r_addend > y->r_addend)
		return 1;
	else
		return 0;
}

static void relaswap(void *_x, void *_y, int size)
{
	uint64_t *x, *y, tmp;
	int i;

	y = (uint64_t *)_x;
	x = (uint64_t *)_y;

	for (i = 0; i < sizeof(Elf64_Rela) / sizeof(uint64_t); i++) {
		tmp = x[i];
		x[i] = y[i];
		y[i] = tmp;
	}
}

/* Get size of potential trampolines required. */
static unsigned long get_stubs_size(const Elf64_Ehdr *hdr,
				    const Elf64_Shdr *sechdrs)
{
	/* One extra reloc so it's always 0-funcaddr terminated */
	unsigned long relocs = 1;
	unsigned i;

	/* Every relocated section... */
	for (i = 1; i < hdr->e_shnum; i++) {
		if (sechdrs[i].sh_type == SHT_RELA) {
			pr_debug("Found relocations in section %u\n", i);
			pr_debug("Ptr: %p.  Number: %Lu\n",
				 (void *)sechdrs[i].sh_addr,
				 sechdrs[i].sh_size / sizeof(Elf64_Rela));

			/* Sort the relocation information based on a symbol and
			 * addend key. This is a stable O(n*log n) complexity
			 * algorithm but it will reduce the complexity of
			 * count_relocs() to linear complexity O(n)
			 */
			sort((void *)sechdrs[i].sh_addr,
			     sechdrs[i].sh_size / sizeof(Elf64_Rela),
			     sizeof(Elf64_Rela), relacmp, relaswap);

			relocs += count_relocs((void *)sechdrs[i].sh_addr,
					       sechdrs[i].sh_size
					       / sizeof(Elf64_Rela));
		}
	}

#ifdef CONFIG_DYNAMIC_FTRACE
	/* make the trampoline to the ftrace_caller */
	relocs++;
#endif

	pr_debug("Looks like a total of %lu stubs, max\n", relocs);
	return relocs * sizeof(struct ppc64_stub_entry);
}

/* Still needed for ELFv2, for .TOC. */
static void dedotify_versions(struct modversion_info *vers,
			      unsigned long size)
{
	struct modversion_info *end;

	for (end = (void *)vers + size; vers < end; vers++)
		if (vers->name[0] == '.') {
			memmove(vers->name, vers->name+1, strlen(vers->name));
		}
}

/*
 * Undefined symbols which refer to .funcname, hack to funcname. Make .TOC.
 * seem to be defined (value set later).
 */
static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab)
{
	unsigned int i;

	for (i = 1; i < numsyms; i++) {
		if (syms[i].st_shndx == SHN_UNDEF) {
			char *name = strtab + syms[i].st_name;
			if (name[0] == '.') {
				if (strcmp(name+1, "TOC.") == 0)
					syms[i].st_shndx = SHN_ABS;
				syms[i].st_name++;
			}
		}
	}
}

static Elf64_Sym *find_dot_toc(Elf64_Shdr *sechdrs,
			       const char *strtab,
			       unsigned int symindex)
{
	unsigned int i, numsyms;
	Elf64_Sym *syms;

	syms = (Elf64_Sym *)sechdrs[symindex].sh_addr;
	numsyms = sechdrs[symindex].sh_size / sizeof(Elf64_Sym);

	for (i = 1; i < numsyms; i++) {
		if (syms[i].st_shndx == SHN_ABS
		    && strcmp(strtab + syms[i].st_name, "TOC.") == 0)
			return &syms[i];
	}
	return NULL;
}

int module_frob_arch_sections(Elf64_Ehdr *hdr,
			      Elf64_Shdr *sechdrs,
			      char *secstrings,
			      struct module *me)
{
	unsigned int i;

	/* Find .toc and .stubs sections, symtab and strtab */
	for (i = 1; i < hdr->e_shnum; i++) {
		char *p;
		if (strcmp(secstrings + sechdrs[i].sh_name, ".stubs") == 0)
			me->arch.stubs_section = i;
		else if (strcmp(secstrings + sechdrs[i].sh_name, ".toc") == 0)
			me->arch.toc_section = i;
		else if (strcmp(secstrings+sechdrs[i].sh_name,"__versions")==0)
			dedotify_versions((void *)hdr + sechdrs[i].sh_offset,
					  sechdrs[i].sh_size);

		/* We don't handle .init for the moment: rename to _init */
		while ((p = strstr(secstrings + sechdrs[i].sh_name, ".init")))
			p[0] = '_';

		if (sechdrs[i].sh_type == SHT_SYMTAB)
			dedotify((void *)hdr + sechdrs[i].sh_offset,
				 sechdrs[i].sh_size / sizeof(Elf64_Sym),
				 (void *)hdr
				 + sechdrs[sechdrs[i].sh_link].sh_offset);
	}

	if (!me->arch.stubs_section) {
		pr_err("%s: doesn't contain .stubs.\n", me->name);
		return -ENOEXEC;
	}

	/* If we don't have a .toc, just use .stubs.  We need to set r2
	   to some reasonable value in case the module calls out to
	   other functions via a stub, or if a function pointer escapes
	   the module by some means. */
	if (!me->arch.toc_section)
		me->arch.toc_section = me->arch.stubs_section;

	/* Override the stubs size */
	sechdrs[me->arch.stubs_section].sh_size = get_stubs_size(hdr, sechdrs);
	return 0;
}

/* r2 is the TOC pointer: it actually points 0x8000 into the TOC (this
   gives the value maximum span in an instruction which uses a signed
   offset) */
static inline unsigned long my_r2(const Elf64_Shdr *sechdrs, struct module *me)
{
	return sechdrs[me->arch.toc_section].sh_addr + 0x8000;
}

/* Both low and high 16 bits are added as SIGNED additions, so if low
   16 bits has high bit set, high 16 bits must be adjusted.  These
   macros do that (stolen from binutils). */
#define PPC_LO(v) ((v) & 0xffff)
#define PPC_HI(v) (((v) >> 16) & 0xffff)
#define PPC_HA(v) PPC_HI ((v) + 0x8000)

/* Patch stub to reference function and correct r2 value. */
static inline int create_stub(const Elf64_Shdr *sechdrs,
			      struct ppc64_stub_entry *entry,
			      unsigned long addr,
			      struct module *me)
{
	long reladdr;

	memcpy(entry->jump, ppc64_stub_insns, sizeof(ppc64_stub_insns));

	/* Stub uses address relative to r2.
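	 * The 32-bit offset is split across the addis/addi pair with
	 * PPC_HA() and PPC_LO().  Because addi sign-extends its operand,
	 * PPC_HA() pre-adds 0x8000: e.g. a reladdr of 0x12348000 becomes
	 * addis with 0x1235 and addi with 0x8000, i.e.
	 * r2 + 0x12350000 - 0x8000 = r2 + 0x12348000, as intended.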
	 */
	reladdr = (unsigned long)entry - my_r2(sechdrs, me);
	if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
		pr_err("%s: Address %p of stub out of range of %p.\n",
		       me->name, (void *)reladdr, (void *)my_r2(sechdrs, me));
		return 0;
	}
	pr_debug("Stub %p get data from reladdr %li\n", entry, reladdr);

	entry->jump[0] |= PPC_HA(reladdr);
	entry->jump[1] |= PPC_LO(reladdr);
	entry->funcdata = func_desc(addr);
	entry->magic = STUB_MAGIC;

	return 1;
}

/* Create stub to jump to function described in this OPD/ptr: we need the
   stub to set up the TOC ptr (r2) for the function. */
static unsigned long stub_for_addr(const Elf64_Shdr *sechdrs,
				   unsigned long addr,
				   struct module *me)
{
	struct ppc64_stub_entry *stubs;
	unsigned int i, num_stubs;

	num_stubs = sechdrs[me->arch.stubs_section].sh_size / sizeof(*stubs);

	/* Find this stub, or if that fails, the next avail. entry */
	stubs = (void *)sechdrs[me->arch.stubs_section].sh_addr;
	for (i = 0; stub_func_addr(stubs[i].funcdata); i++) {
		BUG_ON(i >= num_stubs);

		if (stub_func_addr(stubs[i].funcdata) == func_addr(addr))
			return (unsigned long)&stubs[i];
	}

	if (!create_stub(sechdrs, &stubs[i], addr, me))
		return 0;

	return (unsigned long)&stubs[i];
}

#ifdef CC_USING_MPROFILE_KERNEL
static bool is_early_mcount_callsite(u32 *instruction)
{
	/*
	 * Check if this is one of the -mprofile-kernel sequences.
	 */
	if (instruction[-1] == PPC_INST_STD_LR &&
	    instruction[-2] == PPC_INST_MFLR)
		return true;

	if (instruction[-1] == PPC_INST_MFLR)
		return true;

	return false;
}

/*
 * In case of _mcount calls, do not save the current callee's TOC (in r2) into
 * the original caller's stack frame. If we did we would clobber the saved TOC
 * value of the original caller.
 */
static void squash_toc_save_inst(const char *name, unsigned long addr)
{
	struct ppc64_stub_entry *stub = (struct ppc64_stub_entry *)addr;

	/* Only for calls to _mcount */
	if (strcmp("_mcount", name) != 0)
		return;

	stub->jump[2] = PPC_INST_NOP;
}
#else
static void squash_toc_save_inst(const char *name, unsigned long addr) { }

/* without -mprofile-kernel, mcount calls are never early */
static bool is_early_mcount_callsite(u32 *instruction)
{
	return false;
}
#endif

/* We expect a noop next: if it is, replace it with instruction to
   restore r2. */
static int restore_r2(u32 *instruction, struct module *me)
{
	if (is_early_mcount_callsite(instruction - 1))
		return 1;

	if (*instruction != PPC_INST_NOP) {
		pr_err("%s: Expect noop after relocate, got %08x\n",
		       me->name, *instruction);
		return 0;
	}
	/* ld r2,R2_STACK_OFFSET(r1) */
	*instruction = PPC_INST_LD_TOC;
	return 1;
}

int apply_relocate_add(Elf64_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i;
	Elf64_Rela *rela = (void *)sechdrs[relsec].sh_addr;
	Elf64_Sym *sym;
	unsigned long *location;
	unsigned long value;

	pr_debug("Applying ADD relocate section %u to %u\n", relsec,
		 sechdrs[relsec].sh_info);

	/* First time we're called, we can fix up .TOC.
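	 * The linker would normally define .TOC.; for a module we point it
	 * at my_r2() so that, for example, the R_PPC64_REL16_HA/_LO pairs
	 * emitted for ELFv2 global entry prologues (addis r2,r12,...;
	 * addi r2,r2,...) end up computing this module's TOC pointer.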
	 */
	if (!me->arch.toc_fixed) {
		sym = find_dot_toc(sechdrs, strtab, symindex);
		/* It's theoretically possible that a module doesn't want a
		 * .TOC. so don't fail it just for that. */
		if (sym)
			sym->st_value = my_r2(sechdrs, me);
		me->arch.toc_fixed = true;
	}

	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) {
		/* This is where to make the change */
		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rela[i].r_offset;
		/* This is the symbol it is referring to */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rela[i].r_info);

		pr_debug("RELOC at %p: %li-type as %s (0x%lx) + %li\n",
			 location, (long)ELF64_R_TYPE(rela[i].r_info),
			 strtab + sym->st_name, (unsigned long)sym->st_value,
			 (long)rela[i].r_addend);

		/* `Everything is relative'. */
		value = sym->st_value + rela[i].r_addend;

		switch (ELF64_R_TYPE(rela[i].r_info)) {
		case R_PPC64_ADDR32:
			/* Simply set it */
			*(u32 *)location = value;
			break;

		case R_PPC64_ADDR64:
			/* Simply set it */
			*(unsigned long *)location = value;
			break;

		case R_PPC64_TOC:
			*(unsigned long *)location = my_r2(sechdrs, me);
			break;

		case R_PPC64_TOC16:
			/* Subtract TOC pointer */
			value -= my_r2(sechdrs, me);
			if (value + 0x8000 > 0xffff) {
				pr_err("%s: bad TOC16 relocation (0x%lx)\n",
				       me->name, value);
				return -ENOEXEC;
			}
			*((uint16_t *) location)
				= (*((uint16_t *) location) & ~0xffff)
				| (value & 0xffff);
			break;

		case R_PPC64_TOC16_LO:
			/* Subtract TOC pointer */
			value -= my_r2(sechdrs, me);
			*((uint16_t *) location)
				= (*((uint16_t *) location) & ~0xffff)
				| (value & 0xffff);
			break;

		case R_PPC64_TOC16_DS:
			/* Subtract TOC pointer */
			value -= my_r2(sechdrs, me);
			if ((value & 3) != 0 || value + 0x8000 > 0xffff) {
				pr_err("%s: bad TOC16_DS relocation (0x%lx)\n",
				       me->name, value);
				return -ENOEXEC;
			}
			*((uint16_t *) location)
				= (*((uint16_t *) location) & ~0xfffc)
				| (value & 0xfffc);
			break;

		case R_PPC64_TOC16_LO_DS:
			/* Subtract TOC pointer */
			value -= my_r2(sechdrs, me);
			if ((value & 3) != 0) {
				pr_err("%s: bad TOC16_LO_DS relocation (0x%lx)\n",
				       me->name, value);
				return -ENOEXEC;
			}
			*((uint16_t *) location)
				= (*((uint16_t *) location) & ~0xfffc)
				| (value & 0xfffc);
			break;

		case R_PPC64_TOC16_HA:
			/* Subtract TOC pointer */
			value -= my_r2(sechdrs, me);
			value = ((value + 0x8000) >> 16);
			*((uint16_t *) location)
				= (*((uint16_t *) location) & ~0xffff)
				| (value & 0xffff);
			break;

		case R_PPC_REL24:
			/* FIXME: Handle weak symbols here --RR */
			if (sym->st_shndx == SHN_UNDEF) {
				/* External: go via stub */
				value = stub_for_addr(sechdrs, value, me);
				if (!value)
					return -ENOENT;
				if (!restore_r2((u32 *)location + 1, me))
					return -ENOEXEC;

				squash_toc_save_inst(strtab + sym->st_name, value);
			} else
				value += local_entry_offset(sym);

			/* Convert value to relative */
			value -= (unsigned long)location;
			if (value + 0x2000000 > 0x3ffffff || (value & 3) != 0) {
				pr_err("%s: REL24 %li out of range!\n",
				       me->name, (long int)value);
				return -ENOEXEC;
			}

			/* Only replace bits 2 through 26 */
			*(uint32_t *)location
				= (*(uint32_t *)location & ~0x03fffffc)
				| (value & 0x03fffffc);
			break;

		case R_PPC64_REL64:
			/* 64 bits relative (used by features fixups) */
			*location = value - (unsigned long)location;
			break;

		case R_PPC64_REL32:
			/* 32 bits relative (used by relative exception tables) */
			*(u32 *)location = value - (unsigned long)location;
			break;

		case R_PPC64_TOCSAVE:
			/*
			 * Marker reloc indicates we don't have to save r2.
			 * That would only save us one instruction, so ignore
			 * it.
			 */
			break;

		case R_PPC64_ENTRY:
			/*
			 * Optimize ELFv2 large code model entry point if
			 * the TOC is within 2GB range of current location.
			 */
			value = my_r2(sechdrs, me) - (unsigned long)location;
			if (value + 0x80008000 > 0xffffffff)
				break;
			/*
			 * Check for the large code model prolog sequence:
			 *	ld r2, ...(r12)
			 *	add r2, r2, r12
			 */
			if ((((uint32_t *)location)[0] & ~0xfffc)
			    != 0xe84c0000)
				break;
			if (((uint32_t *)location)[1] != 0x7c426214)
				break;
			/*
			 * If found, replace it with:
			 *	addis r2, r12, (.TOC.-func)@ha
			 *	addi  r2, r12, (.TOC.-func)@l
			 */
			((uint32_t *)location)[0] = 0x3c4c0000 + PPC_HA(value);
			((uint32_t *)location)[1] = 0x38420000 + PPC_LO(value);
			break;

		case R_PPC64_REL16_HA:
			/* Subtract location pointer */
			value -= (unsigned long)location;
			value = ((value + 0x8000) >> 16);
			*((uint16_t *) location)
				= (*((uint16_t *) location) & ~0xffff)
				| (value & 0xffff);
			break;

		case R_PPC64_REL16_LO:
			/* Subtract location pointer */
			value -= (unsigned long)location;
			*((uint16_t *) location)
				= (*((uint16_t *) location) & ~0xffff)
				| (value & 0xffff);
			break;

		default:
			pr_err("%s: Unknown ADD relocation: %lu\n",
			       me->name,
			       (unsigned long)ELF64_R_TYPE(rela[i].r_info));
			return -ENOEXEC;
		}
	}

	return 0;
}

#ifdef CONFIG_DYNAMIC_FTRACE

#ifdef CC_USING_MPROFILE_KERNEL

#define PACATOC offsetof(struct paca_struct, kernel_toc)

/*
 * For mprofile-kernel we use a special stub for ftrace_caller() because we
 * can't rely on r2 containing this module's TOC when we enter the stub.
 *
 * That can happen if the function calling us didn't need to use the TOC. In
 * that case it won't have set up r2, and the r2 value will be either the
 * kernel's TOC, or possibly another module's TOC.
 *
 * To deal with that this stub uses the kernel TOC, which is always accessible
 * via the paca (in r13). The target (ftrace_caller()) is responsible for
 * saving and restoring the TOC before returning.
 */
static unsigned long create_ftrace_stub(const Elf64_Shdr *sechdrs, struct module *me)
{
	struct ppc64_stub_entry *entry;
	unsigned int i, num_stubs;
	static u32 stub_insns[] = {
		0xe98d0000 | PACATOC,	/* ld    r12,PACATOC(r13) */
		0x3d8c0000,		/* addis r12,r12,<high> */
		0x398c0000,		/* addi  r12,r12,<low> */
		0x7d8903a6,		/* mtctr r12 */
		0x4e800420,		/* bctr */
	};
	long reladdr;

	num_stubs = sechdrs[me->arch.stubs_section].sh_size / sizeof(*entry);

	/* Find the next available stub entry */
	entry = (void *)sechdrs[me->arch.stubs_section].sh_addr;
	for (i = 0; i < num_stubs && stub_func_addr(entry->funcdata); i++, entry++);

	if (i >= num_stubs) {
		pr_err("%s: Unable to find a free slot for ftrace stub.\n", me->name);
		return 0;
	}

	memcpy(entry->jump, stub_insns, sizeof(stub_insns));

	/* Stub uses address relative to kernel toc (from the paca) */
	reladdr = (unsigned long)ftrace_caller - kernel_toc_addr();
	if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
		pr_err("%s: Address of ftrace_caller out of range of kernel_toc.\n", me->name);
		return 0;
	}

	entry->jump[1] |= PPC_HA(reladdr);
	entry->jump[2] |= PPC_LO(reladdr);

	/* Even though we don't use funcdata in the stub, it's needed elsewhere. */
	entry->funcdata = func_desc((unsigned long)ftrace_caller);
	entry->magic = STUB_MAGIC;

	return (unsigned long)entry;
}
#else
static unsigned long create_ftrace_stub(const Elf64_Shdr *sechdrs, struct module *me)
{
	return stub_for_addr(sechdrs, (unsigned long)ftrace_caller, me);
}
#endif

int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sechdrs)
{
	mod->arch.toc = my_r2(sechdrs, mod);
	mod->arch.tramp = create_ftrace_stub(sechdrs, mod);

	if (!mod->arch.tramp)
		return -ENOENT;

	return 0;
}
#endif