/*  Kernel module help for PPC64.
    Copyright (C) 2001, 2003 Rusty Russell IBM Corporation.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/
#include <linux/module.h>
#include <linux/elf.h>
#include <linux/moduleloader.h>
#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>
#include <linux/sort.h>
#include <asm/module.h>
#include <asm/uaccess.h>
#include <asm/firmware.h>
#include <asm/code-patching.h>

#include "setup.h"

/* FIXME: We don't do .init separately.  To do this, we'd need to have
   a separate r2 value in the init and core section, and stub between
   them, too.

   Using a magic allocator which places modules within 32MB solves
   this, and makes other things simpler.  Anton?
   --RR.  */
#if 0
#define DEBUGP printk
#else
#define DEBUGP(fmt, ...)
#endif

/* There's actually a third entry here, but it's unused */
struct ppc64_opd_entry
{
	unsigned long funcaddr;
	unsigned long r2;
};

/* Like PPC32, we need little trampolines to do > 24-bit jumps (into
   the kernel itself).  But on PPC64, these need to be used for every
   jump, actually, to reset r2 (TOC+0x8000). */
struct ppc64_stub_entry
{
	/* 28 byte jump instruction sequence (7 instructions) */
	unsigned char jump[28];
	unsigned char unused[4];
	/* Data for the above code */
	struct ppc64_opd_entry opd;
};

/* We use a stub to fix up r2 (TOC ptr) and to jump to the (external)
   function which may be more than 24-bits away.  We could simply
   patch the new r2 value and function pointer into the stub, but it's
   significantly shorter to put these values at the end of the stub
   code, and patch the stub address (32-bits relative to the TOC ptr,
   r2) into the stub. */
static struct ppc64_stub_entry ppc64_stub =
{ .jump = {
	0x3d, 0x82, 0x00, 0x00,	/* addis   r12,r2, <high> */
	0x39, 0x8c, 0x00, 0x00,	/* addi    r12,r12, <low> */
	/* Save current r2 value in magic place on the stack. */
	0xf8, 0x41, 0x00, 0x28,	/* std     r2,40(r1) */
	0xe9, 0x6c, 0x00, 0x20,	/* ld      r11,32(r12) */
	0xe8, 0x4c, 0x00, 0x28,	/* ld      r2,40(r12) */
	0x7d, 0x69, 0x03, 0xa6,	/* mtctr   r11 */
	0x4e, 0x80, 0x04, 0x20	/* bctr */
} };
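/*
 * Editor's note (not in the original source): the two loads above use
 * fixed offsets into the stub itself, which only works because the opd
 * entry sits immediately after the 32 bytes of code, i.e.
 *
 *	offsetof(struct ppc64_stub_entry, opd.funcaddr) == 28 + 4 == 32
 *	offsetof(struct ppc64_stub_entry, opd.r2)       == 32 + 8  == 40
 *
 * With r12 pointing at the stub, "ld r11,32(r12)" fetches the target's
 * entry point and "ld r2,40(r12)" fetches the target's TOC value.
 */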
/* Count how many different 24-bit relocations (different symbol,
   different addend) */
static unsigned int count_relocs(const Elf64_Rela *rela, unsigned int num)
{
	unsigned int i, r_info, r_addend, _count_relocs;

	/* FIXME: Only count external ones --RR */
	_count_relocs = 0;
	r_info = 0;
	r_addend = 0;
	for (i = 0; i < num; i++)
		/* Only count 24-bit relocs, others don't need stubs */
		if (ELF64_R_TYPE(rela[i].r_info) == R_PPC_REL24 &&
		    (r_info != ELF64_R_SYM(rela[i].r_info) ||
		     r_addend != rela[i].r_addend)) {
			_count_relocs++;
			r_info = ELF64_R_SYM(rela[i].r_info);
			r_addend = rela[i].r_addend;
		}

	return _count_relocs;
}

static int relacmp(const void *_x, const void *_y)
{
	const Elf64_Rela *x, *y;

	y = (Elf64_Rela *)_x;
	x = (Elf64_Rela *)_y;

	/* Compare the entire r_info (as opposed to ELF64_R_SYM(r_info) only) to
	 * make the comparison cheaper/faster. It won't affect the sorting or
	 * the counting algorithms' performance
	 */
	if (x->r_info < y->r_info)
		return -1;
	else if (x->r_info > y->r_info)
		return 1;
	else if (x->r_addend < y->r_addend)
		return -1;
	else if (x->r_addend > y->r_addend)
		return 1;
	else
		return 0;
}

static void relaswap(void *_x, void *_y, int size)
{
	uint64_t *x, *y, tmp;
	int i;

	y = (uint64_t *)_x;
	x = (uint64_t *)_y;

	for (i = 0; i < sizeof(Elf64_Rela) / sizeof(uint64_t); i++) {
		tmp = x[i];
		x[i] = y[i];
		y[i] = tmp;
	}
}
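/*
 * Editor's illustration (not in the original source): once the relocs
 * are sorted by (r_info, r_addend), identical (symbol, addend) pairs
 * become adjacent, so count_relocs() only needs to compare each entry
 * against the previous one.  For example, R_PPC_REL24 relocs against
 * (A,0), (B,0), (A,0) sort to (A,0), (A,0), (B,0) and are counted as
 * 2 distinct stubs.
 */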
/* Get size of potential trampolines required. */
static unsigned long get_stubs_size(const Elf64_Ehdr *hdr,
				    const Elf64_Shdr *sechdrs)
{
	/* One extra reloc so it's always 0-funcaddr terminated */
	unsigned long relocs = 1;
	unsigned i;

	/* Every relocated section... */
	for (i = 1; i < hdr->e_shnum; i++) {
		if (sechdrs[i].sh_type == SHT_RELA) {
			DEBUGP("Found relocations in section %u\n", i);
			DEBUGP("Ptr: %p.  Number: %lu\n",
			       (void *)sechdrs[i].sh_addr,
			       sechdrs[i].sh_size / sizeof(Elf64_Rela));

			/* Sort the relocation information based on a symbol and
			 * addend key.  This is a stable O(n*log n) complexity
			 * algorithm, and it reduces the complexity of
			 * count_relocs() to linear complexity O(n)
			 */
			sort((void *)sechdrs[i].sh_addr,
			     sechdrs[i].sh_size / sizeof(Elf64_Rela),
			     sizeof(Elf64_Rela), relacmp, relaswap);

			relocs += count_relocs((void *)sechdrs[i].sh_addr,
					       sechdrs[i].sh_size
					       / sizeof(Elf64_Rela));
		}
	}

	DEBUGP("Looks like a total of %lu stubs, max\n", relocs);
	return relocs * sizeof(struct ppc64_stub_entry);
}

static void dedotify_versions(struct modversion_info *vers,
			      unsigned long size)
{
	struct modversion_info *end;

	for (end = (void *)vers + size; vers < end; vers++)
		if (vers->name[0] == '.')
			memmove(vers->name, vers->name+1, strlen(vers->name));
}

/* Undefined symbols which refer to .funcname, hack to funcname */
static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab)
{
	unsigned int i;

	for (i = 1; i < numsyms; i++) {
		if (syms[i].st_shndx == SHN_UNDEF) {
			char *name = strtab + syms[i].st_name;
			if (name[0] == '.')
				memmove(name, name+1, strlen(name));
		}
	}
}

int module_frob_arch_sections(Elf64_Ehdr *hdr,
			      Elf64_Shdr *sechdrs,
			      char *secstrings,
			      struct module *me)
{
	unsigned int i;

	/* Find .toc and .stubs sections, symtab and strtab */
	for (i = 1; i < hdr->e_shnum; i++) {
		char *p;
		if (strcmp(secstrings + sechdrs[i].sh_name, ".stubs") == 0)
			me->arch.stubs_section = i;
		else if (strcmp(secstrings + sechdrs[i].sh_name, ".toc") == 0)
			me->arch.toc_section = i;
		else if (strcmp(secstrings + sechdrs[i].sh_name, "__versions") == 0)
			dedotify_versions((void *)hdr + sechdrs[i].sh_offset,
					  sechdrs[i].sh_size);

		/* We don't handle .init for the moment: rename to _init */
		while ((p = strstr(secstrings + sechdrs[i].sh_name, ".init")))
			p[0] = '_';

		if (sechdrs[i].sh_type == SHT_SYMTAB)
			dedotify((void *)hdr + sechdrs[i].sh_offset,
				 sechdrs[i].sh_size / sizeof(Elf64_Sym),
				 (void *)hdr
				 + sechdrs[sechdrs[i].sh_link].sh_offset);
	}

	if (!me->arch.stubs_section) {
		printk("%s: doesn't contain .stubs.\n", me->name);
		return -ENOEXEC;
	}

	/* If we don't have a .toc, just use .stubs.  We need to set r2
	   to some reasonable value in case the module calls out to
	   other functions via a stub, or if a function pointer escapes
	   the module by some means.  */
	if (!me->arch.toc_section)
		me->arch.toc_section = me->arch.stubs_section;

	/* Override the stubs size */
	sechdrs[me->arch.stubs_section].sh_size = get_stubs_size(hdr, sechdrs);
	return 0;
}

int apply_relocate(Elf64_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me)
{
	printk(KERN_ERR "%s: Non-ADD RELOCATION unsupported\n", me->name);
	return -ENOEXEC;
}

/* r2 is the TOC pointer: it actually points 0x8000 into the TOC (this
   gives the value maximum span in an instruction which uses a signed
   offset) */
static inline unsigned long my_r2(Elf64_Shdr *sechdrs, struct module *me)
{
	return sechdrs[me->arch.toc_section].sh_addr + 0x8000;
}

/* Both low and high 16 bits are added as SIGNED additions, so if low
   16 bits has high bit set, high 16 bits must be adjusted.  These
   macros do that (stolen from binutils). */
#define PPC_LO(v) ((v) & 0xffff)
#define PPC_HI(v) (((v) >> 16) & 0xffff)
#define PPC_HA(v) PPC_HI ((v) + 0x8000)
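/*
 * Editor's illustration (not in the original source), using a made-up
 * offset: for v = 0x12348765, PPC_LO(v) = 0x8765 and PPC_HA(v) =
 * PPC_HI(0x12350765) = 0x1235.  The addi instruction sign-extends
 * 0x8765 to -0x789b, so addis/addi compute
 * r2 + 0x12350000 - 0x789b = r2 + 0x12348765, which is why the high
 * half must be taken from v + 0x8000 rather than from v directly.
 */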
/* Patch stub to reference function and correct r2 value. */
static inline int create_stub(Elf64_Shdr *sechdrs,
			      struct ppc64_stub_entry *entry,
			      struct ppc64_opd_entry *opd,
			      struct module *me)
{
	Elf64_Half *loc1, *loc2;
	long reladdr;

	*entry = ppc64_stub;

	loc1 = (Elf64_Half *)&entry->jump[2];
	loc2 = (Elf64_Half *)&entry->jump[6];

	/* Stub uses address relative to r2. */
	reladdr = (unsigned long)entry - my_r2(sechdrs, me);
	if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
		printk("%s: Address %p of stub out of range of %p.\n",
		       me->name, (void *)reladdr,
		       (void *)my_r2(sechdrs, me));
		return 0;
	}
	DEBUGP("Stub %p gets data from reladdr %li\n", entry, reladdr);

	*loc1 = PPC_HA(reladdr);
	*loc2 = PPC_LO(reladdr);
	entry->opd.funcaddr = opd->funcaddr;
	entry->opd.r2 = opd->r2;
	return 1;
}

/* Create stub to jump to function described in this OPD: we need the
   stub to set up the TOC ptr (r2) for the function. */
static unsigned long stub_for_addr(Elf64_Shdr *sechdrs,
				   unsigned long opdaddr,
				   struct module *me)
{
	struct ppc64_stub_entry *stubs;
	struct ppc64_opd_entry *opd = (void *)opdaddr;
	unsigned int i, num_stubs;

	num_stubs = sechdrs[me->arch.stubs_section].sh_size / sizeof(*stubs);

	/* Find this stub, or if that fails, the next avail. entry */
	stubs = (void *)sechdrs[me->arch.stubs_section].sh_addr;
	for (i = 0; stubs[i].opd.funcaddr; i++) {
		BUG_ON(i >= num_stubs);

		if (stubs[i].opd.funcaddr == opd->funcaddr)
			return (unsigned long)&stubs[i];
	}

	if (!create_stub(sechdrs, &stubs[i], opd, me))
		return 0;

	return (unsigned long)&stubs[i];
}

/* We expect a noop next: if it is, replace it with instruction to
   restore r2. */
static int restore_r2(u32 *instruction, struct module *me)
{
	if (*instruction != PPC_NOP_INSTR) {
		printk("%s: Expected noop after relocate, got %08x\n",
		       me->name, *instruction);
		return 0;
	}
	*instruction = 0xe8410028;	/* ld r2,40(r1) */
	return 1;
}
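/*
 * Editor's note (not in the original source): for a call to an external
 * symbol the call site is expected to be "bl <sym>" followed by a nop.
 * apply_relocate_add() below points the bl at a stub (which saves the
 * caller's r2 at 40(r1) and loads the callee's r2), and restore_r2()
 * rewrites the nop to "ld r2,40(r1)" so the caller's TOC pointer is
 * restored after the call returns.
 */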
int apply_relocate_add(Elf64_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i;
	Elf64_Rela *rela = (void *)sechdrs[relsec].sh_addr;
	Elf64_Sym *sym;
	unsigned long *location;
	unsigned long value;

	DEBUGP("Applying ADD relocate section %u to %u\n", relsec,
	       sechdrs[relsec].sh_info);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) {
		/* This is where to make the change */
		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rela[i].r_offset;
		/* This is the symbol it is referring to */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rela[i].r_info);

		DEBUGP("RELOC at %p: %li-type as %s (%lu) + %li\n",
		       location, (long)ELF64_R_TYPE(rela[i].r_info),
		       strtab + sym->st_name, (unsigned long)sym->st_value,
		       (long)rela[i].r_addend);

		/* `Everything is relative'. */
		value = sym->st_value + rela[i].r_addend;

		switch (ELF64_R_TYPE(rela[i].r_info)) {
		case R_PPC64_ADDR32:
			/* Simply set it */
			*(u32 *)location = value;
			break;

		case R_PPC64_ADDR64:
			/* Simply set it */
			*(unsigned long *)location = value;
			break;

		case R_PPC64_TOC:
			*(unsigned long *)location = my_r2(sechdrs, me);
			break;

		case R_PPC64_TOC16:
			/* Subtract TOC pointer */
			value -= my_r2(sechdrs, me);
			if (value + 0x8000 > 0xffff) {
				printk("%s: bad TOC16 relocation (%lu)\n",
				       me->name, value);
				return -ENOEXEC;
			}
			*((uint16_t *) location)
				= (*((uint16_t *) location) & ~0xffff)
				| (value & 0xffff);
			break;

		case R_PPC64_TOC16_DS:
			/* Subtract TOC pointer */
			value -= my_r2(sechdrs, me);
			if ((value & 3) != 0 || value + 0x8000 > 0xffff) {
				printk("%s: bad TOC16_DS relocation (%lu)\n",
				       me->name, value);
				return -ENOEXEC;
			}
			*((uint16_t *) location)
				= (*((uint16_t *) location) & ~0xfffc)
				| (value & 0xfffc);
			break;

		case R_PPC_REL24:
			/* FIXME: Handle weak symbols here --RR */
			if (sym->st_shndx == SHN_UNDEF) {
				/* External: go via stub */
				value = stub_for_addr(sechdrs, value, me);
				if (!value)
					return -ENOENT;
				if (!restore_r2((u32 *)location + 1, me))
					return -ENOEXEC;
			}

			/* Convert value to relative */
			value -= (unsigned long)location;
			if (value + 0x2000000 > 0x3ffffff || (value & 3) != 0) {
				printk("%s: REL24 %li out of range!\n",
				       me->name, (long int)value);
				return -ENOEXEC;
			}

			/* Only replace bits 2 through 26 */
			*(uint32_t *)location
				= (*(uint32_t *)location & ~0x03fffffc)
				| (value & 0x03fffffc);
			break;

		case R_PPC64_REL64:
			/* 64 bits relative (used by features fixups) */
			*location = value - (unsigned long)location;
			break;

		default:
			printk("%s: Unknown ADD relocation: %lu\n",
			       me->name,
			       (unsigned long)ELF64_R_TYPE(rela[i].r_info));
			return -ENOEXEC;
		}
	}

	return 0;
}