/*
 * AArch64 loadable module support.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/gfp.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/sections.h>

void *module_alloc(unsigned long size)
{
	gfp_t gfp_mask = GFP_KERNEL;
	void *p;

	/* Silence the initial allocation */
	if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
		gfp_mask |= __GFP_NOWARN;

	p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
				module_alloc_base + MODULES_VSIZE,
				gfp_mask, PAGE_KERNEL_EXEC, 0,
				NUMA_NO_NODE, __builtin_return_address(0));

	if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
	    !IS_ENABLED(CONFIG_KASAN))
		/*
		 * KASAN can only deal with module allocations being served
		 * from the reserved module region, since the remainder of
		 * the vmalloc region is already backed by zero shadow pages,
		 * and punching holes into it is non-trivial. Since the module
		 * region is not randomized when KASAN is enabled, it is even
		 * less likely that the module region gets exhausted, so we
		 * can simply omit this fallback in that case.
		 */
		p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
				module_alloc_base + SZ_2G, GFP_KERNEL,
				PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
				__builtin_return_address(0));

	if (p && (kasan_module_alloc(p, size) < 0)) {
		vfree(p);
		return NULL;
	}

	return p;
}
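/*
 * Relocation operations, named after the computations in the AArch64 ELF
 * psABI: S + A (absolute), S + A - P (place relative) and
 * Page(S + A) - Page(P) (page relative), where Page(expr) clears the low
 * twelve bits of expr. As a worked example (the addresses are hypothetical,
 * chosen only for illustration): with place P = 0xffff000010002468 and
 * value S + A = 0xffff000010005123, RELOC_OP_PREL yields 0x2cbb, while
 * RELOC_OP_PAGE yields 0x3000, i.e. the page delta that an ADRP instruction
 * encodes, with the low 12 bits left for a following ADD or load/store.
 */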
enum aarch64_reloc_op {
	RELOC_OP_NONE,
	RELOC_OP_ABS,
	RELOC_OP_PREL,
	RELOC_OP_PAGE,
};

static u64 do_reloc(enum aarch64_reloc_op reloc_op, __le32 *place, u64 val)
{
	switch (reloc_op) {
	case RELOC_OP_ABS:
		return val;
	case RELOC_OP_PREL:
		return val - (u64)place;
	case RELOC_OP_PAGE:
		return (val & ~0xfff) - ((u64)place & ~0xfff);
	case RELOC_OP_NONE:
		return 0;
	}

	pr_err("do_reloc: unknown relocation operation %d\n", reloc_op);
	return 0;
}

static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
{
	s64 sval = do_reloc(op, place, val);

	/*
	 * The ELF psABI for AArch64 documents the 16-bit and 32-bit place
	 * relative relocations as having a range of [-2^15, 2^16) or
	 * [-2^31, 2^32), respectively. However, in order to be able to
	 * detect overflows reliably, we have to choose whether we interpret
	 * such quantities as signed or as unsigned, and stick with it.
	 * The way we organize our address space requires a signed
	 * interpretation of 32-bit relative references, so let's use that
	 * for all R_AARCH64_PRELxx relocations. This means our upper
	 * bound for overflow detection should be Sxx_MAX rather than Uxx_MAX.
	 */

	switch (len) {
	case 16:
		*(s16 *)place = sval;
		if (sval < S16_MIN || sval > S16_MAX)
			return -ERANGE;
		break;
	case 32:
		*(s32 *)place = sval;
		if (sval < S32_MIN || sval > S32_MAX)
			return -ERANGE;
		break;
	case 64:
		*(s64 *)place = sval;
		break;
	default:
		pr_err("Invalid length (%d) for data relocation\n", len);
		return 0;
	}
	return 0;
}

enum aarch64_insn_movw_imm_type {
	AARCH64_INSN_IMM_MOVNZ,
	AARCH64_INSN_IMM_MOVKZ,
};

static int reloc_insn_movw(enum aarch64_reloc_op op, __le32 *place, u64 val,
			   int lsb, enum aarch64_insn_movw_imm_type imm_type)
{
	u64 imm;
	s64 sval;
	u32 insn = le32_to_cpu(*place);

	sval = do_reloc(op, place, val);
	imm = sval >> lsb;

	if (imm_type == AARCH64_INSN_IMM_MOVNZ) {
		/*
		 * For signed MOVW relocations, we have to manipulate the
		 * instruction encoding depending on whether or not the
		 * immediate is less than zero.
		 */
		insn &= ~(3 << 29);
		if (sval >= 0) {
			/* >=0: Set the instruction to MOVZ (opcode 10b). */
			insn |= 2 << 29;
		} else {
			/*
			 * <0: Set the instruction to MOVN (opcode 00b).
			 * Since we've masked the opcode already, we
			 * don't need to do anything other than
			 * inverting the new immediate field.
			 */
			imm = ~imm;
		}
	}

	/* Update the instruction with the new encoding. */
	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
	*place = cpu_to_le32(insn);

	if (imm > U16_MAX)
		return -ERANGE;

	return 0;
}
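/*
 * How the MOVW relocations handled below are used: under the large code
 * model, the toolchain typically materializes a 64-bit address as a MOVZ
 * instruction followed by a chain of MOVKs, each carrying one 16-bit slice
 * of the value. For illustration (register and symbol are arbitrary):
 *
 *	movz	x0, #:abs_g3:sym	// R_AARCH64_MOVW_UABS_G3, bits [63:48]
 *	movk	x0, #:abs_g2_nc:sym	// R_AARCH64_MOVW_UABS_G2_NC, bits [47:32]
 *	movk	x0, #:abs_g1_nc:sym	// R_AARCH64_MOVW_UABS_G1_NC, bits [31:16]
 *	movk	x0, #:abs_g0_nc:sym	// R_AARCH64_MOVW_UABS_G0_NC, bits [15:0]
 *
 * reloc_insn_movw() patches the 16-bit immediate field of each instruction
 * with the slice selected by its lsb argument; for the _NC ("no check")
 * variants, apply_relocate_add() clears overflow_check so that the -ERANGE
 * result of the U16_MAX test above is deliberately ignored.
 */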
static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val,
			  int lsb, int len, enum aarch64_insn_imm_type imm_type)
{
	u64 imm, imm_mask;
	s64 sval;
	u32 insn = le32_to_cpu(*place);

	/* Calculate the relocation value. */
	sval = do_reloc(op, place, val);
	sval >>= lsb;

	/* Extract the value bits and shift them to bit 0. */
	imm_mask = (BIT(lsb + len) - 1) >> lsb;
	imm = sval & imm_mask;

	/* Update the instruction's immediate field. */
	insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
	*place = cpu_to_le32(insn);

	/*
	 * Extract the upper value bits (including the sign bit) and
	 * shift them to bit 0.
	 */
	sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);

	/*
	 * Overflow has occurred if the upper bits are not all equal to
	 * the sign bit of the value.
	 */
	if ((u64)(sval + 1) >= 2)
		return -ERANGE;

	return 0;
}
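/*
 * ADRP instructions in the last two instruction slots of a 4 KB page
 * (offsets 0xff8 and 0xffc) are subject to the Cortex-A53 erratum 843419
 * workaround: when is_forbidden_offset_for_adrp() flags such a location,
 * the ADRP is rewritten as a plain ADR if the target lies within ADR's
 * +/-1 MB range, and is otherwise redirected, via a branch, to a veneer
 * that performs the page computation from a safe offset.
 */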
static int reloc_insn_adrp(struct module *mod, Elf64_Shdr *sechdrs,
			   __le32 *place, u64 val)
{
	u32 insn;

	if (!is_forbidden_offset_for_adrp(place))
		return reloc_insn_imm(RELOC_OP_PAGE, place, val, 12, 21,
				      AARCH64_INSN_IMM_ADR);

	/* patch ADRP to ADR if it is in range */
	if (!reloc_insn_imm(RELOC_OP_PREL, place, val & ~0xfff, 0, 21,
			    AARCH64_INSN_IMM_ADR)) {
		insn = le32_to_cpu(*place);
		insn &= ~BIT(31);
	} else {
		/* out of range for ADR -> emit a veneer */
		val = module_emit_veneer_for_adrp(mod, sechdrs, place, val & ~0xfff);
		if (!val)
			return -ENOEXEC;
		insn = aarch64_insn_gen_branch_imm((u64)place, val,
						   AARCH64_INSN_BRANCH_NOLINK);
	}

	*place = cpu_to_le32(insn);
	return 0;
}

int apply_relocate_add(Elf64_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i;
	int ovf;
	bool overflow_check;
	Elf64_Sym *sym;
	void *loc;
	u64 val;
	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;

	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* loc corresponds to P in the AArch64 ELF document. */
		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;

		/* sym is the ELF symbol we're referring to. */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rel[i].r_info);

		/* val corresponds to (S + A) in the AArch64 ELF document. */
		val = sym->st_value + rel[i].r_addend;

		/* Check for overflow by default. */
		overflow_check = true;

		/* Perform the static relocation. */
		switch (ELF64_R_TYPE(rel[i].r_info)) {
		/* Null relocations. */
		case R_ARM_NONE:
		case R_AARCH64_NONE:
			ovf = 0;
			break;

		/* Data relocations. */
		case R_AARCH64_ABS64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 64);
			break;
		case R_AARCH64_ABS32:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 32);
			break;
		case R_AARCH64_ABS16:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 16);
			break;
		case R_AARCH64_PREL64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 64);
			break;
		case R_AARCH64_PREL32:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 32);
			break;
		case R_AARCH64_PREL16:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 16);
			break;

		/* MOVW instruction relocations. */
		case R_AARCH64_MOVW_UABS_G0_NC:
			overflow_check = false;
		case R_AARCH64_MOVW_UABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G1_NC:
			overflow_check = false;
		case R_AARCH64_MOVW_UABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G2_NC:
			overflow_check = false;
		case R_AARCH64_MOVW_UABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_SABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G0_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G0:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G1_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G1:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G2_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G2:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
					      AARCH64_INSN_IMM_MOVNZ);
			break;

		/* Immediate instruction relocations. */
		case R_AARCH64_LD_PREL_LO19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_ADR_PREL_LO21:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
					     AARCH64_INSN_IMM_ADR);
			break;
		case R_AARCH64_ADR_PREL_PG_HI21_NC:
			overflow_check = false;
		case R_AARCH64_ADR_PREL_PG_HI21:
			ovf = reloc_insn_adrp(me, sechdrs, loc, val);
			if (ovf && ovf != -ERANGE)
				return ovf;
			break;
		case R_AARCH64_ADD_ABS_LO12_NC:
		case R_AARCH64_LDST8_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST16_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST32_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST64_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST128_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_TSTBR14:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
					     AARCH64_INSN_IMM_14);
			break;
		case R_AARCH64_CONDBR19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_JUMP26:
		case R_AARCH64_CALL26:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
					     AARCH64_INSN_IMM_26);

			if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
			    ovf == -ERANGE) {
				val = module_emit_plt_entry(me, sechdrs, loc, &rel[i], sym);
				if (!val)
					return -ENOEXEC;
				ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2,
						     26, AARCH64_INSN_IMM_26);
			}
			break;

		default:
			pr_err("module %s: unsupported RELA relocation: %llu\n",
			       me->name, ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}

		if (overflow_check && ovf == -ERANGE)
			goto overflow;

	}

	return 0;

overflow:
	pr_err("module %s: overflow in relocation type %d val %Lx\n",
	       me->name, (int)ELF64_R_TYPE(rel[i].r_info), val);
	return -ENOEXEC;
}
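/*
 * Runs once all relocations have been applied: patch in any alternative
 * instruction sequences listed in the module's .altinstructions section
 * and, when module PLTs and dynamic ftrace are in use, record the module's
 * ftrace trampoline section.
 */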
int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s, *se;
	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
		if (strcmp(".altinstructions", secstrs + s->sh_name) == 0)
			apply_alternatives_module((void *)s->sh_addr, s->sh_size);
#ifdef CONFIG_ARM64_MODULE_PLTS
		if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) &&
		    !strcmp(".text.ftrace_trampoline", secstrs + s->sh_name))
			me->arch.ftrace_trampoline = (void *)s->sh_addr;
#endif
	}

	return 0;
}