// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2014-2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
 */

#include <linux/elf.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sort.h>

static struct plt_entry __get_adrp_add_pair(u64 dst, u64 pc,
					    enum aarch64_insn_register reg)
{
	u32 adrp, add;

	adrp = aarch64_insn_gen_adr(pc, dst, reg, AARCH64_INSN_ADR_TYPE_ADRP);
	add = aarch64_insn_gen_add_sub_imm(reg, reg, dst % SZ_4K,
					   AARCH64_INSN_VARIANT_64BIT,
					   AARCH64_INSN_ADSB_ADD);

	return (struct plt_entry){ cpu_to_le32(adrp), cpu_to_le32(add) };
}

struct plt_entry get_plt_entry(u64 dst, void *pc)
{
	struct plt_entry plt;
	static u32 br;

	if (!br)
		br = aarch64_insn_gen_branch_reg(AARCH64_INSN_REG_16,
						 AARCH64_INSN_BRANCH_NOLINK);

	plt = __get_adrp_add_pair(dst, (u64)pc, AARCH64_INSN_REG_16);
	plt.br = cpu_to_le32(br);

	return plt;
}

static bool plt_entries_equal(const struct plt_entry *a,
			      const struct plt_entry *b)
{
	u64 p, q;

	/*
	 * Check whether both entries refer to the same target:
	 * do the cheapest checks first.
	 * If the 'add' or 'br' opcodes are different, then the target
	 * cannot be the same.
	 */
	if (a->add != b->add || a->br != b->br)
		return false;

	p = ALIGN_DOWN((u64)a, SZ_4K);
	q = ALIGN_DOWN((u64)b, SZ_4K);

	/*
	 * If the 'adrp' opcodes are the same then we just need to check
	 * that they refer to the same 4k region.
	 */
	if (a->adrp == b->adrp && p == q)
		return true;

	return (p + aarch64_insn_adrp_get_offset(le32_to_cpu(a->adrp))) ==
	       (q + aarch64_insn_adrp_get_offset(le32_to_cpu(b->adrp)));
}

u64 module_emit_plt_entry(struct module *mod, Elf64_Shdr *sechdrs,
			  void *loc, const Elf64_Rela *rela,
			  Elf64_Sym *sym)
{
	struct mod_plt_sec *pltsec = !within_module_init((unsigned long)loc, mod) ?
						&mod->arch.core : &mod->arch.init;
	struct plt_entry *plt = (struct plt_entry *)sechdrs[pltsec->plt_shndx].sh_addr;
	int i = pltsec->plt_num_entries;
	int j = i - 1;
	u64 val = sym->st_value + rela->r_addend;

	if (is_forbidden_offset_for_adrp(&plt[i].adrp))
		i++;

	plt[i] = get_plt_entry(val, &plt[i]);

	/*
	 * Check if the entry we just created is a duplicate. Given that the
	 * relocations are sorted, this will be the last entry we allocated
	 * (if one exists).
	 */
	if (j >= 0 && plt_entries_equal(plt + i, plt + j))
		return (u64)&plt[j];

	pltsec->plt_num_entries += i - j;
	if (WARN_ON(pltsec->plt_num_entries > pltsec->plt_max_entries))
		return 0;

	return (u64)&plt[i];
}

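/*
 * Cortex-A53 erratum 843419 affects ADRP instructions that land in one of
 * the last two instruction slots of a 4 KB page (i.e., at offset 0xff8 or
 * 0xffc modulo 4 KB). When such an ADRP cannot simply be converted into a
 * nearby ADR at relocation time, the helper below emits a veneer of the form
 *
 *	adrp	<rd>, <target page>
 *	add	<rd>, <rd>, #:lo12:<target>
 *	b	<address of the original ADRP + 4>
 *
 * at an offset where the ADRP is known to be safe; the relocation code is
 * then expected to replace the original ADRP with a branch to this veneer.
 */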
#ifdef CONFIG_ARM64_ERRATUM_843419
u64 module_emit_veneer_for_adrp(struct module *mod, Elf64_Shdr *sechdrs,
				void *loc, u64 val)
{
	struct mod_plt_sec *pltsec = !within_module_init((unsigned long)loc, mod) ?
						&mod->arch.core : &mod->arch.init;
	struct plt_entry *plt = (struct plt_entry *)sechdrs[pltsec->plt_shndx].sh_addr;
	int i = pltsec->plt_num_entries++;
	u32 br;
	int rd;

	if (WARN_ON(pltsec->plt_num_entries > pltsec->plt_max_entries))
		return 0;

	if (is_forbidden_offset_for_adrp(&plt[i].adrp))
		i = pltsec->plt_num_entries++;

	/* get the destination register of the ADRP instruction */
	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD,
					  le32_to_cpup((__le32 *)loc));

	br = aarch64_insn_gen_branch_imm((u64)&plt[i].br, (u64)loc + 4,
					 AARCH64_INSN_BRANCH_NOLINK);

	plt[i] = __get_adrp_add_pair(val, (u64)&plt[i], rd);
	plt[i].br = cpu_to_le32(br);

	return (u64)&plt[i];
}
#endif

#define cmp_3way(a, b)	((a) < (b) ? -1 : (a) > (b))

static int cmp_rela(const void *a, const void *b)
{
	const Elf64_Rela *x = a, *y = b;
	int i;

	/* sort by type, symbol index and addend */
	i = cmp_3way(ELF64_R_TYPE(x->r_info), ELF64_R_TYPE(y->r_info));
	if (i == 0)
		i = cmp_3way(ELF64_R_SYM(x->r_info), ELF64_R_SYM(y->r_info));
	if (i == 0)
		i = cmp_3way(x->r_addend, y->r_addend);
	return i;
}

static bool duplicate_rel(const Elf64_Rela *rela, int num)
{
	/*
	 * Entries are sorted by type, symbol index and addend. That means
	 * that, if a duplicate entry exists, it must be in the preceding
	 * slot.
	 */
	return num > 0 && cmp_rela(rela + num, rela + num - 1) == 0;
}

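/*
 * Count the maximum number of PLT entries that the relocations in a single
 * RELA section may require. For the erratum 843419 workaround, this also
 * raises the alignment of the section being relocated whenever that alone is
 * enough to keep an ADRP instruction away from a vulnerable offset.
 */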
static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num,
			       Elf64_Word dstidx, Elf_Shdr *dstsec)
{
	unsigned int ret = 0;
	Elf64_Sym *s;
	int i;

	for (i = 0; i < num; i++) {
		u64 min_align;

		switch (ELF64_R_TYPE(rela[i].r_info)) {
		case R_AARCH64_JUMP26:
		case R_AARCH64_CALL26:
			if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
				break;

			/*
			 * We only have to consider branch targets that resolve
			 * to symbols that are defined in a different section.
			 * This is not simply a heuristic, it is a fundamental
			 * limitation, since there is no guaranteed way to emit
			 * PLT entries sufficiently close to the branch if the
			 * section size exceeds the range of a branch
			 * instruction. So ignore relocations against defined
			 * symbols if they live in the same section as the
			 * relocation target.
			 */
			s = syms + ELF64_R_SYM(rela[i].r_info);
			if (s->st_shndx == dstidx)
				break;

			/*
			 * Jump relocations with non-zero addends against
			 * undefined symbols are supported by the ELF spec, but
			 * do not occur in practice (e.g., 'jump n bytes past
			 * the entry point of undefined function symbol f').
			 * So we need to support them, but there is no need to
			 * take them into consideration when trying to optimize
			 * this code. So let's only check for duplicates when
			 * the addend is zero: this allows us to record the PLT
			 * entry address in the symbol table itself, rather than
			 * having to search the list for duplicates each time we
			 * emit one.
			 */
			if (rela[i].r_addend != 0 || !duplicate_rel(rela, i))
				ret++;
			break;
		case R_AARCH64_ADR_PREL_PG_HI21_NC:
		case R_AARCH64_ADR_PREL_PG_HI21:
			if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) ||
			    !cpus_have_const_cap(ARM64_WORKAROUND_843419))
				break;

			/*
			 * Determine the minimal safe alignment for this ADRP
			 * instruction: the section alignment at which it is
			 * guaranteed not to appear at a vulnerable offset.
			 *
			 * This comes down to finding the least significant zero
			 * bit in bits [11:3] of the section offset, and
			 * increasing the section's alignment so that the
			 * resulting address of this instruction is guaranteed
			 * to equal the offset in that particular bit (as well
			 * as all less significant bits). This ensures that the
			 * address modulo 4 KB != 0xff8 or 0xffc (which would
			 * have all ones in bits [11:3]).
			 */
			min_align = 2ULL << ffz(rela[i].r_offset | 0x7);

			/*
			 * Allocate veneer space for each ADRP that may appear
			 * at a vulnerable offset nonetheless. At relocation
			 * time, some of these will remain unused since some
			 * ADRP instructions can be patched to ADR instructions
			 * instead.
			 */
			if (min_align > SZ_4K)
				ret++;
			else
				dstsec->sh_addralign = max(dstsec->sh_addralign,
							   min_align);
			break;
		}
	}

	if (IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) &&
	    cpus_have_const_cap(ARM64_WORKAROUND_843419))
		/*
		 * Add some slack so we can skip PLT slots that may trigger
		 * the erratum due to the placement of the ADRP instruction.
		 */
		ret += DIV_ROUND_UP(ret, (SZ_4K / sizeof(struct plt_entry)));

	return ret;
}

static bool branch_rela_needs_plt(Elf64_Sym *syms, Elf64_Rela *rela,
				  Elf64_Word dstidx)
{
	Elf64_Sym *s = syms + ELF64_R_SYM(rela->r_info);

	if (s->st_shndx == dstidx)
		return false;

	return ELF64_R_TYPE(rela->r_info) == R_AARCH64_JUMP26 ||
	       ELF64_R_TYPE(rela->r_info) == R_AARCH64_CALL26;
}

/* Group branch PLT relas at the front end of the array. */
static int partition_branch_plt_relas(Elf64_Sym *syms, Elf64_Rela *rela,
				      int numrels, Elf64_Word dstidx)
{
	int i = 0, j = numrels - 1;

	if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
		return 0;

	while (i < j) {
		if (branch_rela_needs_plt(syms, &rela[i], dstidx))
			i++;
		else if (branch_rela_needs_plt(syms, &rela[j], dstidx))
			swap(rela[i], rela[j]);
		else
			j--;
	}

	return i;
}

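/*
 * Called by the generic module loader while the module's sections are being
 * laid out, i.e., before any relocations are applied: size the (still empty)
 * .plt, .init.plt and ftrace trampoline sections so that enough PLT entries
 * can be emitted once the relocations are processed.
 */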
int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
			      char *secstrings, struct module *mod)
{
	unsigned long core_plts = 0;
	unsigned long init_plts = 0;
	Elf64_Sym *syms = NULL;
	Elf_Shdr *pltsec, *tramp = NULL;
	int i;

	/*
	 * Find the empty .plt section so we can expand it to store the PLT
	 * entries. Record the symtab address as well.
	 */
	for (i = 0; i < ehdr->e_shnum; i++) {
		if (!strcmp(secstrings + sechdrs[i].sh_name, ".plt"))
			mod->arch.core.plt_shndx = i;
		else if (!strcmp(secstrings + sechdrs[i].sh_name, ".init.plt"))
			mod->arch.init.plt_shndx = i;
		else if (!strcmp(secstrings + sechdrs[i].sh_name,
				 ".text.ftrace_trampoline"))
			tramp = sechdrs + i;
		else if (sechdrs[i].sh_type == SHT_SYMTAB)
			syms = (Elf64_Sym *)sechdrs[i].sh_addr;
	}

	if (!mod->arch.core.plt_shndx || !mod->arch.init.plt_shndx) {
		pr_err("%s: module PLT section(s) missing\n", mod->name);
		return -ENOEXEC;
	}
	if (!syms) {
		pr_err("%s: module symtab section missing\n", mod->name);
		return -ENOEXEC;
	}

	for (i = 0; i < ehdr->e_shnum; i++) {
		Elf64_Rela *rels = (void *)ehdr + sechdrs[i].sh_offset;
		int nents, numrels = sechdrs[i].sh_size / sizeof(Elf64_Rela);
		Elf64_Shdr *dstsec = sechdrs + sechdrs[i].sh_info;

		if (sechdrs[i].sh_type != SHT_RELA)
			continue;

		/* ignore relocations that operate on non-exec sections */
		if (!(dstsec->sh_flags & SHF_EXECINSTR))
			continue;

		/*
		 * sort branch relocations requiring a PLT by type, symbol index
		 * and addend
		 */
		nents = partition_branch_plt_relas(syms, rels, numrels,
						   sechdrs[i].sh_info);
		if (nents)
			sort(rels, nents, sizeof(Elf64_Rela), cmp_rela, NULL);

		if (!str_has_prefix(secstrings + dstsec->sh_name, ".init"))
			core_plts += count_plts(syms, rels, numrels,
						sechdrs[i].sh_info, dstsec);
		else
			init_plts += count_plts(syms, rels, numrels,
						sechdrs[i].sh_info, dstsec);
	}

	pltsec = sechdrs + mod->arch.core.plt_shndx;
	pltsec->sh_type = SHT_NOBITS;
	pltsec->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	pltsec->sh_addralign = L1_CACHE_BYTES;
	pltsec->sh_size = (core_plts + 1) * sizeof(struct plt_entry);
	mod->arch.core.plt_num_entries = 0;
	mod->arch.core.plt_max_entries = core_plts;

	pltsec = sechdrs + mod->arch.init.plt_shndx;
	pltsec->sh_type = SHT_NOBITS;
	pltsec->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	pltsec->sh_addralign = L1_CACHE_BYTES;
	pltsec->sh_size = (init_plts + 1) * sizeof(struct plt_entry);
	mod->arch.init.plt_num_entries = 0;
	mod->arch.init.plt_max_entries = init_plts;

	if (tramp) {
		tramp->sh_type = SHT_NOBITS;
		tramp->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
		tramp->sh_addralign = __alignof__(struct plt_entry);
		tramp->sh_size = NR_FTRACE_PLTS * sizeof(struct plt_entry);
	}

	return 0;
}