/*
 * AArch64 loadable module support.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
#include <asm/insn.h>

#define AARCH64_INSN_IMM_MOVNZ		AARCH64_INSN_IMM_MAX
#define AARCH64_INSN_IMM_MOVK		AARCH64_INSN_IMM_16

void *module_alloc(unsigned long size)
{
	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
				    GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE,
				    __builtin_return_address(0));
}

enum aarch64_reloc_op {
	RELOC_OP_NONE,
	RELOC_OP_ABS,
	RELOC_OP_PREL,
	RELOC_OP_PAGE,
};

static u64 do_reloc(enum aarch64_reloc_op reloc_op, void *place, u64 val)
{
	switch (reloc_op) {
	case RELOC_OP_ABS:
		return val;
	case RELOC_OP_PREL:
		return val - (u64)place;
	case RELOC_OP_PAGE:
		return (val & ~0xfff) - ((u64)place & ~0xfff);
	case RELOC_OP_NONE:
		return 0;
	}

	pr_err("do_reloc: unknown relocation operation %d\n", reloc_op);
	return 0;
}

static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
{
	/* Shifting a 64-bit type by 64 is undefined; special-case len == 64. */
	u64 imm_mask = (len < 64) ? (1ULL << len) - 1 : ~0ULL;
	s64 sval = do_reloc(op, place, val);

	switch (len) {
	case 16:
		*(s16 *)place = sval;
		break;
	case 32:
		*(s32 *)place = sval;
		break;
	case 64:
		*(s64 *)place = sval;
		break;
	default:
		pr_err("Invalid length (%d) for data relocation\n", len);
		return 0;
	}

	/*
	 * Extract the upper value bits (including the sign bit) and
	 * shift them to bit 0.
	 */
	sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);

	/*
	 * Overflow has occurred if the value is not representable in
	 * len bits (i.e. the bottom len bits are not sign-extended and
	 * the top bits are not all zero).
	 */
	if ((u64)(sval + 1) > 2)
		return -ERANGE;

	return 0;
}
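/*
 * Worked example of the overflow check above, with illustrative values:
 * relocating val = 0x18000 with len = 16 gives sval = 0x18000; masking
 * out the low 15 bits and arithmetic-shifting right by 15 leaves
 * sval = 3, so (u64)(sval + 1) == 4 > 2 and we return -ERANGE. By
 * contrast, val = 0xffff leaves sval = 1 and val = -1 leaves sval = -1,
 * both of which are accepted: a 16-bit datum may hold either a
 * zero-extended or a sign-extended value, i.e. anything in
 * [-2^15, 2^16).
 */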
static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
			   int lsb, enum aarch64_insn_imm_type imm_type)
{
	bool is_signed = (imm_type == AARCH64_INSN_IMM_MOVNZ);
	u64 imm, limit = 0;
	s64 sval;
	u32 insn = le32_to_cpu(*(u32 *)place);

	sval = do_reloc(op, place, val);
	sval >>= lsb;
	imm = sval & 0xffff;

	if (is_signed) {
		/*
		 * For signed MOVW relocations, we have to manipulate the
		 * instruction encoding depending on whether or not the
		 * immediate is less than zero. The sign test must be made
		 * on the full relocation value: imm has already been
		 * masked down to 16 bits and can never be negative.
		 */
		insn &= ~(3 << 29);
		if (sval >= 0) {
			/* >=0: Set the instruction to MOVZ (opcode 10b). */
			insn |= 2 << 29;
		} else {
			/*
			 * <0: Set the instruction to MOVN (opcode 00b).
			 * Since we've masked the opcode already, we
			 * don't need to do anything other than
			 * inverting the new immediate field.
			 */
			imm = ~imm;
		}
		imm_type = AARCH64_INSN_IMM_MOVK;
	}

	/* Update the instruction with the new encoding. */
	insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
	*(u32 *)place = cpu_to_le32(insn);

	/* Shift out the immediate field. */
	sval >>= 16;

	/*
	 * For unsigned immediates, the overflow check is straightforward.
	 * For signed immediates, the sign bit is actually the bit past the
	 * most significant bit of the field, so widen the check by one.
	 * Note that imm_type cannot be tested here: it may have been
	 * rewritten to AARCH64_INSN_IMM_MOVK above, which aliases the
	 * unsigned AARCH64_INSN_IMM_16 type.
	 */
	if (is_signed) {
		sval++;
		limit++;
	}

	/* Check the upper bits depending on the sign of the immediate. */
	if ((u64)sval > limit)
		return -ERANGE;

	return 0;
}

static int reloc_insn_imm(enum aarch64_reloc_op op, void *place, u64 val,
			  int lsb, int len, enum aarch64_insn_imm_type imm_type)
{
	u64 imm, imm_mask;
	s64 sval;
	u32 insn = le32_to_cpu(*(u32 *)place);

	/* Calculate the relocation value. */
	sval = do_reloc(op, place, val);
	sval >>= lsb;

	/* Extract the value bits and shift them to bit 0. */
	imm_mask = (BIT(lsb + len) - 1) >> lsb;
	imm = sval & imm_mask;

	/* Update the instruction's immediate field. */
	insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
	*(u32 *)place = cpu_to_le32(insn);

	/*
	 * Extract the upper value bits (including the sign bit) and
	 * shift them to bit 0.
	 */
	sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);

	/*
	 * Overflow has occurred if the upper bits are not all equal to
	 * the sign bit of the value.
	 */
	if ((u64)(sval + 1) >= 2)
		return -ERANGE;

	return 0;
}

int apply_relocate_add(Elf64_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i;
	int ovf;
	bool overflow_check;
	Elf64_Sym *sym;
	void *loc;
	u64 val;
	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;

	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* loc corresponds to P in the AArch64 ELF document. */
		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;

		/* sym is the ELF symbol we're referring to. */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rel[i].r_info);

		/* val corresponds to (S + A) in the AArch64 ELF document. */
		val = sym->st_value + rel[i].r_addend;

		/* Check for overflow by default. */
		overflow_check = true;

		/* Perform the static relocation. */
		switch (ELF64_R_TYPE(rel[i].r_info)) {
		/* Null relocations. */
		case R_ARM_NONE:
		case R_AARCH64_NONE:
			ovf = 0;
			break;

		/* Data relocations. */
		case R_AARCH64_ABS64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 64);
			break;
		case R_AARCH64_ABS32:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 32);
			break;
		case R_AARCH64_ABS16:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 16);
			break;
		case R_AARCH64_PREL64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 64);
			break;
		case R_AARCH64_PREL32:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 32);
			break;
		case R_AARCH64_PREL16:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 16);
			break;

		/* MOVW instruction relocations. */
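		/*
		 * For orientation, a typical absolute 64-bit address load
		 * is a four-instruction sequence, one relocation group per
		 * instruction (assembly shown for illustration only):
		 *
		 *	movz	x0, #:abs_g3:sym	// R_AARCH64_MOVW_UABS_G3
		 *	movk	x0, #:abs_g2_nc:sym	// R_AARCH64_MOVW_UABS_G2_NC
		 *	movk	x0, #:abs_g1_nc:sym	// R_AARCH64_MOVW_UABS_G1_NC
		 *	movk	x0, #:abs_g0_nc:sym	// R_AARCH64_MOVW_UABS_G0_NC
		 *
		 * Group Gn patches bits [16n + 15:16n] of the value; the
		 * _NC ("no check") variants skip the overflow check, since
		 * the other groups carry the remaining bits.
		 */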
		case R_AARCH64_MOVW_UABS_G0_NC:
			overflow_check = false;
		case R_AARCH64_MOVW_UABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_16);
			break;
		case R_AARCH64_MOVW_UABS_G1_NC:
			overflow_check = false;
		case R_AARCH64_MOVW_UABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_16);
			break;
		case R_AARCH64_MOVW_UABS_G2_NC:
			overflow_check = false;
		case R_AARCH64_MOVW_UABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_16);
			break;
		case R_AARCH64_MOVW_UABS_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
					      AARCH64_INSN_IMM_16);
			break;
		case R_AARCH64_MOVW_SABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G0_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVK);
			break;
		case R_AARCH64_MOVW_PREL_G0:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G1_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVK);
			break;
		case R_AARCH64_MOVW_PREL_G1:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G2_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVK);
			break;
		case R_AARCH64_MOVW_PREL_G2:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
					      AARCH64_INSN_IMM_MOVNZ);
			break;

		/* Immediate instruction relocations. */
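		/*
		 * Similarly, a typical position-independent address
		 * computation pairs two of the relocations handled below
		 * (assembly shown for illustration only):
		 *
		 *	adrp	x0, sym			// R_AARCH64_ADR_PREL_PG_HI21
		 *	add	x0, x0, :lo12:sym	// R_AARCH64_ADD_ABS_LO12_NC
		 *
		 * adrp computes the 4KB page of sym relative to the
		 * current page (RELOC_OP_PAGE) and the add supplies the
		 * low 12 bits. The LDSTn variants encode the low 12 bits
		 * scaled by the access size, which is why the lsb/len
		 * arguments vary below.
		 */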
		case R_AARCH64_LD_PREL_LO19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_ADR_PREL_LO21:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
					     AARCH64_INSN_IMM_ADR);
			break;
		case R_AARCH64_ADR_PREL_PG_HI21_NC:
			overflow_check = false;
		case R_AARCH64_ADR_PREL_PG_HI21:
			ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21,
					     AARCH64_INSN_IMM_ADR);
			break;
		case R_AARCH64_ADD_ABS_LO12_NC:
		case R_AARCH64_LDST8_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST16_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST32_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST64_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST128_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_TSTBR14:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
					     AARCH64_INSN_IMM_14);
			break;
		case R_AARCH64_CONDBR19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_JUMP26:
		case R_AARCH64_CALL26:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
					     AARCH64_INSN_IMM_26);
			break;

		default:
			pr_err("module %s: unsupported RELA relocation: %llu\n",
			       me->name, ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}

		if (overflow_check && ovf == -ERANGE)
			goto overflow;
	}

	return 0;

overflow:
	pr_err("module %s: overflow in relocation type %d val %Lx\n",
	       me->name, (int)ELF64_R_TYPE(rel[i].r_info), val);
	return -ENOEXEC;
}