xref: /openbmc/linux/arch/arm64/kernel/module.c (revision 6c33a6f4)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * AArch64 loadable module support.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/ftrace.h>
#include <linux/gfp.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/sections.h>

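/*
 * Allocate memory for a module. Allocations are served from the
 * [module_alloc_base, module_alloc_base + MODULES_VSIZE) window, which is
 * typically placed within branch range of the core kernel. With KASAN the
 * window is capped at MODULES_END; without KASAN, and with module PLTs
 * available, we can fall back to a wider 2 GiB window and let PLT veneers
 * fix up any branches that end up out of range.
 */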
void *module_alloc(unsigned long size)
{
	u64 module_alloc_end = module_alloc_base + MODULES_VSIZE;
	gfp_t gfp_mask = GFP_KERNEL;
	void *p;

	/* Silence the initial allocation */
	if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
		gfp_mask |= __GFP_NOWARN;

	if (IS_ENABLED(CONFIG_KASAN))
		/* don't exceed the static module region - see below */
		module_alloc_end = MODULES_END;

	p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
				module_alloc_end, gfp_mask, PAGE_KERNEL, 0,
				NUMA_NO_NODE, __builtin_return_address(0));

	if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
	    !IS_ENABLED(CONFIG_KASAN))
		/*
		 * KASAN can only deal with module allocations being served
		 * from the reserved module region, since the remainder of
		 * the vmalloc region is already backed by zero shadow pages,
		 * and punching holes into it is non-trivial. Since the module
		 * region is not randomized when KASAN is enabled, it is even
		 * less likely that the module region gets exhausted, so we
		 * can simply omit this fallback in that case.
		 */
		p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
				module_alloc_base + SZ_2G, GFP_KERNEL,
				PAGE_KERNEL, 0, NUMA_NO_NODE,
				__builtin_return_address(0));

	if (p && (kasan_module_alloc(p, size) < 0)) {
		vfree(p);
		return NULL;
	}

	return p;
}

enum aarch64_reloc_op {
	RELOC_OP_NONE,
	RELOC_OP_ABS,
	RELOC_OP_PREL,
	RELOC_OP_PAGE,
};

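/*
 * Compute the raw value for a relocation: S + A for absolute (ABS)
 * relocations, S + A - P for place-relative (PREL) ones, and
 * Page(S + A) - Page(P) for page-relative (PAGE) ones, where Page(x)
 * simply masks off the low 12 bits. For example, with val == 0x10123456
 * and place pointing at 0x10abcdef, RELOC_OP_PAGE yields
 * 0x10123000 - 0x10abc000.
 */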
static u64 do_reloc(enum aarch64_reloc_op reloc_op, __le32 *place, u64 val)
{
	switch (reloc_op) {
	case RELOC_OP_ABS:
		return val;
	case RELOC_OP_PREL:
		return val - (u64)place;
	case RELOC_OP_PAGE:
		return (val & ~0xfff) - ((u64)place & ~0xfff);
	case RELOC_OP_NONE:
		return 0;
	}

	pr_err("do_reloc: unknown relocation operation %d\n", reloc_op);
	return 0;
}

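/*
 * Apply a 16-, 32- or 64-bit data relocation: store the (possibly
 * truncated) value at 'place' and report -ERANGE if it did not fit,
 * following the signed/unsigned conventions described below.
 */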
static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
{
	s64 sval = do_reloc(op, place, val);

	/*
	 * The ELF psABI for AArch64 documents the 16-bit and 32-bit place
	 * relative and absolute relocations as having a range of [-2^15, 2^16)
	 * or [-2^31, 2^32), respectively. However, in order to be able to
	 * detect overflows reliably, we have to choose whether we interpret
	 * such quantities as signed or as unsigned, and stick with it.
	 * The way we organize our address space requires a signed
	 * interpretation of 32-bit relative references, so let's use that
	 * for all R_AARCH64_PRELxx relocations. This means our upper
	 * bound for overflow detection should be Sxx_MAX rather than Uxx_MAX.
	 */

	switch (len) {
	case 16:
		*(s16 *)place = sval;
		switch (op) {
		case RELOC_OP_ABS:
			if (sval < 0 || sval > U16_MAX)
				return -ERANGE;
			break;
		case RELOC_OP_PREL:
			if (sval < S16_MIN || sval > S16_MAX)
				return -ERANGE;
			break;
		default:
			pr_err("Invalid 16-bit data relocation (%d)\n", op);
			return 0;
		}
		break;
	case 32:
		*(s32 *)place = sval;
		switch (op) {
		case RELOC_OP_ABS:
			if (sval < 0 || sval > U32_MAX)
				return -ERANGE;
			break;
		case RELOC_OP_PREL:
			if (sval < S32_MIN || sval > S32_MAX)
				return -ERANGE;
			break;
		default:
			pr_err("Invalid 32-bit data relocation (%d)\n", op);
			return 0;
		}
		break;
	case 64:
		*(s64 *)place = sval;
		break;
	default:
		pr_err("Invalid length (%d) for data relocation\n", len);
		return 0;
	}
	return 0;
}

enum aarch64_insn_movw_imm_type {
	AARCH64_INSN_IMM_MOVNZ,
	AARCH64_INSN_IMM_MOVKZ,
};

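/*
 * Apply a MOVW-class relocation: patch the 16-bit immediate of a
 * MOVZ/MOVK/MOVN instruction with bits [lsb + 15:lsb] of the relocated
 * value. For the signed (MOVNZ) variants the instruction itself is
 * rewritten as well: MOVZ for a non-negative value, MOVN (with the
 * immediate inverted) for a negative one.
 */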
static int reloc_insn_movw(enum aarch64_reloc_op op, __le32 *place, u64 val,
			   int lsb, enum aarch64_insn_movw_imm_type imm_type)
{
	u64 imm;
	s64 sval;
	u32 insn = le32_to_cpu(*place);

	sval = do_reloc(op, place, val);
	imm = sval >> lsb;

	if (imm_type == AARCH64_INSN_IMM_MOVNZ) {
		/*
		 * For signed MOVW relocations, we have to manipulate the
		 * instruction encoding depending on whether or not the
		 * immediate is less than zero.
		 */
		insn &= ~(3 << 29);
		if (sval >= 0) {
			/* >=0: Set the instruction to MOVZ (opcode 10b). */
			insn |= 2 << 29;
		} else {
			/*
			 * <0: Set the instruction to MOVN (opcode 00b).
			 *     Since we've masked the opcode already, we
			 *     don't need to do anything other than
			 *     inverting the new immediate field.
			 */
			imm = ~imm;
		}
	}

	/* Update the instruction with the new encoding. */
	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
	*place = cpu_to_le32(insn);

	if (imm > U16_MAX)
		return -ERANGE;

	return 0;
}

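/*
 * Apply a relocation to an arbitrary immediate field: extract 'len' bits
 * of the relocated value starting at bit 'lsb', patch them into the
 * instruction's immediate field, and return -ERANGE if the bits that were
 * dropped are not simply a sign extension of the encoded field.
 */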
static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val,
			  int lsb, int len, enum aarch64_insn_imm_type imm_type)
{
	u64 imm, imm_mask;
	s64 sval;
	u32 insn = le32_to_cpu(*place);

	/* Calculate the relocation value. */
	sval = do_reloc(op, place, val);
	sval >>= lsb;

	/* Extract the value bits and shift them to bit 0. */
	imm_mask = (BIT(lsb + len) - 1) >> lsb;
	imm = sval & imm_mask;

	/* Update the instruction's immediate field. */
	insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
	*place = cpu_to_le32(insn);

	/*
	 * Extract the upper value bits (including the sign bit) and
	 * shift them to bit 0.
	 */
	sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);

	/*
	 * Overflow has occurred if the upper bits are not all equal to
	 * the sign bit of the value: for an in-range value, sval is now
	 * either 0 or -1, and the unsigned comparison below accepts
	 * exactly those two cases.
	 */
	if ((u64)(sval + 1) >= 2)
		return -ERANGE;

	return 0;
}

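/*
 * Apply an ADRP page-relative relocation. If the ADRP instruction sits at
 * an offset where it is affected by Cortex-A53 erratum 843419 (see
 * is_forbidden_offset_for_adrp()), it is rewritten either as an ADR, when
 * the target is close enough, or as a branch to a veneer that performs the
 * ADRP out of line.
 */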
static int reloc_insn_adrp(struct module *mod, Elf64_Shdr *sechdrs,
			   __le32 *place, u64 val)
{
	u32 insn;

	if (!is_forbidden_offset_for_adrp(place))
		return reloc_insn_imm(RELOC_OP_PAGE, place, val, 12, 21,
				      AARCH64_INSN_IMM_ADR);

	/* patch ADRP to ADR if it is in range */
	if (!reloc_insn_imm(RELOC_OP_PREL, place, val & ~0xfff, 0, 21,
			    AARCH64_INSN_IMM_ADR)) {
		insn = le32_to_cpu(*place);
		insn &= ~BIT(31);
	} else {
		/* out of range for ADR -> emit a veneer */
		val = module_emit_veneer_for_adrp(mod, sechdrs, place, val & ~0xfff);
		if (!val)
			return -ENOEXEC;
		insn = aarch64_insn_gen_branch_imm((u64)place, val,
						   AARCH64_INSN_BRANCH_NOLINK);
	}

	*place = cpu_to_le32(insn);
	return 0;
}

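/*
 * Apply all RELA relocations in section 'relsec' to the section it targets.
 * For each entry, 'loc' is the place being patched (P) and 'val' is the
 * symbol value plus addend (S + A); overflow checking is skipped for the
 * 64-bit and *_NC (no-check) relocation types.
 */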
int apply_relocate_add(Elf64_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i;
	int ovf;
	bool overflow_check;
	Elf64_Sym *sym;
	void *loc;
	u64 val;
	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;

	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* loc corresponds to P in the AArch64 ELF document. */
		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;

		/* sym is the ELF symbol we're referring to. */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rel[i].r_info);

		/* val corresponds to (S + A) in the AArch64 ELF document. */
		val = sym->st_value + rel[i].r_addend;

		/* Check for overflow by default. */
		overflow_check = true;

		/* Perform the static relocation. */
		switch (ELF64_R_TYPE(rel[i].r_info)) {
		/* Null relocations. */
		case R_ARM_NONE:
		case R_AARCH64_NONE:
			ovf = 0;
			break;

		/* Data relocations. */
		case R_AARCH64_ABS64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 64);
			break;
		case R_AARCH64_ABS32:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 32);
			break;
		case R_AARCH64_ABS16:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 16);
			break;
		case R_AARCH64_PREL64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 64);
			break;
		case R_AARCH64_PREL32:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 32);
			break;
		case R_AARCH64_PREL16:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 16);
			break;

		/* MOVW instruction relocations. */
		case R_AARCH64_MOVW_UABS_G0_NC:
			overflow_check = false;
			/* Fall through */
		case R_AARCH64_MOVW_UABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G1_NC:
			overflow_check = false;
			/* Fall through */
		case R_AARCH64_MOVW_UABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G2_NC:
			overflow_check = false;
			/* Fall through */
		case R_AARCH64_MOVW_UABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_SABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G0_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G0:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G1_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G1:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G2_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G2:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
					      AARCH64_INSN_IMM_MOVNZ);
			break;

		/* Immediate instruction relocations. */
		case R_AARCH64_LD_PREL_LO19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_ADR_PREL_LO21:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
					     AARCH64_INSN_IMM_ADR);
			break;
		case R_AARCH64_ADR_PREL_PG_HI21_NC:
			overflow_check = false;
			/* Fall through */
		case R_AARCH64_ADR_PREL_PG_HI21:
			ovf = reloc_insn_adrp(me, sechdrs, loc, val);
			if (ovf && ovf != -ERANGE)
				return ovf;
			break;
		case R_AARCH64_ADD_ABS_LO12_NC:
		case R_AARCH64_LDST8_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
					     AARCH64_INSN_IMM_12);
			break;
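		/*
		 * The LDSTn_ABS_LO12_NC immediates are scaled by the access
		 * size (n bits), so the low 12 bits of the offset are shifted
		 * right by log2(n / 8) and only the remaining bits are
		 * encoded in the instruction.
		 */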
		case R_AARCH64_LDST16_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST32_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST64_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST128_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_TSTBR14:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
					     AARCH64_INSN_IMM_14);
			break;
		case R_AARCH64_CONDBR19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_JUMP26:
		case R_AARCH64_CALL26:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
					     AARCH64_INSN_IMM_26);

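			/*
			 * B and BL can only reach targets within +/-128 MiB.
			 * If the branch is out of range and module PLTs are
			 * enabled, redirect it through a PLT veneer instead.
			 */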
			if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
			    ovf == -ERANGE) {
				val = module_emit_plt_entry(me, sechdrs, loc, &rel[i], sym);
				if (!val)
					return -ENOEXEC;
				ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2,
						     26, AARCH64_INSN_IMM_26);
			}
			break;

		default:
			pr_err("module %s: unsupported RELA relocation: %llu\n",
			       me->name, ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}

		if (overflow_check && ovf == -ERANGE)
			goto overflow;

	}

	return 0;

overflow:
	pr_err("module %s: overflow in relocation type %d val %Lx\n",
	       me->name, (int)ELF64_R_TYPE(rel[i].r_info), val);
	return -ENOEXEC;
}

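/*
 * Look up a section header by name; returns NULL if no section with that
 * name exists in the module image.
 */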
static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
				    const Elf_Shdr *sechdrs,
				    const char *name)
{
	const Elf_Shdr *s, *se;
	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
		if (strcmp(name, secstrs + s->sh_name) == 0)
			return s;
	}

	return NULL;
}

static inline void __init_plt(struct plt_entry *plt, unsigned long addr)
{
	*plt = get_plt_entry(addr, plt);
}

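/*
 * With module PLTs and dynamic ftrace enabled, populate the module's
 * .text.ftrace_trampoline section with PLT entries pointing at the ftrace
 * entry points (FTRACE_ADDR and, when DYNAMIC_FTRACE_WITH_REGS is enabled,
 * FTRACE_REGS_ADDR), so that patched call sites in the module can reach
 * them even when the kernel is out of direct branch range.
 */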
static int module_init_ftrace_plt(const Elf_Ehdr *hdr,
				  const Elf_Shdr *sechdrs,
				  struct module *mod)
{
#if defined(CONFIG_ARM64_MODULE_PLTS) && defined(CONFIG_DYNAMIC_FTRACE)
	const Elf_Shdr *s;
	struct plt_entry *plts;

	s = find_section(hdr, sechdrs, ".text.ftrace_trampoline");
	if (!s)
		return -ENOEXEC;

	plts = (void *)s->sh_addr;

	__init_plt(&plts[FTRACE_PLT_IDX], FTRACE_ADDR);

	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
		__init_plt(&plts[FTRACE_REGS_PLT_IDX], FTRACE_REGS_ADDR);

	mod->arch.ftrace_trampolines = plts;
#endif
	return 0;
}

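/*
 * Final per-module fixups after all relocations have been applied: patch in
 * any alternative instruction sequences from .altinstructions and set up the
 * ftrace trampoline PLTs.
 */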
int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s;
	s = find_section(hdr, sechdrs, ".altinstructions");
	if (s)
		apply_alternatives_module((void *)s->sh_addr, s->sh_size);

	return module_init_ftrace_plt(hdr, sechdrs, me);
}
529