xref: /openbmc/linux/arch/arm64/kernel/module.c (revision a9ca9f9c)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * AArch64 loadable module support.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define pr_fmt(fmt) "Modules: " fmt

#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/ftrace.h>
#include <linux/gfp.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/moduleloader.h>
#include <linux/random.h>
#include <linux/scs.h>
#include <linux/vmalloc.h>

#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/scs.h>
#include <asm/sections.h>

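/*
 * Either base may remain 0, meaning that no suitable window exists; the
 * corresponding allocation range in module_alloc() is then skipped.
 */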
static u64 module_direct_base __ro_after_init = 0;
static u64 module_plt_base __ro_after_init = 0;

/*
 * Choose a random page-aligned base address for a window of 'size' bytes which
 * entirely contains the interval [start, end - 1].
 */
static u64 __init random_bounding_box(u64 size, u64 start, u64 end)
{
	u64 max_pgoff, pgoff;

	if ((end - start) >= size)
		return 0;

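	/*
	 * The interval occupies (end - start) bytes of the window, leaving
	 * size - (end - start) bytes of slack. Slide the window start down
	 * from 'start' by a random whole number of pages within that slack,
	 * so the result is page aligned and [base, base + size) still
	 * contains [start, end).
	 */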
	max_pgoff = (size - (end - start)) / PAGE_SIZE;
	pgoff = get_random_u32_inclusive(0, max_pgoff);

	return start - pgoff * PAGE_SIZE;
}

/*
 * Modules may directly reference data and text anywhere within the kernel
 * image and other modules. References using PREL32 relocations have a +/-2G
 * range, and so we need to ensure that the entire kernel image and all modules
 * fall within a 2G window such that these are always within range.
 *
 * Modules may directly branch to functions and code within the kernel text,
 * and to functions and code within other modules. These branches will use
 * CALL26/JUMP26 relocations with a +/-128M range. Without PLTs, we must ensure
 * that the entire kernel text and all module text falls within a 128M window
 * such that these are always within range. With PLTs, we can expand this to a
 * 2G window.
 *
 * We chose the 128M region to surround the entire kernel image (rather than
 * just the text) as using the same bounds for the 128M and 2G regions ensures
 * by construction that we never select a 128M region that is not a subset of
 * the 2G region. For very large and unusual kernel configurations this means
 * we may fall back to PLTs where they could have been avoided, but this keeps
 * the logic significantly simpler.
 */
static int __init module_init_limits(void)
{
	u64 kernel_end = (u64)_end;
	u64 kernel_start = (u64)_text;
	u64 kernel_size = kernel_end - kernel_start;

	/*
	 * The default modules region is placed immediately below the kernel
	 * image, and is large enough to use the full 2G relocation range.
	 */
	BUILD_BUG_ON(KIMAGE_VADDR != MODULES_END);
	BUILD_BUG_ON(MODULES_VSIZE < SZ_2G);

	if (!kaslr_enabled()) {
		if (kernel_size < SZ_128M)
			module_direct_base = kernel_end - SZ_128M;
		if (kernel_size < SZ_2G)
			module_plt_base = kernel_end - SZ_2G;
	} else {
		u64 min = kernel_start;
		u64 max = kernel_end;

		if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) {
			pr_info("2G module region forced by RANDOMIZE_MODULE_REGION_FULL\n");
		} else {
			module_direct_base = random_bounding_box(SZ_128M, min, max);
			if (module_direct_base) {
				min = module_direct_base;
				max = module_direct_base + SZ_128M;
			}
		}

		module_plt_base = random_bounding_box(SZ_2G, min, max);
	}

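	/*
	 * For example, with 4K pages and a hypothetical 48M kernel image,
	 * a populated module_direct_base leaves (128M - 48M) / 4K = 20480
	 * pages for non-PLT use, and a populated module_plt_base leaves
	 * (2G - 48M) / 4K = 512000 pages for PLT use.
	 */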
	pr_info("%llu pages in range for non-PLT usage\n",
		module_direct_base ? (SZ_128M - kernel_size) / PAGE_SIZE : 0);
	pr_info("%llu pages in range for PLT usage\n",
		module_plt_base ? (SZ_2G - kernel_size) / PAGE_SIZE : 0);

	return 0;
}
subsys_initcall(module_init_limits);

void *module_alloc(unsigned long size)
{
	void *p = NULL;

	/*
	 * Where possible, prefer to allocate within direct branch range of the
	 * kernel such that no PLTs are necessary.
	 */
	if (module_direct_base) {
		p = __vmalloc_node_range(size, MODULE_ALIGN,
					 module_direct_base,
					 module_direct_base + SZ_128M,
					 GFP_KERNEL | __GFP_NOWARN,
					 PAGE_KERNEL, 0, NUMA_NO_NODE,
					 __builtin_return_address(0));
	}

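	/*
	 * Fall back to the wider window that remains reachable via PLTs and
	 * PREL32 references when no direct-branch window was set up or the
	 * allocation above failed.
	 */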
	if (!p && module_plt_base) {
		p = __vmalloc_node_range(size, MODULE_ALIGN,
					 module_plt_base,
					 module_plt_base + SZ_2G,
					 GFP_KERNEL | __GFP_NOWARN,
					 PAGE_KERNEL, 0, NUMA_NO_NODE,
					 __builtin_return_address(0));
	}

	if (!p) {
		pr_warn_ratelimited("%s: unable to allocate memory\n",
				    __func__);
	}

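	/*
	 * When the KASAN shadow is not managed by vmalloc itself, it must be
	 * allocated explicitly here; undo the module allocation if that fails.
	 */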
	if (p && (kasan_alloc_module_shadow(p, size, GFP_KERNEL) < 0)) {
		vfree(p);
		return NULL;
	}

	/* Memory is intended to be executable, reset the pointer tag. */
	return kasan_reset_tag(p);
}

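/*
 * Relocation operations, mirroring the expressions in the AArch64 ELF psABI:
 * ABS computes S + A, PREL computes S + A - P, and PAGE computes
 * Page(S + A) - Page(P), where P is the address of the place being relocated
 * and val carries the S + A computed by the caller.
 */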
enum aarch64_reloc_op {
	RELOC_OP_NONE,
	RELOC_OP_ABS,
	RELOC_OP_PREL,
	RELOC_OP_PAGE,
};

static u64 do_reloc(enum aarch64_reloc_op reloc_op, __le32 *place, u64 val)
{
	switch (reloc_op) {
	case RELOC_OP_ABS:
		return val;
	case RELOC_OP_PREL:
		return val - (u64)place;
	case RELOC_OP_PAGE:
		return (val & ~0xfff) - ((u64)place & ~0xfff);
	case RELOC_OP_NONE:
		return 0;
	}

	pr_err("do_reloc: unknown relocation operation %d\n", reloc_op);
	return 0;
}

static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
{
	s64 sval = do_reloc(op, place, val);

	/*
	 * The ELF psABI for AArch64 documents the 16-bit and 32-bit place
	 * relative and absolute relocations as having a range of [-2^15, 2^16)
	 * or [-2^31, 2^32), respectively. However, in order to be able to
	 * detect overflows reliably, we have to choose whether we interpret
	 * such quantities as signed or as unsigned, and stick with it.
	 * The way we organize our address space requires a signed
	 * interpretation of 32-bit relative references, so let's use that
	 * for all R_AARCH64_PRELxx relocations. This means our upper
	 * bound for overflow detection should be Sxx_MAX rather than Uxx_MAX.
	 */

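	/*
	 * The (possibly truncated) value is stored unconditionally; the
	 * caller decides via its own overflow_check flag whether an -ERANGE
	 * result is fatal, so only the range violation is reported here.
	 */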
	switch (len) {
	case 16:
		*(s16 *)place = sval;
		switch (op) {
		case RELOC_OP_ABS:
			if (sval < 0 || sval > U16_MAX)
				return -ERANGE;
			break;
		case RELOC_OP_PREL:
			if (sval < S16_MIN || sval > S16_MAX)
				return -ERANGE;
			break;
		default:
			pr_err("Invalid 16-bit data relocation (%d)\n", op);
			return 0;
		}
		break;
	case 32:
		*(s32 *)place = sval;
		switch (op) {
		case RELOC_OP_ABS:
			if (sval < 0 || sval > U32_MAX)
				return -ERANGE;
			break;
		case RELOC_OP_PREL:
			if (sval < S32_MIN || sval > S32_MAX)
				return -ERANGE;
			break;
		default:
			pr_err("Invalid 32-bit data relocation (%d)\n", op);
			return 0;
		}
		break;
	case 64:
		*(s64 *)place = sval;
		break;
	default:
		pr_err("Invalid length (%d) for data relocation\n", len);
		return 0;
	}
	return 0;
}

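/*
 * MOVNZ relocations may rewrite the opcode to MOVN or MOVZ depending on the
 * sign of the value, while MOVKZ relocations leave the opcode (MOVK/MOVZ)
 * untouched and only patch the 16-bit immediate field.
 */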
enum aarch64_insn_movw_imm_type {
	AARCH64_INSN_IMM_MOVNZ,
	AARCH64_INSN_IMM_MOVKZ,
};

static int reloc_insn_movw(enum aarch64_reloc_op op, __le32 *place, u64 val,
			   int lsb, enum aarch64_insn_movw_imm_type imm_type)
{
	u64 imm;
	s64 sval;
	u32 insn = le32_to_cpu(*place);

	sval = do_reloc(op, place, val);
	imm = sval >> lsb;

	if (imm_type == AARCH64_INSN_IMM_MOVNZ) {
		/*
		 * For signed MOVW relocations, we have to manipulate the
		 * instruction encoding depending on whether or not the
		 * immediate is less than zero.
		 */
		insn &= ~(3 << 29);
		if (sval >= 0) {
			/* >=0: Set the instruction to MOVZ (opcode 10b). */
			insn |= 2 << 29;
		} else {
			/*
			 * <0: Set the instruction to MOVN (opcode 00b).
			 *     Since we've masked the opcode already, we
			 *     don't need to do anything other than
			 *     inverting the new immediate field.
			 */
			imm = ~imm;
		}
	}

	/* Update the instruction with the new encoding. */
	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
	*place = cpu_to_le32(insn);

	if (imm > U16_MAX)
		return -ERANGE;

	return 0;
}

static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val,
			  int lsb, int len, enum aarch64_insn_imm_type imm_type)
{
	u64 imm, imm_mask;
	s64 sval;
	u32 insn = le32_to_cpu(*place);

	/* Calculate the relocation value. */
	sval = do_reloc(op, place, val);
	sval >>= lsb;

	/* Extract the value bits and shift them to bit 0. */
	imm_mask = (BIT(lsb + len) - 1) >> lsb;
	imm = sval & imm_mask;

	/* Update the instruction's immediate field. */
	insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
	*place = cpu_to_le32(insn);

	/*
	 * Extract the upper value bits (including the sign bit) and
	 * shift them to bit 0.
	 */
	sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);

	/*
	 * Overflow has occurred if the upper bits are not all equal to
	 * the sign bit of the value.
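	 * After the shift above, a value that fits leaves sval as either 0
	 * or -1, which is exactly what the check below tests for.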
	 */
	if ((u64)(sval + 1) >= 2)
		return -ERANGE;

	return 0;
}

static int reloc_insn_adrp(struct module *mod, Elf64_Shdr *sechdrs,
			   __le32 *place, u64 val)
{
	u32 insn;

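	/*
	 * is_forbidden_offset_for_adrp() flags ADRP instructions at the page
	 * offsets affected by Cortex-A53 erratum 843419; those are rewritten
	 * as ADR, or routed through a veneer when the target is out of ADR
	 * range.
	 */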
	if (!is_forbidden_offset_for_adrp(place))
		return reloc_insn_imm(RELOC_OP_PAGE, place, val, 12, 21,
				      AARCH64_INSN_IMM_ADR);

	/* patch ADRP to ADR if it is in range */
	if (!reloc_insn_imm(RELOC_OP_PREL, place, val & ~0xfff, 0, 21,
			    AARCH64_INSN_IMM_ADR)) {
		insn = le32_to_cpu(*place);
		insn &= ~BIT(31);
	} else {
		/* out of range for ADR -> emit a veneer */
		val = module_emit_veneer_for_adrp(mod, sechdrs, place, val & ~0xfff);
		if (!val)
			return -ENOEXEC;
		insn = aarch64_insn_gen_branch_imm((u64)place, val,
						   AARCH64_INSN_BRANCH_NOLINK);
	}

	*place = cpu_to_le32(insn);
	return 0;
}

int apply_relocate_add(Elf64_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i;
	int ovf;
	bool overflow_check;
	Elf64_Sym *sym;
	void *loc;
	u64 val;
	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;

	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* loc corresponds to P in the AArch64 ELF document. */
		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;

		/* sym is the ELF symbol we're referring to. */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rel[i].r_info);

		/* val corresponds to (S + A) in the AArch64 ELF document. */
		val = sym->st_value + rel[i].r_addend;

		/* Check for overflow by default. */
		overflow_check = true;

		/* Perform the static relocation. */
		switch (ELF64_R_TYPE(rel[i].r_info)) {
		/* Null relocations. */
		case R_ARM_NONE:
		case R_AARCH64_NONE:
			ovf = 0;
			break;

		/* Data relocations. */
		case R_AARCH64_ABS64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 64);
			break;
		case R_AARCH64_ABS32:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 32);
			break;
		case R_AARCH64_ABS16:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 16);
			break;
		case R_AARCH64_PREL64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 64);
			break;
		case R_AARCH64_PREL32:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 32);
			break;
		case R_AARCH64_PREL16:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 16);
			break;

		/* MOVW instruction relocations. */
		case R_AARCH64_MOVW_UABS_G0_NC:
			overflow_check = false;
			fallthrough;
		case R_AARCH64_MOVW_UABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G1_NC:
			overflow_check = false;
			fallthrough;
		case R_AARCH64_MOVW_UABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G2_NC:
			overflow_check = false;
			fallthrough;
		case R_AARCH64_MOVW_UABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_SABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G0_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G0:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G1_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G1:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G2_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G2:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
					      AARCH64_INSN_IMM_MOVNZ);
			break;

		/* Immediate instruction relocations. */
		case R_AARCH64_LD_PREL_LO19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_ADR_PREL_LO21:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
					     AARCH64_INSN_IMM_ADR);
			break;
		case R_AARCH64_ADR_PREL_PG_HI21_NC:
			overflow_check = false;
			fallthrough;
		case R_AARCH64_ADR_PREL_PG_HI21:
			ovf = reloc_insn_adrp(me, sechdrs, loc, val);
			if (ovf && ovf != -ERANGE)
				return ovf;
			break;
		case R_AARCH64_ADD_ABS_LO12_NC:
		case R_AARCH64_LDST8_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST16_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST32_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST64_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST128_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_TSTBR14:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
					     AARCH64_INSN_IMM_14);
			break;
		case R_AARCH64_CONDBR19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_JUMP26:
		case R_AARCH64_CALL26:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
					     AARCH64_INSN_IMM_26);
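			/*
			 * Branch targets outside the +/-128M range of the
			 * instruction are redirected through a module PLT
			 * entry.
			 */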
			if (ovf == -ERANGE) {
				val = module_emit_plt_entry(me, sechdrs, loc, &rel[i], sym);
				if (!val)
					return -ENOEXEC;
				ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2,
						     26, AARCH64_INSN_IMM_26);
			}
			break;

		default:
			pr_err("module %s: unsupported RELA relocation: %llu\n",
			       me->name, ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}

		if (overflow_check && ovf == -ERANGE)
			goto overflow;

	}

	return 0;

overflow:
	pr_err("module %s: overflow in relocation type %d val %Lx\n",
	       me->name, (int)ELF64_R_TYPE(rel[i].r_info), val);
	return -ENOEXEC;
}

static inline void __init_plt(struct plt_entry *plt, unsigned long addr)
{
	*plt = get_plt_entry(addr, plt);
}

static int module_init_ftrace_plt(const Elf_Ehdr *hdr,
				  const Elf_Shdr *sechdrs,
				  struct module *mod)
{
#if defined(CONFIG_DYNAMIC_FTRACE)
	const Elf_Shdr *s;
	struct plt_entry *plts;

	s = find_section(hdr, sechdrs, ".text.ftrace_trampoline");
	if (!s)
		return -ENOEXEC;

	plts = (void *)s->sh_addr;

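	/*
	 * Pre-populate the trampoline PLT so that calls to ftrace_caller
	 * (FTRACE_ADDR) from this module can be patched in even when the
	 * target lies outside direct branch range.
	 */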
	__init_plt(&plts[FTRACE_PLT_IDX], FTRACE_ADDR);

	mod->arch.ftrace_trampolines = plts;
#endif
	return 0;
}

int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s;
	s = find_section(hdr, sechdrs, ".altinstructions");
	if (s)
		apply_alternatives_module((void *)s->sh_addr, s->sh_size);

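	/*
	 * With dynamic shadow call stacks, the module's .init.eh_frame data
	 * is used to locate the PAC instructions that scs_patch() rewrites
	 * into shadow call stack pushes and pops.
	 */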
	if (scs_is_dynamic()) {
		s = find_section(hdr, sechdrs, ".init.eh_frame");
		if (s)
			scs_patch((void *)s->sh_addr, s->sh_size);
	}

	return module_init_ftrace_plt(hdr, sechdrs, me);
}
603