xref: /openbmc/linux/arch/arm64/kernel/kaslr.c (revision fc5a89f7)
1d2912cb1SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
2f80fb3a3SArd Biesheuvel /*
3f80fb3a3SArd Biesheuvel  * Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
4f80fb3a3SArd Biesheuvel  */
5f80fb3a3SArd Biesheuvel 
65a9e3e15SJisheng Zhang #include <linux/cache.h>
7f80fb3a3SArd Biesheuvel #include <linux/crc32.h>
8f80fb3a3SArd Biesheuvel #include <linux/init.h>
9f80fb3a3SArd Biesheuvel #include <linux/libfdt.h>
10f80fb3a3SArd Biesheuvel #include <linux/mm_types.h>
11f80fb3a3SArd Biesheuvel #include <linux/sched.h>
12f80fb3a3SArd Biesheuvel #include <linux/types.h>
1365fddcfcSMike Rapoport #include <linux/pgtable.h>
1458552408SLinus Torvalds #include <linux/random.h>
15f80fb3a3SArd Biesheuvel 
16f80fb3a3SArd Biesheuvel #include <asm/fixmap.h>
17f80fb3a3SArd Biesheuvel #include <asm/kernel-pgtable.h>
18f80fb3a3SArd Biesheuvel #include <asm/memory.h>
19f80fb3a3SArd Biesheuvel #include <asm/mmu.h>
20f80fb3a3SArd Biesheuvel #include <asm/sections.h>
21f6f0c436SMarc Zyngier #include <asm/setup.h>
22f80fb3a3SArd Biesheuvel 
/* Randomized base of the module region; written once by kaslr_init(). */
u64 __ro_after_init module_alloc_base;
/* Top 16 bits of the KASLR seed, used later to randomize the linear region. */
u16 __initdata memstart_offset_seed;
25f80fb3a3SArd Biesheuvel 
26f80fb3a3SArd Biesheuvel static __init u64 get_kaslr_seed(void *fdt)
27f80fb3a3SArd Biesheuvel {
28f80fb3a3SArd Biesheuvel 	int node, len;
2967831edfSLuc Van Oostenryck 	fdt64_t *prop;
30f80fb3a3SArd Biesheuvel 	u64 ret;
31f80fb3a3SArd Biesheuvel 
32f80fb3a3SArd Biesheuvel 	node = fdt_path_offset(fdt, "/chosen");
33f80fb3a3SArd Biesheuvel 	if (node < 0)
34f80fb3a3SArd Biesheuvel 		return 0;
35f80fb3a3SArd Biesheuvel 
36f80fb3a3SArd Biesheuvel 	prop = fdt_getprop_w(fdt, node, "kaslr-seed", &len);
37f80fb3a3SArd Biesheuvel 	if (!prop || len != sizeof(u64))
38f80fb3a3SArd Biesheuvel 		return 0;
39f80fb3a3SArd Biesheuvel 
40f80fb3a3SArd Biesheuvel 	ret = fdt64_to_cpu(*prop);
41f80fb3a3SArd Biesheuvel 	*prop = 0;
42f80fb3a3SArd Biesheuvel 	return ret;
43f80fb3a3SArd Biesheuvel }
44f80fb3a3SArd Biesheuvel 
45a762f4ffSMarc Zyngier struct arm64_ftr_override kaslr_feature_override __initdata;
46f80fb3a3SArd Biesheuvel 
47f80fb3a3SArd Biesheuvel /*
48f80fb3a3SArd Biesheuvel  * This routine will be executed with the kernel mapped at its default virtual
49f80fb3a3SArd Biesheuvel  * address, and if it returns successfully, the kernel will be remapped, and
50f80fb3a3SArd Biesheuvel  * start_kernel() will be executed from a randomized virtual offset. The
51f80fb3a3SArd Biesheuvel  * relocation will result in all absolute references (e.g., static variables
52f80fb3a3SArd Biesheuvel  * containing function pointers) to be reinitialized, and zero-initialized
53f80fb3a3SArd Biesheuvel  * .bss variables will be reset to 0.
54f80fb3a3SArd Biesheuvel  */
55f6f0c436SMarc Zyngier u64 __init kaslr_early_init(void)
56f80fb3a3SArd Biesheuvel {
57f80fb3a3SArd Biesheuvel 	void *fdt;
58*fc5a89f7SArd Biesheuvel 	u64 seed, offset, mask;
599bceb80bSGuenter Roeck 	unsigned long raw;
60f80fb3a3SArd Biesheuvel 
61f80fb3a3SArd Biesheuvel 	/*
62f80fb3a3SArd Biesheuvel 	 * Try to map the FDT early. If this fails, we simply bail,
63f80fb3a3SArd Biesheuvel 	 * and proceed with KASLR disabled. We will make another
64f80fb3a3SArd Biesheuvel 	 * attempt at mapping the FDT in setup_machine()
65f80fb3a3SArd Biesheuvel 	 */
66f6f0c436SMarc Zyngier 	fdt = get_early_fdt_ptr();
67294a9dddSMark Brown 	if (!fdt) {
68f80fb3a3SArd Biesheuvel 		return 0;
69294a9dddSMark Brown 	}
70f80fb3a3SArd Biesheuvel 
71f80fb3a3SArd Biesheuvel 	/*
72f80fb3a3SArd Biesheuvel 	 * Retrieve (and wipe) the seed from the FDT
73f80fb3a3SArd Biesheuvel 	 */
74f80fb3a3SArd Biesheuvel 	seed = get_kaslr_seed(fdt);
75f80fb3a3SArd Biesheuvel 
76f80fb3a3SArd Biesheuvel 	/*
77f80fb3a3SArd Biesheuvel 	 * Check if 'nokaslr' appears on the command line, and
78f80fb3a3SArd Biesheuvel 	 * return 0 if that is the case.
79f80fb3a3SArd Biesheuvel 	 */
80a762f4ffSMarc Zyngier 	if (kaslr_feature_override.val & kaslr_feature_override.mask & 0xf) {
81f80fb3a3SArd Biesheuvel 		return 0;
82294a9dddSMark Brown 	}
83f80fb3a3SArd Biesheuvel 
842e8e1ea8SMark Brown 	/*
859bceb80bSGuenter Roeck 	 * Mix in any entropy obtainable architecturally if enabled
869bceb80bSGuenter Roeck 	 * and supported.
872e8e1ea8SMark Brown 	 */
882e8e1ea8SMark Brown 
899bceb80bSGuenter Roeck 	if (arch_get_random_seed_long_early(&raw))
902e8e1ea8SMark Brown 		seed ^= raw;
912e8e1ea8SMark Brown 
922203e1adSMark Brown 	if (!seed) {
932203e1adSMark Brown 		return 0;
942203e1adSMark Brown 	}
952203e1adSMark Brown 
96f80fb3a3SArd Biesheuvel 	/*
97f80fb3a3SArd Biesheuvel 	 * OK, so we are proceeding with KASLR enabled. Calculate a suitable
98f80fb3a3SArd Biesheuvel 	 * kernel image offset from the seed. Let's place the kernel in the
9990ec95cdSSteve Capper 	 * middle half of the VMALLOC area (VA_BITS_MIN - 2), and stay clear of
100f2b9ba87SArd Biesheuvel 	 * the lower and upper quarters to avoid colliding with other
101f2b9ba87SArd Biesheuvel 	 * allocations.
102f80fb3a3SArd Biesheuvel 	 * Even if we could randomize at page granularity for 16k and 64k pages,
103f80fb3a3SArd Biesheuvel 	 * let's always round to 2 MB so we don't interfere with the ability to
104f80fb3a3SArd Biesheuvel 	 * map using contiguous PTEs
105f80fb3a3SArd Biesheuvel 	 */
10690ec95cdSSteve Capper 	mask = ((1UL << (VA_BITS_MIN - 2)) - 1) & ~(SZ_2M - 1);
10790ec95cdSSteve Capper 	offset = BIT(VA_BITS_MIN - 3) + (seed & mask);
108f80fb3a3SArd Biesheuvel 
109c031a421SArd Biesheuvel 	/* use the top 16 bits to randomize the linear region */
110c031a421SArd Biesheuvel 	memstart_offset_seed = seed >> 48;
111c031a421SArd Biesheuvel 
112*fc5a89f7SArd Biesheuvel 	return offset;
113*fc5a89f7SArd Biesheuvel }
114*fc5a89f7SArd Biesheuvel 
115*fc5a89f7SArd Biesheuvel static int __init kaslr_init(void)
116*fc5a89f7SArd Biesheuvel {
117*fc5a89f7SArd Biesheuvel 	u64 module_range;
118*fc5a89f7SArd Biesheuvel 	u32 seed;
119*fc5a89f7SArd Biesheuvel 
120f80fb3a3SArd Biesheuvel 	/*
121*fc5a89f7SArd Biesheuvel 	 * Set a reasonable default for module_alloc_base in case
122*fc5a89f7SArd Biesheuvel 	 * we end up running with module randomization disabled.
123f80fb3a3SArd Biesheuvel 	 */
124*fc5a89f7SArd Biesheuvel 	module_alloc_base = (u64)_etext - MODULES_VSIZE;
125*fc5a89f7SArd Biesheuvel 
126*fc5a89f7SArd Biesheuvel 	if (kaslr_feature_override.val & kaslr_feature_override.mask & 0xf) {
127*fc5a89f7SArd Biesheuvel 		pr_info("KASLR disabled on command line\n");
128*fc5a89f7SArd Biesheuvel 		return 0;
129*fc5a89f7SArd Biesheuvel 	}
130*fc5a89f7SArd Biesheuvel 
131*fc5a89f7SArd Biesheuvel 	if (!kaslr_offset()) {
132*fc5a89f7SArd Biesheuvel 		pr_warn("KASLR disabled due to lack of seed\n");
133*fc5a89f7SArd Biesheuvel 		return 0;
134*fc5a89f7SArd Biesheuvel 	}
135*fc5a89f7SArd Biesheuvel 
136*fc5a89f7SArd Biesheuvel 	pr_info("KASLR enabled\n");
137*fc5a89f7SArd Biesheuvel 
138*fc5a89f7SArd Biesheuvel 	/*
139*fc5a89f7SArd Biesheuvel 	 * KASAN without KASAN_VMALLOC does not expect the module region to
140*fc5a89f7SArd Biesheuvel 	 * intersect the vmalloc region, since shadow memory is allocated for
141*fc5a89f7SArd Biesheuvel 	 * each module at load time, whereas the vmalloc region will already be
142*fc5a89f7SArd Biesheuvel 	 * shadowed by KASAN zero pages.
143*fc5a89f7SArd Biesheuvel 	 */
144*fc5a89f7SArd Biesheuvel 	BUILD_BUG_ON((IS_ENABLED(CONFIG_KASAN_GENERIC) ||
145*fc5a89f7SArd Biesheuvel 	              IS_ENABLED(CONFIG_KASAN_SW_TAGS)) &&
146*fc5a89f7SArd Biesheuvel 		     !IS_ENABLED(CONFIG_KASAN_VMALLOC));
147*fc5a89f7SArd Biesheuvel 
148*fc5a89f7SArd Biesheuvel 	seed = get_random_u32();
149f80fb3a3SArd Biesheuvel 
150f80fb3a3SArd Biesheuvel 	if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) {
151f80fb3a3SArd Biesheuvel 		/*
152b2eed9b5SArd Biesheuvel 		 * Randomize the module region over a 2 GB window covering the
153f2b9ba87SArd Biesheuvel 		 * kernel. This reduces the risk of modules leaking information
154f80fb3a3SArd Biesheuvel 		 * about the address of the kernel itself, but results in
155f80fb3a3SArd Biesheuvel 		 * branches between modules and the core kernel that are
156f80fb3a3SArd Biesheuvel 		 * resolved via PLTs. (Branches between modules will be
157f80fb3a3SArd Biesheuvel 		 * resolved normally.)
158f80fb3a3SArd Biesheuvel 		 */
159b2eed9b5SArd Biesheuvel 		module_range = SZ_2G - (u64)(_end - _stext);
160*fc5a89f7SArd Biesheuvel 		module_alloc_base = max((u64)_end - SZ_2G, (u64)MODULES_VADDR);
161f80fb3a3SArd Biesheuvel 	} else {
162f80fb3a3SArd Biesheuvel 		/*
163f80fb3a3SArd Biesheuvel 		 * Randomize the module region by setting module_alloc_base to
164f80fb3a3SArd Biesheuvel 		 * a PAGE_SIZE multiple in the range [_etext - MODULES_VSIZE,
165f80fb3a3SArd Biesheuvel 		 * _stext) . This guarantees that the resulting region still
166f80fb3a3SArd Biesheuvel 		 * covers [_stext, _etext], and that all relative branches can
167f9c4ff2aSBarry Song 		 * be resolved without veneers unless this region is exhausted
168f9c4ff2aSBarry Song 		 * and we fall back to a larger 2GB window in module_alloc()
169f9c4ff2aSBarry Song 		 * when ARM64_MODULE_PLTS is enabled.
170f80fb3a3SArd Biesheuvel 		 */
171f80fb3a3SArd Biesheuvel 		module_range = MODULES_VSIZE - (u64)(_etext - _stext);
172f80fb3a3SArd Biesheuvel 	}
173f80fb3a3SArd Biesheuvel 
174f80fb3a3SArd Biesheuvel 	/* use the lower 21 bits to randomize the base of the module region */
175f80fb3a3SArd Biesheuvel 	module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
176f80fb3a3SArd Biesheuvel 	module_alloc_base &= PAGE_MASK;
177f80fb3a3SArd Biesheuvel 
178294a9dddSMark Brown 	return 0;
179294a9dddSMark Brown }
180*fc5a89f7SArd Biesheuvel subsys_initcall(kaslr_init)
181