// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright (C) 2019 Jason Yan <yanaijie@huawei.com>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/memblock.h>
#include <linux/libfdt.h>
#include <linux/crash_core.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/kdump.h>
#include <mm/mmu_decl.h>
#include <generated/compile.h>
#include <generated/utsrelease.h>

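/*
 * State shared by the helpers below: the physical window we may use, the
 * kernel image size, and the ranges (DTB, initrd, crashkernel and FDT
 * reservations) that a candidate kernel location must not overlap.
 */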
struct regions {
	unsigned long pa_start;
	unsigned long pa_end;
	unsigned long kernel_size;
	unsigned long dtb_start;
	unsigned long dtb_end;
	unsigned long initrd_start;
	unsigned long initrd_end;
	unsigned long crash_start;
	unsigned long crash_end;
	int reserved_mem;
	int reserved_mem_addr_cells;
	int reserved_mem_size_cells;
};

/* Simplified build-specific string for starting entropy. */
static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@"
		LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION;

struct regions __initdata regions;

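/*
 * Pull the kernel command line out of /chosen into boot_command_line so
 * that "nokaslr" can be honoured before any randomization is done.
 */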
static __init void kaslr_get_cmdline(void *fdt)
{
	int node = fdt_path_offset(fdt, "/chosen");

	early_init_dt_scan_chosen(node, "chosen", 1, boot_command_line);
}

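/* Fold @area into @hash one unsigned long at a time, rotating and XORing. */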
static unsigned long __init rotate_xor(unsigned long hash, const void *area,
				       size_t size)
{
	size_t i;
	const unsigned long *ptr = area;

	for (i = 0; i < size / sizeof(hash); i++) {
		/* Rotate by odd number of bits and XOR. */
		hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7);
		hash ^= ptr[i];
	}

	return hash;
}

/*
 * Attempt to create some simple starting entropy. This makes the result
 * different for every build, but it is still not enough. Stronger entropy
 * should be mixed in so that it changes on every boot.
 */
static unsigned long __init get_boot_seed(void *fdt)
{
	unsigned long hash = 0;

	hash = rotate_xor(hash, build_str, sizeof(build_str));
	hash = rotate_xor(hash, fdt, fdt_totalsize(fdt));

	return hash;
}

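/*
 * Read the "kaslr-seed" property from /chosen, then wipe it so the seed
 * cannot be recovered from the device tree later.
 */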
static __init u64 get_kaslr_seed(void *fdt)
{
	int node, len;
	fdt64_t *prop;
	u64 ret;

	node = fdt_path_offset(fdt, "/chosen");
	if (node < 0)
		return 0;

	prop = fdt_getprop_w(fdt, node, "kaslr-seed", &len);
	if (!prop || len != sizeof(u64))
		return 0;

	ret = fdt64_to_cpu(*prop);
	*prop = 0;
	return ret;
}

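/* Inclusive overlap test: true if [s1, e1] intersects [s2, e2]. */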
static __init bool regions_overlap(u32 s1, u32 e1, u32 s2, u32 e2)
{
	return e1 >= s2 && e2 >= s1;
}

static __init bool overlaps_reserved_region(const void *fdt, u32 start,
					    u32 end)
{
	int subnode, len, i;
	u64 base, size;

	/* check for overlap with /memreserve/ entries */
	for (i = 0; i < fdt_num_mem_rsv(fdt); i++) {
		if (fdt_get_mem_rsv(fdt, i, &base, &size) < 0)
			continue;
		if (regions_overlap(start, end, base, base + size))
			return true;
	}

	if (regions.reserved_mem < 0)
		return false;

	/* check for overlap with static reservations in /reserved-memory */
	for (subnode = fdt_first_subnode(fdt, regions.reserved_mem);
	     subnode >= 0;
	     subnode = fdt_next_subnode(fdt, subnode)) {
		const fdt32_t *reg;
		u64 rsv_end;

		len = 0;
		reg = fdt_getprop(fdt, subnode, "reg", &len);
		/* 'len' is in bytes; each entry is addr_cells + size_cells 32-bit cells */
		while (len >= 4 * (regions.reserved_mem_addr_cells +
				   regions.reserved_mem_size_cells)) {
			base = fdt32_to_cpu(reg[0]);
			if (regions.reserved_mem_addr_cells == 2)
				base = (base << 32) | fdt32_to_cpu(reg[1]);

			reg += regions.reserved_mem_addr_cells;
			len -= 4 * regions.reserved_mem_addr_cells;

			size = fdt32_to_cpu(reg[0]);
			if (regions.reserved_mem_size_cells == 2)
				size = (size << 32) | fdt32_to_cpu(reg[1]);

			reg += regions.reserved_mem_size_cells;
			len -= 4 * regions.reserved_mem_size_cells;

			if (base >= regions.pa_end)
				continue;

			rsv_end = min(base + size, (u64)U32_MAX);

			if (regions_overlap(start, end, base, rsv_end))
				return true;
		}
	}
	return false;
}

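/*
 * Check a candidate range against everything we must not clobber: the
 * running kernel image, the DTB, the initrd, the crashkernel region and
 * the reservations described in the FDT.
 */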
static __init bool overlaps_region(const void *fdt, u32 start,
				   u32 end)
{
	if (regions_overlap(start, end, __pa(_stext), __pa(_end)))
		return true;

	if (regions_overlap(start, end, regions.dtb_start,
			    regions.dtb_end))
		return true;

	if (regions_overlap(start, end, regions.initrd_start,
			    regions.initrd_end))
		return true;

	if (regions_overlap(start, end, regions.crash_start,
			    regions.crash_end))
		return true;

	return overlaps_reserved_region(fdt, start, end);
}

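/* Record the crashkernel region requested on the command line, if any. */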
static void __init get_crash_kernel(void *fdt, unsigned long size)
{
#ifdef CONFIG_CRASH_CORE
	unsigned long long crash_size, crash_base;
	int ret;

	ret = parse_crashkernel(boot_command_line, size, &crash_size,
				&crash_base);
	if (ret != 0 || crash_size == 0)
		return;
	if (crash_base == 0)
		crash_base = KDUMP_KERNELBASE;

	regions.crash_start = (unsigned long)crash_base;
	regions.crash_end = (unsigned long)(crash_base + crash_size);

	pr_debug("crash_base=0x%llx crash_size=0x%llx\n", crash_base, crash_size);
#endif
}

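/* Record the initrd range from /chosen so the new kernel does not land on it. */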
static void __init get_initrd_range(void *fdt)
{
	u64 start, end;
	int node, len;
	const __be32 *prop;

	node = fdt_path_offset(fdt, "/chosen");
	if (node < 0)
		return;

	prop = fdt_getprop(fdt, node, "linux,initrd-start", &len);
	if (!prop)
		return;
	start = of_read_number(prop, len / 4);

	prop = fdt_getprop(fdt, node, "linux,initrd-end", &len);
	if (!prop)
		return;
	end = of_read_number(prop, len / 4);

	regions.initrd_start = (unsigned long)start;
	regions.initrd_end = (unsigned long)end;

	pr_debug("initrd_start=0x%llx  initrd_end=0x%llx\n", start, end);
}

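/*
 * Walk downwards from 'offset' in 16K steps and return the first address
 * at which a kernel of regions.kernel_size fits without overlapping
 * anything we must preserve, or 0 if no such address exists above 'start'.
 */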
static __init unsigned long get_usable_address(const void *fdt,
					       unsigned long start,
					       unsigned long offset)
{
	unsigned long pa;
	unsigned long pa_end;

	for (pa = offset; (long)pa > (long)start; pa -= SZ_16K) {
		pa_end = pa + regions.kernel_size;
		if (overlaps_region(fdt, pa, pa_end))
			continue;

		return pa;
	}
	return 0;
}

static __init void get_cell_sizes(const void *fdt, int node, int *addr_cells,
				  int *size_cells)
{
	const int *prop;
	int len;

	/*
	 * Retrieve the #address-cells and #size-cells properties
	 * from the 'node', or use the default if not provided.
	 */
	*addr_cells = *size_cells = 1;

	prop = fdt_getprop(fdt, node, "#address-cells", &len);
	if (len == 4)
		*addr_cells = fdt32_to_cpu(*prop);
	prop = fdt_getprop(fdt, node, "#size-cells", &len);
	if (len == 4)
		*size_cells = fdt32_to_cpu(*prop);
}

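/*
 * Look for a usable, 16K-aligned address inside the randomly chosen 64M
 * slot; if that slot is fully occupied, fall back to lower slots until one
 * works. Returns an offset relative to memstart_addr, or 0 on failure.
 */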
static unsigned long __init kaslr_legal_offset(void *dt_ptr, unsigned long index,
					       unsigned long offset)
{
	unsigned long koffset = 0;
	unsigned long start;

	while ((long)index >= 0) {
		offset = memstart_addr + index * SZ_64M + offset;
		start = memstart_addr + index * SZ_64M;
		koffset = get_usable_address(dt_ptr, start, offset);
		if (koffset)
			break;
		index--;
	}

	if (koffset != 0)
		koffset -= memstart_addr;

	return koffset;
}

static inline __init bool kaslr_disabled(void)
{
	return strstr(boot_command_line, "nokaslr") != NULL;
}

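/*
 * Gather entropy from the build string, the FDT contents, the timebase and
 * the optional "kaslr-seed" property, then pick a random 16K-aligned offset
 * inside a random 64M slot of the linear mapping that overlaps nothing we
 * need to keep.
 */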
static unsigned long __init kaslr_choose_location(void *dt_ptr, phys_addr_t size,
						  unsigned long kernel_sz)
{
	unsigned long offset, random;
	unsigned long ram, linear_sz;
	u64 seed;
	unsigned long index;

	kaslr_get_cmdline(dt_ptr);
	if (kaslr_disabled())
		return 0;

	random = get_boot_seed(dt_ptr);

	seed = get_tb() << 32;
	seed ^= get_tb();
	random = rotate_xor(random, &seed, sizeof(seed));

	/*
	 * Retrieve (and wipe) the seed from the FDT
	 */
	seed = get_kaslr_seed(dt_ptr);
	if (seed)
		random = rotate_xor(random, &seed, sizeof(seed));
	else
		pr_warn("KASLR: No safe seed for randomizing the kernel base.\n");

	ram = min_t(phys_addr_t, __max_low_memory, size);
	ram = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, true);
	linear_sz = min_t(unsigned long, ram, SZ_512M);

	/* If the linear size is smaller than 64M, do not randomize */
	if (linear_sz < SZ_64M)
		return 0;

	/* check for a reserved-memory node and record its cell sizes */
	regions.reserved_mem = fdt_path_offset(dt_ptr, "/reserved-memory");
	if (regions.reserved_mem >= 0)
		get_cell_sizes(dt_ptr, regions.reserved_mem,
			       &regions.reserved_mem_addr_cells,
			       &regions.reserved_mem_size_cells);

	regions.pa_start = memstart_addr;
	regions.pa_end = memstart_addr + linear_sz;
	regions.dtb_start = __pa(dt_ptr);
	regions.dtb_end = __pa(dt_ptr) + fdt_totalsize(dt_ptr);
	regions.kernel_size = kernel_sz;

	get_initrd_range(dt_ptr);
	get_crash_kernel(dt_ptr, ram);

	/*
	 * Decide which 64M slot we want to start in.
	 * Only use the low 8 bits of the random seed.
	 */
	index = random & 0xFF;
	index %= linear_sz / SZ_64M;

	/* Decide the offset inside the 64M slot */
	offset = random % (SZ_64M - kernel_sz);
	offset = round_down(offset, SZ_16K);

	return kaslr_legal_offset(dt_ptr, index, offset);
}

/*
 * Check whether the kernel needs to be relocated to a random offset and,
 * if so, copy it to the new location and enter it there.
 * void *dt_ptr - address of the device tree
 * phys_addr_t size - size of the first memory block
 */
notrace void __init kaslr_early_init(void *dt_ptr, phys_addr_t size)
{
	unsigned long tlb_virt;
	phys_addr_t tlb_phys;
	unsigned long offset;
	unsigned long kernel_sz;

	kernel_sz = (unsigned long)_end - (unsigned long)_stext;

	offset = kaslr_choose_location(dt_ptr, size, kernel_sz);
	if (offset == 0)
		return;

	kernstart_virt_addr += offset;
	kernstart_addr += offset;

	is_second_reloc = 1;

	if (offset >= SZ_64M) {
		tlb_virt = round_down(kernstart_virt_addr, SZ_64M);
		tlb_phys = round_down(kernstart_addr, SZ_64M);

		/* Create kernel map to relocate in */
		create_kaslr_tlb_entry(1, tlb_virt, tlb_phys);
	}

	/* Copy the kernel to its new location and run from there */
	memcpy((void *)kernstart_virt_addr, (void *)_stext, kernel_sz);
	flush_icache_range(kernstart_virt_addr, kernstart_virt_addr + kernel_sz);

	reloc_kernel_entry(dt_ptr, kernstart_virt_addr);
}

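/* If the kernel was relocated, scrub the stale copy at the original location. */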
void __init kaslr_late_init(void)
{
	/* If randomized, clear the original kernel */
	if (kernstart_virt_addr != KERNELBASE) {
		unsigned long kernel_sz;

		kernel_sz = (unsigned long)_end - kernstart_virt_addr;
		memzero_explicit((void *)KERNELBASE, kernel_sz);
	}
}