// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright (C) 2019 Jason Yan <yanaijie@huawei.com>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/memblock.h>
#include <linux/libfdt.h>
#include <linux/crash_core.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <asm/cacheflush.h>
#include <asm/kdump.h>
#include <mm/mmu_decl.h>
#include <generated/compile.h>
#include <generated/utsrelease.h>

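/*
 * Physical ranges that the relocated kernel must not overlap: the current
 * kernel image, the device tree blob, the initrd, the crashkernel window,
 * and any reservations described in the device tree.
 */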
struct regions {
	unsigned long pa_start;
	unsigned long pa_end;
	unsigned long kernel_size;
	unsigned long dtb_start;
	unsigned long dtb_end;
	unsigned long initrd_start;
	unsigned long initrd_end;
	unsigned long crash_start;
	unsigned long crash_end;
	int reserved_mem;
	int reserved_mem_addr_cells;
	int reserved_mem_size_cells;
};

/* Simplified build-specific string for starting entropy. */
static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@"
		LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION;

struct regions __initdata regions;

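/* Populate boot_command_line from the /chosen node of the flattened device tree. */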
static __init void kaslr_get_cmdline(void *fdt)
{
	early_init_dt_scan_chosen(boot_command_line);
}

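/* Fold a memory area into the running hash, one unsigned long at a time. */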
static unsigned long __init rotate_xor(unsigned long hash, const void *area,
				       size_t size)
{
	size_t i;
	const unsigned long *ptr = area;

	for (i = 0; i < size / sizeof(hash); i++) {
		/* Rotate by odd number of bits and XOR. */
		hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7);
		hash ^= ptr[i];
	}

	return hash;
}

/*
 * Attempt to create a simple starting entropy. This can make it different for
 * every build but it is still not enough. Stronger entropy should
 * be added to make it change for every boot.
 */
static unsigned long __init get_boot_seed(void *fdt)
{
	unsigned long hash = 0;

	hash = rotate_xor(hash, build_str, sizeof(build_str));
	hash = rotate_xor(hash, fdt, fdt_totalsize(fdt));

	return hash;
}

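/* Read the "kaslr-seed" property from /chosen and wipe it so it cannot be reused. */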
static __init u64 get_kaslr_seed(void *fdt)
{
	int node, len;
	fdt64_t *prop;
	u64 ret;

	node = fdt_path_offset(fdt, "/chosen");
	if (node < 0)
		return 0;

	prop = fdt_getprop_w(fdt, node, "kaslr-seed", &len);
	if (!prop || len != sizeof(u64))
		return 0;

	ret = fdt64_to_cpu(*prop);
	*prop = 0;
	return ret;
}

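/* Two ranges [s1, e1] and [s2, e2] overlap if neither ends before the other starts. */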
static __init bool regions_overlap(u32 s1, u32 e1, u32 s2, u32 e2)
{
	return e1 >= s2 && e2 >= s1;
}

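/* Check a candidate range against memory reservations recorded in the device tree. */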
static __init bool overlaps_reserved_region(const void *fdt, u32 start,
					    u32 end)
{
	int subnode, len, i;
	u64 base, size;

	/* check for overlap with /memreserve/ entries */
	for (i = 0; i < fdt_num_mem_rsv(fdt); i++) {
		if (fdt_get_mem_rsv(fdt, i, &base, &size) < 0)
			continue;
		if (regions_overlap(start, end, base, base + size))
			return true;
	}

	if (regions.reserved_mem < 0)
		return false;

	/* check for overlap with static reservations in /reserved-memory */
	for (subnode = fdt_first_subnode(fdt, regions.reserved_mem);
	     subnode >= 0;
	     subnode = fdt_next_subnode(fdt, subnode)) {
		const fdt32_t *reg;
		u64 rsv_end;

		len = 0;
		reg = fdt_getprop(fdt, subnode, "reg", &len);
		/* 'len' is in bytes; each cell is 4 bytes wide */
		while (len >= 4 * (regions.reserved_mem_addr_cells +
				   regions.reserved_mem_size_cells)) {
			base = fdt32_to_cpu(reg[0]);
			if (regions.reserved_mem_addr_cells == 2)
				base = (base << 32) | fdt32_to_cpu(reg[1]);

			reg += regions.reserved_mem_addr_cells;
			len -= 4 * regions.reserved_mem_addr_cells;

			size = fdt32_to_cpu(reg[0]);
			if (regions.reserved_mem_size_cells == 2)
				size = (size << 32) | fdt32_to_cpu(reg[1]);

			reg += regions.reserved_mem_size_cells;
			len -= 4 * regions.reserved_mem_size_cells;

			if (base >= regions.pa_end)
				continue;

			rsv_end = min(base + size, (u64)U32_MAX);

			if (regions_overlap(start, end, base, rsv_end))
				return true;
		}
	}
	return false;
}

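/*
 * Check a candidate physical range against everything the relocated kernel
 * must not overwrite: the running kernel image, the DTB, the initrd, the
 * crashkernel window and any device-tree reservations.
 */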
static __init bool overlaps_region(const void *fdt, u32 start,
				   u32 end)
{
	if (regions_overlap(start, end, __pa(_stext), __pa(_end)))
		return true;

	if (regions_overlap(start, end, regions.dtb_start,
			    regions.dtb_end))
		return true;

	if (regions_overlap(start, end, regions.initrd_start,
			    regions.initrd_end))
		return true;

	if (regions_overlap(start, end, regions.crash_start,
			    regions.crash_end))
		return true;

	return overlaps_reserved_region(fdt, start, end);
}

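/* Record the crashkernel region requested on the command line so it is avoided. */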
static void __init get_crash_kernel(void *fdt, unsigned long size)
{
#ifdef CONFIG_CRASH_CORE
	unsigned long long crash_size, crash_base;
	int ret;

	ret = parse_crashkernel(boot_command_line, size, &crash_size,
				&crash_base);
	if (ret != 0 || crash_size == 0)
		return;
	if (crash_base == 0)
		crash_base = KDUMP_KERNELBASE;

	regions.crash_start = (unsigned long)crash_base;
	regions.crash_end = (unsigned long)(crash_base + crash_size);

	pr_debug("crash_base=0x%llx crash_size=0x%llx\n", crash_base, crash_size);
#endif
}

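/* Record the initrd range advertised in /chosen so we do not land on top of it. */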
static void __init get_initrd_range(void *fdt)
{
	u64 start, end;
	int node, len;
	const __be32 *prop;

	node = fdt_path_offset(fdt, "/chosen");
	if (node < 0)
		return;

	prop = fdt_getprop(fdt, node, "linux,initrd-start", &len);
	if (!prop)
		return;
	start = of_read_number(prop, len / 4);

	prop = fdt_getprop(fdt, node, "linux,initrd-end", &len);
	if (!prop)
		return;
	end = of_read_number(prop, len / 4);

	regions.initrd_start = (unsigned long)start;
	regions.initrd_end = (unsigned long)end;

	pr_debug("initrd_start=0x%llx  initrd_end=0x%llx\n", start, end);
}

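/*
 * Walk downwards from 'offset' towards 'start' in 16K steps and return the
 * first kernel-sized window that overlaps nothing we must preserve, or 0 if
 * no such window exists.
 */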
static __init unsigned long get_usable_address(const void *fdt,
					       unsigned long start,
					       unsigned long offset)
{
	unsigned long pa;
	unsigned long pa_end;

	for (pa = offset; (long)pa > (long)start; pa -= SZ_16K) {
		pa_end = pa + regions.kernel_size;
		if (overlaps_region(fdt, pa, pa_end))
			continue;

		return pa;
	}
	return 0;
}

static __init void get_cell_sizes(const void *fdt, int node, int *addr_cells,
				  int *size_cells)
{
	const int *prop;
	int len;

	/*
	 * Retrieve the #address-cells and #size-cells properties
	 * from the 'node', or use the default if not provided.
	 */
	*addr_cells = *size_cells = 1;

	prop = fdt_getprop(fdt, node, "#address-cells", &len);
	if (len == 4)
		*addr_cells = fdt32_to_cpu(*prop);
	prop = fdt_getprop(fdt, node, "#size-cells", &len);
	if (len == 4)
		*size_cells = fdt32_to_cpu(*prop);
}

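/*
 * Starting from the chosen 64M block, fall back block by block until a usable
 * physical address is found. The result is returned as an offset from
 * memstart_addr, or 0 if nothing fits.
 */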
static unsigned long __init kaslr_legal_offset(void *dt_ptr, unsigned long index,
					       unsigned long offset)
{
	unsigned long koffset = 0;
	unsigned long start;

	while ((long)index >= 0) {
		offset = memstart_addr + index * SZ_64M + offset;
		start = memstart_addr + index * SZ_64M;
		koffset = get_usable_address(dt_ptr, start, offset);
		if (koffset)
			break;
		index--;
	}

	if (koffset != 0)
		koffset -= memstart_addr;

	return koffset;
}

static inline __init bool kaslr_disabled(void)
{
	return strstr(boot_command_line, "nokaslr") != NULL;
}

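/*
 * Mix several entropy sources (build string, FDT contents, timebase and the
 * "kaslr-seed" property) and pick a 64M block plus a 16K-aligned offset
 * inside it for the new kernel base.
 */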
static unsigned long __init kaslr_choose_location(void *dt_ptr, phys_addr_t size,
						  unsigned long kernel_sz)
{
	unsigned long offset, random;
	unsigned long ram, linear_sz;
	u64 seed;
	unsigned long index;

	kaslr_get_cmdline(dt_ptr);
	if (kaslr_disabled())
		return 0;

	random = get_boot_seed(dt_ptr);

	seed = get_tb() << 32;
	seed ^= get_tb();
	random = rotate_xor(random, &seed, sizeof(seed));

	/*
	 * Retrieve (and wipe) the seed from the FDT
	 */
	seed = get_kaslr_seed(dt_ptr);
	if (seed)
		random = rotate_xor(random, &seed, sizeof(seed));
	else
		pr_warn("KASLR: No safe seed for randomizing the kernel base.\n");

	ram = min_t(phys_addr_t, __max_low_memory, size);
	ram = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, true, true);
	linear_sz = min_t(unsigned long, ram, SZ_512M);

	/* If the linear size is smaller than 64M, do not randomize */
	if (linear_sz < SZ_64M)
		return 0;

	/* check for a reserved-memory node and record its cell sizes */
	regions.reserved_mem = fdt_path_offset(dt_ptr, "/reserved-memory");
	if (regions.reserved_mem >= 0)
		get_cell_sizes(dt_ptr, regions.reserved_mem,
			       &regions.reserved_mem_addr_cells,
			       &regions.reserved_mem_size_cells);

	regions.pa_start = memstart_addr;
	regions.pa_end = memstart_addr + linear_sz;
	regions.dtb_start = __pa(dt_ptr);
	regions.dtb_end = __pa(dt_ptr) + fdt_totalsize(dt_ptr);
	regions.kernel_size = kernel_sz;

	get_initrd_range(dt_ptr);
	get_crash_kernel(dt_ptr, ram);

	/*
	 * Decide which 64M block to start in.
	 * Only use the low 8 bits of the random seed.
	 */
	index = random & 0xFF;
	index %= linear_sz / SZ_64M;

	/* Decide the offset inside the 64M block */
	offset = random % (SZ_64M - kernel_sz);
	offset = round_down(offset, SZ_16K);

	return kaslr_legal_offset(dt_ptr, index, offset);
}

/*
 * Decide whether we need to relocate the kernel to a random offset.
 * void *dt_ptr - address of the device tree
 * phys_addr_t size - size of the first memory block
 */
notrace void __init kaslr_early_init(void *dt_ptr, phys_addr_t size)
{
	unsigned long tlb_virt;
	phys_addr_t tlb_phys;
	unsigned long offset;
	unsigned long kernel_sz;

	kernel_sz = (unsigned long)_end - (unsigned long)_stext;

	offset = kaslr_choose_location(dt_ptr, size, kernel_sz);
	if (offset == 0)
		return;

	kernstart_virt_addr += offset;
	kernstart_addr += offset;

	is_second_reloc = 1;

	if (offset >= SZ_64M) {
		tlb_virt = round_down(kernstart_virt_addr, SZ_64M);
		tlb_phys = round_down(kernstart_addr, SZ_64M);

		/* Create kernel map to relocate in */
		create_kaslr_tlb_entry(1, tlb_virt, tlb_phys);
	}

	/* Copy the kernel to its new location and run from there */
	memcpy((void *)kernstart_virt_addr, (void *)_stext, kernel_sz);
	flush_icache_range(kernstart_virt_addr, kernstart_virt_addr + kernel_sz);

	reloc_kernel_entry(dt_ptr, kernstart_virt_addr);
}

void __init kaslr_late_init(void)
{
	/* If randomized, clear the original kernel */
	if (kernstart_virt_addr != KERNELBASE) {
		unsigned long kernel_sz;

		kernel_sz = (unsigned long)_end - kernstart_virt_addr;
		memzero_explicit((void *)KERNELBASE, kernel_sz);
	}
}