// SPDX-License-Identifier: GPL-2.0
/*
 * kaslr.c
 *
 * This contains the routines needed to generate a reasonable level of
 * entropy to choose a randomized kernel base address offset in support
 * of Kernel Address Space Layout Randomization (KASLR). Additionally
 * handles walking the physical memory maps (and tracking memory regions
 * to avoid) in order to select a physical memory location that can
 * contain the entire properly aligned running kernel image.
 *
 */

/*
 * isspace() in linux/ctype.h is expected by next_arg() to filter
 * out "space/lf/tab". boot/ctype.h conflicts with linux/ctype.h,
 * since isdigit() is implemented in both of them, so disable the
 * boot version here.
 */
#define BOOT_CTYPE_H

/*
 * _ctype[] in lib/ctype.c is needed by isspace() of linux/ctype.h.
 * Both lib/ctype.c and lib/cmdline.c would pull in EXPORT_SYMBOL,
 * which is meaningless in this environment and causes compile errors
 * in some cases, so disable it here.
 */
#define __DISABLE_EXPORTS

#include "misc.h"
#include "error.h"
#include "../string.h"

#include <generated/compile.h>
#include <linux/module.h>
#include <linux/uts.h>
#include <linux/utsname.h>
#include <linux/ctype.h>
#include <linux/efi.h>
#include <generated/utsrelease.h>
#include <asm/efi.h>

/* Macros used by the included decompressor code below. */
#define STATIC
#include <linux/decompress/mm.h>

#ifdef CONFIG_X86_5LEVEL
unsigned int __pgtable_l5_enabled;
unsigned int pgdir_shift __ro_after_init = 39;
unsigned int ptrs_per_p4d __ro_after_init = 1;
#endif

extern unsigned long get_cmd_line_ptr(void);

/* Used by PAGE_KERN* macros: */
pteval_t __default_kernel_pte_mask __read_mostly = ~0;

/* Simplified build-specific string for starting entropy. */
static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@"
		LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION;

static unsigned long rotate_xor(unsigned long hash, const void *area,
				size_t size)
{
	size_t i;
	unsigned long *ptr = (unsigned long *)area;

	for (i = 0; i < size / sizeof(hash); i++) {
		/* Rotate by odd number of bits and XOR. */
		hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7);
		hash ^= ptr[i];
	}

	return hash;
}
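
/*
 * For illustration: with 64-bit unsigned longs, the statement above is a
 * right-rotation by 7 bits. Starting from hash = 0x1 with a single input
 * word ptr[0] = 0x2:
 *
 *	hash = (0x1 << 57) | (0x1 >> 7)	-> 0x0200000000000000
 *	hash ^= 0x2			-> 0x0200000000000002
 *
 * Since gcd(7, 64) == 1, repeated rotation cycles a bit through every
 * position in the word, mixing all input words across the whole hash.
 */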

/* Attempt to create a simple but unpredictable starting entropy. */
static unsigned long get_boot_seed(void)
{
	unsigned long hash = 0;

	hash = rotate_xor(hash, build_str, sizeof(build_str));
	hash = rotate_xor(hash, boot_params, sizeof(*boot_params));

	return hash;
}

#define KASLR_COMPRESSED_BOOT
#include "../../lib/kaslr.c"

struct mem_vector {
	unsigned long long start;
	unsigned long long size;
};

/* Only supporting at most 4 unusable memmap regions with kaslr */
#define MAX_MEMMAP_REGIONS	4

static bool memmap_too_large;

/* Store memory limit specified by "mem=nn[KMG]" or "memmap=nn[KMG]" */
static unsigned long long mem_limit = ULLONG_MAX;

enum mem_avoid_index {
	MEM_AVOID_ZO_RANGE = 0,
	MEM_AVOID_INITRD,
	MEM_AVOID_CMDLINE,
	MEM_AVOID_BOOTPARAMS,
	MEM_AVOID_MEMMAP_BEGIN,
	MEM_AVOID_MEMMAP_END = MEM_AVOID_MEMMAP_BEGIN + MAX_MEMMAP_REGIONS - 1,
	MEM_AVOID_MAX,
};

static struct mem_vector mem_avoid[MEM_AVOID_MAX];

static bool mem_overlaps(struct mem_vector *one, struct mem_vector *two)
{
	/* Item one is entirely before item two. */
	if (one->start + one->size <= two->start)
		return false;
	/* Item one is entirely after item two. */
	if (one->start >= two->start + two->size)
		return false;
	return true;
}
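
/*
 * For illustration: [0x1000, +0x1000) and [0x1800, +0x1000) overlap, while
 * [0x1000, +0x1000) and [0x2000, +0x1000) do not, since the first region
 * ends exactly where the second begins (the end address is exclusive).
 */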

char *skip_spaces(const char *str)
{
	while (isspace(*str))
		++str;
	return (char *)str;
}
#include "../../../../lib/ctype.c"
#include "../../../../lib/cmdline.c"

static int
parse_memmap(char *p, unsigned long long *start, unsigned long long *size)
{
	char *oldp;

	if (!p)
		return -EINVAL;

	/* We don't care about this option here */
	if (!strncmp(p, "exactmap", 8))
		return -EINVAL;

	oldp = p;
	*size = memparse(p, &p);
	if (p == oldp)
		return -EINVAL;

	switch (*p) {
	case '#':
	case '$':
	case '!':
		*start = memparse(p + 1, &p);
		return 0;
	case '@':
		/* memmap=nn@ss specifies a usable region and should be skipped. */
		*size = 0;
		/* Fall through */
	default:
		/*
		 * Without an offset (i.e. only a size specified),
		 * memmap=nn[KMG] behaves the same as mem=nn[KMG]: it limits
		 * the maximum address the system can use. Regions above the
		 * limit must be avoided.
		 */
		*start = 0;
		return 0;
	}

	return -EINVAL;
}
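
/*
 * For illustration: parsing "64K$0x100000" yields *size = 0x10000 and
 * *start = 0x100000 (a reserved region to avoid), while parsing "1G" alone
 * yields *size = 0x40000000 and *start = 0, which the caller treats as a
 * memory limit rather than a region.
 */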

static void mem_avoid_memmap(char *str)
{
	static int i;

	if (i >= MAX_MEMMAP_REGIONS)
		return;

	while (str && (i < MAX_MEMMAP_REGIONS)) {
		int rc;
		unsigned long long start, size;
		char *k = strchr(str, ',');

		if (k)
			*k++ = 0;

		rc = parse_memmap(str, &start, &size);
		if (rc < 0)
			break;
		str = k;

		if (start == 0) {
			/* Store the specified memory limit if size > 0 */
			if (size > 0)
				mem_limit = size;

			continue;
		}

		mem_avoid[MEM_AVOID_MEMMAP_BEGIN + i].start = start;
		mem_avoid[MEM_AVOID_MEMMAP_BEGIN + i].size = size;
		i++;
	}

	/* More than MAX_MEMMAP_REGIONS memmap= regions: disable KASLR. */
	if ((i >= MAX_MEMMAP_REGIONS) && str)
		memmap_too_large = true;
}
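
/*
 * For illustration: "memmap=16M$0x10000000,16M$0x20000000" records two
 * regions to avoid, while an entry like "memmap=1G" (start == 0) only
 * updates mem_limit. If entries remain after MAX_MEMMAP_REGIONS regions
 * have been recorded, memmap_too_large is set and physical KASLR is
 * later aborted in find_random_phys_addr().
 */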

/* Store the number of 1GB huge pages the user specified: */
static unsigned long max_gb_huge_pages;

static void parse_gb_huge_pages(char *param, char *val)
{
	static bool gbpage_sz;
	char *p;

	if (!strcmp(param, "hugepagesz")) {
		p = val;
		if (memparse(p, &p) != PUD_SIZE) {
			gbpage_sz = false;
			return;
		}

		if (gbpage_sz)
			warn("Repeatedly set hugeTLB page size of 1G!\n");
		gbpage_sz = true;
		return;
	}

	if (!strcmp(param, "hugepages") && gbpage_sz) {
		p = val;
		max_gb_huge_pages = simple_strtoull(p, &p, 0);
		return;
	}
}
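
/*
 * For illustration: with "hugepagesz=1G hugepages=4" on the command line,
 * the first call sets gbpage_sz and the second sets max_gb_huge_pages = 4,
 * so up to four 1GB-aligned gigabyte ranges will later be kept out of the
 * KASLR slot pool (see process_gb_huge_pages() below).
 */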

static void handle_mem_options(void)
{
	char *args = (char *)get_cmd_line_ptr();
	size_t len = strlen((char *)args);
	char *tmp_cmdline;
	char *param, *val;
	u64 mem_size;

	if (!strstr(args, "memmap=") && !strstr(args, "mem=") &&
		!strstr(args, "hugepages"))
		return;

	tmp_cmdline = malloc(len + 1);
	if (!tmp_cmdline)
		error("Failed to allocate space for tmp_cmdline");

	memcpy(tmp_cmdline, args, len);
	tmp_cmdline[len] = 0;
	args = tmp_cmdline;

	/* Chew leading spaces */
	args = skip_spaces(args);

	while (*args) {
		args = next_arg(args, &param, &val);
		/* Stop at -- */
		if (!val && strcmp(param, "--") == 0) {
			warn("Only '--' specified in cmdline");
			goto out;
		}

		if (!strcmp(param, "memmap")) {
			mem_avoid_memmap(val);
		} else if (strstr(param, "hugepages")) {
			parse_gb_huge_pages(param, val);
		} else if (!strcmp(param, "mem")) {
			char *p = val;

			if (!strcmp(p, "nopentium"))
				continue;
			mem_size = memparse(p, &p);
			if (mem_size == 0)
				goto out;

			mem_limit = mem_size;
		}
	}

out:
	free(tmp_cmdline);
	return;
}

/*
 * In theory, KASLR can put the kernel anywhere in the range of [16M, 64T).
 * The mem_avoid array is used to store the ranges that need to be avoided
 * when KASLR searches for an appropriate random address. We must avoid any
 * regions that are unsafe to overlap with during decompression, and other
 * things like the initrd, cmdline and boot_params. This comment seeks to
 * explain mem_avoid as clearly as possible since incorrect mem_avoid
 * memory ranges lead to really hard to debug boot failures.
 *
 * The initrd, cmdline, and boot_params are trivial to identify for
 * avoiding. They are MEM_AVOID_INITRD, MEM_AVOID_CMDLINE, and
 * MEM_AVOID_BOOTPARAMS respectively below.
 *
 * What is less obvious is how to avoid the range of memory that is used
 * during decompression (MEM_AVOID_ZO_RANGE below). This range must cover
 * the compressed kernel (ZO) and its run space, which is used to extract
 * the uncompressed kernel (VO) and relocs.
 *
 * ZO's full run size sits against the end of the decompression buffer, so
 * we can calculate where text, data, bss, etc. of ZO are positioned more
 * easily.
 *
 * For additional background, the decompression calculations can be found
 * in header.S, and the memory diagram is based on the one found in misc.c.
 *
 * The following conditions are already enforced by the image layouts and
 * associated code:
 *  - input + input_size >= output + output_size
 *  - kernel_total_size <= init_size
 *  - kernel_total_size <= output_size (see Note below)
 *  - output + init_size >= output + output_size
 *
 * (Note that kernel_total_size and output_size have no fundamental
 * relationship, but output_size is passed to choose_random_location
 * as a maximum of the two. The diagram is showing a case where
 * kernel_total_size is larger than output_size, but this case is
 * handled by bumping output_size.)
 *
 * The above conditions can be illustrated by a diagram:
 *
 * 0   output            input            input+input_size    output+init_size
 * |     |                 |                             |             |
 * |     |                 |                             |             |
 * |-----|--------|--------|--------------|-----------|--|-------------|
 *                |                       |           |
 *                |                       |           |
 * output+init_size-ZO_INIT_SIZE  output+output_size  output+kernel_total_size
 *
 * [output, output+init_size) is the entire memory range used for
 * extracting the compressed image.
 *
 * [output, output+kernel_total_size) is the range needed for the
 * uncompressed kernel (VO) and its run size (bss, brk, etc).
 *
 * [output, output+output_size) is VO plus relocs (i.e. the entire
 * uncompressed payload contained by ZO). This is the area of the buffer
 * written to during decompression.
 *
 * [output+init_size-ZO_INIT_SIZE, output+init_size) is the worst-case
 * range of the copied ZO and decompression code. (i.e. the range
 * covered backwards of size ZO_INIT_SIZE, starting from output+init_size.)
 *
 * [input, input+input_size) is the original copied compressed image (ZO)
 * (i.e. it does not include its run size). This range must be avoided
 * because it contains the data used for decompression.
 *
 * [input+input_size, output+init_size) is [_text, _end) for ZO. This
 * range includes ZO's heap and stack, and must be avoided since it
 * performs the decompression.
 *
 * Since the above two ranges need to be avoided and they are adjacent,
 * they can be merged, resulting in: [input, output+init_size) which
 * becomes the MEM_AVOID_ZO_RANGE below.
 */
static void mem_avoid_init(unsigned long input, unsigned long input_size,
			   unsigned long output)
{
	unsigned long init_size = boot_params->hdr.init_size;
	u64 initrd_start, initrd_size;
	u64 cmd_line, cmd_line_size;
	char *ptr;

	/*
	 * Avoid the region that is unsafe to overlap during
	 * decompression.
	 */
	mem_avoid[MEM_AVOID_ZO_RANGE].start = input;
	mem_avoid[MEM_AVOID_ZO_RANGE].size = (output + init_size) - input;
	add_identity_map(mem_avoid[MEM_AVOID_ZO_RANGE].start,
			 mem_avoid[MEM_AVOID_ZO_RANGE].size);

	/* Avoid initrd. */
	initrd_start  = (u64)boot_params->ext_ramdisk_image << 32;
	initrd_start |= boot_params->hdr.ramdisk_image;
	initrd_size  = (u64)boot_params->ext_ramdisk_size << 32;
	initrd_size |= boot_params->hdr.ramdisk_size;
	mem_avoid[MEM_AVOID_INITRD].start = initrd_start;
	mem_avoid[MEM_AVOID_INITRD].size = initrd_size;
	/* No need to set mapping for initrd, it will be handled in VO. */

	/* Avoid kernel command line. */
	cmd_line  = (u64)boot_params->ext_cmd_line_ptr << 32;
	cmd_line |= boot_params->hdr.cmd_line_ptr;
	/* Calculate size of cmd_line. */
	ptr = (char *)(unsigned long)cmd_line;
	for (cmd_line_size = 0; ptr[cmd_line_size++];)
		;
	mem_avoid[MEM_AVOID_CMDLINE].start = cmd_line;
	mem_avoid[MEM_AVOID_CMDLINE].size = cmd_line_size;
	add_identity_map(mem_avoid[MEM_AVOID_CMDLINE].start,
			 mem_avoid[MEM_AVOID_CMDLINE].size);

	/* Avoid boot parameters. */
	mem_avoid[MEM_AVOID_BOOTPARAMS].start = (unsigned long)boot_params;
	mem_avoid[MEM_AVOID_BOOTPARAMS].size = sizeof(*boot_params);
	add_identity_map(mem_avoid[MEM_AVOID_BOOTPARAMS].start,
			 mem_avoid[MEM_AVOID_BOOTPARAMS].size);

	/* We don't need to set a mapping for setup_data. */

	/* Mark the memmap regions we need to avoid */
	handle_mem_options();

#ifdef CONFIG_X86_VERBOSE_BOOTUP
	/* Make sure video RAM can be used. */
	add_identity_map(0, PMD_SIZE);
#endif
}
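
/*
 * For illustration, with hypothetical values: if ZO was copied to
 * input = 0x3000000 with input_size = 0x800000, and the kernel will be
 * extracted at output = 0x1000000 with init_size = 0x3000000, then
 * MEM_AVOID_ZO_RANGE is [0x3000000, 0x4000000): ZO itself plus its heap,
 * stack and BSS, up to output + init_size per the merged range described
 * in the comment above.
 */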

/*
 * Does this memory vector overlap a known avoided area? If so, record the
 * overlap region with the lowest address.
 */
static bool mem_avoid_overlap(struct mem_vector *img,
			      struct mem_vector *overlap)
{
	int i;
	struct setup_data *ptr;
	unsigned long earliest = img->start + img->size;
	bool is_overlapping = false;

	for (i = 0; i < MEM_AVOID_MAX; i++) {
		if (mem_overlaps(img, &mem_avoid[i]) &&
		    mem_avoid[i].start < earliest) {
			*overlap = mem_avoid[i];
			earliest = overlap->start;
			is_overlapping = true;
		}
	}

	/* Avoid all entries in the setup_data linked list. */
	ptr = (struct setup_data *)(unsigned long)boot_params->hdr.setup_data;
	while (ptr) {
		struct mem_vector avoid;

		avoid.start = (unsigned long)ptr;
		avoid.size = sizeof(*ptr) + ptr->len;

		if (mem_overlaps(img, &avoid) && (avoid.start < earliest)) {
			*overlap = avoid;
			earliest = overlap->start;
			is_overlapping = true;
		}

		ptr = (struct setup_data *)(unsigned long)ptr->next;
	}

	return is_overlapping;
}
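
/*
 * For illustration: if img = [0x1000000, +0x2000000) and two avoided areas
 * start at 0x1800000 and 0x1400000, both overlap img, and *overlap ends up
 * holding the 0x1400000 entry because 'earliest' only moves toward the
 * lowest overlapping start address.
 */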

struct slot_area {
	unsigned long addr;
	int num;
};

#define MAX_SLOT_AREA 100

static struct slot_area slot_areas[MAX_SLOT_AREA];

static unsigned long slot_max;

static unsigned long slot_area_index;

static void store_slot_info(struct mem_vector *region, unsigned long image_size)
{
	struct slot_area slot_area;

	if (slot_area_index == MAX_SLOT_AREA)
		return;

	slot_area.addr = region->start;
	slot_area.num = (region->size - image_size) /
			CONFIG_PHYSICAL_ALIGN + 1;

	if (slot_area.num > 0) {
		slot_areas[slot_area_index++] = slot_area;
		slot_max += slot_area.num;
	}
}
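
/*
 * For illustration: with CONFIG_PHYSICAL_ALIGN = 0x200000 (2M), a region of
 * size 0xA00000 (10M) and an image_size of 0x600000 (6M) yield
 * (0xA00000 - 0x600000) / 0x200000 + 1 = 3 slots: the image can start at
 * offset 0, 2M or 4M into the region. Callers only pass regions already
 * known to hold at least image_size bytes.
 */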

/*
 * Skip as many 1GB huge pages as possible in the passed region
 * according to the number the user specified:
 */
static void
process_gb_huge_pages(struct mem_vector *region, unsigned long image_size)
{
	unsigned long addr, size = 0;
	struct mem_vector tmp;
	int i = 0;

	if (!max_gb_huge_pages) {
		store_slot_info(region, image_size);
		return;
	}

	addr = ALIGN(region->start, PUD_SIZE);
	/* Did we raise the address above the passed-in memory entry? */
	if (addr < region->start + region->size)
		size = region->size - (addr - region->start);

	/* Check how many 1GB huge pages can be filtered out: */
	while (size > PUD_SIZE && max_gb_huge_pages) {
		size -= PUD_SIZE;
		max_gb_huge_pages--;
		i++;
	}

	/* No good 1GB huge pages found: */
	if (!i) {
		store_slot_info(region, image_size);
		return;
	}

	/*
	 * Skip those 'i'*1GB good huge pages, and continue checking and
	 * processing the remaining head or tail part of the passed region
	 * if available.
	 */

	if (addr >= region->start + image_size) {
		tmp.start = region->start;
		tmp.size = addr - region->start;
		store_slot_info(&tmp, image_size);
	}

	size  = region->size - (addr - region->start) - i * PUD_SIZE;
	if (size >= image_size) {
		tmp.start = addr + i * PUD_SIZE;
		tmp.size = size;
		store_slot_info(&tmp, image_size);
	}
}
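
/*
 * For illustration: with max_gb_huge_pages = 1 and a region of
 * [0x80000000, +0x80000000) (2G starting at 2G, already 1G-aligned), one
 * 1GB page is skipped and only the tail [0xC0000000, +0x40000000) is handed
 * to store_slot_info(), leaving [0x80000000, 0xC0000000) free to be mapped
 * as a 1GB huge page later.
 */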

static unsigned long slots_fetch_random(void)
{
	unsigned long slot;
	int i;

	/* Handle case of no slots stored. */
	if (slot_max == 0)
		return 0;

	slot = kaslr_get_random_long("Physical") % slot_max;

	for (i = 0; i < slot_area_index; i++) {
		if (slot >= slot_areas[i].num) {
			slot -= slot_areas[i].num;
			continue;
		}
		return slot_areas[i].addr + slot * CONFIG_PHYSICAL_ALIGN;
	}

	if (i == slot_area_index)
		debug_putstr("slots_fetch_random() failed!?\n");
	return 0;
}
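
/*
 * For illustration: with two slot areas of num = 3 and num = 5
 * (slot_max = 8), a random draw of slot = 4 skips the first area
 * (4 - 3 = 1) and resolves to the second area's address plus
 * 1 * CONFIG_PHYSICAL_ALIGN.
 */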

static void process_mem_region(struct mem_vector *entry,
			       unsigned long minimum,
			       unsigned long image_size)
{
	struct mem_vector region, overlap;
	unsigned long start_orig, end;
	struct mem_vector cur_entry;

	/* On 32-bit, ignore entries entirely above our maximum. */
	if (IS_ENABLED(CONFIG_X86_32) && entry->start >= KERNEL_IMAGE_SIZE)
		return;

	/* Ignore entries entirely below our minimum. */
	if (entry->start + entry->size < minimum)
		return;

	/* Ignore entries above memory limit */
	end = min(entry->size + entry->start, mem_limit);
	if (entry->start >= end)
		return;
	cur_entry.start = entry->start;
	cur_entry.size = end - entry->start;

	region.start = cur_entry.start;
	region.size = cur_entry.size;

	/* Give up if slot area array is full. */
	while (slot_area_index < MAX_SLOT_AREA) {
		start_orig = region.start;

		/* Potentially raise address to minimum location. */
		if (region.start < minimum)
			region.start = minimum;

		/* Potentially raise address to meet alignment needs. */
		region.start = ALIGN(region.start, CONFIG_PHYSICAL_ALIGN);

		/* Did we raise the address above the passed-in memory entry? */
		if (region.start > cur_entry.start + cur_entry.size)
			return;

		/* Reduce size by any delta from the original address. */
		region.size -= region.start - start_orig;

		/* On 32-bit, reduce region size to fit within max size. */
		if (IS_ENABLED(CONFIG_X86_32) &&
		    region.start + region.size > KERNEL_IMAGE_SIZE)
			region.size = KERNEL_IMAGE_SIZE - region.start;

		/* Return if region can't contain decompressed kernel */
		if (region.size < image_size)
			return;

		/* If nothing overlaps, store the region and return. */
		if (!mem_avoid_overlap(&region, &overlap)) {
			process_gb_huge_pages(&region, image_size);
			return;
		}

		/* Store the beginning of the region if it holds at least image_size. */
		if (overlap.start > region.start + image_size) {
			struct mem_vector beginning;

			beginning.start = region.start;
			beginning.size = overlap.start - region.start;
			process_gb_huge_pages(&beginning, image_size);
		}

		/* Return if overlap extends to or past end of region. */
		if (overlap.start + overlap.size >= region.start + region.size)
			return;

		/* Clip off the overlapping region and start over. */
		region.size -= overlap.start - region.start + overlap.size;
		region.start = overlap.start + overlap.size;
	}
}
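
/*
 * For illustration: if region = [0x1000000, +0x4000000) and the only
 * avoided area is [0x2000000, +0x1000000), the first pass stores
 * [0x1000000, 0x2000000) (the part before the overlap, assuming image_size
 * fits below it), then clips the region to [0x3000000, +0x2000000) and
 * loops to process the remainder.
 */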

#ifdef CONFIG_EFI
/*
 * Returns true if the EFI memory map was processed for slot adding.
 * If any mirror regions were found, only they have been used, since
 * mirrored memory is preferred.
 */
static bool
process_efi_entries(unsigned long minimum, unsigned long image_size)
{
	struct efi_info *e = &boot_params->efi_info;
	bool efi_mirror_found = false;
	struct mem_vector region;
	efi_memory_desc_t *md;
	unsigned long pmap;
	char *signature;
	u32 nr_desc;
	int i;

	signature = (char *)&e->efi_loader_signature;
	if (strncmp(signature, EFI32_LOADER_SIGNATURE, 4) &&
	    strncmp(signature, EFI64_LOADER_SIGNATURE, 4))
		return false;

#ifdef CONFIG_X86_32
	/* Can't handle data above 4GB at this time */
	if (e->efi_memmap_hi) {
		warn("EFI memmap is above 4GB, can't be handled now on x86_32. EFI should be disabled.\n");
		return false;
	}
	pmap = e->efi_memmap;
#else
	pmap = (e->efi_memmap | ((__u64)e->efi_memmap_hi << 32));
#endif

	nr_desc = e->efi_memmap_size / e->efi_memdesc_size;
	for (i = 0; i < nr_desc; i++) {
		md = efi_early_memdesc_ptr(pmap, e->efi_memdesc_size, i);
		if (md->attribute & EFI_MEMORY_MORE_RELIABLE) {
			efi_mirror_found = true;
			break;
		}
	}

	for (i = 0; i < nr_desc; i++) {
		md = efi_early_memdesc_ptr(pmap, e->efi_memdesc_size, i);

		/*
		 * Here we are more conservative in picking free memory than
		 * the EFI spec allows:
		 *
		 * According to the spec, EFI_BOOT_SERVICES_{CODE|DATA} are also
		 * free memory and thus available to place the kernel image into,
		 * but in practice there's firmware where using that memory leads
		 * to crashes.
		 *
		 * Only EFI_CONVENTIONAL_MEMORY is guaranteed to be free.
		 */
		if (md->type != EFI_CONVENTIONAL_MEMORY)
			continue;

		if (efi_mirror_found &&
		    !(md->attribute & EFI_MEMORY_MORE_RELIABLE))
			continue;

		region.start = md->phys_addr;
		region.size = md->num_pages << EFI_PAGE_SHIFT;
		process_mem_region(&region, minimum, image_size);
		if (slot_area_index == MAX_SLOT_AREA) {
			debug_putstr("Aborted EFI scan (slot_areas full)!\n");
			break;
		}
	}
	return true;
}
#else
static inline bool
process_efi_entries(unsigned long minimum, unsigned long image_size)
{
	return false;
}
#endif

static void process_e820_entries(unsigned long minimum,
				 unsigned long image_size)
{
	int i;
	struct mem_vector region;
	struct boot_e820_entry *entry;

	/* Verify potential e820 positions, appending to slots list. */
	for (i = 0; i < boot_params->e820_entries; i++) {
		entry = &boot_params->e820_table[i];
		/* Skip non-RAM entries. */
		if (entry->type != E820_TYPE_RAM)
			continue;
		region.start = entry->addr;
		region.size = entry->size;
		process_mem_region(&region, minimum, image_size);
		if (slot_area_index == MAX_SLOT_AREA) {
			debug_putstr("Aborted e820 scan (slot_areas full)!\n");
			break;
		}
	}
}

static unsigned long find_random_phys_addr(unsigned long minimum,
					   unsigned long image_size)
{
	/* Check if we had too many memmaps. */
	if (memmap_too_large) {
		debug_putstr("Aborted memory entries scan (more than 4 memmap= args)!\n");
		return 0;
	}

	/* Make sure minimum is aligned. */
	minimum = ALIGN(minimum, CONFIG_PHYSICAL_ALIGN);

	if (process_efi_entries(minimum, image_size))
		return slots_fetch_random();

	process_e820_entries(minimum, image_size);
	return slots_fetch_random();
}

static unsigned long find_random_virt_addr(unsigned long minimum,
					   unsigned long image_size)
{
	unsigned long slots, random_addr;

	/* Make sure minimum is aligned. */
	minimum = ALIGN(minimum, CONFIG_PHYSICAL_ALIGN);
	/* Align image_size for easy slot calculations. */
	image_size = ALIGN(image_size, CONFIG_PHYSICAL_ALIGN);

	/*
	 * How many CONFIG_PHYSICAL_ALIGN-sized slots can hold image_size
	 * within the range of minimum to KERNEL_IMAGE_SIZE?
	 */
	slots = (KERNEL_IMAGE_SIZE - minimum - image_size) /
		 CONFIG_PHYSICAL_ALIGN + 1;

	random_addr = kaslr_get_random_long("Virtual") % slots;

	return random_addr * CONFIG_PHYSICAL_ALIGN + minimum;
}
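
/*
 * For illustration: with minimum = 0x1000000 (16M), KERNEL_IMAGE_SIZE = 1G
 * and an aligned image_size of 32M at CONFIG_PHYSICAL_ALIGN = 2M,
 * slots = (1024M - 16M - 32M) / 2M + 1 = 489, so the virtual base lands on
 * one of 489 possible 2M-aligned offsets above 16M.
 */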

/*
 * Since this function examines addresses much more numerically,
 * it takes the input and output pointers as 'unsigned long'.
 */
void choose_random_location(unsigned long input,
			    unsigned long input_size,
			    unsigned long *output,
			    unsigned long output_size,
			    unsigned long *virt_addr)
{
	unsigned long random_addr, min_addr;

	if (cmdline_find_option_bool("nokaslr")) {
		warn("KASLR disabled: 'nokaslr' on cmdline.");
		return;
	}

#ifdef CONFIG_X86_5LEVEL
	if (__read_cr4() & X86_CR4_LA57) {
		__pgtable_l5_enabled = 1;
		pgdir_shift = 48;
		ptrs_per_p4d = 512;
	}
#endif

	boot_params->hdr.loadflags |= KASLR_FLAG;

	/* Prepare to add new identity pagetables on demand. */
	initialize_identity_maps();

	/* Record the various known unsafe memory ranges. */
	mem_avoid_init(input, input_size, *output);

	/*
	 * Low end of the randomization range should be the
	 * smaller of 512M or the initial kernel image
	 * location:
	 */
	min_addr = min(*output, 512UL << 20);

	/* Walk available memory entries to find a random address. */
	random_addr = find_random_phys_addr(min_addr, output_size);
	if (!random_addr) {
		warn("Physical KASLR disabled: no suitable memory region!");
	} else {
		/* Update the new physical address location. */
		if (*output != random_addr) {
			add_identity_map(random_addr, output_size);
			*output = random_addr;
		}

		/*
		 * This loads the identity mapping page table.
		 * This should only be done if a new physical address
		 * is found for the kernel, otherwise we should keep
		 * the old page table so that behaviour matches the
		 * "nokaslr" case.
		 */
		finalize_identity_maps();
	}

	/* Pick random virtual address starting from LOAD_PHYSICAL_ADDR. */
	if (IS_ENABLED(CONFIG_X86_64))
		random_addr = find_random_virt_addr(LOAD_PHYSICAL_ADDR, output_size);
	*virt_addr = random_addr;
}