// SPDX-License-Identifier: GPL-2.0-only
// Copyright 2022 Google LLC
// Author: Ard Biesheuvel <ardb@google.com>

// NOTE: code in this file runs *very* early, and is not permitted to use
// global variables or anything that relies on absolute addressing.

#include <linux/libfdt.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/types.h>
#include <linux/sizes.h>
#include <linux/string.h>

#include <asm/archrandom.h>
#include <asm/memory.h>

/* taken from lib/string.c */
static char *__strstr(const char *s1, const char *s2)
{
	size_t l1, l2;

	l2 = strlen(s2);
	if (!l2)
		return (char *)s1;
	l1 = strlen(s1);
	while (l1 >= l2) {
		l1--;
		if (!memcmp(s1, s2, l2))
			return (char *)s1;
		s1++;
	}
	return NULL;
}

/* Return true if "nokaslr" appears at the start of @cmdline or after a space */
static bool cmdline_contains_nokaslr(const u8 *cmdline)
{
	const u8 *str;

	str = __strstr(cmdline, "nokaslr");
	return str == cmdline || (str > cmdline && *(str - 1) == ' ');
}

/* Check the DT bootargs and/or the built-in command line for "nokaslr" */
static bool is_kaslr_disabled_cmdline(void *fdt)
{
	if (!IS_ENABLED(CONFIG_CMDLINE_FORCE)) {
		int node;
		const u8 *prop;

		node = fdt_path_offset(fdt, "/chosen");
		if (node < 0)
			goto out;

		prop = fdt_getprop(fdt, node, "bootargs", NULL);
		if (!prop)
			goto out;

		if (cmdline_contains_nokaslr(prop))
			return true;

		if (IS_ENABLED(CONFIG_CMDLINE_EXTEND))
			goto out;

		return false;
	}
out:
	return cmdline_contains_nokaslr(CONFIG_CMDLINE);
}

/* Retrieve (and clear) the /chosen/kaslr-seed property from the DT */
static u64 get_kaslr_seed(void *fdt)
{
	int node, len;
	fdt64_t *prop;
	u64 ret;

	node = fdt_path_offset(fdt, "/chosen");
	if (node < 0)
		return 0;

	prop = fdt_getprop_w(fdt, node, "kaslr-seed", &len);
	if (!prop || len != sizeof(u64))
		return 0;

	ret = fdt64_to_cpu(*prop);
	/* wipe the seed from the DT now that it has been consumed */
	*prop = 0;
	return ret;
}

/* Returns the random kernel image offset, or 0 if KASLR is disabled or no seed is available */
asmlinkage u64 kaslr_early_init(void *fdt)
{
	u64 seed;

	if (is_kaslr_disabled_cmdline(fdt))
		return 0;

	seed = get_kaslr_seed(fdt);
	if (!seed) {
		/* fall back to RNDR if the DT did not provide a usable seed */
		if (!__early_cpu_has_rndr() ||
		    !__arm64_rndr((unsigned long *)&seed))
			return 0;
	}

	/*
	 * OK, so we are proceeding with KASLR enabled. Calculate a suitable
	 * kernel image offset from the seed. Let's place the kernel in the
	 * middle half of the VMALLOC area (VA_BITS_MIN - 2), and stay clear of
	 * the lower and upper quarters to avoid colliding with other
	 * allocations.
	 */
	return BIT(VA_BITS_MIN - 3) + (seed & GENMASK(VA_BITS_MIN - 3, 0));
}
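
/*
 * Illustrative arithmetic for the return value above (a sketch assuming
 * VA_BITS_MIN = 48, which is not stated in this file): the expression becomes
 * BIT(45) + (seed & GENMASK(45, 0)), so the offset falls in
 * [2^45, 2^45 + 2^46). Within a 2^47-byte window that range is exactly the
 * middle half: the lowest 2^45-byte quarter is skipped by the BIT(45) term,
 * and the masked seed can never reach the highest quarter.
 */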