// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright (C) 2019 Jason Yan <yanaijie@huawei.com>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/memblock.h>
#include <linux/libfdt.h>
#include <linux/crash_core.h>
#include <asm/cacheflush.h>
#include <asm/prom.h>
#include <asm/kdump.h>
#include <mm/mmu_decl.h>
#include <generated/compile.h>
#include <generated/utsrelease.h>

struct regions {
	unsigned long pa_start;
	unsigned long pa_end;
	unsigned long kernel_size;
	unsigned long dtb_start;
	unsigned long dtb_end;
	unsigned long initrd_start;
	unsigned long initrd_end;
	unsigned long crash_start;
	unsigned long crash_end;
	int reserved_mem;
	int reserved_mem_addr_cells;
	int reserved_mem_size_cells;
};

/* Simplified build-specific string for starting entropy. */
static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@"
		LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION;

struct regions __initdata regions;

static __init void kaslr_get_cmdline(void *fdt)
{
	early_init_dt_scan_chosen(boot_command_line);
}

static unsigned long __init rotate_xor(unsigned long hash, const void *area,
				       size_t size)
{
	size_t i;
	const unsigned long *ptr = area;

	for (i = 0; i < size / sizeof(hash); i++) {
		/* Rotate by odd number of bits and XOR. */
		hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7);
		hash ^= ptr[i];
	}

	return hash;
}
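/*
 * Note on rotate_xor(): the shift pair is a right-rotate of 'hash' by
 * 7 bits (on a 64-bit build it reads (hash << 57) | (hash >> 7)).
 * The buffer is consumed one unsigned long at a time, so trailing
 * bytes that do not fill a whole word are ignored.
 */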
/*
 * Attempt to create a simple starting entropy. This can make it different for
 * every build but it is still not enough. Stronger entropy should
 * be added to make it change for every boot.
 */
static unsigned long __init get_boot_seed(void *fdt)
{
	unsigned long hash = 0;

	hash = rotate_xor(hash, build_str, sizeof(build_str));
	hash = rotate_xor(hash, fdt, fdt_totalsize(fdt));

	return hash;
}

static __init u64 get_kaslr_seed(void *fdt)
{
	int node, len;
	fdt64_t *prop;
	u64 ret;

	node = fdt_path_offset(fdt, "/chosen");
	if (node < 0)
		return 0;

	prop = fdt_getprop_w(fdt, node, "kaslr-seed", &len);
	if (!prop || len != sizeof(u64))
		return 0;

	ret = fdt64_to_cpu(*prop);
	*prop = 0;
	return ret;
}

static __init bool regions_overlap(u32 s1, u32 e1, u32 s2, u32 e2)
{
	return e1 >= s2 && e2 >= s1;
}

static __init bool overlaps_reserved_region(const void *fdt, u32 start,
					    u32 end)
{
	int subnode, len, i;
	u64 base, size;

	/* check for overlap with /memreserve/ entries */
	for (i = 0; i < fdt_num_mem_rsv(fdt); i++) {
		if (fdt_get_mem_rsv(fdt, i, &base, &size) < 0)
			continue;
		if (regions_overlap(start, end, base, base + size))
			return true;
	}

	if (regions.reserved_mem < 0)
		return false;

	/* check for overlap with static reservations in /reserved-memory */
	for (subnode = fdt_first_subnode(fdt, regions.reserved_mem);
	     subnode >= 0;
	     subnode = fdt_next_subnode(fdt, subnode)) {
		const fdt32_t *reg;
		u64 rsv_end;

		len = 0;
		reg = fdt_getprop(fdt, subnode, "reg", &len);
		/* 'len' is in bytes; each cell is four bytes. */
		while (len >= 4 * (regions.reserved_mem_addr_cells +
				   regions.reserved_mem_size_cells)) {
			base = fdt32_to_cpu(reg[0]);
			if (regions.reserved_mem_addr_cells == 2)
				base = (base << 32) | fdt32_to_cpu(reg[1]);

			reg += regions.reserved_mem_addr_cells;
			len -= 4 * regions.reserved_mem_addr_cells;

			size = fdt32_to_cpu(reg[0]);
			if (regions.reserved_mem_size_cells == 2)
				size = (size << 32) | fdt32_to_cpu(reg[1]);

			reg += regions.reserved_mem_size_cells;
			len -= 4 * regions.reserved_mem_size_cells;

			if (base >= regions.pa_end)
				continue;

			rsv_end = min(base + size, (u64)U32_MAX);

			if (regions_overlap(start, end, base, rsv_end))
				return true;
		}
	}
	return false;
}

static __init bool overlaps_region(const void *fdt, u32 start,
				   u32 end)
{
	if (regions_overlap(start, end, __pa(_stext), __pa(_end)))
		return true;

	if (regions_overlap(start, end, regions.dtb_start,
			    regions.dtb_end))
		return true;

	if (regions_overlap(start, end, regions.initrd_start,
			    regions.initrd_end))
		return true;

	if (regions_overlap(start, end, regions.crash_start,
			    regions.crash_end))
		return true;

	return overlaps_reserved_region(fdt, start, end);
}
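/*
 * Worked example for the "reg" parsing in overlaps_reserved_region():
 * with #address-cells = 2 and #size-cells = 2, one entry is four
 * 32-bit cells (16 bytes), e.g. reg = <0x0 0x80000000 0x0 0x1000000>
 * describes a 16M reservation at 2G.  'base' is assembled from the
 * first two cells and 'size' from the last two, and the end of the
 * reservation is clamped to U32_MAX because the candidate kernel
 * addresses here are 32-bit physical addresses.
 */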
static void __init get_crash_kernel(void *fdt, unsigned long size)
{
#ifdef CONFIG_CRASH_CORE
	unsigned long long crash_size, crash_base;
	int ret;

	ret = parse_crashkernel(boot_command_line, size, &crash_size,
				&crash_base);
	if (ret != 0 || crash_size == 0)
		return;
	if (crash_base == 0)
		crash_base = KDUMP_KERNELBASE;

	regions.crash_start = (unsigned long)crash_base;
	regions.crash_end = (unsigned long)(crash_base + crash_size);

	pr_debug("crash_base=0x%llx crash_size=0x%llx\n", crash_base, crash_size);
#endif
}

static void __init get_initrd_range(void *fdt)
{
	u64 start, end;
	int node, len;
	const __be32 *prop;

	node = fdt_path_offset(fdt, "/chosen");
	if (node < 0)
		return;

	prop = fdt_getprop(fdt, node, "linux,initrd-start", &len);
	if (!prop)
		return;
	start = of_read_number(prop, len / 4);

	prop = fdt_getprop(fdt, node, "linux,initrd-end", &len);
	if (!prop)
		return;
	end = of_read_number(prop, len / 4);

	regions.initrd_start = (unsigned long)start;
	regions.initrd_end = (unsigned long)end;

	pr_debug("initrd_start=0x%llx initrd_end=0x%llx\n", start, end);
}

static __init unsigned long get_usable_address(const void *fdt,
					       unsigned long start,
					       unsigned long offset)
{
	unsigned long pa;
	unsigned long pa_end;

	/* Walk downwards in 16K steps until a non-overlapping range is found. */
	for (pa = offset; (long)pa > (long)start; pa -= SZ_16K) {
		pa_end = pa + regions.kernel_size;
		if (overlaps_region(fdt, pa, pa_end))
			continue;

		return pa;
	}
	return 0;
}

static __init void get_cell_sizes(const void *fdt, int node, int *addr_cells,
				  int *size_cells)
{
	const fdt32_t *prop;
	int len;

	/*
	 * Retrieve the #address-cells and #size-cells properties
	 * from the 'node', or use the default if not provided.
	 */
	*addr_cells = *size_cells = 1;

	prop = fdt_getprop(fdt, node, "#address-cells", &len);
	if (len == 4)
		*addr_cells = fdt32_to_cpu(*prop);
	prop = fdt_getprop(fdt, node, "#size-cells", &len);
	if (len == 4)
		*size_cells = fdt32_to_cpu(*prop);
}

static unsigned long __init kaslr_legal_offset(void *dt_ptr, unsigned long index,
					       unsigned long offset)
{
	unsigned long koffset = 0;
	unsigned long start;

	/*
	 * Try the chosen 64M block first; if no usable address is found
	 * there, fall back to the blocks below it.
	 */
	while ((long)index >= 0) {
		offset = memstart_addr + index * SZ_64M + offset;
		start = memstart_addr + index * SZ_64M;
		koffset = get_usable_address(dt_ptr, start, offset);
		if (koffset)
			break;
		index--;
	}

	if (koffset != 0)
		koffset -= memstart_addr;

	return koffset;
}

static inline __init bool kaslr_disabled(void)
{
	return strstr(boot_command_line, "nokaslr") != NULL;
}
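/*
 * Randomization layout used below: the low 8 bits of the random value
 * pick one of the 64M blocks in the (at most 512M) linear mapping,
 * and the remaining entropy picks a 16K-aligned offset within that
 * block.  With linear_sz = 256M, for instance, there are four
 * candidate blocks and (64M - kernel_sz) / 16K candidate offsets in
 * each.  kaslr_legal_offset() then searches downwards from the
 * candidate until it finds a range that overlaps nothing recorded in
 * 'regions'.
 */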
static unsigned long __init kaslr_choose_location(void *dt_ptr, phys_addr_t size,
						  unsigned long kernel_sz)
{
	unsigned long offset, random;
	unsigned long ram, linear_sz;
	u64 seed;
	unsigned long index;

	kaslr_get_cmdline(dt_ptr);
	if (kaslr_disabled())
		return 0;

	random = get_boot_seed(dt_ptr);

	seed = get_tb() << 32;
	seed ^= get_tb();
	random = rotate_xor(random, &seed, sizeof(seed));

	/*
	 * Retrieve (and wipe) the seed from the FDT
	 */
	seed = get_kaslr_seed(dt_ptr);
	if (seed)
		random = rotate_xor(random, &seed, sizeof(seed));
	else
		pr_warn("KASLR: No safe seed for randomizing the kernel base.\n");

	ram = min_t(phys_addr_t, __max_low_memory, size);
	ram = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, true, false);
	linear_sz = min_t(unsigned long, ram, SZ_512M);

	/* If the linear size is smaller than 64M, do not randomize */
	if (linear_sz < SZ_64M)
		return 0;

	/* check for a reserved-memory node and record its cell sizes */
	regions.reserved_mem = fdt_path_offset(dt_ptr, "/reserved-memory");
	if (regions.reserved_mem >= 0)
		get_cell_sizes(dt_ptr, regions.reserved_mem,
			       &regions.reserved_mem_addr_cells,
			       &regions.reserved_mem_size_cells);

	regions.pa_start = memstart_addr;
	regions.pa_end = memstart_addr + linear_sz;
	regions.dtb_start = __pa(dt_ptr);
	regions.dtb_end = __pa(dt_ptr) + fdt_totalsize(dt_ptr);
	regions.kernel_size = kernel_sz;

	get_initrd_range(dt_ptr);
	get_crash_kernel(dt_ptr, ram);

	/*
	 * Decide which 64M block we want to start in.
	 * Only use the low 8 bits of the random seed.
	 */
	index = random & 0xFF;
	index %= linear_sz / SZ_64M;

	/* Decide the offset inside the 64M block */
	offset = random % (SZ_64M - kernel_sz);
	offset = round_down(offset, SZ_16K);

	return kaslr_legal_offset(dt_ptr, index, offset);
}

/*
 * Check if we need to relocate the kernel to a random offset
 * void *dt_ptr - address of the device tree
 * phys_addr_t size - size of the first memory block
 */
notrace void __init kaslr_early_init(void *dt_ptr, phys_addr_t size)
{
	unsigned long tlb_virt;
	phys_addr_t tlb_phys;
	unsigned long offset;
	unsigned long kernel_sz;

	kernel_sz = (unsigned long)_end - (unsigned long)_stext;

	offset = kaslr_choose_location(dt_ptr, size, kernel_sz);
	if (offset == 0)
		return;

	kernstart_virt_addr += offset;
	kernstart_addr += offset;

	is_second_reloc = 1;

	if (offset >= SZ_64M) {
		tlb_virt = round_down(kernstart_virt_addr, SZ_64M);
		tlb_phys = round_down(kernstart_addr, SZ_64M);

		/* Create kernel map to relocate in */
		create_kaslr_tlb_entry(1, tlb_virt, tlb_phys);
	}

	/* Copy the kernel to its new location and run there */
	memcpy((void *)kernstart_virt_addr, (void *)_stext, kernel_sz);
	flush_icache_range(kernstart_virt_addr, kernstart_virt_addr + kernel_sz);

	reloc_kernel_entry(dt_ptr, kernstart_virt_addr);
}

void __init kaslr_late_init(void)
{
	/* If randomized, clear the original kernel */
	if (kernstart_virt_addr != KERNELBASE) {
		unsigned long kernel_sz;

		kernel_sz = (unsigned long)_end - kernstart_virt_addr;
		memzero_explicit((void *)KERNELBASE, kernel_sz);
	}
}
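/*
 * Overall flow (as implemented above; the early callers live in the
 * low-level boot code, not in this file): kaslr_early_init() picks an
 * offset, copies the image to the new 16K-aligned location, maps a
 * fresh 64M TLB entry when the copy crosses a 64M boundary, and
 * re-enters the kernel there via reloc_kernel_entry().  Later,
 * kaslr_late_init() zeroes the stale copy at KERNELBASE with
 * memzero_explicit() so the unrandomized image does not linger in
 * memory.
 */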