// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright (C) 2019 Jason Yan <yanaijie@huawei.com>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/memblock.h>
#include <linux/libfdt.h>
#include <linux/crash_core.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <asm/cacheflush.h>
#include <asm/kdump.h>
#include <mm/mmu_decl.h>
#include <generated/utsrelease.h>

struct regions {
	unsigned long pa_start;
	unsigned long pa_end;
	unsigned long kernel_size;
	unsigned long dtb_start;
	unsigned long dtb_end;
	unsigned long initrd_start;
	unsigned long initrd_end;
	unsigned long crash_start;
	unsigned long crash_end;
	int reserved_mem;
	int reserved_mem_addr_cells;
	int reserved_mem_size_cells;
};

struct regions __initdata regions;

static __init void kaslr_get_cmdline(void *fdt)
{
	early_init_dt_scan_chosen(boot_command_line);
}

static unsigned long __init rotate_xor(unsigned long hash, const void *area,
				       size_t size)
{
	size_t i;
	const unsigned long *ptr = area;

	for (i = 0; i < size / sizeof(hash); i++) {
		/* Rotate by odd number of bits and XOR. */
		hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7);
		hash ^= ptr[i];
	}

	return hash;
}
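
/*
 * Illustration only (not part of the algorithm): on a 32-bit build,
 * sizeof(hash) * 8 == 32, so each step of the loop above is a
 * rotate-left by 25 bits (equivalently, a rotate-right by 7) followed
 * by an XOR with the next word, i.e. roughly:
 *
 *	hash = rol32(hash, 25) ^ ptr[i];
 *
 * Rotating by an odd bit count keeps repeated input words from
 * cancelling each other out.
 */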

/*
 * Attempt to create a simple starting entropy. This can make it different
 * for every build but it is still not enough. Stronger entropy should
 * be added to make it change for every boot.
 */
static unsigned long __init get_boot_seed(void *fdt)
{
	unsigned long hash = 0;

	/* build-specific string for starting entropy. */
	hash = rotate_xor(hash, linux_banner, strlen(linux_banner));
	hash = rotate_xor(hash, fdt, fdt_totalsize(fdt));

	return hash;
}

static __init u64 get_kaslr_seed(void *fdt)
{
	int node, len;
	fdt64_t *prop;
	u64 ret;

	node = fdt_path_offset(fdt, "/chosen");
	if (node < 0)
		return 0;

	prop = fdt_getprop_w(fdt, node, "kaslr-seed", &len);
	if (!prop || len != sizeof(u64))
		return 0;

	ret = fdt64_to_cpu(*prop);
	*prop = 0;
	return ret;
}

static __init bool regions_overlap(u32 s1, u32 e1, u32 s2, u32 e2)
{
	return e1 >= s2 && e2 >= s1;
}

static __init bool overlaps_reserved_region(const void *fdt, u32 start,
					    u32 end)
{
	int subnode, len, i;
	u64 base, size;

	/* check for overlap with /memreserve/ entries */
	for (i = 0; i < fdt_num_mem_rsv(fdt); i++) {
		if (fdt_get_mem_rsv(fdt, i, &base, &size) < 0)
			continue;
		if (regions_overlap(start, end, base, base + size))
			return true;
	}

	if (regions.reserved_mem < 0)
		return false;

	/* check for overlap with static reservations in /reserved-memory */
	for (subnode = fdt_first_subnode(fdt, regions.reserved_mem);
	     subnode >= 0;
	     subnode = fdt_next_subnode(fdt, subnode)) {
		const fdt32_t *reg;
		u64 rsv_end;

		len = 0;
		reg = fdt_getprop(fdt, subnode, "reg", &len);
		/* 'len' is in bytes; each cell is 4 bytes wide */
		while (len >= 4 * (regions.reserved_mem_addr_cells +
				   regions.reserved_mem_size_cells)) {
			base = fdt32_to_cpu(reg[0]);
			if (regions.reserved_mem_addr_cells == 2)
				base = (base << 32) | fdt32_to_cpu(reg[1]);

			reg += regions.reserved_mem_addr_cells;
			len -= 4 * regions.reserved_mem_addr_cells;

			size = fdt32_to_cpu(reg[0]);
			if (regions.reserved_mem_size_cells == 2)
				size = (size << 32) | fdt32_to_cpu(reg[1]);

			reg += regions.reserved_mem_size_cells;
			len -= 4 * regions.reserved_mem_size_cells;

			if (base >= regions.pa_end)
				continue;

			rsv_end = min(base + size, (u64)U32_MAX);

			if (regions_overlap(start, end, base, rsv_end))
				return true;
		}
	}
	return false;
}
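
/*
 * Example (illustrative values only): with #address-cells = 2 and
 * #size-cells = 2 in /reserved-memory, a child node carrying
 *
 *	reg = <0x0 0x7a000000 0x0 0x100000>;
 *
 * is decoded by the loop above as base = 0x7a000000 and size = 0x100000,
 * i.e. a 1MB static reservation that candidate kernel ranges must avoid.
 */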

static __init bool overlaps_region(const void *fdt, u32 start,
				   u32 end)
{
	if (regions_overlap(start, end, __pa(_stext), __pa(_end)))
		return true;

	if (regions_overlap(start, end, regions.dtb_start,
			    regions.dtb_end))
		return true;

	if (regions_overlap(start, end, regions.initrd_start,
			    regions.initrd_end))
		return true;

	if (regions_overlap(start, end, regions.crash_start,
			    regions.crash_end))
		return true;

	return overlaps_reserved_region(fdt, start, end);
}

static void __init get_crash_kernel(void *fdt, unsigned long size)
{
#ifdef CONFIG_CRASH_CORE
	unsigned long long crash_size, crash_base;
	int ret;

	ret = parse_crashkernel(boot_command_line, size, &crash_size,
				&crash_base);
	if (ret != 0 || crash_size == 0)
		return;
	if (crash_base == 0)
		crash_base = KDUMP_KERNELBASE;

	regions.crash_start = (unsigned long)crash_base;
	regions.crash_end = (unsigned long)(crash_base + crash_size);

	pr_debug("crash_base=0x%llx crash_size=0x%llx\n", crash_base, crash_size);
#endif
}

static void __init get_initrd_range(void *fdt)
{
	u64 start, end;
	int node, len;
	const __be32 *prop;

	node = fdt_path_offset(fdt, "/chosen");
	if (node < 0)
		return;

	prop = fdt_getprop(fdt, node, "linux,initrd-start", &len);
	if (!prop)
		return;
	start = of_read_number(prop, len / 4);

	prop = fdt_getprop(fdt, node, "linux,initrd-end", &len);
	if (!prop)
		return;
	end = of_read_number(prop, len / 4);

	regions.initrd_start = (unsigned long)start;
	regions.initrd_end = (unsigned long)end;

	pr_debug("initrd_start=0x%llx initrd_end=0x%llx\n", start, end);
}

static __init unsigned long get_usable_address(const void *fdt,
					       unsigned long start,
					       unsigned long offset)
{
	unsigned long pa;
	unsigned long pa_end;

	/* Walk downwards in 16K steps until a non-overlapping range fits */
	for (pa = offset; (long)pa > (long)start; pa -= SZ_16K) {
		pa_end = pa + regions.kernel_size;
		if (overlaps_region(fdt, pa, pa_end))
			continue;

		return pa;
	}
	return 0;
}

static __init void get_cell_sizes(const void *fdt, int node, int *addr_cells,
				  int *size_cells)
{
	const fdt32_t *prop;
	int len;

	/*
	 * Retrieve the #address-cells and #size-cells properties
	 * from the 'node', or use the default if not provided.
	 */
	*addr_cells = *size_cells = 1;

	prop = fdt_getprop(fdt, node, "#address-cells", &len);
	if (len == 4)
		*addr_cells = fdt32_to_cpu(*prop);
	prop = fdt_getprop(fdt, node, "#size-cells", &len);
	if (len == 4)
		*size_cells = fdt32_to_cpu(*prop);
}

static unsigned long __init kaslr_legal_offset(void *dt_ptr, unsigned long index,
					       unsigned long offset)
{
	unsigned long koffset = 0;
	unsigned long start;

	/*
	 * Try the chosen 64M slot first; if nothing fits there, fall
	 * back through the lower slots one by one.
	 */
	while ((long)index >= 0) {
		start = memstart_addr + index * SZ_64M;
		koffset = get_usable_address(dt_ptr, start, start + offset);
		if (koffset)
			break;
		index--;
	}

	if (koffset != 0)
		koffset -= memstart_addr;

	return koffset;
}

static inline __init bool kaslr_disabled(void)
{
	return strstr(boot_command_line, "nokaslr") != NULL;
}

static unsigned long __init kaslr_choose_location(void *dt_ptr, phys_addr_t size,
						  unsigned long kernel_sz)
{
	unsigned long offset, random;
	unsigned long ram, linear_sz;
	u64 seed;
	unsigned long index;

	kaslr_get_cmdline(dt_ptr);
	if (kaslr_disabled())
		return 0;

	random = get_boot_seed(dt_ptr);

	/* Mix in the timebase for some per-boot variation */
	seed = get_tb() << 32;
	seed ^= get_tb();
	random = rotate_xor(random, &seed, sizeof(seed));

	/*
	 * Retrieve (and wipe) the seed from the FDT
	 */
	seed = get_kaslr_seed(dt_ptr);
	if (seed)
		random = rotate_xor(random, &seed, sizeof(seed));
	else
		pr_warn("KASLR: No safe seed for randomizing the kernel base.\n");

	ram = min_t(phys_addr_t, __max_low_memory, size);
	ram = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, true, true);
	linear_sz = min_t(unsigned long, ram, SZ_512M);

	/* If the linear size is smaller than 64M, do not randomize */
	if (linear_sz < SZ_64M)
		return 0;

	/* check for a reserved-memory node and record its cell sizes */
	regions.reserved_mem = fdt_path_offset(dt_ptr, "/reserved-memory");
	if (regions.reserved_mem >= 0)
		get_cell_sizes(dt_ptr, regions.reserved_mem,
			       &regions.reserved_mem_addr_cells,
			       &regions.reserved_mem_size_cells);

	regions.pa_start = memstart_addr;
	regions.pa_end = memstart_addr + linear_sz;
	regions.dtb_start = __pa(dt_ptr);
	regions.dtb_end = __pa(dt_ptr) + fdt_totalsize(dt_ptr);
	regions.kernel_size = kernel_sz;

	get_initrd_range(dt_ptr);
	get_crash_kernel(dt_ptr, ram);
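
	/*
	 * Worked example with hypothetical numbers: if linear_sz = 512M,
	 * there are 8 candidate 64M slots, so a seed of 0x12345678 gives
	 * index = (0x12345678 & 0xFF) % 8 = 120 % 8 = 0 (the lowest slot),
	 * and an in-slot offset of, say, 0x2345678 is rounded down to the
	 * 16K boundary 0x2344000 below.
	 */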
	/*
	 * Decide which 64M slot we want to start in.
	 * Only use the low 8 bits of the random seed.
	 */
	index = random & 0xFF;
	index %= linear_sz / SZ_64M;

	/* Decide the offset inside the 64M slot */
	offset = random % (SZ_64M - kernel_sz);
	offset = round_down(offset, SZ_16K);

	return kaslr_legal_offset(dt_ptr, index, offset);
}

/*
 * Check whether we need to relocate the kernel to a random offset.
 * void *dt_ptr - address of the device tree
 * phys_addr_t size - size of the first memory block
 */
notrace void __init kaslr_early_init(void *dt_ptr, phys_addr_t size)
{
	unsigned long tlb_virt;
	phys_addr_t tlb_phys;
	unsigned long offset;
	unsigned long kernel_sz;

	kernel_sz = (unsigned long)_end - (unsigned long)_stext;

	offset = kaslr_choose_location(dt_ptr, size, kernel_sz);
	if (offset == 0)
		return;

	kernstart_virt_addr += offset;
	kernstart_addr += offset;

	is_second_reloc = 1;

	if (offset >= SZ_64M) {
		tlb_virt = round_down(kernstart_virt_addr, SZ_64M);
		tlb_phys = round_down(kernstart_addr, SZ_64M);

		/* Create kernel map to relocate in */
		create_kaslr_tlb_entry(1, tlb_virt, tlb_phys);
	}

	/* Copy the kernel to its new location and run from there */
	memcpy((void *)kernstart_virt_addr, (void *)_stext, kernel_sz);
	flush_icache_range(kernstart_virt_addr, kernstart_virt_addr + kernel_sz);

	reloc_kernel_entry(dt_ptr, kernstart_virt_addr);
}

void __init kaslr_late_init(void)
{
	/* If randomized, clear the original kernel */
	if (kernstart_virt_addr != KERNELBASE) {
		unsigned long kernel_sz;

		kernel_sz = (unsigned long)_end - kernstart_virt_addr;
		memzero_explicit((void *)KERNELBASE, kernel_sz);
	}
}