// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Chen Liqin <liqin.chen@sunplusct.com>
 *  Lennox Wu <lennox.wu@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2020 FORTH-ICS/CARV
 *  Nick Kossifidis <mick@ics.forth.gr>
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/sched.h>
#include <linux/console.h>
#include <linux/screen_info.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/sched/task.h>
#include <linux/swiotlb.h>
#include <linux/smp.h>
#include <linux/efi.h>
#include <linux/crash_dump.h>

#include <asm/cpu_ops.h>
#include <asm/early_ioremap.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/set_memory.h>
#include <asm/sections.h>
#include <asm/sbi.h>
#include <asm/tlbflush.h>
#include <asm/thread_info.h>
#include <asm/kasan.h>
#include <asm/efi.h>

#include "head.h"

#if defined(CONFIG_DUMMY_CONSOLE) || defined(CONFIG_EFI)
struct screen_info screen_info __section(".data") = {
	.orig_video_lines	= 30,
	.orig_video_cols	= 80,
	.orig_video_mode	= 0,
	.orig_video_ega_bx	= 0,
	.orig_video_isVGA	= 1,
	.orig_video_points	= 8
};
#endif

/*
 * The lucky hart to first increment this variable will boot the other cores.
 * This is used before the kernel initializes the BSS so it can't be in the
 * BSS.
 */
atomic_t hart_lottery __section(".sdata")
#ifdef CONFIG_XIP_KERNEL
= ATOMIC_INIT(0xC001BEEF)
#endif
;
unsigned long boot_cpu_hartid;
static DEFINE_PER_CPU(struct cpu, cpu_devices);

/*
 * Place kernel memory regions on the resource tree so that
 * kexec-tools can retrieve them from /proc/iomem. While there
 * also add "System RAM" regions for compatibility with other
 * archs, and the rest of the known regions for completeness.
 */
static struct resource kimage_res = { .name = "Kernel image", };
static struct resource code_res = { .name = "Kernel code", };
static struct resource data_res = { .name = "Kernel data", };
static struct resource rodata_res = { .name = "Kernel rodata", };
static struct resource bss_res = { .name = "Kernel bss", };
#ifdef CONFIG_CRASH_DUMP
static struct resource elfcorehdr_res = { .name = "ELF Core hdr", };
#endif

static int __init add_resource(struct resource *parent,
				struct resource *res)
{
	int ret = 0;

	ret = insert_resource(parent, res);
	if (ret < 0) {
		pr_err("Failed to add a %s resource at %llx\n",
			res->name, (unsigned long long) res->start);
		return ret;
	}

	return 1;
}
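
/*
 * For reference, the hierarchy that add_kernel_resources() below hangs
 * off of iomem_resource shows up in /proc/iomem roughly as follows.
 * This is an illustrative sketch, not output from a real boot: the
 * addresses assume the common RV64 load address of 0x80200000, and the
 * segment sizes vary with the kernel configuration. This is the view
 * that kexec-tools reads, per the comment above the resource
 * declarations.
 *
 *   80200000-80ffffff : Kernel image
 *     80200000-805fffff : Kernel code
 *     80600000-807fffff : Kernel rodata
 *     80800000-809fffff : Kernel data
 *     80a00000-80ffffff : Kernel bss
 */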

static int __init add_kernel_resources(void)
{
	int ret = 0;

	/*
	 * The memory region of the kernel image is contiguous and
	 * was reserved in setup_bootmem; register it here as a
	 * resource, with the various segments of the image as
	 * child nodes.
	 */

	code_res.start = __pa_symbol(_text);
	code_res.end = __pa_symbol(_etext) - 1;
	code_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	rodata_res.start = __pa_symbol(__start_rodata);
	rodata_res.end = __pa_symbol(__end_rodata) - 1;
	rodata_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	data_res.start = __pa_symbol(_data);
	data_res.end = __pa_symbol(_edata) - 1;
	data_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	bss_res.start = __pa_symbol(__bss_start);
	bss_res.end = __pa_symbol(__bss_stop) - 1;
	bss_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	kimage_res.start = code_res.start;
	kimage_res.end = bss_res.end;
	kimage_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	ret = add_resource(&iomem_resource, &kimage_res);
	if (ret < 0)
		return ret;

	ret = add_resource(&kimage_res, &code_res);
	if (ret < 0)
		return ret;

	ret = add_resource(&kimage_res, &rodata_res);
	if (ret < 0)
		return ret;

	ret = add_resource(&kimage_res, &data_res);
	if (ret < 0)
		return ret;

	ret = add_resource(&kimage_res, &bss_res);

	return ret;
}
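
/*
 * Note on the allocation scheme used by init_resources() below (a
 * descriptive sketch, not part of the original source): a worst-case
 * array of struct resource is carved out of memblock up front, sized
 * for every memory region plus every reserved region plus one spare
 * slot, since the memblock_alloc() call itself may append a reserved
 * region. Entries are handed out from the end of the array (res_idx
 * counts down), so whatever remains unused afterwards is the
 * contiguous prefix mem_res[0..res_idx], which is returned to
 * memblock before the function exits.
 */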

static void __init init_resources(void)
{
	struct memblock_region *region = NULL;
	struct resource *res = NULL;
	struct resource *mem_res = NULL;
	size_t mem_res_sz = 0;
	int num_resources = 0, res_idx = 0;
	int ret = 0;

	/* + 1 as memblock_alloc() might increase memblock.reserved.cnt */
	num_resources = memblock.memory.cnt + memblock.reserved.cnt + 1;
	res_idx = num_resources - 1;

	mem_res_sz = num_resources * sizeof(*mem_res);
	mem_res = memblock_alloc(mem_res_sz, SMP_CACHE_BYTES);
	if (!mem_res)
		panic("%s: Failed to allocate %zu bytes\n", __func__, mem_res_sz);

	/*
	 * Start by adding the reserved regions; if they overlap
	 * with /memory regions, insert_resource() later on will take
	 * care of it.
	 */
	ret = add_kernel_resources();
	if (ret < 0)
		goto error;

#ifdef CONFIG_KEXEC_CORE
	if (crashk_res.start != crashk_res.end) {
		ret = add_resource(&iomem_resource, &crashk_res);
		if (ret < 0)
			goto error;
	}
#endif

#ifdef CONFIG_CRASH_DUMP
	if (elfcorehdr_size > 0) {
		elfcorehdr_res.start = elfcorehdr_addr;
		elfcorehdr_res.end = elfcorehdr_addr + elfcorehdr_size - 1;
		elfcorehdr_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
		add_resource(&iomem_resource, &elfcorehdr_res);
	}
#endif

	for_each_reserved_mem_region(region) {
		res = &mem_res[res_idx--];

		res->name = "Reserved";
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
		res->start = __pfn_to_phys(memblock_region_reserved_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_reserved_end_pfn(region)) - 1;

		/*
		 * Ignore any other reserved regions within
		 * system memory.
		 */
		if (memblock_is_memory(res->start)) {
			/* Re-use this pre-allocated resource */
			res_idx++;
			continue;
		}

		ret = add_resource(&iomem_resource, res);
		if (ret < 0)
			goto error;
	}

	/* Add /memory regions to the resource tree */
	for_each_mem_region(region) {
		res = &mem_res[res_idx--];

		if (unlikely(memblock_is_nomap(region))) {
			res->name = "Reserved";
			res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
		} else {
			res->name = "System RAM";
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
		}

		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;

		ret = add_resource(&iomem_resource, res);
		if (ret < 0)
			goto error;
	}

	/*
	 * Clean-up any unused pre-allocated resources. Entries were
	 * handed out from the end of the array, so the unused ones are
	 * the first (res_idx + 1) elements.
	 */
	if (res_idx >= 0)
		memblock_free(__pa(mem_res), (res_idx + 1) * sizeof(*mem_res));
	return;

error:
	/* Better an empty resource tree than an inconsistent one */
	release_child_resources(&iomem_resource);
	memblock_free(__pa(mem_res), mem_res_sz);
}

static void __init parse_dtb(void)
{
	/* Early scan of device tree from init memory */
	if (early_init_dt_scan(dtb_early_va)) {
		const char *name = of_flat_dt_get_machine_name();

		if (name) {
			pr_info("Machine model: %s\n", name);
			dump_stack_set_arch_desc("%s (DT)", name);
		}
		return;
	}

	pr_err("No DTB passed to the kernel\n");
#ifdef CONFIG_CMDLINE_FORCE
	strlcpy(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
	pr_info("Forcing kernel command line to: %s\n", boot_command_line);
#endif
}

void __init setup_arch(char **cmdline_p)
{
	parse_dtb();
	init_mm.start_code = (unsigned long) _stext;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = (unsigned long) _end;

	*cmdline_p = boot_command_line;

	early_ioremap_setup();
	jump_label_init();
	parse_early_param();

	efi_init();
	setup_bootmem();
	paging_init();
#if IS_ENABLED(CONFIG_BUILTIN_DTB)
	unflatten_and_copy_device_tree();
#else
	if (early_init_dt_verify(__va(XIP_FIXUP(dtb_early_pa))))
		unflatten_device_tree();
	else
		pr_err("No DTB found in kernel mappings\n");
#endif
	misc_mem_init();

	init_resources();
	sbi_init();

	if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)) {
		protect_kernel_text_data();
		protect_kernel_linear_mapping_text_rodata();
	}

#ifdef CONFIG_SWIOTLB
	swiotlb_init(1);
#endif

#ifdef CONFIG_KASAN
	kasan_init();
#endif

#ifdef CONFIG_SMP
	setup_smp();
#endif

	riscv_fill_hwcap();
}

static int __init topology_init(void)
{
	int i, ret;

	for_each_online_node(i)
		register_one_node(i);

	for_each_possible_cpu(i) {
		struct cpu *cpu = &per_cpu(cpu_devices, i);

		cpu->hotpluggable = cpu_has_hotplug(i);
		ret = register_cpu(cpu, i);
		if (unlikely(ret))
			pr_warn("Warning: %s: register_cpu %d failed (%d)\n",
				__func__, i, ret);
	}

	return 0;
}
subsys_initcall(topology_init);
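
/*
 * Descriptive note (not from the original source): with
 * CONFIG_STRICT_KERNEL_RWX the init area was given restrictive
 * permissions earlier in boot (see the protect_kernel_text_data()
 * call in setup_arch() above), so free_initmem() below first resets
 * the range to writable and non-executable before
 * free_initmem_default() hands the pages back to the page allocator
 * for general use.
 */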

void free_initmem(void)
{
	unsigned long init_begin = (unsigned long)__init_begin;
	unsigned long init_end = (unsigned long)__init_end;

	if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
		set_memory_rw_nx(init_begin, (init_end - init_begin) >> PAGE_SHIFT);

	free_initmem_default(POISON_FREE_INITMEM);
}