// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Chen Liqin <liqin.chen@sunplusct.com>
 *  Lennox Wu <lennox.wu@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2020 FORTH-ICS/CARV
 *  Nick Kossifidis <mick@ics.forth.gr>
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/sched.h>
#include <linux/console.h>
#include <linux/screen_info.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/sched/task.h>
#include <linux/swiotlb.h>
#include <linux/smp.h>
#include <linux/efi.h>

#include <asm/cpu_ops.h>
#include <asm/early_ioremap.h>
#include <asm/setup.h>
#include <asm/set_memory.h>
#include <asm/sections.h>
#include <asm/sbi.h>
#include <asm/tlbflush.h>
#include <asm/thread_info.h>
#include <asm/kasan.h>
#include <asm/efi.h>

#include "head.h"

#if defined(CONFIG_DUMMY_CONSOLE) || defined(CONFIG_EFI)
struct screen_info screen_info __section(".data") = {
	.orig_video_lines	= 30,
	.orig_video_cols	= 80,
	.orig_video_mode	= 0,
	.orig_video_ega_bx	= 0,
	.orig_video_isVGA	= 1,
	.orig_video_points	= 8
};
#endif

/*
 * The lucky hart to first increment this variable will boot the other cores.
 * This is used before the kernel initializes the BSS so it can't be in the
 * BSS.
 */
atomic_t hart_lottery __section(".sdata");
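
/*
 * Illustrative sketch (not part of the original file): the draw itself
 * happens in head.S before any C code runs. Conceptually, each hart
 * performs an atomic fetch-and-add on hart_lottery and only the hart
 * that observes the old value 0 continues as the boot hart:
 *
 *	if (atomic_fetch_add(1, &hart_lottery) == 0)
 *		boot_sequence();	// winner brings up the kernel
 *	else
 *		wait_for_boot_hart();	// losers park until released
 *
 * boot_sequence() and wait_for_boot_hart() are hypothetical names for
 * the corresponding code paths in head.S.
 */
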
unsigned long boot_cpu_hartid;
static DEFINE_PER_CPU(struct cpu, cpu_devices);

/*
 * Place kernel memory regions on the resource tree so that
 * kexec-tools can retrieve them from /proc/iomem. While at it,
 * also add "System RAM" regions for compatibility with other
 * archs, and the rest of the known regions for completeness.
 */
static struct resource code_res = { .name = "Kernel code", };
static struct resource data_res = { .name = "Kernel data", };
static struct resource rodata_res = { .name = "Kernel rodata", };
static struct resource bss_res = { .name = "Kernel bss", };

static int __init add_resource(struct resource *parent,
				struct resource *res)
{
	int ret = 0;

	ret = insert_resource(parent, res);
	if (ret < 0) {
		pr_err("Failed to add a %s resource at %llx\n",
			res->name, (unsigned long long) res->start);
		return ret;
	}

	return 1;
}

/*
 * Returns a negative errno on failure, 0 if @res is not the kernel
 * image region (nothing done), and a positive value once the image
 * and its segments have been registered.
 */
static int __init add_kernel_resources(struct resource *res)
{
	int ret = 0;

	/*
	 * The memory region of the kernel image is contiguous and
	 * was reserved in setup_bootmem(); find it here and register
	 * it as a resource, then register the various segments of
	 * the image as child nodes.
	 */
	if (!(res->start <= code_res.start && res->end >= data_res.end))
		return 0;

	res->name = "Kernel image";
	res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	/*
	 * We removed a part of this region in setup_bootmem() so
	 * we need to expand the resource for the bss to fit in.
	 */
	res->end = bss_res.end;

	ret = add_resource(&iomem_resource, res);
	if (ret < 0)
		return ret;

	ret = add_resource(res, &code_res);
	if (ret < 0)
		return ret;

	ret = add_resource(res, &rodata_res);
	if (ret < 0)
		return ret;

	ret = add_resource(res, &data_res);
	if (ret < 0)
		return ret;

	ret = add_resource(res, &bss_res);

	return ret;
}

static void __init init_resources(void)
{
	struct memblock_region *region = NULL;
	struct resource *res = NULL;
	struct resource *mem_res = NULL;
	size_t mem_res_sz = 0;
	int ret = 0, i = 0;

	code_res.start = __pa_symbol(_text);
	code_res.end = __pa_symbol(_etext) - 1;
	code_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	rodata_res.start = __pa_symbol(__start_rodata);
	rodata_res.end = __pa_symbol(__end_rodata) - 1;
	rodata_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	data_res.start = __pa_symbol(_data);
	data_res.end = __pa_symbol(_edata) - 1;
	data_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	bss_res.start = __pa_symbol(__bss_start);
	bss_res.end = __pa_symbol(__bss_stop) - 1;
	bss_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	mem_res_sz = (memblock.memory.cnt + memblock.reserved.cnt) * sizeof(*mem_res);
	mem_res = memblock_alloc(mem_res_sz, SMP_CACHE_BYTES);
	if (!mem_res)
		panic("%s: Failed to allocate %zu bytes\n", __func__, mem_res_sz);

	/*
	 * Start by adding the reserved regions; if they overlap
	 * with /memory regions, insert_resource() later on will take
	 * care of it.
	 */
	for_each_reserved_mem_region(region) {
		res = &mem_res[i++];

		res->name = "Reserved";
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
		res->start = __pfn_to_phys(memblock_region_reserved_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_reserved_end_pfn(region)) - 1;

		ret = add_kernel_resources(res);
		if (ret < 0)
			goto error;
		else if (ret)
			continue;

		/*
		 * Ignore any other reserved regions within
		 * system memory.
		 */
		if (memblock_is_memory(res->start)) {
			memblock_free((phys_addr_t) res, sizeof(struct resource));
			continue;
		}

		ret = add_resource(&iomem_resource, res);
		if (ret < 0)
			goto error;
	}

	/* Add /memory regions to the resource tree */
	for_each_mem_region(region) {
		res = &mem_res[i++];

		if (unlikely(memblock_is_nomap(region))) {
			res->name = "Reserved";
			res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
		} else {
			res->name = "System RAM";
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
		}

		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;

		ret = add_resource(&iomem_resource, res);
		if (ret < 0)
			goto error;
	}

	return;

error:
	/* Better an empty resource tree than an inconsistent one */
	release_child_resources(&iomem_resource);
	memblock_free((phys_addr_t) mem_res, mem_res_sz);
}
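
/*
 * Illustrative sketch (addresses are hypothetical, not from the
 * original file): after init_resources() runs, a QEMU "virt" style
 * machine might report the tree through /proc/iomem roughly as:
 *
 *	80000000-87ffffff : System RAM
 *	  80200000-80ffffff : Kernel image
 *	    80200000-805fffff : Kernel code
 *	    80600000-807fffff : Kernel rodata
 *	    80800000-809fffff : Kernel data
 *	    80a00000-80ffffff : Kernel bss
 *
 * kexec-tools walks this file to decide where a new kernel can be
 * loaded; the exact layout depends on the platform's memory map and
 * the kernel configuration.
 */
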
static void __init parse_dtb(void)
{
	/* Early scan of device tree from init memory */
	if (early_init_dt_scan(dtb_early_va)) {
		const char *name = of_flat_dt_get_machine_name();

		if (name) {
			pr_info("Machine model: %s\n", name);
			dump_stack_set_arch_desc("%s (DT)", name);
		}
		return;
	}

	pr_err("No DTB passed to the kernel\n");
#ifdef CONFIG_CMDLINE_FORCE
	strlcpy(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
	pr_info("Forcing kernel command line to: %s\n", boot_command_line);
#endif
}

void __init setup_arch(char **cmdline_p)
{
	parse_dtb();
	init_mm.start_code = (unsigned long) _stext;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = (unsigned long) _end;

	*cmdline_p = boot_command_line;

	early_ioremap_setup();
	jump_label_init();
	parse_early_param();

	efi_init();
	setup_bootmem();
	paging_init();
	init_resources();
#if IS_ENABLED(CONFIG_BUILTIN_DTB)
	unflatten_and_copy_device_tree();
#else
	if (early_init_dt_verify(__va(dtb_early_pa)))
		unflatten_device_tree();
	else
		pr_err("No DTB found in kernel mappings\n");
#endif
	misc_mem_init();

	sbi_init();

	if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
		protect_kernel_text_data();
#ifdef CONFIG_SWIOTLB
	swiotlb_init(1);
#endif

#ifdef CONFIG_KASAN
	kasan_init();
#endif

#ifdef CONFIG_SMP
	setup_smp();
#endif

	riscv_fill_hwcap();
}

static int __init topology_init(void)
{
	int i, ret;

	for_each_online_node(i)
		register_one_node(i);

	for_each_possible_cpu(i) {
		struct cpu *cpu = &per_cpu(cpu_devices, i);

		cpu->hotpluggable = cpu_has_hotplug(i);
		ret = register_cpu(cpu, i);
		if (unlikely(ret))
			pr_warn("Warning: %s: register_cpu %d failed (%d)\n",
			       __func__, i, ret);
	}

	return 0;
}
subsys_initcall(topology_init);

void free_initmem(void)
{
	unsigned long init_begin = (unsigned long)__init_begin;
	unsigned long init_end = (unsigned long)__init_end;

	if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
		set_memory_rw_nx(init_begin, (init_end - init_begin) >> PAGE_SHIFT);

	free_initmem_default(POISON_FREE_INITMEM);
}
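
/*
 * Illustrative sketch (generic-code behaviour, not defined in this
 * file): free_initmem_default() is roughly equivalent to
 *
 *	free_reserved_area(&__init_begin, &__init_end,
 *			   POISON_FREE_INITMEM, "unused kernel");
 *
 * i.e. it poisons the .init pages and returns them to the page
 * allocator, logging a "Freeing unused kernel ..." line. With
 * CONFIG_STRICT_KERNEL_RWX the region must first be made writable and
 * non-executable again, which is why set_memory_rw_nx() runs above.
 */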