/*
 * Based on arch/arm/kernel/setup.c
 *
 * Copyright (C) 1995-2001 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/acpi.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/cache.h>
#include <linux/bootmem.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/efi.h>
#include <linux/psci.h>
#include <linux/mm.h>

#include <asm/acpi.h>
#include <asm/fixmap.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
#include <asm/kasan.h>
#include <asm/numa.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>
#include <asm/memblock.h>
#include <asm/efi.h>
#include <asm/xen/hypervisor.h>
#include <asm/mmu_context.h>

phys_addr_t __fdt_pointer __initdata;

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	}
};

#define kernel_code mem_res[0]
#define kernel_data mem_res[1]

/*
 * The recorded values of x0 .. x3 upon kernel entry.
 */
u64 __cacheline_aligned boot_args[4];

void __init smp_setup_processor_id(void)
{
	u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
	cpu_logical_map(0) = mpidr;

	/*
	 * Clear __my_cpu_offset on the boot CPU to avoid a hang caused by
	 * early use of percpu variables; for example, lockdep will access
	 * percpu variables inside lock_release().
	 */
	set_my_cpu_offset(0);
	pr_info("Booting Linux on physical CPU 0x%lx\n", (unsigned long)mpidr);
}
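
/*
 * An illustrative note on the masking above, using a made-up register
 * value: MPIDR_EL1 carries Aff0 in bits [7:0], Aff1 in [15:8], Aff2 in
 * [23:16] and Aff3 in [39:32], while the MT bit (24), the U bit (30)
 * and the RES1 bit (31) hold no affinity information. A hypothetical
 * boot CPU reading MPIDR_EL1 = 0x81000102 would therefore be recorded
 * as
 *
 *	0x81000102 & MPIDR_HWID_BITMASK == 0x102
 *
 * i.e. Aff1 = 1, Aff0 = 2, with the MT and RES1 bits discarded.
 */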
bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
	return phys_id == cpu_logical_map(cpu);
}

struct mpidr_hash mpidr_hash;
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. The resulting algorithm is a
 *			  collision-free hash carried out through
 *			  shifting and ORing.
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity, fs[4], bits[4], ls;
	u64 mask = 0;
	/*
	 * Pre-scan the list of MPIDRs and filter out bits that do
	 * not contribute to affinity levels, i.e. they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits %#llx\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 4; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB and LSB bit positions to determine how
		 * many bits are required to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR_EL1 by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 32-bit value space to a
	 * compressed set of values. This is equivalent to hashing
	 * the MPIDR_EL1 through shifting and ORing. It is a collision-free
	 * hash, though not minimal, since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, e.g. MPIDR_EL1[7:0] = {0x2, 0x80}.
	 */
	mpidr_hash.shift_aff[0] = MPIDR_LEVEL_SHIFT(0) + fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_SHIFT(1) + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = MPIDR_LEVEL_SHIFT(2) + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.shift_aff[3] = MPIDR_LEVEL_SHIFT(3) +
				  fs[3] - (bits[2] + bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[3] + bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] aff3[%u] mask[%#llx] bits[%u]\n",
		mpidr_hash.shift_aff[0],
		mpidr_hash.shift_aff[1],
		mpidr_hash.shift_aff[2],
		mpidr_hash.shift_aff[3],
		mpidr_hash.mask,
		mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
}
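
/*
 * A worked example of the hash above, using hypothetical MPIDR_EL1
 * values rather than ones read from real hardware: four CPUs reporting
 * 0x0, 0x1, 0x100 and 0x101 (two clusters of two cores) yield
 *
 *	mask = 0x101, bits[0] = bits[1] = 1, fs[0] = fs[1] = 0,
 *	shift_aff[0] = 0, shift_aff[1] = 7, mpidr_hash.bits = 2,
 *
 * so an MPIDR is compressed to the linear index
 *
 *	((mpidr & 0x001) >> 0) | ((mpidr & 0x100) >> 7)
 *
 * mapping 0x0, 0x1, 0x100 and 0x101 to 0, 1, 2 and 3 respectively,
 * with mpidr_hash_size() = 1 << 2 = 4 buckets. In this case the hash
 * happens to be minimal as well as collision-free.
 */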
static void __init setup_machine_fdt(phys_addr_t dt_phys)
{
	void *dt_virt = fixmap_remap_fdt(dt_phys);

	if (!dt_virt || !early_init_dt_scan(dt_virt)) {
		pr_crit("\n"
			"Error: invalid device tree blob at physical address %pa (virtual address 0x%p)\n"
			"The dtb must be 8-byte aligned and must not exceed 2 MB in size\n"
			"\nPlease check your bootloader.",
			&dt_phys, dt_virt);

		while (true)
			cpu_relax();
	}

	dump_stack_set_arch_desc("%s (DT)", of_flat_dt_get_machine_name());
}

static void __init request_standard_resources(void)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start = __pa_symbol(_text);
	kernel_code.end   = __pa_symbol(__init_begin - 1);
	kernel_data.start = __pa_symbol(_sdata);
	kernel_data.end   = __pa_symbol(_end - 1);

	for_each_memblock(memory, region) {
		res = alloc_bootmem_low(sizeof(*res));
		if (memblock_is_nomap(region)) {
			res->name = "reserved";
			res->flags = IORESOURCE_MEM;
		} else {
			res->name = "System RAM";
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
		}
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}
}
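
/*
 * A sketch of the resource tree this builds, with made-up addresses:
 * on a hypothetical board with 2 GiB of RAM at 0x40000000, /proc/iomem
 * would contain entries along the lines of
 *
 *	40000000-bfffffff : System RAM
 *	  40080000-40f0ffff : Kernel code
 *	  41000000-411dffff : Kernel data
 *
 * while memblock regions marked nomap appear as "reserved" instead and
 * are not marked busy.
 */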
u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };

void __init setup_arch(char **cmdline_p)
{
	pr_info("Boot CPU: AArch64 Processor [%08x]\n", read_cpuid_id());

	sprintf(init_utsname()->machine, UTS_MACHINE);
	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk        = (unsigned long) _end;

	*cmdline_p = boot_command_line;

	early_fixmap_init();
	early_ioremap_init();

	setup_machine_fdt(__fdt_pointer);

	parse_early_param();

	/*
	 * Unmask asynchronous aborts after bringing up a possible earlycon,
	 * so that any pending System Error can be reported once we are able
	 * to log it.
	 */
	local_async_enable();

	/*
	 * TTBR0 is only used for the identity mapping at this stage. Make it
	 * point to the zero page to avoid speculatively fetching new entries.
	 */
	cpu_uninstall_idmap();

	xen_early_init();
	efi_init();
	arm64_memblock_init();

	paging_init();

	acpi_table_upgrade();

	/* Parse the ACPI tables for possible boot-time configuration */
	acpi_boot_table_init();

	if (acpi_disabled)
		unflatten_device_tree();

	bootmem_init();

	kasan_init();

	request_standard_resources();

	early_ioremap_reset();

	if (acpi_disabled)
		psci_dt_init();
	else
		psci_acpi_init();

	cpu_read_bootcpu_ops();
	smp_init_cpus();
	smp_build_mpidr_hash();

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Make sure init_thread_info.ttbr0 always generates translation
	 * faults in case uaccess_enable() is inadvertently called by the init
	 * thread.
	 */
	init_task.thread_info.ttbr0 = __pa_symbol(empty_zero_page);
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
	if (boot_args[1] || boot_args[2] || boot_args[3]) {
		pr_err("WARNING: x1-x3 nonzero in violation of boot protocol:\n"
			"\tx1: %016llx\n\tx2: %016llx\n\tx3: %016llx\n"
			"This indicates a broken bootloader or old kernel\n",
			boot_args[1], boot_args[2], boot_args[3]);
	}
}

static int __init topology_init(void)
{
	int i;

	for_each_online_node(i)
		register_one_node(i);

	for_each_possible_cpu(i) {
		struct cpu *cpu = &per_cpu(cpu_data.cpu, i);
		cpu->hotpluggable = 1;
		register_cpu(cpu, i);
	}

	return 0;
}
subsys_initcall(topology_init);

/*
 * Dump out kernel offset information on panic.
 */
static int dump_kernel_offset(struct notifier_block *self, unsigned long v,
			      void *p)
{
	const unsigned long offset = kaslr_offset();

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && offset > 0) {
		pr_emerg("Kernel Offset: 0x%lx from 0x%lx\n",
			 offset, KIMAGE_VADDR);
	} else {
		pr_emerg("Kernel Offset: disabled\n");
	}
	return 0;
}

static struct notifier_block kernel_offset_notifier = {
	.notifier_call = dump_kernel_offset
};

static int __init register_kernel_offset_dumper(void)
{
	atomic_notifier_chain_register(&panic_notifier_list,
				       &kernel_offset_notifier);
	return 0;
}
__initcall(register_kernel_offset_dumper);
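
/*
 * With CONFIG_RANDOMIZE_BASE enabled, the notifier above adds a line of
 * the following shape to the panic output (both values made up for
 * illustration):
 *
 *	Kernel Offset: 0x2a1e600000 from 0xffff000008000000
 *
 * allowing post-mortem tools to rebase kernel symbols when decoding a
 * vmcore from a randomized kernel.
 */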