/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>
#include <linux/psci.h>

#include <asm/unified.h>
#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/efi.h>
#include <asm/elf.h>
#include <asm/early_ioremap.h>
#include <asm/fixmap.h>
#include <asm/procinfo.h>
#include <asm/psci.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>
#include <asm/xen/hypervisor.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
#include <asm/virt.h>

#include "atags.h"


#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void init_default_cache_policy(unsigned long);
extern void paging_init(const struct machine_desc *desc);
extern void early_mm_init(const struct machine_desc *);
extern void adjust_lowmem_bounds(void);
extern enum reboot_mode reboot_mode;
extern void setup_dma_zone(const struct machine_desc *desc);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

const char *system_serial;
EXPORT_SYMBOL(system_serial);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);

unsigned int elf_hwcap2 __read_mostly;
EXPORT_SYMBOL(elf_hwcap2);


#ifdef MULTI_CPU
struct processor processor __ro_after_init;
#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
struct processor *cpu_vtable[NR_CPUS] = {
	[0] = &processor,
};
#endif
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __ro_after_init;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __ro_after_init;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __ro_after_init;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __ro_after_init;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
	u32 fiq[3];
} ____cacheline_aligned;

#ifndef CONFIG_CPU_V7M
static struct stack stacks[NR_CPUS];
#endif

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
const struct machine_desc *machine_desc __initdata;

static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	}
};

#define video_ram	mem_res[0]
#define kernel_code	mem_res[1]
#define kernel_data	mem_res[2]

static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"7M",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

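/*
 * Note on the main ID register (MIDR) layout assumed by the decoding below
 * and by c_show(): implementer [31:24], variant [23:20], architecture
 * [19:16], primary part number [15:4], revision [3:0].  An architecture
 * field of 0xf means the CPUID scheme (extended ID_* feature registers)
 * is implemented and must be consulted instead.
 */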
#ifdef CONFIG_CPU_V7M
static int __get_cpu_architecture(void)
{
	return CPU_ARCH_ARMv7M;
}
#else
static int __get_cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
#endif

int __pure cpu_architecture(void)
{
	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

	return __cpu_architecture;
}

static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		set_csselr(CSSELR_ICACHE | CSSELR_L1);
		isb();
		id_reg = read_ccsidr();
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}

static void __init cacheid_init(void)
{
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		unsigned int cachetype = read_cpuid_cachetype();

		if ((arch == CPU_ARCH_ARMv7M) && !(cachetype & 0xf000f)) {
			cacheid = 0;
		} else if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	pr_info("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}
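
/*
 * Worked example for the ARMv7 branch of cpu_has_aliasing_icache() above,
 * assuming a hypothetical L1 I-cache whose CCSIDR reports LineSize = 1 and
 * NumSets = 255: line_size = 4 << (1 + 2) = 32 bytes and num_sets = 256,
 * so one way spans 32 * 256 = 8KB.  With 4KB pages that exceeds PAGE_SIZE,
 * hence the I-cache is flagged as (virtually indexed) aliasing.
 */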

/*
 * These functions re-use the assembly code in head.S, which
 * already provide the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}

#ifdef CONFIG_ARM_PATCH_IDIV

static inline u32 __attribute_const__ sdiv_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "sdiv r0, r0, r1" */
		u32 insn = __opcode_thumb32_compose(0xfb90, 0xf0f1);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "sdiv r0, r0, r1" */
	return __opcode_to_mem_arm(0xe710f110);
}

static inline u32 __attribute_const__ udiv_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "udiv r0, r0, r1" */
		u32 insn = __opcode_thumb32_compose(0xfbb0, 0xf0f1);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "udiv r0, r0, r1" */
	return __opcode_to_mem_arm(0xe730f110);
}

static inline u32 __attribute_const__ bx_lr_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "bx lr; nop" */
		u32 insn = __opcode_thumb32_compose(0x4770, 0x46c0);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "bx lr" */
	return __opcode_to_mem_arm(0xe12fff1e);
}

static void __init patch_aeabi_idiv(void)
{
	extern void __aeabi_uidiv(void);
	extern void __aeabi_idiv(void);
	uintptr_t fn_addr;
	unsigned int mask;

	mask = IS_ENABLED(CONFIG_THUMB2_KERNEL) ? HWCAP_IDIVT : HWCAP_IDIVA;
	if (!(elf_hwcap & mask))
		return;

	pr_info("CPU: div instructions available: patching division code\n");

	fn_addr = ((uintptr_t)&__aeabi_uidiv) & ~1;
	asm ("" : "+g" (fn_addr));
	((u32 *)fn_addr)[0] = udiv_instruction();
	((u32 *)fn_addr)[1] = bx_lr_instruction();
	flush_icache_range(fn_addr, fn_addr + 8);

	fn_addr = ((uintptr_t)&__aeabi_idiv) & ~1;
	asm ("" : "+g" (fn_addr));
	((u32 *)fn_addr)[0] = sdiv_instruction();
	((u32 *)fn_addr)[1] = bx_lr_instruction();
	flush_icache_range(fn_addr, fn_addr + 8);
}

#else
static inline void patch_aeabi_idiv(void) { }
#endif

static void __init cpuid_init_hwcaps(void)
{
	int block;
	u32 isar5;

	if (cpu_architecture() < CPU_ARCH_ARMv7)
		return;

	block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24);
	if (block >= 2)
		elf_hwcap |= HWCAP_IDIVA;
	if (block >= 1)
		elf_hwcap |= HWCAP_IDIVT;

	/* LPAE implies atomic ldrd/strd instructions */
	block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
	if (block >= 5)
		elf_hwcap |= HWCAP_LPAE;

	/* check for supported v8 Crypto instructions */
	isar5 = read_cpuid_ext(CPUID_EXT_ISAR5);

	block = cpuid_feature_extract_field(isar5, 4);
	if (block >= 2)
		elf_hwcap2 |= HWCAP2_PMULL;
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_AES;

	block = cpuid_feature_extract_field(isar5, 8);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA1;

	block = cpuid_feature_extract_field(isar5, 12);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA2;

	block = cpuid_feature_extract_field(isar5, 16);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_CRC32;
}
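
/*
 * Illustrative reading of the ID_ISAR5 decoding above: with an assumed
 * ID_ISAR5 value of 0x00011121, field [7:4] = 2 advertises AES + PMULL,
 * and fields [11:8], [15:12] and [19:16] (each = 1) add SHA1, SHA2 and
 * CRC32, so all five HWCAP2 features would end up in /proc/cpuinfo.
 */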

static void __init elf_hwcap_fixup(void)
{
	unsigned id = read_cpuid_id();

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if (read_cpuid_part() == ARM_CPU_PART_ARM1136 &&
	    ((id >> 20) & 3) == 0) {
		elf_hwcap &= ~HWCAP_TLS;
		return;
	}

	/* Verify if CPUID scheme is implemented */
	if ((id & 0x000f0000) != 0x000f0000)
		return;

	/*
	 * If the CPU supports LDREX/STREX and LDREXB/STREXB,
	 * avoid advertising SWP; it may not be atomic with
	 * multiprocessing cores.
	 */
	if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
	    (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
	     cpuid_feature_extract(CPUID_EXT_ISAR4, 20) >= 3))
		elf_hwcap &= ~HWCAP_SWP;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void notrace cpu_init(void)
{
#ifndef CONFIG_CPU_V7M
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		pr_crit("CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * This only works on resume and secondary cores. For booting on the
	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
	 */
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7\n\t"
	"add	r14, %0, %8\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %9"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
	      "I" (offsetof(struct stack, fiq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
#endif
}

u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

void __init smp_setup_processor_id(void)
{
	int i;
	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	cpu_logical_map(0) = cpu;
	for (i = 1; i < nr_cpu_ids; ++i)
		cpu_logical_map(i) = i == cpu ? 0 : i;

	/*
	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
	 * using percpu variable early, for example, lockdep will
	 * access percpu variable inside lock_release
	 */
	set_my_cpu_offset(0);

	pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
}

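/*
 * Example of the logical map built above: on a hypothetical four-CPU
 * system where the boot CPU has MPIDR Aff0 = 2, the table becomes
 * { 2, 1, 0, 3 }, i.e. logical CPU 0 refers to the booting core and
 * physical CPU 0 is reached through logical CPU 2.
 */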
struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. Resulting algorithm is a collision
 *			  free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity;
	u32 fs[3], bits[3], ls, mask = 0;
	/*
	 * Pre-scan the list of MPIDRS and filter out bits that do
	 * not contribute to affinity levels, ie they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits 0x%x\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 3; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB bit and LSB bits position
		 * to determine how many bits are required
		 * to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 24 bits values space to a
	 * compressed set of values. This is equivalent to hashing
	 * the MPIDR through shifting and ORing. It is a collision free
	 * hash though not minimal since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}.
	 */
	mpidr_hash.shift_aff[0] = fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
				mpidr_hash.shift_aff[0],
				mpidr_hash.shift_aff[1],
				mpidr_hash.shift_aff[2],
				mpidr_hash.mask,
				mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	sync_cache_w(&mpidr_hash);
}
#endif

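/*
 * Worked example for smp_build_mpidr_hash(), assuming four CPUs with
 * MPIDRs 0x000, 0x001, 0x100 and 0x101 (two clusters of two cores): the
 * toggling mask is 0x101, so fs = {0, 0, 0} and bits = {1, 1, 0}.  That
 * yields shift_aff[0] = 0, shift_aff[1] = 8 + 0 - 1 = 7 and
 * mpidr_hash.bits = 2, and the four MPIDRs hash to the dense,
 * collision-free indices 0..3.
 */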
/*
 * locate processor in the list of supported processor types.  The linker
 * builds this table for us from the entries in arch/arm/mm/proc-*.S
 */
struct proc_info_list *lookup_processor(u32 midr)
{
	struct proc_info_list *list = lookup_processor_type(midr);

	if (!list) {
		pr_err("CPU%u: configuration botched (ID %08x), CPU halted\n",
		       smp_processor_id(), midr);
		while (1)
		/* can't use cpu_relax() here as it may require MMU setup */;
	}

	return list;
}

static void __init setup_processor(void)
{
	unsigned int midr = read_cpuid_id();
	struct proc_info_list *list = lookup_processor(midr);

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

	init_proc_vtable(list->proc);
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
		list->cpu_name, midr, midr & 15,
		proc_arch[cpu_architecture()], get_cr());

	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;

	cpuid_init_hwcaps();
	patch_aeabi_idiv();

#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif
#ifdef CONFIG_MMU
	init_default_cache_policy(list->__cpu_mm_mmu_flags);
#endif
	erratum_a15_798181_init();

	elf_hwcap_fixup();

	cacheid_init();
	cpu_init();
}

void __init dump_machine_table(void)
{
	const struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}

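/*
 * Example of the rounding done by arm_add_memory() below: for a bank
 * described as start = 0x60000100, size = 64MB (hypothetical values), the
 * start is page-aligned up to 0x60001000, the size shrinks by the 0xf00
 * bytes skipped at the front and is then rounded down to a whole number of
 * pages, so slightly less than 64MB is finally handed to memblock_add().
 */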
int __init arm_add_memory(u64 start, u64 size)
{
	u64 aligned_start;

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is rounded down, start is rounded up.
	 */
	aligned_start = PAGE_ALIGN(start);
	if (aligned_start > start + size)
		size = 0;
	else
		size -= aligned_start - start;

#ifndef CONFIG_PHYS_ADDR_T_64BIT
	if (aligned_start > ULONG_MAX) {
		pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
			(long long)start);
		return -EINVAL;
	}

	if (aligned_start + size > ULONG_MAX) {
		pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
			(long long)start);
		/*
		 * To ensure bank->start + bank->size is representable in
		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
		 * This means we lose a page after masking.
		 */
		size = ULONG_MAX - aligned_start;
	}
#endif

	if (aligned_start < PHYS_OFFSET) {
		if (aligned_start + size <= PHYS_OFFSET) {
			pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
				aligned_start, aligned_start + size);
			return -EINVAL;
		}

		pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
			aligned_start, (u64)PHYS_OFFSET);

		size -= PHYS_OFFSET - aligned_start;
		aligned_start = PHYS_OFFSET;
	}

	start = aligned_start;
	size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

	/*
	 * Check whether this memory region has non-zero size or
	 * invalid node number.
	 */
	if (size == 0)
		return -EINVAL;

	memblock_add(start, size);
	return 0;
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */

static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	u64 size;
	u64 start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		memblock_remove(memblock_start_of_DRAM(),
			memblock_end_of_DRAM() - memblock_start_of_DRAM());
	}

	start = PHYS_OFFSET;
	size  = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);

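/*
 * For instance, booting with "mem=256M@0x80000000 mem=256M@0xa0000000"
 * (hypothetical addresses) makes the first early_mem() call drop the
 * firmware-described RAM and register a 256MB bank at 0x80000000, while
 * the second call simply adds another 256MB bank at 0xa0000000.
 */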
static void __init request_standard_resources(const struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(__init_begin - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		phys_addr_t start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		phys_addr_t end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		unsigned long boot_alias_start;

		/*
		 * Some systems have a special memory alias which is only
		 * used for booting.  We need to advertise this region to
		 * kexec-tools so they know where bootable RAM is located.
		 */
		boot_alias_start = phys_to_idmap(start);
		if (arm_has_idmap_alias() && boot_alias_start != IDMAP_INVALID_ADDR) {
			res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
			if (!res)
				panic("%s: Failed to allocate %zu bytes\n",
				      __func__, sizeof(*res));
			res->name = "System RAM (boot alias)";
			res->start = boot_alias_start;
			res->end = phys_to_idmap(end);
			res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
			request_resource(&iomem_resource, res);
		}

		res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
		if (!res)
			panic("%s: Failed to allocate %zu bytes\n", __func__,
			      sizeof(*res));
		res->name  = "System RAM";
		res->start = start;
		res->end = end;
		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) || \
    defined(CONFIG_EFI)
struct screen_info screen_info = {
	.orig_video_lines	= 30,
	.orig_video_cols	= 80,
	.orig_video_mode	= 0,
	.orig_video_ega_bx	= 0,
	.orig_video_isVGA	= 1,
	.orig_video_points	= 8
};
#endif

static int __init customize_machine(void)
{
	/*
	 * customizes platform devices, or adds new ones
	 * On DT based machines, we fall back to populating the
	 * machine from the device tree, if no callback is provided,
	 * otherwise we would always need an init_machine callback.
	 */
	if (machine_desc->init_machine)
		machine_desc->init_machine();

	return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
	struct device_node *root;
	int ret;

	if (machine_desc->init_late)
		machine_desc->init_late();

	root = of_find_node_by_path("/");
	if (root) {
		ret = of_property_read_string(root, "serial-number",
					      &system_serial);
		if (ret)
			system_serial = NULL;
	}

	if (!system_serial)
		system_serial = kasprintf(GFP_KERNEL, "%08x%08x",
					  system_serial_high,
					  system_serial_low);

	return 0;
}
late_initcall(init_machine_late);

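/*
 * reserve_crashkernel() below honours the usual "crashkernel=" syntax: for
 * example "crashkernel=64M@0x60000000" (hypothetical address) asks for
 * 64MB at that exact base, while a plain "crashkernel=64M" lets the kernel
 * pick a suitable lowmem range aligned to CRASH_ALIGN.
 */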
#ifdef CONFIG_KEXEC
/*
 * The crash region must be aligned to 128MB to avoid
 * zImage relocating below the reserved region.
 */
#define CRASH_ALIGN	(128 << 20)

static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves memory area given in "crashkernel=" kernel command
 * line parameter. The memory reserved is used by a dump capture kernel when
 * primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

	if (crash_base <= 0) {
		unsigned long long crash_max = idmap_to_phys((u32)~0);
		unsigned long long lowmem_max = __pa(high_memory - 1) + 1;
		if (crash_max > lowmem_max)
			crash_max = lowmem_max;
		crash_base = memblock_find_in_range(CRASH_ALIGN, crash_max,
						    crash_size, CRASH_ALIGN);
		if (!crash_base) {
			pr_err("crashkernel reservation failed - No suitable area found.\n");
			return;
		}
	} else {
		unsigned long long start;

		start = memblock_find_in_range(crash_base,
					       crash_base + crash_size,
					       crash_size, SECTION_SIZE);
		if (start != crash_base) {
			pr_err("crashkernel reservation failed - memory is in use.\n");
			return;
		}
	}

	ret = memblock_reserve(crash_base, crash_size);
	if (ret < 0) {
		pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
			(unsigned long)crash_base);
		return;
	}

	pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
		(unsigned long)(crash_size >> 20),
		(unsigned long)(crash_base >> 20),
		(unsigned long)(total_mem >> 20));

	/* The crashk resource must always be located in normal mem */
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);

	if (arm_has_idmap_alias()) {
		/*
		 * If we have a special RAM alias for use at boot, we
		 * need to advertise to kexec tools where the alias is.
		 */
		static struct resource crashk_boot_res = {
			.name = "Crash kernel (boot alias)",
			.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
		};

		crashk_boot_res.start = phys_to_idmap(crash_base);
		crashk_boot_res.end = crashk_boot_res.start + crash_size - 1;
		insert_resource(&iomem_resource, &crashk_boot_res);
	}
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */

void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	sync_boot_mode();

	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}

void __init setup_arch(char **cmdline_p)
{
	const struct machine_desc *mdesc;

	setup_processor();
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
	if (!mdesc) {
		early_print("\nError: invalid dtb and unrecognized/unsupported machine ID\n");
		early_print("  r1=0x%08x, r2=0x%08x\n", __machine_arch_type,
			    __atags_pointer);
		if (__atags_pointer)
			early_print("  r2[]=%*ph\n", 16,
				    phys_to_virt(__atags_pointer));
		dump_machine_table();
	}

	machine_desc = mdesc;
	machine_name = mdesc->name;
	dump_stack_set_arch_desc("%s", mdesc->name);

	if (mdesc->reboot_mode != REBOOT_HARD)
		reboot_mode = mdesc->reboot_mode;

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	early_fixmap_init();
	early_ioremap_init();

	parse_early_param();

#ifdef CONFIG_MMU
	early_mm_init(mdesc);
#endif
	setup_dma_zone(mdesc);
	xen_early_init();
	efi_init();
	/*
	 * Make sure the calculation for lowmem/highmem is set appropriately
	 * before reserving/allocating any memory
	 */
	adjust_lowmem_bounds();
	arm_memblock_init(mdesc);
	/* Memory may have been removed so recalculate the bounds. */
	adjust_lowmem_bounds();

	early_ioremap_reset();

	paging_init(mdesc);
	request_standard_resources(mdesc);

	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
	psci_dt_init();
#ifdef CONFIG_SMP
	if (is_smp()) {
		if (!mdesc->smp_init || !mdesc->smp_init()) {
			if (psci_smp_available())
				smp_set_ops(&psci_smp_ops);
			else if (mdesc->smp)
				smp_set_ops(mdesc->smp);
		}
		smp_init_cpus();
		smp_build_mpidr_hash();
	}
#endif

	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}


static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = platform_can_hotplug_cpu(cpu);
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif

static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	NULL
};

static const char *hwcap2_str[] = {
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};

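/*
 * The BogoMIPS value printed by c_show() below is just loops_per_jiffy
 * rescaled: assuming HZ = 100 and a loops_per_jiffy of, say, 4980736, the
 * integer part is 4980736 / 5000 = 996 and the fractional part is
 * (4980736 / 50) % 100 = 14, so the line reads "BogoMIPS	: 996.14".
 */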
static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	u32 cpuid;

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		seq_printf(m, "model name\t: %s rev %d (%s)\n",
			   cpu_name, cpuid & 15, elf_platform);

#if defined(CONFIG_SMP)
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   loops_per_jiffy / (500000/HZ),
			   (loops_per_jiffy / (5000/HZ)) % 100);
#endif
		/* dump out the processor features */
		seq_puts(m, "Features\t: ");

		for (j = 0; hwcap_str[j]; j++)
			if (elf_hwcap & (1 << j))
				seq_printf(m, "%s ", hwcap_str[j]);

		for (j = 0; hwcap2_str[j]; j++)
			if (elf_hwcap2 & (1 << j))
				seq_printf(m, "%s ", hwcap2_str[j]);

		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
		seq_printf(m, "CPU architecture: %s\n",
			   proc_arch[cpu_architecture()]);

		if ((cpuid & 0x0008f000) == 0x00000000) {
			/* pre-ARM7 */
			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
		} else {
			if ((cpuid & 0x0008f000) == 0x00007000) {
				/* ARM7 */
				seq_printf(m, "CPU variant\t: 0x%02x\n",
					   (cpuid >> 16) & 127);
			} else {
				/* post-ARM7 */
				seq_printf(m, "CPU variant\t: 0x%x\n",
					   (cpuid >> 20) & 15);
			}
			seq_printf(m, "CPU part\t: 0x%03x\n",
				   (cpuid >> 4) & 0xfff);
		}
		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
	}

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %s\n", system_serial);

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};