/*
 * linux/arch/arm/kernel/setup.c
 *
 * Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>
#include <linux/psci.h>

#include <asm/unified.h>
#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/early_ioremap.h>
#include <asm/fixmap.h>
#include <asm/procinfo.h>
#include <asm/psci.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>
#include <asm/xen/hypervisor.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
#include <asm/virt.h>

#include "atags.h"


#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void init_default_cache_policy(unsigned long);
extern void paging_init(const struct machine_desc *desc);
extern void early_paging_init(const struct machine_desc *);
extern void sanity_check_meminfo(void);
extern enum reboot_mode reboot_mode;
extern void setup_dma_zone(const struct machine_desc *desc);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

const char *system_serial;
EXPORT_SYMBOL(system_serial);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);

unsigned int elf_hwcap2 __read_mostly;
EXPORT_SYMBOL(elf_hwcap2);


#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
	u32 fiq[3];
} ____cacheline_aligned;

#ifndef CONFIG_CPU_V7M
static struct stack stacks[NR_CPUS];
#endif

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
const struct machine_desc *machine_desc __initdata;

static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"7M",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

#ifdef CONFIG_CPU_V7M
static int __get_cpu_architecture(void)
{
	return CPU_ARCH_ARMv7M;
}
#else
static int __get_cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
#endif

int __pure cpu_architecture(void)
{
	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

	return __cpu_architecture;
}

static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		asm("mcr p15, 2, %0, c0, c0, 0 @ set CSSELR"
		    : /* No output operands */
		    : "r" (1));
		isb();
		asm("mrc p15, 1, %0, c0, c0, 0 @ read CCSIDR"
		    : "=r" (id_reg));
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}

static void __init cacheid_init(void)
{
	unsigned int arch = cpu_architecture();

	if (arch == CPU_ARCH_ARMv7M) {
		cacheid = 0;
	} else if (arch >= CPU_ARCH_ARMv6) {
		unsigned int cachetype = read_cpuid_cachetype();
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	pr_info("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}

static void __init cpuid_init_hwcaps(void)
{
	int block;
	u32 isar5;

	if (cpu_architecture() < CPU_ARCH_ARMv7)
		return;

	block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24);
	if (block >= 2)
		elf_hwcap |= HWCAP_IDIVA;
	if (block >= 1)
		elf_hwcap |= HWCAP_IDIVT;

	/* LPAE implies atomic ldrd/strd instructions */
	block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
	if (block >= 5)
		elf_hwcap |= HWCAP_LPAE;

	/* check for supported v8 Crypto instructions */
	isar5 = read_cpuid_ext(CPUID_EXT_ISAR5);

	block = cpuid_feature_extract_field(isar5, 4);
	if (block >= 2)
		elf_hwcap2 |= HWCAP2_PMULL;
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_AES;

	block = cpuid_feature_extract_field(isar5, 8);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA1;

	block = cpuid_feature_extract_field(isar5, 12);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA2;

	block = cpuid_feature_extract_field(isar5, 16);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_CRC32;
}

static void __init elf_hwcap_fixup(void)
{
	unsigned id = read_cpuid_id();

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if (read_cpuid_part() == ARM_CPU_PART_ARM1136 &&
	    ((id >> 20) & 3) == 0) {
		elf_hwcap &= ~HWCAP_TLS;
		return;
	}

	/* Verify if CPUID scheme is implemented */
	if ((id & 0x000f0000) != 0x000f0000)
		return;

	/*
	 * If the CPU supports LDREX/STREX and LDREXB/STREXB,
	 * avoid advertising SWP; it may not be atomic with
	 * multiprocessing cores.
	 */
	if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
	    (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
	     cpuid_feature_extract(CPUID_EXT_ISAR3, 20) >= 3))
		elf_hwcap &= ~HWCAP_SWP;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void notrace cpu_init(void)
{
#ifndef CONFIG_CPU_V7M
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		pr_crit("CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * This only works on resume and secondary cores. For booting on the
	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
	 */
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
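	 * (Editorial note: the Thumb-2 MSR encoding only accepts a register
	 * operand, so on CONFIG_THUMB2_KERNEL builds the mode values must be
	 * materialised in registers via the "r" constraint, whereas ARM mode
	 * can encode them directly as immediates via "I".)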
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC "r"
#else
#define PLC "I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7\n\t"
	"add	r14, %0, %8\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %9"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
	      "I" (offsetof(struct stack, fiq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
#endif
}

u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

void __init smp_setup_processor_id(void)
{
	int i;
	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	cpu_logical_map(0) = cpu;
	for (i = 1; i < nr_cpu_ids; ++i)
		cpu_logical_map(i) = i == cpu ? 0 : i;

	/*
	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
	 * using percpu variable early, for example, lockdep will
	 * access percpu variable inside lock_release
	 */
	set_my_cpu_offset(0);

	pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
}

struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. Resulting algorithm is a collision
 *			  free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity;
	u32 fs[3], bits[3], ls, mask = 0;
	/*
	 * Pre-scan the list of MPIDRs and filter out bits that do
	 * not contribute to affinity levels, ie they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits 0x%x\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 3; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB bit and LSB bits position
		 * to determine how many bits are required
		 * to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 24 bits values space to a
	 * compressed set of values. This is equivalent to hashing
	 * the MPIDR through shifting and ORing. It is a collision free
	 * hash though not minimal since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}.
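	 *
	 * Illustrative (hypothetical) example: for MPIDRs
	 * {0x0, 0x1, 0x100, 0x101} the mask is 0x101, i.e. one significant
	 * bit at aff0 (fs[0] = 0, bits[0] = 1) and one at aff1 (fs[1] = 0,
	 * bits[1] = 1). With MPIDR_LEVEL_BITS == 8, shift_aff[1] becomes
	 * 8 + 0 - 1 = 7, so shifting the aff1 bit right by 7 places it next
	 * to the aff0 bit, giving compact 2-bit indices 0..3 for the four
	 * CPUs.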
	 */
	mpidr_hash.shift_aff[0] = fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
		 mpidr_hash.shift_aff[0],
		 mpidr_hash.shift_aff[1],
		 mpidr_hash.shift_aff[2],
		 mpidr_hash.mask,
		 mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	sync_cache_w(&mpidr_hash);
}
#endif

static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
		       read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
		cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
		proc_arch[cpu_architecture()], get_cr());

	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;

	cpuid_init_hwcaps();

#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif
#ifdef CONFIG_MMU
	init_default_cache_policy(list->__cpu_mm_mmu_flags);
#endif
	erratum_a15_798181_init();

	elf_hwcap_fixup();

	cacheid_init();
	cpu_init();
}

void __init dump_machine_table(void)
{
	const struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}

int __init arm_add_memory(u64 start, u64 size)
{
	u64 aligned_start;

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is rounded down, start is rounded up.
	 */
	aligned_start = PAGE_ALIGN(start);
	if (aligned_start > start + size)
		size = 0;
	else
		size -= aligned_start - start;

#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
	if (aligned_start > ULONG_MAX) {
		pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
			(long long)start);
		return -EINVAL;
	}

	if (aligned_start + size > ULONG_MAX) {
		pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
			(long long)start);
		/*
		 * To ensure bank->start + bank->size is representable in
		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
		 * This means we lose a page after masking.
		 */
		size = ULONG_MAX - aligned_start;
	}
#endif

	if (aligned_start < PHYS_OFFSET) {
		if (aligned_start + size <= PHYS_OFFSET) {
			pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
				aligned_start, aligned_start + size);
			return -EINVAL;
		}

		pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
			aligned_start, (u64)PHYS_OFFSET);

		size -= PHYS_OFFSET - aligned_start;
		aligned_start = PHYS_OFFSET;
	}

	start = aligned_start;
	size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

	/*
	 * Check whether this memory region has non-zero size or
	 * invalid node number.
	 */
	if (size == 0)
		return -EINVAL;

	memblock_add(start, size);
	return 0;
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */

static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	u64 size;
	u64 start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		memblock_remove(memblock_start_of_DRAM(),
			memblock_end_of_DRAM() - memblock_start_of_DRAM());
	}

	start = PHYS_OFFSET;
	size  = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);
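
/*
 * Editorial example (illustrative): booting with "mem=64M@0x80000000"
 * discards the firmware-provided memory layout and registers a single 64MB
 * bank at physical address 0x80000000 via arm_add_memory(); if the "@start"
 * part is omitted, early_mem() above falls back to PHYS_OFFSET as the start.
 */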

static void __init request_standard_resources(const struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start = virt_to_phys(_text);
	kernel_code.end = virt_to_phys(_etext - 1);
	kernel_data.start = virt_to_phys(_sdata);
	kernel_data.end = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = memblock_virt_alloc(sizeof(*res), 0);
		res->name = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
	.orig_video_lines = 30,
	.orig_video_cols = 80,
	.orig_video_mode = 0,
	.orig_video_ega_bx = 0,
	.orig_video_isVGA = 1,
	.orig_video_points = 8
};
#endif
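
/*
 * Editorial note: customize_machine() below runs at arch_initcall time,
 * i.e. after setup_arch() has selected machine_desc and unflattened the
 * device tree, so the init_machine callback can rely on both being present.
 */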
static int __init customize_machine(void)
{
	/*
	 * customizes platform devices, or adds new ones
	 * On DT based machines, we fall back to populating the
	 * machine from the device tree, if no callback is provided,
	 * otherwise we would always need an init_machine callback.
	 */
	of_iommu_init();
	if (machine_desc->init_machine)
		machine_desc->init_machine();
#ifdef CONFIG_OF
	else
		of_platform_populate(NULL, of_default_bus_match_table,
				     NULL, NULL);
#endif
	return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
	struct device_node *root;
	int ret;

	if (machine_desc->init_late)
		machine_desc->init_late();

	root = of_find_node_by_path("/");
	if (root) {
		ret = of_property_read_string(root, "serial-number",
					      &system_serial);
		if (ret)
			system_serial = NULL;
	}

	if (!system_serial)
		system_serial = kasprintf(GFP_KERNEL, "%08x%08x",
					  system_serial_high,
					  system_serial_low);

	return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves memory area given in "crashkernel=" kernel command
 * line parameter. The memory reserved is used by a dump capture kernel when
 * primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

	ret = memblock_reserve(crash_base, crash_size);
	if (ret < 0) {
		pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
			(unsigned long)crash_base);
		return;
	}

	pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
		(unsigned long)(crash_size >> 20),
		(unsigned long)(crash_base >> 20),
		(unsigned long)(total_mem >> 20));

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */

void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	sync_boot_mode();

	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}

void __init setup_arch(char **cmdline_p)
{
	const struct machine_desc *mdesc;

	setup_processor();
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;
	dump_stack_set_arch_desc("%s", mdesc->name);

	if (mdesc->reboot_mode != REBOOT_HARD)
		reboot_mode = mdesc->reboot_mode;

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	early_fixmap_init();
	early_ioremap_init();

	parse_early_param();

#ifdef CONFIG_MMU
	early_paging_init(mdesc);
#endif
	setup_dma_zone(mdesc);
	sanity_check_meminfo();
	arm_memblock_init(mdesc);

	early_ioremap_reset();

	paging_init(mdesc);
	request_standard_resources(mdesc);

	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
	psci_dt_init();
	xen_early_init();
#ifdef CONFIG_SMP
	if (is_smp()) {
		if (!mdesc->smp_init || !mdesc->smp_init()) {
			if (psci_smp_available())
				smp_set_ops(&psci_smp_ops);
			else if (mdesc->smp)
				smp_set_ops(mdesc->smp);
		}
		smp_init_cpus();
		smp_build_mpidr_hash();
	}
#endif

	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}


static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = platform_can_hotplug_cpu(cpu);
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif

static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	NULL
};

static const char *hwcap2_str[] = {
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};

static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	u32 cpuid;

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		seq_printf(m, "model name\t: %s rev %d (%s)\n",
			   cpu_name, cpuid & 15, elf_platform);

#if defined(CONFIG_SMP)
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   loops_per_jiffy / (500000/HZ),
			   (loops_per_jiffy / (5000/HZ)) % 100);
#endif
		/* dump out the processor features */
		seq_puts(m, "Features\t: ");

		for (j = 0; hwcap_str[j]; j++)
			if (elf_hwcap & (1 << j))
				seq_printf(m, "%s ", hwcap_str[j]);

		for (j = 0; hwcap2_str[j]; j++)
			if (elf_hwcap2 & (1 << j))
				seq_printf(m, "%s ", hwcap2_str[j]);

		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
		seq_printf(m, "CPU architecture: %s\n",
			   proc_arch[cpu_architecture()]);

		if ((cpuid & 0x0008f000) == 0x00000000) {
			/* pre-ARM7 */
			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
		} else {
			if ((cpuid & 0x0008f000) == 0x00007000) {
				/* ARM7 */
				seq_printf(m, "CPU variant\t: 0x%02x\n",
					   (cpuid >> 16) & 127);
			} else {
				/* post-ARM7 */
				seq_printf(m, "CPU variant\t: 0x%x\n",
					   (cpuid >> 20) & 15);
			}
			seq_printf(m, "CPU part\t: 0x%03x\n",
				   (cpuid >> 4) & 0xfff);
		}
		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
	}

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %s\n", system_serial);

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next = c_next,
	.stop = c_stop,
	.show = c_show
};