/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>

#include <asm/unified.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/traps.h>
#include <asm/unwind.h>

#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
#include "compat.h"
#endif
#include "atags.h"
#include "tcm.h"

/* Fallback RAM size used by the default ATAG_MEM tag (init_tags below). */
#ifndef MEM_SIZE
#define MEM_SIZE	(16*1024*1024)
#endif

#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

/*
 * "fpe=" command line option: record (up to 8 bytes of) the requested
 * floating point emulator name for the FP emulation code to inspect.
 */
static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void paging_init(struct machine_desc *desc);
extern void sanity_check_meminfo(void);
extern void reboot_setup(char *str);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

/* Physical address of the boot-time tag list (set by head.S from r2). */
unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);


/*
 * Per-CPU-type indirection tables for multi-CPU kernels; copied from the
 * matched proc_info_list entry in setup_processor().
 */
#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Small per-mode stacks for the IRQ, abort and undefined-instruction
 * exception modes; installed into each mode's SP by cpu_init().
 */
struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
struct machine_desc *machine_desc __initdata;

static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
/* Reads back 'l' on a little-endian build and 'b' on a big-endian one. */
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel text",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

/* Legacy PC-style parallel port I/O ranges, reserved on request only. */
static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

/* Architecture name strings, indexed by the CPU_ARCH_* value. */
static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

/*
 * Decode the architecture version (CPU_ARCH_*) from the main ID register,
 * handling the pre-ARM7, ARM7, old-style and revised CPUID formats in turn.
 */
int cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		/* ARM7 family: bit 23 distinguishes ARMv4T from ARMv3. */
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		/* Old-style CPUID: architecture field in bits [18:16]. */
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		unsigned int mmfr0;

		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		asm("mrc	p15, 0, %0, c0, c1, 4"
		    : "=r" (mmfr0));
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}

/*
 * Determine whether the instruction cache can alias: i.e. whether one
 * I-cache way is larger than PAGE_SIZE (v7), or the cache type register
 * says so directly (v6).  Returns non-zero if the I-cache aliases.
 */
static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		/* CSSELR = 1 selects the L1 instruction cache for CCSIDR. */
		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
		    : /* No output operands */
		    : "r" (1));
		isb();
		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
		    : "=r" (id_reg));
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}

/*
 * Classify the D-cache and I-cache (VIVT / VIPT aliasing / VIPT
 * non-aliasing / ASID-tagged) into the global 'cacheid' flags, and
 * report the result on the console.
 */
static void __init cacheid_init(void)
{
	unsigned int cachetype = read_cpuid_cachetype();
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			if ((cachetype & (3 << 14)) == 1 << 14)
				cacheid |= CACHEID_ASID_TAGGED;
		} else {
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		/* Pre-v6 caches are virtually indexed, virtually tagged. */
		cacheid = CACHEID_VIVT;
	}

	printk("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provide the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

/*
 * printf-style output usable before the console is up: always goes to
 * printk, and additionally to the low-level debug UART when
 * CONFIG_DEBUG_LL is enabled.  Output is truncated to 256 bytes.
 */
void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}

/*
 * Clear HWCAP_TLS on ARM1136 r0 parts, which lack the TLS register.
 */
static void __init feat_v6_fixup(void)
{
	int id = read_cpuid_id();

	/* Only ARM implementer, ARMv6 architecture parts are affected. */
	if ((id & 0xff0f0000) != 0x41070000)
		return;

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
		elf_hwcap &= ~HWCAP_TLS;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}

/*
 * Match the running CPU against the compiled-in processor table, copy
 * its function tables into the MULTI_* globals, set up the utsname
 * machine string, elf_platform and elf_hwcap, then classify the caches
 * and initialise this CPU's exception stacks.  Hangs if no entry
 * matches, since the kernel cannot run on an unknown CPU.
 */
static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
	sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~HWCAP_THUMB;
#endif

	feat_v6_fixup();

	cacheid_init();
	cpu_init();
}

/*
 * Print the table of supported machine IDs/names via early_print and
 * halt.  Called when the bootloader hands us an unknown machine ID;
 * deliberately never returns.
 */
void __init dump_machine_table(void)
{
	struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}

/*
 * Record a region of RAM in the meminfo bank array, page-aligning the
 * start upwards and the size downwards.  Returns 0 on success, -EINVAL
 * if the bank table is full or the aligned region is empty.
 */
int __init arm_add_memory(phys_addr_t start, unsigned long size)
{
	struct membank *bank = &meminfo.bank[meminfo.nr_banks];

	if (meminfo.nr_banks >= NR_BANKS) {
		printk(KERN_CRIT "NR_BANKS too low, "
			"ignoring memory at 0x%08llx\n", (long long)start);
		return -EINVAL;
	}

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is appropriately rounded down, start is rounded up.
	 */
	size -= start & ~PAGE_MASK;
	bank->start = PAGE_ALIGN(start);
	bank->size  = size & PAGE_MASK;

	/*
	 * Check whether this memory region has non-zero size or
	 * invalid node number.
	 */
	if (bank->size == 0)
		return -EINVAL;

	meminfo.nr_banks++;
	return 0;
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	unsigned long size;
	phys_addr_t start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		meminfo.nr_banks = 0;
	}

	start = PHYS_OFFSET;	/* default base when no @start is given */
	size  = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);

/*
 * Forward the ATAG_RAMDISK parameters to the RAM disk driver's
 * module-level knobs (no-op unless CONFIG_BLK_DEV_RAM is set).
 */
static void __init
setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
{
#ifdef CONFIG_BLK_DEV_RAM
	extern int rd_size, rd_image_start, rd_prompt, rd_doload;

	rd_image_start = image_start;
	rd_prompt = prompt;
	rd_doload = doload;

	if (rd_sz)
		rd_size = rd_sz;
#endif
}

/*
 * Register the standard resource tree: one "System RAM" resource per
 * memblock region, with kernel text/data nested inside the region that
 * contains them, plus optional video RAM and legacy parallel-port I/O
 * ranges as requested by the machine descriptor.
 */
static void __init request_standard_resources(struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

/*
 *  Tag parsing.
 *
 * This is the new way of passing data to the kernel at boot time.  Rather
 * than passing a fixed inflexible structure to the kernel, we pass a list
 * of variable-sized tags to the kernel.  The first tag must be a ATAG_CORE
 * tag for the list to be recognised (to distinguish the tagged list from
 * a param_struct).  The list is terminated with a zero-length tag (this tag
 * is not parsed in any way).
 */
static int __init parse_tag_core(const struct tag *tag)
{
	if (tag->hdr.size > 2) {
		if ((tag->u.core.flags & 1) == 0)
			root_mountflags &= ~MS_RDONLY;
		ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
	}
	return 0;
}

__tagtable(ATAG_CORE, parse_tag_core);

/* ATAG_MEM: one physical memory bank (start, size). */
static int __init parse_tag_mem32(const struct tag *tag)
{
	return arm_add_memory(tag->u.mem.start, tag->u.mem.size);
}

__tagtable(ATAG_MEM, parse_tag_mem32);

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
 .orig_video_lines	= 30,
 .orig_video_cols	= 80,
 .orig_video_mode	= 0,
 .orig_video_ega_bx	= 0,
 .orig_video_isVGA	= 1,
 .orig_video_points	= 8
};

/* ATAG_VIDEOTEXT: bootloader-provided text console geometry/state. */
static int __init parse_tag_videotext(const struct tag *tag)
{
	screen_info.orig_x            = tag->u.videotext.x;
	screen_info.orig_y            = tag->u.videotext.y;
	screen_info.orig_video_page   = tag->u.videotext.video_page;
	screen_info.orig_video_mode   = tag->u.videotext.video_mode;
	screen_info.orig_video_cols   = tag->u.videotext.video_cols;
	screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
	screen_info.orig_video_lines  = tag->u.videotext.video_lines;
	screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
	screen_info.orig_video_points = tag->u.videotext.video_points;
	return 0;
}

__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
#endif

/* ATAG_RAMDISK: initrd load/prompt flags, start block and size in KiB. */
static int __init parse_tag_ramdisk(const struct tag *tag)
{
	setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
		      (tag->u.ramdisk.flags & 2) == 0,
		      tag->u.ramdisk.start, tag->u.ramdisk.size);
	return 0;
}

__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);

/* ATAG_SERIAL: 64-bit board serial number, reported in /proc/cpuinfo. */
static int __init parse_tag_serialnr(const struct tag *tag)
{
	system_serial_low = tag->u.serialnr.low;
	system_serial_high = tag->u.serialnr.high;
	return 0;
}

__tagtable(ATAG_SERIAL, parse_tag_serialnr);

/* ATAG_REVISION: board revision, reported in /proc/cpuinfo. */
static int __init parse_tag_revision(const struct tag *tag)
{
	system_rev = tag->u.revision.rev;
	return 0;
}

__tagtable(ATAG_REVISION, parse_tag_revision);

/*
 * ATAG_CMDLINE: bootloader-supplied kernel command line.  Depending on
 * config it is appended to, ignored in favour of, or replaces the
 * compiled-in default command line.
 */
static int __init parse_tag_cmdline(const struct tag *tag)
{
#if defined(CONFIG_CMDLINE_EXTEND)
	strlcat(default_command_line, " ", COMMAND_LINE_SIZE);
	strlcat(default_command_line, tag->u.cmdline.cmdline,
		COMMAND_LINE_SIZE);
#elif defined(CONFIG_CMDLINE_FORCE)
	pr_warning("Ignoring tag cmdline (using the default kernel command line)\n");
#else
	strlcpy(default_command_line, tag->u.cmdline.cmdline,
		COMMAND_LINE_SIZE);
#endif
	return 0;
}

__tagtable(ATAG_CMDLINE, parse_tag_cmdline);

/*
 * Scan the tag table for this tag, and call its parse function.
 * The tag table is built by the linker from all the __tagtable
 * declarations.
 */
static int __init parse_tag(const struct tag *tag)
{
	extern struct tagtable __tagtable_begin, __tagtable_end;
	struct tagtable *t;

	for (t = &__tagtable_begin; t < &__tagtable_end; t++)
		if (tag->hdr.tag == t->tag) {
			t->parse(tag);
			break;
		}

	/* True if a matching handler was found. */
	return t < &__tagtable_end;
}

/*
 * Parse all tags in the list, checking both the global and architecture
 * specific tag tables.
 */
static void __init parse_tags(const struct tag *t)
{
	for (; t->hdr.size; t = tag_next(t))
		if (!parse_tag(t))
			printk(KERN_WARNING
				"Ignoring unrecognised tag 0x%08x\n",
				t->hdr.tag);
}

/*
 * This holds our defaults.
 */
static struct init_tags {
	struct tag_header hdr1;
	struct tag_core   core;
	struct tag_header hdr2;
	struct tag_mem32  mem;
	struct tag_header hdr3;
} init_tags __initdata = {
	{ tag_size(tag_core), ATAG_CORE },
	{ 1, PAGE_SIZE, 0xff },
	{ tag_size(tag_mem32), ATAG_MEM },
	{ MEM_SIZE },
	{ 0, ATAG_NONE }
};

/* Run the machine descriptor's platform-device setup, if it has one. */
static int __init customize_machine(void)
{
	/* customizes platform devices, or adds new ones */
	if (machine_desc->init_machine)
		machine_desc->init_machine();
	return 0;
}
arch_initcall(customize_machine);

#ifdef CONFIG_KEXEC
/* Total low memory in bytes, derived from the pfn range. */
static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory are for crash kernel
 *
 * This function reserves memory area given in "crashkernel=" kernel command
 * line parameter. The memory reserved is used by a dump capture kernel when
 * primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

	ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
	if (ret < 0) {
		printk(KERN_WARNING "crashkernel reservation failed - "
		       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
		return;
	}

	printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
	       "for crashkernel (System RAM: %ldMB)\n",
	       (unsigned long)(crash_size >> 20),
	       (unsigned long)(crash_base >> 20),
	       (unsigned long)(total_mem >> 20));

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */

/*
 * Neutralise all ATAG_MEM tags in the list so the bootloader's memory
 * layout cannot override one already set up (e.g. by a fixup or "mem=").
 */
static void __init squash_mem_tags(struct tag *tag)
{
	for (; tag->hdr.size; tag = tag_next(tag))
		if (tag->hdr.tag == ATAG_MEM)
			tag->hdr.tag = ATAG_NONE;
}

/*
 * ATAG-based boot: look up the machine descriptor for machine ID 'nr',
 * locate the tag list (from r2, the machine default, or the built-in
 * init_tags), run the machine fixup, parse the tags and install the
 * boot command line.  Does not return if the machine ID is unknown.
 */
static struct machine_desc * __init setup_machine_tags(unsigned int nr)
{
	struct tag *tags = (struct tag *)&init_tags;
	struct machine_desc *mdesc = NULL, *p;
	char *from = default_command_line;

	init_tags.mem.start = PHYS_OFFSET;

	/*
	 * locate machine in the list of supported machines.
	 */
	for_each_machine_desc(p)
		if (nr == p->nr) {
			printk("Machine: %s\n", p->name);
			mdesc = p;
			break;
		}

	if (!mdesc) {
		early_print("\nError: unrecognized/unsupported machine ID"
			" (r1 = 0x%08x).\n\n", nr);
		dump_machine_table(); /* does not return */
	}

	if (__atags_pointer)
		tags = phys_to_virt(__atags_pointer);
	else if (mdesc->boot_params) {
#ifdef CONFIG_MMU
		/*
		 * We still are executing with a minimal MMU mapping created
		 * with the presumption that the machine default for this
		 * is located in the first MB of RAM.  Anything else will
		 * fault and silently hang the kernel at this point.
		 */
		if (mdesc->boot_params < PHYS_OFFSET ||
		    mdesc->boot_params >= PHYS_OFFSET + SZ_1M) {
			printk(KERN_WARNING
			       "Default boot params at physical 0x%08lx out of reach\n",
			       mdesc->boot_params);
		} else
#endif
		{
			tags = phys_to_virt(mdesc->boot_params);
		}
	}

#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
	/*
	 * If we have the old style parameters, convert them to
	 * a tag list.
	 */
	if (tags->hdr.tag != ATAG_CORE)
		convert_to_tag_list(tags);
#endif

	if (tags->hdr.tag != ATAG_CORE) {
#if defined(CONFIG_OF)
		/*
		 * If CONFIG_OF is set, then assume this is a reasonably
		 * modern system that should pass boot parameters
		 */
		early_print("Warning: Neither atags nor dtb found\n");
#endif
		tags = (struct tag *)&init_tags;
	}

	if (mdesc->fixup)
		mdesc->fixup(mdesc, tags, &from, &meminfo);

	if (tags->hdr.tag == ATAG_CORE) {
		/* A fixup that populated meminfo takes precedence over tags. */
		if (meminfo.nr_banks != 0)
			squash_mem_tags(tags);
		save_atags(tags);
		parse_tags(tags);
	}

	/* parse_early_param needs a boot_command_line */
	strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);

	return mdesc;
}


/*
 * Architecture entry point for boot-time setup: identify the CPU,
 * find the machine descriptor (DT first, then ATAGs), size and map
 * memory, register resources, and run early machine hooks.  Called
 * once from start_kernel() with interrupts disabled.
 */
void __init setup_arch(char **cmdline_p)
{
	struct machine_desc *mdesc;

	unwind_init();

	setup_processor();
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;

	if (mdesc->soft_reboot)
		reboot_setup("s");

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

	sanity_check_meminfo();
	arm_memblock_init(&meminfo, mdesc);

	paging_init(mdesc);
	request_standard_resources(mdesc);

	unflatten_device_tree();

#ifdef CONFIG_SMP
	if (is_smp())
		smp_init_cpus();
#endif
	reserve_crashkernel();

	tcm_init();

#ifdef CONFIG_ZONE_DMA
	if (mdesc->dma_zone_size) {
		extern unsigned long arm_dma_zone_size;
		arm_dma_zone_size = mdesc->dma_zone_size;
	}
#endif
#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
	early_trap_init();

	if (mdesc->init_early)
		mdesc->init_early();
}


/* Register each possible CPU with the sysfs CPU subsystem. */
static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = 1;
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
/* Create the /proc/cpu directory used by platform CPU-frequency code. */
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif

/* Feature names, indexed by HWCAP_* bit position, for /proc/cpuinfo. */
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	NULL
};

/*
 * seq_file show routine for /proc/cpuinfo: processor name, per-CPU
 * BogoMIPS, hwcap feature list, decoded main ID register fields, and
 * the board name/revision/serial.
 */
static int c_show(struct seq_file *m, void *v)
{
	int i;

	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
		   cpu_name, read_cpuid_id() & 15, elf_platform);

#if defined(CONFIG_SMP)
	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
	}
#else /* CONFIG_SMP */
	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000/HZ),
		   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

	/* dump out the processor features */
	seq_puts(m, "Features\t: ");

	for (i = 0; hwcap_str[i]; i++)
		if (elf_hwcap & (1 << i))
			seq_printf(m, "%s ", hwcap_str[i]);

	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
	seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

	if ((read_cpuid_id() & 0x0008f000) == 0x00000000) {
		/* pre-ARM7 */
		seq_printf(m, "CPU part\t: %07x\n", read_cpuid_id() >> 4);
	} else {
		if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
			/* ARM7 */
			seq_printf(m, "CPU variant\t: 0x%02x\n",
				   (read_cpuid_id() >> 16) & 127);
		} else {
			/* post-ARM7 */
			seq_printf(m, "CPU variant\t: 0x%x\n",
				   (read_cpuid_id() >> 20) & 15);
		}
		seq_printf(m, "CPU part\t: 0x%03x\n",
			   (read_cpuid_id() >> 4) & 0xfff);
	}
	seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);

	seq_puts(m, "\n");

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}

/* There is a single /proc/cpuinfo record; iterate exactly once. */
static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};