/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/fs.h>

#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>

#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/traps.h>

#include "compat.h"
#include "atags.h"

#ifndef MEM_SIZE
#define MEM_SIZE        (16*1024*1024)
#endif

#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
        memcpy(fpe_type, line, 8);
        return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void paging_init(struct meminfo *, struct machine_desc *desc);
extern void reboot_setup(char *str);
extern void _text, _etext, __data_start, _edata, _end;

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap;
EXPORT_SYMBOL(elf_hwcap);


#ifdef MULTI_CPU
struct processor processor;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache;
#endif

struct stack {
        u32 irq[3];
        u32 abt[3];
        u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static struct meminfo meminfo __initdata = { 0, };
static const char *cpu_name;
static const char *machine_name;
static char __initdata command_line[COMMAND_LINE_SIZE];

static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
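/*
 * Run-time endianness probe for the strings built in setup_processor():
 * the union below overlays a four-byte array on an unsigned long, and
 * ENDIANNESS reads back the long's least significant byte.  That byte
 * sits at c[0] ('l') on a little-endian CPU and at c[3] ('b') on a
 * big-endian one, so the utsname machine string and elf_platform get an
 * 'l' or 'b' suffix (e.g. "armv5tejl" vs "armv5tejb").
 */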
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
        {
                .name = "Video RAM",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        },
        {
                .name = "Kernel text",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        },
        {
                .name = "Kernel data",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        }
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
        {
                .name = "reserved",
                .start = 0x3bc,
                .end = 0x3be,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        },
        {
                .name = "reserved",
                .start = 0x378,
                .end = 0x37f,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        },
        {
                .name = "reserved",
                .start = 0x278,
                .end = 0x27f,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        }
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
        "undefined/unknown",
        "3",
        "4",
        "4T",
        "5",
        "5T",
        "5TE",
        "5TEJ",
        "6TEJ",
        "7",
        "?(11)",
        "?(12)",
        "?(13)",
        "?(14)",
        "?(15)",
        "?(16)",
        "?(17)",
};
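/*
 * Decode the architecture version from the main ID register (CP15 c0).
 * Older cores encode it directly in the architecture field (bits
 * [19:16]); the ARM7 family is special-cased, using bit 23 to tell
 * ARMv4T from ARMv3.  Cores using the revised CPUID scheme (0xf in the
 * architecture field) are instead classified from ID_MMFR0, checking
 * whether a v6 or v7 VMSA/PMSA memory model is implemented.
 */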
int cpu_architecture(void)
{
        int cpu_arch;

        if ((read_cpuid_id() & 0x0008f000) == 0) {
                cpu_arch = CPU_ARCH_UNKNOWN;
        } else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
                cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
        } else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
                cpu_arch = (read_cpuid_id() >> 16) & 7;
                if (cpu_arch)
                        cpu_arch += CPU_ARCH_ARMv3;
        } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
                unsigned int mmfr0;

                /* Revised CPUID format. Read the Memory Model Feature
                 * Register 0 and check for VMSAv7 or PMSAv7 */
                asm("mrc        p15, 0, %0, c0, c1, 4"
                    : "=r" (mmfr0));
                if ((mmfr0 & 0x0000000f) == 0x00000003 ||
                    (mmfr0 & 0x000000f0) == 0x00000030)
                        cpu_arch = CPU_ARCH_ARMv7;
                else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
                         (mmfr0 & 0x000000f0) == 0x00000020)
                        cpu_arch = CPU_ARCH_ARMv6;
                else
                        cpu_arch = CPU_ARCH_UNKNOWN;
        } else
                cpu_arch = CPU_ARCH_UNKNOWN;

        return cpu_arch;
}

static void __init cacheid_init(void)
{
        unsigned int cachetype = read_cpuid_cachetype();
        unsigned int arch = cpu_architecture();

        if (arch >= CPU_ARCH_ARMv7) {
                cacheid = CACHEID_VIPT_NONALIASING;
                if ((cachetype & (3 << 14)) == 1 << 14)
                        cacheid |= CACHEID_ASID_TAGGED;
        } else if (arch >= CPU_ARCH_ARMv6) {
                if (cachetype & (1 << 23))
                        cacheid = CACHEID_VIPT_ALIASING;
                else
                        cacheid = CACHEID_VIPT_NONALIASING;
        } else {
                cacheid = CACHEID_VIVT;
        }

        printk("CPU: %s data cache, %s instruction cache\n",
                cache_is_vivt() ? "VIVT" :
                cache_is_vipt_aliasing() ? "VIPT aliasing" :
                cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown",
                cache_is_vivt() ? "VIVT" :
                icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
                cache_is_vipt_aliasing() ? "VIPT aliasing" :
                cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);
extern struct machine_desc *lookup_machine_type(unsigned int);

static void __init setup_processor(void)
{
        struct proc_info_list *list;

        /*
         * locate processor in the list of supported processor
         * types.  The linker builds this table for us from the
         * entries in arch/arm/mm/proc-*.S
         */
        list = lookup_processor_type(read_cpuid_id());
        if (!list) {
                printk("CPU configuration botched (ID %08x), unable "
                       "to continue.\n", read_cpuid_id());
                while (1);
        }

        cpu_name = list->cpu_name;

#ifdef MULTI_CPU
        processor = *list->proc;
#endif
#ifdef MULTI_TLB
        cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
        cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
        cpu_cache = *list->cache;
#endif

        printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
               cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
               proc_arch[cpu_architecture()], cr_alignment);

        sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
        sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
        elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
        elf_hwcap &= ~HWCAP_THUMB;
#endif

        cacheid_init();
        cpu_proc_init();
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void cpu_init(void)
{
        unsigned int cpu = smp_processor_id();
        struct stack *stk = &stacks[cpu];

        if (cpu >= NR_CPUS) {
                printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
                BUG();
        }

        /*
         * setup stacks for re-entrant exception handlers
         */
        __asm__ (
        "msr    cpsr_c, %1\n\t"
        "add    sp, %0, %2\n\t"
        "msr    cpsr_c, %3\n\t"
        "add    sp, %0, %4\n\t"
        "msr    cpsr_c, %5\n\t"
        "add    sp, %0, %6\n\t"
        "msr    cpsr_c, %7"
            :
            : "r" (stk),
              "I" (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
              "I" (offsetof(struct stack, irq[0])),
              "I" (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
              "I" (offsetof(struct stack, abt[0])),
              "I" (PSR_F_BIT | PSR_I_BIT | UND_MODE),
              "I" (offsetof(struct stack, und[0])),
              "I" (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
            : "r14");
}

static struct machine_desc * __init setup_machine(unsigned int nr)
{
        struct machine_desc *list;

        /*
         * locate machine in the list of supported machines.
         */
        list = lookup_machine_type(nr);
        if (!list) {
                printk("Machine configuration botched (nr %d), unable "
                       "to continue.\n", nr);
                while (1);
        }

        printk("Machine: %s\n", list->name);

        return list;
}
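/*
 * Record a physical memory bank in the static meminfo table.  The bank
 * is trimmed to whole pages (start rounded up, size rounded down), and
 * its node is derived from the start address via PHYS_TO_NID().
 * Callers are the "mem=" early parameter and the ATAG_MEM tag handler
 * below.
 */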
static void __init arm_add_memory(unsigned long start, unsigned long size)
{
        struct membank *bank;

        /*
         * Ensure that start/size are aligned to a page boundary.
         * Size is appropriately rounded down, start is rounded up.
         */
        size -= start & ~PAGE_MASK;

        bank = &meminfo.bank[meminfo.nr_banks++];

        bank->start = PAGE_ALIGN(start);
        bank->size  = size & PAGE_MASK;
        bank->node  = PHYS_TO_NID(start);
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
static void __init early_mem(char **p)
{
        static int usermem __initdata = 0;
        unsigned long size, start;

        /*
         * If the user specifies memory size, we
         * blow away any automatically generated
         * size.
         */
        if (usermem == 0) {
                usermem = 1;
                meminfo.nr_banks = 0;
        }

        start = PHYS_OFFSET;
        size  = memparse(*p, p);
        if (**p == '@')
                start = memparse(*p + 1, p);

        arm_add_memory(start, size);
}
__early_param("mem=", early_mem);

/*
 * Initial parsing of the command line.
 */
static void __init parse_cmdline(char **cmdline_p, char *from)
{
        char c = ' ', *to = command_line;
        int len = 0;

        for (;;) {
                if (c == ' ') {
                        extern struct early_params __early_begin, __early_end;
                        struct early_params *p;

                        for (p = &__early_begin; p < &__early_end; p++) {
                                int arglen = strlen(p->arg);

                                if (memcmp(from, p->arg, arglen) == 0) {
                                        if (to != command_line)
                                                to -= 1;
                                        from += arglen;
                                        p->fn(&from);

                                        while (*from != ' ' && *from != '\0')
                                                from++;
                                        break;
                                }
                        }
                }
                c = *from++;
                if (!c)
                        break;
                if (COMMAND_LINE_SIZE <= ++len)
                        break;
                *to++ = c;
        }
        *to = '\0';
        *cmdline_p = command_line;
}

static void __init
setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
{
#ifdef CONFIG_BLK_DEV_RAM
        extern int rd_size, rd_image_start, rd_prompt, rd_doload;

        rd_image_start = image_start;
        rd_prompt = prompt;
        rd_doload = doload;

        if (rd_sz)
                rd_size = rd_sz;
#endif
}
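/*
 * Populate the resource tree: each non-empty bank in the meminfo becomes
 * a busy "System RAM" iomem resource, with the kernel text and data
 * regions claimed as children of whichever bank contains them.  Video
 * RAM and the legacy parallel-port I/O ranges (lp0-lp2) are reserved
 * only when the machine description asks for them.
 */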
static void __init
request_standard_resources(struct meminfo *mi, struct machine_desc *mdesc)
{
        struct resource *res;
        int i;

        kernel_code.start   = virt_to_phys(&_text);
        kernel_code.end     = virt_to_phys(&_etext - 1);
        kernel_data.start   = virt_to_phys(&__data_start);
        kernel_data.end     = virt_to_phys(&_end - 1);

        for (i = 0; i < mi->nr_banks; i++) {
                if (mi->bank[i].size == 0)
                        continue;

                res = alloc_bootmem_low(sizeof(*res));
                res->name  = "System RAM";
                res->start = mi->bank[i].start;
                res->end   = mi->bank[i].start + mi->bank[i].size - 1;
                res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

                request_resource(&iomem_resource, res);

                if (kernel_code.start >= res->start &&
                    kernel_code.end <= res->end)
                        request_resource(res, &kernel_code);
                if (kernel_data.start >= res->start &&
                    kernel_data.end <= res->end)
                        request_resource(res, &kernel_data);
        }

        if (mdesc->video_start) {
                video_ram.start = mdesc->video_start;
                video_ram.end   = mdesc->video_end;
                request_resource(&iomem_resource, &video_ram);
        }

        /*
         * Some machines don't have the possibility of ever
         * possessing lp0, lp1 or lp2
         */
        if (mdesc->reserve_lp0)
                request_resource(&ioport_resource, &lp0);
        if (mdesc->reserve_lp1)
                request_resource(&ioport_resource, &lp1);
        if (mdesc->reserve_lp2)
                request_resource(&ioport_resource, &lp2);
}

/*
 *  Tag parsing.
 *
 * This is the new way of passing data to the kernel at boot time.  Rather
 * than passing a fixed inflexible structure to the kernel, we pass a list
 * of variable-sized tags to the kernel.  The first tag must be an ATAG_CORE
 * tag for the list to be recognised (to distinguish the tagged list from
 * a param_struct).  The list is terminated with a zero-length tag (this tag
 * is not parsed in any way).
 */
static int __init parse_tag_core(const struct tag *tag)
{
        if (tag->hdr.size > 2) {
                if ((tag->u.core.flags & 1) == 0)
                        root_mountflags &= ~MS_RDONLY;
                ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
        }
        return 0;
}

__tagtable(ATAG_CORE, parse_tag_core);

static int __init parse_tag_mem32(const struct tag *tag)
{
        if (meminfo.nr_banks >= NR_BANKS) {
                printk(KERN_WARNING
                       "Ignoring memory bank 0x%08x size %dKB\n",
                       tag->u.mem.start, tag->u.mem.size / 1024);
                return -EINVAL;
        }
        arm_add_memory(tag->u.mem.start, tag->u.mem.size);
        return 0;
}

__tagtable(ATAG_MEM, parse_tag_mem32);

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
        .orig_video_lines       = 30,
        .orig_video_cols        = 80,
        .orig_video_mode        = 0,
        .orig_video_ega_bx      = 0,
        .orig_video_isVGA       = 1,
        .orig_video_points      = 8
};

static int __init parse_tag_videotext(const struct tag *tag)
{
        screen_info.orig_x            = tag->u.videotext.x;
        screen_info.orig_y            = tag->u.videotext.y;
        screen_info.orig_video_page   = tag->u.videotext.video_page;
        screen_info.orig_video_mode   = tag->u.videotext.video_mode;
        screen_info.orig_video_cols   = tag->u.videotext.video_cols;
        screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
        screen_info.orig_video_lines  = tag->u.videotext.video_lines;
        screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
        screen_info.orig_video_points = tag->u.videotext.video_points;
        return 0;
}

__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
#endif

static int __init parse_tag_ramdisk(const struct tag *tag)
{
        setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
                      (tag->u.ramdisk.flags & 2) == 0,
                      tag->u.ramdisk.start, tag->u.ramdisk.size);
        return 0;
}

__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);

static int __init parse_tag_serialnr(const struct tag *tag)
{
        system_serial_low = tag->u.serialnr.low;
        system_serial_high = tag->u.serialnr.high;
        return 0;
}

__tagtable(ATAG_SERIAL, parse_tag_serialnr);

static int __init parse_tag_revision(const struct tag *tag)
{
        system_rev = tag->u.revision.rev;
        return 0;
}

__tagtable(ATAG_REVISION, parse_tag_revision);

static int __init parse_tag_cmdline(const struct tag *tag)
{
        strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
        return 0;
}

__tagtable(ATAG_CMDLINE, parse_tag_cmdline);

/*
 * Scan the tag table for this tag, and call its parse function.
 * The tag table is built by the linker from all the __tagtable
 * declarations.
 */
static int __init parse_tag(const struct tag *tag)
{
        extern struct tagtable __tagtable_begin, __tagtable_end;
        struct tagtable *t;

        for (t = &__tagtable_begin; t < &__tagtable_end; t++)
                if (tag->hdr.tag == t->tag) {
                        t->parse(tag);
                        break;
                }

        return t < &__tagtable_end;
}

/*
 * Parse all tags in the list, checking both the global and architecture
 * specific tag tables.
 */
static void __init parse_tags(const struct tag *t)
{
        for (; t->hdr.size; t = tag_next(t))
                if (!parse_tag(t))
                        printk(KERN_WARNING
                               "Ignoring unrecognised tag 0x%08x\n",
                               t->hdr.tag);
}

/*
 * This holds our defaults.
 */
static struct init_tags {
        struct tag_header hdr1;
        struct tag_core   core;
        struct tag_header hdr2;
        struct tag_mem32  mem;
        struct tag_header hdr3;
} init_tags __initdata = {
        { tag_size(tag_core), ATAG_CORE },
        { 1, PAGE_SIZE, 0xff },
        { tag_size(tag_mem32), ATAG_MEM },
        { MEM_SIZE, PHYS_OFFSET },
        { 0, ATAG_NONE }
};

static void (*init_machine)(void) __initdata;

static int __init customize_machine(void)
{
        /* customizes platform devices, or adds new ones */
        if (init_machine)
                init_machine();
        return 0;
}
arch_initcall(customize_machine);
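/*
 * setup_arch() is the architecture hook invoked from start_kernel().
 * It identifies the CPU and machine, picks up the boot tag list
 * (preferring the loader-supplied __atags_pointer, then the machine's
 * boot_params address, falling back to the built-in init_tags above),
 * converts old-style param_structs, runs the machine fixup hook, parses
 * the tags and command line, initialises paging and the standard
 * resources, and records the machine-specific IRQ, timer and
 * init_machine hooks for later use.
 */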
void __init setup_arch(char **cmdline_p)
{
        struct tag *tags = (struct tag *)&init_tags;
        struct machine_desc *mdesc;
        char *from = default_command_line;

        setup_processor();
        mdesc = setup_machine(machine_arch_type);
        machine_name = mdesc->name;

        if (mdesc->soft_reboot)
                reboot_setup("s");

        if (__atags_pointer)
                tags = phys_to_virt(__atags_pointer);
        else if (mdesc->boot_params)
                tags = phys_to_virt(mdesc->boot_params);

        /*
         * If we have the old style parameters, convert them to
         * a tag list.
         */
        if (tags->hdr.tag != ATAG_CORE)
                convert_to_tag_list(tags);
        if (tags->hdr.tag != ATAG_CORE)
                tags = (struct tag *)&init_tags;

        if (mdesc->fixup)
                mdesc->fixup(mdesc, tags, &from, &meminfo);

        if (tags->hdr.tag == ATAG_CORE) {
                if (meminfo.nr_banks != 0)
                        squash_mem_tags(tags);
                save_atags(tags);
                parse_tags(tags);
        }

        init_mm.start_code = (unsigned long) &_text;
        init_mm.end_code   = (unsigned long) &_etext;
        init_mm.end_data   = (unsigned long) &_edata;
        init_mm.brk        = (unsigned long) &_end;

        memcpy(boot_command_line, from, COMMAND_LINE_SIZE);
        boot_command_line[COMMAND_LINE_SIZE-1] = '\0';
        parse_cmdline(cmdline_p, from);
        paging_init(&meminfo, mdesc);
        request_standard_resources(&meminfo, mdesc);

#ifdef CONFIG_SMP
        smp_init_cpus();
#endif

        cpu_init();

        /*
         * Set up various architecture-specific pointers
         */
        init_arch_irq = mdesc->init_irq;
        system_timer = mdesc->timer;
        init_machine = mdesc->init_machine;

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
        conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
#endif
#endif
        early_trap_init();
}


static int __init topology_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
                cpuinfo->cpu.hotpluggable = 1;
                register_cpu(&cpuinfo->cpu, cpu);
        }

        return 0;
}

subsys_initcall(topology_init);

static const char *hwcap_str[] = {
        "swp",
        "half",
        "thumb",
        "26bit",
        "fastmult",
        "fpa",
        "vfp",
        "edsp",
        "java",
        "iwmmxt",
        "crunch",
        NULL
};
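/*
 * Generate /proc/cpuinfo through the seq_file interface.  On SMP one
 * "processor : N" line (plus BogoMIPS) is printed per online CPU, since
 * glibc counts those lines to determine the number of processors.  The
 * "Features" line walks hwcap_str[], whose entries are expected to line
 * up bit-for-bit with the HWCAP_* flags in elf_hwcap, and the remaining
 * fields are decoded from the main ID register and the ATAG-supplied
 * revision and serial number.
 */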
static int c_show(struct seq_file *m, void *v)
{
        int i;

        seq_printf(m, "Processor\t: %s rev %d (%s)\n",
                   cpu_name, read_cpuid_id() & 15, elf_platform);

#if defined(CONFIG_SMP)
        for_each_online_cpu(i) {
                /*
                 * glibc reads /proc/cpuinfo to determine the number of
                 * online processors, looking for lines beginning with
                 * "processor".  Give glibc what it expects.
                 */
                seq_printf(m, "processor\t: %d\n", i);
                seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
                           per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
                           (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
        }
#else /* CONFIG_SMP */
        seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
                   loops_per_jiffy / (500000/HZ),
                   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

        /* dump out the processor features */
        seq_puts(m, "Features\t: ");

        for (i = 0; hwcap_str[i]; i++)
                if (elf_hwcap & (1 << i))
                        seq_printf(m, "%s ", hwcap_str[i]);

        seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
        seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

        if ((read_cpuid_id() & 0x0008f000) == 0x00000000) {
                /* pre-ARM7 */
                seq_printf(m, "CPU part\t: %07x\n", read_cpuid_id() >> 4);
        } else {
                if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
                        /* ARM7 */
                        seq_printf(m, "CPU variant\t: 0x%02x\n",
                                   (read_cpuid_id() >> 16) & 127);
                } else {
                        /* post-ARM7 */
                        seq_printf(m, "CPU variant\t: 0x%x\n",
                                   (read_cpuid_id() >> 20) & 15);
                }
                seq_printf(m, "CPU part\t: 0x%03x\n",
                           (read_cpuid_id() >> 4) & 0xfff);
        }
        seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);

        seq_puts(m, "\n");

        seq_printf(m, "Hardware\t: %s\n", machine_name);
        seq_printf(m, "Revision\t: %04x\n", system_rev);
        seq_printf(m, "Serial\t\t: %08x%08x\n",
                   system_serial_high, system_serial_low);

        return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
        return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
        ++*pos;
        return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
        .start  = c_start,
        .next   = c_next,
        .stop   = c_stop,
        .show   = c_show
};