xref: /openbmc/linux/arch/arm/kernel/setup.c (revision 4dc7ccf7)
1 /*
2  *  linux/arch/arm/kernel/setup.c
3  *
4  *  Copyright (C) 1995-2001 Russell King
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/stddef.h>
13 #include <linux/ioport.h>
14 #include <linux/delay.h>
15 #include <linux/utsname.h>
16 #include <linux/initrd.h>
17 #include <linux/console.h>
18 #include <linux/bootmem.h>
19 #include <linux/seq_file.h>
20 #include <linux/screen_info.h>
21 #include <linux/init.h>
22 #include <linux/root_dev.h>
23 #include <linux/cpu.h>
24 #include <linux/interrupt.h>
25 #include <linux/smp.h>
26 #include <linux/fs.h>
27 #include <linux/proc_fs.h>
28 
29 #include <asm/unified.h>
30 #include <asm/cpu.h>
31 #include <asm/cputype.h>
32 #include <asm/elf.h>
33 #include <asm/procinfo.h>
34 #include <asm/sections.h>
35 #include <asm/setup.h>
36 #include <asm/mach-types.h>
37 #include <asm/cacheflush.h>
38 #include <asm/cachetype.h>
39 #include <asm/tlbflush.h>
40 
41 #include <asm/mach/arch.h>
42 #include <asm/mach/irq.h>
43 #include <asm/mach/time.h>
44 #include <asm/traps.h>
45 #include <asm/unwind.h>
46 
47 #include "compat.h"
48 #include "atags.h"
49 #include "tcm.h"
50 
51 #ifndef MEM_SIZE
52 #define MEM_SIZE	(16*1024*1024)
53 #endif
54 
55 #if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
56 char fpe_type[8];
57 
58 static int __init fpe_setup(char *line)
59 {
60 	memcpy(fpe_type, line, 8);
61 	return 1;
62 }
63 
64 __setup("fpe=", fpe_setup);
65 #endif
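/*
 * "fpe=" only records the requested emulator name here; the floating point
 * emulator's own init code (e.g. arch/arm/nwfpe/fpmodule.c) compares
 * fpe_type against its name and declines to initialise on a mismatch.
 * Illustrative command line: "console=ttyAMA0 root=/dev/sda2 fpe=nwfpe".
 */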
66 
67 extern void paging_init(struct machine_desc *desc);
68 extern void reboot_setup(char *str);
69 
70 unsigned int processor_id;
71 EXPORT_SYMBOL(processor_id);
72 unsigned int __machine_arch_type;
73 EXPORT_SYMBOL(__machine_arch_type);
74 unsigned int cacheid;
75 EXPORT_SYMBOL(cacheid);
76 
77 unsigned int __atags_pointer __initdata;
78 
79 unsigned int system_rev;
80 EXPORT_SYMBOL(system_rev);
81 
82 unsigned int system_serial_low;
83 EXPORT_SYMBOL(system_serial_low);
84 
85 unsigned int system_serial_high;
86 EXPORT_SYMBOL(system_serial_high);
87 
88 unsigned int elf_hwcap;
89 EXPORT_SYMBOL(elf_hwcap);
90 
91 
92 #ifdef MULTI_CPU
93 struct processor processor;
94 #endif
95 #ifdef MULTI_TLB
96 struct cpu_tlb_fns cpu_tlb;
97 #endif
98 #ifdef MULTI_USER
99 struct cpu_user_fns cpu_user;
100 #endif
101 #ifdef MULTI_CACHE
102 struct cpu_cache_fns cpu_cache;
103 #endif
104 #ifdef CONFIG_OUTER_CACHE
105 struct outer_cache_fns outer_cache;
106 EXPORT_SYMBOL(outer_cache);
107 #endif
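/*
 * When the kernel is built for more than one CPU implementation, the
 * per-implementation entry points cannot be called directly, so MULTI_CPU,
 * MULTI_TLB, MULTI_USER and MULTI_CACHE turn them into these function
 * pointer tables; setup_processor() below fills them in by copying the
 * tables out of the matching proc_info_list entry.  outer_cache is the
 * optional outer (L2) cache maintenance vector, filled in by the outer
 * cache driver (e.g. cache-l2x0.c) when one is present.
 */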
108 
109 struct stack {
110 	u32 irq[3];
111 	u32 abt[3];
112 	u32 und[3];
113 } ____cacheline_aligned;
114 
115 static struct stack stacks[NR_CPUS];
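/*
 * Each exception mode the kernel only borrows briefly (IRQ, abort,
 * undefined) gets a tiny three-word stack per CPU.  The vector stubs in
 * entry-armv.S stash r0, lr and the spsr here before switching to SVC
 * mode, which is why three words per mode are enough.  cpu_init() points
 * the banked stack pointers at these areas.
 */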
116 
117 char elf_platform[ELF_PLATFORM_SIZE];
118 EXPORT_SYMBOL(elf_platform);
119 
120 static const char *cpu_name;
121 static const char *machine_name;
122 static char __initdata cmd_line[COMMAND_LINE_SIZE];
123 
124 static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
125 static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
126 #define ENDIANNESS ((char)endian_test.l)
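/*
 * Endianness probe: the union overlays four chars on an unsigned long, so
 * the least significant byte of .l is 'l' on a little-endian build and 'b'
 * on a big-endian one.  The resulting character is appended to the ELF
 * platform and machine strings in setup_processor(), giving e.g. "v7l"
 * and "armv7l" on a little-endian ARMv7 kernel.
 */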
127 
128 DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
129 
130 /*
131  * Standard memory resources
132  */
133 static struct resource mem_res[] = {
134 	{
135 		.name = "Video RAM",
136 		.start = 0,
137 		.end = 0,
138 		.flags = IORESOURCE_MEM
139 	},
140 	{
141 		.name = "Kernel text",
142 		.start = 0,
143 		.end = 0,
144 		.flags = IORESOURCE_MEM
145 	},
146 	{
147 		.name = "Kernel data",
148 		.start = 0,
149 		.end = 0,
150 		.flags = IORESOURCE_MEM
151 	}
152 };
153 
154 #define video_ram   mem_res[0]
155 #define kernel_code mem_res[1]
156 #define kernel_data mem_res[2]
157 
158 static struct resource io_res[] = {
159 	{
160 		.name = "reserved",
161 		.start = 0x3bc,
162 		.end = 0x3be,
163 		.flags = IORESOURCE_IO | IORESOURCE_BUSY
164 	},
165 	{
166 		.name = "reserved",
167 		.start = 0x378,
168 		.end = 0x37f,
169 		.flags = IORESOURCE_IO | IORESOURCE_BUSY
170 	},
171 	{
172 		.name = "reserved",
173 		.start = 0x278,
174 		.end = 0x27f,
175 		.flags = IORESOURCE_IO | IORESOURCE_BUSY
176 	}
177 };
178 
179 #define lp0 io_res[0]
180 #define lp1 io_res[1]
181 #define lp2 io_res[2]
182 
183 static const char *proc_arch[] = {
184 	"undefined/unknown",
185 	"3",
186 	"4",
187 	"4T",
188 	"5",
189 	"5T",
190 	"5TE",
191 	"5TEJ",
192 	"6TEJ",
193 	"7",
194 	"?(11)",
195 	"?(12)",
196 	"?(13)",
197 	"?(14)",
198 	"?(15)",
199 	"?(16)",
200 	"?(17)",
201 };
202 
203 int cpu_architecture(void)
204 {
205 	int cpu_arch;
206 
207 	if ((read_cpuid_id() & 0x0008f000) == 0) {
208 		cpu_arch = CPU_ARCH_UNKNOWN;
209 	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
210 		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
211 	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
212 		cpu_arch = (read_cpuid_id() >> 16) & 7;
213 		if (cpu_arch)
214 			cpu_arch += CPU_ARCH_ARMv3;
215 	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
216 		unsigned int mmfr0;
217 
218 		/* Revised CPUID format. Read the Memory Model Feature
219 		 * Register 0 and check for VMSAv7 or PMSAv7 */
220 		asm("mrc	p15, 0, %0, c0, c1, 4"
221 		    : "=r" (mmfr0));
222 		if ((mmfr0 & 0x0000000f) == 0x00000003 ||
223 		    (mmfr0 & 0x000000f0) == 0x00000030)
224 			cpu_arch = CPU_ARCH_ARMv7;
225 		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
226 			 (mmfr0 & 0x000000f0) == 0x00000020)
227 			cpu_arch = CPU_ARCH_ARMv6;
228 		else
229 			cpu_arch = CPU_ARCH_UNKNOWN;
230 	} else
231 		cpu_arch = CPU_ARCH_UNKNOWN;
232 
233 	return cpu_arch;
234 }
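/*
 * The decoding above follows the main ID register (MIDR) layout: the
 * 0x0008f000 mask separates pre-ARM7 and ARM7 parts from everything else,
 * otherwise bits [19:16] hold the architecture code, with 0xf meaning
 * "consult the feature registers".  In that case ID_MMFR0 bits [3:0]
 * (VMSA support) or bits [7:4] (PMSA support) decide between ARMv6
 * (value 2) and ARMv7 (value 3), which is exactly what the comparisons
 * against 0x2/0x20 and 0x3/0x30 implement.
 */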
235 
236 static void __init cacheid_init(void)
237 {
238 	unsigned int cachetype = read_cpuid_cachetype();
239 	unsigned int arch = cpu_architecture();
240 
241 	if (arch >= CPU_ARCH_ARMv6) {
242 		if ((cachetype & (7 << 29)) == 4 << 29) {
243 			/* ARMv7 register format */
244 			cacheid = CACHEID_VIPT_NONALIASING;
245 			if ((cachetype & (3 << 14)) == 1 << 14)
246 				cacheid |= CACHEID_ASID_TAGGED;
247 		} else if (cachetype & (1 << 23))
248 			cacheid = CACHEID_VIPT_ALIASING;
249 		else
250 			cacheid = CACHEID_VIPT_NONALIASING;
251 	} else {
252 		cacheid = CACHEID_VIVT;
253 	}
254 
255 	printk("CPU: %s data cache, %s instruction cache\n",
256 		cache_is_vivt() ? "VIVT" :
257 		cache_is_vipt_aliasing() ? "VIPT aliasing" :
258 		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown",
259 		cache_is_vivt() ? "VIVT" :
260 		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
261 		cache_is_vipt_aliasing() ? "VIPT aliasing" :
262 		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
263 }
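/*
 * cacheid_init() classifies the L1 caches from the cache type register:
 * 0b100 in bits [31:29] marks the ARMv7 register format, where the D-cache
 * is treated as non-aliasing VIPT and an L1Ip field (bits [15:14]) of 0b01
 * flags an ASID-tagged instruction cache.  With the older format, bit 23
 * (the page colouring restriction bit) is taken to mean the cache can
 * alias and needs the VIPT-aliasing maintenance; pre-ARMv6 cores are
 * simply treated as VIVT.
 */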
264 
265 /*
266  * These functions re-use the assembly code in head.S, which
267  * already provides the required functionality.
268  */
269 extern struct proc_info_list *lookup_processor_type(unsigned int);
270 extern struct machine_desc *lookup_machine_type(unsigned int);
271 
272 static void __init setup_processor(void)
273 {
274 	struct proc_info_list *list;
275 
276 	/*
277 	 * locate processor in the list of supported processor
278 	 * types.  The linker builds this table for us from the
279 	 * entries in arch/arm/mm/proc-*.S
280 	 */
281 	list = lookup_processor_type(read_cpuid_id());
282 	if (!list) {
283 		printk("CPU configuration botched (ID %08x), unable "
284 		       "to continue.\n", read_cpuid_id());
285 		while (1);
286 	}
287 
288 	cpu_name = list->cpu_name;
289 
290 #ifdef MULTI_CPU
291 	processor = *list->proc;
292 #endif
293 #ifdef MULTI_TLB
294 	cpu_tlb = *list->tlb;
295 #endif
296 #ifdef MULTI_USER
297 	cpu_user = *list->user;
298 #endif
299 #ifdef MULTI_CACHE
300 	cpu_cache = *list->cache;
301 #endif
302 
303 	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
304 	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
305 	       proc_arch[cpu_architecture()], cr_alignment);
306 
307 	sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
308 	sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
309 	elf_hwcap = list->elf_hwcap;
310 #ifndef CONFIG_ARM_THUMB
311 	elf_hwcap &= ~HWCAP_THUMB;
312 #endif
313 
314 	cacheid_init();
315 	cpu_proc_init();
316 }
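/*
 * The proc_info_list entry matched above is emitted by the linker from the
 * .proc.info.init sections in arch/arm/mm/proc-*.S.  Besides the low-level
 * function tables, it supplies the user-visible strings: on a
 * little-endian ARMv7 part, for example, the code above produces "armv7l"
 * for uname -m and "v7l" for elf_platform, which c_show() below prints in
 * the Processor line of /proc/cpuinfo.
 */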
317 
318 /*
319  * cpu_init - initialise one CPU.
320  *
321  * cpu_init sets up the per-CPU stacks.
322  */
323 void cpu_init(void)
324 {
325 	unsigned int cpu = smp_processor_id();
326 	struct stack *stk = &stacks[cpu];
327 
328 	if (cpu >= NR_CPUS) {
329 		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
330 		BUG();
331 	}
332 
333 	/*
334 	 * Define the placement constraint for the inline asm directive below.
335 	 * In Thumb-2, msr with an immediate value is not allowed.
336 	 */
337 #ifdef CONFIG_THUMB2_KERNEL
338 #define PLC	"r"
339 #else
340 #define PLC	"I"
341 #endif
342 
343 	/*
344 	 * Set up the stacks for the re-entrant exception handlers
345 	 */
346 	__asm__ (
347 	"msr	cpsr_c, %1\n\t"
348 	"add	r14, %0, %2\n\t"
349 	"mov	sp, r14\n\t"
350 	"msr	cpsr_c, %3\n\t"
351 	"add	r14, %0, %4\n\t"
352 	"mov	sp, r14\n\t"
353 	"msr	cpsr_c, %5\n\t"
354 	"add	r14, %0, %6\n\t"
355 	"mov	sp, r14\n\t"
356 	"msr	cpsr_c, %7"
357 	    :
358 	    : "r" (stk),
359 	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
360 	      "I" (offsetof(struct stack, irq[0])),
361 	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
362 	      "I" (offsetof(struct stack, abt[0])),
363 	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
364 	      "I" (offsetof(struct stack, und[0])),
365 	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
366 	    : "r14");
367 }
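/*
 * The asm block above walks through IRQ, abort and undefined mode in turn
 * (with IRQs and FIQs masked), sets each mode's banked sp to the matching
 * member of this CPU's struct stack, and finally drops back into SVC mode,
 * the mode the kernel normally runs in.  r14 is used as a scratch register
 * for the address calculation, hence the clobber; going through r14 keeps
 * the sequence encodable on both ARM and Thumb-2 builds.
 */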
368 
369 static struct machine_desc * __init setup_machine(unsigned int nr)
370 {
371 	struct machine_desc *list;
372 
373 	/*
374 	 * locate machine in the list of supported machines.
375 	 */
376 	list = lookup_machine_type(nr);
377 	if (!list) {
378 		printk("Machine configuration botched (nr %d), unable "
379 		       "to continue.\n", nr);
380 		while (1);
381 	}
382 
383 	printk("Machine: %s\n", list->name);
384 
385 	return list;
386 }
387 
388 static int __init arm_add_memory(unsigned long start, unsigned long size)
389 {
390 	struct membank *bank = &meminfo.bank[meminfo.nr_banks];
391 
392 	if (meminfo.nr_banks >= NR_BANKS) {
393 		printk(KERN_CRIT "NR_BANKS too low, "
394 			"ignoring memory at %#lx\n", start);
395 		return -EINVAL;
396 	}
397 
398 	/*
399 	 * Ensure that start/size are aligned to a page boundary.
400 	 * Size is appropriately rounded down, start is rounded up.
401 	 */
402 	size -= start & ~PAGE_MASK;
403 	bank->start = PAGE_ALIGN(start);
404 	bank->size  = size & PAGE_MASK;
405 	bank->node  = PHYS_TO_NID(start);
406 
407 	/*
408 	 * Check whether this memory region has non-zero size or
409 	 * invalid node number.
410 	 */
411 	if (bank->size == 0 || bank->node >= MAX_NUMNODES)
412 		return -EINVAL;
413 
414 	meminfo.nr_banks++;
415 	return 0;
416 }
417 
418 /*
419  * Pick out the memory size.  We look for mem=size@start,
420  * where start and size are sizes with an optional [KkMm] suffix
421  */
422 static int __init early_mem(char *p)
423 {
424 	static int usermem __initdata = 0;
425 	unsigned long size, start;
426 	char *endp;
427 
428 	/*
429 	 * If the user specifies memory size, we
430 	 * blow away any automatically generated
431 	 * size.
432 	 */
433 	if (usermem == 0) {
434 		usermem = 1;
435 		meminfo.nr_banks = 0;
436 	}
437 
438 	start = PHYS_OFFSET;
439 	size  = memparse(p, &endp);
440 	if (*endp == '@')
441 		start = memparse(endp + 1, NULL);
442 
443 	arm_add_memory(start, size);
444 
445 	return 0;
446 }
447 early_param("mem", early_mem);
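/*
 * "mem=" may be given more than once: the first occurrence throws away the
 * banks discovered from the boot tags, and every occurrence then adds one
 * bank.  An illustrative command line of
 *
 *	mem=64M@0x80000000 mem=64M@0x88000000
 *
 * would register two 64MB banks at those physical addresses, subject to
 * the page alignment applied in arm_add_memory().
 */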
448 
449 static void __init
450 setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
451 {
452 #ifdef CONFIG_BLK_DEV_RAM
453 	extern int rd_size, rd_image_start, rd_prompt, rd_doload;
454 
455 	rd_image_start = image_start;
456 	rd_prompt = prompt;
457 	rd_doload = doload;
458 
459 	if (rd_sz)
460 		rd_size = rd_sz;
461 #endif
462 }
463 
464 static void __init
465 request_standard_resources(struct meminfo *mi, struct machine_desc *mdesc)
466 {
467 	struct resource *res;
468 	int i;
469 
470 	kernel_code.start   = virt_to_phys(_text);
471 	kernel_code.end     = virt_to_phys(_etext - 1);
472 	kernel_data.start   = virt_to_phys(_data);
473 	kernel_data.end     = virt_to_phys(_end - 1);
474 
475 	for (i = 0; i < mi->nr_banks; i++) {
476 		if (mi->bank[i].size == 0)
477 			continue;
478 
479 		res = alloc_bootmem_low(sizeof(*res));
480 		res->name  = "System RAM";
481 		res->start = mi->bank[i].start;
482 		res->end   = mi->bank[i].start + mi->bank[i].size - 1;
483 		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
484 
485 		request_resource(&iomem_resource, res);
486 
487 		if (kernel_code.start >= res->start &&
488 		    kernel_code.end <= res->end)
489 			request_resource(res, &kernel_code);
490 		if (kernel_data.start >= res->start &&
491 		    kernel_data.end <= res->end)
492 			request_resource(res, &kernel_data);
493 	}
494 
495 	if (mdesc->video_start) {
496 		video_ram.start = mdesc->video_start;
497 		video_ram.end   = mdesc->video_end;
498 		request_resource(&iomem_resource, &video_ram);
499 	}
500 
501 	/*
502 	 * Some machines can never have lp0, lp1 or lp2 (the legacy parallel
503 	 * port ranges), so these are only claimed when the machine asks for them.
504 	 */
505 	if (mdesc->reserve_lp0)
506 		request_resource(&ioport_resource, &lp0);
507 	if (mdesc->reserve_lp1)
508 		request_resource(&ioport_resource, &lp1);
509 	if (mdesc->reserve_lp2)
510 		request_resource(&ioport_resource, &lp2);
511 }
512 
513 /*
514  *  Tag parsing.
515  *
516  * This is the new way of passing data to the kernel at boot time.  Rather
517  * than passing a fixed inflexible structure to the kernel, we pass a list
518  * of variable-sized tags to the kernel.  The first tag must be an ATAG_CORE
519  * tag for the list to be recognised (to distinguish the tagged list from
520  * a param_struct).  The list is terminated with a zero-length tag (this tag
521  * is not parsed in any way).
522  */
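/*
 * For illustration, a minimal tag list as a boot loader might lay it out
 * in memory (this mirrors the init_tags defaults further down):
 *
 *	{ tag_size(tag_core),  ATAG_CORE }  { flags=1, pagesize, rootdev=0xff }
 *	{ tag_size(tag_mem32), ATAG_MEM  }  { size=..., start=... }
 *	{ 0,                   ATAG_NONE }
 *
 * Each entry is a struct tag_header (size in 32-bit words, tag id)
 * followed by the tag-specific payload; the zero-sized ATAG_NONE
 * terminates the list.
 */
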
523 static int __init parse_tag_core(const struct tag *tag)
524 {
525 	if (tag->hdr.size > 2) {
526 		if ((tag->u.core.flags & 1) == 0)
527 			root_mountflags &= ~MS_RDONLY;
528 		ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
529 	}
530 	return 0;
531 }
532 
533 __tagtable(ATAG_CORE, parse_tag_core);
534 
535 static int __init parse_tag_mem32(const struct tag *tag)
536 {
537 	return arm_add_memory(tag->u.mem.start, tag->u.mem.size);
538 }
539 
540 __tagtable(ATAG_MEM, parse_tag_mem32);
541 
542 #if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
543 struct screen_info screen_info = {
544  .orig_video_lines	= 30,
545  .orig_video_cols	= 80,
546  .orig_video_mode	= 0,
547  .orig_video_ega_bx	= 0,
548  .orig_video_isVGA	= 1,
549  .orig_video_points	= 8
550 };
551 
552 static int __init parse_tag_videotext(const struct tag *tag)
553 {
554 	screen_info.orig_x            = tag->u.videotext.x;
555 	screen_info.orig_y            = tag->u.videotext.y;
556 	screen_info.orig_video_page   = tag->u.videotext.video_page;
557 	screen_info.orig_video_mode   = tag->u.videotext.video_mode;
558 	screen_info.orig_video_cols   = tag->u.videotext.video_cols;
559 	screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
560 	screen_info.orig_video_lines  = tag->u.videotext.video_lines;
561 	screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
562 	screen_info.orig_video_points = tag->u.videotext.video_points;
563 	return 0;
564 }
565 
566 __tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
567 #endif
568 
569 static int __init parse_tag_ramdisk(const struct tag *tag)
570 {
571 	setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
572 		      (tag->u.ramdisk.flags & 2) == 0,
573 		      tag->u.ramdisk.start, tag->u.ramdisk.size);
574 	return 0;
575 }
576 
577 __tagtable(ATAG_RAMDISK, parse_tag_ramdisk);
578 
579 static int __init parse_tag_serialnr(const struct tag *tag)
580 {
581 	system_serial_low = tag->u.serialnr.low;
582 	system_serial_high = tag->u.serialnr.high;
583 	return 0;
584 }
585 
586 __tagtable(ATAG_SERIAL, parse_tag_serialnr);
587 
588 static int __init parse_tag_revision(const struct tag *tag)
589 {
590 	system_rev = tag->u.revision.rev;
591 	return 0;
592 }
593 
594 __tagtable(ATAG_REVISION, parse_tag_revision);
595 
596 static int __init parse_tag_cmdline(const struct tag *tag)
597 {
598 	strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
599 	return 0;
600 }
601 
602 __tagtable(ATAG_CMDLINE, parse_tag_cmdline);
603 
604 /*
605  * Scan the tag table for this tag, and call its parse function.
606  * The tag table is built by the linker from all the __tagtable
607  * declarations.
608  */
609 static int __init parse_tag(const struct tag *tag)
610 {
611 	extern struct tagtable __tagtable_begin, __tagtable_end;
612 	struct tagtable *t;
613 
614 	for (t = &__tagtable_begin; t < &__tagtable_end; t++)
615 		if (tag->hdr.tag == t->tag) {
616 			t->parse(tag);
617 			break;
618 		}
619 
620 	return t < &__tagtable_end;
621 }
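/*
 * The table walked above is populated by the __tagtable() uses earlier in
 * this file.  The macro (from asm/setup.h) roughly boils down to
 *
 *	static struct tagtable __tagtable_fn __used
 *		__attribute__((__section__(".taglist.init"))) = { tag, fn };
 *
 * so the linker script simply gathers all such entries between
 * __tagtable_begin and __tagtable_end.
 */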
622 
623 /*
624  * Parse all tags in the list, checking both the global and architecture
625  * specific tag tables.
626  */
627 static void __init parse_tags(const struct tag *t)
628 {
629 	for (; t->hdr.size; t = tag_next(t))
630 		if (!parse_tag(t))
631 			printk(KERN_WARNING
632 				"Ignoring unrecognised tag 0x%08x\n",
633 				t->hdr.tag);
634 }
635 
636 /*
637  * This holds our defaults.
638  */
639 static struct init_tags {
640 	struct tag_header hdr1;
641 	struct tag_core   core;
642 	struct tag_header hdr2;
643 	struct tag_mem32  mem;
644 	struct tag_header hdr3;
645 } init_tags __initdata = {
646 	{ tag_size(tag_core), ATAG_CORE },
647 	{ 1, PAGE_SIZE, 0xff },
648 	{ tag_size(tag_mem32), ATAG_MEM },
649 	{ MEM_SIZE, PHYS_OFFSET },
650 	{ 0, ATAG_NONE }
651 };
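/*
 * Read together with the structure above, these defaults describe a core
 * tag with flags=1 (keep the root read-only), PAGE_SIZE and rootdev 0xff,
 * plus a single MEM_SIZE bank (16MB unless overridden) starting at
 * PHYS_OFFSET, terminated by ATAG_NONE.  They are only used when neither
 * the boot loader nor the machine record supplies a usable tag list.
 */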
652 
653 static void (*init_machine)(void) __initdata;
654 
655 static int __init customize_machine(void)
656 {
657 	/* customizes platform devices, or adds new ones */
658 	if (init_machine)
659 		init_machine();
660 	return 0;
661 }
662 arch_initcall(customize_machine);
663 
664 void __init setup_arch(char **cmdline_p)
665 {
666 	struct tag *tags = (struct tag *)&init_tags;
667 	struct machine_desc *mdesc;
668 	char *from = default_command_line;
669 
670 	unwind_init();
671 
672 	setup_processor();
673 	mdesc = setup_machine(machine_arch_type);
674 	machine_name = mdesc->name;
675 
676 	if (mdesc->soft_reboot)
677 		reboot_setup("s");
678 
679 	if (__atags_pointer)
680 		tags = phys_to_virt(__atags_pointer);
681 	else if (mdesc->boot_params)
682 		tags = phys_to_virt(mdesc->boot_params);
683 
684 	/*
685 	 * If we have the old style parameters, convert them to
686 	 * a tag list.
687 	 */
688 	if (tags->hdr.tag != ATAG_CORE)
689 		convert_to_tag_list(tags);
690 	if (tags->hdr.tag != ATAG_CORE)
691 		tags = (struct tag *)&init_tags;
692 
693 	if (mdesc->fixup)
694 		mdesc->fixup(mdesc, tags, &from, &meminfo);
695 
696 	if (tags->hdr.tag == ATAG_CORE) {
697 		if (meminfo.nr_banks != 0)
698 			squash_mem_tags(tags);
699 		save_atags(tags);
700 		parse_tags(tags);
701 	}
702 
703 	init_mm.start_code = (unsigned long) _text;
704 	init_mm.end_code   = (unsigned long) _etext;
705 	init_mm.end_data   = (unsigned long) _edata;
706 	init_mm.brk	   = (unsigned long) _end;
707 
708 	/* parse_early_param needs a boot_command_line */
709 	strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);
710 
711 	/* populate cmd_line too for later use, preserving boot_command_line */
712 	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
713 	*cmdline_p = cmd_line;
714 
715 	parse_early_param();
716 
717 	paging_init(mdesc);
718 	request_standard_resources(&meminfo, mdesc);
719 
720 #ifdef CONFIG_SMP
721 	smp_init_cpus();
722 #endif
723 
724 	cpu_init();
725 	tcm_init();
726 
727 	/*
728 	 * Set up various architecture-specific pointers
729 	 */
730 	init_arch_irq = mdesc->init_irq;
731 	system_timer = mdesc->timer;
732 	init_machine = mdesc->init_machine;
733 
734 #ifdef CONFIG_VT
735 #if defined(CONFIG_VGA_CONSOLE)
736 	conswitchp = &vga_con;
737 #elif defined(CONFIG_DUMMY_CONSOLE)
738 	conswitchp = &dummy_con;
739 #endif
740 #endif
741 	early_trap_init();
742 }
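/*
 * In short, setup_arch() (a) identifies the CPU and machine, (b) obtains a
 * tag list from the boot loader, the machine's boot_params or the built-in
 * defaults, converting old param_struct data if needed, (c) lets the
 * machine's fixup() adjust things and then parses the tags, the command
 * line and the early parameters, (d) sets up paging and the standard
 * resources, and (e) records the machine's irq/timer/init_machine hooks
 * for the later stages of boot.
 */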
743 
744 
745 static int __init topology_init(void)
746 {
747 	int cpu;
748 
749 	for_each_possible_cpu(cpu) {
750 		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
751 		cpuinfo->cpu.hotpluggable = 1;
752 		register_cpu(&cpuinfo->cpu, cpu);
753 	}
754 
755 	return 0;
756 }
757 subsys_initcall(topology_init);
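/*
 * Registering each possible CPU here is what makes the per-CPU directories
 * under /sys/devices/system/cpu/ appear; marking them hotpluggable tells
 * the generic cpu driver that an online/offline control makes sense for
 * them.
 */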
758 
759 #ifdef CONFIG_HAVE_PROC_CPU
760 static int __init proc_cpu_init(void)
761 {
762 	struct proc_dir_entry *res;
763 
764 	res = proc_mkdir("cpu", NULL);
765 	if (!res)
766 		return -ENOMEM;
767 	return 0;
768 }
769 fs_initcall(proc_cpu_init);
770 #endif
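/*
 * The empty /proc/cpu directory created here is the anchor point for other
 * ARM-specific entries, e.g. the alignment trap controls provided by
 * arch/arm/mm/alignment.c as /proc/cpu/alignment.
 */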
771 
772 static const char *hwcap_str[] = {
773 	"swp",
774 	"half",
775 	"thumb",
776 	"26bit",
777 	"fastmult",
778 	"fpa",
779 	"vfp",
780 	"edsp",
781 	"java",
782 	"iwmmxt",
783 	"crunch",
784 	"thumbee",
785 	"neon",
786 	"vfpv3",
787 	"vfpv3d16",
788 	NULL
789 };
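/*
 * The strings are indexed by HWCAP_* bit number (see asm/hwcap.h), so the
 * "Features" line in /proc/cpuinfo is simply the set bits of elf_hwcap
 * spelled out, e.g. something like:
 *
 *	Features	: swp half thumb fastmult vfp edsp neon vfpv3
 *
 * (illustrative output for a VFPv3/NEON capable core).
 */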
790 
791 static int c_show(struct seq_file *m, void *v)
792 {
793 	int i;
794 
795 	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
796 		   cpu_name, read_cpuid_id() & 15, elf_platform);
797 
798 #if defined(CONFIG_SMP)
799 	for_each_online_cpu(i) {
800 		/*
801 		 * glibc reads /proc/cpuinfo to determine the number of
802 		 * online processors, looking for lines beginning with
803 		 * "processor".  Give glibc what it expects.
804 		 */
805 		seq_printf(m, "processor\t: %d\n", i);
806 		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
807 			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
808 			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
809 	}
810 #else /* CONFIG_SMP */
811 	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
812 		   loops_per_jiffy / (500000/HZ),
813 		   (loops_per_jiffy / (5000/HZ)) % 100);
814 #endif
815 
816 	/* dump out the processor features */
817 	seq_puts(m, "Features\t: ");
818 
819 	for (i = 0; hwcap_str[i]; i++)
820 		if (elf_hwcap & (1 << i))
821 			seq_printf(m, "%s ", hwcap_str[i]);
822 
823 	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
824 	seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);
825 
826 	if ((read_cpuid_id() & 0x0008f000) == 0x00000000) {
827 		/* pre-ARM7 */
828 		seq_printf(m, "CPU part\t: %07x\n", read_cpuid_id() >> 4);
829 	} else {
830 		if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
831 			/* ARM7 */
832 			seq_printf(m, "CPU variant\t: 0x%02x\n",
833 				   (read_cpuid_id() >> 16) & 127);
834 		} else {
835 			/* post-ARM7 */
836 			seq_printf(m, "CPU variant\t: 0x%x\n",
837 				   (read_cpuid_id() >> 20) & 15);
838 		}
839 		seq_printf(m, "CPU part\t: 0x%03x\n",
840 			   (read_cpuid_id() >> 4) & 0xfff);
841 	}
842 	seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);
843 
844 	seq_puts(m, "\n");
845 
846 	seq_printf(m, "Hardware\t: %s\n", machine_name);
847 	seq_printf(m, "Revision\t: %04x\n", system_rev);
848 	seq_printf(m, "Serial\t\t: %08x%08x\n",
849 		   system_serial_high, system_serial_low);
850 
851 	return 0;
852 }
853 
854 static void *c_start(struct seq_file *m, loff_t *pos)
855 {
856 	return *pos < 1 ? (void *)1 : NULL;
857 }
858 
859 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
860 {
861 	++*pos;
862 	return NULL;
863 }
864 
865 static void c_stop(struct seq_file *m, void *v)
866 {
867 }
868 
869 const struct seq_operations cpuinfo_op = {
870 	.start	= c_start,
871 	.next	= c_next,
872 	.stop	= c_stop,
873 	.show	= c_show
874 };
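/*
 * The start/next/stop trio implements the usual single-record seq_file
 * pattern: c_start() yields a non-NULL token only for position 0, so
 * c_show() runs exactly once per read and c_next() immediately ends the
 * iteration.  The proc layer wires /proc/cpuinfo up to cpuinfo_op.
 */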
875