/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/fs.h>

#include <asm/unified.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>

#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/traps.h>
#include <asm/unwind.h>

#include "compat.h"
#include "atags.h"

#ifndef MEM_SIZE
#define MEM_SIZE	(16*1024*1024)
#endif

#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void paging_init(struct machine_desc *desc);
extern void reboot_setup(char *str);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap;
EXPORT_SYMBOL(elf_hwcap);


#ifdef MULTI_CPU
struct processor processor;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache;
#endif

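/*
 * Per-CPU stacks for the IRQ, abort and undefined-instruction exception
 * modes; cpu_init() points each mode's stack pointer at the matching
 * member of this CPU's entry.
 */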
struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata command_line[COMMAND_LINE_SIZE];

static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
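/*
 * Reading the low-order byte of the long yields 'l' on a little-endian
 * CPU and 'b' on a big-endian one; the character is appended to the
 * machine and ELF platform names in setup_processor().
 */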
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel text",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

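/*
 * Legacy PC parallel port I/O ranges (lp0-lp2); they are only reserved
 * on machines whose machine_desc asks for them in
 * request_standard_resources().
 */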
static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

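/*
 * Architecture name strings, indexed by the value returned from
 * cpu_architecture(); used for the boot banner and /proc/cpuinfo.
 */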
static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

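/*
 * Work out which CPU architecture version we are running on from the
 * main ID register; CPUs using the revised CPUID layout are classified
 * by reading Memory Model Feature Register 0 instead.
 */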
int cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		unsigned int mmfr0;

		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		asm("mrc	p15, 0, %0, c0, c1, 4"
		    : "=r" (mmfr0));
		if ((mmfr0 & 0x0000000f) == 0x00000003 ||
		    (mmfr0 & 0x000000f0) == 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}

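/*
 * Work out the cache organisation (VIVT, VIPT aliasing or VIPT
 * non-aliasing, plus ASID-tagged instruction caches) from the cache
 * type register and record it in the global cacheid.
 */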
static void __init cacheid_init(void)
{
	unsigned int cachetype = read_cpuid_cachetype();
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			cacheid = CACHEID_VIPT_NONALIASING;
			if ((cachetype & (3 << 14)) == 1 << 14)
				cacheid |= CACHEID_ASID_TAGGED;
		} else if (cachetype & (1 << 23))
			cacheid = CACHEID_VIPT_ALIASING;
		else
			cacheid = CACHEID_VIPT_NONALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	printk("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);
extern struct machine_desc *lookup_machine_type(unsigned int);

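/*
 * setup_processor - identify the boot CPU, bind the per-CPU-type
 * function tables (processor, TLB, user and cache ops) and report
 * the result.
 */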
static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
	sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~HWCAP_THUMB;
#endif

	cacheid_init();
	cpu_proc_init();
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}

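/*
 * Look up the machine_desc for this machine number in the linker-built
 * table of supported machines; hang if it is not found.
 */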
static struct machine_desc * __init setup_machine(unsigned int nr)
{
	struct machine_desc *list;

	/*
	 * locate machine in the list of supported machines.
	 */
	list = lookup_machine_type(nr);
	if (!list) {
		printk("Machine configuration botched (nr %d), unable "
		       "to continue.\n", nr);
		while (1);
	}

	printk("Machine: %s\n", list->name);

	return list;
}

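/*
 * Register a bank of RAM with meminfo.  The bank is page-aligned: the
 * start address is rounded up and the size rounded down accordingly.
 */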
static int __init arm_add_memory(unsigned long start, unsigned long size)
{
	struct membank *bank = &meminfo.bank[meminfo.nr_banks];

	if (meminfo.nr_banks >= NR_BANKS) {
		printk(KERN_CRIT "NR_BANKS too low, "
			"ignoring memory at %#lx\n", start);
		return -EINVAL;
	}

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is appropriately rounded down, start is rounded up.
	 */
	size -= start & ~PAGE_MASK;
	bank->start = PAGE_ALIGN(start);
	bank->size  = size & PAGE_MASK;
	bank->node  = PHYS_TO_NID(start);

	/*
	 * Reject the bank if it ends up with zero size or an
	 * invalid node number.
	 */
	if (bank->size == 0 || bank->node >= MAX_NUMNODES)
		return -EINVAL;

	meminfo.nr_banks++;
	return 0;
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
static void __init early_mem(char **p)
{
	static int usermem __initdata = 0;
	unsigned long size, start;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		meminfo.nr_banks = 0;
	}

	start = PHYS_OFFSET;
	size  = memparse(*p, p);
	if (**p == '@')
		start = memparse(*p + 1, p);

	arm_add_memory(start, size);
}
__early_param("mem=", early_mem);

/*
 * Initial parsing of the command line.
 */
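/*
 * Early parameters registered with __early_param() (such as "mem="
 * above) are recognised here, handed their argument, and stripped
 * from the command line before it is copied for later use.
 */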
static void __init parse_cmdline(char **cmdline_p, char *from)
{
	char c = ' ', *to = command_line;
	int len = 0;

	for (;;) {
		if (c == ' ') {
			extern struct early_params __early_begin, __early_end;
			struct early_params *p;

			for (p = &__early_begin; p < &__early_end; p++) {
				int arglen = strlen(p->arg);

				if (memcmp(from, p->arg, arglen) == 0) {
					if (to != command_line)
						to -= 1;
					from += arglen;
					p->fn(&from);

					while (*from != ' ' && *from != '\0')
						from++;
					break;
				}
			}
		}
		c = *from++;
		if (!c)
			break;
		if (COMMAND_LINE_SIZE <= ++len)
			break;
		*to++ = c;
	}
	*to = '\0';
	*cmdline_p = command_line;
}

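/*
 * Configure the initial RAM disk (load/prompt flags, start block and
 * size) when the kernel has RAM disk support built in.
 */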
static void __init
setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
{
#ifdef CONFIG_BLK_DEV_RAM
	extern int rd_size, rd_image_start, rd_prompt, rd_doload;

	rd_image_start = image_start;
	rd_prompt = prompt;
	rd_doload = doload;

	if (rd_sz)
		rd_size = rd_sz;
#endif
}

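/*
 * Claim the standard resources: each bank of system RAM, the kernel
 * text and data within it, and optionally the video RAM and legacy
 * parallel port regions requested by the machine description.
 */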
static void __init
request_standard_resources(struct meminfo *mi, struct machine_desc *mdesc)
{
	struct resource *res;
	int i;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_data.start   = virt_to_phys(_data);
	kernel_data.end     = virt_to_phys(_end - 1);

	for (i = 0; i < mi->nr_banks; i++) {
		if (mi->bank[i].size == 0)
			continue;

		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = mi->bank[i].start;
		res->end   = mi->bank[i].start + mi->bank[i].size - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines can never have lp0, lp1 or lp2, so only
	 * reserve the ports the machine description asks for.
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

/*
 *  Tag parsing.
 *
 * This is the new way of passing data to the kernel at boot time.  Rather
 * than passing a fixed inflexible structure to the kernel, we pass a list
 * of variable-sized tags to the kernel.  The first tag must be an ATAG_CORE
 * tag for the list to be recognised (to distinguish the tagged list from
 * a param_struct).  The list is terminated with a zero-length tag (this tag
 * is not parsed in any way).
 */
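/*
 * A minimal list therefore looks like:
 *
 *	ATAG_CORE			- mandatory first tag
 *	ATAG_MEM, ATAG_CMDLINE, ...	- optional tags, one per item
 *	ATAG_NONE			- zero-length terminator
 *
 * init_tags further down provides exactly such a fallback list.
 */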
static int __init parse_tag_core(const struct tag *tag)
{
	if (tag->hdr.size > 2) {
		if ((tag->u.core.flags & 1) == 0)
			root_mountflags &= ~MS_RDONLY;
		ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
	}
	return 0;
}

__tagtable(ATAG_CORE, parse_tag_core);

static int __init parse_tag_mem32(const struct tag *tag)
{
	return arm_add_memory(tag->u.mem.start, tag->u.mem.size);
}

__tagtable(ATAG_MEM, parse_tag_mem32);

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
 .orig_video_lines	= 30,
 .orig_video_cols	= 80,
 .orig_video_mode	= 0,
 .orig_video_ega_bx	= 0,
 .orig_video_isVGA	= 1,
 .orig_video_points	= 8
};

static int __init parse_tag_videotext(const struct tag *tag)
{
	screen_info.orig_x            = tag->u.videotext.x;
	screen_info.orig_y            = tag->u.videotext.y;
	screen_info.orig_video_page   = tag->u.videotext.video_page;
	screen_info.orig_video_mode   = tag->u.videotext.video_mode;
	screen_info.orig_video_cols   = tag->u.videotext.video_cols;
	screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
	screen_info.orig_video_lines  = tag->u.videotext.video_lines;
	screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
	screen_info.orig_video_points = tag->u.videotext.video_points;
	return 0;
}

__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
#endif

static int __init parse_tag_ramdisk(const struct tag *tag)
{
	setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
		      (tag->u.ramdisk.flags & 2) == 0,
		      tag->u.ramdisk.start, tag->u.ramdisk.size);
	return 0;
}

__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);

static int __init parse_tag_serialnr(const struct tag *tag)
{
	system_serial_low = tag->u.serialnr.low;
	system_serial_high = tag->u.serialnr.high;
	return 0;
}

__tagtable(ATAG_SERIAL, parse_tag_serialnr);

static int __init parse_tag_revision(const struct tag *tag)
{
	system_rev = tag->u.revision.rev;
	return 0;
}

__tagtable(ATAG_REVISION, parse_tag_revision);

static int __init parse_tag_cmdline(const struct tag *tag)
{
	strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
	return 0;
}

__tagtable(ATAG_CMDLINE, parse_tag_cmdline);

/*
 * Scan the tag table for this tag, and call its parse function.
 * The tag table is built by the linker from all the __tagtable
 * declarations.
 */
static int __init parse_tag(const struct tag *tag)
{
	extern struct tagtable __tagtable_begin, __tagtable_end;
	struct tagtable *t;

	for (t = &__tagtable_begin; t < &__tagtable_end; t++)
		if (tag->hdr.tag == t->tag) {
			t->parse(tag);
			break;
		}

	return t < &__tagtable_end;
}

/*
 * Parse all tags in the list, checking both the global and architecture
 * specific tag tables.
 */
static void __init parse_tags(const struct tag *t)
{
	for (; t->hdr.size; t = tag_next(t))
		if (!parse_tag(t))
			printk(KERN_WARNING
				"Ignoring unrecognised tag 0x%08x\n",
				t->hdr.tag);
}

/*
 * This holds our defaults: a minimal tag list describing MEM_SIZE
 * bytes of RAM at PHYS_OFFSET, used when the boot loader does not
 * supply a usable tag list of its own.
 */
static struct init_tags {
	struct tag_header hdr1;
	struct tag_core   core;
	struct tag_header hdr2;
	struct tag_mem32  mem;
	struct tag_header hdr3;
} init_tags __initdata = {
	{ tag_size(tag_core), ATAG_CORE },
	{ 1, PAGE_SIZE, 0xff },
	{ tag_size(tag_mem32), ATAG_MEM },
	{ MEM_SIZE, PHYS_OFFSET },
	{ 0, ATAG_NONE }
};

static void (*init_machine)(void) __initdata;

static int __init customize_machine(void)
{
	/* customizes platform devices, or adds new ones */
	if (init_machine)
		init_machine();
	return 0;
}
arch_initcall(customize_machine);

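/*
 * setup_arch - ARM-specific boot-time setup.  Identifies the CPU and
 * machine, locates and parses the boot tags and command line, sets up
 * the page tables and standard resources, and records the machine's
 * IRQ, timer and init hooks for later use.
 */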
void __init setup_arch(char **cmdline_p)
{
	struct tag *tags = (struct tag *)&init_tags;
	struct machine_desc *mdesc;
	char *from = default_command_line;

	unwind_init();

	setup_processor();
	mdesc = setup_machine(machine_arch_type);
	machine_name = mdesc->name;

	if (mdesc->soft_reboot)
		reboot_setup("s");

	if (__atags_pointer)
		tags = phys_to_virt(__atags_pointer);
	else if (mdesc->boot_params)
		tags = phys_to_virt(mdesc->boot_params);

	/*
	 * If we have the old style parameters, convert them to
	 * a tag list.
	 */
	if (tags->hdr.tag != ATAG_CORE)
		convert_to_tag_list(tags);
	if (tags->hdr.tag != ATAG_CORE)
		tags = (struct tag *)&init_tags;

	if (mdesc->fixup)
		mdesc->fixup(mdesc, tags, &from, &meminfo);

	if (tags->hdr.tag == ATAG_CORE) {
		if (meminfo.nr_banks != 0)
			squash_mem_tags(tags);
		save_atags(tags);
		parse_tags(tags);
	}

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	memcpy(boot_command_line, from, COMMAND_LINE_SIZE);
	boot_command_line[COMMAND_LINE_SIZE-1] = '\0';
	parse_cmdline(cmdline_p, from);
	paging_init(mdesc);
	request_standard_resources(&meminfo, mdesc);

#ifdef CONFIG_SMP
	smp_init_cpus();
#endif

	cpu_init();

	/*
	 * Set up various architecture-specific pointers
	 */
	init_arch_irq = mdesc->init_irq;
	system_timer = mdesc->timer;
	init_machine = mdesc->init_machine;

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
	early_trap_init();
}


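/*
 * Register each possible CPU with the CPU subsystem so it appears
 * under sysfs; all CPUs are marked as hotpluggable.
 */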
static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = 1;
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}

subsys_initcall(topology_init);

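/*
 * Feature names for the "Features" line of /proc/cpuinfo, in HWCAP
 * bit order.
 */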
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	NULL
};

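/*
 * Produce the contents of /proc/cpuinfo: the processor name and
 * BogoMIPS (per online CPU on SMP), the feature list, the decoded
 * CPU ID fields and the machine/revision/serial information.
 */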
static int c_show(struct seq_file *m, void *v)
{
	int i;

	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
		   cpu_name, read_cpuid_id() & 15, elf_platform);

#if defined(CONFIG_SMP)
	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
	}
#else /* CONFIG_SMP */
	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000/HZ),
		   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

	/* dump out the processor features */
	seq_puts(m, "Features\t: ");

	for (i = 0; hwcap_str[i]; i++)
		if (elf_hwcap & (1 << i))
			seq_printf(m, "%s ", hwcap_str[i]);

	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
	seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

	if ((read_cpuid_id() & 0x0008f000) == 0x00000000) {
		/* pre-ARM7 */
		seq_printf(m, "CPU part\t: %07x\n", read_cpuid_id() >> 4);
	} else {
		if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
			/* ARM7 */
			seq_printf(m, "CPU variant\t: 0x%02x\n",
				   (read_cpuid_id() >> 16) & 127);
		} else {
			/* post-ARM7 */
			seq_printf(m, "CPU variant\t: 0x%x\n",
				   (read_cpuid_id() >> 20) & 15);
		}
		seq_printf(m, "CPU part\t: 0x%03x\n",
			   (read_cpuid_id() >> 4) & 0xfff);
	}
	seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);

	seq_puts(m, "\n");

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};