/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/kexec.h>

#include <asm/cpu.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>

#include "compat.h"

#ifndef MEM_SIZE
#define MEM_SIZE	(16*1024*1024)
#endif

#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void paging_init(struct meminfo *, struct machine_desc *desc);
extern void reboot_setup(char *str);
extern int root_mountflags;
extern void _stext, _text, _etext, __data_start, _edata, _end;

unsigned int processor_id;
unsigned int __machine_arch_type;
EXPORT_SYMBOL(__machine_arch_type);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap;
EXPORT_SYMBOL(elf_hwcap);


#ifdef MULTI_CPU
struct processor processor;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache;
#endif

struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

unsigned long phys_initrd_start __initdata = 0;
unsigned long phys_initrd_size __initdata = 0;

static struct meminfo meminfo __initdata = { 0, };
static const char *cpu_name;
static const char *machine_name;
static char __initdata command_line[COMMAND_LINE_SIZE];

static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)
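
/*
 * ENDIANNESS resolves to a single character describing the CPU byte order:
 * the union overlays the bytes 'l','?','?','b' with an unsigned long, and
 * casting that long to char returns its least significant byte -- 'l' on a
 * little-endian kernel, 'b' on a big-endian one.  setup_processor() appends
 * this character to the utsname machine string and to elf_platform, so a
 * little-endian ARMv5TEJ build typically reports something like
 * "armv5tejl" and "v5l".
 */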

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel text",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *cache_types[16] = {
	"write-through",
	"write-back",
	"write-back",
	"undefined 3",
	"undefined 4",
	"undefined 5",
	"write-back",
	"write-back",
	"undefined 8",
	"undefined 9",
	"undefined 10",
	"undefined 11",
	"undefined 12",
	"undefined 13",
	"write-back",
	"undefined 15",
};

static const char *cache_clean[16] = {
	"not required",
	"read-block",
	"cp15 c7 ops",
	"undefined 3",
	"undefined 4",
	"undefined 5",
	"cp15 c7 ops",
	"cp15 c7 ops",
	"undefined 8",
	"undefined 9",
	"undefined 10",
	"undefined 11",
	"undefined 12",
	"undefined 13",
	"cp15 c7 ops",
	"undefined 15",
};

static const char *cache_lockdown[16] = {
	"not supported",
	"not supported",
	"not supported",
	"undefined 3",
	"undefined 4",
	"undefined 5",
	"format A",
	"format B",
	"undefined 8",
	"undefined 9",
	"undefined 10",
	"undefined 11",
	"undefined 12",
	"undefined 13",
	"format C",
	"undefined 15",
};

static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

#define CACHE_TYPE(x)	(((x) >> 25) & 15)
#define CACHE_S(x)	((x) & (1 << 24))
#define CACHE_DSIZE(x)	(((x) >> 12) & 4095)	/* only if S=1 */
#define CACHE_ISIZE(x)	((x) & 4095)

#define CACHE_SIZE(y)	(((y) >> 6) & 7)
#define CACHE_ASSOC(y)	(((y) >> 3) & 7)
#define CACHE_M(y)	((y) & (1 << 2))
#define CACHE_LINE(y)	((y) & 3)

static inline void dump_cache(const char *prefix, int cpu, unsigned int cache)
{
	unsigned int mult = 2 + (CACHE_M(cache) ? 1 : 0);

	printk("CPU%u: %s: %d bytes, associativity %d, %d byte lines, %d sets\n",
		cpu, prefix,
		mult << (8 + CACHE_SIZE(cache)),
		(mult << CACHE_ASSOC(cache)) >> 1,
		8 << CACHE_LINE(cache),
		1 << (6 + CACHE_SIZE(cache) - CACHE_ASSOC(cache) -
			CACHE_LINE(cache)));
}
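
/*
 * dump_cache() decodes one halfword of the pre-ARMv7 cache type register
 * using the CACHE_* macros above: with M clear, mult is 2 and the total
 * size is mult << (8 + SIZE) bytes.  As an illustrative decode, SIZE=5,
 * ASSOC=2, LINE=2, M=0 gives 2 << 13 = 16384 bytes, (2 << 2) >> 1 = 4-way
 * associativity, 8 << 2 = 32-byte lines and 1 << (6 + 5 - 2 - 2) = 128
 * sets, which is self-consistent (128 * 4 * 32 = 16384).
 */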

static void __init dump_cpu_info(int cpu)
{
	unsigned int info = read_cpuid(CPUID_CACHETYPE);

	if (info != processor_id) {
		printk("CPU%u: D %s %s cache\n", cpu, cache_is_vivt() ? "VIVT" : "VIPT",
		       cache_types[CACHE_TYPE(info)]);
		if (CACHE_S(info)) {
			dump_cache("I cache", cpu, CACHE_ISIZE(info));
			dump_cache("D cache", cpu, CACHE_DSIZE(info));
		} else {
			dump_cache("cache", cpu, CACHE_ISIZE(info));
		}
	}

	if (arch_is_coherent())
		printk("Cache coherency enabled\n");
}

int cpu_architecture(void)
{
	int cpu_arch;

	if ((processor_id & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((processor_id & 0x0008f000) == 0x00007000) {
		cpu_arch = (processor_id & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((processor_id & 0x00080000) == 0x00000000) {
		cpu_arch = (processor_id >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((processor_id & 0x000f0000) == 0x000f0000) {
		unsigned int mmfr0;

		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		asm("mrc	p15, 0, %0, c0, c1, 4"
		    : "=r" (mmfr0));
		if ((mmfr0 & 0x0000000f) == 0x00000003 ||
		    (mmfr0 & 0x000000f0) == 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
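
/*
 * cpu_architecture() maps the main ID register value recorded at boot in
 * processor_id to a CPU_ARCH_* constant: ARM7-style IDs use bit 23 to
 * distinguish ARMv4T from ARMv3, other pre-revised IDs carry the
 * architecture in bits [18:16], and an architecture field of 0xf selects
 * the revised CPUID scheme, where the VMSA/PMSA fields of ID_MMFR0 (read
 * via CP15 c0, c1, 4) distinguish ARMv7 (field value 3) from ARMv6
 * (field value 2).
 */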

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);
extern struct machine_desc *lookup_machine_type(unsigned int);

static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(processor_id);
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", processor_id);
		while (1);
	}

	cpu_name = list->cpu_name;

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, processor_id, (int)processor_id & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
	sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~HWCAP_THUMB;
#endif

	cpu_proc_init();
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init dumps the cache information, initialises SMP specific
 * information, and sets up the per-CPU stacks.
 */
void cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	if (system_state == SYSTEM_BOOTING)
		dump_cpu_info(cpu);

	/*
	 * setup stacks for re-entrant exception handlers
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	sp, %0, %2\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	sp, %0, %4\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	sp, %0, %6\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      "I" (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      "I" (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      "I" (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      "I" (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}
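
/*
 * The inline assembly above switches into IRQ, ABT and UND mode in turn
 * (with IRQs and FIQs masked), points each mode's banked sp at the
 * corresponding three-word slot in this CPU's struct stack, and finally
 * returns to SVC mode.  The exception entry code only needs these tiny
 * stacks for long enough to save a few registers before transferring to
 * the SVC stack.
 */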

static struct machine_desc * __init setup_machine(unsigned int nr)
{
	struct machine_desc *list;

	/*
	 * locate machine in the list of supported machines.
	 */
	list = lookup_machine_type(nr);
	if (!list) {
		printk("Machine configuration botched (nr %d), unable "
		       "to continue.\n", nr);
		while (1);
	}

	printk("Machine: %s\n", list->name);

	return list;
}

static void __init early_initrd(char **p)
{
	unsigned long start, size;

	start = memparse(*p, p);
	if (**p == ',') {
		size = memparse((*p) + 1, p);

		phys_initrd_start = start;
		phys_initrd_size = size;
	}
}
__early_param("initrd=", early_initrd);

static void __init arm_add_memory(unsigned long start, unsigned long size)
{
	struct membank *bank;

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is appropriately rounded down, start is rounded up.
	 */
	size -= start & ~PAGE_MASK;

	bank = &meminfo.bank[meminfo.nr_banks++];

	bank->start = PAGE_ALIGN(start);
	bank->size  = size & PAGE_MASK;
	bank->node  = PHYS_TO_NID(start);
}
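
/*
 * Example of the rounding above (illustrative values, 4 KiB pages): a bank
 * passed in as start=0x20000800, size=0x100000 first has the 0x800
 * intra-page offset subtracted from the size, then start is rounded up to
 * 0x20001000 and the size rounded down to 0xff000, so the recorded bank
 * never extends past the caller's [start, start + size) range.
 */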

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
static void __init early_mem(char **p)
{
	static int usermem __initdata = 0;
	unsigned long size, start;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		meminfo.nr_banks = 0;
	}

	start = PHYS_OFFSET;
	size  = memparse(*p, p);
	if (**p == '@')
		start = memparse(*p + 1, p);

	arm_add_memory(start, size);
}
__early_param("mem=", early_mem);
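
/*
 * Typical usage (illustrative addresses): "mem=64M" registers a 64 MB bank
 * at PHYS_OFFSET, while "mem=64M@0x20000000 mem=64M@0x28000000" clears any
 * bootloader-provided banks on the first occurrence and then registers two
 * explicit banks.  memparse() accepts the K/k/M/m suffixes mentioned above
 * as well as G/g.
 */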

/*
 * Initial parsing of the command line.
 */
static void __init parse_cmdline(char **cmdline_p, char *from)
{
	char c = ' ', *to = command_line;
	int len = 0;

	for (;;) {
		if (c == ' ') {
			extern struct early_params __early_begin, __early_end;
			struct early_params *p;

			for (p = &__early_begin; p < &__early_end; p++) {
				int len = strlen(p->arg);

				if (memcmp(from, p->arg, len) == 0) {
					if (to != command_line)
						to -= 1;
					from += len;
					p->fn(&from);

					while (*from != ' ' && *from != '\0')
						from++;
					break;
				}
			}
		}
		c = *from++;
		if (!c)
			break;
		if (COMMAND_LINE_SIZE <= ++len)
			break;
		*to++ = c;
	}
	*to = '\0';
	*cmdline_p = command_line;
}
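
/*
 * parse_cmdline() walks the boot command line word by word.  Whenever a
 * word starts with one of the __early_param() prefixes collected by the
 * linker between __early_begin and __early_end (e.g. "mem=" or "initrd="
 * above), the registered handler consumes its argument and the whole
 * option is dropped from the copy handed back in *cmdline_p, so later
 * generic option parsing never sees the early parameters.
 */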

static void __init
setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
{
#ifdef CONFIG_BLK_DEV_RAM
	extern int rd_size, rd_image_start, rd_prompt, rd_doload;

	rd_image_start = image_start;
	rd_prompt = prompt;
	rd_doload = doload;

	if (rd_sz)
		rd_size = rd_sz;
#endif
}

static void __init
request_standard_resources(struct meminfo *mi, struct machine_desc *mdesc)
{
	struct resource *res;
	int i;

	kernel_code.start   = virt_to_phys(&_text);
	kernel_code.end     = virt_to_phys(&_etext - 1);
	kernel_data.start   = virt_to_phys(&__data_start);
	kernel_data.end     = virt_to_phys(&_end - 1);

	for (i = 0; i < mi->nr_banks; i++) {
		unsigned long virt_start, virt_end;

		if (mi->bank[i].size == 0)
			continue;

		virt_start = __phys_to_virt(mi->bank[i].start);
		virt_end   = virt_start + mi->bank[i].size - 1;

		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __virt_to_phys(virt_start);
		res->end   = __virt_to_phys(virt_end);
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines never have lp0, lp1 or lp2, so only
	 * reserve the ports the machine description asks for.
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}
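
/*
 * The resulting /proc/iomem layout nests the "Kernel text" and "Kernel
 * data" resources inside the "System RAM" resource of the bank that
 * contains them, and optionally adds "Video RAM"; the lp0/lp1/lp2 entries
 * land in /proc/ioports instead, since they are registered against
 * ioport_resource rather than iomem_resource.
 */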

/*
 *  Tag parsing.
 *
 * This is the new way of passing data to the kernel at boot time.  Rather
 * than passing a fixed inflexible structure to the kernel, we pass a list
 * of variable-sized tags to the kernel.  The first tag must be an ATAG_CORE
 * tag for the list to be recognised (to distinguish the tagged list from
 * a param_struct).  The list is terminated with a zero-length tag (this tag
 * is not parsed in any way).
 */
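
/*
 * A minimal tag list therefore has the shape (header sizes are in 32-bit
 * words, and each tag follows the previous one immediately):
 *
 *   { tag_size(tag_core),  ATAG_CORE } + struct tag_core
 *   { tag_size(tag_mem32), ATAG_MEM  } + struct tag_mem32
 *   ...
 *   { 0,                   ATAG_NONE }
 *
 * This is exactly the layout of the default init_tags structure defined
 * further down in this file.
 */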
static int __init parse_tag_core(const struct tag *tag)
{
	if (tag->hdr.size > 2) {
		if ((tag->u.core.flags & 1) == 0)
			root_mountflags &= ~MS_RDONLY;
		ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
	}
	return 0;
}

__tagtable(ATAG_CORE, parse_tag_core);

static int __init parse_tag_mem32(const struct tag *tag)
{
	if (meminfo.nr_banks >= NR_BANKS) {
		printk(KERN_WARNING
		       "Ignoring memory bank 0x%08x size %dKB\n",
			tag->u.mem.start, tag->u.mem.size / 1024);
		return -EINVAL;
	}
	arm_add_memory(tag->u.mem.start, tag->u.mem.size);
	return 0;
}

__tagtable(ATAG_MEM, parse_tag_mem32);

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
 .orig_video_lines	= 30,
 .orig_video_cols	= 80,
 .orig_video_mode	= 0,
 .orig_video_ega_bx	= 0,
 .orig_video_isVGA	= 1,
 .orig_video_points	= 8
};

static int __init parse_tag_videotext(const struct tag *tag)
{
	screen_info.orig_x            = tag->u.videotext.x;
	screen_info.orig_y            = tag->u.videotext.y;
	screen_info.orig_video_page   = tag->u.videotext.video_page;
	screen_info.orig_video_mode   = tag->u.videotext.video_mode;
	screen_info.orig_video_cols   = tag->u.videotext.video_cols;
	screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
	screen_info.orig_video_lines  = tag->u.videotext.video_lines;
	screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
	screen_info.orig_video_points = tag->u.videotext.video_points;
	return 0;
}

__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
#endif

static int __init parse_tag_ramdisk(const struct tag *tag)
{
	setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
		      (tag->u.ramdisk.flags & 2) == 0,
		      tag->u.ramdisk.start, tag->u.ramdisk.size);
	return 0;
}

__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);

static int __init parse_tag_initrd(const struct tag *tag)
{
	printk(KERN_WARNING "ATAG_INITRD is deprecated; "
		"please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);

static int __init parse_tag_serialnr(const struct tag *tag)
{
	system_serial_low = tag->u.serialnr.low;
	system_serial_high = tag->u.serialnr.high;
	return 0;
}

__tagtable(ATAG_SERIAL, parse_tag_serialnr);

static int __init parse_tag_revision(const struct tag *tag)
{
	system_rev = tag->u.revision.rev;
	return 0;
}

__tagtable(ATAG_REVISION, parse_tag_revision);

static int __init parse_tag_cmdline(const struct tag *tag)
{
	strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
	return 0;
}

__tagtable(ATAG_CMDLINE, parse_tag_cmdline);

/*
 * Scan the tag table for this tag, and call its parse function.
 * The tag table is built by the linker from all the __tagtable
 * declarations.
 */
static int __init parse_tag(const struct tag *tag)
{
	extern struct tagtable __tagtable_begin, __tagtable_end;
	struct tagtable *t;

	for (t = &__tagtable_begin; t < &__tagtable_end; t++)
		if (tag->hdr.tag == t->tag) {
			t->parse(tag);
			break;
		}

	return t < &__tagtable_end;
}

/*
 * Parse all tags in the list, checking both the global and architecture
 * specific tag tables.
 */
static void __init parse_tags(const struct tag *t)
{
	for (; t->hdr.size; t = tag_next(t))
		if (!parse_tag(t))
			printk(KERN_WARNING
				"Ignoring unrecognised tag 0x%08x\n",
				t->hdr.tag);
}

/*
 * This holds our defaults.
 */
static struct init_tags {
	struct tag_header hdr1;
	struct tag_core   core;
	struct tag_header hdr2;
	struct tag_mem32  mem;
	struct tag_header hdr3;
} init_tags __initdata = {
	{ tag_size(tag_core), ATAG_CORE },
	{ 1, PAGE_SIZE, 0xff },
	{ tag_size(tag_mem32), ATAG_MEM },
	{ MEM_SIZE, PHYS_OFFSET },
	{ 0, ATAG_NONE }
};
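
/*
 * When no usable tag list is supplied, the defaults above stand in for
 * the bootloader: an ATAG_CORE with flags=1, pagesize=PAGE_SIZE and
 * rootdev=0xff, one ATAG_MEM bank of MEM_SIZE (16 MB unless overridden)
 * starting at PHYS_OFFSET, and a terminating ATAG_NONE.
 */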

static void (*init_machine)(void) __initdata;

static int __init customize_machine(void)
{
	/* customizes platform devices, or adds new ones */
	if (init_machine)
		init_machine();
	return 0;
}
arch_initcall(customize_machine);

#ifdef CONFIG_KEXEC

/* Physical addr of where the boot params should be for this machine */
extern unsigned long kexec_boot_params_address;

/* Physical addr of the buffer into which the boot params are copied */
extern unsigned long kexec_boot_params_copy;

/* Pointer to the boot params buffer, for manipulation and display */
unsigned long kexec_boot_params;
EXPORT_SYMBOL(kexec_boot_params);

/* The buffer itself - make sure it is sized correctly */
static unsigned long kexec_boot_params_buf[(KEXEC_BOOT_PARAMS_SIZE + 3) / 4];

#endif

void __init setup_arch(char **cmdline_p)
{
	struct tag *tags = (struct tag *)&init_tags;
	struct machine_desc *mdesc;
	char *from = default_command_line;

	setup_processor();
	mdesc = setup_machine(machine_arch_type);
	machine_name = mdesc->name;

	if (mdesc->soft_reboot)
		reboot_setup("s");

	if (__atags_pointer)
		tags = phys_to_virt(__atags_pointer);
	else if (mdesc->boot_params)
		tags = phys_to_virt(mdesc->boot_params);

#ifdef CONFIG_KEXEC
	kexec_boot_params_copy = virt_to_phys(kexec_boot_params_buf);
	kexec_boot_params = (unsigned long)kexec_boot_params_buf;
	if (__atags_pointer) {
		kexec_boot_params_address = __atags_pointer;
		memcpy((void *)kexec_boot_params, tags, KEXEC_BOOT_PARAMS_SIZE);
	} else if (mdesc->boot_params) {
		kexec_boot_params_address = mdesc->boot_params;
		memcpy((void *)kexec_boot_params, tags, KEXEC_BOOT_PARAMS_SIZE);
	}
#endif

	/*
	 * If we have the old style parameters, convert them to
	 * a tag list.
	 */
	if (tags->hdr.tag != ATAG_CORE)
		convert_to_tag_list(tags);
	if (tags->hdr.tag != ATAG_CORE)
		tags = (struct tag *)&init_tags;

	if (mdesc->fixup)
		mdesc->fixup(mdesc, tags, &from, &meminfo);

	if (tags->hdr.tag == ATAG_CORE) {
		if (meminfo.nr_banks != 0)
			squash_mem_tags(tags);
		parse_tags(tags);
	}

	init_mm.start_code = (unsigned long) &_text;
	init_mm.end_code   = (unsigned long) &_etext;
	init_mm.end_data   = (unsigned long) &_edata;
	init_mm.brk	   = (unsigned long) &_end;

	memcpy(boot_command_line, from, COMMAND_LINE_SIZE);
	boot_command_line[COMMAND_LINE_SIZE-1] = '\0';
	parse_cmdline(cmdline_p, from);
	paging_init(&meminfo, mdesc);
	request_standard_resources(&meminfo, mdesc);

#ifdef CONFIG_SMP
	smp_init_cpus();
#endif

	cpu_init();

	/*
	 * Set up various architecture-specific pointers
	 */
	init_arch_irq = mdesc->init_irq;
	system_timer = mdesc->timer;
	init_machine = mdesc->init_machine;

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
}


static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = 1;
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}

subsys_initcall(topology_init);

static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	NULL
};
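
/*
 * hwcap_str[i] names bit i of elf_hwcap, so its order must match the
 * HWCAP_* bit definitions used by the ARM ELF support code (HWCAP_SWP is
 * bit 0, HWCAP_HALF bit 1, and so on).  c_show() below walks the array
 * and prints one space-separated name per set bit on the "Features" line
 * of /proc/cpuinfo.
 */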

static void
c_show_cache(struct seq_file *m, const char *type, unsigned int cache)
{
	unsigned int mult = 2 + (CACHE_M(cache) ? 1 : 0);

	seq_printf(m, "%s size\t\t: %d\n"
		      "%s assoc\t\t: %d\n"
		      "%s line length\t: %d\n"
		      "%s sets\t\t: %d\n",
		type, mult << (8 + CACHE_SIZE(cache)),
		type, (mult << CACHE_ASSOC(cache)) >> 1,
		type, 8 << CACHE_LINE(cache),
		type, 1 << (6 + CACHE_SIZE(cache) - CACHE_ASSOC(cache) -
			    CACHE_LINE(cache)));
}

static int c_show(struct seq_file *m, void *v)
{
	int i;

	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
		   cpu_name, (int)processor_id & 15, elf_platform);

#if defined(CONFIG_SMP)
	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
	}
#else /* CONFIG_SMP */
	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000/HZ),
		   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

	/* dump out the processor features */
	seq_puts(m, "Features\t: ");

	for (i = 0; hwcap_str[i]; i++)
		if (elf_hwcap & (1 << i))
			seq_printf(m, "%s ", hwcap_str[i]);

	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", processor_id >> 24);
	seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

	if ((processor_id & 0x0008f000) == 0x00000000) {
		/* pre-ARM7 */
		seq_printf(m, "CPU part\t: %07x\n", processor_id >> 4);
	} else {
		if ((processor_id & 0x0008f000) == 0x00007000) {
			/* ARM7 */
			seq_printf(m, "CPU variant\t: 0x%02x\n",
				   (processor_id >> 16) & 127);
		} else {
			/* post-ARM7 */
			seq_printf(m, "CPU variant\t: 0x%x\n",
				   (processor_id >> 20) & 15);
		}
		seq_printf(m, "CPU part\t: 0x%03x\n",
			   (processor_id >> 4) & 0xfff);
	}
	seq_printf(m, "CPU revision\t: %d\n", processor_id & 15);

	{
		unsigned int cache_info = read_cpuid(CPUID_CACHETYPE);
		if (cache_info != processor_id) {
			seq_printf(m, "Cache type\t: %s\n"
				      "Cache clean\t: %s\n"
				      "Cache lockdown\t: %s\n"
				      "Cache format\t: %s\n",
				   cache_types[CACHE_TYPE(cache_info)],
				   cache_clean[CACHE_TYPE(cache_info)],
				   cache_lockdown[CACHE_TYPE(cache_info)],
				   CACHE_S(cache_info) ? "Harvard" : "Unified");

			if (CACHE_S(cache_info)) {
				c_show_cache(m, "I", CACHE_ISIZE(cache_info));
				c_show_cache(m, "D", CACHE_DSIZE(cache_info));
			} else {
				c_show_cache(m, "Cache", CACHE_ISIZE(cache_info));
			}
		}
	}

	seq_puts(m, "\n");

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};
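
/*
 * cpuinfo_op is the seq_file backend for /proc/cpuinfo: c_start() yields a
 * single dummy record on the first call and NULL afterwards, c_next()
 * always ends the iteration, and c_show() emits the whole report in one
 * pass; the generic proc code supplies the file_operations that wrap this
 * seq_operations table.
 */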
1037