xref: /openbmc/linux/arch/arm/kernel/setup.c (revision 384740dc)
1 /*
2  *  linux/arch/arm/kernel/setup.c
3  *
4  *  Copyright (C) 1995-2001 Russell King
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/stddef.h>
13 #include <linux/ioport.h>
14 #include <linux/delay.h>
15 #include <linux/utsname.h>
16 #include <linux/initrd.h>
17 #include <linux/console.h>
18 #include <linux/bootmem.h>
19 #include <linux/seq_file.h>
20 #include <linux/screen_info.h>
21 #include <linux/init.h>
22 #include <linux/root_dev.h>
23 #include <linux/cpu.h>
24 #include <linux/interrupt.h>
25 #include <linux/smp.h>
26 #include <linux/fs.h>
27 
28 #include <asm/cpu.h>
29 #include <asm/elf.h>
30 #include <asm/procinfo.h>
31 #include <asm/setup.h>
32 #include <asm/mach-types.h>
33 #include <asm/cacheflush.h>
34 #include <asm/tlbflush.h>
35 
36 #include <asm/mach/arch.h>
37 #include <asm/mach/irq.h>
38 #include <asm/mach/time.h>
39 #include <asm/traps.h>
40 
41 #include "compat.h"
42 #include "atags.h"
43 
/* Fallback RAM size (16MB) used for the default ATAG_MEM in init_tags. */
#ifndef MEM_SIZE
#define MEM_SIZE	(16*1024*1024)
#endif
47 
#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
/* FP emulator name selected with the "fpe=" boot argument. */
char fpe_type[8];

/*
 * Record the "fpe=" option value.  Use a bounded, NUL-aware copy:
 * the original memcpy(fpe_type, line, 8) read a fixed 8 bytes, which
 * can run past the end of a shorter option string and can leave
 * fpe_type unterminated when the option is 8+ characters long.
 */
static int __init fpe_setup(char *line)
{
	strlcpy(fpe_type, line, sizeof(fpe_type));
	return 1;
}

__setup("fpe=", fpe_setup);
#endif
59 
/* Defined elsewhere in arch/arm (paging setup, reboot mode, mount flags). */
extern void paging_init(struct meminfo *, struct machine_desc *desc);
extern void reboot_setup(char *str);
extern int root_mountflags;
/* Linker-script section boundary symbols (addresses only, never called). */
extern void _stext, _text, _etext, __data_start, _edata, _end;

/* Main CPU ID register value; matched against proc_info in setup_processor(). */
unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
/* Machine number identifying the board; used to look up the machine_desc. */
unsigned int __machine_arch_type;
EXPORT_SYMBOL(__machine_arch_type);

/* Physical address of the boot tag list; 0 means "use mdesc/defaults". */
unsigned int __atags_pointer __initdata;

/* Board revision, from ATAG_REVISION; shown in /proc/cpuinfo. */
unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

/* Board serial number halves, from ATAG_SERIAL; shown in /proc/cpuinfo. */
unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

/* CPU capability bits taken from the matched proc_info entry. */
unsigned int elf_hwcap;
EXPORT_SYMBOL(elf_hwcap);

/* Size of the vmalloc area; overridable via "vmalloc=" (default 128MB). */
unsigned long __initdata vmalloc_reserve = 128 << 20;
85 
86 
/*
 * When the kernel supports more than one CPU/TLB/user/cache variant,
 * these operation tables are copied from the matched proc_info_list
 * entry in setup_processor().
 */
#ifdef MULTI_CPU
struct processor processor;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache;
#endif
#ifdef CONFIG_OUTER_CACHE
/* Outer (L2) cache operations; populated by platform code, not here. */
struct outer_cache_fns outer_cache;
#endif
102 
/*
 * Tiny per-CPU stacks for the exception modes.  cpu_init() points the
 * IRQ, abort and undefined-instruction banked stack pointers just past
 * these three-word areas.
 */
struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];
110 
/* CPU ELF name plus endianness suffix (e.g. "v5l"); shown in /proc/cpuinfo. */
char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

/* Initrd physical location/size, from "initrd=" or ATAG_INITRD{,2}. */
unsigned long phys_initrd_start __initdata = 0;
unsigned long phys_initrd_size __initdata = 0;

/* Memory banks accumulated from ATAG_MEM, "mem=" and mdesc->fixup. */
static struct meminfo meminfo __initdata = { 0, };
static const char *cpu_name;
static const char *machine_name;
/* Command line left over after early "__early_param" options are consumed. */
static char __initdata command_line[COMMAND_LINE_SIZE];

static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
/* Runtime endianness probe: first byte of the word is 'l' on LE, 'b' on BE. */
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
127 
128 /*
129  * Standard memory resources
130  */
/*
 * Standard memory resources; start/end are filled in and registered by
 * request_standard_resources().
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel text",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

/* Convenience aliases into mem_res[]. */
#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]
155 
/*
 * Legacy PC-style parallel-port I/O ranges, reserved only when the
 * machine descriptor sets the corresponding reserve_lp* flag.
 */
static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

/* Convenience aliases into io_res[]. */
#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]
180 
/* Cache write policy names, indexed by CACHE_TYPE() (ctype field). */
static const char *cache_types[16] = {
	"write-through",
	"write-back",
	"write-back",
	"undefined 3",
	"undefined 4",
	"undefined 5",
	"write-back",
	"write-back",
	"undefined 8",
	"undefined 9",
	"undefined 10",
	"undefined 11",
	"undefined 12",
	"undefined 13",
	"write-back",
	"undefined 15",
};

/* Cache clean method names, indexed by the same ctype field. */
static const char *cache_clean[16] = {
	"not required",
	"read-block",
	"cp15 c7 ops",
	"undefined 3",
	"undefined 4",
	"undefined 5",
	"cp15 c7 ops",
	"cp15 c7 ops",
	"undefined 8",
	"undefined 9",
	"undefined 10",
	"undefined 11",
	"undefined 12",
	"undefined 13",
	"cp15 c7 ops",
	"undefined 15",
};

/* Cache lockdown format names, indexed by the same ctype field. */
static const char *cache_lockdown[16] = {
	"not supported",
	"not supported",
	"not supported",
	"undefined 3",
	"undefined 4",
	"undefined 5",
	"format A",
	"format B",
	"undefined 8",
	"undefined 9",
	"undefined 10",
	"undefined 11",
	"undefined 12",
	"undefined 13",
	"format C",
	"undefined 15",
};

/* Architecture version strings, indexed by cpu_architecture()'s result. */
static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};
257 
/* Field extractors for the whole cache type register value (x). */
#define CACHE_TYPE(x)	(((x) >> 25) & 15)
#define CACHE_S(x)	((x) & (1 << 24))
#define CACHE_DSIZE(x)	(((x) >> 12) & 4095)	/* only if S=1 */
#define CACHE_ISIZE(x)	((x) & 4095)

/* Field extractors for one 12-bit I/D cache size sub-field (y). */
#define CACHE_SIZE(y)	(((y) >> 6) & 7)
#define CACHE_ASSOC(y)	(((y) >> 3) & 7)
#define CACHE_M(y)	((y) & (1 << 2))
#define CACHE_LINE(y)	((y) & 3)
267 
/*
 * Print the geometry encoded in one 12-bit I/D cache field: total size,
 * associativity, line length and number of sets.
 */
static inline void dump_cache(const char *prefix, int cpu, unsigned int cache)
{
	/* mult is 2 or 3; sizes below are scaled by mult/2 via the >>1. */
	unsigned int mult = 2 + (CACHE_M(cache) ? 1 : 0);

	printk("CPU%u: %s: %d bytes, associativity %d, %d byte lines, %d sets\n",
		cpu, prefix,
		mult << (8 + CACHE_SIZE(cache)),
		(mult << CACHE_ASSOC(cache)) >> 1,
		8 << CACHE_LINE(cache),
		1 << (6 + CACHE_SIZE(cache) - CACHE_ASSOC(cache) -
			CACHE_LINE(cache)));
}
280 
/*
 * Print this CPU's cache configuration at boot.  Reading CPUID_CACHETYPE
 * on a core without a cache type register yields the main ID value again
 * (so info == processor_id means "nothing to report").
 */
static void __init dump_cpu_info(int cpu)
{
	unsigned int info = read_cpuid(CPUID_CACHETYPE);

	if (info != processor_id) {
		printk("CPU%u: D %s %s cache\n", cpu, cache_is_vivt() ? "VIVT" : "VIPT",
		       cache_types[CACHE_TYPE(info)]);
		if (CACHE_S(info)) {
			/* Harvard (separate) caches: report I and D individually. */
			dump_cache("I cache", cpu, CACHE_ISIZE(info));
			dump_cache("D cache", cpu, CACHE_DSIZE(info));
		} else {
			/* Unified cache: geometry lives in the ISIZE field. */
			dump_cache("cache", cpu, CACHE_ISIZE(info));
		}
	}

	if (arch_is_coherent())
		printk("Cache coherency enabled\n");
}
299 
/*
 * Decode the architecture version (CPU_ARCH_*) from the main ID
 * register, handling the several historical ID encodings.
 */
int cpu_architecture(void)
{
	int cpu_arch;

	if ((processor_id & 0x0008f000) == 0) {
		/* Pre-ARM7 style IDs carry no architecture field. */
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((processor_id & 0x0008f000) == 0x00007000) {
		/* ARM7 family: bit 23 distinguishes ARMv4T from ARMv3. */
		cpu_arch = (processor_id & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((processor_id & 0x00080000) == 0x00000000) {
		/* Post-ARM7: 3-bit architecture field at bits 18..16. */
		cpu_arch = (processor_id >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((processor_id & 0x000f0000) == 0x000f0000) {
		unsigned int mmfr0;

		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		asm("mrc	p15, 0, %0, c0, c1, 4"
		    : "=r" (mmfr0));
		if ((mmfr0 & 0x0000000f) == 0x00000003 ||
		    (mmfr0 & 0x000000f0) == 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
332 
333 /*
334  * These functions re-use the assembly code in head.S, which
335  * already provide the required functionality.
336  */
337 extern struct proc_info_list *lookup_processor_type(unsigned int);
338 extern struct machine_desc *lookup_machine_type(unsigned int);
339 
/*
 * Match processor_id against the linker-built proc_info table, then
 * install the per-variant operation tables, record the CPU/platform
 * names and hwcaps, and run the processor's own init hook.  Halts
 * (infinite loop) if the CPU is not in the table — we cannot continue
 * without processor support.
 */
static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(processor_id);
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", processor_id);
		while (1);
	}

	cpu_name = list->cpu_name;

	/* Copy the matched variant's ops into the MULTI_* tables. */
#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, processor_id, (int)processor_id & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	/* e.g. machine "armv5tel", elf_platform "v5l". */
	sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
	sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
	/* Don't advertise Thumb when the kernel can't support it. */
	elf_hwcap &= ~HWCAP_THUMB;
#endif

	cpu_proc_init();
}
384 
385 /*
386  * cpu_init - initialise one CPU.
387  *
388  * cpu_init dumps the cache information, initialises SMP specific
389  * information, and sets up the per-CPU stacks.
390  */
/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init dumps the cache information, initialises SMP specific
 * information, and sets up the per-CPU stacks.
 */
void cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/* Only print cache info during the initial boot, not on hotplug. */
	if (system_state == SYSTEM_BOOTING)
		dump_cpu_info(cpu);

	/*
	 * setup stacks for re-entrant exception handlers
	 */
	/*
	 * Switch through IRQ, abort and undefined modes in turn (with
	 * IRQ/FIQ masked), setting each mode's banked sp just past its
	 * three-word area in 'stk', then return to SVC mode.  r14 is
	 * clobbered by the mode switches.
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	sp, %0, %2\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	sp, %0, %4\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	sp, %0, %6\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      "I" (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      "I" (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      "I" (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      "I" (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}
426 
427 static struct machine_desc * __init setup_machine(unsigned int nr)
428 {
429 	struct machine_desc *list;
430 
431 	/*
432 	 * locate machine in the list of supported machines.
433 	 */
434 	list = lookup_machine_type(nr);
435 	if (!list) {
436 		printk("Machine configuration botched (nr %d), unable "
437 		       "to continue.\n", nr);
438 		while (1);
439 	}
440 
441 	printk("Machine: %s\n", list->name);
442 
443 	return list;
444 }
445 
/*
 * Parse "initrd=start,size" (physical address and byte size, both
 * memparse-style with K/M suffixes).  The option is ignored unless
 * both parts are present.
 */
static void __init early_initrd(char **p)
{
	unsigned long start, size;

	start = memparse(*p, p);
	if (**p == ',') {
		size = memparse((*p) + 1, p);

		phys_initrd_start = start;
		phys_initrd_size = size;
	}
}
__early_param("initrd=", early_initrd);
459 
460 static void __init arm_add_memory(unsigned long start, unsigned long size)
461 {
462 	struct membank *bank;
463 
464 	/*
465 	 * Ensure that start/size are aligned to a page boundary.
466 	 * Size is appropriately rounded down, start is rounded up.
467 	 */
468 	size -= start & ~PAGE_MASK;
469 
470 	bank = &meminfo.bank[meminfo.nr_banks++];
471 
472 	bank->start = PAGE_ALIGN(start);
473 	bank->size  = size & PAGE_MASK;
474 	bank->node  = PHYS_TO_NID(start);
475 }
476 
477 /*
478  * Pick out the memory size.  We look for mem=size@start,
479  * where start and size are "size[KkMm]"
480  */
/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
static void __init early_mem(char **p)
{
	static int usermem __initdata = 0;
	unsigned long size, start;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	/* Only reset on the first mem=; later ones accumulate banks. */
	if (usermem == 0) {
		usermem = 1;
		meminfo.nr_banks = 0;
	}

	/* "@start" is optional; default the bank to PHYS_OFFSET. */
	start = PHYS_OFFSET;
	size  = memparse(*p, p);
	if (**p == '@')
		start = memparse(*p + 1, p);

	arm_add_memory(start, size);
}
__early_param("mem=", early_mem);
504 
/*
 * vmalloc=size forces the vmalloc area to be exactly 'size'
 * bytes. This can be used to increase (or decrease) the vmalloc
 * area - the default is 128m.
 */
static void __init early_vmalloc(char **arg)
{
	/* memparse accepts K/M/G suffixes and advances *arg. */
	vmalloc_reserve = memparse(*arg, arg);
}
__early_param("vmalloc=", early_vmalloc);
515 
516 /*
517  * Initial parsing of the command line.
518  */
519 static void __init parse_cmdline(char **cmdline_p, char *from)
520 {
521 	char c = ' ', *to = command_line;
522 	int len = 0;
523 
524 	for (;;) {
525 		if (c == ' ') {
526 			extern struct early_params __early_begin, __early_end;
527 			struct early_params *p;
528 
529 			for (p = &__early_begin; p < &__early_end; p++) {
530 				int len = strlen(p->arg);
531 
532 				if (memcmp(from, p->arg, len) == 0) {
533 					if (to != command_line)
534 						to -= 1;
535 					from += len;
536 					p->fn(&from);
537 
538 					while (*from != ' ' && *from != '\0')
539 						from++;
540 					break;
541 				}
542 			}
543 		}
544 		c = *from++;
545 		if (!c)
546 			break;
547 		if (COMMAND_LINE_SIZE <= ++len)
548 			break;
549 		*to++ = c;
550 	}
551 	*to = '\0';
552 	*cmdline_p = command_line;
553 }
554 
/*
 * Configure the RAM-disk driver's module parameters from ATAG_RAMDISK:
 * whether to load, whether to prompt, the floppy image start block,
 * and (if non-zero) the RAM-disk size.  No-op without CONFIG_BLK_DEV_RAM.
 */
static void __init
setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
{
#ifdef CONFIG_BLK_DEV_RAM
	extern int rd_size, rd_image_start, rd_prompt, rd_doload;

	rd_image_start = image_start;
	rd_prompt = prompt;
	rd_doload = doload;

	/* Only override the driver's default size when one was given. */
	if (rd_sz)
		rd_size = rd_sz;
#endif
}
569 
/*
 * Register the standard resource tree: one "System RAM" iomem resource
 * per memory bank, with the kernel text/data ranges nested inside the
 * bank that contains them, plus optional video RAM and legacy parallel
 * port ranges from the machine descriptor.
 */
static void __init
request_standard_resources(struct meminfo *mi, struct machine_desc *mdesc)
{
	struct resource *res;
	int i;

	/* Physical extents of the kernel image, from linker symbols. */
	kernel_code.start   = virt_to_phys(&_text);
	kernel_code.end     = virt_to_phys(&_etext - 1);
	kernel_data.start   = virt_to_phys(&__data_start);
	kernel_data.end     = virt_to_phys(&_end - 1);

	for (i = 0; i < mi->nr_banks; i++) {
		unsigned long virt_start, virt_end;

		if (mi->bank[i].size == 0)
			continue;

		virt_start = __phys_to_virt(mi->bank[i].start);
		virt_end   = virt_start + mi->bank[i].size - 1;

		/* Resource lives for the whole uptime; bootmem is fine here. */
		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __virt_to_phys(virt_start);
		res->end   = __virt_to_phys(virt_end);
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		/* Nest the kernel image inside the bank that holds it. */
		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}
623 
624 /*
625  *  Tag parsing.
626  *
627  * This is the new way of passing data to the kernel at boot time.  Rather
628  * than passing a fixed inflexible structure to the kernel, we pass a list
629  * of variable-sized tags to the kernel.  The first tag must be a ATAG_CORE
630  * tag for the list to be recognised (to distinguish the tagged list from
631  * a param_struct).  The list is terminated with a zero-length tag (this tag
632  * is not parsed in any way).
633  */
/*
 * ATAG_CORE: optional root-device and mount flags.  A minimal (header
 * only, size <= 2 words) core tag carries no payload and is skipped.
 */
static int __init parse_tag_core(const struct tag *tag)
{
	if (tag->hdr.size > 2) {
		/* Bit 0 clear means "mount root read-write". */
		if ((tag->u.core.flags & 1) == 0)
			root_mountflags &= ~MS_RDONLY;
		ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
	}
	return 0;
}

__tagtable(ATAG_CORE, parse_tag_core);
645 
/*
 * ATAG_MEM: one bank of physical RAM (start, size).  Banks beyond
 * NR_BANKS are dropped with a warning rather than overflowing meminfo.
 */
static int __init parse_tag_mem32(const struct tag *tag)
{
	if (meminfo.nr_banks >= NR_BANKS) {
		printk(KERN_WARNING
		       "Ignoring memory bank 0x%08x size %dKB\n",
			tag->u.mem.start, tag->u.mem.size / 1024);
		return -EINVAL;
	}
	arm_add_memory(tag->u.mem.start, tag->u.mem.size);
	return 0;
}

__tagtable(ATAG_MEM, parse_tag_mem32);
659 
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
/* Default console geometry (80x30 VGA text), overridden by ATAG_VIDEOTEXT. */
struct screen_info screen_info = {
 .orig_video_lines	= 30,
 .orig_video_cols	= 80,
 .orig_video_mode	= 0,
 .orig_video_ega_bx	= 0,
 .orig_video_isVGA	= 1,
 .orig_video_points	= 8
};

/* ATAG_VIDEOTEXT: copy the bootloader's VGA text-mode state verbatim. */
static int __init parse_tag_videotext(const struct tag *tag)
{
	screen_info.orig_x            = tag->u.videotext.x;
	screen_info.orig_y            = tag->u.videotext.y;
	screen_info.orig_video_page   = tag->u.videotext.video_page;
	screen_info.orig_video_mode   = tag->u.videotext.video_mode;
	screen_info.orig_video_cols   = tag->u.videotext.video_cols;
	screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
	screen_info.orig_video_lines  = tag->u.videotext.video_lines;
	screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
	screen_info.orig_video_points = tag->u.videotext.video_points;
	return 0;
}

__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
#endif
686 
/*
 * ATAG_RAMDISK: flag bit 0 clear = load the ramdisk, bit 1 clear =
 * prompt for it; start/size are forwarded to the RAM-disk driver.
 */
static int __init parse_tag_ramdisk(const struct tag *tag)
{
	setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
		      (tag->u.ramdisk.flags & 2) == 0,
		      tag->u.ramdisk.start, tag->u.ramdisk.size);
	return 0;
}

__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);
696 
/*
 * ATAG_INITRD (deprecated): the start address is virtual, so convert
 * it to physical.  Bootloaders should use ATAG_INITRD2 instead.
 */
static int __init parse_tag_initrd(const struct tag *tag)
{
	printk(KERN_WARNING "ATAG_INITRD is deprecated; "
		"please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);
707 
/* ATAG_INITRD2: initrd location given as a physical address directly. */
static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);
716 
/* ATAG_SERIAL: 64-bit board serial number, stored as two 32-bit halves. */
static int __init parse_tag_serialnr(const struct tag *tag)
{
	system_serial_low = tag->u.serialnr.low;
	system_serial_high = tag->u.serialnr.high;
	return 0;
}

__tagtable(ATAG_SERIAL, parse_tag_serialnr);
725 
/* ATAG_REVISION: board revision, reported in /proc/cpuinfo. */
static int __init parse_tag_revision(const struct tag *tag)
{
	system_rev = tag->u.revision.rev;
	return 0;
}

__tagtable(ATAG_REVISION, parse_tag_revision);
733 
/* ATAG_CMDLINE: replace the compiled-in default command line. */
static int __init parse_tag_cmdline(const struct tag *tag)
{
	strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
	return 0;
}

__tagtable(ATAG_CMDLINE, parse_tag_cmdline);
741 
742 /*
743  * Scan the tag table for this tag, and call its parse function.
744  * The tag table is built by the linker from all the __tagtable
745  * declarations.
746  */
/*
 * Scan the tag table for this tag, and call its parse function.
 * The tag table is built by the linker from all the __tagtable
 * declarations.
 *
 * Returns non-zero if a handler was found (the loop broke early),
 * zero if the tag is unrecognised (t reached the table's end).
 */
static int __init parse_tag(const struct tag *tag)
{
	extern struct tagtable __tagtable_begin, __tagtable_end;
	struct tagtable *t;

	for (t = &__tagtable_begin; t < &__tagtable_end; t++)
		if (tag->hdr.tag == t->tag) {
			t->parse(tag);
			break;
		}

	return t < &__tagtable_end;
}
760 
761 /*
762  * Parse all tags in the list, checking both the global and architecture
763  * specific tag tables.
764  */
/*
 * Parse all tags in the list, checking both the global and architecture
 * specific tag tables.
 */
static void __init parse_tags(const struct tag *t)
{
	/* The list is terminated by a zero-size tag header. */
	for (; t->hdr.size; t = tag_next(t))
		if (!parse_tag(t))
			printk(KERN_WARNING
				"Ignoring unrecognised tag 0x%08x\n",
				t->hdr.tag);
}
773 
774 /*
775  * This holds our defaults.
776  */
/*
 * This holds our defaults.
 */
/*
 * Used when the bootloader supplies no (valid) tag list: a core tag
 * (read-only root, PAGE_SIZE, rootdev 0xff), one MEM_SIZE memory bank
 * at PHYS_OFFSET, and the zero-size terminator.
 */
static struct init_tags {
	struct tag_header hdr1;
	struct tag_core   core;
	struct tag_header hdr2;
	struct tag_mem32  mem;
	struct tag_header hdr3;
} init_tags __initdata = {
	{ tag_size(tag_core), ATAG_CORE },
	{ 1, PAGE_SIZE, 0xff },
	{ tag_size(tag_mem32), ATAG_MEM },
	{ MEM_SIZE, PHYS_OFFSET },
	{ 0, ATAG_NONE }
};
790 
/* Machine-specific init hook, recorded from mdesc by setup_arch(). */
static void (*init_machine)(void) __initdata;

/*
 * Run the board's init_machine hook (if any) at arch_initcall time,
 * i.e. after core kernel setup but before device initcalls.
 */
static int __init customize_machine(void)
{
	/* customizes platform devices, or adds new ones */
	if (init_machine)
		init_machine();
	return 0;
}
arch_initcall(customize_machine);
801 
/*
 * Architecture-specific boot-time setup.  Identifies the CPU and
 * machine, locates and parses the boot tag list, sets up memory and
 * paging, claims standard resources, initialises the boot CPU, and
 * records the machine's IRQ/timer/init hooks.  On return, *cmdline_p
 * points at the parsed kernel command line.
 */
void __init setup_arch(char **cmdline_p)
{
	struct tag *tags = (struct tag *)&init_tags;
	struct machine_desc *mdesc;
	char *from = default_command_line;

	setup_processor();
	mdesc = setup_machine(machine_arch_type);
	machine_name = mdesc->name;

	if (mdesc->soft_reboot)
		reboot_setup("s");

	/* Prefer the register-passed atags pointer over mdesc->boot_params. */
	if (__atags_pointer)
		tags = phys_to_virt(__atags_pointer);
	else if (mdesc->boot_params)
		tags = phys_to_virt(mdesc->boot_params);

	/*
	 * If we have the old style parameters, convert them to
	 * a tag list.
	 */
	if (tags->hdr.tag != ATAG_CORE)
		convert_to_tag_list(tags);
	/* Conversion failed too: fall back to the built-in defaults. */
	if (tags->hdr.tag != ATAG_CORE)
		tags = (struct tag *)&init_tags;

	/* Give the board a chance to amend tags/cmdline/meminfo. */
	if (mdesc->fixup)
		mdesc->fixup(mdesc, tags, &from, &meminfo);

	if (tags->hdr.tag == ATAG_CORE) {
		/* fixup-supplied memory takes precedence over ATAG_MEM. */
		if (meminfo.nr_banks != 0)
			squash_mem_tags(tags);
		save_atags(tags);
		parse_tags(tags);
	}

	init_mm.start_code = (unsigned long) &_text;
	init_mm.end_code   = (unsigned long) &_etext;
	init_mm.end_data   = (unsigned long) &_edata;
	init_mm.brk	   = (unsigned long) &_end;

	/* Keep a pristine copy before early params are stripped out. */
	memcpy(boot_command_line, from, COMMAND_LINE_SIZE);
	boot_command_line[COMMAND_LINE_SIZE-1] = '\0';
	parse_cmdline(cmdline_p, from);
	paging_init(&meminfo, mdesc);
	request_standard_resources(&meminfo, mdesc);

#ifdef CONFIG_SMP
	smp_init_cpus();
#endif

	cpu_init();

	/*
	 * Set up various architecture-specific pointers
	 */
	init_arch_irq = mdesc->init_irq;
	system_timer = mdesc->timer;
	init_machine = mdesc->init_machine;

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
	early_trap_init();
}
872 
873 
/*
 * Register a CPU device for every possible CPU with the sysfs topology
 * layer, marking each as hotpluggable.
 */
static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = 1;
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}

subsys_initcall(topology_init);
888 
/*
 * Feature names for /proc/cpuinfo; position i corresponds to bit i of
 * elf_hwcap, so the order must match the HWCAP_* bit definitions.
 */
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	NULL
};
903 
/*
 * Emit one cache's geometry (size, associativity, line length, sets)
 * into /proc/cpuinfo; 'cache' is a 12-bit I/D field of the cache type
 * register, decoded exactly as in dump_cache().
 */
static void
c_show_cache(struct seq_file *m, const char *type, unsigned int cache)
{
	/* mult is 2 or 3; sizes below are scaled by mult/2 via the >>1. */
	unsigned int mult = 2 + (CACHE_M(cache) ? 1 : 0);

	seq_printf(m, "%s size\t\t: %d\n"
		      "%s assoc\t\t: %d\n"
		      "%s line length\t: %d\n"
		      "%s sets\t\t: %d\n",
		type, mult << (8 + CACHE_SIZE(cache)),
		type, (mult << CACHE_ASSOC(cache)) >> 1,
		type, 8 << CACHE_LINE(cache),
		type, 1 << (6 + CACHE_SIZE(cache) - CACHE_ASSOC(cache) -
			    CACHE_LINE(cache)));
}
919 
/*
 * /proc/cpuinfo show routine: CPU name and features, the decoded main
 * ID register fields, cache details, and board identification.
 */
static int c_show(struct seq_file *m, void *v)
{
	int i;

	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
		   cpu_name, (int)processor_id & 15, elf_platform);

#if defined(CONFIG_SMP)
	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
	}
#else /* CONFIG_SMP */
	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000/HZ),
		   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

	/* dump out the processor features */
	seq_puts(m, "Features\t: ");

	for (i = 0; hwcap_str[i]; i++)
		if (elf_hwcap & (1 << i))
			seq_printf(m, "%s ", hwcap_str[i]);

	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", processor_id >> 24);
	seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

	/* The ID decode below mirrors the branches in cpu_architecture(). */
	if ((processor_id & 0x0008f000) == 0x00000000) {
		/* pre-ARM7 */
		seq_printf(m, "CPU part\t: %07x\n", processor_id >> 4);
	} else {
		if ((processor_id & 0x0008f000) == 0x00007000) {
			/* ARM7 */
			seq_printf(m, "CPU variant\t: 0x%02x\n",
				   (processor_id >> 16) & 127);
		} else {
			/* post-ARM7 */
			seq_printf(m, "CPU variant\t: 0x%x\n",
				   (processor_id >> 20) & 15);
		}
		seq_printf(m, "CPU part\t: 0x%03x\n",
			   (processor_id >> 4) & 0xfff);
	}
	seq_printf(m, "CPU revision\t: %d\n", processor_id & 15);

	{
		unsigned int cache_info = read_cpuid(CPUID_CACHETYPE);
		/* Equal values mean no cache type register (see dump_cpu_info). */
		if (cache_info != processor_id) {
			seq_printf(m, "Cache type\t: %s\n"
				      "Cache clean\t: %s\n"
				      "Cache lockdown\t: %s\n"
				      "Cache format\t: %s\n",
				   cache_types[CACHE_TYPE(cache_info)],
				   cache_clean[CACHE_TYPE(cache_info)],
				   cache_lockdown[CACHE_TYPE(cache_info)],
				   CACHE_S(cache_info) ? "Harvard" : "Unified");

			if (CACHE_S(cache_info)) {
				c_show_cache(m, "I", CACHE_ISIZE(cache_info));
				c_show_cache(m, "D", CACHE_DSIZE(cache_info));
			} else {
				c_show_cache(m, "Cache", CACHE_ISIZE(cache_info));
			}
		}
	}

	seq_puts(m, "\n");

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}
1003 
1004 static void *c_start(struct seq_file *m, loff_t *pos)
1005 {
1006 	return *pos < 1 ? (void *)1 : NULL;
1007 }
1008 
1009 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1010 {
1011 	++*pos;
1012 	return NULL;
1013 }
1014 
/* seq_file stop: nothing was allocated in c_start, so nothing to release. */
static void c_stop(struct seq_file *m, void *v)
{
}
1018 
/* seq_file operations backing /proc/cpuinfo. */
const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};
1025