/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/tty.h>
#include <linux/init.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>

#include <asm/cpu.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>

#ifndef MEM_SIZE
#define MEM_SIZE	(16*1024*1024)
#endif

#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern unsigned int mem_fclk_21285;
extern void paging_init(struct meminfo *, struct machine_desc *desc);
extern void convert_to_tag_list(struct tag *tags);
extern void squash_mem_tags(struct tag *tag);
extern void reboot_setup(char *str);
extern int root_mountflags;
extern void _stext, _text, _etext, __data_start, _edata, _end;

unsigned int processor_id;
unsigned int __machine_arch_type;
EXPORT_SYMBOL(__machine_arch_type);

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap;
EXPORT_SYMBOL(elf_hwcap);


#ifdef MULTI_CPU
struct processor processor;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache;
#endif

struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

unsigned long phys_initrd_start __initdata = 0;
unsigned long phys_initrd_size __initdata = 0;

static struct meminfo meminfo __initdata = { 0, };
static const char *cpu_name;
static const char *machine_name;
static char command_line[COMMAND_LINE_SIZE];

static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)
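/*
 * ENDIANNESS reads the union above: casting the long to char keeps only
 * its least significant byte, which is c[0] ('l') on a little-endian CPU
 * and the last byte ('b') on a big-endian one.  The resulting character
 * is appended to the utsname machine string and the ELF platform name in
 * setup_processor() below.
 */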

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{ "Video RAM",   0,     0,     IORESOURCE_MEM			},
	{ "Kernel text", 0,     0,     IORESOURCE_MEM			},
	{ "Kernel data", 0,     0,     IORESOURCE_MEM			}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
	{ "reserved",    0x3bc, 0x3be, IORESOURCE_IO | IORESOURCE_BUSY },
	{ "reserved",    0x378, 0x37f, IORESOURCE_IO | IORESOURCE_BUSY },
	{ "reserved",    0x278, 0x27f, IORESOURCE_IO | IORESOURCE_BUSY }
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *cache_types[16] = {
	"write-through",
	"write-back",
	"write-back",
	"undefined 3",
	"undefined 4",
	"undefined 5",
	"write-back",
	"write-back",
	"undefined 8",
	"undefined 9",
	"undefined 10",
	"undefined 11",
	"undefined 12",
	"undefined 13",
	"write-back",
	"undefined 15",
};

static const char *cache_clean[16] = {
	"not required",
	"read-block",
	"cp15 c7 ops",
	"undefined 3",
	"undefined 4",
	"undefined 5",
	"cp15 c7 ops",
	"cp15 c7 ops",
	"undefined 8",
	"undefined 9",
	"undefined 10",
	"undefined 11",
	"undefined 12",
	"undefined 13",
	"cp15 c7 ops",
	"undefined 15",
};

static const char *cache_lockdown[16] = {
	"not supported",
	"not supported",
	"not supported",
	"undefined 3",
	"undefined 4",
	"undefined 5",
	"format A",
	"format B",
	"undefined 8",
	"undefined 9",
	"undefined 10",
	"undefined 11",
	"undefined 12",
	"undefined 13",
	"format C",
	"undefined 15",
};

static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"?(10)",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

#define CACHE_TYPE(x)	(((x) >> 25) & 15)
#define CACHE_S(x)	((x) & (1 << 24))
#define CACHE_DSIZE(x)	(((x) >> 12) & 4095)	/* only if S=1 */
#define CACHE_ISIZE(x)	((x) & 4095)

#define CACHE_SIZE(y)	(((y) >> 6) & 7)
#define CACHE_ASSOC(y)	(((y) >> 3) & 7)
#define CACHE_M(y)	((y) & (1 << 2))
#define CACHE_LINE(y)	((y) & 3)
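/*
 * The macros above pick apart the cache type register read by
 * dump_cpu_info() and c_show(): bits [28:25] give the cache type, bit 24
 * (S) says whether separate I and D caches are present, and bits [23:12]
 * and [11:0] describe the D and I cache geometry.  Within each geometry
 * field, bits [8:6] encode the size, [5:3] the associativity, bit 2 the
 * size multiplier (M) and bits [1:0] the line length, exactly as consumed
 * by dump_cache() below.
 *
 * Worked example using those formulas: SIZE=5, ASSOC=2, M=0, LINE=2
 * decodes as 2 << (8 + 5) = 16384 bytes, (2 << 2) >> 1 = 4-way,
 * 8 << 2 = 32-byte lines and 1 << (6 + 5 - 2 - 2) = 128 sets
 * (128 sets * 4 ways * 32 bytes = 16 KB, as expected).
 */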

static inline void dump_cache(const char *prefix, int cpu, unsigned int cache)
{
	unsigned int mult = 2 + (CACHE_M(cache) ? 1 : 0);

	printk("CPU%u: %s: %d bytes, associativity %d, %d byte lines, %d sets\n",
		cpu, prefix,
		mult << (8 + CACHE_SIZE(cache)),
		(mult << CACHE_ASSOC(cache)) >> 1,
		8 << CACHE_LINE(cache),
		1 << (6 + CACHE_SIZE(cache) - CACHE_ASSOC(cache) -
			CACHE_LINE(cache)));
}

static void __init dump_cpu_info(int cpu)
{
	unsigned int info = read_cpuid(CPUID_CACHETYPE);

	if (info != processor_id) {
		printk("CPU%u: D %s %s cache\n", cpu, cache_is_vivt() ? "VIVT" : "VIPT",
		       cache_types[CACHE_TYPE(info)]);
		if (CACHE_S(info)) {
			dump_cache("I cache", cpu, CACHE_ISIZE(info));
			dump_cache("D cache", cpu, CACHE_DSIZE(info));
		} else {
			dump_cache("cache", cpu, CACHE_ISIZE(info));
		}
	}
}

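/*
 * Decode the architecture version from the CPU ID in processor_id, as the
 * code below reads it: an ID with 0 in bits [15:12] is treated as unknown,
 * a value of 0x7 there marks the ARM7 family (bit 23 then selects ARMv4T
 * over ARMv3), and anything else carries an architecture code in bits
 * [18:16] counted up from CPU_ARCH_ARMv3.
 */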
int cpu_architecture(void)
{
	int cpu_arch;

	if ((processor_id & 0x0000f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((processor_id & 0x0000f000) == 0x00007000) {
		cpu_arch = (processor_id & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else {
		cpu_arch = (processor_id >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	}

	return cpu_arch;
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(void);
extern struct machine_desc *lookup_machine_type(unsigned int);

static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type();
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", processor_id);
		while (1);
	}

	cpu_name = list->cpu_name;

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s)\n",
	       cpu_name, processor_id, (int)processor_id & 15,
	       proc_arch[cpu_architecture()]);

	sprintf(system_utsname.machine, "%s%c", list->arch_name, ENDIANNESS);
	sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;

	cpu_proc_init();
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init dumps the cache information, initialises SMP specific
 * information, and sets up the per-CPU stacks.
 */
void cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	if (system_state == SYSTEM_BOOTING)
		dump_cpu_info(cpu);
	/*
	 * set up stacks for re-entrant exception handlers
	 */
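	/*
	 * The inline assembly below switches into IRQ, abort and undefined
	 * mode in turn (with IRQs and FIQs masked), points each mode's
	 * banked sp at its three-word area inside 'stk', and finally drops
	 * back into SVC mode.  The exception vector entry code can then use
	 * these small per-mode stacks to save state on exception entry.
	 */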
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	sp, %0, %2\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	sp, %0, %4\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	sp, %0, %6\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      "I" (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      "I" (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      "I" (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      "I" (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}

static struct machine_desc * __init setup_machine(unsigned int nr)
{
	struct machine_desc *list;

	/*
	 * locate machine in the list of supported machines.
	 */
	list = lookup_machine_type(nr);
	if (!list) {
		printk("Machine configuration botched (nr %d), unable "
		       "to continue.\n", nr);
		while (1);
	}

	printk("Machine: %s\n", list->name);

	return list;
}

static void __init early_initrd(char **p)
{
	unsigned long start, size;

	start = memparse(*p, p);
	if (**p == ',') {
		size = memparse((*p) + 1, p);

		phys_initrd_start = start;
		phys_initrd_size = size;
	}
}
__early_param("initrd=", early_initrd);
static void __init add_memory(unsigned long start, unsigned long size)
{
	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is appropriately rounded down, start is rounded up.
	 */
	size -= start & ~PAGE_MASK;

	meminfo.bank[meminfo.nr_banks].start = PAGE_ALIGN(start);
	meminfo.bank[meminfo.nr_banks].size  = size & PAGE_MASK;
	meminfo.bank[meminfo.nr_banks].node  = PHYS_TO_NID(start);
	meminfo.nr_banks += 1;
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
static void __init early_mem(char **p)
{
	static int usermem __initdata = 0;
	unsigned long size, start;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		meminfo.nr_banks = 0;
	}

	start = PHYS_OFFSET;
	size  = memparse(*p, p);
	if (**p == '@')
		start = memparse(*p + 1, p);

	add_memory(start, size);
}
__early_param("mem=", early_mem);

/*
 * Initial parsing of the command line.
 */
static void __init parse_cmdline(char **cmdline_p, char *from)
{
	char c = ' ', *to = command_line;
	int len = 0;

	for (;;) {
		if (c == ' ') {
			extern struct early_params __early_begin, __early_end;
			struct early_params *p;

			for (p = &__early_begin; p < &__early_end; p++) {
				int len = strlen(p->arg);

				if (memcmp(from, p->arg, len) == 0) {
					if (to != command_line)
						to -= 1;
					from += len;
					p->fn(&from);

					while (*from != ' ' && *from != '\0')
						from++;
					break;
				}
			}
		}
		c = *from++;
		if (!c)
			break;
		if (COMMAND_LINE_SIZE <= ++len)
			break;
		*to++ = c;
	}
	*to = '\0';
	*cmdline_p = command_line;
}

static void __init
setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
{
#ifdef CONFIG_BLK_DEV_RAM
	extern int rd_size, rd_image_start, rd_prompt, rd_doload;

	rd_image_start = image_start;
	rd_prompt = prompt;
	rd_doload = doload;

	if (rd_sz)
		rd_size = rd_sz;
#endif
}

static void __init
request_standard_resources(struct meminfo *mi, struct machine_desc *mdesc)
{
	struct resource *res;
	int i;

	kernel_code.start   = virt_to_phys(&_text);
	kernel_code.end     = virt_to_phys(&_etext - 1);
	kernel_data.start   = virt_to_phys(&__data_start);
	kernel_data.end     = virt_to_phys(&_end - 1);

	for (i = 0; i < mi->nr_banks; i++) {
		unsigned long virt_start, virt_end;

		if (mi->bank[i].size == 0)
			continue;

		virt_start = __phys_to_virt(mi->bank[i].start);
		virt_end   = virt_start + mi->bank[i].size - 1;

		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __virt_to_phys(virt_start);
		res->end   = __virt_to_phys(virt_end);
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines will never have lp0, lp1 or lp2; only reserve
	 * the ports the machine descriptor asks for.
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

/*
 *  Tag parsing.
 *
 * This is the new way of passing data to the kernel at boot time.  Rather
 * than passing a fixed inflexible structure to the kernel, we pass a list
 * of variable-sized tags to the kernel.  The first tag must be an ATAG_CORE
 * tag for the list to be recognised (to distinguish the tagged list from
 * a param_struct).  The list is terminated with a zero-length tag (this tag
 * is not parsed in any way).
 */
static int __init parse_tag_core(const struct tag *tag)
{
	if (tag->hdr.size > 2) {
		if ((tag->u.core.flags & 1) == 0)
			root_mountflags &= ~MS_RDONLY;
		ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
	}
	return 0;
}

__tagtable(ATAG_CORE, parse_tag_core);

static int __init parse_tag_mem32(const struct tag *tag)
{
	if (meminfo.nr_banks >= NR_BANKS) {
		printk(KERN_WARNING
		       "Ignoring memory bank 0x%08x size %dKB\n",
			tag->u.mem.start, tag->u.mem.size / 1024);
		return -EINVAL;
	}
	add_memory(tag->u.mem.start, tag->u.mem.size);
	return 0;
}

__tagtable(ATAG_MEM, parse_tag_mem32);

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
 .orig_video_lines	= 30,
 .orig_video_cols	= 80,
 .orig_video_mode	= 0,
 .orig_video_ega_bx	= 0,
 .orig_video_isVGA	= 1,
 .orig_video_points	= 8
};

static int __init parse_tag_videotext(const struct tag *tag)
{
	screen_info.orig_x            = tag->u.videotext.x;
	screen_info.orig_y            = tag->u.videotext.y;
	screen_info.orig_video_page   = tag->u.videotext.video_page;
	screen_info.orig_video_mode   = tag->u.videotext.video_mode;
	screen_info.orig_video_cols   = tag->u.videotext.video_cols;
	screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
	screen_info.orig_video_lines  = tag->u.videotext.video_lines;
	screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
	screen_info.orig_video_points = tag->u.videotext.video_points;
	return 0;
}

__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
#endif

static int __init parse_tag_ramdisk(const struct tag *tag)
{
	setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
		      (tag->u.ramdisk.flags & 2) == 0,
		      tag->u.ramdisk.start, tag->u.ramdisk.size);
	return 0;
}

__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);

static int __init parse_tag_initrd(const struct tag *tag)
{
	printk(KERN_WARNING "ATAG_INITRD is deprecated; "
		"please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);

static int __init parse_tag_serialnr(const struct tag *tag)
{
	system_serial_low = tag->u.serialnr.low;
	system_serial_high = tag->u.serialnr.high;
	return 0;
}

__tagtable(ATAG_SERIAL, parse_tag_serialnr);

static int __init parse_tag_revision(const struct tag *tag)
{
	system_rev = tag->u.revision.rev;
	return 0;
}

__tagtable(ATAG_REVISION, parse_tag_revision);

static int __init parse_tag_cmdline(const struct tag *tag)
{
	strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
	return 0;
}

__tagtable(ATAG_CMDLINE, parse_tag_cmdline);

/*
 * Scan the tag table for this tag, and call its parse function.
 * The tag table is built by the linker from all the __tagtable
 * declarations.
 */
static int __init parse_tag(const struct tag *tag)
{
	extern struct tagtable __tagtable_begin, __tagtable_end;
	struct tagtable *t;

	for (t = &__tagtable_begin; t < &__tagtable_end; t++)
		if (tag->hdr.tag == t->tag) {
			t->parse(tag);
			break;
		}

	return t < &__tagtable_end;
}

/*
 * Parse all tags in the list, checking both the global and architecture
 * specific tag tables.
 */
static void __init parse_tags(const struct tag *t)
{
	for (; t->hdr.size; t = tag_next(t))
		if (!parse_tag(t))
			printk(KERN_WARNING
				"Ignoring unrecognised tag 0x%08x\n",
				t->hdr.tag);
}

/*
 * This holds our defaults.
 */
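/*
 * The positional initialisers below build a minimal tag list: an ATAG_CORE
 * header plus a tag_core body (flags = 1, page size = PAGE_SIZE, root
 * device 0xff), an ATAG_MEM header plus a tag_mem32 body describing
 * MEM_SIZE bytes at PHYS_OFFSET, and a zero-sized ATAG_NONE header that
 * terminates the list.  It is used whenever the bootloader did not supply
 * a usable tag list of its own.
 */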
static struct init_tags {
	struct tag_header hdr1;
	struct tag_core   core;
	struct tag_header hdr2;
	struct tag_mem32  mem;
	struct tag_header hdr3;
} init_tags __initdata = {
	{ tag_size(tag_core), ATAG_CORE },
	{ 1, PAGE_SIZE, 0xff },
	{ tag_size(tag_mem32), ATAG_MEM },
	{ MEM_SIZE, PHYS_OFFSET },
	{ 0, ATAG_NONE }
};

static void (*init_machine)(void) __initdata;

static int __init customize_machine(void)
{
	/* customizes platform devices, or adds new ones */
	if (init_machine)
		init_machine();
	return 0;
}
arch_initcall(customize_machine);

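/*
 * setup_arch() drives ARM boot-time initialisation: it identifies the CPU
 * and machine, locates the bootloader's tag list (falling back to the
 * init_tags defaults above), lets the machine-specific fixup hook adjust
 * things, parses the tags and command line, sets up paging and the
 * standard resources, and records the machine's IRQ, timer and
 * init_machine hooks for later use.
 */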
void __init setup_arch(char **cmdline_p)
{
	struct tag *tags = (struct tag *)&init_tags;
	struct machine_desc *mdesc;
	char *from = default_command_line;

	setup_processor();
	mdesc = setup_machine(machine_arch_type);
	machine_name = mdesc->name;

	if (mdesc->soft_reboot)
		reboot_setup("s");

	if (mdesc->boot_params)
		tags = phys_to_virt(mdesc->boot_params);

	/*
	 * If we have the old style parameters, convert them to
	 * a tag list.
	 */
	if (tags->hdr.tag != ATAG_CORE)
		convert_to_tag_list(tags);
	if (tags->hdr.tag != ATAG_CORE)
		tags = (struct tag *)&init_tags;

	if (mdesc->fixup)
		mdesc->fixup(mdesc, tags, &from, &meminfo);

	if (tags->hdr.tag == ATAG_CORE) {
		if (meminfo.nr_banks != 0)
			squash_mem_tags(tags);
		parse_tags(tags);
	}

	init_mm.start_code = (unsigned long) &_text;
	init_mm.end_code   = (unsigned long) &_etext;
	init_mm.end_data   = (unsigned long) &_edata;
	init_mm.brk	   = (unsigned long) &_end;

	memcpy(saved_command_line, from, COMMAND_LINE_SIZE);
	saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
	parse_cmdline(cmdline_p, from);
	paging_init(&meminfo, mdesc);
	request_standard_resources(&meminfo, mdesc);

	cpu_init();

	/*
	 * Set up various architecture-specific pointers
	 */
	init_arch_irq = mdesc->init_irq;
	system_timer = mdesc->timer;
	init_machine = mdesc->init_machine;

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
}


static int __init topology_init(void)
{
	int cpu;

	for_each_cpu(cpu)
		register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu, NULL);

	return 0;
}

subsys_initcall(topology_init);

static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	NULL
};

static void
c_show_cache(struct seq_file *m, const char *type, unsigned int cache)
{
	unsigned int mult = 2 + (CACHE_M(cache) ? 1 : 0);

	seq_printf(m, "%s size\t\t: %d\n"
		      "%s assoc\t\t: %d\n"
		      "%s line length\t: %d\n"
		      "%s sets\t\t: %d\n",
		type, mult << (8 + CACHE_SIZE(cache)),
		type, (mult << CACHE_ASSOC(cache)) >> 1,
		type, 8 << CACHE_LINE(cache),
		type, 1 << (6 + CACHE_SIZE(cache) - CACHE_ASSOC(cache) -
			    CACHE_LINE(cache)));
}

static int c_show(struct seq_file *m, void *v)
{
	int i;

	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
		   cpu_name, (int)processor_id & 15, elf_platform);

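	/*
	 * BogoMIPS is reconstructed from loops_per_jiffy: the value equals
	 * loops_per_jiffy * HZ / 500000, printed below as an integer part
	 * (lpj / (500000/HZ)) and a two-digit fraction
	 * ((lpj / (5000/HZ)) % 100).
	 */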
#if defined(CONFIG_SMP)
	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
	}
#else /* CONFIG_SMP */
	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000/HZ),
		   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

	/* dump out the processor features */
	seq_puts(m, "Features\t: ");

	for (i = 0; hwcap_str[i]; i++)
		if (elf_hwcap & (1 << i))
			seq_printf(m, "%s ", hwcap_str[i]);

	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", processor_id >> 24);
	seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

	if ((processor_id & 0x0000f000) == 0x00000000) {
		/* pre-ARM7 */
		seq_printf(m, "CPU part\t\t: %07x\n", processor_id >> 4);
	} else {
		if ((processor_id & 0x0000f000) == 0x00007000) {
			/* ARM7 */
			seq_printf(m, "CPU variant\t: 0x%02x\n",
				   (processor_id >> 16) & 127);
		} else {
			/* post-ARM7 */
			seq_printf(m, "CPU variant\t: 0x%x\n",
				   (processor_id >> 20) & 15);
		}
		seq_printf(m, "CPU part\t: 0x%03x\n",
			   (processor_id >> 4) & 0xfff);
	}
	seq_printf(m, "CPU revision\t: %d\n", processor_id & 15);

	{
		unsigned int cache_info = read_cpuid(CPUID_CACHETYPE);
		if (cache_info != processor_id) {
			seq_printf(m, "Cache type\t: %s\n"
				      "Cache clean\t: %s\n"
				      "Cache lockdown\t: %s\n"
				      "Cache format\t: %s\n",
				   cache_types[CACHE_TYPE(cache_info)],
				   cache_clean[CACHE_TYPE(cache_info)],
				   cache_lockdown[CACHE_TYPE(cache_info)],
				   CACHE_S(cache_info) ? "Harvard" : "Unified");

			if (CACHE_S(cache_info)) {
				c_show_cache(m, "I", CACHE_ISIZE(cache_info));
				c_show_cache(m, "D", CACHE_DSIZE(cache_info));
			} else {
				c_show_cache(m, "Cache", CACHE_ISIZE(cache_info));
			}
		}
	}

	seq_puts(m, "\n");

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};