xref: /openbmc/linux/arch/arm/kernel/setup.c (revision df2634f43f5106947f3735a0b61a6527a4b278cd)
1 /*
2  *  linux/arch/arm/kernel/setup.c
3  *
4  *  Copyright (C) 1995-2001 Russell King
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/stddef.h>
13 #include <linux/ioport.h>
14 #include <linux/delay.h>
15 #include <linux/utsname.h>
16 #include <linux/initrd.h>
17 #include <linux/console.h>
18 #include <linux/bootmem.h>
19 #include <linux/seq_file.h>
20 #include <linux/screen_info.h>
21 #include <linux/init.h>
22 #include <linux/kexec.h>
23 #include <linux/crash_dump.h>
24 #include <linux/root_dev.h>
25 #include <linux/cpu.h>
26 #include <linux/interrupt.h>
27 #include <linux/smp.h>
28 #include <linux/fs.h>
29 #include <linux/proc_fs.h>
30 #include <linux/memblock.h>
31 
32 #include <asm/unified.h>
33 #include <asm/cpu.h>
34 #include <asm/cputype.h>
35 #include <asm/elf.h>
36 #include <asm/procinfo.h>
37 #include <asm/sections.h>
38 #include <asm/setup.h>
39 #include <asm/smp_plat.h>
40 #include <asm/mach-types.h>
41 #include <asm/cacheflush.h>
42 #include <asm/cachetype.h>
43 #include <asm/tlbflush.h>
44 
45 #include <asm/mach/arch.h>
46 #include <asm/mach/irq.h>
47 #include <asm/mach/time.h>
48 #include <asm/traps.h>
49 #include <asm/unwind.h>
50 
51 #if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
52 #include "compat.h"
53 #endif
54 #include "atags.h"
55 #include "tcm.h"
56 
57 #ifndef MEM_SIZE
58 #define MEM_SIZE	(16*1024*1024)
59 #endif
60 
#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
/*
 * Name of the FP emulator requested with "fpe=" on the command line.
 * Consumed by the FP emulator modules at init time.
 */
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	/*
	 * Bounded, NUL-terminating copy.  The previous unconditional
	 * 8-byte memcpy() could read past the end of a command-line
	 * argument shorter than 8 characters.
	 */
	strlcpy(fpe_type, line, sizeof(fpe_type));
	return 1;
}

__setup("fpe=", fpe_setup);
#endif
72 
extern void paging_init(struct machine_desc *desc);
extern void reboot_setup(char *str);

/* Raw CPU main ID register value; exported for modules. */
unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
/* Machine (board) number handed over by the boot loader. */
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
/* CACHEID_* flags describing the cache policy; set by cacheid_init(). */
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

/* Physical address of the boot loader's ATAG list (0 if none given). */
unsigned int __atags_pointer __initdata;

/* Board revision, from ATAG_REVISION; shown in /proc/cpuinfo. */
unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

/* 64-bit board serial number as two halves, from ATAG_SERIAL. */
unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

/* HWCAP_* feature bits advertised to user space. */
unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);


/*
 * Per-variant operation tables; setup_processor() copies these from the
 * matched proc_info_list entry when the kernel is built to support more
 * than one CPU/TLB/user/cache implementation.
 */
#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

/* Small per-mode exception stacks (IRQ/ABT/UND), one set per CPU;
 * installed into the banked SPs by cpu_init(). */
struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

/* ELF platform string ("<elf_name><endianness>"), set by setup_processor(). */
char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
struct machine_desc *machine_desc __initdata;

static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
/* Run-time endianness probe: the low byte of .l is c[0] ('l') on a
 * little-endian build and c[3] ('b') on a big-endian one. */
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
136 
/*
 * Standard memory resources
 *
 * start/end are placeholders; request_standard_resources() fills them
 * in at boot.
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel text",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

/* Legacy printer-port I/O ranges (lp0-lp2), reserved only if the
 * machine descriptor asks for them. */
static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]
189 
/*
 * Human-readable architecture names, indexed by the CPU_ARCH_* value
 * returned by cpu_architecture(); printed in /proc/cpuinfo.
 */
static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};
209 
210 int cpu_architecture(void)
211 {
212 	int cpu_arch;
213 
214 	if ((read_cpuid_id() & 0x0008f000) == 0) {
215 		cpu_arch = CPU_ARCH_UNKNOWN;
216 	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
217 		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
218 	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
219 		cpu_arch = (read_cpuid_id() >> 16) & 7;
220 		if (cpu_arch)
221 			cpu_arch += CPU_ARCH_ARMv3;
222 	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
223 		unsigned int mmfr0;
224 
225 		/* Revised CPUID format. Read the Memory Model Feature
226 		 * Register 0 and check for VMSAv7 or PMSAv7 */
227 		asm("mrc	p15, 0, %0, c0, c1, 4"
228 		    : "=r" (mmfr0));
229 		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
230 		    (mmfr0 & 0x000000f0) >= 0x00000030)
231 			cpu_arch = CPU_ARCH_ARMv7;
232 		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
233 			 (mmfr0 & 0x000000f0) == 0x00000020)
234 			cpu_arch = CPU_ARCH_ARMv6;
235 		else
236 			cpu_arch = CPU_ARCH_UNKNOWN;
237 	} else
238 		cpu_arch = CPU_ARCH_UNKNOWN;
239 
240 	return cpu_arch;
241 }
242 
/*
 * Decide whether the instruction cache can alias, i.e. whether a
 * physical line can appear at more than one virtual index.
 */
static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		/* Select the L1 I-cache in CSSELR, then read its
		 * geometry from CCSIDR. */
		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
		    : /* No output operands */
		    : "r" (1));
		isb();
		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
		    : "=r" (id_reg));
		line_size = 4 << ((id_reg & 0x7) + 2);	/* bytes per line */
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		/* One way larger than a page => virtual aliases possible. */
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		/* Cache type register bit 11 flags an aliasing I-cache
		 * on ARMv6 (per the ARM ARM). */
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}
271 
/*
 * Work out the cache policy (VIVT / VIPT aliasing / VIPT non-aliasing
 * plus I-cache flags) from the cache type register, store it in the
 * global "cacheid", and announce it.
 */
static void __init cacheid_init(void)
{
	unsigned int cachetype = read_cpuid_cachetype();
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			cacheid = CACHEID_VIPT_NONALIASING;
			/* I-cache policy field [15:14] == 0b01 means the
			 * I-cache is ASID-tagged VIVT (per the ARM ARM). */
			if ((cachetype & (3 << 14)) == 1 << 14)
				cacheid |= CACHEID_ASID_TAGGED;
			else if (cpu_has_aliasing_icache(CPU_ARCH_ARMv7))
				cacheid |= CACHEID_VIPT_I_ALIASING;
		} else if (cachetype & (1 << 23)) {
			/* ARMv6 format, aliasing D-cache */
			cacheid = CACHEID_VIPT_ALIASING;
		} else {
			cacheid = CACHEID_VIPT_NONALIASING;
			if (cpu_has_aliasing_icache(CPU_ARCH_ARMv6))
				cacheid |= CACHEID_VIPT_I_ALIASING;
		}
	} else {
		/* Pre-v6 caches are virtually indexed and tagged. */
		cacheid = CACHEID_VIVT;
	}

	printk("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}
305 
306 /*
307  * These functions re-use the assembly code in head.S, which
308  * already provide the required functionality.
309  */
310 extern struct proc_info_list *lookup_processor_type(unsigned int);
311 extern struct machine_desc *lookup_machine_type(unsigned int);
312 
313 static void __init feat_v6_fixup(void)
314 {
315 	int id = read_cpuid_id();
316 
317 	if ((id & 0xff0f0000) != 0x41070000)
318 		return;
319 
320 	/*
321 	 * HWCAP_TLS is available only on 1136 r1p0 and later,
322 	 * see also kuser_get_tls_init.
323 	 */
324 	if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
325 		elf_hwcap &= ~HWCAP_TLS;
326 }
327 
/*
 * Identify the CPU from its main ID register, copy the matching
 * per-variant operation tables into place, initialise cache state,
 * and fill in the utsname/ELF platform strings and hwcaps.
 */
static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", read_cpuid_id());
		while (1);	/* unsupported CPU: hang rather than crash */
	}

	cpu_name = list->cpu_name;

	/* Multi-variant kernels dispatch through these copied tables. */
#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	/* e.g. "armv7" plus an 'l'/'b' endianness suffix */
	sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
	sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~HWCAP_THUMB;	/* kernel built without Thumb support */
#endif

	feat_v6_fixup();

	cacheid_init();
	cpu_proc_init();
}
375 
/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 *
	 * Switch into IRQ, ABT and UND mode in turn (with IRQ/FIQ
	 * masked), point each mode's banked SP at this CPU's per-mode
	 * stack, then return to SVC mode.  r14 is used as scratch and
	 * therefore clobbered.
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}
426 
427 static struct machine_desc * __init setup_machine(unsigned int nr)
428 {
429 	struct machine_desc *list;
430 
431 	/*
432 	 * locate machine in the list of supported machines.
433 	 */
434 	list = lookup_machine_type(nr);
435 	if (!list) {
436 		printk("Machine configuration botched (nr %d), unable "
437 		       "to continue.\n", nr);
438 		while (1);
439 	}
440 
441 	printk("Machine: %s\n", list->name);
442 
443 	return list;
444 }
445 
446 static int __init arm_add_memory(unsigned long start, unsigned long size)
447 {
448 	struct membank *bank = &meminfo.bank[meminfo.nr_banks];
449 
450 	if (meminfo.nr_banks >= NR_BANKS) {
451 		printk(KERN_CRIT "NR_BANKS too low, "
452 			"ignoring memory at %#lx\n", start);
453 		return -EINVAL;
454 	}
455 
456 	/*
457 	 * Ensure that start/size are aligned to a page boundary.
458 	 * Size is appropriately rounded down, start is rounded up.
459 	 */
460 	size -= start & ~PAGE_MASK;
461 	bank->start = PAGE_ALIGN(start);
462 	bank->size  = size & PAGE_MASK;
463 
464 	/*
465 	 * Check whether this memory region has non-zero size or
466 	 * invalid node number.
467 	 */
468 	if (bank->size == 0)
469 		return -EINVAL;
470 
471 	meminfo.nr_banks++;
472 	return 0;
473 }
474 
475 /*
476  * Pick out the memory size.  We look for mem=size@start,
477  * where start and size are "size[KkMm]"
478  */
479 static int __init early_mem(char *p)
480 {
481 	static int usermem __initdata = 0;
482 	unsigned long size, start;
483 	char *endp;
484 
485 	/*
486 	 * If the user specifies memory size, we
487 	 * blow away any automatically generated
488 	 * size.
489 	 */
490 	if (usermem == 0) {
491 		usermem = 1;
492 		meminfo.nr_banks = 0;
493 	}
494 
495 	start = PHYS_OFFSET;
496 	size  = memparse(p, &endp);
497 	if (*endp == '@')
498 		start = memparse(endp + 1, NULL);
499 
500 	arm_add_memory(start, size);
501 
502 	return 0;
503 }
504 early_param("mem", early_mem);
505 
/*
 * Hand ramdisk parameters (typically from ATAG_RAMDISK) to the RAM
 * disk block driver's boot-time variables; a no-op when that driver is
 * not configured.
 */
static void __init
setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
{
#ifdef CONFIG_BLK_DEV_RAM
	extern int rd_size, rd_image_start, rd_prompt, rd_doload;

	rd_image_start = image_start;
	rd_prompt = prompt;
	rd_doload = doload;

	/* Only override the driver's default size if one was supplied. */
	if (rd_sz)
		rd_size = rd_sz;
#endif
}
520 
/*
 * Register the standard resource tree: one "System RAM" entry per
 * memblock region, the kernel text/data nested inside the region that
 * contains them, optional board video RAM, and the legacy printer-port
 * I/O ranges the machine descriptor asks to reserve.
 */
static void __init request_standard_resources(struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	/* Physical extent of the kernel image sections. */
	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		/* Nest the kernel sections under the RAM bank holding them. */
		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}
565 
566 /*
567  *  Tag parsing.
568  *
569  * This is the new way of passing data to the kernel at boot time.  Rather
570  * than passing a fixed inflexible structure to the kernel, we pass a list
571  * of variable-sized tags to the kernel.  The first tag must be a ATAG_CORE
572  * tag for the list to be recognised (to distinguish the tagged list from
573  * a param_struct).  The list is terminated with a zero-length tag (this tag
574  * is not parsed in any way).
575  */
576 static int __init parse_tag_core(const struct tag *tag)
577 {
578 	if (tag->hdr.size > 2) {
579 		if ((tag->u.core.flags & 1) == 0)
580 			root_mountflags &= ~MS_RDONLY;
581 		ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
582 	}
583 	return 0;
584 }
585 
586 __tagtable(ATAG_CORE, parse_tag_core);
587 
/* ATAG_MEM: register one physical memory bank with meminfo. */
static int __init parse_tag_mem32(const struct tag *tag)
{
	return arm_add_memory(tag->u.mem.start, tag->u.mem.size);
}

__tagtable(ATAG_MEM, parse_tag_mem32);
594 
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
/* Default text-console geometry: 80x30, VGA-style, 8-pixel font. */
struct screen_info screen_info = {
 .orig_video_lines	= 30,
 .orig_video_cols	= 80,
 .orig_video_mode	= 0,
 .orig_video_ega_bx	= 0,
 .orig_video_isVGA	= 1,
 .orig_video_points	= 8
};

/* ATAG_VIDEOTEXT: copy the boot loader's text-console state verbatim. */
static int __init parse_tag_videotext(const struct tag *tag)
{
	screen_info.orig_x            = tag->u.videotext.x;
	screen_info.orig_y            = tag->u.videotext.y;
	screen_info.orig_video_page   = tag->u.videotext.video_page;
	screen_info.orig_video_mode   = tag->u.videotext.video_mode;
	screen_info.orig_video_cols   = tag->u.videotext.video_cols;
	screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
	screen_info.orig_video_lines  = tag->u.videotext.video_lines;
	screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
	screen_info.orig_video_points = tag->u.videotext.video_points;
	return 0;
}

__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
#endif
621 
/*
 * ATAG_RAMDISK: forward ramdisk parameters to the RAM disk driver.
 * NOTE(review): the inverted tests suggest flag bit 0 set means
 * "don't load" and bit 1 set means "don't prompt" - confirm against
 * the boot loader's ATAG definitions.
 */
static int __init parse_tag_ramdisk(const struct tag *tag)
{
	setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
		      (tag->u.ramdisk.flags & 2) == 0,
		      tag->u.ramdisk.start, tag->u.ramdisk.size);
	return 0;
}

__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);
631 
/* ATAG_SERIAL: 64-bit board serial number, delivered as two halves;
 * reported via /proc/cpuinfo. */
static int __init parse_tag_serialnr(const struct tag *tag)
{
	system_serial_low = tag->u.serialnr.low;
	system_serial_high = tag->u.serialnr.high;
	return 0;
}

__tagtable(ATAG_SERIAL, parse_tag_serialnr);

/* ATAG_REVISION: board revision number; reported via /proc/cpuinfo. */
static int __init parse_tag_revision(const struct tag *tag)
{
	system_rev = tag->u.revision.rev;
	return 0;
}

__tagtable(ATAG_REVISION, parse_tag_revision);

/* ATAG_CMDLINE: boot-loader-supplied kernel command line, unless the
 * kernel is configured to force its built-in one. */
static int __init parse_tag_cmdline(const struct tag *tag)
{
#ifndef CONFIG_CMDLINE_FORCE
	strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
#else
	pr_warning("Ignoring tag cmdline (using the default kernel command line)\n");
#endif /* CONFIG_CMDLINE_FORCE */
	return 0;
}

__tagtable(ATAG_CMDLINE, parse_tag_cmdline);
660 
661 /*
662  * Scan the tag table for this tag, and call its parse function.
663  * The tag table is built by the linker from all the __tagtable
664  * declarations.
665  */
666 static int __init parse_tag(const struct tag *tag)
667 {
668 	extern struct tagtable __tagtable_begin, __tagtable_end;
669 	struct tagtable *t;
670 
671 	for (t = &__tagtable_begin; t < &__tagtable_end; t++)
672 		if (tag->hdr.tag == t->tag) {
673 			t->parse(tag);
674 			break;
675 		}
676 
677 	return t < &__tagtable_end;
678 }
679 
/*
 * Parse all tags in the list, checking both the global and architecture
 * specific tag tables.
 */
static void __init parse_tags(const struct tag *t)
{
	/* The list is terminated by a zero-sized tag. */
	for (; t->hdr.size; t = tag_next(t))
		if (!parse_tag(t))
			printk(KERN_WARNING
				"Ignoring unrecognised tag 0x%08x\n",
				t->hdr.tag);
}
692 
/*
 * This holds our defaults.
 *
 * A minimal ATAG list used when the boot loader supplies nothing
 * usable: a core tag, one MEM_SIZE bank at PHYS_OFFSET, and the
 * ATAG_NONE terminator.  Each initializer row pairs a header with the
 * payload that follows it.
 */
static struct init_tags {
	struct tag_header hdr1;
	struct tag_core   core;
	struct tag_header hdr2;
	struct tag_mem32  mem;
	struct tag_header hdr3;
} init_tags __initdata = {
	{ tag_size(tag_core), ATAG_CORE },
	{ 1, PAGE_SIZE, 0xff },
	{ tag_size(tag_mem32), ATAG_MEM },
	{ MEM_SIZE, PHYS_OFFSET },
	{ 0, ATAG_NONE }
};
709 
710 static int __init customize_machine(void)
711 {
712 	/* customizes platform devices, or adds new ones */
713 	if (machine_desc->init_machine)
714 		machine_desc->init_machine();
715 	return 0;
716 }
717 arch_initcall(customize_machine);
718 
719 #ifdef CONFIG_KEXEC
720 static inline unsigned long long get_total_mem(void)
721 {
722 	unsigned long total;
723 
724 	total = max_low_pfn - min_low_pfn;
725 	return total << PAGE_SHIFT;
726 }
727 
/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves the memory area given in the "crashkernel="
 * kernel command line parameter. The memory reserved is used by a dump
 * capture kernel when the primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;		/* no (or unparsable) crashkernel= option */

	ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
	if (ret < 0) {
		printk(KERN_WARNING "crashkernel reservation failed - "
		       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
		return;
	}

	printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
	       "for crashkernel (System RAM: %ldMB)\n",
	       (unsigned long)(crash_size >> 20),
	       (unsigned long)(crash_base >> 20),
	       (unsigned long)(total_mem >> 20));

	/* Record the reserved region in the iomem resource tree. */
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
764 #else
765 static inline void reserve_crashkernel(void) {}
766 #endif /* CONFIG_KEXEC */
767 
768 /*
769  * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by
770  * is_kdump_kernel() to determine if we are booting after a panic. Hence
771  * ifdef it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE.
772  */
773 
#ifdef CONFIG_CRASH_DUMP
/*
 * elfcorehdr= specifies the location of elf core header stored by the crashed
 * kernel. This option will be passed by kexec loader to the capture kernel.
 */
static int __init setup_elfcorehdr(char *arg)
{
	char *end;

	if (!arg)
		return -EINVAL;

	elfcorehdr_addr = memparse(arg, &end);
	/* Fail unless at least one character parsed as a number. */
	if (end == arg)
		return -EINVAL;
	return 0;
}
early_param("elfcorehdr", setup_elfcorehdr);
#endif /* CONFIG_CRASH_DUMP */
791 
792 static void __init squash_mem_tags(struct tag *tag)
793 {
794 	for (; tag->hdr.size; tag = tag_next(tag))
795 		if (tag->hdr.tag == ATAG_MEM)
796 			tag->hdr.tag = ATAG_NONE;
797 }
798 
/*
 * setup_arch - ARM boot-time architecture setup.
 *
 * Identifies the CPU and machine, locates and parses the boot loader's
 * ATAG list (or a converted param_struct, or built-in defaults), sets
 * up the command line and memory banks, then brings up paging, the
 * resource tree, per-CPU exception stacks and early machine hooks.
 * The ordering of the calls below is significant.
 */
void __init setup_arch(char **cmdline_p)
{
	struct tag *tags = (struct tag *)&init_tags;
	struct machine_desc *mdesc;
	char *from = default_command_line;

	unwind_init();

	setup_processor();
	mdesc = setup_machine(machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;

	if (mdesc->soft_reboot)
		reboot_setup("s");

	/* Prefer the ATAG pointer handed over by the boot loader;
	 * fall back to the machine's fixed boot_params address. */
	if (__atags_pointer)
		tags = phys_to_virt(__atags_pointer);
	else if (mdesc->boot_params)
		tags = phys_to_virt(mdesc->boot_params);

#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
	/*
	 * If we have the old style parameters, convert them to
	 * a tag list.
	 */
	if (tags->hdr.tag != ATAG_CORE)
		convert_to_tag_list(tags);
#endif
	/* Still not a valid tag list?  Use the compiled-in defaults. */
	if (tags->hdr.tag != ATAG_CORE)
		tags = (struct tag *)&init_tags;

	if (mdesc->fixup)
		mdesc->fixup(mdesc, tags, &from, &meminfo);

	if (tags->hdr.tag == ATAG_CORE) {
		/* If the fixup routine already populated the memory
		 * banks, ignore any ATAG_MEM entries. */
		if (meminfo.nr_banks != 0)
			squash_mem_tags(tags);
		save_atags(tags);
		parse_tags(tags);
	}

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* parse_early_param needs a boot_command_line */
	strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

	arm_memblock_init(&meminfo, mdesc);

	paging_init(mdesc);
	request_standard_resources(mdesc);

#ifdef CONFIG_SMP
	if (is_smp())
		smp_init_cpus();
#endif
	reserve_crashkernel();

	cpu_init();
	tcm_init();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
	early_trap_init();

	if (mdesc->init_early)
		mdesc->init_early();
}
885 
886 
887 static int __init topology_init(void)
888 {
889 	int cpu;
890 
891 	for_each_possible_cpu(cpu) {
892 		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
893 		cpuinfo->cpu.hotpluggable = 1;
894 		register_cpu(&cpuinfo->cpu, cpu);
895 	}
896 
897 	return 0;
898 }
899 subsys_initcall(topology_init);
900 
#ifdef CONFIG_HAVE_PROC_CPU
/* Create the /proc/cpu directory for later per-CPU proc entries. */
static int __init proc_cpu_init(void)
{
	return proc_mkdir("cpu", NULL) ? 0 : -ENOMEM;
}
fs_initcall(proc_cpu_init);
#endif
913 
/* Names of the HWCAP_* bits in bit order; printed by c_show(). */
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	NULL		/* sentinel */
};
932 
/*
 * Emit /proc/cpuinfo: CPU identification, per-CPU BogoMIPS, feature
 * strings, decoded main ID register fields, and board information.
 */
static int c_show(struct seq_file *m, void *v)
{
	int i;

	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
		   cpu_name, read_cpuid_id() & 15, elf_platform);

#if defined(CONFIG_SMP)
	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
	}
#else /* CONFIG_SMP */
	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000/HZ),
		   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

	/* dump out the processor features */
	seq_puts(m, "Features\t: ");

	for (i = 0; hwcap_str[i]; i++)
		if (elf_hwcap & (1 << i))
			seq_printf(m, "%s ", hwcap_str[i]);

	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
	seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

	/* Field layout of the ID register differs by CPU generation;
	 * same discrimination logic as cpu_architecture() above. */
	if ((read_cpuid_id() & 0x0008f000) == 0x00000000) {
		/* pre-ARM7 */
		seq_printf(m, "CPU part\t: %07x\n", read_cpuid_id() >> 4);
	} else {
		if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
			/* ARM7 */
			seq_printf(m, "CPU variant\t: 0x%02x\n",
				   (read_cpuid_id() >> 16) & 127);
		} else {
			/* post-ARM7 */
			seq_printf(m, "CPU variant\t: 0x%x\n",
				   (read_cpuid_id() >> 20) & 15);
		}
		seq_printf(m, "CPU part\t: 0x%03x\n",
			   (read_cpuid_id() >> 4) & 0xfff);
	}
	seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);

	seq_puts(m, "\n");

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}
995 
/*
 * /proc/cpuinfo is a single synthetic record: c_start yields one
 * non-NULL token at position 0, and c_next always ends the sequence.
 */
static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};
1017