xref: /openbmc/linux/arch/arm/kernel/setup.c (revision 4588c34d)
1 /*
2  *  linux/arch/arm/kernel/setup.c
3  *
4  *  Copyright (C) 1995-2001 Russell King
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 #include <linux/export.h>
11 #include <linux/kernel.h>
12 #include <linux/stddef.h>
13 #include <linux/ioport.h>
14 #include <linux/delay.h>
15 #include <linux/utsname.h>
16 #include <linux/initrd.h>
17 #include <linux/console.h>
18 #include <linux/bootmem.h>
19 #include <linux/seq_file.h>
20 #include <linux/screen_info.h>
21 #include <linux/init.h>
22 #include <linux/kexec.h>
23 #include <linux/of_fdt.h>
24 #include <linux/root_dev.h>
25 #include <linux/cpu.h>
26 #include <linux/interrupt.h>
27 #include <linux/smp.h>
28 #include <linux/fs.h>
29 #include <linux/proc_fs.h>
30 #include <linux/memblock.h>
31 #include <linux/bug.h>
32 #include <linux/compiler.h>
33 #include <linux/sort.h>
34 
35 #include <asm/unified.h>
36 #include <asm/cp15.h>
37 #include <asm/cpu.h>
38 #include <asm/cputype.h>
39 #include <asm/elf.h>
40 #include <asm/procinfo.h>
41 #include <asm/sections.h>
42 #include <asm/setup.h>
43 #include <asm/smp_plat.h>
44 #include <asm/mach-types.h>
45 #include <asm/cacheflush.h>
46 #include <asm/cachetype.h>
47 #include <asm/tlbflush.h>
48 
49 #include <asm/prom.h>
50 #include <asm/mach/arch.h>
51 #include <asm/mach/irq.h>
52 #include <asm/mach/time.h>
53 #include <asm/system_info.h>
54 #include <asm/system_misc.h>
55 #include <asm/traps.h>
56 #include <asm/unwind.h>
57 #include <asm/memblock.h>
58 #include <asm/virt.h>
59 
60 #if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
61 #include "compat.h"
62 #endif
63 #include "atags.h"
64 #include "tcm.h"
65 
66 #ifndef MEM_SIZE
67 #define MEM_SIZE	(16*1024*1024)
68 #endif
69 
70 #if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
71 char fpe_type[8];
72 
73 static int __init fpe_setup(char *line)
74 {
75 	memcpy(fpe_type, line, 8);
76 	return 1;
77 }
78 
79 __setup("fpe=", fpe_setup);
80 #endif
81 
82 extern void paging_init(struct machine_desc *desc);
83 extern void sanity_check_meminfo(void);
84 extern void reboot_setup(char *str);
85 extern void setup_dma_zone(struct machine_desc *desc);
86 
87 unsigned int processor_id;
88 EXPORT_SYMBOL(processor_id);
89 unsigned int __machine_arch_type __read_mostly;
90 EXPORT_SYMBOL(__machine_arch_type);
91 unsigned int cacheid __read_mostly;
92 EXPORT_SYMBOL(cacheid);
93 
94 unsigned int __atags_pointer __initdata;
95 
96 unsigned int system_rev;
97 EXPORT_SYMBOL(system_rev);
98 
99 unsigned int system_serial_low;
100 EXPORT_SYMBOL(system_serial_low);
101 
102 unsigned int system_serial_high;
103 EXPORT_SYMBOL(system_serial_high);
104 
105 unsigned int elf_hwcap __read_mostly;
106 EXPORT_SYMBOL(elf_hwcap);
107 
108 
109 #ifdef MULTI_CPU
110 struct processor processor __read_mostly;
111 #endif
112 #ifdef MULTI_TLB
113 struct cpu_tlb_fns cpu_tlb __read_mostly;
114 #endif
115 #ifdef MULTI_USER
116 struct cpu_user_fns cpu_user __read_mostly;
117 #endif
118 #ifdef MULTI_CACHE
119 struct cpu_cache_fns cpu_cache __read_mostly;
120 #endif
121 #ifdef CONFIG_OUTER_CACHE
122 struct outer_cache_fns outer_cache __read_mostly;
123 EXPORT_SYMBOL(outer_cache);
124 #endif
125 
126 /*
127  * Cached cpu_architecture() result for use by assembler code.
128  * C code should use the cpu_architecture() function instead of accessing this
129  * variable directly.
130  */
131 int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;
132 
133 struct stack {
134 	u32 irq[3];
135 	u32 abt[3];
136 	u32 und[3];
137 } ____cacheline_aligned;
138 
139 static struct stack stacks[NR_CPUS];
140 
141 char elf_platform[ELF_PLATFORM_SIZE];
142 EXPORT_SYMBOL(elf_platform);
143 
144 static const char *cpu_name;
145 static const char *machine_name;
146 static char __initdata cmd_line[COMMAND_LINE_SIZE];
147 struct machine_desc *machine_desc __initdata;
148 
149 static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
150 static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
151 #define ENDIANNESS ((char)endian_test.l)
152 
153 DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
154 
155 /*
156  * Standard memory resources
157  */
/*
 * Placeholder iomem resources for the video RAM and the kernel's own
 * text/data regions.  The start/end addresses are filled in by
 * request_standard_resources() once the memory layout is known.
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

/* Named aliases into mem_res[] */
#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]
182 
/*
 * Legacy I/O port ranges (the classic PC parallel-port windows at
 * 0x3bc, 0x378 and 0x278).  These are only claimed when the machine
 * descriptor sets the corresponding reserve_lp* flag — see
 * request_standard_resources().
 */
static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

/* Named aliases into io_res[] */
#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]
207 
/*
 * Human-readable architecture names, indexed by the CPU_ARCH_* value
 * returned by cpu_architecture().  Printed in /proc/cpuinfo and the
 * boot banner.
 */
static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};
227 
/*
 * Decode the architecture version (CPU_ARCH_*) from the CP15 main ID
 * register.  Four ID register layouts are handled in turn: pre-ARM7
 * parts, the ARM7 scheme, the "architecture nibble" scheme, and the
 * revised (0xF) CPUID scheme which requires consulting MMFR0.
 */
static int __get_cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		/* pre-ARM7: no architecture information in the ID register */
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		/* ARM7: bit 23 distinguishes Thumb-capable (v4T) parts */
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		/* post-ARM7: architecture version in bits [18:16] */
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		unsigned int mmfr0;

		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		asm("mrc	p15, 0, %0, c0, c1, 4"
		    : "=r" (mmfr0));
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
260 
/*
 * Return the cached CPU architecture version.  Only valid once
 * setup_processor() has populated __cpu_architecture; an earlier call
 * trips the BUG_ON.
 */
int __pure cpu_architecture(void)
{
	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

	return __cpu_architecture;
}
267 
/*
 * Determine whether the instruction cache can alias, i.e. whether a
 * single physical line may live under more than one virtual index.
 * On ARMv7 this is computed from the I-cache geometry (a way larger
 * than a page can alias); on ARMv6 it is reported directly by the
 * cache type register.
 */
static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		/* select the L1 instruction cache in CSSELR ... */
		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
		    : /* No output operands */
		    : "r" (1));
		isb();
		/* ... then read its geometry back from CCSIDR */
		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
		    : "=r" (id_reg));
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		/* aliasing is possible when one way spans more than a page */
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		/* CTR bit 11 flags an aliasing I-cache on ARMv6 */
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}
300 
/*
 * Work out the cache indexing/tagging policy (the global "cacheid"
 * bitmask) from the cache type register, and report it at boot.
 * ARMv5 and older are always VIVT; ARMv6/v7 are decoded from the
 * register format indicated by CTR bits [31:29].
 */
static void __init cacheid_init(void)
{
	unsigned int cachetype = read_cpuid_cachetype();
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			/* CTR.L1Ip (bits [15:14]) gives the I-cache policy */
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			/* ARMv6 format: bit 23 flags an aliasing D-cache */
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	printk("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}
342 
343 /*
344  * These functions re-use the assembly code in head.S, which
345  * already provide the required functionality.
346  */
347 extern struct proc_info_list *lookup_processor_type(unsigned int);
348 
349 void __init early_print(const char *str, ...)
350 {
351 	extern void printascii(const char *);
352 	char buf[256];
353 	va_list ap;
354 
355 	va_start(ap, str);
356 	vsnprintf(buf, sizeof(buf), str, ap);
357 	va_end(ap);
358 
359 #ifdef CONFIG_DEBUG_LL
360 	printascii(buf);
361 #endif
362 	printk("%s", buf);
363 }
364 
365 static void __init feat_v6_fixup(void)
366 {
367 	int id = read_cpuid_id();
368 
369 	if ((id & 0xff0f0000) != 0x41070000)
370 		return;
371 
372 	/*
373 	 * HWCAP_TLS is available only on 1136 r1p0 and later,
374 	 * see also kuser_get_tls_init.
375 	 */
376 	if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
377 		elf_hwcap &= ~HWCAP_TLS;
378 }
379 
380 /*
381  * cpu_init - initialise one CPU.
382  *
383  * cpu_init sets up the per-CPU stacks.
384  */
void cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	/* sanity check the CPU id before indexing stacks[] with it */
	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"		/* enter IRQ mode, IRQs/FIQs masked */
	"add	r14, %0, %2\n\t"	/* sp = &stacks[cpu].irq[0] */
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"		/* enter ABT mode */
	"add	r14, %0, %4\n\t"	/* sp = &stacks[cpu].abt[0] */
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"		/* enter UND mode */
	"add	r14, %0, %6\n\t"	/* sp = &stacks[cpu].und[0] */
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7"		/* back to SVC mode for normal kernel use */
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}
432 
433 int __cpu_logical_map[NR_CPUS];
434 
435 void __init smp_setup_processor_id(void)
436 {
437 	int i;
438 	u32 cpu = is_smp() ? read_cpuid_mpidr() & 0xff : 0;
439 
440 	cpu_logical_map(0) = cpu;
441 	for (i = 1; i < NR_CPUS; ++i)
442 		cpu_logical_map(i) = i == cpu ? 0 : i;
443 
444 	printk(KERN_INFO "Booting Linux on physical CPU %d\n", cpu);
445 }
446 
/*
 * Identify the CPU, hook up the per-CPU-type function vectors built by
 * the linker from arch/arm/mm/proc-*.S, fill in the ELF hwcaps and
 * platform strings, and initialise the boot CPU's cache id and
 * exception stacks.  Halts if the CPU is not in the supported list.
 */
static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

	/* copy the per-CPU-type method tables selected by the linker list */
#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	/* ENDIANNESS appends 'l' or 'b' to the machine/ELF platform names */
	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~HWCAP_THUMB;
#endif

	feat_v6_fixup();

	cacheid_init();
	cpu_init();
}
497 
/*
 * Print the list of machine IDs this kernel supports and halt.  Called
 * when the boot-provided machine number does not match any compiled-in
 * machine descriptor; this function never returns.
 */
void __init dump_machine_table(void)
{
	struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}
511 
/*
 * Register a physical memory bank with meminfo.  start/size are
 * page-aligned (start rounded up, size rounded down) and, without
 * LPAE, the bank is truncated so start+size stays representable in
 * 32 bits.  Returns 0 on success, -EINVAL when the bank table is full
 * or the aligned bank ends up empty.
 */
int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
{
	struct membank *bank = &meminfo.bank[meminfo.nr_banks];

	if (meminfo.nr_banks >= NR_BANKS) {
		printk(KERN_CRIT "NR_BANKS too low, "
			"ignoring memory at 0x%08llx\n", (long long)start);
		return -EINVAL;
	}

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is appropriately rounded down, start is rounded up.
	 */
	size -= start & ~PAGE_MASK;
	bank->start = PAGE_ALIGN(start);

#ifndef CONFIG_LPAE
	if (bank->start + size < bank->start) {
		printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
			"32-bit physical address space\n", (long long)start);
		/*
		 * To ensure bank->start + bank->size is representable in
		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
		 * This means we lose a page after masking.
		 */
		size = ULONG_MAX - bank->start;
	}
#endif

	/* round the size down to a whole number of pages */
	bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

	/*
	 * Check whether this memory region has non-zero size or
	 * invalid node number.
	 */
	if (bank->size == 0)
		return -EINVAL;

	meminfo.nr_banks++;
	return 0;
}
554 
555 /*
556  * Pick out the memory size.  We look for mem=size@start,
557  * where start and size are "size[KkMm]"
558  */
/*
 * Handle the "mem=size[@start]" early parameter.  The first occurrence
 * discards any banks registered from firmware-provided tags so that
 * the user's specification fully replaces them; subsequent occurrences
 * add further banks.  Start defaults to PHYS_OFFSET when omitted.
 */
static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	phys_addr_t size;
	phys_addr_t start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		meminfo.nr_banks = 0;
	}

	start = PHYS_OFFSET;
	size  = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);
585 early_param("mem", early_mem);
586 
/*
 * Configure the RAM-disk driver from boot parameters.  doload/prompt
 * are booleans; image_start is the floppy block the image starts at;
 * rd_sz overrides the default ramdisk size only when non-zero.  A
 * no-op unless CONFIG_BLK_DEV_RAM is enabled.
 */
static void __init
setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
{
#ifdef CONFIG_BLK_DEV_RAM
	extern int rd_size, rd_image_start, rd_prompt, rd_doload;

	rd_image_start = image_start;
	rd_prompt = prompt;
	rd_doload = doload;

	if (rd_sz)
		rd_size = rd_sz;
#endif
}
601 
/*
 * Populate /proc/iomem and /proc/ioports: claim every memblock region
 * as "System RAM", nest the kernel code/data resources inside the RAM
 * region containing them, and claim the optional video RAM and legacy
 * parallel-port ranges described by the machine descriptor.
 */
static void __init request_standard_resources(struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	/* derive the kernel text/data extents from the linker symbols */
	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		/* nest the kernel image resources under their RAM region */
		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}
646 
647 /*
648  *  Tag parsing.
649  *
650  * This is the new way of passing data to the kernel at boot time.  Rather
651  * than passing a fixed inflexible structure to the kernel, we pass a list
652  * of variable-sized tags to the kernel.  The first tag must be a ATAG_CORE
653  * tag for the list to be recognised (to distinguish the tagged list from
654  * a param_struct).  The list is terminated with a zero-length tag (this tag
655  * is not parsed in any way).
656  */
657 static int __init parse_tag_core(const struct tag *tag)
658 {
659 	if (tag->hdr.size > 2) {
660 		if ((tag->u.core.flags & 1) == 0)
661 			root_mountflags &= ~MS_RDONLY;
662 		ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
663 	}
664 	return 0;
665 }
666 
667 __tagtable(ATAG_CORE, parse_tag_core);
668 
/* ATAG_MEM: register one physical memory bank with the memory layer. */
static int __init parse_tag_mem32(const struct tag *tag)
{
	return arm_add_memory(tag->u.mem.start, tag->u.mem.size);
}

__tagtable(ATAG_MEM, parse_tag_mem32);
675 
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
/* Default text-console geometry, overridden by ATAG_VIDEOTEXT below. */
struct screen_info screen_info = {
 .orig_video_lines	= 30,
 .orig_video_cols	= 80,
 .orig_video_mode	= 0,
 .orig_video_ega_bx	= 0,
 .orig_video_isVGA	= 1,
 .orig_video_points	= 8
};

/* ATAG_VIDEOTEXT: copy the bootloader's text-console state field by field. */
static int __init parse_tag_videotext(const struct tag *tag)
{
	screen_info.orig_x            = tag->u.videotext.x;
	screen_info.orig_y            = tag->u.videotext.y;
	screen_info.orig_video_page   = tag->u.videotext.video_page;
	screen_info.orig_video_mode   = tag->u.videotext.video_mode;
	screen_info.orig_video_cols   = tag->u.videotext.video_cols;
	screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
	screen_info.orig_video_lines  = tag->u.videotext.video_lines;
	screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
	screen_info.orig_video_points = tag->u.videotext.video_points;
	return 0;
}

__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
#endif
702 
/*
 * ATAG_RAMDISK: configure the initial ramdisk.  Flag bit 0 set means
 * "don't load" and bit 1 set means "don't prompt", so both are passed
 * to setup_ramdisk() inverted, as positive load/prompt booleans.
 */
static int __init parse_tag_ramdisk(const struct tag *tag)
{
	setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
		      (tag->u.ramdisk.flags & 2) == 0,
		      tag->u.ramdisk.start, tag->u.ramdisk.size);
	return 0;
}

__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);
712 
/* ATAG_SERIAL: record the 64-bit board serial number (shown in /proc/cpuinfo). */
static int __init parse_tag_serialnr(const struct tag *tag)
{
	system_serial_low = tag->u.serialnr.low;
	system_serial_high = tag->u.serialnr.high;
	return 0;
}

__tagtable(ATAG_SERIAL, parse_tag_serialnr);
721 
/* ATAG_REVISION: record the board revision (shown in /proc/cpuinfo). */
static int __init parse_tag_revision(const struct tag *tag)
{
	system_rev = tag->u.revision.rev;
	return 0;
}

__tagtable(ATAG_REVISION, parse_tag_revision);
729 
/*
 * ATAG_CMDLINE: merge the bootloader's command line into the default
 * one.  Depending on configuration it is appended (CMDLINE_EXTEND),
 * ignored in favour of the built-in line (CMDLINE_FORCE), or replaces
 * the default entirely.
 */
static int __init parse_tag_cmdline(const struct tag *tag)
{
#if defined(CONFIG_CMDLINE_EXTEND)
	strlcat(default_command_line, " ", COMMAND_LINE_SIZE);
	strlcat(default_command_line, tag->u.cmdline.cmdline,
		COMMAND_LINE_SIZE);
#elif defined(CONFIG_CMDLINE_FORCE)
	pr_warning("Ignoring tag cmdline (using the default kernel command line)\n");
#else
	strlcpy(default_command_line, tag->u.cmdline.cmdline,
		COMMAND_LINE_SIZE);
#endif
	return 0;
}

__tagtable(ATAG_CMDLINE, parse_tag_cmdline);
746 
747 /*
748  * Scan the tag table for this tag, and call its parse function.
749  * The tag table is built by the linker from all the __tagtable
750  * declarations.
751  */
static int __init parse_tag(const struct tag *tag)
{
	extern struct tagtable __tagtable_begin, __tagtable_end;
	struct tagtable *t;

	/* scan the linker-built table for a handler matching this tag id */
	for (t = &__tagtable_begin; t < &__tagtable_end; t++)
		if (tag->hdr.tag == t->tag) {
			t->parse(tag);
			break;
		}

	/* nonzero iff the loop broke early, i.e. a handler was found */
	return t < &__tagtable_end;
}
765 
766 /*
767  * Parse all tags in the list, checking both the global and architecture
768  * specific tag tables.
769  */
static void __init parse_tags(const struct tag *t)
{
	/* walk the tag list until the zero-sized terminator tag */
	for (; t->hdr.size; t = tag_next(t))
		if (!parse_tag(t))
			printk(KERN_WARNING
				"Ignoring unrecognised tag 0x%08x\n",
				t->hdr.tag);
}
778 
779 /*
780  * This holds our defaults.
781  */
static struct init_tags {
	struct tag_header hdr1;
	struct tag_core   core;
	struct tag_header hdr2;
	struct tag_mem32  mem;
	struct tag_header hdr3;
} init_tags __initdata = {
	{ tag_size(tag_core), ATAG_CORE },
	{ 1, PAGE_SIZE, 0xff },			/* flags bit 0 set: keep root read-only */
	{ tag_size(tag_mem32), ATAG_MEM },
	{ MEM_SIZE },				/* one bank; start patched to PHYS_OFFSET later */
	{ 0, ATAG_NONE }			/* zero-sized tag terminates the list */
};
795 
static int __init customize_machine(void)
{
	/* customizes platform devices, or adds new ones */
	if (machine_desc->init_machine)
		machine_desc->init_machine();
	return 0;
}
arch_initcall(customize_machine);
804 
/* Invoke the machine's optional late-init hook at late_initcall time. */
static int __init init_machine_late(void)
{
	if (machine_desc->init_late)
		machine_desc->init_late();
	return 0;
}
late_initcall(init_machine_late);
812 
813 #ifdef CONFIG_KEXEC
814 static inline unsigned long long get_total_mem(void)
815 {
816 	unsigned long total;
817 
818 	total = max_low_pfn - min_low_pfn;
819 	return total << PAGE_SHIFT;
820 }
821 
/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves memory area given in "crashkernel=" kernel command
 * line parameter. The memory reserved is used by a dump capture kernel when
 * primary kernel is crashing.  It is a no-op when the parameter is absent
 * or malformed, and only warns when the requested range cannot be
 * reserved.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	/* nonzero means "crashkernel=" was missing or unparseable */
	if (ret)
		return;

	ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
	if (ret < 0) {
		printk(KERN_WARNING "crashkernel reservation failed - "
		       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
		return;
	}

	printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
	       "for crashkernel (System RAM: %ldMB)\n",
	       (unsigned long)(crash_size >> 20),
	       (unsigned long)(crash_base >> 20),
	       (unsigned long)(total_mem >> 20));

	/* publish the reservation in /proc/iomem for kexec tools */
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
858 #else
859 static inline void reserve_crashkernel(void) {}
860 #endif /* CONFIG_KEXEC */
861 
/*
 * Neutralise all ATAG_MEM entries in a tag list (by retagging them
 * ATAG_NONE) so they won't override memory banks already registered,
 * e.g. by a machine fixup or the "mem=" parameter.
 */
static void __init squash_mem_tags(struct tag *tag)
{
	for (; tag->hdr.size; tag = tag_next(tag))
		if (tag->hdr.tag == ATAG_MEM)
			tag->hdr.tag = ATAG_NONE;
}
868 
/*
 * ATAG-based machine setup: look up the machine descriptor for the
 * boot-provided machine number, locate the tag list (boot register,
 * machine-specified offset, or the built-in defaults), run the
 * machine fixup, parse the tags and establish boot_command_line.
 * Does not return when the machine number is unrecognised.
 */
static struct machine_desc * __init setup_machine_tags(unsigned int nr)
{
	struct tag *tags = (struct tag *)&init_tags;
	struct machine_desc *mdesc = NULL, *p;
	char *from = default_command_line;

	/* the default bank starts at the base of RAM */
	init_tags.mem.start = PHYS_OFFSET;

	/*
	 * locate machine in the list of supported machines.
	 */
	for_each_machine_desc(p)
		if (nr == p->nr) {
			printk("Machine: %s\n", p->name);
			mdesc = p;
			break;
		}

	if (!mdesc) {
		early_print("\nError: unrecognized/unsupported machine ID"
			" (r1 = 0x%08x).\n\n", nr);
		dump_machine_table(); /* does not return */
	}

	/* prefer the tag pointer handed over in r2 by the bootloader */
	if (__atags_pointer)
		tags = phys_to_virt(__atags_pointer);
	else if (mdesc->atag_offset)
		tags = (void *)(PAGE_OFFSET + mdesc->atag_offset);

#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
	/*
	 * If we have the old style parameters, convert them to
	 * a tag list.
	 */
	if (tags->hdr.tag != ATAG_CORE)
		convert_to_tag_list(tags);
#endif

	if (tags->hdr.tag != ATAG_CORE) {
#if defined(CONFIG_OF)
		/*
		 * If CONFIG_OF is set, then assume this is a reasonably
		 * modern system that should pass boot parameters
		 */
		early_print("Warning: Neither atags nor dtb found\n");
#endif
		/* fall back to the compiled-in defaults */
		tags = (struct tag *)&init_tags;
	}

	if (mdesc->fixup)
		mdesc->fixup(tags, &from, &meminfo);

	if (tags->hdr.tag == ATAG_CORE) {
		/* fixup-registered banks take precedence over ATAG_MEM */
		if (meminfo.nr_banks != 0)
			squash_mem_tags(tags);
		save_atags(tags);
		parse_tags(tags);
	}

	/* parse_early_param needs a boot_command_line */
	strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);

	return mdesc;
}
933 
934 static int __init meminfo_cmp(const void *_a, const void *_b)
935 {
936 	const struct membank *a = _a, *b = _b;
937 	long cmp = bank_pfn_start(a) - bank_pfn_start(b);
938 	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
939 }
940 
/*
 * Report which exception mode the CPUs booted in (with
 * CONFIG_ARM_VIRT_EXT): all in HYP (virtualization usable), mixed
 * modes (warn — likely a firmware/bootloader problem), or all in SVC.
 */
void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}
955 
/*
 * Architecture-specific boot-time setup for ARM: identify the CPU and
 * machine (device tree first, ATAGs as fallback), establish the kernel
 * command line, initialise memory (meminfo/memblock/paging), claim the
 * standard resources, and run early machine hooks.  *cmdline_p is
 * pointed at the preserved copy of the boot command line.
 */
void __init setup_arch(char **cmdline_p)
{
	struct machine_desc *mdesc;

	setup_processor();
	/* try a flattened device tree first, fall back to ATAGs */
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;

	setup_dma_zone(mdesc);

	if (mdesc->restart_mode)
		reboot_setup(&mdesc->restart_mode);

	/* record the kernel image extents in the init mm */
	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

	/* banks must be sorted before the memory layout is sanity-checked */
	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
	sanity_check_meminfo();
	arm_memblock_init(&meminfo, mdesc);

	paging_init(mdesc);
	request_standard_resources(mdesc);

	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;

	unflatten_device_tree();

#ifdef CONFIG_SMP
	if (is_smp())
		smp_init_cpus();
#endif

	/* on SMP this check runs after the secondaries come up instead */
	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

	tcm_init();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}
1022 
1023 
/*
 * Register every possible CPU with the sysfs CPU subsystem, marking
 * each as hotpluggable.
 */
static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = 1;
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);
1037 
1038 #ifdef CONFIG_HAVE_PROC_CPU
1039 static int __init proc_cpu_init(void)
1040 {
1041 	struct proc_dir_entry *res;
1042 
1043 	res = proc_mkdir("cpu", NULL);
1044 	if (!res)
1045 		return -ENOMEM;
1046 	return 0;
1047 }
1048 fs_initcall(proc_cpu_init);
1049 #endif
1050 
/*
 * Feature names for the /proc/cpuinfo "Features" line.  The array
 * index corresponds to the hwcap bit tested in c_show(), so the order
 * must not change; the list is NULL-terminated.
 */
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	NULL
};
1073 
/*
 * seq_file show callback producing the whole of /proc/cpuinfo:
 * processor identification, per-CPU BogoMIPS, the hwcap feature list,
 * decoded CPUID fields, and the board hardware/revision/serial lines.
 */
static int c_show(struct seq_file *m, void *v)
{
	int i;

	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
		   cpu_name, read_cpuid_id() & 15, elf_platform);

#if defined(CONFIG_SMP)
	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
	}
#else /* CONFIG_SMP */
	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000/HZ),
		   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

	/* dump out the processor features */
	seq_puts(m, "Features\t: ");

	for (i = 0; hwcap_str[i]; i++)
		if (elf_hwcap & (1 << i))
			seq_printf(m, "%s ", hwcap_str[i]);

	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
	seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

	/* variant/part field layout depends on the ID register scheme */
	if ((read_cpuid_id() & 0x0008f000) == 0x00000000) {
		/* pre-ARM7 */
		seq_printf(m, "CPU part\t: %07x\n", read_cpuid_id() >> 4);
	} else {
		if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
			/* ARM7 */
			seq_printf(m, "CPU variant\t: 0x%02x\n",
				   (read_cpuid_id() >> 16) & 127);
		} else {
			/* post-ARM7 */
			seq_printf(m, "CPU variant\t: 0x%x\n",
				   (read_cpuid_id() >> 20) & 15);
		}
		seq_printf(m, "CPU part\t: 0x%03x\n",
			   (read_cpuid_id() >> 4) & 0xfff);
	}
	seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);

	seq_puts(m, "\n");

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}
1136 
1137 static void *c_start(struct seq_file *m, loff_t *pos)
1138 {
1139 	return *pos < 1 ? (void *)1 : NULL;
1140 }
1141 
1142 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1143 {
1144 	++*pos;
1145 	return NULL;
1146 }
1147 
/* seq_file stop callback: nothing to release for /proc/cpuinfo. */
static void c_stop(struct seq_file *m, void *v)
{
}
1151 
/* seq_file operations backing /proc/cpuinfo. */
const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};
1158