xref: /openbmc/linux/arch/arm/kernel/setup.c (revision 565d76cb)
1 /*
2  *  linux/arch/arm/kernel/setup.c
3  *
4  *  Copyright (C) 1995-2001 Russell King
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/stddef.h>
13 #include <linux/ioport.h>
14 #include <linux/delay.h>
15 #include <linux/utsname.h>
16 #include <linux/initrd.h>
17 #include <linux/console.h>
18 #include <linux/bootmem.h>
19 #include <linux/seq_file.h>
20 #include <linux/screen_info.h>
21 #include <linux/init.h>
22 #include <linux/kexec.h>
23 #include <linux/crash_dump.h>
24 #include <linux/root_dev.h>
25 #include <linux/cpu.h>
26 #include <linux/interrupt.h>
27 #include <linux/smp.h>
28 #include <linux/fs.h>
29 #include <linux/proc_fs.h>
30 #include <linux/memblock.h>
31 
32 #include <asm/unified.h>
33 #include <asm/cpu.h>
34 #include <asm/cputype.h>
35 #include <asm/elf.h>
36 #include <asm/procinfo.h>
37 #include <asm/sections.h>
38 #include <asm/setup.h>
39 #include <asm/smp_plat.h>
40 #include <asm/mach-types.h>
41 #include <asm/cacheflush.h>
42 #include <asm/cachetype.h>
43 #include <asm/tlbflush.h>
44 
45 #include <asm/mach/arch.h>
46 #include <asm/mach/irq.h>
47 #include <asm/mach/time.h>
48 #include <asm/traps.h>
49 #include <asm/unwind.h>
50 
51 #if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
52 #include "compat.h"
53 #endif
54 #include "atags.h"
55 #include "tcm.h"
56 
57 #ifndef MEM_SIZE
58 #define MEM_SIZE	(16*1024*1024)
59 #endif
60 
#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
/* FPE implementation name from the "fpe=" command line option. */
char fpe_type[8];

/*
 * Parse the "fpe=" kernel parameter.
 *
 * The previous fixed-size memcpy(fpe_type, line, 8) could read past
 * the end of 'line' when the argument was shorter than eight bytes
 * and could leave fpe_type without a NUL terminator.  Use a bounded
 * format instead, which always terminates the destination.
 */
static int __init fpe_setup(char *line)
{
	snprintf(fpe_type, sizeof(fpe_type), "%s", line);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif
72 
/* Implemented elsewhere in arch/arm (mm code and process handling). */
extern void paging_init(struct machine_desc *desc);
extern void reboot_setup(char *str);

/* Raw main ID register (MIDR) value, saved by the early assembly code. */
unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
/* Machine number handed over by the bootloader. */
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
/* Cache model flags (CACHEID_*), set up by cacheid_init(). */
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

/* Physical address of the ATAG list passed by the bootloader, if any. */
unsigned int __atags_pointer __initdata;

/* Board revision from ATAG_REVISION; shown in /proc/cpuinfo. */
unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

/* 64-bit board serial number from ATAG_SERIAL, split into two words. */
unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

/* HWCAP_* bits advertised to userspace via the ELF auxiliary vector. */
unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);


/*
 * Per-CPU-type operation tables, copied out of the matched
 * proc_info_list by setup_processor() when this kernel is built to
 * support more than one CPU/TLB/user/cache implementation.
 */
#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Small per-CPU stacks for the IRQ, abort and undefined-instruction
 * exception modes; the banked stack pointers are installed by
 * cpu_init().
 */
struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

/* ELF platform string, e.g. reported via /proc/cpuinfo. */
char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
/* Boot command line handed to start_kernel via *cmdline_p. */
static char __initdata cmd_line[COMMAND_LINE_SIZE];
struct machine_desc *machine_desc __initdata;

static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
/* Run-time endianness probe: the low byte of .l is 'l' on a
 * little-endian CPU and 'b' on a big-endian one. */
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
136 
137 /*
138  * Standard memory resources
139  */
/* Placeholder entries; start/end are filled in by
 * request_standard_resources() before registration. */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel text",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

/* Convenience aliases into mem_res[]. */
#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

/* Legacy PC parallel-port I/O ranges, claimed only when the machine
 * descriptor sets the corresponding reserve_lp* flag. */
static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

/* Convenience aliases into io_res[]. */
#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]
189 
/*
 * Architecture name strings, indexed by the CPU_ARCH_* value returned
 * by cpu_architecture(); used for the boot banner and /proc/cpuinfo.
 */
static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};
209 
/*
 * Decode the CPU architecture version (a CPU_ARCH_* constant) from the
 * main ID register.  Several historical ID register layouts exist and
 * are distinguished by the masked comparisons below.
 */
int cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		/* pre-ARM7 style ID with no architecture information */
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		/* ARM7 style ID: bit 23 distinguishes v4T from v3 */
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		/* post-ARM7: architecture encoded directly in bits [18:16] */
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		unsigned int mmfr0;

		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		asm("mrc	p15, 0, %0, c0, c1, 4"
		    : "=r" (mmfr0));
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
242 
/*
 * Return non-zero if the instruction cache may alias, i.e. one cache
 * way covers more than a page (v7 geometry check) or the cache type
 * register's aliasing bit is set (v6).
 */
static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		/* select the instruction cache via CSSELR ... */
		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
		    : /* No output operands */
		    : "r" (1));
		isb();
		/* ... then read its geometry from CCSIDR */
		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
		    : "=r" (id_reg));
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		/* line_size * num_sets is the bytes covered by one way */
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		/* v6 cache type register: bit 11 flags an aliasing I-cache */
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}
271 
/*
 * Determine the cache model (VIVT, VIPT aliasing or VIPT
 * non-aliasing, plus I-cache properties) from the cache type register
 * and store it in the global 'cacheid', then report it on the console.
 */
static void __init cacheid_init(void)
{
	unsigned int cachetype = read_cpuid_cachetype();
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			cacheid = CACHEID_VIPT_NONALIASING;
			/* bits [15:14]: 01 marks an ASID-tagged I-cache */
			if ((cachetype & (3 << 14)) == 1 << 14)
				cacheid |= CACHEID_ASID_TAGGED;
			else if (cpu_has_aliasing_icache(CPU_ARCH_ARMv7))
				cacheid |= CACHEID_VIPT_I_ALIASING;
		} else if (cachetype & (1 << 23)) {
			cacheid = CACHEID_VIPT_ALIASING;
		} else {
			cacheid = CACHEID_VIPT_NONALIASING;
			if (cpu_has_aliasing_icache(CPU_ARCH_ARMv6))
				cacheid |= CACHEID_VIPT_I_ALIASING;
		}
	} else {
		/* pre-v6 CPUs all have VIVT caches */
		cacheid = CACHEID_VIVT;
	}

	printk("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}
305 
306 /*
307  * These functions re-use the assembly code in head.S, which
308  * already provide the required functionality.
309  */
extern struct proc_info_list *lookup_processor_type(unsigned int);

/*
 * printk() a formatted message; when CONFIG_DEBUG_LL is enabled, also
 * push it through the low-level printascii() so the text is visible
 * even before any console has been registered.
 */
static void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}
327 
328 static void __init feat_v6_fixup(void)
329 {
330 	int id = read_cpuid_id();
331 
332 	if ((id & 0xff0f0000) != 0x41070000)
333 		return;
334 
335 	/*
336 	 * HWCAP_TLS is available only on 1136 r1p0 and later,
337 	 * see also kuser_get_tls_init.
338 	 */
339 	if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
340 		elf_hwcap &= ~HWCAP_TLS;
341 }
342 
/*
 * Identify the boot CPU from its ID register, install the matching
 * processor/TLB/user/cache operation tables, set up hwcaps and the
 * ELF platform/uname strings, then initialise the cache model and
 * the processor itself.  Hangs if the CPU is not supported.
 */
static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;

	/* copy the per-CPU operation tables when multiple are built in */
#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	/* ENDIANNESS appends 'l' or 'b' to the architecture name */
	sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
	sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~HWCAP_THUMB;
#endif

	feat_v6_fixup();

	cacheid_init();
	cpu_proc_init();
}
390 
/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks: it points the banked stack
 * pointers of the IRQ, abort and undefined-instruction exception
 * modes at this CPU's entry in stacks[].
 */
void cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 *
	 * Each msr/add/mov triple switches into one exception mode
	 * (with IRQ/FIQ masked) and loads that mode's banked sp; the
	 * final msr returns to SVC mode.
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}
441 
/*
 * Find the machine_desc for machine number 'nr' in the linker-built
 * __arch_info table.  If no entry matches, print the list of machines
 * this kernel supports and spin forever -- boot cannot continue
 * without a machine descriptor.
 */
static struct machine_desc * __init setup_machine(unsigned int nr)
{
	extern struct machine_desc __arch_info_begin[], __arch_info_end[];
	struct machine_desc *p;

	/*
	 * locate machine in the list of supported machines.
	 */
	for (p = __arch_info_begin; p < __arch_info_end; p++)
		if (nr == p->nr) {
			printk("Machine: %s\n", p->name);
			return p;
		}

	early_print("\n"
		"Error: unrecognized/unsupported machine ID (r1 = 0x%08x).\n\n"
		"Available machine support:\n\nID (hex)\tNAME\n", nr);

	for (p = __arch_info_begin; p < __arch_info_end; p++)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}
468 
469 static int __init arm_add_memory(unsigned long start, unsigned long size)
470 {
471 	struct membank *bank = &meminfo.bank[meminfo.nr_banks];
472 
473 	if (meminfo.nr_banks >= NR_BANKS) {
474 		printk(KERN_CRIT "NR_BANKS too low, "
475 			"ignoring memory at %#lx\n", start);
476 		return -EINVAL;
477 	}
478 
479 	/*
480 	 * Ensure that start/size are aligned to a page boundary.
481 	 * Size is appropriately rounded down, start is rounded up.
482 	 */
483 	size -= start & ~PAGE_MASK;
484 	bank->start = PAGE_ALIGN(start);
485 	bank->size  = size & PAGE_MASK;
486 
487 	/*
488 	 * Check whether this memory region has non-zero size or
489 	 * invalid node number.
490 	 */
491 	if (bank->size == 0)
492 		return -EINVAL;
493 
494 	meminfo.nr_banks++;
495 	return 0;
496 }
497 
498 /*
499  * Pick out the memory size.  We look for mem=size@start,
500  * where start and size are "size[KkMm]"
501  */
502 static int __init early_mem(char *p)
503 {
504 	static int usermem __initdata = 0;
505 	unsigned long size, start;
506 	char *endp;
507 
508 	/*
509 	 * If the user specifies memory size, we
510 	 * blow away any automatically generated
511 	 * size.
512 	 */
513 	if (usermem == 0) {
514 		usermem = 1;
515 		meminfo.nr_banks = 0;
516 	}
517 
518 	start = PHYS_OFFSET;
519 	size  = memparse(p, &endp);
520 	if (*endp == '@')
521 		start = memparse(endp + 1, NULL);
522 
523 	arm_add_memory(start, size);
524 
525 	return 0;
526 }
527 early_param("mem", early_mem);
528 
529 static void __init
530 setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
531 {
532 #ifdef CONFIG_BLK_DEV_RAM
533 	extern int rd_size, rd_image_start, rd_prompt, rd_doload;
534 
535 	rd_image_start = image_start;
536 	rd_prompt = prompt;
537 	rd_doload = doload;
538 
539 	if (rd_sz)
540 		rd_size = rd_sz;
541 #endif
542 }
543 
/*
 * Register the standard /proc/iomem and /proc/ioports entries: one
 * "System RAM" resource per memblock region with the kernel text and
 * data nested inside, plus the optional video RAM and legacy parallel
 * port ranges described by the machine descriptor.
 */
static void __init request_standard_resources(struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		/* nest the kernel image inside the RAM bank holding it */
		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}
588 
589 /*
590  *  Tag parsing.
591  *
592  * This is the new way of passing data to the kernel at boot time.  Rather
593  * than passing a fixed inflexible structure to the kernel, we pass a list
594  * of variable-sized tags to the kernel.  The first tag must be a ATAG_CORE
595  * tag for the list to be recognised (to distinguish the tagged list from
596  * a param_struct).  The list is terminated with a zero-length tag (this tag
597  * is not parsed in any way).
598  */
599 static int __init parse_tag_core(const struct tag *tag)
600 {
601 	if (tag->hdr.size > 2) {
602 		if ((tag->u.core.flags & 1) == 0)
603 			root_mountflags &= ~MS_RDONLY;
604 		ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
605 	}
606 	return 0;
607 }
608 
609 __tagtable(ATAG_CORE, parse_tag_core);
610 
611 static int __init parse_tag_mem32(const struct tag *tag)
612 {
613 	return arm_add_memory(tag->u.mem.start, tag->u.mem.size);
614 }
615 
616 __tagtable(ATAG_MEM, parse_tag_mem32);
617 
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
/* Default console geometry; overridden by an ATAG_VIDEOTEXT tag. */
struct screen_info screen_info = {
 .orig_video_lines	= 30,
 .orig_video_cols	= 80,
 .orig_video_mode	= 0,
 .orig_video_ega_bx	= 0,
 .orig_video_isVGA	= 1,
 .orig_video_points	= 8
};

/* ATAG_VIDEOTEXT: copy the bootloader's text console state verbatim. */
static int __init parse_tag_videotext(const struct tag *tag)
{
	screen_info.orig_x            = tag->u.videotext.x;
	screen_info.orig_y            = tag->u.videotext.y;
	screen_info.orig_video_page   = tag->u.videotext.video_page;
	screen_info.orig_video_mode   = tag->u.videotext.video_mode;
	screen_info.orig_video_cols   = tag->u.videotext.video_cols;
	screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
	screen_info.orig_video_lines  = tag->u.videotext.video_lines;
	screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
	screen_info.orig_video_points = tag->u.videotext.video_points;
	return 0;
}

__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
#endif
644 
645 static int __init parse_tag_ramdisk(const struct tag *tag)
646 {
647 	setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
648 		      (tag->u.ramdisk.flags & 2) == 0,
649 		      tag->u.ramdisk.start, tag->u.ramdisk.size);
650 	return 0;
651 }
652 
653 __tagtable(ATAG_RAMDISK, parse_tag_ramdisk);
654 
/* ATAG_SERIAL: 64-bit board serial number, reported in /proc/cpuinfo. */
static int __init parse_tag_serialnr(const struct tag *tag)
{
	system_serial_low = tag->u.serialnr.low;
	system_serial_high = tag->u.serialnr.high;
	return 0;
}

__tagtable(ATAG_SERIAL, parse_tag_serialnr);
663 
/* ATAG_REVISION: board revision, reported in /proc/cpuinfo. */
static int __init parse_tag_revision(const struct tag *tag)
{
	system_rev = tag->u.revision.rev;
	return 0;
}

__tagtable(ATAG_REVISION, parse_tag_revision);
671 
/*
 * ATAG_CMDLINE: adopt the bootloader-supplied command line, unless the
 * kernel was configured to force its built-in CONFIG_CMDLINE instead.
 */
static int __init parse_tag_cmdline(const struct tag *tag)
{
#ifndef CONFIG_CMDLINE_FORCE
	strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
#else
	pr_warning("Ignoring tag cmdline (using the default kernel command line)\n");
#endif /* CONFIG_CMDLINE_FORCE */
	return 0;
}

__tagtable(ATAG_CMDLINE, parse_tag_cmdline);
683 
684 /*
685  * Scan the tag table for this tag, and call its parse function.
686  * The tag table is built by the linker from all the __tagtable
687  * declarations.
688  */
static int __init parse_tag(const struct tag *tag)
{
	extern struct tagtable __tagtable_begin, __tagtable_end;
	struct tagtable *t;

	for (t = &__tagtable_begin; t < &__tagtable_end; t++)
		if (tag->hdr.tag == t->tag) {
			t->parse(tag);
			break;
		}

	/* non-zero iff some handler in the table claimed this tag */
	return t < &__tagtable_end;
}
702 
703 /*
704  * Parse all tags in the list, checking both the global and architecture
705  * specific tag tables.
706  */
707 static void __init parse_tags(const struct tag *t)
708 {
709 	for (; t->hdr.size; t = tag_next(t))
710 		if (!parse_tag(t))
711 			printk(KERN_WARNING
712 				"Ignoring unrecognised tag 0x%08x\n",
713 				t->hdr.tag);
714 }
715 
/*
 * This holds our defaults: a minimal valid ATAG list declaring a
 * single MEM_SIZE-byte memory bank.  It is used when the bootloader
 * supplies no usable parameters; mem.start is fixed up to PHYS_OFFSET
 * at run time in setup_arch().
 */
static struct init_tags {
	struct tag_header hdr1;
	struct tag_core   core;
	struct tag_header hdr2;
	struct tag_mem32  mem;
	struct tag_header hdr3;
} init_tags __initdata = {
	{ tag_size(tag_core), ATAG_CORE },
	{ 1, PAGE_SIZE, 0xff },
	{ tag_size(tag_mem32), ATAG_MEM },
	{ MEM_SIZE },
	{ 0, ATAG_NONE }
};
732 
733 static int __init customize_machine(void)
734 {
735 	/* customizes platform devices, or adds new ones */
736 	if (machine_desc->init_machine)
737 		machine_desc->init_machine();
738 	return 0;
739 }
740 arch_initcall(customize_machine);
741 
742 #ifdef CONFIG_KEXEC
743 static inline unsigned long long get_total_mem(void)
744 {
745 	unsigned long total;
746 
747 	total = max_low_pfn - min_low_pfn;
748 	return total << PAGE_SHIFT;
749 }
750 
/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves the memory area given in the "crashkernel="
 * kernel command line parameter. The memory reserved is used by a
 * dump capture kernel when the primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	/* non-zero means no (or invalid) crashkernel= option */
	if (ret)
		return;

	ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
	if (ret < 0) {
		printk(KERN_WARNING "crashkernel reservation failed - "
		       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
		return;
	}

	printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
	       "for crashkernel (System RAM: %ldMB)\n",
	       (unsigned long)(crash_size >> 20),
	       (unsigned long)(crash_base >> 20),
	       (unsigned long)(total_mem >> 20));

	/* expose the region via /proc/iomem for the kexec tools */
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */
790 
791 /*
792  * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by
793  * is_kdump_kernel() to determine if we are booting after a panic. Hence
794  * ifdef it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE.
795  */
796 
797 #ifdef CONFIG_CRASH_DUMP
798 /*
799  * elfcorehdr= specifies the location of elf core header stored by the crashed
800  * kernel. This option will be passed by kexec loader to the capture kernel.
801  */
802 static int __init setup_elfcorehdr(char *arg)
803 {
804 	char *end;
805 
806 	if (!arg)
807 		return -EINVAL;
808 
809 	elfcorehdr_addr = memparse(arg, &end);
810 	return end > arg ? 0 : -EINVAL;
811 }
812 early_param("elfcorehdr", setup_elfcorehdr);
813 #endif /* CONFIG_CRASH_DUMP */
814 
815 static void __init squash_mem_tags(struct tag *tag)
816 {
817 	for (; tag->hdr.size; tag = tag_next(tag))
818 		if (tag->hdr.tag == ATAG_MEM)
819 			tag->hdr.tag = ATAG_NONE;
820 }
821 
/*
 * Architecture-level boot setup, called from start_kernel().  Identifies
 * the CPU and machine, locates and parses the boot parameters (ATAG
 * list), establishes the command line and memory layout, and runs the
 * machine's early init hook.
 */
void __init setup_arch(char **cmdline_p)
{
	struct tag *tags = (struct tag *)&init_tags;
	struct machine_desc *mdesc;
	char *from = default_command_line;

	/* the built-in default memory bank starts at the base of RAM */
	init_tags.mem.start = PHYS_OFFSET;

	unwind_init();

	setup_processor();
	mdesc = setup_machine(machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;

	if (mdesc->soft_reboot)
		reboot_setup("s");

	/* prefer the bootloader's ATAG pointer over the machine default */
	if (__atags_pointer)
		tags = phys_to_virt(__atags_pointer);
	else if (mdesc->boot_params) {
#ifdef CONFIG_MMU
		/*
		 * We still are executing with a minimal MMU mapping created
		 * with the presumption that the machine default for this
		 * is located in the first MB of RAM.  Anything else will
		 * fault and silently hang the kernel at this point.
		 */
		if (mdesc->boot_params < PHYS_OFFSET ||
		    mdesc->boot_params >= PHYS_OFFSET + SZ_1M) {
			printk(KERN_WARNING
			       "Default boot params at physical 0x%08lx out of reach\n",
			       mdesc->boot_params);
		} else
#endif
		{
			tags = phys_to_virt(mdesc->boot_params);
		}
	}

#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
	/*
	 * If we have the old style parameters, convert them to
	 * a tag list.
	 */
	if (tags->hdr.tag != ATAG_CORE)
		convert_to_tag_list(tags);
#endif
	/* still no valid tag list: fall back to the built-in defaults */
	if (tags->hdr.tag != ATAG_CORE)
		tags = (struct tag *)&init_tags;

	if (mdesc->fixup)
		mdesc->fixup(mdesc, tags, &from, &meminfo);

	if (tags->hdr.tag == ATAG_CORE) {
		/* if the fixup populated meminfo itself, discard the
		 * bootloader's ATAG_MEM entries */
		if (meminfo.nr_banks != 0)
			squash_mem_tags(tags);
		save_atags(tags);
		parse_tags(tags);
	}

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* parse_early_param needs a boot_command_line */
	strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

	arm_memblock_init(&meminfo, mdesc);

	paging_init(mdesc);
	request_standard_resources(mdesc);

#ifdef CONFIG_SMP
	if (is_smp())
		smp_init_cpus();
#endif
	reserve_crashkernel();

	cpu_init();
	tcm_init();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
	early_trap_init();

	if (mdesc->init_early)
		mdesc->init_early();
}
927 
928 
929 static int __init topology_init(void)
930 {
931 	int cpu;
932 
933 	for_each_possible_cpu(cpu) {
934 		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
935 		cpuinfo->cpu.hotpluggable = 1;
936 		register_cpu(&cpuinfo->cpu, cpu);
937 	}
938 
939 	return 0;
940 }
941 subsys_initcall(topology_init);
942 
#ifdef CONFIG_HAVE_PROC_CPU
/*
 * Create the /proc/cpu directory; other code (e.g. the alignment
 * handler) populates entries beneath it later.
 */
static int __init proc_cpu_init(void)
{
	if (proc_mkdir("cpu", NULL) == NULL)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif
955 
/*
 * Human-readable names for the HWCAP_* bits, in bit order; printed on
 * the "Features" line of /proc/cpuinfo.  NULL-terminated.
 */
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	NULL
};
974 
/*
 * Render /proc/cpuinfo: processor name, per-CPU BogoMIPS, hwcap
 * feature strings, decoded CPUID fields, and board identification.
 */
static int c_show(struct seq_file *m, void *v)
{
	int i;

	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
		   cpu_name, read_cpuid_id() & 15, elf_platform);

#if defined(CONFIG_SMP)
	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
	}
#else /* CONFIG_SMP */
	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000/HZ),
		   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

	/* dump out the processor features */
	seq_puts(m, "Features\t: ");

	for (i = 0; hwcap_str[i]; i++)
		if (elf_hwcap & (1 << i))
			seq_printf(m, "%s ", hwcap_str[i]);

	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
	seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

	/* decode variant/part fields per the ID register layout in use
	 * (same masks as cpu_architecture() above) */
	if ((read_cpuid_id() & 0x0008f000) == 0x00000000) {
		/* pre-ARM7 */
		seq_printf(m, "CPU part\t: %07x\n", read_cpuid_id() >> 4);
	} else {
		if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
			/* ARM7 */
			seq_printf(m, "CPU variant\t: 0x%02x\n",
				   (read_cpuid_id() >> 16) & 127);
		} else {
			/* post-ARM7 */
			seq_printf(m, "CPU variant\t: 0x%x\n",
				   (read_cpuid_id() >> 20) & 15);
		}
		seq_printf(m, "CPU part\t: 0x%03x\n",
			   (read_cpuid_id() >> 4) & 0xfff);
	}
	seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);

	seq_puts(m, "\n");

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}
1037 
1038 static void *c_start(struct seq_file *m, loff_t *pos)
1039 {
1040 	return *pos < 1 ? (void *)1 : NULL;
1041 }
1042 
1043 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1044 {
1045 	++*pos;
1046 	return NULL;
1047 }
1048 
/* seq_file stop: nothing to release. */
static void c_stop(struct seq_file *m, void *v)
{
}
1052 
/* seq_file operations backing /proc/cpuinfo. */
const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};
1059