/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>

#include <asm/unified.h>
#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
#include <asm/virt.h>

#include "atags.h"


#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void paging_init(struct machine_desc *desc);
extern void sanity_check_meminfo(void);
extern void reboot_setup(char *str);
extern void setup_dma_zone(struct machine_desc *desc);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);


#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
struct machine_desc *machine_desc __initdata;

static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)
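/*
 * Added note (not in the original source): the union above overlays a
 * four-byte char array on an unsigned long.  Casting .l to char keeps
 * only the least significant byte of the value, which is .c[0] ('l') on
 * a little-endian CPU and .c[3] ('b') on a big-endian one.  The
 * character is appended to the machine/platform strings in
 * setup_processor(), yielding the familiar "armv7l"-style uname output.
 */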
DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

static int __get_cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		unsigned int mmfr0;

		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		asm("mrc	p15, 0, %0, c0, c1, 4"
		    : "=r" (mmfr0));
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}

int __pure cpu_architecture(void)
{
	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

	return __cpu_architecture;
}
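/*
 * Illustrative usage (an assumption, not taken from this file): once
 * setup_processor() has cached the result, other code can cheaply gate
 * behaviour on the architecture level, e.g.:
 *
 *	if (cpu_architecture() >= CPU_ARCH_ARMv7)
 *		use_v7_cache_maintenance();
 *	else
 *		use_legacy_path();
 *
 * where use_v7_cache_maintenance()/use_legacy_path() are hypothetical
 * helpers.
 */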
static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
		    : /* No output operands */
		    : "r" (1));
		isb();
		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
		    : "=r" (id_reg));
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}
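/*
 * Worked example (illustrative figures, not from this file): a CCSIDR
 * read reporting LineSize = 1 and NumSets = 511 gives
 * line_size = 4 << (1 + 2) = 32 bytes and num_sets = 512, so
 * line_size * num_sets = 16KB.  With 4KB pages that exceeds PAGE_SIZE,
 * so the I-cache is treated as virtually aliasing.
 */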
static void __init cacheid_init(void)
{
	unsigned int cachetype = read_cpuid_cachetype();
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	printk("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}
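/*
 * Example boot message (constructed from the printk format above; the
 * exact strings depend on the detected cache type):
 *
 *	CPU: PIPT / VIPT nonaliasing data cache, VIPT aliasing instruction cache
 */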
/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}

static void __init cpuid_init_hwcaps(void)
{
	unsigned int divide_instrs;

	if (cpu_architecture() < CPU_ARCH_ARMv7)
		return;

	divide_instrs = (read_cpuid_ext(CPUID_EXT_ISAR0) & 0x0f000000) >> 24;

	switch (divide_instrs) {
	case 2:
		elf_hwcap |= HWCAP_IDIVA;
		/* fall through - divide in ARM state implies Thumb state too */
	case 1:
		elf_hwcap |= HWCAP_IDIVT;
	}
}
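/*
 * Illustrative decode (assumed example): a core whose ID_ISAR0
 * Divide_instrs field (bits [27:24]) reads 0x2 supports SDIV/UDIV in
 * both ARM and Thumb state, so the fall-through above sets HWCAP_IDIVA
 * and HWCAP_IDIVT and /proc/cpuinfo later lists "idiva idivt".
 */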
static void __init feat_v6_fixup(void)
{
	int id = read_cpuid_id();

	if ((id & 0xff0f0000) != 0x41070000)
		return;

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
		elf_hwcap &= ~HWCAP_TLS;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * This only works on resume and secondary cores. For booting on the
	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
	 */
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}
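/*
 * Added commentary (assumption based on the matching vector stubs in
 * entry-armv.S): each mode's sp ends up pointing at its three-word slot
 * in 'stacks', which the exception stubs use only to spill r0, lr and
 * the SPSR before switching to the SVC-mode kernel stack - hence
 * 3 x u32 per mode is enough.
 */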
int __cpu_logical_map[NR_CPUS];

void __init smp_setup_processor_id(void)
{
	int i;
	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	cpu_logical_map(0) = cpu;
	for (i = 1; i < nr_cpu_ids; ++i)
		cpu_logical_map(i) = i == cpu ? 0 : i;

	printk(KERN_INFO "Booting Linux on physical CPU 0x%x\n", mpidr);
}
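/*
 * Worked example (illustrative): on a four-CPU system whose boot CPU
 * has MPIDR affinity level 0 equal to 2, the loop above yields the
 * logical map {2, 1, 0, 3} - logical CPU 0 is physical CPU 2, and
 * physical CPU 0 takes the logical slot that CPU 2 vacated.
 */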
static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;

	cpuid_init_hwcaps();

#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif

	feat_v6_fixup();

	cacheid_init();
	cpu_init();
}

void __init dump_machine_table(void)
{
	struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}
int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
{
	struct membank *bank = &meminfo.bank[meminfo.nr_banks];

	if (meminfo.nr_banks >= NR_BANKS) {
		printk(KERN_CRIT "NR_BANKS too low, "
			"ignoring memory at 0x%08llx\n", (long long)start);
		return -EINVAL;
	}

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is appropriately rounded down, start is rounded up.
	 */
	size -= start & ~PAGE_MASK;
	bank->start = PAGE_ALIGN(start);

#ifndef CONFIG_ARM_LPAE
	if (bank->start + size < bank->start) {
		printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
			"32-bit physical address space\n", (long long)start);
		/*
		 * To ensure bank->start + bank->size is representable in
		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
		 * This means we lose a page after masking.
		 */
		size = ULONG_MAX - bank->start;
	}
#endif

	bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

	/*
	 * Reject this memory bank if it ended up with zero size after
	 * the page alignment above.
	 */
	if (bank->size == 0)
		return -EINVAL;

	meminfo.nr_banks++;
	return 0;
}
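/*
 * Worked example (illustrative values): arm_add_memory(0x80001234,
 * 0x100000) first drops the 0x234 bytes below the next page boundary
 * from the size (0xffdcc left), rounds the start up to 0x80002000,
 * then rounds the size down to whole pages (0xff000), so the bank
 * registered in meminfo spans 0x80002000-0x80100fff.
 */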
/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	phys_addr_t size;
	phys_addr_t start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		meminfo.nr_banks = 0;
	}

	start = PHYS_OFFSET;
	size  = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);
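/*
 * Usage example (illustrative): booting with "mem=64M@0x80000000"
 * discards any firmware-provided banks (on the first "mem=" only) and
 * registers a single 64MB bank at 0x80000000; without the "@start"
 * suffix the bank starts at PHYS_OFFSET.
 */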
static void __init request_standard_resources(struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}
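/*
 * Illustrative result (assumed example addresses): after the
 * registrations above, /proc/iomem contains entries such as:
 *
 *	80000000-87ffffff : System RAM
 *	  80008000-805fffff : Kernel code
 *	  80700000-8075ffff : Kernel data
 */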
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
 .orig_video_lines	= 30,
 .orig_video_cols	= 80,
 .orig_video_mode	= 0,
 .orig_video_ega_bx	= 0,
 .orig_video_isVGA	= 1,
 .orig_video_points	= 8
};
#endif

static int __init customize_machine(void)
{
	/* customizes platform devices, or adds new ones */
	if (machine_desc->init_machine)
		machine_desc->init_machine();
	return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
	if (machine_desc->init_late)
		machine_desc->init_late();
	return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves the memory area given by the "crashkernel="
 * kernel command line parameter. The reserved memory is used by a dump
 * capture kernel when the primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

	ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
	if (ret < 0) {
		printk(KERN_WARNING "crashkernel reservation failed - "
		       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
		return;
	}

	printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
	       "for crashkernel (System RAM: %ldMB)\n",
	       (unsigned long)(crash_size >> 20),
	       (unsigned long)(crash_base >> 20),
	       (unsigned long)(total_mem >> 20));

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */
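/*
 * Usage example (illustrative): booting with
 * "crashkernel=64M@0x81000000" reserves 64MB at 0x81000000 for a kdump
 * capture kernel; with 512MB of RAM the printk above would then read:
 *
 *	Reserving 64MB of memory at 2064MB for crashkernel (System RAM: 512MB)
 */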
static int __init meminfo_cmp(const void *_a, const void *_b)
{
	const struct membank *a = _a, *b = _b;
	long cmp = bank_pfn_start(a) - bank_pfn_start(b);
	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
}

void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}

void __init setup_arch(char **cmdline_p)
{
	struct machine_desc *mdesc;

	setup_processor();
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;

	setup_dma_zone(mdesc);

	if (mdesc->restart_mode)
		reboot_setup(&mdesc->restart_mode);

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
	sanity_check_meminfo();
	arm_memblock_init(&meminfo, mdesc);

	paging_init(mdesc);
	request_standard_resources(mdesc);

	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
#ifdef CONFIG_SMP
	if (is_smp()) {
		smp_set_ops(mdesc->smp);
		smp_init_cpus();
	}
#endif

	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}
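/*
 * Added context (from generic boot code, not this file): setup_arch()
 * is the architecture hook called early from start_kernel() in
 * init/main.c.  By the time it returns, the machine descriptor, memory
 * banks, page tables and standard resources set up above are all in
 * place for the rest of the boot.
 */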
static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = 1;
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif

static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	NULL
};
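/*
 * Example output line (illustrative): with the swp, half, thumb,
 * fastmult, vfp, edsp, neon, vfpv3 and tls bits set in elf_hwcap,
 * c_show() below emits:
 *
 *	Features	: swp half thumb fastmult vfp edsp neon vfpv3 tls
 */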
static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	u32 cpuid;

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		seq_printf(m, "model name\t: %s rev %d (%s)\n",
			   cpu_name, cpuid & 15, elf_platform);

#if defined(CONFIG_SMP)
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   loops_per_jiffy / (500000/HZ),
			   (loops_per_jiffy / (5000/HZ)) % 100);
#endif
		/* dump out the processor features */
		seq_puts(m, "Features\t: ");

		for (j = 0; hwcap_str[j]; j++)
			if (elf_hwcap & (1 << j))
				seq_printf(m, "%s ", hwcap_str[j]);

		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
		seq_printf(m, "CPU architecture: %s\n",
			   proc_arch[cpu_architecture()]);

		if ((cpuid & 0x0008f000) == 0x00000000) {
			/* pre-ARM7 */
			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
		} else {
			if ((cpuid & 0x0008f000) == 0x00007000) {
				/* ARM7 */
				seq_printf(m, "CPU variant\t: 0x%02x\n",
					   (cpuid >> 16) & 127);
			} else {
				/* post-ARM7 */
				seq_printf(m, "CPU variant\t: 0x%x\n",
					   (cpuid >> 20) & 15);
			}
			seq_printf(m, "CPU part\t: 0x%03x\n",
				   (cpuid >> 4) & 0xfff);
		}
		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
	}

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}
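/*
 * Sample /proc/cpuinfo record (illustrative values assembled from the
 * format strings above; implementer 0x41 with part 0xc08 would be an
 * ARM Ltd. Cortex-A8):
 *
 *	processor	: 0
 *	model name	: ARMv7 Processor rev 2 (v7l)
 *	BogoMIPS	: 795.44
 *	Features	: swp half thumb fastmult vfp edsp neon vfpv3 tls
 *	CPU implementer	: 0x41
 *	CPU architecture: 7
 *	CPU variant	: 0x3
 *	CPU part	: 0xc08
 *	CPU revision	: 2
 */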
static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};
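/*
 * Added note: cpuinfo_op is the seq_file operations table picked up by
 * the generic /proc/cpuinfo code (fs/proc/cpuinfo.c).  The trivial
 * start/next/stop trio makes the report a single-shot record, with
 * c_show() iterating the online CPUs itself.
 */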
951