xref: /openbmc/linux/arch/arm/kernel/setup.c (revision 0d456bad)
1 /*
2  *  linux/arch/arm/kernel/setup.c
3  *
4  *  Copyright (C) 1995-2001 Russell King
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 #include <linux/export.h>
11 #include <linux/kernel.h>
12 #include <linux/stddef.h>
13 #include <linux/ioport.h>
14 #include <linux/delay.h>
15 #include <linux/utsname.h>
16 #include <linux/initrd.h>
17 #include <linux/console.h>
18 #include <linux/bootmem.h>
19 #include <linux/seq_file.h>
20 #include <linux/screen_info.h>
21 #include <linux/init.h>
22 #include <linux/kexec.h>
23 #include <linux/of_fdt.h>
24 #include <linux/cpu.h>
25 #include <linux/interrupt.h>
26 #include <linux/smp.h>
27 #include <linux/proc_fs.h>
28 #include <linux/memblock.h>
29 #include <linux/bug.h>
30 #include <linux/compiler.h>
31 #include <linux/sort.h>
32 
33 #include <asm/unified.h>
34 #include <asm/cp15.h>
35 #include <asm/cpu.h>
36 #include <asm/cputype.h>
37 #include <asm/elf.h>
38 #include <asm/procinfo.h>
39 #include <asm/sections.h>
40 #include <asm/setup.h>
41 #include <asm/smp_plat.h>
42 #include <asm/mach-types.h>
43 #include <asm/cacheflush.h>
44 #include <asm/cachetype.h>
45 #include <asm/tlbflush.h>
46 
47 #include <asm/prom.h>
48 #include <asm/mach/arch.h>
49 #include <asm/mach/irq.h>
50 #include <asm/mach/time.h>
51 #include <asm/system_info.h>
52 #include <asm/system_misc.h>
53 #include <asm/traps.h>
54 #include <asm/unwind.h>
55 #include <asm/memblock.h>
56 #include <asm/virt.h>
57 
58 #include "atags.h"
59 #include "tcm.h"
60 
61 
#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
/* Buffer holding the "fpe=" boot argument, consumed by the FP emulator. */
char fpe_type[8];

/*
 * Record the floating point emulator type requested on the command line.
 * NOTE(review): copies a fixed 8 bytes, so fpe_type is not guaranteed to
 * be NUL-terminated and a shorter argument string may be over-read —
 * confirm consumers treat this as a fixed-size field.
 */
static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;	/* handled; do not pass "fpe=" on to init */
}

__setup("fpe=", fpe_setup);
#endif
73 
74 extern void paging_init(struct machine_desc *desc);
75 extern void sanity_check_meminfo(void);
76 extern void reboot_setup(char *str);
77 extern void setup_dma_zone(struct machine_desc *desc);
78 
79 unsigned int processor_id;
80 EXPORT_SYMBOL(processor_id);
81 unsigned int __machine_arch_type __read_mostly;
82 EXPORT_SYMBOL(__machine_arch_type);
83 unsigned int cacheid __read_mostly;
84 EXPORT_SYMBOL(cacheid);
85 
86 unsigned int __atags_pointer __initdata;
87 
88 unsigned int system_rev;
89 EXPORT_SYMBOL(system_rev);
90 
91 unsigned int system_serial_low;
92 EXPORT_SYMBOL(system_serial_low);
93 
94 unsigned int system_serial_high;
95 EXPORT_SYMBOL(system_serial_high);
96 
97 unsigned int elf_hwcap __read_mostly;
98 EXPORT_SYMBOL(elf_hwcap);
99 
100 
101 #ifdef MULTI_CPU
102 struct processor processor __read_mostly;
103 #endif
104 #ifdef MULTI_TLB
105 struct cpu_tlb_fns cpu_tlb __read_mostly;
106 #endif
107 #ifdef MULTI_USER
108 struct cpu_user_fns cpu_user __read_mostly;
109 #endif
110 #ifdef MULTI_CACHE
111 struct cpu_cache_fns cpu_cache __read_mostly;
112 #endif
113 #ifdef CONFIG_OUTER_CACHE
114 struct outer_cache_fns outer_cache __read_mostly;
115 EXPORT_SYMBOL(outer_cache);
116 #endif
117 
118 /*
119  * Cached cpu_architecture() result for use by assembler code.
120  * C code should use the cpu_architecture() function instead of accessing this
121  * variable directly.
122  */
123 int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;
124 
125 struct stack {
126 	u32 irq[3];
127 	u32 abt[3];
128 	u32 und[3];
129 } ____cacheline_aligned;
130 
131 static struct stack stacks[NR_CPUS];
132 
133 char elf_platform[ELF_PLATFORM_SIZE];
134 EXPORT_SYMBOL(elf_platform);
135 
136 static const char *cpu_name;
137 static const char *machine_name;
138 static char __initdata cmd_line[COMMAND_LINE_SIZE];
139 struct machine_desc *machine_desc __initdata;
140 
141 static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
142 #define ENDIANNESS ((char)endian_test.l)
143 
144 DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
145 
146 /*
147  * Standard memory resources
148  */
/*
 * Kernel image and video RAM resources published under iomem_resource.
 * start/end are filled in at boot by request_standard_resources(); only
 * the names and flags are known statically.
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};
169 
170 #define video_ram   mem_res[0]
171 #define kernel_code mem_res[1]
172 #define kernel_data mem_res[2]
173 
/*
 * Legacy parallel-port I/O ranges (lp0/lp1/lp2), claimed under
 * ioport_resource only when the machine descriptor sets the matching
 * reserve_lp* flag in request_standard_resources().
 */
static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};
194 
195 #define lp0 io_res[0]
196 #define lp1 io_res[1]
197 #define lp2 io_res[2]
198 
/*
 * Human-readable architecture names, indexed by the CPU_ARCH_* value
 * returned by cpu_architecture(); used in the boot banner and
 * /proc/cpuinfo.
 */
static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};
218 
/*
 * Decode the CPU architecture version from the main ID register.
 *
 * Four CPUID layouts are distinguished:
 *  - all-zero architecture bits: pre-ARM7, reported as unknown;
 *  - 0x7 in bits [15:12]: ARM7 parts, where bit 23 selects ARMv4T vs ARMv3;
 *  - bit 19 clear: old post-ARM7 format, architecture field in bits [18:16];
 *  - 0xf in bits [19:16]: revised CPUID format, where ID_MMFR0 must be
 *    consulted to tell VMSAv7/PMSAv7 (ARMv7) from the ARMv6 memory models.
 *
 * Returns one of the CPU_ARCH_* constants.
 */
static int __get_cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		/* old-format CPUID: architecture encoded directly */
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		unsigned int mmfr0;

		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		asm("mrc	p15, 0, %0, c0, c1, 4"
		    : "=r" (mmfr0));
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
251 
252 int __pure cpu_architecture(void)
253 {
254 	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);
255 
256 	return __cpu_architecture;
257 }
258 
/*
 * Determine whether the instruction cache can alias within a page.
 *
 * @arch selects the cache-type register layout to interpret.  For ARMv7
 * the L1 I-cache geometry is read via CSSELR/CCSIDR and aliasing occurs
 * when one cache way (line_size * num_sets) exceeds PAGE_SIZE; for ARMv6
 * the cache type register carries an explicit aliasing bit (bit 11).
 *
 * Returns non-zero if the I-cache aliases, 0 otherwise.
 */
static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		/* select the L1 instruction cache in CSSELR ... */
		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
		    : /* No output operands */
		    : "r" (1));
		isb();
		/* ... then read its geometry from CCSIDR */
		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
		    : "=r" (id_reg));
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}
291 
/*
 * Classify the D- and I-cache types (VIVT / VIPT aliasing or not / PIPT /
 * ASID-tagged) from the cache type register and record the result in the
 * global 'cacheid', then print the boot-time cache summary.
 */
static void __init cacheid_init(void)
{
	unsigned int cachetype = read_cpuid_cachetype();
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		/* bits [31:29] == 0b100 selects the ARMv7 CTR layout */
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			/* L1Ip field (bits [15:14]) refines the I-cache policy */
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		/* pre-ARMv6 cores have VIVT caches */
		cacheid = CACHEID_VIVT;
	}

	printk("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}
333 
334 /*
335  * These functions re-use the assembly code in head.S, which
336  * already provide the required functionality.
337  */
338 extern struct proc_info_list *lookup_processor_type(unsigned int);
339 
/*
 * printf-style output usable before the console is up: the formatted
 * message (truncated to 256 bytes) goes to the low-level debug channel
 * when CONFIG_DEBUG_LL is set, and is always fed to printk() as well.
 */
void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}
355 
356 static void __init feat_v6_fixup(void)
357 {
358 	int id = read_cpuid_id();
359 
360 	if ((id & 0xff0f0000) != 0x41070000)
361 		return;
362 
363 	/*
364 	 * HWCAP_TLS is available only on 1136 r1p0 and later,
365 	 * see also kuser_get_tls_init.
366 	 */
367 	if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
368 		elf_hwcap &= ~HWCAP_TLS;
369 }
370 
371 /*
372  * cpu_init - initialise one CPU.
373  *
374  * cpu_init sets up the per-CPU stacks.
375  */
/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * This only works on resume and secondary cores. For booting on the
	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
	 */
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 *
	 * The sequence below switches into IRQ, ABT and UND mode in turn
	 * (interrupts masked throughout), points each mode's banked sp at
	 * the matching small per-CPU stack, and finally returns to SVC mode.
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}
429 
430 int __cpu_logical_map[NR_CPUS];
431 
/*
 * Establish the logical-to-physical CPU map from the boot CPU's MPIDR.
 *
 * The booting CPU's affinity-0 id becomes logical CPU 0; its old slot in
 * the otherwise identity mapping takes logical id equal to that hardware
 * id, so the map stays a permutation.
 */
void __init smp_setup_processor_id(void)
{
	int i;
	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	cpu_logical_map(0) = cpu;
	for (i = 1; i < nr_cpu_ids; ++i)
		cpu_logical_map(i) = i == cpu ? 0 : i;

	printk(KERN_INFO "Booting Linux on physical CPU 0x%x\n", mpidr);
}
444 
/*
 * Identify the CPU from its main ID register and wire up everything that
 * depends on the result: the MULTI_* dispatch tables, cached architecture
 * version, utsname/ELF platform strings, hwcaps, cache classification and
 * the per-CPU exception stacks.  Hangs if the CPU is not in the compiled-in
 * processor table.
 */
static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

	/* copy the matched implementation's function tables into the
	 * globals used when multiple implementations are compiled in */
#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	/* ENDIANNESS appends 'l' or 'b' to the machine/platform names */
	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~HWCAP_THUMB;
#endif

	feat_v6_fixup();

	cacheid_init();
	cpu_init();
}
495 
/*
 * Boot-failure path: print every machine descriptor the kernel was built
 * with, then spin forever.  Called when no descriptor matches the machine
 * id / device tree handed over by the bootloader.
 */
void __init dump_machine_table(void)
{
	struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}
509 
/*
 * Register a physical memory region with the meminfo bank array.
 *
 * @start/@size are page-aligned (start rounded up, size rounded down,
 * with size first shrunk by the amount start moves).  Without LPAE the
 * region is truncated so start + size stays representable in 32 bits.
 *
 * Returns 0 on success, -EINVAL if the bank table is full or the region
 * is empty after alignment.
 */
int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
{
	struct membank *bank = &meminfo.bank[meminfo.nr_banks];

	if (meminfo.nr_banks >= NR_BANKS) {
		printk(KERN_CRIT "NR_BANKS too low, "
			"ignoring memory at 0x%08llx\n", (long long)start);
		return -EINVAL;
	}

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is appropriately rounded down, start is rounded up.
	 */
	size -= start & ~PAGE_MASK;
	bank->start = PAGE_ALIGN(start);

#ifndef CONFIG_LPAE
	if (bank->start + size < bank->start) {
		printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
			"32-bit physical address space\n", (long long)start);
		/*
		 * To ensure bank->start + bank->size is representable in
		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
		 * This means we lose a page after masking.
		 */
		size = ULONG_MAX - bank->start;
	}
#endif

	bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

	/*
	 * Check whether this memory region has non-zero size or
	 * invalid node number.
	 */
	if (bank->size == 0)
		return -EINVAL;

	meminfo.nr_banks++;
	return 0;
}
552 
553 /*
554  * Pick out the memory size.  We look for mem=size@start,
555  * where start and size are "size[KkMm]"
556  */
/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	phys_addr_t size;
	phys_addr_t start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.  (Only on the first mem= option;
	 * later ones just add banks.)
	 */
	if (usermem == 0) {
		usermem = 1;
		meminfo.nr_banks = 0;
	}

	/* default the region base to PHYS_OFFSET when no @start is given */
	start = PHYS_OFFSET;
	size  = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
583 early_param("mem", early_mem);
584 
/*
 * Populate /proc/iomem and /proc/ioport: create a "System RAM" resource
 * for every memblock region, nest the kernel code/data resources inside
 * the region that contains them, and claim the optional video RAM and
 * legacy parallel-port ranges described by the machine descriptor.
 */
static void __init request_standard_resources(struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		/* nest the kernel image resources inside their RAM region */
		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}
629 
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
/* Default 80x30 VGA text-mode description for the console layer. */
struct screen_info screen_info = {
 .orig_video_lines	= 30,
 .orig_video_cols	= 80,
 .orig_video_mode	= 0,
 .orig_video_ega_bx	= 0,
 .orig_video_isVGA	= 1,
 .orig_video_points	= 8
};
#endif
640 
641 static int __init customize_machine(void)
642 {
643 	/* customizes platform devices, or adds new ones */
644 	if (machine_desc->init_machine)
645 		machine_desc->init_machine();
646 	return 0;
647 }
648 arch_initcall(customize_machine);
649 
650 static int __init init_machine_late(void)
651 {
652 	if (machine_desc->init_late)
653 		machine_desc->init_late();
654 	return 0;
655 }
656 late_initcall(init_machine_late);
657 
658 #ifdef CONFIG_KEXEC
659 static inline unsigned long long get_total_mem(void)
660 {
661 	unsigned long total;
662 
663 	total = max_low_pfn - min_low_pfn;
664 	return total << PAGE_SHIFT;
665 }
666 
/**
 * reserve_crashkernel() - reserves memory are for crash kernel
 *
 * This function reserves memory area given in "crashkernel=" kernel command
 * line parameter. The memory reserved is used by a dump capture kernel when
 * primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	/* no crashkernel= option, or it failed to parse: nothing to do */
	if (ret)
		return;

	ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
	if (ret < 0) {
		printk(KERN_WARNING "crashkernel reservation failed - "
		       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
		return;
	}

	printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
	       "for crashkernel (System RAM: %ldMB)\n",
	       (unsigned long)(crash_size >> 20),
	       (unsigned long)(crash_base >> 20),
	       (unsigned long)(total_mem >> 20));

	/* expose the reservation as a resource for kexec tooling */
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
703 #else
704 static inline void reserve_crashkernel(void) {}
705 #endif /* CONFIG_KEXEC */
706 
/*
 * sort() comparator ordering memory banks by ascending start pfn.
 * The long difference is clamped to -1/0/1 so truncation to int cannot
 * flip the sign.
 */
static int __init meminfo_cmp(const void *_a, const void *_b)
{
	const struct membank *a = _a, *b = _b;
	long cmp = bank_pfn_start(a) - bank_pfn_start(b);
	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
}
713 
/*
 * Report which exception mode the CPUs were booted in.  With
 * CONFIG_ARM_VIRT_EXT this distinguishes "all in HYP" (virtualization
 * usable), "mixed/wrong modes" (likely firmware bug) and "all in SVC";
 * without it the function is a no-op.
 */
void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}
728 
/*
 * Main ARM boot-time setup, called from start_kernel().
 *
 * Ordering here is load-bearing: the CPU must be identified before the
 * machine descriptor is looked up (DT first, ATAGS fallback); early
 * params must be parsed before the meminfo banks are sorted, sanity
 * checked and handed to memblock; paging must be initialised before
 * resources are requested and the device tree unflattened.
 */
void __init setup_arch(char **cmdline_p)
{
	struct machine_desc *mdesc;

	setup_processor();
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(__atags_pointer, machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;

	setup_dma_zone(mdesc);

	if (mdesc->restart_mode)
		reboot_setup(&mdesc->restart_mode);

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

	/* order memory banks, then hand the final layout to memblock */
	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
	sanity_check_meminfo();
	arm_memblock_init(&meminfo, mdesc);

	paging_init(mdesc);
	request_standard_resources(mdesc);

	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
#ifdef CONFIG_SMP
	if (is_smp()) {
		smp_set_ops(mdesc->smp);
		smp_init_cpus();
	}
#endif

	/* on SMP this check runs later, once secondaries are up */
	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

	tcm_init();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}
798 
799 
/*
 * subsys_initcall: register every possible CPU with the sysfs CPU
 * subsystem, marking each as hotpluggable.
 */
static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = 1;
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);
813 
814 #ifdef CONFIG_HAVE_PROC_CPU
815 static int __init proc_cpu_init(void)
816 {
817 	struct proc_dir_entry *res;
818 
819 	res = proc_mkdir("cpu", NULL);
820 	if (!res)
821 		return -ENOMEM;
822 	return 0;
823 }
824 fs_initcall(proc_cpu_init);
825 #endif
826 
/*
 * Names of the HWCAP_* bits, indexed by bit position and NULL-terminated;
 * printed on the "Features" line of /proc/cpuinfo.
 */
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	NULL
};
849 
/*
 * seq_file show routine for /proc/cpuinfo: one block per online CPU
 * (model, BogoMIPS, hwcap features, decoded CPUID fields) followed by
 * board-level Hardware / Revision / Serial lines.
 */
static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	u32 cpuid;

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		seq_printf(m, "model name\t: %s rev %d (%s)\n",
			   cpu_name, cpuid & 15, elf_platform);

#if defined(CONFIG_SMP)
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   loops_per_jiffy / (500000/HZ),
			   (loops_per_jiffy / (5000/HZ)) % 100);
#endif
		/* dump out the processor features */
		seq_puts(m, "Features\t: ");

		for (j = 0; hwcap_str[j]; j++)
			if (elf_hwcap & (1 << j))
				seq_printf(m, "%s ", hwcap_str[j]);

		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
		seq_printf(m, "CPU architecture: %s\n",
			   proc_arch[cpu_architecture()]);

		/* field layout depends on the CPUID format; see
		 * __get_cpu_architecture() for the same decoding */
		if ((cpuid & 0x0008f000) == 0x00000000) {
			/* pre-ARM7 */
			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
		} else {
			if ((cpuid & 0x0008f000) == 0x00007000) {
				/* ARM7 */
				seq_printf(m, "CPU variant\t: 0x%02x\n",
					   (cpuid >> 16) & 127);
			} else {
				/* post-ARM7 */
				seq_printf(m, "CPU variant\t: 0x%x\n",
					   (cpuid >> 20) & 15);
			}
			seq_printf(m, "CPU part\t: 0x%03x\n",
				   (cpuid >> 4) & 0xfff);
		}
		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
	}

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}
912 
913 static void *c_start(struct seq_file *m, loff_t *pos)
914 {
915 	return *pos < 1 ? (void *)1 : NULL;
916 }
917 
918 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
919 {
920 	++*pos;
921 	return NULL;
922 }
923 
/* Nothing to release: c_start() allocates no state. */
static void c_stop(struct seq_file *m, void *v)
{
}
927 
/* seq_file operations backing /proc/cpuinfo. */
const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};
934