/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>

#include <asm/unified.h>
#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/psci.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
#include <asm/virt.h>

#include "atags.h"


#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void paging_init(const struct machine_desc *desc);
extern void early_paging_init(const struct machine_desc *,
			      struct proc_info_list *);
extern void sanity_check_meminfo(void);
extern enum reboot_mode reboot_mode;
extern void setup_dma_zone(const struct machine_desc *desc);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);


#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

#ifndef CONFIG_CPU_V7M
static struct stack stacks[NR_CPUS];
#endif

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
const struct machine_desc *machine_desc __initdata;

static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)
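
/*
 * How the endianness probe above works: the union overlays the string
 * "l??b" on an unsigned long, and the (char) cast keeps only the least
 * significant byte of .l.  On a little-endian CPU that is the first
 * byte stored, so ENDIANNESS evaluates to 'l'; on a 32-bit big-endian
 * CPU it is the last byte, 'b'.
 */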

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"7M",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

#ifdef CONFIG_CPU_V7M
static int __get_cpu_architecture(void)
{
	return CPU_ARCH_ARMv7M;
}
#else
static int __get_cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		unsigned int mmfr0;

		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		asm("mrc	p15, 0, %0, c0, c1, 4"
		    : "=r" (mmfr0));
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
#endif
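
/*
 * Worked example (illustrative): a Cortex-A9 typically reports a MIDR
 * of 0x413fc090.  Bits [19:16] are 0xf, so the "revised CPUID format"
 * branch above runs and reads MMFR0; the VMSA field there is >= 3 on
 * that core, yielding CPU_ARCH_ARMv7.
 */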

int __pure cpu_architecture(void)
{
	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

	return __cpu_architecture;
}

static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
		    : /* No output operands */
		    : "r" (1));
		isb();
		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
		    : "=r" (id_reg));
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}
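
/*
 * Geometry example for the CCSIDR decode above: a field value of 1 in
 * bits [2:0] gives 32-byte lines, and with 128 sets one way of the
 * I-cache spans 32 * 128 = 4096 bytes.  With 4 KiB pages that is
 * exactly PAGE_SIZE, so the cache does not alias; doubling the sets
 * to 256 makes a way span two pages and the cache is flagged as
 * aliasing.
 */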

static void __init cacheid_init(void)
{
	unsigned int arch = cpu_architecture();

	if (arch == CPU_ARCH_ARMv7M) {
		cacheid = 0;
	} else if (arch >= CPU_ARCH_ARMv6) {
		unsigned int cachetype = read_cpuid_cachetype();
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	printk("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}
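
/*
 * Decoding notes for the CTR read above, assuming the ARMv7 layout:
 * bits [31:29] equal to 0x4 select the ARMv7 register format, and the
 * L1Ip field in bits [15:14] then encodes the L1 I-cache policy,
 * 0b01 ASID-tagged VIVT, 0b10 VIPT, 0b11 PIPT, which is what the
 * switch on (cachetype & (3 << 14)) distinguishes.
 */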

/*
 * This function re-uses the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}

static void __init cpuid_init_hwcaps(void)
{
	unsigned int divide_instrs, vmsa;

	if (cpu_architecture() < CPU_ARCH_ARMv7)
		return;

	divide_instrs = (read_cpuid_ext(CPUID_EXT_ISAR0) & 0x0f000000) >> 24;

	switch (divide_instrs) {
	case 2:
		elf_hwcap |= HWCAP_IDIVA;
		/* fall through - ARM-state divide implies Thumb-state divide */
	case 1:
		elf_hwcap |= HWCAP_IDIVT;
	}

	/* LPAE implies atomic ldrd/strd instructions */
	vmsa = (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xf) >> 0;
	if (vmsa >= 5)
		elf_hwcap |= HWCAP_LPAE;
}
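
/*
 * For illustration: a Cortex-A15 reports 2 in ID_ISAR0[27:24], so both
 * HWCAP_IDIVA and HWCAP_IDIVT get set (hardware divide in ARM and
 * Thumb state), whereas a core reporting 1 only has the Thumb-state
 * divide instructions.
 */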

static void __init feat_v6_fixup(void)
{
	int id = read_cpuid_id();

	if ((id & 0xff0f0000) != 0x41070000)
		return;

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
		elf_hwcap &= ~HWCAP_TLS;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void notrace cpu_init(void)
{
#ifndef CONFIG_CPU_V7M
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * This only works on resume and secondary cores. For booting on the
	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
	 */
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
#endif
}

u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

void __init smp_setup_processor_id(void)
{
	int i;
	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	cpu_logical_map(0) = cpu;
	for (i = 1; i < nr_cpu_ids; ++i)
		cpu_logical_map(i) = i == cpu ? 0 : i;

	/*
	 * Clear __my_cpu_offset on the boot CPU to avoid a hang caused
	 * by using a percpu variable too early; for example, lockdep
	 * will access a percpu variable inside lock_release.
	 */
	set_my_cpu_offset(0);

	printk(KERN_INFO "Booting Linux on physical CPU 0x%x\n", mpidr);
}
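
/*
 * Remapping example: booting on the core with MPIDR affinity level 0
 * equal to 2 on a four-CPU system yields the logical map { 2, 1, 0, 3 },
 * i.e. logical CPU 0 is always the booting core and the core that
 * would normally occupy index 2 takes the freed slot.
 */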

struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. The resulting algorithm is a
 *			  collision-free hash carried out through shifting
 *			  and ORing.
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity;
	u32 fs[3], bits[3], ls, mask = 0;
	/*
	 * Pre-scan the list of MPIDRs and filter out bits that do
	 * not contribute to affinity levels, i.e. they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits 0x%x\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 3; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the positions of the MSB and LSB to determine
		 * how many bits are required to express the affinity
		 * level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 24-bit value space to a
	 * compact set of values. This is equivalent to hashing
	 * the MPIDR through shifting and ORing. It is a collision-free
	 * hash, though not minimal, since some levels might contain a
	 * number of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, e.g. MPIDR[7:0] = {0x2, 0x80}.
	 */
	mpidr_hash.shift_aff[0] = fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
				mpidr_hash.shift_aff[0],
				mpidr_hash.shift_aff[1],
				mpidr_hash.shift_aff[2],
				mpidr_hash.mask,
				mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	sync_cache_w(&mpidr_hash);
}
#endif
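
/*
 * Worked example, assuming four CPUs with MPIDRs 0x000, 0x001, 0x100
 * and 0x101: the XOR pre-scan leaves mask == 0x101, so levels 0 and 1
 * each contribute a single bit with fs[i] == 0 and bits[i] == 1.  With
 * MPIDR_LEVEL_BITS == 8 that gives shift_aff == { 0, 7, 14 } and a
 * 2-bit hash, e.g. MPIDR 0x101 compresses to index 3.
 */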

static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;

	cpuid_init_hwcaps();

#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif

	erratum_a15_798181_init();

	feat_v6_fixup();

	cacheid_init();
	cpu_init();
}

void __init dump_machine_table(void)
{
	const struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}

int __init arm_add_memory(u64 start, u64 size)
{
	struct membank *bank = &meminfo.bank[meminfo.nr_banks];
	u64 aligned_start;

	if (meminfo.nr_banks >= NR_BANKS) {
		printk(KERN_CRIT "NR_BANKS too low, "
			"ignoring memory at 0x%08llx\n", (long long)start);
		return -EINVAL;
	}

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is appropriately rounded down, start is rounded up.
	 */
	size -= start & ~PAGE_MASK;
	aligned_start = PAGE_ALIGN(start);

#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
	if (aligned_start > ULONG_MAX) {
		printk(KERN_CRIT "Ignoring memory at 0x%08llx outside "
		       "32-bit physical address space\n", (long long)start);
		return -EINVAL;
	}

	if (aligned_start + size > ULONG_MAX) {
		printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
			"32-bit physical address space\n", (long long)start);
		/*
		 * To ensure bank->start + bank->size is representable in
		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
		 * This means we lose a page after masking.
		 */
		size = ULONG_MAX - aligned_start;
	}
#endif

	bank->start = aligned_start;
	bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

	/*
	 * Check whether this memory region still has a non-zero size
	 * after the alignment above.
	 */
	if (bank->size == 0)
		return -EINVAL;

	meminfo.nr_banks++;
	return 0;
}
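
/*
 * Rounding example for the code above: with "mem=256M@0x80000400" the
 * size is first reduced by 0x400 to compensate for the unaligned
 * start, aligned_start becomes 0x80001000, and the bank size is then
 * masked down to a whole number of pages.
 */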

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	u64 size;
	u64 start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		meminfo.nr_banks = 0;
	}

	start = PHYS_OFFSET;
	size  = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);
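
/*
 * For example, "mem=512M@0x80000000" drops the bootloader-provided
 * banks on first use and registers a single 512 MiB bank at physical
 * address 0x80000000, while a bare "mem=512M" keeps the default
 * PHYS_OFFSET as the start address.
 */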

static void __init request_standard_resources(const struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines can never have lp0, lp1 or lp2, so only
	 * reserve the legacy parallel-port regions when asked to.
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}
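
/*
 * The nesting requested above appears in /proc/iomem roughly as
 * follows (addresses are illustrative only):
 *
 *	80000000-9fffffff : System RAM
 *	  80008000-804fffff : Kernel code
 *	  80500000-806bffff : Kernel data
 */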

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
 .orig_video_lines	= 30,
 .orig_video_cols	= 80,
 .orig_video_mode	= 0,
 .orig_video_ega_bx	= 0,
 .orig_video_isVGA	= 1,
 .orig_video_points	= 8
};
#endif

static int __init customize_machine(void)
{
	/*
	 * Customize platform devices, or add new ones.  On DT-based
	 * machines, if no init_machine callback is provided, fall back
	 * to populating the machine from the device tree; otherwise
	 * every machine would need an init_machine callback.
	 */
	if (machine_desc->init_machine)
		machine_desc->init_machine();
#ifdef CONFIG_OF
	else
		of_platform_populate(NULL, of_default_bus_match_table,
					NULL, NULL);
#endif
	return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
	if (machine_desc->init_late)
		machine_desc->init_late();
	return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves the memory area given by the "crashkernel=" kernel
 * command line parameter. The reserved memory is used by a dump capture
 * kernel when the primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

	ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
	if (ret < 0) {
		printk(KERN_WARNING "crashkernel reservation failed - "
		       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
		return;
	}

	printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
	       "for crashkernel (System RAM: %ldMB)\n",
	       (unsigned long)(crash_size >> 20),
	       (unsigned long)(crash_base >> 20),
	       (unsigned long)(total_mem >> 20));

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */

static int __init meminfo_cmp(const void *_a, const void *_b)
{
	const struct membank *a = _a, *b = _b;
	long cmp = bank_pfn_start(a) - bank_pfn_start(b);
	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
}

void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	sync_boot_mode();

	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}

void __init setup_arch(char **cmdline_p)
{
	const struct machine_desc *mdesc;

	setup_processor();
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;

	if (mdesc->reboot_mode != REBOOT_HARD)
		reboot_mode = mdesc->reboot_mode;

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);

	early_paging_init(mdesc, lookup_processor_type(read_cpuid_id()));
	setup_dma_zone(mdesc);
	sanity_check_meminfo();
	arm_memblock_init(&meminfo, mdesc);

	paging_init(mdesc);
	request_standard_resources(mdesc);

	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
	psci_init();
#ifdef CONFIG_SMP
	if (is_smp()) {
		if (!mdesc->smp_init || !mdesc->smp_init()) {
			if (psci_smp_available())
				smp_set_ops(&psci_smp_ops);
			else if (mdesc->smp)
				smp_set_ops(mdesc->smp);
		}
		smp_init_cpus();
		smp_build_mpidr_hash();
	}
#endif

	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}


static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = 1;
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif

static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	NULL
};

static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	u32 cpuid;

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		seq_printf(m, "model name\t: %s rev %d (%s)\n",
			   cpu_name, cpuid & 15, elf_platform);

		/* dump out the processor features */
		seq_puts(m, "Features\t: ");

		for (j = 0; hwcap_str[j]; j++)
			if (elf_hwcap & (1 << j))
				seq_printf(m, "%s ", hwcap_str[j]);

		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
		seq_printf(m, "CPU architecture: %s\n",
			   proc_arch[cpu_architecture()]);

		if ((cpuid & 0x0008f000) == 0x00000000) {
			/* pre-ARM7 */
			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
		} else {
			if ((cpuid & 0x0008f000) == 0x00007000) {
				/* ARM7 */
				seq_printf(m, "CPU variant\t: 0x%02x\n",
					   (cpuid >> 16) & 127);
			} else {
				/* post-ARM7 */
				seq_printf(m, "CPU variant\t: 0x%x\n",
					   (cpuid >> 20) & 15);
			}
			seq_printf(m, "CPU part\t: 0x%03x\n",
				   (cpuid >> 4) & 0xfff);
		}
		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
	}

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}
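
/*
 * Sample of the resulting /proc/cpuinfo output (values illustrative):
 *
 *	processor	: 0
 *	model name	: ARMv7 Processor rev 0 (v7l)
 *	Features	: swp half thumb fastmult vfp edsp neon vfpv3 tls
 *	CPU implementer	: 0x41
 *	CPU architecture: 7
 *	CPU variant	: 0x3
 *	CPU part	: 0xc09
 *	CPU revision	: 0
 *
 *	Hardware	: <machine name>
 *	Revision	: 0000
 *	Serial		: 0000000000000000
 */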

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}
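
/*
 * The three callbacks above describe a single-element sequence:
 * c_start returns one opaque token for position 0 and NULL afterwards,
 * and c_next always terminates the walk, so c_show runs exactly once
 * per read and iterates over the online CPUs itself.
 */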

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};
1070