xref: /openbmc/linux/arch/arm/kernel/setup.c (revision 77a87824)
1 /*
2  *  linux/arch/arm/kernel/setup.c
3  *
4  *  Copyright (C) 1995-2001 Russell King
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 #include <linux/efi.h>
11 #include <linux/export.h>
12 #include <linux/kernel.h>
13 #include <linux/stddef.h>
14 #include <linux/ioport.h>
15 #include <linux/delay.h>
16 #include <linux/utsname.h>
17 #include <linux/initrd.h>
18 #include <linux/console.h>
19 #include <linux/bootmem.h>
20 #include <linux/seq_file.h>
21 #include <linux/screen_info.h>
22 #include <linux/of_platform.h>
23 #include <linux/init.h>
24 #include <linux/kexec.h>
25 #include <linux/of_fdt.h>
26 #include <linux/cpu.h>
27 #include <linux/interrupt.h>
28 #include <linux/smp.h>
29 #include <linux/proc_fs.h>
30 #include <linux/memblock.h>
31 #include <linux/bug.h>
32 #include <linux/compiler.h>
33 #include <linux/sort.h>
34 #include <linux/psci.h>
35 
36 #include <asm/unified.h>
37 #include <asm/cp15.h>
38 #include <asm/cpu.h>
39 #include <asm/cputype.h>
40 #include <asm/efi.h>
41 #include <asm/elf.h>
42 #include <asm/early_ioremap.h>
43 #include <asm/fixmap.h>
44 #include <asm/procinfo.h>
45 #include <asm/psci.h>
46 #include <asm/sections.h>
47 #include <asm/setup.h>
48 #include <asm/smp_plat.h>
49 #include <asm/mach-types.h>
50 #include <asm/cacheflush.h>
51 #include <asm/cachetype.h>
52 #include <asm/tlbflush.h>
53 #include <asm/xen/hypervisor.h>
54 
55 #include <asm/prom.h>
56 #include <asm/mach/arch.h>
57 #include <asm/mach/irq.h>
58 #include <asm/mach/time.h>
59 #include <asm/system_info.h>
60 #include <asm/system_misc.h>
61 #include <asm/traps.h>
62 #include <asm/unwind.h>
63 #include <asm/memblock.h>
64 #include <asm/virt.h>
65 
66 #include "atags.h"
67 
68 
69 #if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
70 char fpe_type[8];
71 
72 static int __init fpe_setup(char *line)
73 {
74 	memcpy(fpe_type, line, 8);
75 	return 1;
76 }
77 
78 __setup("fpe=", fpe_setup);
79 #endif
80 
81 extern void init_default_cache_policy(unsigned long);
82 extern void paging_init(const struct machine_desc *desc);
83 extern void early_paging_init(const struct machine_desc *);
84 extern void sanity_check_meminfo(void);
85 extern enum reboot_mode reboot_mode;
86 extern void setup_dma_zone(const struct machine_desc *desc);
87 
88 unsigned int processor_id;
89 EXPORT_SYMBOL(processor_id);
90 unsigned int __machine_arch_type __read_mostly;
91 EXPORT_SYMBOL(__machine_arch_type);
92 unsigned int cacheid __read_mostly;
93 EXPORT_SYMBOL(cacheid);
94 
95 unsigned int __atags_pointer __initdata;
96 
97 unsigned int system_rev;
98 EXPORT_SYMBOL(system_rev);
99 
100 const char *system_serial;
101 EXPORT_SYMBOL(system_serial);
102 
103 unsigned int system_serial_low;
104 EXPORT_SYMBOL(system_serial_low);
105 
106 unsigned int system_serial_high;
107 EXPORT_SYMBOL(system_serial_high);
108 
109 unsigned int elf_hwcap __read_mostly;
110 EXPORT_SYMBOL(elf_hwcap);
111 
112 unsigned int elf_hwcap2 __read_mostly;
113 EXPORT_SYMBOL(elf_hwcap2);
114 
115 
116 #ifdef MULTI_CPU
117 struct processor processor __read_mostly;
118 #endif
119 #ifdef MULTI_TLB
120 struct cpu_tlb_fns cpu_tlb __read_mostly;
121 #endif
122 #ifdef MULTI_USER
123 struct cpu_user_fns cpu_user __read_mostly;
124 #endif
125 #ifdef MULTI_CACHE
126 struct cpu_cache_fns cpu_cache __read_mostly;
127 #endif
128 #ifdef CONFIG_OUTER_CACHE
129 struct outer_cache_fns outer_cache __read_mostly;
130 EXPORT_SYMBOL(outer_cache);
131 #endif
132 
133 /*
134  * Cached cpu_architecture() result for use by assembler code.
135  * C code should use the cpu_architecture() function instead of accessing this
136  * variable directly.
137  */
138 int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;
139 
140 struct stack {
141 	u32 irq[3];
142 	u32 abt[3];
143 	u32 und[3];
144 	u32 fiq[3];
145 } ____cacheline_aligned;
146 
147 #ifndef CONFIG_CPU_V7M
148 static struct stack stacks[NR_CPUS];
149 #endif
150 
151 char elf_platform[ELF_PLATFORM_SIZE];
152 EXPORT_SYMBOL(elf_platform);
153 
154 static const char *cpu_name;
155 static const char *machine_name;
156 static char __initdata cmd_line[COMMAND_LINE_SIZE];
157 const struct machine_desc *machine_desc __initdata;
158 
159 static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
160 #define ENDIANNESS ((char)endian_test.l)
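/*
 * ENDIANNESS works because the union overlays the bytes 'l','?','?','b'
 * with an unsigned long: casting the long to char keeps only its least
 * significant byte, which sits at the lowest address on a little-endian
 * CPU ('l') and at the highest address on a big-endian CPU ('b').  The
 * resulting character is appended to the machine and ELF platform strings
 * in setup_processor(), giving names such as "armv7l" vs "armv7b".
 */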
161 
162 DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
163 
164 /*
165  * Standard memory resources
166  */
167 static struct resource mem_res[] = {
168 	{
169 		.name = "Video RAM",
170 		.start = 0,
171 		.end = 0,
172 		.flags = IORESOURCE_MEM
173 	},
174 	{
175 		.name = "Kernel code",
176 		.start = 0,
177 		.end = 0,
178 		.flags = IORESOURCE_SYSTEM_RAM
179 	},
180 	{
181 		.name = "Kernel data",
182 		.start = 0,
183 		.end = 0,
184 		.flags = IORESOURCE_SYSTEM_RAM
185 	}
186 };
187 
188 #define video_ram   mem_res[0]
189 #define kernel_code mem_res[1]
190 #define kernel_data mem_res[2]
191 
192 static struct resource io_res[] = {
193 	{
194 		.name = "reserved",
195 		.start = 0x3bc,
196 		.end = 0x3be,
197 		.flags = IORESOURCE_IO | IORESOURCE_BUSY
198 	},
199 	{
200 		.name = "reserved",
201 		.start = 0x378,
202 		.end = 0x37f,
203 		.flags = IORESOURCE_IO | IORESOURCE_BUSY
204 	},
205 	{
206 		.name = "reserved",
207 		.start = 0x278,
208 		.end = 0x27f,
209 		.flags = IORESOURCE_IO | IORESOURCE_BUSY
210 	}
211 };
212 
213 #define lp0 io_res[0]
214 #define lp1 io_res[1]
215 #define lp2 io_res[2]
216 
217 static const char *proc_arch[] = {
218 	"undefined/unknown",
219 	"3",
220 	"4",
221 	"4T",
222 	"5",
223 	"5T",
224 	"5TE",
225 	"5TEJ",
226 	"6TEJ",
227 	"7",
228 	"7M",
229 	"?(12)",
230 	"?(13)",
231 	"?(14)",
232 	"?(15)",
233 	"?(16)",
234 	"?(17)",
235 };
236 
237 #ifdef CONFIG_CPU_V7M
238 static int __get_cpu_architecture(void)
239 {
240 	return CPU_ARCH_ARMv7M;
241 }
242 #else
243 static int __get_cpu_architecture(void)
244 {
245 	int cpu_arch;
246 
247 	if ((read_cpuid_id() & 0x0008f000) == 0) {
248 		cpu_arch = CPU_ARCH_UNKNOWN;
249 	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
250 		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
251 	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
252 		cpu_arch = (read_cpuid_id() >> 16) & 7;
253 		if (cpu_arch)
254 			cpu_arch += CPU_ARCH_ARMv3;
255 	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
256 		/* Revised CPUID format. Read the Memory Model Feature
257 		 * Register 0 and check for VMSAv7 or PMSAv7 */
258 		unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
259 		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
260 		    (mmfr0 & 0x000000f0) >= 0x00000030)
261 			cpu_arch = CPU_ARCH_ARMv7;
262 		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
263 			 (mmfr0 & 0x000000f0) == 0x00000020)
264 			cpu_arch = CPU_ARCH_ARMv6;
265 		else
266 			cpu_arch = CPU_ARCH_UNKNOWN;
267 	} else
268 		cpu_arch = CPU_ARCH_UNKNOWN;
269 
270 	return cpu_arch;
271 }
272 #endif
273 
274 int __pure cpu_architecture(void)
275 {
276 	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);
277 
278 	return __cpu_architecture;
279 }
280 
281 static int cpu_has_aliasing_icache(unsigned int arch)
282 {
283 	int aliasing_icache;
284 	unsigned int id_reg, num_sets, line_size;
285 
286 	/* PIPT caches never alias. */
287 	if (icache_is_pipt())
288 		return 0;
289 
290 	/* arch specifies the register format */
291 	switch (arch) {
292 	case CPU_ARCH_ARMv7:
293 		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
294 		    : /* No output operands */
295 		    : "r" (1));
296 		isb();
297 		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
298 		    : "=r" (id_reg));
299 		line_size = 4 << ((id_reg & 0x7) + 2);
300 		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
301 		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
302 		break;
303 	case CPU_ARCH_ARMv6:
304 		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
305 		break;
306 	default:
307 		/* I-cache aliases will be handled by D-cache aliasing code */
308 		aliasing_icache = 0;
309 	}
310 
311 	return aliasing_icache;
312 }
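/*
 * Worked example for the ARMv7 path above: CSSELR is written with 1 to
 * select the L1 instruction cache, then CCSIDR is read.  With a LineSize
 * field of 1 (4 << (1 + 2) = 32-byte lines) and a NumSets field of 255
 * (256 sets), one cache way spans 32 * 256 = 8192 bytes.  That exceeds a
 * 4K page, so two virtual aliases of the same physical page can land in
 * different sets and the I-cache is reported as aliasing; with 128 sets
 * the way is exactly one page and no aliasing can occur.
 */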
313 
314 static void __init cacheid_init(void)
315 {
316 	unsigned int arch = cpu_architecture();
317 
318 	if (arch == CPU_ARCH_ARMv7M) {
319 		cacheid = 0;
320 	} else if (arch >= CPU_ARCH_ARMv6) {
321 		unsigned int cachetype = read_cpuid_cachetype();
322 		if ((cachetype & (7 << 29)) == 4 << 29) {
323 			/* ARMv7 register format */
324 			arch = CPU_ARCH_ARMv7;
325 			cacheid = CACHEID_VIPT_NONALIASING;
326 			switch (cachetype & (3 << 14)) {
327 			case (1 << 14):
328 				cacheid |= CACHEID_ASID_TAGGED;
329 				break;
330 			case (3 << 14):
331 				cacheid |= CACHEID_PIPT;
332 				break;
333 			}
334 		} else {
335 			arch = CPU_ARCH_ARMv6;
336 			if (cachetype & (1 << 23))
337 				cacheid = CACHEID_VIPT_ALIASING;
338 			else
339 				cacheid = CACHEID_VIPT_NONALIASING;
340 		}
341 		if (cpu_has_aliasing_icache(arch))
342 			cacheid |= CACHEID_VIPT_I_ALIASING;
343 	} else {
344 		cacheid = CACHEID_VIVT;
345 	}
346 
347 	pr_info("CPU: %s data cache, %s instruction cache\n",
348 		cache_is_vivt() ? "VIVT" :
349 		cache_is_vipt_aliasing() ? "VIPT aliasing" :
350 		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
351 		cache_is_vivt() ? "VIVT" :
352 		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
353 		icache_is_vipt_aliasing() ? "VIPT aliasing" :
354 		icache_is_pipt() ? "PIPT" :
355 		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
356 }
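/*
 * The magic numbers above come from the Cache Type Register: CTR[31:29]
 * equal to 0b100 selects the ARMv7 register layout, and the L1Ip field in
 * bits [15:14] then describes the L1 instruction cache (0b01 ASID-tagged
 * VIVT, 0b11 PIPT).  On ARMv6 the aliasing decision is based on the 'P'
 * (alias restriction) bit of the data cache size field, bit 23.
 */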
357 
358 /*
359  * These functions reuse the assembly code in head.S, which
360  * already provides the required functionality.
361  */
362 extern struct proc_info_list *lookup_processor_type(unsigned int);
363 
364 void __init early_print(const char *str, ...)
365 {
366 	extern void printascii(const char *);
367 	char buf[256];
368 	va_list ap;
369 
370 	va_start(ap, str);
371 	vsnprintf(buf, sizeof(buf), str, ap);
372 	va_end(ap);
373 
374 #ifdef CONFIG_DEBUG_LL
375 	printascii(buf);
376 #endif
377 	printk("%s", buf);
378 }
379 
380 #ifdef CONFIG_ARM_PATCH_IDIV
381 
382 static inline u32 __attribute_const__ sdiv_instruction(void)
383 {
384 	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
385 		/* "sdiv r0, r0, r1" */
386 		u32 insn = __opcode_thumb32_compose(0xfb90, 0xf0f1);
387 		return __opcode_to_mem_thumb32(insn);
388 	}
389 
390 	/* "sdiv r0, r0, r1" */
391 	return __opcode_to_mem_arm(0xe710f110);
392 }
393 
394 static inline u32 __attribute_const__ udiv_instruction(void)
395 {
396 	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
397 		/* "udiv r0, r0, r1" */
398 		u32 insn = __opcode_thumb32_compose(0xfbb0, 0xf0f1);
399 		return __opcode_to_mem_thumb32(insn);
400 	}
401 
402 	/* "udiv r0, r0, r1" */
403 	return __opcode_to_mem_arm(0xe730f110);
404 }
405 
406 static inline u32 __attribute_const__ bx_lr_instruction(void)
407 {
408 	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
409 		/* "bx lr; nop" */
410 		u32 insn = __opcode_thumb32_compose(0x4770, 0x46c0);
411 		return __opcode_to_mem_thumb32(insn);
412 	}
413 
414 	/* "bx lr" */
415 	return __opcode_to_mem_arm(0xe12fff1e);
416 }
417 
418 static void __init patch_aeabi_idiv(void)
419 {
420 	extern void __aeabi_uidiv(void);
421 	extern void __aeabi_idiv(void);
422 	uintptr_t fn_addr;
423 	unsigned int mask;
424 
425 	mask = IS_ENABLED(CONFIG_THUMB2_KERNEL) ? HWCAP_IDIVT : HWCAP_IDIVA;
426 	if (!(elf_hwcap & mask))
427 		return;
428 
429 	pr_info("CPU: div instructions available: patching division code\n");
430 
431 	fn_addr = ((uintptr_t)&__aeabi_uidiv) & ~1;
432 	asm ("" : "+g" (fn_addr));
433 	((u32 *)fn_addr)[0] = udiv_instruction();
434 	((u32 *)fn_addr)[1] = bx_lr_instruction();
435 	flush_icache_range(fn_addr, fn_addr + 8);
436 
437 	fn_addr = ((uintptr_t)&__aeabi_idiv) & ~1;
438 	asm ("" : "+g" (fn_addr));
439 	((u32 *)fn_addr)[0] = sdiv_instruction();
440 	((u32 *)fn_addr)[1] = bx_lr_instruction();
441 	flush_icache_range(fn_addr, fn_addr + 8);
442 }
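/*
 * The patching above rewrites the first two instructions of the
 * __aeabi_uidiv and __aeabi_idiv helpers in place: the software division
 * loop is replaced by a hardware "udiv"/"sdiv" followed by "bx lr",
 * i.e. 8 bytes per helper, and flush_icache_range() makes sure the new
 * instructions are actually fetched.  The "& ~1" strips the Thumb bit
 * from the function address before it is used as a data pointer.
 */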
443 
444 #else
445 static inline void patch_aeabi_idiv(void) { }
446 #endif
447 
448 static void __init cpuid_init_hwcaps(void)
449 {
450 	int block;
451 	u32 isar5;
452 
453 	if (cpu_architecture() < CPU_ARCH_ARMv7)
454 		return;
455 
456 	block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24);
457 	if (block >= 2)
458 		elf_hwcap |= HWCAP_IDIVA;
459 	if (block >= 1)
460 		elf_hwcap |= HWCAP_IDIVT;
461 
462 	/* LPAE implies atomic ldrd/strd instructions */
463 	block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
464 	if (block >= 5)
465 		elf_hwcap |= HWCAP_LPAE;
466 
467 	/* check for supported v8 Crypto instructions */
468 	isar5 = read_cpuid_ext(CPUID_EXT_ISAR5);
469 
470 	block = cpuid_feature_extract_field(isar5, 4);
471 	if (block >= 2)
472 		elf_hwcap2 |= HWCAP2_PMULL;
473 	if (block >= 1)
474 		elf_hwcap2 |= HWCAP2_AES;
475 
476 	block = cpuid_feature_extract_field(isar5, 8);
477 	if (block >= 1)
478 		elf_hwcap2 |= HWCAP2_SHA1;
479 
480 	block = cpuid_feature_extract_field(isar5, 12);
481 	if (block >= 1)
482 		elf_hwcap2 |= HWCAP2_SHA2;
483 
484 	block = cpuid_feature_extract_field(isar5, 16);
485 	if (block >= 1)
486 		elf_hwcap2 |= HWCAP2_CRC32;
487 }
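/*
 * The cpuid_feature_extract*() helpers above read 4-bit signed fields
 * from the CPUID feature registers.  For example, ID_ISAR0[27:24] is the
 * divide field: a value of 1 means SDIV/UDIV are implemented in Thumb
 * state only (HWCAP_IDIVT), while 2 or more also covers ARM state
 * (HWCAP_IDIVA), matching the two tests at the top of this function.
 */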
488 
489 static void __init elf_hwcap_fixup(void)
490 {
491 	unsigned id = read_cpuid_id();
492 
493 	/*
494 	 * HWCAP_TLS is available only on ARM1136 r1p0 and later;
495 	 * see also kuser_get_tls_init.
496 	 */
497 	if (read_cpuid_part() == ARM_CPU_PART_ARM1136 &&
498 	    ((id >> 20) & 3) == 0) {
499 		elf_hwcap &= ~HWCAP_TLS;
500 		return;
501 	}
502 
503 	/* Verify if CPUID scheme is implemented */
504 	if ((id & 0x000f0000) != 0x000f0000)
505 		return;
506 
507 	/*
508 	 * If the CPU supports LDREX/STREX and LDREXB/STREXB,
509 	 * avoid advertising SWP; it may not be atomic with
510 	 * multiprocessing cores.
511 	 */
512 	if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
513 	    (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
514 	     cpuid_feature_extract(CPUID_EXT_ISAR4, 20) >= 3))
515 		elf_hwcap &= ~HWCAP_SWP;
516 }
517 
518 /*
519  * cpu_init - initialise one CPU.
520  *
521  * cpu_init sets up the per-CPU stacks.
522  */
523 void notrace cpu_init(void)
524 {
525 #ifndef CONFIG_CPU_V7M
526 	unsigned int cpu = smp_processor_id();
527 	struct stack *stk = &stacks[cpu];
528 
529 	if (cpu >= NR_CPUS) {
530 		pr_crit("CPU%u: bad primary CPU number\n", cpu);
531 		BUG();
532 	}
533 
534 	/*
535 	 * This only works on resume and on secondary cores. For the boot CPU,
536 	 * smp_prepare_boot_cpu() is called after the percpu area is set up.
537 	 */
538 	set_my_cpu_offset(per_cpu_offset(cpu));
539 
540 	cpu_proc_init();
541 
542 	/*
543 	 * Define the placement constraint for the inline asm directive below.
544 	 * In Thumb-2, msr with an immediate value is not allowed.
545 	 */
546 #ifdef CONFIG_THUMB2_KERNEL
547 #define PLC	"r"
548 #else
549 #define PLC	"I"
550 #endif
551 
552 	/*
553 	 * setup stacks for re-entrant exception handlers
554 	 */
555 	__asm__ (
556 	"msr	cpsr_c, %1\n\t"
557 	"add	r14, %0, %2\n\t"
558 	"mov	sp, r14\n\t"
559 	"msr	cpsr_c, %3\n\t"
560 	"add	r14, %0, %4\n\t"
561 	"mov	sp, r14\n\t"
562 	"msr	cpsr_c, %5\n\t"
563 	"add	r14, %0, %6\n\t"
564 	"mov	sp, r14\n\t"
565 	"msr	cpsr_c, %7\n\t"
566 	"add	r14, %0, %8\n\t"
567 	"mov	sp, r14\n\t"
568 	"msr	cpsr_c, %9"
569 	    :
570 	    : "r" (stk),
571 	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
572 	      "I" (offsetof(struct stack, irq[0])),
573 	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
574 	      "I" (offsetof(struct stack, abt[0])),
575 	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
576 	      "I" (offsetof(struct stack, und[0])),
577 	      PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
578 	      "I" (offsetof(struct stack, fiq[0])),
579 	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
580 	    : "r14");
581 #endif
582 }
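/*
 * The inline asm above switches through IRQ, ABT, UND and FIQ mode in
 * turn, points each mode's banked stack pointer at its three-word slot
 * inside this CPU's struct stack, and finally drops back into SVC mode.
 * The tiny stacks are sufficient because the exception entry stubs only
 * spill a few registers there before switching to the SVC stack.
 */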
583 
584 u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };
585 
586 void __init smp_setup_processor_id(void)
587 {
588 	int i;
589 	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
590 	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
591 
592 	cpu_logical_map(0) = cpu;
593 	for (i = 1; i < nr_cpu_ids; ++i)
594 		cpu_logical_map(i) = i == cpu ? 0 : i;
595 
596 	/*
597 	 * Clear __my_cpu_offset on the boot CPU to avoid a hang caused by
598 	 * using per-cpu variables too early; for example, lockdep will
599 	 * access a per-cpu variable inside lock_release().
600 	 */
601 	set_my_cpu_offset(0);
602 
603 	pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
604 }
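/*
 * Example of the mapping built above: if the boot CPU reports MPIDR
 * affinity level 0 == 2, logical CPU 0 maps to hardware id 2 and logical
 * CPU 2 takes over hardware id 0, while all other logical CPUs keep an
 * identity mapping.  This guarantees that the CPU we are booting on is
 * always logical CPU 0, whichever core the boot loader started us on.
 */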
605 
606 struct mpidr_hash mpidr_hash;
607 #ifdef CONFIG_SMP
608 /**
609  * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
610  *			  level in order to build a linear index from an
611  *			  MPIDR value. The resulting algorithm is a collision-
612  *			  free hash carried out through shifting and ORing.
613  */
614 static void __init smp_build_mpidr_hash(void)
615 {
616 	u32 i, affinity;
617 	u32 fs[3], bits[3], ls, mask = 0;
618 	/*
619 	 * Pre-scan the list of MPIDRs and filter out bits that do
620 	 * not contribute to affinity levels, i.e. they never toggle.
621 	 */
622 	for_each_possible_cpu(i)
623 		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
624 	pr_debug("mask of set bits 0x%x\n", mask);
625 	/*
626 	 * Find and stash the last and first bit set at all affinity levels to
627 	 * check how many bits are required to represent them.
628 	 */
629 	for (i = 0; i < 3; i++) {
630 		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
631 		/*
632 		 * Find the MSB bit and LSB bits position
633 		 * to determine how many bits are required
634 		 * to express the affinity level.
635 		 */
636 		ls = fls(affinity);
637 		fs[i] = affinity ? ffs(affinity) - 1 : 0;
638 		bits[i] = ls - fs[i];
639 	}
640 	/*
641 	 * An index can be created from the MPIDR by isolating the
642 	 * significant bits at each affinity level and by shifting
643 	 * them in order to compress the 24-bit value space into a
644 	 * smaller set of values. This is equivalent to hashing
645 	 * the MPIDR through shifting and ORing. It is a collision-free
646 	 * hash, though not minimal, since some levels might contain a number
647 	 * of CPUs that is not an exact power of 2 and their bit
648 	 * representation might contain holes, e.g. MPIDR[7:0] = {0x2, 0x80}.
649 	 */
650 	mpidr_hash.shift_aff[0] = fs[0];
651 	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
652 	mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
653 						(bits[1] + bits[0]);
654 	mpidr_hash.mask = mask;
655 	mpidr_hash.bits = bits[2] + bits[1] + bits[0];
656 	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
657 				mpidr_hash.shift_aff[0],
658 				mpidr_hash.shift_aff[1],
659 				mpidr_hash.shift_aff[2],
660 				mpidr_hash.mask,
661 				mpidr_hash.bits);
662 	/*
663 	 * 4x is an arbitrary value used to warn on a hash table much bigger
664 	 * than expected on most systems.
665 	 */
666 	if (mpidr_hash_size() > 4 * num_possible_cpus())
667 		pr_warn("Large number of MPIDR hash buckets detected\n");
668 	sync_cache_w(&mpidr_hash);
669 }
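/*
 * Example: with two clusters of two CPUs (MPIDRs 0x000, 0x001, 0x100 and
 * 0x101) the XOR mask above is 0x101, so only one bit survives at
 * affinity level 0 and one at level 1.  bits[] ends up as {1, 1, 0},
 * mpidr_hash.bits is 2 and the four CPUs hash into a dense 0..3 index
 * instead of the sparse 9-bit MPIDR space.
 */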
670 #endif
671 
672 static void __init setup_processor(void)
673 {
674 	struct proc_info_list *list;
675 
676 	/*
677 	 * locate processor in the list of supported processor
678 	 * types.  The linker builds this table for us from the
679 	 * entries in arch/arm/mm/proc-*.S
680 	 */
681 	list = lookup_processor_type(read_cpuid_id());
682 	if (!list) {
683 		pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
684 		       read_cpuid_id());
685 		while (1);
686 	}
687 
688 	cpu_name = list->cpu_name;
689 	__cpu_architecture = __get_cpu_architecture();
690 
691 #ifdef MULTI_CPU
692 	processor = *list->proc;
693 #endif
694 #ifdef MULTI_TLB
695 	cpu_tlb = *list->tlb;
696 #endif
697 #ifdef MULTI_USER
698 	cpu_user = *list->user;
699 #endif
700 #ifdef MULTI_CACHE
701 	cpu_cache = *list->cache;
702 #endif
703 
704 	pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
705 		cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
706 		proc_arch[cpu_architecture()], get_cr());
707 
708 	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
709 		 list->arch_name, ENDIANNESS);
710 	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
711 		 list->elf_name, ENDIANNESS);
712 	elf_hwcap = list->elf_hwcap;
713 
714 	cpuid_init_hwcaps();
715 	patch_aeabi_idiv();
716 
717 #ifndef CONFIG_ARM_THUMB
718 	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
719 #endif
720 #ifdef CONFIG_MMU
721 	init_default_cache_policy(list->__cpu_mm_mmu_flags);
722 #endif
723 	erratum_a15_798181_init();
724 
725 	elf_hwcap_fixup();
726 
727 	cacheid_init();
728 	cpu_init();
729 }
730 
731 void __init dump_machine_table(void)
732 {
733 	const struct machine_desc *p;
734 
735 	early_print("Available machine support:\n\nID (hex)\tNAME\n");
736 	for_each_machine_desc(p)
737 		early_print("%08x\t%s\n", p->nr, p->name);
738 
739 	early_print("\nPlease check your kernel config and/or bootloader.\n");
740 
741 	while (true)
742 		/* can't use cpu_relax() here as it may require MMU setup */;
743 }
744 
745 int __init arm_add_memory(u64 start, u64 size)
746 {
747 	u64 aligned_start;
748 
749 	/*
750 	 * Ensure that start/size are aligned to a page boundary.
751 	 * Size is rounded down, start is rounded up.
752 	 */
753 	aligned_start = PAGE_ALIGN(start);
754 	if (aligned_start > start + size)
755 		size = 0;
756 	else
757 		size -= aligned_start - start;
758 
759 #ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
760 	if (aligned_start > ULONG_MAX) {
761 		pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
762 			(long long)start);
763 		return -EINVAL;
764 	}
765 
766 	if (aligned_start + size > ULONG_MAX) {
767 		pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
768 			(long long)start);
769 		/*
770 		 * To ensure start + size is representable in
771 		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
772 		 * This means we lose a page after masking.
773 		 */
774 		size = ULONG_MAX - aligned_start;
775 	}
776 #endif
777 
778 	if (aligned_start < PHYS_OFFSET) {
779 		if (aligned_start + size <= PHYS_OFFSET) {
780 			pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
781 				aligned_start, aligned_start + size);
782 			return -EINVAL;
783 		}
784 
785 		pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
786 			aligned_start, (u64)PHYS_OFFSET);
787 
788 		size -= PHYS_OFFSET - aligned_start;
789 		aligned_start = PHYS_OFFSET;
790 	}
791 
792 	start = aligned_start;
793 	size = size & ~(phys_addr_t)(PAGE_SIZE - 1);
794 
795 	/*
796 	 * Check whether this memory region has a non-zero size
797 	 * before handing it to memblock.
798 	 */
799 	if (size == 0)
800 		return -EINVAL;
801 
802 	memblock_add(start, size);
803 	return 0;
804 }
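/*
 * Example of the rounding above: a region passed in as start 0x60001234,
 * size 0x100000 is trimmed to start at 0x60002000 (the page-aligned
 * start); the size shrinks by the 0xdcc bytes that were skipped and is
 * then rounded down to whole pages, so memblock ends up with 0xff000
 * bytes at 0x60002000.
 */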
805 
806 /*
807  * Pick out the memory size.  We look for mem=size@start,
808  * where start and size are "size[KkMm]"
809  */
810 
811 static int __init early_mem(char *p)
812 {
813 	static int usermem __initdata = 0;
814 	u64 size;
815 	u64 start;
816 	char *endp;
817 
818 	/*
819 	 * If the user specifies memory size, we
820 	 * blow away any automatically generated
821 	 * size.
822 	 */
823 	if (usermem == 0) {
824 		usermem = 1;
825 		memblock_remove(memblock_start_of_DRAM(),
826 			memblock_end_of_DRAM() - memblock_start_of_DRAM());
827 	}
828 
829 	start = PHYS_OFFSET;
830 	size  = memparse(p, &endp);
831 	if (*endp == '@')
832 		start = memparse(endp + 1, NULL);
833 
834 	arm_add_memory(start, size);
835 
836 	return 0;
837 }
838 early_param("mem", early_mem);
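/*
 * Usage example: "mem=512M@0x80000000" on the command line drops all
 * memory discovered so far and registers exactly 512MB starting at
 * physical address 0x80000000, while a plain "mem=256M" keeps the
 * default start of PHYS_OFFSET.  The option may be given several times;
 * only the first occurrence wipes the automatically detected memory.
 */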
839 
840 static void __init request_standard_resources(const struct machine_desc *mdesc)
841 {
842 	struct memblock_region *region;
843 	struct resource *res;
844 
845 	kernel_code.start   = virt_to_phys(_text);
846 	kernel_code.end     = virt_to_phys(__init_begin - 1);
847 	kernel_data.start   = virt_to_phys(_sdata);
848 	kernel_data.end     = virt_to_phys(_end - 1);
849 
850 	for_each_memblock(memory, region) {
851 		res = memblock_virt_alloc(sizeof(*res), 0);
852 		res->name  = "System RAM";
853 		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
854 		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
855 		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
856 
857 		request_resource(&iomem_resource, res);
858 
859 		if (kernel_code.start >= res->start &&
860 		    kernel_code.end <= res->end)
861 			request_resource(res, &kernel_code);
862 		if (kernel_data.start >= res->start &&
863 		    kernel_data.end <= res->end)
864 			request_resource(res, &kernel_data);
865 	}
866 
867 	if (mdesc->video_start) {
868 		video_ram.start = mdesc->video_start;
869 		video_ram.end   = mdesc->video_end;
870 		request_resource(&iomem_resource, &video_ram);
871 	}
872 
873 	/*
874 	 * Some machines can never have the lp0, lp1 or lp2
875 	 * parallel port regions at all
876 	 */
877 	if (mdesc->reserve_lp0)
878 		request_resource(&ioport_resource, &lp0);
879 	if (mdesc->reserve_lp1)
880 		request_resource(&ioport_resource, &lp1);
881 	if (mdesc->reserve_lp2)
882 		request_resource(&ioport_resource, &lp2);
883 }
884 
885 #if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) || \
886     defined(CONFIG_EFI)
887 struct screen_info screen_info = {
888  .orig_video_lines	= 30,
889  .orig_video_cols	= 80,
890  .orig_video_mode	= 0,
891  .orig_video_ega_bx	= 0,
892  .orig_video_isVGA	= 1,
893  .orig_video_points	= 8
894 };
895 #endif
896 
897 static int __init customize_machine(void)
898 {
899 	/*
900 	 * Customize platform devices, or add new ones.
901 	 * On DT-based machines we fall back to populating the
902 	 * machine from the device tree if no callback is provided;
903 	 * otherwise we would always need an init_machine callback.
904 	 */
905 	if (machine_desc->init_machine)
906 		machine_desc->init_machine();
907 
908 	return 0;
909 }
910 arch_initcall(customize_machine);
911 
912 static int __init init_machine_late(void)
913 {
914 	struct device_node *root;
915 	int ret;
916 
917 	if (machine_desc->init_late)
918 		machine_desc->init_late();
919 
920 	root = of_find_node_by_path("/");
921 	if (root) {
922 		ret = of_property_read_string(root, "serial-number",
923 					      &system_serial);
924 		if (ret)
925 			system_serial = NULL;
926 	}
927 
928 	if (!system_serial)
929 		system_serial = kasprintf(GFP_KERNEL, "%08x%08x",
930 					  system_serial_high,
931 					  system_serial_low);
932 
933 	return 0;
934 }
935 late_initcall(init_machine_late);
936 
937 #ifdef CONFIG_KEXEC
938 /*
939  * The crash region must be aligned to 128MB to avoid
940  * zImage relocating below the reserved region.
941  */
942 #define CRASH_ALIGN	(128 << 20)
943 
944 static inline unsigned long long get_total_mem(void)
945 {
946 	unsigned long total;
947 
948 	total = max_low_pfn - min_low_pfn;
949 	return total << PAGE_SHIFT;
950 }
951 
952 /**
953  * reserve_crashkernel() - reserves memory area for crash kernel
954  *
955  * This function reserves the memory area given by the "crashkernel=" kernel
956  * command line parameter. The reserved memory is used by a dump-capture
957  * kernel when the primary kernel crashes.
958  */
959 static void __init reserve_crashkernel(void)
960 {
961 	unsigned long long crash_size, crash_base;
962 	unsigned long long total_mem;
963 	int ret;
964 
965 	total_mem = get_total_mem();
966 	ret = parse_crashkernel(boot_command_line, total_mem,
967 				&crash_size, &crash_base);
968 	if (ret)
969 		return;
970 
971 	if (crash_base <= 0) {
972 		unsigned long long crash_max = idmap_to_phys((u32)~0);
973 		crash_base = memblock_find_in_range(CRASH_ALIGN, crash_max,
974 						    crash_size, CRASH_ALIGN);
975 		if (!crash_base) {
976 			pr_err("crashkernel reservation failed - No suitable area found.\n");
977 			return;
978 		}
979 	} else {
980 		unsigned long long start;
981 
982 		start = memblock_find_in_range(crash_base,
983 					       crash_base + crash_size,
984 					       crash_size, SECTION_SIZE);
985 		if (start != crash_base) {
986 			pr_err("crashkernel reservation failed - memory is in use.\n");
987 			return;
988 		}
989 	}
990 
991 	ret = memblock_reserve(crash_base, crash_size);
992 	if (ret < 0) {
993 		pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
994 			(unsigned long)crash_base);
995 		return;
996 	}
997 
998 	pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
999 		(unsigned long)(crash_size >> 20),
1000 		(unsigned long)(crash_base >> 20),
1001 		(unsigned long)(total_mem >> 20));
1002 
1003 	crashk_res.start = crash_base;
1004 	crashk_res.end = crash_base + crash_size - 1;
1005 	insert_resource(&iomem_resource, &crashk_res);
1006 }
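/*
 * Usage example: "crashkernel=64M" makes the code above search for a
 * free 64MB window, aligned to CRASH_ALIGN, below the idmap-able limit,
 * whereas "crashkernel=64M@0x60000000" requests that exact physical base
 * and fails with "memory is in use" if the range is not entirely free.
 */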
1007 #else
1008 static inline void reserve_crashkernel(void) {}
1009 #endif /* CONFIG_KEXEC */
1010 
1011 void __init hyp_mode_check(void)
1012 {
1013 #ifdef CONFIG_ARM_VIRT_EXT
1014 	sync_boot_mode();
1015 
1016 	if (is_hyp_mode_available()) {
1017 		pr_info("CPU: All CPU(s) started in HYP mode.\n");
1018 		pr_info("CPU: Virtualization extensions available.\n");
1019 	} else if (is_hyp_mode_mismatched()) {
1020 		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
1021 			__boot_cpu_mode & MODE_MASK);
1022 		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
1023 	} else
1024 		pr_info("CPU: All CPU(s) started in SVC mode.\n");
1025 #endif
1026 }
1027 
1028 void __init setup_arch(char **cmdline_p)
1029 {
1030 	const struct machine_desc *mdesc;
1031 
1032 	setup_processor();
1033 	mdesc = setup_machine_fdt(__atags_pointer);
1034 	if (!mdesc)
1035 		mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
1036 	machine_desc = mdesc;
1037 	machine_name = mdesc->name;
1038 	dump_stack_set_arch_desc("%s", mdesc->name);
1039 
1040 	if (mdesc->reboot_mode != REBOOT_HARD)
1041 		reboot_mode = mdesc->reboot_mode;
1042 
1043 	init_mm.start_code = (unsigned long) _text;
1044 	init_mm.end_code   = (unsigned long) _etext;
1045 	init_mm.end_data   = (unsigned long) _edata;
1046 	init_mm.brk	   = (unsigned long) _end;
1047 
1048 	/* populate cmd_line too for later use, preserving boot_command_line */
1049 	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
1050 	*cmdline_p = cmd_line;
1051 
1052 	early_fixmap_init();
1053 	early_ioremap_init();
1054 
1055 	parse_early_param();
1056 
1057 #ifdef CONFIG_MMU
1058 	early_paging_init(mdesc);
1059 #endif
1060 	setup_dma_zone(mdesc);
1061 	xen_early_init();
1062 	efi_init();
1063 	sanity_check_meminfo();
1064 	arm_memblock_init(mdesc);
1065 
1066 	early_ioremap_reset();
1067 
1068 	paging_init(mdesc);
1069 	request_standard_resources(mdesc);
1070 
1071 	if (mdesc->restart)
1072 		arm_pm_restart = mdesc->restart;
1073 
1074 	unflatten_device_tree();
1075 
1076 	arm_dt_init_cpu_maps();
1077 	psci_dt_init();
1078 #ifdef CONFIG_SMP
1079 	if (is_smp()) {
1080 		if (!mdesc->smp_init || !mdesc->smp_init()) {
1081 			if (psci_smp_available())
1082 				smp_set_ops(&psci_smp_ops);
1083 			else if (mdesc->smp)
1084 				smp_set_ops(mdesc->smp);
1085 		}
1086 		smp_init_cpus();
1087 		smp_build_mpidr_hash();
1088 	}
1089 #endif
1090 
1091 	if (!is_smp())
1092 		hyp_mode_check();
1093 
1094 	reserve_crashkernel();
1095 
1096 #ifdef CONFIG_MULTI_IRQ_HANDLER
1097 	handle_arch_irq = mdesc->handle_irq;
1098 #endif
1099 
1100 #ifdef CONFIG_VT
1101 #if defined(CONFIG_VGA_CONSOLE)
1102 	conswitchp = &vga_con;
1103 #elif defined(CONFIG_DUMMY_CONSOLE)
1104 	conswitchp = &dummy_con;
1105 #endif
1106 #endif
1107 
1108 	if (mdesc->init_early)
1109 		mdesc->init_early();
1110 }
1111 
1112 
1113 static int __init topology_init(void)
1114 {
1115 	int cpu;
1116 
1117 	for_each_possible_cpu(cpu) {
1118 		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
1119 		cpuinfo->cpu.hotpluggable = platform_can_hotplug_cpu(cpu);
1120 		register_cpu(&cpuinfo->cpu, cpu);
1121 	}
1122 
1123 	return 0;
1124 }
1125 subsys_initcall(topology_init);
1126 
1127 #ifdef CONFIG_HAVE_PROC_CPU
1128 static int __init proc_cpu_init(void)
1129 {
1130 	struct proc_dir_entry *res;
1131 
1132 	res = proc_mkdir("cpu", NULL);
1133 	if (!res)
1134 		return -ENOMEM;
1135 	return 0;
1136 }
1137 fs_initcall(proc_cpu_init);
1138 #endif
1139 
1140 static const char *hwcap_str[] = {
1141 	"swp",
1142 	"half",
1143 	"thumb",
1144 	"26bit",
1145 	"fastmult",
1146 	"fpa",
1147 	"vfp",
1148 	"edsp",
1149 	"java",
1150 	"iwmmxt",
1151 	"crunch",
1152 	"thumbee",
1153 	"neon",
1154 	"vfpv3",
1155 	"vfpv3d16",
1156 	"tls",
1157 	"vfpv4",
1158 	"idiva",
1159 	"idivt",
1160 	"vfpd32",
1161 	"lpae",
1162 	"evtstrm",
1163 	NULL
1164 };
1165 
1166 static const char *hwcap2_str[] = {
1167 	"aes",
1168 	"pmull",
1169 	"sha1",
1170 	"sha2",
1171 	"crc32",
1172 	NULL
1173 };
1174 
1175 static int c_show(struct seq_file *m, void *v)
1176 {
1177 	int i, j;
1178 	u32 cpuid;
1179 
1180 	for_each_online_cpu(i) {
1181 		/*
1182 		 * glibc reads /proc/cpuinfo to determine the number of
1183 		 * online processors, looking for lines beginning with
1184 		 * "processor".  Give glibc what it expects.
1185 		 */
1186 		seq_printf(m, "processor\t: %d\n", i);
1187 		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
1188 		seq_printf(m, "model name\t: %s rev %d (%s)\n",
1189 			   cpu_name, cpuid & 15, elf_platform);
1190 
1191 #if defined(CONFIG_SMP)
1192 		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
1193 			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
1194 			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
1195 #else
1196 		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
1197 			   loops_per_jiffy / (500000/HZ),
1198 			   (loops_per_jiffy / (5000/HZ)) % 100);
1199 #endif
1200 		/* dump out the processor features */
1201 		seq_puts(m, "Features\t: ");
1202 
1203 		for (j = 0; hwcap_str[j]; j++)
1204 			if (elf_hwcap & (1 << j))
1205 				seq_printf(m, "%s ", hwcap_str[j]);
1206 
1207 		for (j = 0; hwcap2_str[j]; j++)
1208 			if (elf_hwcap2 & (1 << j))
1209 				seq_printf(m, "%s ", hwcap2_str[j]);
1210 
1211 		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
1212 		seq_printf(m, "CPU architecture: %s\n",
1213 			   proc_arch[cpu_architecture()]);
1214 
1215 		if ((cpuid & 0x0008f000) == 0x00000000) {
1216 			/* pre-ARM7 */
1217 			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
1218 		} else {
1219 			if ((cpuid & 0x0008f000) == 0x00007000) {
1220 				/* ARM7 */
1221 				seq_printf(m, "CPU variant\t: 0x%02x\n",
1222 					   (cpuid >> 16) & 127);
1223 			} else {
1224 				/* post-ARM7 */
1225 				seq_printf(m, "CPU variant\t: 0x%x\n",
1226 					   (cpuid >> 20) & 15);
1227 			}
1228 			seq_printf(m, "CPU part\t: 0x%03x\n",
1229 				   (cpuid >> 4) & 0xfff);
1230 		}
1231 		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
1232 	}
1233 
1234 	seq_printf(m, "Hardware\t: %s\n", machine_name);
1235 	seq_printf(m, "Revision\t: %04x\n", system_rev);
1236 	seq_printf(m, "Serial\t\t: %s\n", system_serial);
1237 
1238 	return 0;
1239 }
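/*
 * The output produced by c_show() for one online CPU therefore looks
 * roughly like this (all values below are illustrative only):
 *
 *   processor       : 0
 *   model name      : ARMv7 Processor rev 5 (v7l)
 *   BogoMIPS        : 48.00
 *   Features        : half thumb fastmult vfp edsp neon vfpv3 tls idiva
 *   CPU implementer : 0x41
 *   CPU architecture: 7
 *   CPU variant     : 0x0
 *   CPU part        : 0xc07
 *   CPU revision    : 5
 */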
1240 
1241 static void *c_start(struct seq_file *m, loff_t *pos)
1242 {
1243 	return *pos < 1 ? (void *)1 : NULL;
1244 }
1245 
1246 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1247 {
1248 	++*pos;
1249 	return NULL;
1250 }
1251 
1252 static void c_stop(struct seq_file *m, void *v)
1253 {
1254 }
1255 
1256 const struct seq_operations cpuinfo_op = {
1257 	.start	= c_start,
1258 	.next	= c_next,
1259 	.stop	= c_stop,
1260 	.show	= c_show
1261 };
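/*
 * c_start()/c_next() expose exactly one record to the seq_file core (the
 * position is only valid at 0), so c_show() runs once and iterates over
 * all online CPUs itself instead of relying on seq_file iteration.
 */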
1262