/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>
#include <linux/psci.h>

#include <asm/unified.h>
#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/fixmap.h>
#include <asm/procinfo.h>
#include <asm/psci.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>
#include <asm/xen/hypervisor.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
#include <asm/virt.h>

#include "atags.h"


#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void init_default_cache_policy(unsigned long);
extern void paging_init(const struct machine_desc *desc);
extern void early_paging_init(const struct machine_desc *);
extern void sanity_check_meminfo(void);
extern enum reboot_mode reboot_mode;
extern void setup_dma_zone(const struct machine_desc *desc);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

const char *system_serial;
EXPORT_SYMBOL(system_serial);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);

unsigned int elf_hwcap2 __read_mostly;
EXPORT_SYMBOL(elf_hwcap2);


#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
	u32 fiq[3];
} ____cacheline_aligned;

#ifndef CONFIG_CPU_V7M
static struct stack stacks[NR_CPUS];
#endif

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
const struct machine_desc *machine_desc __initdata;

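/*
 * Endianness probe: the union overlays a four-byte string on an
 * unsigned long, so the low-order byte of .l reads back as 'l' on a
 * little-endian kernel and 'b' on a big-endian one.  ENDIANNESS is
 * appended to the utsname machine string and ELF platform name in
 * setup_processor().
 */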
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

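/* Human-readable architecture names, indexed by the CPU_ARCH_* values. */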
static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"7M",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

#ifdef CONFIG_CPU_V7M
static int __get_cpu_architecture(void)
{
	return CPU_ARCH_ARMv7M;
}
#else
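/*
 * Derive the architecture version from the main ID register: ARM7
 * family parts are split into ARMv4T or ARMv3 by ID bit 23, the old
 * CPUID scheme encodes the architecture in bits [18:16] relative to
 * ARMv3, and the revised CPUID scheme (bits [19:16] == 0xf) requires
 * reading the VMSA/PMSA support fields of MMFR0 instead.
 */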
static int __get_cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
#endif

int __pure cpu_architecture(void)
{
	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

	return __cpu_architecture;
}

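/*
 * On ARMv7 the L1 instruction cache geometry is read from CCSIDR after
 * selecting the level 1 I-cache via CSSELR.  A VIPT I-cache can alias
 * when a single way spans more than one page, i.e. when
 * line_size * num_sets exceeds PAGE_SIZE.  ARMv6 reports aliasing
 * directly via a bit in the cache type register.
 */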
static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
		    : /* No output operands */
		    : "r" (1));
		isb();
		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
		    : "=r" (id_reg));
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}

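/*
 * Decode the Cache Type Register.  CTR bits [31:29] == 0b100 indicate
 * the ARMv7 format, whose L1Ip field (bits [15:14]) gives the L1
 * I-cache policy (ASID-tagged VIVT, VIPT or PIPT).  ARMv6 parts use
 * bit 23 to flag an aliasing VIPT data cache; anything older is
 * assumed to be VIVT.
 */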
static void __init cacheid_init(void)
{
	unsigned int arch = cpu_architecture();

	if (arch == CPU_ARCH_ARMv7M) {
		cacheid = 0;
	} else if (arch >= CPU_ARCH_ARMv6) {
		unsigned int cachetype = read_cpuid_cachetype();
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	pr_info("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}

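/*
 * Derive extra hwcaps from the CPUID feature registers on ARMv7 and
 * later.  The 4-bit ISAR0 field at bit 24 describes SDIV/UDIV support
 * (>= 1: Thumb, >= 2: ARM and Thumb), MMFR0[3:0] >= 5 implies LPAE
 * (and hence single-copy atomic LDRD/STRD), and the ISAR5 fields
 * advertise the v8 Crypto (AES/PMULL/SHA1/SHA2) and CRC32 extensions.
 */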
static void __init cpuid_init_hwcaps(void)
{
	int block;
	u32 isar5;

	if (cpu_architecture() < CPU_ARCH_ARMv7)
		return;

	block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24);
	if (block >= 2)
		elf_hwcap |= HWCAP_IDIVA;
	if (block >= 1)
		elf_hwcap |= HWCAP_IDIVT;

	/* LPAE implies atomic ldrd/strd instructions */
	block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
	if (block >= 5)
		elf_hwcap |= HWCAP_LPAE;

	/* check for supported v8 Crypto instructions */
	isar5 = read_cpuid_ext(CPUID_EXT_ISAR5);

	block = cpuid_feature_extract_field(isar5, 4);
	if (block >= 2)
		elf_hwcap2 |= HWCAP2_PMULL;
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_AES;

	block = cpuid_feature_extract_field(isar5, 8);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA1;

	block = cpuid_feature_extract_field(isar5, 12);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA2;

	block = cpuid_feature_extract_field(isar5, 16);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_CRC32;
}

static void __init elf_hwcap_fixup(void)
{
	unsigned id = read_cpuid_id();

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if (read_cpuid_part() == ARM_CPU_PART_ARM1136 &&
	    ((id >> 20) & 3) == 0) {
		elf_hwcap &= ~HWCAP_TLS;
		return;
	}

	/* Verify if CPUID scheme is implemented */
	if ((id & 0x000f0000) != 0x000f0000)
		return;

	/*
	 * If the CPU supports LDREX/STREX and LDREXB/STREXB,
	 * avoid advertising SWP; it may not be atomic with
	 * multiprocessing cores.
	 */
	if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
	    (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
	     cpuid_feature_extract(CPUID_EXT_ISAR3, 20) >= 3))
		elf_hwcap &= ~HWCAP_SWP;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void notrace cpu_init(void)
{
#ifndef CONFIG_CPU_V7M
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		pr_crit("CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * This only works on resume and secondary cores. For booting on the
	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
	 */
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 */
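	/*
	 * The msr/add/mov sequence below switches to IRQ, ABT, UND and
	 * FIQ mode in turn with IRQs and FIQs masked, points sp at this
	 * CPU's small per-mode stack, and finally drops back into SVC
	 * mode.
	 */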
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7\n\t"
	"add	r14, %0, %8\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %9"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
	      "I" (offsetof(struct stack, fiq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
#endif
}

u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

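/*
 * Map the booting CPU to logical CPU 0.  Its MPIDR affinity level 0
 * field need not be zero, so the identity mapping of the remaining
 * logical CPUs is adjusted to keep cpu_logical_map a permutation.
 */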
void __init smp_setup_processor_id(void)
{
	int i;
	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	cpu_logical_map(0) = cpu;
	for (i = 1; i < nr_cpu_ids; ++i)
		cpu_logical_map(i) = i == cpu ? 0 : i;

	/*
	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
	 * using percpu variable early, for example, lockdep will
	 * access percpu variable inside lock_release
	 */
	set_my_cpu_offset(0);

	pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
}

struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. Resulting algorithm is a collision
 *			  free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity;
	u32 fs[3], bits[3], ls, mask = 0;
	/*
	 * Pre-scan the list of MPIDRs and filter out bits that do
	 * not contribute to affinity levels, i.e. they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits 0x%x\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 3; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB bit and LSB bits position
		 * to determine how many bits are required
		 * to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 24 bits values space to a
	 * compressed set of values. This is equivalent to hashing
	 * the MPIDR through shifting and ORing. It is a collision free
	 * hash though not minimal since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}.
	 */
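	/*
	 * shift_aff[n] is the right shift that moves the significant bits
	 * of affinity level n down so that they sit immediately above the
	 * bits kept from the lower levels: level n starts at bit
	 * n * MPIDR_LEVEL_BITS + fs[n] in the MPIDR and is packed down to
	 * bit position bits[0] + ... + bits[n-1] in the hash index.
	 */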
	mpidr_hash.shift_aff[0] = fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
				mpidr_hash.shift_aff[0],
				mpidr_hash.shift_aff[1],
				mpidr_hash.shift_aff[2],
				mpidr_hash.mask,
				mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	sync_cache_w(&mpidr_hash);
}
#endif

static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
		       read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
		cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
		proc_arch[cpu_architecture()], get_cr());

	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;

	cpuid_init_hwcaps();

#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif
#ifdef CONFIG_MMU
	init_default_cache_policy(list->__cpu_mm_mmu_flags);
#endif
	erratum_a15_798181_init();

	elf_hwcap_fixup();

	cacheid_init();
	cpu_init();
}

void __init dump_machine_table(void)
{
	const struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}

int __init arm_add_memory(u64 start, u64 size)
{
	u64 aligned_start;

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is rounded down, start is rounded up.
	 */
	aligned_start = PAGE_ALIGN(start);
	if (aligned_start > start + size)
		size = 0;
	else
		size -= aligned_start - start;

#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
	if (aligned_start > ULONG_MAX) {
		pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
			(long long)start);
		return -EINVAL;
	}

	if (aligned_start + size > ULONG_MAX) {
		pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
			(long long)start);
		/*
		 * To ensure that the region's start + size is representable
		 * in 32 bits, we use ULONG_MAX as the upper limit rather
		 * than 4GB.  This means we lose a page after masking.
		 */
		size = ULONG_MAX - aligned_start;
	}
#endif

	if (aligned_start < PHYS_OFFSET) {
		if (aligned_start + size <= PHYS_OFFSET) {
			pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
				aligned_start, aligned_start + size);
			return -EINVAL;
		}

		pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
			aligned_start, (u64)PHYS_OFFSET);

		size -= PHYS_OFFSET - aligned_start;
		aligned_start = PHYS_OFFSET;
	}

	start = aligned_start;
	size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

	/*
	 * Check whether this memory region still has a non-zero size
	 * after the alignment above.
	 */
	if (size == 0)
		return -EINVAL;

	memblock_add(start, size);
	return 0;
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */

static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	u64 size;
	u64 start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		memblock_remove(memblock_start_of_DRAM(),
			memblock_end_of_DRAM() - memblock_start_of_DRAM());
	}

	start = PHYS_OFFSET;
	size  = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);

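/*
 * Register the standard /proc/iomem and /proc/ioports resources: one
 * "System RAM" entry per memblock region, with the kernel code and
 * data resources nested inside the region that contains them, plus
 * the optional video RAM and legacy parallel-port ranges described by
 * the machine descriptor.
 */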
static void __init request_standard_resources(const struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = memblock_virt_alloc(sizeof(*res), 0);
		res->name  = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines can never have the legacy parallel port
	 * resources lp0, lp1 or lp2, so only claim them when the
	 * machine descriptor asks for them.
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
 .orig_video_lines	= 30,
 .orig_video_cols	= 80,
 .orig_video_mode	= 0,
 .orig_video_ega_bx	= 0,
 .orig_video_isVGA	= 1,
 .orig_video_points	= 8
};
#endif

static int __init customize_machine(void)
{
	/*
	 * Customize platform devices, or add new ones.  On DT-based
	 * machines, fall back to populating the machine from the device
	 * tree if no callback is provided; otherwise every machine would
	 * need an init_machine callback.
	 */
	of_iommu_init();
	if (machine_desc->init_machine)
		machine_desc->init_machine();
#ifdef CONFIG_OF
	else
		of_platform_populate(NULL, of_default_bus_match_table,
					NULL, NULL);
#endif
	return 0;
}
arch_initcall(customize_machine);

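/*
 * Late machine init: run the platform's init_late hook and determine
 * the system serial number, preferring the device tree "serial-number"
 * property and falling back to the ATAG-provided high/low words.
 */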
static int __init init_machine_late(void)
{
	struct device_node *root;
	int ret;

	if (machine_desc->init_late)
		machine_desc->init_late();

	root = of_find_node_by_path("/");
	if (root) {
		ret = of_property_read_string(root, "serial-number",
					      &system_serial);
		if (ret)
			system_serial = NULL;
	}

	if (!system_serial)
		system_serial = kasprintf(GFP_KERNEL, "%08x%08x",
					  system_serial_high,
					  system_serial_low);

	return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves the memory area given by the "crashkernel=" kernel
 * command line parameter. The reserved memory is used by a dump capture
 * kernel when the primary kernel crashes.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

	ret = memblock_reserve(crash_base, crash_size);
	if (ret < 0) {
		pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
			(unsigned long)crash_base);
		return;
	}

	pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
		(unsigned long)(crash_size >> 20),
		(unsigned long)(crash_base >> 20),
		(unsigned long)(total_mem >> 20));

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */

void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	sync_boot_mode();

	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}

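/*
 * setup_arch() runs once on the boot CPU: it identifies the CPU and
 * machine, preserves the boot command line, parses early parameters,
 * initialises memblock and paging, unflattens the device tree, and
 * selects the SMP and IRQ handling operations for the platform.
 */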
void __init setup_arch(char **cmdline_p)
{
	const struct machine_desc *mdesc;

	setup_processor();
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;
	dump_stack_set_arch_desc("%s", mdesc->name);

	if (mdesc->reboot_mode != REBOOT_HARD)
		reboot_mode = mdesc->reboot_mode;

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	if (IS_ENABLED(CONFIG_FIX_EARLYCON_MEM))
		early_fixmap_init();

	parse_early_param();

#ifdef CONFIG_MMU
	early_paging_init(mdesc);
#endif
	setup_dma_zone(mdesc);
	sanity_check_meminfo();
	arm_memblock_init(mdesc);

	paging_init(mdesc);
	request_standard_resources(mdesc);

	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
	psci_dt_init();
	xen_early_init();
#ifdef CONFIG_SMP
	if (is_smp()) {
		if (!mdesc->smp_init || !mdesc->smp_init()) {
			if (psci_smp_available())
				smp_set_ops(&psci_smp_ops);
			else if (mdesc->smp)
				smp_set_ops(mdesc->smp);
		}
		smp_init_cpus();
		smp_build_mpidr_hash();
	}
#endif

	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}


static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = platform_can_hotplug_cpu(cpu);
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif

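/* HWCAP bit names for /proc/cpuinfo; bit n of elf_hwcap prints hwcap_str[n]. */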
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	NULL
};

static const char *hwcap2_str[] = {
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};

static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	u32 cpuid;

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		seq_printf(m, "model name\t: %s rev %d (%s)\n",
			   cpu_name, cpuid & 15, elf_platform);

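		/*
		 * BogoMIPS is loops_per_jiffy * HZ / 500000, printed as a
		 * fixed-point value with two decimal places: per-CPU on
		 * SMP, the global value otherwise.
		 */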
#if defined(CONFIG_SMP)
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   loops_per_jiffy / (500000/HZ),
			   (loops_per_jiffy / (5000/HZ)) % 100);
#endif
		/* dump out the processor features */
		seq_puts(m, "Features\t: ");

		for (j = 0; hwcap_str[j]; j++)
			if (elf_hwcap & (1 << j))
				seq_printf(m, "%s ", hwcap_str[j]);

		for (j = 0; hwcap2_str[j]; j++)
			if (elf_hwcap2 & (1 << j))
				seq_printf(m, "%s ", hwcap2_str[j]);

		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
		seq_printf(m, "CPU architecture: %s\n",
			   proc_arch[cpu_architecture()]);

		if ((cpuid & 0x0008f000) == 0x00000000) {
			/* pre-ARM7 */
			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
		} else {
			if ((cpuid & 0x0008f000) == 0x00007000) {
				/* ARM7 */
				seq_printf(m, "CPU variant\t: 0x%02x\n",
					   (cpuid >> 16) & 127);
			} else {
				/* post-ARM7 */
				seq_printf(m, "CPU variant\t: 0x%x\n",
					   (cpuid >> 20) & 15);
			}
			seq_printf(m, "CPU part\t: 0x%03x\n",
				   (cpuid >> 4) & 0xfff);
		}
		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
	}

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %s\n", system_serial);

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};