/*
 * Based on arch/arm/kernel/setup.c
 *
 * Copyright (C) 1995-2001 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/cache.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/clk-provider.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/of_iommu.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/efi.h>
#include <linux/personality.h>

#include <asm/fixmap.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>
#include <asm/memblock.h>
#include <asm/psci.h>
#include <asm/efi.h>
#include <asm/virt.h>

unsigned long elf_hwcap __read_mostly;
EXPORT_SYMBOL_GPL(elf_hwcap);

#ifdef CONFIG_COMPAT
#define COMPAT_ELF_HWCAP_DEFAULT	\
				(COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
				 COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
				 COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
				 COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
				 COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
				 COMPAT_HWCAP_LPAE)
unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
unsigned int compat_elf_hwcap2 __read_mostly;
#endif

DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);

phys_addr_t __fdt_pointer __initdata;

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define kernel_code mem_res[0]
#define kernel_data mem_res[1]

void __init early_print(const char *str, ...)
{
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

	printk("%s", buf);
}

/*
 * The recorded values of x0 .. x3 upon kernel entry.
 */
u64 __cacheline_aligned boot_args[4];
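/*
 * These are stashed very early in the boot path and are sanity-checked
 * at the end of setup_arch(): per the arm64 boot protocol, x1-x3 must
 * be zero, and a warning is printed if they are not.
 */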

void __init smp_setup_processor_id(void)
{
	u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
	cpu_logical_map(0) = mpidr;

	/*
	 * Clear __my_cpu_offset on the boot CPU to avoid a hang caused
	 * by using per-cpu variables too early; for example, lockdep
	 * will access a per-cpu variable inside lock_release().
	 */
	set_my_cpu_offset(0);
	pr_info("Booting Linux on physical CPU 0x%lx\n", (unsigned long)mpidr);
}

bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
	return phys_id == cpu_logical_map(cpu);
}

struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute the shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. The resulting algorithm is a
 *			  collision-free hash carried out through shifting
 *			  and ORing.
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity, fs[4], bits[4], ls;
	u64 mask = 0;
	/*
	 * Pre-scan the list of MPIDRs and filter out bits that do
	 * not contribute to affinity levels, i.e. they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits %#llx\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 4; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB and LSB positions to determine how many
		 * bits are required to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR_EL1 by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 32-bit value space to a
	 * dense set of values. This is equivalent to hashing
	 * the MPIDR_EL1 through shifting and ORing. It is a collision-free
	 * hash, though not minimal, since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, e.g. MPIDR_EL1[7:0] = {0x2, 0x80}.
	 */
	mpidr_hash.shift_aff[0] = MPIDR_LEVEL_SHIFT(0) + fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_SHIFT(1) + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = MPIDR_LEVEL_SHIFT(2) + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.shift_aff[3] = MPIDR_LEVEL_SHIFT(3) +
				  fs[3] - (bits[2] + bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[3] + bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] aff3[%u] mask[%#llx] bits[%u]\n",
		mpidr_hash.shift_aff[0],
		mpidr_hash.shift_aff[1],
		mpidr_hash.shift_aff[2],
		mpidr_hash.shift_aff[3],
		mpidr_hash.mask,
		mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	__flush_dcache_area(&mpidr_hash, sizeof(struct mpidr_hash));
}
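
/*
 * Worked example for the hash built above (illustrative MPIDR values,
 * not taken from any real system): with four CPUs whose MPIDRs are
 * 0x0, 0x1, 0x100 and 0x101, the XOR pre-scan yields mask = 0x101, so
 * only bit 0 of Aff0 and bit 0 of Aff1 ever toggle (fs[0] = fs[1] = 0,
 * bits[0] = bits[1] = 1).  That gives shift_aff[0] = 0 and
 * shift_aff[1] = 8 - 1 = 7, and an MPIDR then hashes to
 *
 *	index = ((mpidr & 0x001) >> 0) | ((mpidr & 0x100) >> 7)
 *
 * which maps the four MPIDRs to the dense indexes 0, 1, 2 and 3.
 */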
#endif

static void __init hyp_mode_check(void)
{
	if (is_hyp_mode_available())
		pr_info("CPU: All CPU(s) started at EL2\n");
	else if (is_hyp_mode_mismatched())
		WARN_TAINT(1, TAINT_CPU_OUT_OF_SPEC,
			   "CPU: CPUs started in inconsistent modes");
	else
		pr_info("CPU: All CPU(s) started at EL1\n");
}

void __init do_post_cpus_up_work(void)
{
	hyp_mode_check();
	apply_alternatives_all();
}

#ifdef CONFIG_UP_LATE_INIT
void __init up_late_init(void)
{
	do_post_cpus_up_work();
}
#endif /* CONFIG_UP_LATE_INIT */

static void __init setup_processor(void)
{
	u64 features, block;
	u32 cwg;
	int cls;

	printk("CPU: AArch64 Processor [%08x] revision %d\n",
	       read_cpuid_id(), read_cpuid_id() & 15);

	sprintf(init_utsname()->machine, ELF_PLATFORM);
	elf_hwcap = 0;

	cpuinfo_store_boot_cpu();

	/*
	 * Check for sane CTR_EL0.CWG value.
	 */
	cwg = cache_type_cwg();
	cls = cache_line_size();
	if (!cwg)
		pr_warn("No Cache Writeback Granule information, assuming cache line size %d\n",
			cls);
	if (L1_CACHE_BYTES < cls)
		pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n",
			L1_CACHE_BYTES, cls);

	/*
	 * ID_AA64ISAR0_EL1 contains 4-bit wide signed feature blocks.
	 * The blocks we test below represent incremental functionality
	 * for non-negative values. Negative values are reserved.
	 */
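	/*
	 * For example, the AES field lives in ID_AA64ISAR0_EL1[7:4]:
	 * 0b0000 means AES is not implemented, 0b0001 advertises the
	 * AESE/AESD/AESMC/AESIMC instructions, and 0b0010 additionally
	 * advertises PMULL/PMULL2, hence the cascading switch below.
	 */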
	features = read_cpuid(ID_AA64ISAR0_EL1);
	block = (features >> 4) & 0xf;
	if (!(block & 0x8)) {
		switch (block) {
		default:
		case 2:
			elf_hwcap |= HWCAP_PMULL;
			/* fall through */
		case 1:
			elf_hwcap |= HWCAP_AES;
			/* fall through */
		case 0:
			break;
		}
	}

	block = (features >> 8) & 0xf;
	if (block && !(block & 0x8))
		elf_hwcap |= HWCAP_SHA1;

	block = (features >> 12) & 0xf;
	if (block && !(block & 0x8))
		elf_hwcap |= HWCAP_SHA2;

	block = (features >> 16) & 0xf;
	if (block && !(block & 0x8))
		elf_hwcap |= HWCAP_CRC32;

#ifdef CONFIG_COMPAT
	/*
	 * ID_ISAR5_EL1 carries similar information to the above, but
	 * pertaining to the AArch32 (32-bit) execution state.
	 */
	features = read_cpuid(ID_ISAR5_EL1);
	block = (features >> 4) & 0xf;
	if (!(block & 0x8)) {
		switch (block) {
		default:
		case 2:
			compat_elf_hwcap2 |= COMPAT_HWCAP2_PMULL;
			/* fall through */
		case 1:
			compat_elf_hwcap2 |= COMPAT_HWCAP2_AES;
			/* fall through */
		case 0:
			break;
		}
	}

	block = (features >> 8) & 0xf;
	if (block && !(block & 0x8))
		compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA1;

	block = (features >> 12) & 0xf;
	if (block && !(block & 0x8))
		compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA2;

	block = (features >> 16) & 0xf;
	if (block && !(block & 0x8))
		compat_elf_hwcap2 |= COMPAT_HWCAP2_CRC32;
#endif
}
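
/*
 * Illustrative userspace consumer (not part of this file): the hwcaps
 * set up above reach userspace via the ELF auxiliary vector, which is
 * the supported way to test for them:
 *
 *	#include <sys/auxv.h>
 *
 *	if (getauxval(AT_HWCAP) & HWCAP_AES)
 *		... AES instructions may be used ...
 */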

static void __init setup_machine_fdt(phys_addr_t dt_phys)
{
	if (!dt_phys || !early_init_dt_scan(phys_to_virt(dt_phys))) {
		early_print("\n"
			"Error: invalid device tree blob at physical address 0x%p (virtual address 0x%p)\n"
			"The dtb must be 8-byte aligned and passed in the first 512MB of memory\n"
			"\nPlease check your bootloader.\n",
			dt_phys, phys_to_virt(dt_phys));

		while (true)
			cpu_relax();
	}

	dump_stack_set_arch_desc("%s (DT)", of_flat_dt_get_machine_name());
}

static void __init request_standard_resources(void)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}
}
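
/*
 * The net effect is visible in /proc/iomem, schematically (addresses
 * are illustrative only):
 *
 *	40000000-7fffffff : System RAM
 *	  40080000-405fffff : Kernel code
 *	  40600000-40ffffff : Kernel data
 */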

u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };

void __init setup_arch(char **cmdline_p)
{
	setup_processor();

	setup_machine_fdt(__fdt_pointer);

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	*cmdline_p = boot_command_line;

	early_fixmap_init();
	early_ioremap_init();

	parse_early_param();

	/*
	 * Unmask asynchronous aborts after bringing up a possible earlycon,
	 * so that any System Errors that occur can actually be reported.
	 */
	local_async_enable();

	efi_init();
	arm64_memblock_init();

	paging_init();
	request_standard_resources();

	early_ioremap_reset();

	unflatten_device_tree();

	psci_init();

	cpu_read_bootcpu_ops();
#ifdef CONFIG_SMP
	smp_init_cpus();
	smp_build_mpidr_hash();
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
	if (boot_args[1] || boot_args[2] || boot_args[3]) {
		pr_err("WARNING: x1-x3 nonzero in violation of boot protocol:\n"
			"\tx1: %016llx\n\tx2: %016llx\n\tx3: %016llx\n"
			"This indicates a broken bootloader or old kernel\n",
			boot_args[1], boot_args[2], boot_args[3]);
	}
}

static int __init arm64_device_init(void)
{
	of_iommu_init();
	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
	return 0;
}
arch_initcall_sync(arm64_device_init);

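/*
 * Register one struct cpu per possible CPU so that each shows up under
 * /sys/devices/system/cpu/.  Marking them hotpluggable exposes the
 * per-cpu "online" control to userspace (assuming the configuration
 * supports CPU hotplug).
 */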
static int __init topology_init(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct cpu *cpu = &per_cpu(cpu_data.cpu, i);
		cpu->hotpluggable = 1;
		register_cpu(cpu, i);
	}

	return 0;
}
subsys_initcall(topology_init);

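/*
 * These strings are indexed by bit position, so their order must match
 * the HWCAP_* (and, below, the COMPAT_HWCAP_*/COMPAT_HWCAP2_*) bit
 * definitions exactly: c_show() tests bit j of the hwcap word against
 * entry j of the table.
 */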
static const char *hwcap_str[] = {
	"fp",
	"asimd",
	"evtstrm",
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};

#ifdef CONFIG_COMPAT
static const char *compat_hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	NULL	/* the c_show() loop below relies on this sentinel */
};

static const char *compat_hwcap2_str[] = {
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};
#endif /* CONFIG_COMPAT */

static int c_show(struct seq_file *m, void *v)
{
	int i, j;

	for_each_online_cpu(i) {
		struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
		u32 midr = cpuinfo->reg_midr;

		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
#ifdef CONFIG_SMP
		seq_printf(m, "processor\t: %d\n", i);
#endif

		/*
		 * Dump out the common processor features in a single line.
		 * Userspace should read the hwcaps with getauxval(AT_HWCAP)
		 * rather than attempting to parse this, but there's a body of
		 * software which does already (at least for 32-bit).
		 */
		seq_puts(m, "Features\t:");
		if (personality(current->personality) == PER_LINUX32) {
#ifdef CONFIG_COMPAT
			for (j = 0; compat_hwcap_str[j]; j++)
				if (compat_elf_hwcap & (1 << j))
					seq_printf(m, " %s", compat_hwcap_str[j]);

			for (j = 0; compat_hwcap2_str[j]; j++)
				if (compat_elf_hwcap2 & (1 << j))
					seq_printf(m, " %s", compat_hwcap2_str[j]);
#endif /* CONFIG_COMPAT */
		} else {
			for (j = 0; hwcap_str[j]; j++)
				if (elf_hwcap & (1 << j))
					seq_printf(m, " %s", hwcap_str[j]);
		}
		seq_puts(m, "\n");

		seq_printf(m, "CPU implementer\t: 0x%02x\n",
			   MIDR_IMPLEMENTOR(midr));
		seq_printf(m, "CPU architecture: 8\n");
		seq_printf(m, "CPU variant\t: 0x%x\n", MIDR_VARIANT(midr));
		seq_printf(m, "CPU part\t: 0x%03x\n", MIDR_PARTNUM(midr));
		seq_printf(m, "CPU revision\t: %d\n\n", MIDR_REVISION(midr));
	}

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

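/*
 * Wired up to /proc/cpuinfo by the generic code in fs/proc/cpuinfo.c,
 * which opens the seq_file with these operations: c_start()/c_next()
 * produce a single iteration, and c_show() then prints one record per
 * online CPU.
 */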
const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};
571