xref: /openbmc/linux/arch/arm64/kernel/setup.c (revision c0e297dc)
1 /*
2  * Based on arch/arm/kernel/setup.c
3  *
4  * Copyright (C) 1995-2001 Russell King
5  * Copyright (C) 2012 ARM Ltd.
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include <linux/acpi.h>
21 #include <linux/export.h>
22 #include <linux/kernel.h>
23 #include <linux/stddef.h>
24 #include <linux/ioport.h>
25 #include <linux/delay.h>
26 #include <linux/utsname.h>
27 #include <linux/initrd.h>
28 #include <linux/console.h>
29 #include <linux/cache.h>
30 #include <linux/bootmem.h>
31 #include <linux/seq_file.h>
32 #include <linux/screen_info.h>
33 #include <linux/init.h>
34 #include <linux/kexec.h>
35 #include <linux/crash_dump.h>
36 #include <linux/root_dev.h>
37 #include <linux/clk-provider.h>
38 #include <linux/cpu.h>
39 #include <linux/interrupt.h>
40 #include <linux/smp.h>
41 #include <linux/fs.h>
42 #include <linux/proc_fs.h>
43 #include <linux/memblock.h>
44 #include <linux/of_iommu.h>
45 #include <linux/of_fdt.h>
46 #include <linux/of_platform.h>
47 #include <linux/efi.h>
48 #include <linux/personality.h>
49 
50 #include <asm/acpi.h>
51 #include <asm/fixmap.h>
52 #include <asm/cpu.h>
53 #include <asm/cputype.h>
54 #include <asm/elf.h>
55 #include <asm/cpufeature.h>
56 #include <asm/cpu_ops.h>
57 #include <asm/sections.h>
58 #include <asm/setup.h>
59 #include <asm/smp_plat.h>
60 #include <asm/cacheflush.h>
61 #include <asm/tlbflush.h>
62 #include <asm/traps.h>
63 #include <asm/memblock.h>
64 #include <asm/psci.h>
65 #include <asm/efi.h>
66 #include <asm/virt.h>
67 #include <asm/xen/hypervisor.h>
68 
/* Native (AArch64) ELF hwcap bits advertised to userspace via AT_HWCAP. */
unsigned long elf_hwcap __read_mostly;
EXPORT_SYMBOL_GPL(elf_hwcap);
71 
#ifdef CONFIG_COMPAT
/*
 * Baseline AArch32 (compat) hwcaps. Optional features (crypto, CRC32)
 * are added to compat_elf_hwcap2 in setup_processor() based on
 * ID_ISAR5_EL1.
 */
#define COMPAT_ELF_HWCAP_DEFAULT	\
				(COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
				 COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
				 COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
				 COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
				 COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
				 COMPAT_HWCAP_LPAE)
unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
unsigned int compat_elf_hwcap2 __read_mostly;	/* AT_HWCAP2 for compat tasks */
#endif
83 
/* Bitmap of detected arm64 CPU capabilities (indexed by ARM64_* cap IDs). */
DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);

/* Physical address of the DTB; consumed by setup_machine_fdt() from setup_arch(). */
phys_addr_t __fdt_pointer __initdata;
87 
/*
 * Standard memory resources
 *
 * Placeholders for the kernel text and data regions; the start/end
 * fields are filled in by request_standard_resources() and the entries
 * are then nested under the matching "System RAM" resource.
 */
static struct resource mem_res[] = {
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define kernel_code mem_res[0]
#define kernel_data mem_res[1]
108 
/*
 * The recorded values of x0 .. x3 upon kernel entry.
 * Per the arm64 boot protocol x1-x3 must be zero; setup_arch() warns
 * if they are not.
 */
u64 __cacheline_aligned boot_args[4];
113 
114 void __init smp_setup_processor_id(void)
115 {
116 	u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
117 	cpu_logical_map(0) = mpidr;
118 
119 	/*
120 	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
121 	 * using percpu variable early, for example, lockdep will
122 	 * access percpu variable inside lock_release
123 	 */
124 	set_my_cpu_offset(0);
125 	pr_info("Booting Linux on physical CPU 0x%lx\n", (unsigned long)mpidr);
126 }
127 
128 bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
129 {
130 	return phys_id == cpu_logical_map(cpu);
131 }
132 
/* Shift/mask data for turning an MPIDR into a dense CPU index (see below). */
struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. Resulting algorithm is a collision
 *			  free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity, fs[4], bits[4], ls;
	u64 mask = 0;
	/*
	 * Pre-scan the list of MPIDRS and filter out bits that do
	 * not contribute to affinity levels, ie they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits %#llx\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 4; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB bit and LSB bits position
		 * to determine how many bits are required
		 * to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR_EL1 by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 32 bits values space to a
	 * compressed set of values. This is equivalent to hashing
	 * the MPIDR_EL1 through shifting and ORing. It is a collision free
	 * hash though not minimal since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, eg MPIDR_EL1[7:0] = {0x2, 0x80}.
	 */
	mpidr_hash.shift_aff[0] = MPIDR_LEVEL_SHIFT(0) + fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_SHIFT(1) + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = MPIDR_LEVEL_SHIFT(2) + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.shift_aff[3] = MPIDR_LEVEL_SHIFT(3) +
				  fs[3] - (bits[2] + bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[3] + bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] aff3[%u] mask[%#llx] bits[%u]\n",
		mpidr_hash.shift_aff[0],
		mpidr_hash.shift_aff[1],
		mpidr_hash.shift_aff[2],
		mpidr_hash.shift_aff[3],
		mpidr_hash.mask,
		mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	/* Flush so secondaries/low-level resume code see the hash before MMU/caches are up. */
	__flush_dcache_area(&mpidr_hash, sizeof(struct mpidr_hash));
}
#endif
201 
202 static void __init hyp_mode_check(void)
203 {
204 	if (is_hyp_mode_available())
205 		pr_info("CPU: All CPU(s) started at EL2\n");
206 	else if (is_hyp_mode_mismatched())
207 		WARN_TAINT(1, TAINT_CPU_OUT_OF_SPEC,
208 			   "CPU: CPUs started in inconsistent modes");
209 	else
210 		pr_info("CPU: All CPU(s) started at EL1\n");
211 }
212 
/*
 * Work to run once all CPUs are up: check the exception level the CPUs
 * booted at and patch in any code alternatives system-wide.
 */
void __init do_post_cpus_up_work(void)
{
	hyp_mode_check();
	apply_alternatives_all();
}
218 
#ifdef CONFIG_UP_LATE_INIT
/* UP kernels bring no secondaries online; run the post-CPUs-up work here. */
void __init up_late_init(void)
{
	do_post_cpus_up_work();
}
#endif /* CONFIG_UP_LATE_INIT */
225 
/*
 * Identify the boot CPU, sanity-check its cache geometry and populate
 * the ELF hwcap masks exported to userspace (native and, under
 * CONFIG_COMPAT, AArch32) from the ID registers.
 */
static void __init setup_processor(void)
{
	u64 features, block;
	u32 cwg;
	int cls;

	printk("CPU: AArch64 Processor [%08x] revision %d\n",
	       read_cpuid_id(), read_cpuid_id() & 15);

	sprintf(init_utsname()->machine, ELF_PLATFORM);
	elf_hwcap = 0;

	/* Snapshot the boot CPU's ID registers for later comparison. */
	cpuinfo_store_boot_cpu();

	/*
	 * Check for sane CTR_EL0.CWG value.
	 */
	cwg = cache_type_cwg();
	cls = cache_line_size();
	if (!cwg)
		pr_warn("No Cache Writeback Granule information, assuming cache line size %d\n",
			cls);
	if (L1_CACHE_BYTES < cls)
		pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n",
			L1_CACHE_BYTES, cls);

	/*
	 * ID_AA64ISAR0_EL1 contains 4-bit wide signed feature blocks.
	 * The blocks we test below represent incremental functionality
	 * for non-negative values. Negative values are reserved.
	 */
	features = read_cpuid(ID_AA64ISAR0_EL1);
	block = (features >> 4) & 0xf;
	if (!(block & 0x8)) {
		switch (block) {
		default:
		case 2:
			elf_hwcap |= HWCAP_PMULL;
			/* fallthrough: PMULL implies AES */
		case 1:
			elf_hwcap |= HWCAP_AES;
			/* fallthrough */
		case 0:
			break;
		}
	}

	block = (features >> 8) & 0xf;
	if (block && !(block & 0x8))
		elf_hwcap |= HWCAP_SHA1;

	block = (features >> 12) & 0xf;
	if (block && !(block & 0x8))
		elf_hwcap |= HWCAP_SHA2;

	block = (features >> 16) & 0xf;
	if (block && !(block & 0x8))
		elf_hwcap |= HWCAP_CRC32;

#ifdef CONFIG_COMPAT
	/*
	 * ID_ISAR5_EL1 carries similar information as above, but pertaining to
	 * the Aarch32 32-bit execution state.
	 */
	features = read_cpuid(ID_ISAR5_EL1);
	block = (features >> 4) & 0xf;
	if (!(block & 0x8)) {
		switch (block) {
		default:
		case 2:
			compat_elf_hwcap2 |= COMPAT_HWCAP2_PMULL;
			/* fallthrough: PMULL implies AES */
		case 1:
			compat_elf_hwcap2 |= COMPAT_HWCAP2_AES;
			/* fallthrough */
		case 0:
			break;
		}
	}

	block = (features >> 8) & 0xf;
	if (block && !(block & 0x8))
		compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA1;

	block = (features >> 12) & 0xf;
	if (block && !(block & 0x8))
		compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA2;

	block = (features >> 16) & 0xf;
	if (block && !(block & 0x8))
		compat_elf_hwcap2 |= COMPAT_HWCAP2_CRC32;
#endif
}
315 
/*
 * Map the device tree blob passed by the bootloader and run the early
 * FDT scan. If the blob is missing or invalid there is no way to
 * continue booting: report the problem and spin forever.
 */
static void __init setup_machine_fdt(phys_addr_t dt_phys)
{
	void *dt_virt = fixmap_remap_fdt(dt_phys);

	if (!dt_virt || !early_init_dt_scan(dt_virt)) {
		pr_crit("\n"
			"Error: invalid device tree blob at physical address %pa (virtual address 0x%p)\n"
			"The dtb must be 8-byte aligned and must not exceed 2 MB in size\n"
			"\nPlease check your bootloader.",
			&dt_phys, dt_virt);

		while (true)
			cpu_relax();
	}

	/* Record the machine name for dump_stack() banners. */
	dump_stack_set_arch_desc("%s (DT)", of_flat_dt_get_machine_name());
}
333 
334 static void __init request_standard_resources(void)
335 {
336 	struct memblock_region *region;
337 	struct resource *res;
338 
339 	kernel_code.start   = virt_to_phys(_text);
340 	kernel_code.end     = virt_to_phys(_etext - 1);
341 	kernel_data.start   = virt_to_phys(_sdata);
342 	kernel_data.end     = virt_to_phys(_end - 1);
343 
344 	for_each_memblock(memory, region) {
345 		res = alloc_bootmem_low(sizeof(*res));
346 		res->name  = "System RAM";
347 		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
348 		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
349 		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
350 
351 		request_resource(&iomem_resource, res);
352 
353 		if (kernel_code.start >= res->start &&
354 		    kernel_code.end <= res->end)
355 			request_resource(res, &kernel_code);
356 		if (kernel_data.start >= res->start &&
357 		    kernel_data.end <= res->end)
358 			request_resource(res, &kernel_data);
359 	}
360 }
361 
362 u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
363 
/*
 * Architecture-specific boot-time setup. The ordering below is
 * load-bearing: the fixmap must exist before the FDT can be mapped,
 * the FDT must be scanned before early params, memblock before paging,
 * and paging before resources are registered.
 */
void __init setup_arch(char **cmdline_p)
{
	setup_processor();

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	*cmdline_p = boot_command_line;

	early_fixmap_init();
	early_ioremap_init();

	setup_machine_fdt(__fdt_pointer);

	parse_early_param();

	/*
	 *  Unmask asynchronous aborts after bringing up possible earlycon.
	 * (Report possible System Errors once we can report this occurred)
	 */
	local_async_enable();

	efi_init();
	arm64_memblock_init();

	/* Parse the ACPI tables for possible boot-time configuration */
	acpi_boot_table_init();

	paging_init();
	request_standard_resources();

	early_ioremap_reset();

	/* DT and ACPI are mutually exclusive firmware descriptions here. */
	if (acpi_disabled) {
		unflatten_device_tree();
		psci_dt_init();
	} else {
		psci_acpi_init();
	}
	xen_early_init();

	cpu_read_bootcpu_ops();
#ifdef CONFIG_SMP
	smp_init_cpus();
	smp_build_mpidr_hash();
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
	/* Boot protocol requires x1-x3 to be zero on kernel entry. */
	if (boot_args[1] || boot_args[2] || boot_args[3]) {
		pr_err("WARNING: x1-x3 nonzero in violation of boot protocol:\n"
			"\tx1: %016llx\n\tx2: %016llx\n\tx3: %016llx\n"
			"This indicates a broken bootloader or old kernel\n",
			boot_args[1], boot_args[2], boot_args[3]);
	}
}
427 
/* Initialise DT-described IOMMUs, then populate platform devices from the DT. */
static int __init arm64_device_init(void)
{
	of_iommu_init();
	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
	return 0;
}
arch_initcall_sync(arm64_device_init);
435 
436 static int __init topology_init(void)
437 {
438 	int i;
439 
440 	for_each_possible_cpu(i) {
441 		struct cpu *cpu = &per_cpu(cpu_data.cpu, i);
442 		cpu->hotpluggable = 1;
443 		register_cpu(cpu, i);
444 	}
445 
446 	return 0;
447 }
448 subsys_initcall(topology_init);
449 
/*
 * Native feature names printed in /proc/cpuinfo; index must match the
 * HWCAP_* bit positions tested in c_show(). NULL-terminated.
 */
static const char *hwcap_str[] = {
	"fp",
	"asimd",
	"evtstrm",
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};
461 
462 #ifdef CONFIG_COMPAT
/*
 * AArch32 (compat) feature names for /proc/cpuinfo; index must match the
 * COMPAT_HWCAP_* bit positions. c_show() iterates this table until it
 * hits a NULL entry, so the array must carry a NULL sentinel — the
 * original definition was missing it, so the loop read past the end of
 * the array.
 */
static const char *compat_hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	NULL
};
487 
/*
 * AArch32 HWCAP2 feature names; index matches the COMPAT_HWCAP2_* bit
 * positions tested in c_show(). NULL-terminated.
 */
static const char *compat_hwcap2_str[] = {
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};
496 #endif /* CONFIG_COMPAT */
497 
/*
 * seq_file show handler for /proc/cpuinfo: one record per online CPU
 * (processor number, feature strings, MIDR fields). The exact output
 * format is userspace ABI — glibc and other tools parse it.
 */
static int c_show(struct seq_file *m, void *v)
{
	int i, j;

	for_each_online_cpu(i) {
		struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
		u32 midr = cpuinfo->reg_midr;

		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
#ifdef CONFIG_SMP
		seq_printf(m, "processor\t: %d\n", i);
#endif

		/*
		 * Dump out the common processor features in a single line.
		 * Userspace should read the hwcaps with getauxval(AT_HWCAP)
		 * rather than attempting to parse this, but there's a body of
		 * software which does already (at least for 32-bit).
		 */
		seq_puts(m, "Features\t:");
		if (personality(current->personality) == PER_LINUX32) {
#ifdef CONFIG_COMPAT
			/* 32-bit tasks see the AArch32 hwcap names. */
			for (j = 0; compat_hwcap_str[j]; j++)
				if (compat_elf_hwcap & (1 << j))
					seq_printf(m, " %s", compat_hwcap_str[j]);

			for (j = 0; compat_hwcap2_str[j]; j++)
				if (compat_elf_hwcap2 & (1 << j))
					seq_printf(m, " %s", compat_hwcap2_str[j]);
#endif /* CONFIG_COMPAT */
		} else {
			for (j = 0; hwcap_str[j]; j++)
				if (elf_hwcap & (1 << j))
					seq_printf(m, " %s", hwcap_str[j]);
		}
		seq_puts(m, "\n");

		seq_printf(m, "CPU implementer\t: 0x%02x\n",
			   MIDR_IMPLEMENTOR(midr));
		seq_printf(m, "CPU architecture: 8\n");
		seq_printf(m, "CPU variant\t: 0x%x\n", MIDR_VARIANT(midr));
		seq_printf(m, "CPU part\t: 0x%03x\n", MIDR_PARTNUM(midr));
		seq_printf(m, "CPU revision\t: %d\n\n", MIDR_REVISION(midr));
	}

	return 0;
}
549 
550 static void *c_start(struct seq_file *m, loff_t *pos)
551 {
552 	return *pos < 1 ? (void *)1 : NULL;
553 }
554 
555 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
556 {
557 	++*pos;
558 	return NULL;
559 }
560 
/* seq_file stop: nothing to release. */
static void c_stop(struct seq_file *m, void *v)
{
}
564 
/* seq_file operations backing /proc/cpuinfo. */
const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};
571