xref: /openbmc/linux/arch/arm64/kernel/setup.c (revision 867a0e05)
1 /*
2  * Based on arch/arm/kernel/setup.c
3  *
4  * Copyright (C) 1995-2001 Russell King
5  * Copyright (C) 2012 ARM Ltd.
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include <linux/export.h>
21 #include <linux/kernel.h>
22 #include <linux/stddef.h>
23 #include <linux/ioport.h>
24 #include <linux/delay.h>
25 #include <linux/utsname.h>
26 #include <linux/initrd.h>
27 #include <linux/console.h>
28 #include <linux/cache.h>
29 #include <linux/bootmem.h>
30 #include <linux/seq_file.h>
31 #include <linux/screen_info.h>
32 #include <linux/init.h>
33 #include <linux/kexec.h>
34 #include <linux/crash_dump.h>
35 #include <linux/root_dev.h>
36 #include <linux/clk-provider.h>
37 #include <linux/cpu.h>
38 #include <linux/interrupt.h>
39 #include <linux/smp.h>
40 #include <linux/fs.h>
41 #include <linux/proc_fs.h>
42 #include <linux/memblock.h>
43 #include <linux/of_fdt.h>
44 #include <linux/of_platform.h>
45 #include <linux/efi.h>
46 #include <linux/personality.h>
47 
48 #include <asm/fixmap.h>
49 #include <asm/cpu.h>
50 #include <asm/cputype.h>
51 #include <asm/elf.h>
52 #include <asm/cputable.h>
53 #include <asm/cpufeature.h>
54 #include <asm/cpu_ops.h>
55 #include <asm/sections.h>
56 #include <asm/setup.h>
57 #include <asm/smp_plat.h>
58 #include <asm/cacheflush.h>
59 #include <asm/tlbflush.h>
60 #include <asm/traps.h>
61 #include <asm/memblock.h>
62 #include <asm/psci.h>
63 #include <asm/efi.h>
64 
/* MIDR_EL1 of the boot CPU, cached at setup time (see setup_processor()). */
unsigned int processor_id;
EXPORT_SYMBOL(processor_id);

/* AArch64 ELF hwcap bits advertised to userspace via AT_HWCAP. */
unsigned long elf_hwcap __read_mostly;
EXPORT_SYMBOL_GPL(elf_hwcap);

#ifdef CONFIG_COMPAT
/*
 * Baseline AArch32 hwcaps assumed for any ARMv8 CPU; setup_processor()
 * adds the optional crypto/CRC bits on top from ID_ISAR5_EL1.
 */
#define COMPAT_ELF_HWCAP_DEFAULT	\
				(COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
				 COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
				 COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
				 COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
				 COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
				 COMPAT_HWCAP_LPAE)
unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
unsigned int compat_elf_hwcap2 __read_mostly;
#endif

/* Per-capability bitmap of detected ARM64 CPU features (ARM64_NCAPS bits). */
DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);

/* Human-readable boot CPU name, set from the cpu_info table. */
static const char *cpu_name;
/* Physical address of the device tree blob, stashed by the early boot code. */
phys_addr_t __fdt_pointer __initdata;
87 
/*
 * Standard memory resources: kernel text and data spans, registered
 * under their containing "System RAM" regions by
 * request_standard_resources() (start/end filled in there).
 */
static struct resource mem_res[] = {
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define kernel_code mem_res[0]
#define kernel_data mem_res[1]
108 
109 void __init early_print(const char *str, ...)
110 {
111 	char buf[256];
112 	va_list ap;
113 
114 	va_start(ap, str);
115 	vsnprintf(buf, sizeof(buf), str, ap);
116 	va_end(ap);
117 
118 	printk("%s", buf);
119 }
120 
/*
 * Record the boot CPU's MPIDR (affinity bits only) as logical CPU 0's
 * hardware ID, and reset the boot CPU's percpu offset.
 */
void __init smp_setup_processor_id(void)
{
	u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
	cpu_logical_map(0) = mpidr;

	/*
	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
	 * using percpu variable early, for example, lockdep will
	 * access percpu variable inside lock_release
	 */
	set_my_cpu_offset(0);
	pr_info("Booting Linux on physical CPU 0x%lx\n", (unsigned long)mpidr);
}
134 
135 bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
136 {
137 	return phys_id == cpu_logical_map(cpu);
138 }
139 
/* Shift/mask description used to map an MPIDR to a dense linear index. */
struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. Resulting algorithm is a collision
 *			  free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
	/* fs[i]: first set bit of affinity level i; bits[i]: bit count used. */
	u32 i, affinity, fs[4], bits[4], ls;
	u64 mask = 0;
	/*
	 * Pre-scan the list of MPIDRS and filter out bits that do
	 * not contribute to affinity levels, ie they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits %#llx\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 4; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB bit and LSB bits position
		 * to determine how many bits are required
		 * to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR_EL1 by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 32 bits values space to a
	 * compressed set of values. This is equivalent to hashing
	 * the MPIDR_EL1 through shifting and ORing. It is a collision free
	 * hash though not minimal since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, eg MPIDR_EL1[7:0] = {0x2, 0x80}.
	 */
	/* Each level's shift compacts its bits next to the lower levels'. */
	mpidr_hash.shift_aff[0] = MPIDR_LEVEL_SHIFT(0) + fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_SHIFT(1) + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = MPIDR_LEVEL_SHIFT(2) + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.shift_aff[3] = MPIDR_LEVEL_SHIFT(3) +
				  fs[3] - (bits[2] + bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[3] + bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] aff3[%u] mask[%#llx] bits[%u]\n",
		mpidr_hash.shift_aff[0],
		mpidr_hash.shift_aff[1],
		mpidr_hash.shift_aff[2],
		mpidr_hash.shift_aff[3],
		mpidr_hash.mask,
		mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	/* Flush so early resume/secondary-boot code sees the hash via PA. */
	__flush_dcache_area(&mpidr_hash, sizeof(struct mpidr_hash));
}
#endif
208 
/*
 * Identify the boot CPU, sanity-check the cache geometry, and translate
 * the ID_AA64ISAR0_EL1 (and, for compat, ID_ISAR5_EL1) feature fields
 * into the ELF hwcap bits exposed to userspace.
 */
static void __init setup_processor(void)
{
	struct cpu_info *cpu_info;
	u64 features, block;
	u32 cwg;
	int cls;

	cpu_info = lookup_processor_type(read_cpuid_id());
	if (!cpu_info) {
		printk("CPU configuration botched (ID %08x), unable to continue.\n",
		       read_cpuid_id());
		while (1);	/* no sane way to continue without a CPU match */
	}

	cpu_name = cpu_info->cpu_name;

	printk("CPU: %s [%08x] revision %d\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15);

	sprintf(init_utsname()->machine, ELF_PLATFORM);
	elf_hwcap = 0;

	cpuinfo_store_boot_cpu();

	/*
	 * Check for sane CTR_EL0.CWG value.
	 */
	cwg = cache_type_cwg();
	cls = cache_line_size();
	if (!cwg)
		pr_warn("No Cache Writeback Granule information, assuming cache line size %d\n",
			cls);
	if (L1_CACHE_BYTES < cls)
		pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n",
			L1_CACHE_BYTES, cls);

	/*
	 * ID_AA64ISAR0_EL1 contains 4-bit wide signed feature blocks.
	 * The blocks we test below represent incremental functionality
	 * for non-negative values. Negative values are reserved.
	 */
	features = read_cpuid(ID_AA64ISAR0_EL1);
	block = (features >> 4) & 0xf;
	if (!(block & 0x8)) {
		/* Higher values imply all lower features too, hence fallthrough. */
		switch (block) {
		default:
		case 2:
			elf_hwcap |= HWCAP_PMULL;
			/* fallthrough */
		case 1:
			elf_hwcap |= HWCAP_AES;
			/* fallthrough */
		case 0:
			break;
		}
	}

	block = (features >> 8) & 0xf;
	if (block && !(block & 0x8))
		elf_hwcap |= HWCAP_SHA1;

	block = (features >> 12) & 0xf;
	if (block && !(block & 0x8))
		elf_hwcap |= HWCAP_SHA2;

	block = (features >> 16) & 0xf;
	if (block && !(block & 0x8))
		elf_hwcap |= HWCAP_CRC32;

#ifdef CONFIG_COMPAT
	/*
	 * ID_ISAR5_EL1 carries similar information as above, but pertaining to
	 * the Aarch32 32-bit execution state.
	 */
	features = read_cpuid(ID_ISAR5_EL1);
	block = (features >> 4) & 0xf;
	if (!(block & 0x8)) {
		/* Same incremental scheme as the AArch64 AES/PMULL field. */
		switch (block) {
		default:
		case 2:
			compat_elf_hwcap2 |= COMPAT_HWCAP2_PMULL;
			/* fallthrough */
		case 1:
			compat_elf_hwcap2 |= COMPAT_HWCAP2_AES;
			/* fallthrough */
		case 0:
			break;
		}
	}

	block = (features >> 8) & 0xf;
	if (block && !(block & 0x8))
		compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA1;

	block = (features >> 12) & 0xf;
	if (block && !(block & 0x8))
		compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA2;

	block = (features >> 16) & 0xf;
	if (block && !(block & 0x8))
		compat_elf_hwcap2 |= COMPAT_HWCAP2_CRC32;
#endif
}
308 
309 static void __init setup_machine_fdt(phys_addr_t dt_phys)
310 {
311 	if (!dt_phys || !early_init_dt_scan(phys_to_virt(dt_phys))) {
312 		early_print("\n"
313 			"Error: invalid device tree blob at physical address 0x%p (virtual address 0x%p)\n"
314 			"The dtb must be 8-byte aligned and passed in the first 512MB of memory\n"
315 			"\nPlease check your bootloader.\n",
316 			dt_phys, phys_to_virt(dt_phys));
317 
318 		while (true)
319 			cpu_relax();
320 	}
321 
322 	dump_stack_set_arch_desc("%s (DT)", of_flat_dt_get_machine_name());
323 }
324 
/*
 * Limit the memory size that was specified via FDT.
 */
/*
 * "mem=" early parameter handler: clamp usable memory to the given size.
 * Returns non-zero (flagged as a malformed option) when no value follows.
 */
static int __init early_mem(char *p)
{
	phys_addr_t limit;

	if (!p)
		return 1;

	/* Parse the size suffix (K/M/G) and round down to a page boundary. */
	limit = memparse(p, &p) & PAGE_MASK;
	pr_notice("Memory limited to %lldMB\n", limit >> 20);

	memblock_enforce_memory_limit(limit);

	return 0;
}
early_param("mem", early_mem);
343 
344 static void __init request_standard_resources(void)
345 {
346 	struct memblock_region *region;
347 	struct resource *res;
348 
349 	kernel_code.start   = virt_to_phys(_text);
350 	kernel_code.end     = virt_to_phys(_etext - 1);
351 	kernel_data.start   = virt_to_phys(_sdata);
352 	kernel_data.end     = virt_to_phys(_end - 1);
353 
354 	for_each_memblock(memory, region) {
355 		res = alloc_bootmem_low(sizeof(*res));
356 		res->name  = "System RAM";
357 		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
358 		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
359 		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
360 
361 		request_resource(&iomem_resource, res);
362 
363 		if (kernel_code.start >= res->start &&
364 		    kernel_code.end <= res->end)
365 			request_resource(res, &kernel_code);
366 		if (kernel_data.start >= res->start &&
367 		    kernel_data.end <= res->end)
368 			request_resource(res, &kernel_data);
369 	}
370 }
371 
/* Logical CPU number -> MPIDR hardware ID; INVALID_HWID marks unused slots. */
u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
373 
/*
 * Architecture-specific boot-time setup. The call order below is
 * load-bearing: CPU identification, then DT scan, then early mappings
 * and parameter parsing, then memblock/paging, then the device tree
 * unflattening and SMP bring-up prerequisites.
 */
void __init setup_arch(char **cmdline_p)
{
	setup_processor();

	/* __fdt_pointer was stashed by the early boot code. */
	setup_machine_fdt(__fdt_pointer);

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	*cmdline_p = boot_command_line;

	early_fixmap_init();
	early_ioremap_init();

	parse_early_param();

	/*
	 *  Unmask asynchronous aborts after bringing up possible earlycon.
	 * (Report possible System Errors once we can report this occurred)
	 */
	local_async_enable();

	efi_init();
	arm64_memblock_init();

	paging_init();
	request_standard_resources();

	efi_idmap_init();
	early_ioremap_reset();

	unflatten_device_tree();

	psci_init();

	/* Determine how the boot CPU (and later secondaries) are brought up. */
	cpu_read_bootcpu_ops();
#ifdef CONFIG_SMP
	smp_init_cpus();
	smp_build_mpidr_hash();
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
}
425 
/* Populate platform devices from the unflattened device tree. */
static int __init arm64_device_init(void)
{
	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
	return 0;
}
arch_initcall_sync(arm64_device_init);
432 
433 static int __init topology_init(void)
434 {
435 	int i;
436 
437 	for_each_possible_cpu(i) {
438 		struct cpu *cpu = &per_cpu(cpu_data.cpu, i);
439 		cpu->hotpluggable = 1;
440 		register_cpu(cpu, i);
441 	}
442 
443 	return 0;
444 }
445 subsys_initcall(topology_init);
446 
/*
 * /proc/cpuinfo feature names for AArch64, indexed by hwcap bit position
 * (c_show() tests elf_hwcap & (1 << index)); NULL-terminated.
 */
static const char *hwcap_str[] = {
	"fp",
	"asimd",
	"evtstrm",
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};
458 
459 #ifdef CONFIG_COMPAT
/*
 * /proc/cpuinfo feature names for AArch32 (compat) tasks, indexed by
 * COMPAT_HWCAP bit position.
 *
 * Fix: c_show() walks this table with `for (j = 0; compat_hwcap_str[j]; j++)`,
 * so it must be NULL-terminated; the missing sentinel caused an
 * out-of-bounds read past "evtstrm".
 */
static const char *compat_hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	NULL
};
484 
/*
 * /proc/cpuinfo feature names for AArch32 HWCAP2 bits, indexed by
 * COMPAT_HWCAP2 bit position; NULL-terminated for c_show()'s loop.
 */
static const char *compat_hwcap2_str[] = {
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};
493 #endif /* CONFIG_COMPAT */
494 
/*
 * /proc/cpuinfo show callback: one record per online CPU, containing the
 * "processor" line, a hwcap-derived "Features" line (compat names for
 * PER_LINUX32 readers), and the decoded MIDR fields.
 */
static int c_show(struct seq_file *m, void *v)
{
	int i, j;

	for_each_online_cpu(i) {
		struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
		u32 midr = cpuinfo->reg_midr;

		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
#ifdef CONFIG_SMP
		seq_printf(m, "processor\t: %d\n", i);
#endif

		/*
		 * Dump out the common processor features in a single line.
		 * Userspace should read the hwcaps with getauxval(AT_HWCAP)
		 * rather than attempting to parse this, but there's a body of
		 * software which does already (at least for 32-bit).
		 */
		seq_puts(m, "Features\t:");
		if (personality(current->personality) == PER_LINUX32) {
#ifdef CONFIG_COMPAT
			/* Tables are NULL-terminated; bit j maps to entry j. */
			for (j = 0; compat_hwcap_str[j]; j++)
				if (compat_elf_hwcap & (1 << j))
					seq_printf(m, " %s", compat_hwcap_str[j]);

			for (j = 0; compat_hwcap2_str[j]; j++)
				if (compat_elf_hwcap2 & (1 << j))
					seq_printf(m, " %s", compat_hwcap2_str[j]);
#endif /* CONFIG_COMPAT */
		} else {
			for (j = 0; hwcap_str[j]; j++)
				if (elf_hwcap & (1 << j))
					seq_printf(m, " %s", hwcap_str[j]);
		}
		seq_puts(m, "\n");

		/* Decode MIDR_EL1 into the fields userspace parsers expect. */
		seq_printf(m, "CPU implementer\t: 0x%02x\n",
			   MIDR_IMPLEMENTOR(midr));
		seq_printf(m, "CPU architecture: 8\n");
		seq_printf(m, "CPU variant\t: 0x%x\n", MIDR_VARIANT(midr));
		seq_printf(m, "CPU part\t: 0x%03x\n", MIDR_PARTNUM(midr));
		seq_printf(m, "CPU revision\t: %d\n\n", MIDR_REVISION(midr));
	}

	return 0;
}
546 
547 static void *c_start(struct seq_file *m, loff_t *pos)
548 {
549 	return *pos < 1 ? (void *)1 : NULL;
550 }
551 
552 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
553 {
554 	++*pos;
555 	return NULL;
556 }
557 
/* seq_file stop hook: nothing to release for /proc/cpuinfo. */
static void c_stop(struct seq_file *m, void *v)
{
}
561 
/* seq_file operations backing /proc/cpuinfo (referenced by fs/proc). */
const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};
568