/*
 * Based on arch/arm/kernel/setup.c
 *
 * Copyright (C) 1995-2001 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/acpi.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/cache.h>
#include <linux/bootmem.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/efi.h>
#include <linux/psci.h>
#include <linux/sched/task.h>
#include <linux/mm.h>

#include <asm/acpi.h>
#include <asm/fixmap.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/daifflags.h>
#include <asm/elf.h>
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
#include <asm/kasan.h>
#include <asm/numa.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>
#include <asm/memblock.h>
#include <asm/efi.h>
#include <asm/xen/hypervisor.h>
#include <asm/mmu_context.h>

phys_addr_t __fdt_pointer __initdata;

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	}
};

#define kernel_code mem_res[0]
#define kernel_data mem_res[1]

/*
 * The recorded values of x0 .. x3 upon kernel entry.
 */
u64 __cacheline_aligned boot_args[4];
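
/*
 * For reference: per the arm64 boot protocol (Documentation/arm64/booting.txt),
 * x0 holds the physical address of the device tree blob and x1..x3 must be
 * zero. preserve_boot_args in head.S stashes the four registers here so that
 * setup_arch() below can warn if x1..x3 were in fact nonzero.
 */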

void __init smp_setup_processor_id(void)
{
	u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
	cpu_logical_map(0) = mpidr;

	/*
	 * Clear __my_cpu_offset on the boot CPU to avoid a hang caused
	 * by using per-cpu variables too early; lockdep, for example,
	 * accesses per-cpu variables inside lock_release().
	 */
	set_my_cpu_offset(0);
	pr_info("Booting Linux on physical CPU 0x%010lx [0x%08x]\n",
		(unsigned long)mpidr, read_cpuid_id());
}
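
/*
 * Worked example (hypothetical values): if MPIDR_EL1 reads 0x80000003,
 * masking with MPIDR_HWID_BITMASK (0xff00ffffff) strips the MT, U and
 * RES1 flag bits (bits 24, 30 and 31), leaving hwid 0x3, so the pr_info
 * above would print:
 *
 *   Booting Linux on physical CPU 0x0000000003 [0x410fd034]
 *
 * (the bracketed value being a hypothetical MIDR_EL1).
 */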

bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
	return phys_id == cpu_logical_map(cpu);
}

struct mpidr_hash mpidr_hash;
/**
 * smp_build_mpidr_hash - Pre-compute the shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. The resulting algorithm is a
 *			  collision-free hash carried out through shifting
 *			  and ORing.
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity, fs[4], bits[4], ls;
	u64 mask = 0;
	/*
	 * Pre-scan the list of MPIDRs and filter out bits that do
	 * not contribute to affinity levels, i.e. they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits %#llx\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 4; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the positions of the most and least significant
		 * bits set to determine how many bits are required
		 * to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR_EL1 by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 32-bit value space into a
	 * smaller, dense set of values. This is equivalent to hashing
	 * the MPIDR_EL1 through shifting and ORing. It is a collision-free
	 * hash, though not minimal, since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, e.g. MPIDR_EL1[7:0] = {0x2, 0x80}.
	 */
	mpidr_hash.shift_aff[0] = MPIDR_LEVEL_SHIFT(0) + fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_SHIFT(1) + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = MPIDR_LEVEL_SHIFT(2) + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.shift_aff[3] = MPIDR_LEVEL_SHIFT(3) +
				  fs[3] - (bits[2] + bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[3] + bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] aff3[%u] mask[%#llx] bits[%u]\n",
		mpidr_hash.shift_aff[0],
		mpidr_hash.shift_aff[1],
		mpidr_hash.shift_aff[2],
		mpidr_hash.shift_aff[3],
		mpidr_hash.mask,
		mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
}
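
/*
 * Illustrative sketch (hypothetical helper, not used anywhere): consumers
 * fold an MPIDR into a linear index by masking off the never-toggling bits
 * and shifting each affinity field down by its precomputed shift. The
 * in-tree user, compute_mpidr_hash in sleep.S, does this in assembly; a C
 * equivalent might look like the function below. For example, with MPIDRs
 * {0x0, 0x1, 0x100, 0x101}: mask = 0x101, bits = {1, 1, 0, 0} and
 * shift_aff = {0, 7, 14, 30}, so the four CPUs hash to the dense indices
 * 0, 1, 2 and 3.
 */
static inline u32 example_mpidr_to_index(u64 mpidr)
{
	u32 hash = 0;
	int level;

	/* Drop the bits that never toggle across CPUs. */
	mpidr &= mpidr_hash.mask;

	/* Isolate each affinity field and shift it into its slot. */
	for (level = 0; level < 4; level++)
		hash |= (mpidr & ((u64)MPIDR_LEVEL_MASK <<
				  MPIDR_LEVEL_SHIFT(level)))
				>> mpidr_hash.shift_aff[level];

	return hash;
}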

static void __init setup_machine_fdt(phys_addr_t dt_phys)
{
	void *dt_virt = fixmap_remap_fdt(dt_phys);
	const char *name;

	if (!dt_virt || !early_init_dt_scan(dt_virt)) {
		pr_crit("\n"
			"Error: invalid device tree blob at physical address %pa (virtual address 0x%p)\n"
			"The dtb must be 8-byte aligned and must not exceed 2 MB in size\n"
			"\nPlease check your bootloader.",
			&dt_phys, dt_virt);

		while (true)
			cpu_relax();
	}

	name = of_flat_dt_get_machine_name();
	if (!name)
		return;

	pr_info("Machine model: %s\n", name);
	dump_stack_set_arch_desc("%s (DT)", name);
}
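
/*
 * For reference: the alignment and size limits quoted above come from the
 * arm64 boot protocol. fixmap_remap_fdt() maps the blob through dedicated
 * fixmap slots and returns NULL for a zero or insufficiently aligned
 * physical address, which is why a bad blob lands in the error path here.
 */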

static void __init request_standard_resources(void)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start   = __pa_symbol(_text);
	kernel_code.end     = __pa_symbol(__init_begin - 1);
	kernel_data.start   = __pa_symbol(_sdata);
	kernel_data.end     = __pa_symbol(_end - 1);

	for_each_memblock(memory, region) {
		res = alloc_bootmem_low(sizeof(*res));
		if (memblock_is_nomap(region)) {
			res->name  = "reserved";
			res->flags = IORESOURCE_MEM;
		} else {
			res->name  = "System RAM";
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
		}
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
#ifdef CONFIG_KEXEC_CORE
		/* Userspace will find "Crash kernel" region in /proc/iomem. */
		if (crashk_res.end && crashk_res.start >= res->start &&
		    crashk_res.end <= res->end)
			request_resource(res, &crashk_res);
#endif
	}
}
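
/*
 * The hierarchy built above is what userspace sees in /proc/iomem. On a
 * hypothetical machine with a single memory node it might read:
 *
 *   40000000-5fffffff : System RAM
 *     40080000-40b5ffff : Kernel code
 *     40cc0000-40dbffff : Kernel data
 *
 * with nomap regions appearing as "reserved" instead of "System RAM".
 */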

u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };

void __init setup_arch(char **cmdline_p)
{
	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	*cmdline_p = boot_command_line;

	early_fixmap_init();
	early_ioremap_init();

	setup_machine_fdt(__fdt_pointer);

	parse_early_param();

	/*
	 * Unmask asynchronous aborts and FIQ after bringing up a possible
	 * earlycon, so that System Errors are reported as soon as we are
	 * able to log that they occurred.
	 */
	local_daif_restore(DAIF_PROCCTX_NOIRQ);

	/*
	 * TTBR0 is only used for the identity mapping at this stage. Make it
	 * point to the zero page to avoid speculatively fetching new entries.
	 */
	cpu_uninstall_idmap();

	xen_early_init();
	efi_init();
	arm64_memblock_init();

	paging_init();

	acpi_table_upgrade();

	/* Parse the ACPI tables for possible boot-time configuration */
	acpi_boot_table_init();

	if (acpi_disabled)
		unflatten_device_tree();

	bootmem_init();

	kasan_init();

	request_standard_resources();

	early_ioremap_reset();

	if (acpi_disabled)
		psci_dt_init();
	else
		psci_acpi_init();

	cpu_read_bootcpu_ops();
	smp_init_cpus();
	smp_build_mpidr_hash();

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Make sure init_thread_info.ttbr0 always generates translation
	 * faults in case uaccess_enable() is inadvertently called by the init
	 * thread.
	 */
	init_task.thread_info.ttbr0 = __pa_symbol(empty_zero_page);
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
	if (boot_args[1] || boot_args[2] || boot_args[3]) {
		pr_err("WARNING: x1-x3 nonzero in violation of boot protocol:\n"
			"\tx1: %016llx\n\tx2: %016llx\n\tx3: %016llx\n"
			"This indicates a broken bootloader or old kernel\n",
			boot_args[1], boot_args[2], boot_args[3]);
	}
}

static int __init topology_init(void)
{
	int i;

	for_each_online_node(i)
		register_one_node(i);

	for_each_possible_cpu(i) {
		struct cpu *cpu = &per_cpu(cpu_data.cpu, i);
		cpu->hotpluggable = 1;
		register_cpu(cpu, i);
	}

	return 0;
}
subsys_initcall(topology_init);
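
/*
 * Usage note: because each CPU is registered with hotpluggable = 1, its
 * sysfs online attribute is writable on kernels with CPU hotplug support,
 * e.g. (hypothetical shell session):
 *
 *   echo 0 > /sys/devices/system/cpu/cpu1/online
 */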

/*
 * Dump out kernel offset information on panic.
 */
static int dump_kernel_offset(struct notifier_block *self, unsigned long v,
			      void *p)
{
	const unsigned long offset = kaslr_offset();

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && offset > 0) {
		pr_emerg("Kernel Offset: 0x%lx from 0x%lx\n",
			 offset, KIMAGE_VADDR);
	} else {
		pr_emerg("Kernel Offset: disabled\n");
	}
	return 0;
}
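
/*
 * Example output (hypothetical values): with CONFIG_RANDOMIZE_BASE and a
 * seed yielding a 0x1a2b0000 displacement, a panic would log:
 *
 *   Kernel Offset: 0x1a2b0000 from 0xffff000008000000
 *
 * where the second value is KIMAGE_VADDR for a 48-bit VA configuration.
 */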

static struct notifier_block kernel_offset_notifier = {
	.notifier_call = dump_kernel_offset
};

static int __init register_kernel_offset_dumper(void)
{
	atomic_notifier_chain_register(&panic_notifier_list,
				       &kernel_offset_notifier);
	return 0;
}
__initcall(register_kernel_offset_dumper);
375