/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Low-level CPU initialisation
 * Based on arch/arm/kernel/head.S
 *
 * Copyright (C) 1994-2002 Russell King
 * Copyright (C) 2003-2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/irqchip/arm-gic-v3.h>

#include <asm/assembler.h>
#include <asm/boot.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/image.h>
#include <asm/kernel-pgtable.h>
#include <asm/kvm_arm.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/smp.h>
#include <asm/sysreg.h>
#include <asm/thread_info.h>
#include <asm/virt.h>

#include "efi-header.S"

#define __PHYS_OFFSET	(KERNEL_START - TEXT_OFFSET)

#if (TEXT_OFFSET & 0xfff) != 0
#error TEXT_OFFSET must be at least 4KB aligned
#elif (PAGE_OFFSET & 0x1fffff) != 0
#error PAGE_OFFSET must be at least 2MB aligned
#elif TEXT_OFFSET > 0x1fffff
#error TEXT_OFFSET must be less than 2MB
#endif
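
/*
 * For illustration only (the exact value is set by the build): with the
 * historically common default of TEXT_OFFSET == 0x80000 (512 KiB), an image
 * loaded 0x80000 bytes above a 2 MiB aligned base has _head at
 * base + 0x80000, and __PHYS_OFFSET == KERNEL_START - TEXT_OFFSET points
 * back at that base. The checks above only enforce 4 KiB alignment of
 * TEXT_OFFSET and an upper bound of 2 MiB.
 */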

/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * The requirements are:
 *   MMU = off, D-cache = off, I-cache = on or off,
 *   x0 = physical address to the FDT blob.
 *
 * This code is mostly position independent so you call this at
 * __pa(PAGE_OFFSET + TEXT_OFFSET).
 *
 * Note that the callee-saved registers are used for storing variables
 * that are useful before the MMU is enabled. The allocations are described
 * in the entry routines.
 */
	__HEAD
_head:
	/*
	 * DO NOT MODIFY. Image header expected by Linux boot-loaders.
	 */
#ifdef CONFIG_EFI
	/*
	 * This add instruction has no meaningful effect except that
	 * its opcode forms the magic "MZ" signature required by UEFI.
	 */
	add	x13, x18, #0x16
	b	stext
#else
	b	stext				// branch to kernel start, magic
	.long	0				// reserved
#endif
	le64sym	_kernel_offset_le		// Image load offset from start of RAM, little-endian
	le64sym	_kernel_size_le			// Effective size of kernel image, little-endian
	le64sym	_kernel_flags_le		// Informative flags, little-endian
	.quad	0				// reserved
	.quad	0				// reserved
	.quad	0				// reserved
	.ascii	ARM64_IMAGE_MAGIC		// Magic number
#ifdef CONFIG_EFI
	.long	pe_header - _head		// Offset to the PE header.

pe_header:
	__EFI_PE_HEADER
#else
	.long	0				// reserved
#endif

	__INIT

	/*
	 * The following callee saved general purpose registers are used on the
	 * primary lowlevel boot path:
	 *
	 *  Register   Scope                      Purpose
	 *  x21        stext() .. start_kernel()  FDT pointer passed at boot in x0
	 *  x23        stext() .. start_kernel()  physical misalignment/KASLR offset
	 *  x28        __create_page_tables()     callee preserved temp register
	 *  x19/x20    __primary_switch()         callee preserved temp registers
	 *  x24        __primary_switch() .. relocate_kernel()
	 *                                        current RELR displacement
	 */
SYM_CODE_START(stext)
	bl	preserve_boot_args
	bl	el2_setup			// Drop to EL1, w0=cpu_boot_mode
	adrp	x23, __PHYS_OFFSET
	and	x23, x23, MIN_KIMG_ALIGN - 1	// KASLR offset, defaults to 0
	bl	set_cpu_boot_mode_flag
	bl	__create_page_tables
	/*
	 * The following calls CPU setup code, see arch/arm64/mm/proc.S for
	 * details.
	 * On return, the CPU will be ready for the MMU to be turned on and
	 * the TCR will have been set.
	 */
	mov	x0, #ARM64_CPU_BOOT_PRIMARY
	bl	__cpu_setup			// initialise processor
	b	__primary_switch
SYM_CODE_END(stext)

/*
 * Preserve the arguments passed by the bootloader in x0 .. x3
 */
SYM_CODE_START_LOCAL(preserve_boot_args)
	mov	x21, x0				// x21=FDT

	adr_l	x0, boot_args			// record the contents of
	stp	x21, x1, [x0]			// x0 .. x3 at kernel entry
	stp	x2, x3, [x0, #16]

	dmb	sy				// needed before dc ivac with
						// MMU off

	mov	x1, #0x20			// 4 x 8 bytes
	b	__inval_dcache_area		// tail call
SYM_CODE_END(preserve_boot_args)

/*
 * Macro to create a table entry to the next page.
 *
 *	tbl:	page table address
 *	virt:	virtual address
 *	shift:	#imm page table shift
 *	ptrs:	#imm pointers per table page
 *
 * Preserves:	virt
 * Corrupts:	ptrs, tmp1, tmp2
 * Returns:	tbl -> next level table page address
 */
	.macro	create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2
	add	\tmp1, \tbl, #PAGE_SIZE
	phys_to_pte \tmp2, \tmp1
	orr	\tmp2, \tmp2, #PMD_TYPE_TABLE	// address of next table and entry type
	lsr	\tmp1, \virt, #\shift
	sub	\ptrs, \ptrs, #1
	and	\tmp1, \tmp1, \ptrs		// table index
	str	\tmp2, [\tbl, \tmp1, lsl #3]
	add	\tbl, \tbl, #PAGE_SIZE		// next level table page
	.endm
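
/*
 * Roughly equivalent C, for illustration only (names and types here are
 * ad hoc, not the kernel's):
 *
 *	u64 *create_table_entry(u64 *tbl, u64 virt, int shift, u64 ptrs)
 *	{
 *		u64 next = (u64)tbl + PAGE_SIZE;	// next level table
 *		u64 idx  = (virt >> shift) & (ptrs - 1);
 *
 *		tbl[idx] = phys_to_pte(next) | PMD_TYPE_TABLE;
 *		return (u64 *)next;
 *	}
 */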

/*
 * Macro to populate page table entries. These entries can be pointers to the
 * next level, or last level entries pointing to physical memory.
 *
 *	tbl:	page table address
 *	rtbl:	pointer to page table or physical memory
 *	index:	start index to write
 *	eindex:	end index to write - [index, eindex] written to
 *	flags:	flags for pagetable entry to or in
 *	inc:	increment to rtbl between each entry
 *	tmp1:	temporary variable
 *
 * Preserves:	tbl, eindex, flags, inc
 * Corrupts:	index, tmp1
 * Returns:	rtbl
 */
	.macro populate_entries, tbl, rtbl, index, eindex, flags, inc, tmp1
.Lpe\@:	phys_to_pte \tmp1, \rtbl
	orr	\tmp1, \tmp1, \flags	// tmp1 = table entry
	str	\tmp1, [\tbl, \index, lsl #3]
	add	\rtbl, \rtbl, \inc	// rtbl = pa next level
	add	\index, \index, #1
	cmp	\index, \eindex
	b.ls	.Lpe\@
	.endm
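
/*
 * Roughly equivalent C, for illustration only:
 *
 *	do {
 *		tbl[index] = phys_to_pte(rtbl) | flags;
 *		rtbl += inc;
 *	} while (++index <= eindex);
 *
 * i.e. the slots [index, eindex] are filled with descriptors for a
 * contiguous run of next-level tables (or output blocks), and rtbl is
 * left pointing just past the last one described.
 */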

/*
 * Compute indices of table entries from virtual address range. If multiple entries
 * were needed in the previous page table level then the next page table level is assumed
 * to be composed of multiple pages. (This effectively scales the end index).
 *
 *	vstart:	virtual address of start of range
 *	vend:	virtual address of end of range
 *	shift:	shift used to transform virtual address into index
 *	ptrs:	number of entries in page table
 *	istart:	index in table corresponding to vstart
 *	iend:	index in table corresponding to vend
 *	count:	On entry: how many extra entries were required in previous level, scales
 *			  our end index.
 *		On exit: returns how many extra entries required for next page table level
 *
 * Preserves:	vstart, vend, shift, ptrs
 * Returns:	istart, iend, count
 */
	.macro compute_indices, vstart, vend, shift, ptrs, istart, iend, count
	lsr	\iend, \vend, \shift
	mov	\istart, \ptrs
	sub	\istart, \istart, #1
	and	\iend, \iend, \istart	// iend = (vend >> shift) & (ptrs - 1)
	mov	\istart, \ptrs
	mul	\istart, \istart, \count
	add	\iend, \iend, \istart	// iend += count * ptrs
					// our entries span multiple tables

	lsr	\istart, \vstart, \shift
	mov	\count, \ptrs
	sub	\count, \count, #1
	and	\istart, \istart, \count

	sub	\count, \iend, \istart
	.endm
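
/*
 * Worked example, for illustration only, assuming 4 KiB pages and 48-bit
 * VAs (PGDIR_SHIFT == 39, ptrs == 512): a range whose start and end fall
 * into PGD slots 256 and 258, called with count == 0, gives
 *
 *	iend   = (vend >> 39) & 511	= 258
 *	iend  += 512 * 0		= 258
 *	istart = (vstart >> 39) & 511	= 256
 *	count  = iend - istart		= 2
 *
 * i.e. two extra PGD entries are needed, so the next level is treated as
 * three consecutive table pages and its end index is scaled by
 * count * ptrs on the following invocation.
 */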

/*
 * Map memory for specified virtual address range. Each level of page table needed supports
 * multiple entries. If a level requires n entries the next page table level is assumed to be
 * formed from n pages.
 *
 *	tbl:	location of page table
 *	rtbl:	address to be used for first level page table entry (typically tbl + PAGE_SIZE)
 *	vstart:	start address to map
 *	vend:	end address to map - we map [vstart, vend]
 *	flags:	flags to use to map last level entries
 *	phys:	physical address corresponding to vstart - physical memory is contiguous
 *	pgds:	the number of pgd entries
 *
 * Temporaries:	istart, iend, tmp, count, sv - these need to be different registers
 * Preserves:	vstart, vend, flags
 * Corrupts:	tbl, rtbl, istart, iend, tmp, count, sv
 */
	.macro map_memory, tbl, rtbl, vstart, vend, flags, phys, pgds, istart, iend, tmp, count, sv
	add \rtbl, \tbl, #PAGE_SIZE
	mov \sv, \rtbl
	mov \count, #0
	compute_indices \vstart, \vend, #PGDIR_SHIFT, \pgds, \istart, \iend, \count
	populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
	mov \tbl, \sv
	mov \sv, \rtbl

#if SWAPPER_PGTABLE_LEVELS > 3
	compute_indices \vstart, \vend, #PUD_SHIFT, #PTRS_PER_PUD, \istart, \iend, \count
	populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
	mov \tbl, \sv
	mov \sv, \rtbl
#endif

#if SWAPPER_PGTABLE_LEVELS > 2
	compute_indices \vstart, \vend, #SWAPPER_TABLE_SHIFT, #PTRS_PER_PMD, \istart, \iend, \count
	populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
	mov \tbl, \sv
#endif

	compute_indices \vstart, \vend, #SWAPPER_BLOCK_SHIFT, #PTRS_PER_PTE, \istart, \iend, \count
	bic \count, \phys, #SWAPPER_BLOCK_SIZE - 1
	populate_entries \tbl, \count, \istart, \iend, \flags, #SWAPPER_BLOCK_SIZE, \tmp
	.endm
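
/*
 * Resulting layout, for illustration only, assuming 4 KiB pages, 48-bit
 * VAs and section (2 MiB block) mappings, for a [vstart, vend] range that
 * does not cross a PGD or PUD boundary: one page is consumed per level,
 * allocated consecutively from tbl:
 *
 *	tbl + 0 * PAGE_SIZE:	PGD page, one entry -> the PUD page
 *	tbl + 1 * PAGE_SIZE:	PUD page, one entry -> the PMD page
 *	tbl + 2 * PAGE_SIZE:	PMD page, 2 MiB block entries covering
 *				[vstart, vend]
 *
 * A range that does cross a boundary simply consumes extra consecutive
 * pages at the affected level, as described above.
 */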

/*
 * Set up the initial page tables. We only set up the barest amount which is
 * required to get the kernel running. The following sections are required:
 *   - identity mapping to enable the MMU (low address, TTBR0)
 *   - first few MB of the kernel linear mapping to jump to once the MMU has
 *     been enabled
 */
SYM_FUNC_START_LOCAL(__create_page_tables)
	mov	x28, lr

	/*
	 * Invalidate the init page tables to avoid potential dirty cache lines
	 * being evicted. Other page tables are allocated in rodata as part of
	 * the kernel image, and thus are clean to the PoC per the boot
	 * protocol.
	 */
	adrp	x0, init_pg_dir
	adrp	x1, init_pg_end
	sub	x1, x1, x0
	bl	__inval_dcache_area

	/*
	 * Clear the init page tables.
	 */
	adrp	x0, init_pg_dir
	adrp	x1, init_pg_end
	sub	x1, x1, x0
1:	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	subs	x1, x1, #64
	b.ne	1b

	mov	x7, SWAPPER_MM_MMUFLAGS

	/*
	 * Create the identity mapping.
	 */
	adrp	x0, idmap_pg_dir
	adrp	x3, __idmap_text_start		// __pa(__idmap_text_start)

#ifdef CONFIG_ARM64_VA_BITS_52
	mrs_s	x6, SYS_ID_AA64MMFR2_EL1
	and	x6, x6, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
	mov	x5, #52
	cbnz	x6, 1f
#endif
	mov	x5, #VA_BITS_MIN
1:
	adr_l	x6, vabits_actual
	str	x5, [x6]
	dmb	sy
	dc	ivac, x6		// Invalidate potentially stale cache line

	/*
	 * VA_BITS may be too small to allow for an ID mapping to be created
	 * that covers system RAM if that is located sufficiently high in the
	 * physical address space. So for the ID map, use an extended virtual
	 * range in that case, and configure an additional translation level
	 * if needed.
	 *
	 * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the
	 * entire ID map region can be mapped. As T0SZ == (64 - #bits used),
	 * this number conveniently equals the number of leading zeroes in
	 * the physical address of __idmap_text_end.
	 */
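	/*
	 * Worked example, for illustration only: if __idmap_text_end sits at
	 * physical address 2^39 (512 GiB), clz gives 24, i.e. T0SZ = 24 and
	 * 40 bits of VA are needed to cover it. With VA_BITS == 48 the
	 * default T0SZ is 16 and 24 >= 16, so no extension is needed; with
	 * VA_BITS == 39 the default T0SZ is 25 and 24 < 25, so the extended
	 * range (and the extra level below) is configured.
	 */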
	adrp	x5, __idmap_text_end
	clz	x5, x5
	cmp	x5, TCR_T0SZ(VA_BITS)	// default T0SZ small enough?
	b.ge	1f			// .. then skip VA range extension

	adr_l	x6, idmap_t0sz
	str	x5, [x6]
	dmb	sy
	dc	ivac, x6		// Invalidate potentially stale cache line

#if (VA_BITS < 48)
#define EXTRA_SHIFT	(PGDIR_SHIFT + PAGE_SHIFT - 3)
#define EXTRA_PTRS	(1 << (PHYS_MASK_SHIFT - EXTRA_SHIFT))

	/*
	 * If VA_BITS < 48, we have to configure an additional table level.
	 * First, we have to verify our assumption that the current value of
	 * VA_BITS was chosen such that all translation levels are fully
	 * utilised, and that lowering T0SZ will always result in an additional
	 * translation level to be configured.
	 */
#if VA_BITS != EXTRA_SHIFT
#error "Mismatch between VA_BITS and page size/number of translation levels"
#endif
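
	/*
	 * Worked example, for illustration only: with 4 KiB pages and
	 * VA_BITS == 39, PGDIR_SHIFT == 30 and PAGE_SHIFT == 12, so
	 * EXTRA_SHIFT == 30 + 12 - 3 == 39 == VA_BITS, which satisfies the
	 * build-time check above, and (with a 48-bit PHYS_MASK_SHIFT)
	 * EXTRA_PTRS == 1 << (48 - 39) == 512 entries in the extra top-level
	 * table.
	 */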

	mov	x4, EXTRA_PTRS
	create_table_entry x0, x3, EXTRA_SHIFT, x4, x5, x6
#else
	/*
	 * If VA_BITS == 48, we don't have to configure an additional
	 * translation level, but the top-level table has more entries.
	 */
	mov	x4, #1 << (PHYS_MASK_SHIFT - PGDIR_SHIFT)
	str_l	x4, idmap_ptrs_per_pgd, x5
#endif
1:
	ldr_l	x4, idmap_ptrs_per_pgd
	mov	x5, x3				// __pa(__idmap_text_start)
	adr_l	x6, __idmap_text_end		// __pa(__idmap_text_end)

	map_memory x0, x1, x3, x6, x7, x3, x4, x10, x11, x12, x13, x14

	/*
	 * Map the kernel image (starting with PHYS_OFFSET).
	 */
	adrp	x0, init_pg_dir
	mov_q	x5, KIMAGE_VADDR + TEXT_OFFSET	// compile time __va(_text)
	add	x5, x5, x23			// add KASLR displacement
	mov	x4, PTRS_PER_PGD
	adrp	x6, _end			// runtime __pa(_end)
	adrp	x3, _text			// runtime __pa(_text)
	sub	x6, x6, x3			// _end - _text
	add	x6, x6, x5			// runtime __va(_end)

	map_memory x0, x1, x5, x6, x7, x3, x4, x10, x11, x12, x13, x14

	/*
	 * Since the page tables have been populated with non-cacheable
	 * accesses (MMU disabled), invalidate the idmap and swapper page
	 * tables again to remove any speculatively loaded cache lines.
	 */
	adrp	x0, idmap_pg_dir
	adrp	x1, init_pg_end
	sub	x1, x1, x0
	dmb	sy
	bl	__inval_dcache_area

	ret	x28
SYM_FUNC_END(__create_page_tables)

/*
 * The following fragment of code is executed with the MMU enabled.
 *
 *   x0 = __PHYS_OFFSET
 */
SYM_FUNC_START_LOCAL(__primary_switched)
	adrp	x4, init_thread_union
	add	sp, x4, #THREAD_SIZE
	adr_l	x5, init_task
	msr	sp_el0, x5			// Save thread_info

	adr_l	x8, vectors			// load VBAR_EL1 with virtual
	msr	vbar_el1, x8			// vector table address
	isb

	stp	xzr, x30, [sp, #-16]!
	mov	x29, sp

	str_l	x21, __fdt_pointer, x5		// Save FDT pointer

	ldr_l	x4, kimage_vaddr		// Save the offset between
	sub	x4, x4, x0			// the kernel virtual and
	str_l	x4, kimage_voffset, x5		// physical mappings

	// Clear BSS
	adr_l	x0, __bss_start
	mov	x1, xzr
	adr_l	x2, __bss_stop
	sub	x2, x2, x0
	bl	__pi_memset
	dsb	ishst				// Make zero page visible to PTW

#ifdef CONFIG_KASAN
	bl	kasan_early_init
#endif
#ifdef CONFIG_RANDOMIZE_BASE
	tst	x23, ~(MIN_KIMG_ALIGN - 1)	// already running randomized?
	b.ne	0f
	mov	x0, x21				// pass FDT address in x0
	bl	kaslr_early_init		// parse FDT for KASLR options
	cbz	x0, 0f				// KASLR disabled? just proceed
	orr	x23, x23, x0			// record KASLR offset
	ldp	x29, x30, [sp], #16		// we must enable KASLR, return
	ret					// to __primary_switch()
0:
#endif
	add	sp, sp, #16
	mov	x29, #0
	mov	x30, #0
	b	start_kernel
SYM_FUNC_END(__primary_switched)

	.pushsection ".rodata", "a"
SYM_DATA_START(kimage_vaddr)
	.quad		_text - TEXT_OFFSET
SYM_DATA_END(kimage_vaddr)
EXPORT_SYMBOL(kimage_vaddr)
	.popsection

/*
 * end early head section, begin head code that is also used for
 * hotplug and needs to have the same protections as the text region
 */
	.section ".idmap.text","awx"

/*
 * If we're fortunate enough to boot at EL2, ensure that the world is
 * sane before dropping to EL1.
 *
 * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in w0 if
 * booted in EL1 or EL2 respectively.
 */
SYM_FUNC_START(el2_setup)
	msr	SPsel, #1			// We want to use SP_EL{1,2}
	mrs	x0, CurrentEL
	cmp	x0, #CurrentEL_EL2
	b.eq	1f
	mov_q	x0, (SCTLR_EL1_RES1 | ENDIAN_SET_EL1)
	msr	sctlr_el1, x0
	mov	w0, #BOOT_CPU_MODE_EL1		// This cpu booted in EL1
	isb
	ret

1:	mov_q	x0, (SCTLR_EL2_RES1 | ENDIAN_SET_EL2)
	msr	sctlr_el2, x0

#ifdef CONFIG_ARM64_VHE
	/*
	 * Check for VHE being present. For the rest of the EL2 setup,
	 * x2 being non-zero indicates that we do have VHE, and that the
	 * kernel is intended to run at EL2.
	 */
	mrs	x2, id_aa64mmfr1_el1
	ubfx	x2, x2, #ID_AA64MMFR1_VHE_SHIFT, #4
#else
	mov	x2, xzr
#endif

	/* Hyp configuration. */
	mov_q	x0, HCR_HOST_NVHE_FLAGS
	cbz	x2, set_hcr
	mov_q	x0, HCR_HOST_VHE_FLAGS
set_hcr:
	msr	hcr_el2, x0
	isb

	/*
	 * Allow Non-secure EL1 and EL0 to access physical timer and counter.
	 * This is not necessary for VHE, since the host kernel runs in EL2,
	 * and EL0 accesses are configured in the later stage of boot process.
	 * Note that when HCR_EL2.E2H == 1, CNTHCTL_EL2 has the same bit layout
	 * as CNTKCTL_EL1, and CNTKCTL_EL1 accessing instructions are redefined
	 * to access CNTHCTL_EL2. This allows the kernel designed to run at EL1
	 * to transparently mess with the EL0 bits via CNTKCTL_EL1 access in
	 * EL2.
	 */
	cbnz	x2, 1f
	mrs	x0, cnthctl_el2
	orr	x0, x0, #3			// Enable EL1 physical timers
	msr	cnthctl_el2, x0
1:
	msr	cntvoff_el2, xzr		// Clear virtual offset

#ifdef CONFIG_ARM_GIC_V3
	/* GICv3 system register access */
	mrs	x0, id_aa64pfr0_el1
	ubfx	x0, x0, #ID_AA64PFR0_GIC_SHIFT, #4
	cbz	x0, 3f

	mrs_s	x0, SYS_ICC_SRE_EL2
	orr	x0, x0, #ICC_SRE_EL2_SRE	// Set ICC_SRE_EL2.SRE==1
	orr	x0, x0, #ICC_SRE_EL2_ENABLE	// Set ICC_SRE_EL2.Enable==1
	msr_s	SYS_ICC_SRE_EL2, x0
	isb					// Make sure SRE is now set
	mrs_s	x0, SYS_ICC_SRE_EL2		// Read SRE back,
	tbz	x0, #0, 3f			// and check that it sticks
	msr_s	SYS_ICH_HCR_EL2, xzr		// Reset ICH_HCR_EL2 to defaults

3:
#endif

	/* Populate ID registers. */
	mrs	x0, midr_el1
	mrs	x1, mpidr_el1
	msr	vpidr_el2, x0
	msr	vmpidr_el2, x1

#ifdef CONFIG_COMPAT
	msr	hstr_el2, xzr			// Disable CP15 traps to EL2
#endif

	/* EL2 debug */
	mrs	x1, id_aa64dfr0_el1
	sbfx	x0, x1, #ID_AA64DFR0_PMUVER_SHIFT, #4
	cmp	x0, #1
	b.lt	4f				// Skip if no PMU present
	mrs	x0, pmcr_el0			// Disable debug access traps
	ubfx	x0, x0, #11, #5			// to EL2 and allow access to
4:
	csel	x3, xzr, x0, lt			// all PMU counters from EL1

	/* Statistical profiling */
	ubfx	x0, x1, #ID_AA64DFR0_PMSVER_SHIFT, #4
	cbz	x0, 7f				// Skip if SPE not present
	cbnz	x2, 6f				// VHE?
	mrs_s	x4, SYS_PMBIDR_EL1		// If SPE available at EL2,
	and	x4, x4, #(1 << SYS_PMBIDR_EL1_P_SHIFT)
	cbnz	x4, 5f				// then permit sampling of physical
	mov	x4, #(1 << SYS_PMSCR_EL2_PCT_SHIFT | \
		      1 << SYS_PMSCR_EL2_PA_SHIFT)
	msr_s	SYS_PMSCR_EL2, x4		// addresses and physical counter
5:
	mov	x1, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT)
	orr	x3, x3, x1			// If we don't have VHE, then
	b	7f				// use EL1&0 translation.
6:						// For VHE, use EL2 translation
	orr	x3, x3, #MDCR_EL2_TPMS		// and disable access from EL1
7:
	msr	mdcr_el2, x3			// Configure debug traps

	/* LORegions */
	mrs	x1, id_aa64mmfr1_el1
	ubfx	x0, x1, #ID_AA64MMFR1_LOR_SHIFT, 4
	cbz	x0, 1f
	msr_s	SYS_LORC_EL1, xzr
1:

	/* Stage-2 translation */
	msr	vttbr_el2, xzr

	cbz	x2, install_el2_stub

	mov	w0, #BOOT_CPU_MODE_EL2		// This CPU booted in EL2
	isb
	ret

SYM_INNER_LABEL(install_el2_stub, SYM_L_LOCAL)
	/*
	 * When VHE is not in use, early init of EL2 and EL1 needs to be
	 * done here.
	 * When VHE _is_ in use, EL1 will not be used in the host and
	 * requires no configuration, and all non-hyp-specific EL2 setup
	 * will be done via the _EL1 system register aliases in __cpu_setup.
	 */
	mov_q	x0, (SCTLR_EL1_RES1 | ENDIAN_SET_EL1)
	msr	sctlr_el1, x0

	/* Coprocessor traps. */
	mov	x0, #0x33ff
	msr	cptr_el2, x0			// Disable copro. traps to EL2

	/* SVE register access */
	mrs	x1, id_aa64pfr0_el1
	ubfx	x1, x1, #ID_AA64PFR0_SVE_SHIFT, #4
	cbz	x1, 7f

	bic	x0, x0, #CPTR_EL2_TZ		// Also disable SVE traps
	msr	cptr_el2, x0			// Disable copro. traps to EL2
	isb
	mov	x1, #ZCR_ELx_LEN_MASK		// SVE: Enable full vector
	msr_s	SYS_ZCR_EL2, x1			// length for EL1.

	/* Hypervisor stub */
7:	adr_l	x0, __hyp_stub_vectors
	msr	vbar_el2, x0

	/* spsr */
	mov	x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, x0
	msr	elr_el2, lr
	mov	w0, #BOOT_CPU_MODE_EL2		// This CPU booted in EL2
	eret
SYM_FUNC_END(el2_setup)

/*
 * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
 * in w0. See arch/arm64/include/asm/virt.h for more info.
 */
SYM_FUNC_START_LOCAL(set_cpu_boot_mode_flag)
	adr_l	x1, __boot_cpu_mode
	cmp	w0, #BOOT_CPU_MODE_EL2
	b.ne	1f
	add	x1, x1, #4
1:	str	w0, [x1]			// This CPU has booted in EL1
	dmb	sy
	dc	ivac, x1			// Invalidate potentially stale cache line
	ret
SYM_FUNC_END(set_cpu_boot_mode_flag)

/*
 * These values are written with the MMU off, but read with the MMU on.
 * Writers will invalidate the corresponding address, discarding up to a
 * 'Cache Writeback Granule' (CWG) worth of data. The linker script ensures
 * sufficient alignment that the CWG doesn't overlap another section.
 */
	.pushsection ".mmuoff.data.write", "aw"
/*
 * We need to find out the CPU boot mode long after boot, so we need to
 * store it in a writable variable.
 *
 * This is not in .bss, because we set it sufficiently early that the boot-time
 * zeroing of .bss would clobber it.
 */
SYM_DATA_START(__boot_cpu_mode)
	.long	BOOT_CPU_MODE_EL2
	.long	BOOT_CPU_MODE_EL1
SYM_DATA_END(__boot_cpu_mode)
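/*
 * For illustration (the readers live in arch/arm64/include/asm/virt.h):
 * a CPU that entered the kernel at EL2 overwrites the second word with
 * BOOT_CPU_MODE_EL2, one that entered at EL1 overwrites the first word
 * with BOOT_CPU_MODE_EL1. Once all CPUs are up the pair therefore reads
 * {EL2, EL2} if every CPU had EL2, {EL1, EL1} if none did, and
 * {EL1, EL2} if the modes were mixed, which is what
 * is_hyp_mode_available() and is_hyp_mode_mismatched() test for.
 */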
/*
 * The booting CPU updates the failed status @__early_cpu_boot_status,
 * with MMU turned off.
 */
SYM_DATA_START(__early_cpu_boot_status)
	.quad 	0
SYM_DATA_END(__early_cpu_boot_status)

	.popsection

	/*
	 * This provides a "holding pen" in which platforms hold all secondary
	 * cores until we're ready for them to initialise.
	 */
SYM_FUNC_START(secondary_holding_pen)
	bl	el2_setup			// Drop to EL1, w0=cpu_boot_mode
	bl	set_cpu_boot_mode_flag
	mrs	x0, mpidr_el1
	mov_q	x1, MPIDR_HWID_BITMASK
	and	x0, x0, x1
	adr_l	x3, secondary_holding_pen_release
pen:	ldr	x4, [x3]
	cmp	x4, x0
	b.eq	secondary_startup
	wfe
	b	pen
SYM_FUNC_END(secondary_holding_pen)
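
	/*
	 * For illustration, with the "spin-table" enable-method (see
	 * arch/arm64/kernel/smp_spin_table.c): the boot CPU writes the target
	 * CPU's MPIDR hardware ID to secondary_holding_pen_release and issues
	 * a SEV; each penned CPU wakes from WFE, compares the value with its
	 * own MPIDR, and either falls through to secondary_startup or goes
	 * back to sleep.
	 */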

	/*
	 * Secondary entry point that jumps straight into the kernel. Only to
	 * be used where CPUs are brought online dynamically by the kernel.
	 */
SYM_FUNC_START(secondary_entry)
	bl	el2_setup			// Drop to EL1
	bl	set_cpu_boot_mode_flag
	b	secondary_startup
SYM_FUNC_END(secondary_entry)

SYM_FUNC_START_LOCAL(secondary_startup)
	/*
	 * Common entry point for secondary CPUs.
	 */
	bl	__cpu_secondary_check52bitva
	mov	x0, #ARM64_CPU_BOOT_SECONDARY
	bl	__cpu_setup			// initialise processor
	adrp	x1, swapper_pg_dir
	bl	__enable_mmu
	ldr	x8, =__secondary_switched
	br	x8
SYM_FUNC_END(secondary_startup)

SYM_FUNC_START_LOCAL(__secondary_switched)
	adr_l	x5, vectors
	msr	vbar_el1, x5
	isb

	adr_l	x0, secondary_data
	ldr	x1, [x0, #CPU_BOOT_STACK]	// get secondary_data.stack
	cbz	x1, __secondary_too_slow
	mov	sp, x1
	ldr	x2, [x0, #CPU_BOOT_TASK]
	cbz	x2, __secondary_too_slow
	msr	sp_el0, x2
	mov	x29, #0
	mov	x30, #0
	b	secondary_start_kernel
SYM_FUNC_END(__secondary_switched)

SYM_FUNC_START_LOCAL(__secondary_too_slow)
	wfe
	wfi
	b	__secondary_too_slow
SYM_FUNC_END(__secondary_too_slow)

/*
 * The booting CPU updates the failed status @__early_cpu_boot_status,
 * with MMU turned off.
 *
 * update_early_cpu_boot_status status, tmp1, tmp2
 *  - Corrupts tmp1, tmp2
 *  - Writes 'status' to __early_cpu_boot_status and makes sure
 *    it is committed to memory.
 */

	.macro	update_early_cpu_boot_status status, tmp1, tmp2
	mov	\tmp2, #\status
	adr_l	\tmp1, __early_cpu_boot_status
	str	\tmp2, [\tmp1]
	dmb	sy
	dc	ivac, \tmp1			// Invalidate potentially stale cache line
	.endm

/*
 * Enable the MMU.
 *
 *  x0  = SCTLR_EL1 value for turning on the MMU.
 *  x1  = TTBR1_EL1 value
 *
 * Returns to the caller via x30/lr. This requires the caller to be covered
 * by the .idmap.text section.
 *
 * Checks if the selected granule size is supported by the CPU.
 * If it isn't, park the CPU
 */
SYM_FUNC_START(__enable_mmu)
	mrs	x2, ID_AA64MMFR0_EL1
	ubfx	x2, x2, #ID_AA64MMFR0_TGRAN_SHIFT, 4
	cmp	x2, #ID_AA64MMFR0_TGRAN_SUPPORTED
	b.ne	__no_granule_support
	update_early_cpu_boot_status 0, x2, x3
	adrp	x2, idmap_pg_dir
	phys_to_ttbr x1, x1
	phys_to_ttbr x2, x2
	msr	ttbr0_el1, x2			// load TTBR0
	offset_ttbr1 x1, x3
	msr	ttbr1_el1, x1			// load TTBR1
	isb
	msr	sctlr_el1, x0
	isb
	/*
	 * Invalidate the local I-cache so that any instructions fetched
	 * speculatively from the PoC are discarded, since they may have
	 * been dynamically patched at the PoU.
	 */
	ic	iallu
	dsb	nsh
	isb
	ret
SYM_FUNC_END(__enable_mmu)

SYM_FUNC_START(__cpu_secondary_check52bitva)
#ifdef CONFIG_ARM64_VA_BITS_52
	ldr_l	x0, vabits_actual
	cmp	x0, #52
	b.ne	2f

	mrs_s	x0, SYS_ID_AA64MMFR2_EL1
	and	x0, x0, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
	cbnz	x0, 2f

	update_early_cpu_boot_status \
		CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_52_BIT_VA, x0, x1
1:	wfe
	wfi
	b	1b

#endif
2:	ret
SYM_FUNC_END(__cpu_secondary_check52bitva)

SYM_FUNC_START_LOCAL(__no_granule_support)
	/* Indicate that this CPU can't boot and is stuck in the kernel */
	update_early_cpu_boot_status \
		CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_NO_GRAN, x1, x2
1:
	wfe
	wfi
	b	1b
SYM_FUNC_END(__no_granule_support)

#ifdef CONFIG_RELOCATABLE
SYM_FUNC_START_LOCAL(__relocate_kernel)
	/*
	 * Iterate over each entry in the relocation table, and apply the
	 * relocations in place.
	 */
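	/*
	 * For illustration: each entry is an Elf64_Rela record, hence the
	 * 24-byte stride below:
	 *
	 *	offset  0: r_offset -> x12 (link-time VA of the place to patch)
	 *	offset  8: r_info   -> x13 (must be R_AARCH64_RELATIVE)
	 *	offset 16: r_addend -> x14 (link-time value, displaced by x23)
	 */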
	ldr	w9, =__rela_offset		// offset to reloc table
	ldr	w10, =__rela_size		// size of reloc table

	mov_q	x11, KIMAGE_VADDR		// default virtual offset
	add	x11, x11, x23			// actual virtual offset
	add	x9, x9, x11			// __va(.rela)
	add	x10, x9, x10			// __va(.rela) + sizeof(.rela)

0:	cmp	x9, x10
	b.hs	1f
	ldp	x12, x13, [x9], #24
	ldr	x14, [x9, #-8]
	cmp	w13, #R_AARCH64_RELATIVE
	b.ne	0b
	add	x14, x14, x23			// relocate
	str	x14, [x12, x23]
	b	0b

1:
#ifdef CONFIG_RELR
	/*
	 * Apply RELR relocations.
	 *
	 * RELR is a compressed format for storing relative relocations. The
	 * encoded sequence of entries looks like:
	 * [ AAAAAAAA BBBBBBB1 BBBBBBB1 ... AAAAAAAA BBBBBB1 ... ]
	 *
	 * i.e. start with an address, followed by any number of bitmaps. The
	 * address entry encodes 1 relocation. The subsequent bitmap entries
	 * encode up to 63 relocations each, at subsequent offsets following
	 * the last address entry.
	 *
	 * The bitmap entries must have 1 in the least significant bit. The
	 * assumption here is that an address cannot have 1 in lsb. Odd
	 * addresses are not supported. Any odd addresses are stored in the RELA
	 * section, which is handled above.
	 *
	 * Excluding the least significant bit in the bitmap, each non-zero
	 * bit in the bitmap represents a relocation to be applied to
	 * a corresponding machine word that follows the base address
	 * word. The second least significant bit represents the machine
	 * word immediately following the initial address, and each bit
	 * that follows represents the next word, in linear order. As such,
	 * a single bitmap can encode up to 63 relocations in a 64-bit object.
	 *
	 * In this implementation we store the address of the next RELR table
	 * entry in x9, the address being relocated by the current address or
	 * bitmap entry in x13 and the address being relocated by the current
	 * bit in x14.
	 *
	 * Because addends are stored in place in the binary, RELR relocations
	 * cannot be applied idempotently. We use x24 to keep track of the
	 * currently applied displacement so that we can correctly relocate if
	 * __relocate_kernel is called twice with non-zero displacements (i.e.
	 * if there is both a physical misalignment and a KASLR displacement).
	 */
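	/*
	 * Worked example, for illustration only, using toy image offsets: the
	 * two consecutive entries
	 *
	 *	0x0000000000001000		(even: address entry)
	 *	0x000000000000000b		(odd:  bitmap entry, 0b1011)
	 *
	 * relocate the word at offset 0x1000 (the address entry itself) and,
	 * since bits 1 and 3 of the bitmap are set, the words at 0x1008 and
	 * 0x1018, while 0x1010 (bit 2, clear) is left alone. Bit 0 is only
	 * the bitmap marker; the base for a subsequent bitmap entry advances
	 * by 8 * 63 bytes.
	 */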
	ldr	w9, =__relr_offset		// offset to reloc table
	ldr	w10, =__relr_size		// size of reloc table
	add	x9, x9, x11			// __va(.relr)
	add	x10, x9, x10			// __va(.relr) + sizeof(.relr)

	sub	x15, x23, x24			// delta from previous offset
	cbz	x15, 7f				// nothing to do if unchanged
	mov	x24, x23			// save new offset

2:	cmp	x9, x10
	b.hs	7f
	ldr	x11, [x9], #8
	tbnz	x11, #0, 3f			// branch to handle bitmaps
	add	x13, x11, x23
	ldr	x12, [x13]			// relocate address entry
	add	x12, x12, x15
	str	x12, [x13], #8			// adjust to start of bitmap
	b	2b

3:	mov	x14, x13
4:	lsr	x11, x11, #1
	cbz	x11, 6f
	tbz	x11, #0, 5f			// skip bit if not set
	ldr	x12, [x14]			// relocate bit
	add	x12, x12, x15
	str	x12, [x14]

5:	add	x14, x14, #8			// move to next bit's address
	b	4b

6:	/*
	 * Move to the next bitmap's address. 8 is the word size, and 63 is the
	 * number of significant bits in a bitmap entry.
	 */
	add	x13, x13, #(8 * 63)
	b	2b

7:
#endif
	ret

SYM_FUNC_END(__relocate_kernel)
#endif

SYM_FUNC_START_LOCAL(__primary_switch)
#ifdef CONFIG_RANDOMIZE_BASE
	mov	x19, x0				// preserve new SCTLR_EL1 value
	mrs	x20, sctlr_el1			// preserve old SCTLR_EL1 value
#endif

	adrp	x1, init_pg_dir
	bl	__enable_mmu
#ifdef CONFIG_RELOCATABLE
#ifdef CONFIG_RELR
	mov	x24, #0				// no RELR displacement yet
#endif
	bl	__relocate_kernel
#ifdef CONFIG_RANDOMIZE_BASE
	ldr	x8, =__primary_switched
	adrp	x0, __PHYS_OFFSET
	blr	x8

	/*
	 * If we return here, we have a KASLR displacement in x23 which we need
	 * to take into account by discarding the current kernel mapping and
	 * creating a new one.
	 */
	pre_disable_mmu_workaround
	msr	sctlr_el1, x20			// disable the MMU
	isb
	bl	__create_page_tables		// recreate kernel mapping

	tlbi	vmalle1				// Remove any stale TLB entries
	dsb	nsh

	msr	sctlr_el1, x19			// re-enable the MMU
	isb
	ic	iallu				// flush instructions fetched
	dsb	nsh				// via old mapping
	isb

	bl	__relocate_kernel
#endif
#endif
	ldr	x8, =__primary_switched
	adrp	x0, __PHYS_OFFSET
	br	x8
SYM_FUNC_END(__primary_switch)
987