/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <asm-offsets.h>
#include <config.h>
#include <linux/linkage.h>
#include <asm/macro.h>
#include <asm/armv8/mmu.h>

/*************************************************************************
 *
 * Startup Code (reset vector)
 *
 *************************************************************************/

.globl	_start
_start:
#ifdef CONFIG_ENABLE_ARM_SOC_BOOT0_HOOK
/*
 * Various SoCs need something special and SoC-specific up front in
 * order to boot; allow them to provide that in their boot0.h file and
 * include it here.
 */
#include <asm/arch/boot0.h>
#else
	b	reset
#endif

	.align 3

.globl	_TEXT_BASE
_TEXT_BASE:
	.quad	CONFIG_SYS_TEXT_BASE

/*
 * These are defined in the linker script.
 */
.globl	_end_ofs
_end_ofs:
	.quad	_end - _start

.globl	_bss_start_ofs
_bss_start_ofs:
	.quad	__bss_start - _start

.globl	_bss_end_ofs
_bss_end_ofs:
	.quad	__bss_end - _start

reset:
	/* Allow the board to save important registers */
	b	save_boot_params
.globl	save_boot_params_ret
save_boot_params_ret:
#if CONFIG_POSITION_INDEPENDENT
	/*
	 * Fix .rela.dyn relocations. This allows U-Boot to be loaded to and
	 * executed at a different address than it was linked at.
	 */
pie_fixup:
	adr	x0, _start		/* x0 <- Runtime value of _start */
	ldr	x1, _TEXT_BASE		/* x1 <- Linked value of _start */
	sub	x9, x0, x1		/* x9 <- Run-vs-link offset */
	adr	x2, __rel_dyn_start	/* x2 <- Runtime &__rel_dyn_start */
	adr	x3, __rel_dyn_end	/* x3 <- Runtime &__rel_dyn_end */
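	/*
	 * Each .rela.dyn entry is an Elf64_Rela record: a 64-bit offset,
	 * a 64-bit info word and a 64-bit addend (24 bytes per entry).
	 * Relocation type 1027 is R_AARCH64_RELATIVE; all other types
	 * are skipped.
	 */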
pie_fix_loop:
	ldp	x0, x1, [x2], #16	/* (x0, x1) <- (Link location, fixup) */
	ldr	x4, [x2], #8		/* x4 <- addend */
	cmp	w1, #1027		/* relative fixup? */
	bne	pie_skip_reloc
	/* relative fix: store addend plus offset at dest location */
	add	x0, x0, x9
	add	x4, x4, x9
	str	x4, [x0]
pie_skip_reloc:
	cmp	x2, x3
	b.lo	pie_fix_loop
pie_fixup_done:
#endif

#ifdef CONFIG_SYS_RESET_SCTRL
	bl reset_sctrl
#endif
	/*
	 * Could be EL3/EL2/EL1, Initial State:
	 * Little Endian, MMU Disabled, i/dCache Disabled
	 */
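	/*
	 * Install the exception vectors for the current EL. At EL3 this
	 * also routes EA/IRQ/FIQ to EL3, sets SCR_EL3.NS and programs
	 * CNTFRQ_EL0. At every level FP/SIMD trapping is left disabled
	 * (CPTR_EL3 = 0, CPTR_EL2 = 0x33ff keeps TFP clear,
	 * CPACR_EL1.FPEN = 0b11).
	 */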
	adr	x0, vectors
	switch_el x1, 3f, 2f, 1f
3:	msr	vbar_el3, x0
	mrs	x0, scr_el3
	orr	x0, x0, #0xf			/* SCR_EL3.NS|IRQ|FIQ|EA */
	msr	scr_el3, x0
	msr	cptr_el3, xzr			/* Enable FP/SIMD */
#ifdef COUNTER_FREQUENCY
	ldr	x0, =COUNTER_FREQUENCY
	msr	cntfrq_el0, x0			/* Initialize CNTFRQ */
#endif
	b	0f
2:	msr	vbar_el2, x0
	mov	x0, #0x33ff
	msr	cptr_el2, x0			/* Enable FP/SIMD */
	b	0f
1:	msr	vbar_el1, x0
	mov	x0, #3 << 20
	msr	cpacr_el1, x0			/* Enable FP/SIMD */
0:

	/*
	 * Set the SMPEN bit for coherency.
	 * This register is not architectural, but at the moment this bit
	 * should be set for A53/A57/A72.
	 */
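	/* Bit 6 (0x40) of CPUECTLR_EL1 (S3_1_c15_c2_1) is SMPEN. */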
#ifdef CONFIG_ARMV8_SET_SMPEN
	switch_el x1, 3f, 1f, 1f
3:
	mrs     x0, S3_1_c15_c2_1               /* cpuectlr_el1 */
	orr     x0, x0, #0x40
	msr     S3_1_c15_c2_1, x0
1:
#endif

	/* Apply ARM core specific errata */
	bl	apply_core_errata

	/*
	 * Cache/BPB/TLB Invalidate
	 * The I-cache is invalidated before being enabled in icache_enable().
	 * The TLBs are invalidated before the MMU is enabled in dcache_enable().
	 * The D-cache is invalidated before being enabled in dcache_enable().
	 */

	/* Processor specific initialization */
	bl	lowlevel_init

#if defined(CONFIG_ARMV8_SPIN_TABLE) && !defined(CONFIG_SPL_BUILD)
	branch_if_master x0, x1, master_cpu
	b	spin_table_secondary_jump
	/* never returns */
#elif defined(CONFIG_ARMV8_MULTIENTRY)
	branch_if_master x0, x1, master_cpu

	/*
	 * Slave CPUs
	 */
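	/*
	 * Secondary CPUs sleep in WFE until the master writes a non-zero
	 * entry point to CPU_RELEASE_ADDR and wakes them up.
	 */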
slave_cpu:
	wfe
	ldr	x1, =CPU_RELEASE_ADDR
	ldr	x0, [x1]
	cbz	x0, slave_cpu
	br	x0			/* branch to the given address */
#endif /* CONFIG_ARMV8_MULTIENTRY */
master_cpu:
	bl	_main

#ifdef CONFIG_SYS_RESET_SCTRL
reset_sctrl:
	switch_el x1, 3f, 2f, 1f
3:
	mrs	x0, sctlr_el3
	b	0f
2:
	mrs	x0, sctlr_el2
	b	0f
1:
	mrs	x0, sctlr_el1

0:
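	/*
	 * Clear M (bit 0), C (bit 2) and EE (bit 25): MMU off, D-cache
	 * off, little-endian exception entry.
	 */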
	ldr	x1, =0xfdfffffa
	and	x0, x0, x1

	switch_el x1, 6f, 5f, 4f
6:
	msr	sctlr_el3, x0
	b	7f
5:
	msr	sctlr_el2, x0
	b	7f
4:
	msr	sctlr_el1, x0

7:
	dsb	sy
	isb
	b	__asm_invalidate_tlb_all
	ret
#endif

/*-----------------------------------------------------------------------*/

WEAK(apply_core_errata)

	mov	x29, lr			/* Save LR */
	/* For now, we support Cortex-A53 and Cortex-A57 specific errata */

	/* Check if we are running on a Cortex-A53 core */
	branch_if_a53_core x0, apply_a53_core_errata

	/* Check if we are running on a Cortex-A57 core */
	branch_if_a57_core x0, apply_a57_core_errata
0:
	mov	lr, x29			/* Restore LR */
	ret

apply_a53_core_errata:

#ifdef CONFIG_ARM_ERRATA_855873
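	/*
	 * Only apply on r0p3 or later: skip if MIDR_EL1.Variant != 0 or
	 * MIDR_EL1.Revision < 3.
	 */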
	mrs	x0, midr_el1
	tst	x0, #(0xf << 20)
	b.ne	0b

	mrs	x0, midr_el1
	and	x0, x0, #0xf
	cmp	x0, #3
	b.lt	0b

	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Enable data cache clean as data cache clean/invalidate */
	orr	x0, x0, #1 << 44
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif
	b 0b
apply_a57_core_errata:

#ifdef CONFIG_ARM_ERRATA_828024
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Disable non-allocate hint of w-b-n-a memory type */
	orr	x0, x0, #1 << 49
	/* Disable write streaming no L1-allocate threshold */
	orr	x0, x0, #3 << 25
	/* Disable write streaming no-allocate threshold */
	orr	x0, x0, #3 << 27
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_826974
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Disable speculative load execution ahead of a DMB */
	orr	x0, x0, #1 << 59
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_833471
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/*
	 * FPSCR write flush.
	 * Note that in some cases where a flush is unnecessary this
	 * could impact performance.
	 */
	orr	x0, x0, #1 << 38
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_829520
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/*
	 * Setting the "Disable Indirect Predictor" bit prevents this
	 * erratum from occurring.
	 * Note that in some cases where a flush is unnecessary this
	 * could impact performance.
	 */
	orr	x0, x0, #1 << 4
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_833069
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Clear the "Enable invalidates of BTB" bit */
	and	x0, x0, #0xE
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif
	b 0b
ENDPROC(apply_core_errata)

/*-----------------------------------------------------------------------*/

WEAK(lowlevel_init)
	mov	x29, lr			/* Save LR */

#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
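	/*
	 * The master CPU initializes the GIC distributor; every CPU then
	 * sets up its own CPU interface (GICv2) or redistributor (GICv3).
	 */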
	branch_if_slave x0, 1f
	ldr	x0, =GICD_BASE
	bl	gic_init_secure
1:
#if defined(CONFIG_GICV3)
	ldr	x0, =GICR_BASE
	bl	gic_init_secure_percpu
#elif defined(CONFIG_GICV2)
	ldr	x0, =GICD_BASE
	ldr	x1, =GICC_BASE
	bl	gic_init_secure_percpu
#endif
#endif

#ifdef CONFIG_ARMV8_MULTIENTRY
	branch_if_master x0, x1, 2f

	/*
	 * Slaves should wait for the master to finish clearing the spin
	 * table. This sync prevents slaves from observing a stale spin
	 * table value and jumping to the wrong place.
	 */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
#ifdef CONFIG_GICV2
	ldr	x0, =GICC_BASE
#endif
	bl	gic_wait_for_interrupt
#endif

	/*
	 * All slaves will enter EL2 and optionally EL1.
	 */
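	/*
	 * armv8_switch_to_el2 does not return here: it drops to EL2 in
	 * AArch64 state and continues at the address passed in x4.
	 */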
	adr	x4, lowlevel_in_el2
	ldr	x5, =ES_TO_AARCH64
	bl	armv8_switch_to_el2

lowlevel_in_el2:
#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
	adr	x4, lowlevel_in_el1
	ldr	x5, =ES_TO_AARCH64
	bl	armv8_switch_to_el1

lowlevel_in_el1:
#endif

#endif /* CONFIG_ARMV8_MULTIENTRY */

2:
	mov	lr, x29			/* Restore LR */
	ret
ENDPROC(lowlevel_init)
WEAK(smp_kick_all_cpus)
	/* Kick secondary CPUs up with an SGI 0 interrupt */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	ldr	x0, =GICD_BASE
	b	gic_kick_secondary_cpus
#endif
	ret
ENDPROC(smp_kick_all_cpus)

/*-----------------------------------------------------------------------*/

ENTRY(c_runtime_cpu_setup)
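	/*
	 * Called from crt0_64.S after relocation: point VBAR_ELx at the
	 * relocated exception vectors.
	 */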
	/* Relocate VBAR */
	adr	x0, vectors
	switch_el x1, 3f, 2f, 1f
3:	msr	vbar_el3, x0
	b	0f
2:	msr	vbar_el2, x0
	b	0f
1:	msr	vbar_el1, x0
0:

	ret
ENDPROC(c_runtime_cpu_setup)

WEAK(save_boot_params)
	b	save_boot_params_ret	/* back to my caller */
ENDPROC(save_boot_params)