/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <asm-offsets.h>
#include <config.h>
#include <linux/linkage.h>
#include <asm/macro.h>
#include <asm/armv8/mmu.h>

/*************************************************************************
 *
 * Startup Code (reset vector)
 *
 *************************************************************************/

.globl	_start
_start:
#ifdef CONFIG_ENABLE_ARM_SOC_BOOT0_HOOK
/*
 * Various SoCs need something special and SoC-specific up front in
 * order to boot; they can provide that in their boot0.h file, which
 * is pulled in here.
 */
#include <asm/arch/boot0.h>
#else
	b	reset
#endif
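/*
 * Either way, execution is expected to continue at 'reset' below; a
 * boot0.h hook typically just emits SoC-specific header space or an
 * entry stub (e.g. room for a BROM boot header on some Allwinner
 * SoCs) before branching there.
 */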

	.align 3

.globl	_TEXT_BASE
_TEXT_BASE:
	.quad	CONFIG_SYS_TEXT_BASE

/*
 * These are defined in the linker script.
 */
.globl	_end_ofs
_end_ofs:
	.quad	_end - _start

.globl	_bss_start_ofs
_bss_start_ofs:
	.quad	__bss_start - _start

.globl	_bss_end_ofs
_bss_end_ofs:
	.quad	__bss_end - _start
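/*
 * Keeping these as _start-relative offsets (rather than link-time
 * absolute addresses) lets early boot and relocation code size and
 * copy the image correctly regardless of where it is currently
 * executing.
 */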

reset:
	/* Allow the board to save important registers */
	b	save_boot_params
.globl	save_boot_params_ret
save_boot_params_ret:

#ifdef CONFIG_SYS_RESET_SCTRL
	bl	reset_sctrl
#endif
	/*
	 * We may have entered at EL3, EL2 or EL1. Initial state:
	 * little-endian, MMU disabled, I-cache and D-cache disabled.
	 */
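	/*
	 * switch_el (asm/macro.h) reads CurrentEL and branches to the
	 * 3f/2f/1f label matching the exception level we booted at;
	 * each path below installs the exception vector table
	 * ('vectors', defined in exceptions.S) and enables FP/SIMD
	 * access for that level.
	 */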
	adr	x0, vectors
	switch_el x1, 3f, 2f, 1f
3:	msr	vbar_el3, x0
	mrs	x0, scr_el3
	orr	x0, x0, #0xf			/* SCR_EL3.NS|IRQ|FIQ|EA */
	msr	scr_el3, x0
	msr	cptr_el3, xzr			/* Enable FP/SIMD */
#ifdef COUNTER_FREQUENCY
	ldr	x0, =COUNTER_FREQUENCY
	msr	cntfrq_el0, x0			/* Initialize CNTFRQ */
#endif
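	/*
	 * The orr above set SCR_EL3[3:0]: NS=1 makes the lower
	 * exception levels non-secure, and EA/FIQ/IRQ=1 route
	 * external aborts, FIQs and IRQs to EL3. CNTFRQ_EL0 is
	 * written on this path only because it is writable solely
	 * from the highest implemented exception level.
	 */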
	b	0f
2:	msr	vbar_el2, x0
	mov	x0, #0x33ff
	msr	cptr_el2, x0			/* Enable FP/SIMD */
	b	0f
1:	msr	vbar_el1, x0
	mov	x0, #3 << 20
	msr	cpacr_el1, x0			/* Enable FP/SIMD */
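	/*
	 * The FP/SIMD enables work by clearing trap bits: 0x33ff
	 * leaves CPTR_EL2.TFP (bit 10) clear while keeping the RES1
	 * bits set, and CPACR_EL1.FPEN (bits [21:20]) = 0b11 disables
	 * EL0/EL1 trapping of FP/SIMD instructions.
	 */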
0:

	/*
	 * Enable the SMPEN bit for coherency.
	 * This register is not architectural, but at the moment this
	 * bit should be set for A53/A57/A72.
	 */
#ifdef CONFIG_ARMV8_SET_SMPEN
	switch_el x1, 3f, 1f, 1f
3:
	mrs     x0, S3_1_c15_c2_1               /* cpuectlr_el1 */
	orr     x0, x0, #0x40
	msr     S3_1_c15_c2_1, x0
1:
#endif
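	/*
	 * SMPEN is bit 6 (#0x40) of the IMPLEMENTATION DEFINED
	 * CPUECTLR_EL1, accessed via its raw S3_1_c15_c2_1 encoding;
	 * it must be set before the caches and MMU are enabled, or
	 * the core does not participate in hardware cache coherency.
	 */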

	/* Apply ARM core-specific errata workarounds */
	bl	apply_core_errata

	/*
	 * Cache/BPB/TLB invalidate:
	 * the I-cache is invalidated before being enabled in icache_enable(),
	 * the TLB is invalidated before the MMU is enabled in dcache_enable(),
	 * the D-cache is invalidated before being enabled in dcache_enable().
	 */

	/* Processor-specific initialization */
	bl	lowlevel_init

#if defined(CONFIG_ARMV8_SPIN_TABLE) && !defined(CONFIG_SPL_BUILD)
	branch_if_master x0, x1, master_cpu
	b	spin_table_secondary_jump
	/* never returns */
#elif defined(CONFIG_ARMV8_MULTIENTRY)
	branch_if_master x0, x1, master_cpu

	/*
	 * Slave CPUs
	 */
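	/*
	 * Each slave parks on wfe and polls the CPU_RELEASE_ADDR
	 * mailbox; once the master (or, later, the OS) stores a
	 * non-zero entry point there and sends an event, the slave
	 * branches to it.
	 */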
slave_cpu:
	wfe
	ldr	x1, =CPU_RELEASE_ADDR
	ldr	x0, [x1]
	cbz	x0, slave_cpu
	br	x0			/* branch to the given address */
#endif /* CONFIG_ARMV8_MULTIENTRY */
master_cpu:
	bl	_main

#ifdef CONFIG_SYS_RESET_SCTRL
reset_sctrl:
	switch_el x1, 3f, 2f, 1f
3:
	mrs	x0, sctlr_el3
	b	0f
2:
	mrs	x0, sctlr_el2
	b	0f
1:
	mrs	x0, sctlr_el1

0:
	ldr	x1, =0xfdfffffa
	and	x0, x0, x1
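	/*
	 * The 0xfdfffffa mask clears SCTLR_ELx.M (bit 0, MMU),
	 * SCTLR_ELx.C (bit 2, D-cache) and SCTLR_ELx.EE (bit 25,
	 * exception endianness): MMU off, D-cache off, little-endian.
	 */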

	switch_el x1, 6f, 5f, 4f
6:
	msr	sctlr_el3, x0
	b	7f
5:
	msr	sctlr_el2, x0
	b	7f
4:
	msr	sctlr_el1, x0

7:
	dsb	sy
	isb
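	/* Tail call: __asm_invalidate_tlb_all returns to our caller via lr */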
	b	__asm_invalidate_tlb_all
	ret
#endif

/*-----------------------------------------------------------------------*/

WEAK(apply_core_errata)

	mov	x29, lr			/* Save LR */
	/* For now, we support Cortex-A57 specific errata only */

	/* Check if we are running on a Cortex-A57 core */
	branch_if_a57_core x0, apply_a57_core_errata
0:
	mov	lr, x29			/* Restore LR */
	ret

apply_a57_core_errata:

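	/*
	 * CPUACTLR_EL1 is IMPLEMENTATION DEFINED, so the workarounds
	 * below access it through its raw system-register encoding
	 * S3_1_c15_c2_0 (op0=3, op1=1, CRn=15, CRm=2, op2=0), which
	 * assemblers accept without knowing the register by name.
	 */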
#ifdef CONFIG_ARM_ERRATA_828024
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Disable the non-allocate hint of the Write-Back No-Allocate memory type */
	orr	x0, x0, #1 << 49
	/* Disable write streaming no L1-allocate threshold */
	orr	x0, x0, #3 << 25
	/* Disable write streaming no-allocate threshold */
	orr	x0, x0, #3 << 27
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_826974
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Disable speculative load execution ahead of a DMB */
	orr	x0, x0, #1 << 59
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_833471
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/*
	 * Force FPSCR write flush. Note that in some cases where a
	 * flush is unnecessary this could impact performance.
	 */
	orr	x0, x0, #1 << 38
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_829520
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/*
	 * Setting the Disable Indirect Predictor bit prevents this
	 * erratum from occurring. Note that in some cases where a
	 * flush is unnecessary this could impact performance.
	 */
	orr	x0, x0, #1 << 4
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_833069
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Clear the "Enable invalidates of BTB" bit */
	and	x0, x0, #0xE
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif
	b	0b
ENDPROC(apply_core_errata)

/*-----------------------------------------------------------------------*/

WEAK(lowlevel_init)
	mov	x29, lr			/* Save LR */

#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
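	/*
	 * Only the master CPU initializes the shared distributor;
	 * every CPU then sets up its own per-CPU interface (the
	 * redistributor on GICv3, the GICC CPU interface on GICv2).
	 */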
	branch_if_slave x0, 1f
	ldr	x0, =GICD_BASE
	bl	gic_init_secure
1:
#if defined(CONFIG_GICV3)
	ldr	x0, =GICR_BASE
	bl	gic_init_secure_percpu
#elif defined(CONFIG_GICV2)
	ldr	x0, =GICD_BASE
	ldr	x1, =GICC_BASE
	bl	gic_init_secure_percpu
#endif
#endif

#ifdef CONFIG_ARMV8_MULTIENTRY
	branch_if_master x0, x1, 2f

	/*
	 * Slaves should wait until the master has cleared the spin
	 * table. This synchronization prevents slaves from observing
	 * a stale spin-table value and jumping to the wrong place.
	 */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
#ifdef CONFIG_GICV2
	ldr	x0, =GICC_BASE
#endif
	bl	gic_wait_for_interrupt
#endif

	/*
	 * All slaves will enter EL2 and optionally EL1.
	 */
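	/*
	 * armv8_switch_to_el2 drops the exception level and resumes
	 * at the address passed in x4 (the label below); x5 =
	 * ES_TO_AARCH64 selects the AArch64 execution state for the
	 * target level.
	 */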
	adr	x4, lowlevel_in_el2
	ldr	x5, =ES_TO_AARCH64
	bl	armv8_switch_to_el2

lowlevel_in_el2:
#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
	adr	x4, lowlevel_in_el1
	ldr	x5, =ES_TO_AARCH64
	bl	armv8_switch_to_el1

lowlevel_in_el1:
#endif

#endif /* CONFIG_ARMV8_MULTIENTRY */

2:
	mov	lr, x29			/* Restore LR */
	ret
ENDPROC(lowlevel_init)

WEAK(smp_kick_all_cpus)
	/* Wake the secondary CPUs with an SGI 0 interrupt */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	ldr	x0, =GICD_BASE
	b	gic_kick_secondary_cpus
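	/*
	 * Tail call: gic_kick_secondary_cpus returns to our caller
	 * via lr; the ret below is only reached when no GIC support
	 * is configured.
	 */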
#endif
	ret
ENDPROC(smp_kick_all_cpus)

/*-----------------------------------------------------------------------*/

ENTRY(c_runtime_cpu_setup)
	/*
	 * U-Boot has already relocated itself when this runs, so
	 * re-point VBAR at the relocated vector table.
	 */
	adr	x0, vectors
	switch_el x1, 3f, 2f, 1f
3:	msr	vbar_el3, x0
	b	0f
2:	msr	vbar_el2, x0
	b	0f
1:	msr	vbar_el1, x0
0:

	ret
ENDPROC(c_runtime_cpu_setup)

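/*
 * Boards that need to preserve registers handed over by the previous
 * boot stage can override this weak stub; any override must finish
 * with 'b save_boot_params_ret', since the reset path branches here
 * rather than making a normal call.
 */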
WEAK(save_boot_params)
	b	save_boot_params_ret	/* back to my caller */
ENDPROC(save_boot_params)