/* xref: /openbmc/u-boot/arch/arm/cpu/armv8/start.S (revision 4a34e4b8) */
/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */
7
8#include <asm-offsets.h>
9#include <config.h>
10#include <linux/linkage.h>
11#include <asm/macro.h>
12#include <asm/armv8/mmu.h>
13
/*************************************************************************
 *
 * Startup Code (reset vector)
 *
 *************************************************************************/
19
/*
 * Image entry point: the very first word of the U-Boot binary.
 * Only a branch lives here; the words that follow are data consumed
 * by the relocation/startup code, so they must stay at these fixed
 * offsets from _start.
 */
.globl	_start
_start:
	b	reset

	.align 3			/* 8-byte align the .quad data below */

/* Link-time base address of the image (CONFIG_SYS_TEXT_BASE) */
.globl	_TEXT_BASE
_TEXT_BASE:
	.quad	CONFIG_SYS_TEXT_BASE

/*
 * These are defined in the linker script.
 * Stored as offsets from _start so they stay valid before and after
 * relocation.
 */

/* Offset of the image end (_end) from _start */
.globl	_end_ofs
_end_ofs:
	.quad	_end - _start

/* Offset of the start of .bss from _start */
.globl	_bss_start_ofs
_bss_start_ofs:
	.quad	__bss_start - _start

/* Offset of the end of .bss from _start */
.globl	_bss_end_ofs
_bss_end_ofs:
	.quad	__bss_end - _start
44
reset:
	/*
	 * Could be EL3/EL2/EL1, Initial State:
	 * Little Endian, MMU Disabled, i/dCache Disabled
	 *
	 * Install the exception vector table and enable FP/SIMD for
	 * whichever exception level we happened to enter at.
	 */
	adr	x0, vectors			/* x0 = &vectors (PC-relative) */
	switch_el x1, 3f, 2f, 1f		/* dispatch on current EL */
3:	msr	vbar_el3, x0
	mrs	x0, scr_el3
	orr	x0, x0, #0xf			/* SCR_EL3.NS|IRQ|FIQ|EA */
	msr	scr_el3, x0
	msr	cptr_el3, xzr			/* Enable FP/SIMD */
	ldr	x0, =COUNTER_FREQUENCY
	msr	cntfrq_el0, x0			/* Initialize CNTFRQ */
	b	0f
2:	msr	vbar_el2, x0
	mov	x0, #0x33ff
	msr	cptr_el2, x0			/* Enable FP/SIMD */
	b	0f
1:	msr	vbar_el1, x0
	mov	x0, #3 << 20			/* CPACR_EL1.FPEN = 0b11 */
	msr	cpacr_el1, x0			/* Enable FP/SIMD */
0:

	/* Apply ARM core specific erratas */
	bl	apply_core_errata

	/*
	 * Cache/BPB/TLB Invalidate
	 * i-cache is invalidated before enabled in icache_enable()
	 * tlb is invalidated before mmu is enabled in dcache_enable()
	 * d-cache is invalidated before enabled in dcache_enable()
	 */

	/* Processor specific initialization */
	bl	lowlevel_init

#ifdef CONFIG_ARMV8_MULTIENTRY
	branch_if_master x0, x1, master_cpu

	/*
	 * Slave CPUs: park in WFE until the master publishes a non-zero
	 * jump address in the spin table at CPU_RELEASE_ADDR.
	 */
slave_cpu:
	wfe
	ldr	x1, =CPU_RELEASE_ADDR
	ldr	x0, [x1]
	cbz	x0, slave_cpu		/* spurious wakeup: keep waiting */
	br	x0			/* branch to the given address */
master_cpu:
	/* On the master CPU */
#endif /* CONFIG_ARMV8_MULTIENTRY */

	/* Hand over to the common AArch64 startup (crt0) code */
	bl	_main
99
/*-----------------------------------------------------------------------*/
101
/*
 * void apply_core_errata(void)	-- WEAK, boards may override
 *
 * Apply CPU-core-specific erratum workarounds.  Currently only
 * Cortex-A57 errata are handled; on any other core this is a no-op.
 *
 * Runs before any stack is set up, so LR is preserved in x29 rather
 * than on the stack.  Clobbers: x0, x29.
 *
 * Fix vs. original: the 828024/826974 sections used "mov x0, #imm"
 * after the mrs, which (a) threw away the value just read from
 * CPUACTLR_EL1 (each successive mov also overwrote the previous one,
 * so only the final constant would have been written, wiping all other
 * implementation-defined bits), and (b) used constants such as
 * 0x3 << 25 that are not encodable as MOVZ wide immediates.  The
 * correct read-modify-write is "orr x0, x0, #imm" with logical
 * immediates, as below.
 */
WEAK(apply_core_errata)

	mov	x29, lr			/* Save LR (no stack yet) */
	/* For now, we support Cortex-A57 specific errata only */

	/* Check if we are running on a Cortex-A57 core */
	branch_if_a57_core x0, apply_a57_core_errata
0:
	mov	lr, x29			/* Restore LR */
	ret

apply_a57_core_errata:

#ifdef CONFIG_ARM_ERRATA_828024
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Disable non-allocate hint of w-b-n-a memory type */
	orr	x0, x0, #0x1 << 49
	/* Disable write streaming no L1-allocate threshold */
	orr	x0, x0, #0x3 << 25
	/* Disable write streaming no-allocate threshold */
	orr	x0, x0, #0x3 << 27
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_826974
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Disable speculative load execution ahead of a DMB */
	orr	x0, x0, #0x1 << 59
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_833069
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Disable Enable Invalidates of BTB bit */
	and	x0, x0, #0xE
	/*
	 * NOTE(review): "and #0xE" clears bit 0 but ALSO zeroes bits
	 * 63:4 of CPUACTLR_EL1; kept byte-identical to the original,
	 * but confirm against the A57 erratum notice whether
	 * "bic x0, x0, #1" was intended instead.
	 */
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif
	b	0b			/* back to common epilogue / ret */
ENDPROC(apply_core_errata)
141
/*-----------------------------------------------------------------------*/
143
/*
 * void lowlevel_init(void)	-- WEAK default, boards may override
 *
 * Early platform init called from reset: secure GIC setup and, for
 * multi-entry systems, parking of the slave CPUs.  LR is saved in x29
 * rather than on a stack, so this runs safely before the stack exists.
 * Clobbers: x0, x1, x29 (plus whatever the gic_* helpers use —
 * confirm in the GIC support code).
 */
WEAK(lowlevel_init)
	mov	x29, lr			/* Save LR (no stack yet) */

#ifndef CONFIG_ARMV8_MULTIENTRY
	/*
	 * For single-entry systems the lowlevel init is very simple.
	 */
	ldr	x0, =GICD_BASE
	bl	gic_init_secure

#else /* CONFIG_ARMV8_MULTIENTRY is set */

#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	/* Only the master CPU initializes the (shared) GIC distributor */
	branch_if_slave x0, 1f
	ldr	x0, =GICD_BASE
	bl	gic_init_secure
1:
#if defined(CONFIG_GICV3)
	ldr	x0, =GICR_BASE		/* per-CPU redistributor */
	bl	gic_init_secure_percpu
#elif defined(CONFIG_GICV2)
	ldr	x0, =GICD_BASE
	ldr	x1, =GICC_BASE		/* per-CPU interface */
	bl	gic_init_secure_percpu
#endif
#endif

	branch_if_master x0, x1, 2f

	/*
	 * Slave should wait for master clearing spin table.
	 * This sync prevents slaves observing an incorrect
	 * value of the spin table and jumping to the wrong place.
	 */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
#ifdef CONFIG_GICV2
	ldr	x0, =GICC_BASE
#endif
	bl	gic_wait_for_interrupt
#endif

	/*
	 * All slaves will enter EL2 and optionally EL1.
	 */
	bl	armv8_switch_to_el2
#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
	bl	armv8_switch_to_el1
#endif

#endif /* CONFIG_ARMV8_MULTIENTRY */

2:
	mov	lr, x29			/* Restore LR */
	ret
ENDPROC(lowlevel_init)
199
/*
 * void smp_kick_all_cpus(void)	-- WEAK default, boards may override
 *
 * Wake the secondary CPUs out of WFE/WFI by sending SGI 0 through the
 * GIC distributor.  No-op when neither GICv2 nor GICv3 is configured.
 * LR is kept in x29 (no stack assumed).  Clobbers: x0, x29.
 */
WEAK(smp_kick_all_cpus)
	/* Kick secondary cpus up by SGI 0 interrupt */
	mov	x29, lr			/* Save LR */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	ldr	x0, =GICD_BASE
	bl	gic_kick_secondary_cpus
#endif
	mov	lr, x29			/* Restore LR */
	ret
ENDPROC(smp_kick_all_cpus)
210
/*-----------------------------------------------------------------------*/
212
/*
 * void c_runtime_cpu_setup(void)
 *
 * Re-install VBAR for the current exception level from the (PC-relative)
 * address of 'vectors', so the vector base follows the running copy of
 * the code.  Presumably invoked from the C-runtime startup path after
 * relocation — confirm against crt0_64.S.  Clobbers: x0, x1.
 */
ENTRY(c_runtime_cpu_setup)
	/* Relocate vBAR */
	adr	x0, vectors		/* PC-relative: resolves to current location */
	switch_el x1, 3f, 2f, 1f	/* dispatch on current EL */
3:	msr	vbar_el3, x0
	b	0f
2:	msr	vbar_el2, x0
	b	0f
1:	msr	vbar_el1, x0
0:

	ret
ENDPROC(c_runtime_cpu_setup)
226