/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <asm-offsets.h>
#include <config.h>
#include <linux/linkage.h>
#include <asm/macro.h>
#include <asm/armv8/mmu.h>

/*************************************************************************
 *
 * Startup Code (reset vector)
 *
 *************************************************************************/

.globl	_start
_start:
#ifdef CONFIG_ENABLE_ARM_SOC_BOOT0_HOOK
/*
 * Various SoCs need something special and SoC-specific up front in
 * order to boot, allow them to set that in their boot0.h file and then
 * use it here.
 */
#include <asm/arch/boot0.h>
#else
	b	reset
#endif

	.align 3

.globl	_TEXT_BASE
_TEXT_BASE:
	.quad	CONFIG_SYS_TEXT_BASE

/*
 * These are defined in the linker script.
 */
.globl	_end_ofs
_end_ofs:
	.quad	_end - _start

.globl	_bss_start_ofs
_bss_start_ofs:
	.quad	__bss_start - _start

.globl	_bss_end_ofs
_bss_end_ofs:
	.quad	__bss_end - _start

reset:
	/* Allow the board to save important registers */
	b	save_boot_params
.globl	save_boot_params_ret
save_boot_params_ret:

#ifdef CONFIG_SYS_RESET_SCTRL
	bl	reset_sctrl
#endif
	/*
	 * Could be EL3/EL2/EL1, Initial State:
	 * Little Endian, MMU Disabled, i/dCache Disabled
	 */
	adr	x0, vectors
	switch_el x1, 3f, 2f, 1f
3:	msr	vbar_el3, x0
	mrs	x0, scr_el3
	orr	x0, x0, #0xf			/* SCR_EL3.NS|IRQ|FIQ|EA */
	msr	scr_el3, x0
	msr	cptr_el3, xzr			/* Enable FP/SIMD */
#ifdef COUNTER_FREQUENCY
	ldr	x0, =COUNTER_FREQUENCY
	msr	cntfrq_el0, x0			/* Initialize CNTFRQ */
#endif
	b	0f
2:	msr	vbar_el2, x0
	mov	x0, #0x33ff
	msr	cptr_el2, x0			/* Enable FP/SIMD */
	b	0f
1:	msr	vbar_el1, x0
	mov	x0, #3 << 20
	msr	cpacr_el1, x0			/* Enable FP/SIMD */
0:
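
	/*
	 * For reference, the magic values above: writing zero to
	 * cptr_el3 clears CPTR_EL3.TFP (bit 10), so FP/SIMD accesses
	 * are not trapped to EL3; 0x33ff sets the CPTR_EL2 RES1 bits
	 * ([13:12] and [9:0]) while leaving TFP (bit 10) clear; and
	 * 3 << 20 sets CPACR_EL1.FPEN (bits [21:20]) to 0b11, which
	 * disables EL0/EL1 FP/SIMD trapping.
	 */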

	/*
	 * Enable the SMPEN bit for coherency.
	 * This register is not architectural but at the moment
	 * this bit should be set for A53/A57/A72.
	 */
#ifdef CONFIG_ARMV8_SET_SMPEN
	mrs	x0, S3_1_c15_c2_1		/* cpuectlr_el1 */
	orr	x0, x0, #0x40
	msr	S3_1_c15_c2_1, x0
#endif

	/* Apply ARM core specific errata workarounds */
	bl	apply_core_errata

	/*
	 * Cache/BPB/TLB Invalidate
	 * i-cache is invalidated before enabled in icache_enable()
	 * tlb is invalidated before mmu is enabled in dcache_enable()
	 * d-cache is invalidated before enabled in dcache_enable()
	 */

	/* Processor specific initialization */
	bl	lowlevel_init

#if defined(CONFIG_ARMV8_SPIN_TABLE) && !defined(CONFIG_SPL_BUILD)
	branch_if_master x0, x1, master_cpu
	b	spin_table_secondary_jump
	/* never return */
#elif defined(CONFIG_ARMV8_MULTIENTRY)
	branch_if_master x0, x1, master_cpu

	/*
	 * Slave CPUs
	 */
slave_cpu:
	wfe
	ldr	x1, =CPU_RELEASE_ADDR
	ldr	x0, [x1]
	cbz	x0, slave_cpu
	br	x0			/* branch to the given address */
#endif /* CONFIG_ARMV8_MULTIENTRY */
master_cpu:
	bl	_main

#ifdef CONFIG_SYS_RESET_SCTRL
reset_sctrl:
	switch_el x1, 3f, 2f, 1f
3:
	mrs	x0, sctlr_el3
	b	0f
2:
	mrs	x0, sctlr_el2
	b	0f
1:
	mrs	x0, sctlr_el1

0:
	/* Clear EE (bit 25), C (bit 2) and M (bit 0):
	 * little-endian, d-cache off, MMU off */
	ldr	x1, =0xfdfffffa
	and	x0, x0, x1

	switch_el x1, 6f, 5f, 4f
6:
	msr	sctlr_el3, x0
	b	7f
5:
	msr	sctlr_el2, x0
	b	7f
4:
	msr	sctlr_el1, x0

7:
	dsb	sy
	isb
	b	__asm_invalidate_tlb_all
	ret	/* unreachable: the tail call above returns to our caller */
#endif

/*-----------------------------------------------------------------------*/

WEAK(apply_core_errata)

	mov	x29, lr			/* Save LR */
	/* For now, we support Cortex-A57 specific errata only */

	/* Check if we are running on a Cortex-A57 core */
	branch_if_a57_core x0, apply_a57_core_errata
0:
	mov	lr, x29			/* Restore LR */
	ret

apply_a57_core_errata:

#ifdef CONFIG_ARM_ERRATA_828024
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Disable non-allocate hint of w-b-n-a memory type */
	orr	x0, x0, #1 << 49
	/* Disable write streaming no L1-allocate threshold */
	orr	x0, x0, #3 << 25
	/* Disable write streaming no-allocate threshold */
	orr	x0, x0, #3 << 27
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_826974
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Disable speculative load execution ahead of a DMB */
	orr	x0, x0, #1 << 59
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_833471
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/*
	 * FPSCR write flush.
	 * Note that in some cases where a flush is unnecessary this
	 * could impact performance.
	 */
	orr	x0, x0, #1 << 38
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_829520
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/*
	 * Setting the Disable Indirect Predictor bit prevents this
	 * erratum from occurring.
	 * Note that in some cases where a flush is unnecessary this
	 * could impact performance.
	 */
	orr	x0, x0, #1 << 4
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_833069
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Disable Enable Invalidates of BTB bit */
	and	x0, x0, #0xE
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif
	b	0b
ENDPROC(apply_core_errata)
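
/*
 * Note: CPUACTLR_EL1 (and CPUECTLR_EL1, used for SMPEN above) are
 * IMPLEMENTATION DEFINED Cortex-A53/A57/A72 registers, so they are
 * accessed by raw system-register encoding rather than by name:
 * S3_1_c15_c2_0 (op0=3, op1=1, CRn=15, CRm=2, op2=0) is CPUACTLR_EL1
 * and S3_1_c15_c2_1 is CPUECTLR_EL1. The workaround bit positions
 * come from the Cortex-A57 erratum notices referenced by the
 * CONFIG_ARM_ERRATA_* symbols.
 */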

/*-----------------------------------------------------------------------*/

WEAK(lowlevel_init)
	mov	x29, lr			/* Save LR */

#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	branch_if_slave x0, 1f
	ldr	x0, =GICD_BASE
	bl	gic_init_secure
1:
#if defined(CONFIG_GICV3)
	ldr	x0, =GICR_BASE
	bl	gic_init_secure_percpu
#elif defined(CONFIG_GICV2)
	ldr	x0, =GICD_BASE
	ldr	x1, =GICC_BASE
	bl	gic_init_secure_percpu
#endif
#endif

#ifdef CONFIG_ARMV8_MULTIENTRY
	branch_if_master x0, x1, 2f

	/*
	 * Slaves should wait for the master to finish clearing the
	 * spin table. This sync prevents slaves from observing a
	 * stale spin-table value and jumping to the wrong place.
	 */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
#ifdef CONFIG_GICV2
	ldr	x0, =GICC_BASE
#endif
	bl	gic_wait_for_interrupt
#endif

	/*
	 * All slaves will enter EL2 and optionally EL1.
	 */
	adr	x4, lowlevel_in_el2
	ldr	x5, =ES_TO_AARCH64
	bl	armv8_switch_to_el2

lowlevel_in_el2:
#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
	adr	x4, lowlevel_in_el1
	ldr	x5, =ES_TO_AARCH64
	bl	armv8_switch_to_el1

lowlevel_in_el1:
#endif

#endif /* CONFIG_ARMV8_MULTIENTRY */

2:
	mov	lr, x29			/* Restore LR */
	ret
ENDPROC(lowlevel_init)

WEAK(smp_kick_all_cpus)
	/* Kick secondary CPUs awake with an SGI 0 interrupt */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	ldr	x0, =GICD_BASE
	b	gic_kick_secondary_cpus
#endif
	ret
ENDPROC(smp_kick_all_cpus)

/*-----------------------------------------------------------------------*/

ENTRY(c_runtime_cpu_setup)
	/* Relocate VBAR */
	adr	x0, vectors
	switch_el x1, 3f, 2f, 1f
3:	msr	vbar_el3, x0
	b	0f
2:	msr	vbar_el2, x0
	b	0f
1:	msr	vbar_el1, x0
0:

	ret
ENDPROC(c_runtime_cpu_setup)

WEAK(save_boot_params)
	b	save_boot_params_ret	/* back to my caller */
ENDPROC(save_boot_params)
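
/*
 * Boards may override this weak stub to capture whatever the boot ROM
 * left in registers before U-Boot clobbers them; the only contract
 * visible here is that the override must resume execution at
 * save_boot_params_ret. A minimal sketch of such an override, shown
 * as a comment (boot_params_stash is a hypothetical buffer in memory
 * that is already writable this early, not part of this file):
 *
 * ENTRY(save_boot_params)
 *	adr	x8, boot_params_stash
 *	stp	x0, x1, [x8]		// assumed boot ROM arguments
 *	stp	x2, x3, [x8, #16]
 *	b	save_boot_params_ret	// resume in start.S
 * ENDPROC(save_boot_params)
 */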