/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <asm-offsets.h>
#include <config.h>
#include <linux/linkage.h>
#include <asm/macro.h>
#include <asm/armv8/mmu.h>

/*************************************************************************
 *
 * Startup Code (reset vector)
 *
 *************************************************************************/

.globl	_start
_start:
	b	reset

#ifdef CONFIG_ENABLE_ARM_SOC_BOOT0_HOOK
/*
 * Various SoCs need something special and SoC-specific up front in
 * order to boot, allow them to set that in their boot0.h file and then
 * use it here.
 */
#include <asm/arch/boot0.h>
ARM_SOC_BOOT0_HOOK
#endif

	.align 3

.globl	_TEXT_BASE
_TEXT_BASE:
	.quad	CONFIG_SYS_TEXT_BASE

/*
 * These are defined in the linker script.
 */
.globl	_end_ofs
_end_ofs:
	.quad	_end - _start

.globl	_bss_start_ofs
_bss_start_ofs:
	.quad	__bss_start - _start

.globl	_bss_end_ofs
_bss_end_ofs:
	.quad	__bss_end - _start

reset:
#ifdef CONFIG_SYS_RESET_SCTRL
	bl	reset_sctrl
#endif
	/*
	 * Could be EL3/EL2/EL1, Initial State:
	 * Little Endian, MMU Disabled, i/dCache Disabled
	 */
	adr	x0, vectors
	switch_el x1, 3f, 2f, 1f
3:	msr	vbar_el3, x0
	mrs	x0, scr_el3
	orr	x0, x0, #0xf			/* SCR_EL3.NS|IRQ|FIQ|EA */
	msr	scr_el3, x0
	msr	cptr_el3, xzr			/* Enable FP/SIMD */
#ifdef COUNTER_FREQUENCY
	ldr	x0, =COUNTER_FREQUENCY
	msr	cntfrq_el0, x0			/* Initialize CNTFRQ */
#endif
	b	0f
2:	msr	vbar_el2, x0
	mov	x0, #0x33ff
	msr	cptr_el2, x0			/* Enable FP/SIMD */
	b	0f
1:	msr	vbar_el1, x0
	mov	x0, #3 << 20
	msr	cpacr_el1, x0			/* Enable FP/SIMD */
0:
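	/*
	 * For reference: switch_el (from asm/macro.h) branches on the
	 * current exception level. A minimal sketch of what it expands
	 * to (not the verbatim macro):
	 *
	 *	mrs	\xreg, CurrentEL
	 *	cmp	\xreg, 0xc	-- EL lives in bits [3:2]; 0xc = EL3
	 *	b.eq	\el3_label
	 *	cmp	\xreg, 0x8	-- 0x8 = EL2
	 *	b.eq	\el2_label
	 *	cmp	\xreg, 0x4	-- 0x4 = EL1
	 *	b.eq	\el1_label
	 */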

	/*
	 * Enable SMPEN bit for coherency.
	 * This register is not architectural, but at the moment
	 * this bit should be set for A53/A57/A72.
	 */
	mrs	x0, S3_1_c15_c2_1		/* cpuectlr_el1 */
	orr	x0, x0, #0x40			/* SMPEN is bit 6 */
	msr	S3_1_c15_c2_1, x0

	/* Apply ARM core specific errata */
	bl	apply_core_errata

	/*
	 * Cache/BPB/TLB Invalidate
	 * i-cache is invalidated before being enabled in icache_enable()
	 * TLB is invalidated before the MMU is enabled in dcache_enable()
	 * d-cache is invalidated before being enabled in dcache_enable()
	 */

	/* Processor specific initialization */
	bl	lowlevel_init

#ifdef CONFIG_ARMV8_MULTIENTRY
	branch_if_master x0, x1, master_cpu

	/*
	 * Slave CPUs
	 */
slave_cpu:
	wfe
	ldr	x1, =CPU_RELEASE_ADDR
	ldr	x0, [x1]
	cbz	x0, slave_cpu
	br	x0			/* branch to the given address */
master_cpu:
	/* On the master CPU */
#endif /* CONFIG_ARMV8_MULTIENTRY */
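
	/*
	 * For context: the slave_cpu loop above implements the consuming
	 * half of a spin-table-style release. A sketch of a matching
	 * releasing side, assuming the entry point is already in x0
	 * (hypothetical; the actual release path is board/SoC-specific):
	 *
	 *	ldr	x1, =CPU_RELEASE_ADDR
	 *	str	x0, [x1]	-- publish the slave entry point
	 *	dsb	sy		-- make the store observable...
	 *	sev			-- ...before waking slaves from wfe
	 */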

	bl	_main

#ifdef CONFIG_SYS_RESET_SCTRL
reset_sctrl:
	switch_el x1, 3f, 2f, 1f
3:
	mrs	x0, sctlr_el3
	b	0f
2:
	mrs	x0, sctlr_el2
	b	0f
1:
	mrs	x0, sctlr_el1

0:
	ldr	x1, =0xfdfffffa		/* clear SCTLR.M, SCTLR.C and SCTLR.EE */
	and	x0, x0, x1

	switch_el x1, 6f, 5f, 4f
6:
	msr	sctlr_el3, x0
	b	7f
5:
	msr	sctlr_el2, x0
	b	7f
4:
	msr	sctlr_el1, x0

7:
	dsb	sy
	isb
	/* Tail call; __asm_invalidate_tlb_all returns to our caller */
	b	__asm_invalidate_tlb_all
#endif

/*-----------------------------------------------------------------------*/

WEAK(apply_core_errata)

	mov	x29, lr			/* Save LR */
	/* For now, we support Cortex-A57 specific errata only */

	/* Check if we are running on a Cortex-A57 core */
	branch_if_a57_core x0, apply_a57_core_errata
0:
	mov	lr, x29			/* Restore LR */
	ret

apply_a57_core_errata:

#ifdef CONFIG_ARM_ERRATA_828024
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Disable non-allocate hint of w-b-n-a memory type */
	orr	x0, x0, #1 << 49
	/* Disable write streaming no L1-allocate threshold */
	orr	x0, x0, #3 << 25
	/* Disable write streaming no-allocate threshold */
	orr	x0, x0, #3 << 27
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_826974
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Disable speculative load execution ahead of a DMB */
	orr	x0, x0, #1 << 59
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_833471
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/*
	 * Force FPSCR write flush.
	 * Note that in some cases where a flush is unnecessary this
	 * could impact performance.
	 */
	orr	x0, x0, #1 << 38
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_829520
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/*
	 * Setting the Disable Indirect Predictor bit prevents this
	 * erratum from occurring.
	 * Note that in some cases where a flush is unnecessary this
	 * could impact performance.
	 */
	orr	x0, x0, #1 << 4
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_833069
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Clear the "Enable invalidates of BTB" bit */
	and	x0, x0, #0xE
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif
	b	0b
ENDPROC(apply_core_errata)

/*-----------------------------------------------------------------------*/

WEAK(lowlevel_init)
	mov	x29, lr			/* Save LR */

#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	branch_if_slave x0, 1f
	ldr	x0, =GICD_BASE
	bl	gic_init_secure
1:
#if defined(CONFIG_GICV3)
	ldr	x0, =GICR_BASE
	bl	gic_init_secure_percpu
#elif defined(CONFIG_GICV2)
	ldr	x0, =GICD_BASE
	ldr	x1, =GICC_BASE
	bl	gic_init_secure_percpu
#endif
#endif

#ifdef CONFIG_ARMV8_MULTIENTRY
	branch_if_master x0, x1, 2f

	/*
	 * Slaves should wait for the master to clear the spin table.
	 * This sync prevents slaves from observing an incorrect
	 * spin-table value and jumping to the wrong place.
	 */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
#ifdef CONFIG_GICV2
	ldr	x0, =GICC_BASE
#endif
	bl	gic_wait_for_interrupt
#endif

	/*
	 * All slaves will enter EL2 and optionally EL1.
	 */
	bl	armv8_switch_to_el2
#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
	bl	armv8_switch_to_el1
#endif

#endif /* CONFIG_ARMV8_MULTIENTRY */

2:
	mov	lr, x29			/* Restore LR */
	ret
ENDPROC(lowlevel_init)

WEAK(smp_kick_all_cpus)
	/* Kick secondary cpus up by SGI 0 interrupt */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	ldr	x0, =GICD_BASE
	b	gic_kick_secondary_cpus
#endif
	ret
ENDPROC(smp_kick_all_cpus)

/*-----------------------------------------------------------------------*/

ENTRY(c_runtime_cpu_setup)
	/* Point VBAR at the relocated exception vectors */
	adr	x0, vectors
	switch_el x1, 3f, 2f, 1f
3:	msr	vbar_el3, x0
	b	0f
2:	msr	vbar_el2, x0
	b	0f
1:	msr	vbar_el1, x0
0:

	ret
ENDPROC(c_runtime_cpu_setup)
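
/*
 * Note on the 'vectors' symbol used above: VBAR_ELx requires a 2 KB
 * aligned base, and each of the 16 AArch64 vector entries occupies
 * 0x80 bytes. A minimal sketch of such a table (the real one lives in
 * the armv8 exceptions code; this is not its verbatim layout):
 *
 *		.align	11		-- 2 KB alignment for VBAR_ELx
 *	vectors:
 *		.align	7		-- each slot is 0x80 bytes
 *		b	handle_sync	-- Current EL w/ SP0, Synchronous
 *		.align	7
 *		b	handle_irq	-- Current EL w/ SP0, IRQ
 *		-- ...14 more 0x80-byte slots...
 */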