/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <asm-offsets.h>
#include <config.h>
#include <linux/linkage.h>
#include <asm/macro.h>
#include <asm/armv8/mmu.h>

/*************************************************************************
 *
 * Startup Code (reset vector)
 *
 *************************************************************************/

.globl	_start
_start:
	b	reset

#ifdef CONFIG_ENABLE_ARM_SOC_BOOT0_HOOK
/*
 * Various SoCs need something special and SoC-specific up front in
 * order to boot, allow them to set that in their boot0.h file and then
 * use it here.
 */
#include <asm/arch/boot0.h>
ARM_SOC_BOOT0_HOOK
#endif

	.align 3

.globl	_TEXT_BASE
_TEXT_BASE:
	.quad	CONFIG_SYS_TEXT_BASE

/*
 * These are defined in the linker script.
 */
.globl	_end_ofs
_end_ofs:
	.quad	_end - _start

.globl	_bss_start_ofs
_bss_start_ofs:
	.quad	__bss_start - _start

.globl	_bss_end_ofs
_bss_end_ofs:
	.quad	__bss_end - _start

reset:
	/* Allow the board to save important registers */
	b	save_boot_params
.globl	save_boot_params_ret
save_boot_params_ret:

#ifdef CONFIG_SYS_RESET_SCTRL
	bl	reset_sctrl
#endif
	/*
	 * Could be EL3/EL2/EL1, Initial State:
	 * Little Endian, MMU Disabled, i/dCache Disabled
	 *
	 * Install the exception vectors and enable FP/SIMD access for
	 * whichever exception level we happen to have been entered in.
	 */
	adr	x0, vectors
	switch_el x1, 3f, 2f, 1f
3:	msr	vbar_el3, x0
	mrs	x0, scr_el3
	orr	x0, x0, #0xf			/* SCR_EL3.NS|IRQ|FIQ|EA */
	msr	scr_el3, x0
	msr	cptr_el3, xzr			/* Enable FP/SIMD */
#ifdef COUNTER_FREQUENCY
	ldr	x0, =COUNTER_FREQUENCY
	msr	cntfrq_el0, x0			/* Initialize CNTFRQ */
#endif
	b	0f
2:	msr	vbar_el2, x0
	mov	x0, #0x33ff
	msr	cptr_el2, x0			/* Enable FP/SIMD */
	b	0f
1:	msr	vbar_el1, x0
	mov	x0, #3 << 20
	msr	cpacr_el1, x0			/* Enable FP/SIMD */
0:

	/*
	 * Enable SMPEN bit for coherency.
	 * This register is not architectural but at the moment
	 * this bit should be set for A53/A57/A72.
	 */
#ifdef CONFIG_ARMV8_SET_SMPEN
	mrs	x0, S3_1_c15_c2_1		/* cpuactlr_el1 */
	orr	x0, x0, #0x40			/* SMPEN */
	msr	S3_1_c15_c2_1, x0
#endif

	/* Apply ARM core specific errata */
	bl	apply_core_errata

	/*
	 * Cache/BPB/TLB Invalidate
	 * i-cache is invalidated before enabled in icache_enable()
	 * tlb is invalidated before mmu is enabled in dcache_enable()
	 * d-cache is invalidated before enabled in dcache_enable()
	 */

	/* Processor specific initialization */
	bl	lowlevel_init

#if CONFIG_IS_ENABLED(ARMV8_SPIN_TABLE)
	branch_if_master x0, x1, master_cpu
	b	spin_table_secondary_jump
	/* never return */
#elif defined(CONFIG_ARMV8_MULTIENTRY)
	branch_if_master x0, x1, master_cpu

	/*
	 * Slave CPUs: wait until the master publishes a jump address in
	 * the spin table, then branch to it.
	 */
slave_cpu:
	wfe
	ldr	x1, =CPU_RELEASE_ADDR
	ldr	x0, [x1]
	cbz	x0, slave_cpu			/* not released yet, keep waiting */
	br	x0				/* branch to the given address */
#endif /* CONFIG_ARMV8_MULTIENTRY */
master_cpu:
	bl	_main

#ifdef CONFIG_SYS_RESET_SCTRL
/*
 * reset_sctrl - put SCTLR_ELx into a known state at the current EL.
 *
 * Clears bits via mask 0xfdfffffa (among others: MMU enable, alignment
 * check, data-cache enable), then invalidates the TLBs.
 * Clobbers: x0, x1. Returns to caller via the tail-called routine.
 */
reset_sctrl:
	switch_el x1, 3f, 2f, 1f
3:
	mrs	x0, sctlr_el3
	b	0f
2:
	mrs	x0, sctlr_el2
	b	0f
1:
	mrs	x0, sctlr_el1

0:
	ldr	x1, =0xfdfffffa
	and	x0, x0, x1

	switch_el x1, 6f, 5f, 4f
6:
	msr	sctlr_el3, x0
	b	7f
5:
	msr	sctlr_el2, x0
	b	7f
4:
	msr	sctlr_el1, x0

7:
	dsb	sy
	isb
	/*
	 * Tail call: lr still holds our caller's return address, so
	 * __asm_invalidate_tlb_all's ret returns straight to the caller.
	 * (An unreachable ret previously followed this branch; removed.)
	 */
	b	__asm_invalidate_tlb_all
#endif

/*-----------------------------------------------------------------------*/

/*
 * apply_core_errata - apply core-specific errata workarounds.
 *
 * Weak default: only Cortex-A57 errata are handled; boards may override.
 * Clobbers: x0, x29 (x29 used as LR scratch — no stack is available yet).
 */
WEAK(apply_core_errata)

	mov	x29, lr			/* Save LR */
	/* For now, we support Cortex-A57 specific errata only */

	/* Check if we are running on a Cortex-A57 core */
	branch_if_a57_core x0, apply_a57_core_errata
0:
	mov	lr, x29			/* Restore LR */
	ret

apply_a57_core_errata:

#ifdef CONFIG_ARM_ERRATA_828024
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Disable non-allocate hint of w-b-n-a memory type */
	orr	x0, x0, #1 << 49
	/* Disable write streaming no L1-allocate threshold */
	orr	x0, x0, #3 << 25
	/* Disable write streaming no-allocate threshold */
	orr	x0, x0, #3 << 27
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_826974
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Disable speculative load execution ahead of a DMB */
	orr	x0, x0, #1 << 59
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_833471
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* FPSCR write flush.
	 * Note that in some cases where a flush is unnecessary this
	 * could impact performance.
	 */
	orr	x0, x0, #1 << 38
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_829520
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Disable Indirect Predictor bit will prevent this erratum
	 * from occurring
	 * Note that in some cases where a flush is unnecessary this
	 * could impact performance.
	 */
	orr	x0, x0, #1 << 4
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_833069
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Disable Enable Invalidates of BTB bit */
	and	x0, x0, #0xE
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif
	b	0b			/* back to common exit (restores LR) */
ENDPROC(apply_core_errata)

/*-----------------------------------------------------------------------*/

/*
 * lowlevel_init - weak default low-level CPU/SoC init.
 *
 * Initializes the GIC (when configured) and, in multi-entry setups,
 * parks slave CPUs and drops them to EL2 (and optionally EL1).
 * Clobbers: x0, x1, x3, x4, x29 (x29 used as LR scratch — no stack yet).
 */
WEAK(lowlevel_init)
	mov	x29, lr			/* Save LR */

#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	/* Distributor init is done once, by the master CPU only */
	branch_if_slave x0, 1f
	ldr	x0, =GICD_BASE
	bl	gic_init_secure
1:
#if defined(CONFIG_GICV3)
	ldr	x0, =GICR_BASE
	bl	gic_init_secure_percpu
#elif defined(CONFIG_GICV2)
	ldr	x0, =GICD_BASE
	ldr	x1, =GICC_BASE
	bl	gic_init_secure_percpu
#endif
#endif

#ifdef CONFIG_ARMV8_MULTIENTRY
	branch_if_master x0, x1, 2f

	/*
	 * Slave should wait for master clearing spin table.
	 * This sync prevents slaves observing incorrect
	 * value of spin table and jumping to wrong place.
	 */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
#ifdef CONFIG_GICV2
	ldr	x0, =GICC_BASE
#endif
	bl	gic_wait_for_interrupt
#endif

	/*
	 * All slaves will enter EL2 and optionally EL1.
	 */
	adr	x3, lowlevel_in_el2
	ldr	x4, =ES_TO_AARCH64
	bl	armv8_switch_to_el2

lowlevel_in_el2:
#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
	adr	x3, lowlevel_in_el1
	ldr	x4, =ES_TO_AARCH64
	bl	armv8_switch_to_el1

lowlevel_in_el1:
#endif

#endif /* CONFIG_ARMV8_MULTIENTRY */

2:
	mov	lr, x29			/* Restore LR */
	ret
ENDPROC(lowlevel_init)

/*
 * smp_kick_all_cpus - weak default: wake secondary CPUs.
 *
 * Uses SGI 0 through the GIC distributor when a GIC is configured;
 * otherwise a no-op. Tail-calls gic_kick_secondary_cpus (returns via
 * its ret); the trailing ret covers the no-GIC build. Clobbers: x0.
 */
WEAK(smp_kick_all_cpus)
	/* Kick secondary cpus up by SGI 0 interrupt */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	ldr	x0, =GICD_BASE
	b	gic_kick_secondary_cpus
#endif
	ret
ENDPROC(smp_kick_all_cpus)

/*-----------------------------------------------------------------------*/

/*
 * c_runtime_cpu_setup - re-install the exception vectors after
 * relocation, at the current exception level. Clobbers: x0, x1.
 */
ENTRY(c_runtime_cpu_setup)
	/* Relocate vBAR */
	adr	x0, vectors
	switch_el x1, 3f, 2f, 1f
3:	msr	vbar_el3, x0
	b	0f
2:	msr	vbar_el2, x0
	b	0f
1:	msr	vbar_el1, x0
0:

	ret
ENDPROC(c_runtime_cpu_setup)

/*
 * save_boot_params - weak hook so boards can capture registers handed
 * over by the previous boot stage; must branch back to
 * save_boot_params_ret (no stack exists at this point).
 */
WEAK(save_boot_params)
	b	save_boot_params_ret	/* back to my caller */
ENDPROC(save_boot_params)