/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <asm-offsets.h>
#include <config.h>
#include <linux/linkage.h>
#include <asm/macro.h>
#include <asm/armv8/mmu.h>

/*************************************************************************
 *
 * Startup Code (reset vector)
 *
 * AArch64 U-Boot entry point. Runs with MMU and caches disabled, at
 * whatever exception level (EL3/EL2/EL1) the platform hands us.
 * GAS syntax; numeric labels (0:, 1:, ...) are local, referenced with
 * the f (forward) / b (backward) suffix.
 *
 *************************************************************************/

.globl	_start
_start:
#ifdef CONFIG_ENABLE_ARM_SOC_BOOT0_HOOK
/*
 * Various SoCs need something special and SoC-specific up front in
 * order to boot, allow them to set that in their boot0.h file and then
 * use it here.
 */
#include <asm/arch/boot0.h>
#else
	b	reset
#endif

	.align 3		/* 8-byte align the .quad data below */

.globl	_TEXT_BASE
_TEXT_BASE:
	.quad	CONFIG_SYS_TEXT_BASE	/* link-time base address of U-Boot */

/*
 * These are defined in the linker script.
 * Stored as offsets from _start so they stay valid before relocation.
 */
.globl	_end_ofs
_end_ofs:
	.quad	_end - _start

.globl	_bss_start_ofs
_bss_start_ofs:
	.quad	__bss_start - _start

.globl	_bss_end_ofs
_bss_end_ofs:
	.quad	__bss_end - _start

reset:
	/* Allow the board to save important registers */
	b	save_boot_params
.globl	save_boot_params_ret
save_boot_params_ret:

#ifdef CONFIG_SYS_RESET_SCTRL
	bl reset_sctrl
#endif
	/*
	 * Could be EL3/EL2/EL1, Initial State:
	 * Little Endian, MMU Disabled, i/dCache Disabled
	 *
	 * switch_el (asm/macro.h) reads CurrentEL and branches to the
	 * label matching the running exception level; each arm sets the
	 * vector base and un-traps FP/SIMD for that EL.
	 */
	adr	x0, vectors		/* x0 = address of exception vector table */
	switch_el x1, 3f, 2f, 1f
3:	msr	vbar_el3, x0
	mrs	x0, scr_el3
	orr	x0, x0, #0xf			/* SCR_EL3.NS|IRQ|FIQ|EA */
	msr	scr_el3, x0
	msr	cptr_el3, xzr			/* Enable FP/SIMD */
#ifdef COUNTER_FREQUENCY
	ldr	x0, =COUNTER_FREQUENCY
	msr	cntfrq_el0, x0			/* Initialize CNTFRQ */
#endif
	b	0f
2:	msr	vbar_el2, x0
	mov	x0, #0x33ff
	msr	cptr_el2, x0			/* Enable FP/SIMD */
	b	0f
1:	msr	vbar_el1, x0
	mov	x0, #3 << 20			/* CPACR_EL1.FPEN = 0b11 */
	msr	cpacr_el1, x0			/* Enable FP/SIMD */
0:

	/* Apply ARM core specific erratas */
	bl	apply_core_errata

	/*
	 * Cache/BPB/TLB Invalidate
	 * i-cache is invalidated before enabled in icache_enable()
	 * tlb is invalidated before mmu is enabled in dcache_enable()
	 * d-cache is invalidated before enabled in dcache_enable()
	 */

	/* Processor specific initialization */
	bl	lowlevel_init

#if CONFIG_IS_ENABLED(ARMV8_SPIN_TABLE)
	branch_if_master x0, x1, master_cpu
	b	spin_table_secondary_jump
	/* never return */
#elif defined(CONFIG_ARMV8_MULTIENTRY)
	branch_if_master x0, x1, master_cpu

	/*
	 * Slave CPUs: park in WFE until the master publishes a jump
	 * address at CPU_RELEASE_ADDR, then branch to it.
	 */
slave_cpu:
	wfe
	ldr	x1, =CPU_RELEASE_ADDR
	ldr	x0, [x1]
	cbz	x0, slave_cpu		/* still zero: not released yet */
	br	x0			/* branch to the given address */
#endif /* CONFIG_ARMV8_MULTIENTRY */
master_cpu:
	bl	_main

#ifdef CONFIG_SYS_RESET_SCTRL
/*
 * reset_sctrl: sanitize SCTLR at the current EL.
 * Masking with 0xfdfffffa clears bits 0, 2 and 25 — per the ARMv8 ARM
 * these are SCTLR_ELx.M (MMU), .C (D-cache) and .EE (endianness), i.e.
 * MMU off, D-cache off, little-endian — TODO confirm mask intent.
 */
reset_sctrl:
	switch_el x1, 3f, 2f, 1f
3:
	mrs	x0, sctlr_el3
	b	0f
2:
	mrs	x0, sctlr_el2
	b	0f
1:
	mrs	x0, sctlr_el1

0:
	ldr	x1, =0xfdfffffa
	and	x0, x0, x1

	switch_el x1, 6f, 5f, 4f
6:
	msr	sctlr_el3, x0
	b	7f
5:
	msr	sctlr_el2, x0
	b	7f
4:
	msr	sctlr_el1, x0

7:
	dsb	sy
	isb
	/*
	 * Tail-call: __asm_invalidate_tlb_all returns directly to our
	 * caller via the original lr (never saved here).
	 */
	b	__asm_invalidate_tlb_all
	ret	/* NOTE(review): unreachable — the branch above never falls through */
#endif

/*-----------------------------------------------------------------------*/

/*
 * apply_core_errata: apply enabled CPU erratum workarounds.
 * lr is preserved in x29 because branch_if_a57_core may route through
 * code that clobbers lr. Clobbers x0 (and whatever the macros use).
 */
WEAK(apply_core_errata)

	mov	x29, lr			/* Save LR */
	/* For now, we support Cortex-A57 specific errata only */

	/* Check if we are running on a Cortex-A57 core */
	branch_if_a57_core x0, apply_a57_core_errata
0:
	mov	lr, x29			/* Restore LR */
	ret

/* Read-modify-write CPUACTLR_EL1 (S3_1_c15_c2_0) per enabled erratum. */
apply_a57_core_errata:

#ifdef CONFIG_ARM_ERRATA_828024
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Disable non-allocate hint of w-b-n-a memory type */
	orr	x0, x0, #1 << 49
	/* Disable write streaming no L1-allocate threshold */
	orr	x0, x0, #3 << 25
	/* Disable write streaming no-allocate threshold */
	orr	x0, x0, #3 << 27
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_826974
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Disable speculative load execution ahead of a DMB */
	orr	x0, x0, #1 << 59
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_833471
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/*
	 * FPSCR write flush.
	 * Note that in some cases where a flush is unnecessary this
	 * could impact performance.
	 */
	orr	x0, x0, #1 << 38
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_829520
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/*
	 * Disable Indirect Predictor bit will prevent this erratum
	 * from occurring.
	 * Note that in some cases where a flush is unnecessary this
	 * could impact performance.
	 */
	orr	x0, x0, #1 << 4
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_833069
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Disable Enable Invalidates of BTB bit */
	/*
	 * NOTE(review): 'and x0, x0, #0xE' keeps only bits [3:1] and
	 * zeroes every other CPUACTLR_EL1 bit (including any set by the
	 * errata above) — looks like it should clear a single bit;
	 * verify against the Cortex-A57 erratum 833069 notice.
	 */
	and	x0, x0, #0xE
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif
	b	0b			/* back to restore-LR/ret in apply_core_errata */
ENDPROC(apply_core_errata)

/*-----------------------------------------------------------------------*/

/*
 * lowlevel_init (weak default): GIC setup and, for multi-entry
 * configurations, routing of secondary CPUs down to EL2/EL1.
 * Leaf-style: lr is parked in x29 across the bl calls.
 */
WEAK(lowlevel_init)
	mov	x29, lr			/* Save LR */

#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	branch_if_slave x0, 1f
	/* Master only: one-time distributor init */
	ldr	x0, =GICD_BASE
	bl	gic_init_secure
1:
	/* Every CPU: per-CPU interface init */
#if defined(CONFIG_GICV3)
	ldr	x0, =GICR_BASE
	bl	gic_init_secure_percpu
#elif defined(CONFIG_GICV2)
	ldr	x0, =GICD_BASE
	ldr	x1, =GICC_BASE
	bl	gic_init_secure_percpu
#endif
#endif

#ifdef CONFIG_ARMV8_MULTIENTRY
	branch_if_master x0, x1, 2f

	/*
	 * Slave should wait for master clearing spin table.
	 * This sync prevents slaves observing an incorrect
	 * value of spin table and jumping to a wrong place.
	 */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
#ifdef CONFIG_GICV2
	ldr	x0, =GICC_BASE
#endif
	bl	gic_wait_for_interrupt
#endif

	/*
	 * All slaves will enter EL2 and optionally EL1.
	 * x3 = resume address passed to the EL-switch helper.
	 */
	adr	x3, lowlevel_in_el2
	ldr	x4, =ES_TO_AARCH64
	bl	armv8_switch_to_el2

lowlevel_in_el2:
#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
	adr	x3, lowlevel_in_el1
	ldr	x4, =ES_TO_AARCH64
	bl	armv8_switch_to_el1

lowlevel_in_el1:
#endif

#endif /* CONFIG_ARMV8_MULTIENTRY */

2:
	mov	lr, x29			/* Restore LR */
	ret
ENDPROC(lowlevel_init)

/*
 * smp_kick_all_cpus (weak default): wake secondaries.
 * Tail-calls gic_kick_secondary_cpus when a GIC is configured
 * (it returns to our caller); plain ret otherwise.
 */
WEAK(smp_kick_all_cpus)
	/* Kick secondary cpus up by SGI 0 interrupt */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	ldr	x0, =GICD_BASE
	b	gic_kick_secondary_cpus
#endif
	ret
ENDPROC(smp_kick_all_cpus)

/*-----------------------------------------------------------------------*/

/*
 * c_runtime_cpu_setup: called after relocation; repoint VBAR at the
 * current EL to the (relocated) vector table.
 */
ENTRY(c_runtime_cpu_setup)
	/* Relocate vBAR */
	adr	x0, vectors
	switch_el x1, 3f, 2f, 1f
3:	msr	vbar_el3, x0
	b	0f
2:	msr	vbar_el2, x0
	b	0f
1:	msr	vbar_el1, x0
0:

	ret
ENDPROC(c_runtime_cpu_setup)

/*
 * save_boot_params (weak default): boards override this to stash
 * registers the boot ROM passed in; the default just resumes reset.
 */
WEAK(save_boot_params)
	b	save_boot_params_ret	/* back to my caller */
ENDPROC(save_boot_params)