/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <asm-offsets.h>
#include <config.h>
#include <version.h>
#include <linux/linkage.h>
#include <asm/macro.h>
#include <asm/armv8/mmu.h>

/*************************************************************************
 *
 * Startup Code (reset vector)
 *
 *************************************************************************/

.globl	_start
_start:
	b	reset

	.align 3

.globl	_TEXT_BASE
_TEXT_BASE:
	.quad	CONFIG_SYS_TEXT_BASE

/*
 * These are defined in the linker script.
 */
.globl	_end_ofs
_end_ofs:
	.quad	_end - _start

.globl	_bss_start_ofs
_bss_start_ofs:
	.quad	__bss_start - _start

.globl	_bss_end_ofs
_bss_end_ofs:
	.quad	__bss_end - _start

reset:
	/*
	 * Could be EL3/EL2/EL1, Initial State:
	 * Little Endian, MMU Disabled, i/dCache Disabled
	 */
	adr	x0, vectors
	switch_el x1, 3f, 2f, 1f
3:	msr	vbar_el3, x0
	mrs	x0, scr_el3
	orr	x0, x0, #0xf			/* SCR_EL3.NS|IRQ|FIQ|EA */
	msr	scr_el3, x0
	msr	cptr_el3, xzr			/* Enable FP/SIMD */
	ldr	x0, =COUNTER_FREQUENCY
	msr	cntfrq_el0, x0			/* Initialize CNTFRQ */
	b	0f
2:	msr	vbar_el2, x0
	mov	x0, #0x33ff
	msr	cptr_el2, x0			/* Enable FP/SIMD */
	b	0f
1:	msr	vbar_el1, x0
	mov	x0, #3 << 20
	msr	cpacr_el1, x0			/* Enable FP/SIMD */
0:

	/* Apply ARM core specific errata */
	bl	apply_core_errata

	/*
	 * Cache/BPB/TLB Invalidate
	 * i-cache is invalidated before being enabled in icache_enable()
	 * TLB is invalidated before the MMU is enabled in dcache_enable()
	 * d-cache is invalidated before being enabled in dcache_enable()
	 */

	/* Processor specific initialization */
	bl	lowlevel_init

#ifdef CONFIG_ARMV8_MULTIENTRY
	branch_if_master x0, x1, master_cpu

	/*
	 * Slave CPUs: spin in wfe until the master publishes a non-zero
	 * entry point in the spin table at CPU_RELEASE_ADDR.
	 */
slave_cpu:
	wfe
	ldr	x1, =CPU_RELEASE_ADDR
	ldr	x0, [x1]
	cbz	x0, slave_cpu
	br	x0			/* branch to the given address */
master_cpu:
	/* On the master CPU */
#endif /* CONFIG_ARMV8_MULTIENTRY */

	bl	_main

/*-----------------------------------------------------------------------*/

WEAK(apply_core_errata)

	mov	x29, lr			/* Save LR */
	/* For now, we support Cortex-A57 specific errata only */

	/* Check if we are running on a Cortex-A57 core */
	branch_if_a57_core x0, apply_a57_core_errata
0:
	mov	lr, x29			/* Restore LR */
	ret

apply_a57_core_errata:

#ifdef CONFIG_ARM_ERRATA_828024
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Disable non-allocate hint of w-b-n-a memory type */
	orr	x0, x0, #0x1 << 49
	/* Disable write streaming no L1-allocate threshold */
	orr	x0, x0, #0x3 << 25
	/* Disable write streaming no-allocate threshold */
	orr	x0, x0, #0x3 << 27
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_826974
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Disable speculative load execution ahead of a DMB */
	orr	x0, x0, #0x1 << 59
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_833069
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Clear the "Enable invalidates of BTB" bit, keep other bits */
	and	x0, x0, #~0x1
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif
	b	0b
ENDPROC(apply_core_errata)

/*-----------------------------------------------------------------------*/
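
/*
 * lowlevel_init below is a weak default. A board or SoC that needs
 * additional early initialization (e.g. clocks or pin muxing) can
 * provide its own strong lowlevel_init, which the linker will use
 * instead of this one.
 */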
WEAK(lowlevel_init)
	mov	x29, lr			/* Save LR */

#ifndef CONFIG_ARMV8_MULTIENTRY
	/*
	 * For single-entry systems the lowlevel init is very simple.
	 */
	ldr	x0, =GICD_BASE
	bl	gic_init_secure

#else /* CONFIG_ARMV8_MULTIENTRY is set */

#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	branch_if_slave x0, 1f
	ldr	x0, =GICD_BASE
	bl	gic_init_secure
1:
#if defined(CONFIG_GICV3)
	ldr	x0, =GICR_BASE
	bl	gic_init_secure_percpu
#elif defined(CONFIG_GICV2)
	ldr	x0, =GICD_BASE
	ldr	x1, =GICC_BASE
	bl	gic_init_secure_percpu
#endif
#endif

	branch_if_master x0, x1, 2f

	/*
	 * Slaves should wait for the master to finish clearing the
	 * spin table. This sync prevents slaves from observing a stale
	 * spin-table value and jumping to the wrong place.
	 */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
#ifdef CONFIG_GICV2
	ldr	x0, =GICC_BASE
#endif
	bl	gic_wait_for_interrupt
#endif

	/*
	 * All slaves will enter EL2 and optionally EL1.
	 */
	bl	armv8_switch_to_el2
#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
	bl	armv8_switch_to_el1
#endif

#endif /* CONFIG_ARMV8_MULTIENTRY */

2:
	mov	lr, x29			/* Restore LR */
	ret
ENDPROC(lowlevel_init)

WEAK(smp_kick_all_cpus)
	/* Kick secondary CPUs up with an SGI 0 interrupt */
	mov	x29, lr			/* Save LR */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	ldr	x0, =GICD_BASE
	bl	gic_kick_secondary_cpus
#endif
	mov	lr, x29			/* Restore LR */
	ret
ENDPROC(smp_kick_all_cpus)

/*-----------------------------------------------------------------------*/

ENTRY(c_runtime_cpu_setup)
	/* Relocate VBAR to the vector table of the relocated image */
	adr	x0, vectors
	switch_el x1, 3f, 2f, 1f
3:	msr	vbar_el3, x0
	b	0f
2:	msr	vbar_el2, x0
	b	0f
1:	msr	vbar_el1, x0
0:

	ret
ENDPROC(c_runtime_cpu_setup)
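
/*
 * Usage sketch (an assumption for illustration, not part of the boot
 * flow in this file): to release a secondary CPU parked in the
 * slave_cpu loop above, the master stores the 64-bit entry address to
 * the spin table at CPU_RELEASE_ADDR and then wakes the slaves, for
 * example via smp_kick_all_cpus or a plain sev:
 *
 *	adr	x0, secondary_entry	(hypothetical entry point)
 *	ldr	x1, =CPU_RELEASE_ADDR
 *	str	x0, [x1]		(publish the address)
 *	dsb	sy			(make the store visible)
 *	sev				(wake CPUs waiting in wfe)
 */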