/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <asm-offsets.h>
#include <config.h>
#include <linux/linkage.h>
#include <asm/macro.h>
#include <asm/armv8/mmu.h>

/*************************************************************************
 *
 * Startup Code (reset vector)
 *
 * AArch64 entry point. May be entered at EL3, EL2 or EL1; the reset
 * path installs the exception vectors for the current EL, enables
 * FP/SIMD access, applies core errata workarounds and runs the
 * platform lowlevel init before falling into the C runtime via _main.
 *
 *************************************************************************/

.globl _start
_start:
	b	reset

	.align 3

.globl	_TEXT_BASE
_TEXT_BASE:
	.quad	CONFIG_SYS_TEXT_BASE

/*
 * These are defined in the linker script.
 */
.globl _end_ofs
_end_ofs:
	.quad	_end - _start

.globl _bss_start_ofs
_bss_start_ofs:
	.quad	__bss_start - _start

.globl _bss_end_ofs
_bss_end_ofs:
	.quad	__bss_end - _start

reset:
	/*
	 * Could be EL3/EL2/EL1, Initial State:
	 * Little Endian, MMU Disabled, i/dCache Disabled
	 *
	 * switch_el dispatches on CurrentEL; each branch installs the
	 * vector base for its EL and unblocks FP/SIMD traps, then all
	 * paths rejoin at the local label 0:.
	 */
	adr	x0, vectors
	switch_el x1, 3f, 2f, 1f
3:	msr	vbar_el3, x0
	mrs	x0, scr_el3
	orr	x0, x0, #0xf			/* SCR_EL3.NS|IRQ|FIQ|EA */
	msr	scr_el3, x0
	msr	cptr_el3, xzr			/* Enable FP/SIMD */
#ifdef COUNTER_FREQUENCY
	ldr	x0, =COUNTER_FREQUENCY
	msr	cntfrq_el0, x0			/* Initialize CNTFRQ */
#endif
	b	0f
2:	msr	vbar_el2, x0
	mov	x0, #0x33ff
	msr	cptr_el2, x0			/* Enable FP/SIMD */
	b	0f
1:	msr	vbar_el1, x0
	mov	x0, #3 << 20			/* CPACR_EL1.FPEN = 0b11 */
	msr	cpacr_el1, x0			/* Enable FP/SIMD */
0:

	/* Apply ARM core specific errata */
	bl	apply_core_errata

	/*
	 * Cache/BPB/TLB Invalidate
	 * i-cache is invalidated before enabled in icache_enable()
	 * tlb is invalidated before mmu is enabled in dcache_enable()
	 * d-cache is invalidated before enabled in dcache_enable()
	 */

	/* Processor specific initialization */
	bl	lowlevel_init

#ifdef CONFIG_ARMV8_MULTIENTRY
	branch_if_master x0, x1, master_cpu

	/*
	 * Slave CPUs: park in WFE until the master publishes a jump
	 * address in the spin table at CPU_RELEASE_ADDR.
	 */
slave_cpu:
	wfe
	ldr	x1, =CPU_RELEASE_ADDR
	ldr	x0, [x1]
	cbz	x0, slave_cpu			/* spurious wakeup: keep waiting */
	br	x0				/* branch to the given address */
master_cpu:
	/* On the master CPU */
#endif /* CONFIG_ARMV8_MULTIENTRY */

	bl	_main

/*-----------------------------------------------------------------------*/

/*
 * apply_core_errata - apply CPU-specific errata workarounds.
 *
 * Leaf-style routine (LR kept in x29, no stack use). For now only
 * Cortex-A57 errata are handled; on other cores this is a no-op.
 * Clobbers: x0, x29.
 */
WEAK(apply_core_errata)

	mov	x29, lr			/* Save LR */
	/* For now, we support Cortex-A57 specific errata only */

	/* Check if we are running on a Cortex-A57 core */
	branch_if_a57_core x0, apply_a57_core_errata
0:
	mov	lr, x29			/* Restore LR */
	ret

apply_a57_core_errata:

#ifdef CONFIG_ARM_ERRATA_828024
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Disable non-allocate hint of w-b-n-a memory type */
	orr	x0, x0, #1 << 49
	/* Disable write streaming no L1-allocate threshold */
	orr	x0, x0, #3 << 25
	/* Disable write streaming no-allocate threshold */
	orr	x0, x0, #3 << 27
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_826974
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Disable speculative load execution ahead of a DMB */
	orr	x0, x0, #1 << 59
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_833069
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/*
	 * Clear the "Enable Invalidates of BTB" bit (bit 0).
	 *
	 * Fix: this used to be "and x0, x0, #0xE", which cleared bit 0
	 * but also wiped bits [63:4] of CPUACTLR_EL1 - destroying the
	 * workaround bits set above for errata 828024/826974 (bits 49,
	 * 27:25, 59) when those are enabled together with this one.
	 * Use bic so only the intended bit is cleared.
	 */
	bic	x0, x0, #0x1
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif
	b	0b
ENDPROC(apply_core_errata)

/*-----------------------------------------------------------------------*/

/*
 * lowlevel_init - weak default platform init.
 *
 * Single-entry builds: initialize the GIC distributor only.
 * Multi-entry builds: master initializes the distributor, every CPU
 * initializes its per-CPU GIC interface; slaves then wait for the
 * master's SGI, drop to EL2 (optionally EL1) and return.
 * Clobbers: x0, x1, x29 (LR kept in x29, no stack use).
 */
WEAK(lowlevel_init)
	mov	x29, lr			/* Save LR */

#ifndef CONFIG_ARMV8_MULTIENTRY
	/*
	 * For single-entry systems the lowlevel init is very simple.
	 * NOTE(review): GICD_BASE is used unconditionally here, even
	 * without CONFIG_GICV2/CONFIG_GICV3 - confirm all single-entry
	 * boards define it.
	 */
	ldr	x0, =GICD_BASE
	bl	gic_init_secure

#else /* CONFIG_ARMV8_MULTIENTRY is set */

#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	branch_if_slave x0, 1f
	ldr	x0, =GICD_BASE
	bl	gic_init_secure
1:
#if defined(CONFIG_GICV3)
	ldr	x0, =GICR_BASE
	bl	gic_init_secure_percpu
#elif defined(CONFIG_GICV2)
	ldr	x0, =GICD_BASE
	ldr	x1, =GICC_BASE
	bl	gic_init_secure_percpu
#endif
#endif

	branch_if_master x0, x1, 2f

	/*
	 * Slave should wait for master clearing spin table.
	 * This sync prevents slaves observing incorrect
	 * value of spin table and jumping to wrong place.
	 */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
#ifdef CONFIG_GICV2
	ldr	x0, =GICC_BASE
#endif
	bl	gic_wait_for_interrupt
#endif

	/*
	 * All slaves will enter EL2 and optionally EL1.
	 */
	bl	armv8_switch_to_el2
#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
	bl	armv8_switch_to_el1
#endif

#endif /* CONFIG_ARMV8_MULTIENTRY */

2:
	mov	lr, x29			/* Restore LR */
	ret
ENDPROC(lowlevel_init)

/*
 * smp_kick_all_cpus - weak default: wake secondaries via SGI 0.
 * No-op unless a GIC is configured. Clobbers: x0, x29.
 */
WEAK(smp_kick_all_cpus)
	/* Kick secondary cpus up by SGI 0 interrupt */
	mov	x29, lr			/* Save LR */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	ldr	x0, =GICD_BASE
	bl	gic_kick_secondary_cpus
#endif
	mov	lr, x29			/* Restore LR */
	ret
ENDPROC(smp_kick_all_cpus)

/*-----------------------------------------------------------------------*/

/*
 * c_runtime_cpu_setup - re-install the exception vectors after
 * relocation so VBAR_ELx points at the relocated table.
 * Clobbers: x0, x1.
 */
ENTRY(c_runtime_cpu_setup)
	/* Relocate vBAR */
	adr	x0, vectors
	switch_el x1, 3f, 2f, 1f
3:	msr	vbar_el3, x0
	b	0f
2:	msr	vbar_el2, x0
	b	0f
1:	msr	vbar_el1, x0
0:

	ret
ENDPROC(c_runtime_cpu_setup)