/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <asm-offsets.h>
#include <config.h>
#include <version.h>
#include <linux/linkage.h>
#include <asm/macro.h>
#include <asm/armv8/mmu.h>

/*************************************************************************
 *
 * Startup Code (reset vector)
 *
 * AArch64 entry point. May be entered at EL3, EL2 or EL1; sets up the
 * exception vector base, secure/IRQ routing (EL3 only), FP/SIMD access
 * and the timer frequency, then parks slave CPUs on the spin table and
 * sends the master CPU into _main.
 *
 *************************************************************************/

.globl	_start
_start:
	b	reset			/* first instruction of the image */

	.align 3			/* 8-byte align the .quad data below */

.globl	_TEXT_BASE
_TEXT_BASE:
	.quad	CONFIG_SYS_TEXT_BASE	/* link-time base address of the image */

/*
 * These are defined in the linker script.
 * Stored as offsets from _start so they stay valid after relocation.
 */
.globl	_end_ofs
_end_ofs:
	.quad	_end - _start		/* offset of image end */

.globl	_bss_start_ofs
_bss_start_ofs:
	.quad	__bss_start - _start	/* offset of BSS start */

.globl	_bss_end_ofs
_bss_end_ofs:
	.quad	__bss_end - _start	/* offset of BSS end */

reset:
	/*
	 * Could be EL3/EL2/EL1, Initial State:
	 * Little Endian, MMU Disabled, i/dCache Disabled
	 *
	 * switch_el (asm/macro.h) dispatches on the current exception
	 * level to one of the numeric labels below.
	 */
	adr	x0, vectors		/* x0 = exception vector table base */
	switch_el x1, 3f, 2f, 1f
3:	msr	vbar_el3, x0		/* EL3: install vectors */
	mrs	x0, scr_el3
	orr	x0, x0, #0xf		/* SCR_EL3.NS|IRQ|FIQ|EA */
	msr	scr_el3, x0		/* lower ELs are non-secure; route
					   IRQ/FIQ/ext-abort to EL3 */
	msr	cptr_el3, xzr		/* Enable FP/SIMD (no traps at EL3) */
	ldr	x0, =COUNTER_FREQUENCY
	msr	cntfrq_el0, x0		/* Initialize CNTFRQ (only writable
					   at highest implemented EL) */
	b	0f
2:	msr	vbar_el2, x0		/* EL2: install vectors */
	mov	x0, #0x33ff
	msr	cptr_el2, x0		/* Enable FP/SIMD */
	b	0f
1:	msr	vbar_el1, x0		/* EL1: install vectors */
	mov	x0, #3 << 20		/* CPACR_EL1.FPEN = 0b11 */
	msr	cpacr_el1, x0		/* Enable FP/SIMD */
0:

	/*
	 * Cache/BPB/TLB Invalidate
	 * i-cache is invalidated before enabled in icache_enable()
	 * tlb is invalidated before mmu is enabled in dcache_enable()
	 * d-cache is invalidated before enabled in dcache_enable()
	 */

	/* Processor specific initialization */
	bl	lowlevel_init

	/* Split paths: only the master (boot) CPU continues into _main */
	branch_if_master x0, x1, master_cpu

	/*
	 * Slave CPUs
	 * Spin-table protocol: sleep in wfe until the release address
	 * becomes non-zero, then jump to it.
	 */
slave_cpu:
	wfe				/* wait for an event/SEV from master */
	ldr	x1, =CPU_RELEASE_ADDR
	ldr	x0, [x1]		/* x0 = entry point written by master */
	cbz	x0, slave_cpu		/* still zero: keep spinning */
	br	x0			/* branch to the given address */

	/*
	 * Master CPU
	 */
master_cpu:
	bl	_main			/* enter C runtime setup; no return */
/*-----------------------------------------------------------------------*/

/*
 * lowlevel_init - weak default platform init hook, called from reset.
 *
 * Initializes the GIC (distributor once on the master, per-CPU state on
 * every CPU) when a GICv2/GICv3 is configured, then parks slave CPUs in
 * gic_wait_for_interrupt and drops them to EL2 (optionally EL1).
 * Master CPUs return to the caller.
 *
 * Uses x29 to preserve LR across the nested bl calls (no stack yet).
 * Boards may override this weak symbol with their own implementation.
 */
WEAK(lowlevel_init)
	mov	x29, lr			/* Save LR */

#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	/* Distributor init is global: master only; slaves skip to 1f */
	branch_if_slave x0, 1f
	ldr	x0, =GICD_BASE
	bl	gic_init_secure
1:
#if defined(CONFIG_GICV3)
	ldr	x0, =GICR_BASE		/* GICv3: per-CPU redistributor */
	bl	gic_init_secure_percpu
#elif defined(CONFIG_GICV2)
	ldr	x0, =GICD_BASE		/* GICv2: distributor + CPU interface */
	ldr	x1, =GICC_BASE
	bl	gic_init_secure_percpu
#endif
#endif

	branch_if_master x0, x1, 2f

	/*
	 * Slave should wait for master clearing spin table.
	 * This sync prevents slaves observing an incorrect
	 * value of the spin table and jumping to the wrong place.
	 */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
#ifdef CONFIG_GICV2
	ldr	x0, =GICC_BASE		/* GICv2 needs the CPU interface base */
#endif
	bl	gic_wait_for_interrupt
#endif

	/*
	 * All slaves will enter EL2 and optionally EL1.
	 */
	bl	armv8_switch_to_el2
#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
	bl	armv8_switch_to_el1
#endif

2:
	mov	lr, x29			/* Restore LR */
	ret
ENDPROC(lowlevel_init)

/*
 * smp_kick_all_cpus - weak default: wake secondaries via SGI 0.
 * Clobbers x0; preserves LR in x29 across the call.
 */
WEAK(smp_kick_all_cpus)
	/* Kick secondary cpus up by SGI 0 interrupt */
	mov	x29, lr			/* Save LR */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	ldr	x0, =GICD_BASE
	bl	gic_kick_secondary_cpus
#endif
	mov	lr, x29			/* Restore LR */
	ret
ENDPROC(smp_kick_all_cpus)

/*-----------------------------------------------------------------------*/

/*
 * c_runtime_cpu_setup - re-install the exception vector base after
 * relocation, for whichever EL we are running at.
 */
ENTRY(c_runtime_cpu_setup)
	/* Relocate vBAR */
	adr	x0, vectors		/* PC-relative: post-relocation address */
	switch_el x1, 3f, 2f, 1f
3:	msr	vbar_el3, x0
	b	0f
2:	msr	vbar_el2, x0
	b	0f
1:	msr	vbar_el1, x0
0:

	ret
ENDPROC(c_runtime_cpu_setup)