/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2011 by Kevin Cernekee (cernekee@gmail.com)
 *
 * Reset/NMI/re-entry vectors for BMIPS processors
 */

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/regdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/addrspace.h>
#include <asm/hazards.h>
#include <asm/bmips.h>

	.macro	BARRIER
	.set	mips32
	_ssnop
	_ssnop
	_ssnop
	.set	mips0
	.endm

/***********************************************************************
 * Alternate CPU1 startup vector for BMIPS4350
 *
 * On some systems the bootloader has already started CPU1 and configured
 * it to resume execution at 0x8000_0200 (!BEV IV vector) when it is
 * triggered by the SW1 interrupt. If that is the case we try to move
 * it to a more convenient place: BMIPS_WARM_RESTART_VEC @ 0x8000_0380.
 ***********************************************************************/
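/*
 * Note: CPU1 has no kernel stack at this point, so the code below only
 * touches the k0/k1 scratch registers.
 */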
LEAF(bmips_smp_movevec)
	/* jump to the uncached kseg1 alias of this code */
	la	k0, 1f
	li	k1, CKSEG1
	or	k0, k1
	jr	k0

1:
	/* clear IV, pending IPIs */
	mtc0	zero, CP0_CAUSE

	/* re-enable IRQs to wait for SW1 */
	li	k0, ST0_IE | ST0_BEV | STATUSF_IP1
	mtc0	k0, CP0_STATUS

	/* set up CPU1 CBR; move BASE to 0xa000_0000 */
	li	k0, 0xff400000
	mtc0	k0, $22, 6
	/* set up relocation vector address based on thread ID */
	mfc0	k1, $22, 3
	srl	k1, 16
	andi	k1, 0x8000
	or	k1, CKSEG1 | BMIPS_RELO_VECTOR_CONTROL_0
	or	k0, k1
	li	k1, 0xa0080000
	sw	k1, 0(k0)

	/* wait here for SW1 interrupt from bmips_boot_secondary() */
	wait

	la	k0, bmips_reset_nmi_vec
	li	k1, CKSEG1
	or	k0, k1
	jr	k0
END(bmips_smp_movevec)

/***********************************************************************
 * Reset/NMI vector
 * For BMIPS processors that can relocate their exception vectors, this
 * entire function gets copied to 0x8000_0000.
 ***********************************************************************/
NESTED(bmips_reset_nmi_vec, PT_SIZE, sp)
	.set	push
	.set	noat
	.align	4

#ifdef CONFIG_SMP
	/* if the NMI bit is clear, assume this is a CPU1 reset instead */
	li	k1, (1 << 19)
	mfc0	k0, CP0_STATUS
	and	k0, k1
	beqz	k0, soft_reset

#if defined(CONFIG_CPU_BMIPS5000)
	mfc0	k0, CP0_PRID
	li	k1, PRID_IMP_BMIPS5000
	/* mask with PRID_IMP_BMIPS5000 to cover both variants */
	andi	k0, PRID_IMP_BMIPS5000
	bne	k0, k1, 1f

	/* if we're not on core 0, this must be the SMP boot signal */
	li	k1, (3 << 25)
	mfc0	k0, $22
	and	k0, k1
	bnez	k0, bmips_smp_entry
1:
#endif /* CONFIG_CPU_BMIPS5000 */
#endif /* CONFIG_SMP */

	/* nope, it's just a regular NMI */
	SAVE_ALL
	move	a0, sp

	/* clear EXL, ERL, BEV so that TLB refills still work */
	mfc0	k0, CP0_STATUS
	li	k1, ST0_ERL | ST0_EXL | ST0_BEV | ST0_IE
	or	k0, k1
	xor	k0, k1
	mtc0	k0, CP0_STATUS
	BARRIER

	/* jump to the NMI handler function */
	la	k0, nmi_handler
	jr	k0

	RESTORE_ALL
	.set	arch=r4000
	eret

#ifdef CONFIG_SMP
soft_reset:

#if defined(CONFIG_CPU_BMIPS5000)
	mfc0	k0, CP0_PRID
	andi	k0, 0xff00
	li	k1, PRID_IMP_BMIPS5200
	bne	k0, k1, bmips_smp_entry

	/* if running on TP 1, jump to bmips_smp_entry */
	mfc0	k0, $22
	li	k1, (1 << 24)
	and	k1, k0
	bnez	k1, bmips_smp_entry
	nop

	/*
	 * Running on TP0, which cannot be core 0 (the boot core).
	 * Check for a soft reset, which indicates a warm boot.
	 */
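	/* CP0_STATUS ($12) bit 20 is the SR (soft reset) flag */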
	mfc0	k0, $12
	li	k1, (1 << 20)
	and	k0, k1
	beqz	k0, bmips_smp_entry

	/*
	 * Warm boot.
	 * Cache init is only done on TP0
	 */
	la	k0, bmips_5xxx_init
	jalr	k0
	nop

	b	bmips_smp_entry
	nop
#endif

/***********************************************************************
 * CPU1 reset vector (used for the initial boot only)
 * This is still part of bmips_reset_nmi_vec().
 ***********************************************************************/

bmips_smp_entry:

	/* set up CP0 STATUS; enable FPU */
	li	k0, 0x30000000
	mtc0	k0, CP0_STATUS
	BARRIER

	/* set local CP0 CONFIG to make kseg0 cacheable, write-back */
	mfc0	k0, CP0_CONFIG
	ori	k0, 0x07
	xori	k0, 0x04
	mtc0	k0, CP0_CONFIG

	mfc0	k0, CP0_PRID
	andi	k0, 0xff00
#if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380)
	li	k1, PRID_IMP_BMIPS43XX
	bne	k0, k1, 2f

	/* initialize CPU1's local I-cache */
	li	k0, 0x80000000
	li	k1, 0x80010000
	mtc0	zero, $28
	mtc0	zero, $28, 1
	BARRIER

1:	cache	Index_Store_Tag_I, 0(k0)
	addiu	k0, 16
	bne	k0, k1, 1b

	b	3f
2:
#endif /* CONFIG_CPU_BMIPS4350 || CONFIG_CPU_BMIPS4380 */
#if defined(CONFIG_CPU_BMIPS5000)
	/* mask with PRID_IMP_BMIPS5000 to cover both variants */
	li	k1, PRID_IMP_BMIPS5000
	andi	k0, PRID_IMP_BMIPS5000
	bne	k0, k1, 3f

	/* set exception vector base */
	la	k0, ebase
	lw	k0, 0(k0)
	mtc0	k0, $15, 1
	BARRIER
#endif /* CONFIG_CPU_BMIPS5000 */
3:
	/* jump back to kseg0 in case we need to remap the kseg1 area */
	la	k0, 1f
	jr	k0
1:
	la	k0, bmips_enable_xks01
	jalr	k0

	/* use temporary stack to set up upper memory TLB */
	li	sp, BMIPS_WARM_RESTART_VEC
	la	k0, plat_wired_tlb_setup
	jalr	k0

	/* switch to permanent stack and continue booting */

	.global bmips_secondary_reentry
bmips_secondary_reentry:
	la	k0, bmips_smp_boot_sp
	lw	sp, 0(k0)
	la	k0, bmips_smp_boot_gp
	lw	gp, 0(k0)
	la	k0, start_secondary
	jr	k0

#endif /* CONFIG_SMP */

	.align	4
	.global bmips_reset_nmi_vec_end
bmips_reset_nmi_vec_end:

END(bmips_reset_nmi_vec)

	.set	pop

/***********************************************************************
 * CPU1 warm restart vector (used for second and subsequent boots).
 * Also used for S2 standby recovery (PM).
 * This entire function gets copied to (BMIPS_WARM_RESTART_VEC)
 ***********************************************************************/

LEAF(bmips_smp_int_vec)

	.align	4
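	/* clear ST0_IE (bit 0) so interrupts stay masked across the eret below */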
	mfc0	k0, CP0_STATUS
	ori	k0, 0x01
	xori	k0, 0x01
	mtc0	k0, CP0_STATUS
	eret

	.align	4
	.global bmips_smp_int_vec_end
bmips_smp_int_vec_end:

END(bmips_smp_int_vec)

/***********************************************************************
 * XKS01 support
 * Certain CPUs support extending kseg0 to 1024MB.
 ***********************************************************************/

LEAF(bmips_enable_xks01)

#if defined(CONFIG_XKS01)
	mfc0	t0, CP0_PRID
	andi	t2, t0, 0xff00
#if defined(CONFIG_CPU_BMIPS4380)
	li	t1, PRID_IMP_BMIPS43XX
	bne	t2, t1, 1f

	/* enable XKS01 only on revs PRID_REV_BMIPS4380_LO..PRID_REV_BMIPS4380_HI */
	andi	t0, 0xff
	addiu	t1, t0, -PRID_REV_BMIPS4380_HI
	bgtz	t1, 2f
	addiu	t0, -PRID_REV_BMIPS4380_LO
	bltz	t0, 2f

	/* clear the bits in the t1 mask, then set the enable bits in t2 */
	mfc0	t0, $22, 3
	li	t1, 0x1ff0
	li	t2, (1 << 12) | (1 << 9)
	or	t0, t1
	xor	t0, t1
	or	t0, t2
	mtc0	t0, $22, 3
	BARRIER
	b	2f
1:
#endif /* CONFIG_CPU_BMIPS4380 */
#if defined(CONFIG_CPU_BMIPS5000)
	li	t1, PRID_IMP_BMIPS5000
	/* mask with PRID_IMP_BMIPS5000 to cover both variants */
	andi	t2, PRID_IMP_BMIPS5000
	bne	t2, t1, 2f

	mfc0	t0, $22, 5
	li	t1, 0x01ff
	li	t2, (1 << 8) | (1 << 5)
	or	t0, t1
	xor	t0, t1
	or	t0, t2
	mtc0	t0, $22, 5
	BARRIER
#endif /* CONFIG_CPU_BMIPS5000 */
2:
#endif /* defined(CONFIG_XKS01) */

	jr	ra

END(bmips_enable_xks01)
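/*
 * Usage note: the regions bounded by bmips_reset_nmi_vec/_end and
 * bmips_smp_int_vec/_end above are copied into place at run time by the
 * BMIPS SMP/platform code, roughly along these lines (sketch only; the
 * real copy helper lives outside this file):
 *
 *	extern char bmips_smp_int_vec[], bmips_smp_int_vec_end[];
 *	unsigned long dst = BMIPS_WARM_RESTART_VEC;
 *	size_t len = bmips_smp_int_vec_end - bmips_smp_int_vec;
 *
 *	memcpy((void *)dst, bmips_smp_int_vec, len);
 *	flush_icache_range(dst, dst + len);
 */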