/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>

#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/exception-64s.h>
#endif

/*****************************************************************************
 *                                                                           *
 *        Real Mode handlers that need to be in low physical memory          *
 *                                                                           *
 ****************************************************************************/

#if defined(CONFIG_PPC_BOOK3S_64)

/* On 64-bit the shadow vcpu state is kept in the PACA */
#define LOAD_SHADOW_VCPU(reg)	GET_PACA(reg)
/* Kernel MSR with instruction/data relocation (IR/DR) masked off */
#define MSR_NOIRQ		MSR_KERNEL & ~(MSR_IR | MSR_DR)
/* 64-bit text entry points are dot-prefixed function descriptors */
#define FUNC(name)		GLUE(.,name)

/*
 * Skip the instruction that caused the interrupt: bump SRR0 past it
 * (all Book3S instructions are 4 bytes), restore r13 and return to
 * the interrupted context with rfid.
 */
kvmppc_skip_interrupt:
	/*
	 * Here all GPRs are unchanged from when the interrupt happened
	 * except for r13, which is saved in SPRG_SCRATCH0.
	 */
	mfspr	r13, SPRN_SRR0
	addi	r13, r13, 4		/* advance return address one insn */
	mtspr	SPRN_SRR0, r13
	GET_SCRATCH0(r13)		/* restore the interrupted r13 */
	rfid
	b	.			/* not reached - rfid never falls through */

/*
 * Same as above, but for hypervisor-level interrupts, which use the
 * HSRR0/HSRR1 pair and return via hrfid instead of rfid.
 */
kvmppc_skip_Hinterrupt:
	/*
	 * Here all GPRs are unchanged from when the interrupt happened
	 * except for r13, which is saved in SPRG_SCRATCH0.
	 */
	mfspr	r13, SPRN_HSRR0
	addi	r13, r13, 4		/* advance return address one insn */
	mtspr	SPRN_HSRR0, r13
	GET_SCRATCH0(r13)		/* restore the interrupted r13 */
	hrfid
	b	.			/* not reached */

#elif defined(CONFIG_PPC_BOOK3S_32)

/* 32-bit has no relocation-off kernel MSR variant; plain MSR_KERNEL */
#define MSR_NOIRQ		MSR_KERNEL
/* 32-bit entry points are the plain symbol name (no dot prefix) */
#define FUNC(name)		name

/*
 * Per-interrupt-vector trampoline, instantiated once per interrupt
 * number below.  Decides whether the interrupt came from a KVM guest
 * (dispatch to the KVM exit path) or from normal Linux context
 * (fall back to the original Linux handler, kvmppc_resume_\intno).
 */
.macro INTERRUPT_TRAMPOLINE intno

.global kvmppc_trampoline_\intno
kvmppc_trampoline_\intno:

	mtspr	SPRN_SPRG_SCRATCH0, r13		/* Save r13 */

	/*
	 * First thing to do is to find out if we're coming
	 * from a KVM guest or a Linux process.
	 *
	 * To distinguish, we check a magic byte in the PACA/current
	 */
	mfspr	r13, SPRN_SPRG_THREAD		/* r13 = thread struct */
	lwz	r13, THREAD_KVM_SVCPU(r13)	/* r13 = shadow vcpu ptr */
	/* PPC32 can have a NULL pointer - let's check for that */
	mtspr	SPRN_SPRG_SCRATCH1, r12		/* Save r12 */
	mfcr	r12				/* CR clobbered by cmpwi; keep it in r12 */
	cmpwi	r13, 0
	bne	1f
	/* Not a KVM task: undo all register juggling and leave */
2:	mtcr	r12				/* restore CR */
	mfspr	r12, SPRN_SPRG_SCRATCH1		/* restore r12 */
	mfspr	r13, SPRN_SPRG_SCRATCH0		/* r13 = original r13 */
	b	kvmppc_resume_\intno		/* Get back original handler */

	/* Shadow vcpu exists: stash r12/CR in its HSTATE scratch slots */
1:	tophys(r13, r13)			/* running relocation-off: need phys addr */
	stw	r12, HSTATE_SCRATCH1(r13)	/* HSTATE.SCRATCH1 = guest CR */
	mfspr	r12, SPRN_SPRG_SCRATCH1
	stw	r12, HSTATE_SCRATCH0(r13)	/* HSTATE.SCRATCH0 = guest r12 */
	lbz	r12, HSTATE_IN_GUEST(r13)	/* the magic byte */
	cmpwi	r12, KVM_GUEST_MODE_NONE
	bne	..kvmppc_handler_hasmagic_\intno
	/* No KVM guest? Then jump back to the Linux handler! */
	lwz	r12, HSTATE_SCRATCH1(r13)	/* reload CR value, then share exit path */
	b	2b

	/* Now we know we're handling a KVM guest */
..kvmppc_handler_hasmagic_\intno:

	/* Should we just skip the faulting instruction? */
	cmpwi	r12, KVM_GUEST_MODE_SKIP
	beq	kvmppc_handler_skip_ins

	/* Let's store which interrupt we're handling */
	li	r12, \intno

	/* Jump into the SLB exit code that goes to the highmem handler */
	b	kvmppc_handler_trampoline_exit

.endm

/* One trampoline per interrupt vector KVM intercepts */
INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_SYSTEM_RESET
INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_MACHINE_CHECK
INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_DATA_STORAGE
INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_INST_STORAGE
INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_EXTERNAL
INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_ALIGNMENT
INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_PROGRAM
INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_FP_UNAVAIL
INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_DECREMENTER
INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_SYSCALL
INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_TRACE
INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_PERFMON
INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_ALTIVEC

/*
 * Bring us back to the faulting code, but skip the
 * faulting instruction.
 *
 * This is a generic exit path from the interrupt
 * trampolines above.
 *
 * Input Registers:
 *
 * R12             = free
 * R13             = Shadow VCPU (PACA)
 * HSTATE.SCRATCH0 = guest R12
 * HSTATE.SCRATCH1 = guest CR
 * SPRG_SCRATCH0   = guest R13
 *
 */
kvmppc_handler_skip_ins:

	/* Patch the IP to the next instruction (all insns are 4 bytes) */
	mfsrr0	r12
	addi	r12, r12, 4
	mtsrr0	r12

	/* Clean up all state: restore guest CR, r12 and r13 */
	lwz	r12, HSTATE_SCRATCH1(r13)
	mtcr	r12
	PPC_LL	r12, HSTATE_SCRATCH0(r13)
	GET_SCRATCH0(r13)

	/* And get back into the code */
	RFI
#endif

/*
 * This trampoline brings us back to a real mode handler
 *
 * Input Registers:
 *
 * R5 = SRR0
 * R6 = SRR1
 * LR = real-mode IP
 *
 */
.global kvmppc_handler_lowmem_trampoline
kvmppc_handler_lowmem_trampoline:

	mtsrr0	r5
	mtsrr1	r6
	blr			/* "return" to the real-mode IP in LR */
kvmppc_handler_lowmem_trampoline_end:

/*
 * Call a function in real mode
 *
 * Input Registers:
 *
 * R3 = function
 * R4 = MSR
 * R5 = scratch register
 *
 * Does not return here: execution continues at R3 with MSR = R4
 * via RFI (SRR0 = R3, SRR1 = R4).
 */
_GLOBAL(kvmppc_rmcall)
	LOAD_REG_IMMEDIATE(r5, MSR_NOIRQ)
	mtmsr	r5		/* Disable relocation and interrupts, so mtsrr
				   doesn't get interrupted */
	sync			/* make the MSR change take effect before mtsrr */
	mtsrr0	r3
	mtsrr1	r4
	RFI

#if defined(CONFIG_PPC_BOOK3S_32)
/* On 32-bit, LR is spilled just above the exception frame */
#define STACK_LR	INT_FRAME_SIZE+4

/* load_up_xxx have to run with MSR_DR=0 on Book3S_32 */
#define MSR_EXT_START						\
	PPC_STL	r20, _NIP(r1);	/* _NIP slot doubles as save area for r20 */ \
	mfmsr	r20;						\
	LOAD_REG_IMMEDIATE(r3, MSR_DR|MSR_EE);			\
	andc	r3,r20,r3;		/* Disable DR,EE */	\
	mtmsr	r3;						\
	sync

#define MSR_EXT_END						\
	mtmsr	r20;			/* Enable DR,EE */	\
	sync;							\
	PPC_LL	r20, _NIP(r1)	/* restore r20 */

#elif defined(CONFIG_PPC_BOOK3S_64)
/* 64-bit uses the regular _LINK slot and needs no MSR fiddling */
#define STACK_LR	_LINK
#define MSR_EXT_START
#define MSR_EXT_END
#endif

/*
 * Activate current's external feature (FPU/Altivec/VSX)
 *
 * Expands to kvmppc_load_up_<what>: set up a stack frame, save LR,
 * call the kernel's load_up_<what> with the MSR environment the
 * platform requires, then tear everything back down.
 */
#define define_load_up(what) 					\
								\
_GLOBAL(kvmppc_load_up_ ## what);				\
	PPC_STLU r1, -INT_FRAME_SIZE(r1);			\
	mflr	r3;						\
	PPC_STL	r3, STACK_LR(r1);				\
	MSR_EXT_START;						\
								\
	bl	FUNC(load_up_ ## what);				\
								\
	MSR_EXT_END;						\
	PPC_LL	r3, STACK_LR(r1);				\
	mtlr	r3;						\
	addi	r1, r1, INT_FRAME_SIZE;				\
	blr

define_load_up(fpu)
#ifdef CONFIG_ALTIVEC
define_load_up(altivec)
#endif
#ifdef CONFIG_VSX
define_load_up(vsx)
#endif

/* The guest entry/exit (segment switching) code shares this file's section */
#include "book3s_segment.S"