/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

/* Real mode helpers */

#include <asm/asm-compat.h>
#include <asm/feature-fixups.h>

#if defined(CONFIG_PPC_BOOK3S_64)

#define GET_SHADOW_VCPU(reg)	\
	mr	reg, r13

#elif defined(CONFIG_PPC_BOOK3S_32)

#define GET_SHADOW_VCPU(reg)				\
	tophys(reg, r2);				\
	lwz	reg, (THREAD + THREAD_KVM_SVCPU)(reg);	\
	tophys(reg, reg)

#endif

/* Disable for nested KVM */
#define USE_QUICK_LAST_INST


/* Get helper functions for subarch specific functionality */

#if defined(CONFIG_PPC_BOOK3S_64)
#include "book3s_64_slb.S"
#elif defined(CONFIG_PPC_BOOK3S_32)
#include "book3s_32_sr.S"
#endif

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_handler_trampoline_enter
kvmppc_handler_trampoline_enter:

	/* Required state:
	 *
	 * MSR = ~IR|DR
	 * R1 = host R1
	 * R2 = host R2
	 * R4 = guest shadow MSR
	 * R5 = normal host MSR
	 * R6 = current host MSR (EE, IR, DR off)
	 * LR = highmem guest exit code
	 * all other volatile GPRS = free
	 * SVCPU[CR] = guest CR
	 * SVCPU[XER] = guest XER
	 * SVCPU[CTR] = guest CTR
	 * SVCPU[LR] = guest LR
	 */

	/* r3 = shadow vcpu */
	GET_SHADOW_VCPU(r3)

	/* Save guest exit handler address and MSR */
	mflr	r0
	PPC_STL	r0, HSTATE_VMHANDLER(r3)
	PPC_STL	r5, HSTATE_HOST_MSR(r3)

	/* Save R1/R2 in the PACA (64-bit) or shadow_vcpu (32-bit) */
	PPC_STL	r1, HSTATE_HOST_R1(r3)
	PPC_STL	r2, HSTATE_HOST_R2(r3)

	/* Activate guest mode, so faults get handled by KVM */
	li	r11, KVM_GUEST_MODE_GUEST
	stb	r11, HSTATE_IN_GUEST(r3)

	/* Switch to guest segment. This is subarch specific. */
	LOAD_GUEST_SEGMENTS

#ifdef CONFIG_PPC_BOOK3S_64
BEGIN_FTR_SECTION
	/* Save host FSCR */
	mfspr	r8, SPRN_FSCR
	std	r8, HSTATE_HOST_FSCR(r13)
	/* Set FSCR during guest execution */
	ld	r9, SVCPU_SHADOW_FSCR(r13)
	mtspr	SPRN_FSCR, r9
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Some guests may need to have dcbz set to 32 byte length.
	 *
	 * Usually we ensure that by patching the guest's instructions
	 * to trap on dcbz and emulate it in the hypervisor.
	 *
	 * If we can, we should tell the CPU to use 32 byte dcbz though,
	 * because that's a lot faster.
	 */
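	/*
	 * Note (editor, hedged): HSTATE_RESTORE_HID5 is only set on
	 * CPUs whose HID5 register has such a dcbz-length control bit,
	 * the PPC970 being the classic case. Per the inline comment
	 * below, the 0x80 value is that dcbz32 bit: setting it makes
	 * dcbz clear 32 bytes instead of a full cache line, so guests
	 * written for 32-byte cache lines run without trapping on
	 * every dcbz.
	 */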
	lbz	r0, HSTATE_RESTORE_HID5(r3)
	cmpwi	r0, 0
	beq	no_dcbz32_on

	mfspr	r0, SPRN_HID5
	ori	r0, r0, 0x80		/* XXX HID5_dcbz32 = 0x80 */
	mtspr	SPRN_HID5, r0
no_dcbz32_on:

#endif /* CONFIG_PPC_BOOK3S_64 */

	/* Enter guest */

	PPC_LL	r8, SVCPU_CTR(r3)
	PPC_LL	r9, SVCPU_LR(r3)
	lwz	r10, SVCPU_CR(r3)
	PPC_LL	r11, SVCPU_XER(r3)

	mtctr	r8
	mtlr	r9
	mtcr	r10
	mtxer	r11

	/* Move SRR0 and SRR1 into the respective regs */
	PPC_LL	r9, SVCPU_PC(r3)
	/* First clear RI in our current MSR value */
	li	r0, MSR_RI
	andc	r6, r6, r0

	PPC_LL	r0, SVCPU_R0(r3)
	PPC_LL	r1, SVCPU_R1(r3)
	PPC_LL	r2, SVCPU_R2(r3)
	PPC_LL	r5, SVCPU_R5(r3)
	PPC_LL	r7, SVCPU_R7(r3)
	PPC_LL	r8, SVCPU_R8(r3)
	PPC_LL	r10, SVCPU_R10(r3)
	PPC_LL	r11, SVCPU_R11(r3)
	PPC_LL	r12, SVCPU_R12(r3)
	PPC_LL	r13, SVCPU_R13(r3)

	MTMSR_EERI(r6)
	mtsrr0	r9
	mtsrr1	r4

	PPC_LL	r4, SVCPU_R4(r3)
	PPC_LL	r6, SVCPU_R6(r3)
	PPC_LL	r9, SVCPU_R9(r3)
	PPC_LL	r3, (SVCPU_R3)(r3)

	RFI_TO_GUEST
kvmppc_handler_trampoline_enter_end:



/******************************************************************************
 *                                                                            *
 *                                Exit code                                   *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_interrupt_pr
kvmppc_interrupt_pr:
	/* 64-bit entry. Register usage at this point:
	 *
	 * SPRG_SCRATCH0   = guest R13
	 * R12             = (guest CR << 32) | exit handler id
	 * R13             = PACA
	 * HSTATE.SCRATCH0 = guest R12
	 * HSTATE.SCRATCH1 = guest CTR if RELOCATABLE
	 */
#ifdef CONFIG_PPC64
	/* Match 32-bit entry */
#ifdef CONFIG_RELOCATABLE
	std	r9, HSTATE_SCRATCH2(r13)
	ld	r9, HSTATE_SCRATCH1(r13)
	mtctr	r9
	ld	r9, HSTATE_SCRATCH2(r13)
#endif
	rotldi	r12, r12, 32		   /* Flip R12 halves for stw */
	stw	r12, HSTATE_SCRATCH1(r13)  /* CR is now in the low half */
	srdi	r12, r12, 32		   /* shift trap into low half */
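	/*
	 * Illustrative example of the split above: if R12 arrives as
	 * (guest CR << 32) | 0x300 (a data storage interrupt), the
	 * rotldi swaps the two halves so the stw saves the CR word,
	 * and the srdi then leaves R12 = 0x300, the bare exit
	 * handler id.
	 */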
#endif

.global kvmppc_handler_trampoline_exit
kvmppc_handler_trampoline_exit:
	/* Register usage at this point:
	 *
	 * SPRG_SCRATCH0   = guest R13
	 * R12             = exit handler id
	 * R13             = shadow vcpu (32-bit) or PACA (64-bit)
	 * HSTATE.SCRATCH0 = guest R12
	 * HSTATE.SCRATCH1 = guest CR
	 */

	/* Save registers */

	PPC_STL	r0, SVCPU_R0(r13)
	PPC_STL	r1, SVCPU_R1(r13)
	PPC_STL	r2, SVCPU_R2(r13)
	PPC_STL	r3, SVCPU_R3(r13)
	PPC_STL	r4, SVCPU_R4(r13)
	PPC_STL	r5, SVCPU_R5(r13)
	PPC_STL	r6, SVCPU_R6(r13)
	PPC_STL	r7, SVCPU_R7(r13)
	PPC_STL	r8, SVCPU_R8(r13)
	PPC_STL	r9, SVCPU_R9(r13)
	PPC_STL	r10, SVCPU_R10(r13)
	PPC_STL	r11, SVCPU_R11(r13)

	/* Restore R1/R2 so we can handle faults */
	PPC_LL	r1, HSTATE_HOST_R1(r13)
	PPC_LL	r2, HSTATE_HOST_R2(r13)

	/* Save guest PC and MSR */
#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
	andi.	r0, r12, 0x2
	cmpwi	cr1, r0, 0
	beq	1f
	mfspr	r3, SPRN_HSRR0
	mfspr	r4, SPRN_HSRR1
	andi.	r12, r12, 0x3ffd
	b	2f
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
#endif
1:	mfsrr0	r3
	mfsrr1	r4
2:
	PPC_STL	r3, SVCPU_PC(r13)
	PPC_STL	r4, SVCPU_SHADOW_SRR1(r13)

	/* Get scratch'ed off registers */
	GET_SCRATCH0(r9)
	PPC_LL	r8, HSTATE_SCRATCH0(r13)
	lwz	r7, HSTATE_SCRATCH1(r13)

	PPC_STL	r9, SVCPU_R13(r13)
	PPC_STL	r8, SVCPU_R12(r13)
	stw	r7, SVCPU_CR(r13)

	/* Save more register state */

	mfxer	r5
	mfdar	r6
	mfdsisr	r7
	mfctr	r8
	mflr	r9

	PPC_STL	r5, SVCPU_XER(r13)
	PPC_STL	r6, SVCPU_FAULT_DAR(r13)
	stw	r7, SVCPU_FAULT_DSISR(r13)
	PPC_STL	r8, SVCPU_CTR(r13)
	PPC_STL	r9, SVCPU_LR(r13)

	/*
	 * To easily fetch the instruction we took the #vmexit at,
	 * we exploit the fact that the virtual memory layout is
	 * still the same here, so we can simply load from the
	 * guest's PC address.
	 */

	/* We only load the last instruction when it's safe */
	cmpwi	r12, BOOK3S_INTERRUPT_DATA_STORAGE
	beq	ld_last_inst
	cmpwi	r12, BOOK3S_INTERRUPT_PROGRAM
	beq	ld_last_inst
	cmpwi	r12, BOOK3S_INTERRUPT_SYSCALL
	beq	ld_last_prev_inst
	cmpwi	r12, BOOK3S_INTERRUPT_ALIGNMENT
	beq-	ld_last_inst
#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
	cmpwi	r12, BOOK3S_INTERRUPT_H_EMUL_ASSIST
	beq-	ld_last_inst
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
BEGIN_FTR_SECTION
	cmpwi	r12, BOOK3S_INTERRUPT_FAC_UNAVAIL
	beq-	ld_last_inst
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
#endif

	b	no_ld_last_inst

ld_last_prev_inst:
	addi	r3, r3, -4

ld_last_inst:
	/* Save off the guest instruction we're at */

	/* In case lwz faults */
	li	r0, KVM_INST_FETCH_FAILED

#ifdef USE_QUICK_LAST_INST

	/* Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP. */
	li	r9, KVM_GUEST_MODE_SKIP
	stb	r9, HSTATE_IN_GUEST(r13)

	/* 1) enable paging for data */
	mfmsr	r9
	ori	r11, r9, MSR_DR		/* Enable paging for data */
	mtmsr	r11
	sync
	/* 2) fetch the instruction */
	lwz	r0, 0(r3)
	/* 3) disable paging again */
	mtmsr	r9
	sync

#endif
	stw	r0, SVCPU_LAST_INST(r13)

no_ld_last_inst:

	/* Unset guest mode */
	li	r9, KVM_GUEST_MODE_NONE
	stb	r9, HSTATE_IN_GUEST(r13)

	/* Switch back to host MMU */
	LOAD_HOST_SEGMENTS

#ifdef CONFIG_PPC_BOOK3S_64

	lbz	r5, HSTATE_RESTORE_HID5(r13)
	cmpwi	r5, 0
	beq	no_dcbz32_off

	li	r4, 0
	mfspr	r5, SPRN_HID5
	rldimi	r5, r4, 6, 56		/* clear the dcbz32 bit again */
	mtspr	SPRN_HID5, r5

no_dcbz32_off:

BEGIN_FTR_SECTION
	/* Save guest FSCR on a FAC_UNAVAIL interrupt */
	cmpwi	r12, BOOK3S_INTERRUPT_FAC_UNAVAIL
	bne+	no_fscr_save
	mfspr	r7, SPRN_FSCR
	std	r7, SVCPU_SHADOW_FSCR(r13)
no_fscr_save:
	/* Restore host FSCR */
	ld	r8, HSTATE_HOST_FSCR(r13)
	mtspr	SPRN_FSCR, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

#endif /* CONFIG_PPC_BOOK3S_64 */

	/*
	 * For some interrupts we need to call the real Linux handler,
	 * so it can do work for us. This has to happen as if the
	 * interrupt arrived from the kernel though, so let's fake it
	 * here where most state is restored.
	 *
	 * Having set up SRR0/1 with the address where we want to
	 * continue with relocation on (potentially in module space),
	 * we either just go straight there with rfi[d], or we jump to
	 * an interrupt handler if there is an interrupt to be handled
	 * first. In the latter case, the rfi[d] at the end of the
	 * interrupt handler will get us back to where we want to
	 * continue.
	 */

	/* Register usage at this point:
	 *
	 * R1       = host R1
	 * R2       = host R2
	 * R10      = raw exit handler id
	 * R12      = exit handler id
	 * R13      = shadow vcpu (32-bit) or PACA (64-bit)
	 * SVCPU.*  = guest *
	 *
	 */

	PPC_LL	r6, HSTATE_HOST_MSR(r13)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * We don't want rfi to change the MSR[TS] bits here. The
	 * actual TM handling happens in the host, with DR/IR
	 * restored, after HSTATE_VMHANDLER is reached. Since MSR_TM
	 * may be set in HOST_MSR, rfid would not suppress such a
	 * change and could raise an exception. So manually copy the
	 * current TS state into the MSR image to keep it unchanged.
	 */
	mfmsr	r7
	rldicl	r7, r7, 64 - MSR_TS_S_LG, 62
	rldimi	r6, r7, MSR_TS_S_LG, 63 - MSR_TS_T_LG
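	/*
	 * Bit-level sketch, assuming the usual MSR_TS_S_LG = 33 and
	 * MSR_TS_T_LG = 34 definitions: the rldicl rotates the live
	 * MSR so the two TS bits land in the low bits of r7, and the
	 * rldimi inserts them into r6 at bits 34:33, so the SRR1
	 * image we rfid with carries the current transaction state
	 * through unchanged.
	 */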
#endif
	PPC_LL	r8, HSTATE_VMHANDLER(r13)

#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
	beq	cr1, 1f
	mtspr	SPRN_HSRR1, r6
	mtspr	SPRN_HSRR0, r8
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
#endif
1:	/* Restore host msr -> SRR1 */
	mtsrr1	r6
	/* Load highmem handler address */
	mtsrr0	r8

	/* RFI into the highmem handler, or jump to interrupt handler */
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	beqa	BOOK3S_INTERRUPT_EXTERNAL
	cmpwi	r12, BOOK3S_INTERRUPT_DECREMENTER
	beqa	BOOK3S_INTERRUPT_DECREMENTER
	cmpwi	r12, BOOK3S_INTERRUPT_PERFMON
	beqa	BOOK3S_INTERRUPT_PERFMON
	cmpwi	r12, BOOK3S_INTERRUPT_DOORBELL
	beqa	BOOK3S_INTERRUPT_DOORBELL

	RFI_TO_KERNEL
kvmppc_handler_trampoline_exit_end:
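/*
 * A note on the beqa sequence above: for these four interrupts the
 * BOOK3S_INTERRUPT_* exit handler id in R12 equals the absolute
 * address of the matching host exception vector (0x500 external,
 * 0x900 decrementer, 0xf00 performance monitor, 0xa00 doorbell in
 * the usual layout), so "beq" with the absolute-address flag jumps
 * straight into the host's handler. The rfi[d] at the end of that
 * handler then returns through the SRR0/SRR1 values loaded above.
 */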