/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright SUSE Linux Products GmbH 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

/* Real mode helpers */

#include <asm/asm-compat.h>
#include <asm/feature-fixups.h>

#if defined(CONFIG_PPC_BOOK3S_64)

#define GET_SHADOW_VCPU(reg)				\
	mr	reg, r13

#elif defined(CONFIG_PPC_BOOK3S_32)

#define GET_SHADOW_VCPU(reg)				\
	tophys(reg, r2);				\
	lwz	reg, (THREAD + THREAD_KVM_SVCPU)(reg);	\
	tophys(reg, reg)

#endif

/* Disable for nested KVM */
#define USE_QUICK_LAST_INST


/* Get helper functions for subarch specific functionality */

#if defined(CONFIG_PPC_BOOK3S_64)
#include "book3s_64_slb.S"
#elif defined(CONFIG_PPC_BOOK3S_32)
#include "book3s_32_sr.S"
#endif

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_handler_trampoline_enter
kvmppc_handler_trampoline_enter:

	/* Required state:
	 *
	 * MSR = ~IR|DR
	 * R1 = host R1
	 * R2 = host R2
	 * R4 = guest shadow MSR
	 * R5 = normal host MSR
	 * R6 = current host MSR (EE, IR, DR off)
	 * LR = highmem guest exit code
	 * all other volatile GPRS = free
	 * SVCPU[CR] = guest CR
	 * SVCPU[XER] = guest XER
	 * SVCPU[CTR] = guest CTR
	 * SVCPU[LR] = guest LR
	 */

	/* r3 = shadow vcpu */
	GET_SHADOW_VCPU(r3)

	/* Save guest exit handler address and MSR */
	mflr	r0
	PPC_STL	r0, HSTATE_VMHANDLER(r3)
	PPC_STL	r5, HSTATE_HOST_MSR(r3)

	/* Save R1/R2 in the PACA (64-bit) or shadow_vcpu (32-bit) */
	PPC_STL	r1, HSTATE_HOST_R1(r3)
	PPC_STL	r2, HSTATE_HOST_R2(r3)
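	/*
	 * (Added note: the exit path below re-enters this file in real
	 * mode with only r13 to stand on, so everything the host needs
	 * afterwards -- the stack pointer (R1), R2, the host MSR and
	 * the highmem handler address taken from LR -- has to be
	 * parked in HSTATE_* before any guest state is loaded.)
	 */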
	/* Activate guest mode, so faults get handled by KVM */
	li	r11, KVM_GUEST_MODE_GUEST
	stb	r11, HSTATE_IN_GUEST(r3)

	/* Switch to guest segment. This is subarch specific. */
	LOAD_GUEST_SEGMENTS

#ifdef CONFIG_PPC_BOOK3S_64
BEGIN_FTR_SECTION
	/* Save host FSCR */
	mfspr	r8, SPRN_FSCR
	std	r8, HSTATE_HOST_FSCR(r13)
	/* Set FSCR during guest execution */
	ld	r9, SVCPU_SHADOW_FSCR(r13)
	mtspr	SPRN_FSCR, r9
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Some guests may need to have dcbz set to 32 byte length.
	 *
	 * Usually we ensure that by patching the guest's instructions
	 * to trap on dcbz and emulate it in the hypervisor.
	 *
	 * If we can, we should tell the CPU to use 32 byte dcbz though,
	 * because that's a lot faster.
	 */
	lbz	r0, HSTATE_RESTORE_HID5(r3)
	cmpwi	r0, 0
	beq	no_dcbz32_on

	mfspr	r0, SPRN_HID5
	ori	r0, r0, 0x80		/* XXX HID5_dcbz32 = 0x80 */
	mtspr	SPRN_HID5, r0
no_dcbz32_on:

#endif /* CONFIG_PPC_BOOK3S_64 */

	/* Enter guest */

	PPC_LL	r8, SVCPU_CTR(r3)
	PPC_LL	r9, SVCPU_LR(r3)
	lwz	r10, SVCPU_CR(r3)
	PPC_LL	r11, SVCPU_XER(r3)

	mtctr	r8
	mtlr	r9
	mtcr	r10
	mtxer	r11

	/* Move SRR0 and SRR1 into the respective regs */
	PPC_LL	r9, SVCPU_PC(r3)
	/* First clear RI in our current MSR value */
	li	r0, MSR_RI
	andc	r6, r6, r0

	PPC_LL	r0, SVCPU_R0(r3)
	PPC_LL	r1, SVCPU_R1(r3)
	PPC_LL	r2, SVCPU_R2(r3)
	PPC_LL	r5, SVCPU_R5(r3)
	PPC_LL	r7, SVCPU_R7(r3)
	PPC_LL	r8, SVCPU_R8(r3)
	PPC_LL	r10, SVCPU_R10(r3)
	PPC_LL	r11, SVCPU_R11(r3)
	PPC_LL	r12, SVCPU_R12(r3)
	PPC_LL	r13, SVCPU_R13(r3)

	MTMSR_EERI(r6)
	mtsrr0	r9
	mtsrr1	r4

	PPC_LL	r4, SVCPU_R4(r3)
	PPC_LL	r6, SVCPU_R6(r3)
	PPC_LL	r9, SVCPU_R9(r3)
	PPC_LL	r3, (SVCPU_R3)(r3)
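	/*
	 * (Added note: at this point every GPR holds its guest value --
	 * r3 is loaded last because it held the shadow-vcpu pointer --
	 * SRR0/SRR1 carry the guest PC and shadow MSR, and RI was
	 * cleared in the MSR loaded above to mark SRR0/SRR1 as live.
	 * RFI_TO_GUEST issues the actual rfi/rfid, presumably together
	 * with any return flush required on affected 64-bit CPUs, and
	 * drops into the guest.)
	 */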
	RFI_TO_GUEST
kvmppc_handler_trampoline_enter_end:



/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_interrupt_pr
kvmppc_interrupt_pr:
	/* 64-bit entry. Register usage at this point:
	 *
	 * SPRG_SCRATCH0   = guest R13
	 * R9              = HSTATE_IN_GUEST
	 * R12             = (guest CR << 32) | exit handler id
	 * R13             = PACA
	 * HSTATE.SCRATCH0 = guest R12
	 * HSTATE.SCRATCH2 = guest R9
	 */
#ifdef CONFIG_PPC64
	/* Match 32-bit entry */
	ld	r9, HSTATE_SCRATCH2(r13)
	rotldi	r12, r12, 32		   /* Flip R12 halves for stw */
	stw	r12, HSTATE_SCRATCH1(r13)  /* CR is now in the low half */
	srdi	r12, r12, 32		   /* shift trap into low half */
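	/*
	 * (Added worked example with illustrative values: if the guest
	 * CR is 0x44000022 and the trap is 0x300, r12 arrives here as
	 * 0x4400002200000300.  The rotldi swaps the two halves so the
	 * stw stores the CR word, and the srdi then leaves just the
	 * trap number, 0x300, in the low half of r12.)
	 */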
#endif

.global kvmppc_handler_trampoline_exit
kvmppc_handler_trampoline_exit:
	/* Register usage at this point:
	 *
	 * SPRG_SCRATCH0   = guest R13
	 * R12             = exit handler id
	 * R13             = shadow vcpu (32-bit) or PACA (64-bit)
	 * HSTATE.SCRATCH0 = guest R12
	 * HSTATE.SCRATCH1 = guest CR
	 */

	/* Save registers */

	PPC_STL	r0, SVCPU_R0(r13)
	PPC_STL	r1, SVCPU_R1(r13)
	PPC_STL	r2, SVCPU_R2(r13)
	PPC_STL	r3, SVCPU_R3(r13)
	PPC_STL	r4, SVCPU_R4(r13)
	PPC_STL	r5, SVCPU_R5(r13)
	PPC_STL	r6, SVCPU_R6(r13)
	PPC_STL	r7, SVCPU_R7(r13)
	PPC_STL	r8, SVCPU_R8(r13)
	PPC_STL	r9, SVCPU_R9(r13)
	PPC_STL	r10, SVCPU_R10(r13)
	PPC_STL	r11, SVCPU_R11(r13)

	/* Restore R1/R2 so we can handle faults */
	PPC_LL	r1, HSTATE_HOST_R1(r13)
	PPC_LL	r2, HSTATE_HOST_R2(r13)

	/* Save guest PC and MSR */
#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
	andi.	r0, r12, 0x2
	cmpwi	cr1, r0, 0
	beq	1f
	mfspr	r3, SPRN_HSRR0
	mfspr	r4, SPRN_HSRR1
	andi.	r12, r12, 0x3ffd
	b	2f
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
#endif
1:	mfsrr0	r3
	mfsrr1	r4
2:
	PPC_STL	r3, SVCPU_PC(r13)
	PPC_STL	r4, SVCPU_SHADOW_SRR1(r13)

	/* Get scratch'ed off registers */
	GET_SCRATCH0(r9)
	PPC_LL	r8, HSTATE_SCRATCH0(r13)
	lwz	r7, HSTATE_SCRATCH1(r13)

	PPC_STL	r9, SVCPU_R13(r13)
	PPC_STL	r8, SVCPU_R12(r13)
	stw	r7, SVCPU_CR(r13)

	/* Save more register state */

	mfxer	r5
	mfdar	r6
	mfdsisr	r7
	mfctr	r8
	mflr	r9

	PPC_STL	r5, SVCPU_XER(r13)
	PPC_STL	r6, SVCPU_FAULT_DAR(r13)
	stw	r7, SVCPU_FAULT_DSISR(r13)
	PPC_STL	r8, SVCPU_CTR(r13)
	PPC_STL	r9, SVCPU_LR(r13)

	/*
	 * In order for us to easily get the last instruction,
	 * we got the #vmexit at, we exploit the fact that the
	 * virtual layout is still the same here, so we can just
	 * ld from the guest's PC address
	 */
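	/*
	 * (Added note: LOAD_HOST_SEGMENTS only runs further down, so
	 * the guest segment mappings are still in place here.  Briefly
	 * turning MSR[DR] back on therefore lets the lwz below
	 * translate through the guest's own mapping; if that load
	 * faults, HSTATE_IN_GUEST set to KVM_GUEST_MODE_SKIP makes the
	 * exception entry code step over it, leaving
	 * KVM_INST_FETCH_FAILED in r0.)
	 */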
	/* We only load the last instruction when it's safe */
	cmpwi	r12, BOOK3S_INTERRUPT_DATA_STORAGE
	beq	ld_last_inst
	cmpwi	r12, BOOK3S_INTERRUPT_PROGRAM
	beq	ld_last_inst
	cmpwi	r12, BOOK3S_INTERRUPT_SYSCALL
	beq	ld_last_prev_inst
	cmpwi	r12, BOOK3S_INTERRUPT_ALIGNMENT
	beq-	ld_last_inst
#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
	cmpwi	r12, BOOK3S_INTERRUPT_H_EMUL_ASSIST
	beq-	ld_last_inst
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
BEGIN_FTR_SECTION
	cmpwi	r12, BOOK3S_INTERRUPT_FAC_UNAVAIL
	beq-	ld_last_inst
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
#endif

	b	no_ld_last_inst

ld_last_prev_inst:
	addi	r3, r3, -4

ld_last_inst:
	/* Save off the guest instruction we're at */

	/* In case lwz faults */
	li	r0, KVM_INST_FETCH_FAILED

#ifdef USE_QUICK_LAST_INST

	/* Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP. */
	li	r9, KVM_GUEST_MODE_SKIP
	stb	r9, HSTATE_IN_GUEST(r13)

	/*    1) enable paging for data */
	mfmsr	r9
	ori	r11, r9, MSR_DR		/* Enable paging for data */
	mtmsr	r11
	sync
	/*    2) fetch the instruction */
	lwz	r0, 0(r3)
	/*    3) disable paging again */
	mtmsr	r9
	sync

#endif
	stw	r0, SVCPU_LAST_INST(r13)

no_ld_last_inst:

	/* Unset guest mode */
	li	r9, KVM_GUEST_MODE_NONE
	stb	r9, HSTATE_IN_GUEST(r13)

	/* Switch back to host MMU */
	LOAD_HOST_SEGMENTS

#ifdef CONFIG_PPC_BOOK3S_64

	lbz	r5, HSTATE_RESTORE_HID5(r13)
	cmpwi	r5, 0
	beq	no_dcbz32_off

	li	r4, 0
	mfspr	r5, SPRN_HID5
	rldimi	r5, r4, 6, 56
	mtspr	SPRN_HID5, r5

no_dcbz32_off:

BEGIN_FTR_SECTION
	/* Save guest FSCR on a FAC_UNAVAIL interrupt */
	cmpwi	r12, BOOK3S_INTERRUPT_FAC_UNAVAIL
	bne+	no_fscr_save
	mfspr	r7, SPRN_FSCR
	std	r7, SVCPU_SHADOW_FSCR(r13)
no_fscr_save:
	/* Restore host FSCR */
	ld	r8, HSTATE_HOST_FSCR(r13)
	mtspr	SPRN_FSCR, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

#endif /* CONFIG_PPC_BOOK3S_64 */

	/*
	 * For some interrupts, we need to call the real Linux
	 * handler, so it can do work for us. This has to happen
	 * as if the interrupt arrived from the kernel though,
	 * so let's fake it here where most state is restored.
	 *
	 * Having set up SRR0/1 with the address where we want
	 * to continue with relocation on (potentially in module
	 * space), we either just go straight there with rfi[d],
	 * or we jump to an interrupt handler if there is an
	 * interrupt to be handled first.  In the latter case,
	 * the rfi[d] at the end of the interrupt handler will
	 * get us back to where we want to continue.
	 */

	/* Register usage at this point:
	 *
	 * R1       = host R1
	 * R2       = host R2
	 * R10      = raw exit handler id
	 * R12      = exit handler id
	 * R13      = shadow vcpu (32-bit) or PACA (64-bit)
	 * SVCPU.*  = guest *
	 *
	 */

	PPC_LL	r6, HSTATE_HOST_MSR(r13)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * We don't want to change MSR[TS] bits via rfi here.
	 * The actual TM handling logic will be in host with
	 * recovered DR/IR bits after HSTATE_VMHANDLER.
	 * And MSR_TM can be enabled in HOST_MSR so rfid may
	 * not suppress this change and can lead to exception.
	 * Manually set MSR to prevent TS state change here.
	 */
	mfmsr	r7
	rldicl	r7, r7, 64 - MSR_TS_S_LG, 62
	rldimi	r6, r7, MSR_TS_S_LG, 63 - MSR_TS_T_LG
#endif
	PPC_LL	r8, HSTATE_VMHANDLER(r13)

#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
	beq	cr1, 1f
	mtspr	SPRN_HSRR1, r6
	mtspr	SPRN_HSRR0, r8
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
#endif
1:	/* Restore host msr -> SRR1 */
	mtsrr1	r6
	/* Load highmem handler address */
	mtsrr0	r8

	/* RFI into the highmem handler, or jump to interrupt handler */
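	/*
	 * (Added note: the BOOK3S_INTERRUPT_* values are the
	 * architected vector offsets, so each "beqa" below is a branch
	 * to the absolute address of the corresponding exception
	 * vector, e.g. 0x500 for an external interrupt.  With SRR0/SRR1
	 * already holding the highmem handler and the host MSR, the
	 * Linux handler runs as if the interrupt came from the kernel,
	 * and its final rfi[d] returns to the handler loaded into SRR0
	 * above.)
	 */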
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	beqa	BOOK3S_INTERRUPT_EXTERNAL
	cmpwi	r12, BOOK3S_INTERRUPT_DECREMENTER
	beqa	BOOK3S_INTERRUPT_DECREMENTER
	cmpwi	r12, BOOK3S_INTERRUPT_PERFMON
	beqa	BOOK3S_INTERRUPT_PERFMON
	cmpwi	r12, BOOK3S_INTERRUPT_DOORBELL
	beqa	BOOK3S_INTERRUPT_DOORBELL

	RFI_TO_KERNEL
kvmppc_handler_trampoline_exit_end: