/*
 * PowerPC emulation special registers manipulation helpers for qemu.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18*8a05fd9aSRichard Henderson */ 19*8a05fd9aSRichard Henderson 20*8a05fd9aSRichard Henderson #include "qemu/osdep.h" 21*8a05fd9aSRichard Henderson #include "qemu/main-loop.h" 22*8a05fd9aSRichard Henderson #include "exec/exec-all.h" 23*8a05fd9aSRichard Henderson #include "sysemu/kvm.h" 24*8a05fd9aSRichard Henderson #include "helper_regs.h" 25*8a05fd9aSRichard Henderson 26*8a05fd9aSRichard Henderson /* Swap temporary saved registers with GPRs */ 27*8a05fd9aSRichard Henderson void hreg_swap_gpr_tgpr(CPUPPCState *env) 28*8a05fd9aSRichard Henderson { 29*8a05fd9aSRichard Henderson target_ulong tmp; 30*8a05fd9aSRichard Henderson 31*8a05fd9aSRichard Henderson tmp = env->gpr[0]; 32*8a05fd9aSRichard Henderson env->gpr[0] = env->tgpr[0]; 33*8a05fd9aSRichard Henderson env->tgpr[0] = tmp; 34*8a05fd9aSRichard Henderson tmp = env->gpr[1]; 35*8a05fd9aSRichard Henderson env->gpr[1] = env->tgpr[1]; 36*8a05fd9aSRichard Henderson env->tgpr[1] = tmp; 37*8a05fd9aSRichard Henderson tmp = env->gpr[2]; 38*8a05fd9aSRichard Henderson env->gpr[2] = env->tgpr[2]; 39*8a05fd9aSRichard Henderson env->tgpr[2] = tmp; 40*8a05fd9aSRichard Henderson tmp = env->gpr[3]; 41*8a05fd9aSRichard Henderson env->gpr[3] = env->tgpr[3]; 42*8a05fd9aSRichard Henderson env->tgpr[3] = tmp; 43*8a05fd9aSRichard Henderson } 44*8a05fd9aSRichard Henderson 45*8a05fd9aSRichard Henderson void hreg_compute_mem_idx(CPUPPCState *env) 46*8a05fd9aSRichard Henderson { 47*8a05fd9aSRichard Henderson /* 48*8a05fd9aSRichard Henderson * This is our encoding for server processors. The architecture 49*8a05fd9aSRichard Henderson * specifies that there is no such thing as userspace with 50*8a05fd9aSRichard Henderson * translation off, however it appears that MacOS does it and some 51*8a05fd9aSRichard Henderson * 32-bit CPUs support it. Weird... 
52*8a05fd9aSRichard Henderson * 53*8a05fd9aSRichard Henderson * 0 = Guest User space virtual mode 54*8a05fd9aSRichard Henderson * 1 = Guest Kernel space virtual mode 55*8a05fd9aSRichard Henderson * 2 = Guest User space real mode 56*8a05fd9aSRichard Henderson * 3 = Guest Kernel space real mode 57*8a05fd9aSRichard Henderson * 4 = HV User space virtual mode 58*8a05fd9aSRichard Henderson * 5 = HV Kernel space virtual mode 59*8a05fd9aSRichard Henderson * 6 = HV User space real mode 60*8a05fd9aSRichard Henderson * 7 = HV Kernel space real mode 61*8a05fd9aSRichard Henderson * 62*8a05fd9aSRichard Henderson * For BookE, we need 8 MMU modes as follow: 63*8a05fd9aSRichard Henderson * 64*8a05fd9aSRichard Henderson * 0 = AS 0 HV User space 65*8a05fd9aSRichard Henderson * 1 = AS 0 HV Kernel space 66*8a05fd9aSRichard Henderson * 2 = AS 1 HV User space 67*8a05fd9aSRichard Henderson * 3 = AS 1 HV Kernel space 68*8a05fd9aSRichard Henderson * 4 = AS 0 Guest User space 69*8a05fd9aSRichard Henderson * 5 = AS 0 Guest Kernel space 70*8a05fd9aSRichard Henderson * 6 = AS 1 Guest User space 71*8a05fd9aSRichard Henderson * 7 = AS 1 Guest Kernel space 72*8a05fd9aSRichard Henderson */ 73*8a05fd9aSRichard Henderson if (env->mmu_model & POWERPC_MMU_BOOKE) { 74*8a05fd9aSRichard Henderson env->immu_idx = env->dmmu_idx = msr_pr ? 0 : 1; 75*8a05fd9aSRichard Henderson env->immu_idx += msr_is ? 2 : 0; 76*8a05fd9aSRichard Henderson env->dmmu_idx += msr_ds ? 2 : 0; 77*8a05fd9aSRichard Henderson env->immu_idx += msr_gs ? 4 : 0; 78*8a05fd9aSRichard Henderson env->dmmu_idx += msr_gs ? 4 : 0; 79*8a05fd9aSRichard Henderson } else { 80*8a05fd9aSRichard Henderson env->immu_idx = env->dmmu_idx = msr_pr ? 0 : 1; 81*8a05fd9aSRichard Henderson env->immu_idx += msr_ir ? 0 : 2; 82*8a05fd9aSRichard Henderson env->dmmu_idx += msr_dr ? 0 : 2; 83*8a05fd9aSRichard Henderson env->immu_idx += msr_hv ? 4 : 0; 84*8a05fd9aSRichard Henderson env->dmmu_idx += msr_hv ? 
4 : 0; 85*8a05fd9aSRichard Henderson } 86*8a05fd9aSRichard Henderson } 87*8a05fd9aSRichard Henderson 88*8a05fd9aSRichard Henderson void hreg_compute_hflags(CPUPPCState *env) 89*8a05fd9aSRichard Henderson { 90*8a05fd9aSRichard Henderson target_ulong hflags_mask; 91*8a05fd9aSRichard Henderson 92*8a05fd9aSRichard Henderson /* We 'forget' FE0 & FE1: we'll never generate imprecise exceptions */ 93*8a05fd9aSRichard Henderson hflags_mask = (1 << MSR_VR) | (1 << MSR_AP) | (1 << MSR_SA) | 94*8a05fd9aSRichard Henderson (1 << MSR_PR) | (1 << MSR_FP) | (1 << MSR_SE) | (1 << MSR_BE) | 95*8a05fd9aSRichard Henderson (1 << MSR_LE) | (1 << MSR_VSX) | (1 << MSR_IR) | (1 << MSR_DR); 96*8a05fd9aSRichard Henderson hflags_mask |= (1ULL << MSR_CM) | (1ULL << MSR_SF) | MSR_HVB; 97*8a05fd9aSRichard Henderson hreg_compute_mem_idx(env); 98*8a05fd9aSRichard Henderson env->hflags = env->msr & hflags_mask; 99*8a05fd9aSRichard Henderson /* Merge with hflags coming from other registers */ 100*8a05fd9aSRichard Henderson env->hflags |= env->hflags_nmsr; 101*8a05fd9aSRichard Henderson } 102*8a05fd9aSRichard Henderson 103*8a05fd9aSRichard Henderson void cpu_interrupt_exittb(CPUState *cs) 104*8a05fd9aSRichard Henderson { 105*8a05fd9aSRichard Henderson if (!kvm_enabled()) { 106*8a05fd9aSRichard Henderson return; 107*8a05fd9aSRichard Henderson } 108*8a05fd9aSRichard Henderson 109*8a05fd9aSRichard Henderson if (!qemu_mutex_iothread_locked()) { 110*8a05fd9aSRichard Henderson qemu_mutex_lock_iothread(); 111*8a05fd9aSRichard Henderson cpu_interrupt(cs, CPU_INTERRUPT_EXITTB); 112*8a05fd9aSRichard Henderson qemu_mutex_unlock_iothread(); 113*8a05fd9aSRichard Henderson } else { 114*8a05fd9aSRichard Henderson cpu_interrupt(cs, CPU_INTERRUPT_EXITTB); 115*8a05fd9aSRichard Henderson } 116*8a05fd9aSRichard Henderson } 117*8a05fd9aSRichard Henderson 118*8a05fd9aSRichard Henderson int hreg_store_msr(CPUPPCState *env, target_ulong value, int alter_hv) 119*8a05fd9aSRichard Henderson { 120*8a05fd9aSRichard Henderson int 
excp; 121*8a05fd9aSRichard Henderson #if !defined(CONFIG_USER_ONLY) 122*8a05fd9aSRichard Henderson CPUState *cs = env_cpu(env); 123*8a05fd9aSRichard Henderson #endif 124*8a05fd9aSRichard Henderson 125*8a05fd9aSRichard Henderson excp = 0; 126*8a05fd9aSRichard Henderson value &= env->msr_mask; 127*8a05fd9aSRichard Henderson #if !defined(CONFIG_USER_ONLY) 128*8a05fd9aSRichard Henderson /* Neither mtmsr nor guest state can alter HV */ 129*8a05fd9aSRichard Henderson if (!alter_hv || !(env->msr & MSR_HVB)) { 130*8a05fd9aSRichard Henderson value &= ~MSR_HVB; 131*8a05fd9aSRichard Henderson value |= env->msr & MSR_HVB; 132*8a05fd9aSRichard Henderson } 133*8a05fd9aSRichard Henderson if (((value >> MSR_IR) & 1) != msr_ir || 134*8a05fd9aSRichard Henderson ((value >> MSR_DR) & 1) != msr_dr) { 135*8a05fd9aSRichard Henderson cpu_interrupt_exittb(cs); 136*8a05fd9aSRichard Henderson } 137*8a05fd9aSRichard Henderson if ((env->mmu_model & POWERPC_MMU_BOOKE) && 138*8a05fd9aSRichard Henderson ((value >> MSR_GS) & 1) != msr_gs) { 139*8a05fd9aSRichard Henderson cpu_interrupt_exittb(cs); 140*8a05fd9aSRichard Henderson } 141*8a05fd9aSRichard Henderson if (unlikely((env->flags & POWERPC_FLAG_TGPR) && 142*8a05fd9aSRichard Henderson ((value ^ env->msr) & (1 << MSR_TGPR)))) { 143*8a05fd9aSRichard Henderson /* Swap temporary saved registers with GPRs */ 144*8a05fd9aSRichard Henderson hreg_swap_gpr_tgpr(env); 145*8a05fd9aSRichard Henderson } 146*8a05fd9aSRichard Henderson if (unlikely((value >> MSR_EP) & 1) != msr_ep) { 147*8a05fd9aSRichard Henderson /* Change the exception prefix on PowerPC 601 */ 148*8a05fd9aSRichard Henderson env->excp_prefix = ((value >> MSR_EP) & 1) * 0xFFF00000; 149*8a05fd9aSRichard Henderson } 150*8a05fd9aSRichard Henderson /* 151*8a05fd9aSRichard Henderson * If PR=1 then EE, IR and DR must be 1 152*8a05fd9aSRichard Henderson * 153*8a05fd9aSRichard Henderson * Note: We only enforce this on 64-bit server processors. 
154*8a05fd9aSRichard Henderson * It appears that: 155*8a05fd9aSRichard Henderson * - 32-bit implementations supports PR=1 and EE/DR/IR=0 and MacOS 156*8a05fd9aSRichard Henderson * exploits it. 157*8a05fd9aSRichard Henderson * - 64-bit embedded implementations do not need any operation to be 158*8a05fd9aSRichard Henderson * performed when PR is set. 159*8a05fd9aSRichard Henderson */ 160*8a05fd9aSRichard Henderson if (is_book3s_arch2x(env) && ((value >> MSR_PR) & 1)) { 161*8a05fd9aSRichard Henderson value |= (1 << MSR_EE) | (1 << MSR_DR) | (1 << MSR_IR); 162*8a05fd9aSRichard Henderson } 163*8a05fd9aSRichard Henderson #endif 164*8a05fd9aSRichard Henderson env->msr = value; 165*8a05fd9aSRichard Henderson hreg_compute_hflags(env); 166*8a05fd9aSRichard Henderson #if !defined(CONFIG_USER_ONLY) 167*8a05fd9aSRichard Henderson if (unlikely(msr_pow == 1)) { 168*8a05fd9aSRichard Henderson if (!env->pending_interrupts && (*env->check_pow)(env)) { 169*8a05fd9aSRichard Henderson cs->halted = 1; 170*8a05fd9aSRichard Henderson excp = EXCP_HALTED; 171*8a05fd9aSRichard Henderson } 172*8a05fd9aSRichard Henderson } 173*8a05fd9aSRichard Henderson #endif 174*8a05fd9aSRichard Henderson 175*8a05fd9aSRichard Henderson return excp; 176*8a05fd9aSRichard Henderson } 177*8a05fd9aSRichard Henderson 178*8a05fd9aSRichard Henderson #ifndef CONFIG_USER_ONLY 179*8a05fd9aSRichard Henderson void check_tlb_flush(CPUPPCState *env, bool global) 180*8a05fd9aSRichard Henderson { 181*8a05fd9aSRichard Henderson CPUState *cs = env_cpu(env); 182*8a05fd9aSRichard Henderson 183*8a05fd9aSRichard Henderson /* Handle global flushes first */ 184*8a05fd9aSRichard Henderson if (global && (env->tlb_need_flush & TLB_NEED_GLOBAL_FLUSH)) { 185*8a05fd9aSRichard Henderson env->tlb_need_flush &= ~TLB_NEED_GLOBAL_FLUSH; 186*8a05fd9aSRichard Henderson env->tlb_need_flush &= ~TLB_NEED_LOCAL_FLUSH; 187*8a05fd9aSRichard Henderson tlb_flush_all_cpus_synced(cs); 188*8a05fd9aSRichard Henderson return; 189*8a05fd9aSRichard Henderson } 
190*8a05fd9aSRichard Henderson 191*8a05fd9aSRichard Henderson /* Then handle local ones */ 192*8a05fd9aSRichard Henderson if (env->tlb_need_flush & TLB_NEED_LOCAL_FLUSH) { 193*8a05fd9aSRichard Henderson env->tlb_need_flush &= ~TLB_NEED_LOCAL_FLUSH; 194*8a05fd9aSRichard Henderson tlb_flush(cs); 195*8a05fd9aSRichard Henderson } 196*8a05fd9aSRichard Henderson } 197*8a05fd9aSRichard Henderson #endif 198