/*
 * PowerPC emulation special registers manipulation helpers for qemu.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/main-loop.h"
#include "exec/exec-all.h"
#include "sysemu/kvm.h"
#include "helper_regs.h"
#include "power8-pmu.h"

/* Swap temporary saved registers with GPRs */
void hreg_swap_gpr_tgpr(CPUPPCState *env)
{
    target_ulong tmp;

    tmp = env->gpr[0];
    env->gpr[0] = env->tgpr[0];
    env->tgpr[0] = tmp;
    tmp = env->gpr[1];
    env->gpr[1] = env->tgpr[1];
    env->tgpr[1] = tmp;
    tmp = env->gpr[2];
    env->gpr[2] = env->tgpr[2];
    env->tgpr[2] = tmp;
    tmp = env->gpr[3];
    env->gpr[3] = env->tgpr[3];
    env->tgpr[3] = tmp;
}

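/*
 * Build the hflags word for the current machine state.  The result is
 * cached in env->hflags by hreg_compute_hflags() and handed to TCG as
 * the translation block flags, so any state that code generation
 * depends on (MSR bits, relevant SPR bits, the MMU mode) must be
 * folded in here.
 */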
static uint32_t hreg_compute_hflags_value(CPUPPCState *env)
{
    target_ulong msr = env->msr;
    uint32_t ppc_flags = env->flags;
    uint32_t hflags = 0;
    uint32_t msr_mask;

    /* Some bits come straight across from MSR. */
    QEMU_BUILD_BUG_ON(MSR_LE != HFLAGS_LE);
    QEMU_BUILD_BUG_ON(MSR_PR != HFLAGS_PR);
    QEMU_BUILD_BUG_ON(MSR_DR != HFLAGS_DR);
    QEMU_BUILD_BUG_ON(MSR_FP != HFLAGS_FP);
    msr_mask = ((1 << MSR_LE) | (1 << MSR_PR) |
                (1 << MSR_DR) | (1 << MSR_FP));

    if (ppc_flags & POWERPC_FLAG_HID0_LE) {
        /*
         * Note that MSR_LE is not set in env->msr_mask for this cpu,
         * and so will never be set in msr.
         */
        uint32_t le = extract32(env->spr[SPR_HID0], 3, 1);
        hflags |= le << MSR_LE;
    }

    if (ppc_flags & POWERPC_FLAG_DE) {
        target_ulong dbcr0 = env->spr[SPR_BOOKE_DBCR0];
        if (dbcr0 & DBCR0_ICMP) {
            hflags |= 1 << HFLAGS_SE;
        }
        if (dbcr0 & DBCR0_BRT) {
            hflags |= 1 << HFLAGS_BE;
        }
    } else {
        if (ppc_flags & POWERPC_FLAG_BE) {
            QEMU_BUILD_BUG_ON(MSR_BE != HFLAGS_BE);
            msr_mask |= 1 << MSR_BE;
        }
        if (ppc_flags & POWERPC_FLAG_SE) {
            QEMU_BUILD_BUG_ON(MSR_SE != HFLAGS_SE);
            msr_mask |= 1 << MSR_SE;
        }
    }

    if (msr_is_64bit(env, msr)) {
        hflags |= 1 << HFLAGS_64;
    }
    if ((ppc_flags & POWERPC_FLAG_SPE) && (msr & (1 << MSR_SPE))) {
        hflags |= 1 << HFLAGS_SPE;
    }
    if (ppc_flags & POWERPC_FLAG_VRE) {
        QEMU_BUILD_BUG_ON(MSR_VR != HFLAGS_VR);
        msr_mask |= 1 << MSR_VR;
    }
    if (ppc_flags & POWERPC_FLAG_VSX) {
        QEMU_BUILD_BUG_ON(MSR_VSX != HFLAGS_VSX);
        msr_mask |= 1 << MSR_VSX;
    }
    if ((ppc_flags & POWERPC_FLAG_TM) && (msr & (1ull << MSR_TM))) {
        hflags |= 1 << HFLAGS_TM;
    }
    if (env->spr[SPR_LPCR] & LPCR_GTSE) {
        hflags |= 1 << HFLAGS_GTSE;
    }
    if (env->spr[SPR_LPCR] & LPCR_HR) {
        hflags |= 1 << HFLAGS_HR;
    }
    if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMCC0) {
        hflags |= 1 << HFLAGS_PMCC0;
    }
    if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMCC1) {
        hflags |= 1 << HFLAGS_PMCC1;
    }

#ifndef CONFIG_USER_ONLY
    if (!env->has_hv_mode || (msr & (1ull << MSR_HV))) {
        hflags |= 1 << HFLAGS_HV;
    }

#if defined(TARGET_PPC64)
    if (env->pmc_ins_cnt) {
        hflags |= 1 << HFLAGS_INSN_CNT;
    }
#endif

    /*
     * This is our encoding for server processors. The architecture
     * specifies that there is no such thing as userspace with
     * translation off, however it appears that MacOS does it and some
     * 32-bit CPUs support it. Weird...
     *
     * 0 = Guest User space virtual mode
     * 1 = Guest Kernel space virtual mode
     * 2 = Guest User space real mode
     * 3 = Guest Kernel space real mode
     * 4 = HV User space virtual mode
     * 5 = HV Kernel space virtual mode
     * 6 = HV User space real mode
     * 7 = HV Kernel space real mode
     *
     * For BookE, we need 8 MMU modes as follows:
     *
     * 0 = AS 0 HV User space
     * 1 = AS 0 HV Kernel space
     * 2 = AS 1 HV User space
     * 3 = AS 1 HV Kernel space
     * 4 = AS 0 Guest User space
     * 5 = AS 0 Guest Kernel space
     * 6 = AS 1 Guest User space
     * 7 = AS 1 Guest Kernel space
     */
    unsigned immu_idx, dmmu_idx;
    dmmu_idx = msr & (1 << MSR_PR) ? 0 : 1;
    if (env->mmu_model == POWERPC_MMU_BOOKE ||
        env->mmu_model == POWERPC_MMU_BOOKE206) {
        dmmu_idx |= msr & (1 << MSR_GS) ? 4 : 0;
        immu_idx = dmmu_idx;
        immu_idx |= msr & (1 << MSR_IS) ? 2 : 0;
        dmmu_idx |= msr & (1 << MSR_DS) ? 2 : 0;
    } else {
        dmmu_idx |= msr & (1ull << MSR_HV) ? 4 : 0;
        immu_idx = dmmu_idx;
        immu_idx |= msr & (1 << MSR_IR) ? 0 : 2;
        dmmu_idx |= msr & (1 << MSR_DR) ? 0 : 2;
    }
    hflags |= immu_idx << HFLAGS_IMMU_IDX;
    hflags |= dmmu_idx << HFLAGS_DMMU_IDX;
#endif

    return hflags | (msr & msr_mask);
}

void hreg_compute_hflags(CPUPPCState *env)
{
    env->hflags = hreg_compute_hflags_value(env);
}

#ifdef CONFIG_DEBUG_TCG
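/*
 * Debug variant of cpu_get_tb_cpu_state(): besides returning the TB
 * state, recompute hflags from scratch and abort if the cached value
 * in env->hflags has gone stale.
 */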
void cpu_get_tb_cpu_state(CPUPPCState *env, target_ulong *pc,
                          target_ulong *cs_base, uint32_t *flags)
{
    uint32_t hflags_current = env->hflags;
    uint32_t hflags_rebuilt;

    *pc = env->nip;
    *cs_base = 0;
    *flags = hflags_current;

    hflags_rebuilt = hreg_compute_hflags_value(env);
    if (unlikely(hflags_current != hflags_rebuilt)) {
        cpu_abort(env_cpu(env),
                  "TCG hflags mismatch (current:0x%08x rebuilt:0x%08x)\n",
                  hflags_current, hflags_rebuilt);
    }
}
#endif

void cpu_interrupt_exittb(CPUState *cs)
{
    /*
     * We don't need to worry about translation blocks
     * when running with KVM.
     */
    if (kvm_enabled()) {
        return;
    }

    if (!qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        cpu_interrupt(cs, CPU_INTERRUPT_EXITTB);
        qemu_mutex_unlock_iothread();
    } else {
        cpu_interrupt(cs, CPU_INTERRUPT_EXITTB);
    }
}

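/*
 * Store a new MSR value.  The value is masked with env->msr_mask, HV
 * is only allowed to change when alter_hv is set and the CPU is
 * already in hypervisor state, and the usual side effects (TB exit,
 * TGPR swap, exception prefix, hflags) are applied.  Returns
 * EXCP_HALTED when the new MSR puts the CPU into power-saving mode,
 * 0 otherwise.
 */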
int hreg_store_msr(CPUPPCState *env, target_ulong value, int alter_hv)
{
    int excp;
#if !defined(CONFIG_USER_ONLY)
    CPUState *cs = env_cpu(env);
#endif

    excp = 0;
    value &= env->msr_mask;
#if !defined(CONFIG_USER_ONLY)
    /* Neither mtmsr nor guest state can alter HV */
    if (!alter_hv || !(env->msr & MSR_HVB)) {
        value &= ~MSR_HVB;
        value |= env->msr & MSR_HVB;
    }
    if (((value >> MSR_IR) & 1) != msr_ir ||
        ((value >> MSR_DR) & 1) != msr_dr) {
        cpu_interrupt_exittb(cs);
    }
    if ((env->mmu_model == POWERPC_MMU_BOOKE ||
         env->mmu_model == POWERPC_MMU_BOOKE206) &&
        ((value >> MSR_GS) & 1) != msr_gs) {
        cpu_interrupt_exittb(cs);
    }
    if (unlikely((env->flags & POWERPC_FLAG_TGPR) &&
                 ((value ^ env->msr) & (1 << MSR_TGPR)))) {
        /* Swap temporary saved registers with GPRs */
        hreg_swap_gpr_tgpr(env);
    }
    if (unlikely((value >> MSR_EP) & 1) != msr_ep) {
        /* Change the exception prefix on PowerPC 601 */
        env->excp_prefix = ((value >> MSR_EP) & 1) * 0xFFF00000;
    }
    /*
     * If PR=1 then EE, IR and DR must be 1
     *
     * Note: We only enforce this on 64-bit server processors.
     * It appears that:
     * - 32-bit implementations support PR=1 and EE/DR/IR=0 and MacOS
     *   exploits it.
     * - 64-bit embedded implementations do not need any operation to be
     *   performed when PR is set.
     */
    if (is_book3s_arch2x(env) && ((value >> MSR_PR) & 1)) {
        value |= (1 << MSR_EE) | (1 << MSR_DR) | (1 << MSR_IR);
    }
#endif
    env->msr = value;
    hreg_compute_hflags(env);
#if !defined(CONFIG_USER_ONLY)
    if (unlikely(msr_pow == 1)) {
        if (!env->pending_interrupts && (*env->check_pow)(env)) {
            cs->halted = 1;
            excp = EXCP_HALTED;
        }
    }
#endif

    return excp;
}

#ifdef CONFIG_SOFTMMU
void store_40x_sler(CPUPPCState *env, uint32_t val)
{
    /* XXX: TO BE FIXED */
    if (val != 0x00000000) {
        cpu_abort(env_cpu(env),
                  "Little-endian regions are not supported by now\n");
    }
    env->spr[SPR_405_SLER] = val;
}
#endif /* CONFIG_SOFTMMU */

#ifndef CONFIG_USER_ONLY
void check_tlb_flush(CPUPPCState *env, bool global)
{
    CPUState *cs = env_cpu(env);

    /* Handle global flushes first */
    if (global && (env->tlb_need_flush & TLB_NEED_GLOBAL_FLUSH)) {
        env->tlb_need_flush &= ~TLB_NEED_GLOBAL_FLUSH;
        env->tlb_need_flush &= ~TLB_NEED_LOCAL_FLUSH;
        tlb_flush_all_cpus_synced(cs);
        return;
    }

    /* Then handle local ones */
    if (env->tlb_need_flush & TLB_NEED_LOCAL_FLUSH) {
        env->tlb_need_flush &= ~TLB_NEED_LOCAL_FLUSH;
        tlb_flush(cs);
    }
}
#endif