xref: /openbmc/qemu/target/ppc/helper_regs.c (revision 7c8d2fc4)
1 /*
2  *  PowerPC emulation special registers manipulation helpers for qemu.
3  *
4  *  Copyright (c) 2003-2007 Jocelyn Mayer
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "qemu/main-loop.h"
23 #include "exec/exec-all.h"
24 #include "sysemu/kvm.h"
25 #include "helper_regs.h"
26 #include "power8-pmu.h"
27 
28 /* Swap temporary saved registers with GPRs */
29 void hreg_swap_gpr_tgpr(CPUPPCState *env)
30 {
31     target_ulong tmp;
32 
33     tmp = env->gpr[0];
34     env->gpr[0] = env->tgpr[0];
35     env->tgpr[0] = tmp;
36     tmp = env->gpr[1];
37     env->gpr[1] = env->tgpr[1];
38     env->tgpr[1] = tmp;
39     tmp = env->gpr[2];
40     env->gpr[2] = env->tgpr[2];
41     env->tgpr[2] = tmp;
42     tmp = env->gpr[3];
43     env->gpr[3] = env->tgpr[3];
44     env->tgpr[3] = tmp;
45 }
46 
/*
 * Rebuild the TB flags word from current CPU state.
 *
 * Packs selected MSR bits, debug-register state, LPCR/MMCR0-derived bits
 * and (for softmmu) the instruction/data MMU indexes into a single
 * uint32_t.  The bit positions of directly-copied MSR bits must match the
 * corresponding HFLAGS_* positions; the QEMU_BUILD_BUG_ON checks below
 * enforce that at compile time.
 */
static uint32_t hreg_compute_hflags_value(CPUPPCState *env)
{
    target_ulong msr = env->msr;
    uint32_t ppc_flags = env->flags;
    uint32_t hflags = 0;
    uint32_t msr_mask;

    /* Some bits come straight across from MSR. */
    QEMU_BUILD_BUG_ON(MSR_LE != HFLAGS_LE);
    QEMU_BUILD_BUG_ON(MSR_PR != HFLAGS_PR);
    QEMU_BUILD_BUG_ON(MSR_DR != HFLAGS_DR);
    QEMU_BUILD_BUG_ON(MSR_FP != HFLAGS_FP);
    msr_mask = ((1 << MSR_LE) | (1 << MSR_PR) |
                (1 << MSR_DR) | (1 << MSR_FP));

    if (ppc_flags & POWERPC_FLAG_DE) {
        /*
         * BookE debug: single-step and branch-trace come from DBCR0
         * rather than MSR[SE]/MSR[BE].
         */
        target_ulong dbcr0 = env->spr[SPR_BOOKE_DBCR0];
        if (dbcr0 & DBCR0_ICMP) {
            hflags |= 1 << HFLAGS_SE;
        }
        if (dbcr0 & DBCR0_BRT) {
            hflags |= 1 << HFLAGS_BE;
        }
    } else {
        /* Non-BookE: take SE/BE directly from MSR when implemented. */
        if (ppc_flags & POWERPC_FLAG_BE) {
            QEMU_BUILD_BUG_ON(MSR_BE != HFLAGS_BE);
            msr_mask |= 1 << MSR_BE;
        }
        if (ppc_flags & POWERPC_FLAG_SE) {
            QEMU_BUILD_BUG_ON(MSR_SE != HFLAGS_SE);
            msr_mask |= 1 << MSR_SE;
        }
    }

    if (msr_is_64bit(env, msr)) {
        hflags |= 1 << HFLAGS_64;
    }
    /* SPE/TM are gated on both CPU capability and the MSR enable bit. */
    if ((ppc_flags & POWERPC_FLAG_SPE) && (msr & (1 << MSR_SPE))) {
        hflags |= 1 << HFLAGS_SPE;
    }
    if (ppc_flags & POWERPC_FLAG_VRE) {
        QEMU_BUILD_BUG_ON(MSR_VR != HFLAGS_VR);
        msr_mask |= 1 << MSR_VR;
    }
    if (ppc_flags & POWERPC_FLAG_VSX) {
        QEMU_BUILD_BUG_ON(MSR_VSX != HFLAGS_VSX);
        msr_mask |= 1 << MSR_VSX;
    }
    if ((ppc_flags & POWERPC_FLAG_TM) && (msr & (1ull << MSR_TM))) {
        hflags |= 1 << HFLAGS_TM;
    }
    if (env->spr[SPR_LPCR] & LPCR_GTSE) {
        hflags |= 1 << HFLAGS_GTSE;
    }
    if (env->spr[SPR_LPCR] & LPCR_HR) {
        hflags |= 1 << HFLAGS_HR;
    }
    /* PMU problem-state access control bits from MMCR0. */
    if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMCC0) {
        hflags |= 1 << HFLAGS_PMCC0;
    }
    if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMCC1) {
        hflags |= 1 << HFLAGS_PMCC1;
    }

#ifndef CONFIG_USER_ONLY
    /* CPUs without an HV mode are treated as always-HV. */
    if (!env->has_hv_mode || (msr & (1ull << MSR_HV))) {
        hflags |= 1 << HFLAGS_HV;
    }

#if defined(TARGET_PPC64)
    if (env->pmc_ins_cnt) {
        hflags |= 1 << HFLAGS_INSN_CNT;
    }
#endif

    /*
     * This is our encoding for server processors. The architecture
     * specifies that there is no such thing as userspace with
     * translation off, however it appears that MacOS does it and some
     * 32-bit CPUs support it. Weird...
     *
     *   0 = Guest User space virtual mode
     *   1 = Guest Kernel space virtual mode
     *   2 = Guest User space real mode
     *   3 = Guest Kernel space real mode
     *   4 = HV User space virtual mode
     *   5 = HV Kernel space virtual mode
     *   6 = HV User space real mode
     *   7 = HV Kernel space real mode
     *
     * For BookE, we need 8 MMU modes as follow:
     *
     *  0 = AS 0 HV User space
     *  1 = AS 0 HV Kernel space
     *  2 = AS 1 HV User space
     *  3 = AS 1 HV Kernel space
     *  4 = AS 0 Guest User space
     *  5 = AS 0 Guest Kernel space
     *  6 = AS 1 Guest User space
     *  7 = AS 1 Guest Kernel space
     */
    unsigned immu_idx, dmmu_idx;
    dmmu_idx = msr & (1 << MSR_PR) ? 0 : 1;
    if (env->mmu_model == POWERPC_MMU_BOOKE ||
        env->mmu_model == POWERPC_MMU_BOOKE206) {
        dmmu_idx |= msr & (1 << MSR_GS) ? 4 : 0;
        immu_idx = dmmu_idx;
        immu_idx |= msr & (1 << MSR_IS) ? 2 : 0;
        dmmu_idx |= msr & (1 << MSR_DS) ? 2 : 0;
    } else {
        dmmu_idx |= msr & (1ull << MSR_HV) ? 4 : 0;
        immu_idx = dmmu_idx;
        immu_idx |= msr & (1 << MSR_IR) ? 0 : 2;
        dmmu_idx |= msr & (1 << MSR_DR) ? 0 : 2;
    }
    hflags |= immu_idx << HFLAGS_IMMU_IDX;
    hflags |= dmmu_idx << HFLAGS_DMMU_IDX;
#endif

    /* Finally merge in the MSR bits selected above. */
    return hflags | (msr & msr_mask);
}
168 
/*
 * Recompute and cache the TB flags in env->hflags.  Must be called
 * whenever any state that feeds hreg_compute_hflags_value() changes
 * (see the CONFIG_DEBUG_TCG consistency check in cpu_get_tb_cpu_state).
 */
void hreg_compute_hflags(CPUPPCState *env)
{
    env->hflags = hreg_compute_hflags_value(env);
}
173 
174 #ifdef CONFIG_DEBUG_TCG
175 void cpu_get_tb_cpu_state(CPUPPCState *env, target_ulong *pc,
176                           target_ulong *cs_base, uint32_t *flags)
177 {
178     uint32_t hflags_current = env->hflags;
179     uint32_t hflags_rebuilt;
180 
181     *pc = env->nip;
182     *cs_base = 0;
183     *flags = hflags_current;
184 
185     hflags_rebuilt = hreg_compute_hflags_value(env);
186     if (unlikely(hflags_current != hflags_rebuilt)) {
187         cpu_abort(env_cpu(env),
188                   "TCG hflags mismatch (current:0x%08x rebuilt:0x%08x)\n",
189                   hflags_current, hflags_rebuilt);
190     }
191 }
192 #endif
193 
194 void cpu_interrupt_exittb(CPUState *cs)
195 {
196     /*
197      * We don't need to worry about translation blocks
198      * when running with KVM.
199      */
200     if (kvm_enabled()) {
201         return;
202     }
203 
204     if (!qemu_mutex_iothread_locked()) {
205         qemu_mutex_lock_iothread();
206         cpu_interrupt(cs, CPU_INTERRUPT_EXITTB);
207         qemu_mutex_unlock_iothread();
208     } else {
209         cpu_interrupt(cs, CPU_INTERRUPT_EXITTB);
210     }
211 }
212 
/*
 * Write a new value to the MSR, applying all side effects of an MSR
 * update (TB invalidation, TGPR bank swap, exception prefix, hflags
 * recomputation, power-save entry).
 *
 * @value:    the candidate MSR value; it is masked by env->msr_mask
 *            before use.
 * @alter_hv: non-zero if the caller is allowed to change MSR[HV]
 *            (e.g. interrupt delivery rather than a guest mtmsr).
 *
 * Returns EXCP_HALTED if the write put the CPU into power-save halt,
 * otherwise 0.
 */
int hreg_store_msr(CPUPPCState *env, target_ulong value, int alter_hv)
{
    int excp;
#if !defined(CONFIG_USER_ONLY)
    CPUState *cs = env_cpu(env);
#endif

    excp = 0;
    value &= env->msr_mask;
#if !defined(CONFIG_USER_ONLY)
    /* Neither mtmsr nor guest state can alter HV */
    if (!alter_hv || !(env->msr & MSR_HVB)) {
        /* Preserve the current HV bit, discarding the incoming one. */
        value &= ~MSR_HVB;
        value |= env->msr & MSR_HVB;
    }
    /* A translation-mode change invalidates cached translation blocks. */
    if (((value >> MSR_IR) & 1) != msr_ir ||
        ((value >> MSR_DR) & 1) != msr_dr) {
        cpu_interrupt_exittb(cs);
    }
    /* Likewise for a BookE guest-state (GS) change. */
    if ((env->mmu_model == POWERPC_MMU_BOOKE ||
         env->mmu_model == POWERPC_MMU_BOOKE206) &&
        ((value >> MSR_GS) & 1) != msr_gs) {
        cpu_interrupt_exittb(cs);
    }
    if (unlikely((env->flags & POWERPC_FLAG_TGPR) &&
                 ((value ^ env->msr) & (1 << MSR_TGPR)))) {
        /* Swap temporary saved registers with GPRs */
        hreg_swap_gpr_tgpr(env);
    }
    /* MSR[EP] selects the exception vector prefix (0x00000000/0xFFF00000). */
    if (unlikely((value >> MSR_EP) & 1) != msr_ep) {
        env->excp_prefix = ((value >> MSR_EP) & 1) * 0xFFF00000;
    }
    /*
     * If PR=1 then EE, IR and DR must be 1
     *
     * Note: We only enforce this on 64-bit server processors.
     * It appears that:
     * - 32-bit implementations supports PR=1 and EE/DR/IR=0 and MacOS
     *   exploits it.
     * - 64-bit embedded implementations do not need any operation to be
     *   performed when PR is set.
     */
    if (is_book3s_arch2x(env) && ((value >> MSR_PR) & 1)) {
        value |= (1 << MSR_EE) | (1 << MSR_DR) | (1 << MSR_IR);
    }
#endif
    /* Commit the new MSR and refresh the derived TB flags. */
    env->msr = value;
    hreg_compute_hflags(env);
#if !defined(CONFIG_USER_ONLY)
    /* Setting MSR[POW] may halt the CPU if the model allows it. */
    if (unlikely(msr_pow == 1)) {
        if (!env->pending_interrupts && (*env->check_pow)(env)) {
            cs->halted = 1;
            excp = EXCP_HALTED;
        }
    }
#endif

    return excp;
}
272 
273 #ifdef CONFIG_SOFTMMU
274 void store_40x_sler(CPUPPCState *env, uint32_t val)
275 {
276     /* XXX: TO BE FIXED */
277     if (val != 0x00000000) {
278         cpu_abort(env_cpu(env),
279                   "Little-endian regions are not supported by now\n");
280     }
281     env->spr[SPR_405_SLER] = val;
282 }
283 #endif /* CONFIG_SOFTMMU */
284 
285 #ifndef CONFIG_USER_ONLY
286 void check_tlb_flush(CPUPPCState *env, bool global)
287 {
288     CPUState *cs = env_cpu(env);
289 
290     /* Handle global flushes first */
291     if (global && (env->tlb_need_flush & TLB_NEED_GLOBAL_FLUSH)) {
292         env->tlb_need_flush &= ~TLB_NEED_GLOBAL_FLUSH;
293         env->tlb_need_flush &= ~TLB_NEED_LOCAL_FLUSH;
294         tlb_flush_all_cpus_synced(cs);
295         return;
296     }
297 
298     /* Then handle local ones */
299     if (env->tlb_need_flush & TLB_NEED_LOCAL_FLUSH) {
300         env->tlb_need_flush &= ~TLB_NEED_LOCAL_FLUSH;
301         tlb_flush(cs);
302     }
303 }
304 #endif
305