1 /*
2 * Miscellaneous PowerPC emulation helpers for QEMU.
3 *
4 * Copyright (c) 2003-2007 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "qemu/log.h"
22 #include "cpu.h"
23 #include "exec/cputlb.h"
24 #include "exec/helper-proto.h"
25 #include "qemu/error-report.h"
26 #include "qemu/main-loop.h"
27 #include "mmu-book3s-v3.h"
28 #include "hw/ppc/ppc.h"
29
30 #include "helper_regs.h"
31
32 /*****************************************************************************/
33 /* SPR accesses */
void helper_load_dump_spr(CPUPPCState *env, uint32_t sprn)
{
    /* Trace an SPR read: log the SPR number in decimal and hex. */
    target_ulong val = env->spr[sprn];

    qemu_log("Read SPR %d %03x => " TARGET_FMT_lx "\n", sprn, sprn, val);
}
39
void helper_store_dump_spr(CPUPPCState *env, uint32_t sprn)
{
    /* Trace an SPR write: log the SPR number in decimal and hex. */
    target_ulong val = env->spr[sprn];

    qemu_log("Write SPR %d %03x <= " TARGET_FMT_lx "\n", sprn, sprn, val);
}
45
void helper_spr_core_write_generic(CPUPPCState *env, uint32_t sprn,
                                   target_ulong val)
{
    /*
     * Write a core-shared SPR: on a single-threaded core only this
     * thread's copy is updated, otherwise the value is mirrored into
     * every sibling thread of the core.
     */
    CPUState *cs = env_cpu(env);

    if (!ppc_cpu_core_single_threaded(cs)) {
        CPUState *ccs;

        THREAD_SIBLING_FOREACH(cs, ccs) {
            POWERPC_CPU(ccs)->env.spr[sprn] = val;
        }
    } else {
        env->spr[sprn] = val;
    }
}
62
void helper_spr_write_CTRL(CPUPPCState *env, uint32_t sprn,
                           target_ulong val)
{
    /*
     * CTRL bit 0 (RUN) is per-thread; the byte starting at bit 8
     * mirrors the RUN state of each thread of the core, indexed by
     * TIR, so a write here must be reflected into every sibling.
     */
    CPUState *cs = env_cpu(env);
    CPUState *ccs;
    uint32_t run = val & 1;
    uint32_t shift = 8 + env->spr[SPR_TIR];
    uint32_t ts = run << shift;
    uint32_t ts_mask = ~(1U << shift);

    assert(sprn == SPR_CTRL);

    /* Update this thread's own RUN bit first. */
    env->spr[sprn] = (env->spr[sprn] & ~1U) | run;

    /* Then publish this thread's state bit into each sibling's CTRL. */
    THREAD_SIBLING_FOREACH(cs, ccs) {
        CPUPPCState *cenv = &POWERPC_CPU(ccs)->env;

        cenv->spr[sprn] = (cenv->spr[sprn] & ts_mask) | ts;
    }
}
86
87
88 #ifdef TARGET_PPC64
/*
 * Raise a Hypervisor Facility Unavailable interrupt because HFSCR
 * facility 'bit' is disabled.  'caller' is a human-readable tag for
 * the log; 'cause' is handed to the exception machinery; 'raddr' is
 * the helper return address (from GETPC()) used to restore state.
 */
static void raise_hv_fu_exception(CPUPPCState *env, uint32_t bit,
                                  const char *caller, uint32_t cause,
                                  uintptr_t raddr)
{
    qemu_log_mask(CPU_LOG_INT, "HV Facility %d is unavailable (%s)\n",
                  bit, caller);

    /* Clear any stale interrupt-cause (IC) field in HFSCR. */
    env->spr[SPR_HFSCR] &= ~((target_ulong)FSCR_IC_MASK << FSCR_IC_POS);

    raise_exception_err_ra(env, POWERPC_EXCP_HV_FU, cause, raddr);
}
100
/*
 * Raise a Facility Unavailable interrupt for an access to SPR 'sprn'
 * gated by FSCR facility 'bit'.  The cause code is recorded in the
 * FSCR interrupt-cause (IC) field before the exception is raised.
 */
static void raise_fu_exception(CPUPPCState *env, uint32_t bit,
                               uint32_t sprn, uint32_t cause,
                               uintptr_t raddr)
{
    qemu_log("Facility SPR %d is unavailable (SPR FSCR:%d)\n", sprn, bit);

    /* Replace any previous cause in the FSCR IC field with this one. */
    env->spr[SPR_FSCR] &= ~((target_ulong)FSCR_IC_MASK << FSCR_IC_POS);
    cause &= FSCR_IC_MASK;
    env->spr[SPR_FSCR] |= (target_ulong)cause << FSCR_IC_POS;

    raise_exception_err_ra(env, POWERPC_EXCP_FU, 0, raddr);
}
113 #endif
114
/*
 * Check that hypervisor facility 'bit' is enabled in HFSCR and raise
 * a HV Facility Unavailable interrupt if not.  The check applies only
 * when the CPU implements HV mode (msr_mask has MSR_HVB) and is not
 * currently executing in hypervisor state.
 */
void helper_hfscr_facility_check(CPUPPCState *env, uint32_t bit,
                                 const char *caller, uint32_t cause)
{
#ifdef TARGET_PPC64
    if ((env->msr_mask & MSR_HVB) && !FIELD_EX64(env->msr, MSR, HV) &&
        !(env->spr[SPR_HFSCR] & (1UL << bit))) {
        raise_hv_fu_exception(env, bit, caller, cause, GETPC());
    }
#endif
}
125
void helper_fscr_facility_check(CPUPPCState *env, uint32_t bit,
                                uint32_t sprn, uint32_t cause)
{
#ifdef TARGET_PPC64
    /* Raise a Facility Unavailable interrupt unless FSCR enables it. */
    if (!(env->spr[SPR_FSCR] & (1ULL << bit))) {
        raise_fu_exception(env, bit, sprn, cause, GETPC());
    }
#endif
}
137
void helper_msr_facility_check(CPUPPCState *env, uint32_t bit,
                               uint32_t sprn, uint32_t cause)
{
#ifdef TARGET_PPC64
    /* Raise a Facility Unavailable interrupt unless the MSR bit is set. */
    if (!(env->msr & (1ULL << bit))) {
        raise_fu_exception(env, bit, sprn, cause, GETPC());
    }
#endif
}
149
150 #if !defined(CONFIG_USER_ONLY)
151
152 #ifdef TARGET_PPC64
static void helper_mmcr0_facility_check(CPUPPCState *env, uint32_t bit,
                                        uint32_t sprn, uint32_t cause)
{
    /* Only problem-state (PR=1) accesses are gated by MMCR0. */
    if (!FIELD_EX64(env->msr, MSR, PR)) {
        return;
    }
    if (env->spr[SPR_POWER_MMCR0] & (1ULL << bit)) {
        return;
    }
    raise_fu_exception(env, bit, sprn, cause, GETPC());
}
161 #endif
162
void helper_store_sdr1(CPUPPCState *env, target_ulong val)
{
    /* No-op if the value is unchanged. */
    if (env->spr[SPR_SDR1] == val) {
        return;
    }
    /* A changed SDR1 invalidates cached translations: flush the TLB. */
    ppc_store_sdr1(env, val);
    tlb_flush(env_cpu(env));
}
170
171 #if defined(TARGET_PPC64)
/*
 * Store the Partition Table Control Register.  The new value is
 * validated, stored (to all sibling threads when the LPAR spans the
 * core), and the TLB is flushed since a partition table change
 * invalidates cached translations.
 */
void helper_store_ptcr(CPUPPCState *env, target_ulong val)
{
    if (env->spr[SPR_PTCR] != val) {
        CPUState *cs = env_cpu(env);
        PowerPCCPU *cpu = env_archcpu(env);
        target_ulong ptcr_mask = PTCR_PATB | PTCR_PATS;
        target_ulong patbsize = val & PTCR_PATS;

        qemu_log_mask(CPU_LOG_MMU, "%s: " TARGET_FMT_lx "\n", __func__, val);

        /* Not reachable with a virtual hypervisor or a pre-v3.00 MMU. */
        assert(!cpu->vhyp);
        assert(env->mmu_model & POWERPC_MMU_3_00);

        /* Reserved bits are reported and then cleared. */
        if (val & ~ptcr_mask) {
            error_report("Invalid bits 0x"TARGET_FMT_lx" set in PTCR",
                         val & ~ptcr_mask);
            val &= ptcr_mask;
        }

        /* PATS values above 24 are rejected; the write is dropped. */
        if (patbsize > 24) {
            error_report("Invalid Partition Table size 0x" TARGET_FMT_lx
                         " stored in PTCR", patbsize);
            return;
        }

        if (ppc_cpu_lpar_single_threaded(cs)) {
            env->spr[SPR_PTCR] = val;
            tlb_flush(cs);
        } else {
            /* PTCR is LPAR-wide: mirror it to every sibling thread. */
            CPUState *ccs;

            THREAD_SIBLING_FOREACH(cs, ccs) {
                PowerPCCPU *ccpu = POWERPC_CPU(ccs);
                CPUPPCState *cenv = &ccpu->env;
                cenv->spr[SPR_PTCR] = val;
                tlb_flush(ccs);
            }
        }
    }
}
212
void helper_store_pcr(CPUPPCState *env, target_ulong value)
{
    /* Keep only the bits permitted by this CPU class's PCR mask. */
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(env_archcpu(env));

    env->spr[SPR_PCR] = value & pcc->pcr_mask;
}
220
/* TCG helper wrapper: store CIABR via the shared ppc_store_ciabr(). */
void helper_store_ciabr(CPUPPCState *env, target_ulong value)
{
    ppc_store_ciabr(env, value);
}
225
/* TCG helper wrapper: store DAWR0 via the shared ppc_store_dawr0(). */
void helper_store_dawr0(CPUPPCState *env, target_ulong value)
{
    ppc_store_dawr0(env, value);
}
230
/* TCG helper wrapper: store DAWRX0 via the shared ppc_store_dawrx0(). */
void helper_store_dawrx0(CPUPPCState *env, target_ulong value)
{
    ppc_store_dawrx0(env, value);
}
235
/* TCG helper wrapper: store DAWR1 via the shared ppc_store_dawr1(). */
void helper_store_dawr1(CPUPPCState *env, target_ulong value)
{
    ppc_store_dawr1(env, value);
}
240
/* TCG helper wrapper: store DAWRX1 via the shared ppc_store_dawrx1(). */
void helper_store_dawrx1(CPUPPCState *env, target_ulong value)
{
    ppc_store_dawrx1(env, value);
}
245
246 /*
247 * DPDES register is shared. Each bit reflects the state of the
248 * doorbell interrupt of a thread of the same core.
249 */
/*
 * Read DPDES: collect the pending-doorbell state of every thread of
 * the core into one bitmask, indexed by each thread's TIR.  Raises a
 * HV Facility Unavailable interrupt if HFSCR denies msgsnd/DPDES.
 */
target_ulong helper_load_dpdes(CPUPPCState *env)
{
    CPUState *cs = env_cpu(env);
    CPUState *ccs;
    target_ulong dpdes = 0;

    helper_hfscr_facility_check(env, HFSCR_MSGP, "load DPDES", HFSCR_IC_MSGP);

    /* DPDES behaves as 1-thread in LPAR-per-thread mode */
    if (ppc_cpu_lpar_single_threaded(cs)) {
        if (env->pending_interrupts & PPC_INTERRUPT_DOORBELL) {
            dpdes = 1;
        }
        return dpdes;
    }

    /* Take the BQL while sampling the siblings' interrupt state. */
    bql_lock();
    THREAD_SIBLING_FOREACH(cs, ccs) {
        PowerPCCPU *ccpu = POWERPC_CPU(ccs);
        CPUPPCState *cenv = &ccpu->env;
        uint32_t thread_id = ppc_cpu_tir(ccpu);

        if (cenv->pending_interrupts & PPC_INTERRUPT_DOORBELL) {
            dpdes |= (0x1 << thread_id);
        }
    }
    bql_unlock();

    return dpdes;
}
280
/*
 * Write DPDES: raise or clear the doorbell interrupt of each thread
 * of the core according to the bit at that thread's TIR position.
 * Raises a HV Facility Unavailable interrupt if HFSCR denies it.
 */
void helper_store_dpdes(CPUPPCState *env, target_ulong val)
{
    PowerPCCPU *cpu = env_archcpu(env);
    CPUState *cs = env_cpu(env);
    CPUState *ccs;

    helper_hfscr_facility_check(env, HFSCR_MSGP, "store DPDES", HFSCR_IC_MSGP);

    /* DPDES behaves as 1-thread in LPAR-per-thread mode */
    if (ppc_cpu_lpar_single_threaded(cs)) {
        ppc_set_irq(cpu, PPC_INTERRUPT_DOORBELL, val & 0x1);
        return;
    }

    /* Does iothread need to be locked for walking CPU list? */
    bql_lock();
    THREAD_SIBLING_FOREACH(cs, ccs) {
        PowerPCCPU *ccpu = POWERPC_CPU(ccs);
        uint32_t thread_id = ppc_cpu_tir(ccpu);

        ppc_set_irq(ccpu, PPC_INTERRUPT_DOORBELL, val & (0x1 << thread_id));
    }
    bql_unlock();
}
305
306 /*
307 * qemu-user breaks with pnv headers, so they go under ifdefs for now.
308 * A clean up may be to move powernv specific registers and helpers into
309 * target/ppc/pnv_helper.c
310 */
311 #include "hw/ppc/pnv_core.h"
312
313 /* Indirect SCOM (SPRC/SPRD) access to SCRATCH0-7 are implemented. */
void helper_store_sprc(CPUPPCState *env, target_ulong val)
{
    /* Only the 0x3f8 bit span of SPRC is implemented; reject the rest. */
    const target_ulong valid_mask = 0x3f8ULL;

    if (val & ~valid_mask) {
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid SPRC register value "
                      TARGET_FMT_lx"\n", val);
        return;
    }
    env->spr[SPR_POWER_SPRC] = val;
}
323
helper_load_sprd(CPUPPCState * env)324 target_ulong helper_load_sprd(CPUPPCState *env)
325 {
326 /*
327 * SPRD is a HV-only register for Power CPUs, so this will only be
328 * accessed by powernv machines.
329 */
330 PowerPCCPU *cpu = env_archcpu(env);
331 PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
332
333 if (pcc->load_sprd) {
334 return pcc->load_sprd(env);
335 }
336
337 return 0;
338 }
339
void helper_store_sprd(CPUPPCState *env, target_ulong val)
{
    /* Delegate the write to the CPU class; ignore it when unimplemented. */
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(env_archcpu(env));

    if (pcc->store_sprd) {
        pcc->store_sprd(env, val);
    }
}
349
helper_load_pmsr(CPUPPCState * env)350 target_ulong helper_load_pmsr(CPUPPCState *env)
351 {
352 target_ulong lowerps = extract64(env->spr[SPR_PMCR], PPC_BIT_NR(15), 8);
353 target_ulong val = 0;
354
355 val |= PPC_BIT(63); /* verion 0x1 (POWER9/10) */
356 /* Pmin = 0 */
357 /* XXX: POWER9 should be 3 */
358 val |= 4ULL << PPC_BIT_NR(31); /* Pmax */
359 val |= lowerps << PPC_BIT_NR(15); /* Local actual Pstate */
360 val |= lowerps << PPC_BIT_NR(7); /* Global actual Pstate */
361
362 return val;
363 }
364
/* Update a single thread's PMCR value. */
static void ppc_set_pmcr(PowerPCCPU *cpu, target_ulong val)
{
    cpu->env.spr[SPR_PMCR] = val;
}
369
/*
 * Store PMCR: sanitize reserved/read-only fields, then mirror the
 * value to all threads of the core unless each thread is its own
 * LPAR.
 */
void helper_store_pmcr(CPUPPCState *env, target_ulong val)
{
    PowerPCCPU *cpu = env_archcpu(env);
    CPUState *cs = env_cpu(env);
    CPUState *ccs;

    /* Leave version field unchanged (0x1) */
    val &= ~PPC_BITMASK(60, 63);
    val |= PPC_BIT(63);

    val &= ~PPC_BITMASK(0, 7); /* UpperPS ignored */
    if (val & PPC_BITMASK(16, 59)) {
        qemu_log_mask(LOG_GUEST_ERROR, "Non-zero PMCR reserved bits "
                      TARGET_FMT_lx"\n", val);
        val &= ~PPC_BITMASK(16, 59);
    }

    /* PMCR behaves as 1-thread in LPAR-per-thread mode */
    if (ppc_cpu_lpar_single_threaded(cs)) {
        ppc_set_pmcr(cpu, val);
        return;
    }

    /* Does iothread need to be locked for walking CPU list? */
    bql_lock();
    THREAD_SIBLING_FOREACH(cs, ccs) {
        PowerPCCPU *ccpu = POWERPC_CPU(ccs);
        ppc_set_pmcr(ccpu, val);
    }
    bql_unlock();
}
401
402 #endif /* defined(TARGET_PPC64) */
403
/*
 * Store PID: only the low 32 bits are kept, and the TLB is flushed
 * since cached translations depend on the current PID.
 */
void helper_store_pidr(CPUPPCState *env, target_ulong val)
{
    env->spr[SPR_BOOKS_PID] = (uint32_t)val;
    tlb_flush(env_cpu(env));
}
409
/* Store LPIDR (low 32 bits) and flush the TLB. */
void helper_store_lpidr(CPUPPCState *env, target_ulong val)
{
    env->spr[SPR_LPIDR] = (uint32_t)val;

    /*
     * We need to flush the TLB on LPID changes as we only tag HV vs
     * guest in TCG TLB. Also the quadrants means the HV will
     * potentially access and cache entries for the current LPID as
     * well.
     */
    tlb_flush(env_cpu(env));
}
422
/*
 * Store the 40x DBCR0 debug control register.  hflags must be
 * recomputed first because DBCR0 bits feed the single-step state.
 */
void helper_store_40x_dbcr0(CPUPPCState *env, target_ulong val)
{
    /* Bits 26 & 27 affect single-stepping. */
    hreg_compute_hflags(env);
    /* Bits 28 & 29 affect reset or shutdown. */
    store_40x_dbcr0(env, val);
}
430
/* TCG helper wrapper: store the 40x SLER via the shared helper. */
void helper_store_40x_sler(CPUPPCState *env, target_ulong val)
{
    store_40x_sler(env, val);
}
435 #endif
436
437 /*****************************************************************************/
438 /* Special registers manipulation */
439
/*
 * This code is lifted from MacOnLinux. It is called whenever THRM1,2
 * or 3 is read and fixes up the values in such a way that will make
 * MacOS not hang. These registers exist on some 75x and 74xx
 * processors.
 */
/*
 * Fix up THRM1/THRM2 after a read so that MacOS's thermal polling
 * loop terminates: mark each valid comparison as complete (TIV) and
 * set the crossed-threshold bit (TIN) according to the programmed
 * threshold and direction.  Thermal interrupts themselves are not
 * emulated.
 */
void helper_fixup_thrm(CPUPPCState *env)
{
    target_ulong v, t;
    int i;

/*
 * Use unsigned constants for the high bits: (1 << 31) left-shifts a 1
 * into the sign bit, which is undefined behaviour in C, and the
 * resulting negative int would also sign-extend when widened into a
 * 64-bit target_ulong, setting bits above 31.
 */
#define THRM1_TIN       (1U << 31)
#define THRM1_TIV       (1U << 30)
#define THRM1_THRES(x)  (((x) & 0x7f) << 23)
#define THRM1_TID       (1 << 2)
#define THRM1_TIE       (1 << 1)
#define THRM1_V         (1 << 0)
#define THRM3_E         (1 << 0)

    /* Nothing to do unless thermal management is enabled (THRM3[E]). */
    if (!(env->spr[SPR_THRM3] & THRM3_E)) {
        return;
    }

    /* Note: Thermal interrupts are unimplemented */
    for (i = SPR_THRM1; i <= SPR_THRM2; i++) {
        v = env->spr[i];
        if (!(v & THRM1_V)) {
            continue;   /* comparison not enabled for this register */
        }
        /* Report the comparison as complete, then recompute TIN. */
        v |= THRM1_TIV;
        v &= ~THRM1_TIN;
        t = v & THRM1_THRES(127);
        if ((v & THRM1_TID) && t < THRM1_THRES(24)) {
            v |= THRM1_TIN;
        }
        if (!(v & THRM1_TID) && t > THRM1_THRES(24)) {
            v |= THRM1_TIN;
        }
        env->spr[i] = v;
    }
}
481
482 #if !defined(CONFIG_USER_ONLY)
483 #if defined(TARGET_PPC64)
/*
 * clrbhrb: clear every Branch History Rolling Buffer entry, after
 * verifying BHRB access is permitted by HFSCR and MMCR0.
 */
void helper_clrbhrb(CPUPPCState *env)
{
    helper_hfscr_facility_check(env, HFSCR_BHRB, "clrbhrb", FSCR_IC_BHRB);

    helper_mmcr0_facility_check(env, MMCR0_BHRBA_NR, 0, FSCR_IC_BHRB);

    /* Only clear when this CPU actually implements the BHRB. */
    if (env->flags & POWERPC_FLAG_BHRB) {
        memset(env->bhrb, 0, sizeof(env->bhrb));
    }
}
494
/*
 * mfbhrbe: read BHRB entry 'bhrbe', counted back from the most
 * recently written entry.  Returns 0 when the BHRB is not
 * implemented, the index is out of range, or MMCR0[PMAE] is set.
 */
uint64_t helper_mfbhrbe(CPUPPCState *env, uint32_t bhrbe)
{
    unsigned int index;

    helper_hfscr_facility_check(env, HFSCR_BHRB, "mfbhrbe", FSCR_IC_BHRB);

    helper_mmcr0_facility_check(env, MMCR0_BHRBA_NR, 0, FSCR_IC_BHRB);

    if (!(env->flags & POWERPC_FLAG_BHRB) ||
        (bhrbe >= env->bhrb_num_entries) ||
        (env->spr[SPR_POWER_MMCR0] & MMCR0_PMAE)) {
        return 0;
    }

    /*
     * Note: bhrb_offset is the byte offset for writing the
     * next entry (over the oldest entry), which is why we
     * must offset bhrbe by 1 to get to the 0th entry.
     *
     * NOTE(review): the unsigned subtraction can wrap before the
     * modulo, which yields the intended circular index only if
     * bhrb_num_entries is a power of two -- confirm against the
     * BHRB initialization code.
     */
    index = ((env->bhrb_offset / sizeof(uint64_t)) - (bhrbe + 1)) %
            env->bhrb_num_entries;
    return env->bhrb[index];
}
518 #endif
519 #endif
520