/*
 * Miscellaneous PowerPC emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/cputlb.h"
#include "exec/helper-proto.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "mmu-book3s-v3.h"
#include "hw/ppc/ppc.h"

#include "helper_regs.h"

/*****************************************************************************/
/* SPR accesses */
void helper_load_dump_spr(CPUPPCState *env, uint32_t sprn)
{
    qemu_log("Read SPR %d %03x => " TARGET_FMT_lx "\n", sprn, sprn,
             env->spr[sprn]);
}

void helper_store_dump_spr(CPUPPCState *env, uint32_t sprn)
{
    qemu_log("Write SPR %d %03x <= " TARGET_FMT_lx "\n", sprn, sprn,
             env->spr[sprn]);
}

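/*
 * Generic store for SPRs that are shared by all threads of a core: the
 * value is copied into every sibling thread's register file so each
 * thread reads back the same value.
 */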
void helper_spr_core_write_generic(CPUPPCState *env, uint32_t sprn,
                                   target_ulong val)
{
    CPUState *cs = env_cpu(env);
    CPUState *ccs;

    if (ppc_cpu_core_single_threaded(cs)) {
        env->spr[sprn] = val;
        return;
    }

    THREAD_SIBLING_FOREACH(cs, ccs) {
        CPUPPCState *cenv = &POWERPC_CPU(ccs)->env;
        cenv->spr[sprn] = val;
    }
}

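/*
 * Writing CTRL sets this thread's RUN bit and mirrors it into the
 * Thread State (TS) field, at bit position 8 + TIR, of the CTRL copy
 * held by every thread of the core.
 */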
void helper_spr_write_CTRL(CPUPPCState *env, uint32_t sprn,
                           target_ulong val)
{
    CPUState *cs = env_cpu(env);
    CPUState *ccs;
    uint32_t run = val & 1;
    uint32_t ts, ts_mask;

    assert(sprn == SPR_CTRL);

    env->spr[sprn] &= ~1U;
    env->spr[sprn] |= run;

    ts_mask = ~(1U << (8 + env->spr[SPR_TIR]));
    ts = run << (8 + env->spr[SPR_TIR]);

    THREAD_SIBLING_FOREACH(cs, ccs) {
        CPUPPCState *cenv = &POWERPC_CPU(ccs)->env;

        cenv->spr[sprn] &= ts_mask;
        cenv->spr[sprn] |= ts;
    }
}


#ifdef TARGET_PPC64
static void raise_hv_fu_exception(CPUPPCState *env, uint32_t bit,
                                  const char *caller, uint32_t cause,
                                  uintptr_t raddr)
{
    qemu_log_mask(CPU_LOG_INT, "HV Facility %d is unavailable (%s)\n",
                  bit, caller);

    env->spr[SPR_HFSCR] &= ~((target_ulong)FSCR_IC_MASK << FSCR_IC_POS);

    raise_exception_err_ra(env, POWERPC_EXCP_HV_FU, cause, raddr);
}

static void raise_fu_exception(CPUPPCState *env, uint32_t bit,
                               uint32_t sprn, uint32_t cause,
                               uintptr_t raddr)
{
    qemu_log("Facility SPR %d is unavailable (SPR FSCR:%d)\n", sprn, bit);

    env->spr[SPR_FSCR] &= ~((target_ulong)FSCR_IC_MASK << FSCR_IC_POS);
    cause &= FSCR_IC_MASK;
    env->spr[SPR_FSCR] |= (target_ulong)cause << FSCR_IC_POS;

    raise_exception_err_ra(env, POWERPC_EXCP_FU, 0, raddr);
}
#endif

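/*
 * Facility checks: raise a (Hypervisor) Facility Unavailable interrupt
 * when a facility is used while its enable bit is clear. The HFSCR check
 * only applies when the CPU implements HV mode and is not currently
 * executing in it; the FSCR and MSR variants below check the supervisor
 * and machine-state controlled enables respectively.
 */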
void helper_hfscr_facility_check(CPUPPCState *env, uint32_t bit,
                                 const char *caller, uint32_t cause)
{
#ifdef TARGET_PPC64
    if ((env->msr_mask & MSR_HVB) && !FIELD_EX64(env->msr, MSR, HV) &&
        !(env->spr[SPR_HFSCR] & (1UL << bit))) {
        raise_hv_fu_exception(env, bit, caller, cause, GETPC());
    }
#endif
}

void helper_fscr_facility_check(CPUPPCState *env, uint32_t bit,
                                uint32_t sprn, uint32_t cause)
{
#ifdef TARGET_PPC64
    if (env->spr[SPR_FSCR] & (1ULL << bit)) {
        /* Facility is enabled, continue */
        return;
    }
    raise_fu_exception(env, bit, sprn, cause, GETPC());
#endif
}

void helper_msr_facility_check(CPUPPCState *env, uint32_t bit,
                               uint32_t sprn, uint32_t cause)
{
#ifdef TARGET_PPC64
    if (env->msr & (1ULL << bit)) {
        /* Facility is enabled, continue */
        return;
    }
    raise_fu_exception(env, bit, sprn, cause, GETPC());
#endif
}

#if !defined(CONFIG_USER_ONLY)

#ifdef TARGET_PPC64
static void helper_mmcr0_facility_check(CPUPPCState *env, uint32_t bit,
                                        uint32_t sprn, uint32_t cause)
{
    if (FIELD_EX64(env->msr, MSR, PR) &&
        !(env->spr[SPR_POWER_MMCR0] & (1ULL << bit))) {
        raise_fu_exception(env, bit, sprn, cause, GETPC());
    }
}
#endif

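/*
 * SDR1 holds the hash page table base and size, so cached translations
 * must be flushed when it changes.
 */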
void helper_store_sdr1(CPUPPCState *env, target_ulong val)
{
    if (env->spr[SPR_SDR1] != val) {
        ppc_store_sdr1(env, val);
        tlb_flush(env_cpu(env));
    }
}

#if defined(TARGET_PPC64)
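/*
 * PTCR holds the partition table base and size. The new value is applied
 * to all threads sharing the LPAR, and cached translations are flushed.
 */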
void helper_store_ptcr(CPUPPCState *env, target_ulong val)
{
    if (env->spr[SPR_PTCR] != val) {
        CPUState *cs = env_cpu(env);
        PowerPCCPU *cpu = env_archcpu(env);
        target_ulong ptcr_mask = PTCR_PATB | PTCR_PATS;
        target_ulong patbsize = val & PTCR_PATS;

        qemu_log_mask(CPU_LOG_MMU, "%s: " TARGET_FMT_lx "\n", __func__, val);

        assert(!cpu->vhyp);
        assert(env->mmu_model & POWERPC_MMU_3_00);

        if (val & ~ptcr_mask) {
            error_report("Invalid bits 0x"TARGET_FMT_lx" set in PTCR",
                         val & ~ptcr_mask);
            val &= ptcr_mask;
        }

        if (patbsize > 24) {
            error_report("Invalid Partition Table size 0x" TARGET_FMT_lx
                         " stored in PTCR", patbsize);
            return;
        }

        if (ppc_cpu_lpar_single_threaded(cs)) {
            env->spr[SPR_PTCR] = val;
            tlb_flush(cs);
        } else {
            CPUState *ccs;

            THREAD_SIBLING_FOREACH(cs, ccs) {
                PowerPCCPU *ccpu = POWERPC_CPU(ccs);
                CPUPPCState *cenv = &ccpu->env;
                cenv->spr[SPR_PTCR] = val;
                tlb_flush(ccs);
            }
        }
    }
}

void helper_store_pcr(CPUPPCState *env, target_ulong value)
{
    PowerPCCPU *cpu = env_archcpu(env);
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);

    env->spr[SPR_PCR] = value & pcc->pcr_mask;
}

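/*
 * Debug register stores (CIABR, DAWR0/1 and their DAWRX controls) are
 * thin wrappers around the shared ppc_store_* implementations.
 */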
void helper_store_ciabr(CPUPPCState *env, target_ulong value)
{
    ppc_store_ciabr(env, value);
}

void helper_store_dawr0(CPUPPCState *env, target_ulong value)
{
    ppc_store_dawr0(env, value);
}

void helper_store_dawrx0(CPUPPCState *env, target_ulong value)
{
    ppc_store_dawrx0(env, value);
}

void helper_store_dawr1(CPUPPCState *env, target_ulong value)
{
    ppc_store_dawr1(env, value);
}

void helper_store_dawrx1(CPUPPCState *env, target_ulong value)
{
    ppc_store_dawrx1(env, value);
}

/*
 * DPDES register is shared. Each bit reflects the state of the
 * doorbell interrupt of a thread of the same core.
 */
target_ulong helper_load_dpdes(CPUPPCState *env)
{
    CPUState *cs = env_cpu(env);
    CPUState *ccs;
    target_ulong dpdes = 0;

    helper_hfscr_facility_check(env, HFSCR_MSGP, "load DPDES", HFSCR_IC_MSGP);

    /* DPDES behaves as 1-thread in LPAR-per-thread mode */
    if (ppc_cpu_lpar_single_threaded(cs)) {
        if (env->pending_interrupts & PPC_INTERRUPT_DOORBELL) {
            dpdes = 1;
        }
        return dpdes;
    }

    bql_lock();
    THREAD_SIBLING_FOREACH(cs, ccs) {
        PowerPCCPU *ccpu = POWERPC_CPU(ccs);
        CPUPPCState *cenv = &ccpu->env;
        uint32_t thread_id = ppc_cpu_tir(ccpu);

        if (cenv->pending_interrupts & PPC_INTERRUPT_DOORBELL) {
            dpdes |= (0x1 << thread_id);
        }
    }
    bql_unlock();

    return dpdes;
}

void helper_store_dpdes(CPUPPCState *env, target_ulong val)
{
    PowerPCCPU *cpu = env_archcpu(env);
    CPUState *cs = env_cpu(env);
    CPUState *ccs;

    helper_hfscr_facility_check(env, HFSCR_MSGP, "store DPDES", HFSCR_IC_MSGP);

    /* DPDES behaves as 1-thread in LPAR-per-thread mode */
    if (ppc_cpu_lpar_single_threaded(cs)) {
        ppc_set_irq(cpu, PPC_INTERRUPT_DOORBELL, val & 0x1);
        return;
    }

    /* Does iothread need to be locked for walking CPU list? */
    bql_lock();
    THREAD_SIBLING_FOREACH(cs, ccs) {
        PowerPCCPU *ccpu = POWERPC_CPU(ccs);
        uint32_t thread_id = ppc_cpu_tir(ccpu);

        ppc_set_irq(ccpu, PPC_INTERRUPT_DOORBELL, val & (0x1 << thread_id));
    }
    bql_unlock();
}

/*
 * qemu-user breaks with pnv headers, so they go under ifdefs for now.
 * A clean up may be to move powernv specific registers and helpers into
 * target/ppc/pnv_helper.c
 */
#include "hw/ppc/pnv_core.h"

/* Indirect SCOM (SPRC/SPRD) access to SCRATCH0-7 are implemented. */
void helper_store_sprc(CPUPPCState *env, target_ulong val)
{
    if (val & ~0x3f8ULL) {
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid SPRC register value "
                      TARGET_FMT_lx"\n", val);
        return;
    }
    env->spr[SPR_POWER_SPRC] = val;
}

target_ulong helper_load_sprd(CPUPPCState *env)
{
    /*
     * SPRD is a HV-only register for Power CPUs, so this will only be
     * accessed by powernv machines.
     */
    PowerPCCPU *cpu = env_archcpu(env);
    PnvCore *pc = pnv_cpu_state(cpu)->pnv_core;
    target_ulong sprc = env->spr[SPR_POWER_SPRC];

    if (pc->big_core) {
        pc = pnv_chip_find_core(pc->chip, CPU_CORE(pc)->core_id & ~0x1);
    }

    switch (sprc & 0x3e0) {
    case 0: /* SCRATCH0-3 */
    case 1: /* SCRATCH4-7 */
        return pc->scratch[(sprc >> 3) & 0x7];

    case 0x1e0: /* core thread state */
        if (env->excp_model == POWERPC_EXCP_POWER9) {
            /*
             * Only implement for POWER9 because skiboot uses it to check
             * big-core mode. Other bits are unimplemented so we would
             * prefer to get unimplemented message on POWER10 if it were
             * used anywhere.
             */
            if (pc->big_core) {
                return PPC_BIT(63);
            } else {
                return 0;
            }
        }
        /* fallthru */

    default:
        qemu_log_mask(LOG_UNIMP, "mfSPRD: Unimplemented SPRC:0x"
                      TARGET_FMT_lx"\n", sprc);
        break;
    }
    return 0;
}

void helper_store_sprd(CPUPPCState *env, target_ulong val)
{
    target_ulong sprc = env->spr[SPR_POWER_SPRC];
    PowerPCCPU *cpu = env_archcpu(env);
    PnvCore *pc = pnv_cpu_state(cpu)->pnv_core;
    int nr;

    if (pc->big_core) {
        pc = pnv_chip_find_core(pc->chip, CPU_CORE(pc)->core_id & ~0x1);
    }

    switch (sprc & 0x3e0) {
    case 0: /* SCRATCH0-3 */
    case 1: /* SCRATCH4-7 */
        /*
         * Log stores to SCRATCH, because some firmware uses these for
         * debugging and logging, but they would normally be read by the BMC,
         * which is not implemented in QEMU yet. This gives a way to get at the
         * information. Could also dump these upon checkstop.
         */
        nr = (sprc >> 3) & 0x7;
        pc->scratch[nr] = val;
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "mtSPRD: Unimplemented SPRC:0x"
                      TARGET_FMT_lx"\n", sprc);
        break;
    }
}

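/*
 * PMSR (power management status) reports version 0x1, Pmax, and the
 * local/global actual Pstate, which mirror the lower Pstate field
 * currently programmed in PMCR.
 */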
target_ulong helper_load_pmsr(CPUPPCState *env)
{
    target_ulong lowerps = extract64(env->spr[SPR_PMCR], PPC_BIT_NR(15), 8);
    target_ulong val = 0;

    val |= PPC_BIT(63); /* version 0x1 (POWER9/10) */
    /* Pmin = 0 */
    /* XXX: POWER9 should be 3 */
    val |= 4ULL << PPC_BIT_NR(31); /* Pmax */
    val |= lowerps << PPC_BIT_NR(15); /* Local actual Pstate */
    val |= lowerps << PPC_BIT_NR(7); /* Global actual Pstate */

    return val;
}

static void ppc_set_pmcr(PowerPCCPU *cpu, target_ulong val)
{
    cpu->env.spr[SPR_PMCR] = val;
}

void helper_store_pmcr(CPUPPCState *env, target_ulong val)
{
    PowerPCCPU *cpu = env_archcpu(env);
    CPUState *cs = env_cpu(env);
    CPUState *ccs;

    /* Leave version field unchanged (0x1) */
    val &= ~PPC_BITMASK(60, 63);
    val |= PPC_BIT(63);

    val &= ~PPC_BITMASK(0, 7); /* UpperPS ignored */
    if (val & PPC_BITMASK(16, 59)) {
        qemu_log_mask(LOG_GUEST_ERROR, "Non-zero PMCR reserved bits "
                      TARGET_FMT_lx"\n", val);
        val &= ~PPC_BITMASK(16, 59);
    }

    /* PMCR behaves as 1-thread in LPAR-per-thread mode */
    if (ppc_cpu_lpar_single_threaded(cs)) {
        ppc_set_pmcr(cpu, val);
        return;
    }

    /* Does iothread need to be locked for walking CPU list? */
    bql_lock();
    THREAD_SIBLING_FOREACH(cs, ccs) {
        PowerPCCPU *ccpu = POWERPC_CPU(ccs);
        ppc_set_pmcr(ccpu, val);
    }
    bql_unlock();
}

#endif /* defined(TARGET_PPC64) */

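/*
 * The PID is part of the translation context, so the TLB must be
 * flushed when it changes.
 */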
void helper_store_pidr(CPUPPCState *env, target_ulong val)
{
    env->spr[SPR_BOOKS_PID] = (uint32_t)val;
    tlb_flush(env_cpu(env));
}

void helper_store_lpidr(CPUPPCState *env, target_ulong val)
{
    env->spr[SPR_LPIDR] = (uint32_t)val;

    /*
     * We need to flush the TLB on LPID changes as we only tag HV vs
     * guest in the TCG TLB. Also, the quadrants mean the HV will
     * potentially access and cache entries for the current LPID as
     * well.
     */
    tlb_flush(env_cpu(env));
}

void helper_store_40x_dbcr0(CPUPPCState *env, target_ulong val)
{
    /* Bits 26 & 27 affect single-stepping. */
    hreg_compute_hflags(env);
    /* Bits 28 & 29 affect reset or shutdown. */
    store_40x_dbcr0(env, val);
}

void helper_store_40x_sler(CPUPPCState *env, target_ulong val)
{
    store_40x_sler(env, val);
}
#endif

/*****************************************************************************/
/* Special registers manipulation */

/*
 * This code is lifted from MacOnLinux. It is called whenever THRM1,2
 * or 3 is read and fixes up the values in such a way that will make
 * MacOS not hang. These registers exist on some 75x and 74xx
 * processors.
 */
void helper_fixup_thrm(CPUPPCState *env)
{
    target_ulong v, t;
    int i;

#define THRM1_TIN (1 << 31)
#define THRM1_TIV (1 << 30)
#define THRM1_THRES(x) (((x) & 0x7f) << 23)
#define THRM1_TID (1 << 2)
#define THRM1_TIE (1 << 1)
#define THRM1_V (1 << 0)
#define THRM3_E (1 << 0)

    if (!(env->spr[SPR_THRM3] & THRM3_E)) {
        return;
    }

    /* Note: Thermal interrupts are unimplemented */
    for (i = SPR_THRM1; i <= SPR_THRM2; i++) {
        v = env->spr[i];
        if (!(v & THRM1_V)) {
            continue;
        }
        v |= THRM1_TIV;
        v &= ~THRM1_TIN;
        t = v & THRM1_THRES(127);
        if ((v & THRM1_TID) && t < THRM1_THRES(24)) {
            v |= THRM1_TIN;
        }
        if (!(v & THRM1_TID) && t > THRM1_THRES(24)) {
            v |= THRM1_TIN;
        }
        env->spr[i] = v;
    }
}

#if !defined(CONFIG_USER_ONLY)
#if defined(TARGET_PPC64)
void helper_clrbhrb(CPUPPCState *env)
{
    helper_hfscr_facility_check(env, HFSCR_BHRB, "clrbhrb", FSCR_IC_BHRB);

    helper_mmcr0_facility_check(env, MMCR0_BHRBA_NR, 0, FSCR_IC_BHRB);

    if (env->flags & POWERPC_FLAG_BHRB) {
        memset(env->bhrb, 0, sizeof(env->bhrb));
    }
}

uint64_t helper_mfbhrbe(CPUPPCState *env, uint32_t bhrbe)
{
    unsigned int index;

    helper_hfscr_facility_check(env, HFSCR_BHRB, "mfbhrbe", FSCR_IC_BHRB);

    helper_mmcr0_facility_check(env, MMCR0_BHRBA_NR, 0, FSCR_IC_BHRB);

    if (!(env->flags & POWERPC_FLAG_BHRB) ||
        (bhrbe >= env->bhrb_num_entries) ||
        (env->spr[SPR_POWER_MMCR0] & MMCR0_PMAE)) {
        return 0;
    }

    /*
     * Note: bhrb_offset is the byte offset for writing the
     * next entry (over the oldest entry), which is why we
     * must offset bhrbe by 1 to get to the 0th entry.
     */
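    /*
     * For example, with 32 entries and bhrb_offset == 8 (one entry
     * written so far), bhrbe 0 yields index (1 - 1) % 32 == 0, i.e. the
     * most recently written entry.
     */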
    index = ((env->bhrb_offset / sizeof(uint64_t)) - (bhrbe + 1)) %
            env->bhrb_num_entries;
    return env->bhrb[index];
}
#endif
#endif