xref: /openbmc/qemu/hw/ppc/ppc.c (revision cde3c425)
1 /*
2  * QEMU generic PowerPC hardware System Emulator
3  *
4  * Copyright (c) 2003-2007 Jocelyn Mayer
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to deal
8  * in the Software without restriction, including without limitation the rights
9  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10  * copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22  * THE SOFTWARE.
23  */
24 
25 #include "qemu/osdep.h"
26 #include "hw/irq.h"
27 #include "hw/ppc/ppc.h"
28 #include "hw/ppc/ppc_e500.h"
29 #include "qemu/timer.h"
30 #include "sysemu/cpus.h"
31 #include "qemu/log.h"
32 #include "qemu/main-loop.h"
33 #include "qemu/error-report.h"
34 #include "sysemu/kvm.h"
35 #include "sysemu/replay.h"
36 #include "sysemu/runstate.h"
37 #include "kvm_ppc.h"
38 #include "migration/vmstate.h"
39 #include "trace.h"
40 
41 static void cpu_ppc_tb_stop (CPUPPCState *env);
42 static void cpu_ppc_tb_start (CPUPPCState *env);
43 
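/*
 * Set or clear one source bit in the CPU's pending_interrupts mask and, when
 * the mask actually changes, notify the interrupt delivery code (and KVM if
 * it is in use).
 */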
44 void ppc_set_irq(PowerPCCPU *cpu, int irq, int level)
45 {
46     CPUPPCState *env = &cpu->env;
47     unsigned int old_pending;
48 
49     /* We may already have the BQL if coming from the reset path */
50     BQL_LOCK_GUARD();
51 
52     old_pending = env->pending_interrupts;
53 
54     if (level) {
55         env->pending_interrupts |= irq;
56     } else {
57         env->pending_interrupts &= ~irq;
58     }
59 
60     if (old_pending != env->pending_interrupts) {
61         ppc_maybe_interrupt(env);
62         if (kvm_enabled()) {
63             kvmppc_set_interrupt(cpu, irq, level);
64         }
65     }
66 
67     trace_ppc_irq_set_exit(env, irq, level, env->pending_interrupts,
68                            CPU(cpu)->interrupt_request);
69 }
70 
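/*
 * Most of the per-CPU IRQ input handlers below keep a shadow copy of their
 * input pins in env->irq_input_state so that only real level changes on a
 * pin are acted upon.
 */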
71 /* PowerPC 6xx / 7xx internal IRQ controller */
72 static void ppc6xx_set_irq(void *opaque, int pin, int level)
73 {
74     PowerPCCPU *cpu = opaque;
75     CPUPPCState *env = &cpu->env;
76     int cur_level;
77 
78     trace_ppc_irq_set(env, pin, level);
79 
80     cur_level = (env->irq_input_state >> pin) & 1;
81     /* Don't generate spurious events */
82     if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
83         CPUState *cs = CPU(cpu);
84 
85         switch (pin) {
86         case PPC6xx_INPUT_TBEN:
87             /* Level sensitive - active high */
88             trace_ppc_irq_set_state("time base", level);
89             if (level) {
90                 cpu_ppc_tb_start(env);
91             } else {
92                 cpu_ppc_tb_stop(env);
93             }
94             break;
95         case PPC6xx_INPUT_INT:
96             /* Level sensitive - active high */
97             trace_ppc_irq_set_state("external IRQ", level);
98             ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
99             break;
100         case PPC6xx_INPUT_SMI:
101             /* Level sensitive - active high */
102             trace_ppc_irq_set_state("SMI IRQ", level);
103             ppc_set_irq(cpu, PPC_INTERRUPT_SMI, level);
104             break;
105         case PPC6xx_INPUT_MCP:
106             /* Negative edge sensitive */
107             /* XXX: TODO: actual reaction may depend on HID0 status
108              *            603/604/740/750: check HID0[EMCP]
109              */
110             if (cur_level == 1 && level == 0) {
111                 trace_ppc_irq_set_state("machine check", 1);
112                 ppc_set_irq(cpu, PPC_INTERRUPT_MCK, 1);
113             }
114             break;
115         case PPC6xx_INPUT_CKSTP_IN:
116             /* Level sensitive - active low */
117             /* XXX: TODO: relay the signal to CKSTP_OUT pin */
118             /* XXX: Note that the only way to restart the CPU is to reset it */
119             if (level) {
120                 trace_ppc_irq_cpu("stop");
121                 cs->halted = 1;
122             }
123             break;
124         case PPC6xx_INPUT_HRESET:
125             /* Level sensitive - active low */
126             if (level) {
127                 trace_ppc_irq_reset("CPU");
128                 cpu_interrupt(cs, CPU_INTERRUPT_RESET);
129             }
130             break;
131         case PPC6xx_INPUT_SRESET:
132             trace_ppc_irq_set_state("RESET IRQ", level);
133             ppc_set_irq(cpu, PPC_INTERRUPT_RESET, level);
134             break;
135         default:
136             g_assert_not_reached();
137         }
138         if (level)
139             env->irq_input_state |= 1 << pin;
140         else
141             env->irq_input_state &= ~(1 << pin);
142     }
143 }
144 
145 void ppc6xx_irq_init(PowerPCCPU *cpu)
146 {
147     qdev_init_gpio_in(DEVICE(cpu), ppc6xx_set_irq, PPC6xx_INPUT_NB);
148 }
149 
150 #if defined(TARGET_PPC64)
151 /* PowerPC 970 internal IRQ controller */
152 static void ppc970_set_irq(void *opaque, int pin, int level)
153 {
154     PowerPCCPU *cpu = opaque;
155     CPUPPCState *env = &cpu->env;
156     int cur_level;
157 
158     trace_ppc_irq_set(env, pin, level);
159 
160     cur_level = (env->irq_input_state >> pin) & 1;
161     /* Don't generate spurious events */
162     if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
163         CPUState *cs = CPU(cpu);
164 
165         switch (pin) {
166         case PPC970_INPUT_INT:
167             /* Level sensitive - active high */
168             trace_ppc_irq_set_state("external IRQ", level);
169             ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
170             break;
171         case PPC970_INPUT_THINT:
172             /* Level sensitive - active high */
173             trace_ppc_irq_set_state("SMI IRQ", level);
174             ppc_set_irq(cpu, PPC_INTERRUPT_THERM, level);
175             break;
176         case PPC970_INPUT_MCP:
177             /* Negative edge sensitive */
178             /* XXX: TODO: actual reaction may depend on HID0 status
179              *            603/604/740/750: check HID0[EMCP]
180              */
181             if (cur_level == 1 && level == 0) {
182                 trace_ppc_irq_set_state("machine check", 1);
183                 ppc_set_irq(cpu, PPC_INTERRUPT_MCK, 1);
184             }
185             break;
186         case PPC970_INPUT_CKSTP:
187             /* Level sensitive - active low */
188             /* XXX: TODO: relay the signal to CKSTP_OUT pin */
189             if (level) {
190                 trace_ppc_irq_cpu("stop");
191                 cs->halted = 1;
192             } else {
193                 trace_ppc_irq_cpu("restart");
194                 cs->halted = 0;
195                 qemu_cpu_kick(cs);
196             }
197             break;
198         case PPC970_INPUT_HRESET:
199             /* Level sensitive - active low */
200             if (level) {
201                 cpu_interrupt(cs, CPU_INTERRUPT_RESET);
202             }
203             break;
204         case PPC970_INPUT_SRESET:
205             trace_ppc_irq_set_state("RESET IRQ", level);
206             ppc_set_irq(cpu, PPC_INTERRUPT_RESET, level);
207             break;
208         case PPC970_INPUT_TBEN:
209             trace_ppc_irq_set_state("TBEN IRQ", level);
210             /* XXX: TODO */
211             break;
212         default:
213             g_assert_not_reached();
214         }
215         if (level)
216             env->irq_input_state |= 1 << pin;
217         else
218             env->irq_input_state &= ~(1 << pin);
219     }
220 }
221 
222 void ppc970_irq_init(PowerPCCPU *cpu)
223 {
224     qdev_init_gpio_in(DEVICE(cpu), ppc970_set_irq, PPC970_INPUT_NB);
225 }
226 
227 /* POWER7 internal IRQ controller */
228 static void power7_set_irq(void *opaque, int pin, int level)
229 {
230     PowerPCCPU *cpu = opaque;
231 
232     trace_ppc_irq_set(&cpu->env, pin, level);
233 
234     switch (pin) {
235     case POWER7_INPUT_INT:
236         /* Level sensitive - active high */
237         trace_ppc_irq_set_state("external IRQ", level);
238         ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
239         break;
240     default:
241         g_assert_not_reached();
242     }
243 }
244 
245 void ppcPOWER7_irq_init(PowerPCCPU *cpu)
246 {
247     qdev_init_gpio_in(DEVICE(cpu), power7_set_irq, POWER7_INPUT_NB);
248 }
249 
250 /* POWER9 internal IRQ controller */
251 static void power9_set_irq(void *opaque, int pin, int level)
252 {
253     PowerPCCPU *cpu = opaque;
254 
255     trace_ppc_irq_set(&cpu->env, pin, level);
256 
257     switch (pin) {
258     case POWER9_INPUT_INT:
259         /* Level sensitive - active high */
260         trace_ppc_irq_set_state("external IRQ", level);
261         ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
262         break;
263     case POWER9_INPUT_HINT:
264         /* Level sensitive - active high */
265         trace_ppc_irq_set_state("HV external IRQ", level);
266         ppc_set_irq(cpu, PPC_INTERRUPT_HVIRT, level);
267         break;
268     default:
269         g_assert_not_reached();
270     }
271 }
272 
273 void ppcPOWER9_irq_init(PowerPCCPU *cpu)
274 {
275     qdev_init_gpio_in(DEVICE(cpu), power9_set_irq, POWER9_INPUT_NB);
276 }
277 #endif /* defined(TARGET_PPC64) */
278 
279 void ppc40x_core_reset(PowerPCCPU *cpu)
280 {
281     CPUPPCState *env = &cpu->env;
282     target_ulong dbsr;
283 
284     qemu_log_mask(CPU_LOG_RESET, "Reset PowerPC core\n");
285     cpu_interrupt(CPU(cpu), CPU_INTERRUPT_RESET);
286     dbsr = env->spr[SPR_40x_DBSR];
287     dbsr &= ~0x00000300;
288     dbsr |= 0x00000100;
289     env->spr[SPR_40x_DBSR] = dbsr;
290 }
291 
292 void ppc40x_chip_reset(PowerPCCPU *cpu)
293 {
294     CPUPPCState *env = &cpu->env;
295     target_ulong dbsr;
296 
297     qemu_log_mask(CPU_LOG_RESET, "Reset PowerPC chip\n");
298     cpu_interrupt(CPU(cpu), CPU_INTERRUPT_RESET);
299     /* XXX: TODO reset all internal peripherals */
300     dbsr = env->spr[SPR_40x_DBSR];
301     dbsr &= ~0x00000300;
302     dbsr |= 0x00000200;
303     env->spr[SPR_40x_DBSR] = dbsr;
304 }
305 
306 void ppc40x_system_reset(PowerPCCPU *cpu)
307 {
308     qemu_log_mask(CPU_LOG_RESET, "Reset PowerPC system\n");
309     qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
310 }
311 
312 void store_40x_dbcr0(CPUPPCState *env, uint32_t val)
313 {
314     PowerPCCPU *cpu = env_archcpu(env);
315 
316     bql_lock();
317 
318     switch ((val >> 28) & 0x3) {
319     case 0x0:
320         /* No action */
321         break;
322     case 0x1:
323         /* Core reset */
324         ppc40x_core_reset(cpu);
325         break;
326     case 0x2:
327         /* Chip reset */
328         ppc40x_chip_reset(cpu);
329         break;
330     case 0x3:
331         /* System reset */
332         ppc40x_system_reset(cpu);
333         break;
334     }
335 
336     bql_unlock();
337 }
338 
339 /* PowerPC 40x internal IRQ controller */
340 static void ppc40x_set_irq(void *opaque, int pin, int level)
341 {
342     PowerPCCPU *cpu = opaque;
343     CPUPPCState *env = &cpu->env;
344     int cur_level;
345 
346     trace_ppc_irq_set(env, pin, level);
347 
348     cur_level = (env->irq_input_state >> pin) & 1;
349     /* Don't generate spurious events */
350     if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
351         CPUState *cs = CPU(cpu);
352 
353         switch (pin) {
354         case PPC40x_INPUT_RESET_SYS:
355             if (level) {
356                 trace_ppc_irq_reset("system");
357                 ppc40x_system_reset(cpu);
358             }
359             break;
360         case PPC40x_INPUT_RESET_CHIP:
361             if (level) {
362                 trace_ppc_irq_reset("chip");
363                 ppc40x_chip_reset(cpu);
364             }
365             break;
366         case PPC40x_INPUT_RESET_CORE:
367             /* XXX: TODO: update DBSR[MRR] */
368             if (level) {
369                 trace_ppc_irq_reset("core");
370                 ppc40x_core_reset(cpu);
371             }
372             break;
373         case PPC40x_INPUT_CINT:
374             /* Level sensitive - active high */
375             trace_ppc_irq_set_state("critical IRQ", level);
376             ppc_set_irq(cpu, PPC_INTERRUPT_CEXT, level);
377             break;
378         case PPC40x_INPUT_INT:
379             /* Level sensitive - active high */
380             trace_ppc_irq_set_state("external IRQ", level);
381             ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
382             break;
383         case PPC40x_INPUT_HALT:
384             /* Level sensitive - active low */
385             if (level) {
386                 trace_ppc_irq_cpu("stop");
387                 cs->halted = 1;
388             } else {
389                 trace_ppc_irq_cpu("restart");
390                 cs->halted = 0;
391                 qemu_cpu_kick(cs);
392             }
393             break;
394         case PPC40x_INPUT_DEBUG:
395             /* Level sensitive - active high */
396             trace_ppc_irq_set_state("debug pin", level);
397             ppc_set_irq(cpu, PPC_INTERRUPT_DEBUG, level);
398             break;
399         default:
400             g_assert_not_reached();
401         }
402         if (level)
403             env->irq_input_state |= 1 << pin;
404         else
405             env->irq_input_state &= ~(1 << pin);
406     }
407 }
408 
409 void ppc40x_irq_init(PowerPCCPU *cpu)
410 {
411     qdev_init_gpio_in(DEVICE(cpu), ppc40x_set_irq, PPC40x_INPUT_NB);
412 }
413 
414 /* PowerPC E500 internal IRQ controller */
415 static void ppce500_set_irq(void *opaque, int pin, int level)
416 {
417     PowerPCCPU *cpu = opaque;
418     CPUPPCState *env = &cpu->env;
419     int cur_level;
420 
421     trace_ppc_irq_set(env, pin, level);
422 
423     cur_level = (env->irq_input_state >> pin) & 1;
424     /* Don't generate spurious events */
425     if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
426         switch (pin) {
427         case PPCE500_INPUT_MCK:
428             if (level) {
429                 trace_ppc_irq_reset("system");
430                 qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
431             }
432             break;
433         case PPCE500_INPUT_RESET_CORE:
434             if (level) {
435                 trace_ppc_irq_reset("core");
436                 ppc_set_irq(cpu, PPC_INTERRUPT_MCK, level);
437             }
438             break;
439         case PPCE500_INPUT_CINT:
440             /* Level sensitive - active high */
441             trace_ppc_irq_set_state("critical IRQ", level);
442             ppc_set_irq(cpu, PPC_INTERRUPT_CEXT, level);
443             break;
444         case PPCE500_INPUT_INT:
445             /* Level sensitive - active high */
446             trace_ppc_irq_set_state("core IRQ", level);
447             ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
448             break;
449         case PPCE500_INPUT_DEBUG:
450             /* Level sensitive - active high */
451             trace_ppc_irq_set_state("debug pin", level);
452             ppc_set_irq(cpu, PPC_INTERRUPT_DEBUG, level);
453             break;
454         default:
455             g_assert_not_reached();
456         }
457         if (level)
458             env->irq_input_state |= 1 << pin;
459         else
460             env->irq_input_state &= ~(1 << pin);
461     }
462 }
463 
464 void ppce500_irq_init(PowerPCCPU *cpu)
465 {
466     qdev_init_gpio_in(DEVICE(cpu), ppce500_set_irq, PPCE500_INPUT_NB);
467 }
468 
469 /* Enable or disable the E500 EPR capability (MPIC interrupt proxy) */
470 void ppce500_set_mpic_proxy(bool enabled)
471 {
472     CPUState *cs;
473 
474     CPU_FOREACH(cs) {
475         PowerPCCPU *cpu = POWERPC_CPU(cs);
476 
477         cpu->env.mpic_proxy = enabled;
478         if (kvm_enabled()) {
479             kvmppc_set_mpic_proxy(cpu, enabled);
480         }
481     }
482 }
483 
484 /*****************************************************************************/
485 /* PowerPC time base and decrementer emulation */
486 
487 /*
488  * Conversion between QEMU_CLOCK_VIRTUAL ns and timebase (TB) ticks:
489  * TB ticks are computed by multiplying the ns value by tb_freq, dividing
490  * by ns per second, and rounding down. TB ticks drive all clocks and
491  * timers in the target machine.
492  *
493  * Converting TB intervals to ns for the purpose of setting a
494  * QEMU_CLOCK_VIRTUAL timer should go the other way, but rounding
495  * up. Rounding down could cause the timer to fire before the TB
496  * value has been reached.
497  */
498 static uint64_t ns_to_tb(uint32_t freq, int64_t clock)
499 {
500     return muldiv64(clock, freq, NANOSECONDS_PER_SECOND);
501 }
502 
503 /* Convert TB ticks (virtual clock, not adjusted by TB offset) to ns, rounding up */
504 static int64_t tb_to_ns_round_up(uint32_t freq, uint64_t tb)
505 {
506     return muldiv64_round_up(tb, NANOSECONDS_PER_SECOND, freq);
507 }
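/*
 * Illustrative example of the rounding above (the numbers are only an
 * example): with tb_freq = 512 MHz, 3 ns of virtual time gives
 * muldiv64(3, 512000000, 1000000000) = 1 TB tick (rounded down), while
 * converting 2 TB ticks back to ns rounds up to 4 ns, so a timer armed from
 * a TB interval never fires before the TB target has been reached.
 */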
508 
509 uint64_t cpu_ppc_get_tb(ppc_tb_t *tb_env, uint64_t vmclk, int64_t tb_offset)
510 {
511     /* TB time in tb periods */
512     return ns_to_tb(tb_env->tb_freq, vmclk) + tb_offset;
513 }
514 
515 uint64_t cpu_ppc_load_tbl (CPUPPCState *env)
516 {
517     ppc_tb_t *tb_env = env->tb_env;
518     uint64_t tb;
519 
520     if (kvm_enabled()) {
521         return env->spr[SPR_TBL];
522     }
523 
524     tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
525                         tb_env->tb_offset);
526     trace_ppc_tb_load(tb);
527 
528     return tb;
529 }
530 
531 static inline uint32_t _cpu_ppc_load_tbu(CPUPPCState *env)
532 {
533     ppc_tb_t *tb_env = env->tb_env;
534     uint64_t tb;
535 
536     tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
537                         tb_env->tb_offset);
538     trace_ppc_tb_load(tb);
539 
540     return tb >> 32;
541 }
542 
543 uint32_t cpu_ppc_load_tbu (CPUPPCState *env)
544 {
545     if (kvm_enabled()) {
546         return env->spr[SPR_TBU];
547     }
548 
549     return _cpu_ppc_load_tbu(env);
550 }
551 
552 static inline void cpu_ppc_store_tb(ppc_tb_t *tb_env, uint64_t vmclk,
553                                     int64_t *tb_offsetp, uint64_t value)
554 {
555     *tb_offsetp = value - ns_to_tb(tb_env->tb_freq, vmclk);
556 
557     trace_ppc_tb_store(value, *tb_offsetp);
558 }
559 
560 void cpu_ppc_store_tbl (CPUPPCState *env, uint32_t value)
561 {
562     ppc_tb_t *tb_env = env->tb_env;
563     int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
564     uint64_t tb;
565 
566     tb = cpu_ppc_get_tb(tb_env, clock, tb_env->tb_offset);
567     tb &= 0xFFFFFFFF00000000ULL;
568     cpu_ppc_store_tb(tb_env, clock, &tb_env->tb_offset, tb | (uint64_t)value);
569 }
570 
571 static inline void _cpu_ppc_store_tbu(CPUPPCState *env, uint32_t value)
572 {
573     ppc_tb_t *tb_env = env->tb_env;
574     int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
575     uint64_t tb;
576 
577     tb = cpu_ppc_get_tb(tb_env, clock, tb_env->tb_offset);
578     tb &= 0x00000000FFFFFFFFULL;
579     cpu_ppc_store_tb(tb_env, clock, &tb_env->tb_offset,
580                      ((uint64_t)value << 32) | tb);
581 }
582 
583 void cpu_ppc_store_tbu (CPUPPCState *env, uint32_t value)
584 {
585     _cpu_ppc_store_tbu(env, value);
586 }
587 
588 uint64_t cpu_ppc_load_atbl (CPUPPCState *env)
589 {
590     ppc_tb_t *tb_env = env->tb_env;
591     uint64_t tb;
592 
593     tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
594                         tb_env->atb_offset);
595     trace_ppc_tb_load(tb);
596 
597     return tb;
598 }
599 
600 uint32_t cpu_ppc_load_atbu (CPUPPCState *env)
601 {
602     ppc_tb_t *tb_env = env->tb_env;
603     uint64_t tb;
604 
605     tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
606                         tb_env->atb_offset);
607     trace_ppc_tb_load(tb);
608 
609     return tb >> 32;
610 }
611 
612 void cpu_ppc_store_atbl (CPUPPCState *env, uint32_t value)
613 {
614     ppc_tb_t *tb_env = env->tb_env;
615     int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
616     uint64_t tb;
617 
618     tb = cpu_ppc_get_tb(tb_env, clock, tb_env->atb_offset);
619     tb &= 0xFFFFFFFF00000000ULL;
620     cpu_ppc_store_tb(tb_env, clock, &tb_env->atb_offset, tb | (uint64_t)value);
621 }
622 
623 void cpu_ppc_store_atbu (CPUPPCState *env, uint32_t value)
624 {
625     ppc_tb_t *tb_env = env->tb_env;
626     int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
627     uint64_t tb;
628 
629     tb = cpu_ppc_get_tb(tb_env, clock, tb_env->atb_offset);
630     tb &= 0x00000000FFFFFFFFULL;
631     cpu_ppc_store_tb(tb_env, clock, &tb_env->atb_offset,
632                      ((uint64_t)value << 32) | tb);
633 }
634 
635 void cpu_ppc_increase_tb_by_offset(CPUPPCState *env, int64_t offset)
636 {
637     env->tb_env->tb_offset += offset;
638 }
639 
640 void cpu_ppc_decrease_tb_by_offset(CPUPPCState *env, int64_t offset)
641 {
642     env->tb_env->tb_offset -= offset;
643 }
644 
645 uint64_t cpu_ppc_load_vtb(CPUPPCState *env)
646 {
647     ppc_tb_t *tb_env = env->tb_env;
648 
649     return cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
650                           tb_env->vtb_offset);
651 }
652 
653 void cpu_ppc_store_vtb(CPUPPCState *env, uint64_t value)
654 {
655     ppc_tb_t *tb_env = env->tb_env;
656 
657     cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
658                      &tb_env->vtb_offset, value);
659 }
660 
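/* Store the upper 40 bits of the time base, preserving the low 24 bits */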
661 void cpu_ppc_store_tbu40(CPUPPCState *env, uint64_t value)
662 {
663     ppc_tb_t *tb_env = env->tb_env;
664     int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
665     uint64_t tb;
666 
667     tb = cpu_ppc_get_tb(tb_env, clock, tb_env->tb_offset);
668     tb &= 0xFFFFFFUL;
669     tb |= (value & ~0xFFFFFFUL);
670     cpu_ppc_store_tb(tb_env, clock, &tb_env->tb_offset, tb);
671 }
672 
673 static void cpu_ppc_tb_stop (CPUPPCState *env)
674 {
675     ppc_tb_t *tb_env = env->tb_env;
676     uint64_t tb, atb, vmclk;
677 
678     /* If the time base is already frozen, do nothing */
679     if (tb_env->tb_freq != 0) {
680         vmclk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
681         /* Get the time base */
682         tb = cpu_ppc_get_tb(tb_env, vmclk, tb_env->tb_offset);
683         /* Get the alternate time base */
684         atb = cpu_ppc_get_tb(tb_env, vmclk, tb_env->atb_offset);
685         /* Store the time base value (ie compute the current offset) */
686         cpu_ppc_store_tb(tb_env, vmclk, &tb_env->tb_offset, tb);
687         /* Store the alternate time base value (compute the current offset) */
688         cpu_ppc_store_tb(tb_env, vmclk, &tb_env->atb_offset, atb);
689         /* Set the time base frequency to zero */
690         tb_env->tb_freq = 0;
691         /* Now, the time bases are frozen to tb_offset / atb_offset value */
692     }
693 }
694 
695 static void cpu_ppc_tb_start (CPUPPCState *env)
696 {
697     ppc_tb_t *tb_env = env->tb_env;
698     uint64_t tb, atb, vmclk;
699 
700     /* If the time base is not frozen, do nothing */
701     if (tb_env->tb_freq == 0) {
702         vmclk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
703         /* Get the time base from tb_offset */
704         tb = tb_env->tb_offset;
705         /* Get the alternate time base from atb_offset */
706         atb = tb_env->atb_offset;
707         /* Restore the tb frequency from the decrementer frequency */
708         tb_env->tb_freq = tb_env->decr_freq;
709         /* Store the time base value */
710         cpu_ppc_store_tb(tb_env, vmclk, &tb_env->tb_offset, tb);
711         /* Store the alternate time base value */
712         cpu_ppc_store_tb(tb_env, vmclk, &tb_env->atb_offset, atb);
713     }
714 }
715 
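/*
 * True when the decrementer interrupt is purely edge-triggered (underflow
 * triggered but not level based), i.e. the pending interrupt is cleared
 * when the exception is delivered rather than tracking the DECR MSB.
 */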
716 bool ppc_decr_clear_on_delivery(CPUPPCState *env)
717 {
718     ppc_tb_t *tb_env = env->tb_env;
719     int flags = PPC_DECR_UNDERFLOW_TRIGGERED | PPC_DECR_UNDERFLOW_LEVEL;
720     return ((tb_env->flags & flags) == PPC_DECR_UNDERFLOW_TRIGGERED);
721 }
722 
723 static inline int64_t __cpu_ppc_load_decr(CPUPPCState *env, int64_t now,
724                                           uint64_t next)
725 {
726     ppc_tb_t *tb_env = env->tb_env;
727     uint64_t n;
728     int64_t decr;
729 
730     n = ns_to_tb(tb_env->decr_freq, now);
731     if (next > n && tb_env->flags & PPC_TIMER_BOOKE) {
732         decr = 0;
733     } else {
734         decr = next - n;
735     }
736 
737     trace_ppc_decr_load(decr);
738 
739     return decr;
740 }
741 
742 static target_ulong _cpu_ppc_load_decr(CPUPPCState *env, int64_t now)
743 {
744     ppc_tb_t *tb_env = env->tb_env;
745     uint64_t decr;
746 
747     decr = __cpu_ppc_load_decr(env, now, tb_env->decr_next);
748 
749     /*
750      * If the large decrementer is enabled then the decrementer is
751      * sign-extended to 64 bits, otherwise it is a 32-bit value.
752      */
753     if (env->spr[SPR_LPCR] & LPCR_LD) {
754         PowerPCCPU *cpu = env_archcpu(env);
755         PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
756         return sextract64(decr, 0, pcc->lrg_decr_bits);
757     }
758     return (uint32_t) decr;
759 }
760 
761 target_ulong cpu_ppc_load_decr(CPUPPCState *env)
762 {
763     if (kvm_enabled()) {
764         return env->spr[SPR_DECR];
765     } else {
766         return _cpu_ppc_load_decr(env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
767     }
768 }
769 
770 static target_ulong _cpu_ppc_load_hdecr(CPUPPCState *env, int64_t now)
771 {
772     PowerPCCPU *cpu = env_archcpu(env);
773     PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
774     ppc_tb_t *tb_env = env->tb_env;
775     uint64_t hdecr;
776 
777     hdecr =  __cpu_ppc_load_decr(env, now, tb_env->hdecr_next);
778 
779     /*
780      * If we have a large decrementer (POWER9 or later) then hdecr is sign
781      * extended to 64 bits, otherwise it is 32 bits.
782      */
783     if (pcc->lrg_decr_bits > 32) {
784         return sextract64(hdecr, 0, pcc->lrg_decr_bits);
785     }
786     return (uint32_t) hdecr;
787 }
788 
789 target_ulong cpu_ppc_load_hdecr(CPUPPCState *env)
790 {
791     return _cpu_ppc_load_hdecr(env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
792 }
793 
794 uint64_t cpu_ppc_load_purr (CPUPPCState *env)
795 {
796     ppc_tb_t *tb_env = env->tb_env;
797 
798     return cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
799                           tb_env->purr_offset);
800 }
801 
802 /* When the decrementer expires,
803  * all we need to do is generate or queue a CPU exception.
804  */
805 static inline void cpu_ppc_decr_excp(PowerPCCPU *cpu)
806 {
807     /* Raise it */
808     trace_ppc_decr_excp("raise");
809     ppc_set_irq(cpu, PPC_INTERRUPT_DECR, 1);
810 }
811 
812 static inline void cpu_ppc_decr_lower(PowerPCCPU *cpu)
813 {
814     ppc_set_irq(cpu, PPC_INTERRUPT_DECR, 0);
815 }
816 
817 static inline void cpu_ppc_hdecr_excp(PowerPCCPU *cpu)
818 {
819     CPUPPCState *env = &cpu->env;
820 
821     /* Raise it */
822     trace_ppc_decr_excp("raise HV");
823 
824     /* The architecture specifies that we don't deliver HDEC
825      * interrupts in a PM state. Not only do they not cause a
826      * wakeup, they are also effectively discarded.
827      */
828     if (!env->resume_as_sreset) {
829         ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 1);
830     }
831 }
832 
833 static inline void cpu_ppc_hdecr_lower(PowerPCCPU *cpu)
834 {
835     ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 0);
836 }
837 
838 static void __cpu_ppc_store_decr(PowerPCCPU *cpu, int64_t now, uint64_t *nextp,
839                                  QEMUTimer *timer,
840                                  void (*raise_excp)(void *),
841                                  void (*lower_excp)(PowerPCCPU *),
842                                  uint32_t flags, target_ulong decr,
843                                  target_ulong value, int nr_bits)
844 {
845     CPUPPCState *env = &cpu->env;
846     ppc_tb_t *tb_env = env->tb_env;
847     uint64_t next;
848     int64_t signed_value;
849     int64_t signed_decr;
850 
851     /* Truncate value and decr to nr_bits and sign-extend for simplicity */
852     value = extract64(value, 0, nr_bits);
853     decr = extract64(decr, 0, nr_bits);
854     signed_value = sextract64(value, 0, nr_bits);
855     signed_decr = sextract64(decr, 0, nr_bits);
856 
857     trace_ppc_decr_store(nr_bits, decr, value);
858 
859     /*
860      * Calculate the next decrementer event and set a timer.
861      * decr_next is in timebase units to keep rounding simple. Note it is
862      * not adjusted by tb_offset: the decrementer does not move when the TB
863      * is shifted via tb_offset, so it is not directly comparable with the TB.
864      */
865     next = ns_to_tb(tb_env->decr_freq, now) + value;
866     *nextp = next; /* nextp is in timebase units */
867 
868     /*
869      * Going from 1 -> 0 or 0 -> -1 is the event to generate a DEC interrupt.
870      *
871      * On MSB level based DEC implementations the MSB always means the interrupt
872      * is pending, so raise it on those.
873      *
874      * On MSB edge based DEC implementations the MSB going from 0 -> 1 triggers
875      * an edge interrupt, so raise it here too.
876      */
877     if (((flags & PPC_DECR_UNDERFLOW_LEVEL) && signed_value < 0) ||
878         ((flags & PPC_DECR_UNDERFLOW_TRIGGERED) && signed_value < 0
879           && signed_decr >= 0)) {
880         (*raise_excp)(cpu);
881         return;
882     }
883 
884     /* On MSB level based systems a 0 for the MSB stops interrupt delivery */
885     if (signed_value >= 0 && (flags & PPC_DECR_UNDERFLOW_LEVEL)) {
886         (*lower_excp)(cpu);
887     }
888 
889     /* Adjust timer */
890     timer_mod(timer, tb_to_ns_round_up(tb_env->decr_freq, next));
891 }
892 
893 static inline void _cpu_ppc_store_decr(PowerPCCPU *cpu, int64_t now,
894                                        target_ulong decr, target_ulong value,
895                                        int nr_bits)
896 {
897     ppc_tb_t *tb_env = cpu->env.tb_env;
898 
899     __cpu_ppc_store_decr(cpu, now, &tb_env->decr_next, tb_env->decr_timer,
900                          tb_env->decr_timer->cb, &cpu_ppc_decr_lower,
901                          tb_env->flags, decr, value, nr_bits);
902 }
903 
904 void cpu_ppc_store_decr(CPUPPCState *env, target_ulong value)
905 {
906     PowerPCCPU *cpu = env_archcpu(env);
907     PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
908     int64_t now;
909     target_ulong decr;
910     int nr_bits = 32;
911 
912         /* KVM handles decrementer exceptions, so we don't need our own timer */
913         /* KVM handles decrementer exceptions, we don't need our own timer */
914         return;
915     }
916 
917     if (env->spr[SPR_LPCR] & LPCR_LD) {
918         nr_bits = pcc->lrg_decr_bits;
919     }
920 
921     now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
922     decr = _cpu_ppc_load_decr(env, now);
923     _cpu_ppc_store_decr(cpu, now, decr, value, nr_bits);
924 }
925 
926 static void cpu_ppc_decr_cb(void *opaque)
927 {
928     PowerPCCPU *cpu = opaque;
929 
930     cpu_ppc_decr_excp(cpu);
931 }
932 
933 static inline void _cpu_ppc_store_hdecr(PowerPCCPU *cpu, int64_t now,
934                                         target_ulong hdecr, target_ulong value,
935                                         int nr_bits)
936 {
937     ppc_tb_t *tb_env = cpu->env.tb_env;
938 
939     if (tb_env->hdecr_timer != NULL) {
940         /* HDECR (Book3S 64bit) is edge-based, not level like DECR */
941         __cpu_ppc_store_decr(cpu, now, &tb_env->hdecr_next, tb_env->hdecr_timer,
942                              tb_env->hdecr_timer->cb, &cpu_ppc_hdecr_lower,
943                              PPC_DECR_UNDERFLOW_TRIGGERED,
944                              hdecr, value, nr_bits);
945     }
946 }
947 
948 void cpu_ppc_store_hdecr(CPUPPCState *env, target_ulong value)
949 {
950     PowerPCCPU *cpu = env_archcpu(env);
951     PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
952     int64_t now;
953     target_ulong hdecr;
954 
955     now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
956     hdecr = _cpu_ppc_load_hdecr(env, now);
957     _cpu_ppc_store_hdecr(cpu, now, hdecr, value, pcc->lrg_decr_bits);
958 }
959 
960 static void cpu_ppc_hdecr_cb(void *opaque)
961 {
962     PowerPCCPU *cpu = opaque;
963 
964     cpu_ppc_hdecr_excp(cpu);
965 }
966 
967 static void _cpu_ppc_store_purr(CPUPPCState *env, int64_t now, uint64_t value)
968 {
969     ppc_tb_t *tb_env = env->tb_env;
970 
971     cpu_ppc_store_tb(tb_env, now, &tb_env->purr_offset, value);
972 }
973 
974 void cpu_ppc_store_purr(CPUPPCState *env, uint64_t value)
975 {
976     _cpu_ppc_store_purr(env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), value);
977 }
978 
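/*
 * Snapshot the guest timebase as host ticks + tb_offset so that
 * timebase_load() can recompute a new tb_offset relative to the destination
 * host's tick counter after a migration or pause.
 */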
979 static void timebase_save(PPCTimebase *tb)
980 {
981     uint64_t ticks = cpu_get_host_ticks();
982     PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu);
983 
984     if (!first_ppc_cpu->env.tb_env) {
985         error_report("No timebase object");
986         return;
987     }
988 
989     if (replay_mode == REPLAY_MODE_NONE) {
990         /* not used anymore, we keep it for compatibility */
991         tb->time_of_the_day_ns = qemu_clock_get_ns(QEMU_CLOCK_HOST);
992     } else {
993         /* simpler for record-replay to avoid this event, compat not needed */
994         tb->time_of_the_day_ns = 0;
995     }
996 
997     /*
998      * tb_offset is only expected to be changed by QEMU so
999      * there is no need to update it from KVM here
1000      */
1001     tb->guest_timebase = ticks + first_ppc_cpu->env.tb_env->tb_offset;
1002 
1003     tb->runstate_paused =
1004         runstate_check(RUN_STATE_PAUSED) || runstate_check(RUN_STATE_SAVE_VM);
1005 }
1006 
1007 static void timebase_load(PPCTimebase *tb)
1008 {
1009     CPUState *cpu;
1010     PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu);
1011     int64_t tb_off_adj, tb_off;
1012     unsigned long freq;
1013 
1014     if (!first_ppc_cpu->env.tb_env) {
1015         error_report("No timebase object");
1016         return;
1017     }
1018 
1019     freq = first_ppc_cpu->env.tb_env->tb_freq;
1020 
1021     tb_off_adj = tb->guest_timebase - cpu_get_host_ticks();
1022 
1023     tb_off = first_ppc_cpu->env.tb_env->tb_offset;
1024     trace_ppc_tb_adjust(tb_off, tb_off_adj, tb_off_adj - tb_off,
1025                         (tb_off_adj - tb_off) / freq);
1026 
1027     /* Set new offset to all CPUs */
1028     CPU_FOREACH(cpu) {
1029         PowerPCCPU *pcpu = POWERPC_CPU(cpu);
1030         pcpu->env.tb_env->tb_offset = tb_off_adj;
1031         kvmppc_set_reg_tb_offset(pcpu, pcpu->env.tb_env->tb_offset);
1032     }
1033 }
1034 
1035 void cpu_ppc_clock_vm_state_change(void *opaque, bool running,
1036                                    RunState state)
1037 {
1038     PPCTimebase *tb = opaque;
1039 
1040     if (running) {
1041         timebase_load(tb);
1042     } else {
1043         timebase_save(tb);
1044     }
1045 }
1046 
1047 /*
1048  * When migrating a running guest, read the clock just
1049  * before migration, so that the guest clock counts
1050  * during the events between:
1051  *
1052  *  * vm_stop()
1053  *
1054  *  * pre_save()
1055  *
1056  *  This reduces clock difference on migration from 5s
1057  *  to 0.1s (when max_downtime == 5s), because sending the
1058  *  final pages of memory (which happens between vm_stop()
1059  *  and pre_save()) takes max_downtime.
1060  */
1061 static int timebase_pre_save(void *opaque)
1062 {
1063     PPCTimebase *tb = opaque;
1064 
1065     /* guest_timebase won't be overridden in case of paused guest or savevm */
1066     if (!tb->runstate_paused) {
1067         timebase_save(tb);
1068     }
1069 
1070     return 0;
1071 }
1072 
1073 const VMStateDescription vmstate_ppc_timebase = {
1074     .name = "timebase",
1075     .version_id = 1,
1076     .minimum_version_id = 1,
1077     .pre_save = timebase_pre_save,
1078     .fields = (const VMStateField []) {
1079         VMSTATE_UINT64(guest_timebase, PPCTimebase),
1080         VMSTATE_INT64(time_of_the_day_ns, PPCTimebase),
1081         VMSTATE_END_OF_LIST()
1082     },
1083 };
1084 
1085 /* Set up (once) timebase frequency (in Hz) */
1086 void cpu_ppc_tb_init(CPUPPCState *env, uint32_t freq)
1087 {
1088     PowerPCCPU *cpu = env_archcpu(env);
1089     ppc_tb_t *tb_env;
1090 
1091     tb_env = g_new0(ppc_tb_t, 1);
1092     env->tb_env = tb_env;
1093     tb_env->flags = PPC_DECR_UNDERFLOW_TRIGGERED;
1094     if (is_book3s_arch2x(env)) {
1095         /* All Book3S 64bit CPUs implement level based DEC logic */
1096         tb_env->flags |= PPC_DECR_UNDERFLOW_LEVEL;
1097     }
1098     /* Create new timer */
1099     tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
1100                                       &cpu_ppc_decr_cb, cpu);
1101     if (env->has_hv_mode && !cpu->vhyp) {
1102         tb_env->hdecr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
1103                                            &cpu_ppc_hdecr_cb, cpu);
1104     } else {
1105         tb_env->hdecr_timer = NULL;
1106     }
1107 
1108     tb_env->tb_freq = freq;
1109     tb_env->decr_freq = freq;
1110 }
1111 
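/* Cancel pending DECR/HDECR events and reprogram the decrementers and PURR */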
1112 void cpu_ppc_tb_reset(CPUPPCState *env)
1113 {
1114     PowerPCCPU *cpu = env_archcpu(env);
1115     ppc_tb_t *tb_env = env->tb_env;
1116 
1117     timer_del(tb_env->decr_timer);
1118     ppc_set_irq(cpu, PPC_INTERRUPT_DECR, 0);
1119     tb_env->decr_next = 0;
1120     if (tb_env->hdecr_timer != NULL) {
1121         timer_del(tb_env->hdecr_timer);
1122         ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 0);
1123         tb_env->hdecr_next = 0;
1124     }
1125 
1126     /*
1127      * There is a bug in Linux 2.4 kernels:
1128      * if a decrementer exception is pending when the kernel enables MSR[EE] at
1129      * startup, it is not yet ready to handle it...
1130      */
1131     cpu_ppc_store_decr(env, -1);
1132     cpu_ppc_store_hdecr(env, -1);
1133     cpu_ppc_store_purr(env, 0x0000000000000000ULL);
1134 }
1135 
1136 void cpu_ppc_tb_free(CPUPPCState *env)
1137 {
1138     timer_free(env->tb_env->decr_timer);
1139     timer_free(env->tb_env->hdecr_timer);
1140     g_free(env->tb_env);
1141 }
1142 
1143 /* cpu_ppc_hdecr_init may be used when cpu_ppc_tb_init() did not create the hdecr timer */
1144 void cpu_ppc_hdecr_init(CPUPPCState *env)
1145 {
1146     PowerPCCPU *cpu = env_archcpu(env);
1147 
1148     assert(env->tb_env->hdecr_timer == NULL);
1149 
1150     env->tb_env->hdecr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
1151                                             &cpu_ppc_hdecr_cb, cpu);
1152 }
1153 
1154 void cpu_ppc_hdecr_exit(CPUPPCState *env)
1155 {
1156     PowerPCCPU *cpu = env_archcpu(env);
1157 
1158     timer_free(env->tb_env->hdecr_timer);
1159     env->tb_env->hdecr_timer = NULL;
1160 
1161     cpu_ppc_hdecr_lower(cpu);
1162 }
1163 
1164 /*****************************************************************************/
1165 /* PowerPC 40x timers */
1166 
1167 /* PIT, FIT & WDT */
1168 typedef struct ppc40x_timer_t ppc40x_timer_t;
1169 struct ppc40x_timer_t {
1170     uint64_t pit_reload;  /* PIT auto-reload value        */
1171     uint64_t fit_next;    /* Tick for next FIT interrupt  */
1172     QEMUTimer *fit_timer;
1173     uint64_t wdt_next;    /* Tick for next WDT interrupt  */
1174     QEMUTimer *wdt_timer;
1175 
1176     /* The 405 has the PIT, the 440 has a DECR. */
1177     unsigned int decr_excp;
1178 };
1179 
1180 /* Fixed interval timer */
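/*
 * The FIT period field in TCR (bits 25:24 here) selects an interval of
 * 2^9, 2^13, 2^17 or 2^21 timebase ticks; each expiry sets TSR bit 26 and
 * raises PPC_INTERRUPT_FIT when the enable bit (TCR bit 23) is set.
 */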
1181 static void cpu_4xx_fit_cb (void *opaque)
1182 {
1183     PowerPCCPU *cpu = opaque;
1184     CPUPPCState *env = &cpu->env;
1185     ppc_tb_t *tb_env;
1186     ppc40x_timer_t *ppc40x_timer;
1187     uint64_t now, next;
1188 
1189     tb_env = env->tb_env;
1190     ppc40x_timer = tb_env->opaque;
1191     now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
1192     switch ((env->spr[SPR_40x_TCR] >> 24) & 0x3) {
1193     case 0:
1194         next = 1 << 9;
1195         break;
1196     case 1:
1197         next = 1 << 13;
1198         break;
1199     case 2:
1200         next = 1 << 17;
1201         break;
1202     case 3:
1203         next = 1 << 21;
1204         break;
1205     default:
1206         /* Cannot occur, but makes gcc happy */
1207         return;
1208     }
1209     next = now + tb_to_ns_round_up(tb_env->tb_freq, next);
1210     timer_mod(ppc40x_timer->fit_timer, next);
1211     env->spr[SPR_40x_TSR] |= 1 << 26;
1212     if ((env->spr[SPR_40x_TCR] >> 23) & 0x1) {
1213         ppc_set_irq(cpu, PPC_INTERRUPT_FIT, 1);
1214     }
1215     trace_ppc4xx_fit((int)((env->spr[SPR_40x_TCR] >> 23) & 0x1),
1216                          env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR]);
1217 }
1218 
1219 /* Programmable interval timer */
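/*
 * The PIT reuses tb_env->decr_timer. It is stopped when the reload value is
 * <= 1, when the enable bit (TCR bit 26) is clear, or after an expiry with
 * auto-reload (TCR bit 22) disabled; otherwise decr_next is advanced (or
 * recomputed from the current time) by pit_reload and the timer is rearmed.
 */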
1220 static void start_stop_pit (CPUPPCState *env, ppc_tb_t *tb_env, int is_excp)
1221 {
1222     ppc40x_timer_t *ppc40x_timer;
1223     uint64_t now, next;
1224 
1225     ppc40x_timer = tb_env->opaque;
1226     if (ppc40x_timer->pit_reload <= 1 ||
1227         !((env->spr[SPR_40x_TCR] >> 26) & 0x1) ||
1228         (is_excp && !((env->spr[SPR_40x_TCR] >> 22) & 0x1))) {
1229         /* Stop PIT */
1230         trace_ppc4xx_pit_stop();
1231         timer_del(tb_env->decr_timer);
1232     } else {
1233         trace_ppc4xx_pit_start(ppc40x_timer->pit_reload);
1234         now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
1235 
1236         if (is_excp) {
1237             tb_env->decr_next += ppc40x_timer->pit_reload;
1238         } else {
1239             tb_env->decr_next = ns_to_tb(tb_env->decr_freq, now)
1240                                 + ppc40x_timer->pit_reload;
1241         }
1242         next = tb_to_ns_round_up(tb_env->decr_freq, tb_env->decr_next);
1243         timer_mod(tb_env->decr_timer, next);
1244     }
1245 }
1246 
1247 static void cpu_4xx_pit_cb (void *opaque)
1248 {
1249     PowerPCCPU *cpu = opaque;
1250     CPUPPCState *env = &cpu->env;
1251     ppc_tb_t *tb_env;
1252     ppc40x_timer_t *ppc40x_timer;
1253 
1254     tb_env = env->tb_env;
1255     ppc40x_timer = tb_env->opaque;
1256     env->spr[SPR_40x_TSR] |= 1 << 27;
1257     if ((env->spr[SPR_40x_TCR] >> 26) & 0x1) {
1258         ppc_set_irq(cpu, ppc40x_timer->decr_excp, 1);
1259     }
1260     start_stop_pit(env, tb_env, 1);
1261     trace_ppc4xx_pit((int)((env->spr[SPR_40x_TCR] >> 22) & 0x1),
1262            (int)((env->spr[SPR_40x_TCR] >> 26) & 0x1),
1263            env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR],
1264            ppc40x_timer->pit_reload);
1265 }
1266 
1267 /* Watchdog timer */
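/*
 * Each watchdog period (2^17 .. 2^29 timebase ticks, selected by the top two
 * TCR bits) advances the two TSR status bits: first TSR bit 31 is set, then
 * TSR bit 30 (raising PPC_INTERRUPT_WDT if TCR bit 27 is set), and finally
 * the reset action selected by TCR bits 29:28 is taken.
 */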
1268 static void cpu_4xx_wdt_cb (void *opaque)
1269 {
1270     PowerPCCPU *cpu = opaque;
1271     CPUPPCState *env = &cpu->env;
1272     ppc_tb_t *tb_env;
1273     ppc40x_timer_t *ppc40x_timer;
1274     uint64_t now, next;
1275 
1276     tb_env = env->tb_env;
1277     ppc40x_timer = tb_env->opaque;
1278     now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
1279     switch ((env->spr[SPR_40x_TCR] >> 30) & 0x3) {
1280     case 0:
1281         next = 1 << 17;
1282         break;
1283     case 1:
1284         next = 1 << 21;
1285         break;
1286     case 2:
1287         next = 1 << 25;
1288         break;
1289     case 3:
1290         next = 1 << 29;
1291         break;
1292     default:
1293         /* Cannot occur, but makes gcc happy */
1294         return;
1295     }
1296     next = now + tb_to_ns_round_up(tb_env->decr_freq, next);
1297     trace_ppc4xx_wdt(env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR]);
1298     switch ((env->spr[SPR_40x_TSR] >> 30) & 0x3) {
1299     case 0x0:
1300     case 0x1:
1301         timer_mod(ppc40x_timer->wdt_timer, next);
1302         ppc40x_timer->wdt_next = next;
1303         env->spr[SPR_40x_TSR] |= 1U << 31;
1304         break;
1305     case 0x2:
1306         timer_mod(ppc40x_timer->wdt_timer, next);
1307         ppc40x_timer->wdt_next = next;
1308         env->spr[SPR_40x_TSR] |= 1 << 30;
1309         if ((env->spr[SPR_40x_TCR] >> 27) & 0x1) {
1310             ppc_set_irq(cpu, PPC_INTERRUPT_WDT, 1);
1311         }
1312         break;
1313     case 0x3:
1314         env->spr[SPR_40x_TSR] &= ~0x30000000;
1315         env->spr[SPR_40x_TSR] |= env->spr[SPR_40x_TCR] & 0x30000000;
1316         switch ((env->spr[SPR_40x_TCR] >> 28) & 0x3) {
1317         case 0x0:
1318             /* No reset */
1319             break;
1320         case 0x1: /* Core reset */
1321             ppc40x_core_reset(cpu);
1322             break;
1323         case 0x2: /* Chip reset */
1324             ppc40x_chip_reset(cpu);
1325             break;
1326         case 0x3: /* System reset */
1327             ppc40x_system_reset(cpu);
1328             break;
1329         }
1330     }
1331 }
1332 
1333 void store_40x_pit (CPUPPCState *env, target_ulong val)
1334 {
1335     ppc_tb_t *tb_env;
1336     ppc40x_timer_t *ppc40x_timer;
1337 
1338     tb_env = env->tb_env;
1339     ppc40x_timer = tb_env->opaque;
1340     trace_ppc40x_store_pit(val);
1341     ppc40x_timer->pit_reload = val;
1342     start_stop_pit(env, tb_env, 0);
1343 }
1344 
1345 target_ulong load_40x_pit (CPUPPCState *env)
1346 {
1347     return cpu_ppc_load_decr(env);
1348 }
1349 
1350 void store_40x_tsr(CPUPPCState *env, target_ulong val)
1351 {
1352     PowerPCCPU *cpu = env_archcpu(env);
1353 
1354     trace_ppc40x_store_tsr(val);
1355 
1356     env->spr[SPR_40x_TSR] &= ~(val & 0xFC000000);
1357     if (val & 0x80000000) {
1358         ppc_set_irq(cpu, PPC_INTERRUPT_PIT, 0);
1359     }
1360 }
1361 
1362 void store_40x_tcr(CPUPPCState *env, target_ulong val)
1363 {
1364     PowerPCCPU *cpu = env_archcpu(env);
1365     ppc_tb_t *tb_env;
1366 
1367     trace_ppc40x_store_tcr(val);
1368 
1369     tb_env = env->tb_env;
1370     env->spr[SPR_40x_TCR] = val & 0xFFC00000;
1371     start_stop_pit(env, tb_env, 1);
1372     cpu_4xx_wdt_cb(cpu);
1373 }
1374 
1375 static void ppc_40x_set_tb_clk (void *opaque, uint32_t freq)
1376 {
1377     CPUPPCState *env = opaque;
1378     ppc_tb_t *tb_env = env->tb_env;
1379 
1380     trace_ppc40x_set_tb_clk(freq);
1381     tb_env->tb_freq = freq;
1382     tb_env->decr_freq = freq;
1383     /* XXX: we should also update all timers */
1384 }
1385 
1386 clk_setup_cb ppc_40x_timers_init (CPUPPCState *env, uint32_t freq,
1387                                   unsigned int decr_excp)
1388 {
1389     ppc_tb_t *tb_env;
1390     ppc40x_timer_t *ppc40x_timer;
1391     PowerPCCPU *cpu = env_archcpu(env);
1392 
1393     trace_ppc40x_timers_init(freq);
1394 
1395     tb_env = g_new0(ppc_tb_t, 1);
1396     ppc40x_timer = g_new0(ppc40x_timer_t, 1);
1397 
1398     env->tb_env = tb_env;
1399     tb_env->flags = PPC_DECR_UNDERFLOW_TRIGGERED;
1400     tb_env->tb_freq = freq;
1401     tb_env->decr_freq = freq;
1402     tb_env->opaque = ppc40x_timer;
1403 
1404     /* We use decr timer for PIT */
1405     tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_pit_cb, cpu);
1406     ppc40x_timer->fit_timer =
1407         timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_fit_cb, cpu);
1408     ppc40x_timer->wdt_timer =
1409         timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_wdt_cb, cpu);
1410     ppc40x_timer->decr_excp = decr_excp;
1411 
1412     return &ppc_40x_set_tb_clk;
1413 }
1414 
1415 /*****************************************************************************/
1416 /* Embedded PowerPC Device Control Registers */
1417 typedef struct ppc_dcrn_t ppc_dcrn_t;
1418 struct ppc_dcrn_t {
1419     dcr_read_cb dcr_read;
1420     dcr_write_cb dcr_write;
1421     void *opaque;
1422 };
1423 
1424 /* XXX: on 460, DCR addresses are 32 bits wide,
1425  *      using DCRIPR to get the 22 upper bits of the DCR address
1426  */
1427 #define DCRN_NB 1024
1428 struct ppc_dcr_t {
1429     ppc_dcrn_t dcrn[DCRN_NB];
1430     int (*read_error)(int dcrn);
1431     int (*write_error)(int dcrn);
1432 };
1433 
1434 int ppc_dcr_read (ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp)
1435 {
1436     ppc_dcrn_t *dcr;
1437 
1438     if (dcrn < 0 || dcrn >= DCRN_NB)
1439         goto error;
1440     dcr = &dcr_env->dcrn[dcrn];
1441     if (dcr->dcr_read == NULL)
1442         goto error;
1443     *valp = (*dcr->dcr_read)(dcr->opaque, dcrn);
1444     trace_ppc_dcr_read(dcrn, *valp);
1445 
1446     return 0;
1447 
1448  error:
1449     if (dcr_env->read_error != NULL)
1450         return (*dcr_env->read_error)(dcrn);
1451 
1452     return -1;
1453 }
1454 
1455 int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, uint32_t val)
1456 {
1457     ppc_dcrn_t *dcr;
1458 
1459     if (dcrn < 0 || dcrn >= DCRN_NB)
1460         goto error;
1461     dcr = &dcr_env->dcrn[dcrn];
1462     if (dcr->dcr_write == NULL)
1463         goto error;
1464     trace_ppc_dcr_write(dcrn, val);
1465     (*dcr->dcr_write)(dcr->opaque, dcrn, val);
1466 
1467     return 0;
1468 
1469  error:
1470     if (dcr_env->write_error != NULL)
1471         return (*dcr_env->write_error)(dcrn);
1472 
1473     return -1;
1474 }
1475 
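/*
 * Register read/write callbacks for one DCR number; fails if the DCR
 * environment is missing, the number is out of range, or the slot is already
 * claimed.
 */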
1476 int ppc_dcr_register (CPUPPCState *env, int dcrn, void *opaque,
1477                       dcr_read_cb dcr_read, dcr_write_cb dcr_write)
1478 {
1479     ppc_dcr_t *dcr_env;
1480     ppc_dcrn_t *dcr;
1481 
1482     dcr_env = env->dcr_env;
1483     if (dcr_env == NULL)
1484         return -1;
1485     if (dcrn < 0 || dcrn >= DCRN_NB)
1486         return -1;
1487     dcr = &dcr_env->dcrn[dcrn];
1488     if (dcr->opaque != NULL ||
1489         dcr->dcr_read != NULL ||
1490         dcr->dcr_write != NULL)
1491         return -1;
1492     dcr->opaque = opaque;
1493     dcr->dcr_read = dcr_read;
1494     dcr->dcr_write = dcr_write;
1495 
1496     return 0;
1497 }
1498 
1499 int ppc_dcr_init (CPUPPCState *env, int (*read_error)(int dcrn),
1500                   int (*write_error)(int dcrn))
1501 {
1502     ppc_dcr_t *dcr_env;
1503 
1504     dcr_env = g_new0(ppc_dcr_t, 1);
1505     dcr_env->read_error = read_error;
1506     dcr_env->write_error = write_error;
1507     env->dcr_env = dcr_env;
1508 
1509     return 0;
1510 }
1511 
1512 /*****************************************************************************/
1513 
1514 int ppc_cpu_pir(PowerPCCPU *cpu)
1515 {
1516     CPUPPCState *env = &cpu->env;
1517     return env->spr_cb[SPR_PIR].default_value;
1518 }
1519 
1520 int ppc_cpu_tir(PowerPCCPU *cpu)
1521 {
1522     CPUPPCState *env = &cpu->env;
1523     return env->spr_cb[SPR_TIR].default_value;
1524 }
1525 
1526 PowerPCCPU *ppc_get_vcpu_by_pir(int pir)
1527 {
1528     CPUState *cs;
1529 
1530     CPU_FOREACH(cs) {
1531         PowerPCCPU *cpu = POWERPC_CPU(cs);
1532 
1533         if (ppc_cpu_pir(cpu) == pir) {
1534             return cpu;
1535         }
1536     }
1537 
1538     return NULL;
1539 }
1540 
1541 void ppc_irq_reset(PowerPCCPU *cpu)
1542 {
1543     CPUPPCState *env = &cpu->env;
1544 
1545     env->irq_input_state = 0;
1546     if (kvm_enabled()) {
1547         kvmppc_set_interrupt(cpu, PPC_INTERRUPT_EXT, 0);
1548     }
1549 }
1550