xref: /openbmc/qemu/hw/ppc/ppc.c (revision d6fd5d83)
1 /*
2  * QEMU generic PowerPC hardware System Emulator
3  *
4  * Copyright (c) 2003-2007 Jocelyn Mayer
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to deal
8  * in the Software without restriction, including without limitation the rights
9  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10  * copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22  * THE SOFTWARE.
23  */
24 
25 #include "qemu/osdep.h"
26 #include "hw/irq.h"
27 #include "hw/ppc/ppc.h"
28 #include "hw/ppc/ppc_e500.h"
29 #include "qemu/timer.h"
30 #include "sysemu/cpus.h"
31 #include "qemu/log.h"
32 #include "qemu/main-loop.h"
33 #include "qemu/error-report.h"
34 #include "sysemu/kvm.h"
35 #include "sysemu/replay.h"
36 #include "sysemu/runstate.h"
37 #include "kvm_ppc.h"
38 #include "migration/vmstate.h"
39 #include "trace.h"
40 
41 static void cpu_ppc_tb_stop (CPUPPCState *env);
42 static void cpu_ppc_tb_start (CPUPPCState *env);
43 
44 void ppc_set_irq(PowerPCCPU *cpu, int irq, int level)
45 {
46     CPUPPCState *env = &cpu->env;
47     unsigned int old_pending;
48 
49     /* We may already have the BQL if coming from the reset path */
50     BQL_LOCK_GUARD();
51 
52     old_pending = env->pending_interrupts;
53 
54     if (level) {
55         env->pending_interrupts |= irq;
56     } else {
57         env->pending_interrupts &= ~irq;
58     }
59 
60     if (old_pending != env->pending_interrupts) {
61         ppc_maybe_interrupt(env);
62         if (kvm_enabled()) {
63             kvmppc_set_interrupt(cpu, irq, level);
64         }
65     }
66 
67     trace_ppc_irq_set_exit(env, irq, level, env->pending_interrupts,
68                            CPU(cpu)->interrupt_request);
69 }
70 
71 /* PowerPC 6xx / 7xx internal IRQ controller */
72 static void ppc6xx_set_irq(void *opaque, int pin, int level)
73 {
74     PowerPCCPU *cpu = opaque;
75     CPUPPCState *env = &cpu->env;
76     int cur_level;
77 
78     trace_ppc_irq_set(env, pin, level);
79 
80     cur_level = (env->irq_input_state >> pin) & 1;
81     /* Don't generate spurious events */
82     if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
83         CPUState *cs = CPU(cpu);
84 
85         switch (pin) {
86         case PPC6xx_INPUT_TBEN:
87             /* Level sensitive - active high */
88             trace_ppc_irq_set_state("time base", level);
89             if (level) {
90                 cpu_ppc_tb_start(env);
91             } else {
92                 cpu_ppc_tb_stop(env);
93             }
94             break;
95         case PPC6xx_INPUT_INT:
96             /* Level sensitive - active high */
97             trace_ppc_irq_set_state("external IRQ", level);
98             ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
99             break;
100         case PPC6xx_INPUT_SMI:
101             /* Level sensitive - active high */
102             trace_ppc_irq_set_state("SMI IRQ", level);
103             ppc_set_irq(cpu, PPC_INTERRUPT_SMI, level);
104             break;
105         case PPC6xx_INPUT_MCP:
106             /* Negative edge sensitive */
107             /* XXX: TODO: actual reaction may depends on HID0 status
108              *            603/604/740/750: check HID0[EMCP]
109              */
110             if (cur_level == 1 && level == 0) {
111                 trace_ppc_irq_set_state("machine check", 1);
112                 ppc_set_irq(cpu, PPC_INTERRUPT_MCK, 1);
113             }
114             break;
115         case PPC6xx_INPUT_CKSTP_IN:
116             /* Level sensitive - active low */
117             /* XXX: TODO: relay the signal to CKSTP_OUT pin */
118             /* XXX: Note that the only way to restart the CPU is to reset it */
119             if (level) {
120                 trace_ppc_irq_cpu("stop");
121                 cs->halted = 1;
122             }
123             break;
124         case PPC6xx_INPUT_HRESET:
125             /* Level sensitive - active low */
126             if (level) {
127                 trace_ppc_irq_reset("CPU");
128                 cpu_interrupt(cs, CPU_INTERRUPT_RESET);
129             }
130             break;
131         case PPC6xx_INPUT_SRESET:
132             trace_ppc_irq_set_state("RESET IRQ", level);
133             ppc_set_irq(cpu, PPC_INTERRUPT_RESET, level);
134             break;
135         default:
136             g_assert_not_reached();
137         }
138         if (level)
139             env->irq_input_state |= 1 << pin;
140         else
141             env->irq_input_state &= ~(1 << pin);
142     }
143 }
144 
145 void ppc6xx_irq_init(PowerPCCPU *cpu)
146 {
147     qdev_init_gpio_in(DEVICE(cpu), ppc6xx_set_irq, PPC6xx_INPUT_NB);
148 }
149 
150 #if defined(TARGET_PPC64)
151 /* PowerPC 970 internal IRQ controller */
152 static void ppc970_set_irq(void *opaque, int pin, int level)
153 {
154     PowerPCCPU *cpu = opaque;
155     CPUPPCState *env = &cpu->env;
156     int cur_level;
157 
158     trace_ppc_irq_set(env, pin, level);
159 
160     cur_level = (env->irq_input_state >> pin) & 1;
161     /* Don't generate spurious events */
162     if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
163         CPUState *cs = CPU(cpu);
164 
165         switch (pin) {
166         case PPC970_INPUT_INT:
167             /* Level sensitive - active high */
168             trace_ppc_irq_set_state("external IRQ", level);
169             ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
170             break;
171         case PPC970_INPUT_THINT:
172             /* Level sensitive - active high */
173             trace_ppc_irq_set_state("SMI IRQ", level);
174             ppc_set_irq(cpu, PPC_INTERRUPT_THERM, level);
175             break;
176         case PPC970_INPUT_MCP:
177             /* Negative edge sensitive */
178             /* XXX: TODO: actual reaction may depends on HID0 status
179              *            603/604/740/750: check HID0[EMCP]
180              */
181             if (cur_level == 1 && level == 0) {
182                 trace_ppc_irq_set_state("machine check", 1);
183                 ppc_set_irq(cpu, PPC_INTERRUPT_MCK, 1);
184             }
185             break;
186         case PPC970_INPUT_CKSTP:
187             /* Level sensitive - active low */
188             /* XXX: TODO: relay the signal to CKSTP_OUT pin */
189             if (level) {
190                 trace_ppc_irq_cpu("stop");
191                 cs->halted = 1;
192             } else {
193                 trace_ppc_irq_cpu("restart");
194                 cs->halted = 0;
195                 qemu_cpu_kick(cs);
196             }
197             break;
198         case PPC970_INPUT_HRESET:
199             /* Level sensitive - active low */
200             if (level) {
201                 cpu_interrupt(cs, CPU_INTERRUPT_RESET);
202             }
203             break;
204         case PPC970_INPUT_SRESET:
205             trace_ppc_irq_set_state("RESET IRQ", level);
206             ppc_set_irq(cpu, PPC_INTERRUPT_RESET, level);
207             break;
208         case PPC970_INPUT_TBEN:
209             trace_ppc_irq_set_state("TBEN IRQ", level);
210             /* XXX: TODO */
211             break;
212         default:
213             g_assert_not_reached();
214         }
215         if (level)
216             env->irq_input_state |= 1 << pin;
217         else
218             env->irq_input_state &= ~(1 << pin);
219     }
220 }
221 
222 void ppc970_irq_init(PowerPCCPU *cpu)
223 {
224     qdev_init_gpio_in(DEVICE(cpu), ppc970_set_irq, PPC970_INPUT_NB);
225 }
226 
227 /* POWER7 internal IRQ controller */
228 static void power7_set_irq(void *opaque, int pin, int level)
229 {
230     PowerPCCPU *cpu = opaque;
231 
232     trace_ppc_irq_set(&cpu->env, pin, level);
233 
234     switch (pin) {
235     case POWER7_INPUT_INT:
236         /* Level sensitive - active high */
237         trace_ppc_irq_set_state("external IRQ", level);
238         ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
239         break;
240     default:
241         g_assert_not_reached();
242     }
243 }
244 
245 void ppcPOWER7_irq_init(PowerPCCPU *cpu)
246 {
247     qdev_init_gpio_in(DEVICE(cpu), power7_set_irq, POWER7_INPUT_NB);
248 }
249 
250 /* POWER9 internal IRQ controller */
251 static void power9_set_irq(void *opaque, int pin, int level)
252 {
253     PowerPCCPU *cpu = opaque;
254 
255     trace_ppc_irq_set(&cpu->env, pin, level);
256 
257     switch (pin) {
258     case POWER9_INPUT_INT:
259         /* Level sensitive - active high */
260         trace_ppc_irq_set_state("external IRQ", level);
261         ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
262         break;
263     case POWER9_INPUT_HINT:
264         /* Level sensitive - active high */
265         trace_ppc_irq_set_state("HV external IRQ", level);
266         ppc_set_irq(cpu, PPC_INTERRUPT_HVIRT, level);
267         break;
268     default:
269         g_assert_not_reached();
270         return;
271     }
272 }
273 
274 void ppcPOWER9_irq_init(PowerPCCPU *cpu)
275 {
276     qdev_init_gpio_in(DEVICE(cpu), power9_set_irq, POWER9_INPUT_NB);
277 }
278 #endif /* defined(TARGET_PPC64) */
279 
280 void ppc40x_core_reset(PowerPCCPU *cpu)
281 {
282     CPUPPCState *env = &cpu->env;
283     target_ulong dbsr;
284 
285     qemu_log_mask(CPU_LOG_RESET, "Reset PowerPC core\n");
286     cpu_interrupt(CPU(cpu), CPU_INTERRUPT_RESET);
287     dbsr = env->spr[SPR_40x_DBSR];
288     dbsr &= ~0x00000300;
289     dbsr |= 0x00000100;
290     env->spr[SPR_40x_DBSR] = dbsr;
291 }
292 
293 void ppc40x_chip_reset(PowerPCCPU *cpu)
294 {
295     CPUPPCState *env = &cpu->env;
296     target_ulong dbsr;
297 
298     qemu_log_mask(CPU_LOG_RESET, "Reset PowerPC chip\n");
299     cpu_interrupt(CPU(cpu), CPU_INTERRUPT_RESET);
300     /* XXX: TODO reset all internal peripherals */
301     dbsr = env->spr[SPR_40x_DBSR];
302     dbsr &= ~0x00000300;
303     dbsr |= 0x00000200;
304     env->spr[SPR_40x_DBSR] = dbsr;
305 }
306 
307 void ppc40x_system_reset(PowerPCCPU *cpu)
308 {
309     qemu_log_mask(CPU_LOG_RESET, "Reset PowerPC system\n");
310     qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
311 }
312 
313 void store_40x_dbcr0(CPUPPCState *env, uint32_t val)
314 {
315     PowerPCCPU *cpu = env_archcpu(env);
316 
317     bql_lock();
318 
319     switch ((val >> 28) & 0x3) {
320     case 0x0:
321         /* No action */
322         break;
323     case 0x1:
324         /* Core reset */
325         ppc40x_core_reset(cpu);
326         break;
327     case 0x2:
328         /* Chip reset */
329         ppc40x_chip_reset(cpu);
330         break;
331     case 0x3:
332         /* System reset */
333         ppc40x_system_reset(cpu);
334         break;
335     }
336 
337     bql_unlock();
338 }
339 
340 /* PowerPC 40x internal IRQ controller */
341 static void ppc40x_set_irq(void *opaque, int pin, int level)
342 {
343     PowerPCCPU *cpu = opaque;
344     CPUPPCState *env = &cpu->env;
345     int cur_level;
346 
347     trace_ppc_irq_set(env, pin, level);
348 
349     cur_level = (env->irq_input_state >> pin) & 1;
350     /* Don't generate spurious events */
351     if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
352         CPUState *cs = CPU(cpu);
353 
354         switch (pin) {
355         case PPC40x_INPUT_RESET_SYS:
356             if (level) {
357                 trace_ppc_irq_reset("system");
358                 ppc40x_system_reset(cpu);
359             }
360             break;
361         case PPC40x_INPUT_RESET_CHIP:
362             if (level) {
363                 trace_ppc_irq_reset("chip");
364                 ppc40x_chip_reset(cpu);
365             }
366             break;
367         case PPC40x_INPUT_RESET_CORE:
368             /* XXX: TODO: update DBSR[MRR] */
369             if (level) {
370                 trace_ppc_irq_reset("core");
371                 ppc40x_core_reset(cpu);
372             }
373             break;
374         case PPC40x_INPUT_CINT:
375             /* Level sensitive - active high */
376             trace_ppc_irq_set_state("critical IRQ", level);
377             ppc_set_irq(cpu, PPC_INTERRUPT_CEXT, level);
378             break;
379         case PPC40x_INPUT_INT:
380             /* Level sensitive - active high */
381             trace_ppc_irq_set_state("external IRQ", level);
382             ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
383             break;
384         case PPC40x_INPUT_HALT:
385             /* Level sensitive - active low */
386             if (level) {
387                 trace_ppc_irq_cpu("stop");
388                 cs->halted = 1;
389             } else {
390                 trace_ppc_irq_cpu("restart");
391                 cs->halted = 0;
392                 qemu_cpu_kick(cs);
393             }
394             break;
395         case PPC40x_INPUT_DEBUG:
396             /* Level sensitive - active high */
397             trace_ppc_irq_set_state("debug pin", level);
398             ppc_set_irq(cpu, PPC_INTERRUPT_DEBUG, level);
399             break;
400         default:
401             g_assert_not_reached();
402         }
403         if (level)
404             env->irq_input_state |= 1 << pin;
405         else
406             env->irq_input_state &= ~(1 << pin);
407     }
408 }
409 
410 void ppc40x_irq_init(PowerPCCPU *cpu)
411 {
412     qdev_init_gpio_in(DEVICE(cpu), ppc40x_set_irq, PPC40x_INPUT_NB);
413 }
414 
415 /* PowerPC E500 internal IRQ controller */
416 static void ppce500_set_irq(void *opaque, int pin, int level)
417 {
418     PowerPCCPU *cpu = opaque;
419     CPUPPCState *env = &cpu->env;
420     int cur_level;
421 
422     trace_ppc_irq_set(env, pin, level);
423 
424     cur_level = (env->irq_input_state >> pin) & 1;
425     /* Don't generate spurious events */
426     if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
427         switch (pin) {
428         case PPCE500_INPUT_MCK:
429             if (level) {
430                 trace_ppc_irq_reset("system");
431                 qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
432             }
433             break;
434         case PPCE500_INPUT_RESET_CORE:
435             if (level) {
436                 trace_ppc_irq_reset("core");
437                 ppc_set_irq(cpu, PPC_INTERRUPT_MCK, level);
438             }
439             break;
440         case PPCE500_INPUT_CINT:
441             /* Level sensitive - active high */
442             trace_ppc_irq_set_state("critical IRQ", level);
443             ppc_set_irq(cpu, PPC_INTERRUPT_CEXT, level);
444             break;
445         case PPCE500_INPUT_INT:
446             /* Level sensitive - active high */
447             trace_ppc_irq_set_state("core IRQ", level);
448             ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
449             break;
450         case PPCE500_INPUT_DEBUG:
451             /* Level sensitive - active high */
452             trace_ppc_irq_set_state("debug pin", level);
453             ppc_set_irq(cpu, PPC_INTERRUPT_DEBUG, level);
454             break;
455         default:
456             g_assert_not_reached();
457         }
458         if (level)
459             env->irq_input_state |= 1 << pin;
460         else
461             env->irq_input_state &= ~(1 << pin);
462     }
463 }
464 
465 void ppce500_irq_init(PowerPCCPU *cpu)
466 {
467     qdev_init_gpio_in(DEVICE(cpu), ppce500_set_irq, PPCE500_INPUT_NB);
468 }
469 
470 /* Enable or Disable the E500 EPR capability */
471 void ppce500_set_mpic_proxy(bool enabled)
472 {
473     CPUState *cs;
474 
475     CPU_FOREACH(cs) {
476         PowerPCCPU *cpu = POWERPC_CPU(cs);
477 
478         cpu->env.mpic_proxy = enabled;
479         if (kvm_enabled()) {
480             kvmppc_set_mpic_proxy(cpu, enabled);
481         }
482     }
483 }
484 
485 /*****************************************************************************/
486 /* PowerPC time base and decrementer emulation */
487 
488 /*
489  * Conversion between QEMU_CLOCK_VIRTUAL ns and timebase (TB) ticks:
490  * TB ticks are arrived at by multiplying tb_freq then dividing by
491  * ns per second, and rounding down. TB ticks drive all clocks and
492  * timers in the target machine.
493  *
494  * Converting TB intervals to ns for the purpose of setting a
495  * QEMU_CLOCK_VIRTUAL timer should go the other way, but rounding
496  * up. Rounding down could cause the timer to fire before the TB
497  * value has been reached.
498  */
499 static uint64_t ns_to_tb(uint32_t freq, int64_t clock)
500 {
501     return muldiv64(clock, freq, NANOSECONDS_PER_SECOND);
502 }
503 
504 /* virtual clock in TB ticks, not adjusted by TB offset */
505 static int64_t tb_to_ns_round_up(uint32_t freq, uint64_t tb)
506 {
507     return muldiv64_round_up(tb, NANOSECONDS_PER_SECOND, freq);
508 }
509 
510 uint64_t cpu_ppc_get_tb(ppc_tb_t *tb_env, uint64_t vmclk, int64_t tb_offset)
511 {
512     /* TB time in tb periods */
513     return ns_to_tb(tb_env->tb_freq, vmclk) + tb_offset;
514 }
515 
516 uint64_t cpu_ppc_load_tbl (CPUPPCState *env)
517 {
518     ppc_tb_t *tb_env = env->tb_env;
519     uint64_t tb;
520 
521     if (kvm_enabled()) {
522         return env->spr[SPR_TBL];
523     }
524 
525     tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
526                         tb_env->tb_offset);
527     trace_ppc_tb_load(tb);
528 
529     return tb;
530 }
531 
532 static inline uint32_t _cpu_ppc_load_tbu(CPUPPCState *env)
533 {
534     ppc_tb_t *tb_env = env->tb_env;
535     uint64_t tb;
536 
537     tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
538                         tb_env->tb_offset);
539     trace_ppc_tb_load(tb);
540 
541     return tb >> 32;
542 }
543 
544 uint32_t cpu_ppc_load_tbu (CPUPPCState *env)
545 {
546     if (kvm_enabled()) {
547         return env->spr[SPR_TBU];
548     }
549 
550     return _cpu_ppc_load_tbu(env);
551 }
552 
553 static inline void cpu_ppc_store_tb(ppc_tb_t *tb_env, uint64_t vmclk,
554                                     int64_t *tb_offsetp, uint64_t value)
555 {
556     *tb_offsetp = value - ns_to_tb(tb_env->tb_freq, vmclk);
557 
558     trace_ppc_tb_store(value, *tb_offsetp);
559 }
560 
561 void cpu_ppc_store_tbl (CPUPPCState *env, uint32_t value)
562 {
563     ppc_tb_t *tb_env = env->tb_env;
564     int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
565     uint64_t tb;
566 
567     tb = cpu_ppc_get_tb(tb_env, clock, tb_env->tb_offset);
568     tb &= 0xFFFFFFFF00000000ULL;
569     cpu_ppc_store_tb(tb_env, clock, &tb_env->tb_offset, tb | (uint64_t)value);
570 }
571 
572 static inline void _cpu_ppc_store_tbu(CPUPPCState *env, uint32_t value)
573 {
574     ppc_tb_t *tb_env = env->tb_env;
575     int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
576     uint64_t tb;
577 
578     tb = cpu_ppc_get_tb(tb_env, clock, tb_env->tb_offset);
579     tb &= 0x00000000FFFFFFFFULL;
580     cpu_ppc_store_tb(tb_env, clock, &tb_env->tb_offset,
581                      ((uint64_t)value << 32) | tb);
582 }
583 
584 void cpu_ppc_store_tbu (CPUPPCState *env, uint32_t value)
585 {
586     _cpu_ppc_store_tbu(env, value);
587 }
588 
589 uint64_t cpu_ppc_load_atbl (CPUPPCState *env)
590 {
591     ppc_tb_t *tb_env = env->tb_env;
592     uint64_t tb;
593 
594     tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
595                         tb_env->atb_offset);
596     trace_ppc_tb_load(tb);
597 
598     return tb;
599 }
600 
601 uint32_t cpu_ppc_load_atbu (CPUPPCState *env)
602 {
603     ppc_tb_t *tb_env = env->tb_env;
604     uint64_t tb;
605 
606     tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
607                         tb_env->atb_offset);
608     trace_ppc_tb_load(tb);
609 
610     return tb >> 32;
611 }
612 
613 void cpu_ppc_store_atbl (CPUPPCState *env, uint32_t value)
614 {
615     ppc_tb_t *tb_env = env->tb_env;
616     int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
617     uint64_t tb;
618 
619     tb = cpu_ppc_get_tb(tb_env, clock, tb_env->atb_offset);
620     tb &= 0xFFFFFFFF00000000ULL;
621     cpu_ppc_store_tb(tb_env, clock, &tb_env->atb_offset, tb | (uint64_t)value);
622 }
623 
624 void cpu_ppc_store_atbu (CPUPPCState *env, uint32_t value)
625 {
626     ppc_tb_t *tb_env = env->tb_env;
627     int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
628     uint64_t tb;
629 
630     tb = cpu_ppc_get_tb(tb_env, clock, tb_env->atb_offset);
631     tb &= 0x00000000FFFFFFFFULL;
632     cpu_ppc_store_tb(tb_env, clock, &tb_env->atb_offset,
633                      ((uint64_t)value << 32) | tb);
634 }
635 
636 void cpu_ppc_increase_tb_by_offset(CPUPPCState *env, int64_t offset)
637 {
638     env->tb_env->tb_offset += offset;
639 }
640 
641 void cpu_ppc_decrease_tb_by_offset(CPUPPCState *env, int64_t offset)
642 {
643     env->tb_env->tb_offset -= offset;
644 }
645 
646 uint64_t cpu_ppc_load_vtb(CPUPPCState *env)
647 {
648     ppc_tb_t *tb_env = env->tb_env;
649 
650     return cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
651                           tb_env->vtb_offset);
652 }
653 
654 void cpu_ppc_store_vtb(CPUPPCState *env, uint64_t value)
655 {
656     ppc_tb_t *tb_env = env->tb_env;
657 
658     cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
659                      &tb_env->vtb_offset, value);
660 }
661 
662 void cpu_ppc_store_tbu40(CPUPPCState *env, uint64_t value)
663 {
664     ppc_tb_t *tb_env = env->tb_env;
665     int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
666     uint64_t tb;
667 
668     tb = cpu_ppc_get_tb(tb_env, clock, tb_env->tb_offset);
669     tb &= 0xFFFFFFUL;
670     tb |= (value & ~0xFFFFFFUL);
671     cpu_ppc_store_tb(tb_env, clock, &tb_env->tb_offset, tb);
672 }
673 
674 static void cpu_ppc_tb_stop (CPUPPCState *env)
675 {
676     ppc_tb_t *tb_env = env->tb_env;
677     uint64_t tb, atb, vmclk;
678 
679     /* If the time base is already frozen, do nothing */
680     if (tb_env->tb_freq != 0) {
681         vmclk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
682         /* Get the time base */
683         tb = cpu_ppc_get_tb(tb_env, vmclk, tb_env->tb_offset);
684         /* Get the alternate time base */
685         atb = cpu_ppc_get_tb(tb_env, vmclk, tb_env->atb_offset);
686         /* Store the time base value (ie compute the current offset) */
687         cpu_ppc_store_tb(tb_env, vmclk, &tb_env->tb_offset, tb);
688         /* Store the alternate time base value (compute the current offset) */
689         cpu_ppc_store_tb(tb_env, vmclk, &tb_env->atb_offset, atb);
690         /* Set the time base frequency to zero */
691         tb_env->tb_freq = 0;
692         /* Now, the time bases are frozen to tb_offset / atb_offset value */
693     }
694 }
695 
696 static void cpu_ppc_tb_start (CPUPPCState *env)
697 {
698     ppc_tb_t *tb_env = env->tb_env;
699     uint64_t tb, atb, vmclk;
700 
701     /* If the time base is not frozen, do nothing */
702     if (tb_env->tb_freq == 0) {
703         vmclk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
704         /* Get the time base from tb_offset */
705         tb = tb_env->tb_offset;
706         /* Get the alternate time base from atb_offset */
707         atb = tb_env->atb_offset;
708         /* Restore the tb frequency from the decrementer frequency */
709         tb_env->tb_freq = tb_env->decr_freq;
710         /* Store the time base value */
711         cpu_ppc_store_tb(tb_env, vmclk, &tb_env->tb_offset, tb);
712         /* Store the alternate time base value */
713         cpu_ppc_store_tb(tb_env, vmclk, &tb_env->atb_offset, atb);
714     }
715 }
716 
717 bool ppc_decr_clear_on_delivery(CPUPPCState *env)
718 {
719     ppc_tb_t *tb_env = env->tb_env;
720     int flags = PPC_DECR_UNDERFLOW_TRIGGERED | PPC_DECR_UNDERFLOW_LEVEL;
721     return ((tb_env->flags & flags) == PPC_DECR_UNDERFLOW_TRIGGERED);
722 }
723 
724 static inline int64_t __cpu_ppc_load_decr(CPUPPCState *env, int64_t now,
725                                           uint64_t next)
726 {
727     ppc_tb_t *tb_env = env->tb_env;
728     uint64_t n;
729     int64_t decr;
730 
731     n = ns_to_tb(tb_env->decr_freq, now);
732     if (next > n && tb_env->flags & PPC_TIMER_BOOKE) {
733         decr = 0;
734     } else {
735         decr = next - n;
736     }
737 
738     trace_ppc_decr_load(decr);
739 
740     return decr;
741 }
742 
743 static target_ulong _cpu_ppc_load_decr(CPUPPCState *env, int64_t now)
744 {
745     ppc_tb_t *tb_env = env->tb_env;
746     uint64_t decr;
747 
748     decr = __cpu_ppc_load_decr(env, now, tb_env->decr_next);
749 
750     /*
751      * If large decrementer is enabled then the decrementer is signed extended
752      * to 64 bits, otherwise it is a 32 bit value.
753      */
754     if (env->spr[SPR_LPCR] & LPCR_LD) {
755         PowerPCCPU *cpu = env_archcpu(env);
756         PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
757         return sextract64(decr, 0, pcc->lrg_decr_bits);
758     }
759     return (uint32_t) decr;
760 }
761 
762 target_ulong cpu_ppc_load_decr(CPUPPCState *env)
763 {
764     if (kvm_enabled()) {
765         return env->spr[SPR_DECR];
766     } else {
767         return _cpu_ppc_load_decr(env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
768     }
769 }
770 
771 static target_ulong _cpu_ppc_load_hdecr(CPUPPCState *env, int64_t now)
772 {
773     PowerPCCPU *cpu = env_archcpu(env);
774     PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
775     ppc_tb_t *tb_env = env->tb_env;
776     uint64_t hdecr;
777 
778     hdecr =  __cpu_ppc_load_decr(env, now, tb_env->hdecr_next);
779 
780     /*
781      * If we have a large decrementer (POWER9 or later) then hdecr is sign
782      * extended to 64 bits, otherwise it is 32 bits.
783      */
784     if (pcc->lrg_decr_bits > 32) {
785         return sextract64(hdecr, 0, pcc->lrg_decr_bits);
786     }
787     return (uint32_t) hdecr;
788 }
789 
790 target_ulong cpu_ppc_load_hdecr(CPUPPCState *env)
791 {
792     return _cpu_ppc_load_hdecr(env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
793 }
794 
795 uint64_t cpu_ppc_load_purr (CPUPPCState *env)
796 {
797     ppc_tb_t *tb_env = env->tb_env;
798 
799     return cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
800                           tb_env->purr_offset);
801 }
802 
803 /* When decrementer expires,
804  * all we need to do is generate or queue a CPU exception
805  */
806 static inline void cpu_ppc_decr_excp(PowerPCCPU *cpu)
807 {
808     /* Raise it */
809     trace_ppc_decr_excp("raise");
810     ppc_set_irq(cpu, PPC_INTERRUPT_DECR, 1);
811 }
812 
813 static inline void cpu_ppc_decr_lower(PowerPCCPU *cpu)
814 {
815     ppc_set_irq(cpu, PPC_INTERRUPT_DECR, 0);
816 }
817 
818 static inline void cpu_ppc_hdecr_excp(PowerPCCPU *cpu)
819 {
820     CPUPPCState *env = &cpu->env;
821 
822     /* Raise it */
823     trace_ppc_decr_excp("raise HV");
824 
825     /* The architecture specifies that we don't deliver HDEC
826      * interrupts in a PM state. Not only they don't cause a
827      * wakeup but they also get effectively discarded.
828      */
829     if (!env->resume_as_sreset) {
830         ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 1);
831     }
832 }
833 
834 static inline void cpu_ppc_hdecr_lower(PowerPCCPU *cpu)
835 {
836     ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 0);
837 }
838 
839 static void __cpu_ppc_store_decr(PowerPCCPU *cpu, int64_t now, uint64_t *nextp,
840                                  QEMUTimer *timer,
841                                  void (*raise_excp)(void *),
842                                  void (*lower_excp)(PowerPCCPU *),
843                                  uint32_t flags, target_ulong decr,
844                                  target_ulong value, int nr_bits)
845 {
846     CPUPPCState *env = &cpu->env;
847     ppc_tb_t *tb_env = env->tb_env;
848     uint64_t next;
849     int64_t signed_value;
850     int64_t signed_decr;
851 
852     /* Truncate value to decr_width and sign extend for simplicity */
853     value = extract64(value, 0, nr_bits);
854     decr = extract64(decr, 0, nr_bits);
855     signed_value = sextract64(value, 0, nr_bits);
856     signed_decr = sextract64(decr, 0, nr_bits);
857 
858     trace_ppc_decr_store(nr_bits, decr, value);
859 
860     /*
861      * Calculate the next decrementer event and set a timer.
862      * decr_next is in timebase units to keep rounding simple. Note it is
863      * not adjusted by tb_offset because if TB changes via tb_offset changing,
864      * decrementer does not change, so not directly comparable with TB.
865      */
866     next = ns_to_tb(tb_env->decr_freq, now) + value;
867     *nextp = next; /* nextp is in timebase units */
868 
869     /*
870      * Going from 1 -> 0 or 0 -> -1 is the event to generate a DEC interrupt.
871      *
872      * On MSB level based DEC implementations the MSB always means the interrupt
873      * is pending, so raise it on those.
874      *
875      * On MSB edge based DEC implementations the MSB going from 0 -> 1 triggers
876      * an edge interrupt, so raise it here too.
877      */
878     if (((flags & PPC_DECR_UNDERFLOW_LEVEL) && signed_value < 0) ||
879         ((flags & PPC_DECR_UNDERFLOW_TRIGGERED) && signed_value < 0
880           && signed_decr >= 0)) {
881         (*raise_excp)(cpu);
882         return;
883     }
884 
885     /* On MSB level based systems a 0 for the MSB stops interrupt delivery */
886     if (signed_value >= 0 && (flags & PPC_DECR_UNDERFLOW_LEVEL)) {
887         (*lower_excp)(cpu);
888     }
889 
890     /* Adjust timer */
891     timer_mod(timer, tb_to_ns_round_up(tb_env->decr_freq, next));
892 }
893 
894 static inline void _cpu_ppc_store_decr(PowerPCCPU *cpu, int64_t now,
895                                        target_ulong decr, target_ulong value,
896                                        int nr_bits)
897 {
898     ppc_tb_t *tb_env = cpu->env.tb_env;
899 
900     __cpu_ppc_store_decr(cpu, now, &tb_env->decr_next, tb_env->decr_timer,
901                          tb_env->decr_timer->cb, &cpu_ppc_decr_lower,
902                          tb_env->flags, decr, value, nr_bits);
903 }
904 
905 void cpu_ppc_store_decr(CPUPPCState *env, target_ulong value)
906 {
907     PowerPCCPU *cpu = env_archcpu(env);
908     PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
909     int64_t now;
910     target_ulong decr;
911     int nr_bits = 32;
912 
913     if (kvm_enabled()) {
914         /* KVM handles decrementer exceptions, we don't need our own timer */
915         return;
916     }
917 
918     if (env->spr[SPR_LPCR] & LPCR_LD) {
919         nr_bits = pcc->lrg_decr_bits;
920     }
921 
922     now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
923     decr = _cpu_ppc_load_decr(env, now);
924     _cpu_ppc_store_decr(cpu, now, decr, value, nr_bits);
925 }
926 
927 static void cpu_ppc_decr_cb(void *opaque)
928 {
929     PowerPCCPU *cpu = opaque;
930 
931     cpu_ppc_decr_excp(cpu);
932 }
933 
934 static inline void _cpu_ppc_store_hdecr(PowerPCCPU *cpu, int64_t now,
935                                         target_ulong hdecr, target_ulong value,
936                                         int nr_bits)
937 {
938     ppc_tb_t *tb_env = cpu->env.tb_env;
939 
940     if (tb_env->hdecr_timer != NULL) {
941         /* HDECR (Book3S 64bit) is edge-based, not level like DECR */
942         __cpu_ppc_store_decr(cpu, now, &tb_env->hdecr_next, tb_env->hdecr_timer,
943                              tb_env->hdecr_timer->cb, &cpu_ppc_hdecr_lower,
944                              PPC_DECR_UNDERFLOW_TRIGGERED,
945                              hdecr, value, nr_bits);
946     }
947 }
948 
949 void cpu_ppc_store_hdecr(CPUPPCState *env, target_ulong value)
950 {
951     PowerPCCPU *cpu = env_archcpu(env);
952     PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
953     int64_t now;
954     target_ulong hdecr;
955 
956     now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
957     hdecr = _cpu_ppc_load_hdecr(env, now);
958     _cpu_ppc_store_hdecr(cpu, now, hdecr, value, pcc->lrg_decr_bits);
959 }
960 
961 static void cpu_ppc_hdecr_cb(void *opaque)
962 {
963     PowerPCCPU *cpu = opaque;
964 
965     cpu_ppc_hdecr_excp(cpu);
966 }
967 
968 static void _cpu_ppc_store_purr(CPUPPCState *env, int64_t now, uint64_t value)
969 {
970     ppc_tb_t *tb_env = env->tb_env;
971 
972     cpu_ppc_store_tb(tb_env, now, &tb_env->purr_offset, value);
973 }
974 
975 void cpu_ppc_store_purr(CPUPPCState *env, uint64_t value)
976 {
977     _cpu_ppc_store_purr(env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), value);
978 }
979 
980 static void timebase_save(PPCTimebase *tb)
981 {
982     uint64_t ticks = cpu_get_host_ticks();
983     PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu);
984 
985     if (!first_ppc_cpu->env.tb_env) {
986         error_report("No timebase object");
987         return;
988     }
989 
990     if (replay_mode == REPLAY_MODE_NONE) {
991         /* not used anymore, we keep it for compatibility */
992         tb->time_of_the_day_ns = qemu_clock_get_ns(QEMU_CLOCK_HOST);
993     } else {
994         /* simpler for record-replay to avoid this event, compat not needed */
995         tb->time_of_the_day_ns = 0;
996     }
997 
998     /*
999      * tb_offset is only expected to be changed by QEMU so
1000      * there is no need to update it from KVM here
1001      */
1002     tb->guest_timebase = ticks + first_ppc_cpu->env.tb_env->tb_offset;
1003 
1004     tb->runstate_paused =
1005         runstate_check(RUN_STATE_PAUSED) || runstate_check(RUN_STATE_SAVE_VM);
1006 }
1007 
1008 static void timebase_load(PPCTimebase *tb)
1009 {
1010     CPUState *cpu;
1011     PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu);
1012     int64_t tb_off_adj, tb_off;
1013     unsigned long freq;
1014 
1015     if (!first_ppc_cpu->env.tb_env) {
1016         error_report("No timebase object");
1017         return;
1018     }
1019 
1020     freq = first_ppc_cpu->env.tb_env->tb_freq;
1021 
1022     tb_off_adj = tb->guest_timebase - cpu_get_host_ticks();
1023 
1024     tb_off = first_ppc_cpu->env.tb_env->tb_offset;
1025     trace_ppc_tb_adjust(tb_off, tb_off_adj, tb_off_adj - tb_off,
1026                         (tb_off_adj - tb_off) / freq);
1027 
1028     /* Set new offset to all CPUs */
1029     CPU_FOREACH(cpu) {
1030         PowerPCCPU *pcpu = POWERPC_CPU(cpu);
1031         pcpu->env.tb_env->tb_offset = tb_off_adj;
1032         kvmppc_set_reg_tb_offset(pcpu, pcpu->env.tb_env->tb_offset);
1033     }
1034 }
1035 
1036 void cpu_ppc_clock_vm_state_change(void *opaque, bool running,
1037                                    RunState state)
1038 {
1039     PPCTimebase *tb = opaque;
1040 
1041     if (running) {
1042         timebase_load(tb);
1043     } else {
1044         timebase_save(tb);
1045     }
1046 }
1047 
1048 /*
1049  * When migrating a running guest, read the clock just
1050  * before migration, so that the guest clock counts
1051  * during the events between:
1052  *
1053  *  * vm_stop()
1054  *  *
1055  *  * pre_save()
1056  *
1057  *  This reduces clock difference on migration from 5s
1058  *  to 0.1s (when max_downtime == 5s), because sending the
1059  *  final pages of memory (which happens between vm_stop()
1060  *  and pre_save()) takes max_downtime.
1061  */
1062 static int timebase_pre_save(void *opaque)
1063 {
1064     PPCTimebase *tb = opaque;
1065 
1066     /* guest_timebase won't be overridden in case of paused guest or savevm */
1067     if (!tb->runstate_paused) {
1068         timebase_save(tb);
1069     }
1070 
1071     return 0;
1072 }
1073 
1074 const VMStateDescription vmstate_ppc_timebase = {
1075     .name = "timebase",
1076     .version_id = 1,
1077     .minimum_version_id = 1,
1078     .pre_save = timebase_pre_save,
1079     .fields = (const VMStateField []) {
1080         VMSTATE_UINT64(guest_timebase, PPCTimebase),
1081         VMSTATE_INT64(time_of_the_day_ns, PPCTimebase),
1082         VMSTATE_END_OF_LIST()
1083     },
1084 };
1085 
1086 /* Set up (once) timebase frequency (in Hz) */
1087 void cpu_ppc_tb_init(CPUPPCState *env, uint32_t freq)
1088 {
1089     PowerPCCPU *cpu = env_archcpu(env);
1090     ppc_tb_t *tb_env;
1091 
1092     tb_env = g_new0(ppc_tb_t, 1);
1093     env->tb_env = tb_env;
1094     tb_env->flags = PPC_DECR_UNDERFLOW_TRIGGERED;
1095     if (is_book3s_arch2x(env)) {
1096         /* All Book3S 64bit CPUs implement level based DEC logic */
1097         tb_env->flags |= PPC_DECR_UNDERFLOW_LEVEL;
1098     }
1099     /* Create new timer */
1100     tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
1101                                       &cpu_ppc_decr_cb, cpu);
1102     if (env->has_hv_mode && !cpu->vhyp) {
1103         tb_env->hdecr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
1104                                            &cpu_ppc_hdecr_cb, cpu);
1105     } else {
1106         tb_env->hdecr_timer = NULL;
1107     }
1108 
1109     tb_env->tb_freq = freq;
1110     tb_env->decr_freq = freq;
1111 }
1112 
1113 void cpu_ppc_tb_reset(CPUPPCState *env)
1114 {
1115     PowerPCCPU *cpu = env_archcpu(env);
1116     ppc_tb_t *tb_env = env->tb_env;
1117 
1118     timer_del(tb_env->decr_timer);
1119     ppc_set_irq(cpu, PPC_INTERRUPT_DECR, 0);
1120     tb_env->decr_next = 0;
1121     if (tb_env->hdecr_timer != NULL) {
1122         timer_del(tb_env->hdecr_timer);
1123         ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 0);
1124         tb_env->hdecr_next = 0;
1125     }
1126 
1127     /*
1128      * There is a bug in Linux 2.4 kernels:
1129      * if a decrementer exception is pending when it enables msr_ee at startup,
1130      * it's not ready to handle it...
1131      */
1132     cpu_ppc_store_decr(env, -1);
1133     cpu_ppc_store_hdecr(env, -1);
1134     cpu_ppc_store_purr(env, 0x0000000000000000ULL);
1135 }
1136 
1137 void cpu_ppc_tb_free(CPUPPCState *env)
1138 {
1139     timer_free(env->tb_env->decr_timer);
1140     timer_free(env->tb_env->hdecr_timer);
1141     g_free(env->tb_env);
1142 }
1143 
1144 /* cpu_ppc_hdecr_init may be used if the timer is not used by HDEC emulation */
1145 void cpu_ppc_hdecr_init(CPUPPCState *env)
1146 {
1147     PowerPCCPU *cpu = env_archcpu(env);
1148 
1149     assert(env->tb_env->hdecr_timer == NULL);
1150 
1151     env->tb_env->hdecr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
1152                                             &cpu_ppc_hdecr_cb, cpu);
1153 }
1154 
1155 void cpu_ppc_hdecr_exit(CPUPPCState *env)
1156 {
1157     PowerPCCPU *cpu = env_archcpu(env);
1158 
1159     timer_free(env->tb_env->hdecr_timer);
1160     env->tb_env->hdecr_timer = NULL;
1161 
1162     cpu_ppc_hdecr_lower(cpu);
1163 }
1164 
1165 /*****************************************************************************/
1166 /* PowerPC 40x timers */
1167 
1168 /* PIT, FIT & WDT */
1169 typedef struct ppc40x_timer_t ppc40x_timer_t;
1170 struct ppc40x_timer_t {
1171     uint64_t pit_reload;  /* PIT auto-reload value        */
1172     uint64_t fit_next;    /* Tick for next FIT interrupt  */
1173     QEMUTimer *fit_timer;
1174     uint64_t wdt_next;    /* Tick for next WDT interrupt  */
1175     QEMUTimer *wdt_timer;
1176 
1177     /* 405 have the PIT, 440 have a DECR.  */
1178     unsigned int decr_excp;
1179 };
1180 
1181 /* Fixed interval timer */
1182 static void cpu_4xx_fit_cb (void *opaque)
1183 {
1184     PowerPCCPU *cpu = opaque;
1185     CPUPPCState *env = &cpu->env;
1186     ppc_tb_t *tb_env;
1187     ppc40x_timer_t *ppc40x_timer;
1188     uint64_t now, next;
1189 
1190     tb_env = env->tb_env;
1191     ppc40x_timer = tb_env->opaque;
1192     now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
1193     switch ((env->spr[SPR_40x_TCR] >> 24) & 0x3) {
1194     case 0:
1195         next = 1 << 9;
1196         break;
1197     case 1:
1198         next = 1 << 13;
1199         break;
1200     case 2:
1201         next = 1 << 17;
1202         break;
1203     case 3:
1204         next = 1 << 21;
1205         break;
1206     default:
1207         /* Cannot occur, but makes gcc happy */
1208         return;
1209     }
1210     next = now + tb_to_ns_round_up(tb_env->tb_freq, next);
1211     timer_mod(ppc40x_timer->fit_timer, next);
1212     env->spr[SPR_40x_TSR] |= 1 << 26;
1213     if ((env->spr[SPR_40x_TCR] >> 23) & 0x1) {
1214         ppc_set_irq(cpu, PPC_INTERRUPT_FIT, 1);
1215     }
1216     trace_ppc4xx_fit((int)((env->spr[SPR_40x_TCR] >> 23) & 0x1),
1217                          env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR]);
1218 }
1219 
1220 /* Programmable interval timer */
1221 static void start_stop_pit (CPUPPCState *env, ppc_tb_t *tb_env, int is_excp)
1222 {
1223     ppc40x_timer_t *ppc40x_timer;
1224     uint64_t now, next;
1225 
1226     ppc40x_timer = tb_env->opaque;
1227     if (ppc40x_timer->pit_reload <= 1 ||
1228         !((env->spr[SPR_40x_TCR] >> 26) & 0x1) ||
1229         (is_excp && !((env->spr[SPR_40x_TCR] >> 22) & 0x1))) {
1230         /* Stop PIT */
1231         trace_ppc4xx_pit_stop();
1232         timer_del(tb_env->decr_timer);
1233     } else {
1234         trace_ppc4xx_pit_start(ppc40x_timer->pit_reload);
1235         now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
1236 
1237         if (is_excp) {
1238             tb_env->decr_next += ppc40x_timer->pit_reload;
1239         } else {
1240             tb_env->decr_next = ns_to_tb(tb_env->decr_freq, now)
1241                                 + ppc40x_timer->pit_reload;
1242         }
1243         next = tb_to_ns_round_up(tb_env->decr_freq, tb_env->decr_next);
1244         timer_mod(tb_env->decr_timer, next);
1245     }
1246 }
1247 
1248 static void cpu_4xx_pit_cb (void *opaque)
1249 {
1250     PowerPCCPU *cpu = opaque;
1251     CPUPPCState *env = &cpu->env;
1252     ppc_tb_t *tb_env;
1253     ppc40x_timer_t *ppc40x_timer;
1254 
1255     tb_env = env->tb_env;
1256     ppc40x_timer = tb_env->opaque;
1257     env->spr[SPR_40x_TSR] |= 1 << 27;
1258     if ((env->spr[SPR_40x_TCR] >> 26) & 0x1) {
1259         ppc_set_irq(cpu, ppc40x_timer->decr_excp, 1);
1260     }
1261     start_stop_pit(env, tb_env, 1);
1262     trace_ppc4xx_pit((int)((env->spr[SPR_40x_TCR] >> 22) & 0x1),
1263            (int)((env->spr[SPR_40x_TCR] >> 26) & 0x1),
1264            env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR],
1265            ppc40x_timer->pit_reload);
1266 }
1267 
1268 /* Watchdog timer */
1269 static void cpu_4xx_wdt_cb (void *opaque)
1270 {
1271     PowerPCCPU *cpu = opaque;
1272     CPUPPCState *env = &cpu->env;
1273     ppc_tb_t *tb_env;
1274     ppc40x_timer_t *ppc40x_timer;
1275     uint64_t now, next;
1276 
1277     tb_env = env->tb_env;
1278     ppc40x_timer = tb_env->opaque;
1279     now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
1280     switch ((env->spr[SPR_40x_TCR] >> 30) & 0x3) {
1281     case 0:
1282         next = 1 << 17;
1283         break;
1284     case 1:
1285         next = 1 << 21;
1286         break;
1287     case 2:
1288         next = 1 << 25;
1289         break;
1290     case 3:
1291         next = 1 << 29;
1292         break;
1293     default:
1294         /* Cannot occur, but makes gcc happy */
1295         return;
1296     }
1297     next = now + tb_to_ns_round_up(tb_env->decr_freq, next);
1298     trace_ppc4xx_wdt(env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR]);
1299     switch ((env->spr[SPR_40x_TSR] >> 30) & 0x3) {
1300     case 0x0:
1301     case 0x1:
1302         timer_mod(ppc40x_timer->wdt_timer, next);
1303         ppc40x_timer->wdt_next = next;
1304         env->spr[SPR_40x_TSR] |= 1U << 31;
1305         break;
1306     case 0x2:
1307         timer_mod(ppc40x_timer->wdt_timer, next);
1308         ppc40x_timer->wdt_next = next;
1309         env->spr[SPR_40x_TSR] |= 1 << 30;
1310         if ((env->spr[SPR_40x_TCR] >> 27) & 0x1) {
1311             ppc_set_irq(cpu, PPC_INTERRUPT_WDT, 1);
1312         }
1313         break;
1314     case 0x3:
1315         env->spr[SPR_40x_TSR] &= ~0x30000000;
1316         env->spr[SPR_40x_TSR] |= env->spr[SPR_40x_TCR] & 0x30000000;
1317         switch ((env->spr[SPR_40x_TCR] >> 28) & 0x3) {
1318         case 0x0:
1319             /* No reset */
1320             break;
1321         case 0x1: /* Core reset */
1322             ppc40x_core_reset(cpu);
1323             break;
1324         case 0x2: /* Chip reset */
1325             ppc40x_chip_reset(cpu);
1326             break;
1327         case 0x3: /* System reset */
1328             ppc40x_system_reset(cpu);
1329             break;
1330         }
1331     }
1332 }
1333 
1334 void store_40x_pit (CPUPPCState *env, target_ulong val)
1335 {
1336     ppc_tb_t *tb_env;
1337     ppc40x_timer_t *ppc40x_timer;
1338 
1339     tb_env = env->tb_env;
1340     ppc40x_timer = tb_env->opaque;
1341     trace_ppc40x_store_pit(val);
1342     ppc40x_timer->pit_reload = val;
1343     start_stop_pit(env, tb_env, 0);
1344 }
1345 
1346 target_ulong load_40x_pit (CPUPPCState *env)
1347 {
1348     return cpu_ppc_load_decr(env);
1349 }
1350 
1351 void store_40x_tsr(CPUPPCState *env, target_ulong val)
1352 {
1353     PowerPCCPU *cpu = env_archcpu(env);
1354 
1355     trace_ppc40x_store_tcr(val);
1356 
1357     env->spr[SPR_40x_TSR] &= ~(val & 0xFC000000);
1358     if (val & 0x80000000) {
1359         ppc_set_irq(cpu, PPC_INTERRUPT_PIT, 0);
1360     }
1361 }
1362 
1363 void store_40x_tcr(CPUPPCState *env, target_ulong val)
1364 {
1365     PowerPCCPU *cpu = env_archcpu(env);
1366     ppc_tb_t *tb_env;
1367 
1368     trace_ppc40x_store_tsr(val);
1369 
1370     tb_env = env->tb_env;
1371     env->spr[SPR_40x_TCR] = val & 0xFFC00000;
1372     start_stop_pit(env, tb_env, 1);
1373     cpu_4xx_wdt_cb(cpu);
1374 }
1375 
1376 static void ppc_40x_set_tb_clk (void *opaque, uint32_t freq)
1377 {
1378     CPUPPCState *env = opaque;
1379     ppc_tb_t *tb_env = env->tb_env;
1380 
1381     trace_ppc40x_set_tb_clk(freq);
1382     tb_env->tb_freq = freq;
1383     tb_env->decr_freq = freq;
1384     /* XXX: we should also update all timers */
1385 }
1386 
1387 clk_setup_cb ppc_40x_timers_init (CPUPPCState *env, uint32_t freq,
1388                                   unsigned int decr_excp)
1389 {
1390     ppc_tb_t *tb_env;
1391     ppc40x_timer_t *ppc40x_timer;
1392     PowerPCCPU *cpu = env_archcpu(env);
1393 
1394     trace_ppc40x_timers_init(freq);
1395 
1396     tb_env = g_new0(ppc_tb_t, 1);
1397     ppc40x_timer = g_new0(ppc40x_timer_t, 1);
1398 
1399     env->tb_env = tb_env;
1400     tb_env->flags = PPC_DECR_UNDERFLOW_TRIGGERED;
1401     tb_env->tb_freq = freq;
1402     tb_env->decr_freq = freq;
1403     tb_env->opaque = ppc40x_timer;
1404 
1405     /* We use decr timer for PIT */
1406     tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_pit_cb, cpu);
1407     ppc40x_timer->fit_timer =
1408         timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_fit_cb, cpu);
1409     ppc40x_timer->wdt_timer =
1410         timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_wdt_cb, cpu);
1411     ppc40x_timer->decr_excp = decr_excp;
1412 
1413     return &ppc_40x_set_tb_clk;
1414 }
1415 
1416 /*****************************************************************************/
1417 /* Embedded PowerPC Device Control Registers */
1418 typedef struct ppc_dcrn_t ppc_dcrn_t;
1419 struct ppc_dcrn_t {
1420     dcr_read_cb dcr_read;
1421     dcr_write_cb dcr_write;
1422     void *opaque;
1423 };
1424 
1425 /* XXX: on 460, DCR addresses are 32 bits wide,
1426  *      using DCRIPR to get the 22 upper bits of the DCR address
1427  */
1428 #define DCRN_NB 1024
1429 struct ppc_dcr_t {
1430     ppc_dcrn_t dcrn[DCRN_NB];
1431     int (*read_error)(int dcrn);
1432     int (*write_error)(int dcrn);
1433 };
1434 
1435 int ppc_dcr_read (ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp)
1436 {
1437     ppc_dcrn_t *dcr;
1438 
1439     if (dcrn < 0 || dcrn >= DCRN_NB)
1440         goto error;
1441     dcr = &dcr_env->dcrn[dcrn];
1442     if (dcr->dcr_read == NULL)
1443         goto error;
1444     *valp = (*dcr->dcr_read)(dcr->opaque, dcrn);
1445     trace_ppc_dcr_read(dcrn, *valp);
1446 
1447     return 0;
1448 
1449  error:
1450     if (dcr_env->read_error != NULL)
1451         return (*dcr_env->read_error)(dcrn);
1452 
1453     return -1;
1454 }
1455 
1456 int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, uint32_t val)
1457 {
1458     ppc_dcrn_t *dcr;
1459 
1460     if (dcrn < 0 || dcrn >= DCRN_NB)
1461         goto error;
1462     dcr = &dcr_env->dcrn[dcrn];
1463     if (dcr->dcr_write == NULL)
1464         goto error;
1465     trace_ppc_dcr_write(dcrn, val);
1466     (*dcr->dcr_write)(dcr->opaque, dcrn, val);
1467 
1468     return 0;
1469 
1470  error:
1471     if (dcr_env->write_error != NULL)
1472         return (*dcr_env->write_error)(dcrn);
1473 
1474     return -1;
1475 }
1476 
1477 int ppc_dcr_register (CPUPPCState *env, int dcrn, void *opaque,
1478                       dcr_read_cb dcr_read, dcr_write_cb dcr_write)
1479 {
1480     ppc_dcr_t *dcr_env;
1481     ppc_dcrn_t *dcr;
1482 
1483     dcr_env = env->dcr_env;
1484     if (dcr_env == NULL)
1485         return -1;
1486     if (dcrn < 0 || dcrn >= DCRN_NB)
1487         return -1;
1488     dcr = &dcr_env->dcrn[dcrn];
1489     if (dcr->opaque != NULL ||
1490         dcr->dcr_read != NULL ||
1491         dcr->dcr_write != NULL)
1492         return -1;
1493     dcr->opaque = opaque;
1494     dcr->dcr_read = dcr_read;
1495     dcr->dcr_write = dcr_write;
1496 
1497     return 0;
1498 }
1499 
1500 int ppc_dcr_init (CPUPPCState *env, int (*read_error)(int dcrn),
1501                   int (*write_error)(int dcrn))
1502 {
1503     ppc_dcr_t *dcr_env;
1504 
1505     dcr_env = g_new0(ppc_dcr_t, 1);
1506     dcr_env->read_error = read_error;
1507     dcr_env->write_error = write_error;
1508     env->dcr_env = dcr_env;
1509 
1510     return 0;
1511 }
1512 
1513 /*****************************************************************************/
1514 
1515 int ppc_cpu_pir(PowerPCCPU *cpu)
1516 {
1517     CPUPPCState *env = &cpu->env;
1518     return env->spr_cb[SPR_PIR].default_value;
1519 }
1520 
1521 int ppc_cpu_tir(PowerPCCPU *cpu)
1522 {
1523     CPUPPCState *env = &cpu->env;
1524     return env->spr_cb[SPR_TIR].default_value;
1525 }
1526 
1527 PowerPCCPU *ppc_get_vcpu_by_pir(int pir)
1528 {
1529     CPUState *cs;
1530 
1531     CPU_FOREACH(cs) {
1532         PowerPCCPU *cpu = POWERPC_CPU(cs);
1533 
1534         if (ppc_cpu_pir(cpu) == pir) {
1535             return cpu;
1536         }
1537     }
1538 
1539     return NULL;
1540 }
1541 
1542 void ppc_irq_reset(PowerPCCPU *cpu)
1543 {
1544     CPUPPCState *env = &cpu->env;
1545 
1546     env->irq_input_state = 0;
1547     if (kvm_enabled()) {
1548         kvmppc_set_interrupt(cpu, PPC_INTERRUPT_EXT, 0);
1549     }
1550 }
1551