xref: /openbmc/qemu/hw/ppc/ppc.c (revision 8692aa29)
1 /*
2  * QEMU generic PowerPC hardware System Emulator
3  *
4  * Copyright (c) 2003-2007 Jocelyn Mayer
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to deal
8  * in the Software without restriction, including without limitation the rights
9  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10  * copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22  * THE SOFTWARE.
23  */
24 #include "qemu/osdep.h"
25 #include "qemu-common.h"
26 #include "cpu.h"
27 #include "hw/hw.h"
28 #include "hw/ppc/ppc.h"
29 #include "hw/ppc/ppc_e500.h"
30 #include "qemu/timer.h"
31 #include "sysemu/sysemu.h"
32 #include "sysemu/cpus.h"
33 #include "hw/timer/m48t59.h"
34 #include "qemu/log.h"
35 #include "qemu/error-report.h"
36 #include "hw/loader.h"
37 #include "sysemu/kvm.h"
38 #include "kvm_ppc.h"
39 #include "trace.h"
40 
41 //#define PPC_DEBUG_IRQ
42 //#define PPC_DEBUG_TB
43 
44 #ifdef PPC_DEBUG_IRQ
45 #  define LOG_IRQ(...) qemu_log_mask(CPU_LOG_INT, ## __VA_ARGS__)
46 #else
47 #  define LOG_IRQ(...) do { } while (0)
48 #endif
49 
50 
51 #ifdef PPC_DEBUG_TB
52 #  define LOG_TB(...) qemu_log(__VA_ARGS__)
53 #else
54 #  define LOG_TB(...) do { } while (0)
55 #endif
56 
57 static void cpu_ppc_tb_stop (CPUPPCState *env);
58 static void cpu_ppc_tb_start (CPUPPCState *env);
59 
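/* Update one bit of env->pending_interrupts and assert or release the CPU's
 * hard interrupt line accordingly; when built with KVM support the change is
 * also propagated via kvmppc_set_interrupt(). */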
60 void ppc_set_irq(PowerPCCPU *cpu, int n_IRQ, int level)
61 {
62     CPUState *cs = CPU(cpu);
63     CPUPPCState *env = &cpu->env;
64     unsigned int old_pending = env->pending_interrupts;
65 
66     if (level) {
67         env->pending_interrupts |= 1 << n_IRQ;
68         cpu_interrupt(cs, CPU_INTERRUPT_HARD);
69     } else {
70         env->pending_interrupts &= ~(1 << n_IRQ);
71         if (env->pending_interrupts == 0) {
72             cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
73         }
74     }
75 
76     if (old_pending != env->pending_interrupts) {
77 #ifdef CONFIG_KVM
78         kvmppc_set_interrupt(cpu, n_IRQ, level);
79 #endif
80     }
81 
82     LOG_IRQ("%s: %p n_IRQ %d level %d => pending %08" PRIx32
83                 " req %08x\n", __func__, env, n_IRQ, level,
84                 env->pending_interrupts, CPU(cpu)->interrupt_request);
85 }
86 
87 /* PowerPC 6xx / 7xx internal IRQ controller */
88 static void ppc6xx_set_irq(void *opaque, int pin, int level)
89 {
90     PowerPCCPU *cpu = opaque;
91     CPUPPCState *env = &cpu->env;
92     int cur_level;
93 
94     LOG_IRQ("%s: env %p pin %d level %d\n", __func__,
95                 env, pin, level);
96     cur_level = (env->irq_input_state >> pin) & 1;
97     /* Don't generate spurious events */
98     if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
99         CPUState *cs = CPU(cpu);
100 
101         switch (pin) {
102         case PPC6xx_INPUT_TBEN:
103             /* Level sensitive - active high */
104             LOG_IRQ("%s: %s the time base\n",
105                         __func__, level ? "start" : "stop");
106             if (level) {
107                 cpu_ppc_tb_start(env);
108             } else {
109                 cpu_ppc_tb_stop(env);
110             }
                break;
111         case PPC6xx_INPUT_INT:
112             /* Level sensitive - active high */
113             LOG_IRQ("%s: set the external IRQ state to %d\n",
114                         __func__, level);
115             ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
116             break;
117         case PPC6xx_INPUT_SMI:
118             /* Level sensitive - active high */
119             LOG_IRQ("%s: set the SMI IRQ state to %d\n",
120                         __func__, level);
121             ppc_set_irq(cpu, PPC_INTERRUPT_SMI, level);
122             break;
123         case PPC6xx_INPUT_MCP:
124             /* Negative edge sensitive */
125             /* XXX: TODO: actual reaction may depend on HID0 status
126              *            603/604/740/750: check HID0[EMCP]
127              */
128             if (cur_level == 1 && level == 0) {
129                 LOG_IRQ("%s: raise machine check state\n",
130                             __func__);
131                 ppc_set_irq(cpu, PPC_INTERRUPT_MCK, 1);
132             }
133             break;
134         case PPC6xx_INPUT_CKSTP_IN:
135             /* Level sensitive - active low */
136             /* XXX: TODO: relay the signal to CKSTP_OUT pin */
137             /* XXX: Note that the only way to restart the CPU is to reset it */
138             if (level) {
139                 LOG_IRQ("%s: stop the CPU\n", __func__);
140                 cs->halted = 1;
141             }
142             break;
143         case PPC6xx_INPUT_HRESET:
144             /* Level sensitive - active low */
145             if (level) {
146                 LOG_IRQ("%s: reset the CPU\n", __func__);
147                 cpu_interrupt(cs, CPU_INTERRUPT_RESET);
148             }
149             break;
150         case PPC6xx_INPUT_SRESET:
151             LOG_IRQ("%s: set the RESET IRQ state to %d\n",
152                         __func__, level);
153             ppc_set_irq(cpu, PPC_INTERRUPT_RESET, level);
154             break;
155         default:
156             /* Unknown pin - do nothing */
157             LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin);
158             return;
159         }
160         if (level)
161             env->irq_input_state |= 1 << pin;
162         else
163             env->irq_input_state &= ~(1 << pin);
164     }
165 }
166 
167 void ppc6xx_irq_init(PowerPCCPU *cpu)
168 {
169     CPUPPCState *env = &cpu->env;
170 
171     env->irq_inputs = (void **)qemu_allocate_irqs(&ppc6xx_set_irq, cpu,
172                                                   PPC6xx_INPUT_NB);
173 }
174 
175 #if defined(TARGET_PPC64)
176 /* PowerPC 970 internal IRQ controller */
177 static void ppc970_set_irq(void *opaque, int pin, int level)
178 {
179     PowerPCCPU *cpu = opaque;
180     CPUPPCState *env = &cpu->env;
181     int cur_level;
182 
183     LOG_IRQ("%s: env %p pin %d level %d\n", __func__,
184                 env, pin, level);
185     cur_level = (env->irq_input_state >> pin) & 1;
186     /* Don't generate spurious events */
187     if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
188         CPUState *cs = CPU(cpu);
189 
190         switch (pin) {
191         case PPC970_INPUT_INT:
192             /* Level sensitive - active high */
193             LOG_IRQ("%s: set the external IRQ state to %d\n",
194                         __func__, level);
195             ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
196             break;
197         case PPC970_INPUT_THINT:
198             /* Level sensitive - active high */
199             LOG_IRQ("%s: set the thermal management IRQ state to %d\n", __func__,
200                         level);
201             ppc_set_irq(cpu, PPC_INTERRUPT_THERM, level);
202             break;
203         case PPC970_INPUT_MCP:
204             /* Negative edge sensitive */
205             /* XXX: TODO: actual reaction may depend on HID0 status
206              *            603/604/740/750: check HID0[EMCP]
207              */
208             if (cur_level == 1 && level == 0) {
209                 LOG_IRQ("%s: raise machine check state\n",
210                             __func__);
211                 ppc_set_irq(cpu, PPC_INTERRUPT_MCK, 1);
212             }
213             break;
214         case PPC970_INPUT_CKSTP:
215             /* Level sensitive - active low */
216             /* XXX: TODO: relay the signal to CKSTP_OUT pin */
217             if (level) {
218                 LOG_IRQ("%s: stop the CPU\n", __func__);
219                 cs->halted = 1;
220             } else {
221                 LOG_IRQ("%s: restart the CPU\n", __func__);
222                 cs->halted = 0;
223                 qemu_cpu_kick(cs);
224             }
225             break;
226         case PPC970_INPUT_HRESET:
227             /* Level sensitive - active low */
228             if (level) {
229                 cpu_interrupt(cs, CPU_INTERRUPT_RESET);
230             }
231             break;
232         case PPC970_INPUT_SRESET:
233             LOG_IRQ("%s: set the RESET IRQ state to %d\n",
234                         __func__, level);
235             ppc_set_irq(cpu, PPC_INTERRUPT_RESET, level);
236             break;
237         case PPC970_INPUT_TBEN:
238             LOG_IRQ("%s: set the TBEN state to %d\n", __func__,
239                         level);
240             /* XXX: TODO */
241             break;
242         default:
243             /* Unknown pin - do nothing */
244             LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin);
245             return;
246         }
247         if (level)
248             env->irq_input_state |= 1 << pin;
249         else
250             env->irq_input_state &= ~(1 << pin);
251     }
252 }
253 
254 void ppc970_irq_init(PowerPCCPU *cpu)
255 {
256     CPUPPCState *env = &cpu->env;
257 
258     env->irq_inputs = (void **)qemu_allocate_irqs(&ppc970_set_irq, cpu,
259                                                   PPC970_INPUT_NB);
260 }
261 
262 /* POWER7 internal IRQ controller */
263 static void power7_set_irq(void *opaque, int pin, int level)
264 {
265     PowerPCCPU *cpu = opaque;
266     CPUPPCState *env = &cpu->env;
267 
268     LOG_IRQ("%s: env %p pin %d level %d\n", __func__,
269                 env, pin, level);
270 
271     switch (pin) {
272     case POWER7_INPUT_INT:
273         /* Level sensitive - active high */
274         LOG_IRQ("%s: set the external IRQ state to %d\n",
275                 __func__, level);
276         ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
277         break;
278     default:
279         /* Unknown pin - do nothing */
280         LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin);
281         return;
282     }
283     if (level) {
284         env->irq_input_state |= 1 << pin;
285     } else {
286         env->irq_input_state &= ~(1 << pin);
287     }
288 }
289 
290 void ppcPOWER7_irq_init(PowerPCCPU *cpu)
291 {
292     CPUPPCState *env = &cpu->env;
293 
294     env->irq_inputs = (void **)qemu_allocate_irqs(&power7_set_irq, cpu,
295                                                   POWER7_INPUT_NB);
296 }
297 #endif /* defined(TARGET_PPC64) */
298 
299 /* PowerPC 40x internal IRQ controller */
300 static void ppc40x_set_irq(void *opaque, int pin, int level)
301 {
302     PowerPCCPU *cpu = opaque;
303     CPUPPCState *env = &cpu->env;
304     int cur_level;
305 
306     LOG_IRQ("%s: env %p pin %d level %d\n", __func__,
307                 env, pin, level);
308     cur_level = (env->irq_input_state >> pin) & 1;
309     /* Don't generate spurious events */
310     if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
311         CPUState *cs = CPU(cpu);
312 
313         switch (pin) {
314         case PPC40x_INPUT_RESET_SYS:
315             if (level) {
316                 LOG_IRQ("%s: reset the PowerPC system\n",
317                             __func__);
318                 ppc40x_system_reset(cpu);
319             }
320             break;
321         case PPC40x_INPUT_RESET_CHIP:
322             if (level) {
323                 LOG_IRQ("%s: reset the PowerPC chip\n", __func__);
324                 ppc40x_chip_reset(cpu);
325             }
326             break;
327         case PPC40x_INPUT_RESET_CORE:
328             /* XXX: TODO: update DBSR[MRR] */
329             if (level) {
330                 LOG_IRQ("%s: reset the PowerPC core\n", __func__);
331                 ppc40x_core_reset(cpu);
332             }
333             break;
334         case PPC40x_INPUT_CINT:
335             /* Level sensitive - active high */
336             LOG_IRQ("%s: set the critical IRQ state to %d\n",
337                         __func__, level);
338             ppc_set_irq(cpu, PPC_INTERRUPT_CEXT, level);
339             break;
340         case PPC40x_INPUT_INT:
341             /* Level sensitive - active high */
342             LOG_IRQ("%s: set the external IRQ state to %d\n",
343                         __func__, level);
344             ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
345             break;
346         case PPC40x_INPUT_HALT:
347             /* Level sensitive - active low */
348             if (level) {
349                 LOG_IRQ("%s: stop the CPU\n", __func__);
350                 cs->halted = 1;
351             } else {
352                 LOG_IRQ("%s: restart the CPU\n", __func__);
353                 cs->halted = 0;
354                 qemu_cpu_kick(cs);
355             }
356             break;
357         case PPC40x_INPUT_DEBUG:
358             /* Level sensitive - active high */
359             LOG_IRQ("%s: set the debug pin state to %d\n",
360                         __func__, level);
361             ppc_set_irq(cpu, PPC_INTERRUPT_DEBUG, level);
362             break;
363         default:
364             /* Unknown pin - do nothing */
365             LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin);
366             return;
367         }
368         if (level)
369             env->irq_input_state |= 1 << pin;
370         else
371             env->irq_input_state &= ~(1 << pin);
372     }
373 }
374 
375 void ppc40x_irq_init(PowerPCCPU *cpu)
376 {
377     CPUPPCState *env = &cpu->env;
378 
379     env->irq_inputs = (void **)qemu_allocate_irqs(&ppc40x_set_irq,
380                                                   cpu, PPC40x_INPUT_NB);
381 }
382 
383 /* PowerPC E500 internal IRQ controller */
384 static void ppce500_set_irq(void *opaque, int pin, int level)
385 {
386     PowerPCCPU *cpu = opaque;
387     CPUPPCState *env = &cpu->env;
388     int cur_level;
389 
390     LOG_IRQ("%s: env %p pin %d level %d\n", __func__,
391                 env, pin, level);
392     cur_level = (env->irq_input_state >> pin) & 1;
393     /* Don't generate spurious events */
394     if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
395         switch (pin) {
396         case PPCE500_INPUT_MCK:
397             if (level) {
398                 LOG_IRQ("%s: reset the PowerPC system\n",
399                             __func__);
400                 qemu_system_reset_request();
401             }
402             break;
403         case PPCE500_INPUT_RESET_CORE:
404             if (level) {
405                 LOG_IRQ("%s: reset the PowerPC core\n", __func__);
406                 ppc_set_irq(cpu, PPC_INTERRUPT_MCK, level);
407             }
408             break;
409         case PPCE500_INPUT_CINT:
410             /* Level sensitive - active high */
411             LOG_IRQ("%s: set the critical IRQ state to %d\n",
412                         __func__, level);
413             ppc_set_irq(cpu, PPC_INTERRUPT_CEXT, level);
414             break;
415         case PPCE500_INPUT_INT:
416             /* Level sensitive - active high */
417             LOG_IRQ("%s: set the core IRQ state to %d\n",
418                         __func__, level);
419             ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
420             break;
421         case PPCE500_INPUT_DEBUG:
422             /* Level sensitive - active high */
423             LOG_IRQ("%s: set the debug pin state to %d\n",
424                         __func__, level);
425             ppc_set_irq(cpu, PPC_INTERRUPT_DEBUG, level);
426             break;
427         default:
428             /* Unknown pin - do nothing */
429             LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin);
430             return;
431         }
432         if (level)
433             env->irq_input_state |= 1 << pin;
434         else
435             env->irq_input_state &= ~(1 << pin);
436     }
437 }
438 
439 void ppce500_irq_init(PowerPCCPU *cpu)
440 {
441     CPUPPCState *env = &cpu->env;
442 
443     env->irq_inputs = (void **)qemu_allocate_irqs(&ppce500_set_irq,
444                                                   cpu, PPCE500_INPUT_NB);
445 }
446 
447 /* Enable or Disable the E500 EPR capability */
448 void ppce500_set_mpic_proxy(bool enabled)
449 {
450     CPUState *cs;
451 
452     CPU_FOREACH(cs) {
453         PowerPCCPU *cpu = POWERPC_CPU(cs);
454 
455         cpu->env.mpic_proxy = enabled;
456         if (kvm_enabled()) {
457             kvmppc_set_mpic_proxy(cpu, enabled);
458         }
459     }
460 }
461 
462 /*****************************************************************************/
463 /* PowerPC time base and decrementer emulation */
464 
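/* Convert a QEMU_CLOCK_VIRTUAL timestamp (in ns) into time base ticks at
 * tb_env->tb_freq and apply the given offset (TB or alternate TB). */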
465 uint64_t cpu_ppc_get_tb(ppc_tb_t *tb_env, uint64_t vmclk, int64_t tb_offset)
466 {
467     /* TB time in tb periods */
468     return muldiv64(vmclk, tb_env->tb_freq, NANOSECONDS_PER_SECOND) + tb_offset;
469 }
470 
471 uint64_t cpu_ppc_load_tbl (CPUPPCState *env)
472 {
473     ppc_tb_t *tb_env = env->tb_env;
474     uint64_t tb;
475 
476     if (kvm_enabled()) {
477         return env->spr[SPR_TBL];
478     }
479 
480     tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
481     LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb);
482 
483     return tb;
484 }
485 
486 static inline uint32_t _cpu_ppc_load_tbu(CPUPPCState *env)
487 {
488     ppc_tb_t *tb_env = env->tb_env;
489     uint64_t tb;
490 
491     tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
492     LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb);
493 
494     return tb >> 32;
495 }
496 
497 uint32_t cpu_ppc_load_tbu (CPUPPCState *env)
498 {
499     if (kvm_enabled()) {
500         return env->spr[SPR_TBU];
501     }
502 
503     return _cpu_ppc_load_tbu(env);
504 }
505 
506 static inline void cpu_ppc_store_tb(ppc_tb_t *tb_env, uint64_t vmclk,
507                                     int64_t *tb_offsetp, uint64_t value)
508 {
509     *tb_offsetp = value -
510         muldiv64(vmclk, tb_env->tb_freq, NANOSECONDS_PER_SECOND);
511 
512     LOG_TB("%s: tb %016" PRIx64 " offset %08" PRIx64 "\n",
513                 __func__, value, *tb_offsetp);
514 }
515 
516 void cpu_ppc_store_tbl (CPUPPCState *env, uint32_t value)
517 {
518     ppc_tb_t *tb_env = env->tb_env;
519     uint64_t tb;
520 
521     tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
522     tb &= 0xFFFFFFFF00000000ULL;
523     cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
524                      &tb_env->tb_offset, tb | (uint64_t)value);
525 }
526 
527 static inline void _cpu_ppc_store_tbu(CPUPPCState *env, uint32_t value)
528 {
529     ppc_tb_t *tb_env = env->tb_env;
530     uint64_t tb;
531 
532     tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
533     tb &= 0x00000000FFFFFFFFULL;
534     cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
535                      &tb_env->tb_offset, ((uint64_t)value << 32) | tb);
536 }
537 
538 void cpu_ppc_store_tbu (CPUPPCState *env, uint32_t value)
539 {
540     _cpu_ppc_store_tbu(env, value);
541 }
542 
543 uint64_t cpu_ppc_load_atbl (CPUPPCState *env)
544 {
545     ppc_tb_t *tb_env = env->tb_env;
546     uint64_t tb;
547 
548     tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
549     LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb);
550 
551     return tb;
552 }
553 
554 uint32_t cpu_ppc_load_atbu (CPUPPCState *env)
555 {
556     ppc_tb_t *tb_env = env->tb_env;
557     uint64_t tb;
558 
559     tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
560     LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb);
561 
562     return tb >> 32;
563 }
564 
565 void cpu_ppc_store_atbl (CPUPPCState *env, uint32_t value)
566 {
567     ppc_tb_t *tb_env = env->tb_env;
568     uint64_t tb;
569 
570     tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
571     tb &= 0xFFFFFFFF00000000ULL;
572     cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
573                      &tb_env->atb_offset, tb | (uint64_t)value);
574 }
575 
576 void cpu_ppc_store_atbu (CPUPPCState *env, uint32_t value)
577 {
578     ppc_tb_t *tb_env = env->tb_env;
579     uint64_t tb;
580 
581     tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
582     tb &= 0x00000000FFFFFFFFULL;
583     cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
584                      &tb_env->atb_offset, ((uint64_t)value << 32) | tb);
585 }
586 
587 static void cpu_ppc_tb_stop (CPUPPCState *env)
588 {
589     ppc_tb_t *tb_env = env->tb_env;
590     uint64_t tb, atb, vmclk;
591 
592     /* If the time base is already frozen, do nothing */
593     if (tb_env->tb_freq != 0) {
594         vmclk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
595         /* Get the time base */
596         tb = cpu_ppc_get_tb(tb_env, vmclk, tb_env->tb_offset);
597         /* Get the alternate time base */
598         atb = cpu_ppc_get_tb(tb_env, vmclk, tb_env->atb_offset);
599         /* Store the time base value (ie compute the current offset) */
600         /* Store the time base value (i.e. compute the current offset) */
601         /* Store the alternate time base value (compute the current offset) */
602         cpu_ppc_store_tb(tb_env, vmclk, &tb_env->atb_offset, atb);
603         /* Set the time base frequency to zero */
604         tb_env->tb_freq = 0;
605         /* Now, the time bases are frozen to tb_offset / atb_offset value */
606     }
607 }
608 
609 static void cpu_ppc_tb_start (CPUPPCState *env)
610 {
611     ppc_tb_t *tb_env = env->tb_env;
612     uint64_t tb, atb, vmclk;
613 
614     /* If the time base is not frozen, do nothing */
615     if (tb_env->tb_freq == 0) {
616         vmclk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
617         /* Get the time base from tb_offset */
618         tb = tb_env->tb_offset;
619         /* Get the alternate time base from atb_offset */
620         atb = tb_env->atb_offset;
621         /* Restore the tb frequency from the decrementer frequency */
622         tb_env->tb_freq = tb_env->decr_freq;
623         /* Store the time base value */
624         cpu_ppc_store_tb(tb_env, vmclk, &tb_env->tb_offset, tb);
625         /* Store the alternate time base value */
626         cpu_ppc_store_tb(tb_env, vmclk, &tb_env->atb_offset, atb);
627     }
628 }
629 
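/* True when DEC underflow is purely edge-triggered (UNDERFLOW_TRIGGERED set,
 * UNDERFLOW_LEVEL clear), i.e. the interrupt is cleared on delivery rather
 * than following the DEC MSB. */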
630 bool ppc_decr_clear_on_delivery(CPUPPCState *env)
631 {
632     ppc_tb_t *tb_env = env->tb_env;
633     int flags = PPC_DECR_UNDERFLOW_TRIGGERED | PPC_DECR_UNDERFLOW_LEVEL;
634     return ((tb_env->flags & flags) == PPC_DECR_UNDERFLOW_TRIGGERED);
635 }
636 
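/* Convert the time remaining until 'next' back into a decrementer value;
 * Book E decrementers stop at zero instead of going negative. */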
637 static inline uint32_t _cpu_ppc_load_decr(CPUPPCState *env, uint64_t next)
638 {
639     ppc_tb_t *tb_env = env->tb_env;
640     uint32_t decr;
641     int64_t diff;
642 
643     diff = next - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
644     if (diff >= 0) {
645         decr = muldiv64(diff, tb_env->decr_freq, NANOSECONDS_PER_SECOND);
646     } else if (tb_env->flags & PPC_TIMER_BOOKE) {
647         decr = 0;
648     } else {
649         decr = -muldiv64(-diff, tb_env->decr_freq, NANOSECONDS_PER_SECOND);
650     }
651     LOG_TB("%s: %08" PRIx32 "\n", __func__, decr);
652 
653     return decr;
654 }
655 
656 uint32_t cpu_ppc_load_decr (CPUPPCState *env)
657 {
658     ppc_tb_t *tb_env = env->tb_env;
659 
660     if (kvm_enabled()) {
661         return env->spr[SPR_DECR];
662     }
663 
664     return _cpu_ppc_load_decr(env, tb_env->decr_next);
665 }
666 
667 uint32_t cpu_ppc_load_hdecr (CPUPPCState *env)
668 {
669     ppc_tb_t *tb_env = env->tb_env;
670 
671     return _cpu_ppc_load_decr(env, tb_env->hdecr_next);
672 }
673 
674 uint64_t cpu_ppc_load_purr (CPUPPCState *env)
675 {
676     ppc_tb_t *tb_env = env->tb_env;
677     uint64_t diff;
678 
679     diff = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - tb_env->purr_start;
680 
681     return tb_env->purr_load +
682         muldiv64(diff, tb_env->tb_freq, NANOSECONDS_PER_SECOND);
683 }
684 
685 /* When the decrementer expires,
686  * all we need to do is generate or queue a CPU exception.
687  */
688 static inline void cpu_ppc_decr_excp(PowerPCCPU *cpu)
689 {
690     /* Raise it */
691     LOG_TB("raise decrementer exception\n");
692     ppc_set_irq(cpu, PPC_INTERRUPT_DECR, 1);
693 }
694 
695 static inline void cpu_ppc_decr_lower(PowerPCCPU *cpu)
696 {
697     ppc_set_irq(cpu, PPC_INTERRUPT_DECR, 0);
698 }
699 
700 static inline void cpu_ppc_hdecr_excp(PowerPCCPU *cpu)
701 {
702     CPUPPCState *env = &cpu->env;
703 
704     /* Raise it */
705     LOG_TB("raise hv decrementer exception\n");
706 
707     /* The architecture specifies that we don't deliver HDEC
708      * interrupts in a PM state. Not only do they not cause a
709      * wakeup, they are also effectively discarded.
710      */
711     if (!env->in_pm_state) {
712         ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 1);
713     }
714 }
715 
716 static inline void cpu_ppc_hdecr_lower(PowerPCCPU *cpu)
717 {
718     ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 0);
719 }
720 
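/* Common helper for DECR/HDECR writes: raise or lower the exception based on
 * the old and new values, then arm the QEMU timer for the next underflow. */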
721 static void __cpu_ppc_store_decr(PowerPCCPU *cpu, uint64_t *nextp,
722                                  QEMUTimer *timer,
723                                  void (*raise_excp)(void *),
724                                  void (*lower_excp)(PowerPCCPU *),
725                                  uint32_t decr, uint32_t value)
726 {
727     CPUPPCState *env = &cpu->env;
728     ppc_tb_t *tb_env = env->tb_env;
729     uint64_t now, next;
730 
731     LOG_TB("%s: %08" PRIx32 " => %08" PRIx32 "\n", __func__,
732                 decr, value);
733 
734     if (kvm_enabled()) {
735         /* KVM handles decrementer exceptions; we don't need our own timer */
736         return;
737     }
738 
739     /*
740      * Going from 2 -> 1, 1 -> 0 or 0 -> -1 is the event to generate a DEC
741      * interrupt.
742      *
743      * If we get a really small DEC value, we can assume that by the time we
744      * handle it the interrupt is already due, so raise it immediately.
745      *
746      * On MSB level based DEC implementations the MSB always means the interrupt
747      * is pending, so raise it on those.
748      *
749      * On MSB edge based DEC implementations the MSB going from 0 -> 1 triggers
750      * an edge interrupt, so raise it here too.
751      */
752     if ((value < 3) ||
753         ((tb_env->flags & PPC_DECR_UNDERFLOW_LEVEL) && (value & 0x80000000)) ||
754         ((tb_env->flags & PPC_DECR_UNDERFLOW_TRIGGERED) && (value & 0x80000000)
755           && !(decr & 0x80000000))) {
756         (*raise_excp)(cpu);
757         return;
758     }
759 
760     /* On MSB level based systems a 0 for the MSB stops interrupt delivery */
761     if (!(value & 0x80000000) && (tb_env->flags & PPC_DECR_UNDERFLOW_LEVEL)) {
762         (*lower_excp)(cpu);
763     }
764 
765     /* Calculate the next timer event */
766     now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
767     next = now + muldiv64(value, NANOSECONDS_PER_SECOND, tb_env->decr_freq);
768     *nextp = next;
769 
770     /* Adjust timer */
771     timer_mod(timer, next);
772 }
773 
774 static inline void _cpu_ppc_store_decr(PowerPCCPU *cpu, uint32_t decr,
775                                        uint32_t value)
776 {
777     ppc_tb_t *tb_env = cpu->env.tb_env;
778 
779     __cpu_ppc_store_decr(cpu, &tb_env->decr_next, tb_env->decr_timer,
780                          tb_env->decr_timer->cb, &cpu_ppc_decr_lower, decr,
781                          value);
782 }
783 
784 void cpu_ppc_store_decr (CPUPPCState *env, uint32_t value)
785 {
786     PowerPCCPU *cpu = ppc_env_get_cpu(env);
787 
788     _cpu_ppc_store_decr(cpu, cpu_ppc_load_decr(env), value);
789 }
790 
791 static void cpu_ppc_decr_cb(void *opaque)
792 {
793     PowerPCCPU *cpu = opaque;
794 
795     cpu_ppc_decr_excp(cpu);
796 }
797 
798 static inline void _cpu_ppc_store_hdecr(PowerPCCPU *cpu, uint32_t hdecr,
799                                         uint32_t value)
800 {
801     ppc_tb_t *tb_env = cpu->env.tb_env;
802 
803     if (tb_env->hdecr_timer != NULL) {
804         __cpu_ppc_store_decr(cpu, &tb_env->hdecr_next, tb_env->hdecr_timer,
805                              tb_env->hdecr_timer->cb, &cpu_ppc_hdecr_lower,
806                              hdecr, value);
807     }
808 }
809 
810 void cpu_ppc_store_hdecr (CPUPPCState *env, uint32_t value)
811 {
812     PowerPCCPU *cpu = ppc_env_get_cpu(env);
813 
814     _cpu_ppc_store_hdecr(cpu, cpu_ppc_load_hdecr(env), value);
815 }
816 
817 static void cpu_ppc_hdecr_cb(void *opaque)
818 {
819     PowerPCCPU *cpu = opaque;
820 
821     cpu_ppc_hdecr_excp(cpu);
822 }
823 
824 static void cpu_ppc_store_purr(PowerPCCPU *cpu, uint64_t value)
825 {
826     ppc_tb_t *tb_env = cpu->env.tb_env;
827 
828     tb_env->purr_load = value;
829     tb_env->purr_start = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
830 }
831 
832 static void cpu_ppc_set_tb_clk (void *opaque, uint32_t freq)
833 {
834     CPUPPCState *env = opaque;
835     PowerPCCPU *cpu = ppc_env_get_cpu(env);
836     ppc_tb_t *tb_env = env->tb_env;
837 
838     tb_env->tb_freq = freq;
839     tb_env->decr_freq = freq;
840     /* There is a bug in Linux 2.4 kernels:
841      * if a decrementer exception is pending when the kernel enables MSR[EE] at
842      * startup, it is not ready to handle it...
843      */
844     _cpu_ppc_store_decr(cpu, 0xFFFFFFFF, 0xFFFFFFFF);
845     _cpu_ppc_store_hdecr(cpu, 0xFFFFFFFF, 0xFFFFFFFF);
846     cpu_ppc_store_purr(cpu, 0x0000000000000000ULL);
847 }
848 
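/* Record the guest timebase relative to the host ticks, together with the
 * host wall-clock time, so the destination can recompute tb_offset. */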
849 static void timebase_pre_save(void *opaque)
850 {
851     PPCTimebase *tb = opaque;
852     uint64_t ticks = cpu_get_host_ticks();
853     PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu);
854 
855     if (!first_ppc_cpu->env.tb_env) {
856         error_report("No timebase object");
857         return;
858     }
859 
860     tb->time_of_the_day_ns = qemu_clock_get_ns(QEMU_CLOCK_HOST);
861     /*
862      * tb_offset is only expected to be changed by migration so
863      * there is no need to update it from KVM here
864      */
865     tb->guest_timebase = ticks + first_ppc_cpu->env.tb_env->tb_offset;
866 }
867 
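/* Recompute tb_offset on the destination so the guest timebase never moves
 * backwards, compensating for up to one second of migration downtime. */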
868 static int timebase_post_load(void *opaque, int version_id)
869 {
870     PPCTimebase *tb_remote = opaque;
871     CPUState *cpu;
872     PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu);
873     int64_t tb_off_adj, tb_off, ns_diff;
874     int64_t migration_duration_ns, migration_duration_tb, guest_tb, host_ns;
875     unsigned long freq;
876 
877     if (!first_ppc_cpu->env.tb_env) {
878         error_report("No timebase object");
879         return -1;
880     }
881 
882     freq = first_ppc_cpu->env.tb_env->tb_freq;
883     /*
884      * Calculate the timebase on the destination side of migration.
885      * The destination timebase must not be less than the source timebase.
886      * We try to advance the timebase by the migration downtime if the host
887      * clocks are not too far out of sync (capped at 1 second for now).
888      */
889     host_ns = qemu_clock_get_ns(QEMU_CLOCK_HOST);
890     ns_diff = MAX(0, host_ns - tb_remote->time_of_the_day_ns);
891     migration_duration_ns = MIN(NANOSECONDS_PER_SECOND, ns_diff);
892     migration_duration_tb = muldiv64(freq, migration_duration_ns,
893                                      NANOSECONDS_PER_SECOND);
894     guest_tb = tb_remote->guest_timebase + MIN(0, migration_duration_tb);
895 
896     tb_off_adj = guest_tb - cpu_get_host_ticks();
897 
898     tb_off = first_ppc_cpu->env.tb_env->tb_offset;
899     trace_ppc_tb_adjust(tb_off, tb_off_adj, tb_off_adj - tb_off,
900                         (tb_off_adj - tb_off) / freq);
901 
902     /* Set new offset to all CPUs */
903     CPU_FOREACH(cpu) {
904         PowerPCCPU *pcpu = POWERPC_CPU(cpu);
905         pcpu->env.tb_env->tb_offset = tb_off_adj;
906     }
907 
908     return 0;
909 }
910 
911 const VMStateDescription vmstate_ppc_timebase = {
912     .name = "timebase",
913     .version_id = 1,
914     .minimum_version_id = 1,
915     .minimum_version_id_old = 1,
916     .pre_save = timebase_pre_save,
917     .post_load = timebase_post_load,
918     .fields      = (VMStateField []) {
919         VMSTATE_UINT64(guest_timebase, PPCTimebase),
920         VMSTATE_INT64(time_of_the_day_ns, PPCTimebase),
921         VMSTATE_END_OF_LIST()
922     },
923 };
924 
925 /* One-time setup of the timebase frequency (in Hz) */
926 clk_setup_cb cpu_ppc_tb_init (CPUPPCState *env, uint32_t freq)
927 {
928     PowerPCCPU *cpu = ppc_env_get_cpu(env);
929     ppc_tb_t *tb_env;
930 
931     tb_env = g_malloc0(sizeof(ppc_tb_t));
932     env->tb_env = tb_env;
933     tb_env->flags = PPC_DECR_UNDERFLOW_TRIGGERED;
934     if (env->insns_flags & PPC_SEGMENT_64B) {
935         /* All Book3S 64bit CPUs implement level based DEC logic */
936         tb_env->flags |= PPC_DECR_UNDERFLOW_LEVEL;
937     }
938     /* Create new timer */
939     tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_ppc_decr_cb, cpu);
940     if (env->has_hv_mode) {
941         tb_env->hdecr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_ppc_hdecr_cb,
942                                                 cpu);
943     } else {
944         tb_env->hdecr_timer = NULL;
945     }
946     cpu_ppc_set_tb_clk(env, freq);
947 
948     return &cpu_ppc_set_tb_clk;
949 }
950 
951 /* Specific helpers for POWER & PowerPC 601 RTC */
952 #if 0
953 static clk_setup_cb cpu_ppc601_rtc_init (CPUPPCState *env)
954 {
955     return cpu_ppc_tb_init(env, 7812500);
956 }
957 #endif
958 
959 void cpu_ppc601_store_rtcu (CPUPPCState *env, uint32_t value)
960 {
961     _cpu_ppc_store_tbu(env, value);
962 }
963 
964 uint32_t cpu_ppc601_load_rtcu (CPUPPCState *env)
965 {
966     return _cpu_ppc_load_tbu(env);
967 }
968 
969 void cpu_ppc601_store_rtcl (CPUPPCState *env, uint32_t value)
970 {
971     cpu_ppc_store_tbl(env, value & 0x3FFFFF80);
972 }
973 
974 uint32_t cpu_ppc601_load_rtcl (CPUPPCState *env)
975 {
976     return cpu_ppc_load_tbl(env) & 0x3FFFFF80;
977 }
978 
979 /*****************************************************************************/
980 /* PowerPC 40x timers */
981 
982 /* PIT, FIT & WDT */
983 typedef struct ppc40x_timer_t ppc40x_timer_t;
984 struct ppc40x_timer_t {
985     uint64_t pit_reload;  /* PIT auto-reload value        */
986     uint64_t fit_next;    /* Tick for next FIT interrupt  */
987     QEMUTimer *fit_timer;
988     uint64_t wdt_next;    /* Tick for next WDT interrupt  */
989     QEMUTimer *wdt_timer;
990 
991     /* The 405 has a PIT, the 440 has a DECR. */
992     unsigned int decr_excp;
993 };
994 
995 /* Fixed interval timer */
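/* TCR[FP] selects the FIT period (2^9, 2^13, 2^17 or 2^21 time base ticks);
 * each expiry sets TSR[FIS] and raises the FIT interrupt if TCR[FIE] is set. */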
996 static void cpu_4xx_fit_cb (void *opaque)
997 {
998     PowerPCCPU *cpu;
999     CPUPPCState *env;
1000     ppc_tb_t *tb_env;
1001     ppc40x_timer_t *ppc40x_timer;
1002     uint64_t now, next;
1003 
1004     env = opaque;
1005     cpu = ppc_env_get_cpu(env);
1006     tb_env = env->tb_env;
1007     ppc40x_timer = tb_env->opaque;
1008     now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
1009     switch ((env->spr[SPR_40x_TCR] >> 24) & 0x3) {
1010     case 0:
1011         next = 1 << 9;
1012         break;
1013     case 1:
1014         next = 1 << 13;
1015         break;
1016     case 2:
1017         next = 1 << 17;
1018         break;
1019     case 3:
1020         next = 1 << 21;
1021         break;
1022     default:
1023         /* Cannot occur, but makes gcc happy */
1024         return;
1025     }
1026     next = now + muldiv64(next, NANOSECONDS_PER_SECOND, tb_env->tb_freq);
1027     if (next == now)
1028         next++;
1029     timer_mod(ppc40x_timer->fit_timer, next);
1030     env->spr[SPR_40x_TSR] |= 1 << 26;
1031     if ((env->spr[SPR_40x_TCR] >> 23) & 0x1) {
1032         ppc_set_irq(cpu, PPC_INTERRUPT_FIT, 1);
1033     }
1034     LOG_TB("%s: ir %d TCR " TARGET_FMT_lx " TSR " TARGET_FMT_lx "\n", __func__,
1035            (int)((env->spr[SPR_40x_TCR] >> 23) & 0x1),
1036            env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR]);
1037 }
1038 
1039 /* Programmable interval timer */
1040 static void start_stop_pit (CPUPPCState *env, ppc_tb_t *tb_env, int is_excp)
1041 {
1042     ppc40x_timer_t *ppc40x_timer;
1043     uint64_t now, next;
1044 
1045     ppc40x_timer = tb_env->opaque;
1046     if (ppc40x_timer->pit_reload <= 1 ||
1047         !((env->spr[SPR_40x_TCR] >> 26) & 0x1) ||
1048         (is_excp && !((env->spr[SPR_40x_TCR] >> 22) & 0x1))) {
1049         /* Stop PIT */
1050         LOG_TB("%s: stop PIT\n", __func__);
1051         timer_del(tb_env->decr_timer);
1052     } else {
1053         LOG_TB("%s: start PIT %016" PRIx64 "\n",
1054                     __func__, ppc40x_timer->pit_reload);
1055         now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
1056         next = now + muldiv64(ppc40x_timer->pit_reload,
1057                               NANOSECONDS_PER_SECOND, tb_env->decr_freq);
1058         if (is_excp)
1059             next += tb_env->decr_next - now;
1060         if (next == now)
1061             next++;
1062         timer_mod(tb_env->decr_timer, next);
1063         tb_env->decr_next = next;
1064     }
1065 }
1066 
1067 static void cpu_4xx_pit_cb (void *opaque)
1068 {
1069     PowerPCCPU *cpu;
1070     CPUPPCState *env;
1071     ppc_tb_t *tb_env;
1072     ppc40x_timer_t *ppc40x_timer;
1073 
1074     env = opaque;
1075     cpu = ppc_env_get_cpu(env);
1076     tb_env = env->tb_env;
1077     ppc40x_timer = tb_env->opaque;
1078     env->spr[SPR_40x_TSR] |= 1 << 27;
1079     if ((env->spr[SPR_40x_TCR] >> 26) & 0x1) {
1080         ppc_set_irq(cpu, ppc40x_timer->decr_excp, 1);
1081     }
1082     start_stop_pit(env, tb_env, 1);
1083     LOG_TB("%s: ar %d ir %d TCR " TARGET_FMT_lx " TSR " TARGET_FMT_lx " "
1084            "%016" PRIx64 "\n", __func__,
1085            (int)((env->spr[SPR_40x_TCR] >> 22) & 0x1),
1086            (int)((env->spr[SPR_40x_TCR] >> 26) & 0x1),
1087            env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR],
1088            ppc40x_timer->pit_reload);
1089 }
1090 
1091 /* Watchdog timer */
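/* Successive expirations walk TSR[ENW,WIS]: first set ENW, then set WIS
 * (raising the WDT interrupt if TCR[WIE] is set), and finally perform the
 * core/chip/system reset selected by TCR[WRC]. */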
1092 static void cpu_4xx_wdt_cb (void *opaque)
1093 {
1094     PowerPCCPU *cpu;
1095     CPUPPCState *env;
1096     ppc_tb_t *tb_env;
1097     ppc40x_timer_t *ppc40x_timer;
1098     uint64_t now, next;
1099 
1100     env = opaque;
1101     cpu = ppc_env_get_cpu(env);
1102     tb_env = env->tb_env;
1103     ppc40x_timer = tb_env->opaque;
1104     now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
1105     switch ((env->spr[SPR_40x_TCR] >> 30) & 0x3) {
1106     case 0:
1107         next = 1 << 17;
1108         break;
1109     case 1:
1110         next = 1 << 21;
1111         break;
1112     case 2:
1113         next = 1 << 25;
1114         break;
1115     case 3:
1116         next = 1 << 29;
1117         break;
1118     default:
1119         /* Cannot occur, but makes gcc happy */
1120         return;
1121     }
1122     next = now + muldiv64(next, NANOSECONDS_PER_SECOND, tb_env->decr_freq);
1123     if (next == now)
1124         next++;
1125     LOG_TB("%s: TCR " TARGET_FMT_lx " TSR " TARGET_FMT_lx "\n", __func__,
1126            env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR]);
1127     switch ((env->spr[SPR_40x_TSR] >> 30) & 0x3) {
1128     case 0x0:
1129     case 0x1:
1130         timer_mod(ppc40x_timer->wdt_timer, next);
1131         ppc40x_timer->wdt_next = next;
1132         env->spr[SPR_40x_TSR] |= 1U << 31;
1133         break;
1134     case 0x2:
1135         timer_mod(ppc40x_timer->wdt_timer, next);
1136         ppc40x_timer->wdt_next = next;
1137         env->spr[SPR_40x_TSR] |= 1 << 30;
1138         if ((env->spr[SPR_40x_TCR] >> 27) & 0x1) {
1139             ppc_set_irq(cpu, PPC_INTERRUPT_WDT, 1);
1140         }
1141         break;
1142     case 0x3:
1143         env->spr[SPR_40x_TSR] &= ~0x30000000;
1144         env->spr[SPR_40x_TSR] |= env->spr[SPR_40x_TCR] & 0x30000000;
1145         switch ((env->spr[SPR_40x_TCR] >> 28) & 0x3) {
1146         case 0x0:
1147             /* No reset */
1148             break;
1149         case 0x1: /* Core reset */
1150             ppc40x_core_reset(cpu);
1151             break;
1152         case 0x2: /* Chip reset */
1153             ppc40x_chip_reset(cpu);
1154             break;
1155         case 0x3: /* System reset */
1156             ppc40x_system_reset(cpu);
1157             break;
1158         }
1159     }
1160 }
1161 
1162 void store_40x_pit (CPUPPCState *env, target_ulong val)
1163 {
1164     ppc_tb_t *tb_env;
1165     ppc40x_timer_t *ppc40x_timer;
1166 
1167     tb_env = env->tb_env;
1168     ppc40x_timer = tb_env->opaque;
1169     LOG_TB("%s val" TARGET_FMT_lx "\n", __func__, val);
1170     ppc40x_timer->pit_reload = val;
1171     start_stop_pit(env, tb_env, 0);
1172 }
1173 
1174 target_ulong load_40x_pit (CPUPPCState *env)
1175 {
1176     return cpu_ppc_load_decr(env);
1177 }
1178 
1179 static void ppc_40x_set_tb_clk (void *opaque, uint32_t freq)
1180 {
1181     CPUPPCState *env = opaque;
1182     ppc_tb_t *tb_env = env->tb_env;
1183 
1184     LOG_TB("%s set new frequency to %" PRIu32 "\n", __func__,
1185                 freq);
1186     tb_env->tb_freq = freq;
1187     tb_env->decr_freq = freq;
1188     /* XXX: we should also update all timers */
1189 }
1190 
1191 clk_setup_cb ppc_40x_timers_init (CPUPPCState *env, uint32_t freq,
1192                                   unsigned int decr_excp)
1193 {
1194     ppc_tb_t *tb_env;
1195     ppc40x_timer_t *ppc40x_timer;
1196 
1197     tb_env = g_malloc0(sizeof(ppc_tb_t));
1198     env->tb_env = tb_env;
1199     tb_env->flags = PPC_DECR_UNDERFLOW_TRIGGERED;
1200     ppc40x_timer = g_malloc0(sizeof(ppc40x_timer_t));
1201     tb_env->tb_freq = freq;
1202     tb_env->decr_freq = freq;
1203     tb_env->opaque = ppc40x_timer;
1204     LOG_TB("%s freq %" PRIu32 "\n", __func__, freq);
1205     if (ppc40x_timer != NULL) {
1206         /* We use decr timer for PIT */
1207         tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_pit_cb, env);
1208         ppc40x_timer->fit_timer =
1209             timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_fit_cb, env);
1210         ppc40x_timer->wdt_timer =
1211             timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_wdt_cb, env);
1212         ppc40x_timer->decr_excp = decr_excp;
1213     }
1214 
1215     return &ppc_40x_set_tb_clk;
1216 }
1217 
1218 /*****************************************************************************/
1219 /* Embedded PowerPC Device Control Registers */
1220 typedef struct ppc_dcrn_t ppc_dcrn_t;
1221 struct ppc_dcrn_t {
1222     dcr_read_cb dcr_read;
1223     dcr_write_cb dcr_write;
1224     void *opaque;
1225 };
1226 
1227 /* XXX: on 460, DCR addresses are 32 bits wide,
1228  *      using DCRIPR to get the 22 upper bits of the DCR address
1229  */
1230 #define DCRN_NB 1024
1231 struct ppc_dcr_t {
1232     ppc_dcrn_t dcrn[DCRN_NB];
1233     int (*read_error)(int dcrn);
1234     int (*write_error)(int dcrn);
1235 };
1236 
1237 int ppc_dcr_read (ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp)
1238 {
1239     ppc_dcrn_t *dcr;
1240 
1241     if (dcrn < 0 || dcrn >= DCRN_NB)
1242         goto error;
1243     dcr = &dcr_env->dcrn[dcrn];
1244     if (dcr->dcr_read == NULL)
1245         goto error;
1246     *valp = (*dcr->dcr_read)(dcr->opaque, dcrn);
1247 
1248     return 0;
1249 
1250  error:
1251     if (dcr_env->read_error != NULL)
1252         return (*dcr_env->read_error)(dcrn);
1253 
1254     return -1;
1255 }
1256 
1257 int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, uint32_t val)
1258 {
1259     ppc_dcrn_t *dcr;
1260 
1261     if (dcrn < 0 || dcrn >= DCRN_NB)
1262         goto error;
1263     dcr = &dcr_env->dcrn[dcrn];
1264     if (dcr->dcr_write == NULL)
1265         goto error;
1266     (*dcr->dcr_write)(dcr->opaque, dcrn, val);
1267 
1268     return 0;
1269 
1270  error:
1271     if (dcr_env->write_error != NULL)
1272         return (*dcr_env->write_error)(dcrn);
1273 
1274     return -1;
1275 }
1276 
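/* Attach read/write callbacks to a DCR number; fails if the DCR environment
 * is missing, the number is out of range, or the DCR is already claimed. */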
1277 int ppc_dcr_register (CPUPPCState *env, int dcrn, void *opaque,
1278                       dcr_read_cb dcr_read, dcr_write_cb dcr_write)
1279 {
1280     ppc_dcr_t *dcr_env;
1281     ppc_dcrn_t *dcr;
1282 
1283     dcr_env = env->dcr_env;
1284     if (dcr_env == NULL)
1285         return -1;
1286     if (dcrn < 0 || dcrn >= DCRN_NB)
1287         return -1;
1288     dcr = &dcr_env->dcrn[dcrn];
1289     if (dcr->opaque != NULL ||
1290         dcr->dcr_read != NULL ||
1291         dcr->dcr_write != NULL)
1292         return -1;
1293     dcr->opaque = opaque;
1294     dcr->dcr_read = dcr_read;
1295     dcr->dcr_write = dcr_write;
1296 
1297     return 0;
1298 }
1299 
1300 int ppc_dcr_init (CPUPPCState *env, int (*read_error)(int dcrn),
1301                   int (*write_error)(int dcrn))
1302 {
1303     ppc_dcr_t *dcr_env;
1304 
1305     dcr_env = g_malloc0(sizeof(ppc_dcr_t));
1306     dcr_env->read_error = read_error;
1307     dcr_env->write_error = write_error;
1308     env->dcr_env = dcr_env;
1309 
1310     return 0;
1311 }
1312 
1313 /*****************************************************************************/
1314 /* Debug port */
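/* Minimal debug output port: offset 0 prints a character, offset 1 prints a
 * newline and flushes stdout, offset 2 sets the QEMU log level. */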
1315 void PPC_debug_write (void *opaque, uint32_t addr, uint32_t val)
1316 {
1317     addr &= 0xF;
1318     switch (addr) {
1319     case 0:
1320         printf("%c", val);
1321         break;
1322     case 1:
1323         printf("\n");
1324         fflush(stdout);
1325         break;
1326     case 2:
1327         printf("Set loglevel to %04" PRIx32 "\n", val);
1328         qemu_set_log(val | 0x100);
1329         break;
1330     }
1331 }
1332 
1333 /* CPU device-tree ID helpers */
1334 int ppc_get_vcpu_dt_id(PowerPCCPU *cpu)
1335 {
1336     return cpu->cpu_dt_id;
1337 }
1338 
1339 PowerPCCPU *ppc_get_vcpu_by_dt_id(int cpu_dt_id)
1340 {
1341     CPUState *cs;
1342 
1343     CPU_FOREACH(cs) {
1344         PowerPCCPU *cpu = POWERPC_CPU(cs);
1345 
1346         if (cpu->cpu_dt_id == cpu_dt_id) {
1347             return cpu;
1348         }
1349     }
1350 
1351     return NULL;
1352 }
1353