xref: /openbmc/qemu/hw/ppc/ppc.c (revision 4c4465ff)
1 /*
2  * QEMU generic PowerPC hardware System Emulator
3  *
4  * Copyright (c) 2003-2007 Jocelyn Mayer
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to deal
8  * in the Software without restriction, including without limitation the rights
9  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10  * copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22  * THE SOFTWARE.
23  */
24 
25 #include "qemu/osdep.h"
26 #include "cpu.h"
27 #include "hw/irq.h"
28 #include "hw/ppc/ppc.h"
29 #include "hw/ppc/ppc_e500.h"
30 #include "qemu/timer.h"
31 #include "sysemu/cpus.h"
32 #include "qemu/log.h"
33 #include "qemu/main-loop.h"
34 #include "qemu/error-report.h"
35 #include "sysemu/kvm.h"
36 #include "sysemu/runstate.h"
37 #include "kvm_ppc.h"
38 #include "migration/vmstate.h"
39 #include "trace.h"
40 
41 //#define PPC_DEBUG_IRQ
42 //#define PPC_DEBUG_TB
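/*
 * Uncommenting the defines above turns the LOG_IRQ()/LOG_TB() macros below
 * into real logging calls.  Note that LOG_IRQ() goes through
 * qemu_log_mask(CPU_LOG_INT, ...), so the "int" log category (e.g. "-d int"
 * on the command line) must also be enabled for its output to appear.
 */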
43 
44 #ifdef PPC_DEBUG_IRQ
45 #  define LOG_IRQ(...) qemu_log_mask(CPU_LOG_INT, ## __VA_ARGS__)
46 #else
47 #  define LOG_IRQ(...) do { } while (0)
48 #endif
49 
50 
51 #ifdef PPC_DEBUG_TB
52 #  define LOG_TB(...) qemu_log(__VA_ARGS__)
53 #else
54 #  define LOG_TB(...) do { } while (0)
55 #endif
56 
57 static void cpu_ppc_tb_stop (CPUPPCState *env);
58 static void cpu_ppc_tb_start (CPUPPCState *env);
59 
60 void ppc_set_irq(PowerPCCPU *cpu, int n_IRQ, int level)
61 {
62     CPUState *cs = CPU(cpu);
63     CPUPPCState *env = &cpu->env;
64     unsigned int old_pending;
65     bool locked = false;
66 
67     /* We may already have the BQL if coming from the reset path */
68     if (!qemu_mutex_iothread_locked()) {
69         locked = true;
70         qemu_mutex_lock_iothread();
71     }
72 
73     old_pending = env->pending_interrupts;
74 
75     if (level) {
76         env->pending_interrupts |= 1 << n_IRQ;
77         cpu_interrupt(cs, CPU_INTERRUPT_HARD);
78     } else {
79         env->pending_interrupts &= ~(1 << n_IRQ);
80         if (env->pending_interrupts == 0) {
81             cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
82         }
83     }
84 
85     if (old_pending != env->pending_interrupts) {
86         kvmppc_set_interrupt(cpu, n_IRQ, level);
87     }
88 
89 
90     LOG_IRQ("%s: %p n_IRQ %d level %d => pending %08" PRIx32
91                 " req %08x\n", __func__, env, n_IRQ, level,
92                 env->pending_interrupts, CPU(cpu)->interrupt_request);
93 
94     if (locked) {
95         qemu_mutex_unlock_iothread();
96     }
97 }
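/*
 * Illustrative sketch (hypothetical caller, not taken from this file): a
 * device or board model raises and lowers one of the PPC_INTERRUPT_* lines
 * like this:
 *
 *     static void my_board_assert_ext_irq(PowerPCCPU *cpu, bool asserted)
 *     {
 *         ppc_set_irq(cpu, PPC_INTERRUPT_EXT, asserted ? 1 : 0);
 *     }
 *
 * ppc_set_irq() takes the BQL if needed and mirrors the change to KVM.
 */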
98 
99 /* PowerPC 6xx / 7xx internal IRQ controller */
100 static void ppc6xx_set_irq(void *opaque, int pin, int level)
101 {
102     PowerPCCPU *cpu = opaque;
103     CPUPPCState *env = &cpu->env;
104     int cur_level;
105 
106     LOG_IRQ("%s: env %p pin %d level %d\n", __func__,
107                 env, pin, level);
108     cur_level = (env->irq_input_state >> pin) & 1;
109     /* Don't generate spurious events */
110     if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
111         CPUState *cs = CPU(cpu);
112 
113         switch (pin) {
114         case PPC6xx_INPUT_TBEN:
115             /* Level sensitive - active high */
116             LOG_IRQ("%s: %s the time base\n",
117                         __func__, level ? "start" : "stop");
118             if (level) {
119                 cpu_ppc_tb_start(env);
120             } else {
121                 cpu_ppc_tb_stop(env);
122             }
123             break;
124         case PPC6xx_INPUT_INT:
125             /* Level sensitive - active high */
126             LOG_IRQ("%s: set the external IRQ state to %d\n",
127                         __func__, level);
128             ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
129             break;
130         case PPC6xx_INPUT_SMI:
131             /* Level sensitive - active high */
132             LOG_IRQ("%s: set the SMI IRQ state to %d\n",
133                         __func__, level);
134             ppc_set_irq(cpu, PPC_INTERRUPT_SMI, level);
135             break;
136         case PPC6xx_INPUT_MCP:
137             /* Negative edge sensitive */
138             /* XXX: TODO: the actual reaction may depend on HID0 status
139              *            603/604/740/750: check HID0[EMCP]
140              */
141             if (cur_level == 1 && level == 0) {
142                 LOG_IRQ("%s: raise machine check state\n",
143                             __func__);
144                 ppc_set_irq(cpu, PPC_INTERRUPT_MCK, 1);
145             }
146             break;
147         case PPC6xx_INPUT_CKSTP_IN:
148             /* Level sensitive - active low */
149             /* XXX: TODO: relay the signal to CKSTP_OUT pin */
150             /* XXX: Note that the only way to restart the CPU is to reset it */
151             if (level) {
152                 LOG_IRQ("%s: stop the CPU\n", __func__);
153                 cs->halted = 1;
154             }
155             break;
156         case PPC6xx_INPUT_HRESET:
157             /* Level sensitive - active low */
158             if (level) {
159                 LOG_IRQ("%s: reset the CPU\n", __func__);
160                 cpu_interrupt(cs, CPU_INTERRUPT_RESET);
161             }
162             break;
163         case PPC6xx_INPUT_SRESET:
164             LOG_IRQ("%s: set the RESET IRQ state to %d\n",
165                         __func__, level);
166             ppc_set_irq(cpu, PPC_INTERRUPT_RESET, level);
167             break;
168         default:
169             /* Unknown pin - do nothing */
170             LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin);
171             return;
172         }
173         if (level)
174             env->irq_input_state |= 1 << pin;
175         else
176             env->irq_input_state &= ~(1 << pin);
177     }
178 }
179 
180 void ppc6xx_irq_init(PowerPCCPU *cpu)
181 {
182     CPUPPCState *env = &cpu->env;
183 
184     env->irq_inputs = (void **)qemu_allocate_irqs(&ppc6xx_set_irq, cpu,
185                                                   PPC6xx_INPUT_NB);
186 }
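/*
 * Illustrative sketch (hypothetical board code): once ppc6xx_irq_init() has
 * run, a board model can drive one of the input pins through the qemu_irq
 * array stored in env->irq_inputs, e.g.:
 *
 *     qemu_irq ext_pin = ((qemu_irq *)env->irq_inputs)[PPC6xx_INPUT_INT];
 *     qemu_set_irq(ext_pin, 1);    // assert the external interrupt pin
 *
 * The valid indices are the PPC6xx_INPUT_* values handled by
 * ppc6xx_set_irq() above.
 */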
187 
188 #if defined(TARGET_PPC64)
189 /* PowerPC 970 internal IRQ controller */
190 static void ppc970_set_irq(void *opaque, int pin, int level)
191 {
192     PowerPCCPU *cpu = opaque;
193     CPUPPCState *env = &cpu->env;
194     int cur_level;
195 
196     LOG_IRQ("%s: env %p pin %d level %d\n", __func__,
197                 env, pin, level);
198     cur_level = (env->irq_input_state >> pin) & 1;
199     /* Don't generate spurious events */
200     if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
201         CPUState *cs = CPU(cpu);
202 
203         switch (pin) {
204         case PPC970_INPUT_INT:
205             /* Level sensitive - active high */
206             LOG_IRQ("%s: set the external IRQ state to %d\n",
207                         __func__, level);
208             ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
209             break;
210         case PPC970_INPUT_THINT:
211             /* Level sensitive - active high */
212             LOG_IRQ("%s: set the thermal IRQ state to %d\n", __func__,
213                         level);
214             ppc_set_irq(cpu, PPC_INTERRUPT_THERM, level);
215             break;
216         case PPC970_INPUT_MCP:
217             /* Negative edge sensitive */
218             /* XXX: TODO: the actual reaction may depend on HID0 status
219              *            603/604/740/750: check HID0[EMCP]
220              */
221             if (cur_level == 1 && level == 0) {
222                 LOG_IRQ("%s: raise machine check state\n",
223                             __func__);
224                 ppc_set_irq(cpu, PPC_INTERRUPT_MCK, 1);
225             }
226             break;
227         case PPC970_INPUT_CKSTP:
228             /* Level sensitive - active low */
229             /* XXX: TODO: relay the signal to CKSTP_OUT pin */
230             if (level) {
231                 LOG_IRQ("%s: stop the CPU\n", __func__);
232                 cs->halted = 1;
233             } else {
234                 LOG_IRQ("%s: restart the CPU\n", __func__);
235                 cs->halted = 0;
236                 qemu_cpu_kick(cs);
237             }
238             break;
239         case PPC970_INPUT_HRESET:
240             /* Level sensitive - active low */
241             if (level) {
242                 cpu_interrupt(cs, CPU_INTERRUPT_RESET);
243             }
244             break;
245         case PPC970_INPUT_SRESET:
246             LOG_IRQ("%s: set the RESET IRQ state to %d\n",
247                         __func__, level);
248             ppc_set_irq(cpu, PPC_INTERRUPT_RESET, level);
249             break;
250         case PPC970_INPUT_TBEN:
251             LOG_IRQ("%s: set the TBEN state to %d\n", __func__,
252                         level);
253             /* XXX: TODO */
254             break;
255         default:
256             /* Unknown pin - do nothing */
257             LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin);
258             return;
259         }
260         if (level)
261             env->irq_input_state |= 1 << pin;
262         else
263             env->irq_input_state &= ~(1 << pin);
264     }
265 }
266 
267 void ppc970_irq_init(PowerPCCPU *cpu)
268 {
269     CPUPPCState *env = &cpu->env;
270 
271     env->irq_inputs = (void **)qemu_allocate_irqs(&ppc970_set_irq, cpu,
272                                                   PPC970_INPUT_NB);
273 }
274 
275 /* POWER7 internal IRQ controller */
276 static void power7_set_irq(void *opaque, int pin, int level)
277 {
278     PowerPCCPU *cpu = opaque;
279 
280     LOG_IRQ("%s: env %p pin %d level %d\n", __func__,
281             &cpu->env, pin, level);
282 
283     switch (pin) {
284     case POWER7_INPUT_INT:
285         /* Level sensitive - active high */
286         LOG_IRQ("%s: set the external IRQ state to %d\n",
287                 __func__, level);
288         ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
289         break;
290     default:
291         /* Unknown pin - do nothing */
292         LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin);
293         return;
294     }
295 }
296 
297 void ppcPOWER7_irq_init(PowerPCCPU *cpu)
298 {
299     CPUPPCState *env = &cpu->env;
300 
301     env->irq_inputs = (void **)qemu_allocate_irqs(&power7_set_irq, cpu,
302                                                   POWER7_INPUT_NB);
303 }
304 
305 /* POWER9 internal IRQ controller */
306 static void power9_set_irq(void *opaque, int pin, int level)
307 {
308     PowerPCCPU *cpu = opaque;
309 
310     LOG_IRQ("%s: env %p pin %d level %d\n", __func__,
311             &cpu->env, pin, level);
312 
313     switch (pin) {
314     case POWER9_INPUT_INT:
315         /* Level sensitive - active high */
316         LOG_IRQ("%s: set the external IRQ state to %d\n",
317                 __func__, level);
318         ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
319         break;
320     case POWER9_INPUT_HINT:
321         /* Level sensitive - active high */
322         LOG_IRQ("%s: set the hypervisor virtualization IRQ state to %d\n",
323                 __func__, level);
324         ppc_set_irq(cpu, PPC_INTERRUPT_HVIRT, level);
325         break;
326     default:
327         /* Unknown pin - do nothing */
328         LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin);
329         return;
330     }
331 }
332 
333 void ppcPOWER9_irq_init(PowerPCCPU *cpu)
334 {
335     CPUPPCState *env = &cpu->env;
336 
337     env->irq_inputs = (void **)qemu_allocate_irqs(&power9_set_irq, cpu,
338                                                   POWER9_INPUT_NB);
339 }
340 #endif /* defined(TARGET_PPC64) */
341 
342 void ppc40x_core_reset(PowerPCCPU *cpu)
343 {
344     CPUPPCState *env = &cpu->env;
345     target_ulong dbsr;
346 
347     qemu_log_mask(CPU_LOG_RESET, "Reset PowerPC core\n");
348     cpu_interrupt(CPU(cpu), CPU_INTERRUPT_RESET);
349     dbsr = env->spr[SPR_40x_DBSR];
350     dbsr &= ~0x00000300;
351     dbsr |= 0x00000100;
352     env->spr[SPR_40x_DBSR] = dbsr;
353 }
354 
355 void ppc40x_chip_reset(PowerPCCPU *cpu)
356 {
357     CPUPPCState *env = &cpu->env;
358     target_ulong dbsr;
359 
360     qemu_log_mask(CPU_LOG_RESET, "Reset PowerPC chip\n");
361     cpu_interrupt(CPU(cpu), CPU_INTERRUPT_RESET);
362     /* XXX: TODO reset all internal peripherals */
363     dbsr = env->spr[SPR_40x_DBSR];
364     dbsr &= ~0x00000300;
365     dbsr |= 0x00000200;
366     env->spr[SPR_40x_DBSR] = dbsr;
367 }
368 
369 void ppc40x_system_reset(PowerPCCPU *cpu)
370 {
371     qemu_log_mask(CPU_LOG_RESET, "Reset PowerPC system\n");
372     qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
373 }
374 
375 void store_40x_dbcr0(CPUPPCState *env, uint32_t val)
376 {
377     PowerPCCPU *cpu = env_archcpu(env);
378 
379     switch ((val >> 28) & 0x3) {
380     case 0x0:
381         /* No action */
382         break;
383     case 0x1:
384         /* Core reset */
385         ppc40x_core_reset(cpu);
386         break;
387     case 0x2:
388         /* Chip reset */
389         ppc40x_chip_reset(cpu);
390         break;
391     case 0x3:
392         /* System reset */
393         ppc40x_system_reset(cpu);
394         break;
395     }
396 }
397 
398 /* PowerPC 40x internal IRQ controller */
399 static void ppc40x_set_irq(void *opaque, int pin, int level)
400 {
401     PowerPCCPU *cpu = opaque;
402     CPUPPCState *env = &cpu->env;
403     int cur_level;
404 
405     LOG_IRQ("%s: env %p pin %d level %d\n", __func__,
406                 env, pin, level);
407     cur_level = (env->irq_input_state >> pin) & 1;
408     /* Don't generate spurious events */
409     if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
410         CPUState *cs = CPU(cpu);
411 
412         switch (pin) {
413         case PPC40x_INPUT_RESET_SYS:
414             if (level) {
415                 LOG_IRQ("%s: reset the PowerPC system\n",
416                             __func__);
417                 ppc40x_system_reset(cpu);
418             }
419             break;
420         case PPC40x_INPUT_RESET_CHIP:
421             if (level) {
422                 LOG_IRQ("%s: reset the PowerPC chip\n", __func__);
423                 ppc40x_chip_reset(cpu);
424             }
425             break;
426         case PPC40x_INPUT_RESET_CORE:
427             /* XXX: TODO: update DBSR[MRR] */
428             if (level) {
429                 LOG_IRQ("%s: reset the PowerPC core\n", __func__);
430                 ppc40x_core_reset(cpu);
431             }
432             break;
433         case PPC40x_INPUT_CINT:
434             /* Level sensitive - active high */
435             LOG_IRQ("%s: set the critical IRQ state to %d\n",
436                         __func__, level);
437             ppc_set_irq(cpu, PPC_INTERRUPT_CEXT, level);
438             break;
439         case PPC40x_INPUT_INT:
440             /* Level sensitive - active high */
441             LOG_IRQ("%s: set the external IRQ state to %d\n",
442                         __func__, level);
443             ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
444             break;
445         case PPC40x_INPUT_HALT:
446             /* Level sensitive - active low */
447             if (level) {
448                 LOG_IRQ("%s: stop the CPU\n", __func__);
449                 cs->halted = 1;
450             } else {
451                 LOG_IRQ("%s: restart the CPU\n", __func__);
452                 cs->halted = 0;
453                 qemu_cpu_kick(cs);
454             }
455             break;
456         case PPC40x_INPUT_DEBUG:
457             /* Level sensitive - active high */
458             LOG_IRQ("%s: set the debug pin state to %d\n",
459                         __func__, level);
460             ppc_set_irq(cpu, PPC_INTERRUPT_DEBUG, level);
461             break;
462         default:
463             /* Unknown pin - do nothing */
464             LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin);
465             return;
466         }
467         if (level)
468             env->irq_input_state |= 1 << pin;
469         else
470             env->irq_input_state &= ~(1 << pin);
471     }
472 }
473 
474 void ppc40x_irq_init(PowerPCCPU *cpu)
475 {
476     CPUPPCState *env = &cpu->env;
477 
478     env->irq_inputs = (void **)qemu_allocate_irqs(&ppc40x_set_irq,
479                                                   cpu, PPC40x_INPUT_NB);
480 }
481 
482 /* PowerPC E500 internal IRQ controller */
483 static void ppce500_set_irq(void *opaque, int pin, int level)
484 {
485     PowerPCCPU *cpu = opaque;
486     CPUPPCState *env = &cpu->env;
487     int cur_level;
488 
489     LOG_IRQ("%s: env %p pin %d level %d\n", __func__,
490                 env, pin, level);
491     cur_level = (env->irq_input_state >> pin) & 1;
492     /* Don't generate spurious events */
493     if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
494         switch (pin) {
495         case PPCE500_INPUT_MCK:
496             if (level) {
497                 LOG_IRQ("%s: reset the PowerPC system\n",
498                             __func__);
499                 qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
500             }
501             break;
502         case PPCE500_INPUT_RESET_CORE:
503             if (level) {
504                 LOG_IRQ("%s: reset the PowerPC core\n", __func__);
505                 ppc_set_irq(cpu, PPC_INTERRUPT_MCK, level);
506             }
507             break;
508         case PPCE500_INPUT_CINT:
509             /* Level sensitive - active high */
510             LOG_IRQ("%s: set the critical IRQ state to %d\n",
511                         __func__, level);
512             ppc_set_irq(cpu, PPC_INTERRUPT_CEXT, level);
513             break;
514         case PPCE500_INPUT_INT:
515             /* Level sensitive - active high */
516             LOG_IRQ("%s: set the core IRQ state to %d\n",
517                         __func__, level);
518             ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
519             break;
520         case PPCE500_INPUT_DEBUG:
521             /* Level sensitive - active high */
522             LOG_IRQ("%s: set the debug pin state to %d\n",
523                         __func__, level);
524             ppc_set_irq(cpu, PPC_INTERRUPT_DEBUG, level);
525             break;
526         default:
527             /* Unknown pin - do nothing */
528             LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin);
529             return;
530         }
531         if (level)
532             env->irq_input_state |= 1 << pin;
533         else
534             env->irq_input_state &= ~(1 << pin);
535     }
536 }
537 
538 void ppce500_irq_init(PowerPCCPU *cpu)
539 {
540     CPUPPCState *env = &cpu->env;
541 
542     env->irq_inputs = (void **)qemu_allocate_irqs(&ppce500_set_irq,
543                                                   cpu, PPCE500_INPUT_NB);
544 }
545 
546 /* Enable or disable the E500 EPR capability */
547 void ppce500_set_mpic_proxy(bool enabled)
548 {
549     CPUState *cs;
550 
551     CPU_FOREACH(cs) {
552         PowerPCCPU *cpu = POWERPC_CPU(cs);
553 
554         cpu->env.mpic_proxy = enabled;
555         if (kvm_enabled()) {
556             kvmppc_set_mpic_proxy(cpu, enabled);
557         }
558     }
559 }
560 
561 /*****************************************************************************/
562 /* PowerPC time base and decrementer emulation */
563 
564 uint64_t cpu_ppc_get_tb(ppc_tb_t *tb_env, uint64_t vmclk, int64_t tb_offset)
565 {
566     /* TB time in tb periods */
567     return muldiv64(vmclk, tb_env->tb_freq, NANOSECONDS_PER_SECOND) + tb_offset;
568 }
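/*
 * Worked example: with tb_freq = 16 MHz and vmclk = 1 000 000 000 ns (one
 * second of virtual time), the expression above evaluates to
 * muldiv64(10^9, 16 * 10^6, 10^9) = 16 000 000 time base ticks, plus
 * whatever tb_offset has been established by the store functions below.
 */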
569 
570 uint64_t cpu_ppc_load_tbl (CPUPPCState *env)
571 {
572     ppc_tb_t *tb_env = env->tb_env;
573     uint64_t tb;
574 
575     if (kvm_enabled()) {
576         return env->spr[SPR_TBL];
577     }
578 
579     tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
580     LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb);
581 
582     return tb;
583 }
584 
585 static inline uint32_t _cpu_ppc_load_tbu(CPUPPCState *env)
586 {
587     ppc_tb_t *tb_env = env->tb_env;
588     uint64_t tb;
589 
590     tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
591     LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb);
592 
593     return tb >> 32;
594 }
595 
596 uint32_t cpu_ppc_load_tbu (CPUPPCState *env)
597 {
598     if (kvm_enabled()) {
599         return env->spr[SPR_TBU];
600     }
601 
602     return _cpu_ppc_load_tbu(env);
603 }
604 
605 static inline void cpu_ppc_store_tb(ppc_tb_t *tb_env, uint64_t vmclk,
606                                     int64_t *tb_offsetp, uint64_t value)
607 {
608     *tb_offsetp = value -
609         muldiv64(vmclk, tb_env->tb_freq, NANOSECONDS_PER_SECOND);
610 
611     LOG_TB("%s: tb %016" PRIx64 " offset %08" PRIx64 "\n",
612                 __func__, value, *tb_offsetp);
613 }
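/*
 * In other words, the guest-visible time base is always "ticks derived from
 * the virtual clock" plus an offset; storing a value only recomputes the
 * offset so that the sum equals the requested value at the time of the
 * store.
 */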
614 
615 void cpu_ppc_store_tbl (CPUPPCState *env, uint32_t value)
616 {
617     ppc_tb_t *tb_env = env->tb_env;
618     uint64_t tb;
619 
620     tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
621     tb &= 0xFFFFFFFF00000000ULL;
622     cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
623                      &tb_env->tb_offset, tb | (uint64_t)value);
624 }
625 
626 static inline void _cpu_ppc_store_tbu(CPUPPCState *env, uint32_t value)
627 {
628     ppc_tb_t *tb_env = env->tb_env;
629     uint64_t tb;
630 
631     tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
632     tb &= 0x00000000FFFFFFFFULL;
633     cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
634                      &tb_env->tb_offset, ((uint64_t)value << 32) | tb);
635 }
636 
637 void cpu_ppc_store_tbu (CPUPPCState *env, uint32_t value)
638 {
639     _cpu_ppc_store_tbu(env, value);
640 }
641 
642 uint64_t cpu_ppc_load_atbl (CPUPPCState *env)
643 {
644     ppc_tb_t *tb_env = env->tb_env;
645     uint64_t tb;
646 
647     tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
648     LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb);
649 
650     return tb;
651 }
652 
653 uint32_t cpu_ppc_load_atbu (CPUPPCState *env)
654 {
655     ppc_tb_t *tb_env = env->tb_env;
656     uint64_t tb;
657 
658     tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
659     LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb);
660 
661     return tb >> 32;
662 }
663 
664 void cpu_ppc_store_atbl (CPUPPCState *env, uint32_t value)
665 {
666     ppc_tb_t *tb_env = env->tb_env;
667     uint64_t tb;
668 
669     tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
670     tb &= 0xFFFFFFFF00000000ULL;
671     cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
672                      &tb_env->atb_offset, tb | (uint64_t)value);
673 }
674 
675 void cpu_ppc_store_atbu (CPUPPCState *env, uint32_t value)
676 {
677     ppc_tb_t *tb_env = env->tb_env;
678     uint64_t tb;
679 
680     tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
681     tb &= 0x00000000FFFFFFFFULL;
682     cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
683                      &tb_env->atb_offset, ((uint64_t)value << 32) | tb);
684 }
685 
686 uint64_t cpu_ppc_load_vtb(CPUPPCState *env)
687 {
688     ppc_tb_t *tb_env = env->tb_env;
689 
690     return cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
691                           tb_env->vtb_offset);
692 }
693 
694 void cpu_ppc_store_vtb(CPUPPCState *env, uint64_t value)
695 {
696     ppc_tb_t *tb_env = env->tb_env;
697 
698     cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
699                      &tb_env->vtb_offset, value);
700 }
701 
702 void cpu_ppc_store_tbu40(CPUPPCState *env, uint64_t value)
703 {
704     ppc_tb_t *tb_env = env->tb_env;
705     uint64_t tb;
706 
707     tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
708                         tb_env->tb_offset);
709     tb &= 0xFFFFFFUL;
710     tb |= (value & ~0xFFFFFFUL);
711     cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
712                      &tb_env->tb_offset, tb);
713 }
714 
715 static void cpu_ppc_tb_stop (CPUPPCState *env)
716 {
717     ppc_tb_t *tb_env = env->tb_env;
718     uint64_t tb, atb, vmclk;
719 
720     /* If the time base is already frozen, do nothing */
721     if (tb_env->tb_freq != 0) {
722         vmclk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
723         /* Get the time base */
724         tb = cpu_ppc_get_tb(tb_env, vmclk, tb_env->tb_offset);
725         /* Get the alternate time base */
726         atb = cpu_ppc_get_tb(tb_env, vmclk, tb_env->atb_offset);
727         /* Store the time base value (ie compute the current offset) */
728         /* Store the time base value (i.e. compute the current offset) */
729         /* Store the alternate time base value (compute the current offset) */
730         cpu_ppc_store_tb(tb_env, vmclk, &tb_env->atb_offset, atb);
731         /* Set the time base frequency to zero */
732         tb_env->tb_freq = 0;
733         /* Now, the time bases are frozen to tb_offset / atb_offset value */
734     }
735 }
736 
737 static void cpu_ppc_tb_start (CPUPPCState *env)
738 {
739     ppc_tb_t *tb_env = env->tb_env;
740     uint64_t tb, atb, vmclk;
741 
742     /* If the time base is not frozen, do nothing */
743     if (tb_env->tb_freq == 0) {
744         vmclk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
745         /* Get the time base from tb_offset */
746         tb = tb_env->tb_offset;
747         /* Get the alternate time base from atb_offset */
748         atb = tb_env->atb_offset;
749         /* Restore the tb frequency from the decrementer frequency */
750         tb_env->tb_freq = tb_env->decr_freq;
751         /* Store the time base value */
752         cpu_ppc_store_tb(tb_env, vmclk, &tb_env->tb_offset, tb);
753         /* Store the alternate time base value */
754         cpu_ppc_store_tb(tb_env, vmclk, &tb_env->atb_offset, atb);
755     }
756 }
757 
758 bool ppc_decr_clear_on_delivery(CPUPPCState *env)
759 {
760     ppc_tb_t *tb_env = env->tb_env;
761     int flags = PPC_DECR_UNDERFLOW_TRIGGERED | PPC_DECR_UNDERFLOW_LEVEL;
762     return ((tb_env->flags & flags) == PPC_DECR_UNDERFLOW_TRIGGERED);
763 }
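/*
 * Summary of the two underflow flags tested above and in
 * __cpu_ppc_store_decr() below: PPC_DECR_UNDERFLOW_TRIGGERED means the
 * decrementer interrupt is edge-triggered when the MSB goes from 0 to 1,
 * while PPC_DECR_UNDERFLOW_LEVEL means the MSB acts as a level-sensitive
 * request.  Only the purely edge-triggered configuration needs the
 * interrupt to be cleared when it is delivered.
 */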
764 
765 static inline int64_t _cpu_ppc_load_decr(CPUPPCState *env, uint64_t next)
766 {
767     ppc_tb_t *tb_env = env->tb_env;
768     int64_t decr, diff;
769 
770     diff = next - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
771     if (diff >= 0) {
772         decr = muldiv64(diff, tb_env->decr_freq, NANOSECONDS_PER_SECOND);
773     } else if (tb_env->flags & PPC_TIMER_BOOKE) {
774         decr = 0;
775     }  else {
776         decr = -muldiv64(-diff, tb_env->decr_freq, NANOSECONDS_PER_SECOND);
777     }
778     LOG_TB("%s: %016" PRIx64 "\n", __func__, decr);
779 
780     return decr;
781 }
782 
783 target_ulong cpu_ppc_load_decr(CPUPPCState *env)
784 {
785     ppc_tb_t *tb_env = env->tb_env;
786     uint64_t decr;
787 
788     if (kvm_enabled()) {
789         return env->spr[SPR_DECR];
790     }
791 
792     decr = _cpu_ppc_load_decr(env, tb_env->decr_next);
793 
794     /*
795      * If the large decrementer is enabled then the decrementer is sign
796      * extended to 64 bits, otherwise it is a 32-bit value.
797      */
798     if (env->spr[SPR_LPCR] & LPCR_LD) {
799         return decr;
800     }
801     return (uint32_t) decr;
802 }
803 
804 target_ulong cpu_ppc_load_hdecr(CPUPPCState *env)
805 {
806     PowerPCCPU *cpu = env_archcpu(env);
807     PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
808     ppc_tb_t *tb_env = env->tb_env;
809     uint64_t hdecr;
810 
811     hdecr =  _cpu_ppc_load_decr(env, tb_env->hdecr_next);
812 
813     /*
814      * If we have a large decrementer (POWER9 or later) then hdecr is sign
815      * extended to 64 bits, otherwise it is 32 bits.
816      */
817     if (pcc->lrg_decr_bits > 32) {
818         return hdecr;
819     }
820     return (uint32_t) hdecr;
821 }
822 
823 uint64_t cpu_ppc_load_purr (CPUPPCState *env)
824 {
825     ppc_tb_t *tb_env = env->tb_env;
826 
827     return cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
828                           tb_env->purr_offset);
829 }
830 
831 /* When the decrementer expires,
832  * all we need to do is generate or queue a CPU exception
833  */
834 static inline void cpu_ppc_decr_excp(PowerPCCPU *cpu)
835 {
836     /* Raise it */
837     LOG_TB("raise decrementer exception\n");
838     ppc_set_irq(cpu, PPC_INTERRUPT_DECR, 1);
839 }
840 
841 static inline void cpu_ppc_decr_lower(PowerPCCPU *cpu)
842 {
843     ppc_set_irq(cpu, PPC_INTERRUPT_DECR, 0);
844 }
845 
846 static inline void cpu_ppc_hdecr_excp(PowerPCCPU *cpu)
847 {
848     CPUPPCState *env = &cpu->env;
849 
850     /* Raise it */
851     LOG_TB("raise hv decrementer exception\n");
852 
853     /* The architecture specifies that we don't deliver HDEC
854      * interrupts in a PM state. Not only do they not cause a
855      * wakeup, they also get effectively discarded.
856      */
857     if (!env->resume_as_sreset) {
858         ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 1);
859     }
860 }
861 
862 static inline void cpu_ppc_hdecr_lower(PowerPCCPU *cpu)
863 {
864     ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 0);
865 }
866 
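/*
 * Common helper for the DECR and HDECR stores below.  "decr" is the current
 * register value, "value" is the value being written, "nr_bits" is the
 * register width (32 bits, or the CPU's large-decrementer width when that
 * mode is in use), and nextp/timer/raise_excp/lower_excp select which of
 * the two decrementers is being serviced.
 */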
867 static void __cpu_ppc_store_decr(PowerPCCPU *cpu, uint64_t *nextp,
868                                  QEMUTimer *timer,
869                                  void (*raise_excp)(void *),
870                                  void (*lower_excp)(PowerPCCPU *),
871                                  target_ulong decr, target_ulong value,
872                                  int nr_bits)
873 {
874     CPUPPCState *env = &cpu->env;
875     ppc_tb_t *tb_env = env->tb_env;
876     uint64_t now, next;
877     bool negative;
878 
879     /* Truncate value to decr_width and sign extend for simplicity */
880     /* Truncate value to nr_bits and sign extend for simplicity */
881     negative = !!(value & (1ULL << (nr_bits - 1)));
882     if (negative) {
883         value |= (0xFFFFFFFFULL << nr_bits);
884     }
885 
886     LOG_TB("%s: " TARGET_FMT_lx " => " TARGET_FMT_lx "\n", __func__,
887                 decr, value);
888 
889     if (kvm_enabled()) {
890         /* KVM handles decrementer exceptions, we don't need our own timer */
891         return;
892     }
893 
894     /*
895      * Going from 2 -> 1, 1 -> 0 or 0 -> -1 is the event to generate a DEC
896      * interrupt.
897      *
898      * If we get a really small DEC value, we can assume that by the time we
899      * handle it we should already be injecting an interrupt.
900      *
901      * On MSB level based DEC implementations the MSB always means the interrupt
902      * is pending, so raise it on those.
903      *
904      * On MSB edge based DEC implementations the MSB going from 0 -> 1 triggers
905      * an edge interrupt, so raise it here too.
906      */
907     if ((value < 3) ||
908         ((tb_env->flags & PPC_DECR_UNDERFLOW_LEVEL) && negative) ||
909         ((tb_env->flags & PPC_DECR_UNDERFLOW_TRIGGERED) && negative
910           && !(decr & (1ULL << (nr_bits - 1))))) {
911         (*raise_excp)(cpu);
912         return;
913     }
914 
915     /* On MSB level based systems a 0 for the MSB stops interrupt delivery */
916     if (!negative && (tb_env->flags & PPC_DECR_UNDERFLOW_LEVEL)) {
917         (*lower_excp)(cpu);
918     }
919 
920     /* Calculate the next timer event */
921     now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
922     next = now + muldiv64(value, NANOSECONDS_PER_SECOND, tb_env->decr_freq);
923     *nextp = next;
924 
925     /* Adjust timer */
926     timer_mod(timer, next);
927 }
928 
929 static inline void _cpu_ppc_store_decr(PowerPCCPU *cpu, target_ulong decr,
930                                        target_ulong value, int nr_bits)
931 {
932     ppc_tb_t *tb_env = cpu->env.tb_env;
933 
934     __cpu_ppc_store_decr(cpu, &tb_env->decr_next, tb_env->decr_timer,
935                          tb_env->decr_timer->cb, &cpu_ppc_decr_lower, decr,
936                          value, nr_bits);
937 }
938 
939 void cpu_ppc_store_decr(CPUPPCState *env, target_ulong value)
940 {
941     PowerPCCPU *cpu = env_archcpu(env);
942     PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
943     int nr_bits = 32;
944 
945     if (env->spr[SPR_LPCR] & LPCR_LD) {
946         nr_bits = pcc->lrg_decr_bits;
947     }
948 
949     _cpu_ppc_store_decr(cpu, cpu_ppc_load_decr(env), value, nr_bits);
950 }
951 
952 static void cpu_ppc_decr_cb(void *opaque)
953 {
954     PowerPCCPU *cpu = opaque;
955 
956     cpu_ppc_decr_excp(cpu);
957 }
958 
959 static inline void _cpu_ppc_store_hdecr(PowerPCCPU *cpu, target_ulong hdecr,
960                                         target_ulong value, int nr_bits)
961 {
962     ppc_tb_t *tb_env = cpu->env.tb_env;
963 
964     if (tb_env->hdecr_timer != NULL) {
965         __cpu_ppc_store_decr(cpu, &tb_env->hdecr_next, tb_env->hdecr_timer,
966                              tb_env->hdecr_timer->cb, &cpu_ppc_hdecr_lower,
967                              hdecr, value, nr_bits);
968     }
969 }
970 
971 void cpu_ppc_store_hdecr(CPUPPCState *env, target_ulong value)
972 {
973     PowerPCCPU *cpu = env_archcpu(env);
974     PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
975 
976     _cpu_ppc_store_hdecr(cpu, cpu_ppc_load_hdecr(env), value,
977                          pcc->lrg_decr_bits);
978 }
979 
980 static void cpu_ppc_hdecr_cb(void *opaque)
981 {
982     PowerPCCPU *cpu = opaque;
983 
984     cpu_ppc_hdecr_excp(cpu);
985 }
986 
987 void cpu_ppc_store_purr(CPUPPCState *env, uint64_t value)
988 {
989     ppc_tb_t *tb_env = env->tb_env;
990 
991     cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
992                      &tb_env->purr_offset, value);
993 }
994 
995 static void cpu_ppc_set_tb_clk (void *opaque, uint32_t freq)
996 {
997     CPUPPCState *env = opaque;
998     PowerPCCPU *cpu = env_archcpu(env);
999     ppc_tb_t *tb_env = env->tb_env;
1000 
1001     tb_env->tb_freq = freq;
1002     tb_env->decr_freq = freq;
1003     /* There is a bug in Linux 2.4 kernels:
1004      * if a decrementer exception is pending when the kernel enables MSR[EE]
1005      * at startup, it is not ready to handle it...
1006      */
1007     _cpu_ppc_store_decr(cpu, 0xFFFFFFFF, 0xFFFFFFFF, 32);
1008     _cpu_ppc_store_hdecr(cpu, 0xFFFFFFFF, 0xFFFFFFFF, 32);
1009     cpu_ppc_store_purr(env, 0x0000000000000000ULL);
1010 }
1011 
1012 static void timebase_save(PPCTimebase *tb)
1013 {
1014     uint64_t ticks = cpu_get_host_ticks();
1015     PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu);
1016 
1017     if (!first_ppc_cpu->env.tb_env) {
1018         error_report("No timebase object");
1019         return;
1020     }
1021 
1022     /* not used anymore, we keep it for compatibility */
1023     tb->time_of_the_day_ns = qemu_clock_get_ns(QEMU_CLOCK_HOST);
1024     /*
1025      * tb_offset is only expected to be changed by QEMU so
1026      * there is no need to update it from KVM here
1027      */
1028     tb->guest_timebase = ticks + first_ppc_cpu->env.tb_env->tb_offset;
1029 
1030     tb->runstate_paused =
1031         runstate_check(RUN_STATE_PAUSED) || runstate_check(RUN_STATE_SAVE_VM);
1032 }
1033 
1034 static void timebase_load(PPCTimebase *tb)
1035 {
1036     CPUState *cpu;
1037     PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu);
1038     int64_t tb_off_adj, tb_off;
1039     unsigned long freq;
1040 
1041     if (!first_ppc_cpu->env.tb_env) {
1042         error_report("No timebase object");
1043         return;
1044     }
1045 
1046     freq = first_ppc_cpu->env.tb_env->tb_freq;
1047 
1048     tb_off_adj = tb->guest_timebase - cpu_get_host_ticks();
1049 
1050     tb_off = first_ppc_cpu->env.tb_env->tb_offset;
1051     trace_ppc_tb_adjust(tb_off, tb_off_adj, tb_off_adj - tb_off,
1052                         (tb_off_adj - tb_off) / freq);
1053 
1054     /* Set new offset to all CPUs */
1055     CPU_FOREACH(cpu) {
1056         PowerPCCPU *pcpu = POWERPC_CPU(cpu);
1057         pcpu->env.tb_env->tb_offset = tb_off_adj;
1058         kvmppc_set_reg_tb_offset(pcpu, pcpu->env.tb_env->tb_offset);
1059     }
1060 }
1061 
1062 void cpu_ppc_clock_vm_state_change(void *opaque, int running,
1063                                    RunState state)
1064 {
1065     PPCTimebase *tb = opaque;
1066 
1067     if (running) {
1068         timebase_load(tb);
1069     } else {
1070         timebase_save(tb);
1071     }
1072 }
1073 
1074 /*
1075  * When migrating a running guest, read the clock just
1076  * before migration, so that the guest clock counts
1077  * during the events between:
1078  *
1079  *  * vm_stop()
1080  *
1081  *  * pre_save()
1082  *
1083  *  This reduces clock difference on migration from 5s
1084  *  to 0.1s (when max_downtime == 5s), because sending the
1085  *  final pages of memory (which happens between vm_stop()
1086  *  and pre_save()) takes max_downtime.
1087  */
1088 static int timebase_pre_save(void *opaque)
1089 {
1090     PPCTimebase *tb = opaque;
1091 
1092     /* guest_timebase won't be overridden in case of paused guest or savevm */
1093     if (!tb->runstate_paused) {
1094         timebase_save(tb);
1095     }
1096 
1097     return 0;
1098 }
1099 
1100 const VMStateDescription vmstate_ppc_timebase = {
1101     .name = "timebase",
1102     .version_id = 1,
1103     .minimum_version_id = 1,
1104     .minimum_version_id_old = 1,
1105     .pre_save = timebase_pre_save,
1106     .fields      = (VMStateField []) {
1107         VMSTATE_UINT64(guest_timebase, PPCTimebase),
1108         VMSTATE_INT64(time_of_the_day_ns, PPCTimebase),
1109         VMSTATE_END_OF_LIST()
1110     },
1111 };
1112 
1113 /* Set up (once) timebase frequency (in Hz) */
1114 clk_setup_cb cpu_ppc_tb_init (CPUPPCState *env, uint32_t freq)
1115 {
1116     PowerPCCPU *cpu = env_archcpu(env);
1117     ppc_tb_t *tb_env;
1118 
1119     tb_env = g_malloc0(sizeof(ppc_tb_t));
1120     env->tb_env = tb_env;
1121     tb_env->flags = PPC_DECR_UNDERFLOW_TRIGGERED;
1122     if (is_book3s_arch2x(env)) {
1123         /* All Book3S 64bit CPUs implement level based DEC logic */
1124         tb_env->flags |= PPC_DECR_UNDERFLOW_LEVEL;
1125     }
1126     /* Create new timer */
1127     tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_ppc_decr_cb, cpu);
1128     if (env->has_hv_mode) {
1129         tb_env->hdecr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_ppc_hdecr_cb,
1130                                                 cpu);
1131     } else {
1132         tb_env->hdecr_timer = NULL;
1133     }
1134     cpu_ppc_set_tb_clk(env, freq);
1135 
1136     return &cpu_ppc_set_tb_clk;
1137 }
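/*
 * Illustrative sketch (hypothetical machine code; the clk_setup_cb typedef
 * is assumed to match cpu_ppc_set_tb_clk() above): a machine model calls
 * cpu_ppc_tb_init() once per CPU and may keep the returned callback to
 * re-clock the time base later:
 *
 *     clk_setup_cb tb_clk = cpu_ppc_tb_init(&cpu->env, 16 * 1000 * 1000);
 *     ...
 *     tb_clk(&cpu->env, new_freq);    // hypothetical later re-clocking
 *
 * The frequency is in Hz and clocks both the TB and the decrementer.
 */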
1138 
1139 /* Specific helpers for POWER & PowerPC 601 RTC */
1140 void cpu_ppc601_store_rtcu (CPUPPCState *env, uint32_t value)
1141 {
1142     _cpu_ppc_store_tbu(env, value);
1143 }
1144 
1145 uint32_t cpu_ppc601_load_rtcu (CPUPPCState *env)
1146 {
1147     return _cpu_ppc_load_tbu(env);
1148 }
1149 
1150 void cpu_ppc601_store_rtcl (CPUPPCState *env, uint32_t value)
1151 {
1152     cpu_ppc_store_tbl(env, value & 0x3FFFFF80);
1153 }
1154 
1155 uint32_t cpu_ppc601_load_rtcl (CPUPPCState *env)
1156 {
1157     return cpu_ppc_load_tbl(env) & 0x3FFFFF80;
1158 }
1159 
1160 /*****************************************************************************/
1161 /* PowerPC 40x timers */
1162 
1163 /* PIT, FIT & WDT */
1164 typedef struct ppc40x_timer_t ppc40x_timer_t;
1165 struct ppc40x_timer_t {
1166     uint64_t pit_reload;  /* PIT auto-reload value        */
1167     uint64_t fit_next;    /* Tick for next FIT interrupt  */
1168     QEMUTimer *fit_timer;
1169     uint64_t wdt_next;    /* Tick for next WDT interrupt  */
1170     QEMUTimer *wdt_timer;
1171 
1172     /* The 405 has the PIT, the 440 has a DECR.  */
1173     unsigned int decr_excp;
1174 };
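/*
 * The timer callbacks below decode SPR_40x_TCR/SPR_40x_TSR directly, using
 * the following shift counts: TCR bits 25:24 select the FIT period and
 * bit 23 enables the FIT interrupt; bit 26 enables the PIT interrupt and
 * bit 22 its auto-reload; bits 31:30 select the WDT period, bit 27 enables
 * the WDT interrupt and bits 29:28 choose the WDT reset action.  The
 * matching TSR bits record the pending status set by the callbacks.
 */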
1175 
1176 /* Fixed interval timer */
1177 static void cpu_4xx_fit_cb (void *opaque)
1178 {
1179     PowerPCCPU *cpu;
1180     CPUPPCState *env;
1181     ppc_tb_t *tb_env;
1182     ppc40x_timer_t *ppc40x_timer;
1183     uint64_t now, next;
1184 
1185     env = opaque;
1186     cpu = env_archcpu(env);
1187     tb_env = env->tb_env;
1188     ppc40x_timer = tb_env->opaque;
1189     now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
1190     switch ((env->spr[SPR_40x_TCR] >> 24) & 0x3) {
1191     case 0:
1192         next = 1 << 9;
1193         break;
1194     case 1:
1195         next = 1 << 13;
1196         break;
1197     case 2:
1198         next = 1 << 17;
1199         break;
1200     case 3:
1201         next = 1 << 21;
1202         break;
1203     default:
1204         /* Cannot occur, but makes gcc happy */
1205         return;
1206     }
1207     next = now + muldiv64(next, NANOSECONDS_PER_SECOND, tb_env->tb_freq);
1208     if (next == now)
1209         next++;
1210     timer_mod(ppc40x_timer->fit_timer, next);
1211     env->spr[SPR_40x_TSR] |= 1 << 26;
1212     if ((env->spr[SPR_40x_TCR] >> 23) & 0x1) {
1213         ppc_set_irq(cpu, PPC_INTERRUPT_FIT, 1);
1214     }
1215     LOG_TB("%s: ir %d TCR " TARGET_FMT_lx " TSR " TARGET_FMT_lx "\n", __func__,
1216            (int)((env->spr[SPR_40x_TCR] >> 23) & 0x1),
1217            env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR]);
1218 }
1219 
1220 /* Programmable interval timer */
1221 static void start_stop_pit (CPUPPCState *env, ppc_tb_t *tb_env, int is_excp)
1222 {
1223     ppc40x_timer_t *ppc40x_timer;
1224     uint64_t now, next;
1225 
1226     ppc40x_timer = tb_env->opaque;
1227     if (ppc40x_timer->pit_reload <= 1 ||
1228         !((env->spr[SPR_40x_TCR] >> 26) & 0x1) ||
1229         (is_excp && !((env->spr[SPR_40x_TCR] >> 22) & 0x1))) {
1230         /* Stop PIT */
1231         LOG_TB("%s: stop PIT\n", __func__);
1232         timer_del(tb_env->decr_timer);
1233     } else {
1234         LOG_TB("%s: start PIT %016" PRIx64 "\n",
1235                     __func__, ppc40x_timer->pit_reload);
1236         now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
1237         next = now + muldiv64(ppc40x_timer->pit_reload,
1238                               NANOSECONDS_PER_SECOND, tb_env->decr_freq);
1239         if (is_excp)
1240             next += tb_env->decr_next - now;
1241         if (next == now)
1242             next++;
1243         timer_mod(tb_env->decr_timer, next);
1244         tb_env->decr_next = next;
1245     }
1246 }
1247 
1248 static void cpu_4xx_pit_cb (void *opaque)
1249 {
1250     PowerPCCPU *cpu;
1251     CPUPPCState *env;
1252     ppc_tb_t *tb_env;
1253     ppc40x_timer_t *ppc40x_timer;
1254 
1255     env = opaque;
1256     cpu = env_archcpu(env);
1257     tb_env = env->tb_env;
1258     ppc40x_timer = tb_env->opaque;
1259     env->spr[SPR_40x_TSR] |= 1 << 27;
1260     if ((env->spr[SPR_40x_TCR] >> 26) & 0x1) {
1261         ppc_set_irq(cpu, ppc40x_timer->decr_excp, 1);
1262     }
1263     start_stop_pit(env, tb_env, 1);
1264     LOG_TB("%s: ar %d ir %d TCR " TARGET_FMT_lx " TSR " TARGET_FMT_lx " "
1265            "%016" PRIx64 "\n", __func__,
1266            (int)((env->spr[SPR_40x_TCR] >> 22) & 0x1),
1267            (int)((env->spr[SPR_40x_TCR] >> 26) & 0x1),
1268            env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR],
1269            ppc40x_timer->pit_reload);
1270 }
1271 
1272 /* Watchdog timer */
1273 static void cpu_4xx_wdt_cb (void *opaque)
1274 {
1275     PowerPCCPU *cpu;
1276     CPUPPCState *env;
1277     ppc_tb_t *tb_env;
1278     ppc40x_timer_t *ppc40x_timer;
1279     uint64_t now, next;
1280 
1281     env = opaque;
1282     cpu = env_archcpu(env);
1283     tb_env = env->tb_env;
1284     ppc40x_timer = tb_env->opaque;
1285     now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
1286     switch ((env->spr[SPR_40x_TCR] >> 30) & 0x3) {
1287     case 0:
1288         next = 1 << 17;
1289         break;
1290     case 1:
1291         next = 1 << 21;
1292         break;
1293     case 2:
1294         next = 1 << 25;
1295         break;
1296     case 3:
1297         next = 1 << 29;
1298         break;
1299     default:
1300         /* Cannot occur, but makes gcc happy */
1301         return;
1302     }
1303     next = now + muldiv64(next, NANOSECONDS_PER_SECOND, tb_env->decr_freq);
1304     if (next == now)
1305         next++;
1306     LOG_TB("%s: TCR " TARGET_FMT_lx " TSR " TARGET_FMT_lx "\n", __func__,
1307            env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR]);
1308     switch ((env->spr[SPR_40x_TSR] >> 30) & 0x3) {
1309     case 0x0:
1310     case 0x1:
1311         timer_mod(ppc40x_timer->wdt_timer, next);
1312         ppc40x_timer->wdt_next = next;
1313         env->spr[SPR_40x_TSR] |= 1U << 31;
1314         break;
1315     case 0x2:
1316         timer_mod(ppc40x_timer->wdt_timer, next);
1317         ppc40x_timer->wdt_next = next;
1318         env->spr[SPR_40x_TSR] |= 1 << 30;
1319         if ((env->spr[SPR_40x_TCR] >> 27) & 0x1) {
1320             ppc_set_irq(cpu, PPC_INTERRUPT_WDT, 1);
1321         }
1322         break;
1323     case 0x3:
1324         env->spr[SPR_40x_TSR] &= ~0x30000000;
1325         env->spr[SPR_40x_TSR] |= env->spr[SPR_40x_TCR] & 0x30000000;
1326         switch ((env->spr[SPR_40x_TCR] >> 28) & 0x3) {
1327         case 0x0:
1328             /* No reset */
1329             break;
1330         case 0x1: /* Core reset */
1331             ppc40x_core_reset(cpu);
1332             break;
1333         case 0x2: /* Chip reset */
1334             ppc40x_chip_reset(cpu);
1335             break;
1336         case 0x3: /* System reset */
1337             ppc40x_system_reset(cpu);
1338             break;
1339         }
1340     }
1341 }
1342 
1343 void store_40x_pit (CPUPPCState *env, target_ulong val)
1344 {
1345     ppc_tb_t *tb_env;
1346     ppc40x_timer_t *ppc40x_timer;
1347 
1348     tb_env = env->tb_env;
1349     ppc40x_timer = tb_env->opaque;
1350     LOG_TB("%s val" TARGET_FMT_lx "\n", __func__, val);
1351     ppc40x_timer->pit_reload = val;
1352     start_stop_pit(env, tb_env, 0);
1353 }
1354 
1355 target_ulong load_40x_pit (CPUPPCState *env)
1356 {
1357     return cpu_ppc_load_decr(env);
1358 }
1359 
1360 static void ppc_40x_set_tb_clk (void *opaque, uint32_t freq)
1361 {
1362     CPUPPCState *env = opaque;
1363     ppc_tb_t *tb_env = env->tb_env;
1364 
1365     LOG_TB("%s set new frequency to %" PRIu32 "\n", __func__,
1366                 freq);
1367     tb_env->tb_freq = freq;
1368     tb_env->decr_freq = freq;
1369     /* XXX: we should also update all timers */
1370 }
1371 
1372 clk_setup_cb ppc_40x_timers_init (CPUPPCState *env, uint32_t freq,
1373                                   unsigned int decr_excp)
1374 {
1375     ppc_tb_t *tb_env;
1376     ppc40x_timer_t *ppc40x_timer;
1377 
1378     tb_env = g_malloc0(sizeof(ppc_tb_t));
1379     env->tb_env = tb_env;
1380     tb_env->flags = PPC_DECR_UNDERFLOW_TRIGGERED;
1381     ppc40x_timer = g_malloc0(sizeof(ppc40x_timer_t));
1382     tb_env->tb_freq = freq;
1383     tb_env->decr_freq = freq;
1384     tb_env->opaque = ppc40x_timer;
1385     LOG_TB("%s freq %" PRIu32 "\n", __func__, freq);
1386     if (ppc40x_timer != NULL) {
1387         /* We use decr timer for PIT */
1388         tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_pit_cb, env);
1389         ppc40x_timer->fit_timer =
1390             timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_fit_cb, env);
1391         ppc40x_timer->wdt_timer =
1392             timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_wdt_cb, env);
1393         ppc40x_timer->decr_excp = decr_excp;
1394     }
1395 
1396     return &ppc_40x_set_tb_clk;
1397 }
1398 
1399 /*****************************************************************************/
1400 /* Embedded PowerPC Device Control Registers */
1401 typedef struct ppc_dcrn_t ppc_dcrn_t;
1402 struct ppc_dcrn_t {
1403     dcr_read_cb dcr_read;
1404     dcr_write_cb dcr_write;
1405     void *opaque;
1406 };
1407 
1408 /* XXX: on 460, DCR addresses are 32 bits wide,
1409  *      using DCRIPR to get the 22 upper bits of the DCR address
1410  */
1411 #define DCRN_NB 1024
1412 struct ppc_dcr_t {
1413     ppc_dcrn_t dcrn[DCRN_NB];
1414     int (*read_error)(int dcrn);
1415     int (*write_error)(int dcrn);
1416 };
1417 
1418 int ppc_dcr_read (ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp)
1419 {
1420     ppc_dcrn_t *dcr;
1421 
1422     if (dcrn < 0 || dcrn >= DCRN_NB)
1423         goto error;
1424     dcr = &dcr_env->dcrn[dcrn];
1425     if (dcr->dcr_read == NULL)
1426         goto error;
1427     *valp = (*dcr->dcr_read)(dcr->opaque, dcrn);
1428 
1429     return 0;
1430 
1431  error:
1432     if (dcr_env->read_error != NULL)
1433         return (*dcr_env->read_error)(dcrn);
1434 
1435     return -1;
1436 }
1437 
1438 int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, uint32_t val)
1439 {
1440     ppc_dcrn_t *dcr;
1441 
1442     if (dcrn < 0 || dcrn >= DCRN_NB)
1443         goto error;
1444     dcr = &dcr_env->dcrn[dcrn];
1445     if (dcr->dcr_write == NULL)
1446         goto error;
1447     (*dcr->dcr_write)(dcr->opaque, dcrn, val);
1448 
1449     return 0;
1450 
1451  error:
1452     if (dcr_env->write_error != NULL)
1453         return (*dcr_env->write_error)(dcrn);
1454 
1455     return -1;
1456 }
1457 
1458 int ppc_dcr_register (CPUPPCState *env, int dcrn, void *opaque,
1459                       dcr_read_cb dcr_read, dcr_write_cb dcr_write)
1460 {
1461     ppc_dcr_t *dcr_env;
1462     ppc_dcrn_t *dcr;
1463 
1464     dcr_env = env->dcr_env;
1465     if (dcr_env == NULL)
1466         return -1;
1467     if (dcrn < 0 || dcrn >= DCRN_NB)
1468         return -1;
1469     dcr = &dcr_env->dcrn[dcrn];
1470     if (dcr->opaque != NULL ||
1471         dcr->dcr_read != NULL ||
1472         dcr->dcr_write != NULL)
1473         return -1;
1474     dcr->opaque = opaque;
1475     dcr->dcr_read = dcr_read;
1476     dcr->dcr_write = dcr_write;
1477 
1478     return 0;
1479 }
1480 
1481 int ppc_dcr_init (CPUPPCState *env, int (*read_error)(int dcrn),
1482                   int (*write_error)(int dcrn))
1483 {
1484     ppc_dcr_t *dcr_env;
1485 
1486     dcr_env = g_malloc0(sizeof(ppc_dcr_t));
1487     dcr_env->read_error = read_error;
1488     dcr_env->write_error = write_error;
1489     env->dcr_env = dcr_env;
1490 
1491     return 0;
1492 }
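/*
 * Illustrative sketch (hypothetical device, DCR number and callbacks made
 * up for the example): an embedded SoC model calls ppc_dcr_init() once and
 * then registers a handler pair for each DCR it implements:
 *
 *     static uint32_t my_dcr_read(void *opaque, int dcrn) { return 0; }
 *     static void my_dcr_write(void *opaque, int dcrn, uint32_t val) { }
 *
 *     ppc_dcr_init(env, NULL, NULL);
 *     ppc_dcr_register(env, 0x0b0, my_dev, &my_dcr_read, &my_dcr_write);
 *
 * Guest mfdcr/mtdcr accesses then reach the callbacks through
 * ppc_dcr_read() and ppc_dcr_write() above.
 */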
1493 
1494 /*****************************************************************************/
1495 
1496 int ppc_cpu_pir(PowerPCCPU *cpu)
1497 {
1498     CPUPPCState *env = &cpu->env;
1499     return env->spr_cb[SPR_PIR].default_value;
1500 }
1501 
1502 PowerPCCPU *ppc_get_vcpu_by_pir(int pir)
1503 {
1504     CPUState *cs;
1505 
1506     CPU_FOREACH(cs) {
1507         PowerPCCPU *cpu = POWERPC_CPU(cs);
1508 
1509         if (ppc_cpu_pir(cpu) == pir) {
1510             return cpu;
1511         }
1512     }
1513 
1514     return NULL;
1515 }
1516 
1517 void ppc_irq_reset(PowerPCCPU *cpu)
1518 {
1519     CPUPPCState *env = &cpu->env;
1520 
1521     env->irq_input_state = 0;
1522     kvmppc_set_interrupt(cpu, PPC_INTERRUPT_EXT, 0);
1523 }
1524