xref: /openbmc/qemu/hw/ppc/ppc.c (revision 8e6fe6b8)
1 /*
2  * QEMU generic PowerPC hardware System Emulator
3  *
4  * Copyright (c) 2003-2007 Jocelyn Mayer
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to deal
8  * in the Software without restriction, including without limitation the rights
9  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10  * copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22  * THE SOFTWARE.
23  */
24 #include "qemu/osdep.h"
25 #include "cpu.h"
26 #include "hw/hw.h"
27 #include "hw/ppc/ppc.h"
28 #include "hw/ppc/ppc_e500.h"
29 #include "qemu/timer.h"
30 #include "sysemu/sysemu.h"
31 #include "sysemu/cpus.h"
32 #include "qemu/log.h"
33 #include "qemu/error-report.h"
34 #include "sysemu/kvm.h"
35 #include "kvm_ppc.h"
36 #include "trace.h"
37 
38 //#define PPC_DEBUG_IRQ
39 //#define PPC_DEBUG_TB
40 
41 #ifdef PPC_DEBUG_IRQ
42 #  define LOG_IRQ(...) qemu_log_mask(CPU_LOG_INT, ## __VA_ARGS__)
43 #else
44 #  define LOG_IRQ(...) do { } while (0)
45 #endif
46 
47 
48 #ifdef PPC_DEBUG_TB
49 #  define LOG_TB(...) qemu_log(__VA_ARGS__)
50 #else
51 #  define LOG_TB(...) do { } while (0)
52 #endif
53 
54 static void cpu_ppc_tb_stop (CPUPPCState *env);
55 static void cpu_ppc_tb_start (CPUPPCState *env);
56 
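/*
 * Raise (level != 0) or lower one of the PPC_INTERRUPT_* sources: the bit
 * in env->pending_interrupts is updated and CPU_INTERRUPT_HARD is asserted
 * while at least one source remains pending, cleared once none are.  The
 * iothread lock is taken here if the caller does not already hold it.
 */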
57 void ppc_set_irq(PowerPCCPU *cpu, int n_IRQ, int level)
58 {
59     CPUState *cs = CPU(cpu);
60     CPUPPCState *env = &cpu->env;
61     unsigned int old_pending;
62     bool locked = false;
63 
64     /* We may already have the BQL if coming from the reset path */
65     if (!qemu_mutex_iothread_locked()) {
66         locked = true;
67         qemu_mutex_lock_iothread();
68     }
69 
70     old_pending = env->pending_interrupts;
71 
72     if (level) {
73         env->pending_interrupts |= 1 << n_IRQ;
74         cpu_interrupt(cs, CPU_INTERRUPT_HARD);
75     } else {
76         env->pending_interrupts &= ~(1 << n_IRQ);
77         if (env->pending_interrupts == 0) {
78             cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
79         }
80     }
81 
82     if (old_pending != env->pending_interrupts) {
83 #ifdef CONFIG_KVM
84         kvmppc_set_interrupt(cpu, n_IRQ, level);
85 #endif
86     }
87 
88 
89     LOG_IRQ("%s: %p n_IRQ %d level %d => pending %08" PRIx32
90                 " req %08x\n", __func__, env, n_IRQ, level,
91                 env->pending_interrupts, CPU(cpu)->interrupt_request);
92 
93     if (locked) {
94         qemu_mutex_unlock_iothread();
95     }
96 }
97 
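/*
 * The per-family "internal IRQ controller" handlers below map board-level
 * input pins onto PPC_INTERRUPT_* sources.  The current level of each pin
 * is cached in env->irq_input_state so that repeated writes of the same
 * level are ignored.
 */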
98 /* PowerPC 6xx / 7xx internal IRQ controller */
99 static void ppc6xx_set_irq(void *opaque, int pin, int level)
100 {
101     PowerPCCPU *cpu = opaque;
102     CPUPPCState *env = &cpu->env;
103     int cur_level;
104 
105     LOG_IRQ("%s: env %p pin %d level %d\n", __func__,
106                 env, pin, level);
107     cur_level = (env->irq_input_state >> pin) & 1;
108     /* Don't generate spurious events */
109     if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
110         CPUState *cs = CPU(cpu);
111 
112         switch (pin) {
113         case PPC6xx_INPUT_TBEN:
114             /* Level sensitive - active high */
115             LOG_IRQ("%s: %s the time base\n",
116                         __func__, level ? "start" : "stop");
117             if (level) {
118                 cpu_ppc_tb_start(env);
119             } else {
120                 cpu_ppc_tb_stop(env);
121             }
            break;
122         case PPC6xx_INPUT_INT:
123             /* Level sensitive - active high */
124             LOG_IRQ("%s: set the external IRQ state to %d\n",
125                         __func__, level);
126             ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
127             break;
128         case PPC6xx_INPUT_SMI:
129             /* Level sensitive - active high */
130             LOG_IRQ("%s: set the SMI IRQ state to %d\n",
131                         __func__, level);
132             ppc_set_irq(cpu, PPC_INTERRUPT_SMI, level);
133             break;
134         case PPC6xx_INPUT_MCP:
135             /* Negative edge sensitive */
136             /* XXX: TODO: actual reaction may depend on HID0 status
137              *            603/604/740/750: check HID0[EMCP]
138              */
139             if (cur_level == 1 && level == 0) {
140                 LOG_IRQ("%s: raise machine check state\n",
141                             __func__);
142                 ppc_set_irq(cpu, PPC_INTERRUPT_MCK, 1);
143             }
144             break;
145         case PPC6xx_INPUT_CKSTP_IN:
146             /* Level sensitive - active low */
147             /* XXX: TODO: relay the signal to CKSTP_OUT pin */
148             /* XXX: Note that the only way to restart the CPU is to reset it */
149             if (level) {
150                 LOG_IRQ("%s: stop the CPU\n", __func__);
151                 cs->halted = 1;
152             }
153             break;
154         case PPC6xx_INPUT_HRESET:
155             /* Level sensitive - active low */
156             if (level) {
157                 LOG_IRQ("%s: reset the CPU\n", __func__);
158                 cpu_interrupt(cs, CPU_INTERRUPT_RESET);
159             }
160             break;
161         case PPC6xx_INPUT_SRESET:
162             LOG_IRQ("%s: set the RESET IRQ state to %d\n",
163                         __func__, level);
164             ppc_set_irq(cpu, PPC_INTERRUPT_RESET, level);
165             break;
166         default:
167             /* Unknown pin - do nothing */
168             LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin);
169             return;
170         }
171         if (level) {
172             env->irq_input_state |= 1 << pin;
173         } else {
174             env->irq_input_state &= ~(1 << pin);
        }
175     }
176 }
177 
178 void ppc6xx_irq_init(PowerPCCPU *cpu)
179 {
180     CPUPPCState *env = &cpu->env;
181 
182     env->irq_inputs = (void **)qemu_allocate_irqs(&ppc6xx_set_irq, cpu,
183                                                   PPC6xx_INPUT_NB);
184 }
185 
186 #if defined(TARGET_PPC64)
187 /* PowerPC 970 internal IRQ controller */
188 static void ppc970_set_irq(void *opaque, int pin, int level)
189 {
190     PowerPCCPU *cpu = opaque;
191     CPUPPCState *env = &cpu->env;
192     int cur_level;
193 
194     LOG_IRQ("%s: env %p pin %d level %d\n", __func__,
195                 env, pin, level);
196     cur_level = (env->irq_input_state >> pin) & 1;
197     /* Don't generate spurious events */
198     if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
199         CPUState *cs = CPU(cpu);
200 
201         switch (pin) {
202         case PPC970_INPUT_INT:
203             /* Level sensitive - active high */
204             LOG_IRQ("%s: set the external IRQ state to %d\n",
205                         __func__, level);
206             ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
207             break;
208         case PPC970_INPUT_THINT:
209             /* Level sensitive - active high */
210             LOG_IRQ("%s: set the THERM IRQ state to %d\n", __func__,
211                         level);
212             ppc_set_irq(cpu, PPC_INTERRUPT_THERM, level);
213             break;
214         case PPC970_INPUT_MCP:
215             /* Negative edge sensitive */
216             /* XXX: TODO: actual reaction may depend on HID0 status
217              *            603/604/740/750: check HID0[EMCP]
218              */
219             if (cur_level == 1 && level == 0) {
220                 LOG_IRQ("%s: raise machine check state\n",
221                             __func__);
222                 ppc_set_irq(cpu, PPC_INTERRUPT_MCK, 1);
223             }
224             break;
225         case PPC970_INPUT_CKSTP:
226             /* Level sensitive - active low */
227             /* XXX: TODO: relay the signal to CKSTP_OUT pin */
228             if (level) {
229                 LOG_IRQ("%s: stop the CPU\n", __func__);
230                 cs->halted = 1;
231             } else {
232                 LOG_IRQ("%s: restart the CPU\n", __func__);
233                 cs->halted = 0;
234                 qemu_cpu_kick(cs);
235             }
236             break;
237         case PPC970_INPUT_HRESET:
238             /* Level sensitive - active low */
239             if (level) {
240                 cpu_interrupt(cs, CPU_INTERRUPT_RESET);
241             }
242             break;
243         case PPC970_INPUT_SRESET:
244             LOG_IRQ("%s: set the RESET IRQ state to %d\n",
245                         __func__, level);
246             ppc_set_irq(cpu, PPC_INTERRUPT_RESET, level);
247             break;
248         case PPC970_INPUT_TBEN:
249             LOG_IRQ("%s: set the TBEN state to %d\n", __func__,
250                         level);
251             /* XXX: TODO */
252             break;
253         default:
254             /* Unknown pin - do nothing */
255             LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin);
256             return;
257         }
258         if (level) {
259             env->irq_input_state |= 1 << pin;
260         } else {
261             env->irq_input_state &= ~(1 << pin);
        }
262     }
263 }
264 
265 void ppc970_irq_init(PowerPCCPU *cpu)
266 {
267     CPUPPCState *env = &cpu->env;
268 
269     env->irq_inputs = (void **)qemu_allocate_irqs(&ppc970_set_irq, cpu,
270                                                   PPC970_INPUT_NB);
271 }
272 
273 /* POWER7 internal IRQ controller */
274 static void power7_set_irq(void *opaque, int pin, int level)
275 {
276     PowerPCCPU *cpu = opaque;
277     CPUPPCState *env = &cpu->env;
278 
279     LOG_IRQ("%s: env %p pin %d level %d\n", __func__,
280                 env, pin, level);
281 
282     switch (pin) {
283     case POWER7_INPUT_INT:
284         /* Level sensitive - active high */
285         LOG_IRQ("%s: set the external IRQ state to %d\n",
286                 __func__, level);
287         ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
288         break;
289     default:
290         /* Unknown pin - do nothing */
291         LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin);
292         return;
293     }
294     if (level) {
295         env->irq_input_state |= 1 << pin;
296     } else {
297         env->irq_input_state &= ~(1 << pin);
298     }
299 }
300 
301 void ppcPOWER7_irq_init(PowerPCCPU *cpu)
302 {
303     CPUPPCState *env = &cpu->env;
304 
305     env->irq_inputs = (void **)qemu_allocate_irqs(&power7_set_irq, cpu,
306                                                   POWER7_INPUT_NB);
307 }
308 
309 /* POWER9 internal IRQ controller */
310 static void power9_set_irq(void *opaque, int pin, int level)
311 {
312     PowerPCCPU *cpu = opaque;
313     CPUPPCState *env = &cpu->env;
314 
315     LOG_IRQ("%s: env %p pin %d level %d\n", __func__,
316                 env, pin, level);
317 
318     switch (pin) {
319     case POWER9_INPUT_INT:
320         /* Level sensitive - active high */
321         LOG_IRQ("%s: set the external IRQ state to %d\n",
322                 __func__, level);
323         ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
324         break;
325     case POWER9_INPUT_HINT:
326         /* Level sensitive - active high */
327         LOG_IRQ("%s: set the HV external IRQ state to %d\n",
328                 __func__, level);
329         ppc_set_irq(cpu, PPC_INTERRUPT_HVIRT, level);
330         break;
331     default:
332         /* Unknown pin - do nothing */
333         LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin);
334         return;
335     }
336     if (level) {
337         env->irq_input_state |= 1 << pin;
338     } else {
339         env->irq_input_state &= ~(1 << pin);
340     }
341 }
342 
343 void ppcPOWER9_irq_init(PowerPCCPU *cpu)
344 {
345     CPUPPCState *env = &cpu->env;
346 
347     env->irq_inputs = (void **)qemu_allocate_irqs(&power9_set_irq, cpu,
348                                                   POWER9_INPUT_NB);
349 }
350 #endif /* defined(TARGET_PPC64) */
351 
352 void ppc40x_core_reset(PowerPCCPU *cpu)
353 {
354     CPUPPCState *env = &cpu->env;
355     target_ulong dbsr;
356 
357     qemu_log_mask(CPU_LOG_RESET, "Reset PowerPC core\n");
358     cpu_interrupt(CPU(cpu), CPU_INTERRUPT_RESET);
359     dbsr = env->spr[SPR_40x_DBSR];
360     dbsr &= ~0x00000300;
361     dbsr |= 0x00000100;
362     env->spr[SPR_40x_DBSR] = dbsr;
363 }
364 
365 void ppc40x_chip_reset(PowerPCCPU *cpu)
366 {
367     CPUPPCState *env = &cpu->env;
368     target_ulong dbsr;
369 
370     qemu_log_mask(CPU_LOG_RESET, "Reset PowerPC chip\n");
371     cpu_interrupt(CPU(cpu), CPU_INTERRUPT_RESET);
372     /* XXX: TODO reset all internal peripherals */
373     dbsr = env->spr[SPR_40x_DBSR];
374     dbsr &= ~0x00000300;
375     dbsr |= 0x00000200;
376     env->spr[SPR_40x_DBSR] = dbsr;
377 }
378 
379 void ppc40x_system_reset(PowerPCCPU *cpu)
380 {
381     qemu_log_mask(CPU_LOG_RESET, "Reset PowerPC system\n");
382     qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
383 }
384 
385 void store_40x_dbcr0(CPUPPCState *env, uint32_t val)
386 {
387     PowerPCCPU *cpu = env_archcpu(env);
388 
389     switch ((val >> 28) & 0x3) {
390     case 0x0:
391         /* No action */
392         break;
393     case 0x1:
394         /* Core reset */
395         ppc40x_core_reset(cpu);
396         break;
397     case 0x2:
398         /* Chip reset */
399         ppc40x_chip_reset(cpu);
400         break;
401     case 0x3:
402         /* System reset */
403         ppc40x_system_reset(cpu);
404         break;
405     }
406 }
407 
408 /* PowerPC 40x internal IRQ controller */
409 static void ppc40x_set_irq(void *opaque, int pin, int level)
410 {
411     PowerPCCPU *cpu = opaque;
412     CPUPPCState *env = &cpu->env;
413     int cur_level;
414 
415     LOG_IRQ("%s: env %p pin %d level %d\n", __func__,
416                 env, pin, level);
417     cur_level = (env->irq_input_state >> pin) & 1;
418     /* Don't generate spurious events */
419     if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
420         CPUState *cs = CPU(cpu);
421 
422         switch (pin) {
423         case PPC40x_INPUT_RESET_SYS:
424             if (level) {
425                 LOG_IRQ("%s: reset the PowerPC system\n",
426                             __func__);
427                 ppc40x_system_reset(cpu);
428             }
429             break;
430         case PPC40x_INPUT_RESET_CHIP:
431             if (level) {
432                 LOG_IRQ("%s: reset the PowerPC chip\n", __func__);
433                 ppc40x_chip_reset(cpu);
434             }
435             break;
436         case PPC40x_INPUT_RESET_CORE:
437             /* XXX: TODO: update DBSR[MRR] */
438             if (level) {
439                 LOG_IRQ("%s: reset the PowerPC core\n", __func__);
440                 ppc40x_core_reset(cpu);
441             }
442             break;
443         case PPC40x_INPUT_CINT:
444             /* Level sensitive - active high */
445             LOG_IRQ("%s: set the critical IRQ state to %d\n",
446                         __func__, level);
447             ppc_set_irq(cpu, PPC_INTERRUPT_CEXT, level);
448             break;
449         case PPC40x_INPUT_INT:
450             /* Level sensitive - active high */
451             LOG_IRQ("%s: set the external IRQ state to %d\n",
452                         __func__, level);
453             ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
454             break;
455         case PPC40x_INPUT_HALT:
456             /* Level sensitive - active low */
457             if (level) {
458                 LOG_IRQ("%s: stop the CPU\n", __func__);
459                 cs->halted = 1;
460             } else {
461                 LOG_IRQ("%s: restart the CPU\n", __func__);
462                 cs->halted = 0;
463                 qemu_cpu_kick(cs);
464             }
465             break;
466         case PPC40x_INPUT_DEBUG:
467             /* Level sensitive - active high */
468             LOG_IRQ("%s: set the debug pin state to %d\n",
469                         __func__, level);
470             ppc_set_irq(cpu, PPC_INTERRUPT_DEBUG, level);
471             break;
472         default:
473             /* Unknown pin - do nothing */
474             LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin);
475             return;
476         }
477         if (level) {
478             env->irq_input_state |= 1 << pin;
479         } else {
480             env->irq_input_state &= ~(1 << pin);
        }
481     }
482 }
483 
484 void ppc40x_irq_init(PowerPCCPU *cpu)
485 {
486     CPUPPCState *env = &cpu->env;
487 
488     env->irq_inputs = (void **)qemu_allocate_irqs(&ppc40x_set_irq,
489                                                   cpu, PPC40x_INPUT_NB);
490 }
491 
492 /* PowerPC E500 internal IRQ controller */
493 static void ppce500_set_irq(void *opaque, int pin, int level)
494 {
495     PowerPCCPU *cpu = opaque;
496     CPUPPCState *env = &cpu->env;
497     int cur_level;
498 
499     LOG_IRQ("%s: env %p pin %d level %d\n", __func__,
500                 env, pin, level);
501     cur_level = (env->irq_input_state >> pin) & 1;
502     /* Don't generate spurious events */
503     if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
504         switch (pin) {
505         case PPCE500_INPUT_MCK:
506             if (level) {
507                 LOG_IRQ("%s: reset the PowerPC system\n",
508                             __func__);
509                 qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
510             }
511             break;
512         case PPCE500_INPUT_RESET_CORE:
513             if (level) {
514                 LOG_IRQ("%s: reset the PowerPC core\n", __func__);
515                 ppc_set_irq(cpu, PPC_INTERRUPT_MCK, level);
516             }
517             break;
518         case PPCE500_INPUT_CINT:
519             /* Level sensitive - active high */
520             LOG_IRQ("%s: set the critical IRQ state to %d\n",
521                         __func__, level);
522             ppc_set_irq(cpu, PPC_INTERRUPT_CEXT, level);
523             break;
524         case PPCE500_INPUT_INT:
525             /* Level sensitive - active high */
526             LOG_IRQ("%s: set the core IRQ state to %d\n",
527                         __func__, level);
528             ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
529             break;
530         case PPCE500_INPUT_DEBUG:
531             /* Level sensitive - active high */
532             LOG_IRQ("%s: set the debug pin state to %d\n",
533                         __func__, level);
534             ppc_set_irq(cpu, PPC_INTERRUPT_DEBUG, level);
535             break;
536         default:
537             /* Unknown pin - do nothing */
538             LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin);
539             return;
540         }
541         if (level) {
542             env->irq_input_state |= 1 << pin;
543         } else {
544             env->irq_input_state &= ~(1 << pin);
        }
545     }
546 }
547 
548 void ppce500_irq_init(PowerPCCPU *cpu)
549 {
550     CPUPPCState *env = &cpu->env;
551 
552     env->irq_inputs = (void **)qemu_allocate_irqs(&ppce500_set_irq,
553                                                   cpu, PPCE500_INPUT_NB);
554 }
555 
556 /* Enable or disable the E500 EPR capability */
557 void ppce500_set_mpic_proxy(bool enabled)
558 {
559     CPUState *cs;
560 
561     CPU_FOREACH(cs) {
562         PowerPCCPU *cpu = POWERPC_CPU(cs);
563 
564         cpu->env.mpic_proxy = enabled;
565         if (kvm_enabled()) {
566             kvmppc_set_mpic_proxy(cpu, enabled);
567         }
568     }
569 }
570 
571 /*****************************************************************************/
572 /* PowerPC time base and decrementer emulation */
573 
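/*
 * The time base is derived from the virtual clock:
 *   TB = vmclk(ns) * tb_freq / NANOSECONDS_PER_SECOND + tb_offset
 */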
574 uint64_t cpu_ppc_get_tb(ppc_tb_t *tb_env, uint64_t vmclk, int64_t tb_offset)
575 {
576     /* TB time in tb periods */
577     return muldiv64(vmclk, tb_env->tb_freq, NANOSECONDS_PER_SECOND) + tb_offset;
578 }
579 
580 uint64_t cpu_ppc_load_tbl (CPUPPCState *env)
581 {
582     ppc_tb_t *tb_env = env->tb_env;
583     uint64_t tb;
584 
585     if (kvm_enabled()) {
586         return env->spr[SPR_TBL];
587     }
588 
589     tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
590     LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb);
591 
592     return tb;
593 }
594 
595 static inline uint32_t _cpu_ppc_load_tbu(CPUPPCState *env)
596 {
597     ppc_tb_t *tb_env = env->tb_env;
598     uint64_t tb;
599 
600     tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
601     LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb);
602 
603     return tb >> 32;
604 }
605 
606 uint32_t cpu_ppc_load_tbu (CPUPPCState *env)
607 {
608     if (kvm_enabled()) {
609         return env->spr[SPR_TBU];
610     }
611 
612     return _cpu_ppc_load_tbu(env);
613 }
614 
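/*
 * Guest stores to the TB only adjust tb_offset (or atb_offset) so that
 * cpu_ppc_get_tb() returns the requested value from now on; the underlying
 * virtual clock itself is never modified.
 */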
615 static inline void cpu_ppc_store_tb(ppc_tb_t *tb_env, uint64_t vmclk,
616                                     int64_t *tb_offsetp, uint64_t value)
617 {
618     *tb_offsetp = value -
619         muldiv64(vmclk, tb_env->tb_freq, NANOSECONDS_PER_SECOND);
620 
621     LOG_TB("%s: tb %016" PRIx64 " offset %08" PRIx64 "\n",
622                 __func__, value, *tb_offsetp);
623 }
624 
625 void cpu_ppc_store_tbl (CPUPPCState *env, uint32_t value)
626 {
627     ppc_tb_t *tb_env = env->tb_env;
628     uint64_t tb;
629 
630     tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
631     tb &= 0xFFFFFFFF00000000ULL;
632     cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
633                      &tb_env->tb_offset, tb | (uint64_t)value);
634 }
635 
636 static inline void _cpu_ppc_store_tbu(CPUPPCState *env, uint32_t value)
637 {
638     ppc_tb_t *tb_env = env->tb_env;
639     uint64_t tb;
640 
641     tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
642     tb &= 0x00000000FFFFFFFFULL;
643     cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
644                      &tb_env->tb_offset, ((uint64_t)value << 32) | tb);
645 }
646 
647 void cpu_ppc_store_tbu (CPUPPCState *env, uint32_t value)
648 {
649     _cpu_ppc_store_tbu(env, value);
650 }
651 
652 uint64_t cpu_ppc_load_atbl (CPUPPCState *env)
653 {
654     ppc_tb_t *tb_env = env->tb_env;
655     uint64_t tb;
656 
657     tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
658     LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb);
659 
660     return tb;
661 }
662 
663 uint32_t cpu_ppc_load_atbu (CPUPPCState *env)
664 {
665     ppc_tb_t *tb_env = env->tb_env;
666     uint64_t tb;
667 
668     tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
669     LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb);
670 
671     return tb >> 32;
672 }
673 
674 void cpu_ppc_store_atbl (CPUPPCState *env, uint32_t value)
675 {
676     ppc_tb_t *tb_env = env->tb_env;
677     uint64_t tb;
678 
679     tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
680     tb &= 0xFFFFFFFF00000000ULL;
681     cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
682                      &tb_env->atb_offset, tb | (uint64_t)value);
683 }
684 
685 void cpu_ppc_store_atbu (CPUPPCState *env, uint32_t value)
686 {
687     ppc_tb_t *tb_env = env->tb_env;
688     uint64_t tb;
689 
690     tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
691     tb &= 0x00000000FFFFFFFFULL;
692     cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
693                      &tb_env->atb_offset, ((uint64_t)value << 32) | tb);
694 }
695 
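/*
 * Freezing the time base folds the current TB/ATB values back into their
 * offsets and then sets tb_freq to 0, so reads keep returning the frozen
 * values until cpu_ppc_tb_start() restores the frequency from decr_freq.
 */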
696 static void cpu_ppc_tb_stop (CPUPPCState *env)
697 {
698     ppc_tb_t *tb_env = env->tb_env;
699     uint64_t tb, atb, vmclk;
700 
701     /* If the time base is already frozen, do nothing */
702     if (tb_env->tb_freq != 0) {
703         vmclk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
704         /* Get the time base */
705         tb = cpu_ppc_get_tb(tb_env, vmclk, tb_env->tb_offset);
706         /* Get the alternate time base */
707         atb = cpu_ppc_get_tb(tb_env, vmclk, tb_env->atb_offset);
708         /* Store the time base value (i.e. compute the current offset) */
709         cpu_ppc_store_tb(tb_env, vmclk, &tb_env->tb_offset, tb);
710         /* Store the alternate time base value (compute the current offset) */
711         cpu_ppc_store_tb(tb_env, vmclk, &tb_env->atb_offset, atb);
712         /* Set the time base frequency to zero */
713         tb_env->tb_freq = 0;
714         /* Now, the time bases are frozen to tb_offset / atb_offset value */
715     }
716 }
717 
718 static void cpu_ppc_tb_start (CPUPPCState *env)
719 {
720     ppc_tb_t *tb_env = env->tb_env;
721     uint64_t tb, atb, vmclk;
722 
723     /* If the time base is not frozen, do nothing */
724     if (tb_env->tb_freq == 0) {
725         vmclk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
726         /* Get the time base from tb_offset */
727         tb = tb_env->tb_offset;
728         /* Get the alternate time base from atb_offset */
729         atb = tb_env->atb_offset;
730         /* Restore the tb frequency from the decrementer frequency */
731         tb_env->tb_freq = tb_env->decr_freq;
732         /* Store the time base value */
733         cpu_ppc_store_tb(tb_env, vmclk, &tb_env->tb_offset, tb);
734         /* Store the alternate time base value */
735         cpu_ppc_store_tb(tb_env, vmclk, &tb_env->atb_offset, atb);
736     }
737 }
738 
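/*
 * True when the decrementer underflow is purely edge-triggered (i.e.
 * PPC_DECR_UNDERFLOW_TRIGGERED without PPC_DECR_UNDERFLOW_LEVEL); callers
 * use this to decide whether the DECR interrupt must be cleared when it is
 * delivered.
 */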
739 bool ppc_decr_clear_on_delivery(CPUPPCState *env)
740 {
741     ppc_tb_t *tb_env = env->tb_env;
742     int flags = PPC_DECR_UNDERFLOW_TRIGGERED | PPC_DECR_UNDERFLOW_LEVEL;
743     return ((tb_env->flags & flags) == PPC_DECR_UNDERFLOW_TRIGGERED);
744 }
745 
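/*
 * Reconstruct the current DECR value from the programmed expiry time:
 * the nanoseconds remaining until 'next' are converted back into
 * decrementer ticks.  Past the deadline the value goes negative, except
 * on BookE where it saturates at zero.
 */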
746 static inline int64_t _cpu_ppc_load_decr(CPUPPCState *env, uint64_t next)
747 {
748     ppc_tb_t *tb_env = env->tb_env;
749     int64_t decr, diff;
750 
751     diff = next - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
752     if (diff >= 0) {
753         decr = muldiv64(diff, tb_env->decr_freq, NANOSECONDS_PER_SECOND);
754     } else if (tb_env->flags & PPC_TIMER_BOOKE) {
755         decr = 0;
756     } else {
757         decr = -muldiv64(-diff, tb_env->decr_freq, NANOSECONDS_PER_SECOND);
758     }
759     LOG_TB("%s: %016" PRIx64 "\n", __func__, decr);
760 
761     return decr;
762 }
763 
764 target_ulong cpu_ppc_load_decr(CPUPPCState *env)
765 {
766     ppc_tb_t *tb_env = env->tb_env;
767     uint64_t decr;
768 
769     if (kvm_enabled()) {
770         return env->spr[SPR_DECR];
771     }
772 
773     decr = _cpu_ppc_load_decr(env, tb_env->decr_next);
774 
775     /*
776      * If the large decrementer is enabled then the decrementer is sign
777      * extended to 64 bits, otherwise it is a 32-bit value.
778      */
779     if (env->spr[SPR_LPCR] & LPCR_LD) {
780         return decr;
781     }
782     return (uint32_t) decr;
783 }
784 
785 target_ulong cpu_ppc_load_hdecr(CPUPPCState *env)
786 {
787     PowerPCCPU *cpu = env_archcpu(env);
788     PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
789     ppc_tb_t *tb_env = env->tb_env;
790     uint64_t hdecr;
791 
792     hdecr = _cpu_ppc_load_decr(env, tb_env->hdecr_next);
793 
794     /*
795      * If we have a large decrementer (POWER9 or later) then hdecr is sign
796      * extended to 64 bits, otherwise it is 32 bits.
797      */
798     if (pcc->lrg_decr_bits > 32) {
799         return hdecr;
800     }
801     return (uint32_t) hdecr;
802 }
803 
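/*
 * In this model the PURR simply ticks at the time base frequency: the
 * value is the last stored purr_load plus the virtual time elapsed since
 * purr_start, scaled by tb_freq.
 */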
804 uint64_t cpu_ppc_load_purr (CPUPPCState *env)
805 {
806     ppc_tb_t *tb_env = env->tb_env;
807     uint64_t diff;
808 
809     diff = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - tb_env->purr_start;
810 
811     return tb_env->purr_load +
812         muldiv64(diff, tb_env->tb_freq, NANOSECONDS_PER_SECOND);
813 }
814 
815 /* When the decrementer expires,
816  * all we need to do is generate or queue a CPU exception
817  */
818 static inline void cpu_ppc_decr_excp(PowerPCCPU *cpu)
819 {
820     /* Raise it */
821     LOG_TB("raise decrementer exception\n");
822     ppc_set_irq(cpu, PPC_INTERRUPT_DECR, 1);
823 }
824 
825 static inline void cpu_ppc_decr_lower(PowerPCCPU *cpu)
826 {
827     ppc_set_irq(cpu, PPC_INTERRUPT_DECR, 0);
828 }
829 
830 static inline void cpu_ppc_hdecr_excp(PowerPCCPU *cpu)
831 {
832     CPUPPCState *env = &cpu->env;
833 
834     /* Raise it */
835     LOG_TB("raise hv decrementer exception\n");
836 
837     /* The architecture specifies that we don't deliver HDEC
838      * interrupts in a PM state. Not only do they not cause a
839      * wakeup, they also get effectively discarded.
840      */
841     if (!env->resume_as_sreset) {
842         ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 1);
843     }
844 }
845 
846 static inline void cpu_ppc_hdecr_lower(PowerPCCPU *cpu)
847 {
848     ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 0);
849 }
850 
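/*
 * Common DECR/HDECR store path: truncate and sign-extend the new value,
 * raise the exception immediately when the new value already satisfies the
 * underflow condition, lower a level-based exception when it no longer
 * does, and otherwise arm the QEMU timer for the next underflow.
 */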
851 static void __cpu_ppc_store_decr(PowerPCCPU *cpu, uint64_t *nextp,
852                                  QEMUTimer *timer,
853                                  void (*raise_excp)(void *),
854                                  void (*lower_excp)(PowerPCCPU *),
855                                  target_ulong decr, target_ulong value,
856                                  int nr_bits)
857 {
858     CPUPPCState *env = &cpu->env;
859     ppc_tb_t *tb_env = env->tb_env;
860     uint64_t now, next;
861     bool negative;
862 
863     /* Truncate value to nr_bits and sign extend for simplicity */
864     value &= ((1ULL << nr_bits) - 1);
865     negative = !!(value & (1ULL << (nr_bits - 1)));
866     if (negative) {
867         value |= (0xFFFFFFFFULL << nr_bits);
868     }
869 
870     LOG_TB("%s: " TARGET_FMT_lx " => " TARGET_FMT_lx "\n", __func__,
871                 decr, value);
872 
873     if (kvm_enabled()) {
874         /* KVM handles decrementer exceptions; we don't need our own timer */
875         return;
876     }
877 
878     /*
879      * Going from 2 -> 1, 1 -> 0 or 0 -> -1 is the event to generate a DEC
880      * interrupt.
881      *
882      * If we get a really small DEC value, we can assume that by the time we
883      * handle it the interrupt is already due, so inject it right away.
884      *
885      * On MSB level based DEC implementations the MSB always means the interrupt
886      * is pending, so raise it on those.
887      *
888      * On MSB edge based DEC implementations the MSB going from 0 -> 1 triggers
889      * an edge interrupt, so raise it here too.
890      */
891     if ((value < 3) ||
892         ((tb_env->flags & PPC_DECR_UNDERFLOW_LEVEL) && negative) ||
893         ((tb_env->flags & PPC_DECR_UNDERFLOW_TRIGGERED) && negative
894           && !(decr & (1ULL << (nr_bits - 1))))) {
895         (*raise_excp)(cpu);
896         return;
897     }
898 
899     /* On MSB level based systems a 0 for the MSB stops interrupt delivery */
900     if (!negative && (tb_env->flags & PPC_DECR_UNDERFLOW_LEVEL)) {
901         (*lower_excp)(cpu);
902     }
903 
904     /* Calculate the next timer event */
905     now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
906     next = now + muldiv64(value, NANOSECONDS_PER_SECOND, tb_env->decr_freq);
907     *nextp = next;
908 
909     /* Adjust timer */
910     timer_mod(timer, next);
911 }
912 
913 static inline void _cpu_ppc_store_decr(PowerPCCPU *cpu, target_ulong decr,
914                                        target_ulong value, int nr_bits)
915 {
916     ppc_tb_t *tb_env = cpu->env.tb_env;
917 
918     __cpu_ppc_store_decr(cpu, &tb_env->decr_next, tb_env->decr_timer,
919                          tb_env->decr_timer->cb, &cpu_ppc_decr_lower, decr,
920                          value, nr_bits);
921 }
922 
923 void cpu_ppc_store_decr(CPUPPCState *env, target_ulong value)
924 {
925     PowerPCCPU *cpu = env_archcpu(env);
926     PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
927     int nr_bits = 32;
928 
929     if (env->spr[SPR_LPCR] & LPCR_LD) {
930         nr_bits = pcc->lrg_decr_bits;
931     }
932 
933     _cpu_ppc_store_decr(cpu, cpu_ppc_load_decr(env), value, nr_bits);
934 }
935 
936 static void cpu_ppc_decr_cb(void *opaque)
937 {
938     PowerPCCPU *cpu = opaque;
939 
940     cpu_ppc_decr_excp(cpu);
941 }
942 
943 static inline void _cpu_ppc_store_hdecr(PowerPCCPU *cpu, target_ulong hdecr,
944                                         target_ulong value, int nr_bits)
945 {
946     ppc_tb_t *tb_env = cpu->env.tb_env;
947 
948     if (tb_env->hdecr_timer != NULL) {
949         __cpu_ppc_store_decr(cpu, &tb_env->hdecr_next, tb_env->hdecr_timer,
950                              tb_env->hdecr_timer->cb, &cpu_ppc_hdecr_lower,
951                              hdecr, value, nr_bits);
952     }
953 }
954 
955 void cpu_ppc_store_hdecr(CPUPPCState *env, target_ulong value)
956 {
957     PowerPCCPU *cpu = env_archcpu(env);
958     PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
959 
960     _cpu_ppc_store_hdecr(cpu, cpu_ppc_load_hdecr(env), value,
961                          pcc->lrg_decr_bits);
962 }
963 
964 static void cpu_ppc_hdecr_cb(void *opaque)
965 {
966     PowerPCCPU *cpu = opaque;
967 
968     cpu_ppc_hdecr_excp(cpu);
969 }
970 
971 static void cpu_ppc_store_purr(PowerPCCPU *cpu, uint64_t value)
972 {
973     ppc_tb_t *tb_env = cpu->env.tb_env;
974 
975     tb_env->purr_load = value;
976     tb_env->purr_start = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
977 }
978 
979 static void cpu_ppc_set_tb_clk (void *opaque, uint32_t freq)
980 {
981     CPUPPCState *env = opaque;
982     PowerPCCPU *cpu = env_archcpu(env);
983     ppc_tb_t *tb_env = env->tb_env;
984 
985     tb_env->tb_freq = freq;
986     tb_env->decr_freq = freq;
987     /* There is a bug in Linux 2.4 kernels:
988      * if a decrementer exception is pending when the kernel enables MSR[EE]
989      * at startup, it is not ready to handle it...
990      */
991     _cpu_ppc_store_decr(cpu, 0xFFFFFFFF, 0xFFFFFFFF, 32);
992     _cpu_ppc_store_hdecr(cpu, 0xFFFFFFFF, 0xFFFFFFFF, 32);
993     cpu_ppc_store_purr(cpu, 0x0000000000000000ULL);
994 }
995 
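/*
 * Timebase migration: the source records host ticks + tb_offset just
 * before the VM stops, and timebase_load() on the destination recomputes
 * tb_offset against its own host ticks, so the guest time base appears to
 * keep running across the migration downtime.
 */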
996 static void timebase_save(PPCTimebase *tb)
997 {
998     uint64_t ticks = cpu_get_host_ticks();
999     PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu);
1000 
1001     if (!first_ppc_cpu->env.tb_env) {
1002         error_report("No timebase object");
1003         return;
1004     }
1005 
1006     /* no longer used; kept for migration stream compatibility */
1007     tb->time_of_the_day_ns = qemu_clock_get_ns(QEMU_CLOCK_HOST);
1008     /*
1009      * tb_offset is only expected to be changed by QEMU so
1010      * there is no need to update it from KVM here
1011      */
1012     tb->guest_timebase = ticks + first_ppc_cpu->env.tb_env->tb_offset;
1013 }
1014 
1015 static void timebase_load(PPCTimebase *tb)
1016 {
1017     CPUState *cpu;
1018     PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu);
1019     int64_t tb_off_adj, tb_off;
1020     unsigned long freq;
1021 
1022     if (!first_ppc_cpu->env.tb_env) {
1023         error_report("No timebase object");
1024         return;
1025     }
1026 
1027     freq = first_ppc_cpu->env.tb_env->tb_freq;
1028 
1029     tb_off_adj = tb->guest_timebase - cpu_get_host_ticks();
1030 
1031     tb_off = first_ppc_cpu->env.tb_env->tb_offset;
1032     trace_ppc_tb_adjust(tb_off, tb_off_adj, tb_off_adj - tb_off,
1033                         (tb_off_adj - tb_off) / freq);
1034 
1035     /* Set new offset to all CPUs */
1036     CPU_FOREACH(cpu) {
1037         PowerPCCPU *pcpu = POWERPC_CPU(cpu);
1038         pcpu->env.tb_env->tb_offset = tb_off_adj;
1039 #if defined(CONFIG_KVM)
1040         kvm_set_one_reg(cpu, KVM_REG_PPC_TB_OFFSET,
1041                         &pcpu->env.tb_env->tb_offset);
1042 #endif
1043     }
1044 }
1045 
1046 void cpu_ppc_clock_vm_state_change(void *opaque, int running,
1047                                    RunState state)
1048 {
1049     PPCTimebase *tb = opaque;
1050 
1051     if (running) {
1052         timebase_load(tb);
1053     } else {
1054         timebase_save(tb);
1055     }
1056 }
1057 
1058 /*
1059  * When migrating, read the clock just before migration,
1060  * so that the guest clock counts during the events
1061  * between:
1062  *
1063  *  * vm_stop()
1064  *  * pre_save()
1066  *
1067  *  This reduces clock difference on migration from 5s
1068  *  to 0.1s (when max_downtime == 5s), because sending the
1069  *  final pages of memory (which happens between vm_stop()
1070  *  and pre_save()) takes max_downtime.
1071  */
1072 static int timebase_pre_save(void *opaque)
1073 {
1074     PPCTimebase *tb = opaque;
1075 
1076     timebase_save(tb);
1077 
1078     return 0;
1079 }
1080 
1081 const VMStateDescription vmstate_ppc_timebase = {
1082     .name = "timebase",
1083     .version_id = 1,
1084     .minimum_version_id = 1,
1085     .minimum_version_id_old = 1,
1086     .pre_save = timebase_pre_save,
1087     .fields      = (VMStateField []) {
1088         VMSTATE_UINT64(guest_timebase, PPCTimebase),
1089         VMSTATE_INT64(time_of_the_day_ns, PPCTimebase),
1090         VMSTATE_END_OF_LIST()
1091     },
1092 };
1093 
1094 /* Set up (once) the timebase and decrementer frequency (in Hz) */
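/*
 * Allocates the per-CPU ppc_tb_t state, creates the DECR timer (and the
 * HDECR timer when the CPU has an HV mode) and programs the initial
 * frequency.  The returned callback lets board code re-clock the time
 * base later, e.g. (sketch, the frequency value is illustrative):
 *
 *     clk_setup_cb cb = cpu_ppc_tb_init(env, 16 * 1000 * 1000);
 *     ...
 *     cb(env, new_freq);
 */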
1095 clk_setup_cb cpu_ppc_tb_init (CPUPPCState *env, uint32_t freq)
1096 {
1097     PowerPCCPU *cpu = env_archcpu(env);
1098     ppc_tb_t *tb_env;
1099 
1100     tb_env = g_malloc0(sizeof(ppc_tb_t));
1101     env->tb_env = tb_env;
1102     tb_env->flags = PPC_DECR_UNDERFLOW_TRIGGERED;
1103     if (is_book3s_arch2x(env)) {
1104         /* All Book3S 64bit CPUs implement level based DEC logic */
1105         tb_env->flags |= PPC_DECR_UNDERFLOW_LEVEL;
1106     }
1107     /* Create new timer */
1108     tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_ppc_decr_cb, cpu);
1109     if (env->has_hv_mode) {
1110         tb_env->hdecr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_ppc_hdecr_cb,
1111                                                 cpu);
1112     } else {
1113         tb_env->hdecr_timer = NULL;
1114     }
1115     cpu_ppc_set_tb_clk(env, freq);
1116 
1117     return &cpu_ppc_set_tb_clk;
1118 }
1119 
1120 /* Specific helpers for POWER & PowerPC 601 RTC */
1121 void cpu_ppc601_store_rtcu (CPUPPCState *env, uint32_t value)
1122 {
1123     _cpu_ppc_store_tbu(env, value);
1124 }
1125 
1126 uint32_t cpu_ppc601_load_rtcu (CPUPPCState *env)
1127 {
1128     return _cpu_ppc_load_tbu(env);
1129 }
1130 
1131 void cpu_ppc601_store_rtcl (CPUPPCState *env, uint32_t value)
1132 {
1133     cpu_ppc_store_tbl(env, value & 0x3FFFFF80);
1134 }
1135 
1136 uint32_t cpu_ppc601_load_rtcl (CPUPPCState *env)
1137 {
1138     return cpu_ppc_load_tbl(env) & 0x3FFFFF80;
1139 }
1140 
1141 /*****************************************************************************/
1142 /* PowerPC 40x timers */
1143 
1144 /* PIT, FIT & WDT */
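/*
 * The 40x SoCs provide three timers driven from the time base: a
 * programmable interval timer (PIT, reusing tb_env->decr_timer below), a
 * fixed interval timer (FIT) and a watchdog (WDT), all controlled through
 * the TCR and TSR SPRs.
 */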
1145 typedef struct ppc40x_timer_t ppc40x_timer_t;
1146 struct ppc40x_timer_t {
1147     uint64_t pit_reload;  /* PIT auto-reload value        */
1148     uint64_t fit_next;    /* Tick for next FIT interrupt  */
1149     QEMUTimer *fit_timer;
1150     uint64_t wdt_next;    /* Tick for next WDT interrupt  */
1151     QEMUTimer *wdt_timer;
1152 
1153     /* The 405 has a PIT, the 440 has a DECR.  */
1154     unsigned int decr_excp;
1155 };
1156 
1157 /* Fixed interval timer */
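/*
 * The two TCR bits at shift 24 (TCR[FP]) select a FIT period of 2^9, 2^13,
 * 2^17 or 2^21 time base ticks; each expiry sets the TSR bit at shift 26
 * and raises PPC_INTERRUPT_FIT when the enable bit at shift 23 (TCR[FIE])
 * is set.
 */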
1158 static void cpu_4xx_fit_cb (void *opaque)
1159 {
1160     PowerPCCPU *cpu;
1161     CPUPPCState *env;
1162     ppc_tb_t *tb_env;
1163     ppc40x_timer_t *ppc40x_timer;
1164     uint64_t now, next;
1165 
1166     env = opaque;
1167     cpu = env_archcpu(env);
1168     tb_env = env->tb_env;
1169     ppc40x_timer = tb_env->opaque;
1170     now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
1171     switch ((env->spr[SPR_40x_TCR] >> 24) & 0x3) {
1172     case 0:
1173         next = 1 << 9;
1174         break;
1175     case 1:
1176         next = 1 << 13;
1177         break;
1178     case 2:
1179         next = 1 << 17;
1180         break;
1181     case 3:
1182         next = 1 << 21;
1183         break;
1184     default:
1185         /* Cannot occur, but makes gcc happy */
1186         return;
1187     }
1188     next = now + muldiv64(next, NANOSECONDS_PER_SECOND, tb_env->tb_freq);
1189     if (next == now)
1190         next++;
1191     timer_mod(ppc40x_timer->fit_timer, next);
1192     env->spr[SPR_40x_TSR] |= 1 << 26;
1193     if ((env->spr[SPR_40x_TCR] >> 23) & 0x1) {
1194         ppc_set_irq(cpu, PPC_INTERRUPT_FIT, 1);
1195     }
1196     LOG_TB("%s: ir %d TCR " TARGET_FMT_lx " TSR " TARGET_FMT_lx "\n", __func__,
1197            (int)((env->spr[SPR_40x_TCR] >> 23) & 0x1),
1198            env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR]);
1199 }
1200 
1201 /* Programmable interval timer */
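/*
 * (Re)arm or stop the PIT: it is stopped when the reload value is too
 * small, when the enable bit at shift 26 of TCR (PIE) is clear, or on an
 * auto-reload expiry when the bit at shift 22 (ARE) is clear; otherwise
 * the decr timer is programmed pit_reload ticks into the future.
 */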
1202 static void start_stop_pit (CPUPPCState *env, ppc_tb_t *tb_env, int is_excp)
1203 {
1204     ppc40x_timer_t *ppc40x_timer;
1205     uint64_t now, next;
1206 
1207     ppc40x_timer = tb_env->opaque;
1208     if (ppc40x_timer->pit_reload <= 1 ||
1209         !((env->spr[SPR_40x_TCR] >> 26) & 0x1) ||
1210         (is_excp && !((env->spr[SPR_40x_TCR] >> 22) & 0x1))) {
1211         /* Stop PIT */
1212         LOG_TB("%s: stop PIT\n", __func__);
1213         timer_del(tb_env->decr_timer);
1214     } else {
1215         LOG_TB("%s: start PIT %016" PRIx64 "\n",
1216                     __func__, ppc40x_timer->pit_reload);
1217         now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
1218         next = now + muldiv64(ppc40x_timer->pit_reload,
1219                               NANOSECONDS_PER_SECOND, tb_env->decr_freq);
1220         if (is_excp)
1221             next += tb_env->decr_next - now;
1222         if (next == now)
1223             next++;
1224         timer_mod(tb_env->decr_timer, next);
1225         tb_env->decr_next = next;
1226     }
1227 }
1228 
1229 static void cpu_4xx_pit_cb (void *opaque)
1230 {
1231     PowerPCCPU *cpu;
1232     CPUPPCState *env;
1233     ppc_tb_t *tb_env;
1234     ppc40x_timer_t *ppc40x_timer;
1235 
1236     env = opaque;
1237     cpu = env_archcpu(env);
1238     tb_env = env->tb_env;
1239     ppc40x_timer = tb_env->opaque;
1240     env->spr[SPR_40x_TSR] |= 1 << 27;
1241     if ((env->spr[SPR_40x_TCR] >> 26) & 0x1) {
1242         ppc_set_irq(cpu, ppc40x_timer->decr_excp, 1);
1243     }
1244     start_stop_pit(env, tb_env, 1);
1245     LOG_TB("%s: ar %d ir %d TCR " TARGET_FMT_lx " TSR " TARGET_FMT_lx " "
1246            "%016" PRIx64 "\n", __func__,
1247            (int)((env->spr[SPR_40x_TCR] >> 22) & 0x1),
1248            (int)((env->spr[SPR_40x_TCR] >> 26) & 0x1),
1249            env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR],
1250            ppc40x_timer->pit_reload);
1251 }
1252 
1253 /* Watchdog timer */
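/*
 * Each expiry advances a small state machine held in the top TSR bits:
 * first TSR[ENW] is set, then TSR[WIS] (optionally raising
 * PPC_INTERRUPT_WDT), and once both are set the reset action selected by
 * TCR[WRC] is performed.
 */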
1254 static void cpu_4xx_wdt_cb (void *opaque)
1255 {
1256     PowerPCCPU *cpu;
1257     CPUPPCState *env;
1258     ppc_tb_t *tb_env;
1259     ppc40x_timer_t *ppc40x_timer;
1260     uint64_t now, next;
1261 
1262     env = opaque;
1263     cpu = env_archcpu(env);
1264     tb_env = env->tb_env;
1265     ppc40x_timer = tb_env->opaque;
1266     now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
1267     switch ((env->spr[SPR_40x_TCR] >> 30) & 0x3) {
1268     case 0:
1269         next = 1 << 17;
1270         break;
1271     case 1:
1272         next = 1 << 21;
1273         break;
1274     case 2:
1275         next = 1 << 25;
1276         break;
1277     case 3:
1278         next = 1 << 29;
1279         break;
1280     default:
1281         /* Cannot occur, but makes gcc happy */
1282         return;
1283     }
1284     next = now + muldiv64(next, NANOSECONDS_PER_SECOND, tb_env->decr_freq);
1285     if (next == now)
1286         next++;
1287     LOG_TB("%s: TCR " TARGET_FMT_lx " TSR " TARGET_FMT_lx "\n", __func__,
1288            env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR]);
1289     switch ((env->spr[SPR_40x_TSR] >> 30) & 0x3) {
1290     case 0x0:
1291     case 0x1:
1292         timer_mod(ppc40x_timer->wdt_timer, next);
1293         ppc40x_timer->wdt_next = next;
1294         env->spr[SPR_40x_TSR] |= 1U << 31;
1295         break;
1296     case 0x2:
1297         timer_mod(ppc40x_timer->wdt_timer, next);
1298         ppc40x_timer->wdt_next = next;
1299         env->spr[SPR_40x_TSR] |= 1 << 30;
1300         if ((env->spr[SPR_40x_TCR] >> 27) & 0x1) {
1301             ppc_set_irq(cpu, PPC_INTERRUPT_WDT, 1);
1302         }
1303         break;
1304     case 0x3:
1305         env->spr[SPR_40x_TSR] &= ~0x30000000;
1306         env->spr[SPR_40x_TSR] |= env->spr[SPR_40x_TCR] & 0x30000000;
1307         switch ((env->spr[SPR_40x_TCR] >> 28) & 0x3) {
1308         case 0x0:
1309             /* No reset */
1310             break;
1311         case 0x1: /* Core reset */
1312             ppc40x_core_reset(cpu);
1313             break;
1314         case 0x2: /* Chip reset */
1315             ppc40x_chip_reset(cpu);
1316             break;
1317         case 0x3: /* System reset */
1318             ppc40x_system_reset(cpu);
1319             break;
1320         }
1321     }
1322 }
1323 
1324 void store_40x_pit (CPUPPCState *env, target_ulong val)
1325 {
1326     ppc_tb_t *tb_env;
1327     ppc40x_timer_t *ppc40x_timer;
1328 
1329     tb_env = env->tb_env;
1330     ppc40x_timer = tb_env->opaque;
1331     LOG_TB("%s: val " TARGET_FMT_lx "\n", __func__, val);
1332     ppc40x_timer->pit_reload = val;
1333     start_stop_pit(env, tb_env, 0);
1334 }
1335 
1336 target_ulong load_40x_pit (CPUPPCState *env)
1337 {
1338     return cpu_ppc_load_decr(env);
1339 }
1340 
1341 static void ppc_40x_set_tb_clk (void *opaque, uint32_t freq)
1342 {
1343     CPUPPCState *env = opaque;
1344     ppc_tb_t *tb_env = env->tb_env;
1345 
1346     LOG_TB("%s set new frequency to %" PRIu32 "\n", __func__,
1347                 freq);
1348     tb_env->tb_freq = freq;
1349     tb_env->decr_freq = freq;
1350     /* XXX: we should also update all timers */
1351 }
1352 
1353 clk_setup_cb ppc_40x_timers_init (CPUPPCState *env, uint32_t freq,
1354                                   unsigned int decr_excp)
1355 {
1356     ppc_tb_t *tb_env;
1357     ppc40x_timer_t *ppc40x_timer;
1358 
1359     tb_env = g_malloc0(sizeof(ppc_tb_t));
1360     env->tb_env = tb_env;
1361     tb_env->flags = PPC_DECR_UNDERFLOW_TRIGGERED;
1362     ppc40x_timer = g_malloc0(sizeof(ppc40x_timer_t));
1363     tb_env->tb_freq = freq;
1364     tb_env->decr_freq = freq;
1365     tb_env->opaque = ppc40x_timer;
1366     LOG_TB("%s freq %" PRIu32 "\n", __func__, freq);
1367     if (ppc40x_timer != NULL) {
1368         /* We use the decr timer for the PIT */
1369         tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_pit_cb, env);
1370         ppc40x_timer->fit_timer =
1371             timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_fit_cb, env);
1372         ppc40x_timer->wdt_timer =
1373             timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_wdt_cb, env);
1374         ppc40x_timer->decr_excp = decr_excp;
1375     }
1376 
1377     return &ppc_40x_set_tb_clk;
1378 }
1379 
1380 /*****************************************************************************/
1381 /* Embedded PowerPC Device Control Registers */
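/*
 * DCRs are accessed by the guest with mfdcr/mtdcr.  Device models attach
 * per-DCR read/write callbacks with ppc_dcr_register(), e.g. (sketch,
 * names are illustrative):
 *
 *     ppc_dcr_register(env, MY_DCRN, my_dev, &my_dcr_read, &my_dcr_write);
 *
 * Accesses to unassigned DCRs fall back to the optional read_error /
 * write_error hooks installed by ppc_dcr_init().
 */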
1382 typedef struct ppc_dcrn_t ppc_dcrn_t;
1383 struct ppc_dcrn_t {
1384     dcr_read_cb dcr_read;
1385     dcr_write_cb dcr_write;
1386     void *opaque;
1387 };
1388 
1389 /* XXX: on 460, DCR addresses are 32 bits wide,
1390  *      using DCRIPR to get the 22 upper bits of the DCR address
1391  */
1392 #define DCRN_NB 1024
1393 struct ppc_dcr_t {
1394     ppc_dcrn_t dcrn[DCRN_NB];
1395     int (*read_error)(int dcrn);
1396     int (*write_error)(int dcrn);
1397 };
1398 
1399 int ppc_dcr_read (ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp)
1400 {
1401     ppc_dcrn_t *dcr;
1402 
1403     if (dcrn < 0 || dcrn >= DCRN_NB)
1404         goto error;
1405     dcr = &dcr_env->dcrn[dcrn];
1406     if (dcr->dcr_read == NULL)
1407         goto error;
1408     *valp = (*dcr->dcr_read)(dcr->opaque, dcrn);
1409 
1410     return 0;
1411 
1412  error:
1413     if (dcr_env->read_error != NULL)
1414         return (*dcr_env->read_error)(dcrn);
1415 
1416     return -1;
1417 }
1418 
1419 int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, uint32_t val)
1420 {
1421     ppc_dcrn_t *dcr;
1422 
1423     if (dcrn < 0 || dcrn >= DCRN_NB)
1424         goto error;
1425     dcr = &dcr_env->dcrn[dcrn];
1426     if (dcr->dcr_write == NULL)
1427         goto error;
1428     (*dcr->dcr_write)(dcr->opaque, dcrn, val);
1429 
1430     return 0;
1431 
1432  error:
1433     if (dcr_env->write_error != NULL)
1434         return (*dcr_env->write_error)(dcrn);
1435 
1436     return -1;
1437 }
1438 
1439 int ppc_dcr_register (CPUPPCState *env, int dcrn, void *opaque,
1440                       dcr_read_cb dcr_read, dcr_write_cb dcr_write)
1441 {
1442     ppc_dcr_t *dcr_env;
1443     ppc_dcrn_t *dcr;
1444 
1445     dcr_env = env->dcr_env;
1446     if (dcr_env == NULL)
1447         return -1;
1448     if (dcrn < 0 || dcrn >= DCRN_NB)
1449         return -1;
1450     dcr = &dcr_env->dcrn[dcrn];
1451     if (dcr->opaque != NULL ||
1452         dcr->dcr_read != NULL ||
1453         dcr->dcr_write != NULL)
1454         return -1;
1455     dcr->opaque = opaque;
1456     dcr->dcr_read = dcr_read;
1457     dcr->dcr_write = dcr_write;
1458 
1459     return 0;
1460 }
1461 
1462 int ppc_dcr_init (CPUPPCState *env, int (*read_error)(int dcrn),
1463                   int (*write_error)(int dcrn))
1464 {
1465     ppc_dcr_t *dcr_env;
1466 
1467     dcr_env = g_malloc0(sizeof(ppc_dcr_t));
1468     dcr_env->read_error = read_error;
1469     dcr_env->write_error = write_error;
1470     env->dcr_env = dcr_env;
1471 
1472     return 0;
1473 }
1474 
1475 /*****************************************************************************/
1476 /* Debug port */
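/*
 * Minimal debug output port used by some embedded board models: offset 0
 * prints a character, offset 1 a newline, offset 2 updates the QEMU log
 * mask.
 */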
1477 void PPC_debug_write (void *opaque, uint32_t addr, uint32_t val)
1478 {
1479     addr &= 0xF;
1480     switch (addr) {
1481     case 0:
1482         printf("%c", val);
1483         break;
1484     case 1:
1485         printf("\n");
1486         fflush(stdout);
1487         break;
1488     case 2:
1489         printf("Set loglevel to %04" PRIx32 "\n", val);
1490         qemu_set_log(val | 0x100);
1491         break;
1492     }
1493 }
1494 
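/*
 * Find a vCPU by the default (reset) value of its PIR SPR; returns NULL
 * when no CPU matches.
 */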
1495 PowerPCCPU *ppc_get_vcpu_by_pir(int pir)
1496 {
1497     CPUState *cs;
1498 
1499     CPU_FOREACH(cs) {
1500         PowerPCCPU *cpu = POWERPC_CPU(cs);
1501         CPUPPCState *env = &cpu->env;
1502 
1503         if (env->spr_cb[SPR_PIR].default_value == pir) {
1504             return cpu;
1505         }
1506     }
1507 
1508     return NULL;
1509 }
1510