1  /*
2   * QEMU generic PowerPC hardware System Emulator
3   *
4   * Copyright (c) 2003-2007 Jocelyn Mayer
5   *
6   * Permission is hereby granted, free of charge, to any person obtaining a copy
7   * of this software and associated documentation files (the "Software"), to deal
8   * in the Software without restriction, including without limitation the rights
9   * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10   * copies of the Software, and to permit persons to whom the Software is
11   * furnished to do so, subject to the following conditions:
12   *
13   * The above copyright notice and this permission notice shall be included in
14   * all copies or substantial portions of the Software.
15   *
16   * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17   * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18   * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19   * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20   * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21   * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22   * THE SOFTWARE.
23   */
24  
25  #include "qemu/osdep.h"
26  #include "hw/irq.h"
27  #include "hw/ppc/ppc.h"
28  #include "hw/ppc/ppc_e500.h"
29  #include "qemu/timer.h"
30  #include "sysemu/cpus.h"
31  #include "qemu/log.h"
32  #include "qemu/main-loop.h"
33  #include "qemu/error-report.h"
34  #include "sysemu/kvm.h"
35  #include "sysemu/replay.h"
36  #include "sysemu/runstate.h"
37  #include "kvm_ppc.h"
38  #include "migration/vmstate.h"
39  #include "trace.h"
40  
41  static void cpu_ppc_tb_stop (CPUPPCState *env);
42  static void cpu_ppc_tb_start (CPUPPCState *env);
43  
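/*
 * Set or clear one PPC_INTERRUPT_* bit in env->pending_interrupts under the
 * BQL.  If the pending set actually changed, ppc_maybe_interrupt() gets a
 * chance to deliver it and, when running under KVM, the change is forwarded
 * with kvmppc_set_interrupt().
 */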
44  void ppc_set_irq(PowerPCCPU *cpu, int irq, int level)
45  {
46      CPUPPCState *env = &cpu->env;
47      unsigned int old_pending;
48  
49      /* We may already have the BQL if coming from the reset path */
50      QEMU_IOTHREAD_LOCK_GUARD();
51  
52      old_pending = env->pending_interrupts;
53  
54      if (level) {
55          env->pending_interrupts |= irq;
56      } else {
57          env->pending_interrupts &= ~irq;
58      }
59  
60      if (old_pending != env->pending_interrupts) {
61          ppc_maybe_interrupt(env);
62          if (kvm_enabled()) {
63              kvmppc_set_interrupt(cpu, irq, level);
64          }
65      }
66  
67      trace_ppc_irq_set_exit(env, irq, level, env->pending_interrupts,
68                             CPU(cpu)->interrupt_request);
69  }
70  
71  /* PowerPC 6xx / 7xx internal IRQ controller */
72  static void ppc6xx_set_irq(void *opaque, int pin, int level)
73  {
74      PowerPCCPU *cpu = opaque;
75      CPUPPCState *env = &cpu->env;
76      int cur_level;
77  
78      trace_ppc_irq_set(env, pin, level);
79  
80      cur_level = (env->irq_input_state >> pin) & 1;
81      /* Don't generate spurious events */
82      if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
83          CPUState *cs = CPU(cpu);
84  
85          switch (pin) {
86          case PPC6xx_INPUT_TBEN:
87              /* Level sensitive - active high */
88              trace_ppc_irq_set_state("time base", level);
89              if (level) {
90                  cpu_ppc_tb_start(env);
91              } else {
92                  cpu_ppc_tb_stop(env);
93              }
94              break;
95          case PPC6xx_INPUT_INT:
96              /* Level sensitive - active high */
97              trace_ppc_irq_set_state("external IRQ", level);
98              ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
99              break;
100          case PPC6xx_INPUT_SMI:
101              /* Level sensitive - active high */
102              trace_ppc_irq_set_state("SMI IRQ", level);
103              ppc_set_irq(cpu, PPC_INTERRUPT_SMI, level);
104              break;
105          case PPC6xx_INPUT_MCP:
106              /* Negative edge sensitive */
107              /* XXX: TODO: actual reaction may depend on HID0 status
108               *            603/604/740/750: check HID0[EMCP]
109               */
110              if (cur_level == 1 && level == 0) {
111                  trace_ppc_irq_set_state("machine check", 1);
112                  ppc_set_irq(cpu, PPC_INTERRUPT_MCK, 1);
113              }
114              break;
115          case PPC6xx_INPUT_CKSTP_IN:
116              /* Level sensitive - active low */
117              /* XXX: TODO: relay the signal to CKSTP_OUT pin */
118              /* XXX: Note that the only way to restart the CPU is to reset it */
119              if (level) {
120                  trace_ppc_irq_cpu("stop");
121                  cs->halted = 1;
122              }
123              break;
124          case PPC6xx_INPUT_HRESET:
125              /* Level sensitive - active low */
126              if (level) {
127                  trace_ppc_irq_reset("CPU");
128                  cpu_interrupt(cs, CPU_INTERRUPT_RESET);
129              }
130              break;
131          case PPC6xx_INPUT_SRESET:
132              trace_ppc_irq_set_state("RESET IRQ", level);
133              ppc_set_irq(cpu, PPC_INTERRUPT_RESET, level);
134              break;
135          default:
136              g_assert_not_reached();
137          }
138          if (level)
139              env->irq_input_state |= 1 << pin;
140          else
141              env->irq_input_state &= ~(1 << pin);
142      }
143  }
144  
145  void ppc6xx_irq_init(PowerPCCPU *cpu)
146  {
147      qdev_init_gpio_in(DEVICE(cpu), ppc6xx_set_irq, PPC6xx_INPUT_NB);
148  }
149  
150  #if defined(TARGET_PPC64)
151  /* PowerPC 970 internal IRQ controller */
152  static void ppc970_set_irq(void *opaque, int pin, int level)
153  {
154      PowerPCCPU *cpu = opaque;
155      CPUPPCState *env = &cpu->env;
156      int cur_level;
157  
158      trace_ppc_irq_set(env, pin, level);
159  
160      cur_level = (env->irq_input_state >> pin) & 1;
161      /* Don't generate spurious events */
162      if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
163          CPUState *cs = CPU(cpu);
164  
165          switch (pin) {
166          case PPC970_INPUT_INT:
167              /* Level sensitive - active high */
168              trace_ppc_irq_set_state("external IRQ", level);
169              ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
170              break;
171          case PPC970_INPUT_THINT:
172              /* Level sensitive - active high */
173              trace_ppc_irq_set_state("SMI IRQ", level);
174              ppc_set_irq(cpu, PPC_INTERRUPT_THERM, level);
175              break;
176          case PPC970_INPUT_MCP:
177              /* Negative edge sensitive */
178              /* XXX: TODO: actual reaction may depend on HID0 status
179               *            603/604/740/750: check HID0[EMCP]
180               */
181              if (cur_level == 1 && level == 0) {
182                  trace_ppc_irq_set_state("machine check", 1);
183                  ppc_set_irq(cpu, PPC_INTERRUPT_MCK, 1);
184              }
185              break;
186          case PPC970_INPUT_CKSTP:
187              /* Level sensitive - active low */
188              /* XXX: TODO: relay the signal to CKSTP_OUT pin */
189              if (level) {
190                  trace_ppc_irq_cpu("stop");
191                  cs->halted = 1;
192              } else {
193                  trace_ppc_irq_cpu("restart");
194                  cs->halted = 0;
195                  qemu_cpu_kick(cs);
196              }
197              break;
198          case PPC970_INPUT_HRESET:
199              /* Level sensitive - active low */
200              if (level) {
201                  cpu_interrupt(cs, CPU_INTERRUPT_RESET);
202              }
203              break;
204          case PPC970_INPUT_SRESET:
205              trace_ppc_irq_set_state("RESET IRQ", level);
206              ppc_set_irq(cpu, PPC_INTERRUPT_RESET, level);
207              break;
208          case PPC970_INPUT_TBEN:
209              trace_ppc_irq_set_state("TBEN IRQ", level);
210              /* XXX: TODO */
211              break;
212          default:
213              g_assert_not_reached();
214          }
215          if (level)
216              env->irq_input_state |= 1 << pin;
217          else
218              env->irq_input_state &= ~(1 << pin);
219      }
220  }
221  
222  void ppc970_irq_init(PowerPCCPU *cpu)
223  {
224      qdev_init_gpio_in(DEVICE(cpu), ppc970_set_irq, PPC970_INPUT_NB);
225  }
226  
227  /* POWER7 internal IRQ controller */
228  static void power7_set_irq(void *opaque, int pin, int level)
229  {
230      PowerPCCPU *cpu = opaque;
231  
232      trace_ppc_irq_set(&cpu->env, pin, level);
233  
234      switch (pin) {
235      case POWER7_INPUT_INT:
236          /* Level sensitive - active high */
237          trace_ppc_irq_set_state("external IRQ", level);
238          ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
239          break;
240      default:
241          g_assert_not_reached();
242      }
243  }
244  
245  void ppcPOWER7_irq_init(PowerPCCPU *cpu)
246  {
247      qdev_init_gpio_in(DEVICE(cpu), power7_set_irq, POWER7_INPUT_NB);
248  }
249  
250  /* POWER9 internal IRQ controller */
251  static void power9_set_irq(void *opaque, int pin, int level)
252  {
253      PowerPCCPU *cpu = opaque;
254  
255      trace_ppc_irq_set(&cpu->env, pin, level);
256  
257      switch (pin) {
258      case POWER9_INPUT_INT:
259          /* Level sensitive - active high */
260          trace_ppc_irq_set_state("external IRQ", level);
261          ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
262          break;
263      case POWER9_INPUT_HINT:
264          /* Level sensitive - active high */
265          trace_ppc_irq_set_state("HV external IRQ", level);
266          ppc_set_irq(cpu, PPC_INTERRUPT_HVIRT, level);
267          break;
268      default:
269          g_assert_not_reached();
270          return;
271      }
272  }
273  
274  void ppcPOWER9_irq_init(PowerPCCPU *cpu)
275  {
276      qdev_init_gpio_in(DEVICE(cpu), power9_set_irq, POWER9_INPUT_NB);
277  }
278  #endif /* defined(TARGET_PPC64) */
279  
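/*
 * 40x reset helpers: each one requests a CPU (or full machine) reset and,
 * for core and chip resets, records the reset reason in SPR_40x_DBSR
 * (0x100 after a core reset, 0x200 after a chip reset).
 */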
280  void ppc40x_core_reset(PowerPCCPU *cpu)
281  {
282      CPUPPCState *env = &cpu->env;
283      target_ulong dbsr;
284  
285      qemu_log_mask(CPU_LOG_RESET, "Reset PowerPC core\n");
286      cpu_interrupt(CPU(cpu), CPU_INTERRUPT_RESET);
287      dbsr = env->spr[SPR_40x_DBSR];
288      dbsr &= ~0x00000300;
289      dbsr |= 0x00000100;
290      env->spr[SPR_40x_DBSR] = dbsr;
291  }
292  
293  void ppc40x_chip_reset(PowerPCCPU *cpu)
294  {
295      CPUPPCState *env = &cpu->env;
296      target_ulong dbsr;
297  
298      qemu_log_mask(CPU_LOG_RESET, "Reset PowerPC chip\n");
299      cpu_interrupt(CPU(cpu), CPU_INTERRUPT_RESET);
300      /* XXX: TODO reset all internal peripherals */
301      dbsr = env->spr[SPR_40x_DBSR];
302      dbsr &= ~0x00000300;
303      dbsr |= 0x00000200;
304      env->spr[SPR_40x_DBSR] = dbsr;
305  }
306  
307  void ppc40x_system_reset(PowerPCCPU *cpu)
308  {
309      qemu_log_mask(CPU_LOG_RESET, "Reset PowerPC system\n");
310      qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
311  }
312  
313  void store_40x_dbcr0(CPUPPCState *env, uint32_t val)
314  {
315      PowerPCCPU *cpu = env_archcpu(env);
316  
317      qemu_mutex_lock_iothread();
318  
319      switch ((val >> 28) & 0x3) {
320      case 0x0:
321          /* No action */
322          break;
323      case 0x1:
324          /* Core reset */
325          ppc40x_core_reset(cpu);
326          break;
327      case 0x2:
328          /* Chip reset */
329          ppc40x_chip_reset(cpu);
330          break;
331      case 0x3:
332          /* System reset */
333          ppc40x_system_reset(cpu);
334          break;
335      }
336  
337      qemu_mutex_unlock_iothread();
338  }
339  
340  /* PowerPC 40x internal IRQ controller */
341  static void ppc40x_set_irq(void *opaque, int pin, int level)
342  {
343      PowerPCCPU *cpu = opaque;
344      CPUPPCState *env = &cpu->env;
345      int cur_level;
346  
347      trace_ppc_irq_set(env, pin, level);
348  
349      cur_level = (env->irq_input_state >> pin) & 1;
350      /* Don't generate spurious events */
351      if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
352          CPUState *cs = CPU(cpu);
353  
354          switch (pin) {
355          case PPC40x_INPUT_RESET_SYS:
356              if (level) {
357                  trace_ppc_irq_reset("system");
358                  ppc40x_system_reset(cpu);
359              }
360              break;
361          case PPC40x_INPUT_RESET_CHIP:
362              if (level) {
363                  trace_ppc_irq_reset("chip");
364                  ppc40x_chip_reset(cpu);
365              }
366              break;
367          case PPC40x_INPUT_RESET_CORE:
368              /* XXX: TODO: update DBSR[MRR] */
369              if (level) {
370                  trace_ppc_irq_reset("core");
371                  ppc40x_core_reset(cpu);
372              }
373              break;
374          case PPC40x_INPUT_CINT:
375              /* Level sensitive - active high */
376              trace_ppc_irq_set_state("critical IRQ", level);
377              ppc_set_irq(cpu, PPC_INTERRUPT_CEXT, level);
378              break;
379          case PPC40x_INPUT_INT:
380              /* Level sensitive - active high */
381              trace_ppc_irq_set_state("external IRQ", level);
382              ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
383              break;
384          case PPC40x_INPUT_HALT:
385              /* Level sensitive - active low */
386              if (level) {
387                  trace_ppc_irq_cpu("stop");
388                  cs->halted = 1;
389              } else {
390                  trace_ppc_irq_cpu("restart");
391                  cs->halted = 0;
392                  qemu_cpu_kick(cs);
393              }
394              break;
395          case PPC40x_INPUT_DEBUG:
396              /* Level sensitive - active high */
397              trace_ppc_irq_set_state("debug pin", level);
398              ppc_set_irq(cpu, PPC_INTERRUPT_DEBUG, level);
399              break;
400          default:
401              g_assert_not_reached();
402          }
403          if (level)
404              env->irq_input_state |= 1 << pin;
405          else
406              env->irq_input_state &= ~(1 << pin);
407      }
408  }
409  
410  void ppc40x_irq_init(PowerPCCPU *cpu)
411  {
412      qdev_init_gpio_in(DEVICE(cpu), ppc40x_set_irq, PPC40x_INPUT_NB);
413  }
414  
415  /* PowerPC E500 internal IRQ controller */
416  static void ppce500_set_irq(void *opaque, int pin, int level)
417  {
418      PowerPCCPU *cpu = opaque;
419      CPUPPCState *env = &cpu->env;
420      int cur_level;
421  
422      trace_ppc_irq_set(env, pin, level);
423  
424      cur_level = (env->irq_input_state >> pin) & 1;
425      /* Don't generate spurious events */
426      if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
427          switch (pin) {
428          case PPCE500_INPUT_MCK:
429              if (level) {
430                  trace_ppc_irq_reset("system");
431                  qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
432              }
433              break;
434          case PPCE500_INPUT_RESET_CORE:
435              if (level) {
436                  trace_ppc_irq_reset("core");
437                  ppc_set_irq(cpu, PPC_INTERRUPT_MCK, level);
438              }
439              break;
440          case PPCE500_INPUT_CINT:
441              /* Level sensitive - active high */
442              trace_ppc_irq_set_state("critical IRQ", level);
443              ppc_set_irq(cpu, PPC_INTERRUPT_CEXT, level);
444              break;
445          case PPCE500_INPUT_INT:
446              /* Level sensitive - active high */
447              trace_ppc_irq_set_state("core IRQ", level);
448              ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
449              break;
450          case PPCE500_INPUT_DEBUG:
451              /* Level sensitive - active high */
452              trace_ppc_irq_set_state("debug pin", level);
453              ppc_set_irq(cpu, PPC_INTERRUPT_DEBUG, level);
454              break;
455          default:
456              g_assert_not_reached();
457          }
458          if (level)
459              env->irq_input_state |= 1 << pin;
460          else
461              env->irq_input_state &= ~(1 << pin);
462      }
463  }
464  
465  void ppce500_irq_init(PowerPCCPU *cpu)
466  {
467      qdev_init_gpio_in(DEVICE(cpu), ppce500_set_irq, PPCE500_INPUT_NB);
468  }
469  
470  /* Enable or Disable the E500 EPR capability */
471  void ppce500_set_mpic_proxy(bool enabled)
472  {
473      CPUState *cs;
474  
475      CPU_FOREACH(cs) {
476          PowerPCCPU *cpu = POWERPC_CPU(cs);
477  
478          cpu->env.mpic_proxy = enabled;
479          if (kvm_enabled()) {
480              kvmppc_set_mpic_proxy(cpu, enabled);
481          }
482      }
483  }
484  
485  /*****************************************************************************/
486  /* PowerPC time base and decrementer emulation */
487  
488  /*
489   * Conversion between QEMU_CLOCK_VIRTUAL ns and timebase (TB) ticks:
490   * TB ticks are computed by multiplying the ns value by tb_freq, dividing
491   * by ns per second, and rounding down. TB ticks drive all clocks and
492   * timers in the target machine.
493   *
494   * Converting TB intervals to ns for the purpose of setting a
495   * QEMU_CLOCK_VIRTUAL timer should go the other way, but rounding
496   * up. Rounding down could cause the timer to fire before the TB
497   * value has been reached.
498   */
499  static uint64_t ns_to_tb(uint32_t freq, int64_t clock)
500  {
501      return muldiv64(clock, freq, NANOSECONDS_PER_SECOND);
502  }
503  
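/*
 * Illustrative example: with tb_freq = 512 MHz, 1 ms of virtual time
 * (1,000,000 ns) corresponds to
 * muldiv64(1000000, 512000000, NANOSECONDS_PER_SECOND) = 512,000 TB ticks.
 * tb_to_ns_round_up() below performs the inverse conversion, rounding up so
 * a timer never fires before its TB deadline.
 */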
504  /* virtual clock in TB ticks, not adjusted by TB offset */
505  static int64_t tb_to_ns_round_up(uint32_t freq, uint64_t tb)
506  {
507      return muldiv64_round_up(tb, NANOSECONDS_PER_SECOND, freq);
508  }
509  
510  uint64_t cpu_ppc_get_tb(ppc_tb_t *tb_env, uint64_t vmclk, int64_t tb_offset)
511  {
512      /* TB time in tb periods */
513      return ns_to_tb(tb_env->tb_freq, vmclk) + tb_offset;
514  }
515  
516  uint64_t cpu_ppc_load_tbl (CPUPPCState *env)
517  {
518      ppc_tb_t *tb_env = env->tb_env;
519      uint64_t tb;
520  
521      if (kvm_enabled()) {
522          return env->spr[SPR_TBL];
523      }
524  
525      tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
526                          tb_env->tb_offset);
527      trace_ppc_tb_load(tb);
528  
529      return tb;
530  }
531  
532  static inline uint32_t _cpu_ppc_load_tbu(CPUPPCState *env)
533  {
534      ppc_tb_t *tb_env = env->tb_env;
535      uint64_t tb;
536  
537      tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
538                          tb_env->tb_offset);
539      trace_ppc_tb_load(tb);
540  
541      return tb >> 32;
542  }
543  
544  uint32_t cpu_ppc_load_tbu (CPUPPCState *env)
545  {
546      if (kvm_enabled()) {
547          return env->spr[SPR_TBU];
548      }
549  
550      return _cpu_ppc_load_tbu(env);
551  }
552  
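/*
 * Storing a time base value never touches QEMU_CLOCK_VIRTUAL itself; it is
 * implemented by recomputing the offset so that a read at the current
 * virtual clock returns exactly 'value'.
 */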
553  static inline void cpu_ppc_store_tb(ppc_tb_t *tb_env, uint64_t vmclk,
554                                      int64_t *tb_offsetp, uint64_t value)
555  {
556      *tb_offsetp = value - ns_to_tb(tb_env->tb_freq, vmclk);
557  
558      trace_ppc_tb_store(value, *tb_offsetp);
559  }
560  
561  void cpu_ppc_store_tbl (CPUPPCState *env, uint32_t value)
562  {
563      ppc_tb_t *tb_env = env->tb_env;
564      int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
565      uint64_t tb;
566  
567      tb = cpu_ppc_get_tb(tb_env, clock, tb_env->tb_offset);
568      tb &= 0xFFFFFFFF00000000ULL;
569      cpu_ppc_store_tb(tb_env, clock, &tb_env->tb_offset, tb | (uint64_t)value);
570  }
571  
572  static inline void _cpu_ppc_store_tbu(CPUPPCState *env, uint32_t value)
573  {
574      ppc_tb_t *tb_env = env->tb_env;
575      int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
576      uint64_t tb;
577  
578      tb = cpu_ppc_get_tb(tb_env, clock, tb_env->tb_offset);
579      tb &= 0x00000000FFFFFFFFULL;
580      cpu_ppc_store_tb(tb_env, clock, &tb_env->tb_offset,
581                       ((uint64_t)value << 32) | tb);
582  }
583  
584  void cpu_ppc_store_tbu (CPUPPCState *env, uint32_t value)
585  {
586      _cpu_ppc_store_tbu(env, value);
587  }
588  
589  uint64_t cpu_ppc_load_atbl (CPUPPCState *env)
590  {
591      ppc_tb_t *tb_env = env->tb_env;
592      uint64_t tb;
593  
594      tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
595                          tb_env->atb_offset);
596      trace_ppc_tb_load(tb);
597  
598      return tb;
599  }
600  
601  uint32_t cpu_ppc_load_atbu (CPUPPCState *env)
602  {
603      ppc_tb_t *tb_env = env->tb_env;
604      uint64_t tb;
605  
606      tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
607                          tb_env->atb_offset);
608      trace_ppc_tb_load(tb);
609  
610      return tb >> 32;
611  }
612  
613  void cpu_ppc_store_atbl (CPUPPCState *env, uint32_t value)
614  {
615      ppc_tb_t *tb_env = env->tb_env;
616      int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
617      uint64_t tb;
618  
619      tb = cpu_ppc_get_tb(tb_env, clock, tb_env->atb_offset);
620      tb &= 0xFFFFFFFF00000000ULL;
621      cpu_ppc_store_tb(tb_env, clock, &tb_env->atb_offset, tb | (uint64_t)value);
622  }
623  
624  void cpu_ppc_store_atbu (CPUPPCState *env, uint32_t value)
625  {
626      ppc_tb_t *tb_env = env->tb_env;
627      int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
628      uint64_t tb;
629  
630      tb = cpu_ppc_get_tb(tb_env, clock, tb_env->atb_offset);
631      tb &= 0x00000000FFFFFFFFULL;
632      cpu_ppc_store_tb(tb_env, clock, &tb_env->atb_offset,
633                       ((uint64_t)value << 32) | tb);
634  }
635  
636  uint64_t cpu_ppc_load_vtb(CPUPPCState *env)
637  {
638      ppc_tb_t *tb_env = env->tb_env;
639  
640      return cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
641                            tb_env->vtb_offset);
642  }
643  
644  void cpu_ppc_store_vtb(CPUPPCState *env, uint64_t value)
645  {
646      ppc_tb_t *tb_env = env->tb_env;
647  
648      cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
649                       &tb_env->vtb_offset, value);
650  }
651  
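/*
 * TBU40 semantics: only the upper 40 bits of the time base are replaced;
 * the low 24 bits of the current TB value are preserved.
 */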
652  void cpu_ppc_store_tbu40(CPUPPCState *env, uint64_t value)
653  {
654      ppc_tb_t *tb_env = env->tb_env;
655      int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
656      uint64_t tb;
657  
658      tb = cpu_ppc_get_tb(tb_env, clock, tb_env->tb_offset);
659      tb &= 0xFFFFFFUL;
660      tb |= (value & ~0xFFFFFFUL);
661      cpu_ppc_store_tb(tb_env, clock, &tb_env->tb_offset, tb);
662  }
663  
664  static void cpu_ppc_tb_stop (CPUPPCState *env)
665  {
666      ppc_tb_t *tb_env = env->tb_env;
667      uint64_t tb, atb, vmclk;
668  
669      /* If the time base is already frozen, do nothing */
670      if (tb_env->tb_freq != 0) {
671          vmclk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
672          /* Get the time base */
673          tb = cpu_ppc_get_tb(tb_env, vmclk, tb_env->tb_offset);
674          /* Get the alternate time base */
675          atb = cpu_ppc_get_tb(tb_env, vmclk, tb_env->atb_offset);
676          /* Store the time base value (ie compute the current offset) */
677          cpu_ppc_store_tb(tb_env, vmclk, &tb_env->tb_offset, tb);
678          /* Store the alternate time base value (compute the current offset) */
679          cpu_ppc_store_tb(tb_env, vmclk, &tb_env->atb_offset, atb);
680          /* Set the time base frequency to zero */
681          tb_env->tb_freq = 0;
682          /* Now, the time bases are frozen to tb_offset / atb_offset value */
683      }
684  }
685  
686  static void cpu_ppc_tb_start (CPUPPCState *env)
687  {
688      ppc_tb_t *tb_env = env->tb_env;
689      uint64_t tb, atb, vmclk;
690  
691      /* If the time base is not frozen, do nothing */
692      if (tb_env->tb_freq == 0) {
693          vmclk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
694          /* Get the time base from tb_offset */
695          tb = tb_env->tb_offset;
696          /* Get the alternate time base from atb_offset */
697          atb = tb_env->atb_offset;
698          /* Restore the tb frequency from the decrementer frequency */
699          tb_env->tb_freq = tb_env->decr_freq;
700          /* Store the time base value */
701          cpu_ppc_store_tb(tb_env, vmclk, &tb_env->tb_offset, tb);
702          /* Store the alternate time base value */
703          cpu_ppc_store_tb(tb_env, vmclk, &tb_env->atb_offset, atb);
704      }
705  }
706  
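/*
 * True only for purely edge-triggered decrementer implementations
 * (UNDERFLOW_TRIGGERED set, UNDERFLOW_LEVEL clear), where the pending
 * interrupt has to be cleared when it is delivered.
 */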
707  bool ppc_decr_clear_on_delivery(CPUPPCState *env)
708  {
709      ppc_tb_t *tb_env = env->tb_env;
710      int flags = PPC_DECR_UNDERFLOW_TRIGGERED | PPC_DECR_UNDERFLOW_LEVEL;
711      return ((tb_env->flags & flags) == PPC_DECR_UNDERFLOW_TRIGGERED);
712  }
713  
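/*
 * Compute the current (H)DECR value from 'next', the timebase tick at which
 * the decrementer is due to reach zero; BookE decrementers are special-cased
 * because they hold at zero rather than counting into negative values.
 */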
714  static inline int64_t __cpu_ppc_load_decr(CPUPPCState *env, int64_t now,
715                                            uint64_t next)
716  {
717      ppc_tb_t *tb_env = env->tb_env;
718      uint64_t n;
719      int64_t decr;
720  
721      n = ns_to_tb(tb_env->decr_freq, now);
722      if (next > n && tb_env->flags & PPC_TIMER_BOOKE) {
723          decr = 0;
724      } else {
725          decr = next - n;
726      }
727  
728      trace_ppc_decr_load(decr);
729  
730      return decr;
731  }
732  
733  static target_ulong _cpu_ppc_load_decr(CPUPPCState *env, int64_t now)
734  {
735      ppc_tb_t *tb_env = env->tb_env;
736      uint64_t decr;
737  
738      decr = __cpu_ppc_load_decr(env, now, tb_env->decr_next);
739  
740      /*
741       * If the large decrementer is enabled then the decrementer is sign
742       * extended to 64 bits, otherwise it is a 32 bit value.
743       */
744      if (env->spr[SPR_LPCR] & LPCR_LD) {
745          PowerPCCPU *cpu = env_archcpu(env);
746          PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
747          return sextract64(decr, 0, pcc->lrg_decr_bits);
748      }
749      return (uint32_t) decr;
750  }
751  
752  target_ulong cpu_ppc_load_decr(CPUPPCState *env)
753  {
754      if (kvm_enabled()) {
755          return env->spr[SPR_DECR];
756      } else {
757          return _cpu_ppc_load_decr(env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
758      }
759  }
760  
761  static target_ulong _cpu_ppc_load_hdecr(CPUPPCState *env, int64_t now)
762  {
763      PowerPCCPU *cpu = env_archcpu(env);
764      PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
765      ppc_tb_t *tb_env = env->tb_env;
766      uint64_t hdecr;
767  
768      hdecr =  __cpu_ppc_load_decr(env, now, tb_env->hdecr_next);
769  
770      /*
771       * If we have a large decrementer (POWER9 or later) then hdecr is sign
772       * extended to 64 bits, otherwise it is 32 bits.
773       */
774      if (pcc->lrg_decr_bits > 32) {
775          return sextract64(hdecr, 0, pcc->lrg_decr_bits);
776      }
777      return (uint32_t) hdecr;
778  }
779  
780  target_ulong cpu_ppc_load_hdecr(CPUPPCState *env)
781  {
782      return _cpu_ppc_load_hdecr(env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
783  }
784  
785  uint64_t cpu_ppc_load_purr (CPUPPCState *env)
786  {
787      ppc_tb_t *tb_env = env->tb_env;
788  
789      return cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
790                            tb_env->purr_offset);
791  }
792  
793  /* When the decrementer expires,
794   * all we need to do is generate or queue a CPU exception
795   */
796  static inline void cpu_ppc_decr_excp(PowerPCCPU *cpu)
797  {
798      /* Raise it */
799      trace_ppc_decr_excp("raise");
800      ppc_set_irq(cpu, PPC_INTERRUPT_DECR, 1);
801  }
802  
803  static inline void cpu_ppc_decr_lower(PowerPCCPU *cpu)
804  {
805      ppc_set_irq(cpu, PPC_INTERRUPT_DECR, 0);
806  }
807  
808  static inline void cpu_ppc_hdecr_excp(PowerPCCPU *cpu)
809  {
810      CPUPPCState *env = &cpu->env;
811  
812      /* Raise it */
813      trace_ppc_decr_excp("raise HV");
814  
815      /* The architecture specifies that we don't deliver HDEC
816       * interrupts in a PM state. Not only do they not cause a
817       * wakeup, they also get effectively discarded.
818       */
819      if (!env->resume_as_sreset) {
820          ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 1);
821      }
822  }
823  
824  static inline void cpu_ppc_hdecr_lower(PowerPCCPU *cpu)
825  {
826      ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 0);
827  }
828  
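/*
 * Common helper for DECR and HDECR stores: records in *nextp the timebase
 * tick at which the new value reaches zero.  If the stored value already
 * signals an underflow (according to the level/edge flags), the exception
 * is raised immediately; otherwise a level-based exception is lowered if
 * needed and the QEMU timer is re-armed, rounding the deadline up so it
 * cannot fire before that tick.
 */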
829  static void __cpu_ppc_store_decr(PowerPCCPU *cpu, int64_t now, uint64_t *nextp,
830                                   QEMUTimer *timer,
831                                   void (*raise_excp)(void *),
832                                   void (*lower_excp)(PowerPCCPU *),
833                                   uint32_t flags, target_ulong decr,
834                                   target_ulong value, int nr_bits)
835  {
836      CPUPPCState *env = &cpu->env;
837      ppc_tb_t *tb_env = env->tb_env;
838      uint64_t next;
839      int64_t signed_value;
840      int64_t signed_decr;
841  
842      /* Truncate value to nr_bits and sign extend for simplicity */
843      value = extract64(value, 0, nr_bits);
844      decr = extract64(decr, 0, nr_bits);
845      signed_value = sextract64(value, 0, nr_bits);
846      signed_decr = sextract64(decr, 0, nr_bits);
847  
848      trace_ppc_decr_store(nr_bits, decr, value);
849  
850      /*
851       * Calculate the next decrementer event and set a timer.
852       * decr_next is in timebase units to keep rounding simple. Note it is
853       * not adjusted by tb_offset: if the TB changes via a tb_offset change, the
854       * decrementer does not change, so it is not directly comparable with the TB.
855       */
856      next = ns_to_tb(tb_env->decr_freq, now) + value;
857      *nextp = next; /* nextp is in timebase units */
858  
859      /*
860       * Going from 1 -> 0 or 0 -> -1 is the event to generate a DEC interrupt.
861       *
862       * On MSB level based DEC implementations a set MSB always means the
863       * interrupt is pending, so raise it on those.
864       *
865       * On MSB edge based DEC implementations the MSB going from 0 -> 1 triggers
866       * an edge interrupt, so raise it here too.
867       */
868      if (((flags & PPC_DECR_UNDERFLOW_LEVEL) && signed_value < 0) ||
869          ((flags & PPC_DECR_UNDERFLOW_TRIGGERED) && signed_value < 0
870            && signed_decr >= 0)) {
871          (*raise_excp)(cpu);
872          return;
873      }
874  
875      /* On MSB level based systems a 0 for the MSB stops interrupt delivery */
876      if (signed_value >= 0 && (flags & PPC_DECR_UNDERFLOW_LEVEL)) {
877          (*lower_excp)(cpu);
878      }
879  
880      /* Adjust timer */
881      timer_mod(timer, tb_to_ns_round_up(tb_env->decr_freq, next));
882  }
883  
884  static inline void _cpu_ppc_store_decr(PowerPCCPU *cpu, int64_t now,
885                                         target_ulong decr, target_ulong value,
886                                         int nr_bits)
887  {
888      ppc_tb_t *tb_env = cpu->env.tb_env;
889  
890      __cpu_ppc_store_decr(cpu, now, &tb_env->decr_next, tb_env->decr_timer,
891                           tb_env->decr_timer->cb, &cpu_ppc_decr_lower,
892                           tb_env->flags, decr, value, nr_bits);
893  }
894  
895  void cpu_ppc_store_decr(CPUPPCState *env, target_ulong value)
896  {
897      PowerPCCPU *cpu = env_archcpu(env);
898      PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
899      int64_t now;
900      target_ulong decr;
901      int nr_bits = 32;
902  
903      if (kvm_enabled()) {
904          /* KVM handles decrementer exceptions, so we don't need our own timer */
905          return;
906      }
907  
908      if (env->spr[SPR_LPCR] & LPCR_LD) {
909          nr_bits = pcc->lrg_decr_bits;
910      }
911  
912      now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
913      decr = _cpu_ppc_load_decr(env, now);
914      _cpu_ppc_store_decr(cpu, now, decr, value, nr_bits);
915  }
916  
917  static void cpu_ppc_decr_cb(void *opaque)
918  {
919      PowerPCCPU *cpu = opaque;
920  
921      cpu_ppc_decr_excp(cpu);
922  }
923  
924  static inline void _cpu_ppc_store_hdecr(PowerPCCPU *cpu, int64_t now,
925                                          target_ulong hdecr, target_ulong value,
926                                          int nr_bits)
927  {
928      ppc_tb_t *tb_env = cpu->env.tb_env;
929  
930      if (tb_env->hdecr_timer != NULL) {
931          /* HDECR (Book3S 64bit) is edge-based, not level like DECR */
932          __cpu_ppc_store_decr(cpu, now, &tb_env->hdecr_next, tb_env->hdecr_timer,
933                               tb_env->hdecr_timer->cb, &cpu_ppc_hdecr_lower,
934                               PPC_DECR_UNDERFLOW_TRIGGERED,
935                               hdecr, value, nr_bits);
936      }
937  }
938  
939  void cpu_ppc_store_hdecr(CPUPPCState *env, target_ulong value)
940  {
941      PowerPCCPU *cpu = env_archcpu(env);
942      PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
943      int64_t now;
944      target_ulong hdecr;
945  
946      now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
947      hdecr = _cpu_ppc_load_hdecr(env, now);
948      _cpu_ppc_store_hdecr(cpu, now, hdecr, value, pcc->lrg_decr_bits);
949  }
950  
951  static void cpu_ppc_hdecr_cb(void *opaque)
952  {
953      PowerPCCPU *cpu = opaque;
954  
955      cpu_ppc_hdecr_excp(cpu);
956  }
957  
958  static void _cpu_ppc_store_purr(CPUPPCState *env, int64_t now, uint64_t value)
959  {
960      ppc_tb_t *tb_env = env->tb_env;
961  
962      cpu_ppc_store_tb(tb_env, now, &tb_env->purr_offset, value);
963  }
964  
965  void cpu_ppc_store_purr(CPUPPCState *env, uint64_t value)
966  {
967      _cpu_ppc_store_purr(env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), value);
968  }
969  
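/*
 * Save/restore of the guest timebase across a VM stop/start (including
 * migration): timebase_save() captures the guest TB relative to the host
 * tick counter, and timebase_load() turns the saved value back into a
 * tb_offset which is applied to every vCPU.
 */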
970  static void timebase_save(PPCTimebase *tb)
971  {
972      uint64_t ticks = cpu_get_host_ticks();
973      PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu);
974  
975      if (!first_ppc_cpu->env.tb_env) {
976          error_report("No timebase object");
977          return;
978      }
979  
980      if (replay_mode == REPLAY_MODE_NONE) {
981          /* not used anymore, we keep it for compatibility */
982          tb->time_of_the_day_ns = qemu_clock_get_ns(QEMU_CLOCK_HOST);
983      } else {
984          /* simpler for record-replay to avoid this event, compat not needed */
985          tb->time_of_the_day_ns = 0;
986      }
987  
988      /*
989       * tb_offset is only expected to be changed by QEMU so
990       * there is no need to update it from KVM here
991       */
992      tb->guest_timebase = ticks + first_ppc_cpu->env.tb_env->tb_offset;
993  
994      tb->runstate_paused =
995          runstate_check(RUN_STATE_PAUSED) || runstate_check(RUN_STATE_SAVE_VM);
996  }
997  
998  static void timebase_load(PPCTimebase *tb)
999  {
1000      CPUState *cpu;
1001      PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu);
1002      int64_t tb_off_adj, tb_off;
1003      unsigned long freq;
1004  
1005      if (!first_ppc_cpu->env.tb_env) {
1006          error_report("No timebase object");
1007          return;
1008      }
1009  
1010      freq = first_ppc_cpu->env.tb_env->tb_freq;
1011  
1012      tb_off_adj = tb->guest_timebase - cpu_get_host_ticks();
1013  
1014      tb_off = first_ppc_cpu->env.tb_env->tb_offset;
1015      trace_ppc_tb_adjust(tb_off, tb_off_adj, tb_off_adj - tb_off,
1016                          (tb_off_adj - tb_off) / freq);
1017  
1018      /* Set new offset to all CPUs */
1019      CPU_FOREACH(cpu) {
1020          PowerPCCPU *pcpu = POWERPC_CPU(cpu);
1021          pcpu->env.tb_env->tb_offset = tb_off_adj;
1022          kvmppc_set_reg_tb_offset(pcpu, pcpu->env.tb_env->tb_offset);
1023      }
1024  }
1025  
1026  void cpu_ppc_clock_vm_state_change(void *opaque, bool running,
1027                                     RunState state)
1028  {
1029      PPCTimebase *tb = opaque;
1030  
1031      if (running) {
1032          timebase_load(tb);
1033      } else {
1034          timebase_save(tb);
1035      }
1036  }
1037  
1038  /*
1039   * When migrating a running guest, read the clock just
1040   * before migration, so that the guest clock keeps counting
1041   * during the interval between:
1042   *
1043   *  * vm_stop()
1044   *  * pre_save()
1046   *
1047   *  This reduces clock difference on migration from 5s
1048   *  to 0.1s (when max_downtime == 5s), because sending the
1049   *  final pages of memory (which happens between vm_stop()
1050   *  and pre_save()) takes max_downtime.
1051   */
1052  static int timebase_pre_save(void *opaque)
1053  {
1054      PPCTimebase *tb = opaque;
1055  
1056      /* guest_timebase won't be overridden in case of paused guest or savevm */
1057      if (!tb->runstate_paused) {
1058          timebase_save(tb);
1059      }
1060  
1061      return 0;
1062  }
1063  
1064  const VMStateDescription vmstate_ppc_timebase = {
1065      .name = "timebase",
1066      .version_id = 1,
1067      .minimum_version_id = 1,
1068      .pre_save = timebase_pre_save,
1069      .fields      = (VMStateField []) {
1070          VMSTATE_UINT64(guest_timebase, PPCTimebase),
1071          VMSTATE_INT64(time_of_the_day_ns, PPCTimebase),
1072          VMSTATE_END_OF_LIST()
1073      },
1074  };
1075  
1076  /* One-time setup of the timebase frequency (in Hz) */
1077  void cpu_ppc_tb_init(CPUPPCState *env, uint32_t freq)
1078  {
1079      PowerPCCPU *cpu = env_archcpu(env);
1080      ppc_tb_t *tb_env;
1081  
1082      tb_env = g_new0(ppc_tb_t, 1);
1083      env->tb_env = tb_env;
1084      tb_env->flags = PPC_DECR_UNDERFLOW_TRIGGERED;
1085      if (is_book3s_arch2x(env)) {
1086          /* All Book3S 64bit CPUs implement level based DEC logic */
1087          tb_env->flags |= PPC_DECR_UNDERFLOW_LEVEL;
1088      }
1089      /* Create new timer */
1090      tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
1091                                        &cpu_ppc_decr_cb, cpu);
1092      if (env->has_hv_mode && !cpu->vhyp) {
1093          tb_env->hdecr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
1094                                             &cpu_ppc_hdecr_cb, cpu);
1095      } else {
1096          tb_env->hdecr_timer = NULL;
1097      }
1098  
1099      tb_env->tb_freq = freq;
1100      tb_env->decr_freq = freq;
1101  }
1102  
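/*
 * Stop both decrementer timers, drop any pending DECR/HDECR interrupt and
 * re-arm the decrementers with all-ones; see the comment below about Linux
 * 2.4 expecting no decrementer exception to be pending at startup.
 */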
1103  void cpu_ppc_tb_reset(CPUPPCState *env)
1104  {
1105      PowerPCCPU *cpu = env_archcpu(env);
1106      ppc_tb_t *tb_env = env->tb_env;
1107  
1108      timer_del(tb_env->decr_timer);
1109      ppc_set_irq(cpu, PPC_INTERRUPT_DECR, 0);
1110      tb_env->decr_next = 0;
1111      if (tb_env->hdecr_timer != NULL) {
1112          timer_del(tb_env->hdecr_timer);
1113          ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 0);
1114          tb_env->hdecr_next = 0;
1115      }
1116  
1117      /*
1118       * There is a bug in Linux 2.4 kernels:
1119       * if a decrementer exception is pending when the kernel enables MSR[EE]
1120       * at startup, it is not ready to handle it...
1121       */
1122      cpu_ppc_store_decr(env, -1);
1123      cpu_ppc_store_hdecr(env, -1);
1124      cpu_ppc_store_purr(env, 0x0000000000000000ULL);
1125  }
1126  
1127  void cpu_ppc_tb_free(CPUPPCState *env)
1128  {
1129      timer_free(env->tb_env->decr_timer);
1130      timer_free(env->tb_env->hdecr_timer);
1131      g_free(env->tb_env);
1132  }
1133  
1134  /* cpu_ppc_hdecr_init may be used if the timer is not used by HDEC emulation */
1135  void cpu_ppc_hdecr_init(CPUPPCState *env)
1136  {
1137      PowerPCCPU *cpu = env_archcpu(env);
1138  
1139      assert(env->tb_env->hdecr_timer == NULL);
1140  
1141      env->tb_env->hdecr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
1142                                              &cpu_ppc_hdecr_cb, cpu);
1143  }
1144  
1145  void cpu_ppc_hdecr_exit(CPUPPCState *env)
1146  {
1147      PowerPCCPU *cpu = env_archcpu(env);
1148  
1149      timer_free(env->tb_env->hdecr_timer);
1150      env->tb_env->hdecr_timer = NULL;
1151  
1152      cpu_ppc_hdecr_lower(cpu);
1153  }
1154  
1155  /*****************************************************************************/
1156  /* PowerPC 40x timers */
1157  
1158  /* PIT, FIT & WDT */
1159  typedef struct ppc40x_timer_t ppc40x_timer_t;
1160  struct ppc40x_timer_t {
1161      uint64_t pit_reload;  /* PIT auto-reload value        */
1162      uint64_t fit_next;    /* Tick for next FIT interrupt  */
1163      QEMUTimer *fit_timer;
1164      uint64_t wdt_next;    /* Tick for next WDT interrupt  */
1165      QEMUTimer *wdt_timer;
1166  
1167      /* The 405 has a PIT, the 440 has a DECR. */
1168      unsigned int decr_excp;
1169  };
1170  
1171  /* Fixed interval timer */
1172  static void cpu_4xx_fit_cb (void *opaque)
1173  {
1174      PowerPCCPU *cpu = opaque;
1175      CPUPPCState *env = &cpu->env;
1176      ppc_tb_t *tb_env;
1177      ppc40x_timer_t *ppc40x_timer;
1178      uint64_t now, next;
1179  
1180      tb_env = env->tb_env;
1181      ppc40x_timer = tb_env->opaque;
1182      now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
1183      switch ((env->spr[SPR_40x_TCR] >> 24) & 0x3) {
1184      case 0:
1185          next = 1 << 9;
1186          break;
1187      case 1:
1188          next = 1 << 13;
1189          break;
1190      case 2:
1191          next = 1 << 17;
1192          break;
1193      case 3:
1194          next = 1 << 21;
1195          break;
1196      default:
1197          /* Cannot occur, but makes gcc happy */
1198          return;
1199      }
1200      next = now + tb_to_ns_round_up(tb_env->tb_freq, next);
1201      timer_mod(ppc40x_timer->fit_timer, next);
1202      env->spr[SPR_40x_TSR] |= 1 << 26;
1203      if ((env->spr[SPR_40x_TCR] >> 23) & 0x1) {
1204          ppc_set_irq(cpu, PPC_INTERRUPT_FIT, 1);
1205      }
1206      trace_ppc4xx_fit((int)((env->spr[SPR_40x_TCR] >> 23) & 0x1),
1207                           env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR]);
1208  }
1209  
1210  /* Programmable interval timer */
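/*
 * The PIT reuses tb_env->decr_timer.  It is only (re)armed while the reload
 * value is greater than 1 and PIT interrupts are enabled in TCR; when
 * re-armed from the exception path it additionally requires auto-reload to
 * be enabled, otherwise the timer is simply stopped.
 */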
1211  static void start_stop_pit (CPUPPCState *env, ppc_tb_t *tb_env, int is_excp)
1212  {
1213      ppc40x_timer_t *ppc40x_timer;
1214      uint64_t now, next;
1215  
1216      ppc40x_timer = tb_env->opaque;
1217      if (ppc40x_timer->pit_reload <= 1 ||
1218          !((env->spr[SPR_40x_TCR] >> 26) & 0x1) ||
1219          (is_excp && !((env->spr[SPR_40x_TCR] >> 22) & 0x1))) {
1220          /* Stop PIT */
1221          trace_ppc4xx_pit_stop();
1222          timer_del(tb_env->decr_timer);
1223      } else {
1224          trace_ppc4xx_pit_start(ppc40x_timer->pit_reload);
1225          now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
1226  
1227          if (is_excp) {
1228              tb_env->decr_next += ppc40x_timer->pit_reload;
1229          } else {
1230              tb_env->decr_next = ns_to_tb(tb_env->decr_freq, now)
1231                                  + ppc40x_timer->pit_reload;
1232          }
1233          next = tb_to_ns_round_up(tb_env->decr_freq, tb_env->decr_next);
1234          timer_mod(tb_env->decr_timer, next);
1235      }
1236  }
1237  
1238  static void cpu_4xx_pit_cb (void *opaque)
1239  {
1240      PowerPCCPU *cpu = opaque;
1241      CPUPPCState *env = &cpu->env;
1242      ppc_tb_t *tb_env;
1243      ppc40x_timer_t *ppc40x_timer;
1244  
1245      tb_env = env->tb_env;
1246      ppc40x_timer = tb_env->opaque;
1247      env->spr[SPR_40x_TSR] |= 1 << 27;
1248      if ((env->spr[SPR_40x_TCR] >> 26) & 0x1) {
1249          ppc_set_irq(cpu, ppc40x_timer->decr_excp, 1);
1250      }
1251      start_stop_pit(env, tb_env, 1);
1252      trace_ppc4xx_pit((int)((env->spr[SPR_40x_TCR] >> 22) & 0x1),
1253             (int)((env->spr[SPR_40x_TCR] >> 26) & 0x1),
1254             env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR],
1255             ppc40x_timer->pit_reload);
1256  }
1257  
1258  /* Watchdog timer */
1259  static void cpu_4xx_wdt_cb (void *opaque)
1260  {
1261      PowerPCCPU *cpu = opaque;
1262      CPUPPCState *env = &cpu->env;
1263      ppc_tb_t *tb_env;
1264      ppc40x_timer_t *ppc40x_timer;
1265      uint64_t now, next;
1266  
1267      tb_env = env->tb_env;
1268      ppc40x_timer = tb_env->opaque;
1269      now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
1270      switch ((env->spr[SPR_40x_TCR] >> 30) & 0x3) {
1271      case 0:
1272          next = 1 << 17;
1273          break;
1274      case 1:
1275          next = 1 << 21;
1276          break;
1277      case 2:
1278          next = 1 << 25;
1279          break;
1280      case 3:
1281          next = 1 << 29;
1282          break;
1283      default:
1284          /* Cannot occur, but makes gcc happy */
1285          return;
1286      }
1287      next = now + tb_to_ns_round_up(tb_env->decr_freq, next);
1288      trace_ppc4xx_wdt(env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR]);
1289      switch ((env->spr[SPR_40x_TSR] >> 30) & 0x3) {
1290      case 0x0:
1291      case 0x1:
1292          timer_mod(ppc40x_timer->wdt_timer, next);
1293          ppc40x_timer->wdt_next = next;
1294          env->spr[SPR_40x_TSR] |= 1U << 31;
1295          break;
1296      case 0x2:
1297          timer_mod(ppc40x_timer->wdt_timer, next);
1298          ppc40x_timer->wdt_next = next;
1299          env->spr[SPR_40x_TSR] |= 1 << 30;
1300          if ((env->spr[SPR_40x_TCR] >> 27) & 0x1) {
1301              ppc_set_irq(cpu, PPC_INTERRUPT_WDT, 1);
1302          }
1303          break;
1304      case 0x3:
1305          env->spr[SPR_40x_TSR] &= ~0x30000000;
1306          env->spr[SPR_40x_TSR] |= env->spr[SPR_40x_TCR] & 0x30000000;
1307          switch ((env->spr[SPR_40x_TCR] >> 28) & 0x3) {
1308          case 0x0:
1309              /* No reset */
1310              break;
1311          case 0x1: /* Core reset */
1312              ppc40x_core_reset(cpu);
1313              break;
1314          case 0x2: /* Chip reset */
1315              ppc40x_chip_reset(cpu);
1316              break;
1317          case 0x3: /* System reset */
1318              ppc40x_system_reset(cpu);
1319              break;
1320          }
1321      }
1322  }
1323  
1324  void store_40x_pit (CPUPPCState *env, target_ulong val)
1325  {
1326      ppc_tb_t *tb_env;
1327      ppc40x_timer_t *ppc40x_timer;
1328  
1329      tb_env = env->tb_env;
1330      ppc40x_timer = tb_env->opaque;
1331      trace_ppc40x_store_pit(val);
1332      ppc40x_timer->pit_reload = val;
1333      start_stop_pit(env, tb_env, 0);
1334  }
1335  
1336  target_ulong load_40x_pit (CPUPPCState *env)
1337  {
1338      return cpu_ppc_load_decr(env);
1339  }
1340  
1341  void store_40x_tsr(CPUPPCState *env, target_ulong val)
1342  {
1343      PowerPCCPU *cpu = env_archcpu(env);
1344  
1345      trace_ppc40x_store_tsr(val);
1346  
1347      env->spr[SPR_40x_TSR] &= ~(val & 0xFC000000);
1348      if (val & 0x80000000) {
1349          ppc_set_irq(cpu, PPC_INTERRUPT_PIT, 0);
1350      }
1351  }
1352  
1353  void store_40x_tcr(CPUPPCState *env, target_ulong val)
1354  {
1355      PowerPCCPU *cpu = env_archcpu(env);
1356      ppc_tb_t *tb_env;
1357  
1358      trace_ppc40x_store_tcr(val);
1359  
1360      tb_env = env->tb_env;
1361      env->spr[SPR_40x_TCR] = val & 0xFFC00000;
1362      start_stop_pit(env, tb_env, 1);
1363      cpu_4xx_wdt_cb(cpu);
1364  }
1365  
1366  static void ppc_40x_set_tb_clk (void *opaque, uint32_t freq)
1367  {
1368      CPUPPCState *env = opaque;
1369      ppc_tb_t *tb_env = env->tb_env;
1370  
1371      trace_ppc40x_set_tb_clk(freq);
1372      tb_env->tb_freq = freq;
1373      tb_env->decr_freq = freq;
1374      /* XXX: we should also update all timers */
1375  }
1376  
1377  clk_setup_cb ppc_40x_timers_init (CPUPPCState *env, uint32_t freq,
1378                                    unsigned int decr_excp)
1379  {
1380      ppc_tb_t *tb_env;
1381      ppc40x_timer_t *ppc40x_timer;
1382      PowerPCCPU *cpu = env_archcpu(env);
1383  
1384      trace_ppc40x_timers_init(freq);
1385  
1386      tb_env = g_new0(ppc_tb_t, 1);
1387      ppc40x_timer = g_new0(ppc40x_timer_t, 1);
1388  
1389      env->tb_env = tb_env;
1390      tb_env->flags = PPC_DECR_UNDERFLOW_TRIGGERED;
1391      tb_env->tb_freq = freq;
1392      tb_env->decr_freq = freq;
1393      tb_env->opaque = ppc40x_timer;
1394  
1395      /* We use decr timer for PIT */
1396      tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_pit_cb, cpu);
1397      ppc40x_timer->fit_timer =
1398          timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_fit_cb, cpu);
1399      ppc40x_timer->wdt_timer =
1400          timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_wdt_cb, cpu);
1401      ppc40x_timer->decr_excp = decr_excp;
1402  
1403      return &ppc_40x_set_tb_clk;
1404  }
1405  
1406  /*****************************************************************************/
1407  /* Embedded PowerPC Device Control Registers */
1408  typedef struct ppc_dcrn_t ppc_dcrn_t;
1409  struct ppc_dcrn_t {
1410      dcr_read_cb dcr_read;
1411      dcr_write_cb dcr_write;
1412      void *opaque;
1413  };
1414  
1415  /* XXX: on 460, DCR addresses are 32 bits wide,
1416   *      using DCRIPR to get the 22 upper bits of the DCR address
1417   */
1418  #define DCRN_NB 1024
1419  struct ppc_dcr_t {
1420      ppc_dcrn_t dcrn[DCRN_NB];
1421      int (*read_error)(int dcrn);
1422      int (*write_error)(int dcrn);
1423  };
1424  
1425  int ppc_dcr_read (ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp)
1426  {
1427      ppc_dcrn_t *dcr;
1428  
1429      if (dcrn < 0 || dcrn >= DCRN_NB)
1430          goto error;
1431      dcr = &dcr_env->dcrn[dcrn];
1432      if (dcr->dcr_read == NULL)
1433          goto error;
1434      *valp = (*dcr->dcr_read)(dcr->opaque, dcrn);
1435      trace_ppc_dcr_read(dcrn, *valp);
1436  
1437      return 0;
1438  
1439   error:
1440      if (dcr_env->read_error != NULL)
1441          return (*dcr_env->read_error)(dcrn);
1442  
1443      return -1;
1444  }
1445  
1446  int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, uint32_t val)
1447  {
1448      ppc_dcrn_t *dcr;
1449  
1450      if (dcrn < 0 || dcrn >= DCRN_NB)
1451          goto error;
1452      dcr = &dcr_env->dcrn[dcrn];
1453      if (dcr->dcr_write == NULL)
1454          goto error;
1455      trace_ppc_dcr_write(dcrn, val);
1456      (*dcr->dcr_write)(dcr->opaque, dcrn, val);
1457  
1458      return 0;
1459  
1460   error:
1461      if (dcr_env->write_error != NULL)
1462          return (*dcr_env->write_error)(dcrn);
1463  
1464      return -1;
1465  }
1466  
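/*
 * Register handlers for one DCR number.  Fails (returns -1) if the DCR
 * environment has not been initialised, the number is out of range, or
 * handlers are already installed for that DCR.
 */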
1467  int ppc_dcr_register (CPUPPCState *env, int dcrn, void *opaque,
1468                        dcr_read_cb dcr_read, dcr_write_cb dcr_write)
1469  {
1470      ppc_dcr_t *dcr_env;
1471      ppc_dcrn_t *dcr;
1472  
1473      dcr_env = env->dcr_env;
1474      if (dcr_env == NULL)
1475          return -1;
1476      if (dcrn < 0 || dcrn >= DCRN_NB)
1477          return -1;
1478      dcr = &dcr_env->dcrn[dcrn];
1479      if (dcr->opaque != NULL ||
1480          dcr->dcr_read != NULL ||
1481          dcr->dcr_write != NULL)
1482          return -1;
1483      dcr->opaque = opaque;
1484      dcr->dcr_read = dcr_read;
1485      dcr->dcr_write = dcr_write;
1486  
1487      return 0;
1488  }
1489  
1490  int ppc_dcr_init (CPUPPCState *env, int (*read_error)(int dcrn),
1491                    int (*write_error)(int dcrn))
1492  {
1493      ppc_dcr_t *dcr_env;
1494  
1495      dcr_env = g_new0(ppc_dcr_t, 1);
1496      dcr_env->read_error = read_error;
1497      dcr_env->write_error = write_error;
1498      env->dcr_env = dcr_env;
1499  
1500      return 0;
1501  }
1502  
1503  /*****************************************************************************/
1504  
1505  int ppc_cpu_pir(PowerPCCPU *cpu)
1506  {
1507      CPUPPCState *env = &cpu->env;
1508      return env->spr_cb[SPR_PIR].default_value;
1509  }
1510  
1511  int ppc_cpu_tir(PowerPCCPU *cpu)
1512  {
1513      CPUPPCState *env = &cpu->env;
1514      return env->spr_cb[SPR_TIR].default_value;
1515  }
1516  
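/* Linear scan over all vCPUs; returns NULL when no CPU matches the PIR */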
1517  PowerPCCPU *ppc_get_vcpu_by_pir(int pir)
1518  {
1519      CPUState *cs;
1520  
1521      CPU_FOREACH(cs) {
1522          PowerPCCPU *cpu = POWERPC_CPU(cs);
1523  
1524          if (ppc_cpu_pir(cpu) == pir) {
1525              return cpu;
1526          }
1527      }
1528  
1529      return NULL;
1530  }
1531  
1532  void ppc_irq_reset(PowerPCCPU *cpu)
1533  {
1534      CPUPPCState *env = &cpu->env;
1535  
1536      env->irq_input_state = 0;
1537      if (kvm_enabled()) {
1538          kvmppc_set_interrupt(cpu, PPC_INTERRUPT_EXT, 0);
1539      }
1540  }
1541