1 /* 2 * QEMU generic PowerPC hardware System Emulator 3 * 4 * Copyright (c) 2003-2007 Jocelyn Mayer 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a copy 7 * of this software and associated documentation files (the "Software"), to deal 8 * in the Software without restriction, including without limitation the rights 9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 * copies of the Software, and to permit persons to whom the Software is 11 * furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 * THE SOFTWARE. 23 */ 24 #include "qemu/osdep.h" 25 #include "qemu-common.h" 26 #include "cpu.h" 27 #include "hw/hw.h" 28 #include "hw/ppc/ppc.h" 29 #include "hw/ppc/ppc_e500.h" 30 #include "qemu/timer.h" 31 #include "sysemu/sysemu.h" 32 #include "sysemu/cpus.h" 33 #include "hw/timer/m48t59.h" 34 #include "qemu/log.h" 35 #include "qemu/error-report.h" 36 #include "hw/loader.h" 37 #include "sysemu/kvm.h" 38 #include "kvm_ppc.h" 39 #include "trace.h" 40 41 //#define PPC_DEBUG_IRQ 42 //#define PPC_DEBUG_TB 43 44 #ifdef PPC_DEBUG_IRQ 45 # define LOG_IRQ(...) qemu_log_mask(CPU_LOG_INT, ## __VA_ARGS__) 46 #else 47 # define LOG_IRQ(...) do { } while (0) 48 #endif 49 50 51 #ifdef PPC_DEBUG_TB 52 # define LOG_TB(...) qemu_log(__VA_ARGS__) 53 #else 54 # define LOG_TB(...) 
do { } while (0) 55 #endif 56 57 static void cpu_ppc_tb_stop (CPUPPCState *env); 58 static void cpu_ppc_tb_start (CPUPPCState *env); 59 60 void ppc_set_irq(PowerPCCPU *cpu, int n_IRQ, int level) 61 { 62 CPUState *cs = CPU(cpu); 63 CPUPPCState *env = &cpu->env; 64 unsigned int old_pending; 65 bool locked = false; 66 67 /* We may already have the BQL if coming from the reset path */ 68 if (!qemu_mutex_iothread_locked()) { 69 locked = true; 70 qemu_mutex_lock_iothread(); 71 } 72 73 old_pending = env->pending_interrupts; 74 75 if (level) { 76 env->pending_interrupts |= 1 << n_IRQ; 77 cpu_interrupt(cs, CPU_INTERRUPT_HARD); 78 } else { 79 env->pending_interrupts &= ~(1 << n_IRQ); 80 if (env->pending_interrupts == 0) { 81 cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); 82 } 83 } 84 85 if (old_pending != env->pending_interrupts) { 86 #ifdef CONFIG_KVM 87 kvmppc_set_interrupt(cpu, n_IRQ, level); 88 #endif 89 } 90 91 92 LOG_IRQ("%s: %p n_IRQ %d level %d => pending %08" PRIx32 93 "req %08x\n", __func__, env, n_IRQ, level, 94 env->pending_interrupts, CPU(cpu)->interrupt_request); 95 96 if (locked) { 97 qemu_mutex_unlock_iothread(); 98 } 99 } 100 101 /* PowerPC 6xx / 7xx internal IRQ controller */ 102 static void ppc6xx_set_irq(void *opaque, int pin, int level) 103 { 104 PowerPCCPU *cpu = opaque; 105 CPUPPCState *env = &cpu->env; 106 int cur_level; 107 108 LOG_IRQ("%s: env %p pin %d level %d\n", __func__, 109 env, pin, level); 110 cur_level = (env->irq_input_state >> pin) & 1; 111 /* Don't generate spurious events */ 112 if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) { 113 CPUState *cs = CPU(cpu); 114 115 switch (pin) { 116 case PPC6xx_INPUT_TBEN: 117 /* Level sensitive - active high */ 118 LOG_IRQ("%s: %s the time base\n", 119 __func__, level ? 
"start" : "stop"); 120 if (level) { 121 cpu_ppc_tb_start(env); 122 } else { 123 cpu_ppc_tb_stop(env); 124 } 125 case PPC6xx_INPUT_INT: 126 /* Level sensitive - active high */ 127 LOG_IRQ("%s: set the external IRQ state to %d\n", 128 __func__, level); 129 ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level); 130 break; 131 case PPC6xx_INPUT_SMI: 132 /* Level sensitive - active high */ 133 LOG_IRQ("%s: set the SMI IRQ state to %d\n", 134 __func__, level); 135 ppc_set_irq(cpu, PPC_INTERRUPT_SMI, level); 136 break; 137 case PPC6xx_INPUT_MCP: 138 /* Negative edge sensitive */ 139 /* XXX: TODO: actual reaction may depends on HID0 status 140 * 603/604/740/750: check HID0[EMCP] 141 */ 142 if (cur_level == 1 && level == 0) { 143 LOG_IRQ("%s: raise machine check state\n", 144 __func__); 145 ppc_set_irq(cpu, PPC_INTERRUPT_MCK, 1); 146 } 147 break; 148 case PPC6xx_INPUT_CKSTP_IN: 149 /* Level sensitive - active low */ 150 /* XXX: TODO: relay the signal to CKSTP_OUT pin */ 151 /* XXX: Note that the only way to restart the CPU is to reset it */ 152 if (level) { 153 LOG_IRQ("%s: stop the CPU\n", __func__); 154 cs->halted = 1; 155 } 156 break; 157 case PPC6xx_INPUT_HRESET: 158 /* Level sensitive - active low */ 159 if (level) { 160 LOG_IRQ("%s: reset the CPU\n", __func__); 161 cpu_interrupt(cs, CPU_INTERRUPT_RESET); 162 } 163 break; 164 case PPC6xx_INPUT_SRESET: 165 LOG_IRQ("%s: set the RESET IRQ state to %d\n", 166 __func__, level); 167 ppc_set_irq(cpu, PPC_INTERRUPT_RESET, level); 168 break; 169 default: 170 /* Unknown pin - do nothing */ 171 LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin); 172 return; 173 } 174 if (level) 175 env->irq_input_state |= 1 << pin; 176 else 177 env->irq_input_state &= ~(1 << pin); 178 } 179 } 180 181 void ppc6xx_irq_init(PowerPCCPU *cpu) 182 { 183 CPUPPCState *env = &cpu->env; 184 185 env->irq_inputs = (void **)qemu_allocate_irqs(&ppc6xx_set_irq, cpu, 186 PPC6xx_INPUT_NB); 187 } 188 189 #if defined(TARGET_PPC64) 190 /* PowerPC 970 internal IRQ controller */ 
/*
 * Input-pin handler for the 970's internal IRQ controller.  Mirrors
 * ppc6xx_set_irq: edge-filters on irq_input_state, then maps each pin
 * to the matching PPC_INTERRUPT_* source or CPU action.
 */
static void ppc970_set_irq(void *opaque, int pin, int level)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int cur_level;

    LOG_IRQ("%s: env %p pin %d level %d\n", __func__,
            env, pin, level);
    cur_level = (env->irq_input_state >> pin) & 1;
    /* Don't generate spurious events */
    if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
        CPUState *cs = CPU(cpu);

        switch (pin) {
        case PPC970_INPUT_INT:
            /* Level sensitive - active high */
            LOG_IRQ("%s: set the external IRQ state to %d\n",
                    __func__, level);
            ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
            break;
        case PPC970_INPUT_THINT:
            /* Level sensitive - active high */
            LOG_IRQ("%s: set the SMI IRQ state to %d\n", __func__,
                    level);
            ppc_set_irq(cpu, PPC_INTERRUPT_THERM, level);
            break;
        case PPC970_INPUT_MCP:
            /* Negative edge sensitive */
            /* XXX: TODO: actual reaction may depends on HID0 status
             * 603/604/740/750: check HID0[EMCP]
             */
            if (cur_level == 1 && level == 0) {
                LOG_IRQ("%s: raise machine check state\n",
                        __func__);
                ppc_set_irq(cpu, PPC_INTERRUPT_MCK, 1);
            }
            break;
        case PPC970_INPUT_CKSTP:
            /* Level sensitive - active low */
            /* XXX: TODO: relay the signal to CKSTP_OUT pin */
            if (level) {
                LOG_IRQ("%s: stop the CPU\n", __func__);
                cs->halted = 1;
            } else {
                LOG_IRQ("%s: restart the CPU\n", __func__);
                cs->halted = 0;
                qemu_cpu_kick(cs);
            }
            break;
        case PPC970_INPUT_HRESET:
            /* Level sensitive - active low */
            if (level) {
                cpu_interrupt(cs, CPU_INTERRUPT_RESET);
            }
            break;
        case PPC970_INPUT_SRESET:
            LOG_IRQ("%s: set the RESET IRQ state to %d\n",
                    __func__, level);
            ppc_set_irq(cpu, PPC_INTERRUPT_RESET, level);
            break;
        case PPC970_INPUT_TBEN:
            LOG_IRQ("%s: set the TBEN state to %d\n", __func__,
                    level);
            /* XXX: TODO */
            break;
        default:
            /* Unknown pin - do nothing */
            LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin);
            return;
        }
        /* Record the new pin level so edge detection works next time */
        if (level)
            env->irq_input_state |= 1 << pin;
        else
            env->irq_input_state &= ~(1 << pin);
    }
}

/* Wire up the 970 input pins for this CPU */
void ppc970_irq_init(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;

    env->irq_inputs = (void **)qemu_allocate_irqs(&ppc970_set_irq, cpu,
                                                  PPC970_INPUT_NB);
}

/* POWER7 internal IRQ controller */
static void power7_set_irq(void *opaque, int pin, int level)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;

    LOG_IRQ("%s: env %p pin %d level %d\n", __func__,
            env, pin, level);

    /* POWER7 only exposes a single external interrupt input */
    switch (pin) {
    case POWER7_INPUT_INT:
        /* Level sensitive - active high */
        LOG_IRQ("%s: set the external IRQ state to %d\n",
                __func__, level);
        ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
        break;
    default:
        /* Unknown pin - do nothing */
        LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin);
        return;
    }
    if (level) {
        env->irq_input_state |= 1 << pin;
    } else {
        env->irq_input_state &= ~(1 << pin);
    }
}

/* Wire up the POWER7 input pins for this CPU */
void ppcPOWER7_irq_init(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;

    env->irq_inputs = (void **)qemu_allocate_irqs(&power7_set_irq, cpu,
                                                  POWER7_INPUT_NB);
}
#endif /* defined(TARGET_PPC64) */

/*
 * 40x core reset: interrupt the CPU with a reset request and record
 * "core reset" (0b01) in the DBSR MRR field (bits 0x300).
 */
void ppc40x_core_reset(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    target_ulong dbsr;

    qemu_log_mask(CPU_LOG_RESET, "Reset PowerPC core\n");
    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_RESET);
    dbsr = env->spr[SPR_40x_DBSR];
    dbsr &= ~0x00000300;
    dbsr |= 0x00000100;
    env->spr[SPR_40x_DBSR] = dbsr;
}

/*
 * 40x chip reset: like a core reset but records "chip reset" (0b10)
 * in DBSR[MRR]; peripheral reset is still a TODO below.
 */
void ppc40x_chip_reset(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    target_ulong dbsr;

    qemu_log_mask(CPU_LOG_RESET, "Reset PowerPC chip\n");
    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_RESET);
    /* XXX: TODO reset all internal peripherals */
    dbsr = env->spr[SPR_40x_DBSR];
    /* Record "chip reset" (0b10) in the DBSR MRR field */
    dbsr &= ~0x00000300;
    dbsr |= 0x00000200;
    env->spr[SPR_40x_DBSR] = dbsr;
}

/* 40x system reset: request a full machine reset from the main loop */
void ppc40x_system_reset(PowerPCCPU *cpu)
{
    qemu_log_mask(CPU_LOG_RESET, "Reset PowerPC system\n");
    qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
}

/*
 * DBCR0 write handler: bits 31:28 select the reset action
 * (none / core / chip / system).
 */
void store_40x_dbcr0(CPUPPCState *env, uint32_t val)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);

    switch ((val >> 28) & 0x3) {
    case 0x0:
        /* No action */
        break;
    case 0x1:
        /* Core reset */
        ppc40x_core_reset(cpu);
        break;
    case 0x2:
        /* Chip reset */
        ppc40x_chip_reset(cpu);
        break;
    case 0x3:
        /* System reset */
        ppc40x_system_reset(cpu);
        break;
    }
}

/* PowerPC 40x internal IRQ controller */
static void ppc40x_set_irq(void *opaque, int pin, int level)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int cur_level;

    LOG_IRQ("%s: env %p pin %d level %d\n", __func__,
            env, pin, level);
    cur_level = (env->irq_input_state >> pin) & 1;
    /* Don't generate spurious events */
    if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
        CPUState *cs = CPU(cpu);

        switch (pin) {
        case PPC40x_INPUT_RESET_SYS:
            if (level) {
                LOG_IRQ("%s: reset the PowerPC system\n",
                        __func__);
                ppc40x_system_reset(cpu);
            }
            break;
        case PPC40x_INPUT_RESET_CHIP:
            if (level) {
                LOG_IRQ("%s: reset the PowerPC chip\n", __func__);
                ppc40x_chip_reset(cpu);
            }
            break;
        case PPC40x_INPUT_RESET_CORE:
            /* XXX: TODO: update DBSR[MRR] */
            if (level) {
                LOG_IRQ("%s: reset the PowerPC core\n", __func__);
                ppc40x_core_reset(cpu);
            }
            break;
        case PPC40x_INPUT_CINT:
            /* Level sensitive - active high */
            LOG_IRQ("%s: set the critical IRQ state to %d\n",
                    __func__, level);
            ppc_set_irq(cpu, PPC_INTERRUPT_CEXT, level);
            break;
        case PPC40x_INPUT_INT:
            /* Level sensitive - active high */
            LOG_IRQ("%s: set the external IRQ state to %d\n",
                    __func__, level);
            ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
            break;
        case PPC40x_INPUT_HALT:
            /* Level sensitive - active low */
            if (level) {
                LOG_IRQ("%s: stop the CPU\n", __func__);
                cs->halted = 1;
            } else {
                LOG_IRQ("%s: restart the CPU\n", __func__);
                cs->halted = 0;
                qemu_cpu_kick(cs);
            }
            break;
        case PPC40x_INPUT_DEBUG:
            /* Level sensitive - active high */
            LOG_IRQ("%s: set the debug pin state to %d\n",
                    __func__, level);
            ppc_set_irq(cpu, PPC_INTERRUPT_DEBUG, level);
            break;
        default:
            /* Unknown pin - do nothing */
            LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin);
            return;
        }
        /* Record the new pin level so edge detection works next time */
        if (level)
            env->irq_input_state |= 1 << pin;
        else
            env->irq_input_state &= ~(1 << pin);
    }
}

/* Wire up the 40x input pins for this CPU */
void ppc40x_irq_init(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;

    env->irq_inputs = (void **)qemu_allocate_irqs(&ppc40x_set_irq,
                                                  cpu, PPC40x_INPUT_NB);
}

/* PowerPC E500 internal IRQ controller */
static void ppce500_set_irq(void *opaque, int pin, int level)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int cur_level;

    LOG_IRQ("%s: env %p pin %d level %d\n", __func__,
            env, pin, level);
    cur_level = (env->irq_input_state >> pin) & 1;
    /* Don't generate spurious events */
    if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
        switch (pin) {
        case PPCE500_INPUT_MCK:
            if (level) {
                LOG_IRQ("%s: reset the PowerPC system\n",
                        __func__);
                qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
            }
            break;
        case PPCE500_INPUT_RESET_CORE:
            if (level) {
                LOG_IRQ("%s: reset the PowerPC core\n", __func__);
                ppc_set_irq(cpu, PPC_INTERRUPT_MCK, level);
            }
            break;
        case PPCE500_INPUT_CINT:
            /* Level sensitive - active high */
            LOG_IRQ("%s: set the critical IRQ state to %d\n",
                    __func__, level);
            ppc_set_irq(cpu, PPC_INTERRUPT_CEXT, level);
            break;
        case PPCE500_INPUT_INT:
            /* Level sensitive - active high */
            LOG_IRQ("%s: set the core IRQ state to %d\n",
                    __func__, level);
            ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
            break;
        case PPCE500_INPUT_DEBUG:
            /* Level sensitive - active high */
            LOG_IRQ("%s: set the debug pin state to %d\n",
                    __func__, level);
            ppc_set_irq(cpu, PPC_INTERRUPT_DEBUG, level);
            break;
        default:
            /* Unknown pin - do nothing */
            LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin);
            return;
        }
        /* Record the new pin level so edge detection works next time */
        if (level)
            env->irq_input_state |= 1 << pin;
        else
            env->irq_input_state &= ~(1 << pin);
    }
}

/* Wire up the E500 input pins for this CPU */
void ppce500_irq_init(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;

    env->irq_inputs = (void **)qemu_allocate_irqs(&ppce500_set_irq,
                                                  cpu, PPCE500_INPUT_NB);
}

/* Enable or Disable the E500 EPR capability */
void ppce500_set_mpic_proxy(bool enabled)
{
    CPUState *cs;

    /* Applied to every vCPU; mirrored into KVM when it is in use */
    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        cpu->env.mpic_proxy = enabled;
        if (kvm_enabled()) {
            kvmppc_set_mpic_proxy(cpu, enabled);
        }
    }
}

/*****************************************************************************/
/* PowerPC time base and decrementer emulation */

/*
 * Convert a virtual-clock timestamp (ns) into time base ticks and apply
 * the per-CPU offset.
 */
uint64_t cpu_ppc_get_tb(ppc_tb_t *tb_env, uint64_t vmclk, int64_t tb_offset)
{
    /* TB time in tb periods */
    return muldiv64(vmclk, tb_env->tb_freq, NANOSECONDS_PER_SECOND) + tb_offset;
}

/* Read the full 64-bit time base (SPR TBL read returns the low word) */
uint64_t cpu_ppc_load_tbl (CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t tb;

    /* Under KVM the kernel maintains the TB; use the synced SPR value */
    if (kvm_enabled()) {
        return env->spr[SPR_TBL];
    }

    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
    LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb);

    return tb;
}

/* TCG-only helper: upper 32 bits of the time base */
static inline uint32_t _cpu_ppc_load_tbu(CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t tb;

    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
    LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb);

    return tb >> 32;
}

/* Read the upper 32 bits of the time base (SPR TBU) */
uint32_t cpu_ppc_load_tbu (CPUPPCState *env)
{
    if (kvm_enabled()) {
        return env->spr[SPR_TBU];
    }

    return _cpu_ppc_load_tbu(env);
}

/*
 * Make the time base read `value` at time `vmclk` by recomputing the
 * stored offset (the TB itself is always derived from the virtual clock).
 */
static inline void cpu_ppc_store_tb(ppc_tb_t *tb_env, uint64_t vmclk,
                                    int64_t *tb_offsetp, uint64_t value)
{
    *tb_offsetp = value -
        muldiv64(vmclk, tb_env->tb_freq, NANOSECONDS_PER_SECOND);

    LOG_TB("%s: tb %016" PRIx64 " offset %08" PRIx64 "\n",
           __func__, value, *tb_offsetp);
}

/* Write the low 32 bits of the time base, preserving the high word */
void cpu_ppc_store_tbl (CPUPPCState *env, uint32_t value)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t tb;

    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
    tb &= 0xFFFFFFFF00000000ULL;
    cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                     &tb_env->tb_offset, tb | (uint64_t)value);
}

/* Write the high 32 bits of the time base, preserving the low word */
static inline void _cpu_ppc_store_tbu(CPUPPCState *env, uint32_t value)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t tb;

    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
    tb &= 0x00000000FFFFFFFFULL;
    cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                     &tb_env->tb_offset, ((uint64_t)value << 32) | tb);
}

void cpu_ppc_store_tbu (CPUPPCState *env, uint32_t value)
{
    _cpu_ppc_store_tbu(env, value);
}

/* Read the alternate time base (ATB) - same mechanism, separate offset */
uint64_t cpu_ppc_load_atbl (CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t tb;

    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
    LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb);

    return tb;
}

/* Read the upper 32 bits of the alternate time base */
uint32_t cpu_ppc_load_atbu (CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t tb;

    tb =
cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
    LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb);

    return tb >> 32;
}

/* Write the low 32 bits of the alternate time base */
void cpu_ppc_store_atbl (CPUPPCState *env, uint32_t value)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t tb;

    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
    tb &= 0xFFFFFFFF00000000ULL;
    cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                     &tb_env->atb_offset, tb | (uint64_t)value);
}

/* Write the high 32 bits of the alternate time base */
void cpu_ppc_store_atbu (CPUPPCState *env, uint32_t value)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t tb;

    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
    tb &= 0x00000000FFFFFFFFULL;
    cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                     &tb_env->atb_offset, ((uint64_t)value << 32) | tb);
}

/*
 * Freeze the time base: latch the current TB/ATB values into the offsets
 * and set tb_freq to 0, which makes cpu_ppc_get_tb() return the offsets
 * unchanged from then on.
 */
static void cpu_ppc_tb_stop (CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t tb, atb, vmclk;

    /* If the time base is already frozen, do nothing */
    if (tb_env->tb_freq != 0) {
        vmclk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        /* Get the time base */
        tb = cpu_ppc_get_tb(tb_env, vmclk, tb_env->tb_offset);
        /* Get the alternate time base */
        atb = cpu_ppc_get_tb(tb_env, vmclk, tb_env->atb_offset);
        /* Store the time base value (ie compute the current offset) */
        cpu_ppc_store_tb(tb_env, vmclk, &tb_env->tb_offset, tb);
        /* Store the alternate time base value (compute the current offset) */
        cpu_ppc_store_tb(tb_env, vmclk, &tb_env->atb_offset, atb);
        /* Set the time base frequency to zero */
        tb_env->tb_freq = 0;
        /* Now, the time bases are frozen to tb_offset / atb_offset value */
    }
}

/*
 * Unfreeze the time base: restore tb_freq from decr_freq and rebase the
 * offsets so the TB/ATB resume from their frozen values.
 */
static void cpu_ppc_tb_start (CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t tb, atb, vmclk;

    /* If the time base is not frozen, do nothing */
    if (tb_env->tb_freq == 0) {
        vmclk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        /* Get the time base from tb_offset */
        tb = tb_env->tb_offset;
        /* Get the alternate time base from atb_offset */
        atb = tb_env->atb_offset;
        /* Restore the tb frequency from the decrementer frequency */
        tb_env->tb_freq = tb_env->decr_freq;
        /* Store the time base value */
        cpu_ppc_store_tb(tb_env, vmclk, &tb_env->tb_offset, tb);
        /* Store the alternate time base value */
        cpu_ppc_store_tb(tb_env, vmclk, &tb_env->atb_offset, atb);
    }
}

/*
 * True when the DEC interrupt is edge-triggered only (underflow event)
 * and must be cleared on delivery rather than following the DEC MSB.
 */
bool ppc_decr_clear_on_delivery(CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;
    int flags = PPC_DECR_UNDERFLOW_TRIGGERED | PPC_DECR_UNDERFLOW_LEVEL;
    return ((tb_env->flags & flags) == PPC_DECR_UNDERFLOW_TRIGGERED);
}

/*
 * Derive the current decrementer value from the time remaining until
 * `next` (ns deadline).  BookE decrementers saturate at 0; others go
 * negative (the value wraps through the MSB).
 */
static inline uint32_t _cpu_ppc_load_decr(CPUPPCState *env, uint64_t next)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint32_t decr;
    int64_t diff;

    diff = next - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    if (diff >= 0) {
        decr = muldiv64(diff, tb_env->decr_freq, NANOSECONDS_PER_SECOND);
    } else if (tb_env->flags & PPC_TIMER_BOOKE) {
        decr = 0;
    } else {
        decr = -muldiv64(-diff, tb_env->decr_freq, NANOSECONDS_PER_SECOND);
    }
    LOG_TB("%s: %08" PRIx32 "\n", __func__, decr);

    return decr;
}

/* Read the DEC SPR (KVM keeps it in the synced SPR array) */
uint32_t cpu_ppc_load_decr (CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;

    if (kvm_enabled()) {
        return env->spr[SPR_DECR];
    }

    return _cpu_ppc_load_decr(env, tb_env->decr_next);
}

/* Read the hypervisor decrementer */
uint32_t cpu_ppc_load_hdecr (CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;

    return _cpu_ppc_load_decr(env, tb_env->hdecr_next);
}

/* PURR: base value plus ticks elapsed since it was last stored */
uint64_t cpu_ppc_load_purr (CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t diff;

    diff = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - tb_env->purr_start;

    return tb_env->purr_load +
        muldiv64(diff,
tb_env->tb_freq, NANOSECONDS_PER_SECOND);
}

/* When decrementer expires,
 * all we need to do is generate or queue a CPU exception
 */
static inline void cpu_ppc_decr_excp(PowerPCCPU *cpu)
{
    /* Raise it */
    LOG_TB("raise decrementer exception\n");
    ppc_set_irq(cpu, PPC_INTERRUPT_DECR, 1);
}

/* Deassert the decrementer interrupt source */
static inline void cpu_ppc_decr_lower(PowerPCCPU *cpu)
{
    ppc_set_irq(cpu, PPC_INTERRUPT_DECR, 0);
}

static inline void cpu_ppc_hdecr_excp(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;

    /* Raise it */
    LOG_TB("raise hv decrementer exception\n");

    /* The architecture specifies that we don't deliver HDEC
     * interrupts in a PM state. Not only they don't cause a
     * wakeup but they also get effectively discarded.
     */
    if (!env->in_pm_state) {
        ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 1);
    }
}

/* Deassert the hypervisor decrementer interrupt source */
static inline void cpu_ppc_hdecr_lower(PowerPCCPU *cpu)
{
    ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 0);
}

/*
 * Common DEC/HDEC store path: decide whether the new value immediately
 * raises (or lowers) the exception, otherwise program `timer` for the
 * moment the counter will underflow.  `decr` is the old register value,
 * `value` the one being written; `nextp` receives the new ns deadline.
 */
static void __cpu_ppc_store_decr(PowerPCCPU *cpu, uint64_t *nextp,
                                 QEMUTimer *timer,
                                 void (*raise_excp)(void *),
                                 void (*lower_excp)(PowerPCCPU *),
                                 uint32_t decr, uint32_t value)
{
    CPUPPCState *env = &cpu->env;
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t now, next;

    LOG_TB("%s: %08" PRIx32 " => %08" PRIx32 "\n", __func__,
           decr, value);

    if (kvm_enabled()) {
        /* KVM handles decrementer exceptions, we don't need our own timer */
        return;
    }

    /*
     * Going from 2 -> 1, 1 -> 0 or 0 -> -1 is the event to generate a DEC
     * interrupt.
     *
     * If we get a really small DEC value, we can assume that by the time we
     * handled it we should inject an interrupt already.
     *
     * On MSB level based DEC implementations the MSB always means the interrupt
     * is pending, so raise it on those.
     *
     * On MSB edge based DEC implementations the MSB going from 0 -> 1 triggers
     * an edge interrupt, so raise it here too.
     */
    if ((value < 3) ||
        ((tb_env->flags & PPC_DECR_UNDERFLOW_LEVEL) && (value & 0x80000000)) ||
        ((tb_env->flags & PPC_DECR_UNDERFLOW_TRIGGERED) && (value & 0x80000000)
          && !(decr & 0x80000000))) {
        (*raise_excp)(cpu);
        return;
    }

    /* On MSB level based systems a 0 for the MSB stops interrupt delivery */
    if (!(value & 0x80000000) && (tb_env->flags & PPC_DECR_UNDERFLOW_LEVEL)) {
        (*lower_excp)(cpu);
    }

    /* Calculate the next timer event */
    now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    next = now + muldiv64(value, NANOSECONDS_PER_SECOND, tb_env->decr_freq);
    *nextp = next;

    /* Adjust timer */
    timer_mod(timer, next);
}

static inline void _cpu_ppc_store_decr(PowerPCCPU *cpu, uint32_t decr,
                                       uint32_t value)
{
    ppc_tb_t *tb_env = cpu->env.tb_env;

    __cpu_ppc_store_decr(cpu, &tb_env->decr_next, tb_env->decr_timer,
                         tb_env->decr_timer->cb, &cpu_ppc_decr_lower, decr,
                         value);
}

/* SPR DECR write handler */
void cpu_ppc_store_decr (CPUPPCState *env, uint32_t value)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);

    _cpu_ppc_store_decr(cpu, cpu_ppc_load_decr(env), value);
}

/* QEMUTimer callback fired when the decrementer underflows */
static void cpu_ppc_decr_cb(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    cpu_ppc_decr_excp(cpu);
}

static inline void _cpu_ppc_store_hdecr(PowerPCCPU *cpu, uint32_t hdecr,
                                        uint32_t value)
{
    ppc_tb_t *tb_env = cpu->env.tb_env;

    /* hdecr_timer is only created for CPUs with an HV mode */
    if (tb_env->hdecr_timer != NULL) {
        __cpu_ppc_store_decr(cpu, &tb_env->hdecr_next, tb_env->hdecr_timer,
                             tb_env->hdecr_timer->cb, &cpu_ppc_hdecr_lower,
                             hdecr, value);
    }
}

/* SPR HDECR write handler */
void cpu_ppc_store_hdecr (CPUPPCState *env, uint32_t value)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);

    _cpu_ppc_store_hdecr(cpu, cpu_ppc_load_hdecr(env), value);
}

/* QEMUTimer callback fired when the hypervisor decrementer underflows */
static void
cpu_ppc_hdecr_cb(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    cpu_ppc_hdecr_excp(cpu);
}

/* Reset the PURR base value and restart its reference clock */
static void cpu_ppc_store_purr(PowerPCCPU *cpu, uint64_t value)
{
    ppc_tb_t *tb_env = cpu->env.tb_env;

    tb_env->purr_load = value;
    tb_env->purr_start = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
}

/*
 * clk_setup_cb: (re)program the time base / decrementer frequency and
 * reinitialise DEC, HDEC and PURR.
 */
static void cpu_ppc_set_tb_clk (void *opaque, uint32_t freq)
{
    CPUPPCState *env = opaque;
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    ppc_tb_t *tb_env = env->tb_env;

    tb_env->tb_freq = freq;
    tb_env->decr_freq = freq;
    /* There is a bug in Linux 2.4 kernels:
     * if a decrementer exception is pending when it enables msr_ee at startup,
     * it's not ready to handle it...
     */
    _cpu_ppc_store_decr(cpu, 0xFFFFFFFF, 0xFFFFFFFF);
    _cpu_ppc_store_hdecr(cpu, 0xFFFFFFFF, 0xFFFFFFFF);
    cpu_ppc_store_purr(cpu, 0x0000000000000000ULL);
}

/*
 * Snapshot the guest time base (host ticks + first CPU's tb_offset)
 * for migration.
 */
static void timebase_save(PPCTimebase *tb)
{
    uint64_t ticks = cpu_get_host_ticks();
    PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu);

    if (!first_ppc_cpu->env.tb_env) {
        error_report("No timebase object");
        return;
    }

    /* not used anymore, we keep it for compatibility */
    tb->time_of_the_day_ns = qemu_clock_get_ns(QEMU_CLOCK_HOST);
    /*
     * tb_offset is only expected to be changed by QEMU so
     * there is no need to update it from KVM here
     */
    tb->guest_timebase = ticks + first_ppc_cpu->env.tb_env->tb_offset;
}

/*
 * Restore the guest time base after migration / vm start: compute the
 * new tb_offset against the current host ticks and push it to every
 * vCPU (and to KVM when built in).
 */
static void timebase_load(PPCTimebase *tb)
{
    CPUState *cpu;
    PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu);
    int64_t tb_off_adj, tb_off;
    unsigned long freq;

    if (!first_ppc_cpu->env.tb_env) {
        error_report("No timebase object");
        return;
    }

    freq = first_ppc_cpu->env.tb_env->tb_freq;

    tb_off_adj = tb->guest_timebase - cpu_get_host_ticks();

    tb_off = first_ppc_cpu->env.tb_env->tb_offset;
    trace_ppc_tb_adjust(tb_off, tb_off_adj, tb_off_adj - tb_off,
                        (tb_off_adj - tb_off) / freq);

    /* Set new offset to all CPUs */
    CPU_FOREACH(cpu) {
        PowerPCCPU *pcpu = POWERPC_CPU(cpu);
        pcpu->env.tb_env->tb_offset = tb_off_adj;
#if defined(CONFIG_KVM)
        kvm_set_one_reg(cpu, KVM_REG_PPC_TB_OFFSET,
                        &pcpu->env.tb_env->tb_offset);
#endif
    }
}

/* VM state-change handler: save the TB on stop, restore it on resume */
void cpu_ppc_clock_vm_state_change(void *opaque, int running,
                                   RunState state)
{
    PPCTimebase *tb = opaque;

    if (running) {
        timebase_load(tb);
    } else {
        timebase_save(tb);
    }
}

/*
 * When migrating, read the clock just before migration,
 * so that the guest clock counts during the events
 * between:
 *
 *  * vm_stop()
 *  *
 *  * pre_save()
 *
 *  This reduces clock difference on migration from 5s
 *  to 0.1s (when max_downtime == 5s), because sending the
 *  final pages of memory (which happens between vm_stop()
 *  and pre_save()) takes max_downtime.
 */
static int timebase_pre_save(void *opaque)
{
    PPCTimebase *tb = opaque;

    timebase_save(tb);

    return 0;
}

const VMStateDescription vmstate_ppc_timebase = {
    .name = "timebase",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .pre_save = timebase_pre_save,
    .fields = (VMStateField []) {
        VMSTATE_UINT64(guest_timebase, PPCTimebase),
        VMSTATE_INT64(time_of_the_day_ns, PPCTimebase),
        VMSTATE_END_OF_LIST()
    },
};

/* Set up (once) timebase frequency (in Hz) */
clk_setup_cb cpu_ppc_tb_init (CPUPPCState *env, uint32_t freq)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    ppc_tb_t *tb_env;

    tb_env = g_malloc0(sizeof(ppc_tb_t));
    env->tb_env = tb_env;
    tb_env->flags = PPC_DECR_UNDERFLOW_TRIGGERED;
    if (env->insns_flags & PPC_SEGMENT_64B) {
        /* All Book3S 64bit CPUs implement level based DEC logic */
        tb_env->flags |= PPC_DECR_UNDERFLOW_LEVEL;
    }
    /* Create new timer */
    tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_ppc_decr_cb, cpu);
    if (env->has_hv_mode) {
        tb_env->hdecr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_ppc_hdecr_cb,
                                           cpu);
    } else {
        tb_env->hdecr_timer = NULL;
    }
    cpu_ppc_set_tb_clk(env, freq);

    /* Callers may use the returned callback to change the frequency later */
    return &cpu_ppc_set_tb_clk;
}

/* Specific helpers for POWER & PowerPC 601 RTC */
void cpu_ppc601_store_rtcu (CPUPPCState *env, uint32_t value)
{
    _cpu_ppc_store_tbu(env, value);
}

uint32_t cpu_ppc601_load_rtcu (CPUPPCState *env)
{
    return _cpu_ppc_load_tbu(env);
}

/* RTCL only implements bits 7..25 - mask matches that field */
void cpu_ppc601_store_rtcl (CPUPPCState *env, uint32_t value)
{
    cpu_ppc_store_tbl(env, value & 0x3FFFFF80);
}

uint32_t cpu_ppc601_load_rtcl (CPUPPCState *env)
{
    return cpu_ppc_load_tbl(env) & 0x3FFFFF80;
}

/*****************************************************************************/
/* PowerPC 40x timers */

/* PIT, FIT & WDT */
typedef struct ppc40x_timer_t ppc40x_timer_t;
struct ppc40x_timer_t {
    uint64_t pit_reload;  /* PIT auto-reload value        */
    uint64_t fit_next;    /* Tick for next FIT interrupt  */
    QEMUTimer *fit_timer;
    uint64_t wdt_next;    /* Tick for next WDT interrupt  */
    QEMUTimer *wdt_timer;

    /* 405 have the PIT, 440 have a DECR.  */
    unsigned int decr_excp;   /* PPC_INTERRUPT_* raised when the PIT fires */
};

/*
 * Fixed interval timer callback: rearm for the period selected by
 * TCR[25:24] (2^9..2^21 TB ticks), set TSR[FIS] and raise the FIT
 * interrupt if TCR[FIE] is set.
 */
static void cpu_4xx_fit_cb (void *opaque)
{
    PowerPCCPU *cpu;
    CPUPPCState *env;
    ppc_tb_t *tb_env;
    ppc40x_timer_t *ppc40x_timer;
    uint64_t now, next;

    env = opaque;
    cpu = ppc_env_get_cpu(env);
    tb_env = env->tb_env;
    ppc40x_timer = tb_env->opaque;
    now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    switch ((env->spr[SPR_40x_TCR] >> 24) & 0x3) {
    case 0:
        next = 1 << 9;
        break;
    case 1:
        next = 1 << 13;
        break;
    case 2:
        next = 1 << 17;
        break;
    case 3:
        next = 1 << 21;
        break;
    default:
        /* Cannot occur, but makes gcc happy */
        return;
    }
    next = now + muldiv64(next, NANOSECONDS_PER_SECOND, tb_env->tb_freq);
    /* Guarantee the deadline is strictly in the future */
    if (next == now)
        next++;
    timer_mod(ppc40x_timer->fit_timer, next);
    env->spr[SPR_40x_TSR] |= 1 << 26;
    if ((env->spr[SPR_40x_TCR] >> 23) & 0x1) {
        ppc_set_irq(cpu, PPC_INTERRUPT_FIT, 1);
    }
    LOG_TB("%s: ir %d TCR " TARGET_FMT_lx " TSR " TARGET_FMT_lx "\n", __func__,
           (int)((env->spr[SPR_40x_TCR] >> 23) & 0x1),
           env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR]);
}

/* Programmable interval timer */
/*
 * (Re)arm or stop the PIT depending on the reload value and the
 * TCR enable/auto-reload bits; is_excp marks a rearm from the PIT
 * interrupt path.
 */
static void start_stop_pit (CPUPPCState *env, ppc_tb_t *tb_env, int is_excp)
{
    ppc40x_timer_t *ppc40x_timer;
    uint64_t now, next;

    ppc40x_timer = tb_env->opaque;
    if (ppc40x_timer->pit_reload <= 1 ||
        !((env->spr[SPR_40x_TCR] >> 26) & 0x1) ||
        (is_excp && !((env->spr[SPR_40x_TCR] >> 22) & 0x1))) {
        /* Stop PIT */
        LOG_TB("%s: stop PIT\n", __func__);
        timer_del(tb_env->decr_timer);
    } else {
        LOG_TB("%s: start PIT %016" PRIx64 "\n",
               __func__, ppc40x_timer->pit_reload);
        now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        next = now + muldiv64(ppc40x_timer->pit_reload,
                              NANOSECONDS_PER_SECOND, tb_env->decr_freq);
        if (is_excp)
            next +=
tb_env->decr_next - now; 1145 if (next == now) 1146 next++; 1147 timer_mod(tb_env->decr_timer, next); 1148 tb_env->decr_next = next; 1149 } 1150 } 1151 1152 static void cpu_4xx_pit_cb (void *opaque) 1153 { 1154 PowerPCCPU *cpu; 1155 CPUPPCState *env; 1156 ppc_tb_t *tb_env; 1157 ppc40x_timer_t *ppc40x_timer; 1158 1159 env = opaque; 1160 cpu = ppc_env_get_cpu(env); 1161 tb_env = env->tb_env; 1162 ppc40x_timer = tb_env->opaque; 1163 env->spr[SPR_40x_TSR] |= 1 << 27; 1164 if ((env->spr[SPR_40x_TCR] >> 26) & 0x1) { 1165 ppc_set_irq(cpu, ppc40x_timer->decr_excp, 1); 1166 } 1167 start_stop_pit(env, tb_env, 1); 1168 LOG_TB("%s: ar %d ir %d TCR " TARGET_FMT_lx " TSR " TARGET_FMT_lx " " 1169 "%016" PRIx64 "\n", __func__, 1170 (int)((env->spr[SPR_40x_TCR] >> 22) & 0x1), 1171 (int)((env->spr[SPR_40x_TCR] >> 26) & 0x1), 1172 env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR], 1173 ppc40x_timer->pit_reload); 1174 } 1175 1176 /* Watchdog timer */ 1177 static void cpu_4xx_wdt_cb (void *opaque) 1178 { 1179 PowerPCCPU *cpu; 1180 CPUPPCState *env; 1181 ppc_tb_t *tb_env; 1182 ppc40x_timer_t *ppc40x_timer; 1183 uint64_t now, next; 1184 1185 env = opaque; 1186 cpu = ppc_env_get_cpu(env); 1187 tb_env = env->tb_env; 1188 ppc40x_timer = tb_env->opaque; 1189 now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); 1190 switch ((env->spr[SPR_40x_TCR] >> 30) & 0x3) { 1191 case 0: 1192 next = 1 << 17; 1193 break; 1194 case 1: 1195 next = 1 << 21; 1196 break; 1197 case 2: 1198 next = 1 << 25; 1199 break; 1200 case 3: 1201 next = 1 << 29; 1202 break; 1203 default: 1204 /* Cannot occur, but makes gcc happy */ 1205 return; 1206 } 1207 next = now + muldiv64(next, NANOSECONDS_PER_SECOND, tb_env->decr_freq); 1208 if (next == now) 1209 next++; 1210 LOG_TB("%s: TCR " TARGET_FMT_lx " TSR " TARGET_FMT_lx "\n", __func__, 1211 env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR]); 1212 switch ((env->spr[SPR_40x_TSR] >> 30) & 0x3) { 1213 case 0x0: 1214 case 0x1: 1215 timer_mod(ppc40x_timer->wdt_timer, next); 1216 
ppc40x_timer->wdt_next = next; 1217 env->spr[SPR_40x_TSR] |= 1U << 31; 1218 break; 1219 case 0x2: 1220 timer_mod(ppc40x_timer->wdt_timer, next); 1221 ppc40x_timer->wdt_next = next; 1222 env->spr[SPR_40x_TSR] |= 1 << 30; 1223 if ((env->spr[SPR_40x_TCR] >> 27) & 0x1) { 1224 ppc_set_irq(cpu, PPC_INTERRUPT_WDT, 1); 1225 } 1226 break; 1227 case 0x3: 1228 env->spr[SPR_40x_TSR] &= ~0x30000000; 1229 env->spr[SPR_40x_TSR] |= env->spr[SPR_40x_TCR] & 0x30000000; 1230 switch ((env->spr[SPR_40x_TCR] >> 28) & 0x3) { 1231 case 0x0: 1232 /* No reset */ 1233 break; 1234 case 0x1: /* Core reset */ 1235 ppc40x_core_reset(cpu); 1236 break; 1237 case 0x2: /* Chip reset */ 1238 ppc40x_chip_reset(cpu); 1239 break; 1240 case 0x3: /* System reset */ 1241 ppc40x_system_reset(cpu); 1242 break; 1243 } 1244 } 1245 } 1246 1247 void store_40x_pit (CPUPPCState *env, target_ulong val) 1248 { 1249 ppc_tb_t *tb_env; 1250 ppc40x_timer_t *ppc40x_timer; 1251 1252 tb_env = env->tb_env; 1253 ppc40x_timer = tb_env->opaque; 1254 LOG_TB("%s val" TARGET_FMT_lx "\n", __func__, val); 1255 ppc40x_timer->pit_reload = val; 1256 start_stop_pit(env, tb_env, 0); 1257 } 1258 1259 target_ulong load_40x_pit (CPUPPCState *env) 1260 { 1261 return cpu_ppc_load_decr(env); 1262 } 1263 1264 static void ppc_40x_set_tb_clk (void *opaque, uint32_t freq) 1265 { 1266 CPUPPCState *env = opaque; 1267 ppc_tb_t *tb_env = env->tb_env; 1268 1269 LOG_TB("%s set new frequency to %" PRIu32 "\n", __func__, 1270 freq); 1271 tb_env->tb_freq = freq; 1272 tb_env->decr_freq = freq; 1273 /* XXX: we should also update all timers */ 1274 } 1275 1276 clk_setup_cb ppc_40x_timers_init (CPUPPCState *env, uint32_t freq, 1277 unsigned int decr_excp) 1278 { 1279 ppc_tb_t *tb_env; 1280 ppc40x_timer_t *ppc40x_timer; 1281 1282 tb_env = g_malloc0(sizeof(ppc_tb_t)); 1283 env->tb_env = tb_env; 1284 tb_env->flags = PPC_DECR_UNDERFLOW_TRIGGERED; 1285 ppc40x_timer = g_malloc0(sizeof(ppc40x_timer_t)); 1286 tb_env->tb_freq = freq; 1287 tb_env->decr_freq = freq; 
1288 tb_env->opaque = ppc40x_timer; 1289 LOG_TB("%s freq %" PRIu32 "\n", __func__, freq); 1290 if (ppc40x_timer != NULL) { 1291 /* We use decr timer for PIT */ 1292 tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_pit_cb, env); 1293 ppc40x_timer->fit_timer = 1294 timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_fit_cb, env); 1295 ppc40x_timer->wdt_timer = 1296 timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_wdt_cb, env); 1297 ppc40x_timer->decr_excp = decr_excp; 1298 } 1299 1300 return &ppc_40x_set_tb_clk; 1301 } 1302 1303 /*****************************************************************************/ 1304 /* Embedded PowerPC Device Control Registers */ 1305 typedef struct ppc_dcrn_t ppc_dcrn_t; 1306 struct ppc_dcrn_t { 1307 dcr_read_cb dcr_read; 1308 dcr_write_cb dcr_write; 1309 void *opaque; 1310 }; 1311 1312 /* XXX: on 460, DCR addresses are 32 bits wide, 1313 * using DCRIPR to get the 22 upper bits of the DCR address 1314 */ 1315 #define DCRN_NB 1024 1316 struct ppc_dcr_t { 1317 ppc_dcrn_t dcrn[DCRN_NB]; 1318 int (*read_error)(int dcrn); 1319 int (*write_error)(int dcrn); 1320 }; 1321 1322 int ppc_dcr_read (ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp) 1323 { 1324 ppc_dcrn_t *dcr; 1325 1326 if (dcrn < 0 || dcrn >= DCRN_NB) 1327 goto error; 1328 dcr = &dcr_env->dcrn[dcrn]; 1329 if (dcr->dcr_read == NULL) 1330 goto error; 1331 *valp = (*dcr->dcr_read)(dcr->opaque, dcrn); 1332 1333 return 0; 1334 1335 error: 1336 if (dcr_env->read_error != NULL) 1337 return (*dcr_env->read_error)(dcrn); 1338 1339 return -1; 1340 } 1341 1342 int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, uint32_t val) 1343 { 1344 ppc_dcrn_t *dcr; 1345 1346 if (dcrn < 0 || dcrn >= DCRN_NB) 1347 goto error; 1348 dcr = &dcr_env->dcrn[dcrn]; 1349 if (dcr->dcr_write == NULL) 1350 goto error; 1351 (*dcr->dcr_write)(dcr->opaque, dcrn, val); 1352 1353 return 0; 1354 1355 error: 1356 if (dcr_env->write_error != NULL) 1357 return (*dcr_env->write_error)(dcrn); 1358 1359 return -1; 1360 } 1361 1362 int 
ppc_dcr_register (CPUPPCState *env, int dcrn, void *opaque, 1363 dcr_read_cb dcr_read, dcr_write_cb dcr_write) 1364 { 1365 ppc_dcr_t *dcr_env; 1366 ppc_dcrn_t *dcr; 1367 1368 dcr_env = env->dcr_env; 1369 if (dcr_env == NULL) 1370 return -1; 1371 if (dcrn < 0 || dcrn >= DCRN_NB) 1372 return -1; 1373 dcr = &dcr_env->dcrn[dcrn]; 1374 if (dcr->opaque != NULL || 1375 dcr->dcr_read != NULL || 1376 dcr->dcr_write != NULL) 1377 return -1; 1378 dcr->opaque = opaque; 1379 dcr->dcr_read = dcr_read; 1380 dcr->dcr_write = dcr_write; 1381 1382 return 0; 1383 } 1384 1385 int ppc_dcr_init (CPUPPCState *env, int (*read_error)(int dcrn), 1386 int (*write_error)(int dcrn)) 1387 { 1388 ppc_dcr_t *dcr_env; 1389 1390 dcr_env = g_malloc0(sizeof(ppc_dcr_t)); 1391 dcr_env->read_error = read_error; 1392 dcr_env->write_error = write_error; 1393 env->dcr_env = dcr_env; 1394 1395 return 0; 1396 } 1397 1398 /*****************************************************************************/ 1399 /* Debug port */ 1400 void PPC_debug_write (void *opaque, uint32_t addr, uint32_t val) 1401 { 1402 addr &= 0xF; 1403 switch (addr) { 1404 case 0: 1405 printf("%c", val); 1406 break; 1407 case 1: 1408 printf("\n"); 1409 fflush(stdout); 1410 break; 1411 case 2: 1412 printf("Set loglevel to %04" PRIx32 "\n", val); 1413 qemu_set_log(val | 0x100); 1414 break; 1415 } 1416 } 1417