1 /* 2 * QEMU generic PowerPC hardware System Emulator 3 * 4 * Copyright (c) 2003-2007 Jocelyn Mayer 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a copy 7 * of this software and associated documentation files (the "Software"), to deal 8 * in the Software without restriction, including without limitation the rights 9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 * copies of the Software, and to permit persons to whom the Software is 11 * furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 * THE SOFTWARE. 23 */ 24 #include "hw/hw.h" 25 #include "hw/ppc/ppc.h" 26 #include "hw/ppc/ppc_e500.h" 27 #include "qemu/timer.h" 28 #include "sysemu/sysemu.h" 29 #include "sysemu/cpus.h" 30 #include "hw/timer/m48t59.h" 31 #include "qemu/log.h" 32 #include "qemu/error-report.h" 33 #include "hw/loader.h" 34 #include "sysemu/kvm.h" 35 #include "kvm_ppc.h" 36 #include "trace.h" 37 38 //#define PPC_DEBUG_IRQ 39 //#define PPC_DEBUG_TB 40 41 #ifdef PPC_DEBUG_IRQ 42 # define LOG_IRQ(...) qemu_log_mask(CPU_LOG_INT, ## __VA_ARGS__) 43 #else 44 # define LOG_IRQ(...) do { } while (0) 45 #endif 46 47 48 #ifdef PPC_DEBUG_TB 49 # define LOG_TB(...) qemu_log(__VA_ARGS__) 50 #else 51 # define LOG_TB(...) 
do { } while (0) 52 #endif 53 54 static void cpu_ppc_tb_stop (CPUPPCState *env); 55 static void cpu_ppc_tb_start (CPUPPCState *env); 56 57 void ppc_set_irq(PowerPCCPU *cpu, int n_IRQ, int level) 58 { 59 CPUState *cs = CPU(cpu); 60 CPUPPCState *env = &cpu->env; 61 unsigned int old_pending = env->pending_interrupts; 62 63 if (level) { 64 env->pending_interrupts |= 1 << n_IRQ; 65 cpu_interrupt(cs, CPU_INTERRUPT_HARD); 66 } else { 67 env->pending_interrupts &= ~(1 << n_IRQ); 68 if (env->pending_interrupts == 0) { 69 cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); 70 } 71 } 72 73 if (old_pending != env->pending_interrupts) { 74 #ifdef CONFIG_KVM 75 kvmppc_set_interrupt(cpu, n_IRQ, level); 76 #endif 77 } 78 79 LOG_IRQ("%s: %p n_IRQ %d level %d => pending %08" PRIx32 80 "req %08x\n", __func__, env, n_IRQ, level, 81 env->pending_interrupts, CPU(cpu)->interrupt_request); 82 } 83 84 /* PowerPC 6xx / 7xx internal IRQ controller */ 85 static void ppc6xx_set_irq(void *opaque, int pin, int level) 86 { 87 PowerPCCPU *cpu = opaque; 88 CPUPPCState *env = &cpu->env; 89 int cur_level; 90 91 LOG_IRQ("%s: env %p pin %d level %d\n", __func__, 92 env, pin, level); 93 cur_level = (env->irq_input_state >> pin) & 1; 94 /* Don't generate spurious events */ 95 if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) { 96 CPUState *cs = CPU(cpu); 97 98 switch (pin) { 99 case PPC6xx_INPUT_TBEN: 100 /* Level sensitive - active high */ 101 LOG_IRQ("%s: %s the time base\n", 102 __func__, level ? 
"start" : "stop"); 103 if (level) { 104 cpu_ppc_tb_start(env); 105 } else { 106 cpu_ppc_tb_stop(env); 107 } 108 case PPC6xx_INPUT_INT: 109 /* Level sensitive - active high */ 110 LOG_IRQ("%s: set the external IRQ state to %d\n", 111 __func__, level); 112 ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level); 113 break; 114 case PPC6xx_INPUT_SMI: 115 /* Level sensitive - active high */ 116 LOG_IRQ("%s: set the SMI IRQ state to %d\n", 117 __func__, level); 118 ppc_set_irq(cpu, PPC_INTERRUPT_SMI, level); 119 break; 120 case PPC6xx_INPUT_MCP: 121 /* Negative edge sensitive */ 122 /* XXX: TODO: actual reaction may depends on HID0 status 123 * 603/604/740/750: check HID0[EMCP] 124 */ 125 if (cur_level == 1 && level == 0) { 126 LOG_IRQ("%s: raise machine check state\n", 127 __func__); 128 ppc_set_irq(cpu, PPC_INTERRUPT_MCK, 1); 129 } 130 break; 131 case PPC6xx_INPUT_CKSTP_IN: 132 /* Level sensitive - active low */ 133 /* XXX: TODO: relay the signal to CKSTP_OUT pin */ 134 /* XXX: Note that the only way to restart the CPU is to reset it */ 135 if (level) { 136 LOG_IRQ("%s: stop the CPU\n", __func__); 137 cs->halted = 1; 138 } 139 break; 140 case PPC6xx_INPUT_HRESET: 141 /* Level sensitive - active low */ 142 if (level) { 143 LOG_IRQ("%s: reset the CPU\n", __func__); 144 cpu_interrupt(cs, CPU_INTERRUPT_RESET); 145 } 146 break; 147 case PPC6xx_INPUT_SRESET: 148 LOG_IRQ("%s: set the RESET IRQ state to %d\n", 149 __func__, level); 150 ppc_set_irq(cpu, PPC_INTERRUPT_RESET, level); 151 break; 152 default: 153 /* Unknown pin - do nothing */ 154 LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin); 155 return; 156 } 157 if (level) 158 env->irq_input_state |= 1 << pin; 159 else 160 env->irq_input_state &= ~(1 << pin); 161 } 162 } 163 164 void ppc6xx_irq_init(CPUPPCState *env) 165 { 166 PowerPCCPU *cpu = ppc_env_get_cpu(env); 167 168 env->irq_inputs = (void **)qemu_allocate_irqs(&ppc6xx_set_irq, cpu, 169 PPC6xx_INPUT_NB); 170 } 171 172 #if defined(TARGET_PPC64) 173 /* PowerPC 970 internal IRQ 
controller */
/* Input-pin handler for the 970: same structure as the 6xx version above,
 * keyed on the PPC970_INPUT_* pins. */
static void ppc970_set_irq(void *opaque, int pin, int level)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int cur_level;

    LOG_IRQ("%s: env %p pin %d level %d\n", __func__,
            env, pin, level);
    cur_level = (env->irq_input_state >> pin) & 1;
    /* Don't generate spurious events */
    if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
        CPUState *cs = CPU(cpu);

        switch (pin) {
        case PPC970_INPUT_INT:
            /* Level sensitive - active high */
            LOG_IRQ("%s: set the external IRQ state to %d\n",
                    __func__, level);
            ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
            break;
        case PPC970_INPUT_THINT:
            /* Level sensitive - active high */
            LOG_IRQ("%s: set the SMI IRQ state to %d\n", __func__,
                    level);
            ppc_set_irq(cpu, PPC_INTERRUPT_THERM, level);
            break;
        case PPC970_INPUT_MCP:
            /* Negative edge sensitive */
            /* XXX: TODO: actual reaction may depends on HID0 status
             * 603/604/740/750: check HID0[EMCP]
             */
            if (cur_level == 1 && level == 0) {
                LOG_IRQ("%s: raise machine check state\n",
                        __func__);
                ppc_set_irq(cpu, PPC_INTERRUPT_MCK, 1);
            }
            break;
        case PPC970_INPUT_CKSTP:
            /* Level sensitive - active low */
            /* XXX: TODO: relay the signal to CKSTP_OUT pin */
            if (level) {
                LOG_IRQ("%s: stop the CPU\n", __func__);
                cs->halted = 1;
            } else {
                LOG_IRQ("%s: restart the CPU\n", __func__);
                cs->halted = 0;
                qemu_cpu_kick(cs);
            }
            break;
        case PPC970_INPUT_HRESET:
            /* Level sensitive - active low */
            if (level) {
                cpu_interrupt(cs, CPU_INTERRUPT_RESET);
            }
            break;
        case PPC970_INPUT_SRESET:
            LOG_IRQ("%s: set the RESET IRQ state to %d\n",
                    __func__, level);
            ppc_set_irq(cpu, PPC_INTERRUPT_RESET, level);
            break;
        case PPC970_INPUT_TBEN:
            LOG_IRQ("%s: set the TBEN state to %d\n", __func__,
                    level);
            /* XXX: TODO */
            break;
        default:
            /* Unknown pin - do nothing */
            LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin);
            return;
        }
        /* Remember the new pin level */
        if (level)
            env->irq_input_state |= 1 << pin;
        else
            env->irq_input_state &= ~(1 << pin);
    }
}

/* Wire the 970 input pins to ppc970_set_irq(). */
void ppc970_irq_init(CPUPPCState *env)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);

    env->irq_inputs = (void **)qemu_allocate_irqs(&ppc970_set_irq, cpu,
                                                  PPC970_INPUT_NB);
}

/* POWER7 internal IRQ controller */
/* POWER7 exposes a single external-interrupt pin; no edge handling and no
 * "spurious event" filtering is done here. */
static void power7_set_irq(void *opaque, int pin, int level)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;

    LOG_IRQ("%s: env %p pin %d level %d\n", __func__,
            env, pin, level);

    switch (pin) {
    case POWER7_INPUT_INT:
        /* Level sensitive - active high */
        LOG_IRQ("%s: set the external IRQ state to %d\n",
                __func__, level);
        ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
        break;
    default:
        /* Unknown pin - do nothing */
        LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin);
        return;
    }
    if (level) {
        env->irq_input_state |= 1 << pin;
    } else {
        env->irq_input_state &= ~(1 << pin);
    }
}

/* Wire the POWER7 input pin to power7_set_irq(). */
void ppcPOWER7_irq_init(CPUPPCState *env)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);

    env->irq_inputs = (void **)qemu_allocate_irqs(&power7_set_irq, cpu,
                                                  POWER7_INPUT_NB);
}
#endif /* defined(TARGET_PPC64) */

/* PowerPC 40x internal IRQ controller */
static void ppc40x_set_irq(void *opaque, int pin, int level)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int cur_level;

    LOG_IRQ("%s: env %p pin %d level %d\n", __func__,
            env, pin, level);
    cur_level = (env->irq_input_state >> pin) & 1;
    /* Don't generate spurious events */
    if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
        CPUState *cs = CPU(cpu);

        switch (pin) {
        case PPC40x_INPUT_RESET_SYS:
            if (level) {
                LOG_IRQ("%s: reset the PowerPC system\n",
                        __func__);
                ppc40x_system_reset(cpu);
            }
            break;
        case PPC40x_INPUT_RESET_CHIP:
            if (level) {
                LOG_IRQ("%s: reset the PowerPC chip\n", __func__);
                ppc40x_chip_reset(cpu);
            }
            break;
        case PPC40x_INPUT_RESET_CORE:
            /* XXX: TODO: update DBSR[MRR] */
            if (level) {
                LOG_IRQ("%s: reset the PowerPC core\n", __func__);
                ppc40x_core_reset(cpu);
            }
            break;
        case PPC40x_INPUT_CINT:
            /* Level sensitive - active high */
            LOG_IRQ("%s: set the critical IRQ state to %d\n",
                    __func__, level);
            ppc_set_irq(cpu, PPC_INTERRUPT_CEXT, level);
            break;
        case PPC40x_INPUT_INT:
            /* Level sensitive - active high */
            LOG_IRQ("%s: set the external IRQ state to %d\n",
                    __func__, level);
            ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
            break;
        case PPC40x_INPUT_HALT:
            /* Level sensitive - active low */
            if (level) {
                LOG_IRQ("%s: stop the CPU\n", __func__);
                cs->halted = 1;
            } else {
                LOG_IRQ("%s: restart the CPU\n", __func__);
                cs->halted = 0;
                qemu_cpu_kick(cs);
            }
            break;
        case PPC40x_INPUT_DEBUG:
            /* Level sensitive - active high */
            LOG_IRQ("%s: set the debug pin state to %d\n",
                    __func__, level);
            ppc_set_irq(cpu, PPC_INTERRUPT_DEBUG, level);
            break;
        default:
            /* Unknown pin - do nothing */
            LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin);
            return;
        }
        /* Remember the new pin level */
        if (level)
            env->irq_input_state |= 1 << pin;
        else
            env->irq_input_state &= ~(1 << pin);
    }
}

/* Wire the 40x input pins to ppc40x_set_irq(). */
void ppc40x_irq_init(CPUPPCState *env)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);

    env->irq_inputs = (void **)qemu_allocate_irqs(&ppc40x_set_irq,
                                                  cpu, PPC40x_INPUT_NB);
}

/* PowerPC E500 internal IRQ controller */
static void ppce500_set_irq(void *opaque, int pin, int level)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int cur_level;

    LOG_IRQ("%s: env %p pin %d level %d\n", __func__,
            env, pin, level);
    cur_level = (env->irq_input_state >> pin) & 1;
    /* Don't generate spurious events */
    if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
        switch (pin) {
        case PPCE500_INPUT_MCK:
            if (level) {
                LOG_IRQ("%s: reset the PowerPC system\n",
                        __func__);
                qemu_system_reset_request();
            }
            break;
        case PPCE500_INPUT_RESET_CORE:
            /* NOTE(review): labelled a core reset but implemented by raising
             * the machine-check interrupt — confirm this approximation is
             * intended for e500. */
            if (level) {
                LOG_IRQ("%s: reset the PowerPC core\n", __func__);
                ppc_set_irq(cpu, PPC_INTERRUPT_MCK, level);
            }
            break;
        case PPCE500_INPUT_CINT:
            /* Level sensitive - active high */
            LOG_IRQ("%s: set the critical IRQ state to %d\n",
                    __func__, level);
            ppc_set_irq(cpu, PPC_INTERRUPT_CEXT, level);
            break;
        case PPCE500_INPUT_INT:
            /* Level sensitive - active high */
            LOG_IRQ("%s: set the core IRQ state to %d\n",
                    __func__, level);
            ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
            break;
        case PPCE500_INPUT_DEBUG:
            /* Level sensitive - active high */
            LOG_IRQ("%s: set the debug pin state to %d\n",
                    __func__, level);
            ppc_set_irq(cpu, PPC_INTERRUPT_DEBUG, level);
            break;
        default:
            /* Unknown pin - do nothing */
            LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin);
            return;
        }
        /* Remember the new pin level */
        if (level)
            env->irq_input_state |= 1 << pin;
        else
            env->irq_input_state &= ~(1 << pin);
    }
}

/* Wire the e500 input pins to ppce500_set_irq(). */
void ppce500_irq_init(CPUPPCState *env)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);

    env->irq_inputs = (void **)qemu_allocate_irqs(&ppce500_set_irq,
                                                  cpu, PPCE500_INPUT_NB);
}

/* Enable or Disable the E500 EPR capability */
/* Propagate the MPIC-proxy (EPR) setting to every vCPU, and to KVM when it
 * is in use. */
void ppce500_set_mpic_proxy(bool enabled)
{
    CPUState *cs;

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        cpu->env.mpic_proxy = enabled;
        if (kvm_enabled()) {
            kvmppc_set_mpic_proxy(cpu, enabled);
        }
    }
}

/*****************************************************************************/
/* PowerPC time base and decrementer
 emulation */

/* Convert a QEMU_CLOCK_VIRTUAL timestamp (ns) into time-base ticks and add
 * the per-CPU offset. */
uint64_t cpu_ppc_get_tb(ppc_tb_t *tb_env, uint64_t vmclk, int64_t tb_offset)
{
    /* TB time in tb periods */
    return muldiv64(vmclk, tb_env->tb_freq, get_ticks_per_sec()) + tb_offset;
}

/* Read the full 64-bit time base (TBL reads return all 64 bits here; callers
 * truncate).  Under KVM the kernel-maintained SPR value is used instead. */
uint64_t cpu_ppc_load_tbl (CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t tb;

    if (kvm_enabled()) {
        return env->spr[SPR_TBL];
    }

    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
    LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb);

    return tb;
}

/* TCG-only helper: upper 32 bits of the time base. */
static inline uint32_t _cpu_ppc_load_tbu(CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t tb;

    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
    LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb);

    return tb >> 32;
}

uint32_t cpu_ppc_load_tbu (CPUPPCState *env)
{
    if (kvm_enabled()) {
        return env->spr[SPR_TBU];
    }

    return _cpu_ppc_load_tbu(env);
}

/* Set the time base to @value at time @vmclk by recomputing *tb_offsetp; the
 * offset is what actually persists. */
static inline void cpu_ppc_store_tb(ppc_tb_t *tb_env, uint64_t vmclk,
                                    int64_t *tb_offsetp, uint64_t value)
{
    *tb_offsetp = value - muldiv64(vmclk, tb_env->tb_freq, get_ticks_per_sec());
    LOG_TB("%s: tb %016" PRIx64 " offset %08" PRIx64 "\n",
           __func__, value, *tb_offsetp);
}

/* Write TBL: keep the current upper half, replace the lower 32 bits. */
void cpu_ppc_store_tbl (CPUPPCState *env, uint32_t value)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t tb;

    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
    tb &= 0xFFFFFFFF00000000ULL;
    cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                     &tb_env->tb_offset, tb | (uint64_t)value);
}

/* Write TBU: keep the current lower half, replace the upper 32 bits. */
static inline void _cpu_ppc_store_tbu(CPUPPCState *env, uint32_t value)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t tb;

    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
    tb &= 0x00000000FFFFFFFFULL;
    cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                     &tb_env->tb_offset, ((uint64_t)value << 32) | tb);
}

void cpu_ppc_store_tbu (CPUPPCState *env, uint32_t value)
{
    _cpu_ppc_store_tbu(env, value);
}

/* Alternate time base (ATB): same arithmetic as the TB, using atb_offset. */
uint64_t cpu_ppc_load_atbl (CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t tb;

    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
    LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb);

    return tb;
}

uint32_t cpu_ppc_load_atbu (CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t tb;

    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
    LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb);

    return tb >> 32;
}

void cpu_ppc_store_atbl (CPUPPCState *env, uint32_t value)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t tb;

    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
    tb &= 0xFFFFFFFF00000000ULL;
    cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                     &tb_env->atb_offset, tb | (uint64_t)value);
}

void cpu_ppc_store_atbu (CPUPPCState *env, uint32_t value)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t tb;

    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
    tb &= 0x00000000FFFFFFFFULL;
    cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                     &tb_env->atb_offset, ((uint64_t)value << 32) | tb);
}

/* Freeze TB and ATB by latching their current values into the offsets and
 * forcing tb_freq to 0 (tb_freq == 0 encodes "frozen"). */
static void cpu_ppc_tb_stop (CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t tb, atb, vmclk;

    /* If the time base is already frozen, do nothing */
    if (tb_env->tb_freq != 0) {
        vmclk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        /* Get the time base */
        tb = cpu_ppc_get_tb(tb_env, vmclk, tb_env->tb_offset);
        /* Get the alternate time base */
        atb = cpu_ppc_get_tb(tb_env, vmclk, tb_env->atb_offset);
        /* Store the time base value (ie compute the current offset) */
        cpu_ppc_store_tb(tb_env, vmclk, &tb_env->tb_offset, tb);
        /* Store the alternate time base value (compute the current offset) */
        cpu_ppc_store_tb(tb_env, vmclk, &tb_env->atb_offset, atb);
        /* Set the time base frequency to zero */
        tb_env->tb_freq = 0;
        /* Now, the time bases are frozen to tb_offset / atb_offset value */
    }
}

/* Unfreeze TB and ATB: the frozen values live in the offsets, so restore the
 * frequency and re-anchor the offsets at the current virtual clock. */
static void cpu_ppc_tb_start (CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t tb, atb, vmclk;

    /* If the time base is not frozen, do nothing */
    if (tb_env->tb_freq == 0) {
        vmclk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        /* Get the time base from tb_offset */
        tb = tb_env->tb_offset;
        /* Get the alternate time base from atb_offset */
        atb = tb_env->atb_offset;
        /* Restore the tb frequency from the decrementer frequency */
        tb_env->tb_freq = tb_env->decr_freq;
        /* Store the time base value */
        cpu_ppc_store_tb(tb_env, vmclk, &tb_env->tb_offset, tb);
        /* Store the alternate time base value */
        cpu_ppc_store_tb(tb_env, vmclk, &tb_env->atb_offset, atb);
    }
}

/* True when the DECR interrupt is edge-triggered only (no level semantics),
 * i.e. delivery clears the pending state. */
bool ppc_decr_clear_on_delivery(CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;
    int flags = PPC_DECR_UNDERFLOW_TRIGGERED | PPC_DECR_UNDERFLOW_LEVEL;
    return ((tb_env->flags & flags) == PPC_DECR_UNDERFLOW_TRIGGERED);
}

/* Compute the current DECR value from the absolute deadline @next.  A
 * negative remainder yields a "negative" (wrapped) value except on BookE,
 * where the decrementer stops at 0. */
static inline uint32_t _cpu_ppc_load_decr(CPUPPCState *env, uint64_t next)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint32_t decr;
    int64_t diff;

    diff = next - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    if (diff >= 0) {
        decr = muldiv64(diff, tb_env->decr_freq, get_ticks_per_sec());
    } else if (tb_env->flags & PPC_TIMER_BOOKE) {
        decr = 0;
    } else {
        decr = -muldiv64(-diff, tb_env->decr_freq, get_ticks_per_sec());
    }
    LOG_TB("%s: %08" PRIx32 "\n", __func__, decr);

    return decr;
}

uint32_t cpu_ppc_load_decr (CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;

    if (kvm_enabled()) {
        return env->spr[SPR_DECR];
    }

    return _cpu_ppc_load_decr(env, tb_env->decr_next);
}

uint32_t cpu_ppc_load_hdecr (CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;

    return _cpu_ppc_load_decr(env, tb_env->hdecr_next);
}

/* PURR: value loaded at purr_start plus elapsed virtual time in tb ticks. */
uint64_t cpu_ppc_load_purr (CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t diff;

    diff = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - tb_env->purr_start;

    return tb_env->purr_load + muldiv64(diff, tb_env->tb_freq, get_ticks_per_sec());
}

/* When decrementer expires,
 * all we need to do is generate or queue a CPU exception
 */
static inline void cpu_ppc_decr_excp(PowerPCCPU *cpu)
{
    /* Raise it */
    LOG_TB("raise decrementer exception\n");
    ppc_set_irq(cpu, PPC_INTERRUPT_DECR, 1);
}

static inline void cpu_ppc_decr_lower(PowerPCCPU *cpu)
{
    ppc_set_irq(cpu, PPC_INTERRUPT_DECR, 0);
}

static inline void cpu_ppc_hdecr_excp(PowerPCCPU *cpu)
{
    /* Raise it */
    LOG_TB("raise decrementer exception\n");
    ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 1);
}

static inline void cpu_ppc_hdecr_lower(PowerPCCPU *cpu)
{
    ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 0);
}

/* Common DECR/HDECR store path: decide whether to raise/lower the interrupt
 * immediately and (re)arm the deadline timer for the new value. */
static void __cpu_ppc_store_decr(PowerPCCPU *cpu, uint64_t *nextp,
                                 QEMUTimer *timer,
                                 void (*raise_excp)(void *),
                                 void (*lower_excp)(PowerPCCPU *),
                                 uint32_t decr, uint32_t value)
{
    CPUPPCState *env = &cpu->env;
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t now, next;

    LOG_TB("%s: %08" PRIx32 " => %08" PRIx32 "\n", __func__,
           decr, value);

    if (kvm_enabled()) {
        /* KVM handles decrementer exceptions, we don't need our own timer */
        return;
    }

    /*
     * Going from 2 -> 1, 1 -> 0 or 0 -> -1 is the event to generate a DEC
     * interrupt.
     *
     * If we get a really small DEC value, we can assume that by the time we
     * handled it we should inject an interrupt already.
     *
     * On MSB level based DEC implementations the MSB always means the interrupt
     * is pending, so raise it on those.
     *
     * On MSB edge based DEC implementations the MSB going from 0 -> 1 triggers
     * an edge interrupt, so raise it here too.
     */
    if ((value < 3) ||
        ((tb_env->flags & PPC_DECR_UNDERFLOW_LEVEL) && (value & 0x80000000)) ||
        ((tb_env->flags & PPC_DECR_UNDERFLOW_TRIGGERED) && (value & 0x80000000)
          && !(decr & 0x80000000))) {
        (*raise_excp)(cpu);
        return;
    }

    /* On MSB level based systems a 0 for the MSB stops interrupt delivery */
    if (!(value & 0x80000000) && (tb_env->flags & PPC_DECR_UNDERFLOW_LEVEL)) {
        (*lower_excp)(cpu);
    }

    /* Calculate the next timer event */
    now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    next = now + muldiv64(value, get_ticks_per_sec(), tb_env->decr_freq);
    *nextp = next;

    /* Adjust timer */
    timer_mod(timer, next);
}

static inline void _cpu_ppc_store_decr(PowerPCCPU *cpu, uint32_t decr,
                                       uint32_t value)
{
    ppc_tb_t *tb_env = cpu->env.tb_env;

    __cpu_ppc_store_decr(cpu, &tb_env->decr_next, tb_env->decr_timer,
                         tb_env->decr_timer->cb, &cpu_ppc_decr_lower, decr,
                         value);
}

void cpu_ppc_store_decr (CPUPPCState *env, uint32_t value)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);

    _cpu_ppc_store_decr(cpu, cpu_ppc_load_decr(env), value);
}

/* QEMUTimer callback: the decrementer deadline passed. */
static void cpu_ppc_decr_cb(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    cpu_ppc_decr_excp(cpu);
}

/* HDECR store: only acts when the hypervisor decrementer timer exists. */
static inline void _cpu_ppc_store_hdecr(PowerPCCPU *cpu, uint32_t hdecr,
                                        uint32_t value)
{
    ppc_tb_t *tb_env = cpu->env.tb_env;

    if (tb_env->hdecr_timer != NULL) {
        __cpu_ppc_store_decr(cpu, &tb_env->hdecr_next, tb_env->hdecr_timer,
                             tb_env->hdecr_timer->cb,
&cpu_ppc_hdecr_lower, 791 hdecr, value); 792 } 793 } 794 795 void cpu_ppc_store_hdecr (CPUPPCState *env, uint32_t value) 796 { 797 PowerPCCPU *cpu = ppc_env_get_cpu(env); 798 799 _cpu_ppc_store_hdecr(cpu, cpu_ppc_load_hdecr(env), value); 800 } 801 802 static void cpu_ppc_hdecr_cb(void *opaque) 803 { 804 PowerPCCPU *cpu = opaque; 805 806 cpu_ppc_hdecr_excp(cpu); 807 } 808 809 static void cpu_ppc_store_purr(PowerPCCPU *cpu, uint64_t value) 810 { 811 ppc_tb_t *tb_env = cpu->env.tb_env; 812 813 tb_env->purr_load = value; 814 tb_env->purr_start = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); 815 } 816 817 static void cpu_ppc_set_tb_clk (void *opaque, uint32_t freq) 818 { 819 CPUPPCState *env = opaque; 820 PowerPCCPU *cpu = ppc_env_get_cpu(env); 821 ppc_tb_t *tb_env = env->tb_env; 822 823 tb_env->tb_freq = freq; 824 tb_env->decr_freq = freq; 825 /* There is a bug in Linux 2.4 kernels: 826 * if a decrementer exception is pending when it enables msr_ee at startup, 827 * it's not ready to handle it... 828 */ 829 _cpu_ppc_store_decr(cpu, 0xFFFFFFFF, 0xFFFFFFFF); 830 _cpu_ppc_store_hdecr(cpu, 0xFFFFFFFF, 0xFFFFFFFF); 831 cpu_ppc_store_purr(cpu, 0x0000000000000000ULL); 832 } 833 834 static void timebase_pre_save(void *opaque) 835 { 836 PPCTimebase *tb = opaque; 837 uint64_t ticks = cpu_get_host_ticks(); 838 PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu); 839 840 if (!first_ppc_cpu->env.tb_env) { 841 error_report("No timebase object"); 842 return; 843 } 844 845 tb->time_of_the_day_ns = qemu_clock_get_ns(QEMU_CLOCK_HOST); 846 /* 847 * tb_offset is only expected to be changed by migration so 848 * there is no need to update it from KVM here 849 */ 850 tb->guest_timebase = ticks + first_ppc_cpu->env.tb_env->tb_offset; 851 } 852 853 static int timebase_post_load(void *opaque, int version_id) 854 { 855 PPCTimebase *tb_remote = opaque; 856 CPUState *cpu; 857 PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu); 858 int64_t tb_off_adj, tb_off, ns_diff; 859 int64_t migration_duration_ns, 
migration_duration_tb, guest_tb, host_ns; 860 unsigned long freq; 861 862 if (!first_ppc_cpu->env.tb_env) { 863 error_report("No timebase object"); 864 return -1; 865 } 866 867 freq = first_ppc_cpu->env.tb_env->tb_freq; 868 /* 869 * Calculate timebase on the destination side of migration. 870 * The destination timebase must be not less than the source timebase. 871 * We try to adjust timebase by downtime if host clocks are not 872 * too much out of sync (1 second for now). 873 */ 874 host_ns = qemu_clock_get_ns(QEMU_CLOCK_HOST); 875 ns_diff = MAX(0, host_ns - tb_remote->time_of_the_day_ns); 876 migration_duration_ns = MIN(NANOSECONDS_PER_SECOND, ns_diff); 877 migration_duration_tb = muldiv64(migration_duration_ns, freq, 878 NANOSECONDS_PER_SECOND); 879 guest_tb = tb_remote->guest_timebase + MIN(0, migration_duration_tb); 880 881 tb_off_adj = guest_tb - cpu_get_host_ticks(); 882 883 tb_off = first_ppc_cpu->env.tb_env->tb_offset; 884 trace_ppc_tb_adjust(tb_off, tb_off_adj, tb_off_adj - tb_off, 885 (tb_off_adj - tb_off) / freq); 886 887 /* Set new offset to all CPUs */ 888 CPU_FOREACH(cpu) { 889 PowerPCCPU *pcpu = POWERPC_CPU(cpu); 890 pcpu->env.tb_env->tb_offset = tb_off_adj; 891 } 892 893 return 0; 894 } 895 896 const VMStateDescription vmstate_ppc_timebase = { 897 .name = "timebase", 898 .version_id = 1, 899 .minimum_version_id = 1, 900 .minimum_version_id_old = 1, 901 .pre_save = timebase_pre_save, 902 .post_load = timebase_post_load, 903 .fields = (VMStateField []) { 904 VMSTATE_UINT64(guest_timebase, PPCTimebase), 905 VMSTATE_INT64(time_of_the_day_ns, PPCTimebase), 906 VMSTATE_END_OF_LIST() 907 }, 908 }; 909 910 /* Set up (once) timebase frequency (in Hz) */ 911 clk_setup_cb cpu_ppc_tb_init (CPUPPCState *env, uint32_t freq) 912 { 913 PowerPCCPU *cpu = ppc_env_get_cpu(env); 914 ppc_tb_t *tb_env; 915 916 tb_env = g_malloc0(sizeof(ppc_tb_t)); 917 env->tb_env = tb_env; 918 tb_env->flags = PPC_DECR_UNDERFLOW_TRIGGERED; 919 if (env->insns_flags & PPC_SEGMENT_64B) { 
        /* All Book3S 64bit CPUs implement level based DEC logic */
        tb_env->flags |= PPC_DECR_UNDERFLOW_LEVEL;
    }
    /* Create new timer */
    tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_ppc_decr_cb, cpu);
    if (0) {
        /* XXX: find a suitable condition to enable the hypervisor decrementer
         */
        tb_env->hdecr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_ppc_hdecr_cb,
                                           cpu);
    } else {
        tb_env->hdecr_timer = NULL;
    }
    cpu_ppc_set_tb_clk(env, freq);

    return &cpu_ppc_set_tb_clk;
}

/* Specific helpers for POWER & PowerPC 601 RTC */
#if 0
static clk_setup_cb cpu_ppc601_rtc_init (CPUPPCState *env)
{
    return cpu_ppc_tb_init(env, 7812500);
}
#endif

/* 601 RTCU maps directly onto the TBU storage. */
void cpu_ppc601_store_rtcu (CPUPPCState *env, uint32_t value)
{
    _cpu_ppc_store_tbu(env, value);
}

uint32_t cpu_ppc601_load_rtcu (CPUPPCState *env)
{
    return _cpu_ppc_load_tbu(env);
}

/* 601 RTCL uses the TBL storage with the low 7 bits forced to zero. */
void cpu_ppc601_store_rtcl (CPUPPCState *env, uint32_t value)
{
    cpu_ppc_store_tbl(env, value & 0x3FFFFF80);
}

uint32_t cpu_ppc601_load_rtcl (CPUPPCState *env)
{
    return cpu_ppc_load_tbl(env) & 0x3FFFFF80;
}

/*****************************************************************************/
/* PowerPC 40x timers */

/* PIT, FIT & WDT */
typedef struct ppc40x_timer_t ppc40x_timer_t;
struct ppc40x_timer_t {
    uint64_t pit_reload;  /* PIT auto-reload value        */
    uint64_t fit_next;    /* Tick for next FIT interrupt  */
    QEMUTimer *fit_timer;
    uint64_t wdt_next;    /* Tick for next WDT interrupt  */
    QEMUTimer *wdt_timer;

    /* 405 have the PIT, 440 have a DECR.  */
    unsigned int decr_excp;   /* IRQ raised when the PIT/DECR expires */
};

/* Fixed interval timer */
/* QEMUTimer callback: period selected by TCR[25:24], sets TSR bit 26 and
 * raises PPC_INTERRUPT_FIT when TCR bit 23 enables it, then re-arms. */
static void cpu_4xx_fit_cb (void *opaque)
{
    PowerPCCPU *cpu;
    CPUPPCState *env;
    ppc_tb_t *tb_env;
    ppc40x_timer_t *ppc40x_timer;
    uint64_t now, next;

    env = opaque;
    cpu = ppc_env_get_cpu(env);
    tb_env = env->tb_env;
    ppc40x_timer = tb_env->opaque;
    now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    /* Period in time-base ticks, per the TCR[FP] field */
    switch ((env->spr[SPR_40x_TCR] >> 24) & 0x3) {
    case 0:
        next = 1 << 9;
        break;
    case 1:
        next = 1 << 13;
        break;
    case 2:
        next = 1 << 17;
        break;
    case 3:
        next = 1 << 21;
        break;
    default:
        /* Cannot occur, but makes gcc happy */
        return;
    }
    next = now + muldiv64(next, get_ticks_per_sec(), tb_env->tb_freq);
    if (next == now)
        next++;
    timer_mod(ppc40x_timer->fit_timer, next);
    env->spr[SPR_40x_TSR] |= 1 << 26;
    if ((env->spr[SPR_40x_TCR] >> 23) & 0x1) {
        ppc_set_irq(cpu, PPC_INTERRUPT_FIT, 1);
    }
    LOG_TB("%s: ir %d TCR " TARGET_FMT_lx " TSR " TARGET_FMT_lx "\n", __func__,
           (int)((env->spr[SPR_40x_TCR] >> 23) & 0x1),
           env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR]);
}

/* Programmable interval timer */
/* Arm or cancel the PIT (which shares tb_env->decr_timer).  @is_excp marks a
 * call from the expiry path, where auto-reload (TCR bit 22) is honoured. */
static void start_stop_pit (CPUPPCState *env, ppc_tb_t *tb_env, int is_excp)
{
    ppc40x_timer_t *ppc40x_timer;
    uint64_t now, next;

    ppc40x_timer = tb_env->opaque;
    if (ppc40x_timer->pit_reload <= 1 ||
        !((env->spr[SPR_40x_TCR] >> 26) & 0x1) ||
        (is_excp && !((env->spr[SPR_40x_TCR] >> 22) & 0x1))) {
        /* Stop PIT */
        LOG_TB("%s: stop PIT\n", __func__);
        timer_del(tb_env->decr_timer);
    } else {
        LOG_TB("%s: start PIT %016" PRIx64 "\n",
               __func__, ppc40x_timer->pit_reload);
        now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        next = now + muldiv64(ppc40x_timer->pit_reload,
                              get_ticks_per_sec(), tb_env->decr_freq);
        if (is_excp)
            next += tb_env->decr_next - now;
        if (next == now)
            next++;
        timer_mod(tb_env->decr_timer, next);
        tb_env->decr_next = next;
    }
}

/* QEMUTimer callback: PIT expired — set TSR bit 27, raise the configured
 * exception if TCR bit 26 enables it, then handle auto-reload. */
static void cpu_4xx_pit_cb (void *opaque)
{
    PowerPCCPU *cpu;
    CPUPPCState *env;
    ppc_tb_t *tb_env;
    ppc40x_timer_t *ppc40x_timer;

    env = opaque;
    cpu = ppc_env_get_cpu(env);
    tb_env = env->tb_env;
    ppc40x_timer = tb_env->opaque;
    env->spr[SPR_40x_TSR] |= 1 << 27;
    if ((env->spr[SPR_40x_TCR] >> 26) & 0x1) {
        ppc_set_irq(cpu, ppc40x_timer->decr_excp, 1);
    }
    start_stop_pit(env, tb_env, 1);
    LOG_TB("%s: ar %d ir %d TCR " TARGET_FMT_lx " TSR " TARGET_FMT_lx " "
           "%016" PRIx64 "\n", __func__,
           (int)((env->spr[SPR_40x_TCR] >> 22) & 0x1),
           (int)((env->spr[SPR_40x_TCR] >> 26) & 0x1),
           env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR],
           ppc40x_timer->pit_reload);
}

/* Watchdog timer */
/* QEMUTimer callback: watchdog expiry escalates through TSR[WRS] states —
 * first ENW, then WIS (+ optional interrupt), then the TCR-selected reset. */
static void cpu_4xx_wdt_cb (void *opaque)
{
    PowerPCCPU *cpu;
    CPUPPCState *env;
    ppc_tb_t *tb_env;
    ppc40x_timer_t *ppc40x_timer;
    uint64_t now, next;

    env = opaque;
    cpu = ppc_env_get_cpu(env);
    tb_env = env->tb_env;
    ppc40x_timer = tb_env->opaque;
    now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    /* Period in time-base ticks, per the TCR[WP] field */
    switch ((env->spr[SPR_40x_TCR] >> 30) & 0x3) {
    case 0:
        next = 1 << 17;
        break;
    case 1:
        next = 1 << 21;
        break;
    case 2:
        next = 1 << 25;
        break;
    case 3:
        next = 1 << 29;
        break;
    default:
        /* Cannot occur, but makes gcc happy */
        return;
    }
    next = now + muldiv64(next, get_ticks_per_sec(), tb_env->decr_freq);
    if (next == now)
        next++;
    LOG_TB("%s: TCR " TARGET_FMT_lx " TSR " TARGET_FMT_lx "\n", __func__,
           env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR]);
    switch ((env->spr[SPR_40x_TSR] >> 30) & 0x3) {
    case 0x0:
    case 0x1:
        timer_mod(ppc40x_timer->wdt_timer, next);
        ppc40x_timer->wdt_next = next;
env->spr[SPR_40x_TSR] |= 1U << 31; 1120 break; 1121 case 0x2: 1122 timer_mod(ppc40x_timer->wdt_timer, next); 1123 ppc40x_timer->wdt_next = next; 1124 env->spr[SPR_40x_TSR] |= 1 << 30; 1125 if ((env->spr[SPR_40x_TCR] >> 27) & 0x1) { 1126 ppc_set_irq(cpu, PPC_INTERRUPT_WDT, 1); 1127 } 1128 break; 1129 case 0x3: 1130 env->spr[SPR_40x_TSR] &= ~0x30000000; 1131 env->spr[SPR_40x_TSR] |= env->spr[SPR_40x_TCR] & 0x30000000; 1132 switch ((env->spr[SPR_40x_TCR] >> 28) & 0x3) { 1133 case 0x0: 1134 /* No reset */ 1135 break; 1136 case 0x1: /* Core reset */ 1137 ppc40x_core_reset(cpu); 1138 break; 1139 case 0x2: /* Chip reset */ 1140 ppc40x_chip_reset(cpu); 1141 break; 1142 case 0x3: /* System reset */ 1143 ppc40x_system_reset(cpu); 1144 break; 1145 } 1146 } 1147 } 1148 1149 void store_40x_pit (CPUPPCState *env, target_ulong val) 1150 { 1151 ppc_tb_t *tb_env; 1152 ppc40x_timer_t *ppc40x_timer; 1153 1154 tb_env = env->tb_env; 1155 ppc40x_timer = tb_env->opaque; 1156 LOG_TB("%s val" TARGET_FMT_lx "\n", __func__, val); 1157 ppc40x_timer->pit_reload = val; 1158 start_stop_pit(env, tb_env, 0); 1159 } 1160 1161 target_ulong load_40x_pit (CPUPPCState *env) 1162 { 1163 return cpu_ppc_load_decr(env); 1164 } 1165 1166 static void ppc_40x_set_tb_clk (void *opaque, uint32_t freq) 1167 { 1168 CPUPPCState *env = opaque; 1169 ppc_tb_t *tb_env = env->tb_env; 1170 1171 LOG_TB("%s set new frequency to %" PRIu32 "\n", __func__, 1172 freq); 1173 tb_env->tb_freq = freq; 1174 tb_env->decr_freq = freq; 1175 /* XXX: we should also update all timers */ 1176 } 1177 1178 clk_setup_cb ppc_40x_timers_init (CPUPPCState *env, uint32_t freq, 1179 unsigned int decr_excp) 1180 { 1181 ppc_tb_t *tb_env; 1182 ppc40x_timer_t *ppc40x_timer; 1183 1184 tb_env = g_malloc0(sizeof(ppc_tb_t)); 1185 env->tb_env = tb_env; 1186 tb_env->flags = PPC_DECR_UNDERFLOW_TRIGGERED; 1187 ppc40x_timer = g_malloc0(sizeof(ppc40x_timer_t)); 1188 tb_env->tb_freq = freq; 1189 tb_env->decr_freq = freq; 1190 tb_env->opaque = ppc40x_timer; 
1191 LOG_TB("%s freq %" PRIu32 "\n", __func__, freq); 1192 if (ppc40x_timer != NULL) { 1193 /* We use decr timer for PIT */ 1194 tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_pit_cb, env); 1195 ppc40x_timer->fit_timer = 1196 timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_fit_cb, env); 1197 ppc40x_timer->wdt_timer = 1198 timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_wdt_cb, env); 1199 ppc40x_timer->decr_excp = decr_excp; 1200 } 1201 1202 return &ppc_40x_set_tb_clk; 1203 } 1204 1205 /*****************************************************************************/ 1206 /* Embedded PowerPC Device Control Registers */ 1207 typedef struct ppc_dcrn_t ppc_dcrn_t; 1208 struct ppc_dcrn_t { 1209 dcr_read_cb dcr_read; 1210 dcr_write_cb dcr_write; 1211 void *opaque; 1212 }; 1213 1214 /* XXX: on 460, DCR addresses are 32 bits wide, 1215 * using DCRIPR to get the 22 upper bits of the DCR address 1216 */ 1217 #define DCRN_NB 1024 1218 struct ppc_dcr_t { 1219 ppc_dcrn_t dcrn[DCRN_NB]; 1220 int (*read_error)(int dcrn); 1221 int (*write_error)(int dcrn); 1222 }; 1223 1224 int ppc_dcr_read (ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp) 1225 { 1226 ppc_dcrn_t *dcr; 1227 1228 if (dcrn < 0 || dcrn >= DCRN_NB) 1229 goto error; 1230 dcr = &dcr_env->dcrn[dcrn]; 1231 if (dcr->dcr_read == NULL) 1232 goto error; 1233 *valp = (*dcr->dcr_read)(dcr->opaque, dcrn); 1234 1235 return 0; 1236 1237 error: 1238 if (dcr_env->read_error != NULL) 1239 return (*dcr_env->read_error)(dcrn); 1240 1241 return -1; 1242 } 1243 1244 int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, uint32_t val) 1245 { 1246 ppc_dcrn_t *dcr; 1247 1248 if (dcrn < 0 || dcrn >= DCRN_NB) 1249 goto error; 1250 dcr = &dcr_env->dcrn[dcrn]; 1251 if (dcr->dcr_write == NULL) 1252 goto error; 1253 (*dcr->dcr_write)(dcr->opaque, dcrn, val); 1254 1255 return 0; 1256 1257 error: 1258 if (dcr_env->write_error != NULL) 1259 return (*dcr_env->write_error)(dcrn); 1260 1261 return -1; 1262 } 1263 1264 int ppc_dcr_register (CPUPPCState *env, int 
dcrn, void *opaque, 1265 dcr_read_cb dcr_read, dcr_write_cb dcr_write) 1266 { 1267 ppc_dcr_t *dcr_env; 1268 ppc_dcrn_t *dcr; 1269 1270 dcr_env = env->dcr_env; 1271 if (dcr_env == NULL) 1272 return -1; 1273 if (dcrn < 0 || dcrn >= DCRN_NB) 1274 return -1; 1275 dcr = &dcr_env->dcrn[dcrn]; 1276 if (dcr->opaque != NULL || 1277 dcr->dcr_read != NULL || 1278 dcr->dcr_write != NULL) 1279 return -1; 1280 dcr->opaque = opaque; 1281 dcr->dcr_read = dcr_read; 1282 dcr->dcr_write = dcr_write; 1283 1284 return 0; 1285 } 1286 1287 int ppc_dcr_init (CPUPPCState *env, int (*read_error)(int dcrn), 1288 int (*write_error)(int dcrn)) 1289 { 1290 ppc_dcr_t *dcr_env; 1291 1292 dcr_env = g_malloc0(sizeof(ppc_dcr_t)); 1293 dcr_env->read_error = read_error; 1294 dcr_env->write_error = write_error; 1295 env->dcr_env = dcr_env; 1296 1297 return 0; 1298 } 1299 1300 /*****************************************************************************/ 1301 /* Debug port */ 1302 void PPC_debug_write (void *opaque, uint32_t addr, uint32_t val) 1303 { 1304 addr &= 0xF; 1305 switch (addr) { 1306 case 0: 1307 printf("%c", val); 1308 break; 1309 case 1: 1310 printf("\n"); 1311 fflush(stdout); 1312 break; 1313 case 2: 1314 printf("Set loglevel to %04" PRIx32 "\n", val); 1315 qemu_set_log(val | 0x100); 1316 break; 1317 } 1318 } 1319 1320 /* CPU device-tree ID helpers */ 1321 int ppc_get_vcpu_dt_id(PowerPCCPU *cpu) 1322 { 1323 return cpu->cpu_dt_id; 1324 } 1325 1326 PowerPCCPU *ppc_get_vcpu_by_dt_id(int cpu_dt_id) 1327 { 1328 CPUState *cs; 1329 1330 CPU_FOREACH(cs) { 1331 PowerPCCPU *cpu = POWERPC_CPU(cs); 1332 1333 if (cpu->cpu_dt_id == cpu_dt_id) { 1334 return cpu; 1335 } 1336 } 1337 1338 return NULL; 1339 } 1340