/*
 * OpenPIC emulation
 *
 * Copyright (c) 2004 Jocelyn Mayer
 *               2011 Alexander Graf
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
/*
 *
 * Based on OpenPic implementations:
 * - Intel GW80314 I/O companion chip developer's manual
 * - Motorola MPC8245 & MPC8540 user manuals.
 * - Motorola MCP750 (aka Raven) programmer manual.
 * - Motorola Harrier programmer manual.
 *
 * Serial interrupts, as implemented in the Raven chipset, are not supported yet.
 *
 */

#include "qemu/osdep.h"
#include "hw/irq.h"
#include "hw/ppc/mac.h"
#include "hw/pci/pci.h"
#include "hw/ppc/openpic.h"
#include "hw/ppc/ppc_e500.h"
#include "hw/qdev-properties.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "hw/pci/msi.h"
#include "qapi/error.h"
#include "qemu/bitops.h"
#include "qapi/qmp/qerror.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qemu/timer.h"
#include "qemu/error-report.h"

//#define DEBUG_OPENPIC

#ifdef DEBUG_OPENPIC
static const int debug_openpic = 1;
#else
static const int debug_openpic = 0;
#endif

static int get_current_cpu(void);
#define DPRINTF(fmt, ...) do { \
        if (debug_openpic) { \
            info_report("Core%d: " fmt, get_current_cpu(), ## __VA_ARGS__); \
        } \
    } while (0)
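
/*
 * Keeping DPRINTF() behind a run-time constant rather than an #ifdef means
 * the format string and its arguments are always compiled and type-checked;
 * when debug_openpic is 0 the compiler simply discards the dead branch.
 */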

/* OpenPIC capability flags */
#define OPENPIC_FLAG_IDR_CRIT     (1 << 0)
#define OPENPIC_FLAG_ILR          (2 << 0)

/* OpenPIC address map */
#define OPENPIC_GLB_REG_START        0x0
#define OPENPIC_GLB_REG_SIZE         0x10F0
#define OPENPIC_TMR_REG_START        0x10F0
#define OPENPIC_TMR_REG_SIZE         0x220
#define OPENPIC_MSI_REG_START        0x1600
#define OPENPIC_MSI_REG_SIZE         0x200
#define OPENPIC_SUMMARY_REG_START    0x3800
#define OPENPIC_SUMMARY_REG_SIZE     0x800
#define OPENPIC_SRC_REG_START        0x10000
#define OPENPIC_SRC_REG_SIZE         (OPENPIC_MAX_SRC * 0x20)
#define OPENPIC_CPU_REG_START        0x20000
#define OPENPIC_CPU_REG_SIZE         (0x100 + ((MAX_CPU - 1) * 0x1000))

static FslMpicInfo fsl_mpic_20 = {
    .max_ext = 12,
};

static FslMpicInfo fsl_mpic_42 = {
    .max_ext = 12,
};

#define FRR_NIRQ_SHIFT    16
#define FRR_NCPU_SHIFT    8
#define FRR_VID_SHIFT     0

#define VID_REVISION_1_2  2
#define VID_REVISION_1_3  3

#define VIR_GENERIC       0x00000000 /* Generic Vendor ID */
#define VIR_MPIC2A        0x00004614 /* IBM MPIC-2A */

#define GCR_RESET         0x80000000
#define GCR_MODE_PASS     0x00000000
#define GCR_MODE_MIXED    0x20000000
#define GCR_MODE_PROXY    0x60000000

#define TBCR_CI           0x80000000 /* count inhibit */
#define TCCR_TOG          0x80000000 /* toggles when decrement to zero */

#define IDR_EP_SHIFT      31
#define IDR_EP_MASK       (1U << IDR_EP_SHIFT)
#define IDR_CI0_SHIFT     30
#define IDR_CI1_SHIFT     29
#define IDR_P1_SHIFT      1
#define IDR_P0_SHIFT      0

#define ILR_INTTGT_MASK   0x000000ff
#define ILR_INTTGT_INT    0x00
#define ILR_INTTGT_CINT   0x01 /* critical */
#define ILR_INTTGT_MCP    0x02 /* machine check */

/* The currently supported INTTGT values happen to be the same as QEMU's
 * openpic output codes, but don't depend on this.  The output codes
 * could change (unlikely, but...) or support could be added for
 * more INTTGT values.
 */
static const int inttgt_output[][2] = {
    { ILR_INTTGT_INT, OPENPIC_OUTPUT_INT },
    { ILR_INTTGT_CINT, OPENPIC_OUTPUT_CINT },
    { ILR_INTTGT_MCP, OPENPIC_OUTPUT_MCK },
};

static int inttgt_to_output(int inttgt)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(inttgt_output); i++) {
        if (inttgt_output[i][0] == inttgt) {
            return inttgt_output[i][1];
        }
    }

    error_report("%s: unsupported inttgt %d", __func__, inttgt);
    return OPENPIC_OUTPUT_INT;
}

static int output_to_inttgt(int output)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(inttgt_output); i++) {
        if (inttgt_output[i][1] == output) {
            return inttgt_output[i][0];
        }
    }

    abort();
}

#define MSIIR_OFFSET       0x140
#define MSIIR_SRS_SHIFT    29
#define MSIIR_SRS_MASK     (0x7 << MSIIR_SRS_SHIFT)
#define MSIIR_IBS_SHIFT    24
#define MSIIR_IBS_MASK     (0x1f << MSIIR_IBS_SHIFT)

static int get_current_cpu(void)
{
    if (!current_cpu) {
        return -1;
    }

    return current_cpu->cpu_index;
}

static uint32_t openpic_cpu_read_internal(void *opaque, hwaddr addr,
                                          int idx);
static void openpic_cpu_write_internal(void *opaque, hwaddr addr,
                                       uint32_t val, int idx);
static void openpic_reset(DeviceState *d);

/* Convert between openpic clock ticks and nanosecs.  In the hardware the clock
   frequency is driven by board inputs to the PIC which the PIC would then
   divide by 4 or 8.  For now hard code to 25 MHz.
*/
#define OPENPIC_TIMER_FREQ_MHZ  25
#define OPENPIC_TIMER_NS_PER_TICK (1000 / OPENPIC_TIMER_FREQ_MHZ)
static inline uint64_t ns_to_ticks(uint64_t ns)
{
    return ns / OPENPIC_TIMER_NS_PER_TICK;
}
static inline uint64_t ticks_to_ns(uint64_t ticks)
{
    return ticks * OPENPIC_TIMER_NS_PER_TICK;
}
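
/*
 * With the 25 MHz clock above, OPENPIC_TIMER_NS_PER_TICK is 1000 / 25 = 40,
 * so for example ticks_to_ns(25000) = 1000000 ns: a timer base count of
 * 25000 ticks expires 1 ms later.
 */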

static inline void IRQ_setbit(IRQQueue *q, int n_IRQ)
{
    set_bit(n_IRQ, q->queue);
}

static inline void IRQ_resetbit(IRQQueue *q, int n_IRQ)
{
    clear_bit(n_IRQ, q->queue);
}

static void IRQ_check(OpenPICState *opp, IRQQueue *q)
{
    int irq = -1;
    int next = -1;
    int priority = -1;

    for (;;) {
        irq = find_next_bit(q->queue, opp->max_irq, irq + 1);
        if (irq == opp->max_irq) {
            break;
        }

        DPRINTF("IRQ_check: irq %d set ivpr_pr=%d pr=%d",
                irq, IVPR_PRIORITY(opp->src[irq].ivpr), priority);

        if (IVPR_PRIORITY(opp->src[irq].ivpr) > priority) {
            next = irq;
            priority = IVPR_PRIORITY(opp->src[irq].ivpr);
        }
    }

    q->next = next;
    q->priority = priority;
}

static int IRQ_get_next(OpenPICState *opp, IRQQueue *q)
{
    /* XXX: optimize */
    IRQ_check(opp, q);

    return q->next;
}

static void IRQ_local_pipe(OpenPICState *opp, int n_CPU, int n_IRQ,
                           bool active, bool was_active)
{
    IRQDest *dst;
    IRQSource *src;
    int priority;

    dst = &opp->dst[n_CPU];
    src = &opp->src[n_IRQ];

    DPRINTF("%s: IRQ %d active %d was %d",
            __func__, n_IRQ, active, was_active);

    if (src->output != OPENPIC_OUTPUT_INT) {
        DPRINTF("%s: output %d irq %d active %d was %d count %d",
                __func__, src->output, n_IRQ, active, was_active,
                dst->outputs_active[src->output]);

        /* On Freescale MPIC, critical interrupts ignore priority,
         * IACK, EOI, etc.  Before MPIC v4.1 they also ignore
         * masking.
         */
        if (active) {
            if (!was_active && dst->outputs_active[src->output]++ == 0) {
                DPRINTF("%s: Raise OpenPIC output %d cpu %d irq %d",
                        __func__, src->output, n_CPU, n_IRQ);
                qemu_irq_raise(dst->irqs[src->output]);
            }
        } else {
            if (was_active && --dst->outputs_active[src->output] == 0) {
                DPRINTF("%s: Lower OpenPIC output %d cpu %d irq %d",
                        __func__, src->output, n_CPU, n_IRQ);
                qemu_irq_lower(dst->irqs[src->output]);
            }
        }

        return;
    }

    priority = IVPR_PRIORITY(src->ivpr);

    /* Even if the interrupt doesn't have enough priority,
     * it is still raised, in case ctpr is lowered later.
     */
    if (active) {
        IRQ_setbit(&dst->raised, n_IRQ);
    } else {
        IRQ_resetbit(&dst->raised, n_IRQ);
    }

    IRQ_check(opp, &dst->raised);

    if (active && priority <= dst->ctpr) {
        DPRINTF("%s: IRQ %d priority %d too low for ctpr %d on CPU %d",
                __func__, n_IRQ, priority, dst->ctpr, n_CPU);
        active = 0;
    }

    if (active) {
        if (IRQ_get_next(opp, &dst->servicing) >= 0 &&
                priority <= dst->servicing.priority) {
            DPRINTF("%s: IRQ %d is hidden by servicing IRQ %d on CPU %d",
                    __func__, n_IRQ, dst->servicing.next, n_CPU);
        } else {
            DPRINTF("%s: Raise OpenPIC INT output cpu %d irq %d/%d",
                    __func__, n_CPU, n_IRQ, dst->raised.next);
            qemu_irq_raise(opp->dst[n_CPU].irqs[OPENPIC_OUTPUT_INT]);
        }
    } else {
        IRQ_get_next(opp, &dst->servicing);
        if (dst->raised.priority > dst->ctpr &&
                dst->raised.priority > dst->servicing.priority) {
            DPRINTF("%s: IRQ %d inactive, IRQ %d prio %d above %d/%d, CPU %d",
                    __func__, n_IRQ, dst->raised.next, dst->raised.priority,
                    dst->ctpr, dst->servicing.priority, n_CPU);
            /* IRQ line stays asserted */
        } else {
            DPRINTF("%s: IRQ %d inactive, current prio %d/%d, CPU %d",
                    __func__, n_IRQ, dst->ctpr, dst->servicing.priority, n_CPU);
            qemu_irq_lower(opp->dst[n_CPU].irqs[OPENPIC_OUTPUT_INT]);
        }
    }
}

/* update pic state because registers for n_IRQ have changed value */
static void openpic_update_irq(OpenPICState *opp, int n_IRQ)
{
    IRQSource *src;
    bool active, was_active;
    int i;

    src = &opp->src[n_IRQ];
    active = src->pending;

    if ((src->ivpr & IVPR_MASK_MASK) && !src->nomask) {
        /* Interrupt source is disabled */
        DPRINTF("%s: IRQ %d is disabled", __func__, n_IRQ);
        active = false;
    }

    was_active = !!(src->ivpr & IVPR_ACTIVITY_MASK);

    /*
     * We don't have a similar check for already-active because
     * ctpr may have changed and we need to withdraw the interrupt.
     */
    if (!active && !was_active) {
        DPRINTF("%s: IRQ %d is already inactive", __func__, n_IRQ);
        return;
    }

    if (active) {
        src->ivpr |= IVPR_ACTIVITY_MASK;
    } else {
        src->ivpr &= ~IVPR_ACTIVITY_MASK;
    }

    if (src->destmask == 0) {
        /* No target */
        DPRINTF("%s: IRQ %d has no target", __func__, n_IRQ);
        return;
    }

    if (src->destmask == (1 << src->last_cpu)) {
        /* Only one CPU is allowed to receive this IRQ */
        IRQ_local_pipe(opp, src->last_cpu, n_IRQ, active, was_active);
    } else if (!(src->ivpr & IVPR_MODE_MASK)) {
        /* Directed delivery mode */
        for (i = 0; i < opp->nb_cpus; i++) {
            if (src->destmask & (1 << i)) {
                IRQ_local_pipe(opp, i, n_IRQ, active, was_active);
            }
        }
    } else {
        /* Distributed delivery mode */
        for (i = src->last_cpu + 1; i != src->last_cpu; i++) {
            if (i == opp->nb_cpus) {
                i = 0;
            }
            if (src->destmask & (1 << i)) {
                IRQ_local_pipe(opp, i, n_IRQ, active, was_active);
                src->last_cpu = i;
                break;
            }
        }
    }
}

static void openpic_set_irq(void *opaque, int n_IRQ, int level)
{
    OpenPICState *opp = opaque;
    IRQSource *src;

    if (n_IRQ >= OPENPIC_MAX_IRQ) {
        error_report("%s: IRQ %d out of range", __func__, n_IRQ);
        abort();
    }

    src = &opp->src[n_IRQ];
    DPRINTF("openpic: set irq %d = %d ivpr=0x%08x",
            n_IRQ, level, src->ivpr);
    if (src->level) {
        /* level-sensitive irq */
        src->pending = level;
        openpic_update_irq(opp, n_IRQ);
    } else {
        /* edge-sensitive irq */
        if (level) {
            src->pending = 1;
            openpic_update_irq(opp, n_IRQ);
        }

        if (src->output != OPENPIC_OUTPUT_INT) {
            /* Edge-triggered interrupts shouldn't be used
             * with non-INT delivery, but just in case,
             * try to make it do something sane rather than
             * cause an interrupt storm.  This is close to
             * what you'd probably see happen in real hardware.
             */
            src->pending = 0;
            openpic_update_irq(opp, n_IRQ);
        }
    }
}

static inline uint32_t read_IRQreg_idr(OpenPICState *opp, int n_IRQ)
{
    return opp->src[n_IRQ].idr;
}

static inline uint32_t read_IRQreg_ilr(OpenPICState *opp, int n_IRQ)
{
    if (opp->flags & OPENPIC_FLAG_ILR) {
        return output_to_inttgt(opp->src[n_IRQ].output);
    }

    return 0xffffffff;
}

static inline uint32_t read_IRQreg_ivpr(OpenPICState *opp, int n_IRQ)
{
    return opp->src[n_IRQ].ivpr;
}

static inline void write_IRQreg_idr(OpenPICState *opp, int n_IRQ, uint32_t val)
{
    IRQSource *src = &opp->src[n_IRQ];
    uint32_t normal_mask = (1UL << opp->nb_cpus) - 1;
    uint32_t crit_mask = 0;
    uint32_t mask = normal_mask;
    int crit_shift = IDR_EP_SHIFT - opp->nb_cpus;
    int i;

    if (opp->flags & OPENPIC_FLAG_IDR_CRIT) {
        crit_mask = mask << crit_shift;
        mask |= crit_mask | IDR_EP;
    }

    src->idr = val & mask;
    DPRINTF("Set IDR %d to 0x%08x", n_IRQ, src->idr);

    if (opp->flags & OPENPIC_FLAG_IDR_CRIT) {
        if (src->idr & crit_mask) {
            if (src->idr & normal_mask) {
                DPRINTF("%s: IRQ configured for multiple output types, using "
                        "critical", __func__);
            }

            src->output = OPENPIC_OUTPUT_CINT;
            src->nomask = true;
            src->destmask = 0;

            for (i = 0; i < opp->nb_cpus; i++) {
                int n_ci = IDR_CI0_SHIFT - i;

                if (src->idr & (1UL << n_ci)) {
                    src->destmask |= 1UL << i;
                }
            }
        } else {
            src->output = OPENPIC_OUTPUT_INT;
            src->nomask = false;
            src->destmask = src->idr & normal_mask;
        }
    } else {
        src->destmask = src->idr;
    }
}

static inline void write_IRQreg_ilr(OpenPICState *opp, int n_IRQ, uint32_t val)
{
    if (opp->flags & OPENPIC_FLAG_ILR) {
        IRQSource *src = &opp->src[n_IRQ];

        src->output = inttgt_to_output(val & ILR_INTTGT_MASK);
        DPRINTF("Set ILR %d to 0x%08x, output %d", n_IRQ, src->idr,
                src->output);

        /* TODO: on MPIC v4.0 only, set nomask for non-INT */
    }
}

static inline void write_IRQreg_ivpr(OpenPICState *opp, int n_IRQ, uint32_t val)
{
    uint32_t mask;

    /* NOTE when implementing newer FSL MPIC models: starting with v4.0,
     * the polarity bit is read-only on internal interrupts.
     */
    mask = IVPR_MASK_MASK | IVPR_PRIORITY_MASK | IVPR_SENSE_MASK |
           IVPR_POLARITY_MASK | opp->vector_mask;

    /* ACTIVITY bit is read-only */
    opp->src[n_IRQ].ivpr =
        (opp->src[n_IRQ].ivpr & IVPR_ACTIVITY_MASK) | (val & mask);

    /* For FSL internal interrupts, the sense bit is reserved and zero,
     * and the interrupt is always level-triggered.  Timers and IPIs
     * have no sense or polarity bits, and are edge-triggered.
     */
    switch (opp->src[n_IRQ].type) {
    case IRQ_TYPE_NORMAL:
        opp->src[n_IRQ].level = !!(opp->src[n_IRQ].ivpr & IVPR_SENSE_MASK);
        break;

    case IRQ_TYPE_FSLINT:
        opp->src[n_IRQ].ivpr &= ~IVPR_SENSE_MASK;
        break;

    case IRQ_TYPE_FSLSPECIAL:
        opp->src[n_IRQ].ivpr &= ~(IVPR_POLARITY_MASK | IVPR_SENSE_MASK);
        break;
    }

    openpic_update_irq(opp, n_IRQ);
    DPRINTF("Set IVPR %d to 0x%08x -> 0x%08x", n_IRQ, val,
            opp->src[n_IRQ].ivpr);
}

static void openpic_gcr_write(OpenPICState *opp, uint64_t val)
{
    bool mpic_proxy = false;

    if (val & GCR_RESET) {
        openpic_reset(DEVICE(opp));
        return;
    }

    opp->gcr &= ~opp->mpic_mode_mask;
    opp->gcr |= val & opp->mpic_mode_mask;

    /* Set external proxy mode */
    if ((val & opp->mpic_mode_mask) == GCR_MODE_PROXY) {
        mpic_proxy = true;
    }

    ppce500_set_mpic_proxy(mpic_proxy);
}

static void openpic_gbl_write(void *opaque, hwaddr addr, uint64_t val,
                              unsigned len)
{
    OpenPICState *opp = opaque;
    IRQDest *dst;
    int idx;

    DPRINTF("%s: addr %#" HWADDR_PRIx " <= %08" PRIx64,
            __func__, addr, val);
    if (addr & 0xF) {
        return;
    }
    switch (addr) {
    case 0x00: /* Block Revision Register1 (BRR1) is Readonly */
        break;
    case 0x40:
    case 0x50:
    case 0x60:
    case 0x70:
    case 0x80:
    case 0x90:
    case 0xA0:
    case 0xB0:
        openpic_cpu_write_internal(opp, addr, val, get_current_cpu());
        break;
    case 0x1000: /* FRR */
        break;
    case 0x1020: /* GCR */
        openpic_gcr_write(opp, val);
        break;
    case 0x1080: /* VIR */
        break;
    case 0x1090: /* PIR */
        for (idx = 0; idx < opp->nb_cpus; idx++) {
            if ((val & (1 << idx)) && !(opp->pir & (1 << idx))) {
                DPRINTF("Raise OpenPIC RESET output for CPU %d", idx);
                dst = &opp->dst[idx];
                qemu_irq_raise(dst->irqs[OPENPIC_OUTPUT_RESET]);
            } else if (!(val & (1 << idx)) && (opp->pir & (1 << idx))) {
                DPRINTF("Lower OpenPIC RESET output for CPU %d", idx);
                dst = &opp->dst[idx];
                qemu_irq_lower(dst->irqs[OPENPIC_OUTPUT_RESET]);
            }
        }
        opp->pir = val;
        break;
    case 0x10A0: /* IPI_IVPR */
    case 0x10B0:
    case 0x10C0:
    case 0x10D0:
        {
            int idx;
            idx = (addr - 0x10A0) >> 4;
            write_IRQreg_ivpr(opp, opp->irq_ipi0 + idx, val);
        }
        break;
    case 0x10E0: /* SPVE */
        opp->spve = val & opp->vector_mask;
        break;
    default:
        break;
    }
}

static uint64_t openpic_gbl_read(void *opaque, hwaddr addr, unsigned len)
{
    OpenPICState *opp = opaque;
    uint32_t retval;

    DPRINTF("%s: addr %#" HWADDR_PRIx, __func__, addr);
    retval = 0xFFFFFFFF;
    if (addr & 0xF) {
        return retval;
    }
    switch (addr) {
    case 0x1000: /* FRR */
        retval = opp->frr;
        break;
    case 0x1020: /* GCR */
        retval = opp->gcr;
        break;
    case 0x1080: /* VIR */
        retval = opp->vir;
        break;
    case 0x1090: /* PIR */
        retval = 0x00000000;
        break;
    case 0x00: /* Block Revision Register1 (BRR1) */
        retval = opp->brr1;
        break;
    case 0x40:
    case 0x50:
    case 0x60:
    case 0x70:
    case 0x80:
    case 0x90:
    case 0xA0:
    case 0xB0:
        retval = openpic_cpu_read_internal(opp, addr, get_current_cpu());
        break;
    case 0x10A0: /* IPI_IVPR */
    case 0x10B0:
    case 0x10C0:
    case 0x10D0:
        {
            int idx;
            idx = (addr - 0x10A0) >> 4;
            retval = read_IRQreg_ivpr(opp, opp->irq_ipi0 + idx);
        }
        break;
    case 0x10E0: /* SPVE */
        retval = opp->spve;
        break;
    default:
        break;
    }
    DPRINTF("%s: => 0x%08x", __func__, retval);

    return retval;
}

static void openpic_tmr_set_tmr(OpenPICTimer *tmr, uint32_t val, bool enabled);

static void qemu_timer_cb(void *opaque)
{
    OpenPICTimer *tmr = opaque;
    OpenPICState *opp = tmr->opp;
    uint32_t n_IRQ = tmr->n_IRQ;
    uint32_t val = tmr->tbcr & ~TBCR_CI;
    uint32_t tog = ((tmr->tccr & TCCR_TOG) ^ TCCR_TOG);  /* invert toggle. */

    DPRINTF("%s n_IRQ=%d", __func__, n_IRQ);
    /* Reload current count from base count and setup timer. */
    tmr->tccr = val | tog;
    openpic_tmr_set_tmr(tmr, val, /*enabled=*/true);
    /* Raise the interrupt. */
    opp->src[n_IRQ].destmask = read_IRQreg_idr(opp, n_IRQ);
    openpic_set_irq(opp, n_IRQ, 1);
    openpic_set_irq(opp, n_IRQ, 0);
}

/* If enabled is true, arranges for an interrupt to be raised val clocks into
   the future; if enabled is false, cancels the timer. */
static void openpic_tmr_set_tmr(OpenPICTimer *tmr, uint32_t val, bool enabled)
{
    uint64_t ns = ticks_to_ns(val & ~TCCR_TOG);
    /* A count of zero causes a timer to be set to expire immediately.  This
       effectively stops the simulation since the timer is constantly expiring
       which prevents guest code execution, so we don't honor that
       configuration.  On real hardware, this situation would generate an
       interrupt on every clock cycle if the interrupt was unmasked. */
    if ((ns == 0) || !enabled) {
        tmr->qemu_timer_active = false;
        tmr->tccr = tmr->tccr & TCCR_TOG;
        timer_del(tmr->qemu_timer);  /* set timer to never expire. */
    } else {
        tmr->qemu_timer_active = true;
        uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        tmr->origin_time = now;
        timer_mod(tmr->qemu_timer, now + ns);  /* set timer expiration. */
    }
}

/* Returns the current tccr value, i.e., timer value (in clocks) with
   appropriate TOG. */
static uint64_t openpic_tmr_get_timer(OpenPICTimer *tmr)
{
    uint64_t retval;
    if (!tmr->qemu_timer_active) {
        retval = tmr->tccr;
    } else {
        uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        uint64_t used = now - tmr->origin_time;  /* nsecs */
        uint32_t used_ticks = (uint32_t)ns_to_ticks(used);
        uint32_t count = (tmr->tccr & ~TCCR_TOG) - used_ticks;
        retval = (uint32_t)((tmr->tccr & TCCR_TOG) | (count & ~TCCR_TOG));
    }
    return retval;
}

static void openpic_tmr_write(void *opaque, hwaddr addr, uint64_t val,
                              unsigned len)
{
    OpenPICState *opp = opaque;
    int idx;

    DPRINTF("%s: addr %#" HWADDR_PRIx " <= %08" PRIx64,
            __func__, (addr + 0x10f0), val);
    if (addr & 0xF) {
        return;
    }

    if (addr == 0) {
        /* TFRR */
        opp->tfrr = val;
        return;
    }
    addr -= 0x10;  /* correct for TFRR */
    idx = (addr >> 6) & 0x3;

    switch (addr & 0x30) {
    case 0x00: /* TCCR */
        break;
    case 0x10: /* TBCR */
        /* Did the enable status change? */
        if ((opp->timers[idx].tbcr & TBCR_CI) != (val & TBCR_CI)) {
            /* Did "Count Inhibit" transition from 1 to 0? */
            if ((val & TBCR_CI) == 0) {
                opp->timers[idx].tccr = val & ~TCCR_TOG;
            }
            openpic_tmr_set_tmr(&opp->timers[idx],
                                (val & ~TBCR_CI),
                                /*enabled=*/((val & TBCR_CI) == 0));
        }
        opp->timers[idx].tbcr = val;
        break;
    case 0x20: /* TVPR */
        write_IRQreg_ivpr(opp, opp->irq_tim0 + idx, val);
        break;
    case 0x30: /* TDR */
        write_IRQreg_idr(opp, opp->irq_tim0 + idx, val);
        break;
    }
}

static uint64_t openpic_tmr_read(void *opaque, hwaddr addr, unsigned len)
{
    OpenPICState *opp = opaque;
    uint32_t retval = -1;
    int idx;

    DPRINTF("%s: addr %#" HWADDR_PRIx, __func__, addr + 0x10f0);
    if (addr & 0xF) {
        goto out;
    }
    if (addr == 0) {
        /* TFRR */
        retval = opp->tfrr;
        goto out;
    }
    addr -= 0x10;  /* correct for TFRR */
    idx = (addr >> 6) & 0x3;
    switch (addr & 0x30) {
    case 0x00: /* TCCR */
        retval = openpic_tmr_get_timer(&opp->timers[idx]);
        break;
    case 0x10: /* TBCR */
        retval = opp->timers[idx].tbcr;
        break;
    case 0x20: /* TVPR */
        retval = read_IRQreg_ivpr(opp, opp->irq_tim0 + idx);
        break;
    case 0x30: /* TDR */
        retval = read_IRQreg_idr(opp, opp->irq_tim0 + idx);
        break;
    }

out:
    DPRINTF("%s: => 0x%08x", __func__, retval);

    return retval;
}

static void openpic_src_write(void *opaque, hwaddr addr, uint64_t val,
                              unsigned len)
{
    OpenPICState *opp = opaque;
    int idx;

    DPRINTF("%s: addr %#" HWADDR_PRIx " <= %08" PRIx64,
            __func__, addr, val);

    addr = addr & 0xffff;
    idx = addr >> 5;

    switch (addr & 0x1f) {
    case 0x00:
        write_IRQreg_ivpr(opp, idx, val);
        break;
    case 0x10:
        write_IRQreg_idr(opp, idx, val);
        break;
    case 0x18:
        write_IRQreg_ilr(opp, idx, val);
        break;
    }
}

static uint64_t openpic_src_read(void *opaque, uint64_t addr, unsigned len)
{
    OpenPICState *opp = opaque;
    uint32_t retval;
    int idx;

    DPRINTF("%s: addr %#" HWADDR_PRIx, __func__, addr);
    retval = 0xFFFFFFFF;

    addr = addr & 0xffff;
    idx = addr >> 5;

    switch (addr & 0x1f) {
    case 0x00:
        retval = read_IRQreg_ivpr(opp, idx);
        break;
    case 0x10:
        retval = read_IRQreg_idr(opp, idx);
        break;
    case 0x18:
        retval = read_IRQreg_ilr(opp, idx);
        break;
    }

    DPRINTF("%s: => 0x%08x", __func__, retval);
    return retval;
}

static void openpic_msi_write(void *opaque, hwaddr addr, uint64_t val,
                              unsigned size)
{
    OpenPICState *opp = opaque;
    int idx = opp->irq_msi;
    int srs, ibs;

    DPRINTF("%s: addr %#" HWADDR_PRIx " <= 0x%08" PRIx64,
            __func__, addr, val);
    if (addr & 0xF) {
        return;
    }

    switch (addr) {
    case MSIIR_OFFSET:
        srs = val >> MSIIR_SRS_SHIFT;
        idx += srs;
        ibs = (val & MSIIR_IBS_MASK) >> MSIIR_IBS_SHIFT;
        opp->msi[srs].msir |= 1 << ibs;
        openpic_set_irq(opp, idx, 1);
        break;
    default:
        /* most registers are read-only, thus ignored */
        break;
    }
}
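
/*
 * MSIIR example: a write of 0x45000000 has SRS = 0x45000000 >> 29 = 2 and
 * IBS = (0x45000000 & MSIIR_IBS_MASK) >> 24 = 5, so bit 5 is set in
 * opp->msi[2].msir and the shared MSI interrupt opp->irq_msi + 2 is raised.
 */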

static uint64_t openpic_msi_read(void *opaque, hwaddr addr, unsigned size)
{
    OpenPICState *opp = opaque;
    uint64_t r = 0;
    int i, srs;

    DPRINTF("%s: addr %#" HWADDR_PRIx, __func__, addr);
    if (addr & 0xF) {
        return -1;
    }

    srs = addr >> 4;

    switch (addr) {
    case 0x00:
    case 0x10:
    case 0x20:
    case 0x30:
    case 0x40:
    case 0x50:
    case 0x60:
    case 0x70: /* MSIRs */
        r = opp->msi[srs].msir;
        /* Clear on read */
        opp->msi[srs].msir = 0;
        openpic_set_irq(opp, opp->irq_msi + srs, 0);
        break;
    case 0x120: /* MSISR */
        for (i = 0; i < MAX_MSI; i++) {
            r |= (opp->msi[i].msir ? 1 : 0) << i;
        }
        break;
    }

    return r;
}

static uint64_t openpic_summary_read(void *opaque, hwaddr addr, unsigned size)
{
    uint64_t r = 0;

    DPRINTF("%s: addr %#" HWADDR_PRIx, __func__, addr);

    /* TODO: EISR/EIMR */

    return r;
}

static void openpic_summary_write(void *opaque, hwaddr addr, uint64_t val,
                                  unsigned size)
{
    DPRINTF("%s: addr %#" HWADDR_PRIx " <= 0x%08" PRIx64,
            __func__, addr, val);

    /* TODO: EISR/EIMR */
}

static void openpic_cpu_write_internal(void *opaque, hwaddr addr,
                                       uint32_t val, int idx)
{
    OpenPICState *opp = opaque;
    IRQSource *src;
    IRQDest *dst;
    int s_IRQ, n_IRQ;

    DPRINTF("%s: cpu %d addr %#" HWADDR_PRIx " <= 0x%08x", __func__, idx,
            addr, val);

    if (idx < 0 || idx >= opp->nb_cpus) {
        return;
    }

    if (addr & 0xF) {
        return;
    }
    dst = &opp->dst[idx];
    addr &= 0xFF0;
    switch (addr) {
    case 0x40: /* IPIDR */
    case 0x50:
    case 0x60:
    case 0x70:
        idx = (addr - 0x40) >> 4;
        /* we use IDE as a mask of which CPUs to still deliver the IPI to. */
        opp->src[opp->irq_ipi0 + idx].destmask |= val;
        openpic_set_irq(opp, opp->irq_ipi0 + idx, 1);
        openpic_set_irq(opp, opp->irq_ipi0 + idx, 0);
        break;
    case 0x80: /* CTPR */
        dst->ctpr = val & 0x0000000F;

        DPRINTF("%s: set CPU %d ctpr to %d, raised %d servicing %d",
                __func__, idx, dst->ctpr, dst->raised.priority,
                dst->servicing.priority);

        if (dst->raised.priority <= dst->ctpr) {
            DPRINTF("%s: Lower OpenPIC INT output cpu %d due to ctpr",
                    __func__, idx);
            qemu_irq_lower(dst->irqs[OPENPIC_OUTPUT_INT]);
        } else if (dst->raised.priority > dst->servicing.priority) {
            DPRINTF("%s: Raise OpenPIC INT output cpu %d irq %d",
                    __func__, idx, dst->raised.next);
            qemu_irq_raise(dst->irqs[OPENPIC_OUTPUT_INT]);
        }

        break;
    case 0x90: /* WHOAMI */
        /* Read-only register */
        break;
    case 0xA0: /* IACK */
        /* Read-only register */
        break;
    case 0xB0: /* EOI */
        DPRINTF("EOI");
        s_IRQ = IRQ_get_next(opp, &dst->servicing);

        if (s_IRQ < 0) {
            DPRINTF("%s: EOI with no interrupt in service", __func__);
            break;
        }

        IRQ_resetbit(&dst->servicing, s_IRQ);
        /* Set up next servicing IRQ */
        s_IRQ = IRQ_get_next(opp, &dst->servicing);
        /* Check queued interrupts. */
        n_IRQ = IRQ_get_next(opp, &dst->raised);
        src = &opp->src[n_IRQ];
        if (n_IRQ != -1 &&
            (s_IRQ == -1 ||
             IVPR_PRIORITY(src->ivpr) > dst->servicing.priority)) {
            DPRINTF("Raise OpenPIC INT output cpu %d irq %d",
                    idx, n_IRQ);
            qemu_irq_raise(opp->dst[idx].irqs[OPENPIC_OUTPUT_INT]);
        }
        break;
    default:
        break;
    }
}

static void openpic_cpu_write(void *opaque, hwaddr addr, uint64_t val,
                              unsigned len)
{
    openpic_cpu_write_internal(opaque, addr, val, (addr & 0x1f000) >> 12);
}
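
/*
 * The per-CPU register banks are 0x1000 apart starting at PIC offset
 * 0x20000, so (addr & 0x1f000) >> 12 above recovers the CPU index from the
 * offset within this region; e.g. region offset 0x2080 is CTPR of CPU 2.
 */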

static uint32_t openpic_iack(OpenPICState *opp, IRQDest *dst, int cpu)
{
    IRQSource *src;
    int retval, irq;

    DPRINTF("Lower OpenPIC INT output");
    qemu_irq_lower(dst->irqs[OPENPIC_OUTPUT_INT]);

    irq = IRQ_get_next(opp, &dst->raised);
    DPRINTF("IACK: irq=%d", irq);

    if (irq == -1) {
        /* No more interrupt pending */
        return opp->spve;
    }

    src = &opp->src[irq];
    if (!(src->ivpr & IVPR_ACTIVITY_MASK) ||
        !(IVPR_PRIORITY(src->ivpr) > dst->ctpr)) {
        error_report("%s: bad raised IRQ %d ctpr %d ivpr 0x%08x",
                     __func__, irq, dst->ctpr, src->ivpr);
        openpic_update_irq(opp, irq);
        retval = opp->spve;
    } else {
        /* IRQ enters servicing state */
        IRQ_setbit(&dst->servicing, irq);
        retval = IVPR_VECTOR(opp, src->ivpr);
    }

    if (!src->level) {
        /* edge-sensitive IRQ */
        src->ivpr &= ~IVPR_ACTIVITY_MASK;
        src->pending = 0;
        IRQ_resetbit(&dst->raised, irq);
    }

    /* Timers and IPIs support multicast. */
    if (((irq >= opp->irq_ipi0) && (irq < (opp->irq_ipi0 + OPENPIC_MAX_IPI))) ||
        ((irq >= opp->irq_tim0) && (irq < (opp->irq_tim0 + OPENPIC_MAX_TMR)))) {
        DPRINTF("irq is IPI or TMR");
        src->destmask &= ~(1 << cpu);
        if (src->destmask && !src->level) {
            /* trigger on CPUs that didn't know about it yet */
            openpic_set_irq(opp, irq, 1);
            openpic_set_irq(opp, irq, 0);
            /* if all CPUs knew about it, set active bit again */
            src->ivpr |= IVPR_ACTIVITY_MASK;
        }
    }

    return retval;
}

static uint32_t openpic_cpu_read_internal(void *opaque, hwaddr addr,
                                          int idx)
{
    OpenPICState *opp = opaque;
    IRQDest *dst;
    uint32_t retval;

    DPRINTF("%s: cpu %d addr %#" HWADDR_PRIx, __func__, idx, addr);
    retval = 0xFFFFFFFF;

    if (idx < 0 || idx >= opp->nb_cpus) {
        return retval;
    }

    if (addr & 0xF) {
        return retval;
    }
    dst = &opp->dst[idx];
    addr &= 0xFF0;
    switch (addr) {
    case 0x80: /* CTPR */
        retval = dst->ctpr;
        break;
    case 0x90: /* WHOAMI */
        retval = idx;
        break;
    case 0xA0: /* IACK */
        retval = openpic_iack(opp, dst, idx);
        break;
    case 0xB0: /* EOI */
        retval = 0;
        break;
    default:
        break;
    }
    DPRINTF("%s: => 0x%08x", __func__, retval);

    return retval;
}

static uint64_t openpic_cpu_read(void *opaque, hwaddr addr, unsigned len)
{
    return openpic_cpu_read_internal(opaque, addr, (addr & 0x1f000) >> 12);
}

static const MemoryRegionOps openpic_glb_ops_le = {
    .write = openpic_gbl_write,
    .read = openpic_gbl_read,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static const MemoryRegionOps openpic_glb_ops_be = {
    .write = openpic_gbl_write,
    .read = openpic_gbl_read,
    .endianness = DEVICE_BIG_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static const MemoryRegionOps openpic_tmr_ops_le = {
    .write = openpic_tmr_write,
    .read = openpic_tmr_read,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static const MemoryRegionOps openpic_tmr_ops_be = {
    .write = openpic_tmr_write,
    .read = openpic_tmr_read,
    .endianness = DEVICE_BIG_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static const MemoryRegionOps openpic_cpu_ops_le = {
    .write = openpic_cpu_write,
    .read = openpic_cpu_read,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static const MemoryRegionOps openpic_cpu_ops_be = {
    .write = openpic_cpu_write,
    .read = openpic_cpu_read,
    .endianness = DEVICE_BIG_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static const MemoryRegionOps openpic_src_ops_le = {
    .write = openpic_src_write,
    .read = openpic_src_read,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static const MemoryRegionOps openpic_src_ops_be = {
    .write = openpic_src_write,
    .read = openpic_src_read,
    .endianness = DEVICE_BIG_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static const MemoryRegionOps openpic_msi_ops_be = {
    .read = openpic_msi_read,
    .write = openpic_msi_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static const MemoryRegionOps openpic_summary_ops_be = {
    .read = openpic_summary_read,
    .write = openpic_summary_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void openpic_reset(DeviceState *d)
{
    OpenPICState *opp = OPENPIC(d);
    int i;

    opp->gcr = GCR_RESET;
    /* Initialise controller registers */
    opp->frr = ((opp->nb_irqs - 1) << FRR_NIRQ_SHIFT) |
               ((opp->nb_cpus - 1) << FRR_NCPU_SHIFT) |
               (opp->vid << FRR_VID_SHIFT);

    opp->pir = 0;
    opp->spve = -1 & opp->vector_mask;
    opp->tfrr = opp->tfrr_reset;
    /* Initialise IRQ sources */
    for (i = 0; i < opp->max_irq; i++) {
        opp->src[i].ivpr = opp->ivpr_reset;
        switch (opp->src[i].type) {
        case IRQ_TYPE_NORMAL:
            opp->src[i].level = !!(opp->ivpr_reset & IVPR_SENSE_MASK);
            break;

        case IRQ_TYPE_FSLINT:
            opp->src[i].ivpr |= IVPR_POLARITY_MASK;
            break;

        case IRQ_TYPE_FSLSPECIAL:
            break;
        }

        write_IRQreg_idr(opp, i, opp->idr_reset);
    }
    /* Initialise IRQ destinations */
    for (i = 0; i < opp->nb_cpus; i++) {
        opp->dst[i].ctpr = 15;
        opp->dst[i].raised.next = -1;
        opp->dst[i].raised.priority = 0;
        bitmap_clear(opp->dst[i].raised.queue, 0, IRQQUEUE_SIZE_BITS);
        opp->dst[i].servicing.next = -1;
        opp->dst[i].servicing.priority = 0;
        bitmap_clear(opp->dst[i].servicing.queue, 0, IRQQUEUE_SIZE_BITS);
    }
    /* Initialise timers */
    for (i = 0; i < OPENPIC_MAX_TMR; i++) {
        opp->timers[i].tccr = 0;
        opp->timers[i].tbcr = TBCR_CI;
        if (opp->timers[i].qemu_timer_active) {
            timer_del(opp->timers[i].qemu_timer);  /* Inhibit timer */
            opp->timers[i].qemu_timer_active = false;
        }
    }
    /* Go out of RESET state */
    opp->gcr = 0;
}
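
/*
 * Note that after reset every source is masked (ivpr_reset includes
 * IVPR_MASK_MASK for all models) and every CPU's CTPR is 15, the highest
 * task priority, so the INT output cannot be asserted until the guest both
 * unmasks a source and lowers CTPR.
 */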

typedef struct MemReg {
    const char *name;
    MemoryRegionOps const *ops;
    hwaddr start_addr;
    ram_addr_t size;
} MemReg;

static void fsl_common_init(OpenPICState *opp)
{
    int i;
    int virq = OPENPIC_MAX_SRC;

    opp->vid = VID_REVISION_1_2;
    opp->vir = VIR_GENERIC;
    opp->vector_mask = 0xFFFF;
    opp->tfrr_reset = 0;
    opp->ivpr_reset = IVPR_MASK_MASK;
    opp->idr_reset = 1 << 0;
    opp->max_irq = OPENPIC_MAX_IRQ;

    opp->irq_ipi0 = virq;
    virq += OPENPIC_MAX_IPI;
    opp->irq_tim0 = virq;
    virq += OPENPIC_MAX_TMR;

    assert(virq <= OPENPIC_MAX_IRQ);

    opp->irq_msi = 224;

    msi_nonbroken = true;
    for (i = 0; i < opp->fsl->max_ext; i++) {
        opp->src[i].level = false;
    }

    /* Internal interrupts, including message and MSI */
    for (i = 16; i < OPENPIC_MAX_SRC; i++) {
        opp->src[i].type = IRQ_TYPE_FSLINT;
        opp->src[i].level = true;
    }

    /* timers and IPIs */
    for (i = OPENPIC_MAX_SRC; i < virq; i++) {
        opp->src[i].type = IRQ_TYPE_FSLSPECIAL;
        opp->src[i].level = false;
    }

    for (i = 0; i < OPENPIC_MAX_TMR; i++) {
        opp->timers[i].n_IRQ = opp->irq_tim0 + i;
        opp->timers[i].qemu_timer_active = false;
        opp->timers[i].qemu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                                 &qemu_timer_cb,
                                                 &opp->timers[i]);
        opp->timers[i].opp = opp;
    }
}
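
/*
 * Resulting FSL interrupt numbering: sources 0..OPENPIC_MAX_SRC-1 are the
 * external and internal interrupts (with the shared MSI sources starting at
 * 224), followed by OPENPIC_MAX_IPI IPI sources at irq_ipi0 and
 * OPENPIC_MAX_TMR timer sources at irq_tim0.
 */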

static void map_list(OpenPICState *opp, const MemReg *list, int *count)
{
    while (list->name) {
        assert(*count < ARRAY_SIZE(opp->sub_io_mem));

        memory_region_init_io(&opp->sub_io_mem[*count], OBJECT(opp), list->ops,
                              opp, list->name, list->size);

        memory_region_add_subregion(&opp->mem, list->start_addr,
                                    &opp->sub_io_mem[*count]);

        (*count)++;
        list++;
    }
}

static const VMStateDescription vmstate_openpic_irq_queue = {
    .name = "openpic_irq_queue",
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_BITMAP(queue, IRQQueue, 0, queue_size),
        VMSTATE_INT32(next, IRQQueue),
        VMSTATE_INT32(priority, IRQQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_openpic_irqdest = {
    .name = "openpic_irqdest",
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(ctpr, IRQDest),
        VMSTATE_STRUCT(raised, IRQDest, 0, vmstate_openpic_irq_queue,
                       IRQQueue),
        VMSTATE_STRUCT(servicing, IRQDest, 0, vmstate_openpic_irq_queue,
                       IRQQueue),
        VMSTATE_UINT32_ARRAY(outputs_active, IRQDest, OPENPIC_OUTPUT_NB),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_openpic_irqsource = {
    .name = "openpic_irqsource",
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(ivpr, IRQSource),
        VMSTATE_UINT32(idr, IRQSource),
        VMSTATE_UINT32(destmask, IRQSource),
        VMSTATE_INT32(last_cpu, IRQSource),
        VMSTATE_INT32(pending, IRQSource),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_openpic_timer = {
    .name = "openpic_timer",
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(tccr, OpenPICTimer),
        VMSTATE_UINT32(tbcr, OpenPICTimer),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_openpic_msi = {
    .name = "openpic_msi",
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(msir, OpenPICMSI),
        VMSTATE_END_OF_LIST()
    }
};

static int openpic_post_load(void *opaque, int version_id)
{
    OpenPICState *opp = (OpenPICState *)opaque;
    int i;

    /* Update internal ivpr and idr variables */
    for (i = 0; i < opp->max_irq; i++) {
        write_IRQreg_idr(opp, i, opp->src[i].idr);
        write_IRQreg_ivpr(opp, i, opp->src[i].ivpr);
    }

    return 0;
}
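
/*
 * Replaying the loaded IDR/IVPR values through the normal write paths above
 * reconstructs the derived per-source fields (output, nomask, level) that
 * are not part of the migration stream.
 */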

static const VMStateDescription vmstate_openpic = {
    .name = "openpic",
    .version_id = 3,
    .minimum_version_id = 3,
    .post_load = openpic_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(gcr, OpenPICState),
        VMSTATE_UINT32(vir, OpenPICState),
        VMSTATE_UINT32(pir, OpenPICState),
        VMSTATE_UINT32(spve, OpenPICState),
        VMSTATE_UINT32(tfrr, OpenPICState),
        VMSTATE_UINT32(max_irq, OpenPICState),
        VMSTATE_STRUCT_VARRAY_UINT32(src, OpenPICState, max_irq, 0,
                                     vmstate_openpic_irqsource, IRQSource),
        VMSTATE_UINT32_EQUAL(nb_cpus, OpenPICState, NULL),
        VMSTATE_STRUCT_VARRAY_UINT32(dst, OpenPICState, nb_cpus, 0,
                                     vmstate_openpic_irqdest, IRQDest),
        VMSTATE_STRUCT_ARRAY(timers, OpenPICState, OPENPIC_MAX_TMR, 0,
                             vmstate_openpic_timer, OpenPICTimer),
        VMSTATE_STRUCT_ARRAY(msi, OpenPICState, MAX_MSI, 0,
                             vmstate_openpic_msi, OpenPICMSI),
        VMSTATE_UINT32(irq_ipi0, OpenPICState),
        VMSTATE_UINT32(irq_tim0, OpenPICState),
        VMSTATE_UINT32(irq_msi, OpenPICState),
        VMSTATE_END_OF_LIST()
    }
};

static void openpic_init(Object *obj)
{
    OpenPICState *opp = OPENPIC(obj);

    memory_region_init(&opp->mem, obj, "openpic", 0x40000);
}

static void openpic_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *d = SYS_BUS_DEVICE(dev);
    OpenPICState *opp = OPENPIC(dev);
    int i, j;
    int list_count = 0;
    static const MemReg list_le[] = {
        {"glb", &openpic_glb_ops_le,
         OPENPIC_GLB_REG_START, OPENPIC_GLB_REG_SIZE},
        {"tmr", &openpic_tmr_ops_le,
         OPENPIC_TMR_REG_START, OPENPIC_TMR_REG_SIZE},
        {"src", &openpic_src_ops_le,
         OPENPIC_SRC_REG_START, OPENPIC_SRC_REG_SIZE},
        {"cpu", &openpic_cpu_ops_le,
         OPENPIC_CPU_REG_START, OPENPIC_CPU_REG_SIZE},
        {NULL}
    };
    static const MemReg list_be[] = {
        {"glb", &openpic_glb_ops_be,
         OPENPIC_GLB_REG_START, OPENPIC_GLB_REG_SIZE},
        {"tmr", &openpic_tmr_ops_be,
         OPENPIC_TMR_REG_START, OPENPIC_TMR_REG_SIZE},
        {"src", &openpic_src_ops_be,
         OPENPIC_SRC_REG_START, OPENPIC_SRC_REG_SIZE},
        {"cpu", &openpic_cpu_ops_be,
         OPENPIC_CPU_REG_START, OPENPIC_CPU_REG_SIZE},
        {NULL}
    };
    static const MemReg list_fsl[] = {
        {"msi", &openpic_msi_ops_be,
         OPENPIC_MSI_REG_START, OPENPIC_MSI_REG_SIZE},
        {"summary", &openpic_summary_ops_be,
         OPENPIC_SUMMARY_REG_START, OPENPIC_SUMMARY_REG_SIZE},
        {NULL}
    };

    if (opp->nb_cpus > MAX_CPU) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE,
                   TYPE_OPENPIC, "nb_cpus", (uint64_t)opp->nb_cpus,
                   (uint64_t)0, (uint64_t)MAX_CPU);
        return;
    }

    switch (opp->model) {
    case OPENPIC_MODEL_FSL_MPIC_20:
    default:
        opp->fsl = &fsl_mpic_20;
        opp->brr1 = 0x00400200;
        opp->flags |= OPENPIC_FLAG_IDR_CRIT;
        opp->nb_irqs = 80;
        opp->mpic_mode_mask = GCR_MODE_MIXED;

        fsl_common_init(opp);
        map_list(opp, list_be, &list_count);
        map_list(opp, list_fsl, &list_count);

        break;

    case OPENPIC_MODEL_FSL_MPIC_42:
        opp->fsl = &fsl_mpic_42;
        opp->brr1 = 0x00400402;
        opp->flags |= OPENPIC_FLAG_ILR;
        opp->nb_irqs = 196;
        opp->mpic_mode_mask = GCR_MODE_PROXY;

        fsl_common_init(opp);
        map_list(opp, list_be, &list_count);
        map_list(opp, list_fsl, &list_count);

        break;

    case OPENPIC_MODEL_RAVEN:
        opp->nb_irqs = RAVEN_MAX_EXT;
        opp->vid = VID_REVISION_1_3;
        opp->vir = VIR_GENERIC;
        opp->vector_mask = 0xFF;
        opp->tfrr_reset = 4160000;
        opp->ivpr_reset = IVPR_MASK_MASK | IVPR_MODE_MASK;
        opp->idr_reset = 0;
        opp->max_irq = RAVEN_MAX_IRQ;
        opp->irq_ipi0 = RAVEN_IPI_IRQ;
        opp->irq_tim0 = RAVEN_TMR_IRQ;
        opp->brr1 = -1;
        opp->mpic_mode_mask = GCR_MODE_MIXED;

        if (opp->nb_cpus != 1) {
            error_setg(errp, "Only UP supported today");
            return;
        }

        map_list(opp, list_le, &list_count);
        break;

    case OPENPIC_MODEL_KEYLARGO:
        opp->nb_irqs = KEYLARGO_MAX_EXT;
        opp->vid = VID_REVISION_1_2;
        opp->vir = VIR_GENERIC;
        opp->vector_mask = 0xFF;
        opp->tfrr_reset = 4160000;
        opp->ivpr_reset = IVPR_MASK_MASK | IVPR_MODE_MASK;
        opp->idr_reset = 0;
        opp->max_irq = KEYLARGO_MAX_IRQ;
        opp->irq_ipi0 = KEYLARGO_IPI_IRQ;
        opp->irq_tim0 = KEYLARGO_TMR_IRQ;
        opp->brr1 = -1;
        opp->mpic_mode_mask = GCR_MODE_MIXED;

        if (opp->nb_cpus != 1) {
            error_setg(errp, "Only UP supported today");
            return;
        }

        map_list(opp, list_le, &list_count);
        break;
    }

    for (i = 0; i < opp->nb_cpus; i++) {
        opp->dst[i].irqs = g_new0(qemu_irq, OPENPIC_OUTPUT_NB);
        for (j = 0; j < OPENPIC_OUTPUT_NB; j++) {
            sysbus_init_irq(d, &opp->dst[i].irqs[j]);
        }

        opp->dst[i].raised.queue_size = IRQQUEUE_SIZE_BITS;
        opp->dst[i].raised.queue = bitmap_new(IRQQUEUE_SIZE_BITS);
        opp->dst[i].servicing.queue_size = IRQQUEUE_SIZE_BITS;
        opp->dst[i].servicing.queue = bitmap_new(IRQQUEUE_SIZE_BITS);
    }

    sysbus_init_mmio(d, &opp->mem);
    qdev_init_gpio_in(dev, openpic_set_irq, opp->max_irq);
}

static Property openpic_properties[] = {
    DEFINE_PROP_UINT32("model", OpenPICState, model,
                       OPENPIC_MODEL_FSL_MPIC_20),
    DEFINE_PROP_UINT32("nb_cpus", OpenPICState, nb_cpus, 1),
    DEFINE_PROP_END_OF_LIST(),
};

static void openpic_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);

    dc->realize = openpic_realize;
    dc->props = openpic_properties;
    dc->reset = openpic_reset;
    dc->vmsd = &vmstate_openpic;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
}

static const TypeInfo openpic_info = {
    .name          = TYPE_OPENPIC,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(OpenPICState),
    .instance_init = openpic_init,
    .class_init    = openpic_class_init,
};

static void openpic_register_types(void)
{
    type_register_static(&openpic_info);
}

type_init(openpic_register_types)