/*
 * ARM Generic/Distributed Interrupt Controller
 *
 * Copyright (c) 2006-2007 CodeSourcery.
 * Written by Paul Brook
 *
 * This code is licensed under the GPL.
 */

/* This file contains implementation code for the RealView EB interrupt
 * controller, MPCore distributed interrupt controller and ARMv7-M
 * Nested Vectored Interrupt Controller.
 * It is compiled in two ways:
 *  (1) as a standalone file to produce a sysbus device which is a GIC
 *  that can be used on the realview board and as one of the builtin
 *  private peripherals for the ARM MP CPUs (11MPCore, A9, etc)
 *  (2) by being directly #included into armv7m_nvic.c to produce the
 *  armv7m_nvic device.
 */

#include "hw/sysbus.h"
#include "gic_internal.h"
#include "qom/cpu.h"

//#define DEBUG_GIC

#ifdef DEBUG_GIC
#define DPRINTF(fmt, ...) \
do { fprintf(stderr, "arm_gic: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) do {} while(0)
#endif

static const uint8_t gic_id[] = {
    0x90, 0x13, 0x04, 0x00, 0x0d, 0xf0, 0x05, 0xb1
};

static inline int gic_get_current_cpu(GICState *s)
{
    if (s->num_cpu > 1) {
        return current_cpu->cpu_index;
    }
    return 0;
}

/* Return true if this GIC config has interrupt groups, which is
 * true if we're a GICv2, or a GICv1 with the security extensions.
 */
static inline bool gic_has_groups(GICState *s)
{
    return s->revision == 2 || s->security_extn;
}

/* TODO: Many places that call this routine could be optimized. */
/* Update interrupt status after enabled or pending bits have been changed. */
void gic_update(GICState *s)
{
    int best_irq;
    int best_prio;
    int irq;
    int irq_level, fiq_level;
    int cpu;
    int cm;

    for (cpu = 0; cpu < s->num_cpu; cpu++) {
        cm = 1 << cpu;
        s->current_pending[cpu] = 1023;
        if (!(s->ctlr & (GICD_CTLR_EN_GRP0 | GICD_CTLR_EN_GRP1))
            || !(s->cpu_ctlr[cpu] & (GICC_CTLR_EN_GRP0 | GICC_CTLR_EN_GRP1))) {
            qemu_irq_lower(s->parent_irq[cpu]);
            qemu_irq_lower(s->parent_fiq[cpu]);
            continue;
        }
        best_prio = 0x100;
        best_irq = 1023;
        for (irq = 0; irq < s->num_irq; irq++) {
            if (GIC_TEST_ENABLED(irq, cm) && gic_test_pending(s, irq, cm) &&
                (irq < GIC_INTERNAL || GIC_TARGET(irq) & cm)) {
                if (GIC_GET_PRIORITY(irq, cpu) < best_prio) {
                    best_prio = GIC_GET_PRIORITY(irq, cpu);
                    best_irq = irq;
                }
            }
        }

        irq_level = fiq_level = 0;

        if (best_prio < s->priority_mask[cpu]) {
            s->current_pending[cpu] = best_irq;
            if (best_prio < s->running_priority[cpu]) {
                int group = GIC_TEST_GROUP(best_irq, cm);

                if (extract32(s->ctlr, group, 1) &&
                    extract32(s->cpu_ctlr[cpu], group, 1)) {
                    if (group == 0 && s->cpu_ctlr[cpu] & GICC_CTLR_FIQ_EN) {
                        DPRINTF("Raised pending FIQ %d (cpu %d)\n",
                                best_irq, cpu);
                        fiq_level = 1;
                    } else {
                        DPRINTF("Raised pending IRQ %d (cpu %d)\n",
                                best_irq, cpu);
                        irq_level = 1;
                    }
                }
            }
        }

        qemu_set_irq(s->parent_irq[cpu], irq_level);
        qemu_set_irq(s->parent_fiq[cpu], fiq_level);
    }
}
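/* Illustrative note for gic_update() above: GIC priorities are "lower value
 * wins", so an interrupt only becomes the current highest priority pending
 * interrupt if its priority value is numerically below the CPU's priority
 * mask, and it only asserts IRQ or FIQ if it is also below the current
 * running priority. For example, with priority_mask 0xf0 and
 * running_priority 0x80, a pending interrupt at priority 0x60 is signalled
 * while one at 0x90 is masked.
 */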
void gic_set_pending_private(GICState *s, int cpu, int irq)
{
    int cm = 1 << cpu;

    if (gic_test_pending(s, irq, cm)) {
        return;
    }

    DPRINTF("Set %d pending cpu %d\n", irq, cpu);
    GIC_SET_PENDING(irq, cm);
    gic_update(s);
}

static void gic_set_irq_11mpcore(GICState *s, int irq, int level,
                                 int cm, int target)
{
    if (level) {
        GIC_SET_LEVEL(irq, cm);
        if (GIC_TEST_EDGE_TRIGGER(irq) || GIC_TEST_ENABLED(irq, cm)) {
            DPRINTF("Set %d pending mask %x\n", irq, target);
            GIC_SET_PENDING(irq, target);
        }
    } else {
        GIC_CLEAR_LEVEL(irq, cm);
    }
}

static void gic_set_irq_generic(GICState *s, int irq, int level,
                                int cm, int target)
{
    if (level) {
        GIC_SET_LEVEL(irq, cm);
        DPRINTF("Set %d pending mask %x\n", irq, target);
        if (GIC_TEST_EDGE_TRIGGER(irq)) {
            GIC_SET_PENDING(irq, target);
        }
    } else {
        GIC_CLEAR_LEVEL(irq, cm);
    }
}

/* Process a change in an external IRQ input. */
static void gic_set_irq(void *opaque, int irq, int level)
{
    /* Meaning of the 'irq' parameter:
     *  [0..N-1]     : external interrupts
     *  [N..N+31]    : PPI (internal) interrupts for CPU 0
     *  [N+32..N+63] : PPI (internal) interrupts for CPU 1
     *  ...
     */
    GICState *s = (GICState *)opaque;
    int cm, target;
    if (irq < (s->num_irq - GIC_INTERNAL)) {
        /* The first external input line is internal interrupt 32. */
        cm = ALL_CPU_MASK;
        irq += GIC_INTERNAL;
        target = GIC_TARGET(irq);
    } else {
        int cpu;
        irq -= (s->num_irq - GIC_INTERNAL);
        cpu = irq / GIC_INTERNAL;
        irq %= GIC_INTERNAL;
        cm = 1 << cpu;
        target = cm;
    }

    assert(irq >= GIC_NR_SGIS);

    if (level == GIC_TEST_LEVEL(irq, cm)) {
        return;
    }

    if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) {
        gic_set_irq_11mpcore(s, irq, level, cm, target);
    } else {
        gic_set_irq_generic(s, irq, level, cm, target);
    }

    gic_update(s);
}

static uint16_t gic_get_current_pending_irq(GICState *s, int cpu,
                                            MemTxAttrs attrs)
{
    uint16_t pending_irq = s->current_pending[cpu];

    if (pending_irq < GIC_MAXIRQ && gic_has_groups(s)) {
        int group = GIC_TEST_GROUP(pending_irq, (1 << cpu));
        /* On a GIC without the security extensions, reading this register
         * behaves in the same way as a secure access to a GIC with them.
         */
        bool secure = !s->security_extn || attrs.secure;

        if (group == 0 && !secure) {
            /* Group0 interrupts hidden from Non-secure access */
            return 1023;
        }
        if (group == 1 && secure && !(s->cpu_ctlr[cpu] & GICC_CTLR_ACK_CTL)) {
            /* Group1 interrupts only seen by Secure access if
             * AckCtl bit set.
             */
            return 1022;
        }
    }
    return pending_irq;
}

static int gic_get_group_priority(GICState *s, int cpu, int irq)
{
    /* Return the group priority of the specified interrupt
     * (which is the top bits of its priority, with the number
     * of bits masked determined by the applicable binary point register).
     */
    int bpr;
    uint32_t mask;

    if (gic_has_groups(s) &&
        !(s->cpu_ctlr[cpu] & GICC_CTLR_CBPR) &&
        GIC_TEST_GROUP(irq, (1 << cpu))) {
        bpr = s->abpr[cpu];
    } else {
        bpr = s->bpr[cpu];
    }

    /* a BPR of 0 means the group priority bits are [7:1];
     * a BPR of 1 means they are [7:2], and so on down to
     * a BPR of 7 meaning no group priority bits at all.
     */
    mask = ~0U << ((bpr & 7) + 1);

    return GIC_GET_PRIORITY(irq, cpu) & mask;
}
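/* Worked example for gic_get_group_priority() above (illustrative only):
 * with BPR == 2 the group priority field is bits [7:3], so
 * mask == ~0U << 3 == 0xfffffff8. Interrupts with priorities 0x31 and 0x36
 * then both have group priority 0x30, so neither can preempt the other,
 * although either can preempt an interrupt running at group priority 0x40.
 */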
static void gic_activate_irq(GICState *s, int cpu, int irq)
{
    /* Set the appropriate Active Priority Register bit for this IRQ,
     * and update the running priority.
     */
    int prio = gic_get_group_priority(s, cpu, irq);
    int preemption_level = prio >> (GIC_MIN_BPR + 1);
    int regno = preemption_level / 32;
    int bitno = preemption_level % 32;

    if (gic_has_groups(s) && GIC_TEST_GROUP(irq, (1 << cpu))) {
        s->nsapr[regno][cpu] |= (1 << bitno);
    } else {
        s->apr[regno][cpu] |= (1 << bitno);
    }

    s->running_priority[cpu] = prio;
    GIC_SET_ACTIVE(irq, 1 << cpu);
}

static int gic_get_prio_from_apr_bits(GICState *s, int cpu)
{
    /* Recalculate the current running priority for this CPU based
     * on the set bits in the Active Priority Registers.
     */
    int i;
    for (i = 0; i < GIC_NR_APRS; i++) {
        uint32_t apr = s->apr[i][cpu] | s->nsapr[i][cpu];
        if (!apr) {
            continue;
        }
        return (i * 32 + ctz32(apr)) << (GIC_MIN_BPR + 1);
    }
    return 0x100;
}

static void gic_drop_prio(GICState *s, int cpu, int group)
{
    /* Drop the priority of the currently active interrupt in the
     * specified group.
     *
     * Note that we can guarantee (because of the requirement to nest
     * GICC_IAR reads [which activate an interrupt and raise priority]
     * with GICC_EOIR writes [which drop the priority for the interrupt])
     * that the interrupt we're being called for is the highest priority
     * active interrupt, meaning that it has the lowest set bit in the
     * APR registers.
     *
     * If the guest does not honour the ordering constraints then the
     * behaviour of the GIC is UNPREDICTABLE, which for us means that
     * the values of the APR registers might become incorrect and the
     * running priority will be wrong, so interrupts that should preempt
     * might not do so, and interrupts that should not preempt might do so.
     */
    int i;

    for (i = 0; i < GIC_NR_APRS; i++) {
        uint32_t *papr = group ? &s->nsapr[i][cpu] : &s->apr[i][cpu];
        if (!*papr) {
            continue;
        }
        /* Clear lowest set bit */
        *papr &= *papr - 1;
        break;
    }

    s->running_priority[cpu] = gic_get_prio_from_apr_bits(s, cpu);
}
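/* Illustrative example of the Active Priority Register bookkeeping above,
 * assuming GIC_MIN_BPR is 0 (the value used for the GICv2 model): a group
 * priority of 0x40 gives preemption_level 0x40 >> 1 == 32, which is bit 0
 * of APR1 (or NSAPR1 for a Group1 interrupt). gic_get_prio_from_apr_bits()
 * reverses the mapping: a lowest set bit at (regno 1, bit 0) yields a
 * running priority of (1 * 32 + 0) << 1 == 0x40. The "clear lowest set bit"
 * step in gic_drop_prio() uses the standard papr & (papr - 1) trick.
 */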
uint32_t gic_acknowledge_irq(GICState *s, int cpu, MemTxAttrs attrs)
{
    int ret, irq, src;
    int cm = 1 << cpu;

    /* gic_get_current_pending_irq() will return 1022 or 1023 appropriately
     * for the case where this GIC supports grouping and the pending interrupt
     * is in the wrong group.
     */
    irq = gic_get_current_pending_irq(s, cpu, attrs);

    if (irq >= GIC_MAXIRQ) {
        DPRINTF("ACK, no pending interrupt or it is hidden: %d\n", irq);
        return irq;
    }

    if (GIC_GET_PRIORITY(irq, cpu) >= s->running_priority[cpu]) {
        DPRINTF("ACK, pending interrupt (%d) has insufficient priority\n", irq);
        return 1023;
    }

    if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) {
        /* Clear pending flags for both level and edge triggered interrupts.
         * Level triggered IRQs will be reasserted once they become inactive.
         */
        GIC_CLEAR_PENDING(irq, GIC_TEST_MODEL(irq) ? ALL_CPU_MASK : cm);
        ret = irq;
    } else {
        if (irq < GIC_NR_SGIS) {
            /* Lookup the source CPU for the SGI and clear this in the
             * sgi_pending map.  Return the src and clear the overall pending
             * state on this CPU if the SGI is not pending from any CPUs.
             */
            assert(s->sgi_pending[irq][cpu] != 0);
            src = ctz32(s->sgi_pending[irq][cpu]);
            s->sgi_pending[irq][cpu] &= ~(1 << src);
            if (s->sgi_pending[irq][cpu] == 0) {
                GIC_CLEAR_PENDING(irq, GIC_TEST_MODEL(irq) ? ALL_CPU_MASK : cm);
            }
            ret = irq | ((src & 0x7) << 10);
        } else {
            /* Clear pending state for both level and edge triggered
             * interrupts. (level triggered interrupts with an active line
             * remain pending, see gic_test_pending)
             */
            GIC_CLEAR_PENDING(irq, GIC_TEST_MODEL(irq) ? ALL_CPU_MASK : cm);
            ret = irq;
        }
    }

    gic_activate_irq(s, cpu, irq);
    gic_update(s);
    DPRINTF("ACK %d\n", irq);
    return ret;
}

void gic_set_priority(GICState *s, int cpu, int irq, uint8_t val,
                      MemTxAttrs attrs)
{
    if (s->security_extn && !attrs.secure) {
        if (!GIC_TEST_GROUP(irq, (1 << cpu))) {
            return; /* Ignore Non-secure access of Group0 IRQ */
        }
        val = 0x80 | (val >> 1); /* Non-secure view */
    }

    if (irq < GIC_INTERNAL) {
        s->priority1[irq][cpu] = val;
    } else {
        s->priority2[(irq) - GIC_INTERNAL] = val;
    }
}

static uint32_t gic_get_priority(GICState *s, int cpu, int irq,
                                 MemTxAttrs attrs)
{
    uint32_t prio = GIC_GET_PRIORITY(irq, cpu);

    if (s->security_extn && !attrs.secure) {
        if (!GIC_TEST_GROUP(irq, (1 << cpu))) {
            return 0; /* Non-secure access cannot read priority of Group0 IRQ */
        }
        prio = (prio << 1) & 0xff; /* Non-secure view */
    }
    return prio;
}

static void gic_set_priority_mask(GICState *s, int cpu, uint8_t pmask,
                                  MemTxAttrs attrs)
{
    if (s->security_extn && !attrs.secure) {
        if (s->priority_mask[cpu] & 0x80) {
            /* Priority Mask in upper half */
            pmask = 0x80 | (pmask >> 1);
        } else {
            /* Non-secure write ignored if priority mask is in lower half */
            return;
        }
    }
    s->priority_mask[cpu] = pmask;
}

static uint32_t gic_get_priority_mask(GICState *s, int cpu, MemTxAttrs attrs)
{
    uint32_t pmask = s->priority_mask[cpu];

    if (s->security_extn && !attrs.secure) {
        if (pmask & 0x80) {
            /* Priority Mask in upper half, return Non-secure view */
            pmask = (pmask << 1) & 0xff;
        } else {
            /* Priority Mask in lower half, RAZ */
            pmask = 0;
        }
    }
    return pmask;
}

static uint32_t gic_get_cpu_control(GICState *s, int cpu, MemTxAttrs attrs)
{
    uint32_t ret = s->cpu_ctlr[cpu];

    if (s->security_extn && !attrs.secure) {
        /* Construct the NS banked view of GICC_CTLR from the correct
         * bits of the S banked view. We don't need to move the bypass
         * control bits because we don't implement that (IMPDEF) part
         * of the GIC architecture.
         */
        ret = (ret & (GICC_CTLR_EN_GRP1 | GICC_CTLR_EOIMODE_NS)) >> 1;
    }
    return ret;
}

static void gic_set_cpu_control(GICState *s, int cpu, uint32_t value,
                                MemTxAttrs attrs)
{
    uint32_t mask;

    if (s->security_extn && !attrs.secure) {
        /* The NS view can only write certain bits in the register;
         * the rest are unchanged
         */
        mask = GICC_CTLR_EN_GRP1;
        if (s->revision == 2) {
            mask |= GICC_CTLR_EOIMODE_NS;
        }
        s->cpu_ctlr[cpu] &= ~mask;
        s->cpu_ctlr[cpu] |= (value << 1) & mask;
    } else {
        if (s->revision == 2) {
            mask = s->security_extn ? GICC_CTLR_V2_S_MASK : GICC_CTLR_V2_MASK;
        } else {
            mask = s->security_extn ? GICC_CTLR_V1_S_MASK : GICC_CTLR_V1_MASK;
        }
        s->cpu_ctlr[cpu] = value & mask;
    }
    DPRINTF("CPU Interface %d: Group0 Interrupts %sabled, "
            "Group1 Interrupts %sabled\n", cpu,
            (s->cpu_ctlr[cpu] & GICC_CTLR_EN_GRP0) ? "En" : "Dis",
            (s->cpu_ctlr[cpu] & GICC_CTLR_EN_GRP1) ? "En" : "Dis");
}

static uint8_t gic_get_running_priority(GICState *s, int cpu, MemTxAttrs attrs)
{
    if (s->security_extn && !attrs.secure) {
        if (s->running_priority[cpu] & 0x80) {
            /* Running priority in upper half of range: return the Non-secure
             * view of the priority.
             */
            return s->running_priority[cpu] << 1;
        } else {
            /* Running priority in lower half of range: RAZ */
            return 0;
        }
    } else {
        return s->running_priority[cpu];
    }
}

void gic_complete_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs)
{
    int cm = 1 << cpu;
    int group;

    DPRINTF("EOI %d\n", irq);
    if (irq >= s->num_irq) {
        /* This handles two cases:
         * 1. If software writes the ID of a spurious interrupt [ie 1023]
         * to the GICC_EOIR, the GIC ignores that write.
         * 2. If software writes the number of a non-existent interrupt
         * this must be a subcase of "value written does not match the last
         * valid interrupt value read from the Interrupt Acknowledge
         * register" and so this is UNPREDICTABLE. We choose to ignore it.
         */
        return;
    }
    if (s->running_priority[cpu] == 0x100) {
        return; /* No active IRQ. */
    }

    if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) {
        /* Mark level triggered interrupts as pending if they are still
           raised. */
        if (!GIC_TEST_EDGE_TRIGGER(irq) && GIC_TEST_ENABLED(irq, cm)
            && GIC_TEST_LEVEL(irq, cm) && (GIC_TARGET(irq) & cm) != 0) {
            DPRINTF("Set %d pending mask %x\n", irq, cm);
            GIC_SET_PENDING(irq, cm);
        }
    }

    group = gic_has_groups(s) && GIC_TEST_GROUP(irq, cm);

    if (s->security_extn && !attrs.secure && !group) {
        DPRINTF("Non-secure EOI for Group0 interrupt %d ignored\n", irq);
        return;
    }

    /* Secure EOI with GICC_CTLR.AckCtl == 0 when the IRQ is a Group 1
     * interrupt is UNPREDICTABLE. We choose to handle it as if AckCtl == 1,
     * i.e. go ahead and complete the irq anyway.
     */

    gic_drop_prio(s, cpu, group);
    GIC_CLEAR_ACTIVE(irq, cm);
    gic_update(s);
}
static uint32_t gic_dist_readb(void *opaque, hwaddr offset, MemTxAttrs attrs)
{
    GICState *s = (GICState *)opaque;
    uint32_t res;
    int irq;
    int i;
    int cpu;
    int cm;
    int mask;

    cpu = gic_get_current_cpu(s);
    cm = 1 << cpu;
    if (offset < 0x100) {
        if (offset == 0) {      /* GICD_CTLR */
            if (s->security_extn && !attrs.secure) {
                /* The NS bank of this register is just an alias of the
                 * EnableGrp1 bit in the S bank version.
                 */
                return extract32(s->ctlr, 1, 1);
            } else {
                return s->ctlr;
            }
        }
        if (offset == 4)
            /* Interrupt Controller Type Register */
            return ((s->num_irq / 32) - 1)
                    | ((s->num_cpu - 1) << 5)
                    | (s->security_extn << 10);
        if (offset < 0x08)
            return 0;
        if (offset >= 0x80) {
            /* Interrupt Group Registers: these RAZ/WI if this is an NS
             * access to a GIC with the security extensions, or if the GIC
             * doesn't have groups at all.
             */
            res = 0;
            if (!(s->security_extn && !attrs.secure) && gic_has_groups(s)) {
                /* Every byte offset holds 8 group status bits */
                irq = (offset - 0x080) * 8 + GIC_BASE_IRQ;
                if (irq >= s->num_irq) {
                    goto bad_reg;
                }
                for (i = 0; i < 8; i++) {
                    if (GIC_TEST_GROUP(irq + i, cm)) {
                        res |= (1 << i);
                    }
                }
            }
            return res;
        }
        goto bad_reg;
    } else if (offset < 0x200) {
        /* Interrupt Set/Clear Enable. */
        if (offset < 0x180)
            irq = (offset - 0x100) * 8;
        else
            irq = (offset - 0x180) * 8;
        irq += GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        res = 0;
        for (i = 0; i < 8; i++) {
            if (GIC_TEST_ENABLED(irq + i, cm)) {
                res |= (1 << i);
            }
        }
    } else if (offset < 0x300) {
        /* Interrupt Set/Clear Pending. */
        if (offset < 0x280)
            irq = (offset - 0x200) * 8;
        else
            irq = (offset - 0x280) * 8;
        irq += GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        res = 0;
        mask = (irq < GIC_INTERNAL) ? cm : ALL_CPU_MASK;
        for (i = 0; i < 8; i++) {
            if (gic_test_pending(s, irq + i, mask)) {
                res |= (1 << i);
            }
        }
    } else if (offset < 0x400) {
        /* Interrupt Active. */
        irq = (offset - 0x300) * 8 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        res = 0;
        mask = (irq < GIC_INTERNAL) ? cm : ALL_CPU_MASK;
        for (i = 0; i < 8; i++) {
            if (GIC_TEST_ACTIVE(irq + i, mask)) {
                res |= (1 << i);
            }
        }
    } else if (offset < 0x800) {
        /* Interrupt Priority. */
        irq = (offset - 0x400) + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        res = gic_get_priority(s, cpu, irq, attrs);
    } else if (offset < 0xc00) {
        /* Interrupt CPU Target. */
        if (s->num_cpu == 1 && s->revision != REV_11MPCORE) {
            /* For uniprocessor GICs these RAZ/WI */
            res = 0;
        } else {
            irq = (offset - 0x800) + GIC_BASE_IRQ;
            if (irq >= s->num_irq) {
                goto bad_reg;
            }
            if (irq >= 29 && irq <= 31) {
                res = cm;
            } else {
                res = GIC_TARGET(irq);
            }
        }
    } else if (offset < 0xf00) {
        /* Interrupt Configuration. */
        irq = (offset - 0xc00) * 4 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        res = 0;
        for (i = 0; i < 4; i++) {
            if (GIC_TEST_MODEL(irq + i))
                res |= (1 << (i * 2));
            if (GIC_TEST_EDGE_TRIGGER(irq + i))
                res |= (2 << (i * 2));
        }
    } else if (offset < 0xf10) {
        goto bad_reg;
    } else if (offset < 0xf30) {
        if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) {
            goto bad_reg;
        }

        if (offset < 0xf20) {
            /* GICD_CPENDSGIRn */
            irq = (offset - 0xf10);
        } else {
            irq = (offset - 0xf20);
            /* GICD_SPENDSGIRn */
        }

        res = s->sgi_pending[irq][cpu];
    } else if (offset < 0xfe0) {
        goto bad_reg;
    } else /* offset >= 0xfe0 */ {
        if (offset & 3) {
            res = 0;
        } else {
            res = gic_id[(offset - 0xfe0) >> 2];
        }
    }
    return res;
bad_reg:
    qemu_log_mask(LOG_GUEST_ERROR,
                  "gic_dist_readb: Bad offset %x\n", (int)offset);
    return 0;
}

static MemTxResult gic_dist_read(void *opaque, hwaddr offset, uint64_t *data,
                                 unsigned size, MemTxAttrs attrs)
{
    switch (size) {
    case 1:
        *data = gic_dist_readb(opaque, offset, attrs);
        return MEMTX_OK;
    case 2:
        *data = gic_dist_readb(opaque, offset, attrs);
        *data |= gic_dist_readb(opaque, offset + 1, attrs) << 8;
        return MEMTX_OK;
    case 4:
        *data = gic_dist_readb(opaque, offset, attrs);
        *data |= gic_dist_readb(opaque, offset + 1, attrs) << 8;
        *data |= gic_dist_readb(opaque, offset + 2, attrs) << 16;
        *data |= gic_dist_readb(opaque, offset + 3, attrs) << 24;
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}
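/* For reference, the distributor register layout decoded byte-at-a-time by
 * gic_dist_readb() above and gic_dist_writeb() below is:
 *   0x000        GICD_CTLR (control)
 *   0x004        GICD_TYPER (interrupt controller type, read only)
 *   0x080-0x0ff  GICD_IGROUPRn (interrupt group)
 *   0x100-0x1ff  GICD_ISENABLERn / GICD_ICENABLERn (set/clear enable)
 *   0x200-0x2ff  GICD_ISPENDRn / GICD_ICPENDRn (set/clear pending)
 *   0x300-0x3ff  interrupt active status (read only)
 *   0x400-0x7ff  GICD_IPRIORITYRn (one priority byte per interrupt)
 *   0x800-0xbff  GICD_ITARGETSRn (one target byte per interrupt)
 *   0xc00-0xeff  GICD_ICFGRn (two configuration bits per interrupt)
 *   0xf00        GICD_SGIR (software generated interrupts, write only)
 *   0xf10-0xf2f  GICD_CPENDSGIRn / GICD_SPENDSGIRn
 *   0xfe0-0xfff  identification registers (read only)
 * The set/clear register pairs follow the usual "write 1 to act" rule; as an
 * illustration (assuming GIC_BASE_IRQ is 0, as for the standalone GIC),
 * writing 0x0c to byte offset 0x104 (GICD_ISENABLER1) enables interrupts 34
 * and 35 and leaves the other enable bits unchanged, while the same value
 * written to 0x184 (GICD_ICENABLER1) disables them.
 */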
static void gic_dist_writeb(void *opaque, hwaddr offset,
                            uint32_t value, MemTxAttrs attrs)
{
    GICState *s = (GICState *)opaque;
    int irq;
    int i;
    int cpu;

    cpu = gic_get_current_cpu(s);
    if (offset < 0x100) {
        if (offset == 0) {
            if (s->security_extn && !attrs.secure) {
                /* NS version is just an alias of the S version's bit 1 */
                s->ctlr = deposit32(s->ctlr, 1, 1, value);
            } else if (gic_has_groups(s)) {
                s->ctlr = value & (GICD_CTLR_EN_GRP0 | GICD_CTLR_EN_GRP1);
            } else {
                s->ctlr = value & GICD_CTLR_EN_GRP0;
            }
            DPRINTF("Distributor: Group0 %sabled; Group 1 %sabled\n",
                    s->ctlr & GICD_CTLR_EN_GRP0 ? "En" : "Dis",
                    s->ctlr & GICD_CTLR_EN_GRP1 ? "En" : "Dis");
        } else if (offset < 4) {
            /* ignored. */
        } else if (offset >= 0x80) {
            /* Interrupt Group Registers: RAZ/WI for NS access to secure
             * GIC, or for GICs without groups.
             */
            if (!(s->security_extn && !attrs.secure) && gic_has_groups(s)) {
                /* Every byte offset holds 8 group status bits */
                irq = (offset - 0x80) * 8 + GIC_BASE_IRQ;
                if (irq >= s->num_irq) {
                    goto bad_reg;
                }
                for (i = 0; i < 8; i++) {
                    /* Group bits are banked for private interrupts */
                    int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;
                    if (value & (1 << i)) {
                        /* Group1 (Non-secure) */
                        GIC_SET_GROUP(irq + i, cm);
                    } else {
                        /* Group0 (Secure) */
                        GIC_CLEAR_GROUP(irq + i, cm);
                    }
                }
            }
        } else {
            goto bad_reg;
        }
    } else if (offset < 0x180) {
        /* Interrupt Set Enable. */
        irq = (offset - 0x100) * 8 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        if (irq < GIC_NR_SGIS) {
            value = 0xff;
        }

        for (i = 0; i < 8; i++) {
            if (value & (1 << i)) {
                int mask =
                    (irq < GIC_INTERNAL) ? (1 << cpu) : GIC_TARGET(irq + i);
                int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;

                if (!GIC_TEST_ENABLED(irq + i, cm)) {
                    DPRINTF("Enabled IRQ %d\n", irq + i);
                }
                GIC_SET_ENABLED(irq + i, cm);
                /* If a raised level triggered IRQ is enabled then mark
                   it as pending. */
                if (GIC_TEST_LEVEL(irq + i, mask)
                        && !GIC_TEST_EDGE_TRIGGER(irq + i)) {
                    DPRINTF("Set %d pending mask %x\n", irq + i, mask);
                    GIC_SET_PENDING(irq + i, mask);
                }
            }
        }
    } else if (offset < 0x200) {
        /* Interrupt Clear Enable. */
        irq = (offset - 0x180) * 8 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        if (irq < GIC_NR_SGIS) {
            value = 0;
        }

        for (i = 0; i < 8; i++) {
            if (value & (1 << i)) {
                int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;

                if (GIC_TEST_ENABLED(irq + i, cm)) {
                    DPRINTF("Disabled IRQ %d\n", irq + i);
                }
                GIC_CLEAR_ENABLED(irq + i, cm);
            }
        }
    } else if (offset < 0x280) {
        /* Interrupt Set Pending. */
        irq = (offset - 0x200) * 8 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        if (irq < GIC_NR_SGIS) {
            value = 0;
        }

        for (i = 0; i < 8; i++) {
            if (value & (1 << i)) {
                GIC_SET_PENDING(irq + i, GIC_TARGET(irq + i));
            }
        }
    } else if (offset < 0x300) {
        /* Interrupt Clear Pending. */
        irq = (offset - 0x280) * 8 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        if (irq < GIC_NR_SGIS) {
            value = 0;
        }

        for (i = 0; i < 8; i++) {
            /* ??? This currently clears the pending bit for all CPUs, even
               for per-CPU interrupts.  It's unclear whether this is the
               correct behavior. */
            if (value & (1 << i)) {
                GIC_CLEAR_PENDING(irq + i, ALL_CPU_MASK);
            }
        }
    } else if (offset < 0x400) {
        /* Interrupt Active. */
        goto bad_reg;
    } else if (offset < 0x800) {
        /* Interrupt Priority. */
        irq = (offset - 0x400) + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        gic_set_priority(s, cpu, irq, value, attrs);
    } else if (offset < 0xc00) {
        /* Interrupt CPU Target. RAZ/WI on uniprocessor GICs, with the
         * annoying exception of the 11MPCore's GIC.
         */
        if (s->num_cpu != 1 || s->revision == REV_11MPCORE) {
            irq = (offset - 0x800) + GIC_BASE_IRQ;
            if (irq >= s->num_irq) {
                goto bad_reg;
            }
            if (irq < 29) {
                value = 0;
            } else if (irq < GIC_INTERNAL) {
                value = ALL_CPU_MASK;
            }
            s->irq_target[irq] = value & ALL_CPU_MASK;
        }
    } else if (offset < 0xf00) {
        /* Interrupt Configuration. */
        irq = (offset - 0xc00) * 4 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        if (irq < GIC_NR_SGIS)
            value |= 0xaa;
        for (i = 0; i < 4; i++) {
            if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) {
                if (value & (1 << (i * 2))) {
                    GIC_SET_MODEL(irq + i);
                } else {
                    GIC_CLEAR_MODEL(irq + i);
                }
            }
            if (value & (2 << (i * 2))) {
                GIC_SET_EDGE_TRIGGER(irq + i);
            } else {
                GIC_CLEAR_EDGE_TRIGGER(irq + i);
            }
        }
    } else if (offset < 0xf10) {
        /* 0xf00 is only handled for 32-bit writes. */
        goto bad_reg;
    } else if (offset < 0xf20) {
        /* GICD_CPENDSGIRn */
        if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) {
            goto bad_reg;
        }
        irq = (offset - 0xf10);

        s->sgi_pending[irq][cpu] &= ~value;
        if (s->sgi_pending[irq][cpu] == 0) {
            GIC_CLEAR_PENDING(irq, 1 << cpu);
        }
    } else if (offset < 0xf30) {
        /* GICD_SPENDSGIRn */
        if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) {
            goto bad_reg;
        }
        irq = (offset - 0xf20);

        GIC_SET_PENDING(irq, 1 << cpu);
        s->sgi_pending[irq][cpu] |= value;
    } else {
        goto bad_reg;
    }
    gic_update(s);
    return;
bad_reg:
    qemu_log_mask(LOG_GUEST_ERROR,
                  "gic_dist_writeb: Bad offset %x\n", (int)offset);
}
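/* Wider distributor writes are decomposed into byte writes below, with the
 * exception of GICD_SGIR at 0xf00, which is only meaningful as a whole
 * 32-bit value and is therefore handled directly in gic_dist_writel().
 * Illustrative example of the GICD_SGIR encoding it decodes: writing
 * 0x00020003 from CPU 0 uses target filter 0 (bits [25:24]), target list
 * 0x02 (bits [23:16]) and SGI number 3, so SGI 3 becomes pending on CPU 1
 * with CPU 0 recorded as the source in sgi_pending[3][1].
 */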
static void gic_dist_writew(void *opaque, hwaddr offset,
                            uint32_t value, MemTxAttrs attrs)
{
    gic_dist_writeb(opaque, offset, value & 0xff, attrs);
    gic_dist_writeb(opaque, offset + 1, value >> 8, attrs);
}

static void gic_dist_writel(void *opaque, hwaddr offset,
                            uint32_t value, MemTxAttrs attrs)
{
    GICState *s = (GICState *)opaque;
    if (offset == 0xf00) {
        int cpu;
        int irq;
        int mask;
        int target_cpu;

        cpu = gic_get_current_cpu(s);
        irq = value & 0x3ff;
        switch ((value >> 24) & 3) {
        case 0:
            mask = (value >> 16) & ALL_CPU_MASK;
            break;
        case 1:
            mask = ALL_CPU_MASK ^ (1 << cpu);
            break;
        case 2:
            mask = 1 << cpu;
            break;
        default:
            DPRINTF("Bad Soft Int target filter\n");
            mask = ALL_CPU_MASK;
            break;
        }
        GIC_SET_PENDING(irq, mask);
        target_cpu = ctz32(mask);
        while (target_cpu < GIC_NCPU) {
            s->sgi_pending[irq][target_cpu] |= (1 << cpu);
            mask &= ~(1 << target_cpu);
            target_cpu = ctz32(mask);
        }
        gic_update(s);
        return;
    }
    gic_dist_writew(opaque, offset, value & 0xffff, attrs);
    gic_dist_writew(opaque, offset + 2, value >> 16, attrs);
}

static MemTxResult gic_dist_write(void *opaque, hwaddr offset, uint64_t data,
                                  unsigned size, MemTxAttrs attrs)
{
    switch (size) {
    case 1:
        gic_dist_writeb(opaque, offset, data, attrs);
        return MEMTX_OK;
    case 2:
        gic_dist_writew(opaque, offset, data, attrs);
        return MEMTX_OK;
    case 4:
        gic_dist_writel(opaque, offset, data, attrs);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}
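/* Illustrative note for the APR helpers below, assuming GIC_MIN_BPR is 0
 * (the GICv2 value, giving four 32-bit APR/NSAPR registers per CPU): the
 * Non-secure view of GICC_APR0 and GICC_APR1 is provided by GICC_NSAPR2 and
 * GICC_NSAPR3 respectively, i.e. the second half of the NSAPR state. For
 * larger GIC_MIN_BPR values there are fewer active priority bits, so the
 * Non-secure alias shrinks to a single register or to a sub-field of
 * NSAPR0, as the switch statements encode.
 */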
static inline uint32_t gic_apr_ns_view(GICState *s, int cpu, int regno)
{
    /* Return the Nonsecure view of GICC_APR<regno>. This is the
     * second half of GICC_NSAPR.
     */
    switch (GIC_MIN_BPR) {
    case 0:
        if (regno < 2) {
            return s->nsapr[regno + 2][cpu];
        }
        break;
    case 1:
        if (regno == 0) {
            return s->nsapr[regno + 1][cpu];
        }
        break;
    case 2:
        if (regno == 0) {
            return extract32(s->nsapr[0][cpu], 16, 16);
        }
        break;
    case 3:
        if (regno == 0) {
            return extract32(s->nsapr[0][cpu], 8, 8);
        }
        break;
    default:
        g_assert_not_reached();
    }
    return 0;
}

static inline void gic_apr_write_ns_view(GICState *s, int cpu, int regno,
                                         uint32_t value)
{
    /* Write the Nonsecure view of GICC_APR<regno>. */
    switch (GIC_MIN_BPR) {
    case 0:
        if (regno < 2) {
            s->nsapr[regno + 2][cpu] = value;
        }
        break;
    case 1:
        if (regno == 0) {
            s->nsapr[regno + 1][cpu] = value;
        }
        break;
    case 2:
        if (regno == 0) {
            s->nsapr[0][cpu] = deposit32(s->nsapr[0][cpu], 16, 16, value);
        }
        break;
    case 3:
        if (regno == 0) {
            s->nsapr[0][cpu] = deposit32(s->nsapr[0][cpu], 8, 8, value);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
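/* CPU interface register layout as decoded by gic_cpu_read() and
 * gic_cpu_write() below (byte offsets from the start of the CPU interface
 * region):
 *   0x00       GICC_CTLR (control)
 *   0x04       GICC_PMR (priority mask)
 *   0x08       GICC_BPR (binary point; Non-secure accesses see GICC_ABPR)
 *   0x0c       GICC_IAR (interrupt acknowledge, read only)
 *   0x10       GICC_EOIR (end of interrupt, write only)
 *   0x14       GICC_RPR (running priority)
 *   0x18       GICC_HPPIR (highest priority pending interrupt)
 *   0x1c       GICC_ABPR (aliased binary point)
 *   0xd0-0xdc  GICC_APRn (active priorities)
 *   0xe0-0xec  GICC_NSAPRn (Non-secure active priorities)
 */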
static MemTxResult gic_cpu_read(GICState *s, int cpu, int offset,
                                uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case 0x00: /* Control */
        *data = gic_get_cpu_control(s, cpu, attrs);
        break;
    case 0x04: /* Priority mask */
        *data = gic_get_priority_mask(s, cpu, attrs);
        break;
    case 0x08: /* Binary Point */
        if (s->security_extn && !attrs.secure) {
            /* BPR is banked. Non-secure copy stored in ABPR. */
            *data = s->abpr[cpu];
        } else {
            *data = s->bpr[cpu];
        }
        break;
    case 0x0c: /* Acknowledge */
        *data = gic_acknowledge_irq(s, cpu, attrs);
        break;
    case 0x14: /* Running Priority */
        *data = gic_get_running_priority(s, cpu, attrs);
        break;
    case 0x18: /* Highest Pending Interrupt */
        *data = gic_get_current_pending_irq(s, cpu, attrs);
        break;
    case 0x1c: /* Aliased Binary Point */
        /* GIC v2, no security: ABPR
         * GIC v1, no security: not implemented (RAZ/WI)
         * With security extensions, secure access: ABPR (alias of NS BPR)
         * With security extensions, nonsecure access: RAZ/WI
         */
        if (!gic_has_groups(s) || (s->security_extn && !attrs.secure)) {
            *data = 0;
        } else {
            *data = s->abpr[cpu];
        }
        break;
    case 0xd0: case 0xd4: case 0xd8: case 0xdc:
    {
        int regno = (offset - 0xd0) / 4;

        if (regno >= GIC_NR_APRS || s->revision != 2) {
            *data = 0;
        } else if (s->security_extn && !attrs.secure) {
            /* NS view of GICC_APR<n> is the second half of GICC_NSAPR */
            *data = gic_apr_ns_view(s, cpu, regno);
        } else {
            *data = s->apr[regno][cpu];
        }
        break;
    }
    case 0xe0: case 0xe4: case 0xe8: case 0xec:
    {
        int regno = (offset - 0xe0) / 4;

        if (regno >= GIC_NR_APRS || s->revision != 2 || !gic_has_groups(s) ||
            (s->security_extn && !attrs.secure)) {
            *data = 0;
        } else {
            *data = s->nsapr[regno][cpu];
        }
        break;
    }
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "gic_cpu_read: Bad offset %x\n", (int)offset);
        return MEMTX_ERROR;
    }
    return MEMTX_OK;
}

static MemTxResult gic_cpu_write(GICState *s, int cpu, int offset,
                                 uint32_t value, MemTxAttrs attrs)
{
    switch (offset) {
    case 0x00: /* Control */
        gic_set_cpu_control(s, cpu, value, attrs);
        break;
    case 0x04: /* Priority mask */
        gic_set_priority_mask(s, cpu, value, attrs);
        break;
    case 0x08: /* Binary Point */
        if (s->security_extn && !attrs.secure) {
            s->abpr[cpu] = MAX(value & 0x7, GIC_MIN_ABPR);
        } else {
            s->bpr[cpu] = MAX(value & 0x7, GIC_MIN_BPR);
        }
        break;
    case 0x10: /* End Of Interrupt */
        gic_complete_irq(s, cpu, value & 0x3ff, attrs);
        return MEMTX_OK;
    case 0x1c: /* Aliased Binary Point */
        if (!gic_has_groups(s) || (s->security_extn && !attrs.secure)) {
            /* unimplemented, or NS access: RAZ/WI */
            return MEMTX_OK;
        } else {
            s->abpr[cpu] = MAX(value & 0x7, GIC_MIN_ABPR);
        }
        break;
    case 0xd0: case 0xd4: case 0xd8: case 0xdc:
    {
        int regno = (offset - 0xd0) / 4;

        if (regno >= GIC_NR_APRS || s->revision != 2) {
            return MEMTX_OK;
        }
        if (s->security_extn && !attrs.secure) {
            /* NS view of GICC_APR<n> is the second half of GICC_NSAPR */
            gic_apr_write_ns_view(s, cpu, regno, value);
        } else {
            s->apr[regno][cpu] = value;
        }
        break;
    }
    case 0xe0: case 0xe4: case 0xe8: case 0xec:
    {
        int regno = (offset - 0xe0) / 4;

        if (regno >= GIC_NR_APRS || s->revision != 2) {
            return MEMTX_OK;
        }
        if (!gic_has_groups(s) || (s->security_extn && !attrs.secure)) {
            return MEMTX_OK;
        }
        s->nsapr[regno][cpu] = value;
        break;
    }
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "gic_cpu_write: Bad offset %x\n", (int)offset);
        return MEMTX_ERROR;
    }
    gic_update(s);
    return MEMTX_OK;
}

/* Wrappers to read/write the GIC CPU interface for the current CPU */
static MemTxResult gic_thiscpu_read(void *opaque, hwaddr addr, uint64_t *data,
                                    unsigned size, MemTxAttrs attrs)
{
    GICState *s = (GICState *)opaque;
    return gic_cpu_read(s, gic_get_current_cpu(s), addr, data, attrs);
}

static MemTxResult gic_thiscpu_write(void *opaque, hwaddr addr,
                                     uint64_t value, unsigned size,
                                     MemTxAttrs attrs)
{
    GICState *s = (GICState *)opaque;
    return gic_cpu_write(s, gic_get_current_cpu(s), addr, value, attrs);
}

/* Wrappers to read/write the GIC CPU interface for a specific CPU.
 * These just decode the opaque pointer into GICState* + cpu id.
 */
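/* Specifically, arm_gic_realize() sets s->backref[i] = s for every CPU i and
 * registers &s->backref[i] as the opaque pointer for that CPU's MMIO region,
 * so dereferencing the pointer recovers the GICState and the pointer
 * difference (backref - s->backref) recovers the CPU index.
 */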
static MemTxResult gic_do_cpu_read(void *opaque, hwaddr addr, uint64_t *data,
                                   unsigned size, MemTxAttrs attrs)
{
    GICState **backref = (GICState **)opaque;
    GICState *s = *backref;
    int id = (backref - s->backref);
    return gic_cpu_read(s, id, addr, data, attrs);
}

static MemTxResult gic_do_cpu_write(void *opaque, hwaddr addr,
                                    uint64_t value, unsigned size,
                                    MemTxAttrs attrs)
{
    GICState **backref = (GICState **)opaque;
    GICState *s = *backref;
    int id = (backref - s->backref);
    return gic_cpu_write(s, id, addr, value, attrs);
}

static const MemoryRegionOps gic_ops[2] = {
    {
        .read_with_attrs = gic_dist_read,
        .write_with_attrs = gic_dist_write,
        .endianness = DEVICE_NATIVE_ENDIAN,
    },
    {
        .read_with_attrs = gic_thiscpu_read,
        .write_with_attrs = gic_thiscpu_write,
        .endianness = DEVICE_NATIVE_ENDIAN,
    }
};

static const MemoryRegionOps gic_cpu_ops = {
    .read_with_attrs = gic_do_cpu_read,
    .write_with_attrs = gic_do_cpu_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* This function is used by nvic model */
void gic_init_irqs_and_distributor(GICState *s)
{
    gic_init_irqs_and_mmio(s, gic_set_irq, gic_ops);
}
1284 */ 1285 for (i = 0; i < s->num_cpu; i++) { 1286 s->backref[i] = s; 1287 memory_region_init_io(&s->cpuiomem[i+1], OBJECT(s), &gic_cpu_ops, 1288 &s->backref[i], "gic_cpu", 0x100); 1289 sysbus_init_mmio(sbd, &s->cpuiomem[i+1]); 1290 } 1291 } 1292 1293 static void arm_gic_class_init(ObjectClass *klass, void *data) 1294 { 1295 DeviceClass *dc = DEVICE_CLASS(klass); 1296 ARMGICClass *agc = ARM_GIC_CLASS(klass); 1297 1298 agc->parent_realize = dc->realize; 1299 dc->realize = arm_gic_realize; 1300 } 1301 1302 static const TypeInfo arm_gic_info = { 1303 .name = TYPE_ARM_GIC, 1304 .parent = TYPE_ARM_GIC_COMMON, 1305 .instance_size = sizeof(GICState), 1306 .class_init = arm_gic_class_init, 1307 .class_size = sizeof(ARMGICClass), 1308 }; 1309 1310 static void arm_gic_register_types(void) 1311 { 1312 type_register_static(&arm_gic_info); 1313 } 1314 1315 type_init(arm_gic_register_types) 1316