1 /* 2 * ARM Generic/Distributed Interrupt Controller 3 * 4 * Copyright (c) 2006-2007 CodeSourcery. 5 * Written by Paul Brook 6 * 7 * This code is licensed under the GPL. 8 */ 9 10 /* This file contains implementation code for the RealView EB interrupt 11 * controller, MPCore distributed interrupt controller and ARMv7-M 12 * Nested Vectored Interrupt Controller. 13 * It is compiled in two ways: 14 * (1) as a standalone file to produce a sysbus device which is a GIC 15 * that can be used on the realview board and as one of the builtin 16 * private peripherals for the ARM MP CPUs (11MPCore, A9, etc) 17 * (2) by being directly #included into armv7m_nvic.c to produce the 18 * armv7m_nvic device. 19 */ 20 21 #include "qemu/osdep.h" 22 #include "hw/sysbus.h" 23 #include "gic_internal.h" 24 #include "qapi/error.h" 25 #include "qom/cpu.h" 26 #include "qemu/log.h" 27 #include "trace.h" 28 #include "sysemu/kvm.h" 29 30 /* #define DEBUG_GIC */ 31 32 #ifdef DEBUG_GIC 33 #define DEBUG_GIC_GATE 1 34 #else 35 #define DEBUG_GIC_GATE 0 36 #endif 37 38 #define DPRINTF(fmt, ...) do { \ 39 if (DEBUG_GIC_GATE) { \ 40 fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \ 41 } \ 42 } while (0) 43 44 static const uint8_t gic_id_11mpcore[] = { 45 0x00, 0x00, 0x00, 0x00, 0x90, 0x13, 0x04, 0x00, 0x0d, 0xf0, 0x05, 0xb1 46 }; 47 48 static const uint8_t gic_id_gicv1[] = { 49 0x04, 0x00, 0x00, 0x00, 0x90, 0xb3, 0x1b, 0x00, 0x0d, 0xf0, 0x05, 0xb1 50 }; 51 52 static const uint8_t gic_id_gicv2[] = { 53 0x04, 0x00, 0x00, 0x00, 0x90, 0xb4, 0x2b, 0x00, 0x0d, 0xf0, 0x05, 0xb1 54 }; 55 56 static inline int gic_get_current_cpu(GICState *s) 57 { 58 if (s->num_cpu > 1) { 59 return current_cpu->cpu_index; 60 } 61 return 0; 62 } 63 64 static inline int gic_get_current_vcpu(GICState *s) 65 { 66 return gic_get_current_cpu(s) + GIC_NCPU; 67 } 68 69 /* Return true if this GIC config has interrupt groups, which is 70 * true if we're a GICv2, or a GICv1 with the security extensions. 71 */ 72 static inline bool gic_has_groups(GICState *s) 73 { 74 return s->revision == 2 || s->security_extn; 75 } 76 77 static inline bool gic_cpu_ns_access(GICState *s, int cpu, MemTxAttrs attrs) 78 { 79 return !gic_is_vcpu(cpu) && s->security_extn && !attrs.secure; 80 } 81 82 /* TODO: Many places that call this routine could be optimized. */ 83 /* Update interrupt status after enabled or pending bits have been changed. */ 84 static void gic_update(GICState *s) 85 { 86 int best_irq; 87 int best_prio; 88 int irq; 89 int irq_level, fiq_level; 90 int cpu; 91 int cm; 92 93 for (cpu = 0; cpu < s->num_cpu; cpu++) { 94 cm = 1 << cpu; 95 s->current_pending[cpu] = 1023; 96 if (!(s->ctlr & (GICD_CTLR_EN_GRP0 | GICD_CTLR_EN_GRP1)) 97 || !(s->cpu_ctlr[cpu] & (GICC_CTLR_EN_GRP0 | GICC_CTLR_EN_GRP1))) { 98 qemu_irq_lower(s->parent_irq[cpu]); 99 qemu_irq_lower(s->parent_fiq[cpu]); 100 continue; 101 } 102 best_prio = 0x100; 103 best_irq = 1023; 104 for (irq = 0; irq < s->num_irq; irq++) { 105 if (GIC_DIST_TEST_ENABLED(irq, cm) && 106 gic_test_pending(s, irq, cm) && 107 (!GIC_DIST_TEST_ACTIVE(irq, cm)) && 108 (irq < GIC_INTERNAL || GIC_DIST_TARGET(irq) & cm)) { 109 if (GIC_DIST_GET_PRIORITY(irq, cpu) < best_prio) { 110 best_prio = GIC_DIST_GET_PRIORITY(irq, cpu); 111 best_irq = irq; 112 } 113 } 114 } 115 116 if (best_irq != 1023) { 117 trace_gic_update_bestirq(cpu, best_irq, best_prio, 118 s->priority_mask[cpu], s->running_priority[cpu]); 119 } 120 121 irq_level = fiq_level = 0; 122 123 if (best_prio < s->priority_mask[cpu]) { 124 s->current_pending[cpu] = best_irq; 125 if (best_prio < s->running_priority[cpu]) { 126 int group = GIC_DIST_TEST_GROUP(best_irq, cm); 127 128 if (extract32(s->ctlr, group, 1) && 129 extract32(s->cpu_ctlr[cpu], group, 1)) { 130 if (group == 0 && s->cpu_ctlr[cpu] & GICC_CTLR_FIQ_EN) { 131 DPRINTF("Raised pending FIQ %d (cpu %d)\n", 132 best_irq, cpu); 133 fiq_level = 1; 134 trace_gic_update_set_irq(cpu, "fiq", fiq_level); 135 } else { 136 DPRINTF("Raised pending IRQ %d (cpu %d)\n", 137 best_irq, cpu); 138 irq_level = 1; 139 trace_gic_update_set_irq(cpu, "irq", irq_level); 140 } 141 } 142 } 143 } 144 145 qemu_set_irq(s->parent_irq[cpu], irq_level); 146 qemu_set_irq(s->parent_fiq[cpu], fiq_level); 147 } 148 } 149 150 /* Return true if this LR is empty, i.e. the corresponding bit 151 * in ELRSR is set. 152 */ 153 static inline bool gic_lr_entry_is_free(uint32_t entry) 154 { 155 return (GICH_LR_STATE(entry) == GICH_LR_STATE_INVALID) 156 && (GICH_LR_HW(entry) || !GICH_LR_EOI(entry)); 157 } 158 159 /* Return true if this LR should trigger an EOI maintenance interrupt, i.e. the 160 * corrsponding bit in EISR is set. 161 */ 162 static inline bool gic_lr_entry_is_eoi(uint32_t entry) 163 { 164 return (GICH_LR_STATE(entry) == GICH_LR_STATE_INVALID) 165 && !GICH_LR_HW(entry) && GICH_LR_EOI(entry); 166 } 167 168 static void gic_set_irq_11mpcore(GICState *s, int irq, int level, 169 int cm, int target) 170 { 171 if (level) { 172 GIC_DIST_SET_LEVEL(irq, cm); 173 if (GIC_DIST_TEST_EDGE_TRIGGER(irq) || GIC_DIST_TEST_ENABLED(irq, cm)) { 174 DPRINTF("Set %d pending mask %x\n", irq, target); 175 GIC_DIST_SET_PENDING(irq, target); 176 } 177 } else { 178 GIC_DIST_CLEAR_LEVEL(irq, cm); 179 } 180 } 181 182 static void gic_set_irq_generic(GICState *s, int irq, int level, 183 int cm, int target) 184 { 185 if (level) { 186 GIC_DIST_SET_LEVEL(irq, cm); 187 DPRINTF("Set %d pending mask %x\n", irq, target); 188 if (GIC_DIST_TEST_EDGE_TRIGGER(irq)) { 189 GIC_DIST_SET_PENDING(irq, target); 190 } 191 } else { 192 GIC_DIST_CLEAR_LEVEL(irq, cm); 193 } 194 } 195 196 /* Process a change in an external IRQ input. */ 197 static void gic_set_irq(void *opaque, int irq, int level) 198 { 199 /* Meaning of the 'irq' parameter: 200 * [0..N-1] : external interrupts 201 * [N..N+31] : PPI (internal) interrupts for CPU 0 202 * [N+32..N+63] : PPI (internal interrupts for CPU 1 203 * ... 204 */ 205 GICState *s = (GICState *)opaque; 206 int cm, target; 207 if (irq < (s->num_irq - GIC_INTERNAL)) { 208 /* The first external input line is internal interrupt 32. */ 209 cm = ALL_CPU_MASK; 210 irq += GIC_INTERNAL; 211 target = GIC_DIST_TARGET(irq); 212 } else { 213 int cpu; 214 irq -= (s->num_irq - GIC_INTERNAL); 215 cpu = irq / GIC_INTERNAL; 216 irq %= GIC_INTERNAL; 217 cm = 1 << cpu; 218 target = cm; 219 } 220 221 assert(irq >= GIC_NR_SGIS); 222 223 if (level == GIC_DIST_TEST_LEVEL(irq, cm)) { 224 return; 225 } 226 227 if (s->revision == REV_11MPCORE) { 228 gic_set_irq_11mpcore(s, irq, level, cm, target); 229 } else { 230 gic_set_irq_generic(s, irq, level, cm, target); 231 } 232 trace_gic_set_irq(irq, level, cm, target); 233 234 gic_update(s); 235 } 236 237 static uint16_t gic_get_current_pending_irq(GICState *s, int cpu, 238 MemTxAttrs attrs) 239 { 240 uint16_t pending_irq = s->current_pending[cpu]; 241 242 if (pending_irq < GIC_MAXIRQ && gic_has_groups(s)) { 243 int group = gic_test_group(s, pending_irq, cpu); 244 245 /* On a GIC without the security extensions, reading this register 246 * behaves in the same way as a secure access to a GIC with them. 247 */ 248 bool secure = !gic_cpu_ns_access(s, cpu, attrs); 249 250 if (group == 0 && !secure) { 251 /* Group0 interrupts hidden from Non-secure access */ 252 return 1023; 253 } 254 if (group == 1 && secure && !(s->cpu_ctlr[cpu] & GICC_CTLR_ACK_CTL)) { 255 /* Group1 interrupts only seen by Secure access if 256 * AckCtl bit set. 257 */ 258 return 1022; 259 } 260 } 261 return pending_irq; 262 } 263 264 static int gic_get_group_priority(GICState *s, int cpu, int irq) 265 { 266 /* Return the group priority of the specified interrupt 267 * (which is the top bits of its priority, with the number 268 * of bits masked determined by the applicable binary point register). 269 */ 270 int bpr; 271 uint32_t mask; 272 273 if (gic_has_groups(s) && 274 !(s->cpu_ctlr[cpu] & GICC_CTLR_CBPR) && 275 gic_test_group(s, irq, cpu)) { 276 bpr = s->abpr[cpu] - 1; 277 assert(bpr >= 0); 278 } else { 279 bpr = s->bpr[cpu]; 280 } 281 282 /* a BPR of 0 means the group priority bits are [7:1]; 283 * a BPR of 1 means they are [7:2], and so on down to 284 * a BPR of 7 meaning no group priority bits at all. 285 */ 286 mask = ~0U << ((bpr & 7) + 1); 287 288 return gic_get_priority(s, irq, cpu) & mask; 289 } 290 291 static void gic_activate_irq(GICState *s, int cpu, int irq) 292 { 293 /* Set the appropriate Active Priority Register bit for this IRQ, 294 * and update the running priority. 295 */ 296 int prio = gic_get_group_priority(s, cpu, irq); 297 int min_bpr = gic_is_vcpu(cpu) ? GIC_VIRT_MIN_BPR : GIC_MIN_BPR; 298 int preemption_level = prio >> (min_bpr + 1); 299 int regno = preemption_level / 32; 300 int bitno = preemption_level % 32; 301 uint32_t *papr = NULL; 302 303 if (gic_is_vcpu(cpu)) { 304 assert(regno == 0); 305 papr = &s->h_apr[gic_get_vcpu_real_id(cpu)]; 306 } else if (gic_has_groups(s) && gic_test_group(s, irq, cpu)) { 307 papr = &s->nsapr[regno][cpu]; 308 } else { 309 papr = &s->apr[regno][cpu]; 310 } 311 312 *papr |= (1 << bitno); 313 314 s->running_priority[cpu] = prio; 315 gic_set_active(s, irq, cpu); 316 } 317 318 static int gic_get_prio_from_apr_bits(GICState *s, int cpu) 319 { 320 /* Recalculate the current running priority for this CPU based 321 * on the set bits in the Active Priority Registers. 322 */ 323 int i; 324 325 if (gic_is_vcpu(cpu)) { 326 uint32_t apr = s->h_apr[gic_get_vcpu_real_id(cpu)]; 327 if (apr) { 328 return ctz32(apr) << (GIC_VIRT_MIN_BPR + 1); 329 } else { 330 return 0x100; 331 } 332 } 333 334 for (i = 0; i < GIC_NR_APRS; i++) { 335 uint32_t apr = s->apr[i][cpu] | s->nsapr[i][cpu]; 336 if (!apr) { 337 continue; 338 } 339 return (i * 32 + ctz32(apr)) << (GIC_MIN_BPR + 1); 340 } 341 return 0x100; 342 } 343 344 static void gic_drop_prio(GICState *s, int cpu, int group) 345 { 346 /* Drop the priority of the currently active interrupt in the 347 * specified group. 348 * 349 * Note that we can guarantee (because of the requirement to nest 350 * GICC_IAR reads [which activate an interrupt and raise priority] 351 * with GICC_EOIR writes [which drop the priority for the interrupt]) 352 * that the interrupt we're being called for is the highest priority 353 * active interrupt, meaning that it has the lowest set bit in the 354 * APR registers. 355 * 356 * If the guest does not honour the ordering constraints then the 357 * behaviour of the GIC is UNPREDICTABLE, which for us means that 358 * the values of the APR registers might become incorrect and the 359 * running priority will be wrong, so interrupts that should preempt 360 * might not do so, and interrupts that should not preempt might do so. 361 */ 362 if (gic_is_vcpu(cpu)) { 363 int rcpu = gic_get_vcpu_real_id(cpu); 364 365 if (s->h_apr[rcpu]) { 366 /* Clear lowest set bit */ 367 s->h_apr[rcpu] &= s->h_apr[rcpu] - 1; 368 } 369 } else { 370 int i; 371 372 for (i = 0; i < GIC_NR_APRS; i++) { 373 uint32_t *papr = group ? &s->nsapr[i][cpu] : &s->apr[i][cpu]; 374 if (!*papr) { 375 continue; 376 } 377 /* Clear lowest set bit */ 378 *papr &= *papr - 1; 379 break; 380 } 381 } 382 383 s->running_priority[cpu] = gic_get_prio_from_apr_bits(s, cpu); 384 } 385 386 static inline uint32_t gic_clear_pending_sgi(GICState *s, int irq, int cpu) 387 { 388 int src; 389 uint32_t ret; 390 391 if (!gic_is_vcpu(cpu)) { 392 /* Lookup the source CPU for the SGI and clear this in the 393 * sgi_pending map. Return the src and clear the overall pending 394 * state on this CPU if the SGI is not pending from any CPUs. 395 */ 396 assert(s->sgi_pending[irq][cpu] != 0); 397 src = ctz32(s->sgi_pending[irq][cpu]); 398 s->sgi_pending[irq][cpu] &= ~(1 << src); 399 if (s->sgi_pending[irq][cpu] == 0) { 400 gic_clear_pending(s, irq, cpu); 401 } 402 ret = irq | ((src & 0x7) << 10); 403 } else { 404 uint32_t *lr_entry = gic_get_lr_entry(s, irq, cpu); 405 src = GICH_LR_CPUID(*lr_entry); 406 407 gic_clear_pending(s, irq, cpu); 408 ret = irq | (src << 10); 409 } 410 411 return ret; 412 } 413 414 uint32_t gic_acknowledge_irq(GICState *s, int cpu, MemTxAttrs attrs) 415 { 416 int ret, irq; 417 418 /* gic_get_current_pending_irq() will return 1022 or 1023 appropriately 419 * for the case where this GIC supports grouping and the pending interrupt 420 * is in the wrong group. 421 */ 422 irq = gic_get_current_pending_irq(s, cpu, attrs); 423 trace_gic_acknowledge_irq(gic_get_vcpu_real_id(cpu), irq); 424 425 if (irq >= GIC_MAXIRQ) { 426 DPRINTF("ACK, no pending interrupt or it is hidden: %d\n", irq); 427 return irq; 428 } 429 430 if (gic_get_priority(s, irq, cpu) >= s->running_priority[cpu]) { 431 DPRINTF("ACK, pending interrupt (%d) has insufficient priority\n", irq); 432 return 1023; 433 } 434 435 gic_activate_irq(s, cpu, irq); 436 437 if (s->revision == REV_11MPCORE) { 438 /* Clear pending flags for both level and edge triggered interrupts. 439 * Level triggered IRQs will be reasserted once they become inactive. 440 */ 441 gic_clear_pending(s, irq, cpu); 442 ret = irq; 443 } else { 444 if (irq < GIC_NR_SGIS) { 445 ret = gic_clear_pending_sgi(s, irq, cpu); 446 } else { 447 gic_clear_pending(s, irq, cpu); 448 ret = irq; 449 } 450 } 451 452 gic_update(s); 453 DPRINTF("ACK %d\n", irq); 454 return ret; 455 } 456 457 void gic_dist_set_priority(GICState *s, int cpu, int irq, uint8_t val, 458 MemTxAttrs attrs) 459 { 460 if (s->security_extn && !attrs.secure) { 461 if (!GIC_DIST_TEST_GROUP(irq, (1 << cpu))) { 462 return; /* Ignore Non-secure access of Group0 IRQ */ 463 } 464 val = 0x80 | (val >> 1); /* Non-secure view */ 465 } 466 467 if (irq < GIC_INTERNAL) { 468 s->priority1[irq][cpu] = val; 469 } else { 470 s->priority2[(irq) - GIC_INTERNAL] = val; 471 } 472 } 473 474 static uint32_t gic_dist_get_priority(GICState *s, int cpu, int irq, 475 MemTxAttrs attrs) 476 { 477 uint32_t prio = GIC_DIST_GET_PRIORITY(irq, cpu); 478 479 if (s->security_extn && !attrs.secure) { 480 if (!GIC_DIST_TEST_GROUP(irq, (1 << cpu))) { 481 return 0; /* Non-secure access cannot read priority of Group0 IRQ */ 482 } 483 prio = (prio << 1) & 0xff; /* Non-secure view */ 484 } 485 return prio; 486 } 487 488 static void gic_set_priority_mask(GICState *s, int cpu, uint8_t pmask, 489 MemTxAttrs attrs) 490 { 491 if (gic_cpu_ns_access(s, cpu, attrs)) { 492 if (s->priority_mask[cpu] & 0x80) { 493 /* Priority Mask in upper half */ 494 pmask = 0x80 | (pmask >> 1); 495 } else { 496 /* Non-secure write ignored if priority mask is in lower half */ 497 return; 498 } 499 } 500 s->priority_mask[cpu] = pmask; 501 } 502 503 static uint32_t gic_get_priority_mask(GICState *s, int cpu, MemTxAttrs attrs) 504 { 505 uint32_t pmask = s->priority_mask[cpu]; 506 507 if (gic_cpu_ns_access(s, cpu, attrs)) { 508 if (pmask & 0x80) { 509 /* Priority Mask in upper half, return Non-secure view */ 510 pmask = (pmask << 1) & 0xff; 511 } else { 512 /* Priority Mask in lower half, RAZ */ 513 pmask = 0; 514 } 515 } 516 return pmask; 517 } 518 519 static uint32_t gic_get_cpu_control(GICState *s, int cpu, MemTxAttrs attrs) 520 { 521 uint32_t ret = s->cpu_ctlr[cpu]; 522 523 if (gic_cpu_ns_access(s, cpu, attrs)) { 524 /* Construct the NS banked view of GICC_CTLR from the correct 525 * bits of the S banked view. We don't need to move the bypass 526 * control bits because we don't implement that (IMPDEF) part 527 * of the GIC architecture. 528 */ 529 ret = (ret & (GICC_CTLR_EN_GRP1 | GICC_CTLR_EOIMODE_NS)) >> 1; 530 } 531 return ret; 532 } 533 534 static void gic_set_cpu_control(GICState *s, int cpu, uint32_t value, 535 MemTxAttrs attrs) 536 { 537 uint32_t mask; 538 539 if (gic_cpu_ns_access(s, cpu, attrs)) { 540 /* The NS view can only write certain bits in the register; 541 * the rest are unchanged 542 */ 543 mask = GICC_CTLR_EN_GRP1; 544 if (s->revision == 2) { 545 mask |= GICC_CTLR_EOIMODE_NS; 546 } 547 s->cpu_ctlr[cpu] &= ~mask; 548 s->cpu_ctlr[cpu] |= (value << 1) & mask; 549 } else { 550 if (s->revision == 2) { 551 mask = s->security_extn ? GICC_CTLR_V2_S_MASK : GICC_CTLR_V2_MASK; 552 } else { 553 mask = s->security_extn ? GICC_CTLR_V1_S_MASK : GICC_CTLR_V1_MASK; 554 } 555 s->cpu_ctlr[cpu] = value & mask; 556 } 557 DPRINTF("CPU Interface %d: Group0 Interrupts %sabled, " 558 "Group1 Interrupts %sabled\n", cpu, 559 (s->cpu_ctlr[cpu] & GICC_CTLR_EN_GRP0) ? "En" : "Dis", 560 (s->cpu_ctlr[cpu] & GICC_CTLR_EN_GRP1) ? "En" : "Dis"); 561 } 562 563 static uint8_t gic_get_running_priority(GICState *s, int cpu, MemTxAttrs attrs) 564 { 565 if ((s->revision != REV_11MPCORE) && (s->running_priority[cpu] > 0xff)) { 566 /* Idle priority */ 567 return 0xff; 568 } 569 570 if (gic_cpu_ns_access(s, cpu, attrs)) { 571 if (s->running_priority[cpu] & 0x80) { 572 /* Running priority in upper half of range: return the Non-secure 573 * view of the priority. 574 */ 575 return s->running_priority[cpu] << 1; 576 } else { 577 /* Running priority in lower half of range: RAZ */ 578 return 0; 579 } 580 } else { 581 return s->running_priority[cpu]; 582 } 583 } 584 585 /* Return true if we should split priority drop and interrupt deactivation, 586 * ie whether the relevant EOIMode bit is set. 587 */ 588 static bool gic_eoi_split(GICState *s, int cpu, MemTxAttrs attrs) 589 { 590 if (s->revision != 2) { 591 /* Before GICv2 prio-drop and deactivate are not separable */ 592 return false; 593 } 594 if (gic_cpu_ns_access(s, cpu, attrs)) { 595 return s->cpu_ctlr[cpu] & GICC_CTLR_EOIMODE_NS; 596 } 597 return s->cpu_ctlr[cpu] & GICC_CTLR_EOIMODE; 598 } 599 600 static void gic_deactivate_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs) 601 { 602 int group; 603 604 if (irq >= GIC_MAXIRQ || (!gic_is_vcpu(cpu) && irq >= s->num_irq)) { 605 /* 606 * This handles two cases: 607 * 1. If software writes the ID of a spurious interrupt [ie 1023] 608 * to the GICC_DIR, the GIC ignores that write. 609 * 2. If software writes the number of a non-existent interrupt 610 * this must be a subcase of "value written is not an active interrupt" 611 * and so this is UNPREDICTABLE. We choose to ignore it. For vCPUs, 612 * all IRQs potentially exist, so this limit does not apply. 613 */ 614 return; 615 } 616 617 if (!gic_eoi_split(s, cpu, attrs)) { 618 /* This is UNPREDICTABLE; we choose to ignore it */ 619 qemu_log_mask(LOG_GUEST_ERROR, 620 "gic_deactivate_irq: GICC_DIR write when EOIMode clear"); 621 return; 622 } 623 624 if (gic_is_vcpu(cpu) && !gic_virq_is_valid(s, irq, cpu)) { 625 /* This vIRQ does not have an LR entry which is either active or 626 * pending and active. Increment EOICount and ignore the write. 627 */ 628 int rcpu = gic_get_vcpu_real_id(cpu); 629 s->h_hcr[rcpu] += 1 << R_GICH_HCR_EOICount_SHIFT; 630 return; 631 } 632 633 group = gic_has_groups(s) && gic_test_group(s, irq, cpu); 634 635 if (gic_cpu_ns_access(s, cpu, attrs) && !group) { 636 DPRINTF("Non-secure DI for Group0 interrupt %d ignored\n", irq); 637 return; 638 } 639 640 gic_clear_active(s, irq, cpu); 641 } 642 643 static void gic_complete_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs) 644 { 645 int cm = 1 << cpu; 646 int group; 647 648 DPRINTF("EOI %d\n", irq); 649 if (gic_is_vcpu(cpu)) { 650 /* The call to gic_prio_drop() will clear a bit in GICH_APR iff the 651 * running prio is < 0x100. 652 */ 653 bool prio_drop = s->running_priority[cpu] < 0x100; 654 655 if (irq >= GIC_MAXIRQ) { 656 /* Ignore spurious interrupt */ 657 return; 658 } 659 660 gic_drop_prio(s, cpu, 0); 661 662 if (!gic_eoi_split(s, cpu, attrs)) { 663 bool valid = gic_virq_is_valid(s, irq, cpu); 664 if (prio_drop && !valid) { 665 /* We are in a situation where: 666 * - V_CTRL.EOIMode is false (no EOI split), 667 * - The call to gic_drop_prio() cleared a bit in GICH_APR, 668 * - This vIRQ does not have an LR entry which is either 669 * active or pending and active. 670 * In that case, we must increment EOICount. 671 */ 672 int rcpu = gic_get_vcpu_real_id(cpu); 673 s->h_hcr[rcpu] += 1 << R_GICH_HCR_EOICount_SHIFT; 674 } else if (valid) { 675 gic_clear_active(s, irq, cpu); 676 } 677 } 678 679 return; 680 } 681 682 if (irq >= s->num_irq) { 683 /* This handles two cases: 684 * 1. If software writes the ID of a spurious interrupt [ie 1023] 685 * to the GICC_EOIR, the GIC ignores that write. 686 * 2. If software writes the number of a non-existent interrupt 687 * this must be a subcase of "value written does not match the last 688 * valid interrupt value read from the Interrupt Acknowledge 689 * register" and so this is UNPREDICTABLE. We choose to ignore it. 690 */ 691 return; 692 } 693 if (s->running_priority[cpu] == 0x100) { 694 return; /* No active IRQ. */ 695 } 696 697 if (s->revision == REV_11MPCORE) { 698 /* Mark level triggered interrupts as pending if they are still 699 raised. */ 700 if (!GIC_DIST_TEST_EDGE_TRIGGER(irq) && GIC_DIST_TEST_ENABLED(irq, cm) 701 && GIC_DIST_TEST_LEVEL(irq, cm) 702 && (GIC_DIST_TARGET(irq) & cm) != 0) { 703 DPRINTF("Set %d pending mask %x\n", irq, cm); 704 GIC_DIST_SET_PENDING(irq, cm); 705 } 706 } 707 708 group = gic_has_groups(s) && gic_test_group(s, irq, cpu); 709 710 if (gic_cpu_ns_access(s, cpu, attrs) && !group) { 711 DPRINTF("Non-secure EOI for Group0 interrupt %d ignored\n", irq); 712 return; 713 } 714 715 /* Secure EOI with GICC_CTLR.AckCtl == 0 when the IRQ is a Group 1 716 * interrupt is UNPREDICTABLE. We choose to handle it as if AckCtl == 1, 717 * i.e. go ahead and complete the irq anyway. 718 */ 719 720 gic_drop_prio(s, cpu, group); 721 722 /* In GICv2 the guest can choose to split priority-drop and deactivate */ 723 if (!gic_eoi_split(s, cpu, attrs)) { 724 gic_clear_active(s, irq, cpu); 725 } 726 gic_update(s); 727 } 728 729 static uint32_t gic_dist_readb(void *opaque, hwaddr offset, MemTxAttrs attrs) 730 { 731 GICState *s = (GICState *)opaque; 732 uint32_t res; 733 int irq; 734 int i; 735 int cpu; 736 int cm; 737 int mask; 738 739 cpu = gic_get_current_cpu(s); 740 cm = 1 << cpu; 741 if (offset < 0x100) { 742 if (offset == 0) { /* GICD_CTLR */ 743 if (s->security_extn && !attrs.secure) { 744 /* The NS bank of this register is just an alias of the 745 * EnableGrp1 bit in the S bank version. 746 */ 747 return extract32(s->ctlr, 1, 1); 748 } else { 749 return s->ctlr; 750 } 751 } 752 if (offset == 4) 753 /* Interrupt Controller Type Register */ 754 return ((s->num_irq / 32) - 1) 755 | ((s->num_cpu - 1) << 5) 756 | (s->security_extn << 10); 757 if (offset < 0x08) 758 return 0; 759 if (offset >= 0x80) { 760 /* Interrupt Group Registers: these RAZ/WI if this is an NS 761 * access to a GIC with the security extensions, or if the GIC 762 * doesn't have groups at all. 763 */ 764 res = 0; 765 if (!(s->security_extn && !attrs.secure) && gic_has_groups(s)) { 766 /* Every byte offset holds 8 group status bits */ 767 irq = (offset - 0x080) * 8 + GIC_BASE_IRQ; 768 if (irq >= s->num_irq) { 769 goto bad_reg; 770 } 771 for (i = 0; i < 8; i++) { 772 if (GIC_DIST_TEST_GROUP(irq + i, cm)) { 773 res |= (1 << i); 774 } 775 } 776 } 777 return res; 778 } 779 goto bad_reg; 780 } else if (offset < 0x200) { 781 /* Interrupt Set/Clear Enable. */ 782 if (offset < 0x180) 783 irq = (offset - 0x100) * 8; 784 else 785 irq = (offset - 0x180) * 8; 786 irq += GIC_BASE_IRQ; 787 if (irq >= s->num_irq) 788 goto bad_reg; 789 res = 0; 790 for (i = 0; i < 8; i++) { 791 if (s->security_extn && !attrs.secure && 792 !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) { 793 continue; /* Ignore Non-secure access of Group0 IRQ */ 794 } 795 796 if (GIC_DIST_TEST_ENABLED(irq + i, cm)) { 797 res |= (1 << i); 798 } 799 } 800 } else if (offset < 0x300) { 801 /* Interrupt Set/Clear Pending. */ 802 if (offset < 0x280) 803 irq = (offset - 0x200) * 8; 804 else 805 irq = (offset - 0x280) * 8; 806 irq += GIC_BASE_IRQ; 807 if (irq >= s->num_irq) 808 goto bad_reg; 809 res = 0; 810 mask = (irq < GIC_INTERNAL) ? cm : ALL_CPU_MASK; 811 for (i = 0; i < 8; i++) { 812 if (s->security_extn && !attrs.secure && 813 !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) { 814 continue; /* Ignore Non-secure access of Group0 IRQ */ 815 } 816 817 if (gic_test_pending(s, irq + i, mask)) { 818 res |= (1 << i); 819 } 820 } 821 } else if (offset < 0x400) { 822 /* Interrupt Set/Clear Active. */ 823 if (offset < 0x380) { 824 irq = (offset - 0x300) * 8; 825 } else if (s->revision == 2) { 826 irq = (offset - 0x380) * 8; 827 } else { 828 goto bad_reg; 829 } 830 831 irq += GIC_BASE_IRQ; 832 if (irq >= s->num_irq) 833 goto bad_reg; 834 res = 0; 835 mask = (irq < GIC_INTERNAL) ? cm : ALL_CPU_MASK; 836 for (i = 0; i < 8; i++) { 837 if (s->security_extn && !attrs.secure && 838 !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) { 839 continue; /* Ignore Non-secure access of Group0 IRQ */ 840 } 841 842 if (GIC_DIST_TEST_ACTIVE(irq + i, mask)) { 843 res |= (1 << i); 844 } 845 } 846 } else if (offset < 0x800) { 847 /* Interrupt Priority. */ 848 irq = (offset - 0x400) + GIC_BASE_IRQ; 849 if (irq >= s->num_irq) 850 goto bad_reg; 851 res = gic_dist_get_priority(s, cpu, irq, attrs); 852 } else if (offset < 0xc00) { 853 /* Interrupt CPU Target. */ 854 if (s->num_cpu == 1 && s->revision != REV_11MPCORE) { 855 /* For uniprocessor GICs these RAZ/WI */ 856 res = 0; 857 } else { 858 irq = (offset - 0x800) + GIC_BASE_IRQ; 859 if (irq >= s->num_irq) { 860 goto bad_reg; 861 } 862 if (irq < 29 && s->revision == REV_11MPCORE) { 863 res = 0; 864 } else if (irq < GIC_INTERNAL) { 865 res = cm; 866 } else { 867 res = GIC_DIST_TARGET(irq); 868 } 869 } 870 } else if (offset < 0xf00) { 871 /* Interrupt Configuration. */ 872 irq = (offset - 0xc00) * 4 + GIC_BASE_IRQ; 873 if (irq >= s->num_irq) 874 goto bad_reg; 875 res = 0; 876 for (i = 0; i < 4; i++) { 877 if (s->security_extn && !attrs.secure && 878 !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) { 879 continue; /* Ignore Non-secure access of Group0 IRQ */ 880 } 881 882 if (GIC_DIST_TEST_MODEL(irq + i)) { 883 res |= (1 << (i * 2)); 884 } 885 if (GIC_DIST_TEST_EDGE_TRIGGER(irq + i)) { 886 res |= (2 << (i * 2)); 887 } 888 } 889 } else if (offset < 0xf10) { 890 goto bad_reg; 891 } else if (offset < 0xf30) { 892 if (s->revision == REV_11MPCORE) { 893 goto bad_reg; 894 } 895 896 if (offset < 0xf20) { 897 /* GICD_CPENDSGIRn */ 898 irq = (offset - 0xf10); 899 } else { 900 irq = (offset - 0xf20); 901 /* GICD_SPENDSGIRn */ 902 } 903 904 if (s->security_extn && !attrs.secure && 905 !GIC_DIST_TEST_GROUP(irq, 1 << cpu)) { 906 res = 0; /* Ignore Non-secure access of Group0 IRQ */ 907 } else { 908 res = s->sgi_pending[irq][cpu]; 909 } 910 } else if (offset < 0xfd0) { 911 goto bad_reg; 912 } else if (offset < 0x1000) { 913 if (offset & 3) { 914 res = 0; 915 } else { 916 switch (s->revision) { 917 case REV_11MPCORE: 918 res = gic_id_11mpcore[(offset - 0xfd0) >> 2]; 919 break; 920 case 1: 921 res = gic_id_gicv1[(offset - 0xfd0) >> 2]; 922 break; 923 case 2: 924 res = gic_id_gicv2[(offset - 0xfd0) >> 2]; 925 break; 926 default: 927 res = 0; 928 } 929 } 930 } else { 931 g_assert_not_reached(); 932 } 933 return res; 934 bad_reg: 935 qemu_log_mask(LOG_GUEST_ERROR, 936 "gic_dist_readb: Bad offset %x\n", (int)offset); 937 return 0; 938 } 939 940 static MemTxResult gic_dist_read(void *opaque, hwaddr offset, uint64_t *data, 941 unsigned size, MemTxAttrs attrs) 942 { 943 switch (size) { 944 case 1: 945 *data = gic_dist_readb(opaque, offset, attrs); 946 return MEMTX_OK; 947 case 2: 948 *data = gic_dist_readb(opaque, offset, attrs); 949 *data |= gic_dist_readb(opaque, offset + 1, attrs) << 8; 950 return MEMTX_OK; 951 case 4: 952 *data = gic_dist_readb(opaque, offset, attrs); 953 *data |= gic_dist_readb(opaque, offset + 1, attrs) << 8; 954 *data |= gic_dist_readb(opaque, offset + 2, attrs) << 16; 955 *data |= gic_dist_readb(opaque, offset + 3, attrs) << 24; 956 return MEMTX_OK; 957 default: 958 return MEMTX_ERROR; 959 } 960 } 961 962 static void gic_dist_writeb(void *opaque, hwaddr offset, 963 uint32_t value, MemTxAttrs attrs) 964 { 965 GICState *s = (GICState *)opaque; 966 int irq; 967 int i; 968 int cpu; 969 970 cpu = gic_get_current_cpu(s); 971 if (offset < 0x100) { 972 if (offset == 0) { 973 if (s->security_extn && !attrs.secure) { 974 /* NS version is just an alias of the S version's bit 1 */ 975 s->ctlr = deposit32(s->ctlr, 1, 1, value); 976 } else if (gic_has_groups(s)) { 977 s->ctlr = value & (GICD_CTLR_EN_GRP0 | GICD_CTLR_EN_GRP1); 978 } else { 979 s->ctlr = value & GICD_CTLR_EN_GRP0; 980 } 981 DPRINTF("Distributor: Group0 %sabled; Group 1 %sabled\n", 982 s->ctlr & GICD_CTLR_EN_GRP0 ? "En" : "Dis", 983 s->ctlr & GICD_CTLR_EN_GRP1 ? "En" : "Dis"); 984 } else if (offset < 4) { 985 /* ignored. */ 986 } else if (offset >= 0x80) { 987 /* Interrupt Group Registers: RAZ/WI for NS access to secure 988 * GIC, or for GICs without groups. 989 */ 990 if (!(s->security_extn && !attrs.secure) && gic_has_groups(s)) { 991 /* Every byte offset holds 8 group status bits */ 992 irq = (offset - 0x80) * 8 + GIC_BASE_IRQ; 993 if (irq >= s->num_irq) { 994 goto bad_reg; 995 } 996 for (i = 0; i < 8; i++) { 997 /* Group bits are banked for private interrupts */ 998 int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK; 999 if (value & (1 << i)) { 1000 /* Group1 (Non-secure) */ 1001 GIC_DIST_SET_GROUP(irq + i, cm); 1002 } else { 1003 /* Group0 (Secure) */ 1004 GIC_DIST_CLEAR_GROUP(irq + i, cm); 1005 } 1006 } 1007 } 1008 } else { 1009 goto bad_reg; 1010 } 1011 } else if (offset < 0x180) { 1012 /* Interrupt Set Enable. */ 1013 irq = (offset - 0x100) * 8 + GIC_BASE_IRQ; 1014 if (irq >= s->num_irq) 1015 goto bad_reg; 1016 if (irq < GIC_NR_SGIS) { 1017 value = 0xff; 1018 } 1019 1020 for (i = 0; i < 8; i++) { 1021 if (value & (1 << i)) { 1022 int mask = 1023 (irq < GIC_INTERNAL) ? (1 << cpu) 1024 : GIC_DIST_TARGET(irq + i); 1025 int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK; 1026 1027 if (s->security_extn && !attrs.secure && 1028 !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) { 1029 continue; /* Ignore Non-secure access of Group0 IRQ */ 1030 } 1031 1032 if (!GIC_DIST_TEST_ENABLED(irq + i, cm)) { 1033 DPRINTF("Enabled IRQ %d\n", irq + i); 1034 trace_gic_enable_irq(irq + i); 1035 } 1036 GIC_DIST_SET_ENABLED(irq + i, cm); 1037 /* If a raised level triggered IRQ enabled then mark 1038 is as pending. */ 1039 if (GIC_DIST_TEST_LEVEL(irq + i, mask) 1040 && !GIC_DIST_TEST_EDGE_TRIGGER(irq + i)) { 1041 DPRINTF("Set %d pending mask %x\n", irq + i, mask); 1042 GIC_DIST_SET_PENDING(irq + i, mask); 1043 } 1044 } 1045 } 1046 } else if (offset < 0x200) { 1047 /* Interrupt Clear Enable. */ 1048 irq = (offset - 0x180) * 8 + GIC_BASE_IRQ; 1049 if (irq >= s->num_irq) 1050 goto bad_reg; 1051 if (irq < GIC_NR_SGIS) { 1052 value = 0; 1053 } 1054 1055 for (i = 0; i < 8; i++) { 1056 if (value & (1 << i)) { 1057 int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK; 1058 1059 if (s->security_extn && !attrs.secure && 1060 !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) { 1061 continue; /* Ignore Non-secure access of Group0 IRQ */ 1062 } 1063 1064 if (GIC_DIST_TEST_ENABLED(irq + i, cm)) { 1065 DPRINTF("Disabled IRQ %d\n", irq + i); 1066 trace_gic_disable_irq(irq + i); 1067 } 1068 GIC_DIST_CLEAR_ENABLED(irq + i, cm); 1069 } 1070 } 1071 } else if (offset < 0x280) { 1072 /* Interrupt Set Pending. */ 1073 irq = (offset - 0x200) * 8 + GIC_BASE_IRQ; 1074 if (irq >= s->num_irq) 1075 goto bad_reg; 1076 if (irq < GIC_NR_SGIS) { 1077 value = 0; 1078 } 1079 1080 for (i = 0; i < 8; i++) { 1081 if (value & (1 << i)) { 1082 if (s->security_extn && !attrs.secure && 1083 !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) { 1084 continue; /* Ignore Non-secure access of Group0 IRQ */ 1085 } 1086 1087 GIC_DIST_SET_PENDING(irq + i, GIC_DIST_TARGET(irq + i)); 1088 } 1089 } 1090 } else if (offset < 0x300) { 1091 /* Interrupt Clear Pending. */ 1092 irq = (offset - 0x280) * 8 + GIC_BASE_IRQ; 1093 if (irq >= s->num_irq) 1094 goto bad_reg; 1095 if (irq < GIC_NR_SGIS) { 1096 value = 0; 1097 } 1098 1099 for (i = 0; i < 8; i++) { 1100 if (s->security_extn && !attrs.secure && 1101 !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) { 1102 continue; /* Ignore Non-secure access of Group0 IRQ */ 1103 } 1104 1105 /* ??? This currently clears the pending bit for all CPUs, even 1106 for per-CPU interrupts. It's unclear whether this is the 1107 corect behavior. */ 1108 if (value & (1 << i)) { 1109 GIC_DIST_CLEAR_PENDING(irq + i, ALL_CPU_MASK); 1110 } 1111 } 1112 } else if (offset < 0x380) { 1113 /* Interrupt Set Active. */ 1114 if (s->revision != 2) { 1115 goto bad_reg; 1116 } 1117 1118 irq = (offset - 0x300) * 8 + GIC_BASE_IRQ; 1119 if (irq >= s->num_irq) { 1120 goto bad_reg; 1121 } 1122 1123 /* This register is banked per-cpu for PPIs */ 1124 int cm = irq < GIC_INTERNAL ? (1 << cpu) : ALL_CPU_MASK; 1125 1126 for (i = 0; i < 8; i++) { 1127 if (s->security_extn && !attrs.secure && 1128 !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) { 1129 continue; /* Ignore Non-secure access of Group0 IRQ */ 1130 } 1131 1132 if (value & (1 << i)) { 1133 GIC_DIST_SET_ACTIVE(irq + i, cm); 1134 } 1135 } 1136 } else if (offset < 0x400) { 1137 /* Interrupt Clear Active. */ 1138 if (s->revision != 2) { 1139 goto bad_reg; 1140 } 1141 1142 irq = (offset - 0x380) * 8 + GIC_BASE_IRQ; 1143 if (irq >= s->num_irq) { 1144 goto bad_reg; 1145 } 1146 1147 /* This register is banked per-cpu for PPIs */ 1148 int cm = irq < GIC_INTERNAL ? (1 << cpu) : ALL_CPU_MASK; 1149 1150 for (i = 0; i < 8; i++) { 1151 if (s->security_extn && !attrs.secure && 1152 !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) { 1153 continue; /* Ignore Non-secure access of Group0 IRQ */ 1154 } 1155 1156 if (value & (1 << i)) { 1157 GIC_DIST_CLEAR_ACTIVE(irq + i, cm); 1158 } 1159 } 1160 } else if (offset < 0x800) { 1161 /* Interrupt Priority. */ 1162 irq = (offset - 0x400) + GIC_BASE_IRQ; 1163 if (irq >= s->num_irq) 1164 goto bad_reg; 1165 gic_dist_set_priority(s, cpu, irq, value, attrs); 1166 } else if (offset < 0xc00) { 1167 /* Interrupt CPU Target. RAZ/WI on uniprocessor GICs, with the 1168 * annoying exception of the 11MPCore's GIC. 1169 */ 1170 if (s->num_cpu != 1 || s->revision == REV_11MPCORE) { 1171 irq = (offset - 0x800) + GIC_BASE_IRQ; 1172 if (irq >= s->num_irq) { 1173 goto bad_reg; 1174 } 1175 if (irq < 29 && s->revision == REV_11MPCORE) { 1176 value = 0; 1177 } else if (irq < GIC_INTERNAL) { 1178 value = ALL_CPU_MASK; 1179 } 1180 s->irq_target[irq] = value & ALL_CPU_MASK; 1181 } 1182 } else if (offset < 0xf00) { 1183 /* Interrupt Configuration. */ 1184 irq = (offset - 0xc00) * 4 + GIC_BASE_IRQ; 1185 if (irq >= s->num_irq) 1186 goto bad_reg; 1187 if (irq < GIC_NR_SGIS) 1188 value |= 0xaa; 1189 for (i = 0; i < 4; i++) { 1190 if (s->security_extn && !attrs.secure && 1191 !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) { 1192 continue; /* Ignore Non-secure access of Group0 IRQ */ 1193 } 1194 1195 if (s->revision == REV_11MPCORE) { 1196 if (value & (1 << (i * 2))) { 1197 GIC_DIST_SET_MODEL(irq + i); 1198 } else { 1199 GIC_DIST_CLEAR_MODEL(irq + i); 1200 } 1201 } 1202 if (value & (2 << (i * 2))) { 1203 GIC_DIST_SET_EDGE_TRIGGER(irq + i); 1204 } else { 1205 GIC_DIST_CLEAR_EDGE_TRIGGER(irq + i); 1206 } 1207 } 1208 } else if (offset < 0xf10) { 1209 /* 0xf00 is only handled for 32-bit writes. */ 1210 goto bad_reg; 1211 } else if (offset < 0xf20) { 1212 /* GICD_CPENDSGIRn */ 1213 if (s->revision == REV_11MPCORE) { 1214 goto bad_reg; 1215 } 1216 irq = (offset - 0xf10); 1217 1218 if (!s->security_extn || attrs.secure || 1219 GIC_DIST_TEST_GROUP(irq, 1 << cpu)) { 1220 s->sgi_pending[irq][cpu] &= ~value; 1221 if (s->sgi_pending[irq][cpu] == 0) { 1222 GIC_DIST_CLEAR_PENDING(irq, 1 << cpu); 1223 } 1224 } 1225 } else if (offset < 0xf30) { 1226 /* GICD_SPENDSGIRn */ 1227 if (s->revision == REV_11MPCORE) { 1228 goto bad_reg; 1229 } 1230 irq = (offset - 0xf20); 1231 1232 if (!s->security_extn || attrs.secure || 1233 GIC_DIST_TEST_GROUP(irq, 1 << cpu)) { 1234 GIC_DIST_SET_PENDING(irq, 1 << cpu); 1235 s->sgi_pending[irq][cpu] |= value; 1236 } 1237 } else { 1238 goto bad_reg; 1239 } 1240 gic_update(s); 1241 return; 1242 bad_reg: 1243 qemu_log_mask(LOG_GUEST_ERROR, 1244 "gic_dist_writeb: Bad offset %x\n", (int)offset); 1245 } 1246 1247 static void gic_dist_writew(void *opaque, hwaddr offset, 1248 uint32_t value, MemTxAttrs attrs) 1249 { 1250 gic_dist_writeb(opaque, offset, value & 0xff, attrs); 1251 gic_dist_writeb(opaque, offset + 1, value >> 8, attrs); 1252 } 1253 1254 static void gic_dist_writel(void *opaque, hwaddr offset, 1255 uint32_t value, MemTxAttrs attrs) 1256 { 1257 GICState *s = (GICState *)opaque; 1258 if (offset == 0xf00) { 1259 int cpu; 1260 int irq; 1261 int mask; 1262 int target_cpu; 1263 1264 cpu = gic_get_current_cpu(s); 1265 irq = value & 0x3ff; 1266 switch ((value >> 24) & 3) { 1267 case 0: 1268 mask = (value >> 16) & ALL_CPU_MASK; 1269 break; 1270 case 1: 1271 mask = ALL_CPU_MASK ^ (1 << cpu); 1272 break; 1273 case 2: 1274 mask = 1 << cpu; 1275 break; 1276 default: 1277 DPRINTF("Bad Soft Int target filter\n"); 1278 mask = ALL_CPU_MASK; 1279 break; 1280 } 1281 GIC_DIST_SET_PENDING(irq, mask); 1282 target_cpu = ctz32(mask); 1283 while (target_cpu < GIC_NCPU) { 1284 s->sgi_pending[irq][target_cpu] |= (1 << cpu); 1285 mask &= ~(1 << target_cpu); 1286 target_cpu = ctz32(mask); 1287 } 1288 gic_update(s); 1289 return; 1290 } 1291 gic_dist_writew(opaque, offset, value & 0xffff, attrs); 1292 gic_dist_writew(opaque, offset + 2, value >> 16, attrs); 1293 } 1294 1295 static MemTxResult gic_dist_write(void *opaque, hwaddr offset, uint64_t data, 1296 unsigned size, MemTxAttrs attrs) 1297 { 1298 switch (size) { 1299 case 1: 1300 gic_dist_writeb(opaque, offset, data, attrs); 1301 return MEMTX_OK; 1302 case 2: 1303 gic_dist_writew(opaque, offset, data, attrs); 1304 return MEMTX_OK; 1305 case 4: 1306 gic_dist_writel(opaque, offset, data, attrs); 1307 return MEMTX_OK; 1308 default: 1309 return MEMTX_ERROR; 1310 } 1311 } 1312 1313 static inline uint32_t gic_apr_ns_view(GICState *s, int cpu, int regno) 1314 { 1315 /* Return the Nonsecure view of GICC_APR<regno>. This is the 1316 * second half of GICC_NSAPR. 1317 */ 1318 switch (GIC_MIN_BPR) { 1319 case 0: 1320 if (regno < 2) { 1321 return s->nsapr[regno + 2][cpu]; 1322 } 1323 break; 1324 case 1: 1325 if (regno == 0) { 1326 return s->nsapr[regno + 1][cpu]; 1327 } 1328 break; 1329 case 2: 1330 if (regno == 0) { 1331 return extract32(s->nsapr[0][cpu], 16, 16); 1332 } 1333 break; 1334 case 3: 1335 if (regno == 0) { 1336 return extract32(s->nsapr[0][cpu], 8, 8); 1337 } 1338 break; 1339 default: 1340 g_assert_not_reached(); 1341 } 1342 return 0; 1343 } 1344 1345 static inline void gic_apr_write_ns_view(GICState *s, int cpu, int regno, 1346 uint32_t value) 1347 { 1348 /* Write the Nonsecure view of GICC_APR<regno>. */ 1349 switch (GIC_MIN_BPR) { 1350 case 0: 1351 if (regno < 2) { 1352 s->nsapr[regno + 2][cpu] = value; 1353 } 1354 break; 1355 case 1: 1356 if (regno == 0) { 1357 s->nsapr[regno + 1][cpu] = value; 1358 } 1359 break; 1360 case 2: 1361 if (regno == 0) { 1362 s->nsapr[0][cpu] = deposit32(s->nsapr[0][cpu], 16, 16, value); 1363 } 1364 break; 1365 case 3: 1366 if (regno == 0) { 1367 s->nsapr[0][cpu] = deposit32(s->nsapr[0][cpu], 8, 8, value); 1368 } 1369 break; 1370 default: 1371 g_assert_not_reached(); 1372 } 1373 } 1374 1375 static MemTxResult gic_cpu_read(GICState *s, int cpu, int offset, 1376 uint64_t *data, MemTxAttrs attrs) 1377 { 1378 switch (offset) { 1379 case 0x00: /* Control */ 1380 *data = gic_get_cpu_control(s, cpu, attrs); 1381 break; 1382 case 0x04: /* Priority mask */ 1383 *data = gic_get_priority_mask(s, cpu, attrs); 1384 break; 1385 case 0x08: /* Binary Point */ 1386 if (gic_cpu_ns_access(s, cpu, attrs)) { 1387 if (s->cpu_ctlr[cpu] & GICC_CTLR_CBPR) { 1388 /* NS view of BPR when CBPR is 1 */ 1389 *data = MIN(s->bpr[cpu] + 1, 7); 1390 } else { 1391 /* BPR is banked. Non-secure copy stored in ABPR. */ 1392 *data = s->abpr[cpu]; 1393 } 1394 } else { 1395 *data = s->bpr[cpu]; 1396 } 1397 break; 1398 case 0x0c: /* Acknowledge */ 1399 *data = gic_acknowledge_irq(s, cpu, attrs); 1400 break; 1401 case 0x14: /* Running Priority */ 1402 *data = gic_get_running_priority(s, cpu, attrs); 1403 break; 1404 case 0x18: /* Highest Pending Interrupt */ 1405 *data = gic_get_current_pending_irq(s, cpu, attrs); 1406 break; 1407 case 0x1c: /* Aliased Binary Point */ 1408 /* GIC v2, no security: ABPR 1409 * GIC v1, no security: not implemented (RAZ/WI) 1410 * With security extensions, secure access: ABPR (alias of NS BPR) 1411 * With security extensions, nonsecure access: RAZ/WI 1412 */ 1413 if (!gic_has_groups(s) || (gic_cpu_ns_access(s, cpu, attrs))) { 1414 *data = 0; 1415 } else { 1416 *data = s->abpr[cpu]; 1417 } 1418 break; 1419 case 0xd0: case 0xd4: case 0xd8: case 0xdc: 1420 { 1421 int regno = (offset - 0xd0) / 4; 1422 int nr_aprs = gic_is_vcpu(cpu) ? GIC_VIRT_NR_APRS : GIC_NR_APRS; 1423 1424 if (regno >= nr_aprs || s->revision != 2) { 1425 *data = 0; 1426 } else if (gic_is_vcpu(cpu)) { 1427 *data = s->h_apr[gic_get_vcpu_real_id(cpu)]; 1428 } else if (gic_cpu_ns_access(s, cpu, attrs)) { 1429 /* NS view of GICC_APR<n> is the top half of GIC_NSAPR<n> */ 1430 *data = gic_apr_ns_view(s, regno, cpu); 1431 } else { 1432 *data = s->apr[regno][cpu]; 1433 } 1434 break; 1435 } 1436 case 0xe0: case 0xe4: case 0xe8: case 0xec: 1437 { 1438 int regno = (offset - 0xe0) / 4; 1439 1440 if (regno >= GIC_NR_APRS || s->revision != 2 || !gic_has_groups(s) || 1441 gic_cpu_ns_access(s, cpu, attrs) || gic_is_vcpu(cpu)) { 1442 *data = 0; 1443 } else { 1444 *data = s->nsapr[regno][cpu]; 1445 } 1446 break; 1447 } 1448 default: 1449 qemu_log_mask(LOG_GUEST_ERROR, 1450 "gic_cpu_read: Bad offset %x\n", (int)offset); 1451 *data = 0; 1452 break; 1453 } 1454 return MEMTX_OK; 1455 } 1456 1457 static MemTxResult gic_cpu_write(GICState *s, int cpu, int offset, 1458 uint32_t value, MemTxAttrs attrs) 1459 { 1460 switch (offset) { 1461 case 0x00: /* Control */ 1462 gic_set_cpu_control(s, cpu, value, attrs); 1463 break; 1464 case 0x04: /* Priority mask */ 1465 gic_set_priority_mask(s, cpu, value, attrs); 1466 break; 1467 case 0x08: /* Binary Point */ 1468 if (gic_cpu_ns_access(s, cpu, attrs)) { 1469 if (s->cpu_ctlr[cpu] & GICC_CTLR_CBPR) { 1470 /* WI when CBPR is 1 */ 1471 return MEMTX_OK; 1472 } else { 1473 s->abpr[cpu] = MAX(value & 0x7, GIC_MIN_ABPR); 1474 } 1475 } else { 1476 int min_bpr = gic_is_vcpu(cpu) ? GIC_VIRT_MIN_BPR : GIC_MIN_BPR; 1477 s->bpr[cpu] = MAX(value & 0x7, min_bpr); 1478 } 1479 break; 1480 case 0x10: /* End Of Interrupt */ 1481 gic_complete_irq(s, cpu, value & 0x3ff, attrs); 1482 return MEMTX_OK; 1483 case 0x1c: /* Aliased Binary Point */ 1484 if (!gic_has_groups(s) || (gic_cpu_ns_access(s, cpu, attrs))) { 1485 /* unimplemented, or NS access: RAZ/WI */ 1486 return MEMTX_OK; 1487 } else { 1488 s->abpr[cpu] = MAX(value & 0x7, GIC_MIN_ABPR); 1489 } 1490 break; 1491 case 0xd0: case 0xd4: case 0xd8: case 0xdc: 1492 { 1493 int regno = (offset - 0xd0) / 4; 1494 int nr_aprs = gic_is_vcpu(cpu) ? GIC_VIRT_NR_APRS : GIC_NR_APRS; 1495 1496 if (regno >= nr_aprs || s->revision != 2) { 1497 return MEMTX_OK; 1498 } 1499 if (gic_is_vcpu(cpu)) { 1500 s->h_apr[gic_get_vcpu_real_id(cpu)] = value; 1501 } else if (gic_cpu_ns_access(s, cpu, attrs)) { 1502 /* NS view of GICC_APR<n> is the top half of GIC_NSAPR<n> */ 1503 gic_apr_write_ns_view(s, regno, cpu, value); 1504 } else { 1505 s->apr[regno][cpu] = value; 1506 } 1507 break; 1508 } 1509 case 0xe0: case 0xe4: case 0xe8: case 0xec: 1510 { 1511 int regno = (offset - 0xe0) / 4; 1512 1513 if (regno >= GIC_NR_APRS || s->revision != 2) { 1514 return MEMTX_OK; 1515 } 1516 if (gic_is_vcpu(cpu)) { 1517 return MEMTX_OK; 1518 } 1519 if (!gic_has_groups(s) || (gic_cpu_ns_access(s, cpu, attrs))) { 1520 return MEMTX_OK; 1521 } 1522 s->nsapr[regno][cpu] = value; 1523 break; 1524 } 1525 case 0x1000: 1526 /* GICC_DIR */ 1527 gic_deactivate_irq(s, cpu, value & 0x3ff, attrs); 1528 break; 1529 default: 1530 qemu_log_mask(LOG_GUEST_ERROR, 1531 "gic_cpu_write: Bad offset %x\n", (int)offset); 1532 return MEMTX_OK; 1533 } 1534 gic_update(s); 1535 return MEMTX_OK; 1536 } 1537 1538 /* Wrappers to read/write the GIC CPU interface for the current CPU */ 1539 static MemTxResult gic_thiscpu_read(void *opaque, hwaddr addr, uint64_t *data, 1540 unsigned size, MemTxAttrs attrs) 1541 { 1542 GICState *s = (GICState *)opaque; 1543 return gic_cpu_read(s, gic_get_current_cpu(s), addr, data, attrs); 1544 } 1545 1546 static MemTxResult gic_thiscpu_write(void *opaque, hwaddr addr, 1547 uint64_t value, unsigned size, 1548 MemTxAttrs attrs) 1549 { 1550 GICState *s = (GICState *)opaque; 1551 return gic_cpu_write(s, gic_get_current_cpu(s), addr, value, attrs); 1552 } 1553 1554 /* Wrappers to read/write the GIC CPU interface for a specific CPU. 1555 * These just decode the opaque pointer into GICState* + cpu id. 1556 */ 1557 static MemTxResult gic_do_cpu_read(void *opaque, hwaddr addr, uint64_t *data, 1558 unsigned size, MemTxAttrs attrs) 1559 { 1560 GICState **backref = (GICState **)opaque; 1561 GICState *s = *backref; 1562 int id = (backref - s->backref); 1563 return gic_cpu_read(s, id, addr, data, attrs); 1564 } 1565 1566 static MemTxResult gic_do_cpu_write(void *opaque, hwaddr addr, 1567 uint64_t value, unsigned size, 1568 MemTxAttrs attrs) 1569 { 1570 GICState **backref = (GICState **)opaque; 1571 GICState *s = *backref; 1572 int id = (backref - s->backref); 1573 return gic_cpu_write(s, id, addr, value, attrs); 1574 } 1575 1576 static MemTxResult gic_thisvcpu_read(void *opaque, hwaddr addr, uint64_t *data, 1577 unsigned size, MemTxAttrs attrs) 1578 { 1579 GICState *s = (GICState *)opaque; 1580 1581 return gic_cpu_read(s, gic_get_current_vcpu(s), addr, data, attrs); 1582 } 1583 1584 static MemTxResult gic_thisvcpu_write(void *opaque, hwaddr addr, 1585 uint64_t value, unsigned size, 1586 MemTxAttrs attrs) 1587 { 1588 GICState *s = (GICState *)opaque; 1589 1590 return gic_cpu_write(s, gic_get_current_vcpu(s), addr, value, attrs); 1591 } 1592 1593 static uint32_t gic_compute_eisr(GICState *s, int cpu, int lr_start) 1594 { 1595 int lr_idx; 1596 uint32_t ret = 0; 1597 1598 for (lr_idx = lr_start; lr_idx < s->num_lrs; lr_idx++) { 1599 uint32_t *entry = &s->h_lr[lr_idx][cpu]; 1600 ret = deposit32(ret, lr_idx - lr_start, 1, 1601 gic_lr_entry_is_eoi(*entry)); 1602 } 1603 1604 return ret; 1605 } 1606 1607 static uint32_t gic_compute_elrsr(GICState *s, int cpu, int lr_start) 1608 { 1609 int lr_idx; 1610 uint32_t ret = 0; 1611 1612 for (lr_idx = lr_start; lr_idx < s->num_lrs; lr_idx++) { 1613 uint32_t *entry = &s->h_lr[lr_idx][cpu]; 1614 ret = deposit32(ret, lr_idx - lr_start, 1, 1615 gic_lr_entry_is_free(*entry)); 1616 } 1617 1618 return ret; 1619 } 1620 1621 static void gic_vmcr_write(GICState *s, uint32_t value, MemTxAttrs attrs) 1622 { 1623 int vcpu = gic_get_current_vcpu(s); 1624 uint32_t ctlr; 1625 uint32_t abpr; 1626 uint32_t bpr; 1627 uint32_t prio_mask; 1628 1629 ctlr = FIELD_EX32(value, GICH_VMCR, VMCCtlr); 1630 abpr = FIELD_EX32(value, GICH_VMCR, VMABP); 1631 bpr = FIELD_EX32(value, GICH_VMCR, VMBP); 1632 prio_mask = FIELD_EX32(value, GICH_VMCR, VMPriMask) << 3; 1633 1634 gic_set_cpu_control(s, vcpu, ctlr, attrs); 1635 s->abpr[vcpu] = MAX(abpr, GIC_VIRT_MIN_ABPR); 1636 s->bpr[vcpu] = MAX(bpr, GIC_VIRT_MIN_BPR); 1637 gic_set_priority_mask(s, vcpu, prio_mask, attrs); 1638 } 1639 1640 static MemTxResult gic_hyp_read(void *opaque, int cpu, hwaddr addr, 1641 uint64_t *data, MemTxAttrs attrs) 1642 { 1643 GICState *s = ARM_GIC(opaque); 1644 int vcpu = cpu + GIC_NCPU; 1645 1646 switch (addr) { 1647 case A_GICH_HCR: /* Hypervisor Control */ 1648 *data = s->h_hcr[cpu]; 1649 break; 1650 1651 case A_GICH_VTR: /* VGIC Type */ 1652 *data = FIELD_DP32(0, GICH_VTR, ListRegs, s->num_lrs - 1); 1653 *data = FIELD_DP32(*data, GICH_VTR, PREbits, 1654 GIC_VIRT_MAX_GROUP_PRIO_BITS - 1); 1655 *data = FIELD_DP32(*data, GICH_VTR, PRIbits, 1656 (7 - GIC_VIRT_MIN_BPR) - 1); 1657 break; 1658 1659 case A_GICH_VMCR: /* Virtual Machine Control */ 1660 *data = FIELD_DP32(0, GICH_VMCR, VMCCtlr, 1661 extract32(s->cpu_ctlr[vcpu], 0, 10)); 1662 *data = FIELD_DP32(*data, GICH_VMCR, VMABP, s->abpr[vcpu]); 1663 *data = FIELD_DP32(*data, GICH_VMCR, VMBP, s->bpr[vcpu]); 1664 *data = FIELD_DP32(*data, GICH_VMCR, VMPriMask, 1665 extract32(s->priority_mask[vcpu], 3, 5)); 1666 break; 1667 1668 case A_GICH_MISR: /* Maintenance Interrupt Status */ 1669 *data = s->h_misr[cpu]; 1670 break; 1671 1672 case A_GICH_EISR0: /* End of Interrupt Status 0 and 1 */ 1673 case A_GICH_EISR1: 1674 *data = gic_compute_eisr(s, cpu, (addr - A_GICH_EISR0) * 8); 1675 break; 1676 1677 case A_GICH_ELRSR0: /* Empty List Status 0 and 1 */ 1678 case A_GICH_ELRSR1: 1679 *data = gic_compute_elrsr(s, cpu, (addr - A_GICH_ELRSR0) * 8); 1680 break; 1681 1682 case A_GICH_APR: /* Active Priorities */ 1683 *data = s->h_apr[cpu]; 1684 break; 1685 1686 case A_GICH_LR0 ... A_GICH_LR63: /* List Registers */ 1687 { 1688 int lr_idx = (addr - A_GICH_LR0) / 4; 1689 1690 if (lr_idx > s->num_lrs) { 1691 *data = 0; 1692 } else { 1693 *data = s->h_lr[lr_idx][cpu]; 1694 } 1695 break; 1696 } 1697 1698 default: 1699 qemu_log_mask(LOG_GUEST_ERROR, 1700 "gic_hyp_read: Bad offset %" HWADDR_PRIx "\n", addr); 1701 return MEMTX_OK; 1702 } 1703 1704 return MEMTX_OK; 1705 } 1706 1707 static MemTxResult gic_hyp_write(void *opaque, int cpu, hwaddr addr, 1708 uint64_t value, MemTxAttrs attrs) 1709 { 1710 GICState *s = ARM_GIC(opaque); 1711 int vcpu = cpu + GIC_NCPU; 1712 1713 switch (addr) { 1714 case A_GICH_HCR: /* Hypervisor Control */ 1715 s->h_hcr[cpu] = value & GICH_HCR_MASK; 1716 break; 1717 1718 case A_GICH_VMCR: /* Virtual Machine Control */ 1719 gic_vmcr_write(s, value, attrs); 1720 break; 1721 1722 case A_GICH_APR: /* Active Priorities */ 1723 s->h_apr[cpu] = value; 1724 s->running_priority[vcpu] = gic_get_prio_from_apr_bits(s, vcpu); 1725 break; 1726 1727 case A_GICH_LR0 ... A_GICH_LR63: /* List Registers */ 1728 { 1729 int lr_idx = (addr - A_GICH_LR0) / 4; 1730 1731 if (lr_idx > s->num_lrs) { 1732 return MEMTX_OK; 1733 } 1734 1735 s->h_lr[lr_idx][cpu] = value & GICH_LR_MASK; 1736 break; 1737 } 1738 1739 default: 1740 qemu_log_mask(LOG_GUEST_ERROR, 1741 "gic_hyp_write: Bad offset %" HWADDR_PRIx "\n", addr); 1742 return MEMTX_OK; 1743 } 1744 1745 return MEMTX_OK; 1746 } 1747 1748 static MemTxResult gic_thiscpu_hyp_read(void *opaque, hwaddr addr, uint64_t *data, 1749 unsigned size, MemTxAttrs attrs) 1750 { 1751 GICState *s = (GICState *)opaque; 1752 1753 return gic_hyp_read(s, gic_get_current_cpu(s), addr, data, attrs); 1754 } 1755 1756 static MemTxResult gic_thiscpu_hyp_write(void *opaque, hwaddr addr, 1757 uint64_t value, unsigned size, 1758 MemTxAttrs attrs) 1759 { 1760 GICState *s = (GICState *)opaque; 1761 1762 return gic_hyp_write(s, gic_get_current_cpu(s), addr, value, attrs); 1763 } 1764 1765 static MemTxResult gic_do_hyp_read(void *opaque, hwaddr addr, uint64_t *data, 1766 unsigned size, MemTxAttrs attrs) 1767 { 1768 GICState **backref = (GICState **)opaque; 1769 GICState *s = *backref; 1770 int id = (backref - s->backref); 1771 1772 return gic_hyp_read(s, id, addr, data, attrs); 1773 } 1774 1775 static MemTxResult gic_do_hyp_write(void *opaque, hwaddr addr, 1776 uint64_t value, unsigned size, 1777 MemTxAttrs attrs) 1778 { 1779 GICState **backref = (GICState **)opaque; 1780 GICState *s = *backref; 1781 int id = (backref - s->backref); 1782 1783 return gic_hyp_write(s, id + GIC_NCPU, addr, value, attrs); 1784 1785 } 1786 1787 static const MemoryRegionOps gic_ops[2] = { 1788 { 1789 .read_with_attrs = gic_dist_read, 1790 .write_with_attrs = gic_dist_write, 1791 .endianness = DEVICE_NATIVE_ENDIAN, 1792 }, 1793 { 1794 .read_with_attrs = gic_thiscpu_read, 1795 .write_with_attrs = gic_thiscpu_write, 1796 .endianness = DEVICE_NATIVE_ENDIAN, 1797 } 1798 }; 1799 1800 static const MemoryRegionOps gic_cpu_ops = { 1801 .read_with_attrs = gic_do_cpu_read, 1802 .write_with_attrs = gic_do_cpu_write, 1803 .endianness = DEVICE_NATIVE_ENDIAN, 1804 }; 1805 1806 static const MemoryRegionOps gic_virt_ops[2] = { 1807 { 1808 .read_with_attrs = gic_thiscpu_hyp_read, 1809 .write_with_attrs = gic_thiscpu_hyp_write, 1810 .endianness = DEVICE_NATIVE_ENDIAN, 1811 }, 1812 { 1813 .read_with_attrs = gic_thisvcpu_read, 1814 .write_with_attrs = gic_thisvcpu_write, 1815 .endianness = DEVICE_NATIVE_ENDIAN, 1816 } 1817 }; 1818 1819 static const MemoryRegionOps gic_viface_ops = { 1820 .read_with_attrs = gic_do_hyp_read, 1821 .write_with_attrs = gic_do_hyp_write, 1822 .endianness = DEVICE_NATIVE_ENDIAN, 1823 }; 1824 1825 static void arm_gic_realize(DeviceState *dev, Error **errp) 1826 { 1827 /* Device instance realize function for the GIC sysbus device */ 1828 int i; 1829 GICState *s = ARM_GIC(dev); 1830 SysBusDevice *sbd = SYS_BUS_DEVICE(dev); 1831 ARMGICClass *agc = ARM_GIC_GET_CLASS(s); 1832 Error *local_err = NULL; 1833 1834 agc->parent_realize(dev, &local_err); 1835 if (local_err) { 1836 error_propagate(errp, local_err); 1837 return; 1838 } 1839 1840 if (kvm_enabled() && !kvm_arm_supports_user_irq()) { 1841 error_setg(errp, "KVM with user space irqchip only works when the " 1842 "host kernel supports KVM_CAP_ARM_USER_IRQ"); 1843 return; 1844 } 1845 1846 /* This creates distributor, main CPU interface (s->cpuiomem[0]) and if 1847 * enabled, virtualization extensions related interfaces (main virtual 1848 * interface (s->vifaceiomem[0]) and virtual CPU interface). 1849 */ 1850 gic_init_irqs_and_mmio(s, gic_set_irq, gic_ops, gic_virt_ops); 1851 1852 /* Extra core-specific regions for the CPU interfaces. This is 1853 * necessary for "franken-GIC" implementations, for example on 1854 * Exynos 4. 1855 * NB that the memory region size of 0x100 applies for the 11MPCore 1856 * and also cores following the GIC v1 spec (ie A9). 1857 * GIC v2 defines a larger memory region (0x1000) so this will need 1858 * to be extended when we implement A15. 1859 */ 1860 for (i = 0; i < s->num_cpu; i++) { 1861 s->backref[i] = s; 1862 memory_region_init_io(&s->cpuiomem[i+1], OBJECT(s), &gic_cpu_ops, 1863 &s->backref[i], "gic_cpu", 0x100); 1864 sysbus_init_mmio(sbd, &s->cpuiomem[i+1]); 1865 } 1866 1867 /* Extra core-specific regions for virtual interfaces. This is required by 1868 * the GICv2 specification. 1869 */ 1870 if (s->virt_extn) { 1871 for (i = 0; i < s->num_cpu; i++) { 1872 memory_region_init_io(&s->vifaceiomem[i + 1], OBJECT(s), 1873 &gic_viface_ops, &s->backref[i], 1874 "gic_viface", 0x1000); 1875 sysbus_init_mmio(sbd, &s->vifaceiomem[i + 1]); 1876 } 1877 } 1878 1879 } 1880 1881 static void arm_gic_class_init(ObjectClass *klass, void *data) 1882 { 1883 DeviceClass *dc = DEVICE_CLASS(klass); 1884 ARMGICClass *agc = ARM_GIC_CLASS(klass); 1885 1886 device_class_set_parent_realize(dc, arm_gic_realize, &agc->parent_realize); 1887 } 1888 1889 static const TypeInfo arm_gic_info = { 1890 .name = TYPE_ARM_GIC, 1891 .parent = TYPE_ARM_GIC_COMMON, 1892 .instance_size = sizeof(GICState), 1893 .class_init = arm_gic_class_init, 1894 .class_size = sizeof(ARMGICClass), 1895 }; 1896 1897 static void arm_gic_register_types(void) 1898 { 1899 type_register_static(&arm_gic_info); 1900 } 1901 1902 type_init(arm_gic_register_types) 1903