/*
 * ARM Generic/Distributed Interrupt Controller
 *
 * Copyright (c) 2006-2007 CodeSourcery.
 * Written by Paul Brook
 *
 * This code is licensed under the GPL.
 */

/* This file contains implementation code for the RealView EB interrupt
 * controller, MPCore distributed interrupt controller and ARMv7-M
 * Nested Vectored Interrupt Controller.
 * It is compiled in two ways:
 *  (1) as a standalone file to produce a sysbus device which is a GIC
 *  that can be used on the realview board and as one of the builtin
 *  private peripherals for the ARM MP CPUs (11MPCore, A9, etc)
 *  (2) by being directly #included into armv7m_nvic.c to produce the
 *  armv7m_nvic device.
 */

#include "qemu/osdep.h"
#include "hw/sysbus.h"
#include "gic_internal.h"
#include "qapi/error.h"
#include "qom/cpu.h"

//#define DEBUG_GIC

#ifdef DEBUG_GIC
#define DPRINTF(fmt, ...) \
do { fprintf(stderr, "arm_gic: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) do {} while(0)
#endif

/* Identification register byte values (returned for distributor offsets
 * 0xfd0..0xffc by gic_dist_readb) for each of the GIC flavours we model.
 */
static const uint8_t gic_id_11mpcore[] = {
    0x00, 0x00, 0x00, 0x00, 0x90, 0x13, 0x04, 0x00, 0x0d, 0xf0, 0x05, 0xb1
};

static const uint8_t gic_id_gicv1[] = {
    0x04, 0x00, 0x00, 0x00, 0x90, 0xb3, 0x1b, 0x00, 0x0d, 0xf0, 0x05, 0xb1
};

static const uint8_t gic_id_gicv2[] = {
    0x04, 0x00, 0x00, 0x00, 0x90, 0xb4, 0x2b, 0x00, 0x0d, 0xf0, 0x05, 0xb1
};

/* Return the index of the CPU performing the current access.
 * For a uniprocessor GIC the answer is always CPU 0, and we avoid
 * touching current_cpu (which may be NULL outside CPU context).
 */
static inline int gic_get_current_cpu(GICState *s)
{
    if (s->num_cpu > 1) {
        return current_cpu->cpu_index;
    }
    return 0;
}

/* Return true if this GIC config has interrupt groups, which is
 * true if we're a GICv2, or a GICv1 with the security extensions.
 */
static inline bool gic_has_groups(GICState *s)
{
    return s->revision == 2 || s->security_extn;
}

/* TODO: Many places that call this routine could be optimized. */
/* Update interrupt status after enabled or pending bits have been changed.
 */
/* Recompute, for every CPU interface, the highest-priority pending
 * interrupt and drive the parent IRQ/FIQ lines accordingly.
 */
void gic_update(GICState *s)
{
    int best_irq;
    int best_prio;
    int irq;
    int irq_level, fiq_level;
    int cpu;
    int cm;

    for (cpu = 0; cpu < s->num_cpu; cpu++) {
        cm = 1 << cpu;
        /* 1023 is the architectural "no pending interrupt" ID */
        s->current_pending[cpu] = 1023;
        if (!(s->ctlr & (GICD_CTLR_EN_GRP0 | GICD_CTLR_EN_GRP1))
            || !(s->cpu_ctlr[cpu] & (GICC_CTLR_EN_GRP0 | GICC_CTLR_EN_GRP1))) {
            /* Distributor or CPU interface fully disabled: lines go low */
            qemu_irq_lower(s->parent_irq[cpu]);
            qemu_irq_lower(s->parent_fiq[cpu]);
            continue;
        }
        /* 0x100 is worse than any real 8-bit priority value */
        best_prio = 0x100;
        best_irq = 1023;
        for (irq = 0; irq < s->num_irq; irq++) {
            if (GIC_TEST_ENABLED(irq, cm) && gic_test_pending(s, irq, cm) &&
                (irq < GIC_INTERNAL || GIC_TARGET(irq) & cm)) {
                if (GIC_GET_PRIORITY(irq, cpu) < best_prio) {
                    best_prio = GIC_GET_PRIORITY(irq, cpu);
                    best_irq = irq;
                }
            }
        }

        irq_level = fiq_level = 0;

        if (best_prio < s->priority_mask[cpu]) {
            s->current_pending[cpu] = best_irq;
            if (best_prio < s->running_priority[cpu]) {
                int group = GIC_TEST_GROUP(best_irq, cm);

                /* Only signal if both distributor and CPU interface have
                 * the winning interrupt's group enabled.
                 */
                if (extract32(s->ctlr, group, 1) &&
                    extract32(s->cpu_ctlr[cpu], group, 1)) {
                    if (group == 0 && s->cpu_ctlr[cpu] & GICC_CTLR_FIQ_EN) {
                        DPRINTF("Raised pending FIQ %d (cpu %d)\n",
                                best_irq, cpu);
                        fiq_level = 1;
                    } else {
                        DPRINTF("Raised pending IRQ %d (cpu %d)\n",
                                best_irq, cpu);
                        irq_level = 1;
                    }
                }
            }
        }

        qemu_set_irq(s->parent_irq[cpu], irq_level);
        qemu_set_irq(s->parent_fiq[cpu], fiq_level);
    }
}

/* Mark a PPI/SGI pending for a single CPU and re-evaluate outputs. */
void gic_set_pending_private(GICState *s, int cpu, int irq)
{
    int cm = 1 << cpu;

    if (gic_test_pending(s, irq, cm)) {
        return;
    }

    DPRINTF("Set %d pending cpu %d\n", irq, cpu);
    GIC_SET_PENDING(irq, cm);
    gic_update(s);
}

/* 11MPCore/NVIC flavour of input-line handling: a high level also latches
 * the pending bit for enabled level-triggered interrupts.
 */
static void gic_set_irq_11mpcore(GICState *s, int irq, int level,
                                 int cm, int target)
{
    if (level) {
        GIC_SET_LEVEL(irq, cm);
        if (GIC_TEST_EDGE_TRIGGER(irq) || GIC_TEST_ENABLED(irq, cm)) {
            DPRINTF("Set %d pending mask %x\n", irq, target);
            GIC_SET_PENDING(irq, target);
        }
    } else {
        GIC_CLEAR_LEVEL(irq, cm);
    }
}

/* Generic GIC flavour: only edge-triggered interrupts latch pending here;
 * level-triggered pending state is derived from the level in
 * gic_test_pending().
 */
static void gic_set_irq_generic(GICState *s, int irq, int level,
                                int cm, int target)
{
    if (level) {
        GIC_SET_LEVEL(irq, cm);
        DPRINTF("Set %d pending mask %x\n", irq, target);
        if (GIC_TEST_EDGE_TRIGGER(irq)) {
            GIC_SET_PENDING(irq, target);
        }
    } else {
        GIC_CLEAR_LEVEL(irq, cm);
    }
}

/* Process a change in an external IRQ input. */
static void gic_set_irq(void *opaque, int irq, int level)
{
    /* Meaning of the 'irq' parameter:
     *  [0..N-1] : external interrupts
     *  [N..N+31] : PPI (internal) interrupts for CPU 0
     *  [N+32..N+63] : PPI (internal interrupts for CPU 1
     *  ...
     */
    GICState *s = (GICState *)opaque;
    int cm, target;
    if (irq < (s->num_irq - GIC_INTERNAL)) {
        /* The first external input line is internal interrupt 32.  */
        cm = ALL_CPU_MASK;
        irq += GIC_INTERNAL;
        target = GIC_TARGET(irq);
    } else {
        int cpu;
        irq -= (s->num_irq - GIC_INTERNAL);
        cpu = irq / GIC_INTERNAL;
        irq %= GIC_INTERNAL;
        cm = 1 << cpu;
        target = cm;
    }

    /* SGIs have no external input lines */
    assert(irq >= GIC_NR_SGIS);

    if (level == GIC_TEST_LEVEL(irq, cm)) {
        /* No change in line state: nothing to do */
        return;
    }

    if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) {
        gic_set_irq_11mpcore(s, irq, level, cm, target);
    } else {
        gic_set_irq_generic(s, irq, level, cm, target);
    }

    gic_update(s);
}

/* Return the ID of the highest-priority pending interrupt as seen by
 * this access (GICC_HPPIR semantics), applying the group-visibility
 * rules when the GIC supports interrupt grouping.
 */
static uint16_t gic_get_current_pending_irq(GICState *s, int cpu,
                                            MemTxAttrs attrs)
{
    uint16_t pending_irq = s->current_pending[cpu];

    if (pending_irq < GIC_MAXIRQ && gic_has_groups(s)) {
        int group = GIC_TEST_GROUP(pending_irq, (1 << cpu));
        /* On a GIC without the security extensions, reading this register
         * behaves in the same way as a secure access to a GIC with them.
         */
        bool secure = !s->security_extn || attrs.secure;

        if (group == 0 && !secure) {
            /* Group0 interrupts hidden from Non-secure access */
            return 1023;
        }
        if (group == 1 && secure && !(s->cpu_ctlr[cpu] & GICC_CTLR_ACK_CTL)) {
            /* Group1 interrupts only seen by Secure access if
             * AckCtl bit set.
             */
            return 1022;
        }
    }
    return pending_irq;
}

static int gic_get_group_priority(GICState *s, int cpu, int irq)
{
    /* Return the group priority of the specified interrupt
     * (which is the top bits of its priority, with the number
     * of bits masked determined by the applicable binary point register).
     */
    int bpr;
    uint32_t mask;

    /* Group1 interrupts use ABPR unless CBPR forces a common BPR */
    if (gic_has_groups(s) &&
        !(s->cpu_ctlr[cpu] & GICC_CTLR_CBPR) &&
        GIC_TEST_GROUP(irq, (1 << cpu))) {
        bpr = s->abpr[cpu];
    } else {
        bpr = s->bpr[cpu];
    }

    /* a BPR of 0 means the group priority bits are [7:1];
     * a BPR of 1 means they are [7:2], and so on down to
     * a BPR of 7 meaning no group priority bits at all.
     */
    mask = ~0U << ((bpr & 7) + 1);

    return GIC_GET_PRIORITY(irq, cpu) & mask;
}

static void gic_activate_irq(GICState *s, int cpu, int irq)
{
    /* Set the appropriate Active Priority Register bit for this IRQ,
     * and update the running priority.
     */
    int prio = gic_get_group_priority(s, cpu, irq);
    int preemption_level = prio >> (GIC_MIN_BPR + 1);
    int regno = preemption_level / 32;
    int bitno = preemption_level % 32;

    /* Group1 interrupts record their activation in NSAPR instead of APR */
    if (gic_has_groups(s) && GIC_TEST_GROUP(irq, (1 << cpu))) {
        s->nsapr[regno][cpu] |= (1 << bitno);
    } else {
        s->apr[regno][cpu] |= (1 << bitno);
    }

    s->running_priority[cpu] = prio;
    GIC_SET_ACTIVE(irq, 1 << cpu);
}

static int gic_get_prio_from_apr_bits(GICState *s, int cpu)
{
    /* Recalculate the current running priority for this CPU based
     * on the set bits in the Active Priority Registers.
     */
    int i;
    for (i = 0; i < GIC_NR_APRS; i++) {
        uint32_t apr = s->apr[i][cpu] | s->nsapr[i][cpu];
        if (!apr) {
            continue;
        }
        /* Lowest set bit across APR and NSAPR = highest-priority active */
        return (i * 32 + ctz32(apr)) << (GIC_MIN_BPR + 1);
    }
    /* No active interrupts: idle priority */
    return 0x100;
}

static void gic_drop_prio(GICState *s, int cpu, int group)
{
    /* Drop the priority of the currently active interrupt in the
     * specified group.
     *
     * Note that we can guarantee (because of the requirement to nest
     * GICC_IAR reads [which activate an interrupt and raise priority]
     * with GICC_EOIR writes [which drop the priority for the interrupt])
     * that the interrupt we're being called for is the highest priority
     * active interrupt, meaning that it has the lowest set bit in the
     * APR registers.
     *
     * If the guest does not honour the ordering constraints then the
     * behaviour of the GIC is UNPREDICTABLE, which for us means that
     * the values of the APR registers might become incorrect and the
     * running priority will be wrong, so interrupts that should preempt
     * might not do so, and interrupts that should not preempt might do so.
     */
    int i;

    for (i = 0; i < GIC_NR_APRS; i++) {
        uint32_t *papr = group ? &s->nsapr[i][cpu] : &s->apr[i][cpu];
        if (!*papr) {
            continue;
        }
        /* Clear lowest set bit */
        *papr &= *papr - 1;
        break;
    }

    s->running_priority[cpu] = gic_get_prio_from_apr_bits(s, cpu);
}

/* Handle a read of GICC_IAR: acknowledge and activate the highest-priority
 * pending interrupt, returning its ID (with source CPU in bits [12:10] for
 * SGIs on non-11MPCore GICs), or 1022/1023 for group-hidden/spurious.
 */
uint32_t gic_acknowledge_irq(GICState *s, int cpu, MemTxAttrs attrs)
{
    int ret, irq, src;
    int cm = 1 << cpu;

    /* gic_get_current_pending_irq() will return 1022 or 1023 appropriately
     * for the case where this GIC supports grouping and the pending interrupt
     * is in the wrong group.
333 */ 334 irq = gic_get_current_pending_irq(s, cpu, attrs); 335 336 if (irq >= GIC_MAXIRQ) { 337 DPRINTF("ACK, no pending interrupt or it is hidden: %d\n", irq); 338 return irq; 339 } 340 341 if (GIC_GET_PRIORITY(irq, cpu) >= s->running_priority[cpu]) { 342 DPRINTF("ACK, pending interrupt (%d) has insufficient priority\n", irq); 343 return 1023; 344 } 345 346 if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) { 347 /* Clear pending flags for both level and edge triggered interrupts. 348 * Level triggered IRQs will be reasserted once they become inactive. 349 */ 350 GIC_CLEAR_PENDING(irq, GIC_TEST_MODEL(irq) ? ALL_CPU_MASK : cm); 351 ret = irq; 352 } else { 353 if (irq < GIC_NR_SGIS) { 354 /* Lookup the source CPU for the SGI and clear this in the 355 * sgi_pending map. Return the src and clear the overall pending 356 * state on this CPU if the SGI is not pending from any CPUs. 357 */ 358 assert(s->sgi_pending[irq][cpu] != 0); 359 src = ctz32(s->sgi_pending[irq][cpu]); 360 s->sgi_pending[irq][cpu] &= ~(1 << src); 361 if (s->sgi_pending[irq][cpu] == 0) { 362 GIC_CLEAR_PENDING(irq, GIC_TEST_MODEL(irq) ? ALL_CPU_MASK : cm); 363 } 364 ret = irq | ((src & 0x7) << 10); 365 } else { 366 /* Clear pending state for both level and edge triggered 367 * interrupts. (level triggered interrupts with an active line 368 * remain pending, see gic_test_pending) 369 */ 370 GIC_CLEAR_PENDING(irq, GIC_TEST_MODEL(irq) ? 
ALL_CPU_MASK : cm); 371 ret = irq; 372 } 373 } 374 375 gic_activate_irq(s, cpu, irq); 376 gic_update(s); 377 DPRINTF("ACK %d\n", irq); 378 return ret; 379 } 380 381 void gic_set_priority(GICState *s, int cpu, int irq, uint8_t val, 382 MemTxAttrs attrs) 383 { 384 if (s->security_extn && !attrs.secure) { 385 if (!GIC_TEST_GROUP(irq, (1 << cpu))) { 386 return; /* Ignore Non-secure access of Group0 IRQ */ 387 } 388 val = 0x80 | (val >> 1); /* Non-secure view */ 389 } 390 391 if (irq < GIC_INTERNAL) { 392 s->priority1[irq][cpu] = val; 393 } else { 394 s->priority2[(irq) - GIC_INTERNAL] = val; 395 } 396 } 397 398 static uint32_t gic_get_priority(GICState *s, int cpu, int irq, 399 MemTxAttrs attrs) 400 { 401 uint32_t prio = GIC_GET_PRIORITY(irq, cpu); 402 403 if (s->security_extn && !attrs.secure) { 404 if (!GIC_TEST_GROUP(irq, (1 << cpu))) { 405 return 0; /* Non-secure access cannot read priority of Group0 IRQ */ 406 } 407 prio = (prio << 1) & 0xff; /* Non-secure view */ 408 } 409 return prio; 410 } 411 412 static void gic_set_priority_mask(GICState *s, int cpu, uint8_t pmask, 413 MemTxAttrs attrs) 414 { 415 if (s->security_extn && !attrs.secure) { 416 if (s->priority_mask[cpu] & 0x80) { 417 /* Priority Mask in upper half */ 418 pmask = 0x80 | (pmask >> 1); 419 } else { 420 /* Non-secure write ignored if priority mask is in lower half */ 421 return; 422 } 423 } 424 s->priority_mask[cpu] = pmask; 425 } 426 427 static uint32_t gic_get_priority_mask(GICState *s, int cpu, MemTxAttrs attrs) 428 { 429 uint32_t pmask = s->priority_mask[cpu]; 430 431 if (s->security_extn && !attrs.secure) { 432 if (pmask & 0x80) { 433 /* Priority Mask in upper half, return Non-secure view */ 434 pmask = (pmask << 1) & 0xff; 435 } else { 436 /* Priority Mask in lower half, RAZ */ 437 pmask = 0; 438 } 439 } 440 return pmask; 441 } 442 443 static uint32_t gic_get_cpu_control(GICState *s, int cpu, MemTxAttrs attrs) 444 { 445 uint32_t ret = s->cpu_ctlr[cpu]; 446 447 if (s->security_extn && 
!attrs.secure) { 448 /* Construct the NS banked view of GICC_CTLR from the correct 449 * bits of the S banked view. We don't need to move the bypass 450 * control bits because we don't implement that (IMPDEF) part 451 * of the GIC architecture. 452 */ 453 ret = (ret & (GICC_CTLR_EN_GRP1 | GICC_CTLR_EOIMODE_NS)) >> 1; 454 } 455 return ret; 456 } 457 458 static void gic_set_cpu_control(GICState *s, int cpu, uint32_t value, 459 MemTxAttrs attrs) 460 { 461 uint32_t mask; 462 463 if (s->security_extn && !attrs.secure) { 464 /* The NS view can only write certain bits in the register; 465 * the rest are unchanged 466 */ 467 mask = GICC_CTLR_EN_GRP1; 468 if (s->revision == 2) { 469 mask |= GICC_CTLR_EOIMODE_NS; 470 } 471 s->cpu_ctlr[cpu] &= ~mask; 472 s->cpu_ctlr[cpu] |= (value << 1) & mask; 473 } else { 474 if (s->revision == 2) { 475 mask = s->security_extn ? GICC_CTLR_V2_S_MASK : GICC_CTLR_V2_MASK; 476 } else { 477 mask = s->security_extn ? GICC_CTLR_V1_S_MASK : GICC_CTLR_V1_MASK; 478 } 479 s->cpu_ctlr[cpu] = value & mask; 480 } 481 DPRINTF("CPU Interface %d: Group0 Interrupts %sabled, " 482 "Group1 Interrupts %sabled\n", cpu, 483 (s->cpu_ctlr[cpu] & GICC_CTLR_EN_GRP0) ? "En" : "Dis", 484 (s->cpu_ctlr[cpu] & GICC_CTLR_EN_GRP1) ? "En" : "Dis"); 485 } 486 487 static uint8_t gic_get_running_priority(GICState *s, int cpu, MemTxAttrs attrs) 488 { 489 if (s->security_extn && !attrs.secure) { 490 if (s->running_priority[cpu] & 0x80) { 491 /* Running priority in upper half of range: return the Non-secure 492 * view of the priority. 493 */ 494 return s->running_priority[cpu] << 1; 495 } else { 496 /* Running priority in lower half of range: RAZ */ 497 return 0; 498 } 499 } else { 500 return s->running_priority[cpu]; 501 } 502 } 503 504 /* Return true if we should split priority drop and interrupt deactivation, 505 * ie whether the relevant EOIMode bit is set. 
 */
static bool gic_eoi_split(GICState *s, int cpu, MemTxAttrs attrs)
{
    if (s->revision != 2) {
        /* Before GICv2 prio-drop and deactivate are not separable */
        return false;
    }
    if (s->security_extn && !attrs.secure) {
        return s->cpu_ctlr[cpu] & GICC_CTLR_EOIMODE_NS;
    }
    return s->cpu_ctlr[cpu] & GICC_CTLR_EOIMODE;
}

/* Handle a write to GICC_DIR: deactivate an interrupt without dropping
 * priority (only meaningful when the relevant EOIMode bit is set).
 */
static void gic_deactivate_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs)
{
    int cm = 1 << cpu;
    int group = gic_has_groups(s) && GIC_TEST_GROUP(irq, cm);

    if (!gic_eoi_split(s, cpu, attrs)) {
        /* This is UNPREDICTABLE; we choose to ignore it */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "gic_deactivate_irq: GICC_DIR write when EOIMode clear");
        return;
    }

    if (s->security_extn && !attrs.secure && !group) {
        DPRINTF("Non-secure DI for Group0 interrupt %d ignored\n", irq);
        return;
    }

    GIC_CLEAR_ACTIVE(irq, cm);
}

/* Handle a write to GICC_EOIR: drop the running priority for the interrupt
 * and (unless EOIMode splits it out to GICC_DIR) deactivate it.
 */
void gic_complete_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs)
{
    int cm = 1 << cpu;
    int group;

    DPRINTF("EOI %d\n", irq);
    if (irq >= s->num_irq) {
        /* This handles two cases:
         * 1. If software writes the ID of a spurious interrupt [ie 1023]
         * to the GICC_EOIR, the GIC ignores that write.
         * 2. If software writes the number of a non-existent interrupt
         * this must be a subcase of "value written does not match the last
         * valid interrupt value read from the Interrupt Acknowledge
         * register" and so this is UNPREDICTABLE. We choose to ignore it.
         */
        return;
    }
    if (s->running_priority[cpu] == 0x100) {
        return; /* No active IRQ.  */
    }

    if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) {
        /* Mark level triggered interrupts as pending if they are still
           raised.
         */
        if (!GIC_TEST_EDGE_TRIGGER(irq) && GIC_TEST_ENABLED(irq, cm)
            && GIC_TEST_LEVEL(irq, cm) && (GIC_TARGET(irq) & cm) != 0) {
            DPRINTF("Set %d pending mask %x\n", irq, cm);
            GIC_SET_PENDING(irq, cm);
        }
    }

    group = gic_has_groups(s) && GIC_TEST_GROUP(irq, cm);

    if (s->security_extn && !attrs.secure && !group) {
        DPRINTF("Non-secure EOI for Group0 interrupt %d ignored\n", irq);
        return;
    }

    /* Secure EOI with GICC_CTLR.AckCtl == 0 when the IRQ is a Group 1
     * interrupt is UNPREDICTABLE. We choose to handle it as if AckCtl == 1,
     * i.e. go ahead and complete the irq anyway.
     */

    gic_drop_prio(s, cpu, group);

    /* In GICv2 the guest can choose to split priority-drop and deactivate */
    if (!gic_eoi_split(s, cpu, attrs)) {
        GIC_CLEAR_ACTIVE(irq, cm);
    }
    gic_update(s);
}

/* Byte-wide read of the distributor register space; wider reads are
 * assembled from byte reads by gic_dist_read().
 */
static uint32_t gic_dist_readb(void *opaque, hwaddr offset, MemTxAttrs attrs)
{
    GICState *s = (GICState *)opaque;
    uint32_t res;
    int irq;
    int i;
    int cpu;
    int cm;
    int mask;

    cpu = gic_get_current_cpu(s);
    cm = 1 << cpu;
    if (offset < 0x100) {
        if (offset == 0) { /* GICD_CTLR */
            if (s->security_extn && !attrs.secure) {
                /* The NS bank of this register is just an alias of the
                 * EnableGrp1 bit in the S bank version.
                 */
                return extract32(s->ctlr, 1, 1);
            } else {
                return s->ctlr;
            }
        }
        if (offset == 4)
            /* Interrupt Controller Type Register */
            return ((s->num_irq / 32) - 1)
                    | ((s->num_cpu - 1) << 5)
                    | (s->security_extn << 10);
        if (offset < 0x08)
            return 0;
        if (offset >= 0x80) {
            /* Interrupt Group Registers: these RAZ/WI if this is an NS
             * access to a GIC with the security extensions, or if the GIC
             * doesn't have groups at all.
             */
            res = 0;
            if (!(s->security_extn && !attrs.secure) && gic_has_groups(s)) {
                /* Every byte offset holds 8 group status bits */
                irq = (offset - 0x080) * 8 + GIC_BASE_IRQ;
                if (irq >= s->num_irq) {
                    goto bad_reg;
                }
                for (i = 0; i < 8; i++) {
                    if (GIC_TEST_GROUP(irq + i, cm)) {
                        res |= (1 << i);
                    }
                }
            }
            return res;
        }
        goto bad_reg;
    } else if (offset < 0x200) {
        /* Interrupt Set/Clear Enable.  */
        if (offset < 0x180)
            irq = (offset - 0x100) * 8;
        else
            irq = (offset - 0x180) * 8;
        irq += GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        res = 0;
        for (i = 0; i < 8; i++) {
            if (GIC_TEST_ENABLED(irq + i, cm)) {
                res |= (1 << i);
            }
        }
    } else if (offset < 0x300) {
        /* Interrupt Set/Clear Pending.  */
        if (offset < 0x280)
            irq = (offset - 0x200) * 8;
        else
            irq = (offset - 0x280) * 8;
        irq += GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        res = 0;
        /* Private interrupts report only this CPU's pending state */
        mask = (irq < GIC_INTERNAL) ? cm : ALL_CPU_MASK;
        for (i = 0; i < 8; i++) {
            if (gic_test_pending(s, irq + i, mask)) {
                res |= (1 << i);
            }
        }
    } else if (offset < 0x400) {
        /* Interrupt Active.  */
        irq = (offset - 0x300) * 8 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        res = 0;
        mask = (irq < GIC_INTERNAL) ? cm : ALL_CPU_MASK;
        for (i = 0; i < 8; i++) {
            if (GIC_TEST_ACTIVE(irq + i, mask)) {
                res |= (1 << i);
            }
        }
    } else if (offset < 0x800) {
        /* Interrupt Priority.  */
        irq = (offset - 0x400) + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        res = gic_get_priority(s, cpu, irq, attrs);
    } else if (offset < 0xc00) {
        /* Interrupt CPU Target.  */
        if (s->num_cpu == 1 && s->revision != REV_11MPCORE) {
            /* For uniprocessor GICs these RAZ/WI */
            res = 0;
        } else {
            irq = (offset - 0x800) + GIC_BASE_IRQ;
            if (irq >= s->num_irq) {
                goto bad_reg;
            }
            if (irq >= 29 && irq <= 31) {
                /* These private interrupts always target only this CPU */
                res = cm;
            } else {
                res = GIC_TARGET(irq);
            }
        }
    } else if (offset < 0xf00) {
        /* Interrupt Configuration.  */
        irq = (offset - 0xc00) * 4 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        res = 0;
        for (i = 0; i < 4; i++) {
            if (GIC_TEST_MODEL(irq + i))
                res |= (1 << (i * 2));
            if (GIC_TEST_EDGE_TRIGGER(irq + i))
                res |= (2 << (i * 2));
        }
    } else if (offset < 0xf10) {
        goto bad_reg;
    } else if (offset < 0xf30) {
        /* SGI pending-source registers: not present on 11MPCore/NVIC */
        if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) {
            goto bad_reg;
        }

        if (offset < 0xf20) {
            /* GICD_CPENDSGIRn */
            irq = (offset - 0xf10);
        } else {
            irq = (offset - 0xf20);
            /* GICD_SPENDSGIRn */
        }

        res = s->sgi_pending[irq][cpu];
    } else if (offset < 0xfd0) {
        goto bad_reg;
    } else if (offset < 0x1000) {
        /* Identification registers */
        if (offset & 3) {
            res = 0;
        } else {
            switch (s->revision) {
            case REV_11MPCORE:
                res = gic_id_11mpcore[(offset - 0xfd0) >> 2];
                break;
            case 1:
                res = gic_id_gicv1[(offset - 0xfd0) >> 2];
                break;
            case 2:
                res = gic_id_gicv2[(offset - 0xfd0) >> 2];
                break;
            case REV_NVIC:
                /* Shouldn't be able to get here */
                abort();
            default:
                res = 0;
            }
        }
    } else {
        g_assert_not_reached();
    }
    return res;
bad_reg:
    qemu_log_mask(LOG_GUEST_ERROR,
                  "gic_dist_readb: Bad offset %x\n", (int)offset);
    return 0;
}

/* Distributor read dispatcher: builds 16/32-bit results from byte reads. */
static MemTxResult gic_dist_read(void *opaque, hwaddr offset, uint64_t *data,
                                 unsigned size, MemTxAttrs attrs)
{
    switch (size) {
    case 1:
        *data = gic_dist_readb(opaque, offset, attrs);
        return MEMTX_OK;
    case 2:
        *data = gic_dist_readb(opaque, offset, attrs);
        *data |= gic_dist_readb(opaque, offset + 1, attrs) << 8;
        return MEMTX_OK;
    case 4:
        *data = gic_dist_readb(opaque, offset, attrs);
        *data |= gic_dist_readb(opaque, offset + 1, attrs) << 8;
        *data |= gic_dist_readb(opaque, offset + 2, attrs) << 16;
        *data |= gic_dist_readb(opaque, offset + 3, attrs) << 24;
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

/* Byte-wide write of the distributor register space; wider writes are
 * decomposed into byte writes by gic_dist_writew()/gic_dist_writel().
 */
static void gic_dist_writeb(void *opaque, hwaddr offset,
                            uint32_t value, MemTxAttrs attrs)
{
    GICState *s = (GICState *)opaque;
    int irq;
    int i;
    int cpu;

    cpu = gic_get_current_cpu(s);
    if (offset < 0x100) {
        if (offset == 0) {
            if (s->security_extn && !attrs.secure) {
                /* NS version is just an alias of the S version's bit 1 */
                s->ctlr = deposit32(s->ctlr, 1, 1, value);
            } else if (gic_has_groups(s)) {
                s->ctlr = value & (GICD_CTLR_EN_GRP0 | GICD_CTLR_EN_GRP1);
            } else {
                s->ctlr = value & GICD_CTLR_EN_GRP0;
            }
            DPRINTF("Distributor: Group0 %sabled; Group 1 %sabled\n",
                    s->ctlr & GICD_CTLR_EN_GRP0 ? "En" : "Dis",
                    s->ctlr & GICD_CTLR_EN_GRP1 ? "En" : "Dis");
        } else if (offset < 4) {
            /* ignored.  */
        } else if (offset >= 0x80) {
            /* Interrupt Group Registers: RAZ/WI for NS access to secure
             * GIC, or for GICs without groups.
             */
            if (!(s->security_extn && !attrs.secure) && gic_has_groups(s)) {
                /* Every byte offset holds 8 group status bits */
                irq = (offset - 0x80) * 8 + GIC_BASE_IRQ;
                if (irq >= s->num_irq) {
                    goto bad_reg;
                }
                for (i = 0; i < 8; i++) {
                    /* Group bits are banked for private interrupts */
                    int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;
                    if (value & (1 << i)) {
                        /* Group1 (Non-secure) */
                        GIC_SET_GROUP(irq + i, cm);
                    } else {
                        /* Group0 (Secure) */
                        GIC_CLEAR_GROUP(irq + i, cm);
                    }
                }
            }
        } else {
            goto bad_reg;
        }
    } else if (offset < 0x180) {
        /* Interrupt Set Enable.  */
        irq = (offset - 0x100) * 8 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        if (irq < GIC_NR_SGIS) {
            /* SGIs are always enabled: writes behave as all-ones */
            value = 0xff;
        }

        for (i = 0; i < 8; i++) {
            if (value & (1 << i)) {
                int mask =
                    (irq < GIC_INTERNAL) ? (1 << cpu) : GIC_TARGET(irq + i);
                int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;

                if (!GIC_TEST_ENABLED(irq + i, cm)) {
                    DPRINTF("Enabled IRQ %d\n", irq + i);
                }
                GIC_SET_ENABLED(irq + i, cm);
                /* If a raised level triggered IRQ enabled then mark
                   is as pending.  */
                if (GIC_TEST_LEVEL(irq + i, mask)
                        && !GIC_TEST_EDGE_TRIGGER(irq + i)) {
                    DPRINTF("Set %d pending mask %x\n", irq + i, mask);
                    GIC_SET_PENDING(irq + i, mask);
                }
            }
        }
    } else if (offset < 0x200) {
        /* Interrupt Clear Enable.  */
        irq = (offset - 0x180) * 8 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        if (irq < GIC_NR_SGIS) {
            /* SGIs cannot be disabled: writes are ignored */
            value = 0;
        }

        for (i = 0; i < 8; i++) {
            if (value & (1 << i)) {
                int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;

                if (GIC_TEST_ENABLED(irq + i, cm)) {
                    DPRINTF("Disabled IRQ %d\n", irq + i);
                }
                GIC_CLEAR_ENABLED(irq + i, cm);
            }
        }
    } else if (offset < 0x280) {
        /* Interrupt Set Pending.  */
        irq = (offset - 0x200) * 8 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        if (irq < GIC_NR_SGIS) {
            /* SGI pending state is managed via GICD_SGIR/SPENDSGIRn */
            value = 0;
        }

        for (i = 0; i < 8; i++) {
            if (value & (1 << i)) {
                GIC_SET_PENDING(irq + i, GIC_TARGET(irq + i));
            }
        }
    } else if (offset < 0x300) {
        /* Interrupt Clear Pending.
         */
        irq = (offset - 0x280) * 8 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        if (irq < GIC_NR_SGIS) {
            /* SGI pending state is managed via GICD_SGIR/CPENDSGIRn */
            value = 0;
        }

        for (i = 0; i < 8; i++) {
            /* ??? This currently clears the pending bit for all CPUs, even
               for per-CPU interrupts.  It's unclear whether this is the
               correct behavior.  */
            if (value & (1 << i)) {
                GIC_CLEAR_PENDING(irq + i, ALL_CPU_MASK);
            }
        }
    } else if (offset < 0x400) {
        /* Interrupt Active.  */
        goto bad_reg;
    } else if (offset < 0x800) {
        /* Interrupt Priority.  */
        irq = (offset - 0x400) + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        gic_set_priority(s, cpu, irq, value, attrs);
    } else if (offset < 0xc00) {
        /* Interrupt CPU Target. RAZ/WI on uniprocessor GICs, with the
         * annoying exception of the 11MPCore's GIC.
         */
        if (s->num_cpu != 1 || s->revision == REV_11MPCORE) {
            irq = (offset - 0x800) + GIC_BASE_IRQ;
            if (irq >= s->num_irq) {
                goto bad_reg;
            }
            if (irq < 29) {
                value = 0;
            } else if (irq < GIC_INTERNAL) {
                value = ALL_CPU_MASK;
            }
            s->irq_target[irq] = value & ALL_CPU_MASK;
        }
    } else if (offset < 0xf00) {
        /* Interrupt Configuration.  */
        irq = (offset - 0xc00) * 4 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        if (irq < GIC_NR_SGIS)
            /* SGIs are always edge-triggered */
            value |= 0xaa;
        for (i = 0; i < 4; i++) {
            if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) {
                if (value & (1 << (i * 2))) {
                    GIC_SET_MODEL(irq + i);
                } else {
                    GIC_CLEAR_MODEL(irq + i);
                }
            }
            if (value & (2 << (i * 2))) {
                GIC_SET_EDGE_TRIGGER(irq + i);
            } else {
                GIC_CLEAR_EDGE_TRIGGER(irq + i);
            }
        }
    } else if (offset < 0xf10) {
        /* 0xf00 is only handled for 32-bit writes.  */
        goto bad_reg;
    } else if (offset < 0xf20) {
        /* GICD_CPENDSGIRn */
        if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) {
            goto bad_reg;
        }
        irq = (offset - 0xf10);

        s->sgi_pending[irq][cpu] &= ~value;
        if (s->sgi_pending[irq][cpu] == 0) {
            /* SGI no longer pending from any source CPU */
            GIC_CLEAR_PENDING(irq, 1 << cpu);
        }
    } else if (offset < 0xf30) {
        /* GICD_SPENDSGIRn */
        if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) {
            goto bad_reg;
        }
        irq = (offset - 0xf20);

        GIC_SET_PENDING(irq, 1 << cpu);
        s->sgi_pending[irq][cpu] |= value;
    } else {
        goto bad_reg;
    }
    gic_update(s);
    return;
bad_reg:
    qemu_log_mask(LOG_GUEST_ERROR,
                  "gic_dist_writeb: Bad offset %x\n", (int)offset);
}

/* 16-bit distributor write: decomposed into two byte writes. */
static void gic_dist_writew(void *opaque, hwaddr offset,
                            uint32_t value, MemTxAttrs attrs)
{
    gic_dist_writeb(opaque, offset, value & 0xff, attrs);
    gic_dist_writeb(opaque, offset + 1, value >> 8, attrs);
}

/* 32-bit distributor write: handles GICD_SGIR specially (it is only
 * accessible as a word), otherwise decomposes into halfword writes.
 */
static void gic_dist_writel(void *opaque, hwaddr offset,
                            uint32_t value, MemTxAttrs attrs)
{
    GICState *s = (GICState *)opaque;
    if (offset == 0xf00) {
        int cpu;
        int irq;
        int mask;
        int target_cpu;

        cpu = gic_get_current_cpu(s);
        irq = value & 0x3ff;
        /* Decode the SGIR target list filter */
        switch ((value >> 24) & 3) {
        case 0:
            mask = (value >> 16) & ALL_CPU_MASK;
            break;
        case 1:
            mask = ALL_CPU_MASK ^ (1 << cpu);
            break;
        case 2:
            mask = 1 << cpu;
            break;
        default:
            DPRINTF("Bad Soft Int target filter\n");
            mask = ALL_CPU_MASK;
            break;
        }
        GIC_SET_PENDING(irq, mask);
        /* Record this CPU as the SGI source on each targeted CPU */
        target_cpu = ctz32(mask);
        while (target_cpu < GIC_NCPU) {
            s->sgi_pending[irq][target_cpu] |= (1 << cpu);
            mask &= ~(1 << target_cpu);
            target_cpu = ctz32(mask);
        }
        gic_update(s);
        return;
    }
    gic_dist_writew(opaque, offset, value & 0xffff, attrs);
    gic_dist_writew(opaque, offset + 2, value >> 16, attrs);
}

/* Distributor write dispatcher by access size. */
static MemTxResult gic_dist_write(void *opaque, hwaddr offset, uint64_t data,
                                  unsigned size, MemTxAttrs attrs)
{
    switch (size) {
    case 1:
        gic_dist_writeb(opaque, offset, data, attrs);
        return MEMTX_OK;
    case 2:
        gic_dist_writew(opaque, offset, data, attrs);
        return MEMTX_OK;
    case 4:
        gic_dist_writel(opaque, offset, data, attrs);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

static inline uint32_t gic_apr_ns_view(GICState *s, int cpu, int regno)
{
    /* Return the Nonsecure view of GICC_APR<regno>. This is the
     * second half of GICC_NSAPR.
     */
    switch (GIC_MIN_BPR) {
    case 0:
        if (regno < 2) {
            return s->nsapr[regno + 2][cpu];
        }
        break;
    case 1:
        if (regno == 0) {
            return s->nsapr[regno + 1][cpu];
        }
        break;
    case 2:
        if (regno == 0) {
            return extract32(s->nsapr[0][cpu], 16, 16);
        }
        break;
    case 3:
        if (regno == 0) {
            return extract32(s->nsapr[0][cpu], 8, 8);
        }
        break;
    default:
        g_assert_not_reached();
    }
    return 0;
}

static inline void gic_apr_write_ns_view(GICState *s, int cpu, int regno,
                                         uint32_t value)
{
    /* Write the Nonsecure view of GICC_APR<regno>.
*/ 1098 switch (GIC_MIN_BPR) { 1099 case 0: 1100 if (regno < 2) { 1101 s->nsapr[regno + 2][cpu] = value; 1102 } 1103 break; 1104 case 1: 1105 if (regno == 0) { 1106 s->nsapr[regno + 1][cpu] = value; 1107 } 1108 break; 1109 case 2: 1110 if (regno == 0) { 1111 s->nsapr[0][cpu] = deposit32(s->nsapr[0][cpu], 16, 16, value); 1112 } 1113 break; 1114 case 3: 1115 if (regno == 0) { 1116 s->nsapr[0][cpu] = deposit32(s->nsapr[0][cpu], 8, 8, value); 1117 } 1118 break; 1119 default: 1120 g_assert_not_reached(); 1121 } 1122 } 1123 1124 static MemTxResult gic_cpu_read(GICState *s, int cpu, int offset, 1125 uint64_t *data, MemTxAttrs attrs) 1126 { 1127 switch (offset) { 1128 case 0x00: /* Control */ 1129 *data = gic_get_cpu_control(s, cpu, attrs); 1130 break; 1131 case 0x04: /* Priority mask */ 1132 *data = gic_get_priority_mask(s, cpu, attrs); 1133 break; 1134 case 0x08: /* Binary Point */ 1135 if (s->security_extn && !attrs.secure) { 1136 /* BPR is banked. Non-secure copy stored in ABPR. */ 1137 *data = s->abpr[cpu]; 1138 } else { 1139 *data = s->bpr[cpu]; 1140 } 1141 break; 1142 case 0x0c: /* Acknowledge */ 1143 *data = gic_acknowledge_irq(s, cpu, attrs); 1144 break; 1145 case 0x14: /* Running Priority */ 1146 *data = gic_get_running_priority(s, cpu, attrs); 1147 break; 1148 case 0x18: /* Highest Pending Interrupt */ 1149 *data = gic_get_current_pending_irq(s, cpu, attrs); 1150 break; 1151 case 0x1c: /* Aliased Binary Point */ 1152 /* GIC v2, no security: ABPR 1153 * GIC v1, no security: not implemented (RAZ/WI) 1154 * With security extensions, secure access: ABPR (alias of NS BPR) 1155 * With security extensions, nonsecure access: RAZ/WI 1156 */ 1157 if (!gic_has_groups(s) || (s->security_extn && !attrs.secure)) { 1158 *data = 0; 1159 } else { 1160 *data = s->abpr[cpu]; 1161 } 1162 break; 1163 case 0xd0: case 0xd4: case 0xd8: case 0xdc: 1164 { 1165 int regno = (offset - 0xd0) / 4; 1166 1167 if (regno >= GIC_NR_APRS || s->revision != 2) { 1168 *data = 0; 1169 } else if 
(s->security_extn && !attrs.secure) { 1170 /* NS view of GICC_APR<n> is the top half of GIC_NSAPR<n> */ 1171 *data = gic_apr_ns_view(s, regno, cpu); 1172 } else { 1173 *data = s->apr[regno][cpu]; 1174 } 1175 break; 1176 } 1177 case 0xe0: case 0xe4: case 0xe8: case 0xec: 1178 { 1179 int regno = (offset - 0xe0) / 4; 1180 1181 if (regno >= GIC_NR_APRS || s->revision != 2 || !gic_has_groups(s) || 1182 (s->security_extn && !attrs.secure)) { 1183 *data = 0; 1184 } else { 1185 *data = s->nsapr[regno][cpu]; 1186 } 1187 break; 1188 } 1189 default: 1190 qemu_log_mask(LOG_GUEST_ERROR, 1191 "gic_cpu_read: Bad offset %x\n", (int)offset); 1192 return MEMTX_ERROR; 1193 } 1194 return MEMTX_OK; 1195 } 1196 1197 static MemTxResult gic_cpu_write(GICState *s, int cpu, int offset, 1198 uint32_t value, MemTxAttrs attrs) 1199 { 1200 switch (offset) { 1201 case 0x00: /* Control */ 1202 gic_set_cpu_control(s, cpu, value, attrs); 1203 break; 1204 case 0x04: /* Priority mask */ 1205 gic_set_priority_mask(s, cpu, value, attrs); 1206 break; 1207 case 0x08: /* Binary Point */ 1208 if (s->security_extn && !attrs.secure) { 1209 s->abpr[cpu] = MAX(value & 0x7, GIC_MIN_ABPR); 1210 } else { 1211 s->bpr[cpu] = MAX(value & 0x7, GIC_MIN_BPR); 1212 } 1213 break; 1214 case 0x10: /* End Of Interrupt */ 1215 gic_complete_irq(s, cpu, value & 0x3ff, attrs); 1216 return MEMTX_OK; 1217 case 0x1c: /* Aliased Binary Point */ 1218 if (!gic_has_groups(s) || (s->security_extn && !attrs.secure)) { 1219 /* unimplemented, or NS access: RAZ/WI */ 1220 return MEMTX_OK; 1221 } else { 1222 s->abpr[cpu] = MAX(value & 0x7, GIC_MIN_ABPR); 1223 } 1224 break; 1225 case 0xd0: case 0xd4: case 0xd8: case 0xdc: 1226 { 1227 int regno = (offset - 0xd0) / 4; 1228 1229 if (regno >= GIC_NR_APRS || s->revision != 2) { 1230 return MEMTX_OK; 1231 } 1232 if (s->security_extn && !attrs.secure) { 1233 /* NS view of GICC_APR<n> is the top half of GIC_NSAPR<n> */ 1234 gic_apr_write_ns_view(s, regno, cpu, value); 1235 } else { 1236 
s->apr[regno][cpu] = value; 1237 } 1238 break; 1239 } 1240 case 0xe0: case 0xe4: case 0xe8: case 0xec: 1241 { 1242 int regno = (offset - 0xe0) / 4; 1243 1244 if (regno >= GIC_NR_APRS || s->revision != 2) { 1245 return MEMTX_OK; 1246 } 1247 if (!gic_has_groups(s) || (s->security_extn && !attrs.secure)) { 1248 return MEMTX_OK; 1249 } 1250 s->nsapr[regno][cpu] = value; 1251 break; 1252 } 1253 case 0x1000: 1254 /* GICC_DIR */ 1255 gic_deactivate_irq(s, cpu, value & 0x3ff, attrs); 1256 break; 1257 default: 1258 qemu_log_mask(LOG_GUEST_ERROR, 1259 "gic_cpu_write: Bad offset %x\n", (int)offset); 1260 return MEMTX_ERROR; 1261 } 1262 gic_update(s); 1263 return MEMTX_OK; 1264 } 1265 1266 /* Wrappers to read/write the GIC CPU interface for the current CPU */ 1267 static MemTxResult gic_thiscpu_read(void *opaque, hwaddr addr, uint64_t *data, 1268 unsigned size, MemTxAttrs attrs) 1269 { 1270 GICState *s = (GICState *)opaque; 1271 return gic_cpu_read(s, gic_get_current_cpu(s), addr, data, attrs); 1272 } 1273 1274 static MemTxResult gic_thiscpu_write(void *opaque, hwaddr addr, 1275 uint64_t value, unsigned size, 1276 MemTxAttrs attrs) 1277 { 1278 GICState *s = (GICState *)opaque; 1279 return gic_cpu_write(s, gic_get_current_cpu(s), addr, value, attrs); 1280 } 1281 1282 /* Wrappers to read/write the GIC CPU interface for a specific CPU. 1283 * These just decode the opaque pointer into GICState* + cpu id. 
1284 */ 1285 static MemTxResult gic_do_cpu_read(void *opaque, hwaddr addr, uint64_t *data, 1286 unsigned size, MemTxAttrs attrs) 1287 { 1288 GICState **backref = (GICState **)opaque; 1289 GICState *s = *backref; 1290 int id = (backref - s->backref); 1291 return gic_cpu_read(s, id, addr, data, attrs); 1292 } 1293 1294 static MemTxResult gic_do_cpu_write(void *opaque, hwaddr addr, 1295 uint64_t value, unsigned size, 1296 MemTxAttrs attrs) 1297 { 1298 GICState **backref = (GICState **)opaque; 1299 GICState *s = *backref; 1300 int id = (backref - s->backref); 1301 return gic_cpu_write(s, id, addr, value, attrs); 1302 } 1303 1304 static const MemoryRegionOps gic_ops[2] = { 1305 { 1306 .read_with_attrs = gic_dist_read, 1307 .write_with_attrs = gic_dist_write, 1308 .endianness = DEVICE_NATIVE_ENDIAN, 1309 }, 1310 { 1311 .read_with_attrs = gic_thiscpu_read, 1312 .write_with_attrs = gic_thiscpu_write, 1313 .endianness = DEVICE_NATIVE_ENDIAN, 1314 } 1315 }; 1316 1317 static const MemoryRegionOps gic_cpu_ops = { 1318 .read_with_attrs = gic_do_cpu_read, 1319 .write_with_attrs = gic_do_cpu_write, 1320 .endianness = DEVICE_NATIVE_ENDIAN, 1321 }; 1322 1323 /* This function is used by nvic model */ 1324 void gic_init_irqs_and_distributor(GICState *s) 1325 { 1326 gic_init_irqs_and_mmio(s, gic_set_irq, gic_ops); 1327 } 1328 1329 static void arm_gic_realize(DeviceState *dev, Error **errp) 1330 { 1331 /* Device instance realize function for the GIC sysbus device */ 1332 int i; 1333 GICState *s = ARM_GIC(dev); 1334 SysBusDevice *sbd = SYS_BUS_DEVICE(dev); 1335 ARMGICClass *agc = ARM_GIC_GET_CLASS(s); 1336 Error *local_err = NULL; 1337 1338 agc->parent_realize(dev, &local_err); 1339 if (local_err) { 1340 error_propagate(errp, local_err); 1341 return; 1342 } 1343 1344 /* This creates distributor and main CPU interface (s->cpuiomem[0]) */ 1345 gic_init_irqs_and_mmio(s, gic_set_irq, gic_ops); 1346 1347 /* Extra core-specific regions for the CPU interfaces. 
This is 1348 * necessary for "franken-GIC" implementations, for example on 1349 * Exynos 4. 1350 * NB that the memory region size of 0x100 applies for the 11MPCore 1351 * and also cores following the GIC v1 spec (ie A9). 1352 * GIC v2 defines a larger memory region (0x1000) so this will need 1353 * to be extended when we implement A15. 1354 */ 1355 for (i = 0; i < s->num_cpu; i++) { 1356 s->backref[i] = s; 1357 memory_region_init_io(&s->cpuiomem[i+1], OBJECT(s), &gic_cpu_ops, 1358 &s->backref[i], "gic_cpu", 0x100); 1359 sysbus_init_mmio(sbd, &s->cpuiomem[i+1]); 1360 } 1361 } 1362 1363 static void arm_gic_class_init(ObjectClass *klass, void *data) 1364 { 1365 DeviceClass *dc = DEVICE_CLASS(klass); 1366 ARMGICClass *agc = ARM_GIC_CLASS(klass); 1367 1368 agc->parent_realize = dc->realize; 1369 dc->realize = arm_gic_realize; 1370 } 1371 1372 static const TypeInfo arm_gic_info = { 1373 .name = TYPE_ARM_GIC, 1374 .parent = TYPE_ARM_GIC_COMMON, 1375 .instance_size = sizeof(GICState), 1376 .class_init = arm_gic_class_init, 1377 .class_size = sizeof(ARMGICClass), 1378 }; 1379 1380 static void arm_gic_register_types(void) 1381 { 1382 type_register_static(&arm_gic_info); 1383 } 1384 1385 type_init(arm_gic_register_types) 1386