1 /* 2 * ARM Generic/Distributed Interrupt Controller 3 * 4 * Copyright (c) 2006-2007 CodeSourcery. 5 * Written by Paul Brook 6 * 7 * This code is licensed under the GPL. 8 */ 9 10 /* This file contains implementation code for the RealView EB interrupt 11 * controller, MPCore distributed interrupt controller and ARMv7-M 12 * Nested Vectored Interrupt Controller. 13 * It is compiled in two ways: 14 * (1) as a standalone file to produce a sysbus device which is a GIC 15 * that can be used on the realview board and as one of the builtin 16 * private peripherals for the ARM MP CPUs (11MPCore, A9, etc) 17 * (2) by being directly #included into armv7m_nvic.c to produce the 18 * armv7m_nvic device. 19 */ 20 21 #include "hw/sysbus.h" 22 #include "gic_internal.h" 23 #include "qom/cpu.h" 24 25 //#define DEBUG_GIC 26 27 #ifdef DEBUG_GIC 28 #define DPRINTF(fmt, ...) \ 29 do { fprintf(stderr, "arm_gic: " fmt , ## __VA_ARGS__); } while (0) 30 #else 31 #define DPRINTF(fmt, ...) do {} while(0) 32 #endif 33 34 static const uint8_t gic_id[] = { 35 0x90, 0x13, 0x04, 0x00, 0x0d, 0xf0, 0x05, 0xb1 36 }; 37 38 #define NUM_CPU(s) ((s)->num_cpu) 39 40 static inline int gic_get_current_cpu(GICState *s) 41 { 42 if (s->num_cpu > 1) { 43 return current_cpu->cpu_index; 44 } 45 return 0; 46 } 47 48 /* Return true if this GIC config has interrupt groups, which is 49 * true if we're a GICv2, or a GICv1 with the security extensions. 50 */ 51 static inline bool gic_has_groups(GICState *s) 52 { 53 return s->revision == 2 || s->security_extn; 54 } 55 56 /* TODO: Many places that call this routine could be optimized. */ 57 /* Update interrupt status after enabled or pending bits have been changed. 
*/ 58 void gic_update(GICState *s) 59 { 60 int best_irq; 61 int best_prio; 62 int irq; 63 int level; 64 int cpu; 65 int cm; 66 67 for (cpu = 0; cpu < NUM_CPU(s); cpu++) { 68 cm = 1 << cpu; 69 s->current_pending[cpu] = 1023; 70 if (!(s->ctlr & (GICD_CTLR_EN_GRP0 | GICD_CTLR_EN_GRP1)) 71 || !(s->cpu_ctlr[cpu] & (GICC_CTLR_EN_GRP0 | GICC_CTLR_EN_GRP1))) { 72 qemu_irq_lower(s->parent_irq[cpu]); 73 return; 74 } 75 best_prio = 0x100; 76 best_irq = 1023; 77 for (irq = 0; irq < s->num_irq; irq++) { 78 if (GIC_TEST_ENABLED(irq, cm) && gic_test_pending(s, irq, cm) && 79 (irq < GIC_INTERNAL || GIC_TARGET(irq) & cm)) { 80 if (GIC_GET_PRIORITY(irq, cpu) < best_prio) { 81 best_prio = GIC_GET_PRIORITY(irq, cpu); 82 best_irq = irq; 83 } 84 } 85 } 86 level = 0; 87 if (best_prio < s->priority_mask[cpu]) { 88 s->current_pending[cpu] = best_irq; 89 if (best_prio < s->running_priority[cpu]) { 90 DPRINTF("Raised pending IRQ %d (cpu %d)\n", best_irq, cpu); 91 level = 1; 92 } 93 } 94 qemu_set_irq(s->parent_irq[cpu], level); 95 } 96 } 97 98 void gic_set_pending_private(GICState *s, int cpu, int irq) 99 { 100 int cm = 1 << cpu; 101 102 if (gic_test_pending(s, irq, cm)) { 103 return; 104 } 105 106 DPRINTF("Set %d pending cpu %d\n", irq, cpu); 107 GIC_SET_PENDING(irq, cm); 108 gic_update(s); 109 } 110 111 static void gic_set_irq_11mpcore(GICState *s, int irq, int level, 112 int cm, int target) 113 { 114 if (level) { 115 GIC_SET_LEVEL(irq, cm); 116 if (GIC_TEST_EDGE_TRIGGER(irq) || GIC_TEST_ENABLED(irq, cm)) { 117 DPRINTF("Set %d pending mask %x\n", irq, target); 118 GIC_SET_PENDING(irq, target); 119 } 120 } else { 121 GIC_CLEAR_LEVEL(irq, cm); 122 } 123 } 124 125 static void gic_set_irq_generic(GICState *s, int irq, int level, 126 int cm, int target) 127 { 128 if (level) { 129 GIC_SET_LEVEL(irq, cm); 130 DPRINTF("Set %d pending mask %x\n", irq, target); 131 if (GIC_TEST_EDGE_TRIGGER(irq)) { 132 GIC_SET_PENDING(irq, target); 133 } 134 } else { 135 GIC_CLEAR_LEVEL(irq, cm); 136 } 137 } 138 139 
/* Process a change in an external IRQ input. */
static void gic_set_irq(void *opaque, int irq, int level)
{
    /* Meaning of the 'irq' parameter:
     *  [0..N-1] : external interrupts
     *  [N..N+31] : PPI (internal) interrupts for CPU 0
     *  [N+32..N+63] : PPI (internal) interrupts for CPU 1
     *  ...
     */
    GICState *s = (GICState *)opaque;
    int cm, target;
    if (irq < (s->num_irq - GIC_INTERNAL)) {
        /* The first external input line is internal interrupt 32.  */
        cm = ALL_CPU_MASK;
        irq += GIC_INTERNAL;
        target = GIC_TARGET(irq);
    } else {
        /* Per-CPU PPI inputs: decode which CPU this line belongs to. */
        int cpu;
        irq -= (s->num_irq - GIC_INTERNAL);
        cpu = irq / GIC_INTERNAL;
        irq %= GIC_INTERNAL;
        cm = 1 << cpu;
        target = cm;
    }

    /* SGIs (0..15) are raised only via GICD_SGIR, never via input lines. */
    assert(irq >= GIC_NR_SGIS);

    if (level == GIC_TEST_LEVEL(irq, cm)) {
        /* No change in the line level: nothing to do. */
        return;
    }

    if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) {
        gic_set_irq_11mpcore(s, irq, level, cm, target);
    } else {
        gic_set_irq_generic(s, irq, level, cm, target);
    }

    gic_update(s);
}

/* Make 'irq' the running interrupt on 'cpu' (1023 meaning "none") and set
 * the running priority accordingly (0x100 is below any real priority).
 */
static void gic_set_running_irq(GICState *s, int cpu, int irq)
{
    s->running_irq[cpu] = irq;
    if (irq == 1023) {
        s->running_priority[cpu] = 0x100;
    } else {
        s->running_priority[cpu] = GIC_GET_PRIORITY(irq, cpu);
    }
    gic_update(s);
}

/* Acknowledge the highest priority pending interrupt for 'cpu' (a read of
 * GICC_IAR): returns the interrupt ID, with the source CPU in bits [12:10]
 * for SGIs on GICs that track SGI sources, or 1023 if nothing is pending.
 */
uint32_t gic_acknowledge_irq(GICState *s, int cpu)
{
    int ret, irq, src;
    int cm = 1 << cpu;
    irq = s->current_pending[cpu];
    if (irq == 1023
            || GIC_GET_PRIORITY(irq, cpu) >= s->running_priority[cpu]) {
        DPRINTF("ACK no pending IRQ\n");
        return 1023;
    }
    /* Remember the previously running interrupt so gic_complete_irq can
     * unwind the chain of active interrupts.
     */
    s->last_active[irq][cpu] = s->running_irq[cpu];

    if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) {
        /* Clear pending flags for both level and edge triggered interrupts.
         * Level triggered IRQs will be reasserted once they become inactive.
         */
        GIC_CLEAR_PENDING(irq, GIC_TEST_MODEL(irq) ? ALL_CPU_MASK : cm);
        ret = irq;
    } else {
        if (irq < GIC_NR_SGIS) {
            /* Lookup the source CPU for the SGI and clear this in the
             * sgi_pending map.  Return the src and clear the overall pending
             * state on this CPU if the SGI is not pending from any CPUs.
             */
            assert(s->sgi_pending[irq][cpu] != 0);
            src = ctz32(s->sgi_pending[irq][cpu]);
            s->sgi_pending[irq][cpu] &= ~(1 << src);
            if (s->sgi_pending[irq][cpu] == 0) {
                GIC_CLEAR_PENDING(irq, GIC_TEST_MODEL(irq) ? ALL_CPU_MASK : cm);
            }
            ret = irq | ((src & 0x7) << 10);
        } else {
            /* Clear pending state for both level and edge triggered
             * interrupts. (level triggered interrupts with an active line
             * remain pending, see gic_test_pending)
             */
            GIC_CLEAR_PENDING(irq, GIC_TEST_MODEL(irq) ? ALL_CPU_MASK : cm);
            ret = irq;
        }
    }

    gic_set_running_irq(s, cpu, irq);
    DPRINTF("ACK %d\n", irq);
    return ret;
}

/* Set the priority of an interrupt: priority1 is banked per-CPU for
 * private interrupts, priority2 is shared for SPIs.
 */
void gic_set_priority(GICState *s, int cpu, int irq, uint8_t val)
{
    if (irq < GIC_INTERNAL) {
        s->priority1[irq][cpu] = val;
    } else {
        s->priority2[(irq) - GIC_INTERNAL] = val;
    }
}

/* Read GICC_CTLR, presenting the reduced Non-secure banked view when the
 * access is NS on a GIC with the security extensions.
 */
static uint32_t gic_get_cpu_control(GICState *s, int cpu, MemTxAttrs attrs)
{
    uint32_t ret = s->cpu_ctlr[cpu];

    if (s->security_extn && !attrs.secure) {
        /* Construct the NS banked view of GICC_CTLR from the correct
         * bits of the S banked view. We don't need to move the bypass
         * control bits because we don't implement that (IMPDEF) part
         * of the GIC architecture.
         */
        ret = (ret & (GICC_CTLR_EN_GRP1 | GICC_CTLR_EOIMODE_NS)) >> 1;
    }
    return ret;
}

/* Write GICC_CTLR, masking to the bits writable for this GIC revision and
 * security state.
 */
static void gic_set_cpu_control(GICState *s, int cpu, uint32_t value,
                                MemTxAttrs attrs)
{
    uint32_t mask;

    if (s->security_extn && !attrs.secure) {
        /* The NS view can only write certain bits in the register;
         * the rest are unchanged
         */
        mask = GICC_CTLR_EN_GRP1;
        if (s->revision == 2) {
            mask |= GICC_CTLR_EOIMODE_NS;
        }
        s->cpu_ctlr[cpu] &= ~mask;
        s->cpu_ctlr[cpu] |= (value << 1) & mask;
    } else {
        if (s->revision == 2) {
            mask = s->security_extn ? GICC_CTLR_V2_S_MASK : GICC_CTLR_V2_MASK;
        } else {
            mask = s->security_extn ? GICC_CTLR_V1_S_MASK : GICC_CTLR_V1_MASK;
        }
        s->cpu_ctlr[cpu] = value & mask;
    }
    DPRINTF("CPU Interface %d: Group0 Interrupts %sabled, "
            "Group1 Interrupts %sabled\n", cpu,
            (s->cpu_ctlr[cpu] & GICC_CTLR_EN_GRP0) ? "En" : "Dis",
            (s->cpu_ctlr[cpu] & GICC_CTLR_EN_GRP1) ? "En" : "Dis");
}

/* Read GICC_RPR: NS accesses on a GIC with the security extensions see
 * a shifted view of the priority, or read-as-zero.
 */
static uint8_t gic_get_running_priority(GICState *s, int cpu, MemTxAttrs attrs)
{
    if (s->security_extn && !attrs.secure) {
        if (s->running_priority[cpu] & 0x80) {
            /* Running priority in upper half of range: return the Non-secure
             * view of the priority.
             */
            return s->running_priority[cpu] << 1;
        } else {
            /* Running priority in lower half of range: RAZ */
            return 0;
        }
    } else {
        return s->running_priority[cpu];
    }
}

/* Complete (EOI) an interrupt for 'cpu': a write of 'irq' to GICC_EOIR. */
void gic_complete_irq(GICState *s, int cpu, int irq)
{
    int update = 0;
    int cm = 1 << cpu;
    DPRINTF("EOI %d\n", irq);
    if (irq >= s->num_irq) {
        /* This handles two cases:
         * 1. If software writes the ID of a spurious interrupt [ie 1023]
         * to the GICC_EOIR, the GIC ignores that write.
         * 2. If software writes the number of a non-existent interrupt
         * this must be a subcase of "value written does not match the last
         * valid interrupt value read from the Interrupt Acknowledge
         * register" and so this is UNPREDICTABLE. We choose to ignore it.
         */
        return;
    }
    if (s->running_irq[cpu] == 1023)
        return; /* No active IRQ.  */

    if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) {
        /* Mark level triggered interrupts as pending if they are still
           raised.  */
        if (!GIC_TEST_EDGE_TRIGGER(irq) && GIC_TEST_ENABLED(irq, cm)
            && GIC_TEST_LEVEL(irq, cm) && (GIC_TARGET(irq) & cm) != 0) {
            DPRINTF("Set %d pending mask %x\n", irq, cm);
            GIC_SET_PENDING(irq, cm);
            update = 1;
        }
    }

    if (irq != s->running_irq[cpu]) {
        /* Complete an IRQ that is not currently running: unlink it from
         * the last_active chain recorded at acknowledge time.
         */
        int tmp = s->running_irq[cpu];
        while (s->last_active[tmp][cpu] != 1023) {
            if (s->last_active[tmp][cpu] == irq) {
                s->last_active[tmp][cpu] = s->last_active[irq][cpu];
                break;
            }
            tmp = s->last_active[tmp][cpu];
        }
        if (update) {
            gic_update(s);
        }
    } else {
        /* Complete the current running IRQ.  */
        gic_set_running_irq(s, cpu, s->last_active[s->running_irq[cpu]][cpu]);
    }
}

/* Handle a byte-wide read from the distributor register frame.
 * Returns the register byte, or 0 (after logging a guest error) for
 * offsets that do not decode to a register.
 */
static uint32_t gic_dist_readb(void *opaque, hwaddr offset, MemTxAttrs attrs)
{
    GICState *s = (GICState *)opaque;
    uint32_t res;
    int irq;
    int i;
    int cpu;
    int cm;
    int mask;

    cpu = gic_get_current_cpu(s);
    cm = 1 << cpu;
    if (offset < 0x100) {
        if (offset == 0) {      /* GICD_CTLR */
            if (s->security_extn && !attrs.secure) {
                /* The NS bank of this register is just an alias of the
                 * EnableGrp1 bit in the S bank version.
                 */
                return extract32(s->ctlr, 1, 1);
            } else {
                return s->ctlr;
            }
        }
        if (offset == 4)
            /* Interrupt Controller Type Register */
            return ((s->num_irq / 32) - 1)
                    | ((NUM_CPU(s) - 1) << 5)
                    | (s->security_extn << 10);
        if (offset < 0x08)
            return 0;
        if (offset >= 0x80) {
            /* Interrupt Group Registers: these RAZ/WI if this is an NS
             * access to a GIC with the security extensions, or if the GIC
             * doesn't have groups at all.
             */
            res = 0;
            if (!(s->security_extn && !attrs.secure) && gic_has_groups(s)) {
                /* Every byte offset holds 8 group status bits */
                irq = (offset - 0x080) * 8 + GIC_BASE_IRQ;
                if (irq >= s->num_irq) {
                    goto bad_reg;
                }
                for (i = 0; i < 8; i++) {
                    if (GIC_TEST_GROUP(irq + i, cm)) {
                        res |= (1 << i);
                    }
                }
            }
            return res;
        }
        goto bad_reg;
    } else if (offset < 0x200) {
        /* Interrupt Set/Clear Enable.  */
        if (offset < 0x180)
            irq = (offset - 0x100) * 8;
        else
            irq = (offset - 0x180) * 8;
        irq += GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        res = 0;
        for (i = 0; i < 8; i++) {
            if (GIC_TEST_ENABLED(irq + i, cm)) {
                res |= (1 << i);
            }
        }
    } else if (offset < 0x300) {
        /* Interrupt Set/Clear Pending.  */
        if (offset < 0x280)
            irq = (offset - 0x200) * 8;
        else
            irq = (offset - 0x280) * 8;
        irq += GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        res = 0;
        mask = (irq < GIC_INTERNAL) ?  cm : ALL_CPU_MASK;
        for (i = 0; i < 8; i++) {
            if (gic_test_pending(s, irq + i, mask)) {
                res |= (1 << i);
            }
        }
    } else if (offset < 0x400) {
        /* Interrupt Active.  */
        irq = (offset - 0x300) * 8 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        res = 0;
        mask = (irq < GIC_INTERNAL) ?  cm : ALL_CPU_MASK;
        for (i = 0; i < 8; i++) {
            if (GIC_TEST_ACTIVE(irq + i, mask)) {
                res |= (1 << i);
            }
        }
    } else if (offset < 0x800) {
        /* Interrupt Priority.  */
        irq = (offset - 0x400) + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        res = GIC_GET_PRIORITY(irq, cpu);
    } else if (offset < 0xc00) {
        /* Interrupt CPU Target.  */
        if (s->num_cpu == 1 && s->revision != REV_11MPCORE) {
            /* For uniprocessor GICs these RAZ/WI */
            res = 0;
        } else {
            irq = (offset - 0x800) + GIC_BASE_IRQ;
            if (irq >= s->num_irq) {
                goto bad_reg;
            }
            if (irq >= 29 && irq <= 31) {
                /* These banked PPIs always target the reading CPU. */
                res = cm;
            } else {
                res = GIC_TARGET(irq);
            }
        }
    } else if (offset < 0xf00) {
        /* Interrupt Configuration: 2 bits per interrupt (model/trigger). */
        irq = (offset - 0xc00) * 4 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        res = 0;
        for (i = 0; i < 4; i++) {
            if (GIC_TEST_MODEL(irq + i))
                res |= (1 << (i * 2));
            if (GIC_TEST_EDGE_TRIGGER(irq + i))
                res |= (2 << (i * 2));
        }
    } else if (offset < 0xf10) {
        goto bad_reg;
    } else if (offset < 0xf30) {
        /* SGI pending source registers: not present on 11MPCore/NVIC. */
        if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) {
            goto bad_reg;
        }

        if (offset < 0xf20) {
            /* GICD_CPENDSGIRn */
            irq = (offset - 0xf10);
        } else {
            /* GICD_SPENDSGIRn */
            irq = (offset - 0xf20);
        }

        res = s->sgi_pending[irq][cpu];
    } else if (offset < 0xfe0) {
        goto bad_reg;
    } else /* offset >= 0xfe0 */ {
        /* ID registers: one byte per word offset, other bytes RAZ. */
        if (offset & 3) {
            res = 0;
        } else {
            res = gic_id[(offset - 0xfe0) >> 2];
        }
    }
    return res;
bad_reg:
    qemu_log_mask(LOG_GUEST_ERROR,
                  "gic_dist_readb: Bad offset %x\n", (int)offset);
    return 0;
}

/* Distributor read dispatch: assemble 1/2/4 byte reads from byte reads. */
static MemTxResult gic_dist_read(void *opaque, hwaddr offset, uint64_t *data,
                                 unsigned size, MemTxAttrs attrs)
{
    switch (size) {
    case 1:
        *data = gic_dist_readb(opaque, offset, attrs);
        return MEMTX_OK;
    case 2:
        *data = gic_dist_readb(opaque, offset, attrs);
        *data |= gic_dist_readb(opaque, offset + 1, attrs) << 8;
        return MEMTX_OK;
    case 4:
        *data = gic_dist_readb(opaque, offset, attrs);
        *data |= gic_dist_readb(opaque, offset + 1, attrs) << 8;
        *data |= gic_dist_readb(opaque, offset + 2, attrs) << 16;
        *data |= gic_dist_readb(opaque, offset + 3, attrs) << 24;
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

/* Handle a byte-wide write to the distributor register frame. */
static void gic_dist_writeb(void *opaque, hwaddr offset,
                            uint32_t value, MemTxAttrs attrs)
{
    GICState *s = (GICState *)opaque;
    int irq;
    int i;
    int cpu;

    cpu = gic_get_current_cpu(s);
    if (offset < 0x100) {
        if (offset == 0) {
            if (s->security_extn && !attrs.secure) {
                /* NS version is just an alias of the S version's bit 1 */
                s->ctlr = deposit32(s->ctlr, 1, 1, value);
            } else if (gic_has_groups(s)) {
                s->ctlr = value & (GICD_CTLR_EN_GRP0 | GICD_CTLR_EN_GRP1);
            } else {
                s->ctlr = value & GICD_CTLR_EN_GRP0;
            }
            DPRINTF("Distributor: Group0 %sabled; Group 1 %sabled\n",
                    s->ctlr & GICD_CTLR_EN_GRP0 ? "En" : "Dis",
                    s->ctlr & GICD_CTLR_EN_GRP1 ? "En" : "Dis");
        } else if (offset < 4) {
            /* ignored.  */
        } else if (offset >= 0x80) {
            /* Interrupt Group Registers: RAZ/WI for NS access to secure
             * GIC, or for GICs without groups.
             */
            if (!(s->security_extn && !attrs.secure) && gic_has_groups(s)) {
                /* Every byte offset holds 8 group status bits */
                irq = (offset - 0x80) * 8 + GIC_BASE_IRQ;
                if (irq >= s->num_irq) {
                    goto bad_reg;
                }
                for (i = 0; i < 8; i++) {
                    /* Group bits are banked for private interrupts */
                    int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;
                    if (value & (1 << i)) {
                        /* Group1 (Non-secure) */
                        GIC_SET_GROUP(irq + i, cm);
                    } else {
                        /* Group0 (Secure) */
                        GIC_CLEAR_GROUP(irq + i, cm);
                    }
                }
            }
        } else {
            goto bad_reg;
        }
    } else if (offset < 0x180) {
        /* Interrupt Set Enable.  */
        irq = (offset - 0x100) * 8 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        if (irq < GIC_NR_SGIS) {
            /* SGIs are always enabled: treat as a write of all-ones. */
            value = 0xff;
        }

        for (i = 0; i < 8; i++) {
            if (value & (1 << i)) {
                int mask =
                    (irq < GIC_INTERNAL) ? (1 << cpu) : GIC_TARGET(irq + i);
                int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;

                if (!GIC_TEST_ENABLED(irq + i, cm)) {
                    DPRINTF("Enabled IRQ %d\n", irq + i);
                }
                GIC_SET_ENABLED(irq + i, cm);
                /* If a raised level triggered IRQ enabled then mark
                   it as pending.  */
                if (GIC_TEST_LEVEL(irq + i, mask)
                        && !GIC_TEST_EDGE_TRIGGER(irq + i)) {
                    DPRINTF("Set %d pending mask %x\n", irq + i, mask);
                    GIC_SET_PENDING(irq + i, mask);
                }
            }
        }
    } else if (offset < 0x200) {
        /* Interrupt Clear Enable.  */
        irq = (offset - 0x180) * 8 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        if (irq < GIC_NR_SGIS) {
            /* SGIs cannot be disabled: ignore the write. */
            value = 0;
        }

        for (i = 0; i < 8; i++) {
            if (value & (1 << i)) {
                int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;

                if (GIC_TEST_ENABLED(irq + i, cm)) {
                    DPRINTF("Disabled IRQ %d\n", irq + i);
                }
                GIC_CLEAR_ENABLED(irq + i, cm);
            }
        }
    } else if (offset < 0x280) {
        /* Interrupt Set Pending.  */
        irq = (offset - 0x200) * 8 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        if (irq < GIC_NR_SGIS) {
            /* SGI pending state is only set via GICD_SGIR/GICD_SPENDSGIRn. */
            value = 0;
        }

        for (i = 0; i < 8; i++) {
            if (value & (1 << i)) {
                GIC_SET_PENDING(irq + i, GIC_TARGET(irq + i));
            }
        }
    } else if (offset < 0x300) {
        /* Interrupt Clear Pending.  */
        irq = (offset - 0x280) * 8 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        if (irq < GIC_NR_SGIS) {
            value = 0;
        }

        for (i = 0; i < 8; i++) {
            /* ??? This currently clears the pending bit for all CPUs, even
               for per-CPU interrupts.  It's unclear whether this is the
               correct behavior.  */
            if (value & (1 << i)) {
                GIC_CLEAR_PENDING(irq + i, ALL_CPU_MASK);
            }
        }
    } else if (offset < 0x400) {
        /* Interrupt Active: read-only here, writes are a guest error. */
        goto bad_reg;
    } else if (offset < 0x800) {
        /* Interrupt Priority.  */
        irq = (offset - 0x400) + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        gic_set_priority(s, cpu, irq, value);
    } else if (offset < 0xc00) {
        /* Interrupt CPU Target. RAZ/WI on uniprocessor GICs, with the
         * annoying exception of the 11MPCore's GIC.
         */
        if (s->num_cpu != 1 || s->revision == REV_11MPCORE) {
            irq = (offset - 0x800) + GIC_BASE_IRQ;
            if (irq >= s->num_irq) {
                goto bad_reg;
            }
            if (irq < 29) {
                value = 0;
            } else if (irq < GIC_INTERNAL) {
                value = ALL_CPU_MASK;
            }
            s->irq_target[irq] = value & ALL_CPU_MASK;
        }
    } else if (offset < 0xf00) {
        /* Interrupt Configuration: 2 bits per interrupt (model/trigger). */
        irq = (offset - 0xc00) * 4 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        if (irq < GIC_NR_SGIS)
            /* SGI config is fixed: force edge-triggered bits on. */
            value |= 0xaa;
        for (i = 0; i < 4; i++) {
            if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) {
                if (value & (1 << (i * 2))) {
                    GIC_SET_MODEL(irq + i);
                } else {
                    GIC_CLEAR_MODEL(irq + i);
                }
            }
            if (value & (2 << (i * 2))) {
                GIC_SET_EDGE_TRIGGER(irq + i);
            } else {
                GIC_CLEAR_EDGE_TRIGGER(irq + i);
            }
        }
    } else if (offset < 0xf10) {
        /* 0xf00 is only handled for 32-bit writes.  */
        goto bad_reg;
    } else if (offset < 0xf20) {
        /* GICD_CPENDSGIRn: clear per-source SGI pending bits. */
        if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) {
            goto bad_reg;
        }
        irq = (offset - 0xf10);

        s->sgi_pending[irq][cpu] &= ~value;
        if (s->sgi_pending[irq][cpu] == 0) {
            /* No source still has this SGI pending for us. */
            GIC_CLEAR_PENDING(irq, 1 << cpu);
        }
    } else if (offset < 0xf30) {
        /* GICD_SPENDSGIRn: set per-source SGI pending bits. */
        if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) {
            goto bad_reg;
        }
        irq = (offset - 0xf20);

        GIC_SET_PENDING(irq, 1 << cpu);
        s->sgi_pending[irq][cpu] |= value;
    } else {
        goto bad_reg;
    }
    gic_update(s);
    return;
bad_reg:
    qemu_log_mask(LOG_GUEST_ERROR,
                  "gic_dist_writeb: Bad offset %x\n", (int)offset);
}

/* Distributor write dispatch: split a halfword write into byte writes. */
static void gic_dist_writew(void *opaque, hwaddr offset,
                            uint32_t value, MemTxAttrs attrs)
{
    gic_dist_writeb(opaque, offset, value & 0xff, attrs);
    gic_dist_writeb(opaque, offset + 1, value >> 8, attrs);
}

/* Distributor word write: handles GICD_SGIR (0xf00) directly, everything
 * else is split into halfword/byte writes.
 */
static void gic_dist_writel(void *opaque, hwaddr offset,
                            uint32_t value, MemTxAttrs attrs)
{
    GICState *s = (GICState *)opaque;
    if (offset == 0xf00) {
        int cpu;
        int irq;
        int mask;
        int target_cpu;

        cpu = gic_get_current_cpu(s);
        irq = value & 0x3ff;
        /* Bits [25:24] select the target filter. */
        switch ((value >> 24) & 3) {
        case 0:
            /* Explicit target list in bits [23:16]. */
            mask = (value >> 16) & ALL_CPU_MASK;
            break;
        case 1:
            /* All CPUs except the requesting one. */
            mask = ALL_CPU_MASK ^ (1 << cpu);
            break;
        case 2:
            /* The requesting CPU only. */
            mask = 1 << cpu;
            break;
        default:
            DPRINTF("Bad Soft Int target filter\n");
            mask = ALL_CPU_MASK;
            break;
        }
        GIC_SET_PENDING(irq, mask);
        /* Record the requesting CPU as an SGI source on each target. */
        target_cpu = ctz32(mask);
        while (target_cpu < GIC_NCPU) {
            s->sgi_pending[irq][target_cpu] |= (1 << cpu);
            mask &= ~(1 << target_cpu);
            target_cpu = ctz32(mask);
        }
        gic_update(s);
        return;
    }
    gic_dist_writew(opaque, offset, value & 0xffff, attrs);
    gic_dist_writew(opaque, offset + 2, value >> 16, attrs);
}

/* Distributor write dispatch by access size. */
static MemTxResult gic_dist_write(void *opaque, hwaddr offset, uint64_t data,
                                  unsigned size, MemTxAttrs attrs)
{
    switch (size) {
    case 1:
        gic_dist_writeb(opaque, offset, data, attrs);
        return MEMTX_OK;
    case 2:
        gic_dist_writew(opaque, offset, data, attrs);
        return MEMTX_OK;
    case 4:
        gic_dist_writel(opaque, offset, data, attrs);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

static const MemoryRegionOps gic_dist_ops = {
    .read_with_attrs = gic_dist_read,
    .write_with_attrs = gic_dist_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* Read a GIC CPU interface register on behalf of 'cpu'.
 * Returns MEMTX_ERROR (after logging a guest error) for unknown offsets.
 */
static MemTxResult gic_cpu_read(GICState *s, int cpu, int offset,
                                uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case 0x00: /* Control */
        *data = gic_get_cpu_control(s, cpu, attrs);
        break;
    case 0x04: /* Priority mask */
        *data = s->priority_mask[cpu];
        break;
    case 0x08: /* Binary Point */
        if (s->security_extn && !attrs.secure) {
            /* BPR is banked. Non-secure copy stored in ABPR.  */
            *data = s->abpr[cpu];
        } else {
            *data = s->bpr[cpu];
        }
        break;
    case 0x0c: /* Acknowledge */
        *data = gic_acknowledge_irq(s, cpu);
        break;
    case 0x14: /* Running Priority */
        *data = gic_get_running_priority(s, cpu, attrs);
        break;
    case 0x18: /* Highest Pending Interrupt */
        *data = s->current_pending[cpu];
        break;
    case 0x1c: /* Aliased Binary Point */
        /* GIC v2, no security: ABPR
         * GIC v1, no security: not implemented (RAZ/WI)
         * With security extensions, secure access: ABPR (alias of NS BPR)
         * With security extensions, nonsecure access: RAZ/WI
         */
        if (!gic_has_groups(s) || (s->security_extn && !attrs.secure)) {
            *data = 0;
        } else {
            *data = s->abpr[cpu];
        }
        break;
    case 0xd0: case 0xd4: case 0xd8: case 0xdc:
        /* Active Priority Registers (GICC_APRn). */
        *data = s->apr[(offset - 0xd0) / 4][cpu];
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "gic_cpu_read: Bad offset %x\n", (int)offset);
        return MEMTX_ERROR;
    }
    return MEMTX_OK;
}

/* Write a GIC CPU interface register on behalf of 'cpu'.
 * Returns MEMTX_ERROR (after logging a guest error) for unknown offsets.
 */
static MemTxResult gic_cpu_write(GICState *s, int cpu, int offset,
                                 uint32_t value, MemTxAttrs attrs)
{
    switch (offset) {
    case 0x00: /* Control */
        gic_set_cpu_control(s, cpu, value, attrs);
        break;
    case 0x04: /* Priority mask */
        s->priority_mask[cpu] = (value & 0xff);
        break;
    case 0x08: /* Binary Point */
        if (s->security_extn && !attrs.secure) {
            s->abpr[cpu] = MAX(value & 0x7, GIC_MIN_ABPR);
        } else {
            s->bpr[cpu] = MAX(value & 0x7, GIC_MIN_BPR);
        }
        break;
    case 0x10: /* End Of Interrupt */
        /* gic_complete_irq already calls gic_update, so return directly. */
        gic_complete_irq(s, cpu, value & 0x3ff);
        return MEMTX_OK;
    case 0x1c: /* Aliased Binary Point */
        if (!gic_has_groups(s) || (s->security_extn && !attrs.secure)) {
            /* unimplemented, or NS access: RAZ/WI */
            return MEMTX_OK;
        } else {
            s->abpr[cpu] = MAX(value & 0x7, GIC_MIN_ABPR);
        }
        break;
    case 0xd0: case 0xd4: case 0xd8: case 0xdc:
        qemu_log_mask(LOG_UNIMP, "Writing APR not implemented\n");
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "gic_cpu_write: Bad offset %x\n", (int)offset);
        return MEMTX_ERROR;
    }
    gic_update(s);
    return MEMTX_OK;
}

/* Wrappers to read/write the GIC CPU interface for the current CPU */
static MemTxResult gic_thiscpu_read(void *opaque, hwaddr addr, uint64_t *data,
                                    unsigned size, MemTxAttrs attrs)
{
    GICState *s = (GICState *)opaque;
    return gic_cpu_read(s, gic_get_current_cpu(s), addr, data, attrs);
}

static MemTxResult gic_thiscpu_write(void *opaque, hwaddr addr,
                                     uint64_t value, unsigned size,
                                     MemTxAttrs attrs)
{
    GICState *s = (GICState *)opaque;
    return gic_cpu_write(s, gic_get_current_cpu(s), addr, value, attrs);
}

/* Wrappers to read/write the GIC CPU interface for a specific CPU.
 * These just decode the opaque pointer into GICState* + cpu id.
 */
static MemTxResult gic_do_cpu_read(void *opaque, hwaddr addr, uint64_t *data,
                                   unsigned size, MemTxAttrs attrs)
{
    GICState **backref = (GICState **)opaque;
    GICState *s = *backref;
    /* The cpu id is the index of the backref slot within s->backref. */
    int id = (backref - s->backref);
    return gic_cpu_read(s, id, addr, data, attrs);
}

static MemTxResult gic_do_cpu_write(void *opaque, hwaddr addr,
                                    uint64_t value, unsigned size,
                                    MemTxAttrs attrs)
{
    GICState **backref = (GICState **)opaque;
    GICState *s = *backref;
    int id = (backref - s->backref);
    return gic_cpu_write(s, id, addr, value, attrs);
}

static const MemoryRegionOps gic_thiscpu_ops = {
    .read_with_attrs = gic_thiscpu_read,
    .write_with_attrs = gic_thiscpu_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps gic_cpu_ops = {
    .read_with_attrs = gic_do_cpu_read,
    .write_with_attrs = gic_do_cpu_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* Create the input GPIO lines, per-CPU output IRQ/FIQ lines and the
 * distributor memory region. Shared with the armv7m_nvic build.
 */
void gic_init_irqs_and_distributor(GICState *s)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(s);
    int i;

    i = s->num_irq - GIC_INTERNAL;
    /* For the GIC, also expose incoming GPIO lines for PPIs for each CPU.
     * GPIO array layout is thus:
     *  [0..N-1] SPIs
     *  [N..N+31] PPIs for CPU 0
     *  [N+32..N+63] PPIs for CPU 1
     *   ...
     */
    if (s->revision != REV_NVIC) {
        i += (GIC_INTERNAL * s->num_cpu);
    }
    qdev_init_gpio_in(DEVICE(s), gic_set_irq, i);
    for (i = 0; i < NUM_CPU(s); i++) {
        sysbus_init_irq(sbd, &s->parent_irq[i]);
    }
    for (i = 0; i < NUM_CPU(s); i++) {
        sysbus_init_irq(sbd, &s->parent_fiq[i]);
    }
    memory_region_init_io(&s->iomem, OBJECT(s), &gic_dist_ops, s,
                          "gic_dist", 0x1000);
}

static void arm_gic_realize(DeviceState *dev, Error **errp)
{
    /* Device instance realize function for the GIC sysbus device */
    int i;
    GICState *s = ARM_GIC(dev);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    ARMGICClass *agc = ARM_GIC_GET_CLASS(s);
    Error *local_err = NULL;

    /* Run the parent class (arm_gic_common) realize first. */
    agc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    gic_init_irqs_and_distributor(s);

    /* Memory regions for the CPU interfaces (NVIC doesn't have these):
     * a region for "CPU interface for this core", then a region for
     * "CPU interface for core 0", "for core 1", ...
     * NB that the memory region size of 0x100 applies for the 11MPCore
     * and also cores following the GIC v1 spec (ie A9).
     * GIC v2 defines a larger memory region (0x1000) so this will need
     * to be extended when we implement A15.
     */
    memory_region_init_io(&s->cpuiomem[0], OBJECT(s), &gic_thiscpu_ops, s,
                          "gic_cpu", 0x100);
    for (i = 0; i < NUM_CPU(s); i++) {
        s->backref[i] = s;
        memory_region_init_io(&s->cpuiomem[i+1], OBJECT(s), &gic_cpu_ops,
                              &s->backref[i], "gic_cpu", 0x100);
    }
    /* Distributor */
    sysbus_init_mmio(sbd, &s->iomem);
    /* cpu interfaces (one for "current cpu" plus one per cpu) */
    for (i = 0; i <= NUM_CPU(s); i++) {
        sysbus_init_mmio(sbd, &s->cpuiomem[i]);
    }
}

static void arm_gic_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    ARMGICClass *agc = ARM_GIC_CLASS(klass);

    /* Save the parent realize so arm_gic_realize can chain to it. */
    agc->parent_realize = dc->realize;
    dc->realize = arm_gic_realize;
}

static const TypeInfo arm_gic_info = {
    .name = TYPE_ARM_GIC,
    .parent = TYPE_ARM_GIC_COMMON,
    .instance_size = sizeof(GICState),
    .class_init = arm_gic_class_init,
    .class_size = sizeof(ARMGICClass),
};

static void arm_gic_register_types(void)
{
    type_register_static(&arm_gic_info);
}

type_init(arm_gic_register_types)