/*
 * ARM Generic/Distributed Interrupt Controller
 *
 * Copyright (c) 2006-2007 CodeSourcery.
 * Written by Paul Brook
 *
 * This code is licensed under the GPL.
 */

/* This file contains implementation code for the RealView EB interrupt
 * controller, MPCore distributed interrupt controller and ARMv7-M
 * Nested Vectored Interrupt Controller.
 * It is compiled in two ways:
 *  (1) as a standalone file to produce a sysbus device which is a GIC
 *  that can be used on the realview board and as one of the builtin
 *  private peripherals for the ARM MP CPUs (11MPCore, A9, etc)
 *  (2) by being directly #included into armv7m_nvic.c to produce the
 *  armv7m_nvic device.
 */

#include "hw/sysbus.h"
#include "gic_internal.h"
#include "qom/cpu.h"

//#define DEBUG_GIC

#ifdef DEBUG_GIC
#define DPRINTF(fmt, ...) \
do { fprintf(stderr, "arm_gic: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) do {} while(0)
#endif

static const uint8_t gic_id[] = {
    0x90, 0x13, 0x04, 0x00, 0x0d, 0xf0, 0x05, 0xb1
};

#define NUM_CPU(s) ((s)->num_cpu)

static inline int gic_get_current_cpu(GICState *s)
{
    if (s->num_cpu > 1) {
        return current_cpu->cpu_index;
    }
    return 0;
}

/* TODO: Many places that call this routine could be optimized.  */
/* Update interrupt status after enabled or pending bits have been changed.  */
void gic_update(GICState *s)
{
    int best_irq;
    int best_prio;
    int irq;
    int level;
    int cpu;
    int cm;

    for (cpu = 0; cpu < NUM_CPU(s); cpu++) {
        cm = 1 << cpu;
        s->current_pending[cpu] = 1023;
        if (!s->enabled || !s->cpu_enabled[cpu]) {
            qemu_irq_lower(s->parent_irq[cpu]);
            return;
        }
        best_prio = 0x100;
        best_irq = 1023;
        for (irq = 0; irq < s->num_irq; irq++) {
            if (GIC_TEST_ENABLED(irq, cm) && gic_test_pending(s, irq, cm) &&
                (irq < GIC_INTERNAL || GIC_TARGET(irq) & cm)) {
                if (GIC_GET_PRIORITY(irq, cpu) < best_prio) {
                    best_prio = GIC_GET_PRIORITY(irq, cpu);
                    best_irq = irq;
                }
            }
        }
        level = 0;
        if (best_prio < s->priority_mask[cpu]) {
            s->current_pending[cpu] = best_irq;
            if (best_prio < s->running_priority[cpu]) {
                DPRINTF("Raised pending IRQ %d (cpu %d)\n", best_irq, cpu);
                level = 1;
            }
        }
        qemu_set_irq(s->parent_irq[cpu], level);
    }
}

void gic_set_pending_private(GICState *s, int cpu, int irq)
{
    int cm = 1 << cpu;

    if (gic_test_pending(s, irq, cm)) {
        return;
    }

    DPRINTF("Set %d pending cpu %d\n", irq, cpu);
    GIC_SET_PENDING(irq, cm);
    gic_update(s);
}

static void gic_set_irq_11mpcore(GICState *s, int irq, int level,
                                 int cm, int target)
{
    if (level) {
        GIC_SET_LEVEL(irq, cm);
        if (GIC_TEST_EDGE_TRIGGER(irq) || GIC_TEST_ENABLED(irq, cm)) {
            DPRINTF("Set %d pending mask %x\n", irq, target);
            GIC_SET_PENDING(irq, target);
        }
    } else {
        GIC_CLEAR_LEVEL(irq, cm);
    }
}

static void gic_set_irq_generic(GICState *s, int irq, int level,
                                int cm, int target)
{
    if (level) {
        GIC_SET_LEVEL(irq, cm);
        DPRINTF("Set %d pending mask %x\n", irq, target);
        if (GIC_TEST_EDGE_TRIGGER(irq)) {
            GIC_SET_PENDING(irq, target);
        }
    } else {
        GIC_CLEAR_LEVEL(irq, cm);
    }
}

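/* Descriptive note (not from the original code): the two handlers above
 * differ in how they latch pending state.  On the 11MPCore and the NVIC a
 * raised level-triggered input is latched as pending as soon as the line
 * goes high while the interrupt is enabled, whereas on later GIC revisions
 * only edge-triggered interrupts latch pending state here; for
 * level-triggered interrupts the pending state is instead derived from the
 * current line level (see gic_test_pending()).
 */

/* Illustrative input numbering for gic_set_irq() below, assuming
 * num_irq == 96: GPIO inputs [0..63] are SPIs 32..95, input 80 is
 * PPI 16 on CPU 0 and input 112 is PPI 16 on CPU 1.
 */
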
/* Process a change in an external IRQ input.  */
static void gic_set_irq(void *opaque, int irq, int level)
{
    /* Meaning of the 'irq' parameter:
     *  [0..N-1] : external interrupts
     *  [N..N+31] : PPI (internal) interrupts for CPU 0
     *  [N+32..N+63] : PPI (internal) interrupts for CPU 1
     *  ...
     */
    GICState *s = (GICState *)opaque;
    int cm, target;
    if (irq < (s->num_irq - GIC_INTERNAL)) {
        /* The first external input line is internal interrupt 32.  */
        cm = ALL_CPU_MASK;
        irq += GIC_INTERNAL;
        target = GIC_TARGET(irq);
    } else {
        int cpu;
        irq -= (s->num_irq - GIC_INTERNAL);
        cpu = irq / GIC_INTERNAL;
        irq %= GIC_INTERNAL;
        cm = 1 << cpu;
        target = cm;
    }

    assert(irq >= GIC_NR_SGIS);

    if (level == GIC_TEST_LEVEL(irq, cm)) {
        return;
    }

    if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) {
        gic_set_irq_11mpcore(s, irq, level, cm, target);
    } else {
        gic_set_irq_generic(s, irq, level, cm, target);
    }

    gic_update(s);
}

static void gic_set_running_irq(GICState *s, int cpu, int irq)
{
    s->running_irq[cpu] = irq;
    if (irq == 1023) {
        s->running_priority[cpu] = 0x100;
    } else {
        s->running_priority[cpu] = GIC_GET_PRIORITY(irq, cpu);
    }
    gic_update(s);
}

uint32_t gic_acknowledge_irq(GICState *s, int cpu)
{
    int ret, irq, src;
    int cm = 1 << cpu;
    irq = s->current_pending[cpu];
    if (irq == 1023
            || GIC_GET_PRIORITY(irq, cpu) >= s->running_priority[cpu]) {
        DPRINTF("ACK no pending IRQ\n");
        return 1023;
    }
    s->last_active[irq][cpu] = s->running_irq[cpu];

    if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) {
        /* Clear pending flags for both level and edge triggered interrupts.
         * Level triggered IRQs will be reasserted once they become inactive.
         */
        GIC_CLEAR_PENDING(irq, GIC_TEST_MODEL(irq) ? ALL_CPU_MASK : cm);
        ret = irq;
    } else {
        if (irq < GIC_NR_SGIS) {
            /* Lookup the source CPU for the SGI and clear this in the
             * sgi_pending map.  Return the src and clear the overall pending
             * state on this CPU if the SGI is not pending from any CPUs.
             */
            assert(s->sgi_pending[irq][cpu] != 0);
            src = ctz32(s->sgi_pending[irq][cpu]);
            s->sgi_pending[irq][cpu] &= ~(1 << src);
            if (s->sgi_pending[irq][cpu] == 0) {
                GIC_CLEAR_PENDING(irq, GIC_TEST_MODEL(irq) ? ALL_CPU_MASK : cm);
            }
            ret = irq | ((src & 0x7) << 10);
        } else {
            /* Clear pending state for both level and edge triggered
             * interrupts. (level triggered interrupts with an active line
             * remain pending, see gic_test_pending)
             */
            GIC_CLEAR_PENDING(irq, GIC_TEST_MODEL(irq) ? ALL_CPU_MASK : cm);
            ret = irq;
        }
    }

    gic_set_running_irq(s, cpu, irq);
    DPRINTF("ACK %d\n", irq);
    return ret;
}

void gic_set_priority(GICState *s, int cpu, int irq, uint8_t val)
{
    if (irq < GIC_INTERNAL) {
        s->priority1[irq][cpu] = val;
    } else {
        s->priority2[(irq) - GIC_INTERNAL] = val;
    }
}

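/* Descriptive note (not from the original code): running_irq[],
 * running_priority[] and last_active[][] together model a per-CPU chain of
 * nested interrupts.  gic_acknowledge_irq() records the previously running
 * interrupt in last_active[new_irq][cpu], and gic_complete_irq() below
 * restores or unlinks entries again.  An ID of 1023 terminates the chain
 * and means "no interrupt".
 */
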
void gic_complete_irq(GICState *s, int cpu, int irq)
{
    int update = 0;
    int cm = 1 << cpu;
    DPRINTF("EOI %d\n", irq);
    if (irq >= s->num_irq) {
        /* This handles two cases:
         * 1. If software writes the ID of a spurious interrupt [ie 1023]
         * to the GICC_EOIR, the GIC ignores that write.
         * 2. If software writes the number of a non-existent interrupt
         * this must be a subcase of "value written does not match the last
         * valid interrupt value read from the Interrupt Acknowledge
         * register" and so this is UNPREDICTABLE. We choose to ignore it.
         */
        return;
    }
    if (s->running_irq[cpu] == 1023)
        return; /* No active IRQ.  */

    if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) {
        /* Mark level triggered interrupts as pending if they are still
           raised.  */
        if (!GIC_TEST_EDGE_TRIGGER(irq) && GIC_TEST_ENABLED(irq, cm)
            && GIC_TEST_LEVEL(irq, cm) && (GIC_TARGET(irq) & cm) != 0) {
            DPRINTF("Set %d pending mask %x\n", irq, cm);
            GIC_SET_PENDING(irq, cm);
            update = 1;
        }
    }

    if (irq != s->running_irq[cpu]) {
        /* Complete an IRQ that is not currently running.  */
        int tmp = s->running_irq[cpu];
        while (s->last_active[tmp][cpu] != 1023) {
            if (s->last_active[tmp][cpu] == irq) {
                s->last_active[tmp][cpu] = s->last_active[irq][cpu];
                break;
            }
            tmp = s->last_active[tmp][cpu];
        }
        if (update) {
            gic_update(s);
        }
    } else {
        /* Complete the current running IRQ.  */
        gic_set_running_irq(s, cpu, s->last_active[s->running_irq[cpu]][cpu]);
    }
}

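/* Summary comment (not from the original code): distributor register map
 * decoded by gic_dist_readb()/gic_dist_writeb() below, as byte offsets
 * within the GICD region:
 *   0x000            GICD_CTLR (distributor enable)
 *   0x004            GICD_TYPER (interrupt controller type)
 *   0x080-0x0ff      Interrupt Security registers (RAZ/WI here)
 *   0x100-0x17f      Set-Enable           0x180-0x1ff  Clear-Enable
 *   0x200-0x27f      Set-Pending          0x280-0x2ff  Clear-Pending
 *   0x300-0x3ff      Active (read only here)
 *   0x400-0x7ff      Priority
 *   0x800-0xbff      CPU Targets
 *   0xc00-0xeff      Configuration
 *   0xf00            GICD_SGIR (32-bit writes only, see gic_dist_writel())
 *   0xf10-0xf2f      SGI Clear-/Set-Pending (not on 11MPCore/NVIC)
 *   0xfe0-0xfff      Identification registers
 */
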
static uint32_t gic_dist_readb(void *opaque, hwaddr offset, MemTxAttrs attrs)
{
    GICState *s = (GICState *)opaque;
    uint32_t res;
    int irq;
    int i;
    int cpu;
    int cm;
    int mask;

    cpu = gic_get_current_cpu(s);
    cm = 1 << cpu;
    if (offset < 0x100) {
        if (offset == 0)
            return s->enabled;
        if (offset == 4)
            /* Interrupt Controller Type Register */
            return ((s->num_irq / 32) - 1)
                    | ((NUM_CPU(s) - 1) << 5)
                    | (s->security_extn << 10);
        if (offset < 0x08)
            return 0;
        if (offset >= 0x80) {
            /* Interrupt Security, RAZ/WI */
            return 0;
        }
        goto bad_reg;
    } else if (offset < 0x200) {
        /* Interrupt Set/Clear Enable.  */
        if (offset < 0x180)
            irq = (offset - 0x100) * 8;
        else
            irq = (offset - 0x180) * 8;
        irq += GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        res = 0;
        for (i = 0; i < 8; i++) {
            if (GIC_TEST_ENABLED(irq + i, cm)) {
                res |= (1 << i);
            }
        }
    } else if (offset < 0x300) {
        /* Interrupt Set/Clear Pending.  */
        if (offset < 0x280)
            irq = (offset - 0x200) * 8;
        else
            irq = (offset - 0x280) * 8;
        irq += GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        res = 0;
        mask = (irq < GIC_INTERNAL) ? cm : ALL_CPU_MASK;
        for (i = 0; i < 8; i++) {
            if (gic_test_pending(s, irq + i, mask)) {
                res |= (1 << i);
            }
        }
    } else if (offset < 0x400) {
        /* Interrupt Active.  */
        irq = (offset - 0x300) * 8 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        res = 0;
        mask = (irq < GIC_INTERNAL) ? cm : ALL_CPU_MASK;
        for (i = 0; i < 8; i++) {
            if (GIC_TEST_ACTIVE(irq + i, mask)) {
                res |= (1 << i);
            }
        }
    } else if (offset < 0x800) {
        /* Interrupt Priority.  */
        irq = (offset - 0x400) + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        res = GIC_GET_PRIORITY(irq, cpu);
    } else if (offset < 0xc00) {
        /* Interrupt CPU Target.  */
        if (s->num_cpu == 1 && s->revision != REV_11MPCORE) {
            /* For uniprocessor GICs these RAZ/WI */
            res = 0;
        } else {
            irq = (offset - 0x800) + GIC_BASE_IRQ;
            if (irq >= s->num_irq) {
                goto bad_reg;
            }
            if (irq >= 29 && irq <= 31) {
                res = cm;
            } else {
                res = GIC_TARGET(irq);
            }
        }
    } else if (offset < 0xf00) {
        /* Interrupt Configuration.  */
        irq = (offset - 0xc00) * 4 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        res = 0;
        for (i = 0; i < 4; i++) {
            if (GIC_TEST_MODEL(irq + i))
                res |= (1 << (i * 2));
            if (GIC_TEST_EDGE_TRIGGER(irq + i))
                res |= (2 << (i * 2));
        }
    } else if (offset < 0xf10) {
        goto bad_reg;
    } else if (offset < 0xf30) {
        if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) {
            goto bad_reg;
        }

        if (offset < 0xf20) {
            /* GICD_CPENDSGIRn */
            irq = (offset - 0xf10);
        } else {
            irq = (offset - 0xf20);
            /* GICD_SPENDSGIRn */
        }

        res = s->sgi_pending[irq][cpu];
    } else if (offset < 0xfe0) {
        goto bad_reg;
    } else /* offset >= 0xfe0 */ {
        if (offset & 3) {
            res = 0;
        } else {
            res = gic_id[(offset - 0xfe0) >> 2];
        }
    }
    return res;
bad_reg:
    qemu_log_mask(LOG_GUEST_ERROR,
                  "gic_dist_readb: Bad offset %x\n", (int)offset);
    return 0;
}

static MemTxResult gic_dist_read(void *opaque, hwaddr offset, uint64_t *data,
                                 unsigned size, MemTxAttrs attrs)
{
    switch (size) {
    case 1:
        *data = gic_dist_readb(opaque, offset, attrs);
        return MEMTX_OK;
    case 2:
        *data = gic_dist_readb(opaque, offset, attrs);
        *data |= gic_dist_readb(opaque, offset + 1, attrs) << 8;
        return MEMTX_OK;
    case 4:
        *data = gic_dist_readb(opaque, offset, attrs);
        *data |= gic_dist_readb(opaque, offset + 1, attrs) << 8;
        *data |= gic_dist_readb(opaque, offset + 2, attrs) << 16;
        *data |= gic_dist_readb(opaque, offset + 3, attrs) << 24;
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

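/* Descriptive note (not from the original code): distributor writes are
 * likewise decomposed into byte accesses, with the exception of GICD_SGIR
 * at 0xf00, which is only meaningful as a 32-bit write and is handled
 * directly in gic_dist_writel() below.
 */
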
static void gic_dist_writeb(void *opaque, hwaddr offset,
                            uint32_t value, MemTxAttrs attrs)
{
    GICState *s = (GICState *)opaque;
    int irq;
    int i;
    int cpu;

    cpu = gic_get_current_cpu(s);
    if (offset < 0x100) {
        if (offset == 0) {
            s->enabled = (value & 1);
            DPRINTF("Distribution %sabled\n", s->enabled ? "En" : "Dis");
        } else if (offset < 4) {
            /* ignored.  */
        } else if (offset >= 0x80) {
            /* Interrupt Security Registers, RAZ/WI */
        } else {
            goto bad_reg;
        }
    } else if (offset < 0x180) {
        /* Interrupt Set Enable.  */
        irq = (offset - 0x100) * 8 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        if (irq < GIC_NR_SGIS) {
            value = 0xff;
        }

        for (i = 0; i < 8; i++) {
            if (value & (1 << i)) {
                int mask =
                    (irq < GIC_INTERNAL) ? (1 << cpu) : GIC_TARGET(irq + i);
                int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;

                if (!GIC_TEST_ENABLED(irq + i, cm)) {
                    DPRINTF("Enabled IRQ %d\n", irq + i);
                }
                GIC_SET_ENABLED(irq + i, cm);
                /* If a raised level triggered IRQ is enabled then mark
                   it as pending.  */
                if (GIC_TEST_LEVEL(irq + i, mask)
                        && !GIC_TEST_EDGE_TRIGGER(irq + i)) {
                    DPRINTF("Set %d pending mask %x\n", irq + i, mask);
                    GIC_SET_PENDING(irq + i, mask);
                }
            }
        }
    } else if (offset < 0x200) {
        /* Interrupt Clear Enable.  */
        irq = (offset - 0x180) * 8 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        if (irq < GIC_NR_SGIS) {
            value = 0;
        }

        for (i = 0; i < 8; i++) {
            if (value & (1 << i)) {
                int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;

                if (GIC_TEST_ENABLED(irq + i, cm)) {
                    DPRINTF("Disabled IRQ %d\n", irq + i);
                }
                GIC_CLEAR_ENABLED(irq + i, cm);
            }
        }
    } else if (offset < 0x280) {
        /* Interrupt Set Pending.  */
        irq = (offset - 0x200) * 8 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        if (irq < GIC_NR_SGIS) {
            value = 0;
        }

        for (i = 0; i < 8; i++) {
            if (value & (1 << i)) {
                GIC_SET_PENDING(irq + i, GIC_TARGET(irq + i));
            }
        }
    } else if (offset < 0x300) {
        /* Interrupt Clear Pending.  */
        irq = (offset - 0x280) * 8 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        if (irq < GIC_NR_SGIS) {
            value = 0;
        }

        for (i = 0; i < 8; i++) {
            /* ??? This currently clears the pending bit for all CPUs, even
               for per-CPU interrupts.  It's unclear whether this is the
               correct behavior.  */
            if (value & (1 << i)) {
                GIC_CLEAR_PENDING(irq + i, ALL_CPU_MASK);
            }
        }
    } else if (offset < 0x400) {
        /* Interrupt Active.  */
        goto bad_reg;
    } else if (offset < 0x800) {
        /* Interrupt Priority.  */
        irq = (offset - 0x400) + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        gic_set_priority(s, cpu, irq, value);
    } else if (offset < 0xc00) {
        /* Interrupt CPU Target. RAZ/WI on uniprocessor GICs, with the
         * annoying exception of the 11MPCore's GIC.
         */
        if (s->num_cpu != 1 || s->revision == REV_11MPCORE) {
            irq = (offset - 0x800) + GIC_BASE_IRQ;
            if (irq >= s->num_irq) {
                goto bad_reg;
            }
            if (irq < 29) {
                value = 0;
            } else if (irq < GIC_INTERNAL) {
                value = ALL_CPU_MASK;
            }
            s->irq_target[irq] = value & ALL_CPU_MASK;
        }
    } else if (offset < 0xf00) {
        /* Interrupt Configuration.  */
        irq = (offset - 0xc00) * 4 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        if (irq < GIC_NR_SGIS)
            value |= 0xaa;
        for (i = 0; i < 4; i++) {
            if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) {
                if (value & (1 << (i * 2))) {
                    GIC_SET_MODEL(irq + i);
                } else {
                    GIC_CLEAR_MODEL(irq + i);
                }
            }
            if (value & (2 << (i * 2))) {
                GIC_SET_EDGE_TRIGGER(irq + i);
            } else {
                GIC_CLEAR_EDGE_TRIGGER(irq + i);
            }
        }
    } else if (offset < 0xf10) {
        /* 0xf00 is only handled for 32-bit writes.  */
        goto bad_reg;
    } else if (offset < 0xf20) {
        /* GICD_CPENDSGIRn */
        if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) {
            goto bad_reg;
        }
        irq = (offset - 0xf10);

        s->sgi_pending[irq][cpu] &= ~value;
        if (s->sgi_pending[irq][cpu] == 0) {
            GIC_CLEAR_PENDING(irq, 1 << cpu);
        }
    } else if (offset < 0xf30) {
        /* GICD_SPENDSGIRn */
        if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) {
            goto bad_reg;
        }
        irq = (offset - 0xf20);

        GIC_SET_PENDING(irq, 1 << cpu);
        s->sgi_pending[irq][cpu] |= value;
    } else {
        goto bad_reg;
    }
    gic_update(s);
    return;
bad_reg:
    qemu_log_mask(LOG_GUEST_ERROR,
                  "gic_dist_writeb: Bad offset %x\n", (int)offset);
}

static void gic_dist_writew(void *opaque, hwaddr offset,
                            uint32_t value, MemTxAttrs attrs)
{
    gic_dist_writeb(opaque, offset, value & 0xff, attrs);
    gic_dist_writeb(opaque, offset + 1, value >> 8, attrs);
}

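/* GICD_SGIR example (illustrative, not from the original code): a 32-bit
 * write of 0x00010003 sends SGI 3 to CPU 0 (target list filter 0, CPU
 * target list 0x01), while a write of 0x02000003 sends SGI 3 to the
 * writing CPU only (filter 2).
 */
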
static void gic_dist_writel(void *opaque, hwaddr offset,
                            uint32_t value, MemTxAttrs attrs)
{
    GICState *s = (GICState *)opaque;
    if (offset == 0xf00) {
        int cpu;
        int irq;
        int mask;
        int target_cpu;

        cpu = gic_get_current_cpu(s);
        irq = value & 0x3ff;
        switch ((value >> 24) & 3) {
        case 0:
            mask = (value >> 16) & ALL_CPU_MASK;
            break;
        case 1:
            mask = ALL_CPU_MASK ^ (1 << cpu);
            break;
        case 2:
            mask = 1 << cpu;
            break;
        default:
            DPRINTF("Bad Soft Int target filter\n");
            mask = ALL_CPU_MASK;
            break;
        }
        GIC_SET_PENDING(irq, mask);
        target_cpu = ctz32(mask);
        while (target_cpu < GIC_NCPU) {
            s->sgi_pending[irq][target_cpu] |= (1 << cpu);
            mask &= ~(1 << target_cpu);
            target_cpu = ctz32(mask);
        }
        gic_update(s);
        return;
    }
    gic_dist_writew(opaque, offset, value & 0xffff, attrs);
    gic_dist_writew(opaque, offset + 2, value >> 16, attrs);
}

static MemTxResult gic_dist_write(void *opaque, hwaddr offset, uint64_t data,
                                  unsigned size, MemTxAttrs attrs)
{
    switch (size) {
    case 1:
        gic_dist_writeb(opaque, offset, data, attrs);
        return MEMTX_OK;
    case 2:
        gic_dist_writew(opaque, offset, data, attrs);
        return MEMTX_OK;
    case 4:
        gic_dist_writel(opaque, offset, data, attrs);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

static const MemoryRegionOps gic_dist_ops = {
    .read_with_attrs = gic_dist_read,
    .write_with_attrs = gic_dist_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

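/* Descriptive note (not from the original code): CPU interface accessors.
 * Reading the Interrupt Acknowledge register (offset 0x0c) has the side
 * effect of acknowledging the highest priority pending interrupt; for SGIs
 * (except on the 11MPCore and NVIC) the ID of the requesting CPU is
 * returned in bits [12:10], so e.g. SGI 3 sent by CPU 2 reads back as
 * 0x803 (see gic_acknowledge_irq()).
 */
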
static MemTxResult gic_cpu_read(GICState *s, int cpu, int offset,
                                uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case 0x00: /* Control */
        *data = s->cpu_enabled[cpu];
        break;
    case 0x04: /* Priority mask */
        *data = s->priority_mask[cpu];
        break;
    case 0x08: /* Binary Point */
        *data = s->bpr[cpu];
        break;
    case 0x0c: /* Acknowledge */
        *data = gic_acknowledge_irq(s, cpu);
        break;
    case 0x14: /* Running Priority */
        *data = s->running_priority[cpu];
        break;
    case 0x18: /* Highest Pending Interrupt */
        *data = s->current_pending[cpu];
        break;
    case 0x1c: /* Aliased Binary Point */
        *data = s->abpr[cpu];
        break;
    case 0xd0: case 0xd4: case 0xd8: case 0xdc:
        *data = s->apr[(offset - 0xd0) / 4][cpu];
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "gic_cpu_read: Bad offset %x\n", (int)offset);
        return MEMTX_ERROR;
    }
    return MEMTX_OK;
}

static MemTxResult gic_cpu_write(GICState *s, int cpu, int offset,
                                 uint32_t value, MemTxAttrs attrs)
{
    switch (offset) {
    case 0x00: /* Control */
        s->cpu_enabled[cpu] = (value & 1);
        DPRINTF("CPU %d %sabled\n", cpu, s->cpu_enabled[cpu] ? "En" : "Dis");
        break;
    case 0x04: /* Priority mask */
        s->priority_mask[cpu] = (value & 0xff);
        break;
    case 0x08: /* Binary Point */
        s->bpr[cpu] = (value & 0x7);
        break;
    case 0x10: /* End Of Interrupt */
        gic_complete_irq(s, cpu, value & 0x3ff);
        return MEMTX_OK;
    case 0x1c: /* Aliased Binary Point */
        if (s->revision >= 2) {
            s->abpr[cpu] = (value & 0x7);
        }
        break;
    case 0xd0: case 0xd4: case 0xd8: case 0xdc:
        qemu_log_mask(LOG_UNIMP, "Writing APR not implemented\n");
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "gic_cpu_write: Bad offset %x\n", (int)offset);
        return MEMTX_ERROR;
    }
    gic_update(s);
    return MEMTX_OK;
}

/* Wrappers to read/write the GIC CPU interface for the current CPU */
static MemTxResult gic_thiscpu_read(void *opaque, hwaddr addr, uint64_t *data,
                                    unsigned size, MemTxAttrs attrs)
{
    GICState *s = (GICState *)opaque;
    return gic_cpu_read(s, gic_get_current_cpu(s), addr, data, attrs);
}

static MemTxResult gic_thiscpu_write(void *opaque, hwaddr addr,
                                     uint64_t value, unsigned size,
                                     MemTxAttrs attrs)
{
    GICState *s = (GICState *)opaque;
    return gic_cpu_write(s, gic_get_current_cpu(s), addr, value, attrs);
}

/* Wrappers to read/write the GIC CPU interface for a specific CPU.
 * These just decode the opaque pointer into GICState* + cpu id.
 */
static MemTxResult gic_do_cpu_read(void *opaque, hwaddr addr, uint64_t *data,
                                   unsigned size, MemTxAttrs attrs)
{
    GICState **backref = (GICState **)opaque;
    GICState *s = *backref;
    int id = (backref - s->backref);
    return gic_cpu_read(s, id, addr, data, attrs);
}

static MemTxResult gic_do_cpu_write(void *opaque, hwaddr addr,
                                    uint64_t value, unsigned size,
                                    MemTxAttrs attrs)
{
    GICState **backref = (GICState **)opaque;
    GICState *s = *backref;
    int id = (backref - s->backref);
    return gic_cpu_write(s, id, addr, value, attrs);
}

static const MemoryRegionOps gic_thiscpu_ops = {
    .read_with_attrs = gic_thiscpu_read,
    .write_with_attrs = gic_thiscpu_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps gic_cpu_ops = {
    .read_with_attrs = gic_do_cpu_read,
    .write_with_attrs = gic_do_cpu_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

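/* Summary comment (not from the original code): sysbus interface exposed
 * by the code below:
 *  - GPIO inputs: the SPIs, followed (except for the NVIC) by one block of
 *    GIC_INTERNAL PPI inputs per CPU (see the layout comment in
 *    gic_init_irqs_and_distributor())
 *  - output lines: parent_irq[0..num_cpu-1], then parent_fiq[0..num_cpu-1]
 *  - MMIO region 0: the distributor; region 1: the "current CPU" interface;
 *    regions 2..num_cpu+1: the per-CPU interfaces (see arm_gic_realize())
 */
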
void gic_init_irqs_and_distributor(GICState *s)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(s);
    int i;

    i = s->num_irq - GIC_INTERNAL;
    /* For the GIC, also expose incoming GPIO lines for PPIs for each CPU.
     * GPIO array layout is thus:
     *  [0..N-1] SPIs
     *  [N..N+31] PPIs for CPU 0
     *  [N+32..N+63] PPIs for CPU 1
     *   ...
     */
    if (s->revision != REV_NVIC) {
        i += (GIC_INTERNAL * s->num_cpu);
    }
    qdev_init_gpio_in(DEVICE(s), gic_set_irq, i);
    for (i = 0; i < NUM_CPU(s); i++) {
        sysbus_init_irq(sbd, &s->parent_irq[i]);
    }
    for (i = 0; i < NUM_CPU(s); i++) {
        sysbus_init_irq(sbd, &s->parent_fiq[i]);
    }
    memory_region_init_io(&s->iomem, OBJECT(s), &gic_dist_ops, s,
                          "gic_dist", 0x1000);
}

static void arm_gic_realize(DeviceState *dev, Error **errp)
{
    /* Device instance realize function for the GIC sysbus device */
    int i;
    GICState *s = ARM_GIC(dev);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    ARMGICClass *agc = ARM_GIC_GET_CLASS(s);
    Error *local_err = NULL;

    agc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    gic_init_irqs_and_distributor(s);

    /* Memory regions for the CPU interfaces (NVIC doesn't have these):
     * a region for "CPU interface for this core", then a region for
     * "CPU interface for core 0", "for core 1", ...
     * NB that the memory region size of 0x100 applies for the 11MPCore
     * and also cores following the GIC v1 spec (ie A9).
     * GIC v2 defines a larger memory region (0x1000) so this will need
     * to be extended when we implement A15.
     */
    memory_region_init_io(&s->cpuiomem[0], OBJECT(s), &gic_thiscpu_ops, s,
                          "gic_cpu", 0x100);
    for (i = 0; i < NUM_CPU(s); i++) {
        s->backref[i] = s;
        memory_region_init_io(&s->cpuiomem[i+1], OBJECT(s), &gic_cpu_ops,
                              &s->backref[i], "gic_cpu", 0x100);
    }
    /* Distributor */
    sysbus_init_mmio(sbd, &s->iomem);
    /* cpu interfaces (one for "current cpu" plus one per cpu) */
    for (i = 0; i <= NUM_CPU(s); i++) {
        sysbus_init_mmio(sbd, &s->cpuiomem[i]);
    }
}

static void arm_gic_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    ARMGICClass *agc = ARM_GIC_CLASS(klass);

    agc->parent_realize = dc->realize;
    dc->realize = arm_gic_realize;
}

static const TypeInfo arm_gic_info = {
    .name = TYPE_ARM_GIC,
    .parent = TYPE_ARM_GIC_COMMON,
    .instance_size = sizeof(GICState),
    .class_init = arm_gic_class_init,
    .class_size = sizeof(ARMGICClass),
};

static void arm_gic_register_types(void)
{
    type_register_static(&arm_gic_info);
}

type_init(arm_gic_register_types)
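
/* Illustrative board wiring (a sketch, not part of this device model; it
 * assumes the "num-cpu" and "num-irq" properties defined in
 * arm_gic_common.c, and dist_base, cpu_base and cpu_irq_line[] are
 * board-specific placeholders):
 *
 *   DeviceState *gicdev = qdev_create(NULL, TYPE_ARM_GIC);
 *   qdev_prop_set_uint32(gicdev, "num-cpu", smp_cpus);
 *   qdev_prop_set_uint32(gicdev, "num-irq", 64 + GIC_INTERNAL);
 *   qdev_init_nofail(gicdev);
 *   sysbus_mmio_map(SYS_BUS_DEVICE(gicdev), 0, dist_base);
 *   sysbus_mmio_map(SYS_BUS_DEVICE(gicdev), 1, cpu_base);
 *   sysbus_connect_irq(SYS_BUS_DEVICE(gicdev), n, cpu_irq_line[n]);
 */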