1 /* 2 * ARM Generic/Distributed Interrupt Controller 3 * 4 * Copyright (c) 2006-2007 CodeSourcery. 5 * Written by Paul Brook 6 * 7 * This code is licensed under the GPL. 8 */ 9 10 /* This file contains implementation code for the RealView EB interrupt 11 * controller, MPCore distributed interrupt controller and ARMv7-M 12 * Nested Vectored Interrupt Controller. 13 * It is compiled in two ways: 14 * (1) as a standalone file to produce a sysbus device which is a GIC 15 * that can be used on the realview board and as one of the builtin 16 * private peripherals for the ARM MP CPUs (11MPCore, A9, etc) 17 * (2) by being directly #included into armv7m_nvic.c to produce the 18 * armv7m_nvic device. 19 */ 20 21 #include "qemu/osdep.h" 22 #include "hw/irq.h" 23 #include "hw/sysbus.h" 24 #include "gic_internal.h" 25 #include "qapi/error.h" 26 #include "hw/core/cpu.h" 27 #include "qemu/log.h" 28 #include "qemu/module.h" 29 #include "trace.h" 30 #include "sysemu/kvm.h" 31 32 /* #define DEBUG_GIC */ 33 34 #ifdef DEBUG_GIC 35 #define DEBUG_GIC_GATE 1 36 #else 37 #define DEBUG_GIC_GATE 0 38 #endif 39 40 #define DPRINTF(fmt, ...) do { \ 41 if (DEBUG_GIC_GATE) { \ 42 fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \ 43 } \ 44 } while (0) 45 46 static const uint8_t gic_id_11mpcore[] = { 47 0x00, 0x00, 0x00, 0x00, 0x90, 0x13, 0x04, 0x00, 0x0d, 0xf0, 0x05, 0xb1 48 }; 49 50 static const uint8_t gic_id_gicv1[] = { 51 0x04, 0x00, 0x00, 0x00, 0x90, 0xb3, 0x1b, 0x00, 0x0d, 0xf0, 0x05, 0xb1 52 }; 53 54 static const uint8_t gic_id_gicv2[] = { 55 0x04, 0x00, 0x00, 0x00, 0x90, 0xb4, 0x2b, 0x00, 0x0d, 0xf0, 0x05, 0xb1 56 }; 57 58 static inline int gic_get_current_cpu(GICState *s) 59 { 60 if (s->num_cpu > 1) { 61 return current_cpu->cpu_index; 62 } 63 return 0; 64 } 65 66 static inline int gic_get_current_vcpu(GICState *s) 67 { 68 return gic_get_current_cpu(s) + GIC_NCPU; 69 } 70 71 /* Return true if this GIC config has interrupt groups, which is 72 * true if we're a GICv2, or a GICv1 with the security extensions. 73 */ 74 static inline bool gic_has_groups(GICState *s) 75 { 76 return s->revision == 2 || s->security_extn; 77 } 78 79 static inline bool gic_cpu_ns_access(GICState *s, int cpu, MemTxAttrs attrs) 80 { 81 return !gic_is_vcpu(cpu) && s->security_extn && !attrs.secure; 82 } 83 84 static inline void gic_get_best_irq(GICState *s, int cpu, 85 int *best_irq, int *best_prio, int *group) 86 { 87 int irq; 88 int cm = 1 << cpu; 89 90 *best_irq = 1023; 91 *best_prio = 0x100; 92 93 for (irq = 0; irq < s->num_irq; irq++) { 94 if (GIC_DIST_TEST_ENABLED(irq, cm) && gic_test_pending(s, irq, cm) && 95 (!GIC_DIST_TEST_ACTIVE(irq, cm)) && 96 (irq < GIC_INTERNAL || GIC_DIST_TARGET(irq) & cm)) { 97 if (GIC_DIST_GET_PRIORITY(irq, cpu) < *best_prio) { 98 *best_prio = GIC_DIST_GET_PRIORITY(irq, cpu); 99 *best_irq = irq; 100 } 101 } 102 } 103 104 if (*best_irq < 1023) { 105 *group = GIC_DIST_TEST_GROUP(*best_irq, cm); 106 } 107 } 108 109 static inline void gic_get_best_virq(GICState *s, int cpu, 110 int *best_irq, int *best_prio, int *group) 111 { 112 int lr_idx = 0; 113 114 *best_irq = 1023; 115 *best_prio = 0x100; 116 117 for (lr_idx = 0; lr_idx < s->num_lrs; lr_idx++) { 118 uint32_t lr_entry = s->h_lr[lr_idx][cpu]; 119 int state = GICH_LR_STATE(lr_entry); 120 121 if (state == GICH_LR_STATE_PENDING) { 122 int prio = GICH_LR_PRIORITY(lr_entry); 123 124 if (prio < *best_prio) { 125 *best_prio = prio; 126 *best_irq = GICH_LR_VIRT_ID(lr_entry); 127 *group = GICH_LR_GROUP(lr_entry); 128 } 129 } 130 } 131 } 132 133 /* Return true if IRQ signaling is enabled for the given cpu and at least one 134 * of the given groups: 135 * - in the non-virt case, the distributor must be enabled for one of the 136 * given groups 137 * - in the virt case, the virtual interface must be enabled. 138 * - in all cases, the (v)CPU interface must be enabled for one of the given 139 * groups. 140 */ 141 static inline bool gic_irq_signaling_enabled(GICState *s, int cpu, bool virt, 142 int group_mask) 143 { 144 if (!virt && !(s->ctlr & group_mask)) { 145 return false; 146 } 147 148 if (virt && !(s->h_hcr[cpu] & R_GICH_HCR_EN_MASK)) { 149 return false; 150 } 151 152 if (!(s->cpu_ctlr[cpu] & group_mask)) { 153 return false; 154 } 155 156 return true; 157 } 158 159 /* TODO: Many places that call this routine could be optimized. */ 160 /* Update interrupt status after enabled or pending bits have been changed. */ 161 static inline void gic_update_internal(GICState *s, bool virt) 162 { 163 int best_irq; 164 int best_prio; 165 int irq_level, fiq_level; 166 int cpu, cpu_iface; 167 int group = 0; 168 qemu_irq *irq_lines = virt ? s->parent_virq : s->parent_irq; 169 qemu_irq *fiq_lines = virt ? s->parent_vfiq : s->parent_fiq; 170 171 for (cpu = 0; cpu < s->num_cpu; cpu++) { 172 cpu_iface = virt ? (cpu + GIC_NCPU) : cpu; 173 174 s->current_pending[cpu_iface] = 1023; 175 if (!gic_irq_signaling_enabled(s, cpu, virt, 176 GICD_CTLR_EN_GRP0 | GICD_CTLR_EN_GRP1)) { 177 qemu_irq_lower(irq_lines[cpu]); 178 qemu_irq_lower(fiq_lines[cpu]); 179 continue; 180 } 181 182 if (virt) { 183 gic_get_best_virq(s, cpu, &best_irq, &best_prio, &group); 184 } else { 185 gic_get_best_irq(s, cpu, &best_irq, &best_prio, &group); 186 } 187 188 if (best_irq != 1023) { 189 trace_gic_update_bestirq(virt ? "vcpu" : "cpu", cpu, 190 best_irq, best_prio, 191 s->priority_mask[cpu_iface], 192 s->running_priority[cpu_iface]); 193 } 194 195 irq_level = fiq_level = 0; 196 197 if (best_prio < s->priority_mask[cpu_iface]) { 198 s->current_pending[cpu_iface] = best_irq; 199 if (best_prio < s->running_priority[cpu_iface]) { 200 if (gic_irq_signaling_enabled(s, cpu, virt, 1 << group)) { 201 if (group == 0 && 202 s->cpu_ctlr[cpu_iface] & GICC_CTLR_FIQ_EN) { 203 DPRINTF("Raised pending FIQ %d (cpu %d)\n", 204 best_irq, cpu_iface); 205 fiq_level = 1; 206 trace_gic_update_set_irq(cpu, virt ? "vfiq" : "fiq", 207 fiq_level); 208 } else { 209 DPRINTF("Raised pending IRQ %d (cpu %d)\n", 210 best_irq, cpu_iface); 211 irq_level = 1; 212 trace_gic_update_set_irq(cpu, virt ? "virq" : "irq", 213 irq_level); 214 } 215 } 216 } 217 } 218 219 qemu_set_irq(irq_lines[cpu], irq_level); 220 qemu_set_irq(fiq_lines[cpu], fiq_level); 221 } 222 } 223 224 static void gic_update(GICState *s) 225 { 226 gic_update_internal(s, false); 227 } 228 229 /* Return true if this LR is empty, i.e. the corresponding bit 230 * in ELRSR is set. 231 */ 232 static inline bool gic_lr_entry_is_free(uint32_t entry) 233 { 234 return (GICH_LR_STATE(entry) == GICH_LR_STATE_INVALID) 235 && (GICH_LR_HW(entry) || !GICH_LR_EOI(entry)); 236 } 237 238 /* Return true if this LR should trigger an EOI maintenance interrupt, i.e. the 239 * corrsponding bit in EISR is set. 240 */ 241 static inline bool gic_lr_entry_is_eoi(uint32_t entry) 242 { 243 return (GICH_LR_STATE(entry) == GICH_LR_STATE_INVALID) 244 && !GICH_LR_HW(entry) && GICH_LR_EOI(entry); 245 } 246 247 static inline void gic_extract_lr_info(GICState *s, int cpu, 248 int *num_eoi, int *num_valid, int *num_pending) 249 { 250 int lr_idx; 251 252 *num_eoi = 0; 253 *num_valid = 0; 254 *num_pending = 0; 255 256 for (lr_idx = 0; lr_idx < s->num_lrs; lr_idx++) { 257 uint32_t *entry = &s->h_lr[lr_idx][cpu]; 258 259 if (gic_lr_entry_is_eoi(*entry)) { 260 (*num_eoi)++; 261 } 262 263 if (GICH_LR_STATE(*entry) != GICH_LR_STATE_INVALID) { 264 (*num_valid)++; 265 } 266 267 if (GICH_LR_STATE(*entry) == GICH_LR_STATE_PENDING) { 268 (*num_pending)++; 269 } 270 } 271 } 272 273 static void gic_compute_misr(GICState *s, int cpu) 274 { 275 uint32_t value = 0; 276 int vcpu = cpu + GIC_NCPU; 277 278 int num_eoi, num_valid, num_pending; 279 280 gic_extract_lr_info(s, cpu, &num_eoi, &num_valid, &num_pending); 281 282 /* EOI */ 283 if (num_eoi) { 284 value |= R_GICH_MISR_EOI_MASK; 285 } 286 287 /* U: true if only 0 or 1 LR entry is valid */ 288 if ((s->h_hcr[cpu] & R_GICH_HCR_UIE_MASK) && (num_valid < 2)) { 289 value |= R_GICH_MISR_U_MASK; 290 } 291 292 /* LRENP: EOICount is not 0 */ 293 if ((s->h_hcr[cpu] & R_GICH_HCR_LRENPIE_MASK) && 294 ((s->h_hcr[cpu] & R_GICH_HCR_EOICount_MASK) != 0)) { 295 value |= R_GICH_MISR_LRENP_MASK; 296 } 297 298 /* NP: no pending interrupts */ 299 if ((s->h_hcr[cpu] & R_GICH_HCR_NPIE_MASK) && (num_pending == 0)) { 300 value |= R_GICH_MISR_NP_MASK; 301 } 302 303 /* VGrp0E: group0 virq signaling enabled */ 304 if ((s->h_hcr[cpu] & R_GICH_HCR_VGRP0EIE_MASK) && 305 (s->cpu_ctlr[vcpu] & GICC_CTLR_EN_GRP0)) { 306 value |= R_GICH_MISR_VGrp0E_MASK; 307 } 308 309 /* VGrp0D: group0 virq signaling disabled */ 310 if ((s->h_hcr[cpu] & R_GICH_HCR_VGRP0DIE_MASK) && 311 !(s->cpu_ctlr[vcpu] & GICC_CTLR_EN_GRP0)) { 312 value |= R_GICH_MISR_VGrp0D_MASK; 313 } 314 315 /* VGrp1E: group1 virq signaling enabled */ 316 if ((s->h_hcr[cpu] & R_GICH_HCR_VGRP1EIE_MASK) && 317 (s->cpu_ctlr[vcpu] & GICC_CTLR_EN_GRP1)) { 318 value |= R_GICH_MISR_VGrp1E_MASK; 319 } 320 321 /* VGrp1D: group1 virq signaling disabled */ 322 if ((s->h_hcr[cpu] & R_GICH_HCR_VGRP1DIE_MASK) && 323 !(s->cpu_ctlr[vcpu] & GICC_CTLR_EN_GRP1)) { 324 value |= R_GICH_MISR_VGrp1D_MASK; 325 } 326 327 s->h_misr[cpu] = value; 328 } 329 330 static void gic_update_maintenance(GICState *s) 331 { 332 int cpu = 0; 333 int maint_level; 334 335 for (cpu = 0; cpu < s->num_cpu; cpu++) { 336 gic_compute_misr(s, cpu); 337 maint_level = (s->h_hcr[cpu] & R_GICH_HCR_EN_MASK) && s->h_misr[cpu]; 338 339 trace_gic_update_maintenance_irq(cpu, maint_level); 340 qemu_set_irq(s->maintenance_irq[cpu], maint_level); 341 } 342 } 343 344 static void gic_update_virt(GICState *s) 345 { 346 gic_update_internal(s, true); 347 gic_update_maintenance(s); 348 } 349 350 static void gic_set_irq_11mpcore(GICState *s, int irq, int level, 351 int cm, int target) 352 { 353 if (level) { 354 GIC_DIST_SET_LEVEL(irq, cm); 355 if (GIC_DIST_TEST_EDGE_TRIGGER(irq) || GIC_DIST_TEST_ENABLED(irq, cm)) { 356 DPRINTF("Set %d pending mask %x\n", irq, target); 357 GIC_DIST_SET_PENDING(irq, target); 358 } 359 } else { 360 GIC_DIST_CLEAR_LEVEL(irq, cm); 361 } 362 } 363 364 static void gic_set_irq_generic(GICState *s, int irq, int level, 365 int cm, int target) 366 { 367 if (level) { 368 GIC_DIST_SET_LEVEL(irq, cm); 369 DPRINTF("Set %d pending mask %x\n", irq, target); 370 if (GIC_DIST_TEST_EDGE_TRIGGER(irq)) { 371 GIC_DIST_SET_PENDING(irq, target); 372 } 373 } else { 374 GIC_DIST_CLEAR_LEVEL(irq, cm); 375 } 376 } 377 378 /* Process a change in an external IRQ input. */ 379 static void gic_set_irq(void *opaque, int irq, int level) 380 { 381 /* Meaning of the 'irq' parameter: 382 * [0..N-1] : external interrupts 383 * [N..N+31] : PPI (internal) interrupts for CPU 0 384 * [N+32..N+63] : PPI (internal interrupts for CPU 1 385 * ... 386 */ 387 GICState *s = (GICState *)opaque; 388 int cm, target; 389 if (irq < (s->num_irq - GIC_INTERNAL)) { 390 /* The first external input line is internal interrupt 32. */ 391 cm = ALL_CPU_MASK; 392 irq += GIC_INTERNAL; 393 target = GIC_DIST_TARGET(irq); 394 } else { 395 int cpu; 396 irq -= (s->num_irq - GIC_INTERNAL); 397 cpu = irq / GIC_INTERNAL; 398 irq %= GIC_INTERNAL; 399 cm = 1 << cpu; 400 target = cm; 401 } 402 403 assert(irq >= GIC_NR_SGIS); 404 405 if (level == GIC_DIST_TEST_LEVEL(irq, cm)) { 406 return; 407 } 408 409 if (s->revision == REV_11MPCORE) { 410 gic_set_irq_11mpcore(s, irq, level, cm, target); 411 } else { 412 gic_set_irq_generic(s, irq, level, cm, target); 413 } 414 trace_gic_set_irq(irq, level, cm, target); 415 416 gic_update(s); 417 } 418 419 static uint16_t gic_get_current_pending_irq(GICState *s, int cpu, 420 MemTxAttrs attrs) 421 { 422 uint16_t pending_irq = s->current_pending[cpu]; 423 424 if (pending_irq < GIC_MAXIRQ && gic_has_groups(s)) { 425 int group = gic_test_group(s, pending_irq, cpu); 426 427 /* On a GIC without the security extensions, reading this register 428 * behaves in the same way as a secure access to a GIC with them. 429 */ 430 bool secure = !gic_cpu_ns_access(s, cpu, attrs); 431 432 if (group == 0 && !secure) { 433 /* Group0 interrupts hidden from Non-secure access */ 434 return 1023; 435 } 436 if (group == 1 && secure && !(s->cpu_ctlr[cpu] & GICC_CTLR_ACK_CTL)) { 437 /* Group1 interrupts only seen by Secure access if 438 * AckCtl bit set. 439 */ 440 return 1022; 441 } 442 } 443 return pending_irq; 444 } 445 446 static int gic_get_group_priority(GICState *s, int cpu, int irq) 447 { 448 /* Return the group priority of the specified interrupt 449 * (which is the top bits of its priority, with the number 450 * of bits masked determined by the applicable binary point register). 451 */ 452 int bpr; 453 uint32_t mask; 454 455 if (gic_has_groups(s) && 456 !(s->cpu_ctlr[cpu] & GICC_CTLR_CBPR) && 457 gic_test_group(s, irq, cpu)) { 458 bpr = s->abpr[cpu] - 1; 459 assert(bpr >= 0); 460 } else { 461 bpr = s->bpr[cpu]; 462 } 463 464 /* a BPR of 0 means the group priority bits are [7:1]; 465 * a BPR of 1 means they are [7:2], and so on down to 466 * a BPR of 7 meaning no group priority bits at all. 467 */ 468 mask = ~0U << ((bpr & 7) + 1); 469 470 return gic_get_priority(s, irq, cpu) & mask; 471 } 472 473 static void gic_activate_irq(GICState *s, int cpu, int irq) 474 { 475 /* Set the appropriate Active Priority Register bit for this IRQ, 476 * and update the running priority. 477 */ 478 int prio = gic_get_group_priority(s, cpu, irq); 479 int min_bpr = gic_is_vcpu(cpu) ? GIC_VIRT_MIN_BPR : GIC_MIN_BPR; 480 int preemption_level = prio >> (min_bpr + 1); 481 int regno = preemption_level / 32; 482 int bitno = preemption_level % 32; 483 uint32_t *papr = NULL; 484 485 if (gic_is_vcpu(cpu)) { 486 assert(regno == 0); 487 papr = &s->h_apr[gic_get_vcpu_real_id(cpu)]; 488 } else if (gic_has_groups(s) && gic_test_group(s, irq, cpu)) { 489 papr = &s->nsapr[regno][cpu]; 490 } else { 491 papr = &s->apr[regno][cpu]; 492 } 493 494 *papr |= (1 << bitno); 495 496 s->running_priority[cpu] = prio; 497 gic_set_active(s, irq, cpu); 498 } 499 500 static int gic_get_prio_from_apr_bits(GICState *s, int cpu) 501 { 502 /* Recalculate the current running priority for this CPU based 503 * on the set bits in the Active Priority Registers. 504 */ 505 int i; 506 507 if (gic_is_vcpu(cpu)) { 508 uint32_t apr = s->h_apr[gic_get_vcpu_real_id(cpu)]; 509 if (apr) { 510 return ctz32(apr) << (GIC_VIRT_MIN_BPR + 1); 511 } else { 512 return 0x100; 513 } 514 } 515 516 for (i = 0; i < GIC_NR_APRS; i++) { 517 uint32_t apr = s->apr[i][cpu] | s->nsapr[i][cpu]; 518 if (!apr) { 519 continue; 520 } 521 return (i * 32 + ctz32(apr)) << (GIC_MIN_BPR + 1); 522 } 523 return 0x100; 524 } 525 526 static void gic_drop_prio(GICState *s, int cpu, int group) 527 { 528 /* Drop the priority of the currently active interrupt in the 529 * specified group. 530 * 531 * Note that we can guarantee (because of the requirement to nest 532 * GICC_IAR reads [which activate an interrupt and raise priority] 533 * with GICC_EOIR writes [which drop the priority for the interrupt]) 534 * that the interrupt we're being called for is the highest priority 535 * active interrupt, meaning that it has the lowest set bit in the 536 * APR registers. 537 * 538 * If the guest does not honour the ordering constraints then the 539 * behaviour of the GIC is UNPREDICTABLE, which for us means that 540 * the values of the APR registers might become incorrect and the 541 * running priority will be wrong, so interrupts that should preempt 542 * might not do so, and interrupts that should not preempt might do so. 543 */ 544 if (gic_is_vcpu(cpu)) { 545 int rcpu = gic_get_vcpu_real_id(cpu); 546 547 if (s->h_apr[rcpu]) { 548 /* Clear lowest set bit */ 549 s->h_apr[rcpu] &= s->h_apr[rcpu] - 1; 550 } 551 } else { 552 int i; 553 554 for (i = 0; i < GIC_NR_APRS; i++) { 555 uint32_t *papr = group ? &s->nsapr[i][cpu] : &s->apr[i][cpu]; 556 if (!*papr) { 557 continue; 558 } 559 /* Clear lowest set bit */ 560 *papr &= *papr - 1; 561 break; 562 } 563 } 564 565 s->running_priority[cpu] = gic_get_prio_from_apr_bits(s, cpu); 566 } 567 568 static inline uint32_t gic_clear_pending_sgi(GICState *s, int irq, int cpu) 569 { 570 int src; 571 uint32_t ret; 572 573 if (!gic_is_vcpu(cpu)) { 574 /* Lookup the source CPU for the SGI and clear this in the 575 * sgi_pending map. Return the src and clear the overall pending 576 * state on this CPU if the SGI is not pending from any CPUs. 577 */ 578 assert(s->sgi_pending[irq][cpu] != 0); 579 src = ctz32(s->sgi_pending[irq][cpu]); 580 s->sgi_pending[irq][cpu] &= ~(1 << src); 581 if (s->sgi_pending[irq][cpu] == 0) { 582 gic_clear_pending(s, irq, cpu); 583 } 584 ret = irq | ((src & 0x7) << 10); 585 } else { 586 uint32_t *lr_entry = gic_get_lr_entry(s, irq, cpu); 587 src = GICH_LR_CPUID(*lr_entry); 588 589 gic_clear_pending(s, irq, cpu); 590 ret = irq | (src << 10); 591 } 592 593 return ret; 594 } 595 596 uint32_t gic_acknowledge_irq(GICState *s, int cpu, MemTxAttrs attrs) 597 { 598 int ret, irq; 599 600 /* gic_get_current_pending_irq() will return 1022 or 1023 appropriately 601 * for the case where this GIC supports grouping and the pending interrupt 602 * is in the wrong group. 603 */ 604 irq = gic_get_current_pending_irq(s, cpu, attrs); 605 trace_gic_acknowledge_irq(gic_is_vcpu(cpu) ? "vcpu" : "cpu", 606 gic_get_vcpu_real_id(cpu), irq); 607 608 if (irq >= GIC_MAXIRQ) { 609 DPRINTF("ACK, no pending interrupt or it is hidden: %d\n", irq); 610 return irq; 611 } 612 613 if (gic_get_priority(s, irq, cpu) >= s->running_priority[cpu]) { 614 DPRINTF("ACK, pending interrupt (%d) has insufficient priority\n", irq); 615 return 1023; 616 } 617 618 gic_activate_irq(s, cpu, irq); 619 620 if (s->revision == REV_11MPCORE) { 621 /* Clear pending flags for both level and edge triggered interrupts. 622 * Level triggered IRQs will be reasserted once they become inactive. 623 */ 624 gic_clear_pending(s, irq, cpu); 625 ret = irq; 626 } else { 627 if (irq < GIC_NR_SGIS) { 628 ret = gic_clear_pending_sgi(s, irq, cpu); 629 } else { 630 gic_clear_pending(s, irq, cpu); 631 ret = irq; 632 } 633 } 634 635 if (gic_is_vcpu(cpu)) { 636 gic_update_virt(s); 637 } else { 638 gic_update(s); 639 } 640 DPRINTF("ACK %d\n", irq); 641 return ret; 642 } 643 644 void gic_dist_set_priority(GICState *s, int cpu, int irq, uint8_t val, 645 MemTxAttrs attrs) 646 { 647 if (s->security_extn && !attrs.secure) { 648 if (!GIC_DIST_TEST_GROUP(irq, (1 << cpu))) { 649 return; /* Ignore Non-secure access of Group0 IRQ */ 650 } 651 val = 0x80 | (val >> 1); /* Non-secure view */ 652 } 653 654 if (irq < GIC_INTERNAL) { 655 s->priority1[irq][cpu] = val; 656 } else { 657 s->priority2[(irq) - GIC_INTERNAL] = val; 658 } 659 } 660 661 static uint32_t gic_dist_get_priority(GICState *s, int cpu, int irq, 662 MemTxAttrs attrs) 663 { 664 uint32_t prio = GIC_DIST_GET_PRIORITY(irq, cpu); 665 666 if (s->security_extn && !attrs.secure) { 667 if (!GIC_DIST_TEST_GROUP(irq, (1 << cpu))) { 668 return 0; /* Non-secure access cannot read priority of Group0 IRQ */ 669 } 670 prio = (prio << 1) & 0xff; /* Non-secure view */ 671 } 672 return prio; 673 } 674 675 static void gic_set_priority_mask(GICState *s, int cpu, uint8_t pmask, 676 MemTxAttrs attrs) 677 { 678 if (gic_cpu_ns_access(s, cpu, attrs)) { 679 if (s->priority_mask[cpu] & 0x80) { 680 /* Priority Mask in upper half */ 681 pmask = 0x80 | (pmask >> 1); 682 } else { 683 /* Non-secure write ignored if priority mask is in lower half */ 684 return; 685 } 686 } 687 s->priority_mask[cpu] = pmask; 688 } 689 690 static uint32_t gic_get_priority_mask(GICState *s, int cpu, MemTxAttrs attrs) 691 { 692 uint32_t pmask = s->priority_mask[cpu]; 693 694 if (gic_cpu_ns_access(s, cpu, attrs)) { 695 if (pmask & 0x80) { 696 /* Priority Mask in upper half, return Non-secure view */ 697 pmask = (pmask << 1) & 0xff; 698 } else { 699 /* Priority Mask in lower half, RAZ */ 700 pmask = 0; 701 } 702 } 703 return pmask; 704 } 705 706 static uint32_t gic_get_cpu_control(GICState *s, int cpu, MemTxAttrs attrs) 707 { 708 uint32_t ret = s->cpu_ctlr[cpu]; 709 710 if (gic_cpu_ns_access(s, cpu, attrs)) { 711 /* Construct the NS banked view of GICC_CTLR from the correct 712 * bits of the S banked view. We don't need to move the bypass 713 * control bits because we don't implement that (IMPDEF) part 714 * of the GIC architecture. 715 */ 716 ret = (ret & (GICC_CTLR_EN_GRP1 | GICC_CTLR_EOIMODE_NS)) >> 1; 717 } 718 return ret; 719 } 720 721 static void gic_set_cpu_control(GICState *s, int cpu, uint32_t value, 722 MemTxAttrs attrs) 723 { 724 uint32_t mask; 725 726 if (gic_cpu_ns_access(s, cpu, attrs)) { 727 /* The NS view can only write certain bits in the register; 728 * the rest are unchanged 729 */ 730 mask = GICC_CTLR_EN_GRP1; 731 if (s->revision == 2) { 732 mask |= GICC_CTLR_EOIMODE_NS; 733 } 734 s->cpu_ctlr[cpu] &= ~mask; 735 s->cpu_ctlr[cpu] |= (value << 1) & mask; 736 } else { 737 if (s->revision == 2) { 738 mask = s->security_extn ? GICC_CTLR_V2_S_MASK : GICC_CTLR_V2_MASK; 739 } else { 740 mask = s->security_extn ? GICC_CTLR_V1_S_MASK : GICC_CTLR_V1_MASK; 741 } 742 s->cpu_ctlr[cpu] = value & mask; 743 } 744 DPRINTF("CPU Interface %d: Group0 Interrupts %sabled, " 745 "Group1 Interrupts %sabled\n", cpu, 746 (s->cpu_ctlr[cpu] & GICC_CTLR_EN_GRP0) ? "En" : "Dis", 747 (s->cpu_ctlr[cpu] & GICC_CTLR_EN_GRP1) ? "En" : "Dis"); 748 } 749 750 static uint8_t gic_get_running_priority(GICState *s, int cpu, MemTxAttrs attrs) 751 { 752 if ((s->revision != REV_11MPCORE) && (s->running_priority[cpu] > 0xff)) { 753 /* Idle priority */ 754 return 0xff; 755 } 756 757 if (gic_cpu_ns_access(s, cpu, attrs)) { 758 if (s->running_priority[cpu] & 0x80) { 759 /* Running priority in upper half of range: return the Non-secure 760 * view of the priority. 761 */ 762 return s->running_priority[cpu] << 1; 763 } else { 764 /* Running priority in lower half of range: RAZ */ 765 return 0; 766 } 767 } else { 768 return s->running_priority[cpu]; 769 } 770 } 771 772 /* Return true if we should split priority drop and interrupt deactivation, 773 * ie whether the relevant EOIMode bit is set. 774 */ 775 static bool gic_eoi_split(GICState *s, int cpu, MemTxAttrs attrs) 776 { 777 if (s->revision != 2) { 778 /* Before GICv2 prio-drop and deactivate are not separable */ 779 return false; 780 } 781 if (gic_cpu_ns_access(s, cpu, attrs)) { 782 return s->cpu_ctlr[cpu] & GICC_CTLR_EOIMODE_NS; 783 } 784 return s->cpu_ctlr[cpu] & GICC_CTLR_EOIMODE; 785 } 786 787 static void gic_deactivate_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs) 788 { 789 int group; 790 791 if (irq >= GIC_MAXIRQ || (!gic_is_vcpu(cpu) && irq >= s->num_irq)) { 792 /* 793 * This handles two cases: 794 * 1. If software writes the ID of a spurious interrupt [ie 1023] 795 * to the GICC_DIR, the GIC ignores that write. 796 * 2. If software writes the number of a non-existent interrupt 797 * this must be a subcase of "value written is not an active interrupt" 798 * and so this is UNPREDICTABLE. We choose to ignore it. For vCPUs, 799 * all IRQs potentially exist, so this limit does not apply. 800 */ 801 return; 802 } 803 804 if (!gic_eoi_split(s, cpu, attrs)) { 805 /* This is UNPREDICTABLE; we choose to ignore it */ 806 qemu_log_mask(LOG_GUEST_ERROR, 807 "gic_deactivate_irq: GICC_DIR write when EOIMode clear"); 808 return; 809 } 810 811 if (gic_is_vcpu(cpu) && !gic_virq_is_valid(s, irq, cpu)) { 812 /* This vIRQ does not have an LR entry which is either active or 813 * pending and active. Increment EOICount and ignore the write. 814 */ 815 int rcpu = gic_get_vcpu_real_id(cpu); 816 s->h_hcr[rcpu] += 1 << R_GICH_HCR_EOICount_SHIFT; 817 818 /* Update the virtual interface in case a maintenance interrupt should 819 * be raised. 820 */ 821 gic_update_virt(s); 822 return; 823 } 824 825 group = gic_has_groups(s) && gic_test_group(s, irq, cpu); 826 827 if (gic_cpu_ns_access(s, cpu, attrs) && !group) { 828 DPRINTF("Non-secure DI for Group0 interrupt %d ignored\n", irq); 829 return; 830 } 831 832 gic_clear_active(s, irq, cpu); 833 } 834 835 static void gic_complete_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs) 836 { 837 int cm = 1 << cpu; 838 int group; 839 840 DPRINTF("EOI %d\n", irq); 841 if (gic_is_vcpu(cpu)) { 842 /* The call to gic_prio_drop() will clear a bit in GICH_APR iff the 843 * running prio is < 0x100. 844 */ 845 bool prio_drop = s->running_priority[cpu] < 0x100; 846 847 if (irq >= GIC_MAXIRQ) { 848 /* Ignore spurious interrupt */ 849 return; 850 } 851 852 gic_drop_prio(s, cpu, 0); 853 854 if (!gic_eoi_split(s, cpu, attrs)) { 855 bool valid = gic_virq_is_valid(s, irq, cpu); 856 if (prio_drop && !valid) { 857 /* We are in a situation where: 858 * - V_CTRL.EOIMode is false (no EOI split), 859 * - The call to gic_drop_prio() cleared a bit in GICH_APR, 860 * - This vIRQ does not have an LR entry which is either 861 * active or pending and active. 862 * In that case, we must increment EOICount. 863 */ 864 int rcpu = gic_get_vcpu_real_id(cpu); 865 s->h_hcr[rcpu] += 1 << R_GICH_HCR_EOICount_SHIFT; 866 } else if (valid) { 867 gic_clear_active(s, irq, cpu); 868 } 869 } 870 871 gic_update_virt(s); 872 return; 873 } 874 875 if (irq >= s->num_irq) { 876 /* This handles two cases: 877 * 1. If software writes the ID of a spurious interrupt [ie 1023] 878 * to the GICC_EOIR, the GIC ignores that write. 879 * 2. If software writes the number of a non-existent interrupt 880 * this must be a subcase of "value written does not match the last 881 * valid interrupt value read from the Interrupt Acknowledge 882 * register" and so this is UNPREDICTABLE. We choose to ignore it. 883 */ 884 return; 885 } 886 if (s->running_priority[cpu] == 0x100) { 887 return; /* No active IRQ. */ 888 } 889 890 if (s->revision == REV_11MPCORE) { 891 /* Mark level triggered interrupts as pending if they are still 892 raised. */ 893 if (!GIC_DIST_TEST_EDGE_TRIGGER(irq) && GIC_DIST_TEST_ENABLED(irq, cm) 894 && GIC_DIST_TEST_LEVEL(irq, cm) 895 && (GIC_DIST_TARGET(irq) & cm) != 0) { 896 DPRINTF("Set %d pending mask %x\n", irq, cm); 897 GIC_DIST_SET_PENDING(irq, cm); 898 } 899 } 900 901 group = gic_has_groups(s) && gic_test_group(s, irq, cpu); 902 903 if (gic_cpu_ns_access(s, cpu, attrs) && !group) { 904 DPRINTF("Non-secure EOI for Group0 interrupt %d ignored\n", irq); 905 return; 906 } 907 908 /* Secure EOI with GICC_CTLR.AckCtl == 0 when the IRQ is a Group 1 909 * interrupt is UNPREDICTABLE. We choose to handle it as if AckCtl == 1, 910 * i.e. go ahead and complete the irq anyway. 911 */ 912 913 gic_drop_prio(s, cpu, group); 914 915 /* In GICv2 the guest can choose to split priority-drop and deactivate */ 916 if (!gic_eoi_split(s, cpu, attrs)) { 917 gic_clear_active(s, irq, cpu); 918 } 919 gic_update(s); 920 } 921 922 static uint32_t gic_dist_readb(void *opaque, hwaddr offset, MemTxAttrs attrs) 923 { 924 GICState *s = (GICState *)opaque; 925 uint32_t res; 926 int irq; 927 int i; 928 int cpu; 929 int cm; 930 int mask; 931 932 cpu = gic_get_current_cpu(s); 933 cm = 1 << cpu; 934 if (offset < 0x100) { 935 if (offset == 0) { /* GICD_CTLR */ 936 if (s->security_extn && !attrs.secure) { 937 /* The NS bank of this register is just an alias of the 938 * EnableGrp1 bit in the S bank version. 939 */ 940 return extract32(s->ctlr, 1, 1); 941 } else { 942 return s->ctlr; 943 } 944 } 945 if (offset == 4) 946 /* Interrupt Controller Type Register */ 947 return ((s->num_irq / 32) - 1) 948 | ((s->num_cpu - 1) << 5) 949 | (s->security_extn << 10); 950 if (offset < 0x08) 951 return 0; 952 if (offset >= 0x80) { 953 /* Interrupt Group Registers: these RAZ/WI if this is an NS 954 * access to a GIC with the security extensions, or if the GIC 955 * doesn't have groups at all. 956 */ 957 res = 0; 958 if (!(s->security_extn && !attrs.secure) && gic_has_groups(s)) { 959 /* Every byte offset holds 8 group status bits */ 960 irq = (offset - 0x080) * 8; 961 if (irq >= s->num_irq) { 962 goto bad_reg; 963 } 964 for (i = 0; i < 8; i++) { 965 if (GIC_DIST_TEST_GROUP(irq + i, cm)) { 966 res |= (1 << i); 967 } 968 } 969 } 970 return res; 971 } 972 goto bad_reg; 973 } else if (offset < 0x200) { 974 /* Interrupt Set/Clear Enable. */ 975 if (offset < 0x180) 976 irq = (offset - 0x100) * 8; 977 else 978 irq = (offset - 0x180) * 8; 979 if (irq >= s->num_irq) 980 goto bad_reg; 981 res = 0; 982 for (i = 0; i < 8; i++) { 983 if (s->security_extn && !attrs.secure && 984 !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) { 985 continue; /* Ignore Non-secure access of Group0 IRQ */ 986 } 987 988 if (GIC_DIST_TEST_ENABLED(irq + i, cm)) { 989 res |= (1 << i); 990 } 991 } 992 } else if (offset < 0x300) { 993 /* Interrupt Set/Clear Pending. */ 994 if (offset < 0x280) 995 irq = (offset - 0x200) * 8; 996 else 997 irq = (offset - 0x280) * 8; 998 if (irq >= s->num_irq) 999 goto bad_reg; 1000 res = 0; 1001 mask = (irq < GIC_INTERNAL) ? cm : ALL_CPU_MASK; 1002 for (i = 0; i < 8; i++) { 1003 if (s->security_extn && !attrs.secure && 1004 !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) { 1005 continue; /* Ignore Non-secure access of Group0 IRQ */ 1006 } 1007 1008 if (gic_test_pending(s, irq + i, mask)) { 1009 res |= (1 << i); 1010 } 1011 } 1012 } else if (offset < 0x400) { 1013 /* Interrupt Set/Clear Active. */ 1014 if (offset < 0x380) { 1015 irq = (offset - 0x300) * 8; 1016 } else if (s->revision == 2) { 1017 irq = (offset - 0x380) * 8; 1018 } else { 1019 goto bad_reg; 1020 } 1021 1022 if (irq >= s->num_irq) 1023 goto bad_reg; 1024 res = 0; 1025 mask = (irq < GIC_INTERNAL) ? cm : ALL_CPU_MASK; 1026 for (i = 0; i < 8; i++) { 1027 if (s->security_extn && !attrs.secure && 1028 !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) { 1029 continue; /* Ignore Non-secure access of Group0 IRQ */ 1030 } 1031 1032 if (GIC_DIST_TEST_ACTIVE(irq + i, mask)) { 1033 res |= (1 << i); 1034 } 1035 } 1036 } else if (offset < 0x800) { 1037 /* Interrupt Priority. */ 1038 irq = (offset - 0x400); 1039 if (irq >= s->num_irq) 1040 goto bad_reg; 1041 res = gic_dist_get_priority(s, cpu, irq, attrs); 1042 } else if (offset < 0xc00) { 1043 /* Interrupt CPU Target. */ 1044 if (s->num_cpu == 1 && s->revision != REV_11MPCORE) { 1045 /* For uniprocessor GICs these RAZ/WI */ 1046 res = 0; 1047 } else { 1048 irq = (offset - 0x800); 1049 if (irq >= s->num_irq) { 1050 goto bad_reg; 1051 } 1052 if (irq < 29 && s->revision == REV_11MPCORE) { 1053 res = 0; 1054 } else if (irq < GIC_INTERNAL) { 1055 res = cm; 1056 } else { 1057 res = GIC_DIST_TARGET(irq); 1058 } 1059 } 1060 } else if (offset < 0xf00) { 1061 /* Interrupt Configuration. */ 1062 irq = (offset - 0xc00) * 4; 1063 if (irq >= s->num_irq) 1064 goto bad_reg; 1065 res = 0; 1066 for (i = 0; i < 4; i++) { 1067 if (s->security_extn && !attrs.secure && 1068 !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) { 1069 continue; /* Ignore Non-secure access of Group0 IRQ */ 1070 } 1071 1072 if (GIC_DIST_TEST_MODEL(irq + i)) { 1073 res |= (1 << (i * 2)); 1074 } 1075 if (GIC_DIST_TEST_EDGE_TRIGGER(irq + i)) { 1076 res |= (2 << (i * 2)); 1077 } 1078 } 1079 } else if (offset < 0xf10) { 1080 goto bad_reg; 1081 } else if (offset < 0xf30) { 1082 if (s->revision == REV_11MPCORE) { 1083 goto bad_reg; 1084 } 1085 1086 if (offset < 0xf20) { 1087 /* GICD_CPENDSGIRn */ 1088 irq = (offset - 0xf10); 1089 } else { 1090 irq = (offset - 0xf20); 1091 /* GICD_SPENDSGIRn */ 1092 } 1093 1094 if (s->security_extn && !attrs.secure && 1095 !GIC_DIST_TEST_GROUP(irq, 1 << cpu)) { 1096 res = 0; /* Ignore Non-secure access of Group0 IRQ */ 1097 } else { 1098 res = s->sgi_pending[irq][cpu]; 1099 } 1100 } else if (offset < 0xfd0) { 1101 goto bad_reg; 1102 } else if (offset < 0x1000) { 1103 if (offset & 3) { 1104 res = 0; 1105 } else { 1106 switch (s->revision) { 1107 case REV_11MPCORE: 1108 res = gic_id_11mpcore[(offset - 0xfd0) >> 2]; 1109 break; 1110 case 1: 1111 res = gic_id_gicv1[(offset - 0xfd0) >> 2]; 1112 break; 1113 case 2: 1114 res = gic_id_gicv2[(offset - 0xfd0) >> 2]; 1115 break; 1116 default: 1117 res = 0; 1118 } 1119 } 1120 } else { 1121 g_assert_not_reached(); 1122 } 1123 return res; 1124 bad_reg: 1125 qemu_log_mask(LOG_GUEST_ERROR, 1126 "gic_dist_readb: Bad offset %x\n", (int)offset); 1127 return 0; 1128 } 1129 1130 static MemTxResult gic_dist_read(void *opaque, hwaddr offset, uint64_t *data, 1131 unsigned size, MemTxAttrs attrs) 1132 { 1133 switch (size) { 1134 case 1: 1135 *data = gic_dist_readb(opaque, offset, attrs); 1136 break; 1137 case 2: 1138 *data = gic_dist_readb(opaque, offset, attrs); 1139 *data |= gic_dist_readb(opaque, offset + 1, attrs) << 8; 1140 break; 1141 case 4: 1142 *data = gic_dist_readb(opaque, offset, attrs); 1143 *data |= gic_dist_readb(opaque, offset + 1, attrs) << 8; 1144 *data |= gic_dist_readb(opaque, offset + 2, attrs) << 16; 1145 *data |= gic_dist_readb(opaque, offset + 3, attrs) << 24; 1146 break; 1147 default: 1148 return MEMTX_ERROR; 1149 } 1150 1151 trace_gic_dist_read(offset, size, *data); 1152 return MEMTX_OK; 1153 } 1154 1155 static void gic_dist_writeb(void *opaque, hwaddr offset, 1156 uint32_t value, MemTxAttrs attrs) 1157 { 1158 GICState *s = (GICState *)opaque; 1159 int irq; 1160 int i; 1161 int cpu; 1162 1163 cpu = gic_get_current_cpu(s); 1164 if (offset < 0x100) { 1165 if (offset == 0) { 1166 if (s->security_extn && !attrs.secure) { 1167 /* NS version is just an alias of the S version's bit 1 */ 1168 s->ctlr = deposit32(s->ctlr, 1, 1, value); 1169 } else if (gic_has_groups(s)) { 1170 s->ctlr = value & (GICD_CTLR_EN_GRP0 | GICD_CTLR_EN_GRP1); 1171 } else { 1172 s->ctlr = value & GICD_CTLR_EN_GRP0; 1173 } 1174 DPRINTF("Distributor: Group0 %sabled; Group 1 %sabled\n", 1175 s->ctlr & GICD_CTLR_EN_GRP0 ? "En" : "Dis", 1176 s->ctlr & GICD_CTLR_EN_GRP1 ? "En" : "Dis"); 1177 } else if (offset < 4) { 1178 /* ignored. */ 1179 } else if (offset >= 0x80) { 1180 /* Interrupt Group Registers: RAZ/WI for NS access to secure 1181 * GIC, or for GICs without groups. 1182 */ 1183 if (!(s->security_extn && !attrs.secure) && gic_has_groups(s)) { 1184 /* Every byte offset holds 8 group status bits */ 1185 irq = (offset - 0x80) * 8; 1186 if (irq >= s->num_irq) { 1187 goto bad_reg; 1188 } 1189 for (i = 0; i < 8; i++) { 1190 /* Group bits are banked for private interrupts */ 1191 int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK; 1192 if (value & (1 << i)) { 1193 /* Group1 (Non-secure) */ 1194 GIC_DIST_SET_GROUP(irq + i, cm); 1195 } else { 1196 /* Group0 (Secure) */ 1197 GIC_DIST_CLEAR_GROUP(irq + i, cm); 1198 } 1199 } 1200 } 1201 } else { 1202 goto bad_reg; 1203 } 1204 } else if (offset < 0x180) { 1205 /* Interrupt Set Enable. */ 1206 irq = (offset - 0x100) * 8; 1207 if (irq >= s->num_irq) 1208 goto bad_reg; 1209 if (irq < GIC_NR_SGIS) { 1210 value = 0xff; 1211 } 1212 1213 for (i = 0; i < 8; i++) { 1214 if (value & (1 << i)) { 1215 int mask = 1216 (irq < GIC_INTERNAL) ? (1 << cpu) 1217 : GIC_DIST_TARGET(irq + i); 1218 int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK; 1219 1220 if (s->security_extn && !attrs.secure && 1221 !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) { 1222 continue; /* Ignore Non-secure access of Group0 IRQ */ 1223 } 1224 1225 if (!GIC_DIST_TEST_ENABLED(irq + i, cm)) { 1226 DPRINTF("Enabled IRQ %d\n", irq + i); 1227 trace_gic_enable_irq(irq + i); 1228 } 1229 GIC_DIST_SET_ENABLED(irq + i, cm); 1230 /* If a raised level triggered IRQ enabled then mark 1231 is as pending. */ 1232 if (GIC_DIST_TEST_LEVEL(irq + i, mask) 1233 && !GIC_DIST_TEST_EDGE_TRIGGER(irq + i)) { 1234 DPRINTF("Set %d pending mask %x\n", irq + i, mask); 1235 GIC_DIST_SET_PENDING(irq + i, mask); 1236 } 1237 } 1238 } 1239 } else if (offset < 0x200) { 1240 /* Interrupt Clear Enable. */ 1241 irq = (offset - 0x180) * 8; 1242 if (irq >= s->num_irq) 1243 goto bad_reg; 1244 if (irq < GIC_NR_SGIS) { 1245 value = 0; 1246 } 1247 1248 for (i = 0; i < 8; i++) { 1249 if (value & (1 << i)) { 1250 int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK; 1251 1252 if (s->security_extn && !attrs.secure && 1253 !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) { 1254 continue; /* Ignore Non-secure access of Group0 IRQ */ 1255 } 1256 1257 if (GIC_DIST_TEST_ENABLED(irq + i, cm)) { 1258 DPRINTF("Disabled IRQ %d\n", irq + i); 1259 trace_gic_disable_irq(irq + i); 1260 } 1261 GIC_DIST_CLEAR_ENABLED(irq + i, cm); 1262 } 1263 } 1264 } else if (offset < 0x280) { 1265 /* Interrupt Set Pending. */ 1266 irq = (offset - 0x200) * 8; 1267 if (irq >= s->num_irq) 1268 goto bad_reg; 1269 if (irq < GIC_NR_SGIS) { 1270 value = 0; 1271 } 1272 1273 for (i = 0; i < 8; i++) { 1274 if (value & (1 << i)) { 1275 if (s->security_extn && !attrs.secure && 1276 !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) { 1277 continue; /* Ignore Non-secure access of Group0 IRQ */ 1278 } 1279 1280 GIC_DIST_SET_PENDING(irq + i, GIC_DIST_TARGET(irq + i)); 1281 } 1282 } 1283 } else if (offset < 0x300) { 1284 /* Interrupt Clear Pending. */ 1285 irq = (offset - 0x280) * 8; 1286 if (irq >= s->num_irq) 1287 goto bad_reg; 1288 if (irq < GIC_NR_SGIS) { 1289 value = 0; 1290 } 1291 1292 for (i = 0; i < 8; i++) { 1293 if (s->security_extn && !attrs.secure && 1294 !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) { 1295 continue; /* Ignore Non-secure access of Group0 IRQ */ 1296 } 1297 1298 /* ??? This currently clears the pending bit for all CPUs, even 1299 for per-CPU interrupts. It's unclear whether this is the 1300 corect behavior. */ 1301 if (value & (1 << i)) { 1302 GIC_DIST_CLEAR_PENDING(irq + i, ALL_CPU_MASK); 1303 } 1304 } 1305 } else if (offset < 0x380) { 1306 /* Interrupt Set Active. */ 1307 if (s->revision != 2) { 1308 goto bad_reg; 1309 } 1310 1311 irq = (offset - 0x300) * 8; 1312 if (irq >= s->num_irq) { 1313 goto bad_reg; 1314 } 1315 1316 /* This register is banked per-cpu for PPIs */ 1317 int cm = irq < GIC_INTERNAL ? (1 << cpu) : ALL_CPU_MASK; 1318 1319 for (i = 0; i < 8; i++) { 1320 if (s->security_extn && !attrs.secure && 1321 !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) { 1322 continue; /* Ignore Non-secure access of Group0 IRQ */ 1323 } 1324 1325 if (value & (1 << i)) { 1326 GIC_DIST_SET_ACTIVE(irq + i, cm); 1327 } 1328 } 1329 } else if (offset < 0x400) { 1330 /* Interrupt Clear Active. */ 1331 if (s->revision != 2) { 1332 goto bad_reg; 1333 } 1334 1335 irq = (offset - 0x380) * 8; 1336 if (irq >= s->num_irq) { 1337 goto bad_reg; 1338 } 1339 1340 /* This register is banked per-cpu for PPIs */ 1341 int cm = irq < GIC_INTERNAL ? (1 << cpu) : ALL_CPU_MASK; 1342 1343 for (i = 0; i < 8; i++) { 1344 if (s->security_extn && !attrs.secure && 1345 !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) { 1346 continue; /* Ignore Non-secure access of Group0 IRQ */ 1347 } 1348 1349 if (value & (1 << i)) { 1350 GIC_DIST_CLEAR_ACTIVE(irq + i, cm); 1351 } 1352 } 1353 } else if (offset < 0x800) { 1354 /* Interrupt Priority. */ 1355 irq = (offset - 0x400); 1356 if (irq >= s->num_irq) 1357 goto bad_reg; 1358 gic_dist_set_priority(s, cpu, irq, value, attrs); 1359 } else if (offset < 0xc00) { 1360 /* Interrupt CPU Target. RAZ/WI on uniprocessor GICs, with the 1361 * annoying exception of the 11MPCore's GIC. 1362 */ 1363 if (s->num_cpu != 1 || s->revision == REV_11MPCORE) { 1364 irq = (offset - 0x800); 1365 if (irq >= s->num_irq) { 1366 goto bad_reg; 1367 } 1368 if (irq < 29 && s->revision == REV_11MPCORE) { 1369 value = 0; 1370 } else if (irq < GIC_INTERNAL) { 1371 value = ALL_CPU_MASK; 1372 } 1373 s->irq_target[irq] = value & ALL_CPU_MASK; 1374 } 1375 } else if (offset < 0xf00) { 1376 /* Interrupt Configuration. */ 1377 irq = (offset - 0xc00) * 4; 1378 if (irq >= s->num_irq) 1379 goto bad_reg; 1380 if (irq < GIC_NR_SGIS) 1381 value |= 0xaa; 1382 for (i = 0; i < 4; i++) { 1383 if (s->security_extn && !attrs.secure && 1384 !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) { 1385 continue; /* Ignore Non-secure access of Group0 IRQ */ 1386 } 1387 1388 if (s->revision == REV_11MPCORE) { 1389 if (value & (1 << (i * 2))) { 1390 GIC_DIST_SET_MODEL(irq + i); 1391 } else { 1392 GIC_DIST_CLEAR_MODEL(irq + i); 1393 } 1394 } 1395 if (value & (2 << (i * 2))) { 1396 GIC_DIST_SET_EDGE_TRIGGER(irq + i); 1397 } else { 1398 GIC_DIST_CLEAR_EDGE_TRIGGER(irq + i); 1399 } 1400 } 1401 } else if (offset < 0xf10) { 1402 /* 0xf00 is only handled for 32-bit writes. */ 1403 goto bad_reg; 1404 } else if (offset < 0xf20) { 1405 /* GICD_CPENDSGIRn */ 1406 if (s->revision == REV_11MPCORE) { 1407 goto bad_reg; 1408 } 1409 irq = (offset - 0xf10); 1410 1411 if (!s->security_extn || attrs.secure || 1412 GIC_DIST_TEST_GROUP(irq, 1 << cpu)) { 1413 s->sgi_pending[irq][cpu] &= ~value; 1414 if (s->sgi_pending[irq][cpu] == 0) { 1415 GIC_DIST_CLEAR_PENDING(irq, 1 << cpu); 1416 } 1417 } 1418 } else if (offset < 0xf30) { 1419 /* GICD_SPENDSGIRn */ 1420 if (s->revision == REV_11MPCORE) { 1421 goto bad_reg; 1422 } 1423 irq = (offset - 0xf20); 1424 1425 if (!s->security_extn || attrs.secure || 1426 GIC_DIST_TEST_GROUP(irq, 1 << cpu)) { 1427 GIC_DIST_SET_PENDING(irq, 1 << cpu); 1428 s->sgi_pending[irq][cpu] |= value; 1429 } 1430 } else { 1431 goto bad_reg; 1432 } 1433 gic_update(s); 1434 return; 1435 bad_reg: 1436 qemu_log_mask(LOG_GUEST_ERROR, 1437 "gic_dist_writeb: Bad offset %x\n", (int)offset); 1438 } 1439 1440 static void gic_dist_writew(void *opaque, hwaddr offset, 1441 uint32_t value, MemTxAttrs attrs) 1442 { 1443 gic_dist_writeb(opaque, offset, value & 0xff, attrs); 1444 gic_dist_writeb(opaque, offset + 1, value >> 8, attrs); 1445 } 1446 1447 static void gic_dist_writel(void *opaque, hwaddr offset, 1448 uint32_t value, MemTxAttrs attrs) 1449 { 1450 GICState *s = (GICState *)opaque; 1451 if (offset == 0xf00) { 1452 int cpu; 1453 int irq; 1454 int mask; 1455 int target_cpu; 1456 1457 cpu = gic_get_current_cpu(s); 1458 irq = value & 0x3ff; 1459 switch ((value >> 24) & 3) { 1460 case 0: 1461 mask = (value >> 16) & ALL_CPU_MASK; 1462 break; 1463 case 1: 1464 mask = ALL_CPU_MASK ^ (1 << cpu); 1465 break; 1466 case 2: 1467 mask = 1 << cpu; 1468 break; 1469 default: 1470 DPRINTF("Bad Soft Int target filter\n"); 1471 mask = ALL_CPU_MASK; 1472 break; 1473 } 1474 GIC_DIST_SET_PENDING(irq, mask); 1475 target_cpu = ctz32(mask); 1476 while (target_cpu < GIC_NCPU) { 1477 s->sgi_pending[irq][target_cpu] |= (1 << cpu); 1478 mask &= ~(1 << target_cpu); 1479 target_cpu = ctz32(mask); 1480 } 1481 gic_update(s); 1482 return; 1483 } 1484 gic_dist_writew(opaque, offset, value & 0xffff, attrs); 1485 gic_dist_writew(opaque, offset + 2, value >> 16, attrs); 1486 } 1487 1488 static MemTxResult gic_dist_write(void *opaque, hwaddr offset, uint64_t data, 1489 unsigned size, MemTxAttrs attrs) 1490 { 1491 trace_gic_dist_write(offset, size, data); 1492 1493 switch (size) { 1494 case 1: 1495 gic_dist_writeb(opaque, offset, data, attrs); 1496 return MEMTX_OK; 1497 case 2: 1498 gic_dist_writew(opaque, offset, data, attrs); 1499 return MEMTX_OK; 1500 case 4: 1501 gic_dist_writel(opaque, offset, data, attrs); 1502 return MEMTX_OK; 1503 default: 1504 return MEMTX_ERROR; 1505 } 1506 } 1507 1508 static inline uint32_t gic_apr_ns_view(GICState *s, int cpu, int regno) 1509 { 1510 /* Return the Nonsecure view of GICC_APR<regno>. This is the 1511 * second half of GICC_NSAPR. 1512 */ 1513 switch (GIC_MIN_BPR) { 1514 case 0: 1515 if (regno < 2) { 1516 return s->nsapr[regno + 2][cpu]; 1517 } 1518 break; 1519 case 1: 1520 if (regno == 0) { 1521 return s->nsapr[regno + 1][cpu]; 1522 } 1523 break; 1524 case 2: 1525 if (regno == 0) { 1526 return extract32(s->nsapr[0][cpu], 16, 16); 1527 } 1528 break; 1529 case 3: 1530 if (regno == 0) { 1531 return extract32(s->nsapr[0][cpu], 8, 8); 1532 } 1533 break; 1534 default: 1535 g_assert_not_reached(); 1536 } 1537 return 0; 1538 } 1539 1540 static inline void gic_apr_write_ns_view(GICState *s, int cpu, int regno, 1541 uint32_t value) 1542 { 1543 /* Write the Nonsecure view of GICC_APR<regno>. */ 1544 switch (GIC_MIN_BPR) { 1545 case 0: 1546 if (regno < 2) { 1547 s->nsapr[regno + 2][cpu] = value; 1548 } 1549 break; 1550 case 1: 1551 if (regno == 0) { 1552 s->nsapr[regno + 1][cpu] = value; 1553 } 1554 break; 1555 case 2: 1556 if (regno == 0) { 1557 s->nsapr[0][cpu] = deposit32(s->nsapr[0][cpu], 16, 16, value); 1558 } 1559 break; 1560 case 3: 1561 if (regno == 0) { 1562 s->nsapr[0][cpu] = deposit32(s->nsapr[0][cpu], 8, 8, value); 1563 } 1564 break; 1565 default: 1566 g_assert_not_reached(); 1567 } 1568 } 1569 1570 static MemTxResult gic_cpu_read(GICState *s, int cpu, int offset, 1571 uint64_t *data, MemTxAttrs attrs) 1572 { 1573 switch (offset) { 1574 case 0x00: /* Control */ 1575 *data = gic_get_cpu_control(s, cpu, attrs); 1576 break; 1577 case 0x04: /* Priority mask */ 1578 *data = gic_get_priority_mask(s, cpu, attrs); 1579 break; 1580 case 0x08: /* Binary Point */ 1581 if (gic_cpu_ns_access(s, cpu, attrs)) { 1582 if (s->cpu_ctlr[cpu] & GICC_CTLR_CBPR) { 1583 /* NS view of BPR when CBPR is 1 */ 1584 *data = MIN(s->bpr[cpu] + 1, 7); 1585 } else { 1586 /* BPR is banked. Non-secure copy stored in ABPR. */ 1587 *data = s->abpr[cpu]; 1588 } 1589 } else { 1590 *data = s->bpr[cpu]; 1591 } 1592 break; 1593 case 0x0c: /* Acknowledge */ 1594 *data = gic_acknowledge_irq(s, cpu, attrs); 1595 break; 1596 case 0x14: /* Running Priority */ 1597 *data = gic_get_running_priority(s, cpu, attrs); 1598 break; 1599 case 0x18: /* Highest Pending Interrupt */ 1600 *data = gic_get_current_pending_irq(s, cpu, attrs); 1601 break; 1602 case 0x1c: /* Aliased Binary Point */ 1603 /* GIC v2, no security: ABPR 1604 * GIC v1, no security: not implemented (RAZ/WI) 1605 * With security extensions, secure access: ABPR (alias of NS BPR) 1606 * With security extensions, nonsecure access: RAZ/WI 1607 */ 1608 if (!gic_has_groups(s) || (gic_cpu_ns_access(s, cpu, attrs))) { 1609 *data = 0; 1610 } else { 1611 *data = s->abpr[cpu]; 1612 } 1613 break; 1614 case 0xd0: case 0xd4: case 0xd8: case 0xdc: 1615 { 1616 int regno = (offset - 0xd0) / 4; 1617 int nr_aprs = gic_is_vcpu(cpu) ? GIC_VIRT_NR_APRS : GIC_NR_APRS; 1618 1619 if (regno >= nr_aprs || s->revision != 2) { 1620 *data = 0; 1621 } else if (gic_is_vcpu(cpu)) { 1622 *data = s->h_apr[gic_get_vcpu_real_id(cpu)]; 1623 } else if (gic_cpu_ns_access(s, cpu, attrs)) { 1624 /* NS view of GICC_APR<n> is the top half of GIC_NSAPR<n> */ 1625 *data = gic_apr_ns_view(s, regno, cpu); 1626 } else { 1627 *data = s->apr[regno][cpu]; 1628 } 1629 break; 1630 } 1631 case 0xe0: case 0xe4: case 0xe8: case 0xec: 1632 { 1633 int regno = (offset - 0xe0) / 4; 1634 1635 if (regno >= GIC_NR_APRS || s->revision != 2 || !gic_has_groups(s) || 1636 gic_cpu_ns_access(s, cpu, attrs) || gic_is_vcpu(cpu)) { 1637 *data = 0; 1638 } else { 1639 *data = s->nsapr[regno][cpu]; 1640 } 1641 break; 1642 } 1643 default: 1644 qemu_log_mask(LOG_GUEST_ERROR, 1645 "gic_cpu_read: Bad offset %x\n", (int)offset); 1646 *data = 0; 1647 break; 1648 } 1649 1650 trace_gic_cpu_read(gic_is_vcpu(cpu) ? "vcpu" : "cpu", 1651 gic_get_vcpu_real_id(cpu), offset, *data); 1652 return MEMTX_OK; 1653 } 1654 1655 static MemTxResult gic_cpu_write(GICState *s, int cpu, int offset, 1656 uint32_t value, MemTxAttrs attrs) 1657 { 1658 trace_gic_cpu_write(gic_is_vcpu(cpu) ? "vcpu" : "cpu", 1659 gic_get_vcpu_real_id(cpu), offset, value); 1660 1661 switch (offset) { 1662 case 0x00: /* Control */ 1663 gic_set_cpu_control(s, cpu, value, attrs); 1664 break; 1665 case 0x04: /* Priority mask */ 1666 gic_set_priority_mask(s, cpu, value, attrs); 1667 break; 1668 case 0x08: /* Binary Point */ 1669 if (gic_cpu_ns_access(s, cpu, attrs)) { 1670 if (s->cpu_ctlr[cpu] & GICC_CTLR_CBPR) { 1671 /* WI when CBPR is 1 */ 1672 return MEMTX_OK; 1673 } else { 1674 s->abpr[cpu] = MAX(value & 0x7, GIC_MIN_ABPR); 1675 } 1676 } else { 1677 int min_bpr = gic_is_vcpu(cpu) ? GIC_VIRT_MIN_BPR : GIC_MIN_BPR; 1678 s->bpr[cpu] = MAX(value & 0x7, min_bpr); 1679 } 1680 break; 1681 case 0x10: /* End Of Interrupt */ 1682 gic_complete_irq(s, cpu, value & 0x3ff, attrs); 1683 return MEMTX_OK; 1684 case 0x1c: /* Aliased Binary Point */ 1685 if (!gic_has_groups(s) || (gic_cpu_ns_access(s, cpu, attrs))) { 1686 /* unimplemented, or NS access: RAZ/WI */ 1687 return MEMTX_OK; 1688 } else { 1689 s->abpr[cpu] = MAX(value & 0x7, GIC_MIN_ABPR); 1690 } 1691 break; 1692 case 0xd0: case 0xd4: case 0xd8: case 0xdc: 1693 { 1694 int regno = (offset - 0xd0) / 4; 1695 int nr_aprs = gic_is_vcpu(cpu) ? GIC_VIRT_NR_APRS : GIC_NR_APRS; 1696 1697 if (regno >= nr_aprs || s->revision != 2) { 1698 return MEMTX_OK; 1699 } 1700 if (gic_is_vcpu(cpu)) { 1701 s->h_apr[gic_get_vcpu_real_id(cpu)] = value; 1702 } else if (gic_cpu_ns_access(s, cpu, attrs)) { 1703 /* NS view of GICC_APR<n> is the top half of GIC_NSAPR<n> */ 1704 gic_apr_write_ns_view(s, regno, cpu, value); 1705 } else { 1706 s->apr[regno][cpu] = value; 1707 } 1708 break; 1709 } 1710 case 0xe0: case 0xe4: case 0xe8: case 0xec: 1711 { 1712 int regno = (offset - 0xe0) / 4; 1713 1714 if (regno >= GIC_NR_APRS || s->revision != 2) { 1715 return MEMTX_OK; 1716 } 1717 if (gic_is_vcpu(cpu)) { 1718 return MEMTX_OK; 1719 } 1720 if (!gic_has_groups(s) || (gic_cpu_ns_access(s, cpu, attrs))) { 1721 return MEMTX_OK; 1722 } 1723 s->nsapr[regno][cpu] = value; 1724 break; 1725 } 1726 case 0x1000: 1727 /* GICC_DIR */ 1728 gic_deactivate_irq(s, cpu, value & 0x3ff, attrs); 1729 break; 1730 default: 1731 qemu_log_mask(LOG_GUEST_ERROR, 1732 "gic_cpu_write: Bad offset %x\n", (int)offset); 1733 return MEMTX_OK; 1734 } 1735 1736 if (gic_is_vcpu(cpu)) { 1737 gic_update_virt(s); 1738 } else { 1739 gic_update(s); 1740 } 1741 1742 return MEMTX_OK; 1743 } 1744 1745 /* Wrappers to read/write the GIC CPU interface for the current CPU */ 1746 static MemTxResult gic_thiscpu_read(void *opaque, hwaddr addr, uint64_t *data, 1747 unsigned size, MemTxAttrs attrs) 1748 { 1749 GICState *s = (GICState *)opaque; 1750 return gic_cpu_read(s, gic_get_current_cpu(s), addr, data, attrs); 1751 } 1752 1753 static MemTxResult gic_thiscpu_write(void *opaque, hwaddr addr, 1754 uint64_t value, unsigned size, 1755 MemTxAttrs attrs) 1756 { 1757 GICState *s = (GICState *)opaque; 1758 return gic_cpu_write(s, gic_get_current_cpu(s), addr, value, attrs); 1759 } 1760 1761 /* Wrappers to read/write the GIC CPU interface for a specific CPU. 1762 * These just decode the opaque pointer into GICState* + cpu id. 1763 */ 1764 static MemTxResult gic_do_cpu_read(void *opaque, hwaddr addr, uint64_t *data, 1765 unsigned size, MemTxAttrs attrs) 1766 { 1767 GICState **backref = (GICState **)opaque; 1768 GICState *s = *backref; 1769 int id = (backref - s->backref); 1770 return gic_cpu_read(s, id, addr, data, attrs); 1771 } 1772 1773 static MemTxResult gic_do_cpu_write(void *opaque, hwaddr addr, 1774 uint64_t value, unsigned size, 1775 MemTxAttrs attrs) 1776 { 1777 GICState **backref = (GICState **)opaque; 1778 GICState *s = *backref; 1779 int id = (backref - s->backref); 1780 return gic_cpu_write(s, id, addr, value, attrs); 1781 } 1782 1783 static MemTxResult gic_thisvcpu_read(void *opaque, hwaddr addr, uint64_t *data, 1784 unsigned size, MemTxAttrs attrs) 1785 { 1786 GICState *s = (GICState *)opaque; 1787 1788 return gic_cpu_read(s, gic_get_current_vcpu(s), addr, data, attrs); 1789 } 1790 1791 static MemTxResult gic_thisvcpu_write(void *opaque, hwaddr addr, 1792 uint64_t value, unsigned size, 1793 MemTxAttrs attrs) 1794 { 1795 GICState *s = (GICState *)opaque; 1796 1797 return gic_cpu_write(s, gic_get_current_vcpu(s), addr, value, attrs); 1798 } 1799 1800 static uint32_t gic_compute_eisr(GICState *s, int cpu, int lr_start) 1801 { 1802 int lr_idx; 1803 uint32_t ret = 0; 1804 1805 for (lr_idx = lr_start; lr_idx < s->num_lrs; lr_idx++) { 1806 uint32_t *entry = &s->h_lr[lr_idx][cpu]; 1807 ret = deposit32(ret, lr_idx - lr_start, 1, 1808 gic_lr_entry_is_eoi(*entry)); 1809 } 1810 1811 return ret; 1812 } 1813 1814 static uint32_t gic_compute_elrsr(GICState *s, int cpu, int lr_start) 1815 { 1816 int lr_idx; 1817 uint32_t ret = 0; 1818 1819 for (lr_idx = lr_start; lr_idx < s->num_lrs; lr_idx++) { 1820 uint32_t *entry = &s->h_lr[lr_idx][cpu]; 1821 ret = deposit32(ret, lr_idx - lr_start, 1, 1822 gic_lr_entry_is_free(*entry)); 1823 } 1824 1825 return ret; 1826 } 1827 1828 static void gic_vmcr_write(GICState *s, uint32_t value, MemTxAttrs attrs) 1829 { 1830 int vcpu = gic_get_current_vcpu(s); 1831 uint32_t ctlr; 1832 uint32_t abpr; 1833 uint32_t bpr; 1834 uint32_t prio_mask; 1835 1836 ctlr = FIELD_EX32(value, GICH_VMCR, VMCCtlr); 1837 abpr = FIELD_EX32(value, GICH_VMCR, VMABP); 1838 bpr = FIELD_EX32(value, GICH_VMCR, VMBP); 1839 prio_mask = FIELD_EX32(value, GICH_VMCR, VMPriMask) << 3; 1840 1841 gic_set_cpu_control(s, vcpu, ctlr, attrs); 1842 s->abpr[vcpu] = MAX(abpr, GIC_VIRT_MIN_ABPR); 1843 s->bpr[vcpu] = MAX(bpr, GIC_VIRT_MIN_BPR); 1844 gic_set_priority_mask(s, vcpu, prio_mask, attrs); 1845 } 1846 1847 static MemTxResult gic_hyp_read(void *opaque, int cpu, hwaddr addr, 1848 uint64_t *data, MemTxAttrs attrs) 1849 { 1850 GICState *s = ARM_GIC(opaque); 1851 int vcpu = cpu + GIC_NCPU; 1852 1853 switch (addr) { 1854 case A_GICH_HCR: /* Hypervisor Control */ 1855 *data = s->h_hcr[cpu]; 1856 break; 1857 1858 case A_GICH_VTR: /* VGIC Type */ 1859 *data = FIELD_DP32(0, GICH_VTR, ListRegs, s->num_lrs - 1); 1860 *data = FIELD_DP32(*data, GICH_VTR, PREbits, 1861 GIC_VIRT_MAX_GROUP_PRIO_BITS - 1); 1862 *data = FIELD_DP32(*data, GICH_VTR, PRIbits, 1863 (7 - GIC_VIRT_MIN_BPR) - 1); 1864 break; 1865 1866 case A_GICH_VMCR: /* Virtual Machine Control */ 1867 *data = FIELD_DP32(0, GICH_VMCR, VMCCtlr, 1868 extract32(s->cpu_ctlr[vcpu], 0, 10)); 1869 *data = FIELD_DP32(*data, GICH_VMCR, VMABP, s->abpr[vcpu]); 1870 *data = FIELD_DP32(*data, GICH_VMCR, VMBP, s->bpr[vcpu]); 1871 *data = FIELD_DP32(*data, GICH_VMCR, VMPriMask, 1872 extract32(s->priority_mask[vcpu], 3, 5)); 1873 break; 1874 1875 case A_GICH_MISR: /* Maintenance Interrupt Status */ 1876 *data = s->h_misr[cpu]; 1877 break; 1878 1879 case A_GICH_EISR0: /* End of Interrupt Status 0 and 1 */ 1880 case A_GICH_EISR1: 1881 *data = gic_compute_eisr(s, cpu, (addr - A_GICH_EISR0) * 8); 1882 break; 1883 1884 case A_GICH_ELRSR0: /* Empty List Status 0 and 1 */ 1885 case A_GICH_ELRSR1: 1886 *data = gic_compute_elrsr(s, cpu, (addr - A_GICH_ELRSR0) * 8); 1887 break; 1888 1889 case A_GICH_APR: /* Active Priorities */ 1890 *data = s->h_apr[cpu]; 1891 break; 1892 1893 case A_GICH_LR0 ... A_GICH_LR63: /* List Registers */ 1894 { 1895 int lr_idx = (addr - A_GICH_LR0) / 4; 1896 1897 if (lr_idx > s->num_lrs) { 1898 *data = 0; 1899 } else { 1900 *data = s->h_lr[lr_idx][cpu]; 1901 } 1902 break; 1903 } 1904 1905 default: 1906 qemu_log_mask(LOG_GUEST_ERROR, 1907 "gic_hyp_read: Bad offset %" HWADDR_PRIx "\n", addr); 1908 return MEMTX_OK; 1909 } 1910 1911 trace_gic_hyp_read(addr, *data); 1912 return MEMTX_OK; 1913 } 1914 1915 static MemTxResult gic_hyp_write(void *opaque, int cpu, hwaddr addr, 1916 uint64_t value, MemTxAttrs attrs) 1917 { 1918 GICState *s = ARM_GIC(opaque); 1919 int vcpu = cpu + GIC_NCPU; 1920 1921 trace_gic_hyp_write(addr, value); 1922 1923 switch (addr) { 1924 case A_GICH_HCR: /* Hypervisor Control */ 1925 s->h_hcr[cpu] = value & GICH_HCR_MASK; 1926 break; 1927 1928 case A_GICH_VMCR: /* Virtual Machine Control */ 1929 gic_vmcr_write(s, value, attrs); 1930 break; 1931 1932 case A_GICH_APR: /* Active Priorities */ 1933 s->h_apr[cpu] = value; 1934 s->running_priority[vcpu] = gic_get_prio_from_apr_bits(s, vcpu); 1935 break; 1936 1937 case A_GICH_LR0 ... A_GICH_LR63: /* List Registers */ 1938 { 1939 int lr_idx = (addr - A_GICH_LR0) / 4; 1940 1941 if (lr_idx > s->num_lrs) { 1942 return MEMTX_OK; 1943 } 1944 1945 s->h_lr[lr_idx][cpu] = value & GICH_LR_MASK; 1946 trace_gic_lr_entry(cpu, lr_idx, s->h_lr[lr_idx][cpu]); 1947 break; 1948 } 1949 1950 default: 1951 qemu_log_mask(LOG_GUEST_ERROR, 1952 "gic_hyp_write: Bad offset %" HWADDR_PRIx "\n", addr); 1953 return MEMTX_OK; 1954 } 1955 1956 gic_update_virt(s); 1957 return MEMTX_OK; 1958 } 1959 1960 static MemTxResult gic_thiscpu_hyp_read(void *opaque, hwaddr addr, uint64_t *data, 1961 unsigned size, MemTxAttrs attrs) 1962 { 1963 GICState *s = (GICState *)opaque; 1964 1965 return gic_hyp_read(s, gic_get_current_cpu(s), addr, data, attrs); 1966 } 1967 1968 static MemTxResult gic_thiscpu_hyp_write(void *opaque, hwaddr addr, 1969 uint64_t value, unsigned size, 1970 MemTxAttrs attrs) 1971 { 1972 GICState *s = (GICState *)opaque; 1973 1974 return gic_hyp_write(s, gic_get_current_cpu(s), addr, value, attrs); 1975 } 1976 1977 static MemTxResult gic_do_hyp_read(void *opaque, hwaddr addr, uint64_t *data, 1978 unsigned size, MemTxAttrs attrs) 1979 { 1980 GICState **backref = (GICState **)opaque; 1981 GICState *s = *backref; 1982 int id = (backref - s->backref); 1983 1984 return gic_hyp_read(s, id, addr, data, attrs); 1985 } 1986 1987 static MemTxResult gic_do_hyp_write(void *opaque, hwaddr addr, 1988 uint64_t value, unsigned size, 1989 MemTxAttrs attrs) 1990 { 1991 GICState **backref = (GICState **)opaque; 1992 GICState *s = *backref; 1993 int id = (backref - s->backref); 1994 1995 return gic_hyp_write(s, id + GIC_NCPU, addr, value, attrs); 1996 1997 } 1998 1999 static const MemoryRegionOps gic_ops[2] = { 2000 { 2001 .read_with_attrs = gic_dist_read, 2002 .write_with_attrs = gic_dist_write, 2003 .endianness = DEVICE_NATIVE_ENDIAN, 2004 }, 2005 { 2006 .read_with_attrs = gic_thiscpu_read, 2007 .write_with_attrs = gic_thiscpu_write, 2008 .endianness = DEVICE_NATIVE_ENDIAN, 2009 } 2010 }; 2011 2012 static const MemoryRegionOps gic_cpu_ops = { 2013 .read_with_attrs = gic_do_cpu_read, 2014 .write_with_attrs = gic_do_cpu_write, 2015 .endianness = DEVICE_NATIVE_ENDIAN, 2016 }; 2017 2018 static const MemoryRegionOps gic_virt_ops[2] = { 2019 { 2020 .read_with_attrs = gic_thiscpu_hyp_read, 2021 .write_with_attrs = gic_thiscpu_hyp_write, 2022 .endianness = DEVICE_NATIVE_ENDIAN, 2023 }, 2024 { 2025 .read_with_attrs = gic_thisvcpu_read, 2026 .write_with_attrs = gic_thisvcpu_write, 2027 .endianness = DEVICE_NATIVE_ENDIAN, 2028 } 2029 }; 2030 2031 static const MemoryRegionOps gic_viface_ops = { 2032 .read_with_attrs = gic_do_hyp_read, 2033 .write_with_attrs = gic_do_hyp_write, 2034 .endianness = DEVICE_NATIVE_ENDIAN, 2035 }; 2036 2037 static void arm_gic_realize(DeviceState *dev, Error **errp) 2038 { 2039 /* Device instance realize function for the GIC sysbus device */ 2040 int i; 2041 GICState *s = ARM_GIC(dev); 2042 SysBusDevice *sbd = SYS_BUS_DEVICE(dev); 2043 ARMGICClass *agc = ARM_GIC_GET_CLASS(s); 2044 Error *local_err = NULL; 2045 2046 agc->parent_realize(dev, &local_err); 2047 if (local_err) { 2048 error_propagate(errp, local_err); 2049 return; 2050 } 2051 2052 if (kvm_enabled() && !kvm_arm_supports_user_irq()) { 2053 error_setg(errp, "KVM with user space irqchip only works when the " 2054 "host kernel supports KVM_CAP_ARM_USER_IRQ"); 2055 return; 2056 } 2057 2058 /* This creates distributor, main CPU interface (s->cpuiomem[0]) and if 2059 * enabled, virtualization extensions related interfaces (main virtual 2060 * interface (s->vifaceiomem[0]) and virtual CPU interface). 2061 */ 2062 gic_init_irqs_and_mmio(s, gic_set_irq, gic_ops, gic_virt_ops); 2063 2064 /* Extra core-specific regions for the CPU interfaces. This is 2065 * necessary for "franken-GIC" implementations, for example on 2066 * Exynos 4. 2067 * NB that the memory region size of 0x100 applies for the 11MPCore 2068 * and also cores following the GIC v1 spec (ie A9). 2069 * GIC v2 defines a larger memory region (0x1000) so this will need 2070 * to be extended when we implement A15. 2071 */ 2072 for (i = 0; i < s->num_cpu; i++) { 2073 s->backref[i] = s; 2074 memory_region_init_io(&s->cpuiomem[i+1], OBJECT(s), &gic_cpu_ops, 2075 &s->backref[i], "gic_cpu", 0x100); 2076 sysbus_init_mmio(sbd, &s->cpuiomem[i+1]); 2077 } 2078 2079 /* Extra core-specific regions for virtual interfaces. This is required by 2080 * the GICv2 specification. 2081 */ 2082 if (s->virt_extn) { 2083 for (i = 0; i < s->num_cpu; i++) { 2084 memory_region_init_io(&s->vifaceiomem[i + 1], OBJECT(s), 2085 &gic_viface_ops, &s->backref[i], 2086 "gic_viface", 0x200); 2087 sysbus_init_mmio(sbd, &s->vifaceiomem[i + 1]); 2088 } 2089 } 2090 2091 } 2092 2093 static void arm_gic_class_init(ObjectClass *klass, void *data) 2094 { 2095 DeviceClass *dc = DEVICE_CLASS(klass); 2096 ARMGICClass *agc = ARM_GIC_CLASS(klass); 2097 2098 device_class_set_parent_realize(dc, arm_gic_realize, &agc->parent_realize); 2099 } 2100 2101 static const TypeInfo arm_gic_info = { 2102 .name = TYPE_ARM_GIC, 2103 .parent = TYPE_ARM_GIC_COMMON, 2104 .instance_size = sizeof(GICState), 2105 .class_init = arm_gic_class_init, 2106 .class_size = sizeof(ARMGICClass), 2107 }; 2108 2109 static void arm_gic_register_types(void) 2110 { 2111 type_register_static(&arm_gic_info); 2112 } 2113 2114 type_init(arm_gic_register_types) 2115