1 /* 2 * ARM Generic/Distributed Interrupt Controller 3 * 4 * Copyright (c) 2006-2007 CodeSourcery. 5 * Written by Paul Brook 6 * 7 * This code is licensed under the GPL. 8 */ 9 10 /* This file contains implementation code for the RealView EB interrupt 11 * controller, MPCore distributed interrupt controller and ARMv7-M 12 * Nested Vectored Interrupt Controller. 13 * It is compiled in two ways: 14 * (1) as a standalone file to produce a sysbus device which is a GIC 15 * that can be used on the realview board and as one of the builtin 16 * private peripherals for the ARM MP CPUs (11MPCore, A9, etc) 17 * (2) by being directly #included into armv7m_nvic.c to produce the 18 * armv7m_nvic device. 19 */ 20 21 #include "qemu/osdep.h" 22 #include "hw/irq.h" 23 #include "hw/sysbus.h" 24 #include "gic_internal.h" 25 #include "qapi/error.h" 26 #include "hw/core/cpu.h" 27 #include "qemu/log.h" 28 #include "qemu/module.h" 29 #include "trace.h" 30 #include "sysemu/kvm.h" 31 32 /* #define DEBUG_GIC */ 33 34 #ifdef DEBUG_GIC 35 #define DEBUG_GIC_GATE 1 36 #else 37 #define DEBUG_GIC_GATE 0 38 #endif 39 40 #define DPRINTF(fmt, ...) do { \ 41 if (DEBUG_GIC_GATE) { \ 42 fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \ 43 } \ 44 } while (0) 45 46 static const uint8_t gic_id_11mpcore[] = { 47 0x00, 0x00, 0x00, 0x00, 0x90, 0x13, 0x04, 0x00, 0x0d, 0xf0, 0x05, 0xb1 48 }; 49 50 static const uint8_t gic_id_gicv1[] = { 51 0x04, 0x00, 0x00, 0x00, 0x90, 0xb3, 0x1b, 0x00, 0x0d, 0xf0, 0x05, 0xb1 52 }; 53 54 static const uint8_t gic_id_gicv2[] = { 55 0x04, 0x00, 0x00, 0x00, 0x90, 0xb4, 0x2b, 0x00, 0x0d, 0xf0, 0x05, 0xb1 56 }; 57 58 static inline int gic_get_current_cpu(GICState *s) 59 { 60 if (s->num_cpu > 1) { 61 return current_cpu->cpu_index; 62 } 63 return 0; 64 } 65 66 static inline int gic_get_current_vcpu(GICState *s) 67 { 68 return gic_get_current_cpu(s) + GIC_NCPU; 69 } 70 71 /* Return true if this GIC config has interrupt groups, which is 72 * true if we're a GICv2, or a GICv1 with the security extensions. 73 */ 74 static inline bool gic_has_groups(GICState *s) 75 { 76 return s->revision == 2 || s->security_extn; 77 } 78 79 static inline bool gic_cpu_ns_access(GICState *s, int cpu, MemTxAttrs attrs) 80 { 81 return !gic_is_vcpu(cpu) && s->security_extn && !attrs.secure; 82 } 83 84 static inline void gic_get_best_irq(GICState *s, int cpu, 85 int *best_irq, int *best_prio, int *group) 86 { 87 int irq; 88 int cm = 1 << cpu; 89 90 *best_irq = 1023; 91 *best_prio = 0x100; 92 93 for (irq = 0; irq < s->num_irq; irq++) { 94 if (GIC_DIST_TEST_ENABLED(irq, cm) && gic_test_pending(s, irq, cm) && 95 (!GIC_DIST_TEST_ACTIVE(irq, cm)) && 96 (irq < GIC_INTERNAL || GIC_DIST_TARGET(irq) & cm)) { 97 if (GIC_DIST_GET_PRIORITY(irq, cpu) < *best_prio) { 98 *best_prio = GIC_DIST_GET_PRIORITY(irq, cpu); 99 *best_irq = irq; 100 } 101 } 102 } 103 104 if (*best_irq < 1023) { 105 *group = GIC_DIST_TEST_GROUP(*best_irq, cm); 106 } 107 } 108 109 static inline void gic_get_best_virq(GICState *s, int cpu, 110 int *best_irq, int *best_prio, int *group) 111 { 112 int lr_idx = 0; 113 114 *best_irq = 1023; 115 *best_prio = 0x100; 116 117 for (lr_idx = 0; lr_idx < s->num_lrs; lr_idx++) { 118 uint32_t lr_entry = s->h_lr[lr_idx][cpu]; 119 int state = GICH_LR_STATE(lr_entry); 120 121 if (state == GICH_LR_STATE_PENDING) { 122 int prio = GICH_LR_PRIORITY(lr_entry); 123 124 if (prio < *best_prio) { 125 *best_prio = prio; 126 *best_irq = GICH_LR_VIRT_ID(lr_entry); 127 *group = GICH_LR_GROUP(lr_entry); 128 } 129 } 130 } 131 } 132 133 /* Return true if IRQ signaling is enabled for the given cpu and at least one 134 * of the given groups: 135 * - in the non-virt case, the distributor must be enabled for one of the 136 * given groups 137 * - in the virt case, the virtual interface must be enabled. 138 * - in all cases, the (v)CPU interface must be enabled for one of the given 139 * groups. 140 */ 141 static inline bool gic_irq_signaling_enabled(GICState *s, int cpu, bool virt, 142 int group_mask) 143 { 144 int cpu_iface = virt ? (cpu + GIC_NCPU) : cpu; 145 146 if (!virt && !(s->ctlr & group_mask)) { 147 return false; 148 } 149 150 if (virt && !(s->h_hcr[cpu] & R_GICH_HCR_EN_MASK)) { 151 return false; 152 } 153 154 if (!(s->cpu_ctlr[cpu_iface] & group_mask)) { 155 return false; 156 } 157 158 return true; 159 } 160 161 /* TODO: Many places that call this routine could be optimized. */ 162 /* Update interrupt status after enabled or pending bits have been changed. */ 163 static inline void gic_update_internal(GICState *s, bool virt) 164 { 165 int best_irq; 166 int best_prio; 167 int irq_level, fiq_level; 168 int cpu, cpu_iface; 169 int group = 0; 170 qemu_irq *irq_lines = virt ? s->parent_virq : s->parent_irq; 171 qemu_irq *fiq_lines = virt ? s->parent_vfiq : s->parent_fiq; 172 173 for (cpu = 0; cpu < s->num_cpu; cpu++) { 174 cpu_iface = virt ? (cpu + GIC_NCPU) : cpu; 175 176 s->current_pending[cpu_iface] = 1023; 177 if (!gic_irq_signaling_enabled(s, cpu, virt, 178 GICD_CTLR_EN_GRP0 | GICD_CTLR_EN_GRP1)) { 179 qemu_irq_lower(irq_lines[cpu]); 180 qemu_irq_lower(fiq_lines[cpu]); 181 continue; 182 } 183 184 if (virt) { 185 gic_get_best_virq(s, cpu, &best_irq, &best_prio, &group); 186 } else { 187 gic_get_best_irq(s, cpu, &best_irq, &best_prio, &group); 188 } 189 190 if (best_irq != 1023) { 191 trace_gic_update_bestirq(virt ? "vcpu" : "cpu", cpu, 192 best_irq, best_prio, 193 s->priority_mask[cpu_iface], 194 s->running_priority[cpu_iface]); 195 } 196 197 irq_level = fiq_level = 0; 198 199 if (best_prio < s->priority_mask[cpu_iface]) { 200 s->current_pending[cpu_iface] = best_irq; 201 if (best_prio < s->running_priority[cpu_iface]) { 202 if (gic_irq_signaling_enabled(s, cpu, virt, 1 << group)) { 203 if (group == 0 && 204 s->cpu_ctlr[cpu_iface] & GICC_CTLR_FIQ_EN) { 205 DPRINTF("Raised pending FIQ %d (cpu %d)\n", 206 best_irq, cpu_iface); 207 fiq_level = 1; 208 trace_gic_update_set_irq(cpu, virt ? "vfiq" : "fiq", 209 fiq_level); 210 } else { 211 DPRINTF("Raised pending IRQ %d (cpu %d)\n", 212 best_irq, cpu_iface); 213 irq_level = 1; 214 trace_gic_update_set_irq(cpu, virt ? "virq" : "irq", 215 irq_level); 216 } 217 } 218 } 219 } 220 221 qemu_set_irq(irq_lines[cpu], irq_level); 222 qemu_set_irq(fiq_lines[cpu], fiq_level); 223 } 224 } 225 226 static void gic_update(GICState *s) 227 { 228 gic_update_internal(s, false); 229 } 230 231 /* Return true if this LR is empty, i.e. the corresponding bit 232 * in ELRSR is set. 233 */ 234 static inline bool gic_lr_entry_is_free(uint32_t entry) 235 { 236 return (GICH_LR_STATE(entry) == GICH_LR_STATE_INVALID) 237 && (GICH_LR_HW(entry) || !GICH_LR_EOI(entry)); 238 } 239 240 /* Return true if this LR should trigger an EOI maintenance interrupt, i.e. the 241 * corrsponding bit in EISR is set. 242 */ 243 static inline bool gic_lr_entry_is_eoi(uint32_t entry) 244 { 245 return (GICH_LR_STATE(entry) == GICH_LR_STATE_INVALID) 246 && !GICH_LR_HW(entry) && GICH_LR_EOI(entry); 247 } 248 249 static inline void gic_extract_lr_info(GICState *s, int cpu, 250 int *num_eoi, int *num_valid, int *num_pending) 251 { 252 int lr_idx; 253 254 *num_eoi = 0; 255 *num_valid = 0; 256 *num_pending = 0; 257 258 for (lr_idx = 0; lr_idx < s->num_lrs; lr_idx++) { 259 uint32_t *entry = &s->h_lr[lr_idx][cpu]; 260 261 if (gic_lr_entry_is_eoi(*entry)) { 262 (*num_eoi)++; 263 } 264 265 if (GICH_LR_STATE(*entry) != GICH_LR_STATE_INVALID) { 266 (*num_valid)++; 267 } 268 269 if (GICH_LR_STATE(*entry) == GICH_LR_STATE_PENDING) { 270 (*num_pending)++; 271 } 272 } 273 } 274 275 static void gic_compute_misr(GICState *s, int cpu) 276 { 277 uint32_t value = 0; 278 int vcpu = cpu + GIC_NCPU; 279 280 int num_eoi, num_valid, num_pending; 281 282 gic_extract_lr_info(s, cpu, &num_eoi, &num_valid, &num_pending); 283 284 /* EOI */ 285 if (num_eoi) { 286 value |= R_GICH_MISR_EOI_MASK; 287 } 288 289 /* U: true if only 0 or 1 LR entry is valid */ 290 if ((s->h_hcr[cpu] & R_GICH_HCR_UIE_MASK) && (num_valid < 2)) { 291 value |= R_GICH_MISR_U_MASK; 292 } 293 294 /* LRENP: EOICount is not 0 */ 295 if ((s->h_hcr[cpu] & R_GICH_HCR_LRENPIE_MASK) && 296 ((s->h_hcr[cpu] & R_GICH_HCR_EOICount_MASK) != 0)) { 297 value |= R_GICH_MISR_LRENP_MASK; 298 } 299 300 /* NP: no pending interrupts */ 301 if ((s->h_hcr[cpu] & R_GICH_HCR_NPIE_MASK) && (num_pending == 0)) { 302 value |= R_GICH_MISR_NP_MASK; 303 } 304 305 /* VGrp0E: group0 virq signaling enabled */ 306 if ((s->h_hcr[cpu] & R_GICH_HCR_VGRP0EIE_MASK) && 307 (s->cpu_ctlr[vcpu] & GICC_CTLR_EN_GRP0)) { 308 value |= R_GICH_MISR_VGrp0E_MASK; 309 } 310 311 /* VGrp0D: group0 virq signaling disabled */ 312 if ((s->h_hcr[cpu] & R_GICH_HCR_VGRP0DIE_MASK) && 313 !(s->cpu_ctlr[vcpu] & GICC_CTLR_EN_GRP0)) { 314 value |= R_GICH_MISR_VGrp0D_MASK; 315 } 316 317 /* VGrp1E: group1 virq signaling enabled */ 318 if ((s->h_hcr[cpu] & R_GICH_HCR_VGRP1EIE_MASK) && 319 (s->cpu_ctlr[vcpu] & GICC_CTLR_EN_GRP1)) { 320 value |= R_GICH_MISR_VGrp1E_MASK; 321 } 322 323 /* VGrp1D: group1 virq signaling disabled */ 324 if ((s->h_hcr[cpu] & R_GICH_HCR_VGRP1DIE_MASK) && 325 !(s->cpu_ctlr[vcpu] & GICC_CTLR_EN_GRP1)) { 326 value |= R_GICH_MISR_VGrp1D_MASK; 327 } 328 329 s->h_misr[cpu] = value; 330 } 331 332 static void gic_update_maintenance(GICState *s) 333 { 334 int cpu = 0; 335 int maint_level; 336 337 for (cpu = 0; cpu < s->num_cpu; cpu++) { 338 gic_compute_misr(s, cpu); 339 maint_level = (s->h_hcr[cpu] & R_GICH_HCR_EN_MASK) && s->h_misr[cpu]; 340 341 trace_gic_update_maintenance_irq(cpu, maint_level); 342 qemu_set_irq(s->maintenance_irq[cpu], maint_level); 343 } 344 } 345 346 static void gic_update_virt(GICState *s) 347 { 348 gic_update_internal(s, true); 349 gic_update_maintenance(s); 350 } 351 352 static void gic_set_irq_11mpcore(GICState *s, int irq, int level, 353 int cm, int target) 354 { 355 if (level) { 356 GIC_DIST_SET_LEVEL(irq, cm); 357 if (GIC_DIST_TEST_EDGE_TRIGGER(irq) || GIC_DIST_TEST_ENABLED(irq, cm)) { 358 DPRINTF("Set %d pending mask %x\n", irq, target); 359 GIC_DIST_SET_PENDING(irq, target); 360 } 361 } else { 362 GIC_DIST_CLEAR_LEVEL(irq, cm); 363 } 364 } 365 366 static void gic_set_irq_generic(GICState *s, int irq, int level, 367 int cm, int target) 368 { 369 if (level) { 370 GIC_DIST_SET_LEVEL(irq, cm); 371 DPRINTF("Set %d pending mask %x\n", irq, target); 372 if (GIC_DIST_TEST_EDGE_TRIGGER(irq)) { 373 GIC_DIST_SET_PENDING(irq, target); 374 } 375 } else { 376 GIC_DIST_CLEAR_LEVEL(irq, cm); 377 } 378 } 379 380 /* Process a change in an external IRQ input. */ 381 static void gic_set_irq(void *opaque, int irq, int level) 382 { 383 /* Meaning of the 'irq' parameter: 384 * [0..N-1] : external interrupts 385 * [N..N+31] : PPI (internal) interrupts for CPU 0 386 * [N+32..N+63] : PPI (internal interrupts for CPU 1 387 * ... 388 */ 389 GICState *s = (GICState *)opaque; 390 int cm, target; 391 if (irq < (s->num_irq - GIC_INTERNAL)) { 392 /* The first external input line is internal interrupt 32. */ 393 cm = ALL_CPU_MASK; 394 irq += GIC_INTERNAL; 395 target = GIC_DIST_TARGET(irq); 396 } else { 397 int cpu; 398 irq -= (s->num_irq - GIC_INTERNAL); 399 cpu = irq / GIC_INTERNAL; 400 irq %= GIC_INTERNAL; 401 cm = 1 << cpu; 402 target = cm; 403 } 404 405 assert(irq >= GIC_NR_SGIS); 406 407 if (level == GIC_DIST_TEST_LEVEL(irq, cm)) { 408 return; 409 } 410 411 if (s->revision == REV_11MPCORE) { 412 gic_set_irq_11mpcore(s, irq, level, cm, target); 413 } else { 414 gic_set_irq_generic(s, irq, level, cm, target); 415 } 416 trace_gic_set_irq(irq, level, cm, target); 417 418 gic_update(s); 419 } 420 421 static uint16_t gic_get_current_pending_irq(GICState *s, int cpu, 422 MemTxAttrs attrs) 423 { 424 uint16_t pending_irq = s->current_pending[cpu]; 425 426 if (pending_irq < GIC_MAXIRQ && gic_has_groups(s)) { 427 int group = gic_test_group(s, pending_irq, cpu); 428 429 /* On a GIC without the security extensions, reading this register 430 * behaves in the same way as a secure access to a GIC with them. 431 */ 432 bool secure = !gic_cpu_ns_access(s, cpu, attrs); 433 434 if (group == 0 && !secure) { 435 /* Group0 interrupts hidden from Non-secure access */ 436 return 1023; 437 } 438 if (group == 1 && secure && !(s->cpu_ctlr[cpu] & GICC_CTLR_ACK_CTL)) { 439 /* Group1 interrupts only seen by Secure access if 440 * AckCtl bit set. 441 */ 442 return 1022; 443 } 444 } 445 return pending_irq; 446 } 447 448 static int gic_get_group_priority(GICState *s, int cpu, int irq) 449 { 450 /* Return the group priority of the specified interrupt 451 * (which is the top bits of its priority, with the number 452 * of bits masked determined by the applicable binary point register). 453 */ 454 int bpr; 455 uint32_t mask; 456 457 if (gic_has_groups(s) && 458 !(s->cpu_ctlr[cpu] & GICC_CTLR_CBPR) && 459 gic_test_group(s, irq, cpu)) { 460 bpr = s->abpr[cpu] - 1; 461 assert(bpr >= 0); 462 } else { 463 bpr = s->bpr[cpu]; 464 } 465 466 /* a BPR of 0 means the group priority bits are [7:1]; 467 * a BPR of 1 means they are [7:2], and so on down to 468 * a BPR of 7 meaning no group priority bits at all. 469 */ 470 mask = ~0U << ((bpr & 7) + 1); 471 472 return gic_get_priority(s, irq, cpu) & mask; 473 } 474 475 static void gic_activate_irq(GICState *s, int cpu, int irq) 476 { 477 /* Set the appropriate Active Priority Register bit for this IRQ, 478 * and update the running priority. 479 */ 480 int prio = gic_get_group_priority(s, cpu, irq); 481 int min_bpr = gic_is_vcpu(cpu) ? GIC_VIRT_MIN_BPR : GIC_MIN_BPR; 482 int preemption_level = prio >> (min_bpr + 1); 483 int regno = preemption_level / 32; 484 int bitno = preemption_level % 32; 485 uint32_t *papr = NULL; 486 487 if (gic_is_vcpu(cpu)) { 488 assert(regno == 0); 489 papr = &s->h_apr[gic_get_vcpu_real_id(cpu)]; 490 } else if (gic_has_groups(s) && gic_test_group(s, irq, cpu)) { 491 papr = &s->nsapr[regno][cpu]; 492 } else { 493 papr = &s->apr[regno][cpu]; 494 } 495 496 *papr |= (1 << bitno); 497 498 s->running_priority[cpu] = prio; 499 gic_set_active(s, irq, cpu); 500 } 501 502 static int gic_get_prio_from_apr_bits(GICState *s, int cpu) 503 { 504 /* Recalculate the current running priority for this CPU based 505 * on the set bits in the Active Priority Registers. 506 */ 507 int i; 508 509 if (gic_is_vcpu(cpu)) { 510 uint32_t apr = s->h_apr[gic_get_vcpu_real_id(cpu)]; 511 if (apr) { 512 return ctz32(apr) << (GIC_VIRT_MIN_BPR + 1); 513 } else { 514 return 0x100; 515 } 516 } 517 518 for (i = 0; i < GIC_NR_APRS; i++) { 519 uint32_t apr = s->apr[i][cpu] | s->nsapr[i][cpu]; 520 if (!apr) { 521 continue; 522 } 523 return (i * 32 + ctz32(apr)) << (GIC_MIN_BPR + 1); 524 } 525 return 0x100; 526 } 527 528 static void gic_drop_prio(GICState *s, int cpu, int group) 529 { 530 /* Drop the priority of the currently active interrupt in the 531 * specified group. 532 * 533 * Note that we can guarantee (because of the requirement to nest 534 * GICC_IAR reads [which activate an interrupt and raise priority] 535 * with GICC_EOIR writes [which drop the priority for the interrupt]) 536 * that the interrupt we're being called for is the highest priority 537 * active interrupt, meaning that it has the lowest set bit in the 538 * APR registers. 539 * 540 * If the guest does not honour the ordering constraints then the 541 * behaviour of the GIC is UNPREDICTABLE, which for us means that 542 * the values of the APR registers might become incorrect and the 543 * running priority will be wrong, so interrupts that should preempt 544 * might not do so, and interrupts that should not preempt might do so. 545 */ 546 if (gic_is_vcpu(cpu)) { 547 int rcpu = gic_get_vcpu_real_id(cpu); 548 549 if (s->h_apr[rcpu]) { 550 /* Clear lowest set bit */ 551 s->h_apr[rcpu] &= s->h_apr[rcpu] - 1; 552 } 553 } else { 554 int i; 555 556 for (i = 0; i < GIC_NR_APRS; i++) { 557 uint32_t *papr = group ? &s->nsapr[i][cpu] : &s->apr[i][cpu]; 558 if (!*papr) { 559 continue; 560 } 561 /* Clear lowest set bit */ 562 *papr &= *papr - 1; 563 break; 564 } 565 } 566 567 s->running_priority[cpu] = gic_get_prio_from_apr_bits(s, cpu); 568 } 569 570 static inline uint32_t gic_clear_pending_sgi(GICState *s, int irq, int cpu) 571 { 572 int src; 573 uint32_t ret; 574 575 if (!gic_is_vcpu(cpu)) { 576 /* Lookup the source CPU for the SGI and clear this in the 577 * sgi_pending map. Return the src and clear the overall pending 578 * state on this CPU if the SGI is not pending from any CPUs. 579 */ 580 assert(s->sgi_pending[irq][cpu] != 0); 581 src = ctz32(s->sgi_pending[irq][cpu]); 582 s->sgi_pending[irq][cpu] &= ~(1 << src); 583 if (s->sgi_pending[irq][cpu] == 0) { 584 gic_clear_pending(s, irq, cpu); 585 } 586 ret = irq | ((src & 0x7) << 10); 587 } else { 588 uint32_t *lr_entry = gic_get_lr_entry(s, irq, cpu); 589 src = GICH_LR_CPUID(*lr_entry); 590 591 gic_clear_pending(s, irq, cpu); 592 ret = irq | (src << 10); 593 } 594 595 return ret; 596 } 597 598 uint32_t gic_acknowledge_irq(GICState *s, int cpu, MemTxAttrs attrs) 599 { 600 int ret, irq; 601 602 /* gic_get_current_pending_irq() will return 1022 or 1023 appropriately 603 * for the case where this GIC supports grouping and the pending interrupt 604 * is in the wrong group. 605 */ 606 irq = gic_get_current_pending_irq(s, cpu, attrs); 607 trace_gic_acknowledge_irq(gic_is_vcpu(cpu) ? "vcpu" : "cpu", 608 gic_get_vcpu_real_id(cpu), irq); 609 610 if (irq >= GIC_MAXIRQ) { 611 DPRINTF("ACK, no pending interrupt or it is hidden: %d\n", irq); 612 return irq; 613 } 614 615 if (gic_get_priority(s, irq, cpu) >= s->running_priority[cpu]) { 616 DPRINTF("ACK, pending interrupt (%d) has insufficient priority\n", irq); 617 return 1023; 618 } 619 620 gic_activate_irq(s, cpu, irq); 621 622 if (s->revision == REV_11MPCORE) { 623 /* Clear pending flags for both level and edge triggered interrupts. 624 * Level triggered IRQs will be reasserted once they become inactive. 625 */ 626 gic_clear_pending(s, irq, cpu); 627 ret = irq; 628 } else { 629 if (irq < GIC_NR_SGIS) { 630 ret = gic_clear_pending_sgi(s, irq, cpu); 631 } else { 632 gic_clear_pending(s, irq, cpu); 633 ret = irq; 634 } 635 } 636 637 if (gic_is_vcpu(cpu)) { 638 gic_update_virt(s); 639 } else { 640 gic_update(s); 641 } 642 DPRINTF("ACK %d\n", irq); 643 return ret; 644 } 645 646 static uint32_t gic_fullprio_mask(GICState *s, int cpu) 647 { 648 /* 649 * Return a mask word which clears the unimplemented priority 650 * bits from a priority value for an interrupt. (Not to be 651 * confused with the group priority, whose mask depends on BPR.) 652 */ 653 int priBits; 654 655 if (gic_is_vcpu(cpu)) { 656 priBits = GIC_VIRT_MAX_GROUP_PRIO_BITS; 657 } else { 658 priBits = s->n_prio_bits; 659 } 660 return ~0U << (8 - priBits); 661 } 662 663 void gic_dist_set_priority(GICState *s, int cpu, int irq, uint8_t val, 664 MemTxAttrs attrs) 665 { 666 if (s->security_extn && !attrs.secure) { 667 if (!GIC_DIST_TEST_GROUP(irq, (1 << cpu))) { 668 return; /* Ignore Non-secure access of Group0 IRQ */ 669 } 670 val = 0x80 | (val >> 1); /* Non-secure view */ 671 } 672 673 val &= gic_fullprio_mask(s, cpu); 674 675 if (irq < GIC_INTERNAL) { 676 s->priority1[irq][cpu] = val; 677 } else { 678 s->priority2[(irq) - GIC_INTERNAL] = val; 679 } 680 } 681 682 static uint32_t gic_dist_get_priority(GICState *s, int cpu, int irq, 683 MemTxAttrs attrs) 684 { 685 uint32_t prio = GIC_DIST_GET_PRIORITY(irq, cpu); 686 687 if (s->security_extn && !attrs.secure) { 688 if (!GIC_DIST_TEST_GROUP(irq, (1 << cpu))) { 689 return 0; /* Non-secure access cannot read priority of Group0 IRQ */ 690 } 691 prio = (prio << 1) & 0xff; /* Non-secure view */ 692 } 693 return prio & gic_fullprio_mask(s, cpu); 694 } 695 696 static void gic_set_priority_mask(GICState *s, int cpu, uint8_t pmask, 697 MemTxAttrs attrs) 698 { 699 if (gic_cpu_ns_access(s, cpu, attrs)) { 700 if (s->priority_mask[cpu] & 0x80) { 701 /* Priority Mask in upper half */ 702 pmask = 0x80 | (pmask >> 1); 703 } else { 704 /* Non-secure write ignored if priority mask is in lower half */ 705 return; 706 } 707 } 708 s->priority_mask[cpu] = pmask & gic_fullprio_mask(s, cpu); 709 } 710 711 static uint32_t gic_get_priority_mask(GICState *s, int cpu, MemTxAttrs attrs) 712 { 713 uint32_t pmask = s->priority_mask[cpu]; 714 715 if (gic_cpu_ns_access(s, cpu, attrs)) { 716 if (pmask & 0x80) { 717 /* Priority Mask in upper half, return Non-secure view */ 718 pmask = (pmask << 1) & 0xff; 719 } else { 720 /* Priority Mask in lower half, RAZ */ 721 pmask = 0; 722 } 723 } 724 return pmask; 725 } 726 727 static uint32_t gic_get_cpu_control(GICState *s, int cpu, MemTxAttrs attrs) 728 { 729 uint32_t ret = s->cpu_ctlr[cpu]; 730 731 if (gic_cpu_ns_access(s, cpu, attrs)) { 732 /* Construct the NS banked view of GICC_CTLR from the correct 733 * bits of the S banked view. We don't need to move the bypass 734 * control bits because we don't implement that (IMPDEF) part 735 * of the GIC architecture. 736 */ 737 ret = (ret & (GICC_CTLR_EN_GRP1 | GICC_CTLR_EOIMODE_NS)) >> 1; 738 } 739 return ret; 740 } 741 742 static void gic_set_cpu_control(GICState *s, int cpu, uint32_t value, 743 MemTxAttrs attrs) 744 { 745 uint32_t mask; 746 747 if (gic_cpu_ns_access(s, cpu, attrs)) { 748 /* The NS view can only write certain bits in the register; 749 * the rest are unchanged 750 */ 751 mask = GICC_CTLR_EN_GRP1; 752 if (s->revision == 2) { 753 mask |= GICC_CTLR_EOIMODE_NS; 754 } 755 s->cpu_ctlr[cpu] &= ~mask; 756 s->cpu_ctlr[cpu] |= (value << 1) & mask; 757 } else { 758 if (s->revision == 2) { 759 mask = s->security_extn ? GICC_CTLR_V2_S_MASK : GICC_CTLR_V2_MASK; 760 } else { 761 mask = s->security_extn ? GICC_CTLR_V1_S_MASK : GICC_CTLR_V1_MASK; 762 } 763 s->cpu_ctlr[cpu] = value & mask; 764 } 765 DPRINTF("CPU Interface %d: Group0 Interrupts %sabled, " 766 "Group1 Interrupts %sabled\n", cpu, 767 (s->cpu_ctlr[cpu] & GICC_CTLR_EN_GRP0) ? "En" : "Dis", 768 (s->cpu_ctlr[cpu] & GICC_CTLR_EN_GRP1) ? "En" : "Dis"); 769 } 770 771 static uint8_t gic_get_running_priority(GICState *s, int cpu, MemTxAttrs attrs) 772 { 773 if ((s->revision != REV_11MPCORE) && (s->running_priority[cpu] > 0xff)) { 774 /* Idle priority */ 775 return 0xff; 776 } 777 778 if (gic_cpu_ns_access(s, cpu, attrs)) { 779 if (s->running_priority[cpu] & 0x80) { 780 /* Running priority in upper half of range: return the Non-secure 781 * view of the priority. 782 */ 783 return s->running_priority[cpu] << 1; 784 } else { 785 /* Running priority in lower half of range: RAZ */ 786 return 0; 787 } 788 } else { 789 return s->running_priority[cpu]; 790 } 791 } 792 793 /* Return true if we should split priority drop and interrupt deactivation, 794 * ie whether the relevant EOIMode bit is set. 795 */ 796 static bool gic_eoi_split(GICState *s, int cpu, MemTxAttrs attrs) 797 { 798 if (s->revision != 2) { 799 /* Before GICv2 prio-drop and deactivate are not separable */ 800 return false; 801 } 802 if (gic_cpu_ns_access(s, cpu, attrs)) { 803 return s->cpu_ctlr[cpu] & GICC_CTLR_EOIMODE_NS; 804 } 805 return s->cpu_ctlr[cpu] & GICC_CTLR_EOIMODE; 806 } 807 808 static void gic_deactivate_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs) 809 { 810 int group; 811 812 if (irq >= GIC_MAXIRQ || (!gic_is_vcpu(cpu) && irq >= s->num_irq)) { 813 /* 814 * This handles two cases: 815 * 1. If software writes the ID of a spurious interrupt [ie 1023] 816 * to the GICC_DIR, the GIC ignores that write. 817 * 2. If software writes the number of a non-existent interrupt 818 * this must be a subcase of "value written is not an active interrupt" 819 * and so this is UNPREDICTABLE. We choose to ignore it. For vCPUs, 820 * all IRQs potentially exist, so this limit does not apply. 821 */ 822 return; 823 } 824 825 if (!gic_eoi_split(s, cpu, attrs)) { 826 /* This is UNPREDICTABLE; we choose to ignore it */ 827 qemu_log_mask(LOG_GUEST_ERROR, 828 "gic_deactivate_irq: GICC_DIR write when EOIMode clear"); 829 return; 830 } 831 832 if (gic_is_vcpu(cpu) && !gic_virq_is_valid(s, irq, cpu)) { 833 /* This vIRQ does not have an LR entry which is either active or 834 * pending and active. Increment EOICount and ignore the write. 835 */ 836 int rcpu = gic_get_vcpu_real_id(cpu); 837 s->h_hcr[rcpu] += 1 << R_GICH_HCR_EOICount_SHIFT; 838 839 /* Update the virtual interface in case a maintenance interrupt should 840 * be raised. 841 */ 842 gic_update_virt(s); 843 return; 844 } 845 846 group = gic_has_groups(s) && gic_test_group(s, irq, cpu); 847 848 if (gic_cpu_ns_access(s, cpu, attrs) && !group) { 849 DPRINTF("Non-secure DI for Group0 interrupt %d ignored\n", irq); 850 return; 851 } 852 853 gic_clear_active(s, irq, cpu); 854 } 855 856 static void gic_complete_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs) 857 { 858 int cm = 1 << cpu; 859 int group; 860 861 DPRINTF("EOI %d\n", irq); 862 if (gic_is_vcpu(cpu)) { 863 /* The call to gic_prio_drop() will clear a bit in GICH_APR iff the 864 * running prio is < 0x100. 865 */ 866 bool prio_drop = s->running_priority[cpu] < 0x100; 867 868 if (irq >= GIC_MAXIRQ) { 869 /* Ignore spurious interrupt */ 870 return; 871 } 872 873 gic_drop_prio(s, cpu, 0); 874 875 if (!gic_eoi_split(s, cpu, attrs)) { 876 bool valid = gic_virq_is_valid(s, irq, cpu); 877 if (prio_drop && !valid) { 878 /* We are in a situation where: 879 * - V_CTRL.EOIMode is false (no EOI split), 880 * - The call to gic_drop_prio() cleared a bit in GICH_APR, 881 * - This vIRQ does not have an LR entry which is either 882 * active or pending and active. 883 * In that case, we must increment EOICount. 884 */ 885 int rcpu = gic_get_vcpu_real_id(cpu); 886 s->h_hcr[rcpu] += 1 << R_GICH_HCR_EOICount_SHIFT; 887 } else if (valid) { 888 gic_clear_active(s, irq, cpu); 889 } 890 } 891 892 gic_update_virt(s); 893 return; 894 } 895 896 if (irq >= s->num_irq) { 897 /* This handles two cases: 898 * 1. If software writes the ID of a spurious interrupt [ie 1023] 899 * to the GICC_EOIR, the GIC ignores that write. 900 * 2. If software writes the number of a non-existent interrupt 901 * this must be a subcase of "value written does not match the last 902 * valid interrupt value read from the Interrupt Acknowledge 903 * register" and so this is UNPREDICTABLE. We choose to ignore it. 904 */ 905 return; 906 } 907 if (s->running_priority[cpu] == 0x100) { 908 return; /* No active IRQ. */ 909 } 910 911 if (s->revision == REV_11MPCORE) { 912 /* Mark level triggered interrupts as pending if they are still 913 raised. */ 914 if (!GIC_DIST_TEST_EDGE_TRIGGER(irq) && GIC_DIST_TEST_ENABLED(irq, cm) 915 && GIC_DIST_TEST_LEVEL(irq, cm) 916 && (GIC_DIST_TARGET(irq) & cm) != 0) { 917 DPRINTF("Set %d pending mask %x\n", irq, cm); 918 GIC_DIST_SET_PENDING(irq, cm); 919 } 920 } 921 922 group = gic_has_groups(s) && gic_test_group(s, irq, cpu); 923 924 if (gic_cpu_ns_access(s, cpu, attrs) && !group) { 925 DPRINTF("Non-secure EOI for Group0 interrupt %d ignored\n", irq); 926 return; 927 } 928 929 /* Secure EOI with GICC_CTLR.AckCtl == 0 when the IRQ is a Group 1 930 * interrupt is UNPREDICTABLE. We choose to handle it as if AckCtl == 1, 931 * i.e. go ahead and complete the irq anyway. 932 */ 933 934 gic_drop_prio(s, cpu, group); 935 936 /* In GICv2 the guest can choose to split priority-drop and deactivate */ 937 if (!gic_eoi_split(s, cpu, attrs)) { 938 gic_clear_active(s, irq, cpu); 939 } 940 gic_update(s); 941 } 942 943 static uint32_t gic_dist_readb(void *opaque, hwaddr offset, MemTxAttrs attrs) 944 { 945 GICState *s = (GICState *)opaque; 946 uint32_t res; 947 int irq; 948 int i; 949 int cpu; 950 int cm; 951 int mask; 952 953 cpu = gic_get_current_cpu(s); 954 cm = 1 << cpu; 955 if (offset < 0x100) { 956 if (offset == 0) { /* GICD_CTLR */ 957 if (s->security_extn && !attrs.secure) { 958 /* The NS bank of this register is just an alias of the 959 * EnableGrp1 bit in the S bank version. 960 */ 961 return extract32(s->ctlr, 1, 1); 962 } else { 963 return s->ctlr; 964 } 965 } 966 if (offset == 4) 967 /* Interrupt Controller Type Register */ 968 return ((s->num_irq / 32) - 1) 969 | ((s->num_cpu - 1) << 5) 970 | (s->security_extn << 10); 971 if (offset < 0x08) 972 return 0; 973 if (offset >= 0x80) { 974 /* Interrupt Group Registers: these RAZ/WI if this is an NS 975 * access to a GIC with the security extensions, or if the GIC 976 * doesn't have groups at all. 977 */ 978 res = 0; 979 if (!(s->security_extn && !attrs.secure) && gic_has_groups(s)) { 980 /* Every byte offset holds 8 group status bits */ 981 irq = (offset - 0x080) * 8; 982 if (irq >= s->num_irq) { 983 goto bad_reg; 984 } 985 for (i = 0; i < 8; i++) { 986 if (GIC_DIST_TEST_GROUP(irq + i, cm)) { 987 res |= (1 << i); 988 } 989 } 990 } 991 return res; 992 } 993 goto bad_reg; 994 } else if (offset < 0x200) { 995 /* Interrupt Set/Clear Enable. */ 996 if (offset < 0x180) 997 irq = (offset - 0x100) * 8; 998 else 999 irq = (offset - 0x180) * 8; 1000 if (irq >= s->num_irq) 1001 goto bad_reg; 1002 res = 0; 1003 for (i = 0; i < 8; i++) { 1004 if (s->security_extn && !attrs.secure && 1005 !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) { 1006 continue; /* Ignore Non-secure access of Group0 IRQ */ 1007 } 1008 1009 if (GIC_DIST_TEST_ENABLED(irq + i, cm)) { 1010 res |= (1 << i); 1011 } 1012 } 1013 } else if (offset < 0x300) { 1014 /* Interrupt Set/Clear Pending. */ 1015 if (offset < 0x280) 1016 irq = (offset - 0x200) * 8; 1017 else 1018 irq = (offset - 0x280) * 8; 1019 if (irq >= s->num_irq) 1020 goto bad_reg; 1021 res = 0; 1022 mask = (irq < GIC_INTERNAL) ? cm : ALL_CPU_MASK; 1023 for (i = 0; i < 8; i++) { 1024 if (s->security_extn && !attrs.secure && 1025 !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) { 1026 continue; /* Ignore Non-secure access of Group0 IRQ */ 1027 } 1028 1029 if (gic_test_pending(s, irq + i, mask)) { 1030 res |= (1 << i); 1031 } 1032 } 1033 } else if (offset < 0x400) { 1034 /* Interrupt Set/Clear Active. */ 1035 if (offset < 0x380) { 1036 irq = (offset - 0x300) * 8; 1037 } else if (s->revision == 2) { 1038 irq = (offset - 0x380) * 8; 1039 } else { 1040 goto bad_reg; 1041 } 1042 1043 if (irq >= s->num_irq) 1044 goto bad_reg; 1045 res = 0; 1046 mask = (irq < GIC_INTERNAL) ? cm : ALL_CPU_MASK; 1047 for (i = 0; i < 8; i++) { 1048 if (s->security_extn && !attrs.secure && 1049 !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) { 1050 continue; /* Ignore Non-secure access of Group0 IRQ */ 1051 } 1052 1053 if (GIC_DIST_TEST_ACTIVE(irq + i, mask)) { 1054 res |= (1 << i); 1055 } 1056 } 1057 } else if (offset < 0x800) { 1058 /* Interrupt Priority. */ 1059 irq = (offset - 0x400); 1060 if (irq >= s->num_irq) 1061 goto bad_reg; 1062 res = gic_dist_get_priority(s, cpu, irq, attrs); 1063 } else if (offset < 0xc00) { 1064 /* Interrupt CPU Target. */ 1065 if (s->num_cpu == 1 && s->revision != REV_11MPCORE) { 1066 /* For uniprocessor GICs these RAZ/WI */ 1067 res = 0; 1068 } else { 1069 irq = (offset - 0x800); 1070 if (irq >= s->num_irq) { 1071 goto bad_reg; 1072 } 1073 if (irq < 29 && s->revision == REV_11MPCORE) { 1074 res = 0; 1075 } else if (irq < GIC_INTERNAL) { 1076 res = cm; 1077 } else { 1078 res = GIC_DIST_TARGET(irq); 1079 } 1080 } 1081 } else if (offset < 0xf00) { 1082 /* Interrupt Configuration. */ 1083 irq = (offset - 0xc00) * 4; 1084 if (irq >= s->num_irq) 1085 goto bad_reg; 1086 res = 0; 1087 for (i = 0; i < 4; i++) { 1088 if (s->security_extn && !attrs.secure && 1089 !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) { 1090 continue; /* Ignore Non-secure access of Group0 IRQ */ 1091 } 1092 1093 if (GIC_DIST_TEST_MODEL(irq + i)) { 1094 res |= (1 << (i * 2)); 1095 } 1096 if (GIC_DIST_TEST_EDGE_TRIGGER(irq + i)) { 1097 res |= (2 << (i * 2)); 1098 } 1099 } 1100 } else if (offset < 0xf10) { 1101 goto bad_reg; 1102 } else if (offset < 0xf30) { 1103 if (s->revision == REV_11MPCORE) { 1104 goto bad_reg; 1105 } 1106 1107 if (offset < 0xf20) { 1108 /* GICD_CPENDSGIRn */ 1109 irq = (offset - 0xf10); 1110 } else { 1111 irq = (offset - 0xf20); 1112 /* GICD_SPENDSGIRn */ 1113 } 1114 1115 if (s->security_extn && !attrs.secure && 1116 !GIC_DIST_TEST_GROUP(irq, 1 << cpu)) { 1117 res = 0; /* Ignore Non-secure access of Group0 IRQ */ 1118 } else { 1119 res = s->sgi_pending[irq][cpu]; 1120 } 1121 } else if (offset < 0xfd0) { 1122 goto bad_reg; 1123 } else if (offset < 0x1000) { 1124 if (offset & 3) { 1125 res = 0; 1126 } else { 1127 switch (s->revision) { 1128 case REV_11MPCORE: 1129 res = gic_id_11mpcore[(offset - 0xfd0) >> 2]; 1130 break; 1131 case 1: 1132 res = gic_id_gicv1[(offset - 0xfd0) >> 2]; 1133 break; 1134 case 2: 1135 res = gic_id_gicv2[(offset - 0xfd0) >> 2]; 1136 break; 1137 default: 1138 res = 0; 1139 } 1140 } 1141 } else { 1142 g_assert_not_reached(); 1143 } 1144 return res; 1145 bad_reg: 1146 qemu_log_mask(LOG_GUEST_ERROR, 1147 "gic_dist_readb: Bad offset %x\n", (int)offset); 1148 return 0; 1149 } 1150 1151 static MemTxResult gic_dist_read(void *opaque, hwaddr offset, uint64_t *data, 1152 unsigned size, MemTxAttrs attrs) 1153 { 1154 switch (size) { 1155 case 1: 1156 *data = gic_dist_readb(opaque, offset, attrs); 1157 break; 1158 case 2: 1159 *data = gic_dist_readb(opaque, offset, attrs); 1160 *data |= gic_dist_readb(opaque, offset + 1, attrs) << 8; 1161 break; 1162 case 4: 1163 *data = gic_dist_readb(opaque, offset, attrs); 1164 *data |= gic_dist_readb(opaque, offset + 1, attrs) << 8; 1165 *data |= gic_dist_readb(opaque, offset + 2, attrs) << 16; 1166 *data |= gic_dist_readb(opaque, offset + 3, attrs) << 24; 1167 break; 1168 default: 1169 return MEMTX_ERROR; 1170 } 1171 1172 trace_gic_dist_read(offset, size, *data); 1173 return MEMTX_OK; 1174 } 1175 1176 static void gic_dist_writeb(void *opaque, hwaddr offset, 1177 uint32_t value, MemTxAttrs attrs) 1178 { 1179 GICState *s = (GICState *)opaque; 1180 int irq; 1181 int i; 1182 int cpu; 1183 1184 cpu = gic_get_current_cpu(s); 1185 if (offset < 0x100) { 1186 if (offset == 0) { 1187 if (s->security_extn && !attrs.secure) { 1188 /* NS version is just an alias of the S version's bit 1 */ 1189 s->ctlr = deposit32(s->ctlr, 1, 1, value); 1190 } else if (gic_has_groups(s)) { 1191 s->ctlr = value & (GICD_CTLR_EN_GRP0 | GICD_CTLR_EN_GRP1); 1192 } else { 1193 s->ctlr = value & GICD_CTLR_EN_GRP0; 1194 } 1195 DPRINTF("Distributor: Group0 %sabled; Group 1 %sabled\n", 1196 s->ctlr & GICD_CTLR_EN_GRP0 ? "En" : "Dis", 1197 s->ctlr & GICD_CTLR_EN_GRP1 ? "En" : "Dis"); 1198 } else if (offset < 4) { 1199 /* ignored. */ 1200 } else if (offset >= 0x80) { 1201 /* Interrupt Group Registers: RAZ/WI for NS access to secure 1202 * GIC, or for GICs without groups. 1203 */ 1204 if (!(s->security_extn && !attrs.secure) && gic_has_groups(s)) { 1205 /* Every byte offset holds 8 group status bits */ 1206 irq = (offset - 0x80) * 8; 1207 if (irq >= s->num_irq) { 1208 goto bad_reg; 1209 } 1210 for (i = 0; i < 8; i++) { 1211 /* Group bits are banked for private interrupts */ 1212 int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK; 1213 if (value & (1 << i)) { 1214 /* Group1 (Non-secure) */ 1215 GIC_DIST_SET_GROUP(irq + i, cm); 1216 } else { 1217 /* Group0 (Secure) */ 1218 GIC_DIST_CLEAR_GROUP(irq + i, cm); 1219 } 1220 } 1221 } 1222 } else { 1223 goto bad_reg; 1224 } 1225 } else if (offset < 0x180) { 1226 /* Interrupt Set Enable. */ 1227 irq = (offset - 0x100) * 8; 1228 if (irq >= s->num_irq) 1229 goto bad_reg; 1230 if (irq < GIC_NR_SGIS) { 1231 value = 0xff; 1232 } 1233 1234 for (i = 0; i < 8; i++) { 1235 if (value & (1 << i)) { 1236 int mask = 1237 (irq < GIC_INTERNAL) ? (1 << cpu) 1238 : GIC_DIST_TARGET(irq + i); 1239 int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK; 1240 1241 if (s->security_extn && !attrs.secure && 1242 !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) { 1243 continue; /* Ignore Non-secure access of Group0 IRQ */ 1244 } 1245 1246 if (!GIC_DIST_TEST_ENABLED(irq + i, cm)) { 1247 DPRINTF("Enabled IRQ %d\n", irq + i); 1248 trace_gic_enable_irq(irq + i); 1249 } 1250 GIC_DIST_SET_ENABLED(irq + i, cm); 1251 /* If a raised level triggered IRQ enabled then mark 1252 is as pending. */ 1253 if (GIC_DIST_TEST_LEVEL(irq + i, mask) 1254 && !GIC_DIST_TEST_EDGE_TRIGGER(irq + i)) { 1255 DPRINTF("Set %d pending mask %x\n", irq + i, mask); 1256 GIC_DIST_SET_PENDING(irq + i, mask); 1257 } 1258 } 1259 } 1260 } else if (offset < 0x200) { 1261 /* Interrupt Clear Enable. */ 1262 irq = (offset - 0x180) * 8; 1263 if (irq >= s->num_irq) 1264 goto bad_reg; 1265 if (irq < GIC_NR_SGIS) { 1266 value = 0; 1267 } 1268 1269 for (i = 0; i < 8; i++) { 1270 if (value & (1 << i)) { 1271 int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK; 1272 1273 if (s->security_extn && !attrs.secure && 1274 !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) { 1275 continue; /* Ignore Non-secure access of Group0 IRQ */ 1276 } 1277 1278 if (GIC_DIST_TEST_ENABLED(irq + i, cm)) { 1279 DPRINTF("Disabled IRQ %d\n", irq + i); 1280 trace_gic_disable_irq(irq + i); 1281 } 1282 GIC_DIST_CLEAR_ENABLED(irq + i, cm); 1283 } 1284 } 1285 } else if (offset < 0x280) { 1286 /* Interrupt Set Pending. */ 1287 irq = (offset - 0x200) * 8; 1288 if (irq >= s->num_irq) 1289 goto bad_reg; 1290 if (irq < GIC_NR_SGIS) { 1291 value = 0; 1292 } 1293 1294 for (i = 0; i < 8; i++) { 1295 if (value & (1 << i)) { 1296 if (s->security_extn && !attrs.secure && 1297 !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) { 1298 continue; /* Ignore Non-secure access of Group0 IRQ */ 1299 } 1300 1301 GIC_DIST_SET_PENDING(irq + i, GIC_DIST_TARGET(irq + i)); 1302 } 1303 } 1304 } else if (offset < 0x300) { 1305 /* Interrupt Clear Pending. */ 1306 irq = (offset - 0x280) * 8; 1307 if (irq >= s->num_irq) 1308 goto bad_reg; 1309 if (irq < GIC_NR_SGIS) { 1310 value = 0; 1311 } 1312 1313 for (i = 0; i < 8; i++) { 1314 if (s->security_extn && !attrs.secure && 1315 !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) { 1316 continue; /* Ignore Non-secure access of Group0 IRQ */ 1317 } 1318 1319 /* ??? This currently clears the pending bit for all CPUs, even 1320 for per-CPU interrupts. It's unclear whether this is the 1321 corect behavior. */ 1322 if (value & (1 << i)) { 1323 GIC_DIST_CLEAR_PENDING(irq + i, ALL_CPU_MASK); 1324 } 1325 } 1326 } else if (offset < 0x380) { 1327 /* Interrupt Set Active. */ 1328 if (s->revision != 2) { 1329 goto bad_reg; 1330 } 1331 1332 irq = (offset - 0x300) * 8; 1333 if (irq >= s->num_irq) { 1334 goto bad_reg; 1335 } 1336 1337 /* This register is banked per-cpu for PPIs */ 1338 int cm = irq < GIC_INTERNAL ? (1 << cpu) : ALL_CPU_MASK; 1339 1340 for (i = 0; i < 8; i++) { 1341 if (s->security_extn && !attrs.secure && 1342 !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) { 1343 continue; /* Ignore Non-secure access of Group0 IRQ */ 1344 } 1345 1346 if (value & (1 << i)) { 1347 GIC_DIST_SET_ACTIVE(irq + i, cm); 1348 } 1349 } 1350 } else if (offset < 0x400) { 1351 /* Interrupt Clear Active. */ 1352 if (s->revision != 2) { 1353 goto bad_reg; 1354 } 1355 1356 irq = (offset - 0x380) * 8; 1357 if (irq >= s->num_irq) { 1358 goto bad_reg; 1359 } 1360 1361 /* This register is banked per-cpu for PPIs */ 1362 int cm = irq < GIC_INTERNAL ? (1 << cpu) : ALL_CPU_MASK; 1363 1364 for (i = 0; i < 8; i++) { 1365 if (s->security_extn && !attrs.secure && 1366 !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) { 1367 continue; /* Ignore Non-secure access of Group0 IRQ */ 1368 } 1369 1370 if (value & (1 << i)) { 1371 GIC_DIST_CLEAR_ACTIVE(irq + i, cm); 1372 } 1373 } 1374 } else if (offset < 0x800) { 1375 /* Interrupt Priority. */ 1376 irq = (offset - 0x400); 1377 if (irq >= s->num_irq) 1378 goto bad_reg; 1379 gic_dist_set_priority(s, cpu, irq, value, attrs); 1380 } else if (offset < 0xc00) { 1381 /* Interrupt CPU Target. RAZ/WI on uniprocessor GICs, with the 1382 * annoying exception of the 11MPCore's GIC. 1383 */ 1384 if (s->num_cpu != 1 || s->revision == REV_11MPCORE) { 1385 irq = (offset - 0x800); 1386 if (irq >= s->num_irq) { 1387 goto bad_reg; 1388 } 1389 if (irq < 29 && s->revision == REV_11MPCORE) { 1390 value = 0; 1391 } else if (irq < GIC_INTERNAL) { 1392 value = ALL_CPU_MASK; 1393 } 1394 s->irq_target[irq] = value & ALL_CPU_MASK; 1395 } 1396 } else if (offset < 0xf00) { 1397 /* Interrupt Configuration. */ 1398 irq = (offset - 0xc00) * 4; 1399 if (irq >= s->num_irq) 1400 goto bad_reg; 1401 if (irq < GIC_NR_SGIS) 1402 value |= 0xaa; 1403 for (i = 0; i < 4; i++) { 1404 if (s->security_extn && !attrs.secure && 1405 !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) { 1406 continue; /* Ignore Non-secure access of Group0 IRQ */ 1407 } 1408 1409 if (s->revision == REV_11MPCORE) { 1410 if (value & (1 << (i * 2))) { 1411 GIC_DIST_SET_MODEL(irq + i); 1412 } else { 1413 GIC_DIST_CLEAR_MODEL(irq + i); 1414 } 1415 } 1416 if (value & (2 << (i * 2))) { 1417 GIC_DIST_SET_EDGE_TRIGGER(irq + i); 1418 } else { 1419 GIC_DIST_CLEAR_EDGE_TRIGGER(irq + i); 1420 } 1421 } 1422 } else if (offset < 0xf10) { 1423 /* 0xf00 is only handled for 32-bit writes. */ 1424 goto bad_reg; 1425 } else if (offset < 0xf20) { 1426 /* GICD_CPENDSGIRn */ 1427 if (s->revision == REV_11MPCORE) { 1428 goto bad_reg; 1429 } 1430 irq = (offset - 0xf10); 1431 1432 if (!s->security_extn || attrs.secure || 1433 GIC_DIST_TEST_GROUP(irq, 1 << cpu)) { 1434 s->sgi_pending[irq][cpu] &= ~value; 1435 if (s->sgi_pending[irq][cpu] == 0) { 1436 GIC_DIST_CLEAR_PENDING(irq, 1 << cpu); 1437 } 1438 } 1439 } else if (offset < 0xf30) { 1440 /* GICD_SPENDSGIRn */ 1441 if (s->revision == REV_11MPCORE) { 1442 goto bad_reg; 1443 } 1444 irq = (offset - 0xf20); 1445 1446 if (!s->security_extn || attrs.secure || 1447 GIC_DIST_TEST_GROUP(irq, 1 << cpu)) { 1448 GIC_DIST_SET_PENDING(irq, 1 << cpu); 1449 s->sgi_pending[irq][cpu] |= value; 1450 } 1451 } else { 1452 goto bad_reg; 1453 } 1454 gic_update(s); 1455 return; 1456 bad_reg: 1457 qemu_log_mask(LOG_GUEST_ERROR, 1458 "gic_dist_writeb: Bad offset %x\n", (int)offset); 1459 } 1460 1461 static void gic_dist_writew(void *opaque, hwaddr offset, 1462 uint32_t value, MemTxAttrs attrs) 1463 { 1464 gic_dist_writeb(opaque, offset, value & 0xff, attrs); 1465 gic_dist_writeb(opaque, offset + 1, value >> 8, attrs); 1466 } 1467 1468 static void gic_dist_writel(void *opaque, hwaddr offset, 1469 uint32_t value, MemTxAttrs attrs) 1470 { 1471 GICState *s = (GICState *)opaque; 1472 if (offset == 0xf00) { 1473 int cpu; 1474 int irq; 1475 int mask; 1476 int target_cpu; 1477 1478 cpu = gic_get_current_cpu(s); 1479 irq = value & 0x3ff; 1480 switch ((value >> 24) & 3) { 1481 case 0: 1482 mask = (value >> 16) & ALL_CPU_MASK; 1483 break; 1484 case 1: 1485 mask = ALL_CPU_MASK ^ (1 << cpu); 1486 break; 1487 case 2: 1488 mask = 1 << cpu; 1489 break; 1490 default: 1491 DPRINTF("Bad Soft Int target filter\n"); 1492 mask = ALL_CPU_MASK; 1493 break; 1494 } 1495 GIC_DIST_SET_PENDING(irq, mask); 1496 target_cpu = ctz32(mask); 1497 while (target_cpu < GIC_NCPU) { 1498 s->sgi_pending[irq][target_cpu] |= (1 << cpu); 1499 mask &= ~(1 << target_cpu); 1500 target_cpu = ctz32(mask); 1501 } 1502 gic_update(s); 1503 return; 1504 } 1505 gic_dist_writew(opaque, offset, value & 0xffff, attrs); 1506 gic_dist_writew(opaque, offset + 2, value >> 16, attrs); 1507 } 1508 1509 static MemTxResult gic_dist_write(void *opaque, hwaddr offset, uint64_t data, 1510 unsigned size, MemTxAttrs attrs) 1511 { 1512 trace_gic_dist_write(offset, size, data); 1513 1514 switch (size) { 1515 case 1: 1516 gic_dist_writeb(opaque, offset, data, attrs); 1517 return MEMTX_OK; 1518 case 2: 1519 gic_dist_writew(opaque, offset, data, attrs); 1520 return MEMTX_OK; 1521 case 4: 1522 gic_dist_writel(opaque, offset, data, attrs); 1523 return MEMTX_OK; 1524 default: 1525 return MEMTX_ERROR; 1526 } 1527 } 1528 1529 static inline uint32_t gic_apr_ns_view(GICState *s, int cpu, int regno) 1530 { 1531 /* Return the Nonsecure view of GICC_APR<regno>. This is the 1532 * second half of GICC_NSAPR. 1533 */ 1534 switch (GIC_MIN_BPR) { 1535 case 0: 1536 if (regno < 2) { 1537 return s->nsapr[regno + 2][cpu]; 1538 } 1539 break; 1540 case 1: 1541 if (regno == 0) { 1542 return s->nsapr[regno + 1][cpu]; 1543 } 1544 break; 1545 case 2: 1546 if (regno == 0) { 1547 return extract32(s->nsapr[0][cpu], 16, 16); 1548 } 1549 break; 1550 case 3: 1551 if (regno == 0) { 1552 return extract32(s->nsapr[0][cpu], 8, 8); 1553 } 1554 break; 1555 default: 1556 g_assert_not_reached(); 1557 } 1558 return 0; 1559 } 1560 1561 static inline void gic_apr_write_ns_view(GICState *s, int cpu, int regno, 1562 uint32_t value) 1563 { 1564 /* Write the Nonsecure view of GICC_APR<regno>. */ 1565 switch (GIC_MIN_BPR) { 1566 case 0: 1567 if (regno < 2) { 1568 s->nsapr[regno + 2][cpu] = value; 1569 } 1570 break; 1571 case 1: 1572 if (regno == 0) { 1573 s->nsapr[regno + 1][cpu] = value; 1574 } 1575 break; 1576 case 2: 1577 if (regno == 0) { 1578 s->nsapr[0][cpu] = deposit32(s->nsapr[0][cpu], 16, 16, value); 1579 } 1580 break; 1581 case 3: 1582 if (regno == 0) { 1583 s->nsapr[0][cpu] = deposit32(s->nsapr[0][cpu], 8, 8, value); 1584 } 1585 break; 1586 default: 1587 g_assert_not_reached(); 1588 } 1589 } 1590 1591 static MemTxResult gic_cpu_read(GICState *s, int cpu, int offset, 1592 uint64_t *data, MemTxAttrs attrs) 1593 { 1594 switch (offset) { 1595 case 0x00: /* Control */ 1596 *data = gic_get_cpu_control(s, cpu, attrs); 1597 break; 1598 case 0x04: /* Priority mask */ 1599 *data = gic_get_priority_mask(s, cpu, attrs); 1600 break; 1601 case 0x08: /* Binary Point */ 1602 if (gic_cpu_ns_access(s, cpu, attrs)) { 1603 if (s->cpu_ctlr[cpu] & GICC_CTLR_CBPR) { 1604 /* NS view of BPR when CBPR is 1 */ 1605 *data = MIN(s->bpr[cpu] + 1, 7); 1606 } else { 1607 /* BPR is banked. Non-secure copy stored in ABPR. */ 1608 *data = s->abpr[cpu]; 1609 } 1610 } else { 1611 *data = s->bpr[cpu]; 1612 } 1613 break; 1614 case 0x0c: /* Acknowledge */ 1615 *data = gic_acknowledge_irq(s, cpu, attrs); 1616 break; 1617 case 0x14: /* Running Priority */ 1618 *data = gic_get_running_priority(s, cpu, attrs); 1619 break; 1620 case 0x18: /* Highest Pending Interrupt */ 1621 *data = gic_get_current_pending_irq(s, cpu, attrs); 1622 break; 1623 case 0x1c: /* Aliased Binary Point */ 1624 /* GIC v2, no security: ABPR 1625 * GIC v1, no security: not implemented (RAZ/WI) 1626 * With security extensions, secure access: ABPR (alias of NS BPR) 1627 * With security extensions, nonsecure access: RAZ/WI 1628 */ 1629 if (!gic_has_groups(s) || (gic_cpu_ns_access(s, cpu, attrs))) { 1630 *data = 0; 1631 } else { 1632 *data = s->abpr[cpu]; 1633 } 1634 break; 1635 case 0xd0: case 0xd4: case 0xd8: case 0xdc: 1636 { 1637 int regno = (offset - 0xd0) / 4; 1638 int nr_aprs = gic_is_vcpu(cpu) ? GIC_VIRT_NR_APRS : GIC_NR_APRS; 1639 1640 if (regno >= nr_aprs || s->revision != 2) { 1641 *data = 0; 1642 } else if (gic_is_vcpu(cpu)) { 1643 *data = s->h_apr[gic_get_vcpu_real_id(cpu)]; 1644 } else if (gic_cpu_ns_access(s, cpu, attrs)) { 1645 /* NS view of GICC_APR<n> is the top half of GIC_NSAPR<n> */ 1646 *data = gic_apr_ns_view(s, regno, cpu); 1647 } else { 1648 *data = s->apr[regno][cpu]; 1649 } 1650 break; 1651 } 1652 case 0xe0: case 0xe4: case 0xe8: case 0xec: 1653 { 1654 int regno = (offset - 0xe0) / 4; 1655 1656 if (regno >= GIC_NR_APRS || s->revision != 2 || !gic_has_groups(s) || 1657 gic_cpu_ns_access(s, cpu, attrs) || gic_is_vcpu(cpu)) { 1658 *data = 0; 1659 } else { 1660 *data = s->nsapr[regno][cpu]; 1661 } 1662 break; 1663 } 1664 default: 1665 qemu_log_mask(LOG_GUEST_ERROR, 1666 "gic_cpu_read: Bad offset %x\n", (int)offset); 1667 *data = 0; 1668 break; 1669 } 1670 1671 trace_gic_cpu_read(gic_is_vcpu(cpu) ? "vcpu" : "cpu", 1672 gic_get_vcpu_real_id(cpu), offset, *data); 1673 return MEMTX_OK; 1674 } 1675 1676 static MemTxResult gic_cpu_write(GICState *s, int cpu, int offset, 1677 uint32_t value, MemTxAttrs attrs) 1678 { 1679 trace_gic_cpu_write(gic_is_vcpu(cpu) ? "vcpu" : "cpu", 1680 gic_get_vcpu_real_id(cpu), offset, value); 1681 1682 switch (offset) { 1683 case 0x00: /* Control */ 1684 gic_set_cpu_control(s, cpu, value, attrs); 1685 break; 1686 case 0x04: /* Priority mask */ 1687 gic_set_priority_mask(s, cpu, value, attrs); 1688 break; 1689 case 0x08: /* Binary Point */ 1690 if (gic_cpu_ns_access(s, cpu, attrs)) { 1691 if (s->cpu_ctlr[cpu] & GICC_CTLR_CBPR) { 1692 /* WI when CBPR is 1 */ 1693 return MEMTX_OK; 1694 } else { 1695 s->abpr[cpu] = MAX(value & 0x7, GIC_MIN_ABPR); 1696 } 1697 } else { 1698 int min_bpr = gic_is_vcpu(cpu) ? GIC_VIRT_MIN_BPR : GIC_MIN_BPR; 1699 s->bpr[cpu] = MAX(value & 0x7, min_bpr); 1700 } 1701 break; 1702 case 0x10: /* End Of Interrupt */ 1703 gic_complete_irq(s, cpu, value & 0x3ff, attrs); 1704 return MEMTX_OK; 1705 case 0x1c: /* Aliased Binary Point */ 1706 if (!gic_has_groups(s) || (gic_cpu_ns_access(s, cpu, attrs))) { 1707 /* unimplemented, or NS access: RAZ/WI */ 1708 return MEMTX_OK; 1709 } else { 1710 s->abpr[cpu] = MAX(value & 0x7, GIC_MIN_ABPR); 1711 } 1712 break; 1713 case 0xd0: case 0xd4: case 0xd8: case 0xdc: 1714 { 1715 int regno = (offset - 0xd0) / 4; 1716 int nr_aprs = gic_is_vcpu(cpu) ? GIC_VIRT_NR_APRS : GIC_NR_APRS; 1717 1718 if (regno >= nr_aprs || s->revision != 2) { 1719 return MEMTX_OK; 1720 } 1721 if (gic_is_vcpu(cpu)) { 1722 s->h_apr[gic_get_vcpu_real_id(cpu)] = value; 1723 } else if (gic_cpu_ns_access(s, cpu, attrs)) { 1724 /* NS view of GICC_APR<n> is the top half of GIC_NSAPR<n> */ 1725 gic_apr_write_ns_view(s, regno, cpu, value); 1726 } else { 1727 s->apr[regno][cpu] = value; 1728 } 1729 break; 1730 } 1731 case 0xe0: case 0xe4: case 0xe8: case 0xec: 1732 { 1733 int regno = (offset - 0xe0) / 4; 1734 1735 if (regno >= GIC_NR_APRS || s->revision != 2) { 1736 return MEMTX_OK; 1737 } 1738 if (gic_is_vcpu(cpu)) { 1739 return MEMTX_OK; 1740 } 1741 if (!gic_has_groups(s) || (gic_cpu_ns_access(s, cpu, attrs))) { 1742 return MEMTX_OK; 1743 } 1744 s->nsapr[regno][cpu] = value; 1745 break; 1746 } 1747 case 0x1000: 1748 /* GICC_DIR */ 1749 gic_deactivate_irq(s, cpu, value & 0x3ff, attrs); 1750 break; 1751 default: 1752 qemu_log_mask(LOG_GUEST_ERROR, 1753 "gic_cpu_write: Bad offset %x\n", (int)offset); 1754 return MEMTX_OK; 1755 } 1756 1757 if (gic_is_vcpu(cpu)) { 1758 gic_update_virt(s); 1759 } else { 1760 gic_update(s); 1761 } 1762 1763 return MEMTX_OK; 1764 } 1765 1766 /* Wrappers to read/write the GIC CPU interface for the current CPU */ 1767 static MemTxResult gic_thiscpu_read(void *opaque, hwaddr addr, uint64_t *data, 1768 unsigned size, MemTxAttrs attrs) 1769 { 1770 GICState *s = (GICState *)opaque; 1771 return gic_cpu_read(s, gic_get_current_cpu(s), addr, data, attrs); 1772 } 1773 1774 static MemTxResult gic_thiscpu_write(void *opaque, hwaddr addr, 1775 uint64_t value, unsigned size, 1776 MemTxAttrs attrs) 1777 { 1778 GICState *s = (GICState *)opaque; 1779 return gic_cpu_write(s, gic_get_current_cpu(s), addr, value, attrs); 1780 } 1781 1782 /* Wrappers to read/write the GIC CPU interface for a specific CPU. 1783 * These just decode the opaque pointer into GICState* + cpu id. 1784 */ 1785 static MemTxResult gic_do_cpu_read(void *opaque, hwaddr addr, uint64_t *data, 1786 unsigned size, MemTxAttrs attrs) 1787 { 1788 GICState **backref = (GICState **)opaque; 1789 GICState *s = *backref; 1790 int id = (backref - s->backref); 1791 return gic_cpu_read(s, id, addr, data, attrs); 1792 } 1793 1794 static MemTxResult gic_do_cpu_write(void *opaque, hwaddr addr, 1795 uint64_t value, unsigned size, 1796 MemTxAttrs attrs) 1797 { 1798 GICState **backref = (GICState **)opaque; 1799 GICState *s = *backref; 1800 int id = (backref - s->backref); 1801 return gic_cpu_write(s, id, addr, value, attrs); 1802 } 1803 1804 static MemTxResult gic_thisvcpu_read(void *opaque, hwaddr addr, uint64_t *data, 1805 unsigned size, MemTxAttrs attrs) 1806 { 1807 GICState *s = (GICState *)opaque; 1808 1809 return gic_cpu_read(s, gic_get_current_vcpu(s), addr, data, attrs); 1810 } 1811 1812 static MemTxResult gic_thisvcpu_write(void *opaque, hwaddr addr, 1813 uint64_t value, unsigned size, 1814 MemTxAttrs attrs) 1815 { 1816 GICState *s = (GICState *)opaque; 1817 1818 return gic_cpu_write(s, gic_get_current_vcpu(s), addr, value, attrs); 1819 } 1820 1821 static uint32_t gic_compute_eisr(GICState *s, int cpu, int lr_start) 1822 { 1823 int lr_idx; 1824 uint32_t ret = 0; 1825 1826 for (lr_idx = lr_start; lr_idx < s->num_lrs; lr_idx++) { 1827 uint32_t *entry = &s->h_lr[lr_idx][cpu]; 1828 ret = deposit32(ret, lr_idx - lr_start, 1, 1829 gic_lr_entry_is_eoi(*entry)); 1830 } 1831 1832 return ret; 1833 } 1834 1835 static uint32_t gic_compute_elrsr(GICState *s, int cpu, int lr_start) 1836 { 1837 int lr_idx; 1838 uint32_t ret = 0; 1839 1840 for (lr_idx = lr_start; lr_idx < s->num_lrs; lr_idx++) { 1841 uint32_t *entry = &s->h_lr[lr_idx][cpu]; 1842 ret = deposit32(ret, lr_idx - lr_start, 1, 1843 gic_lr_entry_is_free(*entry)); 1844 } 1845 1846 return ret; 1847 } 1848 1849 static void gic_vmcr_write(GICState *s, uint32_t value, MemTxAttrs attrs) 1850 { 1851 int vcpu = gic_get_current_vcpu(s); 1852 uint32_t ctlr; 1853 uint32_t abpr; 1854 uint32_t bpr; 1855 uint32_t prio_mask; 1856 1857 ctlr = FIELD_EX32(value, GICH_VMCR, VMCCtlr); 1858 abpr = FIELD_EX32(value, GICH_VMCR, VMABP); 1859 bpr = FIELD_EX32(value, GICH_VMCR, VMBP); 1860 prio_mask = FIELD_EX32(value, GICH_VMCR, VMPriMask) << 3; 1861 1862 gic_set_cpu_control(s, vcpu, ctlr, attrs); 1863 s->abpr[vcpu] = MAX(abpr, GIC_VIRT_MIN_ABPR); 1864 s->bpr[vcpu] = MAX(bpr, GIC_VIRT_MIN_BPR); 1865 gic_set_priority_mask(s, vcpu, prio_mask, attrs); 1866 } 1867 1868 static MemTxResult gic_hyp_read(void *opaque, int cpu, hwaddr addr, 1869 uint64_t *data, MemTxAttrs attrs) 1870 { 1871 GICState *s = ARM_GIC(opaque); 1872 int vcpu = cpu + GIC_NCPU; 1873 1874 switch (addr) { 1875 case A_GICH_HCR: /* Hypervisor Control */ 1876 *data = s->h_hcr[cpu]; 1877 break; 1878 1879 case A_GICH_VTR: /* VGIC Type */ 1880 *data = FIELD_DP32(0, GICH_VTR, ListRegs, s->num_lrs - 1); 1881 *data = FIELD_DP32(*data, GICH_VTR, PREbits, 1882 GIC_VIRT_MAX_GROUP_PRIO_BITS - 1); 1883 *data = FIELD_DP32(*data, GICH_VTR, PRIbits, 1884 (7 - GIC_VIRT_MIN_BPR) - 1); 1885 break; 1886 1887 case A_GICH_VMCR: /* Virtual Machine Control */ 1888 *data = FIELD_DP32(0, GICH_VMCR, VMCCtlr, 1889 extract32(s->cpu_ctlr[vcpu], 0, 10)); 1890 *data = FIELD_DP32(*data, GICH_VMCR, VMABP, s->abpr[vcpu]); 1891 *data = FIELD_DP32(*data, GICH_VMCR, VMBP, s->bpr[vcpu]); 1892 *data = FIELD_DP32(*data, GICH_VMCR, VMPriMask, 1893 extract32(s->priority_mask[vcpu], 3, 5)); 1894 break; 1895 1896 case A_GICH_MISR: /* Maintenance Interrupt Status */ 1897 *data = s->h_misr[cpu]; 1898 break; 1899 1900 case A_GICH_EISR0: /* End of Interrupt Status 0 and 1 */ 1901 case A_GICH_EISR1: 1902 *data = gic_compute_eisr(s, cpu, (addr - A_GICH_EISR0) * 8); 1903 break; 1904 1905 case A_GICH_ELRSR0: /* Empty List Status 0 and 1 */ 1906 case A_GICH_ELRSR1: 1907 *data = gic_compute_elrsr(s, cpu, (addr - A_GICH_ELRSR0) * 8); 1908 break; 1909 1910 case A_GICH_APR: /* Active Priorities */ 1911 *data = s->h_apr[cpu]; 1912 break; 1913 1914 case A_GICH_LR0 ... A_GICH_LR63: /* List Registers */ 1915 { 1916 int lr_idx = (addr - A_GICH_LR0) / 4; 1917 1918 if (lr_idx > s->num_lrs) { 1919 *data = 0; 1920 } else { 1921 *data = s->h_lr[lr_idx][cpu]; 1922 } 1923 break; 1924 } 1925 1926 default: 1927 qemu_log_mask(LOG_GUEST_ERROR, 1928 "gic_hyp_read: Bad offset %" HWADDR_PRIx "\n", addr); 1929 return MEMTX_OK; 1930 } 1931 1932 trace_gic_hyp_read(addr, *data); 1933 return MEMTX_OK; 1934 } 1935 1936 static MemTxResult gic_hyp_write(void *opaque, int cpu, hwaddr addr, 1937 uint64_t value, MemTxAttrs attrs) 1938 { 1939 GICState *s = ARM_GIC(opaque); 1940 int vcpu = cpu + GIC_NCPU; 1941 1942 trace_gic_hyp_write(addr, value); 1943 1944 switch (addr) { 1945 case A_GICH_HCR: /* Hypervisor Control */ 1946 s->h_hcr[cpu] = value & GICH_HCR_MASK; 1947 break; 1948 1949 case A_GICH_VMCR: /* Virtual Machine Control */ 1950 gic_vmcr_write(s, value, attrs); 1951 break; 1952 1953 case A_GICH_APR: /* Active Priorities */ 1954 s->h_apr[cpu] = value; 1955 s->running_priority[vcpu] = gic_get_prio_from_apr_bits(s, vcpu); 1956 break; 1957 1958 case A_GICH_LR0 ... A_GICH_LR63: /* List Registers */ 1959 { 1960 int lr_idx = (addr - A_GICH_LR0) / 4; 1961 1962 if (lr_idx > s->num_lrs) { 1963 return MEMTX_OK; 1964 } 1965 1966 s->h_lr[lr_idx][cpu] = value & GICH_LR_MASK; 1967 trace_gic_lr_entry(cpu, lr_idx, s->h_lr[lr_idx][cpu]); 1968 break; 1969 } 1970 1971 default: 1972 qemu_log_mask(LOG_GUEST_ERROR, 1973 "gic_hyp_write: Bad offset %" HWADDR_PRIx "\n", addr); 1974 return MEMTX_OK; 1975 } 1976 1977 gic_update_virt(s); 1978 return MEMTX_OK; 1979 } 1980 1981 static MemTxResult gic_thiscpu_hyp_read(void *opaque, hwaddr addr, uint64_t *data, 1982 unsigned size, MemTxAttrs attrs) 1983 { 1984 GICState *s = (GICState *)opaque; 1985 1986 return gic_hyp_read(s, gic_get_current_cpu(s), addr, data, attrs); 1987 } 1988 1989 static MemTxResult gic_thiscpu_hyp_write(void *opaque, hwaddr addr, 1990 uint64_t value, unsigned size, 1991 MemTxAttrs attrs) 1992 { 1993 GICState *s = (GICState *)opaque; 1994 1995 return gic_hyp_write(s, gic_get_current_cpu(s), addr, value, attrs); 1996 } 1997 1998 static MemTxResult gic_do_hyp_read(void *opaque, hwaddr addr, uint64_t *data, 1999 unsigned size, MemTxAttrs attrs) 2000 { 2001 GICState **backref = (GICState **)opaque; 2002 GICState *s = *backref; 2003 int id = (backref - s->backref); 2004 2005 return gic_hyp_read(s, id, addr, data, attrs); 2006 } 2007 2008 static MemTxResult gic_do_hyp_write(void *opaque, hwaddr addr, 2009 uint64_t value, unsigned size, 2010 MemTxAttrs attrs) 2011 { 2012 GICState **backref = (GICState **)opaque; 2013 GICState *s = *backref; 2014 int id = (backref - s->backref); 2015 2016 return gic_hyp_write(s, id + GIC_NCPU, addr, value, attrs); 2017 2018 } 2019 2020 static const MemoryRegionOps gic_ops[2] = { 2021 { 2022 .read_with_attrs = gic_dist_read, 2023 .write_with_attrs = gic_dist_write, 2024 .endianness = DEVICE_NATIVE_ENDIAN, 2025 }, 2026 { 2027 .read_with_attrs = gic_thiscpu_read, 2028 .write_with_attrs = gic_thiscpu_write, 2029 .endianness = DEVICE_NATIVE_ENDIAN, 2030 } 2031 }; 2032 2033 static const MemoryRegionOps gic_cpu_ops = { 2034 .read_with_attrs = gic_do_cpu_read, 2035 .write_with_attrs = gic_do_cpu_write, 2036 .endianness = DEVICE_NATIVE_ENDIAN, 2037 }; 2038 2039 static const MemoryRegionOps gic_virt_ops[2] = { 2040 { 2041 .read_with_attrs = gic_thiscpu_hyp_read, 2042 .write_with_attrs = gic_thiscpu_hyp_write, 2043 .endianness = DEVICE_NATIVE_ENDIAN, 2044 }, 2045 { 2046 .read_with_attrs = gic_thisvcpu_read, 2047 .write_with_attrs = gic_thisvcpu_write, 2048 .endianness = DEVICE_NATIVE_ENDIAN, 2049 } 2050 }; 2051 2052 static const MemoryRegionOps gic_viface_ops = { 2053 .read_with_attrs = gic_do_hyp_read, 2054 .write_with_attrs = gic_do_hyp_write, 2055 .endianness = DEVICE_NATIVE_ENDIAN, 2056 }; 2057 2058 static void arm_gic_realize(DeviceState *dev, Error **errp) 2059 { 2060 /* Device instance realize function for the GIC sysbus device */ 2061 int i; 2062 GICState *s = ARM_GIC(dev); 2063 SysBusDevice *sbd = SYS_BUS_DEVICE(dev); 2064 ARMGICClass *agc = ARM_GIC_GET_CLASS(s); 2065 Error *local_err = NULL; 2066 2067 agc->parent_realize(dev, &local_err); 2068 if (local_err) { 2069 error_propagate(errp, local_err); 2070 return; 2071 } 2072 2073 if (kvm_enabled() && !kvm_arm_supports_user_irq()) { 2074 error_setg(errp, "KVM with user space irqchip only works when the " 2075 "host kernel supports KVM_CAP_ARM_USER_IRQ"); 2076 return; 2077 } 2078 2079 if (s->n_prio_bits > GIC_MAX_PRIORITY_BITS || 2080 (s->virt_extn ? s->n_prio_bits < GIC_VIRT_MAX_GROUP_PRIO_BITS : 2081 s->n_prio_bits < GIC_MIN_PRIORITY_BITS)) { 2082 error_setg(errp, "num-priority-bits cannot be greater than %d" 2083 " or less than %d", GIC_MAX_PRIORITY_BITS, 2084 s->virt_extn ? GIC_VIRT_MAX_GROUP_PRIO_BITS : 2085 GIC_MIN_PRIORITY_BITS); 2086 return; 2087 } 2088 2089 /* This creates distributor, main CPU interface (s->cpuiomem[0]) and if 2090 * enabled, virtualization extensions related interfaces (main virtual 2091 * interface (s->vifaceiomem[0]) and virtual CPU interface). 2092 */ 2093 gic_init_irqs_and_mmio(s, gic_set_irq, gic_ops, gic_virt_ops); 2094 2095 /* Extra core-specific regions for the CPU interfaces. This is 2096 * necessary for "franken-GIC" implementations, for example on 2097 * Exynos 4. 2098 * NB that the memory region size of 0x100 applies for the 11MPCore 2099 * and also cores following the GIC v1 spec (ie A9). 2100 * GIC v2 defines a larger memory region (0x1000) so this will need 2101 * to be extended when we implement A15. 2102 */ 2103 for (i = 0; i < s->num_cpu; i++) { 2104 s->backref[i] = s; 2105 memory_region_init_io(&s->cpuiomem[i+1], OBJECT(s), &gic_cpu_ops, 2106 &s->backref[i], "gic_cpu", 0x100); 2107 sysbus_init_mmio(sbd, &s->cpuiomem[i+1]); 2108 } 2109 2110 /* Extra core-specific regions for virtual interfaces. This is required by 2111 * the GICv2 specification. 2112 */ 2113 if (s->virt_extn) { 2114 for (i = 0; i < s->num_cpu; i++) { 2115 memory_region_init_io(&s->vifaceiomem[i + 1], OBJECT(s), 2116 &gic_viface_ops, &s->backref[i], 2117 "gic_viface", 0x200); 2118 sysbus_init_mmio(sbd, &s->vifaceiomem[i + 1]); 2119 } 2120 } 2121 2122 } 2123 2124 static void arm_gic_class_init(ObjectClass *klass, void *data) 2125 { 2126 DeviceClass *dc = DEVICE_CLASS(klass); 2127 ARMGICClass *agc = ARM_GIC_CLASS(klass); 2128 2129 device_class_set_parent_realize(dc, arm_gic_realize, &agc->parent_realize); 2130 } 2131 2132 static const TypeInfo arm_gic_info = { 2133 .name = TYPE_ARM_GIC, 2134 .parent = TYPE_ARM_GIC_COMMON, 2135 .instance_size = sizeof(GICState), 2136 .class_init = arm_gic_class_init, 2137 .class_size = sizeof(ARMGICClass), 2138 }; 2139 2140 static void arm_gic_register_types(void) 2141 { 2142 type_register_static(&arm_gic_info); 2143 } 2144 2145 type_init(arm_gic_register_types) 2146