/*
 * ARM Generic Interrupt Controller v3 (emulation)
 *
 * Copyright (c) 2016 Linaro Limited
 * Written by Peter Maydell
 *
 * This code is licensed under the GPL, version 2 or (at your option)
 * any later version.
 */

/* This file contains the code for the system register interface
 * portions of the GICv3.
 */

#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "trace.h"
#include "gicv3_internal.h"
#include "hw/irq.h"
#include "cpu.h"
#include "target/arm/cpregs.h"
#include "target/arm/cpu-features.h"
#include "sysemu/tcg.h"
#include "sysemu/qtest.h"

/*
 * Special case return value from hppvi_index(); must be larger than
 * the architecturally maximum possible list register index (which is 15)
 */
#define HPPVI_INDEX_VLPI 16

static GICv3CPUState *icc_cs_from_env(CPUARMState *env)
{
    return env->gicv3state;
}

static bool gicv3_use_ns_bank(CPUARMState *env)
{
    /* Return true if we should use the NonSecure bank for a banked GIC
     * CPU interface register. Note that this differs from the
     * access_secure_reg() function because GICv3 banked registers are
     * banked even for AArch64, unlike the other CPU system registers.
     */
    return !arm_is_secure_below_el3(env);
}

/* The minimum BPR for the virtual interface is a configurable property */
static inline int icv_min_vbpr(GICv3CPUState *cs)
{
    return 7 - cs->vprebits;
}

static inline int ich_num_aprs(GICv3CPUState *cs)
{
    /* Return the number of virtual APR registers (1, 2, or 4) */
    int aprmax = 1 << (cs->vprebits - 5);
    assert(aprmax <= ARRAY_SIZE(cs->ich_apr[0]));
    return aprmax;
}
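/*
 * Worked example (illustrative numbers only): with cs->vprebits == 5,
 * icv_min_vbpr() is 7 - 5 = 2 and ich_num_aprs() is 1 << (5 - 5) = 1,
 * i.e. a single 32-bit active-priority register tracks all 2^5 = 32
 * virtual preemption levels. With vprebits == 7 the minimum VBPR drops
 * to 0 and four APRs (128 bits) are needed.
 */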
/* Simple accessor functions for LR fields */
static uint32_t ich_lr_vintid(uint64_t lr)
{
    return extract64(lr, ICH_LR_EL2_VINTID_SHIFT, ICH_LR_EL2_VINTID_LENGTH);
}

static uint32_t ich_lr_pintid(uint64_t lr)
{
    return extract64(lr, ICH_LR_EL2_PINTID_SHIFT, ICH_LR_EL2_PINTID_LENGTH);
}

static uint32_t ich_lr_prio(uint64_t lr)
{
    return extract64(lr, ICH_LR_EL2_PRIORITY_SHIFT, ICH_LR_EL2_PRIORITY_LENGTH);
}

static int ich_lr_state(uint64_t lr)
{
    return extract64(lr, ICH_LR_EL2_STATE_SHIFT, ICH_LR_EL2_STATE_LENGTH);
}

static bool icv_access(CPUARMState *env, int hcr_flags)
{
    /* Return true if this ICC_ register access should really be
     * directed to an ICV_ access. hcr_flags is a mask of
     * HCR_EL2 bits to check: we treat this as an ICV_ access
     * if we are in NS EL1 and at least one of the specified
     * HCR_EL2 bits is set.
     *
     * ICV registers fall into three categories:
     *  * access if NS EL1 and HCR_EL2.FMO == 1:
     *    all ICV regs with '0' in their name
     *  * access if NS EL1 and HCR_EL2.IMO == 1:
     *    all ICV regs with '1' in their name
     *  * access if NS EL1 and either IMO or FMO == 1:
     *    CTLR, DIR, PMR, RPR
     */
    uint64_t hcr_el2 = arm_hcr_el2_eff(env);
    bool flagmatch = hcr_el2 & hcr_flags & (HCR_IMO | HCR_FMO);

    return flagmatch && arm_current_el(env) == 1
        && !arm_is_secure_below_el3(env);
}

static int read_vbpr(GICv3CPUState *cs, int grp)
{
    /* Read VBPR value out of the VMCR field (caller must handle
     * VCBPR effects if required)
     */
    if (grp == GICV3_G0) {
        return extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR0_SHIFT,
                         ICH_VMCR_EL2_VBPR0_LENGTH);
    } else {
        return extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR1_SHIFT,
                         ICH_VMCR_EL2_VBPR1_LENGTH);
    }
}

static void write_vbpr(GICv3CPUState *cs, int grp, int value)
{
    /* Write new VBPR value, handling the "writing a value less than
     * the minimum sets it to the minimum" semantics.
     */
    int min = icv_min_vbpr(cs);

    if (grp != GICV3_G0) {
        min++;
    }

    value = MAX(value, min);

    if (grp == GICV3_G0) {
        cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR0_SHIFT,
                                     ICH_VMCR_EL2_VBPR0_LENGTH, value);
    } else {
        cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR1_SHIFT,
                                     ICH_VMCR_EL2_VBPR1_LENGTH, value);
    }
}

static uint32_t icv_fullprio_mask(GICv3CPUState *cs)
{
    /* Return a mask word which clears the unimplemented priority bits
     * from a priority value for a virtual interrupt. (Not to be confused
     * with the group priority, whose mask depends on the value of VBPR
     * for the interrupt group.)
     */
    return (~0U << (8 - cs->vpribits)) & 0xff;
}
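/*
 * Example (illustrative only): with cs->vpribits == 5 the mask above is
 * (~0U << 3) & 0xff = 0xf8, so a virtual priority written as 0x57 is
 * stored as 0x50 -- the bottom (8 - vpribits) bits always read as zero.
 */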
static int ich_highest_active_virt_prio(GICv3CPUState *cs)
{
    /* Calculate the current running priority based on the set bits
     * in the ICH Active Priority Registers.
     */
    int i;
    int aprmax = ich_num_aprs(cs);

    if (cs->ich_apr[GICV3_G1NS][0] & ICV_AP1R_EL1_NMI) {
        return 0x0;
    }

    for (i = 0; i < aprmax; i++) {
        uint32_t apr = cs->ich_apr[GICV3_G0][i] |
                       cs->ich_apr[GICV3_G1NS][i];

        if (!apr) {
            continue;
        }
        return (i * 32 + ctz32(apr)) << (icv_min_vbpr(cs) + 1);
    }
    /* No current active interrupts: return idle priority */
    return 0xff;
}

static int hppvi_index(GICv3CPUState *cs)
{
    /*
     * Return the list register index of the highest priority pending
     * virtual interrupt, as per the HighestPriorityVirtualInterrupt
     * pseudocode. If no pending virtual interrupts, return -1.
     * If the highest priority pending virtual interrupt is a vLPI,
     * return HPPVI_INDEX_VLPI.
     * (The pseudocode handles checking whether the vLPI is higher
     * priority than the highest priority list register at every
     * callsite of HighestPriorityVirtualInterrupt; we check it here.)
     */
    ARMCPU *cpu = ARM_CPU(cs->cpu);
    CPUARMState *env = &cpu->env;
    int idx = -1;
    int i;
    /* Note that a list register entry with a priority of 0xff will
     * never be reported by this function; this is the architecturally
     * correct behaviour.
     */
    int prio = 0xff;
    bool nmi = false;

    if (!(cs->ich_vmcr_el2 & (ICH_VMCR_EL2_VENG0 | ICH_VMCR_EL2_VENG1))) {
        /* Both groups disabled, definitely nothing to do */
        return idx;
    }

    for (i = 0; i < cs->num_list_regs; i++) {
        uint64_t lr = cs->ich_lr_el2[i];
        bool thisnmi;
        int thisprio;

        if (ich_lr_state(lr) != ICH_LR_EL2_STATE_PENDING) {
            /* Not Pending */
            continue;
        }

        /* Ignore interrupts if relevant group enable not set */
        if (lr & ICH_LR_EL2_GROUP) {
            if (!(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) {
                continue;
            }
        } else {
            if (!(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0)) {
                continue;
            }
        }

        thisnmi = lr & ICH_LR_EL2_NMI;
        thisprio = ich_lr_prio(lr);

        if ((thisprio < prio) || ((thisprio == prio) && thisnmi && !nmi)) {
            prio = thisprio;
            nmi = thisnmi;
            idx = i;
        }
    }

    /*
     * "no pending vLPI" is indicated with prio = 0xff, which always
     * fails the priority check here. vLPIs are only considered
     * when we are in Non-Secure state.
     */
    if (cs->hppvlpi.prio < prio && !arm_is_secure(env)) {
        if (cs->hppvlpi.grp == GICV3_G0) {
            if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0) {
                return HPPVI_INDEX_VLPI;
            }
        } else {
            if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1) {
                return HPPVI_INDEX_VLPI;
            }
        }
    }

    return idx;
}

static uint32_t icv_gprio_mask(GICv3CPUState *cs, int group)
{
    /* Return a mask word which clears the subpriority bits from
     * a priority value for a virtual interrupt in the specified group.
     * This depends on the VBPR value.
     * If using VBPR0 then:
     *  a BPR of 0 means the group priority bits are [7:1];
     *  a BPR of 1 means they are [7:2], and so on down to
     *  a BPR of 7 meaning no group priority bits at all.
     * If using VBPR1 then:
     *  a BPR of 0 is impossible (the minimum value is 1)
     *  a BPR of 1 means the group priority bits are [7:1];
     *  a BPR of 2 means they are [7:2], and so on down to
     *  a BPR of 7 meaning the group priority is [7].
     *
     * Which BPR to use depends on the group of the interrupt and
     * the current ICH_VMCR_EL2.VCBPR settings.
     *
     * This corresponds to the VGroupBits() pseudocode.
     */
    int bpr;

    if (group == GICV3_G1NS && cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR) {
        group = GICV3_G0;
    }

    bpr = read_vbpr(cs, group);
    if (group == GICV3_G1NS) {
        assert(bpr > 0);
        bpr--;
    }

    return ~0U << (bpr + 1);
}
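/*
 * Example of the group-priority arithmetic above (illustrative values):
 * if VBPR0 == 2 the mask is ~0U << 3 = 0xfffffff8, i.e. bits [7:3] of
 * an 8-bit priority form the group priority and bits [2:0] the
 * subpriority. Priorities 0x48 and 0x4b then belong to the same
 * preemption level.
 */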
static bool icv_hppi_can_preempt(GICv3CPUState *cs, uint64_t lr)
{
    /* Return true if we can signal this virtual interrupt defined by
     * the given list register value; see the pseudocode functions
     * CanSignalVirtualInterrupt and CanSignalVirtualInt.
     * Compare also icc_hppi_can_preempt() which is the non-virtual
     * equivalent of these checks.
     */
    int grp;
    bool is_nmi;
    uint32_t mask, prio, rprio, vpmr;

    if (!(cs->ich_hcr_el2 & ICH_HCR_EL2_EN)) {
        /* Virtual interface disabled */
        return false;
    }

    /* We don't need to check that this LR is in Pending state because
     * that has already been done in hppvi_index().
     */

    prio = ich_lr_prio(lr);
    is_nmi = lr & ICH_LR_EL2_NMI;
    vpmr = extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
                     ICH_VMCR_EL2_VPMR_LENGTH);

    if (!is_nmi && prio >= vpmr) {
        /* Priority mask masks this interrupt */
        return false;
    }

    rprio = ich_highest_active_virt_prio(cs);
    if (rprio == 0xff) {
        /* No running interrupt so we can preempt */
        return true;
    }

    grp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;

    mask = icv_gprio_mask(cs, grp);

    /* We only preempt a running interrupt if the pending interrupt's
     * group priority is sufficient (the subpriorities are not considered).
     */
    if ((prio & mask) < (rprio & mask)) {
        return true;
    }

    if ((prio & mask) == (rprio & mask) && is_nmi &&
        !(cs->ich_apr[GICV3_G1NS][0] & ICV_AP1R_EL1_NMI)) {
        return true;
    }

    return false;
}

static bool icv_hppvlpi_can_preempt(GICv3CPUState *cs)
{
    /*
     * Return true if we can signal the highest priority pending vLPI.
     * We can assume we're Non-secure because hppvi_index() already
     * tested for that.
     */
    uint32_t mask, rprio, vpmr;

    if (!(cs->ich_hcr_el2 & ICH_HCR_EL2_EN)) {
        /* Virtual interface disabled */
        return false;
    }

    vpmr = extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
                     ICH_VMCR_EL2_VPMR_LENGTH);

    if (cs->hppvlpi.prio >= vpmr) {
        /* Priority mask masks this interrupt */
        return false;
    }

    rprio = ich_highest_active_virt_prio(cs);
    if (rprio == 0xff) {
        /* No running interrupt so we can preempt */
        return true;
    }

    mask = icv_gprio_mask(cs, cs->hppvlpi.grp);

    /*
     * We only preempt a running interrupt if the pending interrupt's
     * group priority is sufficient (the subpriorities are not considered).
     */
    if ((cs->hppvlpi.prio & mask) < (rprio & mask)) {
        return true;
    }

    return false;
}
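/*
 * A worked instance of the preemption checks above (illustrative
 * values): suppose the running priority is 0x48, the pending priority
 * is 0x43 and the group mask is 0xf8. Then (0x43 & 0xf8) = 0x40 is
 * less than (0x48 & 0xf8) = 0x48, so the pending interrupt preempts.
 * A pending priority of 0x4b would compare 0x48 == 0x48 and (absent
 * the NMI special case) would not preempt.
 */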
static uint32_t eoi_maintenance_interrupt_state(GICv3CPUState *cs,
                                                uint32_t *misr)
{
    /* Return a set of bits indicating the EOI maintenance interrupt status
     * for each list register. The EOI maintenance interrupt status is
     * 1 if LR.State == 0 && LR.HW == 0 && LR.EOI == 1
     * (see the GICv3 spec for the ICH_EISR_EL2 register).
     * If misr is not NULL then we should also collect the information
     * about the MISR.EOI, MISR.NP and MISR.U bits.
     */
    uint32_t value = 0;
    int validcount = 0;
    bool seenpending = false;
    int i;

    for (i = 0; i < cs->num_list_regs; i++) {
        uint64_t lr = cs->ich_lr_el2[i];

        if ((lr & (ICH_LR_EL2_STATE_MASK | ICH_LR_EL2_HW | ICH_LR_EL2_EOI))
            == ICH_LR_EL2_EOI) {
            value |= (1 << i);
        }
        if ((lr & ICH_LR_EL2_STATE_MASK)) {
            validcount++;
        }
        if (ich_lr_state(lr) == ICH_LR_EL2_STATE_PENDING) {
            seenpending = true;
        }
    }

    if (misr) {
        if (validcount < 2 && (cs->ich_hcr_el2 & ICH_HCR_EL2_UIE)) {
            *misr |= ICH_MISR_EL2_U;
        }
        if (!seenpending && (cs->ich_hcr_el2 & ICH_HCR_EL2_NPIE)) {
            *misr |= ICH_MISR_EL2_NP;
        }
        if (value) {
            *misr |= ICH_MISR_EL2_EOI;
        }
    }
    return value;
}

static uint32_t maintenance_interrupt_state(GICv3CPUState *cs)
{
    /* Return a set of bits indicating the maintenance interrupt status
     * (as seen in the ICH_MISR_EL2 register).
     */
    uint32_t value = 0;

    /* Scan list registers and fill in the U, NP and EOI bits */
    eoi_maintenance_interrupt_state(cs, &value);

    if ((cs->ich_hcr_el2 & ICH_HCR_EL2_LRENPIE) &&
        (cs->ich_hcr_el2 & ICH_HCR_EL2_EOICOUNT_MASK)) {
        value |= ICH_MISR_EL2_LRENP;
    }

    if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP0EIE) &&
        (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0)) {
        value |= ICH_MISR_EL2_VGRP0E;
    }

    if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP0DIE) &&
        !(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0)) {
        value |= ICH_MISR_EL2_VGRP0D;
    }

    if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP1EIE) &&
        (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) {
        value |= ICH_MISR_EL2_VGRP1E;
    }

    if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP1DIE) &&
        !(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) {
        value |= ICH_MISR_EL2_VGRP1D;
    }

    return value;
}

void gicv3_cpuif_virt_irq_fiq_update(GICv3CPUState *cs)
{
    /*
     * Tell the CPU about any pending virtual interrupts.
     * This should only be called for changes that affect the
     * vIRQ and vFIQ status and do not change the maintenance
     * interrupt status. This means that unlike gicv3_cpuif_virt_update()
     * this function won't recursively call back into the GIC code.
     * The main use of this is when the redistributor has changed the
     * highest priority pending virtual LPI.
     */
    int idx;
    int irqlevel = 0;
    int fiqlevel = 0;
    int nmilevel = 0;

    idx = hppvi_index(cs);
    trace_gicv3_cpuif_virt_update(gicv3_redist_affid(cs), idx,
                                  cs->hppvlpi.irq, cs->hppvlpi.grp,
                                  cs->hppvlpi.prio);
    if (idx == HPPVI_INDEX_VLPI) {
        if (icv_hppvlpi_can_preempt(cs)) {
            if (cs->hppvlpi.grp == GICV3_G0) {
                fiqlevel = 1;
            } else {
                irqlevel = 1;
            }
        }
    } else if (idx >= 0) {
        uint64_t lr = cs->ich_lr_el2[idx];

        if (icv_hppi_can_preempt(cs, lr)) {
            /*
             * Virtual interrupts are simple: G0 are always FIQ, and G1 are
             * IRQ or NMI depending on whether ICH_LR<n>_EL2.NMI marks the
             * interrupt as having the non-maskable property.
             */
            if (lr & ICH_LR_EL2_GROUP) {
                if (lr & ICH_LR_EL2_NMI) {
                    nmilevel = 1;
                } else {
                    irqlevel = 1;
                }
            } else {
                fiqlevel = 1;
            }
        }
    }

    trace_gicv3_cpuif_virt_set_irqs(gicv3_redist_affid(cs), fiqlevel, irqlevel);
    qemu_set_irq(cs->parent_vfiq, fiqlevel);
    qemu_set_irq(cs->parent_virq, irqlevel);
    qemu_set_irq(cs->parent_vnmi, nmilevel);
}
static void gicv3_cpuif_virt_update(GICv3CPUState *cs)
{
    /*
     * Tell the CPU about any pending virtual interrupts or
     * maintenance interrupts, following a change to the state
     * of the CPU interface relevant to virtual interrupts.
     *
     * CAUTION: this function will call qemu_set_irq() on the
     * CPU maintenance IRQ line, which is typically wired up
     * to the GIC as a per-CPU interrupt. This means that it
     * will recursively call back into the GIC code via
     * gicv3_redist_set_irq() and thus into the CPU interface code's
     * gicv3_cpuif_update(). It is therefore important that this
     * function is only called as the final action of a CPU interface
     * register write implementation, after all the GIC state
     * fields have been updated. gicv3_cpuif_update() also must
     * not cause this function to be called, but that happens
     * naturally as a result of there being no architectural
     * linkage between the physical and virtual GIC logic.
     */
    ARMCPU *cpu = ARM_CPU(cs->cpu);
    int maintlevel = 0;

    gicv3_cpuif_virt_irq_fiq_update(cs);

    if ((cs->ich_hcr_el2 & ICH_HCR_EL2_EN) &&
        maintenance_interrupt_state(cs) != 0) {
        maintlevel = 1;
    }

    trace_gicv3_cpuif_virt_set_maint_irq(gicv3_redist_affid(cs), maintlevel);
    qemu_set_irq(cpu->gicv3_maintenance_interrupt, maintlevel);
}

static uint64_t icv_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    int regno = ri->opc2 & 3;
    int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
    uint64_t value = cs->ich_apr[grp][regno];

    trace_gicv3_icv_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
    return value;
}

static void icv_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    int regno = ri->opc2 & 3;
    int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;

    trace_gicv3_icv_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);

    if (cs->nmi_support) {
        cs->ich_apr[grp][regno] = value & (0xFFFFFFFFU | ICV_AP1R_EL1_NMI);
    } else {
        cs->ich_apr[grp][regno] = value & 0xFFFFFFFFU;
    }

    gicv3_cpuif_virt_irq_fiq_update(cs);
}

static uint64_t icv_bpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1NS;
    uint64_t bpr;
    bool satinc = false;

    if (grp == GICV3_G1NS && (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR)) {
        /* reads return bpr0 + 1 saturated to 7, writes ignored */
        grp = GICV3_G0;
        satinc = true;
    }

    bpr = read_vbpr(cs, grp);

    if (satinc) {
        bpr++;
        bpr = MIN(bpr, 7);
    }

    trace_gicv3_icv_bpr_read(ri->crm == 8 ? 0 : 1, gicv3_redist_affid(cs), bpr);

    return bpr;
}

static void icv_bpr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1NS;

    trace_gicv3_icv_bpr_write(ri->crm == 8 ? 0 : 1,
                              gicv3_redist_affid(cs), value);

    if (grp == GICV3_G1NS && (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR)) {
        /* reads return bpr0 + 1 saturated to 7, writes ignored */
        return;
    }

    write_vbpr(cs, grp, value);

    gicv3_cpuif_virt_irq_fiq_update(cs);
}
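/*
 * VCBPR saturation example for the accessors above (illustrative):
 * with ICH_VMCR_EL2.VCBPR set and VBPR0 == 3, a Group 1 read of
 * ICV_BPR1 returns 3 + 1 = 4; with VBPR0 == 7 it returns 7
 * (saturated). Group 1 writes are discarded in that configuration.
 */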
static uint64_t icv_pmr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t value;

    value = extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
                      ICH_VMCR_EL2_VPMR_LENGTH);

    trace_gicv3_icv_pmr_read(gicv3_redist_affid(cs), value);
    return value;
}

static void icv_pmr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    GICv3CPUState *cs = icc_cs_from_env(env);

    trace_gicv3_icv_pmr_write(gicv3_redist_affid(cs), value);

    value &= icv_fullprio_mask(cs);

    cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
                                 ICH_VMCR_EL2_VPMR_LENGTH, value);

    gicv3_cpuif_virt_irq_fiq_update(cs);
}

static uint64_t icv_igrpen_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    int enbit;
    uint64_t value;

    enbit = ri->opc2 & 1 ? ICH_VMCR_EL2_VENG1_SHIFT : ICH_VMCR_EL2_VENG0_SHIFT;
    value = extract64(cs->ich_vmcr_el2, enbit, 1);

    trace_gicv3_icv_igrpen_read(ri->opc2 & 1 ? 1 : 0,
                                gicv3_redist_affid(cs), value);
    return value;
}

static void icv_igrpen_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    int enbit;

    trace_gicv3_icv_igrpen_write(ri->opc2 & 1 ? 1 : 0,
                                 gicv3_redist_affid(cs), value);

    enbit = ri->opc2 & 1 ? ICH_VMCR_EL2_VENG1_SHIFT : ICH_VMCR_EL2_VENG0_SHIFT;

    cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, enbit, 1, value);
    gicv3_cpuif_virt_update(cs);
}

static uint64_t icv_ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t value;

    /* Note that the fixed fields here (A3V, SEIS, IDbits, PRIbits)
     * should match the ones reported in ich_vtr_read().
     */
    value = ICC_CTLR_EL1_A3V | (1 << ICC_CTLR_EL1_IDBITS_SHIFT) |
        ((cs->vpribits - 1) << ICC_CTLR_EL1_PRIBITS_SHIFT);

    if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VEOIM) {
        value |= ICC_CTLR_EL1_EOIMODE;
    }

    if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR) {
        value |= ICC_CTLR_EL1_CBPR;
    }

    trace_gicv3_icv_ctlr_read(gicv3_redist_affid(cs), value);
    return value;
}

static void icv_ctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    GICv3CPUState *cs = icc_cs_from_env(env);

    trace_gicv3_icv_ctlr_write(gicv3_redist_affid(cs), value);

    cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VCBPR_SHIFT,
                                 1, value & ICC_CTLR_EL1_CBPR ? 1 : 0);
    cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VEOIM_SHIFT,
                                 1, value & ICC_CTLR_EL1_EOIMODE ? 1 : 0);

    gicv3_cpuif_virt_irq_fiq_update(cs);
}

static uint64_t icv_rpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t prio = ich_highest_active_virt_prio(cs);

    if (cs->ich_apr[GICV3_G1NS][0] & ICV_AP1R_EL1_NMI) {
        prio |= ICV_RPR_EL1_NMI;
    }

    trace_gicv3_icv_rpr_read(gicv3_redist_affid(cs), prio);
    return prio;
}

static uint64_t icv_hppir_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    int grp = ri->crm == 8 ? GICV3_G0 : GICV3_G1NS;
    int idx = hppvi_index(cs);
    uint64_t value = INTID_SPURIOUS;

    if (idx == HPPVI_INDEX_VLPI) {
        if (cs->hppvlpi.grp == grp) {
            value = cs->hppvlpi.irq;
        }
    } else if (idx >= 0) {
        uint64_t lr = cs->ich_lr_el2[idx];
        int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;

        if (grp == thisgrp) {
            value = ich_lr_vintid(lr);
        }
    }

    trace_gicv3_icv_hppir_read(ri->crm == 8 ? 0 : 1,
                               gicv3_redist_affid(cs), value);
    return value;
}
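/*
 * APR bit arithmetic used below (illustrative values): with
 * vprebits == 6, an interrupt whose masked group priority is 0x50
 * maps to aprbit = 0x50 >> (8 - 6) = 20, i.e. regno 0, regbit 20.
 * With vprebits == 7, priority 0x50 maps to aprbit 40: regno 1,
 * regbit 8.
 */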
static void icv_activate_irq(GICv3CPUState *cs, int idx, int grp)
{
    /* Activate the interrupt in the specified list register
     * by moving it from Pending to Active state, and update the
     * Active Priority Registers.
     */
    uint32_t mask = icv_gprio_mask(cs, grp);
    int prio = ich_lr_prio(cs->ich_lr_el2[idx]) & mask;
    bool nmi = cs->ich_lr_el2[idx] & ICH_LR_EL2_NMI;
    int aprbit = prio >> (8 - cs->vprebits);
    int regno = aprbit / 32;
    int regbit = aprbit % 32;

    cs->ich_lr_el2[idx] &= ~ICH_LR_EL2_STATE_PENDING_BIT;
    cs->ich_lr_el2[idx] |= ICH_LR_EL2_STATE_ACTIVE_BIT;

    if (nmi) {
        cs->ich_apr[grp][regno] |= ICV_AP1R_EL1_NMI;
    } else {
        cs->ich_apr[grp][regno] |= (1U << regbit);
    }
}

static void icv_activate_vlpi(GICv3CPUState *cs)
{
    uint32_t mask = icv_gprio_mask(cs, cs->hppvlpi.grp);
    int prio = cs->hppvlpi.prio & mask;
    int aprbit = prio >> (8 - cs->vprebits);
    int regno = aprbit / 32;
    int regbit = aprbit % 32;

    cs->ich_apr[cs->hppvlpi.grp][regno] |= (1U << regbit);
    gicv3_redist_vlpi_pending(cs, cs->hppvlpi.irq, 0);
}

static uint64_t icv_iar_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    int grp = ri->crm == 8 ? GICV3_G0 : GICV3_G1NS;
    int idx = hppvi_index(cs);
    uint64_t intid = INTID_SPURIOUS;
    int el = arm_current_el(env);

    if (idx == HPPVI_INDEX_VLPI) {
        if (cs->hppvlpi.grp == grp && icv_hppvlpi_can_preempt(cs)) {
            intid = cs->hppvlpi.irq;
            icv_activate_vlpi(cs);
        }
    } else if (idx >= 0) {
        uint64_t lr = cs->ich_lr_el2[idx];
        int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;
        bool nmi = env->cp15.sctlr_el[el] & SCTLR_NMI && lr & ICH_LR_EL2_NMI;

        if (thisgrp == grp && icv_hppi_can_preempt(cs, lr)) {
            intid = ich_lr_vintid(lr);
            if (!gicv3_intid_is_special(intid)) {
                if (!nmi) {
                    icv_activate_irq(cs, idx, grp);
                } else {
                    intid = INTID_NMI;
                }
            } else {
                /* Interrupt goes from Pending to Invalid */
                cs->ich_lr_el2[idx] &= ~ICH_LR_EL2_STATE_PENDING_BIT;
                /* We will now return the (bogus) ID from the list register,
                 * as per the pseudocode.
                 */
            }
        }
    }

    trace_gicv3_icv_iar_read(ri->crm == 8 ? 0 : 1,
                             gicv3_redist_affid(cs), intid);

    gicv3_cpuif_virt_update(cs);

    return intid;
}

static uint64_t icv_nmiar1_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    int idx = hppvi_index(cs);
    uint64_t intid = INTID_SPURIOUS;

    if (idx >= 0 && idx != HPPVI_INDEX_VLPI) {
        uint64_t lr = cs->ich_lr_el2[idx];
        int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;

        if ((thisgrp == GICV3_G1NS) && icv_hppi_can_preempt(cs, lr)) {
            intid = ich_lr_vintid(lr);
            if (!gicv3_intid_is_special(intid)) {
                if (lr & ICH_LR_EL2_NMI) {
                    icv_activate_irq(cs, idx, GICV3_G1NS);
                } else {
                    intid = INTID_SPURIOUS;
                }
            } else {
                /* Interrupt goes from Pending to Invalid */
                cs->ich_lr_el2[idx] &= ~ICH_LR_EL2_STATE_PENDING_BIT;
                /*
                 * We will now return the (bogus) ID from the list register,
                 * as per the pseudocode.
                 */
            }
        }
    }

    trace_gicv3_icv_nmiar1_read(gicv3_redist_affid(cs), intid);

    gicv3_cpuif_virt_update(cs);

    return intid;
}
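/*
 * Example (illustrative): with cs->pribits == 5 the mask below is
 * (~0U << 3) & 0xff = 0xf8, matching a GIC that implements 32 physical
 * priority levels; with the architectural maximum pribits == 8 the
 * mask is 0xff and all priority bits are significant.
 */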
static uint32_t icc_fullprio_mask(GICv3CPUState *cs)
{
    /*
     * Return a mask word which clears the unimplemented priority bits
     * from a priority value for a physical interrupt. (Not to be confused
     * with the group priority, whose mask depends on the value of BPR
     * for the interrupt group.)
     */
    return (~0U << (8 - cs->pribits)) & 0xff;
}

static inline int icc_min_bpr(GICv3CPUState *cs)
{
    /* The minimum BPR for the physical interface. */
    return 7 - cs->prebits;
}

static inline int icc_min_bpr_ns(GICv3CPUState *cs)
{
    return icc_min_bpr(cs) + 1;
}

static inline int icc_num_aprs(GICv3CPUState *cs)
{
    /* Return the number of APR registers (1, 2, or 4) */
    int aprmax = 1 << MAX(cs->prebits - 5, 0);
    assert(aprmax <= ARRAY_SIZE(cs->icc_apr[0]));
    return aprmax;
}

static int icc_highest_active_prio(GICv3CPUState *cs)
{
    /* Calculate the current running priority based on the set bits
     * in the Active Priority Registers.
     */
    int i;

    if (cs->nmi_support) {
        /*
         * If an NMI is active this takes precedence over anything else
         * for priority purposes; the NMI bit is only present in AP1R0.
         * We return here the effective priority of the NMI, which is
         * either 0x0 or 0x80. Callers will need to check NMI again for
         * purposes of either setting the RPR register bits or for
         * prioritization of NMI vs non-NMI.
         */
        if (cs->icc_apr[GICV3_G1][0] & ICC_AP1R_EL1_NMI) {
            return 0;
        }
        if (cs->icc_apr[GICV3_G1NS][0] & ICC_AP1R_EL1_NMI) {
            return (cs->gic->gicd_ctlr & GICD_CTLR_DS) ? 0 : 0x80;
        }
    }

    for (i = 0; i < icc_num_aprs(cs); i++) {
        uint32_t apr = cs->icc_apr[GICV3_G0][i] |
                       cs->icc_apr[GICV3_G1][i] | cs->icc_apr[GICV3_G1NS][i];

        if (!apr) {
            continue;
        }
        return (i * 32 + ctz32(apr)) << (icc_min_bpr(cs) + 1);
    }
    /* No current active interrupts: return idle priority */
    return 0xff;
}
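/*
 * Running-priority example for the function above (illustrative):
 * with prebits == 5 the shift is icc_min_bpr() + 1 = 3, so bit 9 set
 * in APR0 means a running priority of (0 * 32 + 9) << 3 = 0x48.
 */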
static uint32_t icc_gprio_mask(GICv3CPUState *cs, int group)
{
    /* Return a mask word which clears the subpriority bits from
     * a priority value for an interrupt in the specified group.
     * This depends on the BPR value. For CBPR0 (S or NS):
     *  a BPR of 0 means the group priority bits are [7:1];
     *  a BPR of 1 means they are [7:2], and so on down to
     *  a BPR of 7 meaning no group priority bits at all.
     * For CBPR1 NS:
     *  a BPR of 0 is impossible (the minimum value is 1)
     *  a BPR of 1 means the group priority bits are [7:1];
     *  a BPR of 2 means they are [7:2], and so on down to
     *  a BPR of 7 meaning the group priority is [7].
     *
     * Which BPR to use depends on the group of the interrupt and
     * the current ICC_CTLR.CBPR settings.
     *
     * This corresponds to the GroupBits() pseudocode.
     */
    int bpr;

    if ((group == GICV3_G1 && cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR) ||
        (group == GICV3_G1NS &&
         cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR)) {
        group = GICV3_G0;
    }

    bpr = cs->icc_bpr[group] & 7;

    if (group == GICV3_G1NS) {
        assert(bpr > 0);
        bpr--;
    }

    return ~0U << (bpr + 1);
}

static bool icc_no_enabled_hppi(GICv3CPUState *cs)
{
    /* Return true if there is no pending interrupt, or the
     * highest priority pending interrupt is in a group which has been
     * disabled at the CPU interface by the ICC_IGRPEN* register enable bits.
     */
    return cs->hppi.prio == 0xff || (cs->icc_igrpen[cs->hppi.grp] == 0);
}

static bool icc_hppi_can_preempt(GICv3CPUState *cs)
{
    /* Return true if we have a pending interrupt of sufficient
     * priority to preempt.
     */
    int rprio;
    uint32_t mask;
    ARMCPU *cpu = ARM_CPU(cs->cpu);
    CPUARMState *env = &cpu->env;

    if (icc_no_enabled_hppi(cs)) {
        return false;
    }

    if (cs->hppi.nmi) {
        if (!(cs->gic->gicd_ctlr & GICD_CTLR_DS) &&
            cs->hppi.grp == GICV3_G1NS) {
            if (cs->icc_pmr_el1 < 0x80) {
                return false;
            }
            if (arm_is_secure(env) && cs->icc_pmr_el1 == 0x80) {
                return false;
            }
        }
    } else if (cs->hppi.prio >= cs->icc_pmr_el1) {
        /* Priority mask masks this interrupt */
        return false;
    }

    rprio = icc_highest_active_prio(cs);
    if (rprio == 0xff) {
        /* No currently running interrupt so we can preempt */
        return true;
    }

    mask = icc_gprio_mask(cs, cs->hppi.grp);

    /* We only preempt a running interrupt if the pending interrupt's
     * group priority is sufficient (the subpriorities are not considered).
     */
    if ((cs->hppi.prio & mask) < (rprio & mask)) {
        return true;
    }

    if (cs->hppi.nmi && (cs->hppi.prio & mask) == (rprio & mask)) {
        if (!(cs->icc_apr[cs->hppi.grp][0] & ICC_AP1R_EL1_NMI)) {
            return true;
        }
    }

    return false;
}
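/*
 * Summary of the physical IRQ/FIQ routing decided below (per the
 * GICv3 spec section 4.6.2): Group 0 always signals FIQ; Group 1
 * signals IRQ when taken in its own Security state and FIQ when the
 * CPU is currently in the other Security state (or at EL3).
 */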
void gicv3_cpuif_update(GICv3CPUState *cs)
{
    /* Tell the CPU about its highest priority pending interrupt */
    int irqlevel = 0;
    int fiqlevel = 0;
    int nmilevel = 0;
    ARMCPU *cpu = ARM_CPU(cs->cpu);
    CPUARMState *env = &cpu->env;

    g_assert(bql_locked());

    trace_gicv3_cpuif_update(gicv3_redist_affid(cs), cs->hppi.irq,
                             cs->hppi.grp, cs->hppi.prio);

    if (cs->hppi.grp == GICV3_G1 && !arm_feature(env, ARM_FEATURE_EL3)) {
        /* If a Security-enabled GIC sends a G1S interrupt to a
         * Security-disabled CPU, we must treat it as if it were G0.
         */
        cs->hppi.grp = GICV3_G0;
    }

    if (icc_hppi_can_preempt(cs)) {
        /* We have an interrupt: should we signal it as IRQ or FIQ?
         * This is described in the GICv3 spec section 4.6.2.
         */
        bool isfiq;

        switch (cs->hppi.grp) {
        case GICV3_G0:
            isfiq = true;
            break;
        case GICV3_G1:
            isfiq = (!arm_is_secure(env) ||
                     (arm_current_el(env) == 3 && arm_el_is_aa64(env, 3)));
            break;
        case GICV3_G1NS:
            isfiq = arm_is_secure(env);
            break;
        default:
            g_assert_not_reached();
        }

        if (isfiq) {
            fiqlevel = 1;
        } else if (cs->hppi.nmi) {
            nmilevel = 1;
        } else {
            irqlevel = 1;
        }
    }

    trace_gicv3_cpuif_set_irqs(gicv3_redist_affid(cs), fiqlevel, irqlevel);

    qemu_set_irq(cs->parent_fiq, fiqlevel);
    qemu_set_irq(cs->parent_irq, irqlevel);
    qemu_set_irq(cs->parent_nmi, nmilevel);
}

static uint64_t icc_pmr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint32_t value = cs->icc_pmr_el1;

    if (icv_access(env, HCR_FMO | HCR_IMO)) {
        return icv_pmr_read(env, ri);
    }

    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env) &&
        (env->cp15.scr_el3 & SCR_FIQ)) {
        /* NS access and Group 0 is inaccessible to NS: return the
         * NS view of the current priority
         */
        if ((value & 0x80) == 0) {
            /* Secure priorities not visible to NS */
            value = 0;
        } else if (value != 0xff) {
            value = (value << 1) & 0xff;
        }
    }

    trace_gicv3_icc_pmr_read(gicv3_redist_affid(cs), value);

    return value;
}

static void icc_pmr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    GICv3CPUState *cs = icc_cs_from_env(env);

    if (icv_access(env, HCR_FMO | HCR_IMO)) {
        return icv_pmr_write(env, ri, value);
    }

    trace_gicv3_icc_pmr_write(gicv3_redist_affid(cs), value);

    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env) &&
        (env->cp15.scr_el3 & SCR_FIQ)) {
        /* NS access and Group 0 is inaccessible to NS: store the
         * NS-written value shifted into the Non-secure half of the
         * priority range
         */
        if (!(cs->icc_pmr_el1 & 0x80)) {
            /* Current PMR in the secure range, don't allow NS to change it */
            return;
        }
        value = (value >> 1) | 0x80;
    }
    value &= icc_fullprio_mask(cs);
    cs->icc_pmr_el1 = value;
    gicv3_cpuif_update(cs);
}
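/*
 * Non-secure PMR view example for the code above (illustrative): a
 * Secure PMR of 0xc0 is presented to NS as (0xc0 << 1) & 0xff = 0x80,
 * and an NS write of 0x80 is stored as (0x80 >> 1) | 0x80 = 0xc0,
 * halving the priority range that Non-secure software can see and
 * control.
 */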
static void icc_activate_irq(GICv3CPUState *cs, int irq)
{
    /* Move the interrupt from the Pending state to Active, and update
     * the Active Priority Registers
     */
    uint32_t mask = icc_gprio_mask(cs, cs->hppi.grp);
    int prio = cs->hppi.prio & mask;
    int aprbit = prio >> (8 - cs->prebits);
    int regno = aprbit / 32;
    int regbit = aprbit % 32;
    bool nmi = cs->hppi.nmi;

    if (nmi) {
        cs->icc_apr[cs->hppi.grp][regno] |= ICC_AP1R_EL1_NMI;
    } else {
        cs->icc_apr[cs->hppi.grp][regno] |= (1U << regbit);
    }

    if (irq < GIC_INTERNAL) {
        cs->gicr_iactiver0 = deposit32(cs->gicr_iactiver0, irq, 1, 1);
        cs->gicr_ipendr0 = deposit32(cs->gicr_ipendr0, irq, 1, 0);
        gicv3_redist_update(cs);
    } else if (irq < GICV3_LPI_INTID_START) {
        gicv3_gicd_active_set(cs->gic, irq);
        gicv3_gicd_pending_clear(cs->gic, irq);
        gicv3_update(cs->gic, irq, 1);
    } else {
        gicv3_redist_lpi_pending(cs, irq, 0);
    }
}

static uint64_t icc_hppir0_value(GICv3CPUState *cs, CPUARMState *env)
{
    /* Return the highest priority pending interrupt register value
     * for group 0.
     */
    bool irq_is_secure;

    if (icc_no_enabled_hppi(cs)) {
        return INTID_SPURIOUS;
    }

    /* Check whether we can return the interrupt or if we should return
     * a special identifier, as per the CheckGroup0ForSpecialIdentifiers
     * pseudocode. (We can simplify a little because for us ICC_SRE_EL1.RM
     * is always zero.)
     */
    irq_is_secure = (!(cs->gic->gicd_ctlr & GICD_CTLR_DS) &&
                     (cs->hppi.grp != GICV3_G1NS));

    if (cs->hppi.grp != GICV3_G0 && !arm_is_el3_or_mon(env)) {
        return INTID_SPURIOUS;
    }
    if (irq_is_secure && !arm_is_secure(env)) {
        /* Secure interrupts not visible to Nonsecure */
        return INTID_SPURIOUS;
    }

    if (cs->hppi.grp != GICV3_G0) {
        /* Indicate to EL3 that there's a Group 1 interrupt for the other
         * state pending.
         */
        return irq_is_secure ? INTID_SECURE : INTID_NONSECURE;
    }

    return cs->hppi.irq;
}

static uint64_t icc_hppir1_value(GICv3CPUState *cs, CPUARMState *env)
{
    /* Return the highest priority pending interrupt register value
     * for group 1.
     */
    bool irq_is_secure;

    if (icc_no_enabled_hppi(cs)) {
        return INTID_SPURIOUS;
    }

    /* Check whether we can return the interrupt or if we should return
     * a special identifier, as per the CheckGroup1ForSpecialIdentifiers
     * pseudocode. (We can simplify a little because for us ICC_SRE_EL1.RM
     * is always zero.)
     */
    irq_is_secure = (!(cs->gic->gicd_ctlr & GICD_CTLR_DS) &&
                     (cs->hppi.grp != GICV3_G1NS));

    if (cs->hppi.grp == GICV3_G0) {
        /* Group 0 interrupts not visible via HPPIR1 */
        return INTID_SPURIOUS;
    }
    if (irq_is_secure) {
        if (!arm_is_secure(env)) {
            /* Secure interrupts not visible in Non-secure */
            return INTID_SPURIOUS;
        }
    } else if (!arm_is_el3_or_mon(env) && arm_is_secure(env)) {
        /* Group 1 non-secure interrupts not visible in Secure EL1 */
        return INTID_SPURIOUS;
    }

    return cs->hppi.irq;
}

static uint64_t icc_iar0_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t intid;

    if (icv_access(env, HCR_FMO)) {
        return icv_iar_read(env, ri);
    }

    if (!icc_hppi_can_preempt(cs)) {
        intid = INTID_SPURIOUS;
    } else {
        intid = icc_hppir0_value(cs, env);
    }

    if (!gicv3_intid_is_special(intid)) {
        icc_activate_irq(cs, intid);
    }

    trace_gicv3_icc_iar0_read(gicv3_redist_affid(cs), intid);
    return intid;
}

static uint64_t icc_iar1_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    int el = arm_current_el(env);
    uint64_t intid;

    if (icv_access(env, HCR_IMO)) {
        return icv_iar_read(env, ri);
    }

    if (!icc_hppi_can_preempt(cs)) {
        intid = INTID_SPURIOUS;
    } else {
        intid = icc_hppir1_value(cs, env);
    }

    if (!gicv3_intid_is_special(intid)) {
        if (cs->hppi.nmi && env->cp15.sctlr_el[el] & SCTLR_NMI) {
            intid = INTID_NMI;
        } else {
            icc_activate_irq(cs, intid);
        }
    }

    trace_gicv3_icc_iar1_read(gicv3_redist_affid(cs), intid);
    return intid;
}
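/*
 * Note on the NMI acknowledge flow (our reading of the code above and
 * below, not spelled out in the original comments): when the pending
 * interrupt is an NMI and SCTLR_ELx.NMI is set, ICC_IAR1_EL1 returns
 * the special INTID_NMI value without activating the interrupt;
 * software is expected to re-acknowledge via ICC_NMIAR1_EL1, which
 * performs the actual activation.
 */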
static uint64_t icc_nmiar1_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t intid;

    if (icv_access(env, HCR_IMO)) {
        return icv_nmiar1_read(env, ri);
    }

    if (!icc_hppi_can_preempt(cs)) {
        intid = INTID_SPURIOUS;
    } else {
        intid = icc_hppir1_value(cs, env);
    }

    if (!gicv3_intid_is_special(intid)) {
        if (!cs->hppi.nmi) {
            intid = INTID_SPURIOUS;
        } else {
            icc_activate_irq(cs, intid);
        }
    }

    trace_gicv3_icc_nmiar1_read(gicv3_redist_affid(cs), intid);
    return intid;
}

static void icc_drop_prio(GICv3CPUState *cs, int grp)
{
    /* Drop the priority of the currently active interrupt in
     * the specified group.
     *
     * Note that we can guarantee (because of the requirement to nest
     * ICC_IAR reads [which activate an interrupt and raise priority]
     * with ICC_EOIR writes [which drop the priority for the interrupt])
     * that the interrupt we're being called for is the highest priority
     * active interrupt, meaning that it has the lowest set bit in the
     * APR registers.
     *
     * If the guest does not honour the ordering constraints then the
     * behaviour of the GIC is UNPREDICTABLE, which for us means that
     * the values of the APR registers might become incorrect and the
     * running priority will be wrong, so interrupts that should preempt
     * might not do so, and interrupts that should not preempt might do so.
     */
    int i;

    for (i = 0; i < icc_num_aprs(cs); i++) {
        uint64_t *papr = &cs->icc_apr[grp][i];

        if (!*papr) {
            continue;
        }

        if (i == 0 && cs->nmi_support && (*papr & ICC_AP1R_EL1_NMI)) {
            *papr &= (~ICC_AP1R_EL1_NMI);
            break;
        }

        /* Clear the lowest set bit */
        *papr &= *papr - 1;
        break;
    }

    /* running priority change means we need an update for this cpu i/f */
    gicv3_cpuif_update(cs);
}
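/*
 * EOImode background for the helper below: with EOImode == 0 a write
 * to ICC_EOIR* both drops the running priority and deactivates the
 * interrupt; with EOImode == 1 the EOIR write only drops priority and
 * a separate write to ICC_DIR_EL1 performs the deactivation.
 */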
static bool icc_eoi_split(CPUARMState *env, GICv3CPUState *cs)
{
    /* Return true if we should split priority drop and interrupt
     * deactivation, ie whether the relevant EOIMode bit is set.
     */
    if (arm_is_el3_or_mon(env)) {
        return cs->icc_ctlr_el3 & ICC_CTLR_EL3_EOIMODE_EL3;
    }
    if (arm_is_secure_below_el3(env)) {
        return cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_EOIMODE;
    } else {
        return cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_EOIMODE;
    }
}

static int icc_highest_active_group(GICv3CPUState *cs)
{
    /* Return the group with the highest priority active interrupt.
     * We can do this by just comparing the APRs to see which one
     * has the lowest set bit.
     * (If more than one group is active at the same priority then
     * we're in UNPREDICTABLE territory.)
     */
    int i;

    if (cs->nmi_support) {
        if (cs->icc_apr[GICV3_G1][0] & ICC_AP1R_EL1_NMI) {
            return GICV3_G1;
        }
        if (cs->icc_apr[GICV3_G1NS][0] & ICC_AP1R_EL1_NMI) {
            return GICV3_G1NS;
        }
    }

    for (i = 0; i < ARRAY_SIZE(cs->icc_apr[0]); i++) {
        int g0ctz = ctz32(cs->icc_apr[GICV3_G0][i]);
        int g1ctz = ctz32(cs->icc_apr[GICV3_G1][i]);
        int g1nsctz = ctz32(cs->icc_apr[GICV3_G1NS][i]);

        if (g1nsctz < g0ctz && g1nsctz < g1ctz) {
            return GICV3_G1NS;
        }
        if (g1ctz < g0ctz) {
            return GICV3_G1;
        }
        if (g0ctz < 32) {
            return GICV3_G0;
        }
    }
    /* No set active bits? UNPREDICTABLE; return -1 so the caller
     * ignores the spurious EOI attempt.
     */
    return -1;
}

static void icc_deactivate_irq(GICv3CPUState *cs, int irq)
{
    if (irq < GIC_INTERNAL) {
        cs->gicr_iactiver0 = deposit32(cs->gicr_iactiver0, irq, 1, 0);
        gicv3_redist_update(cs);
    } else {
        gicv3_gicd_active_clear(cs->gic, irq);
        gicv3_update(cs->gic, irq, 1);
    }
}

static bool icv_eoi_split(CPUARMState *env, GICv3CPUState *cs)
{
    /* Return true if we should split priority drop and interrupt
     * deactivation, ie whether the virtual EOIMode bit is set.
     */
    return cs->ich_vmcr_el2 & ICH_VMCR_EL2_VEOIM;
}

static int icv_find_active(GICv3CPUState *cs, int irq)
{
    /* Given an interrupt number for an active interrupt, return the index
     * of the corresponding list register, or -1 if there is no match.
     * Corresponds to FindActiveVirtualInterrupt pseudocode.
     */
    int i;

    for (i = 0; i < cs->num_list_regs; i++) {
        uint64_t lr = cs->ich_lr_el2[i];

        if ((lr & ICH_LR_EL2_STATE_ACTIVE_BIT) && ich_lr_vintid(lr) == irq) {
            return i;
        }
    }

    return -1;
}

static void icv_deactivate_irq(GICv3CPUState *cs, int idx)
{
    /* Deactivate the interrupt in the specified list register index */
    uint64_t lr = cs->ich_lr_el2[idx];

    if (lr & ICH_LR_EL2_HW) {
        /* Deactivate the associated physical interrupt */
        int pirq = ich_lr_pintid(lr);

        if (pirq < INTID_SECURE) {
            icc_deactivate_irq(cs, pirq);
        }
    }

    /* Clear the 'active' part of the state, so ActivePending->Pending
     * and Active->Invalid.
     */
    lr &= ~ICH_LR_EL2_STATE_ACTIVE_BIT;
    cs->ich_lr_el2[idx] = lr;
}

static void icv_increment_eoicount(GICv3CPUState *cs)
{
    /* Increment the EOICOUNT field in ICH_HCR_EL2 */
    int eoicount = extract64(cs->ich_hcr_el2, ICH_HCR_EL2_EOICOUNT_SHIFT,
                             ICH_HCR_EL2_EOICOUNT_LENGTH);

    cs->ich_hcr_el2 = deposit64(cs->ich_hcr_el2, ICH_HCR_EL2_EOICOUNT_SHIFT,
                                ICH_HCR_EL2_EOICOUNT_LENGTH, eoicount + 1);
}
static int icv_drop_prio(GICv3CPUState *cs, bool *nmi)
{
    /* Drop the priority of the currently active virtual interrupt
     * (favouring group 0 if there is a set active bit at
     * the same priority for both group 0 and group 1).
     * Return the priority value for the bit we just cleared,
     * or 0xff if no bits were set in the AP registers at all.
     * Note that though the ich_apr[] are uint64_t only the low
     * 32 bits are actually relevant.
     */
    int i;
    int aprmax = ich_num_aprs(cs);

    for (i = 0; i < aprmax; i++) {
        uint64_t *papr0 = &cs->ich_apr[GICV3_G0][i];
        uint64_t *papr1 = &cs->ich_apr[GICV3_G1NS][i];
        int apr0count, apr1count;

        if (!*papr0 && !*papr1) {
            continue;
        }

        if (i == 0 && cs->nmi_support && (*papr1 & ICV_AP1R_EL1_NMI)) {
            *papr1 &= (~ICV_AP1R_EL1_NMI);
            *nmi = true;
            return 0xff;
        }

        /* We can't just use the bit-twiddling hack icc_drop_prio() does
         * because we need to return the bit number we cleared so
         * it can be compared against the list register's priority field.
         */
        apr0count = ctz32(*papr0);
        apr1count = ctz32(*papr1);

        if (apr0count <= apr1count) {
            *papr0 &= *papr0 - 1;
            return (apr0count + i * 32) << (icv_min_vbpr(cs) + 1);
        } else {
            *papr1 &= *papr1 - 1;
            return (apr1count + i * 32) << (icv_min_vbpr(cs) + 1);
        }
    }
    return 0xff;
}

static void icv_dir_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Deactivate interrupt */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int idx;
    int irq = value & 0xffffff;

    trace_gicv3_icv_dir_write(gicv3_redist_affid(cs), value);

    if (irq >= GICV3_MAXIRQ) {
        /* Also catches special interrupt numbers and LPIs */
        return;
    }

    if (!icv_eoi_split(env, cs)) {
        return;
    }

    idx = icv_find_active(cs, irq);

    if (idx < 0) {
        /* No list register matching this, so increment the EOI count
         * (might trigger a maintenance interrupt)
         */
        icv_increment_eoicount(cs);
    } else {
        icv_deactivate_irq(cs, idx);
    }

    gicv3_cpuif_virt_update(cs);
}

static void icv_eoir_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* End of Interrupt */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int irq = value & 0xffffff;
    int grp = ri->crm == 8 ? GICV3_G0 : GICV3_G1NS;
    int idx, dropprio;
    bool nmi = false;

    trace_gicv3_icv_eoir_write(ri->crm == 8 ? 0 : 1,
                               gicv3_redist_affid(cs), value);

    if (gicv3_intid_is_special(irq)) {
        return;
    }

    /* We implement the IMPDEF choice of "drop priority before doing
     * error checks" (because that lets us avoid scanning the AP
     * registers twice).
     */
    dropprio = icv_drop_prio(cs, &nmi);
    if (dropprio == 0xff && !nmi) {
        /* No active interrupt. It is CONSTRAINED UNPREDICTABLE
         * whether the list registers are checked in this
         * situation; we choose not to.
         */
        return;
    }

    idx = icv_find_active(cs, irq);

    if (idx < 0) {
        /*
         * No valid list register corresponding to EOI ID; if this is a vLPI
         * not in the list regs then do nothing; otherwise increment EOI count
         */
        if (irq < GICV3_LPI_INTID_START) {
            icv_increment_eoicount(cs);
        }
    } else {
        uint64_t lr = cs->ich_lr_el2[idx];
        int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;
        int lr_gprio = ich_lr_prio(lr) & icv_gprio_mask(cs, grp);
        bool thisnmi = lr & ICH_LR_EL2_NMI;

        if (thisgrp == grp && (lr_gprio == dropprio || (thisnmi && nmi))) {
            if (!icv_eoi_split(env, cs) || irq >= GICV3_LPI_INTID_START) {
                /*
                 * Priority drop and deactivate not split: deactivate irq now.
                 * LPIs always get their active state cleared immediately
                 * because no separate deactivate is expected.
                 */
                icv_deactivate_irq(cs, idx);
            }
        }
    }

    gicv3_cpuif_virt_update(cs);
}
static void icc_eoir_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* End of Interrupt */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int irq = value & 0xffffff;
    int grp;
    bool is_eoir0 = ri->crm == 8;

    if (icv_access(env, is_eoir0 ? HCR_FMO : HCR_IMO)) {
        icv_eoir_write(env, ri, value);
        return;
    }

    trace_gicv3_icc_eoir_write(is_eoir0 ? 0 : 1,
                               gicv3_redist_affid(cs), value);

    if ((irq >= cs->gic->num_irq) &&
        !(cs->gic->lpi_enable && (irq >= GICV3_LPI_INTID_START))) {
        /* This handles two cases:
         * 1. If software writes the ID of a spurious interrupt [ie 1020-1023]
         * to the GICC_EOIR, the GIC ignores that write.
         * 2. If software writes the number of a non-existent interrupt
         * this must be a subcase of "value written does not match the last
         * valid interrupt value read from the Interrupt Acknowledge
         * register" and so this is UNPREDICTABLE. We choose to ignore it.
         */
        return;
    }

    grp = icc_highest_active_group(cs);
    switch (grp) {
    case GICV3_G0:
        if (!is_eoir0) {
            return;
        }
        if (!(cs->gic->gicd_ctlr & GICD_CTLR_DS)
            && arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env)) {
            return;
        }
        break;
    case GICV3_G1:
        if (is_eoir0) {
            return;
        }
        if (!arm_is_secure(env)) {
            return;
        }
        break;
    case GICV3_G1NS:
        if (is_eoir0) {
            return;
        }
        if (!arm_is_el3_or_mon(env) && arm_is_secure(env)) {
            return;
        }
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: IRQ %d isn't active\n", __func__, irq);
        return;
    }

    icc_drop_prio(cs, grp);

    if (!icc_eoi_split(env, cs)) {
        /* Priority drop and deactivate not split: deactivate irq now */
        icc_deactivate_irq(cs, irq);
    }
}

static uint64_t icc_hppir0_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t value;

    if (icv_access(env, HCR_FMO)) {
        return icv_hppir_read(env, ri);
    }

    value = icc_hppir0_value(cs, env);
    trace_gicv3_icc_hppir0_read(gicv3_redist_affid(cs), value);
    return value;
}

static uint64_t icc_hppir1_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t value;

    if (icv_access(env, HCR_IMO)) {
        return icv_hppir_read(env, ri);
    }

    value = icc_hppir1_value(cs, env);
    trace_gicv3_icc_hppir1_read(gicv3_redist_affid(cs), value);
    return value;
}

static uint64_t icc_bpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1;
    bool satinc = false;
    uint64_t bpr;

    if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
        return icv_bpr_read(env, ri);
    }

    if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
        grp = GICV3_G1NS;
    }

    if (grp == GICV3_G1 && !arm_is_el3_or_mon(env) &&
        (cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR)) {
        /* CBPR_EL1S means secure EL1 or AArch32 EL3 !Mon BPR1 accesses
         * modify BPR0
         */
        grp = GICV3_G0;
    }

    if (grp == GICV3_G1NS && arm_current_el(env) < 3 &&
        (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR)) {
        /* reads return bpr0 + 1 sat to 7, writes ignored */
        grp = GICV3_G0;
        satinc = true;
    }

    bpr = cs->icc_bpr[grp];
    if (satinc) {
        bpr++;
        bpr = MIN(bpr, 7);
    }

    trace_gicv3_icc_bpr_read(ri->crm == 8 ? 0 : 1, gicv3_redist_affid(cs), bpr);

    return bpr;
}
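/*
 * BPR example for the read/write accessors here (illustrative): a BPR
 * of 3 splits an 8-bit priority into group priority [7:4] and
 * subpriority [3:0], so 0x43 and 0x4f share a preemption level while
 * 0x3f preempts both.
 */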
static void icc_bpr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1;
    uint64_t minval;

    if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
        icv_bpr_write(env, ri, value);
        return;
    }

    trace_gicv3_icc_bpr_write(ri->crm == 8 ? 0 : 1,
                              gicv3_redist_affid(cs), value);

    if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
        grp = GICV3_G1NS;
    }

    if (grp == GICV3_G1 && !arm_is_el3_or_mon(env) &&
        (cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR)) {
        /* CBPR_EL1S means secure EL1 or AArch32 EL3 !Mon BPR1 accesses
         * modify BPR0
         */
        grp = GICV3_G0;
    }

    if (grp == GICV3_G1NS && arm_current_el(env) < 3 &&
        (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR)) {
        /* reads return bpr0 + 1 sat to 7, writes ignored */
        return;
    }

    minval = (grp == GICV3_G1NS) ? icc_min_bpr_ns(cs) : icc_min_bpr(cs);
    if (value < minval) {
        value = minval;
    }

    cs->icc_bpr[grp] = value & 7;
    gicv3_cpuif_update(cs);
}

static uint64_t icc_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t value;

    int regno = ri->opc2 & 3;
    int grp = (ri->crm & 1) ? GICV3_G1 : GICV3_G0;

    if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
        return icv_ap_read(env, ri);
    }

    if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
        grp = GICV3_G1NS;
    }

    value = cs->icc_apr[grp][regno];

    trace_gicv3_icc_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
    return value;
}

static void icc_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    GICv3CPUState *cs = icc_cs_from_env(env);

    int regno = ri->opc2 & 3;
    int grp = (ri->crm & 1) ? GICV3_G1 : GICV3_G0;

    if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
        icv_ap_write(env, ri, value);
        return;
    }

    trace_gicv3_icc_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);

    if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
        grp = GICV3_G1NS;
    }

    /* It's not possible to claim that a Non-secure interrupt is active
     * at a priority outside the Non-secure range (128..255), since this
     * would otherwise allow malicious NS code to block delivery of S
     * interrupts by writing a bad value to these registers.
     */
    if (grp == GICV3_G1NS && regno < 2 && arm_feature(env, ARM_FEATURE_EL3)) {
        return;
    }

    if (cs->nmi_support) {
        cs->icc_apr[grp][regno] = value & (0xFFFFFFFFU | ICC_AP1R_EL1_NMI);
    } else {
        cs->icc_apr[grp][regno] = value & 0xFFFFFFFFU;
    }
    gicv3_cpuif_update(cs);
}
static void icc_dir_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Deactivate interrupt */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int irq = value & 0xffffff;
    bool irq_is_secure, single_sec_state, irq_is_grp0;
    bool route_fiq_to_el3, route_irq_to_el3, route_fiq_to_el2, route_irq_to_el2;

    if (icv_access(env, HCR_FMO | HCR_IMO)) {
        icv_dir_write(env, ri, value);
        return;
    }

    trace_gicv3_icc_dir_write(gicv3_redist_affid(cs), value);

    if (irq >= cs->gic->num_irq) {
        /* Also catches special interrupt numbers and LPIs */
        return;
    }

    if (!icc_eoi_split(env, cs)) {
        return;
    }

    int grp = gicv3_irq_group(cs->gic, cs, irq);

    single_sec_state = cs->gic->gicd_ctlr & GICD_CTLR_DS;
    irq_is_secure = !single_sec_state && (grp != GICV3_G1NS);
    irq_is_grp0 = grp == GICV3_G0;

    /* Check whether we're allowed to deactivate this interrupt based
     * on its group and the current CPU state.
     * These checks are laid out to correspond to the spec's pseudocode.
     */
    route_fiq_to_el3 = env->cp15.scr_el3 & SCR_FIQ;
    route_irq_to_el3 = env->cp15.scr_el3 & SCR_IRQ;
    /* No need to include !IsSecure in route_*_to_el2 as it's only
     * tested in cases where we know !IsSecure is true.
     */
    uint64_t hcr_el2 = arm_hcr_el2_eff(env);
    route_fiq_to_el2 = hcr_el2 & HCR_FMO;
    route_irq_to_el2 = hcr_el2 & HCR_IMO;

    switch (arm_current_el(env)) {
    case 3:
        break;
    case 2:
        if (single_sec_state && irq_is_grp0 && !route_fiq_to_el3) {
            break;
        }
        if (!irq_is_secure && !irq_is_grp0 && !route_irq_to_el3) {
            break;
        }
        return;
    case 1:
        if (!arm_is_secure_below_el3(env)) {
            if (single_sec_state && irq_is_grp0 &&
                !route_fiq_to_el3 && !route_fiq_to_el2) {
                break;
            }
            if (!irq_is_secure && !irq_is_grp0 &&
                !route_irq_to_el3 && !route_irq_to_el2) {
                break;
            }
        } else {
            if (irq_is_grp0 && !route_fiq_to_el3) {
                break;
            }
            if (!irq_is_grp0 &&
                (!irq_is_secure || !single_sec_state) &&
                !route_irq_to_el3) {
                break;
            }
        }
        return;
    default:
        g_assert_not_reached();
    }

    icc_deactivate_irq(cs, irq);
}

static uint64_t icc_rpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t prio;

    if (icv_access(env, HCR_FMO | HCR_IMO)) {
        return icv_rpr_read(env, ri);
    }

    prio = icc_highest_active_prio(cs);

    if (arm_feature(env, ARM_FEATURE_EL3) &&
        !arm_is_secure(env) && (env->cp15.scr_el3 & SCR_FIQ)) {
        /* NS GIC access and Group 0 is inaccessible to NS */
        if ((prio & 0x80) == 0) {
            /* NS mustn't see priorities in the Secure half of the range */
            prio = 0;
        } else if (prio != 0xff) {
            /* Non-idle priority: show the Non-secure view of it */
            prio = (prio << 1) & 0xff;
        }
    }

    if (cs->nmi_support) {
        /* NMI info is reported in the high bits of RPR */
        if (arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env)) {
            if (cs->icc_apr[GICV3_G1NS][0] & ICC_AP1R_EL1_NMI) {
                prio |= ICC_RPR_EL1_NMI;
            }
        } else {
            if (cs->icc_apr[GICV3_G1NS][0] & ICC_AP1R_EL1_NMI) {
                prio |= ICC_RPR_EL1_NSNMI;
            }
            if (cs->icc_apr[GICV3_G1][0] & ICC_AP1R_EL1_NMI) {
                prio |= ICC_RPR_EL1_NMI;
            }
        }
    }

    trace_gicv3_icc_rpr_read(gicv3_redist_affid(cs), prio);
    return prio;
}
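/*
 * ICC_SGI*R layout worked example for the decode below (illustrative
 * value): writing 0x05010003 encodes INTID 5 (bits [27:24]), Aff1 == 1
 * (bits [23:16]) and TargetList 0x3 (bits [15:0]) with IRM == 0, so
 * SGI 5 is sent to the CPUs with affinity 0.0.1.0 and 0.0.1.1.
 */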
static void icc_generate_sgi(CPUARMState *env, GICv3CPUState *cs,
                             uint64_t value, int grp, bool ns)
{
    GICv3State *s = cs->gic;

    /* Extract Aff3/Aff2/Aff1 and shift into the bottom 24 bits */
    uint64_t aff = extract64(value, 48, 8) << 16 |
        extract64(value, 32, 8) << 8 |
        extract64(value, 16, 8);
    uint32_t targetlist = extract64(value, 0, 16);
    uint32_t irq = extract64(value, 24, 4);
    bool irm = extract64(value, 40, 1);
    int i;

    if (grp == GICV3_G1 && s->gicd_ctlr & GICD_CTLR_DS) {
        /* If GICD_CTLR.DS == 1, the Distributor treats Secure Group 1
         * interrupts as Group 0 interrupts and must send Secure Group 0
         * interrupts to the target CPUs.
         */
        grp = GICV3_G0;
    }

    trace_gicv3_icc_generate_sgi(gicv3_redist_affid(cs), irq, irm,
                                 aff, targetlist);

    for (i = 0; i < s->num_cpu; i++) {
        GICv3CPUState *ocs = &s->cpu[i];

        if (irm) {
            /* IRM == 1 : route to all CPUs except self */
            if (cs == ocs) {
                continue;
            }
        } else {
            /* IRM == 0 : route to Aff3.Aff2.Aff1.n for all n in [0..15]
             * where the corresponding bit is set in targetlist
             */
            int aff0;

            if (ocs->gicr_typer >> 40 != aff) {
                continue;
            }
            aff0 = extract64(ocs->gicr_typer, 32, 8);
            if (aff0 > 15 || extract32(targetlist, aff0, 1) == 0) {
                continue;
            }
        }

        /* The redistributor will check against its own GICR_NSACR as needed */
        gicv3_redist_send_sgi(ocs, grp, irq, ns);
    }
}

static void icc_sgi0r_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* Generate Secure Group 0 SGI. */
    GICv3CPUState *cs = icc_cs_from_env(env);
    bool ns = !arm_is_secure(env);

    icc_generate_sgi(env, cs, value, GICV3_G0, ns);
}

static void icc_sgi1r_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* Generate Group 1 SGI for the current Security state */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int grp;
    bool ns = !arm_is_secure(env);

    grp = ns ? GICV3_G1NS : GICV3_G1;
    icc_generate_sgi(env, cs, value, grp, ns);
}

static void icc_asgi1r_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* Generate Group 1 SGI for the Security state that is not
     * the current state
     */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int grp;
    bool ns = !arm_is_secure(env);

    grp = ns ? GICV3_G1 : GICV3_G1NS;
    icc_generate_sgi(env, cs, value, grp, ns);
}
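
/*
 * ICC_IGRPEN0_EL1 and ICC_IGRPEN1_EL1 share the accessors below;
 * bit 0 of opc2 selects the group. The Group 1 enable is banked by
 * security state, so a Non-secure access is redirected to the
 * GICV3_G1NS copy.
 */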
static uint64_t icc_igrpen_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    int grp = ri->opc2 & 1 ? GICV3_G1 : GICV3_G0;
    uint64_t value;

    if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
        return icv_igrpen_read(env, ri);
    }

    if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
        grp = GICV3_G1NS;
    }

    value = cs->icc_igrpen[grp];
    trace_gicv3_icc_igrpen_read(ri->opc2 & 1 ? 1 : 0,
                                gicv3_redist_affid(cs), value);
    return value;
}

static void icc_igrpen_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    int grp = ri->opc2 & 1 ? GICV3_G1 : GICV3_G0;

    if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
        icv_igrpen_write(env, ri, value);
        return;
    }

    trace_gicv3_icc_igrpen_write(ri->opc2 & 1 ? 1 : 0,
                                 gicv3_redist_affid(cs), value);

    if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
        grp = GICV3_G1NS;
    }

    cs->icc_igrpen[grp] = value & ICC_IGRPEN_ENABLE;
    gicv3_cpuif_update(cs);
}

static uint64_t icc_igrpen1_el3_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t value;

    /* IGRPEN1_EL3 bits 0 and 1 are r/w aliases into IGRPEN1_EL1 NS and S */
    value = cs->icc_igrpen[GICV3_G1NS] | (cs->icc_igrpen[GICV3_G1] << 1);
    trace_gicv3_icc_igrpen1_el3_read(gicv3_redist_affid(cs), value);
    return value;
}

static void icc_igrpen1_el3_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    GICv3CPUState *cs = icc_cs_from_env(env);

    trace_gicv3_icc_igrpen1_el3_write(gicv3_redist_affid(cs), value);

    /* IGRPEN1_EL3 bits 0 and 1 are r/w aliases into IGRPEN1_EL1 NS and S */
    cs->icc_igrpen[GICV3_G1NS] = extract32(value, 0, 1);
    cs->icc_igrpen[GICV3_G1] = extract32(value, 1, 1);
    gicv3_cpuif_update(cs);
}

static uint64_t icc_ctlr_el1_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    int bank = gicv3_use_ns_bank(env) ? GICV3_NS : GICV3_S;
    uint64_t value;

    if (icv_access(env, HCR_FMO | HCR_IMO)) {
        return icv_ctlr_read(env, ri);
    }

    value = cs->icc_ctlr_el1[bank];
    trace_gicv3_icc_ctlr_read(gicv3_redist_affid(cs), value);
    return value;
}

static void icc_ctlr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    int bank = gicv3_use_ns_bank(env) ? GICV3_NS : GICV3_S;
    uint64_t mask;

    if (icv_access(env, HCR_FMO | HCR_IMO)) {
        icv_ctlr_write(env, ri, value);
        return;
    }

    trace_gicv3_icc_ctlr_write(gicv3_redist_affid(cs), value);

    /* Only CBPR and EOIMODE can be RW;
     * for us PMHE is RAZ/WI (we don't implement 1-of-N interrupts or
     * the associated priority-based routing of them);
     * if EL3 is implemented and GICD_CTLR.DS == 0, then PMHE and CBPR are RO.
     */
    if (arm_feature(env, ARM_FEATURE_EL3) &&
        ((cs->gic->gicd_ctlr & GICD_CTLR_DS) == 0)) {
        mask = ICC_CTLR_EL1_EOIMODE;
    } else {
        mask = ICC_CTLR_EL1_CBPR | ICC_CTLR_EL1_EOIMODE;
    }

    cs->icc_ctlr_el1[bank] &= ~mask;
    cs->icc_ctlr_el1[bank] |= (value & mask);
    gicv3_cpuif_update(cs);
}

static uint64_t icc_ctlr_el3_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t value;

    value = cs->icc_ctlr_el3;
    if (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_EOIMODE) {
        value |= ICC_CTLR_EL3_EOIMODE_EL1NS;
    }
    if (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR) {
        value |= ICC_CTLR_EL3_CBPR_EL1NS;
    }
    if (cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_EOIMODE) {
        value |= ICC_CTLR_EL3_EOIMODE_EL1S;
    }
    if (cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR) {
        value |= ICC_CTLR_EL3_CBPR_EL1S;
    }

    trace_gicv3_icc_ctlr_el3_read(gicv3_redist_affid(cs), value);
    return value;
}

static void icc_ctlr_el3_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t mask;

    trace_gicv3_icc_ctlr_el3_write(gicv3_redist_affid(cs), value);

    /* *_EL1NS and *_EL1S bits are aliases into the ICC_CTLR_EL1 bits. */
    cs->icc_ctlr_el1[GICV3_NS] &= ~(ICC_CTLR_EL1_CBPR | ICC_CTLR_EL1_EOIMODE);
    if (value & ICC_CTLR_EL3_EOIMODE_EL1NS) {
        cs->icc_ctlr_el1[GICV3_NS] |= ICC_CTLR_EL1_EOIMODE;
    }
    if (value & ICC_CTLR_EL3_CBPR_EL1NS) {
        cs->icc_ctlr_el1[GICV3_NS] |= ICC_CTLR_EL1_CBPR;
    }

    cs->icc_ctlr_el1[GICV3_S] &= ~(ICC_CTLR_EL1_CBPR | ICC_CTLR_EL1_EOIMODE);
    if (value & ICC_CTLR_EL3_EOIMODE_EL1S) {
        cs->icc_ctlr_el1[GICV3_S] |= ICC_CTLR_EL1_EOIMODE;
    }
    if (value & ICC_CTLR_EL3_CBPR_EL1S) {
        cs->icc_ctlr_el1[GICV3_S] |= ICC_CTLR_EL1_CBPR;
    }

    /* The only bit stored in icc_ctlr_el3 which is writable is EOIMODE_EL3: */
    mask = ICC_CTLR_EL3_EOIMODE_EL3;

    cs->icc_ctlr_el3 &= ~mask;
    cs->icc_ctlr_el3 |= (value & mask);
    gicv3_cpuif_update(cs);
}
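
/*
 * Access-check functions for the ICC_* registers. Note that a pending
 * ICH_HCR_EL2 trap (TC, TDIR, TALL0, TALL1) from NS EL1 to EL2 takes
 * priority over any SCR_EL3.IRQ/FIQ routing trap to EL3, and that a
 * CP_ACCESS_TRAP_EL3 result is downgraded to CP_ACCESS_TRAP when EL3
 * is not AArch64.
 */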
static CPAccessResult gicv3_irqfiq_access(CPUARMState *env,
                                          const ARMCPRegInfo *ri, bool isread)
{
    CPAccessResult r = CP_ACCESS_OK;
    GICv3CPUState *cs = icc_cs_from_env(env);
    int el = arm_current_el(env);

    if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TC) &&
        el == 1 && !arm_is_secure_below_el3(env)) {
        /* Takes priority over a possible EL3 trap */
        return CP_ACCESS_TRAP_EL2;
    }

    if ((env->cp15.scr_el3 & (SCR_FIQ | SCR_IRQ)) == (SCR_FIQ | SCR_IRQ)) {
        switch (el) {
        case 1:
            /* Note that arm_hcr_el2_eff takes secure state into account. */
            if ((arm_hcr_el2_eff(env) & (HCR_IMO | HCR_FMO)) == 0) {
                r = CP_ACCESS_TRAP_EL3;
            }
            break;
        case 2:
            r = CP_ACCESS_TRAP_EL3;
            break;
        case 3:
            if (!is_a64(env) && !arm_is_el3_or_mon(env)) {
                r = CP_ACCESS_TRAP_EL3;
            }
            break;
        default:
            g_assert_not_reached();
        }
    }

    if (r == CP_ACCESS_TRAP_EL3 && !arm_el_is_aa64(env, 3)) {
        r = CP_ACCESS_TRAP;
    }
    return r;
}

static CPAccessResult gicv3_dir_access(CPUARMState *env,
                                       const ARMCPRegInfo *ri, bool isread)
{
    GICv3CPUState *cs = icc_cs_from_env(env);

    if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TDIR) &&
        arm_current_el(env) == 1 && !arm_is_secure_below_el3(env)) {
        /* Takes priority over a possible EL3 trap */
        return CP_ACCESS_TRAP_EL2;
    }

    return gicv3_irqfiq_access(env, ri, isread);
}

static CPAccessResult gicv3_sgi_access(CPUARMState *env,
                                       const ARMCPRegInfo *ri, bool isread)
{
    if (arm_current_el(env) == 1 &&
        (arm_hcr_el2_eff(env) & (HCR_IMO | HCR_FMO)) != 0) {
        /* Takes priority over a possible EL3 trap */
        return CP_ACCESS_TRAP_EL2;
    }

    return gicv3_irqfiq_access(env, ri, isread);
}

static CPAccessResult gicv3_fiq_access(CPUARMState *env,
                                       const ARMCPRegInfo *ri, bool isread)
{
    CPAccessResult r = CP_ACCESS_OK;
    GICv3CPUState *cs = icc_cs_from_env(env);
    int el = arm_current_el(env);

    if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TALL0) &&
        el == 1 && !arm_is_secure_below_el3(env)) {
        /* Takes priority over a possible EL3 trap */
        return CP_ACCESS_TRAP_EL2;
    }

    if (env->cp15.scr_el3 & SCR_FIQ) {
        switch (el) {
        case 1:
            if ((arm_hcr_el2_eff(env) & HCR_FMO) == 0) {
                r = CP_ACCESS_TRAP_EL3;
            }
            break;
        case 2:
            r = CP_ACCESS_TRAP_EL3;
            break;
        case 3:
            if (!is_a64(env) && !arm_is_el3_or_mon(env)) {
                r = CP_ACCESS_TRAP_EL3;
            }
            break;
        default:
            g_assert_not_reached();
        }
    }

    if (r == CP_ACCESS_TRAP_EL3 && !arm_el_is_aa64(env, 3)) {
        r = CP_ACCESS_TRAP;
    }
    return r;
}

static CPAccessResult gicv3_irq_access(CPUARMState *env,
                                       const ARMCPRegInfo *ri, bool isread)
{
    CPAccessResult r = CP_ACCESS_OK;
    GICv3CPUState *cs = icc_cs_from_env(env);
    int el = arm_current_el(env);

    if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TALL1) &&
        el == 1 && !arm_is_secure_below_el3(env)) {
        /* Takes priority over a possible EL3 trap */
        return CP_ACCESS_TRAP_EL2;
    }

    if (env->cp15.scr_el3 & SCR_IRQ) {
        switch (el) {
        case 1:
            if ((arm_hcr_el2_eff(env) & HCR_IMO) == 0) {
                r = CP_ACCESS_TRAP_EL3;
            }
            break;
        case 2:
            r = CP_ACCESS_TRAP_EL3;
            break;
        case 3:
            if (!is_a64(env) && !arm_is_el3_or_mon(env)) {
                r = CP_ACCESS_TRAP_EL3;
            }
            break;
        default:
            g_assert_not_reached();
        }
    }

    if (r == CP_ACCESS_TRAP_EL3 && !arm_el_is_aa64(env, 3)) {
        r = CP_ACCESS_TRAP;
    }
    return r;
}

static void icc_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);

    cs->icc_ctlr_el1[GICV3_S] = ICC_CTLR_EL1_A3V |
        (1 << ICC_CTLR_EL1_IDBITS_SHIFT) |
        ((cs->pribits - 1) << ICC_CTLR_EL1_PRIBITS_SHIFT);
    cs->icc_ctlr_el1[GICV3_NS] = ICC_CTLR_EL1_A3V |
        (1 << ICC_CTLR_EL1_IDBITS_SHIFT) |
        ((cs->pribits - 1) << ICC_CTLR_EL1_PRIBITS_SHIFT);
    cs->icc_pmr_el1 = 0;
    cs->icc_bpr[GICV3_G0] = icc_min_bpr(cs);
    cs->icc_bpr[GICV3_G1] = icc_min_bpr(cs);
    cs->icc_bpr[GICV3_G1NS] = icc_min_bpr_ns(cs);
    memset(cs->icc_apr, 0, sizeof(cs->icc_apr));
    memset(cs->icc_igrpen, 0, sizeof(cs->icc_igrpen));
    cs->icc_ctlr_el3 = ICC_CTLR_EL3_NDS | ICC_CTLR_EL3_A3V |
        (1 << ICC_CTLR_EL3_IDBITS_SHIFT) |
        ((cs->pribits - 1) << ICC_CTLR_EL3_PRIBITS_SHIFT);

    memset(cs->ich_apr, 0, sizeof(cs->ich_apr));
    cs->ich_hcr_el2 = 0;
    memset(cs->ich_lr_el2, 0, sizeof(cs->ich_lr_el2));
    cs->ich_vmcr_el2 = ICH_VMCR_EL2_VFIQEN |
        ((icv_min_vbpr(cs) + 1) << ICH_VMCR_EL2_VBPR1_SHIFT) |
        (icv_min_vbpr(cs) << ICH_VMCR_EL2_VBPR0_SHIFT);
}
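
/*
 * System register description table for the GICv3 CPU interface.
 * The encodings follow the GICv3 architecture's mapping of the ICC_*
 * registers; the read/write handlers themselves redirect to the ICV_*
 * (virtual interface) equivalents when HCR_EL2.{FMO,IMO} route the
 * access from NS EL1 to the virtual CPU interface.
 */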
static const ARMCPRegInfo gicv3_cpuif_reginfo[] = {
    { .name = "ICC_PMR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 6, .opc2 = 0,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_irqfiq_access,
      .readfn = icc_pmr_read,
      .writefn = icc_pmr_write,
      /* We hang the whole cpu interface reset routine off here
       * rather than parcelling it out into one little function
       * per register
       */
      .resetfn = icc_reset,
    },
    { .name = "ICC_IAR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 0,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_R, .accessfn = gicv3_fiq_access,
      .readfn = icc_iar0_read,
    },
    { .name = "ICC_EOIR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_W, .accessfn = gicv3_fiq_access,
      .writefn = icc_eoir_write,
    },
    { .name = "ICC_HPPIR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 2,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_R, .accessfn = gicv3_fiq_access,
      .readfn = icc_hppir0_read,
    },
    { .name = "ICC_BPR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 3,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_fiq_access,
      .readfn = icc_bpr_read,
      .writefn = icc_bpr_write,
    },
    { .name = "ICC_AP0R0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 4,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_fiq_access,
      .readfn = icc_ap_read,
      .writefn = icc_ap_write,
    },
    /* All the ICC_AP1R*_EL1 registers are banked */
    { .name = "ICC_AP1R0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 0,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_irq_access,
      .readfn = icc_ap_read,
      .writefn = icc_ap_write,
    },
    { .name = "ICC_DIR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_W, .accessfn = gicv3_dir_access,
      .writefn = icc_dir_write,
    },
    { .name = "ICC_RPR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 3,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_R, .accessfn = gicv3_irqfiq_access,
      .readfn = icc_rpr_read,
    },
    { .name = "ICC_SGI1R_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 5,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_W, .accessfn = gicv3_sgi_access,
      .writefn = icc_sgi1r_write,
    },
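    /*
     * The AArch32 views of the SGI registers below are 64-bit
     * (MCRR/MRRC accessible) cp15 registers; all three share crm = 12
     * and are distinguished by opc1 alone.
     */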
    { .name = "ICC_SGI1R",
      .cp = 15, .opc1 = 0, .crm = 12,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_W, .accessfn = gicv3_sgi_access,
      .writefn = icc_sgi1r_write,
    },
    { .name = "ICC_ASGI1R_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 6,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_W, .accessfn = gicv3_sgi_access,
      .writefn = icc_asgi1r_write,
    },
    { .name = "ICC_ASGI1R",
      .cp = 15, .opc1 = 1, .crm = 12,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_W, .accessfn = gicv3_sgi_access,
      .writefn = icc_asgi1r_write,
    },
    { .name = "ICC_SGI0R_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 7,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_W, .accessfn = gicv3_sgi_access,
      .writefn = icc_sgi0r_write,
    },
    { .name = "ICC_SGI0R",
      .cp = 15, .opc1 = 2, .crm = 12,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_W, .accessfn = gicv3_sgi_access,
      .writefn = icc_sgi0r_write,
    },
    { .name = "ICC_IAR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 0,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_R, .accessfn = gicv3_irq_access,
      .readfn = icc_iar1_read,
    },
    { .name = "ICC_EOIR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_W, .accessfn = gicv3_irq_access,
      .writefn = icc_eoir_write,
    },
    { .name = "ICC_HPPIR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 2,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_R, .accessfn = gicv3_irq_access,
      .readfn = icc_hppir1_read,
    },
    /* This register is banked */
    { .name = "ICC_BPR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 3,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_irq_access,
      .readfn = icc_bpr_read,
      .writefn = icc_bpr_write,
    },
    /* This register is banked */
    { .name = "ICC_CTLR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 4,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_irqfiq_access,
      .readfn = icc_ctlr_el1_read,
      .writefn = icc_ctlr_el1_write,
    },
    { .name = "ICC_SRE_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 5,
      .type = ARM_CP_NO_RAW | ARM_CP_CONST,
      .access = PL1_RW,
      /* We don't support IRQ/FIQ bypass and system registers are
       * always enabled, so all our bits are RAZ/WI or RAO/WI.
       * This register is banked but since it's constant we don't
       * need to do anything special.
       */
      .resetvalue = 0x7,
    },
    { .name = "ICC_IGRPEN0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 6,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_fiq_access,
      .fgt = FGT_ICC_IGRPENN_EL1,
      .readfn = icc_igrpen_read,
      .writefn = icc_igrpen_write,
    },
    /* This register is banked */
    { .name = "ICC_IGRPEN1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 7,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_irq_access,
      .fgt = FGT_ICC_IGRPENN_EL1,
      .readfn = icc_igrpen_read,
      .writefn = icc_igrpen_write,
    },
    { .name = "ICC_SRE_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 5,
      .type = ARM_CP_NO_RAW | ARM_CP_CONST,
      .access = PL2_RW,
      /* We don't support IRQ/FIQ bypass and system registers are
       * always enabled, so all our bits are RAZ/WI or RAO/WI.
       */
      .resetvalue = 0xf,
    },
    { .name = "ICC_CTLR_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 12, .opc2 = 4,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL3_RW,
      .readfn = icc_ctlr_el3_read,
      .writefn = icc_ctlr_el3_write,
    },
    { .name = "ICC_SRE_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 12, .opc2 = 5,
      .type = ARM_CP_NO_RAW | ARM_CP_CONST,
      .access = PL3_RW,
      /* We don't support IRQ/FIQ bypass and system registers are
       * always enabled, so all our bits are RAZ/WI or RAO/WI.
       */
      .resetvalue = 0xf,
    },
    { .name = "ICC_IGRPEN1_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 12, .opc2 = 7,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL3_RW,
      .readfn = icc_igrpen1_el3_read,
      .writefn = icc_igrpen1_el3_write,
    },
};

static const ARMCPRegInfo gicv3_cpuif_icc_apxr1_reginfo[] = {
    { .name = "ICC_AP0R1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 5,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_fiq_access,
      .readfn = icc_ap_read,
      .writefn = icc_ap_write,
    },
    { .name = "ICC_AP1R1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_irq_access,
      .readfn = icc_ap_read,
      .writefn = icc_ap_write,
    },
};

static const ARMCPRegInfo gicv3_cpuif_icc_apxr23_reginfo[] = {
    { .name = "ICC_AP0R2_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 6,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_fiq_access,
      .readfn = icc_ap_read,
      .writefn = icc_ap_write,
    },
    { .name = "ICC_AP0R3_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 7,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_fiq_access,
      .readfn = icc_ap_read,
      .writefn = icc_ap_write,
    },
    { .name = "ICC_AP1R2_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 2,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_irq_access,
      .readfn = icc_ap_read,
      .writefn = icc_ap_write,
    },
    { .name = "ICC_AP1R3_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 3,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_irq_access,
      .readfn = icc_ap_read,
      .writefn = icc_ap_write,
    },
};
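
/*
 * ICC_NMIAR1_EL1 is only present when the CPU interface implements
 * FEAT_GICv3_NMI; gicv3_init_cpuif() registers this table only when
 * cs->nmi_support is set.
 */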
static const ARMCPRegInfo gicv3_cpuif_gicv3_nmi_reginfo[] = {
    { .name = "ICC_NMIAR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 5,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_R, .accessfn = gicv3_irq_access,
      .readfn = icc_nmiar1_read,
    },
};

static uint64_t ich_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    int regno = ri->opc2 & 3;
    int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
    uint64_t value;

    value = cs->ich_apr[grp][regno];
    trace_gicv3_ich_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
    return value;
}

static void ich_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    int regno = ri->opc2 & 3;
    int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;

    trace_gicv3_ich_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);

    if (cs->nmi_support) {
        cs->ich_apr[grp][regno] = value & (0xFFFFFFFFU | ICV_AP1R_EL1_NMI);
    } else {
        cs->ich_apr[grp][regno] = value & 0xFFFFFFFFU;
    }
    gicv3_cpuif_virt_irq_fiq_update(cs);
}

static uint64_t ich_hcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t value = cs->ich_hcr_el2;

    trace_gicv3_ich_hcr_read(gicv3_redist_affid(cs), value);
    return value;
}

static void ich_hcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    GICv3CPUState *cs = icc_cs_from_env(env);

    trace_gicv3_ich_hcr_write(gicv3_redist_affid(cs), value);

    value &= ICH_HCR_EL2_EN | ICH_HCR_EL2_UIE | ICH_HCR_EL2_LRENPIE |
        ICH_HCR_EL2_NPIE | ICH_HCR_EL2_VGRP0EIE | ICH_HCR_EL2_VGRP0DIE |
        ICH_HCR_EL2_VGRP1EIE | ICH_HCR_EL2_VGRP1DIE | ICH_HCR_EL2_TC |
        ICH_HCR_EL2_TALL0 | ICH_HCR_EL2_TALL1 | ICH_HCR_EL2_TSEI |
        ICH_HCR_EL2_TDIR | ICH_HCR_EL2_EOICOUNT_MASK;

    cs->ich_hcr_el2 = value;
    gicv3_cpuif_virt_update(cs);
}
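
/*
 * ICH_VMCR_EL2 gives the hypervisor access to the control state that
 * the guest sees via the ICV_* registers (group enables, CBPR, EOImode,
 * the BPRs and the PMR), so writes here apply the same BPR
 * minimum-value semantics as guest ICV_BPR writes; see the write_vbpr()
 * round-trip in ich_vmcr_write() below.
 */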
static uint64_t ich_vmcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t value = cs->ich_vmcr_el2;

    trace_gicv3_ich_vmcr_read(gicv3_redist_affid(cs), value);
    return value;
}

static void ich_vmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    GICv3CPUState *cs = icc_cs_from_env(env);

    trace_gicv3_ich_vmcr_write(gicv3_redist_affid(cs), value);

    value &= ICH_VMCR_EL2_VENG0 | ICH_VMCR_EL2_VENG1 | ICH_VMCR_EL2_VCBPR |
        ICH_VMCR_EL2_VEOIM | ICH_VMCR_EL2_VBPR1_MASK |
        ICH_VMCR_EL2_VBPR0_MASK | ICH_VMCR_EL2_VPMR_MASK;
    value |= ICH_VMCR_EL2_VFIQEN;

    cs->ich_vmcr_el2 = value;
    /* Enforce "writing BPRs to less than minimum sets them to the minimum"
     * by reading and writing back the fields.
     */
    write_vbpr(cs, GICV3_G0, read_vbpr(cs, GICV3_G0));
    write_vbpr(cs, GICV3_G1, read_vbpr(cs, GICV3_G1));

    gicv3_cpuif_virt_update(cs);
}

static uint64_t ich_lr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    int regno = ri->opc2 | ((ri->crm & 1) << 3);
    uint64_t value;

    /* This read function handles all of:
     * 64-bit reads of the whole LR
     * 32-bit reads of the low half of the LR
     * 32-bit reads of the high half of the LR
     */
    if (ri->state == ARM_CP_STATE_AA32) {
        if (ri->crm >= 14) {
            value = extract64(cs->ich_lr_el2[regno], 32, 32);
            trace_gicv3_ich_lrc_read(regno, gicv3_redist_affid(cs), value);
        } else {
            value = extract64(cs->ich_lr_el2[regno], 0, 32);
            trace_gicv3_ich_lr32_read(regno, gicv3_redist_affid(cs), value);
        }
    } else {
        value = cs->ich_lr_el2[regno];
        trace_gicv3_ich_lr_read(regno, gicv3_redist_affid(cs), value);
    }

    return value;
}

static void ich_lr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    int regno = ri->opc2 | ((ri->crm & 1) << 3);

    /* This write function handles all of:
     * 64-bit writes to the whole LR
     * 32-bit writes to the low half of the LR
     * 32-bit writes to the high half of the LR
     */
    if (ri->state == ARM_CP_STATE_AA32) {
        if (ri->crm >= 14) {
            trace_gicv3_ich_lrc_write(regno, gicv3_redist_affid(cs), value);
            value = deposit64(cs->ich_lr_el2[regno], 32, 32, value);
        } else {
            trace_gicv3_ich_lr32_write(regno, gicv3_redist_affid(cs), value);
            value = deposit64(cs->ich_lr_el2[regno], 0, 32, value);
        }
    } else {
        trace_gicv3_ich_lr_write(regno, gicv3_redist_affid(cs), value);
    }

    /* Enforce RES0 bits in priority field */
    if (cs->vpribits < 8) {
        value = deposit64(value, ICH_LR_EL2_PRIORITY_SHIFT,
                          8 - cs->vpribits, 0);
    }

    /* Enforce RES0 bit in NMI field when FEAT_GICv3_NMI is not implemented */
    if (!cs->nmi_support) {
        value &= ~ICH_LR_EL2_NMI;
    }

    cs->ich_lr_el2[regno] = value;
    gicv3_cpuif_virt_update(cs);
}

static uint64_t ich_vtr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t value;

    value = ((cs->num_list_regs - 1) << ICH_VTR_EL2_LISTREGS_SHIFT)
        | ICH_VTR_EL2_TDS | ICH_VTR_EL2_A3V
        | (1 << ICH_VTR_EL2_IDBITS_SHIFT)
        | ((cs->vprebits - 1) << ICH_VTR_EL2_PREBITS_SHIFT)
        | ((cs->vpribits - 1) << ICH_VTR_EL2_PRIBITS_SHIFT);

    if (cs->gic->revision < 4) {
        value |= ICH_VTR_EL2_NV4;
    }

    trace_gicv3_ich_vtr_read(gicv3_redist_affid(cs), value);
    return value;
}

static uint64_t ich_misr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t value = maintenance_interrupt_state(cs);

    trace_gicv3_ich_misr_read(gicv3_redist_affid(cs), value);
    return value;
}

static uint64_t ich_eisr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t value = eoi_maintenance_interrupt_state(cs, NULL);

    trace_gicv3_ich_eisr_read(gicv3_redist_affid(cs), value);
    return value;
}

static uint64_t ich_elrsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t value = 0;
    int i;

    for (i = 0; i < cs->num_list_regs; i++) {
        uint64_t lr = cs->ich_lr_el2[i];

        /* An LR is reported as usable if it holds no interrupt (its
         * state field is invalid) and either it is a hardware interrupt
         * or its EOI maintenance bit is clear.
         */
        if ((lr & ICH_LR_EL2_STATE_MASK) == 0 &&
            ((lr & ICH_LR_EL2_HW) != 0 || (lr & ICH_LR_EL2_EOI) == 0)) {
            value |= (1 << i);
        }
    }

    trace_gicv3_ich_elrsr_read(gicv3_redist_affid(cs), value);
    return value;
}
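
/*
 * The ICH_* virtual interface control registers; gicv3_init_cpuif()
 * registers these only when the CPU has EL2. The .nv2_redirect_offset
 * values are the registers' offsets in the FEAT_NV2 memory-backed
 * system register page.
 */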
static const ARMCPRegInfo gicv3_cpuif_hcr_reginfo[] = {
    { .name = "ICH_AP0R0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 0,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .nv2_redirect_offset = 0x480,
      .access = PL2_RW,
      .readfn = ich_ap_read,
      .writefn = ich_ap_write,
    },
    { .name = "ICH_AP1R0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 0,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .nv2_redirect_offset = 0x4a0,
      .access = PL2_RW,
      .readfn = ich_ap_read,
      .writefn = ich_ap_write,
    },
    { .name = "ICH_HCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 0,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .nv2_redirect_offset = 0x4c0,
      .access = PL2_RW,
      .readfn = ich_hcr_read,
      .writefn = ich_hcr_write,
    },
    { .name = "ICH_VTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL2_R,
      .readfn = ich_vtr_read,
    },
    { .name = "ICH_MISR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 2,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL2_R,
      .readfn = ich_misr_read,
    },
    { .name = "ICH_EISR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 3,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL2_R,
      .readfn = ich_eisr_read,
    },
    { .name = "ICH_ELRSR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 5,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL2_R,
      .readfn = ich_elrsr_read,
    },
    { .name = "ICH_VMCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 7,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .nv2_redirect_offset = 0x4c8,
      .access = PL2_RW,
      .readfn = ich_vmcr_read,
      .writefn = ich_vmcr_write,
    },
};

static const ARMCPRegInfo gicv3_cpuif_ich_apxr1_reginfo[] = {
    { .name = "ICH_AP0R1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .nv2_redirect_offset = 0x488,
      .access = PL2_RW,
      .readfn = ich_ap_read,
      .writefn = ich_ap_write,
    },
    { .name = "ICH_AP1R1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .nv2_redirect_offset = 0x4a8,
      .access = PL2_RW,
      .readfn = ich_ap_read,
      .writefn = ich_ap_write,
    },
};
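
/*
 * ICH_AP0R{2,3}_EL2 and ICH_AP1R{2,3}_EL2 are only defined when the
 * number of virtual preemption bits is 7 (the apxr1 registers above
 * similarly require vprebits >= 6); see gicv3_init_cpuif().
 */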
static const ARMCPRegInfo gicv3_cpuif_ich_apxr23_reginfo[] = {
    { .name = "ICH_AP0R2_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 2,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .nv2_redirect_offset = 0x490,
      .access = PL2_RW,
      .readfn = ich_ap_read,
      .writefn = ich_ap_write,
    },
    { .name = "ICH_AP0R3_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 3,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .nv2_redirect_offset = 0x498,
      .access = PL2_RW,
      .readfn = ich_ap_read,
      .writefn = ich_ap_write,
    },
    { .name = "ICH_AP1R2_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 2,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .nv2_redirect_offset = 0x4b0,
      .access = PL2_RW,
      .readfn = ich_ap_read,
      .writefn = ich_ap_write,
    },
    { .name = "ICH_AP1R3_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 3,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .nv2_redirect_offset = 0x4b8,
      .access = PL2_RW,
      .readfn = ich_ap_read,
      .writefn = ich_ap_write,
    },
};

static void gicv3_cpuif_el_change_hook(ARMCPU *cpu, void *opaque)
{
    GICv3CPUState *cs = opaque;

    gicv3_cpuif_update(cs);
    /*
     * Because vLPIs are only pending in NonSecure state,
     * an EL change can change the VIRQ/VFIQ status (but
     * cannot affect the maintenance interrupt state)
     */
    gicv3_cpuif_virt_irq_fiq_update(cs);
}

void gicv3_init_cpuif(GICv3State *s)
{
    /* Called from the GICv3 realize function; register our system
     * registers with the CPU
     */
    int i;

    for (i = 0; i < s->num_cpu; i++) {
        ARMCPU *cpu = ARM_CPU(qemu_get_cpu(i));
        GICv3CPUState *cs = &s->cpu[i];

        /*
         * If the CPU doesn't define a GICv3 configuration, probably because
         * in real hardware it doesn't have one, then we use default values
         * matching those used by most Arm CPUs. This applies to:
         *  cpu->gic_num_lrs
         *  cpu->gic_vpribits
         *  cpu->gic_vprebits
         *  cpu->gic_pribits
         */

        /* Note that we can't just use the GICv3CPUState as an opaque pointer
         * in define_arm_cp_regs_with_opaque(), because when we're called back
         * it might be with code translated by CPU 0 but run by CPU 1, in
         * which case we'd get the wrong value.
         * So instead we define the regs with no ri->opaque info, and
         * get back to the GICv3CPUState from the CPUARMState.
         *
         * These CP regs callbacks can be called from either TCG or HVF code.
         */
        define_arm_cp_regs(cpu, gicv3_cpuif_reginfo);

        /*
         * If the CPU implements FEAT_NMI and FEAT_GICv3 it must also
         * implement FEAT_GICv3_NMI, which is the CPU interface part
         * of NMI support. This is distinct from whether the GIC proper
         * (redistributors and distributor) have NMI support. In QEMU
         * that is a property of the GIC device in s->nmi_support;
         * cs->nmi_support indicates the CPU interface's support.
         */
        if (cpu_isar_feature(aa64_nmi, cpu)) {
            cs->nmi_support = true;
            define_arm_cp_regs(cpu, gicv3_cpuif_gicv3_nmi_reginfo);
        }

        /*
         * The CPU implementation specifies the number of supported
         * bits of physical priority. For backwards compatibility
         * of migration, we have a compat property that forces use
         * of 8 priority bits regardless of what the CPU really has.
         */
        if (s->force_8bit_prio) {
            cs->pribits = 8;
        } else {
            cs->pribits = cpu->gic_pribits ?: 5;
        }

        /*
         * The GICv3 has separate ID register fields for virtual priority
         * and preemption bit values, but only a single ID register field
         * for the physical priority bits. The preemption bit count is
         * always the same as the priority bit count, except that 8 bits
         * of priority means 7 preemption bits. We precalculate the
         * preemption bits because it simplifies the code and makes the
         * parallels between the virtual and physical bits of the GIC
         * a bit clearer.
         */
        cs->prebits = cs->pribits;
        if (cs->prebits == 8) {
            cs->prebits--;
        }
        /*
         * Check that CPU code defining pribits didn't violate
         * architectural constraints our implementation relies on.
         */
        g_assert(cs->pribits >= 4 && cs->pribits <= 8);

        /*
         * gicv3_cpuif_reginfo[] defines ICC_AP*R0_EL1; add definitions
         * for ICC_AP*R{1,2,3}_EL1 if the prebits value requires them.
         */
        if (cs->prebits >= 6) {
            define_arm_cp_regs(cpu, gicv3_cpuif_icc_apxr1_reginfo);
        }
        if (cs->prebits == 7) {
            define_arm_cp_regs(cpu, gicv3_cpuif_icc_apxr23_reginfo);
        }

        if (arm_feature(&cpu->env, ARM_FEATURE_EL2)) {
            int j;

            cs->num_list_regs = cpu->gic_num_lrs ?: 4;
            cs->vpribits = cpu->gic_vpribits ?: 5;
            cs->vprebits = cpu->gic_vprebits ?: 5;

            /* Check against architectural constraints: getting these
             * wrong would be a bug in the CPU code defining these,
             * and the implementation relies on them holding.
             */
            g_assert(cs->vprebits <= cs->vpribits);
            g_assert(cs->vprebits >= 5 && cs->vprebits <= 7);
            g_assert(cs->vpribits >= 5 && cs->vpribits <= 8);

            define_arm_cp_regs(cpu, gicv3_cpuif_hcr_reginfo);

            for (j = 0; j < cs->num_list_regs; j++) {
                /* Note that the AArch64 LRs are 64-bit; the AArch32 LRs
                 * are split into two cp15 regs, LR (the low part, with the
                 * same encoding as the AArch64 LR) and LRC (the high part).
                 */
                ARMCPRegInfo lr_regset[] = {
                    { .name = "ICH_LRn_EL2", .state = ARM_CP_STATE_BOTH,
                      .opc0 = 3, .opc1 = 4, .crn = 12,
                      .crm = 12 + (j >> 3), .opc2 = j & 7,
                      .type = ARM_CP_IO | ARM_CP_NO_RAW,
                      .nv2_redirect_offset = 0x400 + 8 * j,
                      .access = PL2_RW,
                      .readfn = ich_lr_read,
                      .writefn = ich_lr_write,
                    },
                    { .name = "ICH_LRCn_EL2", .state = ARM_CP_STATE_AA32,
                      .cp = 15, .opc1 = 4, .crn = 12,
                      .crm = 14 + (j >> 3), .opc2 = j & 7,
                      .type = ARM_CP_IO | ARM_CP_NO_RAW,
                      .access = PL2_RW,
                      .readfn = ich_lr_read,
                      .writefn = ich_lr_write,
                    },
                };
                define_arm_cp_regs(cpu, lr_regset);
            }
            if (cs->vprebits >= 6) {
                define_arm_cp_regs(cpu, gicv3_cpuif_ich_apxr1_reginfo);
            }
            if (cs->vprebits == 7) {
                define_arm_cp_regs(cpu, gicv3_cpuif_ich_apxr23_reginfo);
            }
        }
        if (tcg_enabled() || qtest_enabled()) {
            /*
             * We can only trap EL changes with TCG. However the GIC interrupt
             * state only changes on EL changes involving EL2 or EL3, so for
             * the non-TCG case this is OK, as EL2 and EL3 can't exist.
             */
            arm_register_el_change_hook(cpu, gicv3_cpuif_el_change_hook, cs);
        } else {
            assert(!arm_feature(&cpu->env, ARM_FEATURE_EL2));
            assert(!arm_feature(&cpu->env, ARM_FEATURE_EL3));
        }
    }
}