/*
 * ARM Generic Interrupt Controller v3 (emulation)
 *
 * Copyright (c) 2016 Linaro Limited
 * Written by Peter Maydell
 *
 * This code is licensed under the GPL, version 2 or (at your option)
 * any later version.
 */

/* This file contains the code for the system register interface
 * portions of the GICv3.
 */

#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "trace.h"
#include "gicv3_internal.h"
#include "hw/irq.h"
#include "cpu.h"
#include "target/arm/cpregs.h"

/*
 * Special case return value from hppvi_index(); must be larger than
 * the architecturally maximum possible list register index (which is 15)
 */
#define HPPVI_INDEX_VLPI 16

/* Return the GICv3 CPU interface state associated with this CPU's env */
static GICv3CPUState *icc_cs_from_env(CPUARMState *env)
{
    return env->gicv3state;
}

static bool gicv3_use_ns_bank(CPUARMState *env)
{
    /* Return true if we should use the NonSecure bank for a banked GIC
     * CPU interface register. Note that this differs from the
     * access_secure_reg() function because GICv3 banked registers are
     * banked even for AArch64, unlike the other CPU system registers.
     */
    return !arm_is_secure_below_el3(env);
}

/* The minimum BPR for the virtual interface is a configurable property */
static inline int icv_min_vbpr(GICv3CPUState *cs)
{
    return 7 - cs->vprebits;
}

/* Simple accessor functions for ICH_LR_EL2 list register fields */
static uint32_t ich_lr_vintid(uint64_t lr)
{
    return extract64(lr, ICH_LR_EL2_VINTID_SHIFT, ICH_LR_EL2_VINTID_LENGTH);
}

static uint32_t ich_lr_pintid(uint64_t lr)
{
    return extract64(lr, ICH_LR_EL2_PINTID_SHIFT, ICH_LR_EL2_PINTID_LENGTH);
}

static uint32_t ich_lr_prio(uint64_t lr)
{
    return extract64(lr, ICH_LR_EL2_PRIORITY_SHIFT, ICH_LR_EL2_PRIORITY_LENGTH);
}

static int ich_lr_state(uint64_t lr)
{
    return extract64(lr, ICH_LR_EL2_STATE_SHIFT, ICH_LR_EL2_STATE_LENGTH);
}

static bool icv_access(CPUARMState *env, int hcr_flags)
{
    /* Return true if this ICC_ register access should really be
     * directed to an ICV_ access. hcr_flags is a mask of
     * HCR_EL2 bits to check: we treat this as an ICV_ access
     * if we are in NS EL1 and at least one of the specified
     * HCR_EL2 bits is set.
     *
     * ICV registers fall into four categories:
     *  * access if NS EL1 and HCR_EL2.FMO == 1:
     *    all ICV regs with '0' in their name
     *  * access if NS EL1 and HCR_EL2.IMO == 1:
     *    all ICV regs with '1' in their name
     *  * access if NS EL1 and either IMO or FMO == 1:
     *    CTLR, DIR, PMR, RPR
     */
    uint64_t hcr_el2 = arm_hcr_el2_eff(env);
    bool flagmatch = hcr_el2 & hcr_flags & (HCR_IMO | HCR_FMO);

    return flagmatch && arm_current_el(env) == 1
        && !arm_is_secure_below_el3(env);
}

static int read_vbpr(GICv3CPUState *cs, int grp)
{
    /* Read VBPR value out of the VMCR field (caller must handle
     * VCBPR effects if required)
     */
    if (grp == GICV3_G0) {
        return extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR0_SHIFT,
                         ICH_VMCR_EL2_VBPR0_LENGTH);
    } else {
        return extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR1_SHIFT,
                         ICH_VMCR_EL2_VBPR1_LENGTH);
    }
}

static void write_vbpr(GICv3CPUState *cs, int grp, int value)
{
    /* Write new VBPR1 value, handling the "writing a value less than
     * the minimum sets it to the minimum" semantics.
     * (The G1 minimum is one greater than the G0 minimum.)
     */
    int min = icv_min_vbpr(cs);

    if (grp != GICV3_G0) {
        min++;
    }

    value = MAX(value, min);

    if (grp == GICV3_G0) {
        cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR0_SHIFT,
                                     ICH_VMCR_EL2_VBPR0_LENGTH, value);
    } else {
        cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR1_SHIFT,
                                     ICH_VMCR_EL2_VBPR1_LENGTH, value);
    }
}

static uint32_t icv_fullprio_mask(GICv3CPUState *cs)
{
    /* Return a mask word which clears the unimplemented priority bits
     * from a priority value for a virtual interrupt. (Not to be confused
     * with the group priority, whose mask depends on the value of VBPR
     * for the interrupt group.)
     */
    return ~0U << (8 - cs->vpribits);
}

static int ich_highest_active_virt_prio(GICv3CPUState *cs)
{
    /* Calculate the current running priority based on the set bits
     * in the ICH Active Priority Registers.
     */
    int i;
    int aprmax = 1 << (cs->vprebits - 5);

    assert(aprmax <= ARRAY_SIZE(cs->ich_apr[0]));

    for (i = 0; i < aprmax; i++) {
        uint32_t apr = cs->ich_apr[GICV3_G0][i] |
            cs->ich_apr[GICV3_G1NS][i];

        if (!apr) {
            continue;
        }
        /* Lowest set APR bit is the highest-priority active interrupt */
        return (i * 32 + ctz32(apr)) << (icv_min_vbpr(cs) + 1);
    }
    /* No current active interrupts: return idle priority */
    return 0xff;
}

static int hppvi_index(GICv3CPUState *cs)
{
    /*
     * Return the list register index of the highest priority pending
     * virtual interrupt, as per the HighestPriorityVirtualInterrupt
     * pseudocode. If no pending virtual interrupts, return -1.
     * If the highest priority pending virtual interrupt is a vLPI,
     * return HPPVI_INDEX_VLPI.
     * (The pseudocode handles checking whether the vLPI is higher
     * priority than the highest priority list register at every
     * callsite of HighestPriorityVirtualInterrupt; we check it here.)
     */
    ARMCPU *cpu = ARM_CPU(cs->cpu);
    CPUARMState *env = &cpu->env;
    int idx = -1;
    int i;
    /* Note that a list register entry with a priority of 0xff will
     * never be reported by this function; this is the architecturally
     * correct behaviour.
     */
    int prio = 0xff;

    if (!(cs->ich_vmcr_el2 & (ICH_VMCR_EL2_VENG0 | ICH_VMCR_EL2_VENG1))) {
        /* Both groups disabled, definitely nothing to do */
        return idx;
    }

    for (i = 0; i < cs->num_list_regs; i++) {
        uint64_t lr = cs->ich_lr_el2[i];
        int thisprio;

        if (ich_lr_state(lr) != ICH_LR_EL2_STATE_PENDING) {
            /* Not Pending */
            continue;
        }

        /* Ignore interrupts if relevant group enable not set */
        if (lr & ICH_LR_EL2_GROUP) {
            if (!(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) {
                continue;
            }
        } else {
            if (!(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0)) {
                continue;
            }
        }

        thisprio = ich_lr_prio(lr);

        if (thisprio < prio) {
            prio = thisprio;
            idx = i;
        }
    }

    /*
     * "no pending vLPI" is indicated with prio = 0xff, which always
     * fails the priority check here. vLPIs are only considered
     * when we are in Non-Secure state.
     */
    if (cs->hppvlpi.prio < prio && !arm_is_secure(env)) {
        if (cs->hppvlpi.grp == GICV3_G0) {
            if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0) {
                return HPPVI_INDEX_VLPI;
            }
        } else {
            if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1) {
                return HPPVI_INDEX_VLPI;
            }
        }
    }

    return idx;
}

static uint32_t icv_gprio_mask(GICv3CPUState *cs, int group)
{
    /* Return a mask word which clears the subpriority bits from
     * a priority value for a virtual interrupt in the specified group.
     * This depends on the VBPR value.
     * If using VBPR0 then:
     *  a BPR of 0 means the group priority bits are [7:1];
     *  a BPR of 1 means they are [7:2], and so on down to
     *  a BPR of 7 meaning no group priority bits at all.
     * If using VBPR1 then:
     *  a BPR of 0 is impossible (the minimum value is 1)
     *  a BPR of 1 means the group priority bits are [7:1];
     *  a BPR of 2 means they are [7:2], and so on down to
     *  a BPR of 7 meaning the group priority is [7].
     *
     * Which BPR to use depends on the group of the interrupt and
     * the current ICH_VMCR_EL2.VCBPR settings.
     *
     * This corresponds to the VGroupBits() pseudocode.
     */
    int bpr;

    if (group == GICV3_G1NS && cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR) {
        group = GICV3_G0;
    }

    bpr = read_vbpr(cs, group);
    if (group == GICV3_G1NS) {
        assert(bpr > 0);
        bpr--;
    }

    return ~0U << (bpr + 1);
}

static bool icv_hppi_can_preempt(GICv3CPUState *cs, uint64_t lr)
{
    /* Return true if we can signal this virtual interrupt defined by
     * the given list register value; see the pseudocode functions
     * CanSignalVirtualInterrupt and CanSignalVirtualInt.
     * Compare also icc_hppi_can_preempt() which is the non-virtual
     * equivalent of these checks.
     */
    int grp;
    uint32_t mask, prio, rprio, vpmr;

    if (!(cs->ich_hcr_el2 & ICH_HCR_EL2_EN)) {
        /* Virtual interface disabled */
        return false;
    }

    /* We don't need to check that this LR is in Pending state because
     * that has already been done in hppvi_index().
     */

    prio = ich_lr_prio(lr);
    vpmr = extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
                     ICH_VMCR_EL2_VPMR_LENGTH);

    if (prio >= vpmr) {
        /* Priority mask masks this interrupt */
        return false;
    }

    rprio = ich_highest_active_virt_prio(cs);
    if (rprio == 0xff) {
        /* No running interrupt so we can preempt */
        return true;
    }

    grp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;

    mask = icv_gprio_mask(cs, grp);

    /* We only preempt a running interrupt if the pending interrupt's
     * group priority is sufficient (the subpriorities are not considered).
     */
    if ((prio & mask) < (rprio & mask)) {
        return true;
    }

    return false;
}

static bool icv_hppvlpi_can_preempt(GICv3CPUState *cs)
{
    /*
     * Return true if we can signal the highest priority pending vLPI.
     * We can assume we're Non-secure because hppvi_index() already
     * tested for that.
     */
    uint32_t mask, rprio, vpmr;

    if (!(cs->ich_hcr_el2 & ICH_HCR_EL2_EN)) {
        /* Virtual interface disabled */
        return false;
    }

    vpmr = extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
                     ICH_VMCR_EL2_VPMR_LENGTH);

    if (cs->hppvlpi.prio >= vpmr) {
        /* Priority mask masks this interrupt */
        return false;
    }

    rprio = ich_highest_active_virt_prio(cs);
    if (rprio == 0xff) {
        /* No running interrupt so we can preempt */
        return true;
    }

    mask = icv_gprio_mask(cs, cs->hppvlpi.grp);

    /*
     * We only preempt a running interrupt if the pending interrupt's
     * group priority is sufficient (the subpriorities are not considered).
     */
    if ((cs->hppvlpi.prio & mask) < (rprio & mask)) {
        return true;
    }

    return false;
}

static uint32_t eoi_maintenance_interrupt_state(GICv3CPUState *cs,
                                                uint32_t *misr)
{
    /* Return a set of bits indicating the EOI maintenance interrupt status
     * for each list register. The EOI maintenance interrupt status is
     * 1 if LR.State == 0 && LR.HW == 0 && LR.EOI == 1
     * (see the GICv3 spec for the ICH_EISR_EL2 register).
     * If misr is not NULL then we should also collect the information
     * about the MISR.EOI, MISR.NP and MISR.U bits.
374 */ 375 uint32_t value = 0; 376 int validcount = 0; 377 bool seenpending = false; 378 int i; 379 380 for (i = 0; i < cs->num_list_regs; i++) { 381 uint64_t lr = cs->ich_lr_el2[i]; 382 383 if ((lr & (ICH_LR_EL2_STATE_MASK | ICH_LR_EL2_HW | ICH_LR_EL2_EOI)) 384 == ICH_LR_EL2_EOI) { 385 value |= (1 << i); 386 } 387 if ((lr & ICH_LR_EL2_STATE_MASK)) { 388 validcount++; 389 } 390 if (ich_lr_state(lr) == ICH_LR_EL2_STATE_PENDING) { 391 seenpending = true; 392 } 393 } 394 395 if (misr) { 396 if (validcount < 2 && (cs->ich_hcr_el2 & ICH_HCR_EL2_UIE)) { 397 *misr |= ICH_MISR_EL2_U; 398 } 399 if (!seenpending && (cs->ich_hcr_el2 & ICH_HCR_EL2_NPIE)) { 400 *misr |= ICH_MISR_EL2_NP; 401 } 402 if (value) { 403 *misr |= ICH_MISR_EL2_EOI; 404 } 405 } 406 return value; 407 } 408 409 static uint32_t maintenance_interrupt_state(GICv3CPUState *cs) 410 { 411 /* Return a set of bits indicating the maintenance interrupt status 412 * (as seen in the ICH_MISR_EL2 register). 413 */ 414 uint32_t value = 0; 415 416 /* Scan list registers and fill in the U, NP and EOI bits */ 417 eoi_maintenance_interrupt_state(cs, &value); 418 419 if ((cs->ich_hcr_el2 & ICH_HCR_EL2_LRENPIE) && 420 (cs->ich_hcr_el2 & ICH_HCR_EL2_EOICOUNT_MASK)) { 421 value |= ICH_MISR_EL2_LRENP; 422 } 423 424 if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP0EIE) && 425 (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0)) { 426 value |= ICH_MISR_EL2_VGRP0E; 427 } 428 429 if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP0DIE) && 430 !(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) { 431 value |= ICH_MISR_EL2_VGRP0D; 432 } 433 if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP1EIE) && 434 (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) { 435 value |= ICH_MISR_EL2_VGRP1E; 436 } 437 438 if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP1DIE) && 439 !(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) { 440 value |= ICH_MISR_EL2_VGRP1D; 441 } 442 443 return value; 444 } 445 446 void gicv3_cpuif_virt_irq_fiq_update(GICv3CPUState *cs) 447 { 448 /* 449 * Tell the CPU about any pending virtual interrupts. 
     * This should only be called for changes that affect the
     * vIRQ and vFIQ status and do not change the maintenance
     * interrupt status. This means that unlike gicv3_cpuif_virt_update()
     * this function won't recursively call back into the GIC code.
     * The main use of this is when the redistributor has changed the
     * highest priority pending virtual LPI.
     */
    int idx;
    int irqlevel = 0;
    int fiqlevel = 0;

    idx = hppvi_index(cs);
    trace_gicv3_cpuif_virt_update(gicv3_redist_affid(cs), idx,
                                  cs->hppvlpi.irq, cs->hppvlpi.grp,
                                  cs->hppvlpi.prio);
    if (idx == HPPVI_INDEX_VLPI) {
        if (icv_hppvlpi_can_preempt(cs)) {
            if (cs->hppvlpi.grp == GICV3_G0) {
                fiqlevel = 1;
            } else {
                irqlevel = 1;
            }
        }
    } else if (idx >= 0) {
        uint64_t lr = cs->ich_lr_el2[idx];

        if (icv_hppi_can_preempt(cs, lr)) {
            /* Virtual interrupts are simple: G0 are always FIQ, and G1 IRQ */
            if (lr & ICH_LR_EL2_GROUP) {
                irqlevel = 1;
            } else {
                fiqlevel = 1;
            }
        }
    }

    trace_gicv3_cpuif_virt_set_irqs(gicv3_redist_affid(cs), fiqlevel, irqlevel);
    qemu_set_irq(cs->parent_vfiq, fiqlevel);
    qemu_set_irq(cs->parent_virq, irqlevel);
}

static void gicv3_cpuif_virt_update(GICv3CPUState *cs)
{
    /*
     * Tell the CPU about any pending virtual interrupts or
     * maintenance interrupts, following a change to the state
     * of the CPU interface relevant to virtual interrupts.
     *
     * CAUTION: this function will call qemu_set_irq() on the
     * CPU maintenance IRQ line, which is typically wired up
     * to the GIC as a per-CPU interrupt. This means that it
     * will recursively call back into the GIC code via
     * gicv3_redist_set_irq() and thus into the CPU interface code's
     * gicv3_cpuif_update(). It is therefore important that this
     * function is only called as the final action of a CPU interface
     * register write implementation, after all the GIC state
     * fields have been updated. gicv3_cpuif_update() also must
     * not cause this function to be called, but that happens
     * naturally as a result of there being no architectural
     * linkage between the physical and virtual GIC logic.
     */
    ARMCPU *cpu = ARM_CPU(cs->cpu);
    int maintlevel = 0;

    gicv3_cpuif_virt_irq_fiq_update(cs);

    if ((cs->ich_hcr_el2 & ICH_HCR_EL2_EN) &&
        maintenance_interrupt_state(cs) != 0) {
        maintlevel = 1;
    }

    trace_gicv3_cpuif_virt_set_maint_irq(gicv3_redist_affid(cs), maintlevel);
    qemu_set_irq(cpu->gicv3_maintenance_interrupt, maintlevel);
}

/* Read the virtual active priority register ICV_AP0R<n>/ICV_AP1R<n> */
static uint64_t icv_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    int regno = ri->opc2 & 3;
    int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
    uint64_t value = cs->ich_apr[grp][regno];

    trace_gicv3_icv_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
    return value;
}

/* Write the virtual active priority register ICV_AP0R<n>/ICV_AP1R<n> */
static void icv_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    int regno = ri->opc2 & 3;
    int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;

    trace_gicv3_icv_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);

    cs->ich_apr[grp][regno] = value & 0xFFFFFFFFU;

    gicv3_cpuif_virt_irq_fiq_update(cs);
    return;
}

/* Read ICV_BPR0/ICV_BPR1 (crm == 8 selects BPR0/Group 0) */
static uint64_t icv_bpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1NS;
    uint64_t bpr;
    bool satinc = false;

    if (grp == GICV3_G1NS && (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR)) {
        /* reads return bpr0 + 1 saturated to 7, writes ignored */
        grp = GICV3_G0;
        satinc = true;
    }

    bpr = read_vbpr(cs, grp);

    if (satinc) {
        bpr++;
        bpr = MIN(bpr, 7);
    }

    trace_gicv3_icv_bpr_read(ri->crm == 8 ? 0 : 1, gicv3_redist_affid(cs), bpr);

    return bpr;
}

/* Write ICV_BPR0/ICV_BPR1 (crm == 8 selects BPR0/Group 0) */
static void icv_bpr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1NS;

    trace_gicv3_icv_bpr_write(ri->crm == 8 ? 0 : 1,
                              gicv3_redist_affid(cs), value);

    if (grp == GICV3_G1NS && (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR)) {
        /* reads return bpr0 + 1 saturated to 7, writes ignored */
        return;
    }

    write_vbpr(cs, grp, value);

    gicv3_cpuif_virt_irq_fiq_update(cs);
}

/* Read ICV_PMR: the virtual priority mask lives in ICH_VMCR_EL2.VPMR */
static uint64_t icv_pmr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t value;

    value = extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
                      ICH_VMCR_EL2_VPMR_LENGTH);

    trace_gicv3_icv_pmr_read(gicv3_redist_affid(cs), value);
    return value;
}

/* Write ICV_PMR into ICH_VMCR_EL2.VPMR, masking unimplemented bits */
static void icv_pmr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    GICv3CPUState *cs = icc_cs_from_env(env);

    trace_gicv3_icv_pmr_write(gicv3_redist_affid(cs), value);

    value &= icv_fullprio_mask(cs);

    cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
                                 ICH_VMCR_EL2_VPMR_LENGTH, value);

    gicv3_cpuif_virt_irq_fiq_update(cs);
}

/* Read ICV_IGRPEN0/1: virtual group enables live in ICH_VMCR_EL2.VENG0/1 */
static uint64_t icv_igrpen_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    int enbit;
    uint64_t value;

    enbit = ri->opc2 & 1 ? ICH_VMCR_EL2_VENG1_SHIFT : ICH_VMCR_EL2_VENG0_SHIFT;
    value = extract64(cs->ich_vmcr_el2, enbit, 1);

    trace_gicv3_icv_igrpen_read(ri->opc2 & 1 ? 1 : 0,
                                gicv3_redist_affid(cs), value);
    return value;
}

/* Write ICV_IGRPEN0/1 into ICH_VMCR_EL2.VENG0/1 */
static void icv_igrpen_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    int enbit;

    trace_gicv3_icv_igrpen_write(ri->opc2 & 1 ? 1 : 0,
                                 gicv3_redist_affid(cs), value);

    enbit = ri->opc2 & 1 ? ICH_VMCR_EL2_VENG1_SHIFT : ICH_VMCR_EL2_VENG0_SHIFT;

    cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, enbit, 1, value);
    /* Group enable changes can affect the maintenance interrupt too,
     * so use the full update here, not just the irq/fiq one.
     */
    gicv3_cpuif_virt_update(cs);
}

static uint64_t icv_ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t value;

    /* Note that the fixed fields here (A3V, SEIS, IDbits, PRIbits)
     * should match the ones reported in ich_vtr_read().
     */
    value = ICC_CTLR_EL1_A3V | (1 << ICC_CTLR_EL1_IDBITS_SHIFT) |
        (7 << ICC_CTLR_EL1_PRIBITS_SHIFT);

    if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VEOIM) {
        value |= ICC_CTLR_EL1_EOIMODE;
    }

    if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR) {
        value |= ICC_CTLR_EL1_CBPR;
    }

    trace_gicv3_icv_ctlr_read(gicv3_redist_affid(cs), value);
    return value;
}

/* Write ICV_CTLR: only the CBPR and EOImode fields are writable */
static void icv_ctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    GICv3CPUState *cs = icc_cs_from_env(env);

    trace_gicv3_icv_ctlr_write(gicv3_redist_affid(cs), value);

    cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VCBPR_SHIFT,
                                 1, value & ICC_CTLR_EL1_CBPR ? 1 : 0);
    cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VEOIM_SHIFT,
                                 1, value & ICC_CTLR_EL1_EOIMODE ? 1 : 0);

    gicv3_cpuif_virt_irq_fiq_update(cs);
}

/* Read ICV_RPR: the virtual running priority */
static uint64_t icv_rpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    int prio = ich_highest_active_virt_prio(cs);

    trace_gicv3_icv_rpr_read(gicv3_redist_affid(cs), prio);
    return prio;
}

/* Read ICV_HPPIR0/1: highest priority pending virtual interrupt ID */
static uint64_t icv_hppir_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    int grp = ri->crm == 8 ? GICV3_G0 : GICV3_G1NS;
    int idx = hppvi_index(cs);
    uint64_t value = INTID_SPURIOUS;

    if (idx == HPPVI_INDEX_VLPI) {
        if (cs->hppvlpi.grp == grp) {
            value = cs->hppvlpi.irq;
        }
    } else if (idx >= 0) {
        uint64_t lr = cs->ich_lr_el2[idx];
        int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;

        if (grp == thisgrp) {
            value = ich_lr_vintid(lr);
        }
    }

    trace_gicv3_icv_hppir_read(ri->crm == 8 ? 0 : 1,
                               gicv3_redist_affid(cs), value);
    return value;
}

static void icv_activate_irq(GICv3CPUState *cs, int idx, int grp)
{
    /* Activate the interrupt in the specified list register
     * by moving it from Pending to Active state, and update the
     * Active Priority Registers.
     */
    uint32_t mask = icv_gprio_mask(cs, grp);
    int prio = ich_lr_prio(cs->ich_lr_el2[idx]) & mask;
    /* Find the APR bit corresponding to this group priority */
    int aprbit = prio >> (8 - cs->vprebits);
    int regno = aprbit / 32;
    int regbit = aprbit % 32;

    cs->ich_lr_el2[idx] &= ~ICH_LR_EL2_STATE_PENDING_BIT;
    cs->ich_lr_el2[idx] |= ICH_LR_EL2_STATE_ACTIVE_BIT;
    cs->ich_apr[grp][regno] |= (1 << regbit);
}

/* Activate the highest priority pending vLPI: set its active priority
 * bit and tell the redistributor it is no longer pending.
 */
static void icv_activate_vlpi(GICv3CPUState *cs)
{
    uint32_t mask = icv_gprio_mask(cs, cs->hppvlpi.grp);
    int prio = cs->hppvlpi.prio & mask;
    int aprbit = prio >> (8 - cs->vprebits);
    int regno = aprbit / 32;
    int regbit = aprbit % 32;

    cs->ich_apr[cs->hppvlpi.grp][regno] |= (1 << regbit);
    gicv3_redist_vlpi_pending(cs, cs->hppvlpi.irq, 0);
}

/* Read ICV_IAR0/1: acknowledge (and activate) the highest priority
 * pending virtual interrupt for the selected group.
 */
static uint64_t icv_iar_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    int grp = ri->crm == 8 ? GICV3_G0 : GICV3_G1NS;
    int idx = hppvi_index(cs);
    uint64_t intid = INTID_SPURIOUS;

    if (idx == HPPVI_INDEX_VLPI) {
        if (cs->hppvlpi.grp == grp && icv_hppvlpi_can_preempt(cs)) {
            intid = cs->hppvlpi.irq;
            icv_activate_vlpi(cs);
        }
    } else if (idx >= 0) {
        uint64_t lr = cs->ich_lr_el2[idx];
        int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;

        if (thisgrp == grp && icv_hppi_can_preempt(cs, lr)) {
            intid = ich_lr_vintid(lr);
            if (!gicv3_intid_is_special(intid)) {
                icv_activate_irq(cs, idx, grp);
            } else {
                /* Interrupt goes from Pending to Invalid */
                cs->ich_lr_el2[idx] &= ~ICH_LR_EL2_STATE_PENDING_BIT;
                /* We will now return the (bogus) ID from the list register,
                 * as per the pseudocode.
                 */
            }
        }
    }

    trace_gicv3_icv_iar_read(ri->crm == 8 ? 0 : 1,
                             gicv3_redist_affid(cs), intid);

    gicv3_cpuif_virt_update(cs);

    return intid;
}

static int icc_highest_active_prio(GICv3CPUState *cs)
{
    /* Calculate the current running priority based on the set bits
     * in the Active Priority Registers.
     */
    int i;

    for (i = 0; i < ARRAY_SIZE(cs->icc_apr[0]); i++) {
        uint32_t apr = cs->icc_apr[GICV3_G0][i] |
            cs->icc_apr[GICV3_G1][i] | cs->icc_apr[GICV3_G1NS][i];

        if (!apr) {
            continue;
        }
        /* Lowest set APR bit is the highest-priority active interrupt */
        return (i * 32 + ctz32(apr)) << (GIC_MIN_BPR + 1);
    }
    /* No current active interrupts: return idle priority */
    return 0xff;
}

static uint32_t icc_gprio_mask(GICv3CPUState *cs, int group)
{
    /* Return a mask word which clears the subpriority bits from
     * a priority value for an interrupt in the specified group.
     * This depends on the BPR value. For CBPR0 (S or NS):
     *  a BPR of 0 means the group priority bits are [7:1];
     *  a BPR of 1 means they are [7:2], and so on down to
     *  a BPR of 7 meaning no group priority bits at all.
     * For CBPR1 NS:
     *  a BPR of 0 is impossible (the minimum value is 1)
     *  a BPR of 1 means the group priority bits are [7:1];
     *  a BPR of 2 means they are [7:2], and so on down to
     *  a BPR of 7 meaning the group priority is [7].
     *
     * Which BPR to use depends on the group of the interrupt and
     * the current ICC_CTLR.CBPR settings.
     *
     * This corresponds to the GroupBits() pseudocode.
828 */ 829 int bpr; 830 831 if ((group == GICV3_G1 && cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR) || 832 (group == GICV3_G1NS && 833 cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR)) { 834 group = GICV3_G0; 835 } 836 837 bpr = cs->icc_bpr[group] & 7; 838 839 if (group == GICV3_G1NS) { 840 assert(bpr > 0); 841 bpr--; 842 } 843 844 return ~0U << (bpr + 1); 845 } 846 847 static bool icc_no_enabled_hppi(GICv3CPUState *cs) 848 { 849 /* Return true if there is no pending interrupt, or the 850 * highest priority pending interrupt is in a group which has been 851 * disabled at the CPU interface by the ICC_IGRPEN* register enable bits. 852 */ 853 return cs->hppi.prio == 0xff || (cs->icc_igrpen[cs->hppi.grp] == 0); 854 } 855 856 static bool icc_hppi_can_preempt(GICv3CPUState *cs) 857 { 858 /* Return true if we have a pending interrupt of sufficient 859 * priority to preempt. 860 */ 861 int rprio; 862 uint32_t mask; 863 864 if (icc_no_enabled_hppi(cs)) { 865 return false; 866 } 867 868 if (cs->hppi.prio >= cs->icc_pmr_el1) { 869 /* Priority mask masks this interrupt */ 870 return false; 871 } 872 873 rprio = icc_highest_active_prio(cs); 874 if (rprio == 0xff) { 875 /* No currently running interrupt so we can preempt */ 876 return true; 877 } 878 879 mask = icc_gprio_mask(cs, cs->hppi.grp); 880 881 /* We only preempt a running interrupt if the pending interrupt's 882 * group priority is sufficient (the subpriorities are not considered). 
883 */ 884 if ((cs->hppi.prio & mask) < (rprio & mask)) { 885 return true; 886 } 887 888 return false; 889 } 890 891 void gicv3_cpuif_update(GICv3CPUState *cs) 892 { 893 /* Tell the CPU about its highest priority pending interrupt */ 894 int irqlevel = 0; 895 int fiqlevel = 0; 896 ARMCPU *cpu = ARM_CPU(cs->cpu); 897 CPUARMState *env = &cpu->env; 898 899 g_assert(qemu_mutex_iothread_locked()); 900 901 trace_gicv3_cpuif_update(gicv3_redist_affid(cs), cs->hppi.irq, 902 cs->hppi.grp, cs->hppi.prio); 903 904 if (cs->hppi.grp == GICV3_G1 && !arm_feature(env, ARM_FEATURE_EL3)) { 905 /* If a Security-enabled GIC sends a G1S interrupt to a 906 * Security-disabled CPU, we must treat it as if it were G0. 907 */ 908 cs->hppi.grp = GICV3_G0; 909 } 910 911 if (icc_hppi_can_preempt(cs)) { 912 /* We have an interrupt: should we signal it as IRQ or FIQ? 913 * This is described in the GICv3 spec section 4.6.2. 914 */ 915 bool isfiq; 916 917 switch (cs->hppi.grp) { 918 case GICV3_G0: 919 isfiq = true; 920 break; 921 case GICV3_G1: 922 isfiq = (!arm_is_secure(env) || 923 (arm_current_el(env) == 3 && arm_el_is_aa64(env, 3))); 924 break; 925 case GICV3_G1NS: 926 isfiq = arm_is_secure(env); 927 break; 928 default: 929 g_assert_not_reached(); 930 } 931 932 if (isfiq) { 933 fiqlevel = 1; 934 } else { 935 irqlevel = 1; 936 } 937 } 938 939 trace_gicv3_cpuif_set_irqs(gicv3_redist_affid(cs), fiqlevel, irqlevel); 940 941 qemu_set_irq(cs->parent_fiq, fiqlevel); 942 qemu_set_irq(cs->parent_irq, irqlevel); 943 } 944 945 static uint64_t icc_pmr_read(CPUARMState *env, const ARMCPRegInfo *ri) 946 { 947 GICv3CPUState *cs = icc_cs_from_env(env); 948 uint32_t value = cs->icc_pmr_el1; 949 950 if (icv_access(env, HCR_FMO | HCR_IMO)) { 951 return icv_pmr_read(env, ri); 952 } 953 954 if (arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env) && 955 (env->cp15.scr_el3 & SCR_FIQ)) { 956 /* NS access and Group 0 is inaccessible to NS: return the 957 * NS view of the current priority 958 */ 959 if ((value & 
0x80) == 0) { 960 /* Secure priorities not visible to NS */ 961 value = 0; 962 } else if (value != 0xff) { 963 value = (value << 1) & 0xff; 964 } 965 } 966 967 trace_gicv3_icc_pmr_read(gicv3_redist_affid(cs), value); 968 969 return value; 970 } 971 972 static void icc_pmr_write(CPUARMState *env, const ARMCPRegInfo *ri, 973 uint64_t value) 974 { 975 GICv3CPUState *cs = icc_cs_from_env(env); 976 977 if (icv_access(env, HCR_FMO | HCR_IMO)) { 978 return icv_pmr_write(env, ri, value); 979 } 980 981 trace_gicv3_icc_pmr_write(gicv3_redist_affid(cs), value); 982 983 value &= 0xff; 984 985 if (arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env) && 986 (env->cp15.scr_el3 & SCR_FIQ)) { 987 /* NS access and Group 0 is inaccessible to NS: return the 988 * NS view of the current priority 989 */ 990 if (!(cs->icc_pmr_el1 & 0x80)) { 991 /* Current PMR in the secure range, don't allow NS to change it */ 992 return; 993 } 994 value = (value >> 1) | 0x80; 995 } 996 cs->icc_pmr_el1 = value; 997 gicv3_cpuif_update(cs); 998 } 999 1000 static void icc_activate_irq(GICv3CPUState *cs, int irq) 1001 { 1002 /* Move the interrupt from the Pending state to Active, and update 1003 * the Active Priority Registers 1004 */ 1005 uint32_t mask = icc_gprio_mask(cs, cs->hppi.grp); 1006 int prio = cs->hppi.prio & mask; 1007 int aprbit = prio >> 1; 1008 int regno = aprbit / 32; 1009 int regbit = aprbit % 32; 1010 1011 cs->icc_apr[cs->hppi.grp][regno] |= (1 << regbit); 1012 1013 if (irq < GIC_INTERNAL) { 1014 cs->gicr_iactiver0 = deposit32(cs->gicr_iactiver0, irq, 1, 1); 1015 cs->gicr_ipendr0 = deposit32(cs->gicr_ipendr0, irq, 1, 0); 1016 gicv3_redist_update(cs); 1017 } else if (irq < GICV3_LPI_INTID_START) { 1018 gicv3_gicd_active_set(cs->gic, irq); 1019 gicv3_gicd_pending_clear(cs->gic, irq); 1020 gicv3_update(cs->gic, irq, 1); 1021 } else { 1022 gicv3_redist_lpi_pending(cs, irq, 0); 1023 } 1024 } 1025 1026 static uint64_t icc_hppir0_value(GICv3CPUState *cs, CPUARMState *env) 1027 { 1028 /* Return 
the highest priority pending interrupt register value 1029 * for group 0. 1030 */ 1031 bool irq_is_secure; 1032 1033 if (cs->hppi.prio == 0xff) { 1034 return INTID_SPURIOUS; 1035 } 1036 1037 /* Check whether we can return the interrupt or if we should return 1038 * a special identifier, as per the CheckGroup0ForSpecialIdentifiers 1039 * pseudocode. (We can simplify a little because for us ICC_SRE_EL1.RM 1040 * is always zero.) 1041 */ 1042 irq_is_secure = (!(cs->gic->gicd_ctlr & GICD_CTLR_DS) && 1043 (cs->hppi.grp != GICV3_G1NS)); 1044 1045 if (cs->hppi.grp != GICV3_G0 && !arm_is_el3_or_mon(env)) { 1046 return INTID_SPURIOUS; 1047 } 1048 if (irq_is_secure && !arm_is_secure(env)) { 1049 /* Secure interrupts not visible to Nonsecure */ 1050 return INTID_SPURIOUS; 1051 } 1052 1053 if (cs->hppi.grp != GICV3_G0) { 1054 /* Indicate to EL3 that there's a Group 1 interrupt for the other 1055 * state pending. 1056 */ 1057 return irq_is_secure ? INTID_SECURE : INTID_NONSECURE; 1058 } 1059 1060 return cs->hppi.irq; 1061 } 1062 1063 static uint64_t icc_hppir1_value(GICv3CPUState *cs, CPUARMState *env) 1064 { 1065 /* Return the highest priority pending interrupt register value 1066 * for group 1. 1067 */ 1068 bool irq_is_secure; 1069 1070 if (cs->hppi.prio == 0xff) { 1071 return INTID_SPURIOUS; 1072 } 1073 1074 /* Check whether we can return the interrupt or if we should return 1075 * a special identifier, as per the CheckGroup1ForSpecialIdentifiers 1076 * pseudocode. (We can simplify a little because for us ICC_SRE_EL1.RM 1077 * is always zero.) 
1078 */ 1079 irq_is_secure = (!(cs->gic->gicd_ctlr & GICD_CTLR_DS) && 1080 (cs->hppi.grp != GICV3_G1NS)); 1081 1082 if (cs->hppi.grp == GICV3_G0) { 1083 /* Group 0 interrupts not visible via HPPIR1 */ 1084 return INTID_SPURIOUS; 1085 } 1086 if (irq_is_secure) { 1087 if (!arm_is_secure(env)) { 1088 /* Secure interrupts not visible in Non-secure */ 1089 return INTID_SPURIOUS; 1090 } 1091 } else if (!arm_is_el3_or_mon(env) && arm_is_secure(env)) { 1092 /* Group 1 non-secure interrupts not visible in Secure EL1 */ 1093 return INTID_SPURIOUS; 1094 } 1095 1096 return cs->hppi.irq; 1097 } 1098 1099 static uint64_t icc_iar0_read(CPUARMState *env, const ARMCPRegInfo *ri) 1100 { 1101 GICv3CPUState *cs = icc_cs_from_env(env); 1102 uint64_t intid; 1103 1104 if (icv_access(env, HCR_FMO)) { 1105 return icv_iar_read(env, ri); 1106 } 1107 1108 if (!icc_hppi_can_preempt(cs)) { 1109 intid = INTID_SPURIOUS; 1110 } else { 1111 intid = icc_hppir0_value(cs, env); 1112 } 1113 1114 if (!gicv3_intid_is_special(intid)) { 1115 icc_activate_irq(cs, intid); 1116 } 1117 1118 trace_gicv3_icc_iar0_read(gicv3_redist_affid(cs), intid); 1119 return intid; 1120 } 1121 1122 static uint64_t icc_iar1_read(CPUARMState *env, const ARMCPRegInfo *ri) 1123 { 1124 GICv3CPUState *cs = icc_cs_from_env(env); 1125 uint64_t intid; 1126 1127 if (icv_access(env, HCR_IMO)) { 1128 return icv_iar_read(env, ri); 1129 } 1130 1131 if (!icc_hppi_can_preempt(cs)) { 1132 intid = INTID_SPURIOUS; 1133 } else { 1134 intid = icc_hppir1_value(cs, env); 1135 } 1136 1137 if (!gicv3_intid_is_special(intid)) { 1138 icc_activate_irq(cs, intid); 1139 } 1140 1141 trace_gicv3_icc_iar1_read(gicv3_redist_affid(cs), intid); 1142 return intid; 1143 } 1144 1145 static void icc_drop_prio(GICv3CPUState *cs, int grp) 1146 { 1147 /* Drop the priority of the currently active interrupt in 1148 * the specified group. 
1149 * 1150 * Note that we can guarantee (because of the requirement to nest 1151 * ICC_IAR reads [which activate an interrupt and raise priority] 1152 * with ICC_EOIR writes [which drop the priority for the interrupt]) 1153 * that the interrupt we're being called for is the highest priority 1154 * active interrupt, meaning that it has the lowest set bit in the 1155 * APR registers. 1156 * 1157 * If the guest does not honour the ordering constraints then the 1158 * behaviour of the GIC is UNPREDICTABLE, which for us means that 1159 * the values of the APR registers might become incorrect and the 1160 * running priority will be wrong, so interrupts that should preempt 1161 * might not do so, and interrupts that should not preempt might do so. 1162 */ 1163 int i; 1164 1165 for (i = 0; i < ARRAY_SIZE(cs->icc_apr[grp]); i++) { 1166 uint64_t *papr = &cs->icc_apr[grp][i]; 1167 1168 if (!*papr) { 1169 continue; 1170 } 1171 /* Clear the lowest set bit */ 1172 *papr &= *papr - 1; 1173 break; 1174 } 1175 1176 /* running priority change means we need an update for this cpu i/f */ 1177 gicv3_cpuif_update(cs); 1178 } 1179 1180 static bool icc_eoi_split(CPUARMState *env, GICv3CPUState *cs) 1181 { 1182 /* Return true if we should split priority drop and interrupt 1183 * deactivation, ie whether the relevant EOIMode bit is set. 1184 */ 1185 if (arm_is_el3_or_mon(env)) { 1186 return cs->icc_ctlr_el3 & ICC_CTLR_EL3_EOIMODE_EL3; 1187 } 1188 if (arm_is_secure_below_el3(env)) { 1189 return cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_EOIMODE; 1190 } else { 1191 return cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_EOIMODE; 1192 } 1193 } 1194 1195 static int icc_highest_active_group(GICv3CPUState *cs) 1196 { 1197 /* Return the group with the highest priority active interrupt. 1198 * We can do this by just comparing the APRs to see which one 1199 * has the lowest set bit. 1200 * (If more than one group is active at the same priority then 1201 * we're in UNPREDICTABLE territory.) 
1202 */ 1203 int i; 1204 1205 for (i = 0; i < ARRAY_SIZE(cs->icc_apr[0]); i++) { 1206 int g0ctz = ctz32(cs->icc_apr[GICV3_G0][i]); 1207 int g1ctz = ctz32(cs->icc_apr[GICV3_G1][i]); 1208 int g1nsctz = ctz32(cs->icc_apr[GICV3_G1NS][i]); 1209 1210 if (g1nsctz < g0ctz && g1nsctz < g1ctz) { 1211 return GICV3_G1NS; 1212 } 1213 if (g1ctz < g0ctz) { 1214 return GICV3_G1; 1215 } 1216 if (g0ctz < 32) { 1217 return GICV3_G0; 1218 } 1219 } 1220 /* No set active bits? UNPREDICTABLE; return -1 so the caller 1221 * ignores the spurious EOI attempt. 1222 */ 1223 return -1; 1224 } 1225 1226 static void icc_deactivate_irq(GICv3CPUState *cs, int irq) 1227 { 1228 if (irq < GIC_INTERNAL) { 1229 cs->gicr_iactiver0 = deposit32(cs->gicr_iactiver0, irq, 1, 0); 1230 gicv3_redist_update(cs); 1231 } else { 1232 gicv3_gicd_active_clear(cs->gic, irq); 1233 gicv3_update(cs->gic, irq, 1); 1234 } 1235 } 1236 1237 static bool icv_eoi_split(CPUARMState *env, GICv3CPUState *cs) 1238 { 1239 /* Return true if we should split priority drop and interrupt 1240 * deactivation, ie whether the virtual EOIMode bit is set. 1241 */ 1242 return cs->ich_vmcr_el2 & ICH_VMCR_EL2_VEOIM; 1243 } 1244 1245 static int icv_find_active(GICv3CPUState *cs, int irq) 1246 { 1247 /* Given an interrupt number for an active interrupt, return the index 1248 * of the corresponding list register, or -1 if there is no match. 1249 * Corresponds to FindActiveVirtualInterrupt pseudocode. 
1250 */ 1251 int i; 1252 1253 for (i = 0; i < cs->num_list_regs; i++) { 1254 uint64_t lr = cs->ich_lr_el2[i]; 1255 1256 if ((lr & ICH_LR_EL2_STATE_ACTIVE_BIT) && ich_lr_vintid(lr) == irq) { 1257 return i; 1258 } 1259 } 1260 1261 return -1; 1262 } 1263 1264 static void icv_deactivate_irq(GICv3CPUState *cs, int idx) 1265 { 1266 /* Deactivate the interrupt in the specified list register index */ 1267 uint64_t lr = cs->ich_lr_el2[idx]; 1268 1269 if (lr & ICH_LR_EL2_HW) { 1270 /* Deactivate the associated physical interrupt */ 1271 int pirq = ich_lr_pintid(lr); 1272 1273 if (pirq < INTID_SECURE) { 1274 icc_deactivate_irq(cs, pirq); 1275 } 1276 } 1277 1278 /* Clear the 'active' part of the state, so ActivePending->Pending 1279 * and Active->Invalid. 1280 */ 1281 lr &= ~ICH_LR_EL2_STATE_ACTIVE_BIT; 1282 cs->ich_lr_el2[idx] = lr; 1283 } 1284 1285 static void icv_increment_eoicount(GICv3CPUState *cs) 1286 { 1287 /* Increment the EOICOUNT field in ICH_HCR_EL2 */ 1288 int eoicount = extract64(cs->ich_hcr_el2, ICH_HCR_EL2_EOICOUNT_SHIFT, 1289 ICH_HCR_EL2_EOICOUNT_LENGTH); 1290 1291 cs->ich_hcr_el2 = deposit64(cs->ich_hcr_el2, ICH_HCR_EL2_EOICOUNT_SHIFT, 1292 ICH_HCR_EL2_EOICOUNT_LENGTH, eoicount + 1); 1293 } 1294 1295 static int icv_drop_prio(GICv3CPUState *cs) 1296 { 1297 /* Drop the priority of the currently active virtual interrupt 1298 * (favouring group 0 if there is a set active bit at 1299 * the same priority for both group 0 and group 1). 1300 * Return the priority value for the bit we just cleared, 1301 * or 0xff if no bits were set in the AP registers at all. 1302 * Note that though the ich_apr[] are uint64_t only the low 1303 * 32 bits are actually relevant. 
1304 */ 1305 int i; 1306 int aprmax = 1 << (cs->vprebits - 5); 1307 1308 assert(aprmax <= ARRAY_SIZE(cs->ich_apr[0])); 1309 1310 for (i = 0; i < aprmax; i++) { 1311 uint64_t *papr0 = &cs->ich_apr[GICV3_G0][i]; 1312 uint64_t *papr1 = &cs->ich_apr[GICV3_G1NS][i]; 1313 int apr0count, apr1count; 1314 1315 if (!*papr0 && !*papr1) { 1316 continue; 1317 } 1318 1319 /* We can't just use the bit-twiddling hack icc_drop_prio() does 1320 * because we need to return the bit number we cleared so 1321 * it can be compared against the list register's priority field. 1322 */ 1323 apr0count = ctz32(*papr0); 1324 apr1count = ctz32(*papr1); 1325 1326 if (apr0count <= apr1count) { 1327 *papr0 &= *papr0 - 1; 1328 return (apr0count + i * 32) << (icv_min_vbpr(cs) + 1); 1329 } else { 1330 *papr1 &= *papr1 - 1; 1331 return (apr1count + i * 32) << (icv_min_vbpr(cs) + 1); 1332 } 1333 } 1334 return 0xff; 1335 } 1336 1337 static void icv_dir_write(CPUARMState *env, const ARMCPRegInfo *ri, 1338 uint64_t value) 1339 { 1340 /* Deactivate interrupt */ 1341 GICv3CPUState *cs = icc_cs_from_env(env); 1342 int idx; 1343 int irq = value & 0xffffff; 1344 1345 trace_gicv3_icv_dir_write(gicv3_redist_affid(cs), value); 1346 1347 if (irq >= GICV3_MAXIRQ) { 1348 /* Also catches special interrupt numbers and LPIs */ 1349 return; 1350 } 1351 1352 if (!icv_eoi_split(env, cs)) { 1353 return; 1354 } 1355 1356 idx = icv_find_active(cs, irq); 1357 1358 if (idx < 0) { 1359 /* No list register matching this, so increment the EOI count 1360 * (might trigger a maintenance interrupt) 1361 */ 1362 icv_increment_eoicount(cs); 1363 } else { 1364 icv_deactivate_irq(cs, idx); 1365 } 1366 1367 gicv3_cpuif_virt_update(cs); 1368 } 1369 1370 static void icv_eoir_write(CPUARMState *env, const ARMCPRegInfo *ri, 1371 uint64_t value) 1372 { 1373 /* End of Interrupt */ 1374 GICv3CPUState *cs = icc_cs_from_env(env); 1375 int irq = value & 0xffffff; 1376 int grp = ri->crm == 8 ? 
GICV3_G0 : GICV3_G1NS;
    int idx, dropprio;

    trace_gicv3_icv_eoir_write(ri->crm == 8 ? 0 : 1,
                               gicv3_redist_affid(cs), value);

    if (gicv3_intid_is_special(irq)) {
        return;
    }

    /* We implement the IMPDEF choice of "drop priority before doing
     * error checks" (because that lets us avoid scanning the AP
     * registers twice).
     */
    dropprio = icv_drop_prio(cs);
    if (dropprio == 0xff) {
        /* No active interrupt. It is CONSTRAINED UNPREDICTABLE
         * whether the list registers are checked in this
         * situation; we choose not to.
         */
        return;
    }

    idx = icv_find_active(cs, irq);

    if (idx < 0) {
        /* No valid list register corresponding to EOI ID */
        icv_increment_eoicount(cs);
    } else {
        uint64_t lr = cs->ich_lr_el2[idx];
        int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;
        int lr_gprio = ich_lr_prio(lr) & icv_gprio_mask(cs, grp);

        /* Only deactivate if the LR's group and group-priority match
         * the register written and the priority we just dropped.
         */
        if (thisgrp == grp && lr_gprio == dropprio) {
            if (!icv_eoi_split(env, cs)) {
                /* Priority drop and deactivate not split: deactivate irq now */
                icv_deactivate_irq(cs, idx);
            }
        }
    }

    gicv3_cpuif_virt_update(cs);
}

static void icc_eoir_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* End of Interrupt */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int irq = value & 0xffffff;
    int grp;
    bool is_eoir0 = ri->crm == 8;

    if (icv_access(env, is_eoir0 ? HCR_FMO : HCR_IMO)) {
        icv_eoir_write(env, ri, value);
        return;
    }

    trace_gicv3_icc_eoir_write(is_eoir0 ? 0 : 1,
                               gicv3_redist_affid(cs), value);

    if ((irq >= cs->gic->num_irq) &&
        !(cs->gic->lpi_enable && (irq >= GICV3_LPI_INTID_START))) {
        /* This handles two cases:
         * 1. If software writes the ID of a spurious interrupt [ie 1020-1023]
         * to the GICC_EOIR, the GIC ignores that write.
         * 2. If software writes the number of a non-existent interrupt
         * this must be a subcase of "value written does not match the last
         * valid interrupt value read from the Interrupt Acknowledge
         * register" and so this is UNPREDICTABLE. We choose to ignore it.
         */
        return;
    }

    /* Check that the EOI is permitted: the highest-priority active
     * interrupt's group must match the EOIR register written and be
     * visible from the current security state.
     */
    grp = icc_highest_active_group(cs);
    switch (grp) {
    case GICV3_G0:
        if (!is_eoir0) {
            return;
        }
        if (!(cs->gic->gicd_ctlr & GICD_CTLR_DS)
            && arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env)) {
            return;
        }
        break;
    case GICV3_G1:
        if (is_eoir0) {
            return;
        }
        if (!arm_is_secure(env)) {
            return;
        }
        break;
    case GICV3_G1NS:
        if (is_eoir0) {
            return;
        }
        if (!arm_is_el3_or_mon(env) && arm_is_secure(env)) {
            return;
        }
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: IRQ %d isn't active\n", __func__, irq);
        return;
    }

    icc_drop_prio(cs, grp);

    if (!icc_eoi_split(env, cs)) {
        /* Priority drop and deactivate not split: deactivate irq now */
        icc_deactivate_irq(cs, irq);
    }
}

/* Read ICC_HPPIR0_EL1 (highest priority pending group 0 interrupt),
 * redirecting to the ICV_ register when virtualized.
 */
static uint64_t icc_hppir0_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t value;

    if (icv_access(env, HCR_FMO)) {
        return icv_hppir_read(env, ri);
    }

    value = icc_hppir0_value(cs, env);
    trace_gicv3_icc_hppir0_read(gicv3_redist_affid(cs), value);
    return value;
}

/* Read ICC_HPPIR1_EL1: as icc_hppir0_read, but for group 1 */
static uint64_t icc_hppir1_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t value;

    if (icv_access(env, HCR_IMO)) {
        return icv_hppir_read(env, ri);
    }

    value = icc_hppir1_value(cs, env);
    trace_gicv3_icc_hppir1_read(gicv3_redist_affid(cs), value);
    return value;
}

static uint64_t icc_bpr_read(CPUARMState *env, const ARMCPRegInfo
*ri)
{
    /* Read ICC_BPR0_EL1 (crm == 8) or ICC_BPR1_EL1, handling the
     * CBPR aliasing cases where BPR1 accesses really act on BPR0.
     */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1;
    bool satinc = false;
    uint64_t bpr;

    if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
        return icv_bpr_read(env, ri);
    }

    if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
        grp = GICV3_G1NS;
    }

    if (grp == GICV3_G1 && !arm_is_el3_or_mon(env) &&
        (cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR)) {
        /* CBPR_EL1S means secure EL1 or AArch32 EL3 !Mon BPR1 accesses
         * modify BPR0
         */
        grp = GICV3_G0;
    }

    if (grp == GICV3_G1NS && arm_current_el(env) < 3 &&
        (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR)) {
        /* reads return bpr0 + 1 sat to 7, writes ignored */
        grp = GICV3_G0;
        satinc = true;
    }

    bpr = cs->icc_bpr[grp];
    if (satinc) {
        bpr++;
        bpr = MIN(bpr, 7);
    }

    trace_gicv3_icc_bpr_read(ri->crm == 8 ? 0 : 1, gicv3_redist_affid(cs), bpr);

    return bpr;
}

static void icc_bpr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Write ICC_BPR0_EL1 or ICC_BPR1_EL1; mirrors the aliasing logic
     * of icc_bpr_read() and clamps the value to the group's minimum BPR.
     */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1;
    uint64_t minval;

    if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
        icv_bpr_write(env, ri, value);
        return;
    }

    trace_gicv3_icc_bpr_write(ri->crm == 8 ? 0 : 1,
                              gicv3_redist_affid(cs), value);

    if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
        grp = GICV3_G1NS;
    }

    if (grp == GICV3_G1 && !arm_is_el3_or_mon(env) &&
        (cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR)) {
        /* CBPR_EL1S means secure EL1 or AArch32 EL3 !Mon BPR1 accesses
         * modify BPR0
         */
        grp = GICV3_G0;
    }

    if (grp == GICV3_G1NS && arm_current_el(env) < 3 &&
        (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR)) {
        /* reads return bpr0 + 1 sat to 7, writes ignored */
        return;
    }

    minval = (grp == GICV3_G1NS) ? GIC_MIN_BPR_NS : GIC_MIN_BPR;
    if (value < minval) {
        value = minval;
    }

    cs->icc_bpr[grp] = value & 7;
    gicv3_cpuif_update(cs);
}

/* Read an ICC_AP0Rn/AP1Rn active-priority register: ri->crm selects
 * the group (AP1R if crm bit 0 set), ri->opc2 the register index.
 */
static uint64_t icc_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t value;

    int regno = ri->opc2 & 3;
    int grp = (ri->crm & 1) ? GICV3_G1 : GICV3_G0;

    if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
        return icv_ap_read(env, ri);
    }

    if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
        grp = GICV3_G1NS;
    }

    value = cs->icc_apr[grp][regno];

    trace_gicv3_icc_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
    return value;
}

static void icc_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    GICv3CPUState *cs = icc_cs_from_env(env);

    int regno = ri->opc2 & 3;
    int grp = (ri->crm & 1) ? GICV3_G1 : GICV3_G0;

    if (icv_access(env, grp == GICV3_G0 ?
HCR_FMO : HCR_IMO)) {
        icv_ap_write(env, ri, value);
        return;
    }

    trace_gicv3_icc_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);

    if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
        grp = GICV3_G1NS;
    }

    /* It's not possible to claim that a Non-secure interrupt is active
     * at a priority outside the Non-secure range (128..255), since this
     * would otherwise allow malicious NS code to block delivery of S interrupts
     * by writing a bad value to these registers.
     */
    if (grp == GICV3_G1NS && regno < 2 && arm_feature(env, ARM_FEATURE_EL3)) {
        return;
    }

    cs->icc_apr[grp][regno] = value & 0xFFFFFFFFU;
    gicv3_cpuif_update(cs);
}

static void icc_dir_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Deactivate interrupt */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int irq = value & 0xffffff;
    bool irq_is_secure, single_sec_state, irq_is_grp0;
    bool route_fiq_to_el3, route_irq_to_el3, route_fiq_to_el2, route_irq_to_el2;

    if (icv_access(env, HCR_FMO | HCR_IMO)) {
        icv_dir_write(env, ri, value);
        return;
    }

    trace_gicv3_icc_dir_write(gicv3_redist_affid(cs), value);

    if (irq >= cs->gic->num_irq) {
        /* Also catches special interrupt numbers and LPIs */
        return;
    }

    /* DIR only has an effect when priority drop and deactivation
     * are split (EOImode set); otherwise the write is ignored.
     */
    if (!icc_eoi_split(env, cs)) {
        return;
    }

    int grp = gicv3_irq_group(cs->gic, cs, irq);

    single_sec_state = cs->gic->gicd_ctlr & GICD_CTLR_DS;
    irq_is_secure = !single_sec_state && (grp != GICV3_G1NS);
    irq_is_grp0 = grp == GICV3_G0;

    /* Check whether we're allowed to deactivate this interrupt based
     * on its group and the current CPU state.
     * These checks are laid out to correspond to the spec's pseudocode.
     */
    route_fiq_to_el3 = env->cp15.scr_el3 & SCR_FIQ;
    route_irq_to_el3 = env->cp15.scr_el3 & SCR_IRQ;
    /* No need to include !IsSecure in route_*_to_el2 as it's only
     * tested in cases where we know !IsSecure is true.
     */
    uint64_t hcr_el2 = arm_hcr_el2_eff(env);
    route_fiq_to_el2 = hcr_el2 & HCR_FMO;
    route_irq_to_el2 = hcr_el2 & HCR_IMO;

    switch (arm_current_el(env)) {
    case 3:
        /* EL3 may always deactivate */
        break;
    case 2:
        if (single_sec_state && irq_is_grp0 && !route_fiq_to_el3) {
            break;
        }
        if (!irq_is_secure && !irq_is_grp0 && !route_irq_to_el3) {
            break;
        }
        return;
    case 1:
        if (!arm_is_secure_below_el3(env)) {
            if (single_sec_state && irq_is_grp0 &&
                !route_fiq_to_el3 && !route_fiq_to_el2) {
                break;
            }
            if (!irq_is_secure && !irq_is_grp0 &&
                !route_irq_to_el3 && !route_irq_to_el2) {
                break;
            }
        } else {
            if (irq_is_grp0 && !route_fiq_to_el3) {
                break;
            }
            if (!irq_is_grp0 &&
                (!irq_is_secure || !single_sec_state) &&
                !route_irq_to_el3) {
                break;
            }
        }
        return;
    default:
        g_assert_not_reached();
    }

    icc_deactivate_irq(cs, irq);
}

static uint64_t icc_rpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Read ICC_RPR_EL1: the running (highest active) priority */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int prio;

    if (icv_access(env, HCR_FMO | HCR_IMO)) {
        return icv_rpr_read(env, ri);
    }

    prio = icc_highest_active_prio(cs);

    if (arm_feature(env, ARM_FEATURE_EL3) &&
        !arm_is_secure(env) && (env->cp15.scr_el3 & SCR_FIQ)) {
        /* NS GIC access and Group 0 is inaccessible to NS */
        if ((prio & 0x80) == 0) {
            /* NS mustn't see priorities in the Secure half of the range */
            prio = 0;
        } else if (prio != 0xff) {
            /* Non-idle priority: show the Non-secure view of it */
            prio = (prio << 1) & 0xff;
        }
    }

    trace_gicv3_icc_rpr_read(gicv3_redist_affid(cs),
prio);
    return prio;
}

static void icc_generate_sgi(CPUARMState *env, GICv3CPUState *cs,
                             uint64_t value, int grp, bool ns)
{
    /* Deliver an SGI described by @value (an ICC_SGI*R register write)
     * to the target CPUs, either broadcast (IRM set) or by affinity
     * plus target list.
     */
    GICv3State *s = cs->gic;

    /* Extract Aff3/Aff2/Aff1 and shift into the bottom 24 bits */
    uint64_t aff = extract64(value, 48, 8) << 16 |
        extract64(value, 32, 8) << 8 |
        extract64(value, 16, 8);
    uint32_t targetlist = extract64(value, 0, 16);
    uint32_t irq = extract64(value, 24, 4);
    bool irm = extract64(value, 40, 1);
    int i;

    if (grp == GICV3_G1 && s->gicd_ctlr & GICD_CTLR_DS) {
        /* If GICD_CTLR.DS == 1, the Distributor treats Secure Group 1
         * interrupts as Group 0 interrupts and must send Secure Group 0
         * interrupts to the target CPUs.
         */
        grp = GICV3_G0;
    }

    trace_gicv3_icc_generate_sgi(gicv3_redist_affid(cs), irq, irm,
                                 aff, targetlist);

    for (i = 0; i < s->num_cpu; i++) {
        GICv3CPUState *ocs = &s->cpu[i];

        if (irm) {
            /* IRM == 1 : route to all CPUs except self */
            if (cs == ocs) {
                continue;
            }
        } else {
            /* IRM == 0 : route to Aff3.Aff2.Aff1.n for all n in [0..15]
             * where the corresponding bit is set in targetlist
             */
            int aff0;

            if (ocs->gicr_typer >> 40 != aff) {
                continue;
            }
            aff0 = extract64(ocs->gicr_typer, 32, 8);
            if (aff0 > 15 || extract32(targetlist, aff0, 1) == 0) {
                continue;
            }
        }

        /* The redistributor will check against its own GICR_NSACR as needed */
        gicv3_redist_send_sgi(ocs, grp, irq, ns);
    }
}

static void icc_sgi0r_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* Generate Secure Group 0 SGI. */
    GICv3CPUState *cs = icc_cs_from_env(env);
    bool ns = !arm_is_secure(env);

    icc_generate_sgi(env, cs, value, GICV3_G0, ns);
}

static void icc_sgi1r_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* Generate Group 1 SGI for the current Security state */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int grp;
    bool ns = !arm_is_secure(env);

    grp = ns ? GICV3_G1NS : GICV3_G1;
    icc_generate_sgi(env, cs, value, grp, ns);
}

static void icc_asgi1r_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* Generate Group 1 SGI for the Security state that is not
     * the current state
     */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int grp;
    bool ns = !arm_is_secure(env);

    grp = ns ? GICV3_G1 : GICV3_G1NS;
    icc_generate_sgi(env, cs, value, grp, ns);
}

/* Read ICC_IGRPEN0_EL1 (opc2 bit 0 clear) or ICC_IGRPEN1_EL1 */
static uint64_t icc_igrpen_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    int grp = ri->opc2 & 1 ? GICV3_G1 : GICV3_G0;
    uint64_t value;

    if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
        return icv_igrpen_read(env, ri);
    }

    if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
        grp = GICV3_G1NS;
    }

    value = cs->icc_igrpen[grp];
    trace_gicv3_icc_igrpen_read(ri->opc2 & 1 ? 1 : 0,
                                gicv3_redist_affid(cs), value);
    return value;
}

static void icc_igrpen_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    int grp = ri->opc2 & 1 ? GICV3_G1 : GICV3_G0;

    if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
        icv_igrpen_write(env, ri, value);
        return;
    }

    trace_gicv3_icc_igrpen_write(ri->opc2 & 1 ?
1 : 0,
                                 gicv3_redist_affid(cs), value);

    if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
        grp = GICV3_G1NS;
    }

    /* Only the Enable bit is writeable */
    cs->icc_igrpen[grp] = value & ICC_IGRPEN_ENABLE;
    gicv3_cpuif_update(cs);
}

static uint64_t icc_igrpen1_el3_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t value;

    /* IGRPEN1_EL3 bits 0 and 1 are r/w aliases into IGRPEN1_EL1 NS and S */
    value = cs->icc_igrpen[GICV3_G1NS] | (cs->icc_igrpen[GICV3_G1] << 1);
    trace_gicv3_icc_igrpen1_el3_read(gicv3_redist_affid(cs), value);
    return value;
}

static void icc_igrpen1_el3_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    GICv3CPUState *cs = icc_cs_from_env(env);

    trace_gicv3_icc_igrpen1_el3_write(gicv3_redist_affid(cs), value);

    /* IGRPEN1_EL3 bits 0 and 1 are r/w aliases into IGRPEN1_EL1 NS and S */
    cs->icc_igrpen[GICV3_G1NS] = extract32(value, 0, 1);
    cs->icc_igrpen[GICV3_G1] = extract32(value, 1, 1);
    gicv3_cpuif_update(cs);
}

/* Read the banked ICC_CTLR_EL1 for the current security state */
static uint64_t icc_ctlr_el1_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    int bank = gicv3_use_ns_bank(env) ? GICV3_NS : GICV3_S;
    uint64_t value;

    if (icv_access(env, HCR_FMO | HCR_IMO)) {
        return icv_ctlr_read(env, ri);
    }

    value = cs->icc_ctlr_el1[bank];
    trace_gicv3_icc_ctlr_read(gicv3_redist_affid(cs), value);
    return value;
}

static void icc_ctlr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    int bank = gicv3_use_ns_bank(env) ?
GICV3_NS : GICV3_S; 1941 uint64_t mask; 1942 1943 if (icv_access(env, HCR_FMO | HCR_IMO)) { 1944 icv_ctlr_write(env, ri, value); 1945 return; 1946 } 1947 1948 trace_gicv3_icc_ctlr_write(gicv3_redist_affid(cs), value); 1949 1950 /* Only CBPR and EOIMODE can be RW; 1951 * for us PMHE is RAZ/WI (we don't implement 1-of-N interrupts or 1952 * the asseciated priority-based routing of them); 1953 * if EL3 is implemented and GICD_CTLR.DS == 0, then PMHE and CBPR are RO. 1954 */ 1955 if (arm_feature(env, ARM_FEATURE_EL3) && 1956 ((cs->gic->gicd_ctlr & GICD_CTLR_DS) == 0)) { 1957 mask = ICC_CTLR_EL1_EOIMODE; 1958 } else { 1959 mask = ICC_CTLR_EL1_CBPR | ICC_CTLR_EL1_EOIMODE; 1960 } 1961 1962 cs->icc_ctlr_el1[bank] &= ~mask; 1963 cs->icc_ctlr_el1[bank] |= (value & mask); 1964 gicv3_cpuif_update(cs); 1965 } 1966 1967 1968 static uint64_t icc_ctlr_el3_read(CPUARMState *env, const ARMCPRegInfo *ri) 1969 { 1970 GICv3CPUState *cs = icc_cs_from_env(env); 1971 uint64_t value; 1972 1973 value = cs->icc_ctlr_el3; 1974 if (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_EOIMODE) { 1975 value |= ICC_CTLR_EL3_EOIMODE_EL1NS; 1976 } 1977 if (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR) { 1978 value |= ICC_CTLR_EL3_CBPR_EL1NS; 1979 } 1980 if (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_EOIMODE) { 1981 value |= ICC_CTLR_EL3_EOIMODE_EL1S; 1982 } 1983 if (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR) { 1984 value |= ICC_CTLR_EL3_CBPR_EL1S; 1985 } 1986 1987 trace_gicv3_icc_ctlr_el3_read(gicv3_redist_affid(cs), value); 1988 return value; 1989 } 1990 1991 static void icc_ctlr_el3_write(CPUARMState *env, const ARMCPRegInfo *ri, 1992 uint64_t value) 1993 { 1994 GICv3CPUState *cs = icc_cs_from_env(env); 1995 uint64_t mask; 1996 1997 trace_gicv3_icc_ctlr_el3_write(gicv3_redist_affid(cs), value); 1998 1999 /* *_EL1NS and *_EL1S bits are aliases into the ICC_CTLR_EL1 bits. 
*/
    cs->icc_ctlr_el1[GICV3_NS] &= ~(ICC_CTLR_EL1_CBPR | ICC_CTLR_EL1_EOIMODE);
    if (value & ICC_CTLR_EL3_EOIMODE_EL1NS) {
        cs->icc_ctlr_el1[GICV3_NS] |= ICC_CTLR_EL1_EOIMODE;
    }
    if (value & ICC_CTLR_EL3_CBPR_EL1NS) {
        cs->icc_ctlr_el1[GICV3_NS] |= ICC_CTLR_EL1_CBPR;
    }

    cs->icc_ctlr_el1[GICV3_S] &= ~(ICC_CTLR_EL1_CBPR | ICC_CTLR_EL1_EOIMODE);
    if (value & ICC_CTLR_EL3_EOIMODE_EL1S) {
        cs->icc_ctlr_el1[GICV3_S] |= ICC_CTLR_EL1_EOIMODE;
    }
    if (value & ICC_CTLR_EL3_CBPR_EL1S) {
        cs->icc_ctlr_el1[GICV3_S] |= ICC_CTLR_EL1_CBPR;
    }

    /* The only bit stored in icc_ctlr_el3 which is writeable is EOIMODE_EL3: */
    mask = ICC_CTLR_EL3_EOIMODE_EL3;

    cs->icc_ctlr_el3 &= ~mask;
    cs->icc_ctlr_el3 |= (value & mask);
    gicv3_cpuif_update(cs);
}

/* Access check for registers trapped by both SCR_EL3.IRQ and .FIQ
 * routing, and by ICH_HCR_EL2.TC for NS EL1.
 */
static CPAccessResult gicv3_irqfiq_access(CPUARMState *env,
                                          const ARMCPRegInfo *ri, bool isread)
{
    CPAccessResult r = CP_ACCESS_OK;
    GICv3CPUState *cs = icc_cs_from_env(env);
    int el = arm_current_el(env);

    if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TC) &&
        el == 1 && !arm_is_secure_below_el3(env)) {
        /* Takes priority over a possible EL3 trap */
        return CP_ACCESS_TRAP_EL2;
    }

    if ((env->cp15.scr_el3 & (SCR_FIQ | SCR_IRQ)) == (SCR_FIQ | SCR_IRQ)) {
        switch (el) {
        case 1:
            /* Note that arm_hcr_el2_eff takes secure state into account.  */
            if ((arm_hcr_el2_eff(env) & (HCR_IMO | HCR_FMO)) == 0) {
                r = CP_ACCESS_TRAP_EL3;
            }
            break;
        case 2:
            r = CP_ACCESS_TRAP_EL3;
            break;
        case 3:
            if (!is_a64(env) && !arm_is_el3_or_mon(env)) {
                r = CP_ACCESS_TRAP_EL3;
            }
            break;
        default:
            g_assert_not_reached();
        }
    }

    /* Deliver as a generic trap if EL3 is not AArch64 */
    if (r == CP_ACCESS_TRAP_EL3 && !arm_el_is_aa64(env, 3)) {
        r = CP_ACCESS_TRAP;
    }
    return r;
}

static CPAccessResult gicv3_dir_access(CPUARMState *env,
                                       const ARMCPRegInfo *ri, bool isread)
{
    /* Access check for ICC_DIR_EL1: also trapped by ICH_HCR_EL2.TDIR */
    GICv3CPUState *cs = icc_cs_from_env(env);

    if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TDIR) &&
        arm_current_el(env) == 1 && !arm_is_secure_below_el3(env)) {
        /* Takes priority over a possible EL3 trap */
        return CP_ACCESS_TRAP_EL2;
    }

    return gicv3_irqfiq_access(env, ri, isread);
}

static CPAccessResult gicv3_sgi_access(CPUARMState *env,
                                       const ARMCPRegInfo *ri, bool isread)
{
    /* Access check for the ICC_SGI*R registers */
    if (arm_current_el(env) == 1 &&
        (arm_hcr_el2_eff(env) & (HCR_IMO | HCR_FMO)) != 0) {
        /* Takes priority over a possible EL3 trap */
        return CP_ACCESS_TRAP_EL2;
    }

    return gicv3_irqfiq_access(env, ri, isread);
}

/* Access check for group 0 (FIQ-routed) registers */
static CPAccessResult gicv3_fiq_access(CPUARMState *env,
                                       const ARMCPRegInfo *ri, bool isread)
{
    CPAccessResult r = CP_ACCESS_OK;
    GICv3CPUState *cs = icc_cs_from_env(env);
    int el = arm_current_el(env);

    if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TALL0) &&
        el == 1 && !arm_is_secure_below_el3(env)) {
        /* Takes priority over a possible EL3 trap */
        return CP_ACCESS_TRAP_EL2;
    }

    if (env->cp15.scr_el3 & SCR_FIQ) {
        switch (el) {
        case 1:
            if ((arm_hcr_el2_eff(env) & HCR_FMO) == 0) {
                r = CP_ACCESS_TRAP_EL3;
            }
            break;
        case 2:
            r = CP_ACCESS_TRAP_EL3;
            break;
        case 3:
            if (!is_a64(env) && !arm_is_el3_or_mon(env)) {
                r =
CP_ACCESS_TRAP_EL3;
            }
            break;
        default:
            g_assert_not_reached();
        }
    }

    /* Deliver as a generic trap if EL3 is not AArch64 */
    if (r == CP_ACCESS_TRAP_EL3 && !arm_el_is_aa64(env, 3)) {
        r = CP_ACCESS_TRAP;
    }
    return r;
}

/* Access check for group 1 (IRQ-routed) registers */
static CPAccessResult gicv3_irq_access(CPUARMState *env,
                                       const ARMCPRegInfo *ri, bool isread)
{
    CPAccessResult r = CP_ACCESS_OK;
    GICv3CPUState *cs = icc_cs_from_env(env);
    int el = arm_current_el(env);

    if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TALL1) &&
        el == 1 && !arm_is_secure_below_el3(env)) {
        /* Takes priority over a possible EL3 trap */
        return CP_ACCESS_TRAP_EL2;
    }

    if (env->cp15.scr_el3 & SCR_IRQ) {
        switch (el) {
        case 1:
            if ((arm_hcr_el2_eff(env) & HCR_IMO) == 0) {
                r = CP_ACCESS_TRAP_EL3;
            }
            break;
        case 2:
            r = CP_ACCESS_TRAP_EL3;
            break;
        case 3:
            if (!is_a64(env) && !arm_is_el3_or_mon(env)) {
                r = CP_ACCESS_TRAP_EL3;
            }
            break;
        default:
            g_assert_not_reached();
        }
    }

    /* Deliver as a generic trap if EL3 is not AArch64 */
    if (r == CP_ACCESS_TRAP_EL3 && !arm_el_is_aa64(env, 3)) {
        r = CP_ACCESS_TRAP;
    }
    return r;
}

/* Reset all the CPU interface state (both physical ICC_ and virtual
 * ICH_/ICV_ registers) for this CPU.
 */
static void icc_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);

    cs->icc_ctlr_el1[GICV3_S] = ICC_CTLR_EL1_A3V |
        (1 << ICC_CTLR_EL1_IDBITS_SHIFT) |
        (7 << ICC_CTLR_EL1_PRIBITS_SHIFT);
    cs->icc_ctlr_el1[GICV3_NS] = ICC_CTLR_EL1_A3V |
        (1 << ICC_CTLR_EL1_IDBITS_SHIFT) |
        (7 << ICC_CTLR_EL1_PRIBITS_SHIFT);
    cs->icc_pmr_el1 = 0;
    cs->icc_bpr[GICV3_G0] = GIC_MIN_BPR;
    cs->icc_bpr[GICV3_G1] = GIC_MIN_BPR;
    cs->icc_bpr[GICV3_G1NS] = GIC_MIN_BPR_NS;
    memset(cs->icc_apr, 0, sizeof(cs->icc_apr));
    memset(cs->icc_igrpen, 0, sizeof(cs->icc_igrpen));
    cs->icc_ctlr_el3 = ICC_CTLR_EL3_NDS | ICC_CTLR_EL3_A3V |
        (1 << ICC_CTLR_EL3_IDBITS_SHIFT) |
        (7 << ICC_CTLR_EL3_PRIBITS_SHIFT);

    memset(cs->ich_apr, 0, sizeof(cs->ich_apr));
    cs->ich_hcr_el2 = 0;
    memset(cs->ich_lr_el2, 0, sizeof(cs->ich_lr_el2));
    cs->ich_vmcr_el2 = ICH_VMCR_EL2_VFIQEN |
        ((icv_min_vbpr(cs) + 1) << ICH_VMCR_EL2_VBPR1_SHIFT) |
        (icv_min_vbpr(cs) << ICH_VMCR_EL2_VBPR0_SHIFT);
}

static const ARMCPRegInfo gicv3_cpuif_reginfo[] = {
    { .name = "ICC_PMR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 6, .opc2 = 0,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_irqfiq_access,
      .readfn = icc_pmr_read,
      .writefn = icc_pmr_write,
      /* We hang the whole cpu interface reset routine off here
       * rather than parcelling it out into one little function
       * per register
       */
      .resetfn = icc_reset,
    },
    { .name = "ICC_IAR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 0,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_R, .accessfn = gicv3_fiq_access,
      .readfn = icc_iar0_read,
    },
    { .name = "ICC_EOIR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_W, .accessfn = gicv3_fiq_access,
      .writefn = icc_eoir_write,
    },
    { .name = "ICC_HPPIR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 2,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_R, .accessfn = gicv3_fiq_access,
      .readfn = icc_hppir0_read,
    },
    { .name = "ICC_BPR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 3,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_fiq_access,
      .readfn = icc_bpr_read,
      .writefn = icc_bpr_write,
    },
    { .name = "ICC_AP0R0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 4,
      .type = ARM_CP_IO |
ARM_CP_NO_RAW, 2237 .access = PL1_RW, .accessfn = gicv3_fiq_access, 2238 .readfn = icc_ap_read, 2239 .writefn = icc_ap_write, 2240 }, 2241 { .name = "ICC_AP0R1_EL1", .state = ARM_CP_STATE_BOTH, 2242 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 5, 2243 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2244 .access = PL1_RW, .accessfn = gicv3_fiq_access, 2245 .readfn = icc_ap_read, 2246 .writefn = icc_ap_write, 2247 }, 2248 { .name = "ICC_AP0R2_EL1", .state = ARM_CP_STATE_BOTH, 2249 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 6, 2250 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2251 .access = PL1_RW, .accessfn = gicv3_fiq_access, 2252 .readfn = icc_ap_read, 2253 .writefn = icc_ap_write, 2254 }, 2255 { .name = "ICC_AP0R3_EL1", .state = ARM_CP_STATE_BOTH, 2256 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 7, 2257 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2258 .access = PL1_RW, .accessfn = gicv3_fiq_access, 2259 .readfn = icc_ap_read, 2260 .writefn = icc_ap_write, 2261 }, 2262 /* All the ICC_AP1R*_EL1 registers are banked */ 2263 { .name = "ICC_AP1R0_EL1", .state = ARM_CP_STATE_BOTH, 2264 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 0, 2265 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2266 .access = PL1_RW, .accessfn = gicv3_irq_access, 2267 .readfn = icc_ap_read, 2268 .writefn = icc_ap_write, 2269 }, 2270 { .name = "ICC_AP1R1_EL1", .state = ARM_CP_STATE_BOTH, 2271 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 1, 2272 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2273 .access = PL1_RW, .accessfn = gicv3_irq_access, 2274 .readfn = icc_ap_read, 2275 .writefn = icc_ap_write, 2276 }, 2277 { .name = "ICC_AP1R2_EL1", .state = ARM_CP_STATE_BOTH, 2278 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 2, 2279 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2280 .access = PL1_RW, .accessfn = gicv3_irq_access, 2281 .readfn = icc_ap_read, 2282 .writefn = icc_ap_write, 2283 }, 2284 { .name = "ICC_AP1R3_EL1", .state = ARM_CP_STATE_BOTH, 2285 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 3, 2286 .type = ARM_CP_IO | 
ARM_CP_NO_RAW, 2287 .access = PL1_RW, .accessfn = gicv3_irq_access, 2288 .readfn = icc_ap_read, 2289 .writefn = icc_ap_write, 2290 }, 2291 { .name = "ICC_DIR_EL1", .state = ARM_CP_STATE_BOTH, 2292 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 1, 2293 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2294 .access = PL1_W, .accessfn = gicv3_dir_access, 2295 .writefn = icc_dir_write, 2296 }, 2297 { .name = "ICC_RPR_EL1", .state = ARM_CP_STATE_BOTH, 2298 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 3, 2299 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2300 .access = PL1_R, .accessfn = gicv3_irqfiq_access, 2301 .readfn = icc_rpr_read, 2302 }, 2303 { .name = "ICC_SGI1R_EL1", .state = ARM_CP_STATE_AA64, 2304 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 5, 2305 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2306 .access = PL1_W, .accessfn = gicv3_sgi_access, 2307 .writefn = icc_sgi1r_write, 2308 }, 2309 { .name = "ICC_SGI1R", 2310 .cp = 15, .opc1 = 0, .crm = 12, 2311 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_RAW, 2312 .access = PL1_W, .accessfn = gicv3_sgi_access, 2313 .writefn = icc_sgi1r_write, 2314 }, 2315 { .name = "ICC_ASGI1R_EL1", .state = ARM_CP_STATE_AA64, 2316 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 6, 2317 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2318 .access = PL1_W, .accessfn = gicv3_sgi_access, 2319 .writefn = icc_asgi1r_write, 2320 }, 2321 { .name = "ICC_ASGI1R", 2322 .cp = 15, .opc1 = 1, .crm = 12, 2323 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_RAW, 2324 .access = PL1_W, .accessfn = gicv3_sgi_access, 2325 .writefn = icc_asgi1r_write, 2326 }, 2327 { .name = "ICC_SGI0R_EL1", .state = ARM_CP_STATE_AA64, 2328 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 7, 2329 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2330 .access = PL1_W, .accessfn = gicv3_sgi_access, 2331 .writefn = icc_sgi0r_write, 2332 }, 2333 { .name = "ICC_SGI0R", 2334 .cp = 15, .opc1 = 2, .crm = 12, 2335 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_RAW, 2336 .access = PL1_W, .accessfn = gicv3_sgi_access, 2337 
.writefn = icc_sgi0r_write, 2338 }, 2339 { .name = "ICC_IAR1_EL1", .state = ARM_CP_STATE_BOTH, 2340 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 0, 2341 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2342 .access = PL1_R, .accessfn = gicv3_irq_access, 2343 .readfn = icc_iar1_read, 2344 }, 2345 { .name = "ICC_EOIR1_EL1", .state = ARM_CP_STATE_BOTH, 2346 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 1, 2347 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2348 .access = PL1_W, .accessfn = gicv3_irq_access, 2349 .writefn = icc_eoir_write, 2350 }, 2351 { .name = "ICC_HPPIR1_EL1", .state = ARM_CP_STATE_BOTH, 2352 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 2, 2353 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2354 .access = PL1_R, .accessfn = gicv3_irq_access, 2355 .readfn = icc_hppir1_read, 2356 }, 2357 /* This register is banked */ 2358 { .name = "ICC_BPR1_EL1", .state = ARM_CP_STATE_BOTH, 2359 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 3, 2360 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2361 .access = PL1_RW, .accessfn = gicv3_irq_access, 2362 .readfn = icc_bpr_read, 2363 .writefn = icc_bpr_write, 2364 }, 2365 /* This register is banked */ 2366 { .name = "ICC_CTLR_EL1", .state = ARM_CP_STATE_BOTH, 2367 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 4, 2368 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2369 .access = PL1_RW, .accessfn = gicv3_irqfiq_access, 2370 .readfn = icc_ctlr_el1_read, 2371 .writefn = icc_ctlr_el1_write, 2372 }, 2373 { .name = "ICC_SRE_EL1", .state = ARM_CP_STATE_BOTH, 2374 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 5, 2375 .type = ARM_CP_NO_RAW | ARM_CP_CONST, 2376 .access = PL1_RW, 2377 /* We don't support IRQ/FIQ bypass and system registers are 2378 * always enabled, so all our bits are RAZ/WI or RAO/WI. 2379 * This register is banked but since it's constant we don't 2380 * need to do anything special. 
2381 */ 2382 .resetvalue = 0x7, 2383 }, 2384 { .name = "ICC_IGRPEN0_EL1", .state = ARM_CP_STATE_BOTH, 2385 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 6, 2386 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2387 .access = PL1_RW, .accessfn = gicv3_fiq_access, 2388 .readfn = icc_igrpen_read, 2389 .writefn = icc_igrpen_write, 2390 }, 2391 /* This register is banked */ 2392 { .name = "ICC_IGRPEN1_EL1", .state = ARM_CP_STATE_BOTH, 2393 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 7, 2394 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2395 .access = PL1_RW, .accessfn = gicv3_irq_access, 2396 .readfn = icc_igrpen_read, 2397 .writefn = icc_igrpen_write, 2398 }, 2399 { .name = "ICC_SRE_EL2", .state = ARM_CP_STATE_BOTH, 2400 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 5, 2401 .type = ARM_CP_NO_RAW | ARM_CP_CONST, 2402 .access = PL2_RW, 2403 /* We don't support IRQ/FIQ bypass and system registers are 2404 * always enabled, so all our bits are RAZ/WI or RAO/WI. 2405 */ 2406 .resetvalue = 0xf, 2407 }, 2408 { .name = "ICC_CTLR_EL3", .state = ARM_CP_STATE_BOTH, 2409 .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 12, .opc2 = 4, 2410 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2411 .access = PL3_RW, 2412 .readfn = icc_ctlr_el3_read, 2413 .writefn = icc_ctlr_el3_write, 2414 }, 2415 { .name = "ICC_SRE_EL3", .state = ARM_CP_STATE_BOTH, 2416 .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 12, .opc2 = 5, 2417 .type = ARM_CP_NO_RAW | ARM_CP_CONST, 2418 .access = PL3_RW, 2419 /* We don't support IRQ/FIQ bypass and system registers are 2420 * always enabled, so all our bits are RAZ/WI or RAO/WI. 
2421 */ 2422 .resetvalue = 0xf, 2423 }, 2424 { .name = "ICC_IGRPEN1_EL3", .state = ARM_CP_STATE_BOTH, 2425 .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 12, .opc2 = 7, 2426 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2427 .access = PL3_RW, 2428 .readfn = icc_igrpen1_el3_read, 2429 .writefn = icc_igrpen1_el3_write, 2430 }, 2431 }; 2432 2433 static uint64_t ich_ap_read(CPUARMState *env, const ARMCPRegInfo *ri) 2434 { 2435 GICv3CPUState *cs = icc_cs_from_env(env); 2436 int regno = ri->opc2 & 3; 2437 int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0; 2438 uint64_t value; 2439 2440 value = cs->ich_apr[grp][regno]; 2441 trace_gicv3_ich_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value); 2442 return value; 2443 } 2444 2445 static void ich_ap_write(CPUARMState *env, const ARMCPRegInfo *ri, 2446 uint64_t value) 2447 { 2448 GICv3CPUState *cs = icc_cs_from_env(env); 2449 int regno = ri->opc2 & 3; 2450 int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0; 2451 2452 trace_gicv3_ich_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value); 2453 2454 cs->ich_apr[grp][regno] = value & 0xFFFFFFFFU; 2455 gicv3_cpuif_virt_irq_fiq_update(cs); 2456 } 2457 2458 static uint64_t ich_hcr_read(CPUARMState *env, const ARMCPRegInfo *ri) 2459 { 2460 GICv3CPUState *cs = icc_cs_from_env(env); 2461 uint64_t value = cs->ich_hcr_el2; 2462 2463 trace_gicv3_ich_hcr_read(gicv3_redist_affid(cs), value); 2464 return value; 2465 } 2466 2467 static void ich_hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 2468 uint64_t value) 2469 { 2470 GICv3CPUState *cs = icc_cs_from_env(env); 2471 2472 trace_gicv3_ich_hcr_write(gicv3_redist_affid(cs), value); 2473 2474 value &= ICH_HCR_EL2_EN | ICH_HCR_EL2_UIE | ICH_HCR_EL2_LRENPIE | 2475 ICH_HCR_EL2_NPIE | ICH_HCR_EL2_VGRP0EIE | ICH_HCR_EL2_VGRP0DIE | 2476 ICH_HCR_EL2_VGRP1EIE | ICH_HCR_EL2_VGRP1DIE | ICH_HCR_EL2_TC | 2477 ICH_HCR_EL2_TALL0 | ICH_HCR_EL2_TALL1 | ICH_HCR_EL2_TSEI | 2478 ICH_HCR_EL2_TDIR | ICH_HCR_EL2_EOICOUNT_MASK; 2479 2480 cs->ich_hcr_el2 = value; 2481 
gicv3_cpuif_virt_update(cs); 2482 } 2483 2484 static uint64_t ich_vmcr_read(CPUARMState *env, const ARMCPRegInfo *ri) 2485 { 2486 GICv3CPUState *cs = icc_cs_from_env(env); 2487 uint64_t value = cs->ich_vmcr_el2; 2488 2489 trace_gicv3_ich_vmcr_read(gicv3_redist_affid(cs), value); 2490 return value; 2491 } 2492 2493 static void ich_vmcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 2494 uint64_t value) 2495 { 2496 GICv3CPUState *cs = icc_cs_from_env(env); 2497 2498 trace_gicv3_ich_vmcr_write(gicv3_redist_affid(cs), value); 2499 2500 value &= ICH_VMCR_EL2_VENG0 | ICH_VMCR_EL2_VENG1 | ICH_VMCR_EL2_VCBPR | 2501 ICH_VMCR_EL2_VEOIM | ICH_VMCR_EL2_VBPR1_MASK | 2502 ICH_VMCR_EL2_VBPR0_MASK | ICH_VMCR_EL2_VPMR_MASK; 2503 value |= ICH_VMCR_EL2_VFIQEN; 2504 2505 cs->ich_vmcr_el2 = value; 2506 /* Enforce "writing BPRs to less than minimum sets them to the minimum" 2507 * by reading and writing back the fields. 2508 */ 2509 write_vbpr(cs, GICV3_G0, read_vbpr(cs, GICV3_G0)); 2510 write_vbpr(cs, GICV3_G1, read_vbpr(cs, GICV3_G1)); 2511 2512 gicv3_cpuif_virt_update(cs); 2513 } 2514 2515 static uint64_t ich_lr_read(CPUARMState *env, const ARMCPRegInfo *ri) 2516 { 2517 GICv3CPUState *cs = icc_cs_from_env(env); 2518 int regno = ri->opc2 | ((ri->crm & 1) << 3); 2519 uint64_t value; 2520 2521 /* This read function handles all of: 2522 * 64-bit reads of the whole LR 2523 * 32-bit reads of the low half of the LR 2524 * 32-bit reads of the high half of the LR 2525 */ 2526 if (ri->state == ARM_CP_STATE_AA32) { 2527 if (ri->crm >= 14) { 2528 value = extract64(cs->ich_lr_el2[regno], 32, 32); 2529 trace_gicv3_ich_lrc_read(regno, gicv3_redist_affid(cs), value); 2530 } else { 2531 value = extract64(cs->ich_lr_el2[regno], 0, 32); 2532 trace_gicv3_ich_lr32_read(regno, gicv3_redist_affid(cs), value); 2533 } 2534 } else { 2535 value = cs->ich_lr_el2[regno]; 2536 trace_gicv3_ich_lr_read(regno, gicv3_redist_affid(cs), value); 2537 } 2538 2539 return value; 2540 } 2541 2542 static void 
ich_lr_write(CPUARMState *env, const ARMCPRegInfo *ri, 2543 uint64_t value) 2544 { 2545 GICv3CPUState *cs = icc_cs_from_env(env); 2546 int regno = ri->opc2 | ((ri->crm & 1) << 3); 2547 2548 /* This write function handles all of: 2549 * 64-bit writes to the whole LR 2550 * 32-bit writes to the low half of the LR 2551 * 32-bit writes to the high half of the LR 2552 */ 2553 if (ri->state == ARM_CP_STATE_AA32) { 2554 if (ri->crm >= 14) { 2555 trace_gicv3_ich_lrc_write(regno, gicv3_redist_affid(cs), value); 2556 value = deposit64(cs->ich_lr_el2[regno], 32, 32, value); 2557 } else { 2558 trace_gicv3_ich_lr32_write(regno, gicv3_redist_affid(cs), value); 2559 value = deposit64(cs->ich_lr_el2[regno], 0, 32, value); 2560 } 2561 } else { 2562 trace_gicv3_ich_lr_write(regno, gicv3_redist_affid(cs), value); 2563 } 2564 2565 /* Enforce RES0 bits in priority field */ 2566 if (cs->vpribits < 8) { 2567 value = deposit64(value, ICH_LR_EL2_PRIORITY_SHIFT, 2568 8 - cs->vpribits, 0); 2569 } 2570 2571 cs->ich_lr_el2[regno] = value; 2572 gicv3_cpuif_virt_update(cs); 2573 } 2574 2575 static uint64_t ich_vtr_read(CPUARMState *env, const ARMCPRegInfo *ri) 2576 { 2577 GICv3CPUState *cs = icc_cs_from_env(env); 2578 uint64_t value; 2579 2580 value = ((cs->num_list_regs - 1) << ICH_VTR_EL2_LISTREGS_SHIFT) 2581 | ICH_VTR_EL2_TDS | ICH_VTR_EL2_A3V 2582 | (1 << ICH_VTR_EL2_IDBITS_SHIFT) 2583 | ((cs->vprebits - 1) << ICH_VTR_EL2_PREBITS_SHIFT) 2584 | ((cs->vpribits - 1) << ICH_VTR_EL2_PRIBITS_SHIFT); 2585 2586 if (cs->gic->revision < 4) { 2587 value |= ICH_VTR_EL2_NV4; 2588 } 2589 2590 trace_gicv3_ich_vtr_read(gicv3_redist_affid(cs), value); 2591 return value; 2592 } 2593 2594 static uint64_t ich_misr_read(CPUARMState *env, const ARMCPRegInfo *ri) 2595 { 2596 GICv3CPUState *cs = icc_cs_from_env(env); 2597 uint64_t value = maintenance_interrupt_state(cs); 2598 2599 trace_gicv3_ich_misr_read(gicv3_redist_affid(cs), value); 2600 return value; 2601 } 2602 2603 static uint64_t ich_eisr_read(CPUARMState 
*env, const ARMCPRegInfo *ri) 2604 { 2605 GICv3CPUState *cs = icc_cs_from_env(env); 2606 uint64_t value = eoi_maintenance_interrupt_state(cs, NULL); 2607 2608 trace_gicv3_ich_eisr_read(gicv3_redist_affid(cs), value); 2609 return value; 2610 } 2611 2612 static uint64_t ich_elrsr_read(CPUARMState *env, const ARMCPRegInfo *ri) 2613 { 2614 GICv3CPUState *cs = icc_cs_from_env(env); 2615 uint64_t value = 0; 2616 int i; 2617 2618 for (i = 0; i < cs->num_list_regs; i++) { 2619 uint64_t lr = cs->ich_lr_el2[i]; 2620 2621 if ((lr & ICH_LR_EL2_STATE_MASK) == 0 && 2622 ((lr & ICH_LR_EL2_HW) != 0 || (lr & ICH_LR_EL2_EOI) == 0)) { 2623 value |= (1 << i); 2624 } 2625 } 2626 2627 trace_gicv3_ich_elrsr_read(gicv3_redist_affid(cs), value); 2628 return value; 2629 } 2630 2631 static const ARMCPRegInfo gicv3_cpuif_hcr_reginfo[] = { 2632 { .name = "ICH_AP0R0_EL2", .state = ARM_CP_STATE_BOTH, 2633 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 0, 2634 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2635 .access = PL2_RW, 2636 .readfn = ich_ap_read, 2637 .writefn = ich_ap_write, 2638 }, 2639 { .name = "ICH_AP1R0_EL2", .state = ARM_CP_STATE_BOTH, 2640 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 0, 2641 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2642 .access = PL2_RW, 2643 .readfn = ich_ap_read, 2644 .writefn = ich_ap_write, 2645 }, 2646 { .name = "ICH_HCR_EL2", .state = ARM_CP_STATE_BOTH, 2647 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 0, 2648 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2649 .access = PL2_RW, 2650 .readfn = ich_hcr_read, 2651 .writefn = ich_hcr_write, 2652 }, 2653 { .name = "ICH_VTR_EL2", .state = ARM_CP_STATE_BOTH, 2654 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 1, 2655 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2656 .access = PL2_R, 2657 .readfn = ich_vtr_read, 2658 }, 2659 { .name = "ICH_MISR_EL2", .state = ARM_CP_STATE_BOTH, 2660 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 2, 2661 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2662 .access = PL2_R, 2663 .readfn = ich_misr_read, 
2664 }, 2665 { .name = "ICH_EISR_EL2", .state = ARM_CP_STATE_BOTH, 2666 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 3, 2667 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2668 .access = PL2_R, 2669 .readfn = ich_eisr_read, 2670 }, 2671 { .name = "ICH_ELRSR_EL2", .state = ARM_CP_STATE_BOTH, 2672 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 5, 2673 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2674 .access = PL2_R, 2675 .readfn = ich_elrsr_read, 2676 }, 2677 { .name = "ICH_VMCR_EL2", .state = ARM_CP_STATE_BOTH, 2678 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 7, 2679 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2680 .access = PL2_RW, 2681 .readfn = ich_vmcr_read, 2682 .writefn = ich_vmcr_write, 2683 }, 2684 }; 2685 2686 static const ARMCPRegInfo gicv3_cpuif_ich_apxr1_reginfo[] = { 2687 { .name = "ICH_AP0R1_EL2", .state = ARM_CP_STATE_BOTH, 2688 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 1, 2689 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2690 .access = PL2_RW, 2691 .readfn = ich_ap_read, 2692 .writefn = ich_ap_write, 2693 }, 2694 { .name = "ICH_AP1R1_EL2", .state = ARM_CP_STATE_BOTH, 2695 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 1, 2696 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2697 .access = PL2_RW, 2698 .readfn = ich_ap_read, 2699 .writefn = ich_ap_write, 2700 }, 2701 }; 2702 2703 static const ARMCPRegInfo gicv3_cpuif_ich_apxr23_reginfo[] = { 2704 { .name = "ICH_AP0R2_EL2", .state = ARM_CP_STATE_BOTH, 2705 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 2, 2706 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2707 .access = PL2_RW, 2708 .readfn = ich_ap_read, 2709 .writefn = ich_ap_write, 2710 }, 2711 { .name = "ICH_AP0R3_EL2", .state = ARM_CP_STATE_BOTH, 2712 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 3, 2713 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2714 .access = PL2_RW, 2715 .readfn = ich_ap_read, 2716 .writefn = ich_ap_write, 2717 }, 2718 { .name = "ICH_AP1R2_EL2", .state = ARM_CP_STATE_BOTH, 2719 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 2, 2720 .type = ARM_CP_IO | 
ARM_CP_NO_RAW, 2721 .access = PL2_RW, 2722 .readfn = ich_ap_read, 2723 .writefn = ich_ap_write, 2724 }, 2725 { .name = "ICH_AP1R3_EL2", .state = ARM_CP_STATE_BOTH, 2726 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 3, 2727 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2728 .access = PL2_RW, 2729 .readfn = ich_ap_read, 2730 .writefn = ich_ap_write, 2731 }, 2732 }; 2733 2734 static void gicv3_cpuif_el_change_hook(ARMCPU *cpu, void *opaque) 2735 { 2736 GICv3CPUState *cs = opaque; 2737 2738 gicv3_cpuif_update(cs); 2739 /* 2740 * Because vLPIs are only pending in NonSecure state, 2741 * an EL change can change the VIRQ/VFIQ status (but 2742 * cannot affect the maintenance interrupt state) 2743 */ 2744 gicv3_cpuif_virt_irq_fiq_update(cs); 2745 } 2746 2747 void gicv3_init_cpuif(GICv3State *s) 2748 { 2749 /* Called from the GICv3 realize function; register our system 2750 * registers with the CPU 2751 */ 2752 int i; 2753 2754 for (i = 0; i < s->num_cpu; i++) { 2755 ARMCPU *cpu = ARM_CPU(qemu_get_cpu(i)); 2756 GICv3CPUState *cs = &s->cpu[i]; 2757 2758 /* Note that we can't just use the GICv3CPUState as an opaque pointer 2759 * in define_arm_cp_regs_with_opaque(), because when we're called back 2760 * it might be with code translated by CPU 0 but run by CPU 1, in 2761 * which case we'd get the wrong value. 2762 * So instead we define the regs with no ri->opaque info, and 2763 * get back to the GICv3CPUState from the CPUARMState. 2764 */ 2765 define_arm_cp_regs(cpu, gicv3_cpuif_reginfo); 2766 if (arm_feature(&cpu->env, ARM_FEATURE_EL2) 2767 && cpu->gic_num_lrs) { 2768 int j; 2769 2770 cs->num_list_regs = cpu->gic_num_lrs; 2771 cs->vpribits = cpu->gic_vpribits; 2772 cs->vprebits = cpu->gic_vprebits; 2773 2774 /* Check against architectural constraints: getting these 2775 * wrong would be a bug in the CPU code defining these, 2776 * and the implementation relies on them holding. 
2777 */ 2778 g_assert(cs->vprebits <= cs->vpribits); 2779 g_assert(cs->vprebits >= 5 && cs->vprebits <= 7); 2780 g_assert(cs->vpribits >= 5 && cs->vpribits <= 8); 2781 2782 define_arm_cp_regs(cpu, gicv3_cpuif_hcr_reginfo); 2783 2784 for (j = 0; j < cs->num_list_regs; j++) { 2785 /* Note that the AArch64 LRs are 64-bit; the AArch32 LRs 2786 * are split into two cp15 regs, LR (the low part, with the 2787 * same encoding as the AArch64 LR) and LRC (the high part). 2788 */ 2789 ARMCPRegInfo lr_regset[] = { 2790 { .name = "ICH_LRn_EL2", .state = ARM_CP_STATE_BOTH, 2791 .opc0 = 3, .opc1 = 4, .crn = 12, 2792 .crm = 12 + (j >> 3), .opc2 = j & 7, 2793 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2794 .access = PL2_RW, 2795 .readfn = ich_lr_read, 2796 .writefn = ich_lr_write, 2797 }, 2798 { .name = "ICH_LRCn_EL2", .state = ARM_CP_STATE_AA32, 2799 .cp = 15, .opc1 = 4, .crn = 12, 2800 .crm = 14 + (j >> 3), .opc2 = j & 7, 2801 .type = ARM_CP_IO | ARM_CP_NO_RAW, 2802 .access = PL2_RW, 2803 .readfn = ich_lr_read, 2804 .writefn = ich_lr_write, 2805 }, 2806 }; 2807 define_arm_cp_regs(cpu, lr_regset); 2808 } 2809 if (cs->vprebits >= 6) { 2810 define_arm_cp_regs(cpu, gicv3_cpuif_ich_apxr1_reginfo); 2811 } 2812 if (cs->vprebits == 7) { 2813 define_arm_cp_regs(cpu, gicv3_cpuif_ich_apxr23_reginfo); 2814 } 2815 } 2816 arm_register_el_change_hook(cpu, gicv3_cpuif_el_change_hook, cs); 2817 } 2818 } 2819