18 #include "qemu/main-loop.h"
24 #include "target/arm/cpu-features.h"
36 return env->gicv3state;
50 static inline int icv_min_vbpr(GICv3CPUState *cs)
52 return 7 - cs->vprebits;
55 static inline int ich_num_aprs(GICv3CPUState *cs)
58 int aprmax = 1 << (cs->vprebits - 5);
59 assert(aprmax <= ARRAY_SIZE(cs->ich_apr[0]));
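/*
 * Illustrative note (not part of the original file): with the maximum
 * cs->vprebits of 7 there are 2^7 = 128 virtual preemption levels, so
 * 1 << (7 - 5) = 4 active-priority registers of 32 bits each are needed,
 * one bit per level; with the minimum of 5 bits a single register suffices.
 */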
94 * all ICV regs with '0' in their name
107 static int read_vbpr(GICv3CPUState *cs, int grp)
113 return extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR0_SHIFT,
116 return extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR1_SHIFT,
121 static void write_vbpr(GICv3CPUState *cs, int grp, int value)
126 int min = icv_min_vbpr(cs);
135 cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR0_SHIFT,
138 cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR1_SHIFT,
143 static uint32_t icv_fullprio_mask(GICv3CPUState *cs)
150 return (~0U << (8 - cs->vpribits)) & 0xff;
153 static int ich_highest_active_virt_prio(GICv3CPUState *cs)
159 int aprmax = ich_num_aprs(cs);
161 if (cs->ich_apr[GICV3_G1NS][0] & ICV_AP1R_EL1_NMI) {
162 return 0x0;
165 for (i = 0; i < aprmax; i++) {
166 uint32_t apr = cs->ich_apr[GICV3_G0][i] |
167 cs->ich_apr[GICV3_G1NS][i];
172 return (i * 32 + ctz32(apr)) << (icv_min_vbpr(cs) + 1);
175 return 0xff;
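/*
 * Illustrative worked example (assumed values, not from the file): with
 * cs->vprebits == 5, icv_min_vbpr() is 7 - 5 = 2, so each APR bit stands
 * for a priority step of 1 << 3 = 8. If bit 6 of APR[0] is the lowest bit
 * set, the highest active virtual priority is (0 * 32 + 6) << 3 = 0x30.
 */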
178 static int hppvi_index(GICv3CPUState *cs)
183 * pseudocode. If no pending virtual interrupts, return -1.
190 ARMCPU *cpu = ARM_CPU(cs->cpu);
191 CPUARMState *env = &cpu->env;
192 int idx = -1;
194 /* Note that a list register entry with a priority of 0xff will
198 int prio = 0xff;
201 if (!(cs->ich_vmcr_el2 & (ICH_VMCR_EL2_VENG0 | ICH_VMCR_EL2_VENG1))) {
206 for (i = 0; i < cs->num_list_regs; i++) {
207 uint64_t lr = cs->ich_lr_el2[i];
218 if (!(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) {
222 if (!(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0)) {
238 * "no pending vLPI" is indicated with prio = 0xff, which always
240 * when we are in Non-Secure state.
242 if (cs->hppvlpi.prio < prio && !arm_is_secure(env)) {
243 if (cs->hppvlpi.grp == GICV3_G0) {
244 if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0) {
248 if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1) {
257 static uint32_t icv_gprio_mask(GICv3CPUState *cs, int group)
263 * a BPR of 0 means the group priority bits are [7:1];
267 * a BPR of 0 is impossible (the minimum value is 1)
279 if (group == GICV3_G1NS && cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR) {
283 bpr = read_vbpr(cs, group);
285 assert(bpr > 0);
286 bpr--;
289 return ~0U << (bpr + 1);
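/*
 * Illustrative worked example: an effective BPR of 2 puts the group
 * priority in bits [7:3], so this returns ~0U << 3 == 0xfffffff8; ANDing
 * an 8-bit priority with it keeps bits [7:3] and drops the subpriority.
 */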
292 static bool icv_hppi_can_preempt(GICv3CPUState *cs, uint64_t lr)
297 * Compare also icc_hppi_can_preempt() which is the non-virtual
304 if (!(cs->ich_hcr_el2 & ICH_HCR_EL2_EN)) {
315 vpmr = extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
323 rprio = ich_highest_active_virt_prio(cs);
324 if (rprio == 0xff) {
331 mask = icv_gprio_mask(cs, grp);
341 !(cs->ich_apr[GICV3_G1NS][0] & ICV_AP1R_EL1_NMI)) {
348 static bool icv_hppvlpi_can_preempt(GICv3CPUState *cs)
352 * We can assume we're Non-secure because hppvi_index() already
357 if (!(cs->ich_hcr_el2 & ICH_HCR_EL2_EN)) {
362 vpmr = extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
365 if (cs->hppvlpi.prio >= vpmr) {
370 rprio = ich_highest_active_virt_prio(cs);
371 if (rprio == 0xff) {
376 mask = icv_gprio_mask(cs, cs->hppvlpi.grp);
382 if ((cs->hppvlpi.prio & mask) < (rprio & mask)) {
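/*
 * Illustrative sketch (not part of the original file): ignoring the
 * ICH_HCR_EL2.EN check and the NMI special cases, the preemption test
 * shared by icv_hppi_can_preempt() and icv_hppvlpi_can_preempt() reduces
 * to the three steps below. The helper name and plain-int types are
 * invented for illustration.
 */
static int virt_prio_preempts(int prio, int vpmr, int rprio, unsigned mask)
{
    if (prio >= vpmr) {
        return 0;   /* masked off by the virtual priority mask register */
    }
    if (rprio == 0xff) {
        return 1;   /* idle priority: nothing active, so always preempt */
    }
    /* otherwise compare group priorities only, per the BPR-derived mask */
    return (prio & mask) < (rprio & mask);
}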
389 static uint32_t eoi_maintenance_interrupt_state(GICv3CPUState *cs,
394 * 1 if LR.State == 0 && LR.HW == 0 && LR.EOI == 1
399 uint32_t value = 0;
400 int validcount = 0;
404 for (i = 0; i < cs->num_list_regs; i++) {
405 uint64_t lr = cs->ich_lr_el2[i];
420 if (validcount < 2 && (cs->ich_hcr_el2 & ICH_HCR_EL2_UIE)) {
423 if (!seenpending && (cs->ich_hcr_el2 & ICH_HCR_EL2_NPIE)) {
433 static uint32_t maintenance_interrupt_state(GICv3CPUState *cs)
438 uint32_t value = 0;
441 eoi_maintenance_interrupt_state(cs, &value);
443 if ((cs->ich_hcr_el2 & ICH_HCR_EL2_LRENPIE) &&
444 (cs->ich_hcr_el2 & ICH_HCR_EL2_EOICOUNT_MASK)) {
448 if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP0EIE) &&
449 (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0)) {
453 if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP0DIE) &&
454 !(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0)) {
457 if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP1EIE) &&
458 (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) {
462 if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP1DIE) &&
463 !(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) {
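/*
 * Illustrative note: each ICH_MISR_EL2 status bit above pairs a trap
 * enable in ICH_HCR_EL2 with a state condition: LRENP needs LRENPIE plus
 * a non-zero EOIcount, VGRP0E/VGRP1E fire while the matching virtual
 * group enable (VENG0/VENG1) is set, and VGRP0D/VGRP1D while it is clear.
 */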
470 void gicv3_cpuif_virt_irq_fiq_update(GICv3CPUState *cs)
482 int irqlevel = 0;
483 int fiqlevel = 0;
484 int nmilevel = 0;
486 idx = hppvi_index(cs);
487 trace_gicv3_cpuif_virt_update(gicv3_redist_affid(cs), idx,
488 cs->hppvlpi.irq, cs->hppvlpi.grp,
489 cs->hppvlpi.prio);
491 if (icv_hppvlpi_can_preempt(cs)) {
492 if (cs->hppvlpi.grp == GICV3_G0) {
498 } else if (idx >= 0) {
499 uint64_t lr = cs->ich_lr_el2[idx];
501 if (icv_hppi_can_preempt(cs, lr)) {
505 * non-maskable property.
519 trace_gicv3_cpuif_virt_set_irqs(gicv3_redist_affid(cs), fiqlevel, irqlevel);
520 qemu_set_irq(cs->parent_vfiq, fiqlevel);
521 qemu_set_irq(cs->parent_virq, irqlevel);
522 qemu_set_irq(cs->parent_vnmi, nmilevel);
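/*
 * Illustrative note: the three outputs follow the usual GICv3 mapping:
 * a preempting Group 0 virtual interrupt drives vFIQ, Group 1 drives
 * vIRQ, and one with the non-maskable property drives vNMI instead.
 */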
525 static void gicv3_cpuif_virt_update(GICv3CPUState *cs)
534 * to the GIC as a per-CPU interrupt. This means that it
545 ARMCPU *cpu = ARM_CPU(cs->cpu);
546 int maintlevel = 0;
548 gicv3_cpuif_virt_irq_fiq_update(cs);
550 if ((cs->ich_hcr_el2 & ICH_HCR_EL2_EN) &&
551 maintenance_interrupt_state(cs) != 0) {
555 trace_gicv3_cpuif_virt_set_maint_irq(gicv3_redist_affid(cs), maintlevel);
556 qemu_set_irq(cpu->gicv3_maintenance_interrupt, maintlevel);
561 GICv3CPUState *cs = icc_cs_from_env(env);
562 int regno = ri->opc2 & 3;
563 int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
564 uint64_t value = cs->ich_apr[grp][regno];
566 trace_gicv3_icv_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
573 GICv3CPUState *cs = icc_cs_from_env(env);
574 int regno = ri->opc2 & 3;
575 int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
577 trace_gicv3_icv_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
579 if (cs->nmi_support) {
580 cs->ich_apr[grp][regno] = value & (0xFFFFFFFFU | ICV_AP1R_EL1_NMI);
582 cs->ich_apr[grp][regno] = value & 0xFFFFFFFFU;
585 gicv3_cpuif_virt_irq_fiq_update(cs);
591 GICv3CPUState *cs = icc_cs_from_env(env);
592 int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1NS;
596 if (grp == GICV3_G1NS && (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR)) {
602 bpr = read_vbpr(cs, grp);
609 trace_gicv3_icv_bpr_read(ri->crm == 8 ? 0 : 1, gicv3_redist_affid(cs), bpr);
617 GICv3CPUState *cs = icc_cs_from_env(env);
618 int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1NS;
620 trace_gicv3_icv_bpr_write(ri->crm == 8 ? 0 : 1,
621 gicv3_redist_affid(cs), value);
623 if (grp == GICV3_G1NS && (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR)) {
628 write_vbpr(cs, grp, value);
630 gicv3_cpuif_virt_irq_fiq_update(cs);
635 GICv3CPUState *cs = icc_cs_from_env(env);
638 value = extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
641 trace_gicv3_icv_pmr_read(gicv3_redist_affid(cs), value);
648 GICv3CPUState *cs = icc_cs_from_env(env);
650 trace_gicv3_icv_pmr_write(gicv3_redist_affid(cs), value);
652 value &= icv_fullprio_mask(cs);
654 cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
657 gicv3_cpuif_virt_irq_fiq_update(cs);
662 GICv3CPUState *cs = icc_cs_from_env(env);
666 enbit = ri->opc2 & 1 ? ICH_VMCR_EL2_VENG1_SHIFT : ICH_VMCR_EL2_VENG0_SHIFT;
667 value = extract64(cs->ich_vmcr_el2, enbit, 1);
669 trace_gicv3_icv_igrpen_read(ri->opc2 & 1 ? 1 : 0,
670 gicv3_redist_affid(cs), value);
677 GICv3CPUState *cs = icc_cs_from_env(env);
680 trace_gicv3_icv_igrpen_write(ri->opc2 & 1 ? 1 : 0,
681 gicv3_redist_affid(cs), value);
683 enbit = ri->opc2 & 1 ? ICH_VMCR_EL2_VENG1_SHIFT : ICH_VMCR_EL2_VENG0_SHIFT;
685 cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, enbit, 1, value);
686 gicv3_cpuif_virt_update(cs);
691 GICv3CPUState *cs = icc_cs_from_env(env);
698 ((cs->vpribits - 1) << ICC_CTLR_EL1_PRIBITS_SHIFT);
700 if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VEOIM) {
704 if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR) {
708 trace_gicv3_icv_ctlr_read(gicv3_redist_affid(cs), value);
715 GICv3CPUState *cs = icc_cs_from_env(env);
717 trace_gicv3_icv_ctlr_write(gicv3_redist_affid(cs), value);
719 cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VCBPR_SHIFT,
720 1, value & ICC_CTLR_EL1_CBPR ? 1 : 0);
721 cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VEOIM_SHIFT,
722 1, value & ICC_CTLR_EL1_EOIMODE ? 1 : 0);
724 gicv3_cpuif_virt_irq_fiq_update(cs);
729 GICv3CPUState *cs = icc_cs_from_env(env);
730 uint64_t prio = ich_highest_active_virt_prio(cs);
732 if (cs->ich_apr[GICV3_G1NS][0] & ICV_AP1R_EL1_NMI) {
736 trace_gicv3_icv_rpr_read(gicv3_redist_affid(cs), prio);
742 GICv3CPUState *cs = icc_cs_from_env(env);
743 int grp = ri->crm == 8 ? GICV3_G0 : GICV3_G1NS;
744 int idx = hppvi_index(cs);
748 if (cs->hppvlpi.grp == grp) {
749 value = cs->hppvlpi.irq;
751 } else if (idx >= 0) {
752 uint64_t lr = cs->ich_lr_el2[idx];
760 trace_gicv3_icv_hppir_read(ri->crm == 8 ? 0 : 1,
761 gicv3_redist_affid(cs), value);
765 static void icv_activate_irq(GICv3CPUState *cs, int idx, int grp)
771 uint32_t mask = icv_gprio_mask(cs, grp);
772 int prio = ich_lr_prio(cs->ich_lr_el2[idx]) & mask;
773 bool nmi = cs->ich_lr_el2[idx] & ICH_LR_EL2_NMI;
774 int aprbit = prio >> (8 - cs->vprebits);
778 cs->ich_lr_el2[idx] &= ~ICH_LR_EL2_STATE_PENDING_BIT;
779 cs->ich_lr_el2[idx] |= ICH_LR_EL2_STATE_ACTIVE_BIT;
782 cs->ich_apr[grp][regno] |= ICV_AP1R_EL1_NMI;
784 cs->ich_apr[grp][regno] |= (1U << regbit);
788 static void icv_activate_vlpi(GICv3CPUState *cs)
790 uint32_t mask = icv_gprio_mask(cs, cs->hppvlpi.grp);
791 int prio = cs->hppvlpi.prio & mask;
792 int aprbit = prio >> (8 - cs->vprebits);
796 cs->ich_apr[cs->hppvlpi.grp][regno] |= (1U << regbit);
797 gicv3_redist_vlpi_pending(cs, cs->hppvlpi.irq, 0);
802 GICv3CPUState *cs = icc_cs_from_env(env);
803 int grp = ri->crm == 8 ? GICV3_G0 : GICV3_G1NS;
804 int idx = hppvi_index(cs);
809 if (cs->hppvlpi.grp == grp && icv_hppvlpi_can_preempt(cs)) {
810 intid = cs->hppvlpi.irq;
811 icv_activate_vlpi(cs);
813 } else if (idx >= 0) {
814 uint64_t lr = cs->ich_lr_el2[idx];
816 bool nmi = env->cp15.sctlr_el[el] & SCTLR_NMI && lr & ICH_LR_EL2_NMI;
818 if (thisgrp == grp && icv_hppi_can_preempt(cs, lr)) {
822 icv_activate_irq(cs, idx, grp);
828 cs->ich_lr_el2[idx] &= ~ICH_LR_EL2_STATE_PENDING_BIT;
836 trace_gicv3_icv_iar_read(ri->crm == 8 ? 0 : 1,
837 gicv3_redist_affid(cs), intid);
839 gicv3_cpuif_virt_update(cs);
846 GICv3CPUState *cs = icc_cs_from_env(env);
847 int idx = hppvi_index(cs);
850 if (idx >= 0 && idx != HPPVI_INDEX_VLPI) {
851 uint64_t lr = cs->ich_lr_el2[idx];
854 if ((thisgrp == GICV3_G1NS) && icv_hppi_can_preempt(cs, lr)) {
858 icv_activate_irq(cs, idx, GICV3_G1NS);
864 cs->ich_lr_el2[idx] &= ~ICH_LR_EL2_STATE_PENDING_BIT;
873 trace_gicv3_icv_nmiar1_read(gicv3_redist_affid(cs), intid);
875 gicv3_cpuif_virt_update(cs);
880 static uint32_t icc_fullprio_mask(GICv3CPUState *cs)
888 return (~0U << (8 - cs->pribits)) & 0xff;
891 static inline int icc_min_bpr(GICv3CPUState *cs)
894 return 7 - cs->prebits;
897 static inline int icc_min_bpr_ns(GICv3CPUState *cs)
899 return icc_min_bpr(cs) + 1;
902 static inline int icc_num_aprs(GICv3CPUState *cs)
905 int aprmax = 1 << MAX(cs->prebits - 5, 0);
906 assert(aprmax <= ARRAY_SIZE(cs->icc_apr[0]));
910 static int icc_highest_active_prio(GICv3CPUState *cs)
917 if (cs->nmi_support) {
922 * either 0x0 or 0x80. Callers will need to check NMI again for
924 * prioritization of NMI vs non-NMI.
926 if (cs->icc_apr[GICV3_G1][0] & ICC_AP1R_EL1_NMI) {
927 return 0;
929 if (cs->icc_apr[GICV3_G1NS][0] & ICC_AP1R_EL1_NMI) {
930 return (cs->gic->gicd_ctlr & GICD_CTLR_DS) ? 0 : 0x80;
934 for (i = 0; i < icc_num_aprs(cs); i++) {
935 uint32_t apr = cs->icc_apr[GICV3_G0][i] |
936 cs->icc_apr[GICV3_G1][i] | cs->icc_apr[GICV3_G1NS][i];
941 return (i * 32 + ctz32(apr)) << (icc_min_bpr(cs) + 1);
944 return 0xff;
947 static uint32_t icc_gprio_mask(GICv3CPUState *cs, int group)
952 * a BPR of 0 means the group priority bits are [7:1];
956 * a BPR of 0 is impossible (the minimum value is 1)
968 if ((group == GICV3_G1 && cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR) ||
970 cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR)) {
974 bpr = cs->icc_bpr[group] & 7;
977 assert(bpr > 0);
978 bpr--;
981 return ~0U << (bpr + 1);
984 static bool icc_no_enabled_hppi(GICv3CPUState *cs)
990 return cs->hppi.prio == 0xff || (cs->icc_igrpen[cs->hppi.grp] == 0);
993 static bool icc_hppi_can_preempt(GICv3CPUState *cs)
1000 ARMCPU *cpu = ARM_CPU(cs->cpu);
1001 CPUARMState *env = &cpu->env;
1003 if (icc_no_enabled_hppi(cs)) {
1007 if (cs->hppi.nmi) {
1008 if (!(cs->gic->gicd_ctlr & GICD_CTLR_DS) &&
1009 cs->hppi.grp == GICV3_G1NS) {
1010 if (cs->icc_pmr_el1 < 0x80) {
1013 if (arm_is_secure(env) && cs->icc_pmr_el1 == 0x80) {
1017 } else if (cs->hppi.prio >= cs->icc_pmr_el1) {
1022 rprio = icc_highest_active_prio(cs);
1023 if (rprio == 0xff) {
1028 mask = icc_gprio_mask(cs, cs->hppi.grp);
1033 if ((cs->hppi.prio & mask) < (rprio & mask)) {
1037 if (cs->hppi.nmi && (cs->hppi.prio & mask) == (rprio & mask)) {
1038 if (!(cs->icc_apr[cs->hppi.grp][0] & ICC_AP1R_EL1_NMI)) {
1046 void gicv3_cpuif_update(GICv3CPUState *cs)
1049 int irqlevel = 0;
1050 int fiqlevel = 0;
1051 int nmilevel = 0;
1052 ARMCPU *cpu = ARM_CPU(cs->cpu);
1053 CPUARMState *env = &cpu->env;
1057 trace_gicv3_cpuif_update(gicv3_redist_affid(cs), cs->hppi.irq,
1058 cs->hppi.grp, cs->hppi.prio);
1060 if (cs->hppi.grp == GICV3_G1 && !arm_feature(env, ARM_FEATURE_EL3)) {
1061 /* If a Security-enabled GIC sends a G1S interrupt to a
1062 * Security-disabled CPU, we must treat it as if it were G0.
1064 cs->hppi.grp = GICV3_G0;
1067 if (icc_hppi_can_preempt(cs)) {
1073 switch (cs->hppi.grp) {
1090 } else if (cs->hppi.nmi) {
1097 trace_gicv3_cpuif_set_irqs(gicv3_redist_affid(cs), fiqlevel, irqlevel);
1099 qemu_set_irq(cs->parent_fiq, fiqlevel);
1100 qemu_set_irq(cs->parent_irq, irqlevel);
1101 qemu_set_irq(cs->parent_nmi, nmilevel);
1106 GICv3CPUState *cs = icc_cs_from_env(env);
1107 uint32_t value = cs->icc_pmr_el1;
1114 (env->cp15.scr_el3 & SCR_FIQ)) {
1115 /* NS access and Group 0 is inaccessible to NS: return the
1118 if ((value & 0x80) == 0) {
1120 value = 0;
1121 } else if (value != 0xff) {
1122 value = (value << 1) & 0xff;
1126 trace_gicv3_icc_pmr_read(gicv3_redist_affid(cs), value);
1134 GICv3CPUState *cs = icc_cs_from_env(env);
1140 trace_gicv3_icc_pmr_write(gicv3_redist_affid(cs), value);
1143 (env->cp15.scr_el3 & SCR_FIQ)) {
1144 /* NS access and Group 0 is inaccessible to NS: return the
1147 if (!(cs->icc_pmr_el1 & 0x80)) {
1151 value = (value >> 1) | 0x80;
1153 value &= icc_fullprio_mask(cs);
1154 cs->icc_pmr_el1 = value;
1155 gicv3_cpuif_update(cs);
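/*
 * Illustrative sketch (helper names invented, not in the source): the
 * Non-secure view of ICC_PMR_EL1 halves the priority space exactly as
 * the read and write paths above do. Round-tripping a Secure value of
 * 0xd0 gives an NS view of 0xa0, and an NS write of 0xa0 stores 0xd0.
 */
static unsigned pmr_secure_to_ns_view(unsigned secure_pmr)
{
    return (secure_pmr << 1) & 0xff;    /* as in icc_pmr_read() */
}

static unsigned pmr_ns_write_to_secure(unsigned ns_value)
{
    return (ns_value >> 1) | 0x80;      /* as in icc_pmr_write() */
}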
1158 static void icc_activate_irq(GICv3CPUState *cs, int irq)
1163 uint32_t mask = icc_gprio_mask(cs, cs->hppi.grp);
1164 int prio = cs->hppi.prio & mask;
1165 int aprbit = prio >> (8 - cs->prebits);
1168 bool nmi = cs->hppi.nmi;
1171 cs->icc_apr[cs->hppi.grp][regno] |= ICC_AP1R_EL1_NMI;
1173 cs->icc_apr[cs->hppi.grp][regno] |= (1U << regbit);
1177 cs->gicr_iactiver0 = deposit32(cs->gicr_iactiver0, irq, 1, 1);
1178 cs->gicr_ipendr0 = deposit32(cs->gicr_ipendr0, irq, 1, 0);
1179 gicv3_redist_update(cs);
1181 gicv3_gicd_active_set(cs->gic, irq);
1182 gicv3_gicd_pending_clear(cs->gic, irq);
1183 gicv3_update(cs->gic, irq, 1);
1185 gicv3_redist_lpi_pending(cs, irq, 0);
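/*
 * Illustrative worked example: with cs->prebits == 5, a masked group
 * priority of 0x30 maps to active-priority bit 0x30 >> (8 - 5) = 6 of
 * APR register 0. The pending state is then cleared wherever the
 * interrupt lives: the redistributor for SGIs/PPIs, the distributor for
 * SPIs, or the LPI pending table for LPIs.
 */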
1189 static uint64_t icc_hppir0_value(GICv3CPUState *cs, CPUARMState *env)
1192 * for group 0.
1196 if (icc_no_enabled_hppi(cs)) {
1205 irq_is_secure = (!(cs->gic->gicd_ctlr & GICD_CTLR_DS) &&
1206 (cs->hppi.grp != GICV3_G1NS));
1208 if (cs->hppi.grp != GICV3_G0 && !arm_is_el3_or_mon(env)) {
1216 if (cs->hppi.grp != GICV3_G0) {
1223 return cs->hppi.irq;
1226 static uint64_t icc_hppir1_value(GICv3CPUState *cs, CPUARMState *env)
1233 if (icc_no_enabled_hppi(cs)) {
1242 irq_is_secure = (!(cs->gic->gicd_ctlr & GICD_CTLR_DS) &&
1243 (cs->hppi.grp != GICV3_G1NS));
1245 if (cs->hppi.grp == GICV3_G0) {
1246 /* Group 0 interrupts not visible via HPPIR1 */
1251 /* Secure interrupts not visible in Non-secure */
1255 /* Group 1 non-secure interrupts not visible in Secure EL1 */
1259 return cs->hppi.irq;
1264 GICv3CPUState *cs = icc_cs_from_env(env);
1271 if (!icc_hppi_can_preempt(cs)) {
1274 intid = icc_hppir0_value(cs, env);
1278 icc_activate_irq(cs, intid);
1281 trace_gicv3_icc_iar0_read(gicv3_redist_affid(cs), intid);
1287 GICv3CPUState *cs = icc_cs_from_env(env);
1295 if (!icc_hppi_can_preempt(cs)) {
1298 intid = icc_hppir1_value(cs, env);
1302 if (cs->hppi.nmi && env->cp15.sctlr_el[el] & SCTLR_NMI) {
1305 icc_activate_irq(cs, intid);
1309 trace_gicv3_icc_iar1_read(gicv3_redist_affid(cs), intid);
1315 GICv3CPUState *cs = icc_cs_from_env(env);
1322 if (!icc_hppi_can_preempt(cs)) {
1325 intid = icc_hppir1_value(cs, env);
1329 if (!cs->hppi.nmi) {
1332 icc_activate_irq(cs, intid);
1336 trace_gicv3_icc_nmiar1_read(gicv3_redist_affid(cs), intid);
1340 static void icc_drop_prio(GICv3CPUState *cs, int grp)
1360 for (i = 0; i < icc_num_aprs(cs); i++) {
1361 uint64_t *papr = &cs->icc_apr[grp][i];
1367 if (i == 0 && cs->nmi_support && (*papr & ICC_AP1R_EL1_NMI)) {
1373 *papr &= *papr - 1;
1378 gicv3_cpuif_update(cs);
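/*
 * Illustrative sketch (not in the source): "*papr &= *papr - 1" above is
 * the classic clear-lowest-set-bit idiom, dropping exactly the priority
 * being completed; e.g. 0b10100 & 0b10011 == 0b10000.
 */
static unsigned clear_lowest_set_bit(unsigned apr)
{
    return apr & (apr - 1);
}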
1381 static bool icc_eoi_split(CPUARMState *env, GICv3CPUState *cs)
1387 return cs->icc_ctlr_el3 & ICC_CTLR_EL3_EOIMODE_EL3;
1390 return cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_EOIMODE;
1392 return cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_EOIMODE;
1396 static int icc_highest_active_group(GICv3CPUState *cs)
1406 if (cs->nmi_support) {
1407 if (cs->icc_apr[GICV3_G1][0] & ICC_AP1R_EL1_NMI) {
1410 if (cs->icc_apr[GICV3_G1NS][0] & ICC_AP1R_EL1_NMI) {
1415 for (i = 0; i < ARRAY_SIZE(cs->icc_apr[0]); i++) {
1416 int g0ctz = ctz32(cs->icc_apr[GICV3_G0][i]);
1417 int g1ctz = ctz32(cs->icc_apr[GICV3_G1][i]);
1418 int g1nsctz = ctz32(cs->icc_apr[GICV3_G1NS][i]);
1430 /* No set active bits? UNPREDICTABLE; return -1 so the caller
1433 return -1;
1436 static void icc_deactivate_irq(GICv3CPUState *cs, int irq)
1439 cs->gicr_iactiver0 = deposit32(cs->gicr_iactiver0, irq, 1, 0);
1440 gicv3_redist_update(cs);
1442 gicv3_gicd_active_clear(cs->gic, irq);
1443 gicv3_update(cs->gic, irq, 1);
1447 static bool icv_eoi_split(CPUARMState *env, GICv3CPUState *cs)
1452 return cs->ich_vmcr_el2 & ICH_VMCR_EL2_VEOIM;
1455 static int icv_find_active(GICv3CPUState *cs, int irq)
1458 * of the corresponding list register, or -1 if there is no match.
1463 for (i = 0; i < cs->num_list_regs; i++) {
1464 uint64_t lr = cs->ich_lr_el2[i];
1471 return -1;
1474 static void icv_deactivate_irq(GICv3CPUState *cs, int idx)
1477 uint64_t lr = cs->ich_lr_el2[idx];
1484 icc_deactivate_irq(cs, pirq);
1488 /* Clear the 'active' part of the state, so ActivePending->Pending
1489 * and Active->Invalid.
1492 cs->ich_lr_el2[idx] = lr;
1495 static void icv_increment_eoicount(GICv3CPUState *cs)
1498 int eoicount = extract64(cs->ich_hcr_el2, ICH_HCR_EL2_EOICOUNT_SHIFT,
1501 cs->ich_hcr_el2 = deposit64(cs->ich_hcr_el2, ICH_HCR_EL2_EOICOUNT_SHIFT,
1505 static int icv_drop_prio(GICv3CPUState *cs, bool *nmi)
1508 * (favouring group 0 if there is a set active bit at
1509 * the same priority for both group 0 and group 1).
1511 * or 0xff if no bits were set in the AP registers at all.
1516 int aprmax = ich_num_aprs(cs);
1518 for (i = 0; i < aprmax; i++) {
1519 uint64_t *papr0 = &cs->ich_apr[GICV3_G0][i];
1520 uint64_t *papr1 = &cs->ich_apr[GICV3_G1NS][i];
1527 if (i == 0 && cs->nmi_support && (*papr1 & ICV_AP1R_EL1_NMI)) {
1530 return 0xff;
1533 /* We can't just use the bit-twiddling hack icc_drop_prio() does
1541 *papr0 &= *papr0 - 1;
1542 return (apr0count + i * 32) << (icv_min_vbpr(cs) + 1);
1544 *papr1 &= *papr1 - 1;
1545 return (apr1count + i * 32) << (icv_min_vbpr(cs) + 1);
1548 return 0xff;
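/*
 * Illustrative note: unlike icc_drop_prio(), this returns the priority
 * value that was dropped so that icv_eoir_write() can compare it against
 * the list register's group priority before deactivating the interrupt.
 */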
1555 GICv3CPUState *cs = icc_cs_from_env(env);
1557 int irq = value & 0xffffff;
1559 trace_gicv3_icv_dir_write(gicv3_redist_affid(cs), value);
1566 if (!icv_eoi_split(env, cs)) {
1570 idx = icv_find_active(cs, irq);
1572 if (idx < 0) {
1576 icv_increment_eoicount(cs);
1578 icv_deactivate_irq(cs, idx);
1581 gicv3_cpuif_virt_update(cs);
1588 GICv3CPUState *cs = icc_cs_from_env(env);
1589 int irq = value & 0xffffff;
1590 int grp = ri->crm == 8 ? GICV3_G0 : GICV3_G1NS;
1594 trace_gicv3_icv_eoir_write(ri->crm == 8 ? 0 : 1,
1595 gicv3_redist_affid(cs), value);
1605 dropprio = icv_drop_prio(cs, &nmi);
1606 if (dropprio == 0xff && !nmi) {
1614 idx = icv_find_active(cs, irq);
1616 if (idx < 0) {
1622 icv_increment_eoicount(cs);
1625 uint64_t lr = cs->ich_lr_el2[idx];
1627 int lr_gprio = ich_lr_prio(lr) & icv_gprio_mask(cs, grp);
1631 if (!icv_eoi_split(env, cs) || irq >= GICV3_LPI_INTID_START) {
1637 icv_deactivate_irq(cs, idx);
1642 gicv3_cpuif_virt_update(cs);
1649 GICv3CPUState *cs = icc_cs_from_env(env);
1650 int irq = value & 0xffffff;
1652 bool is_eoir0 = ri->crm == 8;
1659 trace_gicv3_icc_eoir_write(is_eoir0 ? 0 : 1,
1660 gicv3_redist_affid(cs), value);
1662 if ((irq >= cs->gic->num_irq) &&
1663 !(cs->gic->lpi_enable && (irq >= GICV3_LPI_INTID_START))) {
1665 * 1. If software writes the ID of a spurious interrupt [ie 1020-1023]
1667 * 2. If software writes the number of a non-existent interrupt
1675 grp = icc_highest_active_group(cs);
1681 if (!(cs->gic->gicd_ctlr & GICD_CTLR_DS)
1708 icc_drop_prio(cs, grp);
1710 if (!icc_eoi_split(env, cs)) {
1712 icc_deactivate_irq(cs, irq);
1718 GICv3CPUState *cs = icc_cs_from_env(env);
1725 value = icc_hppir0_value(cs, env);
1726 trace_gicv3_icc_hppir0_read(gicv3_redist_affid(cs), value);
1732 GICv3CPUState *cs = icc_cs_from_env(env);
1739 value = icc_hppir1_value(cs, env);
1740 trace_gicv3_icc_hppir1_read(gicv3_redist_affid(cs), value);
1746 GICv3CPUState *cs = icc_cs_from_env(env);
1747 int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1;
1760 (cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR)) {
1768 (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR)) {
1774 bpr = cs->icc_bpr[grp];
1780 trace_gicv3_icc_bpr_read(ri->crm == 8 ? 0 : 1, gicv3_redist_affid(cs), bpr);
1788 GICv3CPUState *cs = icc_cs_from_env(env);
1789 int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1;
1797 trace_gicv3_icc_bpr_write(ri->crm == 8 ? 0 : 1,
1798 gicv3_redist_affid(cs), value);
1805 (cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR)) {
1813 (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR)) {
1818 minval = (grp == GICV3_G1NS) ? icc_min_bpr_ns(cs) : icc_min_bpr(cs);
1823 cs->icc_bpr[grp] = value & 7;
1824 gicv3_cpuif_update(cs);
1829 GICv3CPUState *cs = icc_cs_from_env(env);
1832 int regno = ri->opc2 & 3;
1833 int grp = (ri->crm & 1) ? GICV3_G1 : GICV3_G0;
1843 value = cs->icc_apr[grp][regno];
1845 trace_gicv3_icc_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
1852 GICv3CPUState *cs = icc_cs_from_env(env);
1854 int regno = ri->opc2 & 3;
1855 int grp = (ri->crm & 1) ? GICV3_G1 : GICV3_G0;
1862 trace_gicv3_icc_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
1868 /* It's not possible to claim that a Non-secure interrupt is active
1869 * at a priority outside the Non-secure range (128..255), since this
1877 if (cs->nmi_support) {
1878 cs->icc_apr[grp][regno] = value & (0xFFFFFFFFU | ICC_AP1R_EL1_NMI);
1880 cs->icc_apr[grp][regno] = value & 0xFFFFFFFFU;
1882 gicv3_cpuif_update(cs);
1889 GICv3CPUState *cs = icc_cs_from_env(env);
1890 int irq = value & 0xffffff;
1899 trace_gicv3_icc_dir_write(gicv3_redist_affid(cs), value);
1901 if (irq >= cs->gic->num_irq) {
1906 if (!icc_eoi_split(env, cs)) {
1910 int grp = gicv3_irq_group(cs->gic, cs, irq);
1912 single_sec_state = cs->gic->gicd_ctlr & GICD_CTLR_DS;
1920 route_fiq_to_el3 = env->cp15.scr_el3 & SCR_FIQ;
1921 route_irq_to_el3 = env->cp15.scr_el3 & SCR_IRQ;
1965 icc_deactivate_irq(cs, irq);
1970 GICv3CPUState *cs = icc_cs_from_env(env);
1977 prio = icc_highest_active_prio(cs);
1980 !arm_is_secure(env) && (env->cp15.scr_el3 & SCR_FIQ)) {
1981 /* NS GIC access and Group 0 is inaccessible to NS */
1982 if ((prio & 0x80) == 0) {
1984 prio = 0;
1985 } else if (prio != 0xff) {
1986 /* Non-idle priority: show the Non-secure view of it */
1987 prio = (prio << 1) & 0xff;
1991 if (cs->nmi_support) {
1994 if (cs->icc_apr[GICV3_G1NS][0] & ICC_AP1R_EL1_NMI) {
1998 if (cs->icc_apr[GICV3_G1NS][0] & ICC_AP1R_EL1_NMI) {
2001 if (cs->icc_apr[GICV3_G1][0] & ICC_AP1R_EL1_NMI) {
2007 trace_gicv3_icc_rpr_read(gicv3_redist_affid(cs), prio);
2011 static void icc_generate_sgi(CPUARMState *env, GICv3CPUState *cs,
2014 GICv3State *s = cs->gic;
2020 uint32_t targetlist = extract64(value, 0, 16);
2025 if (grp == GICV3_G1 && s->gicd_ctlr & GICD_CTLR_DS) {
2027 * interrupts as Group 0 interrupts and must send Secure Group 0
2033 trace_gicv3_icc_generate_sgi(gicv3_redist_affid(cs), irq, irm,
2036 for (i = 0; i < s->num_cpu; i++) {
2037 GICv3CPUState *ocs = &s->cpu[i];
2041 if (cs == ocs) {
2045 /* IRM == 0 : route to Aff3.Aff2.Aff1.n for all n in [0..15]
2050 if (ocs->gicr_typer >> 40 != aff) {
2053 aff0 = extract64(ocs->gicr_typer, 32, 8);
2054 if (aff0 > 15 || extract32(targetlist, aff0, 1) == 0) {
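/*
 * Illustrative worked example: an SGI write with IRM == 0 targets the 16
 * CPUs whose Aff3.Aff2.Aff1 match the value's affinity fields; a CPU with
 * Aff0 == 3 is selected only if bit 3 of the 16-bit targetlist is set,
 * and CPUs with Aff0 > 15 can never appear in a targetlist.
 */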
2067 /* Generate Secure Group 0 SGI. */
2068 GICv3CPUState *cs = icc_cs_from_env(env);
2071 icc_generate_sgi(env, cs, value, GICV3_G0, ns);
2078 GICv3CPUState *cs = icc_cs_from_env(env);
2083 icc_generate_sgi(env, cs, value, grp, ns);
2092 GICv3CPUState *cs = icc_cs_from_env(env);
2097 icc_generate_sgi(env, cs, value, grp, ns);
2102 GICv3CPUState *cs = icc_cs_from_env(env);
2103 int grp = ri->opc2 & 1 ? GICV3_G1 : GICV3_G0;
2114 value = cs->icc_igrpen[grp];
2115 trace_gicv3_icc_igrpen_read(ri->opc2 & 1 ? 1 : 0,
2116 gicv3_redist_affid(cs), value);
2123 GICv3CPUState *cs = icc_cs_from_env(env);
2124 int grp = ri->opc2 & 1 ? GICV3_G1 : GICV3_G0;
2131 trace_gicv3_icc_igrpen_write(ri->opc2 & 1 ? 1 : 0,
2132 gicv3_redist_affid(cs), value);
2138 cs->icc_igrpen[grp] = value & ICC_IGRPEN_ENABLE;
2139 gicv3_cpuif_update(cs);
2144 GICv3CPUState *cs = icc_cs_from_env(env);
2147 /* IGRPEN1_EL3 bits 0 and 1 are r/w aliases into IGRPEN1_EL1 NS and S */
2148 value = cs->icc_igrpen[GICV3_G1NS] | (cs->icc_igrpen[GICV3_G1] << 1);
2149 trace_gicv3_icc_igrpen1_el3_read(gicv3_redist_affid(cs), value);
2156 GICv3CPUState *cs = icc_cs_from_env(env);
2158 trace_gicv3_icc_igrpen1_el3_write(gicv3_redist_affid(cs), value);
2160 /* IGRPEN1_EL3 bits 0 and 1 are r/w aliases into IGRPEN1_EL1 NS and S */
2161 cs->icc_igrpen[GICV3_G1NS] = extract32(value, 0, 1);
2162 cs->icc_igrpen[GICV3_G1] = extract32(value, 1, 1);
2163 gicv3_cpuif_update(cs);
2168 GICv3CPUState *cs = icc_cs_from_env(env);
2176 value = cs->icc_ctlr_el1[bank];
2177 trace_gicv3_icc_ctlr_read(gicv3_redist_affid(cs), value);
2184 GICv3CPUState *cs = icc_cs_from_env(env);
2193 trace_gicv3_icc_ctlr_write(gicv3_redist_affid(cs), value);
2196 * for us PMHE is RAZ/WI (we don't implement 1-of-N interrupts or
2197 * the associated priority-based routing of them);
2198 * if EL3 is implemented and GICD_CTLR.DS == 0, then PMHE and CBPR are RO.
2201 ((cs->gic->gicd_ctlr & GICD_CTLR_DS) == 0)) {
2207 cs->icc_ctlr_el1[bank] &= ~mask;
2208 cs->icc_ctlr_el1[bank] |= (value & mask);
2209 gicv3_cpuif_update(cs);
2215 GICv3CPUState *cs = icc_cs_from_env(env);
2218 value = cs->icc_ctlr_el3;
2219 if (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_EOIMODE) {
2222 if (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR) {
2225 if (cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_EOIMODE) {
2228 if (cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR) {
2232 trace_gicv3_icc_ctlr_el3_read(gicv3_redist_affid(cs), value);
2239 GICv3CPUState *cs = icc_cs_from_env(env);
2242 trace_gicv3_icc_ctlr_el3_write(gicv3_redist_affid(cs), value);
2245 cs->icc_ctlr_el1[GICV3_NS] &= ~(ICC_CTLR_EL1_CBPR | ICC_CTLR_EL1_EOIMODE);
2247 cs->icc_ctlr_el1[GICV3_NS] |= ICC_CTLR_EL1_EOIMODE;
2250 cs->icc_ctlr_el1[GICV3_NS] |= ICC_CTLR_EL1_CBPR;
2253 cs->icc_ctlr_el1[GICV3_S] &= ~(ICC_CTLR_EL1_CBPR | ICC_CTLR_EL1_EOIMODE);
2255 cs->icc_ctlr_el1[GICV3_S] |= ICC_CTLR_EL1_EOIMODE;
2258 cs->icc_ctlr_el1[GICV3_S] |= ICC_CTLR_EL1_CBPR;
2264 cs->icc_ctlr_el3 &= ~mask;
2265 cs->icc_ctlr_el3 |= (value & mask);
2266 gicv3_cpuif_update(cs);
2273 GICv3CPUState *cs = icc_cs_from_env(env);
2276 if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TC) &&
2282 if ((env->cp15.scr_el3 & (SCR_FIQ | SCR_IRQ)) == (SCR_FIQ | SCR_IRQ)) {
2286 if ((arm_hcr_el2_eff(env) & (HCR_IMO | HCR_FMO)) == 0) {
2309 GICv3CPUState *cs = icc_cs_from_env(env);
2311 if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TDIR) &&
2324 (arm_hcr_el2_eff(env) & (HCR_IMO | HCR_FMO)) != 0) {
2336 GICv3CPUState *cs = icc_cs_from_env(env);
2339 if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TALL0) &&
2345 if (env->cp15.scr_el3 & SCR_FIQ) {
2348 if ((arm_hcr_el2_eff(env) & HCR_FMO) == 0) {
2372 GICv3CPUState *cs = icc_cs_from_env(env);
2375 if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TALL1) &&
2381 if (env->cp15.scr_el3 & SCR_IRQ) {
2384 if ((arm_hcr_el2_eff(env) & HCR_IMO) == 0) {
2406 GICv3CPUState *cs = icc_cs_from_env(env);
2408 cs->icc_ctlr_el1[GICV3_S] = ICC_CTLR_EL1_A3V |
2410 ((cs->pribits - 1) << ICC_CTLR_EL1_PRIBITS_SHIFT);
2411 cs->icc_ctlr_el1[GICV3_NS] = ICC_CTLR_EL1_A3V |
2413 ((cs->pribits - 1) << ICC_CTLR_EL1_PRIBITS_SHIFT);
2414 cs->icc_pmr_el1 = 0;
2415 cs->icc_bpr[GICV3_G0] = icc_min_bpr(cs);
2416 cs->icc_bpr[GICV3_G1] = icc_min_bpr(cs);
2417 cs->icc_bpr[GICV3_G1NS] = icc_min_bpr_ns(cs);
2418 memset(cs->icc_apr, 0, sizeof(cs->icc_apr));
2419 memset(cs->icc_igrpen, 0, sizeof(cs->icc_igrpen));
2420 cs->icc_ctlr_el3 = ICC_CTLR_EL3_NDS | ICC_CTLR_EL3_A3V |
2422 ((cs->pribits - 1) << ICC_CTLR_EL3_PRIBITS_SHIFT);
2424 memset(cs->ich_apr, 0, sizeof(cs->ich_apr));
2425 cs->ich_hcr_el2 = 0;
2426 memset(cs->ich_lr_el2, 0, sizeof(cs->ich_lr_el2));
2427 cs->ich_vmcr_el2 = ICH_VMCR_EL2_VFIQEN |
2428 ((icv_min_vbpr(cs) + 1) << ICH_VMCR_EL2_VBPR1_SHIFT) |
2429 (icv_min_vbpr(cs) << ICH_VMCR_EL2_VBPR0_SHIFT);
2434 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 6, .opc2 = 0,
2446 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 0,
2452 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 1,
2458 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 2,
2464 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 3,
2471 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 4,
2479 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 0,
2486 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 1,
2492 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 3,
2498 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 5,
2504 .cp = 15, .opc1 = 0, .crm = 12,
2510 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 6,
2522 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 7,
2534 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 0,
2540 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 1,
2546 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 2,
2553 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 3,
2561 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 4,
2568 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 5,
2576 .resetvalue = 0x7,
2579 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 6,
2588 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 7,
2602 .resetvalue = 0xf,
2618 .resetvalue = 0xf,
2631 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 5,
2638 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 1,
2648 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 6,
2655 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 7,
2662 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 2,
2669 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 3,
2679 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 5,
2688 GICv3CPUState *cs = icc_cs_from_env(env);
2689 int regno = ri->opc2 & 3;
2690 int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
2693 value = cs->ich_apr[grp][regno];
2694 trace_gicv3_ich_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
2701 GICv3CPUState *cs = icc_cs_from_env(env);
2702 int regno = ri->opc2 & 3;
2703 int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
2705 trace_gicv3_ich_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
2707 if (cs->nmi_support) {
2708 cs->ich_apr[grp][regno] = value & (0xFFFFFFFFU | ICV_AP1R_EL1_NMI);
2710 cs->ich_apr[grp][regno] = value & 0xFFFFFFFFU;
2712 gicv3_cpuif_virt_irq_fiq_update(cs);
2717 GICv3CPUState *cs = icc_cs_from_env(env);
2718 uint64_t value = cs->ich_hcr_el2;
2720 trace_gicv3_ich_hcr_read(gicv3_redist_affid(cs), value);
2727 GICv3CPUState *cs = icc_cs_from_env(env);
2729 trace_gicv3_ich_hcr_write(gicv3_redist_affid(cs), value);
2737 cs->ich_hcr_el2 = value;
2738 gicv3_cpuif_virt_update(cs);
2743 GICv3CPUState *cs = icc_cs_from_env(env);
2744 uint64_t value = cs->ich_vmcr_el2;
2746 trace_gicv3_ich_vmcr_read(gicv3_redist_affid(cs), value);
2753 GICv3CPUState *cs = icc_cs_from_env(env);
2755 trace_gicv3_ich_vmcr_write(gicv3_redist_affid(cs), value);
2762 cs->ich_vmcr_el2 = value;
2766 write_vbpr(cs, GICV3_G0, read_vbpr(cs, GICV3_G0));
2767 write_vbpr(cs, GICV3_G1, read_vbpr(cs, GICV3_G1));
2769 gicv3_cpuif_virt_update(cs);
2774 GICv3CPUState *cs = icc_cs_from_env(env);
2775 int regno = ri->opc2 | ((ri->crm & 1) << 3);
2779 * 64-bit reads of the whole LR
2780 * 32-bit reads of the low half of the LR
2781 * 32-bit reads of the high half of the LR
2783 if (ri->state == ARM_CP_STATE_AA32) {
2784 if (ri->crm >= 14) {
2785 value = extract64(cs->ich_lr_el2[regno], 32, 32);
2786 trace_gicv3_ich_lrc_read(regno, gicv3_redist_affid(cs), value);
2788 value = extract64(cs->ich_lr_el2[regno], 0, 32);
2789 trace_gicv3_ich_lr32_read(regno, gicv3_redist_affid(cs), value);
2792 value = cs->ich_lr_el2[regno];
2793 trace_gicv3_ich_lr_read(regno, gicv3_redist_affid(cs), value);
2802 GICv3CPUState *cs = icc_cs_from_env(env);
2803 int regno = ri->opc2 | ((ri->crm & 1) << 3);
2806 * 64-bit writes to the whole LR
2807 * 32-bit writes to the low half of the LR
2808 * 32-bit writes to the high half of the LR
2810 if (ri->state == ARM_CP_STATE_AA32) {
2811 if (ri->crm >= 14) {
2812 trace_gicv3_ich_lrc_write(regno, gicv3_redist_affid(cs), value);
2813 value = deposit64(cs->ich_lr_el2[regno], 32, 32, value);
2815 trace_gicv3_ich_lr32_write(regno, gicv3_redist_affid(cs), value);
2816 value = deposit64(cs->ich_lr_el2[regno], 0, 32, value);
2819 trace_gicv3_ich_lr_write(regno, gicv3_redist_affid(cs), value);
2823 if (cs->vpribits < 8) {
2825 8 - cs->vpribits, 0);
2829 if (!cs->nmi_support) {
2833 cs->ich_lr_el2[regno] = value;
2834 gicv3_cpuif_virt_update(cs);
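/*
 * Illustrative note: the masking above makes the unimplemented low bits
 * of the LR priority field RES0 (vpribits == 5 zeroes bits [2:0] of the
 * 8-bit field) and treats the NMI bit as RES0 without NMI support.
 */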
2839 GICv3CPUState *cs = icc_cs_from_env(env);
2842 value = ((cs->num_list_regs - 1) << ICH_VTR_EL2_LISTREGS_SHIFT)
2845 | ((cs->vprebits - 1) << ICH_VTR_EL2_PREBITS_SHIFT)
2846 | ((cs->vpribits - 1) << ICH_VTR_EL2_PRIBITS_SHIFT);
2848 if (cs->gic->revision < 4) {
2852 trace_gicv3_ich_vtr_read(gicv3_redist_affid(cs), value);
2858 GICv3CPUState *cs = icc_cs_from_env(env);
2859 uint64_t value = maintenance_interrupt_state(cs);
2861 trace_gicv3_ich_misr_read(gicv3_redist_affid(cs), value);
2867 GICv3CPUState *cs = icc_cs_from_env(env);
2868 uint64_t value = eoi_maintenance_interrupt_state(cs, NULL);
2870 trace_gicv3_ich_eisr_read(gicv3_redist_affid(cs), value);
2876 GICv3CPUState *cs = icc_cs_from_env(env);
2877 uint64_t value = 0;
2880 for (i = 0; i < cs->num_list_regs; i++) {
2881 uint64_t lr = cs->ich_lr_el2[i];
2883 if ((lr & ICH_LR_EL2_STATE_MASK) == 0 &&
2884 ((lr & ICH_LR_EL2_HW) != 0 || (lr & ICH_LR_EL2_EOI) == 0)) {
2889 trace_gicv3_ich_elrsr_read(gicv3_redist_affid(cs), value);
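/*
 * Illustrative note: a set ELRSR bit means the list register is free for
 * the hypervisor to reuse: its state is Invalid and, unless it is a
 * hardware interrupt, it is not still owed an EOI maintenance interrupt.
 */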
2895 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 0,
2897 .nv2_redirect_offset = 0x480,
2903 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 0,
2905 .nv2_redirect_offset = 0x4a0,
2911 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 0,
2913 .nv2_redirect_offset = 0x4c0,
2945 .nv2_redirect_offset = 0x4c8,
2956 .nv2_redirect_offset = 0x488,
2964 .nv2_redirect_offset = 0x4a8,
2975 .nv2_redirect_offset = 0x490,
2983 .nv2_redirect_offset = 0x498,
2991 .nv2_redirect_offset = 0x4b0,
2999 .nv2_redirect_offset = 0x4b8,
3008 GICv3CPUState *cs = opaque;
3010 gicv3_cpuif_update(cs);
3016 gicv3_cpuif_virt_irq_fiq_update(cs);
3026 for (i = 0; i < s->num_cpu; i++) {
3028 GICv3CPUState *cs = &s->cpu[i];
3034 * cpu->gic_num_lrs
3035 * cpu->gic_vpribits
3036 * cpu->gic_vprebits
3037 * cpu->gic_pribits
3042 * it might be with code translated by CPU 0 but run by CPU 1, in
3044 * So instead we define the regs with no ri->opaque info, and
3056 * that is a property of the GIC device in s->nmi_support;
3057 * cs->nmi_support indicates the CPU interface's support.
3060 cs->nmi_support = true;
3070 if (s->force_8bit_prio) {
3071 cs->pribits = 8;
3073 cs->pribits = cpu->gic_pribits ?: 5;
3086 cs->prebits = cs->pribits;
3087 if (cs->prebits == 8) {
3088 cs->prebits--;
3094 g_assert(cs->pribits >= 4 && cs->pribits <= 8);
3100 if (cs->prebits >= 6) {
3103 if (cs->prebits == 7) {
3107 if (arm_feature(&cpu->env, ARM_FEATURE_EL2)) {
3110 cs->num_list_regs = cpu->gic_num_lrs ?: 4;
3111 cs->vpribits = cpu->gic_vpribits ?: 5;
3112 cs->vprebits = cpu->gic_vprebits ?: 5;
3118 g_assert(cs->vprebits <= cs->vpribits);
3119 g_assert(cs->vprebits >= 5 && cs->vprebits <= 7);
3120 g_assert(cs->vpribits >= 5 && cs->vpribits <= 8);
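/*
 * Illustrative note: when the CPU model leaves these unset, the code
 * above falls back to 4 list registers and 5 bits of virtual priority
 * and preemption, the smallest values the asserts accept.
 */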
3124 for (j = 0; j < cs->num_list_regs; j++) {
3125 /* Note that the AArch64 LRs are 64-bit; the AArch32 LRs
3134 .nv2_redirect_offset = 0x400 + 8 * j,
3150 if (cs->vprebits >= 6) {
3153 if (cs->vprebits == 7) {
3161 * the non-TCG case this is OK, as EL2 and EL3 can't exist.
3163 arm_register_el_change_hook(cpu, gicv3_cpuif_el_change_hook, cs);
3165 assert(!arm_feature(&cpu->env, ARM_FEATURE_EL2));
3166 assert(!arm_feature(&cpu->env, ARM_FEATURE_EL3));