xref: /openbmc/qemu/hw/intc/arm_gicv3_cpuif.c (revision 77620ba65ef32121de20848f9635c4afe233a1ce)
1 /*
2  * ARM Generic Interrupt Controller v3
3  *
4  * Copyright (c) 2016 Linaro Limited
5  * Written by Peter Maydell
6  *
7  * This code is licensed under the GPL, version 2 or (at your option)
8  * any later version.
9  */
10 
11 /* This file contains the code for the system register interface
12  * portions of the GICv3.
13  */
14 
15 #include "qemu/osdep.h"
16 #include "qemu/bitops.h"
17 #include "trace.h"
18 #include "gicv3_internal.h"
19 #include "cpu.h"
20 
21 static GICv3CPUState *icc_cs_from_env(CPUARMState *env)
22 {
23     /* Given the CPU, find the right GICv3CPUState struct.
24      * Since we registered the CPU interface with the EL change hook as
25      * the opaque pointer, we can just directly get from the CPU to it.
26      */
27     return arm_get_el_change_hook_opaque(arm_env_get_cpu(env));
28 }
29 
30 static bool gicv3_use_ns_bank(CPUARMState *env)
31 {
32     /* Return true if we should use the NonSecure bank for a banked GIC
33      * CPU interface register. Note that this differs from the
34      * access_secure_reg() function because GICv3 banked registers are
35      * banked even for AArch64, unlike the other CPU system registers.
36      */
37     return !arm_is_secure_below_el3(env);
38 }
39 
40 /* The minimum BPR for the virtual interface is a configurable property */
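/* (For example, with cs->vprebits == 5 this gives a minimum VBPR0 of
 * 7 - 5 == 2; write_vbpr() below uses one more than this value as the
 * minimum for VBPR1.)
 */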
41 static inline int icv_min_vbpr(GICv3CPUState *cs)
42 {
43     return 7 - cs->vprebits;
44 }
45 
46 /* Simple accessor functions for LR fields */
47 static int ich_lr_state(uint64_t lr)
48 {
49     return extract64(lr, ICH_LR_EL2_STATE_SHIFT, ICH_LR_EL2_STATE_LENGTH);
50 }
51 
52 static bool icv_access(CPUARMState *env, int hcr_flags)
53 {
54     /* Return true if this ICC_ register access should really be
55      * directed to an ICV_ access. hcr_flags is a mask of
56      * HCR_EL2 bits to check: we treat this as an ICV_ access
57      * if we are in NS EL1 and at least one of the specified
58      * HCR_EL2 bits is set.
59      *
60      * ICV registers fall into three categories:
61      *  * access if NS EL1 and HCR_EL2.FMO == 1:
62      *    all ICV regs with '0' in their name
63      *  * access if NS EL1 and HCR_EL2.IMO == 1:
64      *    all ICV regs with '1' in their name
65      *  * access if NS EL1 and either IMO or FMO == 1:
66      *    CTLR, DIR, PMR, RPR
67      */
68     return (env->cp15.hcr_el2 & hcr_flags) && arm_current_el(env) == 1
69         && !arm_is_secure_below_el3(env);
70 }
71 
72 static int read_vbpr(GICv3CPUState *cs, int grp)
73 {
74     /* Read VBPR value out of the VMCR field (caller must handle
75      * VCBPR effects if required)
76      */
77     if (grp == GICV3_G0) {
78         return extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR0_SHIFT,
79                      ICH_VMCR_EL2_VBPR0_LENGTH);
80     } else {
81         return extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR1_SHIFT,
82                          ICH_VMCR_EL2_VBPR1_LENGTH);
83     }
84 }
85 
86 static void write_vbpr(GICv3CPUState *cs, int grp, int value)
87 {
88     /* Write new VBPR value, handling the "writing a value less than
89      * the minimum sets it to the minimum" semantics.
90      */
91     int min = icv_min_vbpr(cs);
92 
93     if (grp != GICV3_G0) {
94         min++;
95     }
96 
97     value = MAX(value, min);
98 
99     if (grp == GICV3_G0) {
100         cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR0_SHIFT,
101                                      ICH_VMCR_EL2_VBPR0_LENGTH, value);
102     } else {
103         cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR1_SHIFT,
104                                      ICH_VMCR_EL2_VBPR1_LENGTH, value);
105     }
106 }
107 
108 static uint32_t icv_fullprio_mask(GICv3CPUState *cs)
109 {
110     /* Return a mask word which clears the unimplemented priority bits
111      * from a priority value for a virtual interrupt. (Not to be confused
112      * with the group priority, whose mask depends on the value of VBPR
113      * for the interrupt group.)
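     * For example, cs->vpribits == 5 gives a mask of 0xfffffff8, i.e.
     * only priority bits [7:3] are implemented for virtual interrupts.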
114      */
115     return ~0U << (8 - cs->vpribits);
116 }
117 
118 static uint32_t eoi_maintenance_interrupt_state(GICv3CPUState *cs,
119                                                 uint32_t *misr)
120 {
121     /* Return a set of bits indicating the EOI maintenance interrupt status
122      * for each list register. The EOI maintenance interrupt status is
123      * 1 if LR.State == 0 && LR.HW == 0 && LR.EOI == 1
124      * (see the GICv3 spec for the ICH_EISR_EL2 register).
125      * If misr is not NULL then we should also collect the information
126      * about the MISR.EOI, MISR.NP and MISR.U bits.
127      */
128     uint32_t value = 0;
129     int validcount = 0;
130     bool seenpending = false;
131     int i;
132 
133     for (i = 0; i < cs->num_list_regs; i++) {
134         uint64_t lr = cs->ich_lr_el2[i];
135 
136         if ((lr & (ICH_LR_EL2_STATE_MASK | ICH_LR_EL2_HW | ICH_LR_EL2_EOI))
137             == ICH_LR_EL2_EOI) {
138             value |= (1 << i);
139         }
140         if ((lr & ICH_LR_EL2_STATE_MASK)) {
141             validcount++;
142         }
143         if (ich_lr_state(lr) == ICH_LR_EL2_STATE_PENDING) {
144             seenpending = true;
145         }
146     }
147 
148     if (misr) {
149         if (validcount < 2 && (cs->ich_hcr_el2 & ICH_HCR_EL2_UIE)) {
150             *misr |= ICH_MISR_EL2_U;
151         }
152         if (!seenpending && (cs->ich_hcr_el2 & ICH_HCR_EL2_NPIE)) {
153             *misr |= ICH_MISR_EL2_NP;
154         }
155         if (value) {
156             *misr |= ICH_MISR_EL2_EOI;
157         }
158     }
159     return value;
160 }
161 
162 static uint32_t maintenance_interrupt_state(GICv3CPUState *cs)
163 {
164     /* Return a set of bits indicating the maintenance interrupt status
165      * (as seen in the ICH_MISR_EL2 register).
166      */
167     uint32_t value = 0;
168 
169     /* Scan list registers and fill in the U, NP and EOI bits */
170     eoi_maintenance_interrupt_state(cs, &value);
171 
172     if ((cs->ich_hcr_el2 & ICH_HCR_EL2_LRENPIE) &&
        (cs->ich_hcr_el2 & ICH_HCR_EL2_EOICOUNT_MASK)) {
173         value |= ICH_MISR_EL2_LRENP;
174     }
175 
176     if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP0EIE) &&
177         (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0)) {
178         value |= ICH_MISR_EL2_VGRP0E;
179     }
180 
181     if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP0DIE) &&
182         !(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0)) {
183         value |= ICH_MISR_EL2_VGRP0D;
184     }
185     if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP1EIE) &&
186         (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) {
187         value |= ICH_MISR_EL2_VGRP1E;
188     }
189 
190     if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP1DIE) &&
191         !(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) {
192         value |= ICH_MISR_EL2_VGRP1D;
193     }
194 
195     return value;
196 }
197 
198 static void gicv3_cpuif_virt_update(GICv3CPUState *cs)
199 {
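    /* Nothing to do yet: the ICV_/ICH_ register accessors below already
     * call this hook, presumably so that the virtual IRQ/FIQ lines and
     * the maintenance interrupt can be recomputed here once delivery of
     * virtual interrupts is implemented.
     */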
200 }
201 
202 static uint64_t icv_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
203 {
204     GICv3CPUState *cs = icc_cs_from_env(env);
205     int regno = ri->opc2 & 3;
206     int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
207     uint64_t value = cs->ich_apr[grp][regno];
208 
209     trace_gicv3_icv_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
210     return value;
211 }
212 
213 static void icv_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
214                          uint64_t value)
215 {
216     GICv3CPUState *cs = icc_cs_from_env(env);
217     int regno = ri->opc2 & 3;
218     int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
219 
220     trace_gicv3_icv_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
221 
222     cs->ich_apr[grp][regno] = value & 0xFFFFFFFFU;
223 
224     gicv3_cpuif_virt_update(cs);
226 }
227 
228 static uint64_t icv_bpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
229 {
230     GICv3CPUState *cs = icc_cs_from_env(env);
231     int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1NS;
232     uint64_t bpr;
233     bool satinc = false;
234 
235     if (grp == GICV3_G1NS && (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR)) {
236         /* reads return bpr0 + 1 saturated to 7, writes ignored */
237         grp = GICV3_G0;
238         satinc = true;
239     }
240 
241     bpr = read_vbpr(cs, grp);
242 
243     if (satinc) {
244         bpr++;
245         bpr = MIN(bpr, 7);
246     }
247 
248     trace_gicv3_icv_bpr_read(ri->crm == 8 ? 0 : 1, gicv3_redist_affid(cs), bpr);
249 
250     return bpr;
251 }
252 
253 static void icv_bpr_write(CPUARMState *env, const ARMCPRegInfo *ri,
254                           uint64_t value)
255 {
256     GICv3CPUState *cs = icc_cs_from_env(env);
257     int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1NS;
258 
259     trace_gicv3_icv_bpr_write(ri->crm == 8 ? 0 : 1,
260                               gicv3_redist_affid(cs), value);
261 
262     if (grp == GICV3_G1NS && (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR)) {
263         /* reads return bpr0 + 1 saturated to 7, writes ignored */
264         return;
265     }
266 
267     write_vbpr(cs, grp, value);
268 
269     gicv3_cpuif_virt_update(cs);
270 }
271 
272 static uint64_t icv_pmr_read(CPUARMState *env, const ARMCPRegInfo *ri)
273 {
274     GICv3CPUState *cs = icc_cs_from_env(env);
275     uint64_t value;
276 
277     value = extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
278                       ICH_VMCR_EL2_VPMR_LENGTH);
279 
280     trace_gicv3_icv_pmr_read(gicv3_redist_affid(cs), value);
281     return value;
282 }
283 
284 static void icv_pmr_write(CPUARMState *env, const ARMCPRegInfo *ri,
285                           uint64_t value)
286 {
287     GICv3CPUState *cs = icc_cs_from_env(env);
288 
289     trace_gicv3_icv_pmr_write(gicv3_redist_affid(cs), value);
290 
291     value &= icv_fullprio_mask(cs);
292 
293     cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
294                                  ICH_VMCR_EL2_VPMR_LENGTH, value);
295 
296     gicv3_cpuif_virt_update(cs);
297 }
298 
299 static uint64_t icv_igrpen_read(CPUARMState *env, const ARMCPRegInfo *ri)
300 {
301     GICv3CPUState *cs = icc_cs_from_env(env);
302     int enbit;
303     uint64_t value;
304 
305     enbit = ri->opc2 & 1 ? ICH_VMCR_EL2_VENG1_SHIFT : ICH_VMCR_EL2_VENG0_SHIFT;
306     value = extract64(cs->ich_vmcr_el2, enbit, 1);
307 
308     trace_gicv3_icv_igrpen_read(ri->opc2 & 1 ? 1 : 0,
309                                 gicv3_redist_affid(cs), value);
310     return value;
311 }
312 
313 static void icv_igrpen_write(CPUARMState *env, const ARMCPRegInfo *ri,
314                              uint64_t value)
315 {
316     GICv3CPUState *cs = icc_cs_from_env(env);
317     int enbit;
318 
319     trace_gicv3_icv_igrpen_write(ri->opc2 & 1 ? 1 : 0,
320                                  gicv3_redist_affid(cs), value);
321 
322     enbit = ri->opc2 & 1 ? ICH_VMCR_EL2_VENG1_SHIFT : ICH_VMCR_EL2_VENG0_SHIFT;
323 
324     cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, enbit, 1, value);
325     gicv3_cpuif_virt_update(cs);
326 }
327 
328 static uint64_t icv_ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri)
329 {
330     GICv3CPUState *cs = icc_cs_from_env(env);
331     uint64_t value;
332 
333     /* Note that the fixed fields here (A3V, SEIS, IDbits, PRIbits)
334      * should match the ones reported in ich_vtr_read().
335      */
336     value = ICC_CTLR_EL1_A3V | (1 << ICC_CTLR_EL1_IDBITS_SHIFT) |
337         (7 << ICC_CTLR_EL1_PRIBITS_SHIFT);
338 
339     if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VEOIM) {
340         value |= ICC_CTLR_EL1_EOIMODE;
341     }
342 
343     if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR) {
344         value |= ICC_CTLR_EL1_CBPR;
345     }
346 
347     trace_gicv3_icv_ctlr_read(gicv3_redist_affid(cs), value);
348     return value;
349 }
350 
351 static void icv_ctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
352                                uint64_t value)
353 {
354     GICv3CPUState *cs = icc_cs_from_env(env);
355 
356     trace_gicv3_icv_ctlr_write(gicv3_redist_affid(cs), value);
357 
358     cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VCBPR_SHIFT,
359                                  1, value & ICC_CTLR_EL1_CBPR ? 1 : 0);
360     cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VEOIM_SHIFT,
361                                  1, value & ICC_CTLR_EL1_EOIMODE ? 1 : 0);
362 
363     gicv3_cpuif_virt_update(cs);
364 }
365 
366 static int icc_highest_active_prio(GICv3CPUState *cs)
367 {
368     /* Calculate the current running priority based on the set bits
369      * in the Active Priority Registers.
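     * (The lowest set bit across the APRs encodes the running priority:
     * bit n of APR word i corresponds to a group priority of
     * (32 * i + n) << (GIC_MIN_BPR + 1), which, assuming GIC_MIN_BPR
     * is 0, is simply twice the bit index.)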
370      */
371     int i;
372 
373     for (i = 0; i < ARRAY_SIZE(cs->icc_apr[0]); i++) {
374         uint32_t apr = cs->icc_apr[GICV3_G0][i] |
375             cs->icc_apr[GICV3_G1][i] | cs->icc_apr[GICV3_G1NS][i];
376 
377         if (!apr) {
378             continue;
379         }
380         return (i * 32 + ctz32(apr)) << (GIC_MIN_BPR + 1);
381     }
382     /* No current active interrupts: return idle priority */
383     return 0xff;
384 }
385 
386 static uint32_t icc_gprio_mask(GICv3CPUState *cs, int group)
387 {
388     /* Return a mask word which clears the subpriority bits from
389      * a priority value for an interrupt in the specified group.
390      * This depends on the BPR value:
391      *  a BPR of 0 means the group priority bits are [7:1];
392      *  a BPR of 1 means they are [7:2], and so on down to
393      *  a BPR of 7 meaning no group priority bits at all.
394      * Which BPR to use depends on the group of the interrupt and
395      * the current ICC_CTLR.CBPR settings.
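     * For example, a BPR of 2 yields the mask 0xf8: bits [7:3] form the
     * group priority and bits [2:0] are the subpriority, which is
     * ignored for preemption purposes.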
396      */
397     if ((group == GICV3_G1 && cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR) ||
398         (group == GICV3_G1NS &&
399          cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR)) {
400         group = GICV3_G0;
401     }
402 
403     return ~0U << ((cs->icc_bpr[group] & 7) + 1);
404 }
405 
406 static bool icc_no_enabled_hppi(GICv3CPUState *cs)
407 {
408     /* Return true if there is no pending interrupt, or the
409      * highest priority pending interrupt is in a group which has been
410      * disabled at the CPU interface by the ICC_IGRPEN* register enable bits.
411      */
412     return cs->hppi.prio == 0xff || (cs->icc_igrpen[cs->hppi.grp] == 0);
413 }
414 
415 static bool icc_hppi_can_preempt(GICv3CPUState *cs)
416 {
417     /* Return true if we have a pending interrupt of sufficient
418      * priority to preempt.
419      */
420     int rprio;
421     uint32_t mask;
422 
423     if (icc_no_enabled_hppi(cs)) {
424         return false;
425     }
426 
427     if (cs->hppi.prio >= cs->icc_pmr_el1) {
428         /* Priority mask masks this interrupt */
429         return false;
430     }
431 
432     rprio = icc_highest_active_prio(cs);
433     if (rprio == 0xff) {
434         /* No currently running interrupt so we can preempt */
435         return true;
436     }
437 
438     mask = icc_gprio_mask(cs, cs->hppi.grp);
439 
440     /* We only preempt a running interrupt if the pending interrupt's
441      * group priority is sufficient (the subpriorities are not considered).
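     * For example, with a BPR of 0 the mask is 0xfe: a pending priority
     * of 0x40 will not preempt a running priority of 0x41 (equal group
     * priority) but will preempt a running priority of 0x42.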
442      */
443     if ((cs->hppi.prio & mask) < (rprio & mask)) {
444         return true;
445     }
446 
447     return false;
448 }
449 
450 void gicv3_cpuif_update(GICv3CPUState *cs)
451 {
452     /* Tell the CPU about its highest priority pending interrupt */
453     int irqlevel = 0;
454     int fiqlevel = 0;
455     ARMCPU *cpu = ARM_CPU(cs->cpu);
456     CPUARMState *env = &cpu->env;
457 
458     trace_gicv3_cpuif_update(gicv3_redist_affid(cs), cs->hppi.irq,
459                              cs->hppi.grp, cs->hppi.prio);
460 
461     if (cs->hppi.grp == GICV3_G1 && !arm_feature(env, ARM_FEATURE_EL3)) {
462         /* If a Security-enabled GIC sends a G1S interrupt to a
463          * Security-disabled CPU, we must treat it as if it were G0.
464          */
465         cs->hppi.grp = GICV3_G0;
466     }
467 
468     if (icc_hppi_can_preempt(cs)) {
469         /* We have an interrupt: should we signal it as IRQ or FIQ?
470          * This is described in the GICv3 spec section 4.6.2.
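         * In short: Group 0 is always FIQ; a Group 1 interrupt belonging
         * to the other Security state is FIQ, while a Group 1 interrupt
         * for the current Security state is IRQ (EL3 in AArch64 takes
         * Secure Group 1 as FIQ).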
471          */
472         bool isfiq;
473 
474         switch (cs->hppi.grp) {
475         case GICV3_G0:
476             isfiq = true;
477             break;
478         case GICV3_G1:
479             isfiq = (!arm_is_secure(env) ||
480                      (arm_current_el(env) == 3 && arm_el_is_aa64(env, 3)));
481             break;
482         case GICV3_G1NS:
483             isfiq = arm_is_secure(env);
484             break;
485         default:
486             g_assert_not_reached();
487         }
488 
489         if (isfiq) {
490             fiqlevel = 1;
491         } else {
492             irqlevel = 1;
493         }
494     }
495 
496     trace_gicv3_cpuif_set_irqs(gicv3_redist_affid(cs), fiqlevel, irqlevel);
497 
498     qemu_set_irq(cs->parent_fiq, fiqlevel);
499     qemu_set_irq(cs->parent_irq, irqlevel);
500 }
501 
502 static uint64_t icc_pmr_read(CPUARMState *env, const ARMCPRegInfo *ri)
503 {
504     GICv3CPUState *cs = icc_cs_from_env(env);
505     uint32_t value = cs->icc_pmr_el1;
506 
507     if (icv_access(env, HCR_FMO | HCR_IMO)) {
508         return icv_pmr_read(env, ri);
509     }
510 
511     if (arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env) &&
512         (env->cp15.scr_el3 & SCR_FIQ)) {
513         /* NS access and Group 0 is inaccessible to NS: return the
514          * NS view of the current priority
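         * (e.g. a Secure-range priority of 0xd0 reads back to NS as
         * (0xd0 << 1) & 0xff == 0xa0; priorities below 0x80 are hidden
         * entirely and read as 0)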
515          */
516         if (value & 0x80) {
517             /* Secure priorities not visible to NS */
518             value = 0;
519         } else if (value != 0xff) {
520             value = (value << 1) & 0xff;
521         }
522     }
523 
524     trace_gicv3_icc_pmr_read(gicv3_redist_affid(cs), value);
525 
526     return value;
527 }
528 
529 static void icc_pmr_write(CPUARMState *env, const ARMCPRegInfo *ri,
530                           uint64_t value)
531 {
532     GICv3CPUState *cs = icc_cs_from_env(env);
533 
534     if (icv_access(env, HCR_FMO | HCR_IMO)) {
535         return icv_pmr_write(env, ri, value);
536     }
537 
538     trace_gicv3_icc_pmr_write(gicv3_redist_affid(cs), value);
539 
540     value &= 0xff;
541 
542     if (arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env) &&
543         (env->cp15.scr_el3 & SCR_FIQ)) {
544         /* NS access and Group 0 is inaccessible to NS: the value
545          * written is the NS view and must be regraded into the Secure range
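         * (e.g. an NS write of 0xa0 results in an effective PMR of
         * (0xa0 >> 1) | 0x80 == 0xd0)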
546          */
547         if (!(cs->icc_pmr_el1 & 0x80)) {
548             /* Current PMR in the secure range, don't allow NS to change it */
549             return;
550         }
551         value = (value >> 1) | 0x80;
552     }
553     cs->icc_pmr_el1 = value;
554     gicv3_cpuif_update(cs);
555 }
556 
557 static void icc_activate_irq(GICv3CPUState *cs, int irq)
558 {
559     /* Move the interrupt from the Pending state to Active, and update
560      * the Active Priority Registers
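     * (For example, a group priority of 0x40 sets APR bit 0x40 >> 1 == 32,
     * i.e. bit 0 of APR register 1, matching the encoding that
     * icc_highest_active_prio() reverses.)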
561      */
562     uint32_t mask = icc_gprio_mask(cs, cs->hppi.grp);
563     int prio = cs->hppi.prio & mask;
564     int aprbit = prio >> 1;
565     int regno = aprbit / 32;
566     int regbit = aprbit % 32;
567 
568     cs->icc_apr[cs->hppi.grp][regno] |= (1 << regbit);
569 
570     if (irq < GIC_INTERNAL) {
571         cs->gicr_iactiver0 = deposit32(cs->gicr_iactiver0, irq, 1, 1);
572         cs->gicr_ipendr0 = deposit32(cs->gicr_ipendr0, irq, 1, 0);
573         gicv3_redist_update(cs);
574     } else {
575         gicv3_gicd_active_set(cs->gic, irq);
576         gicv3_gicd_pending_clear(cs->gic, irq);
577         gicv3_update(cs->gic, irq, 1);
578     }
579 }
580 
581 static uint64_t icc_hppir0_value(GICv3CPUState *cs, CPUARMState *env)
582 {
583     /* Return the highest priority pending interrupt register value
584      * for group 0.
585      */
586     bool irq_is_secure;
587 
588     if (cs->hppi.prio == 0xff) {
589         return INTID_SPURIOUS;
590     }
591 
592     /* Check whether we can return the interrupt or if we should return
593      * a special identifier, as per the CheckGroup0ForSpecialIdentifiers
594      * pseudocode. (We can simplify a little because for us ICC_SRE_EL1.RM
595      * is always zero.)
596      */
597     irq_is_secure = (!(cs->gic->gicd_ctlr & GICD_CTLR_DS) &&
598                      (cs->hppi.grp != GICV3_G1NS));
599 
600     if (cs->hppi.grp != GICV3_G0 && !arm_is_el3_or_mon(env)) {
601         return INTID_SPURIOUS;
602     }
603     if (irq_is_secure && !arm_is_secure(env)) {
604         /* Secure interrupts not visible to Nonsecure */
605         return INTID_SPURIOUS;
606     }
607 
608     if (cs->hppi.grp != GICV3_G0) {
609         /* Indicate to EL3 that there's a Group 1 interrupt for the other
610          * state pending.
611          */
612         return irq_is_secure ? INTID_SECURE : INTID_NONSECURE;
613     }
614 
615     return cs->hppi.irq;
616 }
617 
618 static uint64_t icc_hppir1_value(GICv3CPUState *cs, CPUARMState *env)
619 {
620     /* Return the highest priority pending interrupt register value
621      * for group 1.
622      */
623     bool irq_is_secure;
624 
625     if (cs->hppi.prio == 0xff) {
626         return INTID_SPURIOUS;
627     }
628 
629     /* Check whether we can return the interrupt or if we should return
630      * a special identifier, as per the CheckGroup1ForSpecialIdentifiers
631      * pseudocode. (We can simplify a little because for us ICC_SRE_EL1.RM
632      * is always zero.)
633      */
634     irq_is_secure = (!(cs->gic->gicd_ctlr & GICD_CTLR_DS) &&
635                      (cs->hppi.grp != GICV3_G1NS));
636 
637     if (cs->hppi.grp == GICV3_G0) {
638         /* Group 0 interrupts not visible via HPPIR1 */
639         return INTID_SPURIOUS;
640     }
641     if (irq_is_secure) {
642         if (!arm_is_secure(env)) {
643             /* Secure interrupts not visible in Non-secure */
644             return INTID_SPURIOUS;
645         }
646     } else if (!arm_is_el3_or_mon(env) && arm_is_secure(env)) {
647         /* Group 1 non-secure interrupts not visible in Secure EL1 */
648         return INTID_SPURIOUS;
649     }
650 
651     return cs->hppi.irq;
652 }
653 
654 static uint64_t icc_iar0_read(CPUARMState *env, const ARMCPRegInfo *ri)
655 {
656     GICv3CPUState *cs = icc_cs_from_env(env);
657     uint64_t intid;
658 
659     if (!icc_hppi_can_preempt(cs)) {
660         intid = INTID_SPURIOUS;
661     } else {
662         intid = icc_hppir0_value(cs, env);
663     }
664 
665     if (!(intid >= INTID_SECURE && intid <= INTID_SPURIOUS)) {
666         icc_activate_irq(cs, intid);
667     }
668 
669     trace_gicv3_icc_iar0_read(gicv3_redist_affid(cs), intid);
670     return intid;
671 }
672 
673 static uint64_t icc_iar1_read(CPUARMState *env, const ARMCPRegInfo *ri)
674 {
675     GICv3CPUState *cs = icc_cs_from_env(env);
676     uint64_t intid;
677 
678     if (!icc_hppi_can_preempt(cs)) {
679         intid = INTID_SPURIOUS;
680     } else {
681         intid = icc_hppir1_value(cs, env);
682     }
683 
684     if (!(intid >= INTID_SECURE && intid <= INTID_SPURIOUS)) {
685         icc_activate_irq(cs, intid);
686     }
687 
688     trace_gicv3_icc_iar1_read(gicv3_redist_affid(cs), intid);
689     return intid;
690 }
691 
692 static void icc_drop_prio(GICv3CPUState *cs, int grp)
693 {
694     /* Drop the priority of the currently active interrupt in
695      * the specified group.
696      *
697      * Note that we can guarantee (because of the requirement to nest
698      * ICC_IAR reads [which activate an interrupt and raise priority]
699      * with ICC_EOIR writes [which drop the priority for the interrupt])
700      * that the interrupt we're being called for is the highest priority
701      * active interrupt, meaning that it has the lowest set bit in the
702      * APR registers.
703      *
704      * If the guest does not honour the ordering constraints then the
705      * behaviour of the GIC is UNPREDICTABLE, which for us means that
706      * the values of the APR registers might become incorrect and the
707      * running priority will be wrong, so interrupts that should preempt
708      * might not do so, and interrupts that should not preempt might do so.
709      */
710     int i;
711 
712     for (i = 0; i < ARRAY_SIZE(cs->icc_apr[grp]); i++) {
713         uint64_t *papr = &cs->icc_apr[grp][i];
714 
715         if (!*papr) {
716             continue;
717         }
718         /* Clear the lowest set bit */
719         *papr &= *papr - 1;
720         break;
721     }
722 
723     /* running priority change means we need an update for this cpu i/f */
724     gicv3_cpuif_update(cs);
725 }
726 
727 static bool icc_eoi_split(CPUARMState *env, GICv3CPUState *cs)
728 {
729     /* Return true if we should split priority drop and interrupt
730      * deactivation, ie whether the relevant EOIMode bit is set.
731      */
732     if (arm_is_el3_or_mon(env)) {
733         return cs->icc_ctlr_el3 & ICC_CTLR_EL3_EOIMODE_EL3;
734     }
735     if (arm_is_secure_below_el3(env)) {
736         return cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_EOIMODE;
737     } else {
738         return cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_EOIMODE;
739     }
740 }
741 
742 static int icc_highest_active_group(GICv3CPUState *cs)
743 {
744     /* Return the group with the highest priority active interrupt.
745      * We can do this by just comparing the APRs to see which one
746      * has the lowest set bit.
747      * (If more than one group is active at the same priority then
748      * we're in UNPREDICTABLE territory.)
749      */
750     int i;
751 
752     for (i = 0; i < ARRAY_SIZE(cs->icc_apr[0]); i++) {
753         int g0ctz = ctz32(cs->icc_apr[GICV3_G0][i]);
754         int g1ctz = ctz32(cs->icc_apr[GICV3_G1][i]);
755         int g1nsctz = ctz32(cs->icc_apr[GICV3_G1NS][i]);
756 
757         if (g1nsctz < g0ctz && g1nsctz < g1ctz) {
758             return GICV3_G1NS;
759         }
760         if (g1ctz < g0ctz) {
761             return GICV3_G1;
762         }
763         if (g0ctz < 32) {
764             return GICV3_G0;
765         }
766     }
767     /* No set active bits? UNPREDICTABLE; return -1 so the caller
768      * ignores the spurious EOI attempt.
769      */
770     return -1;
771 }
772 
773 static void icc_deactivate_irq(GICv3CPUState *cs, int irq)
774 {
775     if (irq < GIC_INTERNAL) {
776         cs->gicr_iactiver0 = deposit32(cs->gicr_iactiver0, irq, 1, 0);
777         gicv3_redist_update(cs);
778     } else {
779         gicv3_gicd_active_clear(cs->gic, irq);
780         gicv3_update(cs->gic, irq, 1);
781     }
782 }
783 
784 static void icc_eoir_write(CPUARMState *env, const ARMCPRegInfo *ri,
785                            uint64_t value)
786 {
787     /* End of Interrupt */
788     GICv3CPUState *cs = icc_cs_from_env(env);
789     int irq = value & 0xffffff;
790     int grp;
791 
792     trace_gicv3_icc_eoir_write(ri->crm == 8 ? 0 : 1,
793                                gicv3_redist_affid(cs), value);
794 
795     if (ri->crm == 8) {
796         /* EOIR0 */
797         grp = GICV3_G0;
798     } else {
799         /* EOIR1 */
800         if (arm_is_secure(env)) {
801             grp = GICV3_G1;
802         } else {
803             grp = GICV3_G1NS;
804         }
805     }
806 
807     if (irq >= cs->gic->num_irq) {
808         /* This handles two cases:
809          * 1. If software writes the ID of a spurious interrupt [ie 1020-1023]
810          * to the GICC_EOIR, the GIC ignores that write.
811          * 2. If software writes the number of a non-existent interrupt
812          * this must be a subcase of "value written does not match the last
813          * valid interrupt value read from the Interrupt Acknowledge
814          * register" and so this is UNPREDICTABLE. We choose to ignore it.
815          */
816         return;
817     }
818 
819     if (icc_highest_active_group(cs) != grp) {
820         return;
821     }
822 
823     icc_drop_prio(cs, grp);
824 
825     if (!icc_eoi_split(env, cs)) {
826         /* Priority drop and deactivate not split: deactivate irq now */
827         icc_deactivate_irq(cs, irq);
828     }
829 }
830 
831 static uint64_t icc_hppir0_read(CPUARMState *env, const ARMCPRegInfo *ri)
832 {
833     GICv3CPUState *cs = icc_cs_from_env(env);
834     uint64_t value = icc_hppir0_value(cs, env);
835 
836     trace_gicv3_icc_hppir0_read(gicv3_redist_affid(cs), value);
837     return value;
838 }
839 
840 static uint64_t icc_hppir1_read(CPUARMState *env, const ARMCPRegInfo *ri)
841 {
842     GICv3CPUState *cs = icc_cs_from_env(env);
843     uint64_t value = icc_hppir1_value(cs, env);
844 
845     trace_gicv3_icc_hppir1_read(gicv3_redist_affid(cs), value);
846     return value;
847 }
848 
849 static uint64_t icc_bpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
850 {
851     GICv3CPUState *cs = icc_cs_from_env(env);
852     int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1;
853     bool satinc = false;
854     uint64_t bpr;
855 
856     if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
857         return icv_bpr_read(env, ri);
858     }
859 
860     if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
861         grp = GICV3_G1NS;
862     }
863 
864     if (grp == GICV3_G1 && !arm_is_el3_or_mon(env) &&
865         (cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR)) {
866         /* CBPR_EL1S means secure EL1 or AArch32 EL3 !Mon BPR1 accesses
867          * modify BPR0
868          */
869         grp = GICV3_G0;
870     }
871 
872     if (grp == GICV3_G1NS && arm_current_el(env) < 3 &&
873         (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR)) {
874         /* reads return bpr0 + 1 sat to 7, writes ignored */
875         grp = GICV3_G0;
876         satinc = true;
877     }
878 
879     bpr = cs->icc_bpr[grp];
880     if (satinc) {
881         bpr++;
882         bpr = MIN(bpr, 7);
883     }
884 
885     trace_gicv3_icc_bpr_read(ri->crm == 8 ? 0 : 1, gicv3_redist_affid(cs), bpr);
886 
887     return bpr;
888 }
889 
890 static void icc_bpr_write(CPUARMState *env, const ARMCPRegInfo *ri,
891                           uint64_t value)
892 {
893     GICv3CPUState *cs = icc_cs_from_env(env);
894     int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1;
895 
896     if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
897         icv_bpr_write(env, ri, value);
898         return;
899     }
900 
901     trace_gicv3_icc_bpr_write(ri->crm == 8 ? 0 : 1,
902                               gicv3_redist_affid(cs), value);
903 
904     if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
905         grp = GICV3_G1NS;
906     }
907 
908     if (grp == GICV3_G1 && !arm_is_el3_or_mon(env) &&
909         (cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR)) {
910         /* CBPR_EL1S means secure EL1 or AArch32 EL3 !Mon BPR1 accesses
911          * modify BPR0
912          */
913         grp = GICV3_G0;
914     }
915 
916     if (grp == GICV3_G1NS && arm_current_el(env) < 3 &&
917         (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR)) {
918         /* reads return bpr0 + 1 sat to 7, writes ignored */
919         return;
920     }
921 
922     cs->icc_bpr[grp] = value & 7;
923     gicv3_cpuif_update(cs);
924 }
925 
926 static uint64_t icc_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
927 {
928     GICv3CPUState *cs = icc_cs_from_env(env);
929     uint64_t value;
930 
931     int regno = ri->opc2 & 3;
932     int grp = (ri->crm & 1) ? GICV3_G1 : GICV3_G0;
933 
934     if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
935         return icv_ap_read(env, ri);
936     }
937 
938     if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
939         grp = GICV3_G1NS;
940     }
941 
942     value = cs->icc_apr[grp][regno];
943 
944     trace_gicv3_icc_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
945     return value;
946 }
947 
948 static void icc_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
949                          uint64_t value)
950 {
951     GICv3CPUState *cs = icc_cs_from_env(env);
952 
953     int regno = ri->opc2 & 3;
954     int grp = (ri->crm & 1) ? GICV3_G1 : GICV3_G0;
955 
956     if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
957         icv_ap_write(env, ri, value);
958         return;
959     }
960 
961     trace_gicv3_icc_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
962 
963     if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
964         grp = GICV3_G1NS;
965     }
966 
967     /* It's not possible to claim that a Non-secure interrupt is active
968      * at a priority outside the Non-secure range (128..255), since this
969      * would otherwise allow malicious NS code to block delivery of S interrupts
970      * by writing a bad value to these registers.
971      */
972     if (grp == GICV3_G1NS && regno < 2 && arm_feature(env, ARM_FEATURE_EL3)) {
973         return;
974     }
975 
976     cs->icc_apr[grp][regno] = value & 0xFFFFFFFFU;
977     gicv3_cpuif_update(cs);
978 }
979 
980 static void icc_dir_write(CPUARMState *env, const ARMCPRegInfo *ri,
981                           uint64_t value)
982 {
983     /* Deactivate interrupt */
984     GICv3CPUState *cs = icc_cs_from_env(env);
985     int irq = value & 0xffffff;
986     bool irq_is_secure, single_sec_state, irq_is_grp0;
987     bool route_fiq_to_el3, route_irq_to_el3, route_fiq_to_el2, route_irq_to_el2;
988 
989     trace_gicv3_icc_dir_write(gicv3_redist_affid(cs), value);
990 
991     if (irq >= cs->gic->num_irq) {
992         /* Also catches special interrupt numbers and LPIs */
993         return;
994     }
995 
996     if (!icc_eoi_split(env, cs)) {
997         return;
998     }
999 
1000     int grp = gicv3_irq_group(cs->gic, cs, irq);
1001 
1002     single_sec_state = cs->gic->gicd_ctlr & GICD_CTLR_DS;
1003     irq_is_secure = !single_sec_state && (grp != GICV3_G1NS);
1004     irq_is_grp0 = grp == GICV3_G0;
1005 
1006     /* Check whether we're allowed to deactivate this interrupt based
1007      * on its group and the current CPU state.
1008      * These checks are laid out to correspond to the spec's pseudocode.
1009      */
1010     route_fiq_to_el3 = env->cp15.scr_el3 & SCR_FIQ;
1011     route_irq_to_el3 = env->cp15.scr_el3 & SCR_IRQ;
1012     /* No need to include !IsSecure in route_*_to_el2 as it's only
1013      * tested in cases where we know !IsSecure is true.
1014      */
1015     route_fiq_to_el2 = env->cp15.hcr_el2 & HCR_FMO;
1016     route_irq_to_el2 = env->cp15.hcr_el2 & HCR_IMO;
1017 
1018     switch (arm_current_el(env)) {
1019     case 3:
1020         break;
1021     case 2:
1022         if (single_sec_state && irq_is_grp0 && !route_fiq_to_el3) {
1023             break;
1024         }
1025         if (!irq_is_secure && !irq_is_grp0 && !route_irq_to_el3) {
1026             break;
1027         }
1028         return;
1029     case 1:
1030         if (!arm_is_secure_below_el3(env)) {
1031             if (single_sec_state && irq_is_grp0 &&
1032                 !route_fiq_to_el3 && !route_fiq_to_el2) {
1033                 break;
1034             }
1035             if (!irq_is_secure && !irq_is_grp0 &&
1036                 !route_irq_to_el3 && !route_irq_to_el2) {
1037                 break;
1038             }
1039         } else {
1040             if (irq_is_grp0 && !route_fiq_to_el3) {
1041                 break;
1042             }
1043             if (!irq_is_grp0 &&
1044                 (!irq_is_secure || !single_sec_state) &&
1045                 !route_irq_to_el3) {
1046                 break;
1047             }
1048         }
1049         return;
1050     default:
1051         g_assert_not_reached();
1052     }
1053 
1054     icc_deactivate_irq(cs, irq);
1055 }
1056 
1057 static uint64_t icc_rpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1058 {
1059     GICv3CPUState *cs = icc_cs_from_env(env);
1060     int prio = icc_highest_active_prio(cs);
1061 
1062     if (arm_feature(env, ARM_FEATURE_EL3) &&
1063         !arm_is_secure(env) && (env->cp15.scr_el3 & SCR_FIQ)) {
1064         /* NS GIC access and Group 0 is inaccessible to NS */
1065         if (prio & 0x80) {
1066             /* NS mustn't see priorities in the Secure half of the range */
1067             prio = 0;
1068         } else if (prio != 0xff) {
1069             /* Non-idle priority: show the Non-secure view of it */
1070             prio = (prio << 1) & 0xff;
1071         }
1072     }
1073 
1074     trace_gicv3_icc_rpr_read(gicv3_redist_affid(cs), prio);
1075     return prio;
1076 }
1077 
1078 static void icc_generate_sgi(CPUARMState *env, GICv3CPUState *cs,
1079                              uint64_t value, int grp, bool ns)
1080 {
1081     GICv3State *s = cs->gic;
1082 
1083     /* Extract Aff3/Aff2/Aff1 and shift into the bottom 24 bits */
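    /* (Field layout used here: Aff3 [55:48], IRM [40], Aff2 [39:32],
     * INTID [27:24], Aff1 [23:16], TargetList [15:0].)
     */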
1084     uint64_t aff = extract64(value, 48, 8) << 16 |
1085         extract64(value, 32, 8) << 8 |
1086         extract64(value, 16, 8);
1087     uint32_t targetlist = extract64(value, 0, 16);
1088     uint32_t irq = extract64(value, 24, 4);
1089     bool irm = extract64(value, 40, 1);
1090     int i;
1091 
1092     if (grp == GICV3_G1 && s->gicd_ctlr & GICD_CTLR_DS) {
1093         /* If GICD_CTLR.DS == 1, the Distributor treats Secure Group 1
1094          * interrupts as Group 0 interrupts and must send Secure Group 0
1095          * interrupts to the target CPUs.
1096          */
1097         grp = GICV3_G0;
1098     }
1099 
1100     trace_gicv3_icc_generate_sgi(gicv3_redist_affid(cs), irq, irm,
1101                                  aff, targetlist);
1102 
1103     for (i = 0; i < s->num_cpu; i++) {
1104         GICv3CPUState *ocs = &s->cpu[i];
1105 
1106         if (irm) {
1107             /* IRM == 1 : route to all CPUs except self */
1108             if (cs == ocs) {
1109                 continue;
1110             }
1111         } else {
1112             /* IRM == 0 : route to Aff3.Aff2.Aff1.n for all n in [0..15]
1113              * where the corresponding bit is set in targetlist
1114              */
1115             int aff0;
1116 
1117             if (ocs->gicr_typer >> 40 != aff) {
1118                 continue;
1119             }
1120             aff0 = extract64(ocs->gicr_typer, 32, 8);
1121             if (aff0 > 15 || extract32(targetlist, aff0, 1) == 0) {
1122                 continue;
1123             }
1124         }
1125 
1126         /* The redistributor will check against its own GICR_NSACR as needed */
1127         gicv3_redist_send_sgi(ocs, grp, irq, ns);
1128     }
1129 }
1130 
1131 static void icc_sgi0r_write(CPUARMState *env, const ARMCPRegInfo *ri,
1132                            uint64_t value)
1133 {
1134     /* Generate Secure Group 0 SGI. */
1135     GICv3CPUState *cs = icc_cs_from_env(env);
1136     bool ns = !arm_is_secure(env);
1137 
1138     icc_generate_sgi(env, cs, value, GICV3_G0, ns);
1139 }
1140 
1141 static void icc_sgi1r_write(CPUARMState *env, const ARMCPRegInfo *ri,
1142                            uint64_t value)
1143 {
1144     /* Generate Group 1 SGI for the current Security state */
1145     GICv3CPUState *cs = icc_cs_from_env(env);
1146     int grp;
1147     bool ns = !arm_is_secure(env);
1148 
1149     grp = ns ? GICV3_G1NS : GICV3_G1;
1150     icc_generate_sgi(env, cs, value, grp, ns);
1151 }
1152 
1153 static void icc_asgi1r_write(CPUARMState *env, const ARMCPRegInfo *ri,
1154                              uint64_t value)
1155 {
1156     /* Generate Group 1 SGI for the Security state that is not
1157      * the current state
1158      */
1159     GICv3CPUState *cs = icc_cs_from_env(env);
1160     int grp;
1161     bool ns = !arm_is_secure(env);
1162 
1163     grp = ns ? GICV3_G1 : GICV3_G1NS;
1164     icc_generate_sgi(env, cs, value, grp, ns);
1165 }
1166 
1167 static uint64_t icc_igrpen_read(CPUARMState *env, const ARMCPRegInfo *ri)
1168 {
1169     GICv3CPUState *cs = icc_cs_from_env(env);
1170     int grp = ri->opc2 & 1 ? GICV3_G1 : GICV3_G0;
1171     uint64_t value;
1172 
1173     if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
1174         return icv_igrpen_read(env, ri);
1175     }
1176 
1177     if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
1178         grp = GICV3_G1NS;
1179     }
1180 
1181     value = cs->icc_igrpen[grp];
1182     trace_gicv3_icc_igrpen_read(ri->opc2 & 1 ? 1 : 0,
1183                                 gicv3_redist_affid(cs), value);
1184     return value;
1185 }
1186 
1187 static void icc_igrpen_write(CPUARMState *env, const ARMCPRegInfo *ri,
1188                              uint64_t value)
1189 {
1190     GICv3CPUState *cs = icc_cs_from_env(env);
1191     int grp = ri->opc2 & 1 ? GICV3_G1 : GICV3_G0;
1192 
1193     if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
1194         icv_igrpen_write(env, ri, value);
1195         return;
1196     }
1197 
1198     trace_gicv3_icc_igrpen_write(ri->opc2 & 1 ? 1 : 0,
1199                                  gicv3_redist_affid(cs), value);
1200 
1201     if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
1202         grp = GICV3_G1NS;
1203     }
1204 
1205     cs->icc_igrpen[grp] = value & ICC_IGRPEN_ENABLE;
1206     gicv3_cpuif_update(cs);
1207 }
1208 
1209 static uint64_t icc_igrpen1_el3_read(CPUARMState *env, const ARMCPRegInfo *ri)
1210 {
1211     GICv3CPUState *cs = icc_cs_from_env(env);
1212     uint64_t value;
1213 
1214     /* IGRPEN1_EL3 bits 0 and 1 are r/w aliases into IGRPEN1_EL1 NS and S */
1215     value = cs->icc_igrpen[GICV3_G1NS] | (cs->icc_igrpen[GICV3_G1] << 1);
1216     trace_gicv3_icc_igrpen1_el3_read(gicv3_redist_affid(cs), value);
1217     return value;
1218 }
1219 
1220 static void icc_igrpen1_el3_write(CPUARMState *env, const ARMCPRegInfo *ri,
1221                                   uint64_t value)
1222 {
1223     GICv3CPUState *cs = icc_cs_from_env(env);
1224 
1225     trace_gicv3_icc_igrpen1_el3_write(gicv3_redist_affid(cs), value);
1226 
1227     /* IGRPEN1_EL3 bits 0 and 1 are r/w aliases into IGRPEN1_EL1 NS and S */
1228     cs->icc_igrpen[GICV3_G1NS] = extract32(value, 0, 1);
1229     cs->icc_igrpen[GICV3_G1] = extract32(value, 1, 1);
1230     gicv3_cpuif_update(cs);
1231 }
1232 
1233 static uint64_t icc_ctlr_el1_read(CPUARMState *env, const ARMCPRegInfo *ri)
1234 {
1235     GICv3CPUState *cs = icc_cs_from_env(env);
1236     int bank = gicv3_use_ns_bank(env) ? GICV3_NS : GICV3_S;
1237     uint64_t value;
1238 
1239     if (icv_access(env, HCR_FMO | HCR_IMO)) {
1240         return icv_ctlr_read(env, ri);
1241     }
1242 
1243     value = cs->icc_ctlr_el1[bank];
1244     trace_gicv3_icc_ctlr_read(gicv3_redist_affid(cs), value);
1245     return value;
1246 }
1247 
1248 static void icc_ctlr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
1249                                uint64_t value)
1250 {
1251     GICv3CPUState *cs = icc_cs_from_env(env);
1252     int bank = gicv3_use_ns_bank(env) ? GICV3_NS : GICV3_S;
1253     uint64_t mask;
1254 
1255     if (icv_access(env, HCR_FMO | HCR_IMO)) {
1256         icv_ctlr_write(env, ri, value);
1257         return;
1258     }
1259 
1260     trace_gicv3_icc_ctlr_write(gicv3_redist_affid(cs), value);
1261 
1262     /* Only CBPR and EOIMODE can be RW;
1263      * for us PMHE is RAZ/WI (we don't implement 1-of-N interrupts or
1264      * the associated priority-based routing of them);
1265      * if EL3 is implemented and GICD_CTLR.DS == 0, then PMHE and CBPR are RO.
1266      */
1267     if (arm_feature(env, ARM_FEATURE_EL3) &&
1268         ((cs->gic->gicd_ctlr & GICD_CTLR_DS) == 0)) {
1269         mask = ICC_CTLR_EL1_EOIMODE;
1270     } else {
1271         mask = ICC_CTLR_EL1_CBPR | ICC_CTLR_EL1_EOIMODE;
1272     }
1273 
1274     cs->icc_ctlr_el1[bank] &= ~mask;
1275     cs->icc_ctlr_el1[bank] |= (value & mask);
1276     gicv3_cpuif_update(cs);
1277 }
1278 
1279 
1280 static uint64_t icc_ctlr_el3_read(CPUARMState *env, const ARMCPRegInfo *ri)
1281 {
1282     GICv3CPUState *cs = icc_cs_from_env(env);
1283     uint64_t value;
1284 
1285     value = cs->icc_ctlr_el3;
1286     if (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_EOIMODE) {
1287         value |= ICC_CTLR_EL3_EOIMODE_EL1NS;
1288     }
1289     if (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR) {
1290         value |= ICC_CTLR_EL3_CBPR_EL1NS;
1291     }
1292     if (cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_EOIMODE) {
1293         value |= ICC_CTLR_EL3_EOIMODE_EL1S;
1294     }
1295     if (cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR) {
1296         value |= ICC_CTLR_EL3_CBPR_EL1S;
1297     }
1298 
1299     trace_gicv3_icc_ctlr_el3_read(gicv3_redist_affid(cs), value);
1300     return value;
1301 }
1302 
1303 static void icc_ctlr_el3_write(CPUARMState *env, const ARMCPRegInfo *ri,
1304                                uint64_t value)
1305 {
1306     GICv3CPUState *cs = icc_cs_from_env(env);
1307     uint64_t mask;
1308 
1309     trace_gicv3_icc_ctlr_el3_write(gicv3_redist_affid(cs), value);
1310 
1311     /* *_EL1NS and *_EL1S bits are aliases into the ICC_CTLR_EL1 bits. */
1312     cs->icc_ctlr_el1[GICV3_NS] &= (ICC_CTLR_EL1_CBPR | ICC_CTLR_EL1_EOIMODE);
1313     if (value & ICC_CTLR_EL3_EOIMODE_EL1NS) {
1314         cs->icc_ctlr_el1[GICV3_NS] |= ICC_CTLR_EL1_EOIMODE;
1315     }
1316     if (value & ICC_CTLR_EL3_CBPR_EL1NS) {
1317         cs->icc_ctlr_el1[GICV3_NS] |= ICC_CTLR_EL1_CBPR;
1318     }
1319 
1320     cs->icc_ctlr_el1[GICV3_S] &= (ICC_CTLR_EL1_CBPR | ICC_CTLR_EL1_EOIMODE);
1321     if (value & ICC_CTLR_EL3_EOIMODE_EL1S) {
1322         cs->icc_ctlr_el1[GICV3_S] |= ICC_CTLR_EL1_EOIMODE;
1323     }
1324     if (value & ICC_CTLR_EL3_CBPR_EL1S) {
1325         cs->icc_ctlr_el1[GICV3_S] |= ICC_CTLR_EL1_CBPR;
1326     }
1327 
1328     /* The only bit stored in icc_ctlr_el3 which is writeable is EOIMODE_EL3: */
1329     mask = ICC_CTLR_EL3_EOIMODE_EL3;
1330 
1331     cs->icc_ctlr_el3 &= ~mask;
1332     cs->icc_ctlr_el3 |= (value & mask);
1333     gicv3_cpuif_update(cs);
1334 }
1335 
1336 static CPAccessResult gicv3_irqfiq_access(CPUARMState *env,
1337                                           const ARMCPRegInfo *ri, bool isread)
1338 {
1339     CPAccessResult r = CP_ACCESS_OK;
1340 
1341     if ((env->cp15.scr_el3 & (SCR_FIQ | SCR_IRQ)) == (SCR_FIQ | SCR_IRQ)) {
1342         switch (arm_current_el(env)) {
1343         case 1:
1344             if (arm_is_secure_below_el3(env) ||
1345                 ((env->cp15.hcr_el2 & (HCR_IMO | HCR_FMO)) == 0)) {
1346                 r = CP_ACCESS_TRAP_EL3;
1347             }
1348             break;
1349         case 2:
1350             r = CP_ACCESS_TRAP_EL3;
1351             break;
1352         case 3:
1353             if (!is_a64(env) && !arm_is_el3_or_mon(env)) {
1354                 r = CP_ACCESS_TRAP_EL3;
1355             }
1356             break;
1357         default:
1358             g_assert_not_reached();
1359         }
1360     }
1361 
1362     if (r == CP_ACCESS_TRAP_EL3 && !arm_el_is_aa64(env, 3)) {
1363         r = CP_ACCESS_TRAP;
1364     }
1365     return r;
1366 }
1367 
1368 static CPAccessResult gicv3_fiq_access(CPUARMState *env,
1369                                        const ARMCPRegInfo *ri, bool isread)
1370 {
1371     CPAccessResult r = CP_ACCESS_OK;
1372 
1373     if (env->cp15.scr_el3 & SCR_FIQ) {
1374         switch (arm_current_el(env)) {
1375         case 1:
1376             if (arm_is_secure_below_el3(env) ||
1377                 ((env->cp15.hcr_el2 & HCR_FMO) == 0)) {
1378                 r = CP_ACCESS_TRAP_EL3;
1379             }
1380             break;
1381         case 2:
1382             r = CP_ACCESS_TRAP_EL3;
1383             break;
1384         case 3:
1385             if (!is_a64(env) && !arm_is_el3_or_mon(env)) {
1386                 r = CP_ACCESS_TRAP_EL3;
1387             }
1388             break;
1389         default:
1390             g_assert_not_reached();
1391         }
1392     }
1393 
1394     if (r == CP_ACCESS_TRAP_EL3 && !arm_el_is_aa64(env, 3)) {
1395         r = CP_ACCESS_TRAP;
1396     }
1397     return r;
1398 }
1399 
1400 static CPAccessResult gicv3_irq_access(CPUARMState *env,
1401                                        const ARMCPRegInfo *ri, bool isread)
1402 {
1403     CPAccessResult r = CP_ACCESS_OK;
1404 
1405     if (env->cp15.scr_el3 & SCR_IRQ) {
1406         switch (arm_current_el(env)) {
1407         case 1:
1408             if (arm_is_secure_below_el3(env) ||
1409                 ((env->cp15.hcr_el2 & HCR_IMO) == 0)) {
1410                 r = CP_ACCESS_TRAP_EL3;
1411             }
1412             break;
1413         case 2:
1414             r = CP_ACCESS_TRAP_EL3;
1415             break;
1416         case 3:
1417             if (!is_a64(env) && !arm_is_el3_or_mon(env)) {
1418                 r = CP_ACCESS_TRAP_EL3;
1419             }
1420             break;
1421         default:
1422             g_assert_not_reached();
1423         }
1424     }
1425 
1426     if (r == CP_ACCESS_TRAP_EL3 && !arm_el_is_aa64(env, 3)) {
1427         r = CP_ACCESS_TRAP;
1428     }
1429     return r;
1430 }
1431 
1432 static void icc_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1433 {
1434     GICv3CPUState *cs = icc_cs_from_env(env);
1435 
1436     cs->icc_ctlr_el1[GICV3_S] = ICC_CTLR_EL1_A3V |
1437         (1 << ICC_CTLR_EL1_IDBITS_SHIFT) |
1438         (7 << ICC_CTLR_EL1_PRIBITS_SHIFT);
1439     cs->icc_ctlr_el1[GICV3_NS] = ICC_CTLR_EL1_A3V |
1440         (1 << ICC_CTLR_EL1_IDBITS_SHIFT) |
1441         (7 << ICC_CTLR_EL1_PRIBITS_SHIFT);
1442     cs->icc_pmr_el1 = 0;
1443     cs->icc_bpr[GICV3_G0] = GIC_MIN_BPR;
1444     cs->icc_bpr[GICV3_G1] = GIC_MIN_BPR;
1445     if (arm_feature(env, ARM_FEATURE_EL3)) {
1446         cs->icc_bpr[GICV3_G1NS] = GIC_MIN_BPR_NS;
1447     } else {
1448         cs->icc_bpr[GICV3_G1NS] = GIC_MIN_BPR;
1449     }
1450     memset(cs->icc_apr, 0, sizeof(cs->icc_apr));
1451     memset(cs->icc_igrpen, 0, sizeof(cs->icc_igrpen));
1452     cs->icc_ctlr_el3 = ICC_CTLR_EL3_NDS | ICC_CTLR_EL3_A3V |
1453         (1 << ICC_CTLR_EL3_IDBITS_SHIFT) |
1454         (7 << ICC_CTLR_EL3_PRIBITS_SHIFT);
1455 
1456     memset(cs->ich_apr, 0, sizeof(cs->ich_apr));
1457     cs->ich_hcr_el2 = 0;
1458     memset(cs->ich_lr_el2, 0, sizeof(cs->ich_lr_el2));
1459     cs->ich_vmcr_el2 = ICH_VMCR_EL2_VFIQEN |
1460         (icv_min_vbpr(cs) << ICH_VMCR_EL2_VBPR1_SHIFT) |
1461         (icv_min_vbpr(cs) << ICH_VMCR_EL2_VBPR0_SHIFT);
1462 }
1463 
1464 static const ARMCPRegInfo gicv3_cpuif_reginfo[] = {
1465     { .name = "ICC_PMR_EL1", .state = ARM_CP_STATE_BOTH,
1466       .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 6, .opc2 = 0,
1467       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1468       .access = PL1_RW, .accessfn = gicv3_irqfiq_access,
1469       .readfn = icc_pmr_read,
1470       .writefn = icc_pmr_write,
1471       /* We hang the whole cpu interface reset routine off here
1472        * rather than parcelling it out into one little function
1473        * per register
1474        */
1475       .resetfn = icc_reset,
1476     },
1477     { .name = "ICC_IAR0_EL1", .state = ARM_CP_STATE_BOTH,
1478       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 0,
1479       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1480       .access = PL1_R, .accessfn = gicv3_fiq_access,
1481       .readfn = icc_iar0_read,
1482     },
1483     { .name = "ICC_EOIR0_EL1", .state = ARM_CP_STATE_BOTH,
1484       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 1,
1485       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1486       .access = PL1_W, .accessfn = gicv3_fiq_access,
1487       .writefn = icc_eoir_write,
1488     },
1489     { .name = "ICC_HPPIR0_EL1", .state = ARM_CP_STATE_BOTH,
1490       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 2,
1491       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1492       .access = PL1_R, .accessfn = gicv3_fiq_access,
1493       .readfn = icc_hppir0_read,
1494     },
1495     { .name = "ICC_BPR0_EL1", .state = ARM_CP_STATE_BOTH,
1496       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 3,
1497       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1498       .access = PL1_RW, .accessfn = gicv3_fiq_access,
1499       .readfn = icc_bpr_read,
1500       .writefn = icc_bpr_write,
1501     },
1502     { .name = "ICC_AP0R0_EL1", .state = ARM_CP_STATE_BOTH,
1503       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 4,
1504       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1505       .access = PL1_RW, .accessfn = gicv3_fiq_access,
1506       .readfn = icc_ap_read,
1507       .writefn = icc_ap_write,
1508     },
1509     { .name = "ICC_AP0R1_EL1", .state = ARM_CP_STATE_BOTH,
1510       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 5,
1511       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1512       .access = PL1_RW, .accessfn = gicv3_fiq_access,
1513       .readfn = icc_ap_read,
1514       .writefn = icc_ap_write,
1515     },
1516     { .name = "ICC_AP0R2_EL1", .state = ARM_CP_STATE_BOTH,
1517       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 6,
1518       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1519       .access = PL1_RW, .accessfn = gicv3_fiq_access,
1520       .readfn = icc_ap_read,
1521       .writefn = icc_ap_write,
1522     },
1523     { .name = "ICC_AP0R3_EL1", .state = ARM_CP_STATE_BOTH,
1524       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 7,
1525       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1526       .access = PL1_RW, .accessfn = gicv3_fiq_access,
1527       .readfn = icc_ap_read,
1528       .writefn = icc_ap_write,
1529     },
1530     /* All the ICC_AP1R*_EL1 registers are banked */
1531     { .name = "ICC_AP1R0_EL1", .state = ARM_CP_STATE_BOTH,
1532       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 0,
1533       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1534       .access = PL1_RW, .accessfn = gicv3_irq_access,
1535       .readfn = icc_ap_read,
1536       .writefn = icc_ap_write,
1537     },
1538     { .name = "ICC_AP1R1_EL1", .state = ARM_CP_STATE_BOTH,
1539       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 1,
1540       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1541       .access = PL1_RW, .accessfn = gicv3_irq_access,
1542       .readfn = icc_ap_read,
1543       .writefn = icc_ap_write,
1544     },
1545     { .name = "ICC_AP1R2_EL1", .state = ARM_CP_STATE_BOTH,
1546       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 2,
1547       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1548       .access = PL1_RW, .accessfn = gicv3_irq_access,
1549       .readfn = icc_ap_read,
1550       .writefn = icc_ap_write,
1551     },
1552     { .name = "ICC_AP1R3_EL1", .state = ARM_CP_STATE_BOTH,
1553       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 3,
1554       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1555       .access = PL1_RW, .accessfn = gicv3_irq_access,
1556       .readfn = icc_ap_read,
1557       .writefn = icc_ap_write,
1558     },
1559     { .name = "ICC_DIR_EL1", .state = ARM_CP_STATE_BOTH,
1560       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 1,
1561       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1562       .access = PL1_W, .accessfn = gicv3_irqfiq_access,
1563       .writefn = icc_dir_write,
1564     },
1565     { .name = "ICC_RPR_EL1", .state = ARM_CP_STATE_BOTH,
1566       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 3,
1567       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1568       .access = PL1_R, .accessfn = gicv3_irqfiq_access,
1569       .readfn = icc_rpr_read,
1570     },
1571     { .name = "ICC_SGI1R_EL1", .state = ARM_CP_STATE_AA64,
1572       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 5,
1573       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1574       .access = PL1_W, .accessfn = gicv3_irqfiq_access,
1575       .writefn = icc_sgi1r_write,
1576     },
1577     { .name = "ICC_SGI1R",
1578       .cp = 15, .opc1 = 0, .crm = 12,
1579       .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_RAW,
1580       .access = PL1_W, .accessfn = gicv3_irqfiq_access,
1581       .writefn = icc_sgi1r_write,
1582     },
1583     { .name = "ICC_ASGI1R_EL1", .state = ARM_CP_STATE_AA64,
1584       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 6,
1585       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1586       .access = PL1_W, .accessfn = gicv3_irqfiq_access,
1587       .writefn = icc_asgi1r_write,
1588     },
1589     { .name = "ICC_ASGI1R",
1590       .cp = 15, .opc1 = 1, .crm = 12,
1591       .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_RAW,
1592       .access = PL1_W, .accessfn = gicv3_irqfiq_access,
1593       .writefn = icc_asgi1r_write,
1594     },
1595     { .name = "ICC_SGI0R_EL1", .state = ARM_CP_STATE_AA64,
1596       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 7,
1597       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1598       .access = PL1_W, .accessfn = gicv3_irqfiq_access,
1599       .writefn = icc_sgi0r_write,
1600     },
1601     { .name = "ICC_SGI0R",
1602       .cp = 15, .opc1 = 2, .crm = 12,
1603       .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_RAW,
1604       .access = PL1_W, .accessfn = gicv3_irqfiq_access,
1605       .writefn = icc_sgi0r_write,
1606     },
1607     { .name = "ICC_IAR1_EL1", .state = ARM_CP_STATE_BOTH,
1608       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 0,
1609       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1610       .access = PL1_R, .accessfn = gicv3_irq_access,
1611       .readfn = icc_iar1_read,
1612     },
1613     { .name = "ICC_EOIR1_EL1", .state = ARM_CP_STATE_BOTH,
1614       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 1,
1615       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1616       .access = PL1_W, .accessfn = gicv3_irq_access,
1617       .writefn = icc_eoir_write,
1618     },
1619     { .name = "ICC_HPPIR1_EL1", .state = ARM_CP_STATE_BOTH,
1620       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 2,
1621       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1622       .access = PL1_R, .accessfn = gicv3_irq_access,
1623       .readfn = icc_hppir1_read,
1624     },
1625     /* This register is banked */
1626     { .name = "ICC_BPR1_EL1", .state = ARM_CP_STATE_BOTH,
1627       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 3,
1628       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1629       .access = PL1_RW, .accessfn = gicv3_irq_access,
1630       .readfn = icc_bpr_read,
1631       .writefn = icc_bpr_write,
1632     },
1633     /* This register is banked */
1634     { .name = "ICC_CTLR_EL1", .state = ARM_CP_STATE_BOTH,
1635       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 4,
1636       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1637       .access = PL1_RW, .accessfn = gicv3_irqfiq_access,
1638       .readfn = icc_ctlr_el1_read,
1639       .writefn = icc_ctlr_el1_write,
1640     },
1641     { .name = "ICC_SRE_EL1", .state = ARM_CP_STATE_BOTH,
1642       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 5,
1643       .type = ARM_CP_NO_RAW | ARM_CP_CONST,
1644       .access = PL1_RW,
1645       /* We don't support IRQ/FIQ bypass and system registers are
1646        * always enabled, so all our bits are RAZ/WI or RAO/WI.
1647        * This register is banked but since it's constant we don't
1648        * need to do anything special.
1649        */
1650       .resetvalue = 0x7,
1651     },
1652     { .name = "ICC_IGRPEN0_EL1", .state = ARM_CP_STATE_BOTH,
1653       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 6,
1654       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1655       .access = PL1_RW, .accessfn = gicv3_fiq_access,
1656       .readfn = icc_igrpen_read,
1657       .writefn = icc_igrpen_write,
1658     },
1659     /* This register is banked */
1660     { .name = "ICC_IGRPEN1_EL1", .state = ARM_CP_STATE_BOTH,
1661       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 7,
1662       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1663       .access = PL1_RW, .accessfn = gicv3_irq_access,
1664       .readfn = icc_igrpen_read,
1665       .writefn = icc_igrpen_write,
1666     },
1667     { .name = "ICC_SRE_EL2", .state = ARM_CP_STATE_BOTH,
1668       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 5,
1669       .type = ARM_CP_NO_RAW | ARM_CP_CONST,
1670       .access = PL2_RW,
1671       /* We don't support IRQ/FIQ bypass and system registers are
1672        * always enabled, so all our bits are RAZ/WI or RAO/WI.
1673        */
1674       .resetvalue = 0xf,
1675     },
1676     { .name = "ICC_CTLR_EL3", .state = ARM_CP_STATE_BOTH,
1677       .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 12, .opc2 = 4,
1678       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1679       .access = PL3_RW,
1680       .readfn = icc_ctlr_el3_read,
1681       .writefn = icc_ctlr_el3_write,
1682     },
1683     { .name = "ICC_SRE_EL3", .state = ARM_CP_STATE_BOTH,
1684       .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 12, .opc2 = 5,
1685       .type = ARM_CP_NO_RAW | ARM_CP_CONST,
1686       .access = PL3_RW,
1687       /* We don't support IRQ/FIQ bypass and system registers are
1688        * always enabled, so all our bits are RAZ/WI or RAO/WI.
1689        */
1690       .resetvalue = 0xf,
1691     },
1692     { .name = "ICC_IGRPEN1_EL3", .state = ARM_CP_STATE_BOTH,
1693       .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 12, .opc2 = 7,
1694       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1695       .access = PL3_RW,
1696       .readfn = icc_igrpen1_el3_read,
1697       .writefn = icc_igrpen1_el3_write,
1698     },
1699     REGINFO_SENTINEL
1700 };
1701 
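/* Accessors for the ICH_AP0R<n>_EL2 and ICH_AP1R<n>_EL2 active priority
 * registers. The register index comes from opc2 and the group from CRm:
 * CRm == 8 encodes ICH_AP0R<n>_EL2 (group 0) and CRm == 9 encodes
 * ICH_AP1R<n>_EL2 (non-secure group 1), matching the reginfo tables below.
 */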
1702 static uint64_t ich_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
1703 {
1704     GICv3CPUState *cs = icc_cs_from_env(env);
1705     int regno = ri->opc2 & 3;
1706     int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
1707     uint64_t value;
1708 
1709     value = cs->ich_apr[grp][regno];
1710     trace_gicv3_ich_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
1711     return value;
1712 }
1713 
1714 static void ich_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
1715                          uint64_t value)
1716 {
1717     GICv3CPUState *cs = icc_cs_from_env(env);
1718     int regno = ri->opc2 & 3;
1719     int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
1720 
1721     trace_gicv3_ich_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
1722 
1723     cs->ich_apr[grp][regno] = value & 0xFFFFFFFFU;
1724     gicv3_cpuif_virt_update(cs);
1725 }
1726 
1727 static uint64_t ich_hcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1728 {
1729     GICv3CPUState *cs = icc_cs_from_env(env);
1730     uint64_t value = cs->ich_hcr_el2;
1731 
1732     trace_gicv3_ich_hcr_read(gicv3_redist_affid(cs), value);
1733     return value;
1734 }
1735 
1736 static void ich_hcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1737                           uint64_t value)
1738 {
1739     GICv3CPUState *cs = icc_cs_from_env(env);
1740 
1741     trace_gicv3_ich_hcr_write(gicv3_redist_affid(cs), value);
1742 
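    /* Writes keep only the ICH_HCR_EL2 bits this model implements;
     * the remaining bits are treated as RES0.
     */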
1743     value &= ICH_HCR_EL2_EN | ICH_HCR_EL2_UIE | ICH_HCR_EL2_LRENPIE |
1744         ICH_HCR_EL2_NPIE | ICH_HCR_EL2_VGRP0EIE | ICH_HCR_EL2_VGRP0DIE |
1745         ICH_HCR_EL2_VGRP1EIE | ICH_HCR_EL2_VGRP1DIE | ICH_HCR_EL2_TC |
1746         ICH_HCR_EL2_TALL0 | ICH_HCR_EL2_TALL1 | ICH_HCR_EL2_TSEI |
1747         ICH_HCR_EL2_TDIR | ICH_HCR_EL2_EOICOUNT_MASK;
1748 
1749     cs->ich_hcr_el2 = value;
1750     gicv3_cpuif_virt_update(cs);
1751 }
1752 
1753 static uint64_t ich_vmcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1754 {
1755     GICv3CPUState *cs = icc_cs_from_env(env);
1756     uint64_t value = cs->ich_vmcr_el2;
1757 
1758     trace_gicv3_ich_vmcr_read(gicv3_redist_affid(cs), value);
1759     return value;
1760 }
1761 
1762 static void ich_vmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1763                          uint64_t value)
1764 {
1765     GICv3CPUState *cs = icc_cs_from_env(env);
1766 
1767     trace_gicv3_ich_vmcr_write(gicv3_redist_affid(cs), value);
1768 
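    /* Keep only the writable VMCR fields. VFIQEN is forced to 1 below:
     * the architecture makes it RES1 when the virtual CPU interface is
     * using system registers, and this model does not provide the legacy
     * memory-mapped GICV interface.
     */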
1769     value &= ICH_VMCR_EL2_VENG0 | ICH_VMCR_EL2_VENG1 | ICH_VMCR_EL2_VCBPR |
1770         ICH_VMCR_EL2_VEOIM | ICH_VMCR_EL2_VBPR1_MASK |
1771         ICH_VMCR_EL2_VBPR0_MASK | ICH_VMCR_EL2_VPMR_MASK;
1772     value |= ICH_VMCR_EL2_VFIQEN;
1773 
1774     cs->ich_vmcr_el2 = value;
1775     /* Enforce the "writing a BPR value below the minimum sets it to the
1776      * minimum" semantics by reading the fields back and writing them again.
1777      */
1778     write_vbpr(cs, GICV3_G0, read_vbpr(cs, GICV3_G0));
1779     write_vbpr(cs, GICV3_G1, read_vbpr(cs, GICV3_G1));
1780 
1781     gicv3_cpuif_virt_update(cs);
1782 }
1783 
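/* For the list registers the index is encoded in crm/opc2:
 * ICH_LR<n>_EL2 uses crm = 12 + (n >> 3) and opc2 = n & 7, and the AArch32
 * high-half ICH_LRC<n> registers use crm = 14 + (n >> 3), so
 * "opc2 | ((crm & 1) << 3)" recovers n in every case.
 */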
1784 static uint64_t ich_lr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1785 {
1786     GICv3CPUState *cs = icc_cs_from_env(env);
1787     int regno = ri->opc2 | ((ri->crm & 1) << 3);
1788     uint64_t value;
1789 
1790     /* This read function handles all of:
1791      * 64-bit reads of the whole LR
1792      * 32-bit reads of the low half of the LR
1793      * 32-bit reads of the high half of the LR
1794      */
1795     if (ri->state == ARM_CP_STATE_AA32) {
1796         if (ri->crm >= 14) {
1797             value = extract64(cs->ich_lr_el2[regno], 32, 32);
1798             trace_gicv3_ich_lrc_read(regno, gicv3_redist_affid(cs), value);
1799         } else {
1800             value = extract64(cs->ich_lr_el2[regno], 0, 32);
1801             trace_gicv3_ich_lr32_read(regno, gicv3_redist_affid(cs), value);
1802         }
1803     } else {
1804         value = cs->ich_lr_el2[regno];
1805         trace_gicv3_ich_lr_read(regno, gicv3_redist_affid(cs), value);
1806     }
1807 
1808     return value;
1809 }
1810 
1811 static void ich_lr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1812                          uint64_t value)
1813 {
1814     GICv3CPUState *cs = icc_cs_from_env(env);
1815     int regno = ri->opc2 | ((ri->crm & 1) << 3);
1816 
1817     /* This write function handles all of:
1818      * 64-bit writes to the whole LR
1819      * 32-bit writes to the low half of the LR
1820      * 32-bit writes to the high half of the LR
1821      */
1822     if (ri->state == ARM_CP_STATE_AA32) {
1823         if (ri->crm >= 14) {
1824             trace_gicv3_ich_lrc_write(regno, gicv3_redist_affid(cs), value);
1825             value = deposit64(cs->ich_lr_el2[regno], 32, 32, value);
1826         } else {
1827             trace_gicv3_ich_lr32_write(regno, gicv3_redist_affid(cs), value);
1828             value = deposit64(cs->ich_lr_el2[regno], 0, 32, value);
1829         }
1830     } else {
1831         trace_gicv3_ich_lr_write(regno, gicv3_redist_affid(cs), value);
1832     }
1833 
1834     /* Enforce RES0 bits in priority field */
1835     if (cs->vpribits < 8) {
1836         value = deposit64(value, ICH_LR_EL2_PRIORITY_SHIFT,
1837                           8 - cs->vpribits, 0);
1838     }
1839 
1840     cs->ich_lr_el2[regno] = value;
1841     gicv3_cpuif_virt_update(cs);
1842 }
1843 
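/* ICH_VTR_EL2 is read-only: ListRegs, PREbits and PRIbits are all reported
 * "minus one" encoded, and an IDbits value of 1 means 24-bit virtual
 * interrupt identifiers.
 */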
1844 static uint64_t ich_vtr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1845 {
1846     GICv3CPUState *cs = icc_cs_from_env(env);
1847     uint64_t value;
1848 
1849     value = ((cs->num_list_regs - 1) << ICH_VTR_EL2_LISTREGS_SHIFT)
1850         | ICH_VTR_EL2_TDS | ICH_VTR_EL2_NV4 | ICH_VTR_EL2_A3V
1851         | (1 << ICH_VTR_EL2_IDBITS_SHIFT)
1852         | ((cs->vprebits - 1) << ICH_VTR_EL2_PREBITS_SHIFT)
1853         | ((cs->vpribits - 1) << ICH_VTR_EL2_PRIBITS_SHIFT);
1854 
1855     trace_gicv3_ich_vtr_read(gicv3_redist_affid(cs), value);
1856     return value;
1857 }
1858 
1859 static uint64_t ich_misr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1860 {
1861     GICv3CPUState *cs = icc_cs_from_env(env);
1862     uint64_t value = maintenance_interrupt_state(cs);
1863 
1864     trace_gicv3_ich_misr_read(gicv3_redist_affid(cs), value);
1865     return value;
1866 }
1867 
1868 static uint64_t ich_eisr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1869 {
1870     GICv3CPUState *cs = icc_cs_from_env(env);
1871     uint64_t value = eoi_maintenance_interrupt_state(cs, NULL);
1872 
1873     trace_gicv3_ich_eisr_read(gicv3_redist_affid(cs), value);
1874     return value;
1875 }
1876 
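/* ICH_ELRSR_EL2: a list register is reported as usable when its state is
 * Invalid and it will not still generate an EOI maintenance interrupt,
 * i.e. when either its HW bit is set or its EOI bit is clear.
 */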
1877 static uint64_t ich_elrsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1878 {
1879     GICv3CPUState *cs = icc_cs_from_env(env);
1880     uint64_t value = 0;
1881     int i;
1882 
1883     for (i = 0; i < cs->num_list_regs; i++) {
1884         uint64_t lr = cs->ich_lr_el2[i];
1885 
1886         if ((lr & ICH_LR_EL2_STATE_MASK) == 0 &&
1887             ((lr & ICH_LR_EL2_HW) != 0 || (lr & ICH_LR_EL2_EOI) == 0)) {
1888             value |= (1 << i);
1889         }
1890     }
1891 
1892     trace_gicv3_ich_elrsr_read(gicv3_redist_affid(cs), value);
1893     return value;
1894 }
1895 
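/* Virtual-interface control registers (ICH_*). These are only defined for
 * CPUs which implement EL2 and provide list registers; see
 * gicv3_init_cpuif() below.
 */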
1896 static const ARMCPRegInfo gicv3_cpuif_hcr_reginfo[] = {
1897     { .name = "ICH_AP0R0_EL2", .state = ARM_CP_STATE_BOTH,
1898       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 0,
1899       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1900       .access = PL2_RW,
1901       .readfn = ich_ap_read,
1902       .writefn = ich_ap_write,
1903     },
1904     { .name = "ICH_AP1R0_EL2", .state = ARM_CP_STATE_BOTH,
1905       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 0,
1906       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1907       .access = PL2_RW,
1908       .readfn = ich_ap_read,
1909       .writefn = ich_ap_write,
1910     },
1911     { .name = "ICH_HCR_EL2", .state = ARM_CP_STATE_BOTH,
1912       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 0,
1913       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1914       .access = PL2_RW,
1915       .readfn = ich_hcr_read,
1916       .writefn = ich_hcr_write,
1917     },
1918     { .name = "ICH_VTR_EL2", .state = ARM_CP_STATE_BOTH,
1919       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 1,
1920       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1921       .access = PL2_R,
1922       .readfn = ich_vtr_read,
1923     },
1924     { .name = "ICH_MISR_EL2", .state = ARM_CP_STATE_BOTH,
1925       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 2,
1926       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1927       .access = PL2_R,
1928       .readfn = ich_misr_read,
1929     },
1930     { .name = "ICH_EISR_EL2", .state = ARM_CP_STATE_BOTH,
1931       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 3,
1932       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1933       .access = PL2_R,
1934       .readfn = ich_eisr_read,
1935     },
1936     { .name = "ICH_ELRSR_EL2", .state = ARM_CP_STATE_BOTH,
1937       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 5,
1938       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1939       .access = PL2_R,
1940       .readfn = ich_elrsr_read,
1941     },
1942     { .name = "ICH_VMCR_EL2", .state = ARM_CP_STATE_BOTH,
1943       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 7,
1944       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1945       .access = PL2_RW,
1946       .readfn = ich_vmcr_read,
1947       .writefn = ich_vmcr_write,
1948     },
1949     REGINFO_SENTINEL
1950 };
1951 
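/* Extra ICH_APxR<n>_EL2 registers; these are only defined when the CPU's
 * virtual preemption bits require them (see gicv3_init_cpuif() below).
 */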
1952 static const ARMCPRegInfo gicv3_cpuif_ich_apxr1_reginfo[] = {
1953     { .name = "ICH_AP0R1_EL2", .state = ARM_CP_STATE_BOTH,
1954       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 1,
1955       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1956       .access = PL2_RW,
1957       .readfn = ich_ap_read,
1958       .writefn = ich_ap_write,
1959     },
1960     { .name = "ICH_AP1R1_EL2", .state = ARM_CP_STATE_BOTH,
1961       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 1,
1962       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1963       .access = PL2_RW,
1964       .readfn = ich_ap_read,
1965       .writefn = ich_ap_write,
1966     },
1967     REGINFO_SENTINEL
1968 };
1969 
1970 static const ARMCPRegInfo gicv3_cpuif_ich_apxr23_reginfo[] = {
1971     { .name = "ICH_AP0R2_EL2", .state = ARM_CP_STATE_BOTH,
1972       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 2,
1973       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1974       .access = PL2_RW,
1975       .readfn = ich_ap_read,
1976       .writefn = ich_ap_write,
1977     },
1978     { .name = "ICH_AP0R3_EL2", .state = ARM_CP_STATE_BOTH,
1979       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 3,
1980       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1981       .access = PL2_RW,
1982       .readfn = ich_ap_read,
1983       .writefn = ich_ap_write,
1984     },
1985     { .name = "ICH_AP1R2_EL2", .state = ARM_CP_STATE_BOTH,
1986       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 2,
1987       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1988       .access = PL2_RW,
1989       .readfn = ich_ap_read,
1990       .writefn = ich_ap_write,
1991     },
1992     { .name = "ICH_AP1R3_EL2", .state = ARM_CP_STATE_BOTH,
1993       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 3,
1994       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1995       .access = PL2_RW,
1996       .readfn = ich_ap_read,
1997       .writefn = ich_ap_write,
1998     },
1999     REGINFO_SENTINEL
2000 };
2001 
2002 static void gicv3_cpuif_el_change_hook(ARMCPU *cpu, void *opaque)
2003 {
2004     GICv3CPUState *cs = opaque;
2005 
2006     gicv3_cpuif_update(cs);
2007 }
2008 
2009 void gicv3_init_cpuif(GICv3State *s)
2010 {
2011     /* Called from the GICv3 realize function; register our system
2012      * registers with the CPU
2013      */
2014     int i;
2015 
2016     for (i = 0; i < s->num_cpu; i++) {
2017         ARMCPU *cpu = ARM_CPU(qemu_get_cpu(i));
2018         GICv3CPUState *cs = &s->cpu[i];
2019 
2020         /* Note that we can't just use the GICv3CPUState as an opaque pointer
2021          * in define_arm_cp_regs_with_opaque(), because when we're called back
2022          * it might be with code translated by CPU 0 but run by CPU 1, in
2023          * which case we'd get the wrong value.
2024          * So instead we define the regs with no ri->opaque info, and
2025          * get back to the GICv3CPUState from the ARMCPU by reading back
2026          * the opaque pointer from the el_change_hook, which we're going
2027          * to need to register anyway.
2028          */
2029         define_arm_cp_regs(cpu, gicv3_cpuif_reginfo);
2030         if (arm_feature(&cpu->env, ARM_FEATURE_EL2)
2031             && cpu->gic_num_lrs) {
2032             int j;
2033 
2034             cs->num_list_regs = cpu->gic_num_lrs;
2035             cs->vpribits = cpu->gic_vpribits;
2036             cs->vprebits = cpu->gic_vprebits;
2037 
2038             /* Check against architectural constraints: getting these
2039              * wrong would be a bug in the CPU code defining these,
2040              * and the implementation relies on them holding.
2041              */
2042             g_assert(cs->vprebits <= cs->vpribits);
2043             g_assert(cs->vprebits >= 5 && cs->vprebits <= 7);
2044             g_assert(cs->vpribits >= 5 && cs->vpribits <= 8);
2045 
2046             define_arm_cp_regs(cpu, gicv3_cpuif_hcr_reginfo);
2047 
2048             for (j = 0; j < cs->num_list_regs; j++) {
2049                 /* Note that the AArch64 LRs are 64-bit; the AArch32 LRs
2050                  * are split into two cp15 regs, LR (the low part, with the
2051                  * same encoding as the AArch64 LR) and LRC (the high part).
2052                  */
2053                 ARMCPRegInfo lr_regset[] = {
2054                     { .name = "ICH_LRn_EL2", .state = ARM_CP_STATE_BOTH,
2055                       .opc0 = 3, .opc1 = 4, .crn = 12,
2056                       .crm = 12 + (j >> 3), .opc2 = j & 7,
2057                       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2058                       .access = PL2_RW,
2059                       .readfn = ich_lr_read,
2060                       .writefn = ich_lr_write,
2061                     },
2062                     { .name = "ICH_LRCn_EL2", .state = ARM_CP_STATE_AA32,
2063                       .cp = 15, .opc1 = 4, .crn = 12,
2064                       .crm = 14 + (j >> 3), .opc2 = j & 7,
2065                       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2066                       .access = PL2_RW,
2067                       .readfn = ich_lr_read,
2068                       .writefn = ich_lr_write,
2069                     },
2070                     REGINFO_SENTINEL
2071                 };
2072                 define_arm_cp_regs(cpu, lr_regset);
2073             }
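            /* The number of ICH_APxR<n>_EL2 registers scales with the
             * virtual preemption bits: 5 bits need only APxR0, 6 bits
             * also need APxR1, and 7 bits need APxR2 and APxR3 as well.
             */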
2074             if (cs->vprebits >= 6) {
2075                 define_arm_cp_regs(cpu, gicv3_cpuif_ich_apxr1_reginfo);
2076             }
2077             if (cs->vprebits == 7) {
2078                 define_arm_cp_regs(cpu, gicv3_cpuif_ich_apxr23_reginfo);
2079             }
2080         }
2081         arm_register_el_change_hook(cpu, gicv3_cpuif_el_change_hook, cs);
2082     }
2083 }
2084