xref: /openbmc/qemu/hw/intc/arm_gicv3_cpuif.c (revision 7562f907)
1 /*
2  * ARM Generic Interrupt Controller v3
3  *
4  * Copyright (c) 2016 Linaro Limited
5  * Written by Peter Maydell
6  *
7  * This code is licensed under the GPL, version 2 or (at your option)
8  * any later version.
9  */
10 
11 /* This file contains the code for the system register interface
12  * portions of the GICv3.
13  */
14 
15 #include "qemu/osdep.h"
16 #include "qemu/bitops.h"
17 #include "qemu/main-loop.h"
18 #include "trace.h"
19 #include "gicv3_internal.h"
20 #include "cpu.h"
21 
22 static GICv3CPUState *icc_cs_from_env(CPUARMState *env)
23 {
24     /* Given the CPU, find the right GICv3CPUState struct.
25      * Since we registered the CPU interface with the EL change hook as
26      * the opaque pointer, we can just directly get from the CPU to it.
27      */
28     return arm_get_el_change_hook_opaque(arm_env_get_cpu(env));
29 }
30 
31 static bool gicv3_use_ns_bank(CPUARMState *env)
32 {
33     /* Return true if we should use the NonSecure bank for a banked GIC
34      * CPU interface register. Note that this differs from the
35      * access_secure_reg() function because GICv3 banked registers are
36      * banked even for AArch64, unlike the other CPU system registers.
37      */
38     return !arm_is_secure_below_el3(env);
39 }
40 
41 /* The minimum BPR for the virtual interface is a configurable property */
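/* For example, with vprebits == 5 the minimum VBPR0 is 7 - 5 == 2, and
 * write_vbpr() below enforces a minimum one greater than that (3) for group 1.
 */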
42 static inline int icv_min_vbpr(GICv3CPUState *cs)
43 {
44     return 7 - cs->vprebits;
45 }
46 
47 /* Simple accessor functions for LR fields */
48 static uint32_t ich_lr_vintid(uint64_t lr)
49 {
50     return extract64(lr, ICH_LR_EL2_VINTID_SHIFT, ICH_LR_EL2_VINTID_LENGTH);
51 }
52 
53 static uint32_t ich_lr_pintid(uint64_t lr)
54 {
55     return extract64(lr, ICH_LR_EL2_PINTID_SHIFT, ICH_LR_EL2_PINTID_LENGTH);
56 }
57 
58 static uint32_t ich_lr_prio(uint64_t lr)
59 {
60     return extract64(lr, ICH_LR_EL2_PRIORITY_SHIFT, ICH_LR_EL2_PRIORITY_LENGTH);
61 }
62 
63 static int ich_lr_state(uint64_t lr)
64 {
65     return extract64(lr, ICH_LR_EL2_STATE_SHIFT, ICH_LR_EL2_STATE_LENGTH);
66 }
67 
68 static bool icv_access(CPUARMState *env, int hcr_flags)
69 {
70     /* Return true if this ICC_ register access should really be
71      * directed to an ICV_ access. hcr_flags is a mask of
72      * HCR_EL2 bits to check: we treat this as an ICV_ access
73      * if we are in NS EL1 and at least one of the specified
74      * HCR_EL2 bits is set.
75      *
76      * ICV registers fall into four categories:
77      *  * access if NS EL1 and HCR_EL2.FMO == 1:
78      *    all ICV regs with '0' in their name
79      *  * access if NS EL1 and HCR_EL2.IMO == 1:
80      *    all ICV regs with '1' in their name
81      *  * access if NS EL1 and either IMO or FMO == 1:
82      *    CTLR, DIR, PMR, RPR
83      */
84     return (env->cp15.hcr_el2 & hcr_flags) && arm_current_el(env) == 1
85         && !arm_is_secure_below_el3(env);
86 }
87 
88 static int read_vbpr(GICv3CPUState *cs, int grp)
89 {
90     /* Read VBPR value out of the VMCR field (caller must handle
91      * VCBPR effects if required)
92      */
93     if (grp == GICV3_G0) {
94         return extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR0_SHIFT,
95                      ICH_VMCR_EL2_VBPR0_LENGTH);
96     } else {
97         return extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR1_SHIFT,
98                          ICH_VMCR_EL2_VBPR1_LENGTH);
99     }
100 }
101 
102 static void write_vbpr(GICv3CPUState *cs, int grp, int value)
103 {
104     /* Write new VBPR value, handling the "writing a value less than
105      * the minimum sets it to the minimum" semantics.
106      */
107     int min = icv_min_vbpr(cs);
108 
109     if (grp != GICV3_G0) {
110         min++;
111     }
112 
113     value = MAX(value, min);
114 
115     if (grp == GICV3_G0) {
116         cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR0_SHIFT,
117                                      ICH_VMCR_EL2_VBPR0_LENGTH, value);
118     } else {
119         cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR1_SHIFT,
120                                      ICH_VMCR_EL2_VBPR1_LENGTH, value);
121     }
122 }
123 
124 static uint32_t icv_fullprio_mask(GICv3CPUState *cs)
125 {
126     /* Return a mask word which clears the unimplemented priority bits
127      * from a priority value for a virtual interrupt. (Not to be confused
128      * with the group priority, whose mask depends on the value of VBPR
129      * for the interrupt group.)
130      */
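    /* e.g. with vpribits == 5 this is ~0U << 3 == 0xfffffff8, so only the
     * top 5 bits of an 8-bit virtual priority are implemented and the low
     * 3 bits are always zero.
     */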
131     return ~0U << (8 - cs->vpribits);
132 }
133 
134 static int ich_highest_active_virt_prio(GICv3CPUState *cs)
135 {
136     /* Calculate the current running priority based on the set bits
137      * in the ICH Active Priority Registers.
138      */
139     int i;
140     int aprmax = 1 << (cs->vprebits - 5);
141 
142     assert(aprmax <= ARRAY_SIZE(cs->ich_apr[0]));
143 
144     for (i = 0; i < aprmax; i++) {
145         uint32_t apr = cs->ich_apr[GICV3_G0][i] |
146             cs->ich_apr[GICV3_G1NS][i];
147 
148         if (!apr) {
149             continue;
150         }
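        /* Convert the APR bit number back into a group priority value:
         * the shift by (icv_min_vbpr() + 1) == (8 - vprebits) is the
         * inverse of the "prio >> (8 - vprebits)" mapping that
         * icv_activate_irq() uses to set the bit.
         */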
151         return (i * 32 + ctz32(apr)) << (icv_min_vbpr(cs) + 1);
152     }
153     /* No current active interrupts: return idle priority */
154     return 0xff;
155 }
156 
157 static int hppvi_index(GICv3CPUState *cs)
158 {
159     /* Return the list register index of the highest priority pending
160      * virtual interrupt, as per the HighestPriorityVirtualInterrupt
161      * pseudocode. If no pending virtual interrupts, return -1.
162      */
163     int idx = -1;
164     int i;
165     /* Note that a list register entry with a priority of 0xff will
166      * never be reported by this function; this is the architecturally
167      * correct behaviour.
168      */
169     int prio = 0xff;
170 
171     if (!(cs->ich_vmcr_el2 & (ICH_VMCR_EL2_VENG0 | ICH_VMCR_EL2_VENG1))) {
172         /* Both groups disabled, definitely nothing to do */
173         return idx;
174     }
175 
176     for (i = 0; i < cs->num_list_regs; i++) {
177         uint64_t lr = cs->ich_lr_el2[i];
178         int thisprio;
179 
180         if (ich_lr_state(lr) != ICH_LR_EL2_STATE_PENDING) {
181             /* Not Pending */
182             continue;
183         }
184 
185         /* Ignore interrupts if relevant group enable not set */
186         if (lr & ICH_LR_EL2_GROUP) {
187             if (!(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) {
188                 continue;
189             }
190         } else {
191             if (!(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0)) {
192                 continue;
193             }
194         }
195 
196         thisprio = ich_lr_prio(lr);
197 
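        /* Strict less-than: if two list registers have the same priority
         * the lower-numbered one is chosen.
         */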
198         if (thisprio < prio) {
199             prio = thisprio;
200             idx = i;
201         }
202     }
203 
204     return idx;
205 }
206 
207 static uint32_t icv_gprio_mask(GICv3CPUState *cs, int group)
208 {
209     /* Return a mask word which clears the subpriority bits from
210      * a priority value for a virtual interrupt in the specified group.
211      * This depends on the VBPR value:
212      *  a BPR of 0 means the group priority bits are [7:1];
213      *  a BPR of 1 means they are [7:2], and so on down to
214      *  a BPR of 7 meaning no group priority bits at all.
215      * Which BPR to use depends on the group of the interrupt and
216      * the current ICH_VMCR_EL2.VCBPR settings.
217      */
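    /* (For example, an effective VBPR of 2 yields ~0U << 3, keeping the
     * group priority in bits [7:3].)
     */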
218     if (group == GICV3_G1NS && cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR) {
219         group = GICV3_G0;
220     }
221 
222     return ~0U << (read_vbpr(cs, group) + 1);
223 }
224 
225 static bool icv_hppi_can_preempt(GICv3CPUState *cs, uint64_t lr)
226 {
227     /* Return true if we can signal this virtual interrupt defined by
228      * the given list register value; see the pseudocode functions
229      * CanSignalVirtualInterrupt and CanSignalVirtualInt.
230      * Compare also icc_hppi_can_preempt() which is the non-virtual
231      * equivalent of these checks.
232      */
233     int grp;
234     uint32_t mask, prio, rprio, vpmr;
235 
236     if (!(cs->ich_hcr_el2 & ICH_HCR_EL2_EN)) {
237         /* Virtual interface disabled */
238         return false;
239     }
240 
241     /* We don't need to check that this LR is in Pending state because
242      * that has already been done in hppvi_index().
243      */
244 
245     prio = ich_lr_prio(lr);
246     vpmr = extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
247                      ICH_VMCR_EL2_VPMR_LENGTH);
248 
249     if (prio >= vpmr) {
250         /* Priority mask masks this interrupt */
251         return false;
252     }
253 
254     rprio = ich_highest_active_virt_prio(cs);
255     if (rprio == 0xff) {
256         /* No running interrupt so we can preempt */
257         return true;
258     }
259 
260     grp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;
261 
262     mask = icv_gprio_mask(cs, grp);
263 
264     /* We only preempt a running interrupt if the pending interrupt's
265      * group priority is sufficient (the subpriorities are not considered).
266      */
267     if ((prio & mask) < (rprio & mask)) {
268         return true;
269     }
270 
271     return false;
272 }
273 
274 static uint32_t eoi_maintenance_interrupt_state(GICv3CPUState *cs,
275                                                 uint32_t *misr)
276 {
277     /* Return a set of bits indicating the EOI maintenance interrupt status
278      * for each list register. The EOI maintenance interrupt status is
279      * 1 if LR.State == 0 && LR.HW == 0 && LR.EOI == 1
280      * (see the GICv3 spec for the ICH_EISR_EL2 register).
281      * If misr is not NULL then we should also collect the information
282      * about the MISR.EOI, MISR.NP and MISR.U bits.
283      */
284     uint32_t value = 0;
285     int validcount = 0;
286     bool seenpending = false;
287     int i;
288 
289     for (i = 0; i < cs->num_list_regs; i++) {
290         uint64_t lr = cs->ich_lr_el2[i];
291 
292         if ((lr & (ICH_LR_EL2_STATE_MASK | ICH_LR_EL2_HW | ICH_LR_EL2_EOI))
293             == ICH_LR_EL2_EOI) {
294             value |= (1 << i);
295         }
296         if ((lr & ICH_LR_EL2_STATE_MASK)) {
297             validcount++;
298         }
299         if (ich_lr_state(lr) == ICH_LR_EL2_STATE_PENDING) {
300             seenpending = true;
301         }
302     }
303 
304     if (misr) {
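        /* Underflow: none, or only one, of the list registers holds a
         * valid interrupt, and ICH_HCR_EL2.UIE is set.
         */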
305         if (validcount < 2 && (cs->ich_hcr_el2 & ICH_HCR_EL2_UIE)) {
306             *misr |= ICH_MISR_EL2_U;
307         }
308         if (!seenpending && (cs->ich_hcr_el2 & ICH_HCR_EL2_NPIE)) {
309             *misr |= ICH_MISR_EL2_NP;
310         }
311         if (value) {
312             *misr |= ICH_MISR_EL2_EOI;
313         }
314     }
315     return value;
316 }
317 
318 static uint32_t maintenance_interrupt_state(GICv3CPUState *cs)
319 {
320     /* Return a set of bits indicating the maintenance interrupt status
321      * (as seen in the ICH_MISR_EL2 register).
322      */
323     uint32_t value = 0;
324 
325     /* Scan list registers and fill in the U, NP and EOI bits */
326     eoi_maintenance_interrupt_state(cs, &value);
327 
328     if ((cs->ich_hcr_el2 & ICH_HCR_EL2_LRENPIE) &&
        (cs->ich_hcr_el2 & ICH_HCR_EL2_EOICOUNT_MASK)) {
329         value |= ICH_MISR_EL2_LRENP;
330     }
331 
332     if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP0EIE) &&
333         (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0)) {
334         value |= ICH_MISR_EL2_VGRP0E;
335     }
336 
337     if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP0DIE) &&
338         !(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0)) {
339         value |= ICH_MISR_EL2_VGRP0D;
340     }
341     if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP1EIE) &&
342         (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) {
343         value |= ICH_MISR_EL2_VGRP1E;
344     }
345 
346     if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP1DIE) &&
347         !(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) {
348         value |= ICH_MISR_EL2_VGRP1D;
349     }
350 
351     return value;
352 }
353 
354 static void gicv3_cpuif_virt_update(GICv3CPUState *cs)
355 {
356     /* Tell the CPU about any pending virtual interrupts or
357      * maintenance interrupts, following a change to the state
358      * of the CPU interface relevant to virtual interrupts.
359      *
360      * CAUTION: this function will call qemu_set_irq() on the
361      * CPU maintenance IRQ line, which is typically wired up
362      * to the GIC as a per-CPU interrupt. This means that it
363      * will recursively call back into the GIC code via
364      * gicv3_redist_set_irq() and thus into the CPU interface code's
365      * gicv3_cpuif_update(). It is therefore important that this
366      * function is only called as the final action of a CPU interface
367      * register write implementation, after all the GIC state
368      * fields have been updated. gicv3_cpuif_update() also must
369      * not cause this function to be called, but that happens
370      * naturally as a result of there being no architectural
371      * linkage between the physical and virtual GIC logic.
372      */
373     int idx;
374     int irqlevel = 0;
375     int fiqlevel = 0;
376     int maintlevel = 0;
377 
378     idx = hppvi_index(cs);
379     trace_gicv3_cpuif_virt_update(gicv3_redist_affid(cs), idx);
380     if (idx >= 0) {
381         uint64_t lr = cs->ich_lr_el2[idx];
382 
383         if (icv_hppi_can_preempt(cs, lr)) {
384             /* Virtual interrupts are simple: G0 are always FIQ, and G1 IRQ */
385             if (lr & ICH_LR_EL2_GROUP) {
386                 irqlevel = 1;
387             } else {
388                 fiqlevel = 1;
389             }
390         }
391     }
392 
393     if (cs->ich_hcr_el2 & ICH_HCR_EL2_EN) {
394         maintlevel = maintenance_interrupt_state(cs);
395     }
396 
397     trace_gicv3_cpuif_virt_set_irqs(gicv3_redist_affid(cs), fiqlevel,
398                                     irqlevel, maintlevel);
399 
400     qemu_set_irq(cs->parent_vfiq, fiqlevel);
401     qemu_set_irq(cs->parent_virq, irqlevel);
402     qemu_set_irq(cs->maintenance_irq, maintlevel);
403 }
404 
405 static uint64_t icv_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
406 {
407     GICv3CPUState *cs = icc_cs_from_env(env);
408     int regno = ri->opc2 & 3;
409     int grp = ri->crm & 1 ? GICV3_G1NS : GICV3_G0;
410     uint64_t value = cs->ich_apr[grp][regno];
411 
412     trace_gicv3_icv_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
413     return value;
414 }
415 
416 static void icv_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
417                          uint64_t value)
418 {
419     GICv3CPUState *cs = icc_cs_from_env(env);
420     int regno = ri->opc2 & 3;
421     int grp = ri->crm & 1 ? GICV3_G1NS : GICV3_G0;
422 
423     trace_gicv3_icv_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
424 
425     cs->ich_apr[grp][regno] = value & 0xFFFFFFFFU;
426 
427     gicv3_cpuif_virt_update(cs);
428     return;
429 }
430 
431 static uint64_t icv_bpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
432 {
433     GICv3CPUState *cs = icc_cs_from_env(env);
434     int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1NS;
435     uint64_t bpr;
436     bool satinc = false;
437 
438     if (grp == GICV3_G1NS && (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR)) {
439         /* reads return bpr0 + 1 saturated to 7, writes ignored */
440         grp = GICV3_G0;
441         satinc = true;
442     }
443 
444     bpr = read_vbpr(cs, grp);
445 
446     if (satinc) {
447         bpr++;
448         bpr = MIN(bpr, 7);
449     }
450 
451     trace_gicv3_icv_bpr_read(ri->crm == 8 ? 0 : 1, gicv3_redist_affid(cs), bpr);
452 
453     return bpr;
454 }
455 
456 static void icv_bpr_write(CPUARMState *env, const ARMCPRegInfo *ri,
457                           uint64_t value)
458 {
459     GICv3CPUState *cs = icc_cs_from_env(env);
460     int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1NS;
461 
462     trace_gicv3_icv_bpr_write(ri->crm == 8 ? 0 : 1,
463                               gicv3_redist_affid(cs), value);
464 
465     if (grp == GICV3_G1NS && (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR)) {
466         /* reads return bpr0 + 1 saturated to 7, writes ignored */
467         return;
468     }
469 
470     write_vbpr(cs, grp, value);
471 
472     gicv3_cpuif_virt_update(cs);
473 }
474 
475 static uint64_t icv_pmr_read(CPUARMState *env, const ARMCPRegInfo *ri)
476 {
477     GICv3CPUState *cs = icc_cs_from_env(env);
478     uint64_t value;
479 
480     value = extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
481                       ICH_VMCR_EL2_VPMR_LENGTH);
482 
483     trace_gicv3_icv_pmr_read(gicv3_redist_affid(cs), value);
484     return value;
485 }
486 
487 static void icv_pmr_write(CPUARMState *env, const ARMCPRegInfo *ri,
488                           uint64_t value)
489 {
490     GICv3CPUState *cs = icc_cs_from_env(env);
491 
492     trace_gicv3_icv_pmr_write(gicv3_redist_affid(cs), value);
493 
494     value &= icv_fullprio_mask(cs);
495 
496     cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
497                                  ICH_VMCR_EL2_VPMR_LENGTH, value);
498 
499     gicv3_cpuif_virt_update(cs);
500 }
501 
502 static uint64_t icv_igrpen_read(CPUARMState *env, const ARMCPRegInfo *ri)
503 {
504     GICv3CPUState *cs = icc_cs_from_env(env);
505     int enbit;
506     uint64_t value;
507 
508     enbit = ri->opc2 & 1 ? ICH_VMCR_EL2_VENG1_SHIFT : ICH_VMCR_EL2_VENG0_SHIFT;
509     value = extract64(cs->ich_vmcr_el2, enbit, 1);
510 
511     trace_gicv3_icv_igrpen_read(ri->opc2 & 1 ? 1 : 0,
512                                 gicv3_redist_affid(cs), value);
513     return value;
514 }
515 
516 static void icv_igrpen_write(CPUARMState *env, const ARMCPRegInfo *ri,
517                              uint64_t value)
518 {
519     GICv3CPUState *cs = icc_cs_from_env(env);
520     int enbit;
521 
522     trace_gicv3_icv_igrpen_write(ri->opc2 & 1 ? 1 : 0,
523                                  gicv3_redist_affid(cs), value);
524 
525     enbit = ri->opc2 & 1 ? ICH_VMCR_EL2_VENG1_SHIFT : ICH_VMCR_EL2_VENG0_SHIFT;
526 
527     cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, enbit, 1, value);
528     gicv3_cpuif_virt_update(cs);
529 }
530 
531 static uint64_t icv_ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri)
532 {
533     GICv3CPUState *cs = icc_cs_from_env(env);
534     uint64_t value;
535 
536     /* Note that the fixed fields here (A3V, SEIS, IDbits, PRIbits)
537      * should match the ones reported in ich_vtr_read().
538      */
539     value = ICC_CTLR_EL1_A3V | (1 << ICC_CTLR_EL1_IDBITS_SHIFT) |
540         (7 << ICC_CTLR_EL1_PRIBITS_SHIFT);
541 
542     if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VEOIM) {
543         value |= ICC_CTLR_EL1_EOIMODE;
544     }
545 
546     if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR) {
547         value |= ICC_CTLR_EL1_CBPR;
548     }
549 
550     trace_gicv3_icv_ctlr_read(gicv3_redist_affid(cs), value);
551     return value;
552 }
553 
554 static void icv_ctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
555                                uint64_t value)
556 {
557     GICv3CPUState *cs = icc_cs_from_env(env);
558 
559     trace_gicv3_icv_ctlr_write(gicv3_redist_affid(cs), value);
560 
561     cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VCBPR_SHIFT,
562                                  1, value & ICC_CTLR_EL1_CBPR ? 1 : 0);
563     cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VEOIM_SHIFT,
564                                  1, value & ICC_CTLR_EL1_EOIMODE ? 1 : 0);
565 
566     gicv3_cpuif_virt_update(cs);
567 }
568 
569 static uint64_t icv_rpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
570 {
571     GICv3CPUState *cs = icc_cs_from_env(env);
572     int prio = ich_highest_active_virt_prio(cs);
573 
574     trace_gicv3_icv_rpr_read(gicv3_redist_affid(cs), prio);
575     return prio;
576 }
577 
578 static uint64_t icv_hppir_read(CPUARMState *env, const ARMCPRegInfo *ri)
579 {
580     GICv3CPUState *cs = icc_cs_from_env(env);
581     int grp = ri->crm == 8 ? GICV3_G0 : GICV3_G1NS;
582     int idx = hppvi_index(cs);
583     uint64_t value = INTID_SPURIOUS;
584 
585     if (idx >= 0) {
586         uint64_t lr = cs->ich_lr_el2[idx];
587         int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;
588 
589         if (grp == thisgrp) {
590             value = ich_lr_vintid(lr);
591         }
592     }
593 
594     trace_gicv3_icv_hppir_read(grp, gicv3_redist_affid(cs), value);
595     return value;
596 }
597 
598 static void icv_activate_irq(GICv3CPUState *cs, int idx, int grp)
599 {
600     /* Activate the interrupt in the specified list register
601      * by moving it from Pending to Active state, and update the
602      * Active Priority Registers.
603      */
604     uint32_t mask = icv_gprio_mask(cs, grp);
605     int prio = ich_lr_prio(cs->ich_lr_el2[idx]) & mask;
606     int aprbit = prio >> (8 - cs->vprebits);
607     int regno = aprbit / 32;
608     int regbit = aprbit % 32;
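    /* The group priority selects one bit in the virtual active priority
     * registers; with vprebits == 5 there are at most 32 group priorities,
     * so regno is always 0 and a single 32-bit APR word is enough.
     */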
609 
610     cs->ich_lr_el2[idx] &= ~ICH_LR_EL2_STATE_PENDING_BIT;
611     cs->ich_lr_el2[idx] |= ICH_LR_EL2_STATE_ACTIVE_BIT;
612     cs->ich_apr[grp][regno] |= (1 << regbit);
613 }
614 
615 static uint64_t icv_iar_read(CPUARMState *env, const ARMCPRegInfo *ri)
616 {
617     GICv3CPUState *cs = icc_cs_from_env(env);
618     int grp = ri->crm == 8 ? GICV3_G0 : GICV3_G1NS;
619     int idx = hppvi_index(cs);
620     uint64_t intid = INTID_SPURIOUS;
621 
622     if (idx >= 0) {
623         uint64_t lr = cs->ich_lr_el2[idx];
624         int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;
625 
626         if (thisgrp == grp && icv_hppi_can_preempt(cs, lr)) {
627             intid = ich_lr_vintid(lr);
628             if (intid < INTID_SECURE) {
629                 icv_activate_irq(cs, idx, grp);
630             } else {
631                 /* Interrupt goes from Pending to Invalid */
632                 cs->ich_lr_el2[idx] &= ~ICH_LR_EL2_STATE_PENDING_BIT;
633                 /* We will now return the (bogus) ID from the list register,
634                  * as per the pseudocode.
635                  */
636             }
637         }
638     }
639 
640     trace_gicv3_icv_iar_read(ri->crm == 8 ? 0 : 1,
641                              gicv3_redist_affid(cs), intid);
642     return intid;
643 }
644 
645 static int icc_highest_active_prio(GICv3CPUState *cs)
646 {
647     /* Calculate the current running priority based on the set bits
648      * in the Active Priority Registers.
649      */
650     int i;
651 
652     for (i = 0; i < ARRAY_SIZE(cs->icc_apr[0]); i++) {
653         uint32_t apr = cs->icc_apr[GICV3_G0][i] |
654             cs->icc_apr[GICV3_G1][i] | cs->icc_apr[GICV3_G1NS][i];
655 
656         if (!apr) {
657             continue;
658         }
659         return (i * 32 + ctz32(apr)) << (GIC_MIN_BPR + 1);
660     }
661     /* No current active interrupts: return idle priority */
662     return 0xff;
663 }
664 
665 static uint32_t icc_gprio_mask(GICv3CPUState *cs, int group)
666 {
667     /* Return a mask word which clears the subpriority bits from
668      * a priority value for an interrupt in the specified group.
669      * This depends on the BPR value:
670      *  a BPR of 0 means the group priority bits are [7:1];
671      *  a BPR of 1 means they are [7:2], and so on down to
672      *  a BPR of 7 meaning no group priority bits at all.
673      * Which BPR to use depends on the group of the interrupt and
674      * the current ICC_CTLR.CBPR settings.
675      */
676     if ((group == GICV3_G1 && cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR) ||
677         (group == GICV3_G1NS &&
678          cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR)) {
679         group = GICV3_G0;
680     }
681 
682     return ~0U << ((cs->icc_bpr[group] & 7) + 1);
683 }
684 
685 static bool icc_no_enabled_hppi(GICv3CPUState *cs)
686 {
687     /* Return true if there is no pending interrupt, or the
688      * highest priority pending interrupt is in a group which has been
689      * disabled at the CPU interface by the ICC_IGRPEN* register enable bits.
690      */
691     return cs->hppi.prio == 0xff || (cs->icc_igrpen[cs->hppi.grp] == 0);
692 }
693 
694 static bool icc_hppi_can_preempt(GICv3CPUState *cs)
695 {
696     /* Return true if we have a pending interrupt of sufficient
697      * priority to preempt.
698      */
699     int rprio;
700     uint32_t mask;
701 
702     if (icc_no_enabled_hppi(cs)) {
703         return false;
704     }
705 
706     if (cs->hppi.prio >= cs->icc_pmr_el1) {
707         /* Priority mask masks this interrupt */
708         return false;
709     }
710 
711     rprio = icc_highest_active_prio(cs);
712     if (rprio == 0xff) {
713         /* No currently running interrupt so we can preempt */
714         return true;
715     }
716 
717     mask = icc_gprio_mask(cs, cs->hppi.grp);
718 
719     /* We only preempt a running interrupt if the pending interrupt's
720      * group priority is sufficient (the subpriorities are not considered).
721      */
722     if ((cs->hppi.prio & mask) < (rprio & mask)) {
723         return true;
724     }
725 
726     return false;
727 }
728 
729 void gicv3_cpuif_update(GICv3CPUState *cs)
730 {
731     /* Tell the CPU about its highest priority pending interrupt */
732     int irqlevel = 0;
733     int fiqlevel = 0;
734     ARMCPU *cpu = ARM_CPU(cs->cpu);
735     CPUARMState *env = &cpu->env;
736 
737     g_assert(qemu_mutex_iothread_locked());
738 
739     trace_gicv3_cpuif_update(gicv3_redist_affid(cs), cs->hppi.irq,
740                              cs->hppi.grp, cs->hppi.prio);
741 
742     if (cs->hppi.grp == GICV3_G1 && !arm_feature(env, ARM_FEATURE_EL3)) {
743         /* If a Security-enabled GIC sends a G1S interrupt to a
744          * Security-disabled CPU, we must treat it as if it were G0.
745          */
746         cs->hppi.grp = GICV3_G0;
747     }
748 
749     if (icc_hppi_can_preempt(cs)) {
750         /* We have an interrupt: should we signal it as IRQ or FIQ?
751          * This is described in the GICv3 spec section 4.6.2.
752          */
753         bool isfiq;
754 
755         switch (cs->hppi.grp) {
756         case GICV3_G0:
757             isfiq = true;
758             break;
759         case GICV3_G1:
760             isfiq = (!arm_is_secure(env) ||
761                      (arm_current_el(env) == 3 && arm_el_is_aa64(env, 3)));
762             break;
763         case GICV3_G1NS:
764             isfiq = arm_is_secure(env);
765             break;
766         default:
767             g_assert_not_reached();
768         }
769 
770         if (isfiq) {
771             fiqlevel = 1;
772         } else {
773             irqlevel = 1;
774         }
775     }
776 
777     trace_gicv3_cpuif_set_irqs(gicv3_redist_affid(cs), fiqlevel, irqlevel);
778 
779     qemu_set_irq(cs->parent_fiq, fiqlevel);
780     qemu_set_irq(cs->parent_irq, irqlevel);
781 }
782 
783 static uint64_t icc_pmr_read(CPUARMState *env, const ARMCPRegInfo *ri)
784 {
785     GICv3CPUState *cs = icc_cs_from_env(env);
786     uint32_t value = cs->icc_pmr_el1;
787 
788     if (icv_access(env, HCR_FMO | HCR_IMO)) {
789         return icv_pmr_read(env, ri);
790     }
791 
792     if (arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env) &&
793         (env->cp15.scr_el3 & SCR_FIQ)) {
794         /* NS access and Group 0 is inaccessible to NS: return the
795          * NS view of the current priority
796          */
797         if ((value & 0x80) == 0) {
798             /* Secure priorities not visible to NS */
799             value = 0;
800         } else if (value != 0xff) {
801             value = (value << 1) & 0xff;
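            /* e.g. a stored PMR value of 0xc0 reads back as 0x80 in NS */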
802         }
803     }
804 
805     trace_gicv3_icc_pmr_read(gicv3_redist_affid(cs), value);
806 
807     return value;
808 }
809 
810 static void icc_pmr_write(CPUARMState *env, const ARMCPRegInfo *ri,
811                           uint64_t value)
812 {
813     GICv3CPUState *cs = icc_cs_from_env(env);
814 
815     if (icv_access(env, HCR_FMO | HCR_IMO)) {
816         return icv_pmr_write(env, ri, value);
817     }
818 
819     trace_gicv3_icc_pmr_write(gicv3_redist_affid(cs), value);
820 
821     value &= 0xff;
822 
823     if (arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env) &&
824         (env->cp15.scr_el3 & SCR_FIQ)) {
825         /* NS access and Group 0 is inaccessible to NS: convert the
826          * NS view of the priority being written into the Secure view
827          */
828         if (!(cs->icc_pmr_el1 & 0x80)) {
829             /* Current PMR in the secure range, don't allow NS to change it */
830             return;
831         }
832         value = (value >> 1) | 0x80;
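        /* e.g. an NS write of 0x80 is stored as the Secure-view value 0xc0 */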
833     }
834     cs->icc_pmr_el1 = value;
835     gicv3_cpuif_update(cs);
836 }
837 
838 static void icc_activate_irq(GICv3CPUState *cs, int irq)
839 {
840     /* Move the interrupt from the Pending state to Active, and update
841      * the Active Priority Registers
842      */
843     uint32_t mask = icc_gprio_mask(cs, cs->hppi.grp);
844     int prio = cs->hppi.prio & mask;
845     int aprbit = prio >> 1;
846     int regno = aprbit / 32;
847     int regbit = aprbit % 32;
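    /* The physical interface implements all 8 priority bits with a minimum
     * BPR of 0, so there are 128 possible group priorities and the active
     * priority state spans four 32-bit APR registers.
     */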
848 
849     cs->icc_apr[cs->hppi.grp][regno] |= (1 << regbit);
850 
851     if (irq < GIC_INTERNAL) {
852         cs->gicr_iactiver0 = deposit32(cs->gicr_iactiver0, irq, 1, 1);
853         cs->gicr_ipendr0 = deposit32(cs->gicr_ipendr0, irq, 1, 0);
854         gicv3_redist_update(cs);
855     } else {
856         gicv3_gicd_active_set(cs->gic, irq);
857         gicv3_gicd_pending_clear(cs->gic, irq);
858         gicv3_update(cs->gic, irq, 1);
859     }
860 }
861 
862 static uint64_t icc_hppir0_value(GICv3CPUState *cs, CPUARMState *env)
863 {
864     /* Return the highest priority pending interrupt register value
865      * for group 0.
866      */
867     bool irq_is_secure;
868 
869     if (cs->hppi.prio == 0xff) {
870         return INTID_SPURIOUS;
871     }
872 
873     /* Check whether we can return the interrupt or if we should return
874      * a special identifier, as per the CheckGroup0ForSpecialIdentifiers
875      * pseudocode. (We can simplify a little because for us ICC_SRE_EL1.RM
876      * is always zero.)
877      */
878     irq_is_secure = (!(cs->gic->gicd_ctlr & GICD_CTLR_DS) &&
879                      (cs->hppi.grp != GICV3_G1NS));
880 
881     if (cs->hppi.grp != GICV3_G0 && !arm_is_el3_or_mon(env)) {
882         return INTID_SPURIOUS;
883     }
884     if (irq_is_secure && !arm_is_secure(env)) {
885         /* Secure interrupts not visible to Nonsecure */
886         return INTID_SPURIOUS;
887     }
888 
889     if (cs->hppi.grp != GICV3_G0) {
890         /* Indicate to EL3 that there's a Group 1 interrupt for the other
891          * state pending.
892          */
893         return irq_is_secure ? INTID_SECURE : INTID_NONSECURE;
894     }
895 
896     return cs->hppi.irq;
897 }
898 
899 static uint64_t icc_hppir1_value(GICv3CPUState *cs, CPUARMState *env)
900 {
901     /* Return the highest priority pending interrupt register value
902      * for group 1.
903      */
904     bool irq_is_secure;
905 
906     if (cs->hppi.prio == 0xff) {
907         return INTID_SPURIOUS;
908     }
909 
910     /* Check whether we can return the interrupt or if we should return
911      * a special identifier, as per the CheckGroup1ForSpecialIdentifiers
912      * pseudocode. (We can simplify a little because for us ICC_SRE_EL1.RM
913      * is always zero.)
914      */
915     irq_is_secure = (!(cs->gic->gicd_ctlr & GICD_CTLR_DS) &&
916                      (cs->hppi.grp != GICV3_G1NS));
917 
918     if (cs->hppi.grp == GICV3_G0) {
919         /* Group 0 interrupts not visible via HPPIR1 */
920         return INTID_SPURIOUS;
921     }
922     if (irq_is_secure) {
923         if (!arm_is_secure(env)) {
924             /* Secure interrupts not visible in Non-secure */
925             return INTID_SPURIOUS;
926         }
927     } else if (!arm_is_el3_or_mon(env) && arm_is_secure(env)) {
928         /* Group 1 non-secure interrupts not visible in Secure EL1 */
929         return INTID_SPURIOUS;
930     }
931 
932     return cs->hppi.irq;
933 }
934 
935 static uint64_t icc_iar0_read(CPUARMState *env, const ARMCPRegInfo *ri)
936 {
937     GICv3CPUState *cs = icc_cs_from_env(env);
938     uint64_t intid;
939 
940     if (icv_access(env, HCR_FMO)) {
941         return icv_iar_read(env, ri);
942     }
943 
944     if (!icc_hppi_can_preempt(cs)) {
945         intid = INTID_SPURIOUS;
946     } else {
947         intid = icc_hppir0_value(cs, env);
948     }
949 
950     if (!(intid >= INTID_SECURE && intid <= INTID_SPURIOUS)) {
951         icc_activate_irq(cs, intid);
952     }
953 
954     trace_gicv3_icc_iar0_read(gicv3_redist_affid(cs), intid);
955     return intid;
956 }
957 
958 static uint64_t icc_iar1_read(CPUARMState *env, const ARMCPRegInfo *ri)
959 {
960     GICv3CPUState *cs = icc_cs_from_env(env);
961     uint64_t intid;
962 
963     if (icv_access(env, HCR_IMO)) {
964         return icv_iar_read(env, ri);
965     }
966 
967     if (!icc_hppi_can_preempt(cs)) {
968         intid = INTID_SPURIOUS;
969     } else {
970         intid = icc_hppir1_value(cs, env);
971     }
972 
973     if (!(intid >= INTID_SECURE && intid <= INTID_SPURIOUS)) {
974         icc_activate_irq(cs, intid);
975     }
976 
977     trace_gicv3_icc_iar1_read(gicv3_redist_affid(cs), intid);
978     return intid;
979 }
980 
981 static void icc_drop_prio(GICv3CPUState *cs, int grp)
982 {
983     /* Drop the priority of the currently active interrupt in
984      * the specified group.
985      *
986      * Note that we can guarantee (because of the requirement to nest
987      * ICC_IAR reads [which activate an interrupt and raise priority]
988      * with ICC_EOIR writes [which drop the priority for the interrupt])
989      * that the interrupt we're being called for is the highest priority
990      * active interrupt, meaning that it has the lowest set bit in the
991      * APR registers.
992      *
993      * If the guest does not honour the ordering constraints then the
994      * behaviour of the GIC is UNPREDICTABLE, which for us means that
995      * the values of the APR registers might become incorrect and the
996      * running priority will be wrong, so interrupts that should preempt
997      * might not do so, and interrupts that should not preempt might do so.
998      */
999     int i;
1000 
1001     for (i = 0; i < ARRAY_SIZE(cs->icc_apr[grp]); i++) {
1002         uint64_t *papr = &cs->icc_apr[grp][i];
1003 
1004         if (!*papr) {
1005             continue;
1006         }
1007         /* Clear the lowest set bit */
1008         *papr &= *papr - 1;
1009         break;
1010     }
1011 
1012     /* running priority change means we need an update for this cpu i/f */
1013     gicv3_cpuif_update(cs);
1014 }
1015 
1016 static bool icc_eoi_split(CPUARMState *env, GICv3CPUState *cs)
1017 {
1018     /* Return true if we should split priority drop and interrupt
1019      * deactivation, ie whether the relevant EOIMode bit is set.
1020      */
1021     if (arm_is_el3_or_mon(env)) {
1022         return cs->icc_ctlr_el3 & ICC_CTLR_EL3_EOIMODE_EL3;
1023     }
1024     if (arm_is_secure_below_el3(env)) {
1025         return cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_EOIMODE;
1026     } else {
1027         return cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_EOIMODE;
1028     }
1029 }
1030 
1031 static int icc_highest_active_group(GICv3CPUState *cs)
1032 {
1033     /* Return the group with the highest priority active interrupt.
1034      * We can do this by just comparing the APRs to see which one
1035      * has the lowest set bit.
1036      * (If more than one group is active at the same priority then
1037      * we're in UNPREDICTABLE territory.)
1038      */
1039     int i;
1040 
1041     for (i = 0; i < ARRAY_SIZE(cs->icc_apr[0]); i++) {
1042         int g0ctz = ctz32(cs->icc_apr[GICV3_G0][i]);
1043         int g1ctz = ctz32(cs->icc_apr[GICV3_G1][i]);
1044         int g1nsctz = ctz32(cs->icc_apr[GICV3_G1NS][i]);
1045 
1046         if (g1nsctz < g0ctz && g1nsctz < g1ctz) {
1047             return GICV3_G1NS;
1048         }
1049         if (g1ctz < g0ctz) {
1050             return GICV3_G1;
1051         }
1052         if (g0ctz < 32) {
1053             return GICV3_G0;
1054         }
1055     }
1056     /* No set active bits? UNPREDICTABLE; return -1 so the caller
1057      * ignores the spurious EOI attempt.
1058      */
1059     return -1;
1060 }
1061 
1062 static void icc_deactivate_irq(GICv3CPUState *cs, int irq)
1063 {
1064     if (irq < GIC_INTERNAL) {
1065         cs->gicr_iactiver0 = deposit32(cs->gicr_iactiver0, irq, 1, 0);
1066         gicv3_redist_update(cs);
1067     } else {
1068         gicv3_gicd_active_clear(cs->gic, irq);
1069         gicv3_update(cs->gic, irq, 1);
1070     }
1071 }
1072 
1073 static bool icv_eoi_split(CPUARMState *env, GICv3CPUState *cs)
1074 {
1075     /* Return true if we should split priority drop and interrupt
1076      * deactivation, ie whether the virtual EOIMode bit is set.
1077      */
1078     return cs->ich_vmcr_el2 & ICH_VMCR_EL2_VEOIM;
1079 }
1080 
1081 static int icv_find_active(GICv3CPUState *cs, int irq)
1082 {
1083     /* Given an interrupt number for an active interrupt, return the index
1084      * of the corresponding list register, or -1 if there is no match.
1085      * Corresponds to FindActiveVirtualInterrupt pseudocode.
1086      */
1087     int i;
1088 
1089     for (i = 0; i < cs->num_list_regs; i++) {
1090         uint64_t lr = cs->ich_lr_el2[i];
1091 
1092         if ((lr & ICH_LR_EL2_STATE_ACTIVE_BIT) && ich_lr_vintid(lr) == irq) {
1093             return i;
1094         }
1095     }
1096 
1097     return -1;
1098 }
1099 
1100 static void icv_deactivate_irq(GICv3CPUState *cs, int idx)
1101 {
1102     /* Deactivate the interrupt in the specified list register index */
1103     uint64_t lr = cs->ich_lr_el2[idx];
1104 
1105     if (lr & ICH_LR_EL2_HW) {
1106         /* Deactivate the associated physical interrupt */
1107         int pirq = ich_lr_pintid(lr);
1108 
1109         if (pirq < INTID_SECURE) {
1110             icc_deactivate_irq(cs, pirq);
1111         }
1112     }
1113 
1114     /* Clear the 'active' part of the state, so ActivePending->Pending
1115      * and Active->Invalid.
1116      */
1117     lr &= ~ICH_LR_EL2_STATE_ACTIVE_BIT;
1118     cs->ich_lr_el2[idx] = lr;
1119 }
1120 
1121 static void icv_increment_eoicount(GICv3CPUState *cs)
1122 {
1123     /* Increment the EOICOUNT field in ICH_HCR_EL2 */
1124     int eoicount = extract64(cs->ich_hcr_el2, ICH_HCR_EL2_EOICOUNT_SHIFT,
1125                              ICH_HCR_EL2_EOICOUNT_LENGTH);
1126 
1127     cs->ich_hcr_el2 = deposit64(cs->ich_hcr_el2, ICH_HCR_EL2_EOICOUNT_SHIFT,
1128                                 ICH_HCR_EL2_EOICOUNT_LENGTH, eoicount + 1);
1129 }
1130 
1131 static int icv_drop_prio(GICv3CPUState *cs)
1132 {
1133     /* Drop the priority of the currently active virtual interrupt
1134      * (favouring group 0 if there is a set active bit at
1135      * the same priority for both group 0 and group 1).
1136      * Return the priority value for the bit we just cleared,
1137      * or 0xff if no bits were set in the AP registers at all.
1138      * Note that though the ich_apr[] are uint64_t only the low
1139      * 32 bits are actually relevant.
1140      */
1141     int i;
1142     int aprmax = 1 << (cs->vprebits - 5);
1143 
1144     assert(aprmax <= ARRAY_SIZE(cs->ich_apr[0]));
1145 
1146     for (i = 0; i < aprmax; i++) {
1147         uint64_t *papr0 = &cs->ich_apr[GICV3_G0][i];
1148         uint64_t *papr1 = &cs->ich_apr[GICV3_G1NS][i];
1149         int apr0count, apr1count;
1150 
1151         if (!*papr0 && !*papr1) {
1152             continue;
1153         }
1154 
1155         /* We can't just use the bit-twiddling hack icc_drop_prio() does
1156          * because we need to return the bit number we cleared so
1157          * it can be compared against the list register's priority field.
1158          */
1159         apr0count = ctz32(*papr0);
1160         apr1count = ctz32(*papr1);
1161 
1162         if (apr0count <= apr1count) {
1163             *papr0 &= *papr0 - 1;
1164             return (apr0count + i * 32) << (icv_min_vbpr(cs) + 1);
1165         } else {
1166             *papr1 &= *papr1 - 1;
1167             return (apr1count + i * 32) << (icv_min_vbpr(cs) + 1);
1168         }
1169     }
1170     return 0xff;
1171 }
1172 
1173 static void icv_dir_write(CPUARMState *env, const ARMCPRegInfo *ri,
1174                           uint64_t value)
1175 {
1176     /* Deactivate interrupt */
1177     GICv3CPUState *cs = icc_cs_from_env(env);
1178     int idx;
1179     int irq = value & 0xffffff;
1180 
1181     trace_gicv3_icv_dir_write(gicv3_redist_affid(cs), value);
1182 
1183     if (irq >= cs->gic->num_irq) {
1184         /* Also catches special interrupt numbers and LPIs */
1185         return;
1186     }
1187 
1188     if (!icv_eoi_split(env, cs)) {
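        /* If EOImode is 0 a write to the DIR register is UNPREDICTABLE;
         * we choose to ignore it.
         */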
1189         return;
1190     }
1191 
1192     idx = icv_find_active(cs, irq);
1193 
1194     if (idx < 0) {
1195         /* No list register matching this, so increment the EOI count
1196          * (might trigger a maintenance interrupt)
1197          */
1198         icv_increment_eoicount(cs);
1199     } else {
1200         icv_deactivate_irq(cs, idx);
1201     }
1202 
1203     gicv3_cpuif_virt_update(cs);
1204 }
1205 
1206 static void icv_eoir_write(CPUARMState *env, const ARMCPRegInfo *ri,
1207                            uint64_t value)
1208 {
1209     /* End of Interrupt */
1210     GICv3CPUState *cs = icc_cs_from_env(env);
1211     int irq = value & 0xffffff;
1212     int grp = ri->crm == 8 ? GICV3_G0 : GICV3_G1NS;
1213     int idx, dropprio;
1214 
1215     trace_gicv3_icv_eoir_write(ri->crm == 8 ? 0 : 1,
1216                                gicv3_redist_affid(cs), value);
1217 
1218     if (irq >= cs->gic->num_irq) {
1219         /* Also catches special interrupt numbers and LPIs */
1220         return;
1221     }
1222 
1223     /* We implement the IMPDEF choice of "drop priority before doing
1224      * error checks" (because that lets us avoid scanning the AP
1225      * registers twice).
1226      */
1227     dropprio = icv_drop_prio(cs);
1228     if (dropprio == 0xff) {
1229         /* No active interrupt. It is CONSTRAINED UNPREDICTABLE
1230          * whether the list registers are checked in this
1231          * situation; we choose not to.
1232          */
1233         return;
1234     }
1235 
1236     idx = icv_find_active(cs, irq);
1237 
1238     if (idx < 0) {
1239         /* No valid list register corresponding to EOI ID */
1240         icv_increment_eoicount(cs);
1241     } else {
1242         uint64_t lr = cs->ich_lr_el2[idx];
1243         int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;
1244         int lr_gprio = ich_lr_prio(lr) & icv_gprio_mask(cs, grp);
1245 
1246         if (thisgrp == grp && lr_gprio == dropprio) {
1247             if (!icv_eoi_split(env, cs)) {
1248                 /* Priority drop and deactivate not split: deactivate irq now */
1249                 icv_deactivate_irq(cs, idx);
1250             }
1251         }
1252     }
1253 
1254     gicv3_cpuif_virt_update(cs);
1255 }
1256 
1257 static void icc_eoir_write(CPUARMState *env, const ARMCPRegInfo *ri,
1258                            uint64_t value)
1259 {
1260     /* End of Interrupt */
1261     GICv3CPUState *cs = icc_cs_from_env(env);
1262     int irq = value & 0xffffff;
1263     int grp;
1264 
1265     if (icv_access(env, ri->crm == 8 ? HCR_FMO : HCR_IMO)) {
1266         icv_eoir_write(env, ri, value);
1267         return;
1268     }
1269 
1270     trace_gicv3_icc_eoir_write(ri->crm == 8 ? 0 : 1,
1271                                gicv3_redist_affid(cs), value);
1272 
1273     if (ri->crm == 8) {
1274         /* EOIR0 */
1275         grp = GICV3_G0;
1276     } else {
1277         /* EOIR1 */
1278         if (arm_is_secure(env)) {
1279             grp = GICV3_G1;
1280         } else {
1281             grp = GICV3_G1NS;
1282         }
1283     }
1284 
1285     if (irq >= cs->gic->num_irq) {
1286         /* This handles two cases:
1287          * 1. If software writes the ID of a spurious interrupt [ie 1020-1023]
1288          * to the GICC_EOIR, the GIC ignores that write.
1289          * 2. If software writes the number of a non-existent interrupt
1290          * this must be a subcase of "value written does not match the last
1291          * valid interrupt value read from the Interrupt Acknowledge
1292          * register" and so this is UNPREDICTABLE. We choose to ignore it.
1293          */
1294         return;
1295     }
1296 
1297     if (icc_highest_active_group(cs) != grp) {
1298         return;
1299     }
1300 
1301     icc_drop_prio(cs, grp);
1302 
1303     if (!icc_eoi_split(env, cs)) {
1304         /* Priority drop and deactivate not split: deactivate irq now */
1305         icc_deactivate_irq(cs, irq);
1306     }
1307 }
1308 
1309 static uint64_t icc_hppir0_read(CPUARMState *env, const ARMCPRegInfo *ri)
1310 {
1311     GICv3CPUState *cs = icc_cs_from_env(env);
1312     uint64_t value;
1313 
1314     if (icv_access(env, HCR_FMO)) {
1315         return icv_hppir_read(env, ri);
1316     }
1317 
1318     value = icc_hppir0_value(cs, env);
1319     trace_gicv3_icc_hppir0_read(gicv3_redist_affid(cs), value);
1320     return value;
1321 }
1322 
1323 static uint64_t icc_hppir1_read(CPUARMState *env, const ARMCPRegInfo *ri)
1324 {
1325     GICv3CPUState *cs = icc_cs_from_env(env);
1326     uint64_t value;
1327 
1328     if (icv_access(env, HCR_IMO)) {
1329         return icv_hppir_read(env, ri);
1330     }
1331 
1332     value = icc_hppir1_value(cs, env);
1333     trace_gicv3_icc_hppir1_read(gicv3_redist_affid(cs), value);
1334     return value;
1335 }
1336 
1337 static uint64_t icc_bpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1338 {
1339     GICv3CPUState *cs = icc_cs_from_env(env);
1340     int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1;
1341     bool satinc = false;
1342     uint64_t bpr;
1343 
1344     if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
1345         return icv_bpr_read(env, ri);
1346     }
1347 
1348     if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
1349         grp = GICV3_G1NS;
1350     }
1351 
1352     if (grp == GICV3_G1 && !arm_is_el3_or_mon(env) &&
1353         (cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR)) {
1354         /* CBPR_EL1S means secure EL1 or AArch32 EL3 !Mon BPR1 accesses
1355          * modify BPR0
1356          */
1357         grp = GICV3_G0;
1358     }
1359 
1360     if (grp == GICV3_G1NS && arm_current_el(env) < 3 &&
1361         (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR)) {
1362         /* reads return bpr0 + 1 sat to 7, writes ignored */
1363         grp = GICV3_G0;
1364         satinc = true;
1365     }
1366 
1367     bpr = cs->icc_bpr[grp];
1368     if (satinc) {
1369         bpr++;
1370         bpr = MIN(bpr, 7);
1371     }
1372 
1373     trace_gicv3_icc_bpr_read(ri->crm == 8 ? 0 : 1, gicv3_redist_affid(cs), bpr);
1374 
1375     return bpr;
1376 }
1377 
1378 static void icc_bpr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1379                           uint64_t value)
1380 {
1381     GICv3CPUState *cs = icc_cs_from_env(env);
1382     int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1;
1383 
1384     if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
1385         icv_bpr_write(env, ri, value);
1386         return;
1387     }
1388 
1389     trace_gicv3_icc_bpr_write(ri->crm == 8 ? 0 : 1,
1390                               gicv3_redist_affid(cs), value);
1391 
1392     if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
1393         grp = GICV3_G1NS;
1394     }
1395 
1396     if (grp == GICV3_G1 && !arm_is_el3_or_mon(env) &&
1397         (cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR)) {
1398         /* CBPR_EL1S means secure EL1 or AArch32 EL3 !Mon BPR1 accesses
1399          * modify BPR0
1400          */
1401         grp = GICV3_G0;
1402     }
1403 
1404     if (grp == GICV3_G1NS && arm_current_el(env) < 3 &&
1405         (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR)) {
1406         /* reads return bpr0 + 1 sat to 7, writes ignored */
1407         return;
1408     }
1409 
1410     cs->icc_bpr[grp] = value & 7;
1411     gicv3_cpuif_update(cs);
1412 }
1413 
1414 static uint64_t icc_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
1415 {
1416     GICv3CPUState *cs = icc_cs_from_env(env);
1417     uint64_t value;
1418 
1419     int regno = ri->opc2 & 3;
1420     int grp = ri->crm & 1 ? GICV3_G1 : GICV3_G0;
1421 
1422     if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
1423         return icv_ap_read(env, ri);
1424     }
1425 
1426     if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
1427         grp = GICV3_G1NS;
1428     }
1429 
1430     value = cs->icc_apr[grp][regno];
1431 
1432     trace_gicv3_icc_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
1433     return value;
1434 }
1435 
1436 static void icc_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
1437                          uint64_t value)
1438 {
1439     GICv3CPUState *cs = icc_cs_from_env(env);
1440 
1441     int regno = ri->opc2 & 3;
1442     int grp = ri->crm & 1 ? GICV3_G1 : GICV3_G0;
1443 
1444     if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
1445         icv_ap_write(env, ri, value);
1446         return;
1447     }
1448 
1449     trace_gicv3_icc_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
1450 
1451     if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
1452         grp = GICV3_G1NS;
1453     }
1454 
1455     /* It's not possible to claim that a Non-secure interrupt is active
1456      * at a priority outside the Non-secure range (128..255), since this
1457      * would otherwise allow malicious NS code to block delivery of S interrupts
1458      * by writing a bad value to these registers.
1459      */
1460     if (grp == GICV3_G1NS && regno < 2 && arm_feature(env, ARM_FEATURE_EL3)) {
1461         return;
1462     }
1463 
1464     cs->icc_apr[grp][regno] = value & 0xFFFFFFFFU;
1465     gicv3_cpuif_update(cs);
1466 }
1467 
1468 static void icc_dir_write(CPUARMState *env, const ARMCPRegInfo *ri,
1469                           uint64_t value)
1470 {
1471     /* Deactivate interrupt */
1472     GICv3CPUState *cs = icc_cs_from_env(env);
1473     int irq = value & 0xffffff;
1474     bool irq_is_secure, single_sec_state, irq_is_grp0;
1475     bool route_fiq_to_el3, route_irq_to_el3, route_fiq_to_el2, route_irq_to_el2;
1476 
1477     if (icv_access(env, HCR_FMO | HCR_IMO)) {
1478         icv_dir_write(env, ri, value);
1479         return;
1480     }
1481 
1482     trace_gicv3_icc_dir_write(gicv3_redist_affid(cs), value);
1483 
1484     if (irq >= cs->gic->num_irq) {
1485         /* Also catches special interrupt numbers and LPIs */
1486         return;
1487     }
1488 
1489     if (!icc_eoi_split(env, cs)) {
1490         return;
1491     }
1492 
1493     int grp = gicv3_irq_group(cs->gic, cs, irq);
1494 
1495     single_sec_state = cs->gic->gicd_ctlr & GICD_CTLR_DS;
1496     irq_is_secure = !single_sec_state && (grp != GICV3_G1NS);
1497     irq_is_grp0 = grp == GICV3_G0;
1498 
1499     /* Check whether we're allowed to deactivate this interrupt based
1500      * on its group and the current CPU state.
1501      * These checks are laid out to correspond to the spec's pseudocode.
1502      */
1503     route_fiq_to_el3 = env->cp15.scr_el3 & SCR_FIQ;
1504     route_irq_to_el3 = env->cp15.scr_el3 & SCR_IRQ;
1505     /* No need to include !IsSecure in route_*_to_el2 as it's only
1506      * tested in cases where we know !IsSecure is true.
1507      */
1508     route_fiq_to_el2 = env->cp15.hcr_el2 & HCR_FMO;
1509     route_irq_to_el2 = env->cp15.hcr_el2 & HCR_IMO;
1510 
1511     switch (arm_current_el(env)) {
1512     case 3:
1513         break;
1514     case 2:
1515         if (single_sec_state && irq_is_grp0 && !route_fiq_to_el3) {
1516             break;
1517         }
1518         if (!irq_is_secure && !irq_is_grp0 && !route_irq_to_el3) {
1519             break;
1520         }
1521         return;
1522     case 1:
1523         if (!arm_is_secure_below_el3(env)) {
1524             if (single_sec_state && irq_is_grp0 &&
1525                 !route_fiq_to_el3 && !route_fiq_to_el2) {
1526                 break;
1527             }
1528             if (!irq_is_secure && !irq_is_grp0 &&
1529                 !route_irq_to_el3 && !route_irq_to_el2) {
1530                 break;
1531             }
1532         } else {
1533             if (irq_is_grp0 && !route_fiq_to_el3) {
1534                 break;
1535             }
1536             if (!irq_is_grp0 &&
1537                 (!irq_is_secure || !single_sec_state) &&
1538                 !route_irq_to_el3) {
1539                 break;
1540             }
1541         }
1542         return;
1543     default:
1544         g_assert_not_reached();
1545     }
1546 
1547     icc_deactivate_irq(cs, irq);
1548 }
1549 
1550 static uint64_t icc_rpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1551 {
1552     GICv3CPUState *cs = icc_cs_from_env(env);
1553     int prio;
1554 
1555     if (icv_access(env, HCR_FMO | HCR_IMO)) {
1556         return icv_rpr_read(env, ri);
1557     }
1558 
1559     prio = icc_highest_active_prio(cs);
1560 
1561     if (arm_feature(env, ARM_FEATURE_EL3) &&
1562         !arm_is_secure(env) && (env->cp15.scr_el3 & SCR_FIQ)) {
1563         /* NS GIC access and Group 0 is inaccessible to NS */
1564         if ((prio & 0x80) == 0) {
1565             /* NS mustn't see priorities in the Secure half of the range */
1566             prio = 0;
1567         } else if (prio != 0xff) {
1568             /* Non-idle priority: show the Non-secure view of it */
1569             prio = (prio << 1) & 0xff;
1570         }
1571     }
1572 
1573     trace_gicv3_icc_rpr_read(gicv3_redist_affid(cs), prio);
1574     return prio;
1575 }
1576 
1577 static void icc_generate_sgi(CPUARMState *env, GICv3CPUState *cs,
1578                              uint64_t value, int grp, bool ns)
1579 {
1580     GICv3State *s = cs->gic;
1581 
1582     /* Extract Aff3/Aff2/Aff1 and shift into the bottom 24 bits */
1583     uint64_t aff = extract64(value, 48, 8) << 16 |
1584         extract64(value, 32, 8) << 8 |
1585         extract64(value, 16, 8);
1586     uint32_t targetlist = extract64(value, 0, 16);
1587     uint32_t irq = extract64(value, 24, 4);
1588     bool irm = extract64(value, 40, 1);
1589     int i;
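    /* (ICC_SGI*R_EL1 layout: Aff3 [55:48], IRM [40], Aff2 [39:32],
     * INTID [27:24], Aff1 [23:16], TargetList [15:0]; the shifts above pack
     * Aff3.Aff2.Aff1 to match GICR_TYPER bits [63:40].)
     */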
1590 
1591     if (grp == GICV3_G1 && s->gicd_ctlr & GICD_CTLR_DS) {
1592         /* If GICD_CTLR.DS == 1, the Distributor treats Secure Group 1
1593          * interrupts as Group 0 interrupts and must send Secure Group 0
1594          * interrupts to the target CPUs.
1595          */
1596         grp = GICV3_G0;
1597     }
1598 
1599     trace_gicv3_icc_generate_sgi(gicv3_redist_affid(cs), irq, irm,
1600                                  aff, targetlist);
1601 
1602     for (i = 0; i < s->num_cpu; i++) {
1603         GICv3CPUState *ocs = &s->cpu[i];
1604 
1605         if (irm) {
1606             /* IRM == 1 : route to all CPUs except self */
1607             if (cs == ocs) {
1608                 continue;
1609             }
1610         } else {
1611             /* IRM == 0 : route to Aff3.Aff2.Aff1.n for all n in [0..15]
1612              * where the corresponding bit is set in targetlist
1613              */
1614             int aff0;
1615 
1616             if (ocs->gicr_typer >> 40 != aff) {
1617                 continue;
1618             }
1619             aff0 = extract64(ocs->gicr_typer, 32, 8);
1620             if (aff0 > 15 || extract32(targetlist, aff0, 1) == 0) {
1621                 continue;
1622             }
1623         }
1624 
1625         /* The redistributor will check against its own GICR_NSACR as needed */
1626         gicv3_redist_send_sgi(ocs, grp, irq, ns);
1627     }
1628 }
1629 
1630 static void icc_sgi0r_write(CPUARMState *env, const ARMCPRegInfo *ri,
1631                            uint64_t value)
1632 {
1633     /* Generate Secure Group 0 SGI. */
1634     GICv3CPUState *cs = icc_cs_from_env(env);
1635     bool ns = !arm_is_secure(env);
1636 
1637     icc_generate_sgi(env, cs, value, GICV3_G0, ns);
1638 }
1639 
1640 static void icc_sgi1r_write(CPUARMState *env, const ARMCPRegInfo *ri,
1641                            uint64_t value)
1642 {
1643     /* Generate Group 1 SGI for the current Security state */
1644     GICv3CPUState *cs = icc_cs_from_env(env);
1645     int grp;
1646     bool ns = !arm_is_secure(env);
1647 
1648     grp = ns ? GICV3_G1NS : GICV3_G1;
1649     icc_generate_sgi(env, cs, value, grp, ns);
1650 }
1651 
1652 static void icc_asgi1r_write(CPUARMState *env, const ARMCPRegInfo *ri,
1653                              uint64_t value)
1654 {
1655     /* Generate Group 1 SGI for the Security state that is not
1656      * the current state
1657      */
1658     GICv3CPUState *cs = icc_cs_from_env(env);
1659     int grp;
1660     bool ns = !arm_is_secure(env);
1661 
1662     grp = ns ? GICV3_G1 : GICV3_G1NS;
1663     icc_generate_sgi(env, cs, value, grp, ns);
1664 }
1665 
1666 static uint64_t icc_igrpen_read(CPUARMState *env, const ARMCPRegInfo *ri)
1667 {
1668     GICv3CPUState *cs = icc_cs_from_env(env);
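    /* ICC_IGRPEN0_EL1 is encoded with opc2 == 6 and ICC_IGRPEN1_EL1 with
     * opc2 == 7, so the low bit of opc2 selects the group being enabled.
     */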
1669     int grp = ri->opc2 & 1 ? GICV3_G1 : GICV3_G0;
1670     uint64_t value;
1671 
1672     if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
1673         return icv_igrpen_read(env, ri);
1674     }
1675 
1676     if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
1677         grp = GICV3_G1NS;
1678     }
1679 
1680     value = cs->icc_igrpen[grp];
1681     trace_gicv3_icc_igrpen_read(ri->opc2 & 1 ? 1 : 0,
1682                                 gicv3_redist_affid(cs), value);
1683     return value;
1684 }
1685 
1686 static void icc_igrpen_write(CPUARMState *env, const ARMCPRegInfo *ri,
1687                              uint64_t value)
1688 {
1689     GICv3CPUState *cs = icc_cs_from_env(env);
1690     int grp = ri->opc2 & 1 ? GICV3_G1 : GICV3_G0;
1691 
1692     if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
1693         icv_igrpen_write(env, ri, value);
1694         return;
1695     }
1696 
1697     trace_gicv3_icc_igrpen_write(ri->opc2 & 1 ? 1 : 0,
1698                                  gicv3_redist_affid(cs), value);
1699 
1700     if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
1701         grp = GICV3_G1NS;
1702     }
1703 
1704     cs->icc_igrpen[grp] = value & ICC_IGRPEN_ENABLE;
1705     gicv3_cpuif_update(cs);
1706 }
1707 
1708 static uint64_t icc_igrpen1_el3_read(CPUARMState *env, const ARMCPRegInfo *ri)
1709 {
1710     GICv3CPUState *cs = icc_cs_from_env(env);
1711     uint64_t value;
1712 
1713     /* IGRPEN1_EL3 bits 0 and 1 are r/w aliases into IGRPEN1_EL1 NS and S */
1714     value = cs->icc_igrpen[GICV3_G1NS] | (cs->icc_igrpen[GICV3_G1] << 1);
1715     trace_gicv3_icc_igrpen1_el3_read(gicv3_redist_affid(cs), value);
1716     return value;
1717 }
1718 
1719 static void icc_igrpen1_el3_write(CPUARMState *env, const ARMCPRegInfo *ri,
1720                                   uint64_t value)
1721 {
1722     GICv3CPUState *cs = icc_cs_from_env(env);
1723 
1724     trace_gicv3_icc_igrpen1_el3_write(gicv3_redist_affid(cs), value);
1725 
1726     /* IGRPEN1_EL3 bits 0 and 1 are r/w aliases into IGRPEN1_EL1 NS and S */
1727     cs->icc_igrpen[GICV3_G1NS] = extract32(value, 0, 1);
1728     cs->icc_igrpen[GICV3_G1] = extract32(value, 1, 1);
1729     gicv3_cpuif_update(cs);
1730 }
1731 
1732 static uint64_t icc_ctlr_el1_read(CPUARMState *env, const ARMCPRegInfo *ri)
1733 {
1734     GICv3CPUState *cs = icc_cs_from_env(env);
1735     int bank = gicv3_use_ns_bank(env) ? GICV3_NS : GICV3_S;
1736     uint64_t value;
1737 
1738     if (icv_access(env, HCR_FMO | HCR_IMO)) {
1739         return icv_ctlr_read(env, ri);
1740     }
1741 
1742     value = cs->icc_ctlr_el1[bank];
1743     trace_gicv3_icc_ctlr_read(gicv3_redist_affid(cs), value);
1744     return value;
1745 }
1746 
1747 static void icc_ctlr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
1748                                uint64_t value)
1749 {
1750     GICv3CPUState *cs = icc_cs_from_env(env);
1751     int bank = gicv3_use_ns_bank(env) ? GICV3_NS : GICV3_S;
1752     uint64_t mask;
1753 
1754     if (icv_access(env, HCR_FMO | HCR_IMO)) {
1755         icv_ctlr_write(env, ri, value);
1756         return;
1757     }
1758 
1759     trace_gicv3_icc_ctlr_write(gicv3_redist_affid(cs), value);
1760 
1761     /* Only CBPR and EOIMODE can be RW;
1762      * for us PMHE is RAZ/WI (we don't implement 1-of-N interrupts or
1763      * the associated priority-based routing of them);
1764      * if EL3 is implemented and GICD_CTLR.DS == 0, then PMHE and CBPR are RO.
1765      */
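    /* Architecturally, when EL3 is present and GICD_CTLR.DS == 0, the EL1
     * CBPR bit is a read-only alias of ICC_CTLR_EL3.CBPR_EL1{S,NS}; that
     * is why CBPR is dropped from the writable mask below and is only
     * updated via icc_ctlr_el3_write().
     */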
1766     if (arm_feature(env, ARM_FEATURE_EL3) &&
1767         ((cs->gic->gicd_ctlr & GICD_CTLR_DS) == 0)) {
1768         mask = ICC_CTLR_EL1_EOIMODE;
1769     } else {
1770         mask = ICC_CTLR_EL1_CBPR | ICC_CTLR_EL1_EOIMODE;
1771     }
1772 
1773     cs->icc_ctlr_el1[bank] &= ~mask;
1774     cs->icc_ctlr_el1[bank] |= (value & mask);
1775     gicv3_cpuif_update(cs);
1776 }
1777 
1778 
1779 static uint64_t icc_ctlr_el3_read(CPUARMState *env, const ARMCPRegInfo *ri)
1780 {
1781     GICv3CPUState *cs = icc_cs_from_env(env);
1782     uint64_t value;
1783 
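    /* The EOIMODE_EL1{S,NS} and CBPR_EL1{S,NS} bits read as aliases of the
     * corresponding bits in the Secure and Non-secure banked copies of
     * ICC_CTLR_EL1, which is where icc_ctlr_el3_write() stores them.
     */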
1784     value = cs->icc_ctlr_el3;
1785     if (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_EOIMODE) {
1786         value |= ICC_CTLR_EL3_EOIMODE_EL1NS;
1787     }
1788     if (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR) {
1789         value |= ICC_CTLR_EL3_CBPR_EL1NS;
1790     }
1791     if (cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_EOIMODE) {
1792         value |= ICC_CTLR_EL3_EOIMODE_EL1S;
1793     }
1794     if (cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR) {
1795         value |= ICC_CTLR_EL3_CBPR_EL1S;
1796     }
1797 
1798     trace_gicv3_icc_ctlr_el3_read(gicv3_redist_affid(cs), value);
1799     return value;
1800 }
1801 
1802 static void icc_ctlr_el3_write(CPUARMState *env, const ARMCPRegInfo *ri,
1803                                uint64_t value)
1804 {
1805     GICv3CPUState *cs = icc_cs_from_env(env);
1806     uint64_t mask;
1807 
1808     trace_gicv3_icc_ctlr_el3_write(gicv3_redist_affid(cs), value);
1809 
1810     /* *_EL1NS and *_EL1S bits are aliases into the ICC_CTLR_EL1 bits. */
1811     cs->icc_ctlr_el1[GICV3_NS] &= ~(ICC_CTLR_EL1_CBPR | ICC_CTLR_EL1_EOIMODE);
1812     if (value & ICC_CTLR_EL3_EOIMODE_EL1NS) {
1813         cs->icc_ctlr_el1[GICV3_NS] |= ICC_CTLR_EL1_EOIMODE;
1814     }
1815     if (value & ICC_CTLR_EL3_CBPR_EL1NS) {
1816         cs->icc_ctlr_el1[GICV3_NS] |= ICC_CTLR_EL1_CBPR;
1817     }
1818 
1819     cs->icc_ctlr_el1[GICV3_S] &= ~(ICC_CTLR_EL1_CBPR | ICC_CTLR_EL1_EOIMODE);
1820     if (value & ICC_CTLR_EL3_EOIMODE_EL1S) {
1821         cs->icc_ctlr_el1[GICV3_S] |= ICC_CTLR_EL1_EOIMODE;
1822     }
1823     if (value & ICC_CTLR_EL3_CBPR_EL1S) {
1824         cs->icc_ctlr_el1[GICV3_S] |= ICC_CTLR_EL1_CBPR;
1825     }
1826 
1827     /* The only bit stored in icc_ctlr_el3 which is writeable is EOIMODE_EL3: */
1828     mask = ICC_CTLR_EL3_EOIMODE_EL3;
1829 
1830     cs->icc_ctlr_el3 &= ~mask;
1831     cs->icc_ctlr_el3 |= (value & mask);
1832     gicv3_cpuif_update(cs);
1833 }
1834 
1835 static CPAccessResult gicv3_irqfiq_access(CPUARMState *env,
1836                                           const ARMCPRegInfo *ri, bool isread)
1837 {
1838     CPAccessResult r = CP_ACCESS_OK;
1839     GICv3CPUState *cs = icc_cs_from_env(env);
1840     int el = arm_current_el(env);
1841 
1842     if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TC) &&
1843         el == 1 && !arm_is_secure_below_el3(env)) {
1844         /* Takes priority over a possible EL3 trap */
1845         return CP_ACCESS_TRAP_EL2;
1846     }
1847 
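    /* When SCR_EL3 routes both FIQ and IRQ to EL3, the registers common to
     * both groups belong to EL3, so accesses from lower ELs trap to EL3,
     * unless the access is from NS EL1 with HCR_EL2.{IMO,FMO} set, in which
     * case it is an ICV_ access handled by the virtual interface instead.
     */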
1848     if ((env->cp15.scr_el3 & (SCR_FIQ | SCR_IRQ)) == (SCR_FIQ | SCR_IRQ)) {
1849         switch (el) {
1850         case 1:
1851             if (arm_is_secure_below_el3(env) ||
1852                 ((env->cp15.hcr_el2 & (HCR_IMO | HCR_FMO)) == 0)) {
1853                 r = CP_ACCESS_TRAP_EL3;
1854             }
1855             break;
1856         case 2:
1857             r = CP_ACCESS_TRAP_EL3;
1858             break;
1859         case 3:
1860             if (!is_a64(env) && !arm_is_el3_or_mon(env)) {
1861                 r = CP_ACCESS_TRAP_EL3;
1862             }
1863             break;
1864         default:
1865             g_assert_not_reached();
1866         }
1867     }
1868 
1869     if (r == CP_ACCESS_TRAP_EL3 && !arm_el_is_aa64(env, 3)) {
1870         r = CP_ACCESS_TRAP;
1871     }
1872     return r;
1873 }
1874 
1875 static CPAccessResult gicv3_dir_access(CPUARMState *env,
1876                                        const ARMCPRegInfo *ri, bool isread)
1877 {
1878     GICv3CPUState *cs = icc_cs_from_env(env);
1879 
1880     if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TDIR) &&
1881         arm_current_el(env) == 1 && !arm_is_secure_below_el3(env)) {
1882         /* Takes priority over a possible EL3 trap */
1883         return CP_ACCESS_TRAP_EL2;
1884     }
1885 
1886     return gicv3_irqfiq_access(env, ri, isread);
1887 }
1888 
1889 static CPAccessResult gicv3_sgi_access(CPUARMState *env,
1890                                        const ARMCPRegInfo *ri, bool isread)
1891 {
1892     if ((env->cp15.hcr_el2 & (HCR_IMO | HCR_FMO)) &&
1893         arm_current_el(env) == 1 && !arm_is_secure_below_el3(env)) {
1894         /* Takes priority over a possible EL3 trap */
1895         return CP_ACCESS_TRAP_EL2;
1896     }
1897 
1898     return gicv3_irqfiq_access(env, ri, isread);
1899 }
1900 
1901 static CPAccessResult gicv3_fiq_access(CPUARMState *env,
1902                                        const ARMCPRegInfo *ri, bool isread)
1903 {
1904     CPAccessResult r = CP_ACCESS_OK;
1905     GICv3CPUState *cs = icc_cs_from_env(env);
1906     int el = arm_current_el(env);
1907 
1908     if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TALL0) &&
1909         el == 1 && !arm_is_secure_below_el3(env)) {
1910         /* Takes priority over a possible EL3 trap */
1911         return CP_ACCESS_TRAP_EL2;
1912     }
1913 
1914     if (env->cp15.scr_el3 & SCR_FIQ) {
1915         switch (el) {
1916         case 1:
1917             if (arm_is_secure_below_el3(env) ||
1918                 ((env->cp15.hcr_el2 & HCR_FMO) == 0)) {
1919                 r = CP_ACCESS_TRAP_EL3;
1920             }
1921             break;
1922         case 2:
1923             r = CP_ACCESS_TRAP_EL3;
1924             break;
1925         case 3:
1926             if (!is_a64(env) && !arm_is_el3_or_mon(env)) {
1927                 r = CP_ACCESS_TRAP_EL3;
1928             }
1929             break;
1930         default:
1931             g_assert_not_reached();
1932         }
1933     }
1934 
1935     if (r == CP_ACCESS_TRAP_EL3 && !arm_el_is_aa64(env, 3)) {
1936         r = CP_ACCESS_TRAP;
1937     }
1938     return r;
1939 }
1940 
1941 static CPAccessResult gicv3_irq_access(CPUARMState *env,
1942                                        const ARMCPRegInfo *ri, bool isread)
1943 {
1944     CPAccessResult r = CP_ACCESS_OK;
1945     GICv3CPUState *cs = icc_cs_from_env(env);
1946     int el = arm_current_el(env);
1947 
1948     if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TALL1) &&
1949         el == 1 && !arm_is_secure_below_el3(env)) {
1950         /* Takes priority over a possible EL3 trap */
1951         return CP_ACCESS_TRAP_EL2;
1952     }
1953 
1954     if (env->cp15.scr_el3 & SCR_IRQ) {
1955         switch (el) {
1956         case 1:
1957             if (arm_is_secure_below_el3(env) ||
1958                 ((env->cp15.hcr_el2 & HCR_IMO) == 0)) {
1959                 r = CP_ACCESS_TRAP_EL3;
1960             }
1961             break;
1962         case 2:
1963             r = CP_ACCESS_TRAP_EL3;
1964             break;
1965         case 3:
1966             if (!is_a64(env) && !arm_is_el3_or_mon(env)) {
1967                 r = CP_ACCESS_TRAP_EL3;
1968             }
1969             break;
1970         default:
1971             g_assert_not_reached();
1972         }
1973     }
1974 
1975     if (r == CP_ACCESS_TRAP_EL3 && !arm_el_is_aa64(env, 3)) {
1976         r = CP_ACCESS_TRAP;
1977     }
1978     return r;
1979 }
1980 
1981 static void icc_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1982 {
1983     GICv3CPUState *cs = icc_cs_from_env(env);
1984 
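    /* PRIBITS is encoded as (number of priority bits - 1), so 7 means
     * 8 bits of priority; IDBITS of 1 selects 24-bit interrupt identifiers.
     */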
1985     cs->icc_ctlr_el1[GICV3_S] = ICC_CTLR_EL1_A3V |
1986         (1 << ICC_CTLR_EL1_IDBITS_SHIFT) |
1987         (7 << ICC_CTLR_EL1_PRIBITS_SHIFT);
1988     cs->icc_ctlr_el1[GICV3_NS] = ICC_CTLR_EL1_A3V |
1989         (1 << ICC_CTLR_EL1_IDBITS_SHIFT) |
1990         (7 << ICC_CTLR_EL1_PRIBITS_SHIFT);
1991     cs->icc_pmr_el1 = 0;
1992     cs->icc_bpr[GICV3_G0] = GIC_MIN_BPR;
1993     cs->icc_bpr[GICV3_G1] = GIC_MIN_BPR;
1994     if (arm_feature(env, ARM_FEATURE_EL3)) {
1995         cs->icc_bpr[GICV3_G1NS] = GIC_MIN_BPR_NS;
1996     } else {
1997         cs->icc_bpr[GICV3_G1NS] = GIC_MIN_BPR;
1998     }
1999     memset(cs->icc_apr, 0, sizeof(cs->icc_apr));
2000     memset(cs->icc_igrpen, 0, sizeof(cs->icc_igrpen));
2001     cs->icc_ctlr_el3 = ICC_CTLR_EL3_NDS | ICC_CTLR_EL3_A3V |
2002         (1 << ICC_CTLR_EL3_IDBITS_SHIFT) |
2003         (7 << ICC_CTLR_EL3_PRIBITS_SHIFT);
2004 
2005     memset(cs->ich_apr, 0, sizeof(cs->ich_apr));
2006     cs->ich_hcr_el2 = 0;
2007     memset(cs->ich_lr_el2, 0, sizeof(cs->ich_lr_el2));
2008     cs->ich_vmcr_el2 = ICH_VMCR_EL2_VFIQEN |
2009         (icv_min_vbpr(cs) << ICH_VMCR_EL2_VBPR1_SHIFT) |
2010         (icv_min_vbpr(cs) << ICH_VMCR_EL2_VBPR0_SHIFT);
2011 }
2012 
2013 static const ARMCPRegInfo gicv3_cpuif_reginfo[] = {
2014     { .name = "ICC_PMR_EL1", .state = ARM_CP_STATE_BOTH,
2015       .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 6, .opc2 = 0,
2016       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2017       .access = PL1_RW, .accessfn = gicv3_irqfiq_access,
2018       .readfn = icc_pmr_read,
2019       .writefn = icc_pmr_write,
2020       /* We hang the whole cpu interface reset routine off here
2021        * rather than parcelling it out into one little function
2022        * per register
2023        */
2024       .resetfn = icc_reset,
2025     },
2026     { .name = "ICC_IAR0_EL1", .state = ARM_CP_STATE_BOTH,
2027       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 0,
2028       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2029       .access = PL1_R, .accessfn = gicv3_fiq_access,
2030       .readfn = icc_iar0_read,
2031     },
2032     { .name = "ICC_EOIR0_EL1", .state = ARM_CP_STATE_BOTH,
2033       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 1,
2034       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2035       .access = PL1_W, .accessfn = gicv3_fiq_access,
2036       .writefn = icc_eoir_write,
2037     },
2038     { .name = "ICC_HPPIR0_EL1", .state = ARM_CP_STATE_BOTH,
2039       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 2,
2040       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2041       .access = PL1_R, .accessfn = gicv3_fiq_access,
2042       .readfn = icc_hppir0_read,
2043     },
2044     { .name = "ICC_BPR0_EL1", .state = ARM_CP_STATE_BOTH,
2045       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 3,
2046       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2047       .access = PL1_RW, .accessfn = gicv3_fiq_access,
2048       .readfn = icc_bpr_read,
2049       .writefn = icc_bpr_write,
2050     },
2051     { .name = "ICC_AP0R0_EL1", .state = ARM_CP_STATE_BOTH,
2052       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 4,
2053       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2054       .access = PL1_RW, .accessfn = gicv3_fiq_access,
2055       .readfn = icc_ap_read,
2056       .writefn = icc_ap_write,
2057     },
2058     { .name = "ICC_AP0R1_EL1", .state = ARM_CP_STATE_BOTH,
2059       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 5,
2060       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2061       .access = PL1_RW, .accessfn = gicv3_fiq_access,
2062       .readfn = icc_ap_read,
2063       .writefn = icc_ap_write,
2064     },
2065     { .name = "ICC_AP0R2_EL1", .state = ARM_CP_STATE_BOTH,
2066       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 6,
2067       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2068       .access = PL1_RW, .accessfn = gicv3_fiq_access,
2069       .readfn = icc_ap_read,
2070       .writefn = icc_ap_write,
2071     },
2072     { .name = "ICC_AP0R3_EL1", .state = ARM_CP_STATE_BOTH,
2073       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 7,
2074       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2075       .access = PL1_RW, .accessfn = gicv3_fiq_access,
2076       .readfn = icc_ap_read,
2077       .writefn = icc_ap_write,
2078     },
2079     /* All the ICC_AP1R*_EL1 registers are banked */
2080     { .name = "ICC_AP1R0_EL1", .state = ARM_CP_STATE_BOTH,
2081       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 0,
2082       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2083       .access = PL1_RW, .accessfn = gicv3_irq_access,
2084       .readfn = icc_ap_read,
2085       .writefn = icc_ap_write,
2086     },
2087     { .name = "ICC_AP1R1_EL1", .state = ARM_CP_STATE_BOTH,
2088       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 1,
2089       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2090       .access = PL1_RW, .accessfn = gicv3_irq_access,
2091       .readfn = icc_ap_read,
2092       .writefn = icc_ap_write,
2093     },
2094     { .name = "ICC_AP1R2_EL1", .state = ARM_CP_STATE_BOTH,
2095       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 2,
2096       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2097       .access = PL1_RW, .accessfn = gicv3_irq_access,
2098       .readfn = icc_ap_read,
2099       .writefn = icc_ap_write,
2100     },
2101     { .name = "ICC_AP1R3_EL1", .state = ARM_CP_STATE_BOTH,
2102       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 3,
2103       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2104       .access = PL1_RW, .accessfn = gicv3_irq_access,
2105       .readfn = icc_ap_read,
2106       .writefn = icc_ap_write,
2107     },
2108     { .name = "ICC_DIR_EL1", .state = ARM_CP_STATE_BOTH,
2109       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 1,
2110       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2111       .access = PL1_W, .accessfn = gicv3_dir_access,
2112       .writefn = icc_dir_write,
2113     },
2114     { .name = "ICC_RPR_EL1", .state = ARM_CP_STATE_BOTH,
2115       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 3,
2116       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2117       .access = PL1_R, .accessfn = gicv3_irqfiq_access,
2118       .readfn = icc_rpr_read,
2119     },
2120     { .name = "ICC_SGI1R_EL1", .state = ARM_CP_STATE_AA64,
2121       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 5,
2122       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2123       .access = PL1_W, .accessfn = gicv3_sgi_access,
2124       .writefn = icc_sgi1r_write,
2125     },
2126     { .name = "ICC_SGI1R",
2127       .cp = 15, .opc1 = 0, .crm = 12,
2128       .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_RAW,
2129       .access = PL1_W, .accessfn = gicv3_sgi_access,
2130       .writefn = icc_sgi1r_write,
2131     },
2132     { .name = "ICC_ASGI1R_EL1", .state = ARM_CP_STATE_AA64,
2133       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 6,
2134       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2135       .access = PL1_W, .accessfn = gicv3_sgi_access,
2136       .writefn = icc_asgi1r_write,
2137     },
2138     { .name = "ICC_ASGI1R",
2139       .cp = 15, .opc1 = 1, .crm = 12,
2140       .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_RAW,
2141       .access = PL1_W, .accessfn = gicv3_sgi_access,
2142       .writefn = icc_asgi1r_write,
2143     },
2144     { .name = "ICC_SGI0R_EL1", .state = ARM_CP_STATE_AA64,
2145       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 7,
2146       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2147       .access = PL1_W, .accessfn = gicv3_sgi_access,
2148       .writefn = icc_sgi0r_write,
2149     },
2150     { .name = "ICC_SGI0R",
2151       .cp = 15, .opc1 = 2, .crm = 12,
2152       .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_RAW,
2153       .access = PL1_W, .accessfn = gicv3_sgi_access,
2154       .writefn = icc_sgi0r_write,
2155     },
2156     { .name = "ICC_IAR1_EL1", .state = ARM_CP_STATE_BOTH,
2157       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 0,
2158       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2159       .access = PL1_R, .accessfn = gicv3_irq_access,
2160       .readfn = icc_iar1_read,
2161     },
2162     { .name = "ICC_EOIR1_EL1", .state = ARM_CP_STATE_BOTH,
2163       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 1,
2164       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2165       .access = PL1_W, .accessfn = gicv3_irq_access,
2166       .writefn = icc_eoir_write,
2167     },
2168     { .name = "ICC_HPPIR1_EL1", .state = ARM_CP_STATE_BOTH,
2169       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 2,
2170       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2171       .access = PL1_R, .accessfn = gicv3_irq_access,
2172       .readfn = icc_hppir1_read,
2173     },
2174     /* This register is banked */
2175     { .name = "ICC_BPR1_EL1", .state = ARM_CP_STATE_BOTH,
2176       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 3,
2177       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2178       .access = PL1_RW, .accessfn = gicv3_irq_access,
2179       .readfn = icc_bpr_read,
2180       .writefn = icc_bpr_write,
2181     },
2182     /* This register is banked */
2183     { .name = "ICC_CTLR_EL1", .state = ARM_CP_STATE_BOTH,
2184       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 4,
2185       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2186       .access = PL1_RW, .accessfn = gicv3_irqfiq_access,
2187       .readfn = icc_ctlr_el1_read,
2188       .writefn = icc_ctlr_el1_write,
2189     },
2190     { .name = "ICC_SRE_EL1", .state = ARM_CP_STATE_BOTH,
2191       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 5,
2192       .type = ARM_CP_NO_RAW | ARM_CP_CONST,
2193       .access = PL1_RW,
2194       /* We don't support IRQ/FIQ bypass and system registers are
2195        * always enabled, so all our bits are RAZ/WI or RAO/WI.
2196        * This register is banked but since it's constant we don't
2197        * need to do anything special.
2198        */
2199       .resetvalue = 0x7,
2200     },
2201     { .name = "ICC_IGRPEN0_EL1", .state = ARM_CP_STATE_BOTH,
2202       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 6,
2203       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2204       .access = PL1_RW, .accessfn = gicv3_fiq_access,
2205       .readfn = icc_igrpen_read,
2206       .writefn = icc_igrpen_write,
2207     },
2208     /* This register is banked */
2209     { .name = "ICC_IGRPEN1_EL1", .state = ARM_CP_STATE_BOTH,
2210       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 7,
2211       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2212       .access = PL1_RW, .accessfn = gicv3_irq_access,
2213       .readfn = icc_igrpen_read,
2214       .writefn = icc_igrpen_write,
2215     },
2216     { .name = "ICC_SRE_EL2", .state = ARM_CP_STATE_BOTH,
2217       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 5,
2218       .type = ARM_CP_NO_RAW | ARM_CP_CONST,
2219       .access = PL2_RW,
2220       /* We don't support IRQ/FIQ bypass and system registers are
2221        * always enabled, so all our bits are RAZ/WI or RAO/WI.
2222        */
2223       .resetvalue = 0xf,
2224     },
2225     { .name = "ICC_CTLR_EL3", .state = ARM_CP_STATE_BOTH,
2226       .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 12, .opc2 = 4,
2227       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2228       .access = PL3_RW,
2229       .readfn = icc_ctlr_el3_read,
2230       .writefn = icc_ctlr_el3_write,
2231     },
2232     { .name = "ICC_SRE_EL3", .state = ARM_CP_STATE_BOTH,
2233       .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 12, .opc2 = 5,
2234       .type = ARM_CP_NO_RAW | ARM_CP_CONST,
2235       .access = PL3_RW,
2236       /* We don't support IRQ/FIQ bypass and system registers are
2237        * always enabled, so all our bits are RAZ/WI or RAO/WI.
2238        */
2239       .resetvalue = 0xf,
2240     },
2241     { .name = "ICC_IGRPEN1_EL3", .state = ARM_CP_STATE_BOTH,
2242       .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 12, .opc2 = 7,
2243       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2244       .access = PL3_RW,
2245       .readfn = icc_igrpen1_el3_read,
2246       .writefn = icc_igrpen1_el3_write,
2247     },
2248     REGINFO_SENTINEL
2249 };
2250 
2251 static uint64_t ich_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
2252 {
2253     GICv3CPUState *cs = icc_cs_from_env(env);
2254     int regno = ri->opc2 & 3;
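    /* ICH_AP0R<n>_EL2 (Group 0 active priorities) is encoded with crm == 8
     * and ICH_AP1R<n>_EL2 (Group 1) with crm == 9, so the low bit of crm
     * selects the group.
     */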
2255     int grp = ri->crm & 1 ? GICV3_G1NS : GICV3_G0;
2256     uint64_t value;
2257 
2258     value = cs->ich_apr[grp][regno];
2259     trace_gicv3_ich_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
2260     return value;
2261 }
2262 
2263 static void ich_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
2264                          uint64_t value)
2265 {
2266     GICv3CPUState *cs = icc_cs_from_env(env);
2267     int regno = ri->opc2 & 3;
2268     int grp = ri->crm & 1 ? GICV3_G1NS : GICV3_G0;
2269 
2270     trace_gicv3_ich_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
2271 
2272     cs->ich_apr[grp][regno] = value & 0xFFFFFFFFU;
2273     gicv3_cpuif_virt_update(cs);
2274 }
2275 
2276 static uint64_t ich_hcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2277 {
2278     GICv3CPUState *cs = icc_cs_from_env(env);
2279     uint64_t value = cs->ich_hcr_el2;
2280 
2281     trace_gicv3_ich_hcr_read(gicv3_redist_affid(cs), value);
2282     return value;
2283 }
2284 
2285 static void ich_hcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2286                           uint64_t value)
2287 {
2288     GICv3CPUState *cs = icc_cs_from_env(env);
2289 
2290     trace_gicv3_ich_hcr_write(gicv3_redist_affid(cs), value);
2291 
2292     value &= ICH_HCR_EL2_EN | ICH_HCR_EL2_UIE | ICH_HCR_EL2_LRENPIE |
2293         ICH_HCR_EL2_NPIE | ICH_HCR_EL2_VGRP0EIE | ICH_HCR_EL2_VGRP0DIE |
2294         ICH_HCR_EL2_VGRP1EIE | ICH_HCR_EL2_VGRP1DIE | ICH_HCR_EL2_TC |
2295         ICH_HCR_EL2_TALL0 | ICH_HCR_EL2_TALL1 | ICH_HCR_EL2_TSEI |
2296         ICH_HCR_EL2_TDIR | ICH_HCR_EL2_EOICOUNT_MASK;
2297 
2298     cs->ich_hcr_el2 = value;
2299     gicv3_cpuif_virt_update(cs);
2300 }
2301 
2302 static uint64_t ich_vmcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2303 {
2304     GICv3CPUState *cs = icc_cs_from_env(env);
2305     uint64_t value = cs->ich_vmcr_el2;
2306 
2307     trace_gicv3_ich_vmcr_read(gicv3_redist_affid(cs), value);
2308     return value;
2309 }
2310 
2311 static void ich_vmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2312                          uint64_t value)
2313 {
2314     GICv3CPUState *cs = icc_cs_from_env(env);
2315 
2316     trace_gicv3_ich_vmcr_write(gicv3_redist_affid(cs), value);
2317 
2318     value &= ICH_VMCR_EL2_VENG0 | ICH_VMCR_EL2_VENG1 | ICH_VMCR_EL2_VCBPR |
2319         ICH_VMCR_EL2_VEOIM | ICH_VMCR_EL2_VBPR1_MASK |
2320         ICH_VMCR_EL2_VBPR0_MASK | ICH_VMCR_EL2_VPMR_MASK;
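    /* VFIQEN is forced to 1: architecturally it is RES1 when the virtual
     * machine uses the system register interface, which is always the case
     * here since our ICC_SRE_EL1 is constant with SRE set.
     */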
2321     value |= ICH_VMCR_EL2_VFIQEN;
2322 
2323     cs->ich_vmcr_el2 = value;
2324     /* Enforce "writing BPRs to less than minimum sets them to the minimum"
2325      * by reading and writing back the fields.
2326      */
2327     write_vbpr(cs, GICV3_G0, read_vbpr(cs, GICV3_G0));
2328     write_vbpr(cs, GICV3_G1, read_vbpr(cs, GICV3_G1));
2329 
2330     gicv3_cpuif_virt_update(cs);
2331 }
2332 
2333 static uint64_t ich_lr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2334 {
2335     GICv3CPUState *cs = icc_cs_from_env(env);
2336     int regno = ri->opc2 | ((ri->crm & 1) << 3);
2337     uint64_t value;
2338 
2339     /* This read function handles all of:
2340      * 64-bit reads of the whole LR
2341      * 32-bit reads of the low half of the LR
2342      * 32-bit reads of the high half of the LR
2343      */
2344     if (ri->state == ARM_CP_STATE_AA32) {
2345         if (ri->crm >= 14) {
2346             value = extract64(cs->ich_lr_el2[regno], 32, 32);
2347             trace_gicv3_ich_lrc_read(regno, gicv3_redist_affid(cs), value);
2348         } else {
2349             value = extract64(cs->ich_lr_el2[regno], 0, 32);
2350             trace_gicv3_ich_lr32_read(regno, gicv3_redist_affid(cs), value);
2351         }
2352     } else {
2353         value = cs->ich_lr_el2[regno];
2354         trace_gicv3_ich_lr_read(regno, gicv3_redist_affid(cs), value);
2355     }
2356 
2357     return value;
2358 }
2359 
2360 static void ich_lr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2361                          uint64_t value)
2362 {
2363     GICv3CPUState *cs = icc_cs_from_env(env);
2364     int regno = ri->opc2 | ((ri->crm & 1) << 3);
2365 
2366     /* This write function handles all of:
2367      * 64-bit writes to the whole LR
2368      * 32-bit writes to the low half of the LR
2369      * 32-bit writes to the high half of the LR
2370      */
2371     if (ri->state == ARM_CP_STATE_AA32) {
2372         if (ri->crm >= 14) {
2373             trace_gicv3_ich_lrc_write(regno, gicv3_redist_affid(cs), value);
2374             value = deposit64(cs->ich_lr_el2[regno], 32, 32, value);
2375         } else {
2376             trace_gicv3_ich_lr32_write(regno, gicv3_redist_affid(cs), value);
2377             value = deposit64(cs->ich_lr_el2[regno], 0, 32, value);
2378         }
2379     } else {
2380         trace_gicv3_ich_lr_write(regno, gicv3_redist_affid(cs), value);
2381     }
2382 
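    /* The LR priority field is 8 bits wide; when fewer than 8 bits of
     * virtual priority are implemented, the low (8 - vpribits) bits are RES0.
     */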
2383     /* Enforce RES0 bits in priority field */
2384     if (cs->vpribits < 8) {
2385         value = deposit64(value, ICH_LR_EL2_PRIORITY_SHIFT,
2386                           8 - cs->vpribits, 0);
2387     }
2388 
2389     cs->ich_lr_el2[regno] = value;
2390     gicv3_cpuif_virt_update(cs);
2391 }
2392 
2393 static uint64_t ich_vtr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2394 {
2395     GICv3CPUState *cs = icc_cs_from_env(env);
2396     uint64_t value;
2397 
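    /* The ListRegs, PREbits and PRIbits fields are all encoded as
     * "implemented value minus one".
     */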
2398     value = ((cs->num_list_regs - 1) << ICH_VTR_EL2_LISTREGS_SHIFT)
2399         | ICH_VTR_EL2_TDS | ICH_VTR_EL2_NV4 | ICH_VTR_EL2_A3V
2400         | (1 << ICH_VTR_EL2_IDBITS_SHIFT)
2401         | ((cs->vprebits - 1) << ICH_VTR_EL2_PREBITS_SHIFT)
2402         | ((cs->vpribits - 1) << ICH_VTR_EL2_PRIBITS_SHIFT);
2403 
2404     trace_gicv3_ich_vtr_read(gicv3_redist_affid(cs), value);
2405     return value;
2406 }
2407 
2408 static uint64_t ich_misr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2409 {
2410     GICv3CPUState *cs = icc_cs_from_env(env);
2411     uint64_t value = maintenance_interrupt_state(cs);
2412 
2413     trace_gicv3_ich_misr_read(gicv3_redist_affid(cs), value);
2414     return value;
2415 }
2416 
2417 static uint64_t ich_eisr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2418 {
2419     GICv3CPUState *cs = icc_cs_from_env(env);
2420     uint64_t value = eoi_maintenance_interrupt_state(cs, NULL);
2421 
2422     trace_gicv3_ich_eisr_read(gicv3_redist_affid(cs), value);
2423     return value;
2424 }
2425 
2426 static uint64_t ich_elrsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2427 {
2428     GICv3CPUState *cs = icc_cs_from_env(env);
2429     uint64_t value = 0;
2430     int i;
2431 
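    /* A list register is reported as usable when its State field is
     * Inactive and it does not still owe an EOI maintenance interrupt,
     * i.e. either HW is set or EOI is clear.
     */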
2432     for (i = 0; i < cs->num_list_regs; i++) {
2433         uint64_t lr = cs->ich_lr_el2[i];
2434 
2435         if ((lr & ICH_LR_EL2_STATE_MASK) == 0 &&
2436             ((lr & ICH_LR_EL2_HW) != 0 || (lr & ICH_LR_EL2_EOI) == 0)) {
2437             value |= (1 << i);
2438         }
2439     }
2440 
2441     trace_gicv3_ich_elrsr_read(gicv3_redist_affid(cs), value);
2442     return value;
2443 }
2444 
2445 static const ARMCPRegInfo gicv3_cpuif_hcr_reginfo[] = {
2446     { .name = "ICH_AP0R0_EL2", .state = ARM_CP_STATE_BOTH,
2447       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 0,
2448       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2449       .access = PL2_RW,
2450       .readfn = ich_ap_read,
2451       .writefn = ich_ap_write,
2452     },
2453     { .name = "ICH_AP1R0_EL2", .state = ARM_CP_STATE_BOTH,
2454       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 0,
2455       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2456       .access = PL2_RW,
2457       .readfn = ich_ap_read,
2458       .writefn = ich_ap_write,
2459     },
2460     { .name = "ICH_HCR_EL2", .state = ARM_CP_STATE_BOTH,
2461       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 0,
2462       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2463       .access = PL2_RW,
2464       .readfn = ich_hcr_read,
2465       .writefn = ich_hcr_write,
2466     },
2467     { .name = "ICH_VTR_EL2", .state = ARM_CP_STATE_BOTH,
2468       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 1,
2469       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2470       .access = PL2_R,
2471       .readfn = ich_vtr_read,
2472     },
2473     { .name = "ICH_MISR_EL2", .state = ARM_CP_STATE_BOTH,
2474       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 2,
2475       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2476       .access = PL2_R,
2477       .readfn = ich_misr_read,
2478     },
2479     { .name = "ICH_EISR_EL2", .state = ARM_CP_STATE_BOTH,
2480       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 3,
2481       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2482       .access = PL2_R,
2483       .readfn = ich_eisr_read,
2484     },
2485     { .name = "ICH_ELRSR_EL2", .state = ARM_CP_STATE_BOTH,
2486       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 5,
2487       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2488       .access = PL2_R,
2489       .readfn = ich_elrsr_read,
2490     },
2491     { .name = "ICH_VMCR_EL2", .state = ARM_CP_STATE_BOTH,
2492       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 7,
2493       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2494       .access = PL2_RW,
2495       .readfn = ich_vmcr_read,
2496       .writefn = ich_vmcr_write,
2497     },
2498     REGINFO_SENTINEL
2499 };
2500 
2501 static const ARMCPRegInfo gicv3_cpuif_ich_apxr1_reginfo[] = {
2502     { .name = "ICH_AP0R1_EL2", .state = ARM_CP_STATE_BOTH,
2503       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 1,
2504       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2505       .access = PL2_RW,
2506       .readfn = ich_ap_read,
2507       .writefn = ich_ap_write,
2508     },
2509     { .name = "ICH_AP1R1_EL2", .state = ARM_CP_STATE_BOTH,
2510       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 1,
2511       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2512       .access = PL2_RW,
2513       .readfn = ich_ap_read,
2514       .writefn = ich_ap_write,
2515     },
2516     REGINFO_SENTINEL
2517 };
2518 
2519 static const ARMCPRegInfo gicv3_cpuif_ich_apxr23_reginfo[] = {
2520     { .name = "ICH_AP0R2_EL2", .state = ARM_CP_STATE_BOTH,
2521       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 2,
2522       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2523       .access = PL2_RW,
2524       .readfn = ich_ap_read,
2525       .writefn = ich_ap_write,
2526     },
2527     { .name = "ICH_AP0R3_EL2", .state = ARM_CP_STATE_BOTH,
2528       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 3,
2529       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2530       .access = PL2_RW,
2531       .readfn = ich_ap_read,
2532       .writefn = ich_ap_write,
2533     },
2534     { .name = "ICH_AP1R2_EL2", .state = ARM_CP_STATE_BOTH,
2535       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 2,
2536       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2537       .access = PL2_RW,
2538       .readfn = ich_ap_read,
2539       .writefn = ich_ap_write,
2540     },
2541     { .name = "ICH_AP1R3_EL2", .state = ARM_CP_STATE_BOTH,
2542       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 3,
2543       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2544       .access = PL2_RW,
2545       .readfn = ich_ap_read,
2546       .writefn = ich_ap_write,
2547     },
2548     REGINFO_SENTINEL
2549 };
2550 
2551 static void gicv3_cpuif_el_change_hook(ARMCPU *cpu, void *opaque)
2552 {
2553     GICv3CPUState *cs = opaque;
2554 
2555     gicv3_cpuif_update(cs);
2556 }
2557 
2558 void gicv3_init_cpuif(GICv3State *s)
2559 {
2560     /* Called from the GICv3 realize function; register our system
2561      * registers with the CPU
2562      */
2563     int i;
2564 
2565     for (i = 0; i < s->num_cpu; i++) {
2566         ARMCPU *cpu = ARM_CPU(qemu_get_cpu(i));
2567         GICv3CPUState *cs = &s->cpu[i];
2568 
2569         /* Note that we can't just use the GICv3CPUState as an opaque pointer
2570          * in define_arm_cp_regs_with_opaque(), because when we're called back
2571          * it might be with code translated by CPU 0 but run by CPU 1, in
2572          * which case we'd get the wrong value.
2573          * So instead we define the regs with no ri->opaque info, and
2574          * get back to the GICv3CPUState from the ARMCPU by reading back
2575          * the opaque pointer from the el_change_hook, which we're going
2576          * to need to register anyway.
2577          */
2578         define_arm_cp_regs(cpu, gicv3_cpuif_reginfo);
2579         if (arm_feature(&cpu->env, ARM_FEATURE_EL2)
2580             && cpu->gic_num_lrs) {
2581             int j;
2582 
2583             cs->maintenance_irq = cpu->gicv3_maintenance_interrupt;
2584 
2585             cs->num_list_regs = cpu->gic_num_lrs;
2586             cs->vpribits = cpu->gic_vpribits;
2587             cs->vprebits = cpu->gic_vprebits;
2588 
2589             /* Check against architectural constraints: getting these
2590              * wrong would be a bug in the CPU code defining these,
2591              * and the implementation relies on them holding.
2592              */
2593             g_assert(cs->vprebits <= cs->vpribits);
2594             g_assert(cs->vprebits >= 5 && cs->vprebits <= 7);
2595             g_assert(cs->vpribits >= 5 && cs->vpribits <= 8);
2596 
2597             define_arm_cp_regs(cpu, gicv3_cpuif_hcr_reginfo);
2598 
2599             for (j = 0; j < cs->num_list_regs; j++) {
2600                 /* Note that the AArch64 LRs are 64-bit; the AArch32 LRs
2601                  * are split into two cp15 regs, LR (the low part, with the
2602                  * same encoding as the AArch64 LR) and LRC (the high part).
2603                  */
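                /* LRs 0..7 live at CRm == 12 (CRm == 14 for the AArch32
                 * LRC half) and LRs 8..15 at CRm == 13 (15), with opc2
                 * selecting the register within each group of eight.
                 */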
2604                 ARMCPRegInfo lr_regset[] = {
2605                     { .name = "ICH_LRn_EL2", .state = ARM_CP_STATE_BOTH,
2606                       .opc0 = 3, .opc1 = 4, .crn = 12,
2607                       .crm = 12 + (j >> 3), .opc2 = j & 7,
2608                       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2609                       .access = PL2_RW,
2610                       .readfn = ich_lr_read,
2611                       .writefn = ich_lr_write,
2612                     },
2613                     { .name = "ICH_LRCn_EL2", .state = ARM_CP_STATE_AA32,
2614                       .cp = 15, .opc1 = 4, .crn = 12,
2615                       .crm = 14 + (j >> 3), .opc2 = j & 7,
2616                       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2617                       .access = PL2_RW,
2618                       .readfn = ich_lr_read,
2619                       .writefn = ich_lr_write,
2620                     },
2621                     REGINFO_SENTINEL
2622                 };
2623                 define_arm_cp_regs(cpu, lr_regset);
2624             }
2625             if (cs->vprebits >= 6) {
2626                 define_arm_cp_regs(cpu, gicv3_cpuif_ich_apxr1_reginfo);
2627             }
2628             if (cs->vprebits == 7) {
2629                 define_arm_cp_regs(cpu, gicv3_cpuif_ich_apxr23_reginfo);
2630             }
2631         }
2632         arm_register_el_change_hook(cpu, gicv3_cpuif_el_change_hook, cs);
2633     }
2634 }
2635