xref: /openbmc/qemu/hw/intc/arm_gicv3_cpuif.c (revision df313f481f4167bf0af0a0d362b77aa22574ff56)
1 /*
2  * ARM Generic Interrupt Controller v3
3  *
4  * Copyright (c) 2016 Linaro Limited
5  * Written by Peter Maydell
6  *
7  * This code is licensed under the GPL, version 2 or (at your option)
8  * any later version.
9  */
10 
11 /* This file contains the code for the system register interface
12  * portions of the GICv3.
13  */
14 
15 #include "qemu/osdep.h"
16 #include "qemu/bitops.h"
17 #include "trace.h"
18 #include "gicv3_internal.h"
19 #include "cpu.h"
20 
21 static GICv3CPUState *icc_cs_from_env(CPUARMState *env)
22 {
23     /* Given the CPU, find the right GICv3CPUState struct.
24      * Since we registered the CPU interface with the EL change hook as
25      * the opaque pointer, we can just directly get from the CPU to it.
26      */
27     return arm_get_el_change_hook_opaque(arm_env_get_cpu(env));
28 }
29 
30 static bool gicv3_use_ns_bank(CPUARMState *env)
31 {
32     /* Return true if we should use the NonSecure bank for a banked GIC
33      * CPU interface register. Note that this differs from the
34      * access_secure_reg() function because GICv3 banked registers are
35      * banked even for AArch64, unlike the other CPU system registers.
36      */
37     return !arm_is_secure_below_el3(env);
38 }
39 
40 /* The minimum BPR for the virtual interface is a configurable property */
41 static inline int icv_min_vbpr(GICv3CPUState *cs)
42 {
43     return 7 - cs->vprebits;
44 }
45 
46 /* Simple accessor functions for LR fields */
47 static uint32_t ich_lr_vintid(uint64_t lr)
48 {
49     return extract64(lr, ICH_LR_EL2_VINTID_SHIFT, ICH_LR_EL2_VINTID_LENGTH);
50 }
51 
52 static uint32_t ich_lr_pintid(uint64_t lr)
53 {
54     return extract64(lr, ICH_LR_EL2_PINTID_SHIFT, ICH_LR_EL2_PINTID_LENGTH);
55 }
56 
57 static uint32_t ich_lr_prio(uint64_t lr)
58 {
59     return extract64(lr, ICH_LR_EL2_PRIORITY_SHIFT, ICH_LR_EL2_PRIORITY_LENGTH);
60 }
61 
62 static int ich_lr_state(uint64_t lr)
63 {
64     return extract64(lr, ICH_LR_EL2_STATE_SHIFT, ICH_LR_EL2_STATE_LENGTH);
65 }
66 
67 static bool icv_access(CPUARMState *env, int hcr_flags)
68 {
69     /* Return true if this ICC_ register access should really be
70      * directed to an ICV_ access. hcr_flags is a mask of
71      * HCR_EL2 bits to check: we treat this as an ICV_ access
72      * if we are in NS EL1 and at least one of the specified
73      * HCR_EL2 bits is set.
74      *
75      * ICV registers fall into three categories:
76      *  * access if NS EL1 and HCR_EL2.FMO == 1:
77      *    all ICV regs with '0' in their name
78      *  * access if NS EL1 and HCR_EL2.IMO == 1:
79      *    all ICV regs with '1' in their name
80      *  * access if NS EL1 and either IMO or FMO == 1:
81      *    CTLR, DIR, PMR, RPR
82      */
83     return (env->cp15.hcr_el2 & hcr_flags) && arm_current_el(env) == 1
84         && !arm_is_secure_below_el3(env);
85 }
86 
87 static int read_vbpr(GICv3CPUState *cs, int grp)
88 {
89     /* Read VBPR value out of the VMCR field (caller must handle
90      * VCBPR effects if required)
91      */
92     if (grp == GICV3_G0) {
93         return extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR0_SHIFT,
94                          ICH_VMCR_EL2_VBPR0_LENGTH);
95     } else {
96         return extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR1_SHIFT,
97                          ICH_VMCR_EL2_VBPR1_LENGTH);
98     }
99 }
100 
101 static void write_vbpr(GICv3CPUState *cs, int grp, int value)
102 {
103     /* Write new VBPR value for the given group, handling the "writing a value less than
104      * the minimum sets it to the minimum" semantics.
105      */
106     int min = icv_min_vbpr(cs);
107 
108     if (grp != GICV3_G0) {
109         min++;
110     }
111 
112     value = MAX(value, min);
113 
114     if (grp == GICV3_G0) {
115         cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR0_SHIFT,
116                                      ICH_VMCR_EL2_VBPR0_LENGTH, value);
117     } else {
118         cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR1_SHIFT,
119                                      ICH_VMCR_EL2_VBPR1_LENGTH, value);
120     }
121 }
122 
123 static uint32_t icv_fullprio_mask(GICv3CPUState *cs)
124 {
125     /* Return a mask word which clears the unimplemented priority bits
126      * from a priority value for a virtual interrupt. (Not to be confused
127      * with the group priority, whose mask depends on the value of VBPR
128      * for the interrupt group.)
129      */
130     return ~0U << (8 - cs->vpribits);
131 }
132 
133 static int ich_highest_active_virt_prio(GICv3CPUState *cs)
134 {
135     /* Calculate the current running priority based on the set bits
136      * in the ICH Active Priority Registers.
137      */
138     int i;
139     int aprmax = 1 << (cs->vprebits - 5);
140 
141     assert(aprmax <= ARRAY_SIZE(cs->ich_apr[0]));
142 
143     for (i = 0; i < aprmax; i++) {
144         uint32_t apr = cs->ich_apr[GICV3_G0][i] |
145             cs->ich_apr[GICV3_G1NS][i];
146 
147         if (!apr) {
148             continue;
149         }
150         return (i * 32 + ctz32(apr)) << (icv_min_vbpr(cs) + 1);
151     }
152     /* No current active interrupts: return idle priority */
153     return 0xff;
154 }
155 
156 static int hppvi_index(GICv3CPUState *cs)
157 {
158     /* Return the list register index of the highest priority pending
159      * virtual interrupt, as per the HighestPriorityVirtualInterrupt
160      * pseudocode. If no pending virtual interrupts, return -1.
161      */
162     int idx = -1;
163     int i;
164     /* Note that a list register entry with a priority of 0xff will
165      * never be reported by this function; this is the architecturally
166      * correct behaviour.
167      */
168     int prio = 0xff;
169 
170     if (!(cs->ich_vmcr_el2 & (ICH_VMCR_EL2_VENG0 | ICH_VMCR_EL2_VENG1))) {
171         /* Both groups disabled, definitely nothing to do */
172         return idx;
173     }
174 
175     for (i = 0; i < cs->num_list_regs; i++) {
176         uint64_t lr = cs->ich_lr_el2[i];
177         int thisprio;
178 
179         if (ich_lr_state(lr) != ICH_LR_EL2_STATE_PENDING) {
180             /* Not Pending */
181             continue;
182         }
183 
184         /* Ignore interrupts if relevant group enable not set */
185         if (lr & ICH_LR_EL2_GROUP) {
186             if (!(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) {
187                 continue;
188             }
189         } else {
190             if (!(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0)) {
191                 continue;
192             }
193         }
194 
195         thisprio = ich_lr_prio(lr);
196 
197         if (thisprio < prio) {
198             prio = thisprio;
199             idx = i;
200         }
201     }
202 
203     return idx;
204 }
205 
206 static uint32_t eoi_maintenance_interrupt_state(GICv3CPUState *cs,
207                                                 uint32_t *misr)
208 {
209     /* Return a set of bits indicating the EOI maintenance interrupt status
210      * for each list register. The EOI maintenance interrupt status is
211      * 1 if LR.State == 0 && LR.HW == 0 && LR.EOI == 1
212      * (see the GICv3 spec for the ICH_EISR_EL2 register).
213      * If misr is not NULL then we should also collect the information
214      * about the MISR.EOI, MISR.NP and MISR.U bits.
215      */
216     uint32_t value = 0;
217     int validcount = 0;
218     bool seenpending = false;
219     int i;
220 
221     for (i = 0; i < cs->num_list_regs; i++) {
222         uint64_t lr = cs->ich_lr_el2[i];
223 
224         if ((lr & (ICH_LR_EL2_STATE_MASK | ICH_LR_EL2_HW | ICH_LR_EL2_EOI))
225             == ICH_LR_EL2_EOI) {
226             value |= (1 << i);
227         }
228         if ((lr & ICH_LR_EL2_STATE_MASK)) {
229             validcount++;
230         }
231         if (ich_lr_state(lr) == ICH_LR_EL2_STATE_PENDING) {
232             seenpending = true;
233         }
234     }
235 
236     if (misr) {
237         if (validcount < 2 && (cs->ich_hcr_el2 & ICH_HCR_EL2_UIE)) {
238             *misr |= ICH_MISR_EL2_U;
239         }
240         if (!seenpending && (cs->ich_hcr_el2 & ICH_HCR_EL2_NPIE)) {
241             *misr |= ICH_MISR_EL2_NP;
242         }
243         if (value) {
244             *misr |= ICH_MISR_EL2_EOI;
245         }
246     }
247     return value;
248 }
249 
250 static uint32_t maintenance_interrupt_state(GICv3CPUState *cs)
251 {
252     /* Return a set of bits indicating the maintenance interrupt status
253      * (as seen in the ICH_MISR_EL2 register).
254      */
255     uint32_t value = 0;
256 
257     /* Scan list registers and fill in the U, NP and EOI bits */
258     eoi_maintenance_interrupt_state(cs, &value);
259 
260     if (cs->ich_hcr_el2 & (ICH_HCR_EL2_LRENPIE | ICH_HCR_EL2_EOICOUNT_MASK)) {
261         value |= ICH_MISR_EL2_LRENP;
262     }
263 
264     if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP0EIE) &&
265         (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0)) {
266         value |= ICH_MISR_EL2_VGRP0E;
267     }
268 
269     if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP0DIE) &&
270         !(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0)) {
271         value |= ICH_MISR_EL2_VGRP0D;
272     }
273     if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP1EIE) &&
274         (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) {
275         value |= ICH_MISR_EL2_VGRP1E;
276     }
277 
278     if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP1DIE) &&
279         !(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) {
280         value |= ICH_MISR_EL2_VGRP1D;
281     }
282 
283     return value;
284 }
285 
286 static void gicv3_cpuif_virt_update(GICv3CPUState *cs)
287 {
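    /* Placeholder: intended to recompute the virtual CPU interface state
     * (virtual IRQ/FIQ and the maintenance interrupt) after ICH_ and ICV_
     * register changes; currently a no-op at this point.
     */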
288 }
289 
290 static uint64_t icv_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
291 {
292     GICv3CPUState *cs = icc_cs_from_env(env);
293     int regno = ri->opc2 & 3;
294     int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
295     uint64_t value = cs->ich_apr[grp][regno];
296 
297     trace_gicv3_icv_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
298     return value;
299 }
300 
301 static void icv_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
302                          uint64_t value)
303 {
304     GICv3CPUState *cs = icc_cs_from_env(env);
305     int regno = ri->opc2 & 3;
306     int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
307 
308     trace_gicv3_icv_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
309 
310     cs->ich_apr[grp][regno] = value & 0xFFFFFFFFU;
311 
312     gicv3_cpuif_virt_update(cs);
313     return;
314 }
315 
316 static uint64_t icv_bpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
317 {
318     GICv3CPUState *cs = icc_cs_from_env(env);
319     int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1NS;
320     uint64_t bpr;
321     bool satinc = false;
322 
323     if (grp == GICV3_G1NS && (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR)) {
324         /* reads return bpr0 + 1 saturated to 7, writes ignored */
325         grp = GICV3_G0;
326         satinc = true;
327     }
328 
329     bpr = read_vbpr(cs, grp);
330 
331     if (satinc) {
332         bpr++;
333         bpr = MIN(bpr, 7);
334     }
335 
336     trace_gicv3_icv_bpr_read(ri->crm == 8 ? 0 : 1, gicv3_redist_affid(cs), bpr);
337 
338     return bpr;
339 }
340 
341 static void icv_bpr_write(CPUARMState *env, const ARMCPRegInfo *ri,
342                           uint64_t value)
343 {
344     GICv3CPUState *cs = icc_cs_from_env(env);
345     int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1NS;
346 
347     trace_gicv3_icv_bpr_write(ri->crm == 8 ? 0 : 1,
348                               gicv3_redist_affid(cs), value);
349 
350     if (grp == GICV3_G1NS && (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR)) {
351         /* reads return bpr0 + 1 saturated to 7, writes ignored */
352         return;
353     }
354 
355     write_vbpr(cs, grp, value);
356 
357     gicv3_cpuif_virt_update(cs);
358 }
359 
360 static uint64_t icv_pmr_read(CPUARMState *env, const ARMCPRegInfo *ri)
361 {
362     GICv3CPUState *cs = icc_cs_from_env(env);
363     uint64_t value;
364 
365     value = extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
366                       ICH_VMCR_EL2_VPMR_LENGTH);
367 
368     trace_gicv3_icv_pmr_read(gicv3_redist_affid(cs), value);
369     return value;
370 }
371 
372 static void icv_pmr_write(CPUARMState *env, const ARMCPRegInfo *ri,
373                           uint64_t value)
374 {
375     GICv3CPUState *cs = icc_cs_from_env(env);
376 
377     trace_gicv3_icv_pmr_write(gicv3_redist_affid(cs), value);
378 
379     value &= icv_fullprio_mask(cs);
380 
381     cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
382                                  ICH_VMCR_EL2_VPMR_LENGTH, value);
383 
384     gicv3_cpuif_virt_update(cs);
385 }
386 
387 static uint64_t icv_igrpen_read(CPUARMState *env, const ARMCPRegInfo *ri)
388 {
389     GICv3CPUState *cs = icc_cs_from_env(env);
390     int enbit;
391     uint64_t value;
392 
393     enbit = ri->opc2 & 1 ? ICH_VMCR_EL2_VENG1_SHIFT : ICH_VMCR_EL2_VENG0_SHIFT;
394     value = extract64(cs->ich_vmcr_el2, enbit, 1);
395 
396     trace_gicv3_icv_igrpen_read(ri->opc2 & 1 ? 1 : 0,
397                                 gicv3_redist_affid(cs), value);
398     return value;
399 }
400 
401 static void icv_igrpen_write(CPUARMState *env, const ARMCPRegInfo *ri,
402                              uint64_t value)
403 {
404     GICv3CPUState *cs = icc_cs_from_env(env);
405     int enbit;
406 
407     trace_gicv3_icv_igrpen_write(ri->opc2 & 1 ? 1 : 0,
408                                  gicv3_redist_affid(cs), value);
409 
410     enbit = ri->opc2 & 1 ? ICH_VMCR_EL2_VENG1_SHIFT : ICH_VMCR_EL2_VENG0_SHIFT;
411 
412     cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, enbit, 1, value);
413     gicv3_cpuif_virt_update(cs);
414 }
415 
416 static uint64_t icv_ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri)
417 {
418     GICv3CPUState *cs = icc_cs_from_env(env);
419     uint64_t value;
420 
421     /* Note that the fixed fields here (A3V, SEIS, IDbits, PRIbits)
422      * should match the ones reported in ich_vtr_read().
423      */
424     value = ICC_CTLR_EL1_A3V | (1 << ICC_CTLR_EL1_IDBITS_SHIFT) |
425         (7 << ICC_CTLR_EL1_PRIBITS_SHIFT);
426 
427     if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VEOIM) {
428         value |= ICC_CTLR_EL1_EOIMODE;
429     }
430 
431     if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR) {
432         value |= ICC_CTLR_EL1_CBPR;
433     }
434 
435     trace_gicv3_icv_ctlr_read(gicv3_redist_affid(cs), value);
436     return value;
437 }
438 
439 static void icv_ctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
440                                uint64_t value)
441 {
442     GICv3CPUState *cs = icc_cs_from_env(env);
443 
444     trace_gicv3_icv_ctlr_write(gicv3_redist_affid(cs), value);
445 
446     cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VCBPR_SHIFT,
447                                  1, value & ICC_CTLR_EL1_CBPR ? 1 : 0);
448     cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VEOIM_SHIFT,
449                                  1, value & ICC_CTLR_EL1_EOIMODE ? 1 : 0);
450 
451     gicv3_cpuif_virt_update(cs);
452 }
453 
454 static uint64_t icv_rpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
455 {
456     GICv3CPUState *cs = icc_cs_from_env(env);
457     int prio = ich_highest_active_virt_prio(cs);
458 
459     trace_gicv3_icv_rpr_read(gicv3_redist_affid(cs), prio);
460     return prio;
461 }
462 
463 static uint64_t icv_hppir_read(CPUARMState *env, const ARMCPRegInfo *ri)
464 {
465     GICv3CPUState *cs = icc_cs_from_env(env);
466     int grp = ri->crm == 8 ? GICV3_G0 : GICV3_G1NS;
467     int idx = hppvi_index(cs);
468     uint64_t value = INTID_SPURIOUS;
469 
470     if (idx >= 0) {
471         uint64_t lr = cs->ich_lr_el2[idx];
472         int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;
473 
474         if (grp == thisgrp) {
475             value = ich_lr_vintid(lr);
476         }
477     }
478 
479     trace_gicv3_icv_hppir_read(grp, gicv3_redist_affid(cs), value);
480     return value;
481 }
482 
483 static int icc_highest_active_prio(GICv3CPUState *cs)
484 {
485     /* Calculate the current running priority based on the set bits
486      * in the Active Priority Registers.
487      */
488     int i;
489 
490     for (i = 0; i < ARRAY_SIZE(cs->icc_apr[0]); i++) {
491         uint32_t apr = cs->icc_apr[GICV3_G0][i] |
492             cs->icc_apr[GICV3_G1][i] | cs->icc_apr[GICV3_G1NS][i];
493 
494         if (!apr) {
495             continue;
496         }
497         return (i * 32 + ctz32(apr)) << (GIC_MIN_BPR + 1);
498     }
499     /* No current active interrupts: return idle priority */
500     return 0xff;
501 }
502 
503 static uint32_t icc_gprio_mask(GICv3CPUState *cs, int group)
504 {
505     /* Return a mask word which clears the subpriority bits from
506      * a priority value for an interrupt in the specified group.
507      * This depends on the BPR value:
508      *  a BPR of 0 means the group priority bits are [7:1];
509      *  a BPR of 1 means they are [7:2], and so on down to
510      *  a BPR of 7 meaning no group priority bits at all.
511      * Which BPR to use depends on the group of the interrupt and
512      * the current ICC_CTLR.CBPR settings.
513      */
514     if ((group == GICV3_G1 && cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR) ||
515         (group == GICV3_G1NS &&
516          cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR)) {
517         group = GICV3_G0;
518     }
519 
520     return ~0U << ((cs->icc_bpr[group] & 7) + 1);
521 }
522 
523 static bool icc_no_enabled_hppi(GICv3CPUState *cs)
524 {
525     /* Return true if there is no pending interrupt, or the
526      * highest priority pending interrupt is in a group which has been
527      * disabled at the CPU interface by the ICC_IGRPEN* register enable bits.
528      */
529     return cs->hppi.prio == 0xff || (cs->icc_igrpen[cs->hppi.grp] == 0);
530 }
531 
532 static bool icc_hppi_can_preempt(GICv3CPUState *cs)
533 {
534     /* Return true if we have a pending interrupt of sufficient
535      * priority to preempt.
536      */
537     int rprio;
538     uint32_t mask;
539 
540     if (icc_no_enabled_hppi(cs)) {
541         return false;
542     }
543 
544     if (cs->hppi.prio >= cs->icc_pmr_el1) {
545         /* Priority mask masks this interrupt */
546         return false;
547     }
548 
549     rprio = icc_highest_active_prio(cs);
550     if (rprio == 0xff) {
551         /* No currently running interrupt so we can preempt */
552         return true;
553     }
554 
555     mask = icc_gprio_mask(cs, cs->hppi.grp);
556 
557     /* We only preempt a running interrupt if the pending interrupt's
558      * group priority is sufficient (the subpriorities are not considered).
559      */
560     if ((cs->hppi.prio & mask) < (rprio & mask)) {
561         return true;
562     }
563 
564     return false;
565 }
566 
567 void gicv3_cpuif_update(GICv3CPUState *cs)
568 {
569     /* Tell the CPU about its highest priority pending interrupt */
570     int irqlevel = 0;
571     int fiqlevel = 0;
572     ARMCPU *cpu = ARM_CPU(cs->cpu);
573     CPUARMState *env = &cpu->env;
574 
575     trace_gicv3_cpuif_update(gicv3_redist_affid(cs), cs->hppi.irq,
576                              cs->hppi.grp, cs->hppi.prio);
577 
578     if (cs->hppi.grp == GICV3_G1 && !arm_feature(env, ARM_FEATURE_EL3)) {
579         /* If a Security-enabled GIC sends a G1S interrupt to a
580          * Security-disabled CPU, we must treat it as if it were G0.
581          */
582         cs->hppi.grp = GICV3_G0;
583     }
584 
585     if (icc_hppi_can_preempt(cs)) {
586         /* We have an interrupt: should we signal it as IRQ or FIQ?
587          * This is described in the GICv3 spec section 4.6.2.
588          */
589         bool isfiq;
590 
591         switch (cs->hppi.grp) {
592         case GICV3_G0:
593             isfiq = true;
594             break;
595         case GICV3_G1:
596             isfiq = (!arm_is_secure(env) ||
597                      (arm_current_el(env) == 3 && arm_el_is_aa64(env, 3)));
598             break;
599         case GICV3_G1NS:
600             isfiq = arm_is_secure(env);
601             break;
602         default:
603             g_assert_not_reached();
604         }
605 
606         if (isfiq) {
607             fiqlevel = 1;
608         } else {
609             irqlevel = 1;
610         }
611     }
612 
613     trace_gicv3_cpuif_set_irqs(gicv3_redist_affid(cs), fiqlevel, irqlevel);
614 
615     qemu_set_irq(cs->parent_fiq, fiqlevel);
616     qemu_set_irq(cs->parent_irq, irqlevel);
617 }
618 
619 static uint64_t icc_pmr_read(CPUARMState *env, const ARMCPRegInfo *ri)
620 {
621     GICv3CPUState *cs = icc_cs_from_env(env);
622     uint32_t value = cs->icc_pmr_el1;
623 
624     if (icv_access(env, HCR_FMO | HCR_IMO)) {
625         return icv_pmr_read(env, ri);
626     }
627 
628     if (arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env) &&
629         (env->cp15.scr_el3 & SCR_FIQ)) {
630         /* NS access and Group 0 is inaccessible to NS: return the
631          * NS view of the current priority
632          */
633         if (value & 0x80) {
634             /* Secure priorities not visible to NS */
635             value = 0;
636         } else if (value != 0xff) {
637             value = (value << 1) & 0xff;
638         }
639     }
640 
641     trace_gicv3_icc_pmr_read(gicv3_redist_affid(cs), value);
642 
643     return value;
644 }
645 
646 static void icc_pmr_write(CPUARMState *env, const ARMCPRegInfo *ri,
647                           uint64_t value)
648 {
649     GICv3CPUState *cs = icc_cs_from_env(env);
650 
651     if (icv_access(env, HCR_FMO | HCR_IMO)) {
652         return icv_pmr_write(env, ri, value);
653     }
654 
655     trace_gicv3_icc_pmr_write(gicv3_redist_affid(cs), value);
656 
657     value &= 0xff;
658 
659     if (arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env) &&
660         (env->cp15.scr_el3 & SCR_FIQ)) {
661         /* NS access and Group 0 is inaccessible to NS: the written value
662          * is the NS view; map it into the 0x80..0xff half of the range
663          */
664         if (!(cs->icc_pmr_el1 & 0x80)) {
665             /* Current PMR in the secure range, don't allow NS to change it */
666             return;
667         }
668         value = (value >> 1) | 0x80;
669     }
670     cs->icc_pmr_el1 = value;
671     gicv3_cpuif_update(cs);
672 }
673 
674 static void icc_activate_irq(GICv3CPUState *cs, int irq)
675 {
676     /* Move the interrupt from the Pending state to Active, and update
677      * the Active Priority Registers
678      */
679     uint32_t mask = icc_gprio_mask(cs, cs->hppi.grp);
680     int prio = cs->hppi.prio & mask;
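    /* The group priority is always an even value (bit 0 is never a group
     * priority bit), so the APR bit index is simply prio / 2.
     */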
681     int aprbit = prio >> 1;
682     int regno = aprbit / 32;
683     int regbit = aprbit % 32;
684 
685     cs->icc_apr[cs->hppi.grp][regno] |= (1 << regbit);
686 
687     if (irq < GIC_INTERNAL) {
688         cs->gicr_iactiver0 = deposit32(cs->gicr_iactiver0, irq, 1, 1);
689         cs->gicr_ipendr0 = deposit32(cs->gicr_ipendr0, irq, 1, 0);
690         gicv3_redist_update(cs);
691     } else {
692         gicv3_gicd_active_set(cs->gic, irq);
693         gicv3_gicd_pending_clear(cs->gic, irq);
694         gicv3_update(cs->gic, irq, 1);
695     }
696 }
697 
698 static uint64_t icc_hppir0_value(GICv3CPUState *cs, CPUARMState *env)
699 {
700     /* Return the highest priority pending interrupt register value
701      * for group 0.
702      */
703     bool irq_is_secure;
704 
705     if (cs->hppi.prio == 0xff) {
706         return INTID_SPURIOUS;
707     }
708 
709     /* Check whether we can return the interrupt or if we should return
710      * a special identifier, as per the CheckGroup0ForSpecialIdentifiers
711      * pseudocode. (We can simplify a little because for us ICC_SRE_EL1.RM
712      * is always zero.)
713      */
714     irq_is_secure = (!(cs->gic->gicd_ctlr & GICD_CTLR_DS) &&
715                      (cs->hppi.grp != GICV3_G1NS));
716 
717     if (cs->hppi.grp != GICV3_G0 && !arm_is_el3_or_mon(env)) {
718         return INTID_SPURIOUS;
719     }
720     if (irq_is_secure && !arm_is_secure(env)) {
721         /* Secure interrupts not visible to Nonsecure */
722         return INTID_SPURIOUS;
723     }
724 
725     if (cs->hppi.grp != GICV3_G0) {
726         /* Indicate to EL3 that there's a Group 1 interrupt for the other
727          * state pending.
728          */
729         return irq_is_secure ? INTID_SECURE : INTID_NONSECURE;
730     }
731 
732     return cs->hppi.irq;
733 }
734 
735 static uint64_t icc_hppir1_value(GICv3CPUState *cs, CPUARMState *env)
736 {
737     /* Return the highest priority pending interrupt register value
738      * for group 1.
739      */
740     bool irq_is_secure;
741 
742     if (cs->hppi.prio == 0xff) {
743         return INTID_SPURIOUS;
744     }
745 
746     /* Check whether we can return the interrupt or if we should return
747      * a special identifier, as per the CheckGroup1ForSpecialIdentifiers
748      * pseudocode. (We can simplify a little because for us ICC_SRE_EL1.RM
749      * is always zero.)
750      */
751     irq_is_secure = (!(cs->gic->gicd_ctlr & GICD_CTLR_DS) &&
752                      (cs->hppi.grp != GICV3_G1NS));
753 
754     if (cs->hppi.grp == GICV3_G0) {
755         /* Group 0 interrupts not visible via HPPIR1 */
756         return INTID_SPURIOUS;
757     }
758     if (irq_is_secure) {
759         if (!arm_is_secure(env)) {
760             /* Secure interrupts not visible in Non-secure */
761             return INTID_SPURIOUS;
762         }
763     } else if (!arm_is_el3_or_mon(env) && arm_is_secure(env)) {
764         /* Group 1 non-secure interrupts not visible in Secure EL1 */
765         return INTID_SPURIOUS;
766     }
767 
768     return cs->hppi.irq;
769 }
770 
771 static uint64_t icc_iar0_read(CPUARMState *env, const ARMCPRegInfo *ri)
772 {
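    /* Acknowledge (and activate) the highest priority pending Group 0 interrupt */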
773     GICv3CPUState *cs = icc_cs_from_env(env);
774     uint64_t intid;
775 
776     if (!icc_hppi_can_preempt(cs)) {
777         intid = INTID_SPURIOUS;
778     } else {
779         intid = icc_hppir0_value(cs, env);
780     }
781 
782     if (!(intid >= INTID_SECURE && intid <= INTID_SPURIOUS)) {
783         icc_activate_irq(cs, intid);
784     }
785 
786     trace_gicv3_icc_iar0_read(gicv3_redist_affid(cs), intid);
787     return intid;
788 }
789 
790 static uint64_t icc_iar1_read(CPUARMState *env, const ARMCPRegInfo *ri)
791 {
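    /* Acknowledge (and activate) the highest priority pending Group 1 interrupt */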
792     GICv3CPUState *cs = icc_cs_from_env(env);
793     uint64_t intid;
794 
795     if (!icc_hppi_can_preempt(cs)) {
796         intid = INTID_SPURIOUS;
797     } else {
798         intid = icc_hppir1_value(cs, env);
799     }
800 
801     if (!(intid >= INTID_SECURE && intid <= INTID_SPURIOUS)) {
802         icc_activate_irq(cs, intid);
803     }
804 
805     trace_gicv3_icc_iar1_read(gicv3_redist_affid(cs), intid);
806     return intid;
807 }
808 
809 static void icc_drop_prio(GICv3CPUState *cs, int grp)
810 {
811     /* Drop the priority of the currently active interrupt in
812      * the specified group.
813      *
814      * Note that we can guarantee (because of the requirement to nest
815      * ICC_IAR reads [which activate an interrupt and raise priority]
816      * with ICC_EOIR writes [which drop the priority for the interrupt])
817      * that the interrupt we're being called for is the highest priority
818      * active interrupt, meaning that it has the lowest set bit in the
819      * APR registers.
820      *
821      * If the guest does not honour the ordering constraints then the
822      * behaviour of the GIC is UNPREDICTABLE, which for us means that
823      * the values of the APR registers might become incorrect and the
824      * running priority will be wrong, so interrupts that should preempt
825      * might not do so, and interrupts that should not preempt might do so.
826      */
827     int i;
828 
829     for (i = 0; i < ARRAY_SIZE(cs->icc_apr[grp]); i++) {
830         uint64_t *papr = &cs->icc_apr[grp][i];
831 
832         if (!*papr) {
833             continue;
834         }
835         /* Clear the lowest set bit */
836         *papr &= *papr - 1;
837         break;
838     }
839 
840     /* running priority change means we need an update for this cpu i/f */
841     gicv3_cpuif_update(cs);
842 }
843 
844 static bool icc_eoi_split(CPUARMState *env, GICv3CPUState *cs)
845 {
846     /* Return true if we should split priority drop and interrupt
847      * deactivation, ie whether the relevant EOIMode bit is set.
848      */
849     if (arm_is_el3_or_mon(env)) {
850         return cs->icc_ctlr_el3 & ICC_CTLR_EL3_EOIMODE_EL3;
851     }
852     if (arm_is_secure_below_el3(env)) {
853         return cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_EOIMODE;
854     } else {
855         return cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_EOIMODE;
856     }
857 }
858 
859 static int icc_highest_active_group(GICv3CPUState *cs)
860 {
861     /* Return the group with the highest priority active interrupt.
862      * We can do this by just comparing the APRs to see which one
863      * has the lowest set bit.
864      * (If more than one group is active at the same priority then
865      * we're in UNPREDICTABLE territory.)
866      */
867     int i;
868 
869     for (i = 0; i < ARRAY_SIZE(cs->icc_apr[0]); i++) {
870         int g0ctz = ctz32(cs->icc_apr[GICV3_G0][i]);
871         int g1ctz = ctz32(cs->icc_apr[GICV3_G1][i]);
872         int g1nsctz = ctz32(cs->icc_apr[GICV3_G1NS][i]);
873 
874         if (g1nsctz < g0ctz && g1nsctz < g1ctz) {
875             return GICV3_G1NS;
876         }
877         if (g1ctz < g0ctz) {
878             return GICV3_G1;
879         }
880         if (g0ctz < 32) {
881             return GICV3_G0;
882         }
883     }
884     /* No set active bits? UNPREDICTABLE; return -1 so the caller
885      * ignores the spurious EOI attempt.
886      */
887     return -1;
888 }
889 
890 static void icc_deactivate_irq(GICv3CPUState *cs, int irq)
891 {
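    /* Clear the interrupt's Active state in the (re)distributor */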
892     if (irq < GIC_INTERNAL) {
893         cs->gicr_iactiver0 = deposit32(cs->gicr_iactiver0, irq, 1, 0);
894         gicv3_redist_update(cs);
895     } else {
896         gicv3_gicd_active_clear(cs->gic, irq);
897         gicv3_update(cs->gic, irq, 1);
898     }
899 }
900 
901 static bool icv_eoi_split(CPUARMState *env, GICv3CPUState *cs)
902 {
903     /* Return true if we should split priority drop and interrupt
904      * deactivation, ie whether the virtual EOIMode bit is set.
905      */
906     return cs->ich_vmcr_el2 & ICH_VMCR_EL2_VEOIM;
907 }
908 
909 static int icv_find_active(GICv3CPUState *cs, int irq)
910 {
911     /* Given an interrupt number for an active interrupt, return the index
912      * of the corresponding list register, or -1 if there is no match.
913      * Corresponds to FindActiveVirtualInterrupt pseudocode.
914      */
915     int i;
916 
917     for (i = 0; i < cs->num_list_regs; i++) {
918         uint64_t lr = cs->ich_lr_el2[i];
919 
920         if ((lr & ICH_LR_EL2_STATE_ACTIVE_BIT) && ich_lr_vintid(lr) == irq) {
921             return i;
922         }
923     }
924 
925     return -1;
926 }
927 
928 static void icv_deactivate_irq(GICv3CPUState *cs, int idx)
929 {
930     /* Deactivate the interrupt in the specified list register index */
931     uint64_t lr = cs->ich_lr_el2[idx];
932 
933     if (lr & ICH_LR_EL2_HW) {
934         /* Deactivate the associated physical interrupt */
935         int pirq = ich_lr_pintid(lr);
936 
937         if (pirq < INTID_SECURE) {
938             icc_deactivate_irq(cs, pirq);
939         }
940     }
941 
942     /* Clear the 'active' part of the state, so ActivePending->Pending
943      * and Active->Invalid.
944      */
945     lr &= ~ICH_LR_EL2_STATE_ACTIVE_BIT;
946     cs->ich_lr_el2[idx] = lr;
947 }
948 
949 static void icv_increment_eoicount(GICv3CPUState *cs)
950 {
951     /* Increment the EOICOUNT field in ICH_HCR_EL2 */
952     int eoicount = extract64(cs->ich_hcr_el2, ICH_HCR_EL2_EOICOUNT_SHIFT,
953                              ICH_HCR_EL2_EOICOUNT_LENGTH);
954 
955     cs->ich_hcr_el2 = deposit64(cs->ich_hcr_el2, ICH_HCR_EL2_EOICOUNT_SHIFT,
956                                 ICH_HCR_EL2_EOICOUNT_LENGTH, eoicount + 1);
957 }
958 
959 static void icv_dir_write(CPUARMState *env, const ARMCPRegInfo *ri,
960                           uint64_t value)
961 {
962     /* Deactivate interrupt */
963     GICv3CPUState *cs = icc_cs_from_env(env);
964     int idx;
965     int irq = value & 0xffffff;
966 
967     trace_gicv3_icv_dir_write(gicv3_redist_affid(cs), value);
968 
969     if (irq >= cs->gic->num_irq) {
970         /* Also catches special interrupt numbers and LPIs */
971         return;
972     }
973 
974     if (!icv_eoi_split(env, cs)) {
975         return;
976     }
977 
978     idx = icv_find_active(cs, irq);
979 
980     if (idx < 0) {
981         /* No list register matching this, so increment the EOI count
982          * (might trigger a maintenance interrupt)
983          */
984         icv_increment_eoicount(cs);
985     } else {
986         icv_deactivate_irq(cs, idx);
987     }
988 
989     gicv3_cpuif_virt_update(cs);
990 }
991 
992 static void icc_eoir_write(CPUARMState *env, const ARMCPRegInfo *ri,
993                            uint64_t value)
994 {
995     /* End of Interrupt */
996     GICv3CPUState *cs = icc_cs_from_env(env);
997     int irq = value & 0xffffff;
998     int grp;
999 
1000     trace_gicv3_icc_eoir_write(ri->crm == 8 ? 0 : 1,
1001                                gicv3_redist_affid(cs), value);
1002 
1003     if (ri->crm == 8) {
1004         /* EOIR0 */
1005         grp = GICV3_G0;
1006     } else {
1007         /* EOIR1 */
1008         if (arm_is_secure(env)) {
1009             grp = GICV3_G1;
1010         } else {
1011             grp = GICV3_G1NS;
1012         }
1013     }
1014 
1015     if (irq >= cs->gic->num_irq) {
1016         /* This handles two cases:
1017          * 1. If software writes the ID of a spurious interrupt [ie 1020-1023]
1018          * to the GICC_EOIR, the GIC ignores that write.
1019          * 2. If software writes the number of a non-existent interrupt
1020          * this must be a subcase of "value written does not match the last
1021          * valid interrupt value read from the Interrupt Acknowledge
1022          * register" and so this is UNPREDICTABLE. We choose to ignore it.
1023          */
1024         return;
1025     }
1026 
1027     if (icc_highest_active_group(cs) != grp) {
1028         return;
1029     }
1030 
1031     icc_drop_prio(cs, grp);
1032 
1033     if (!icc_eoi_split(env, cs)) {
1034         /* Priority drop and deactivate not split: deactivate irq now */
1035         icc_deactivate_irq(cs, irq);
1036     }
1037 }
1038 
1039 static uint64_t icc_hppir0_read(CPUARMState *env, const ARMCPRegInfo *ri)
1040 {
1041     GICv3CPUState *cs = icc_cs_from_env(env);
1042     uint64_t value;
1043 
1044     if (icv_access(env, HCR_FMO)) {
1045         return icv_hppir_read(env, ri);
1046     }
1047 
1048     value = icc_hppir0_value(cs, env);
1049     trace_gicv3_icc_hppir0_read(gicv3_redist_affid(cs), value);
1050     return value;
1051 }
1052 
1053 static uint64_t icc_hppir1_read(CPUARMState *env, const ARMCPRegInfo *ri)
1054 {
1055     GICv3CPUState *cs = icc_cs_from_env(env);
1056     uint64_t value;
1057 
1058     if (icv_access(env, HCR_IMO)) {
1059         return icv_hppir_read(env, ri);
1060     }
1061 
1062     value = icc_hppir1_value(cs, env);
1063     trace_gicv3_icc_hppir1_read(gicv3_redist_affid(cs), value);
1064     return value;
1065 }
1066 
1067 static uint64_t icc_bpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1068 {
1069     GICv3CPUState *cs = icc_cs_from_env(env);
1070     int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1;
1071     bool satinc = false;
1072     uint64_t bpr;
1073 
1074     if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
1075         return icv_bpr_read(env, ri);
1076     }
1077 
1078     if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
1079         grp = GICV3_G1NS;
1080     }
1081 
1082     if (grp == GICV3_G1 && !arm_is_el3_or_mon(env) &&
1083         (cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR)) {
1084         /* CBPR_EL1S means secure EL1 or AArch32 EL3 !Mon BPR1 accesses
1085          * modify BPR0
1086          */
1087         grp = GICV3_G0;
1088     }
1089 
1090     if (grp == GICV3_G1NS && arm_current_el(env) < 3 &&
1091         (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR)) {
1092         /* reads return bpr0 + 1 sat to 7, writes ignored */
1093         grp = GICV3_G0;
1094         satinc = true;
1095     }
1096 
1097     bpr = cs->icc_bpr[grp];
1098     if (satinc) {
1099         bpr++;
1100         bpr = MIN(bpr, 7);
1101     }
1102 
1103     trace_gicv3_icc_bpr_read(ri->crm == 8 ? 0 : 1, gicv3_redist_affid(cs), bpr);
1104 
1105     return bpr;
1106 }
1107 
1108 static void icc_bpr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1109                           uint64_t value)
1110 {
1111     GICv3CPUState *cs = icc_cs_from_env(env);
1112     int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1;
1113 
1114     if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
1115         icv_bpr_write(env, ri, value);
1116         return;
1117     }
1118 
1119     trace_gicv3_icc_bpr_write(ri->crm == 8 ? 0 : 1,
1120                               gicv3_redist_affid(cs), value);
1121 
1122     if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
1123         grp = GICV3_G1NS;
1124     }
1125 
1126     if (grp == GICV3_G1 && !arm_is_el3_or_mon(env) &&
1127         (cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR)) {
1128         /* CBPR_EL1S means secure EL1 or AArch32 EL3 !Mon BPR1 accesses
1129          * modify BPR0
1130          */
1131         grp = GICV3_G0;
1132     }
1133 
1134     if (grp == GICV3_G1NS && arm_current_el(env) < 3 &&
1135         (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR)) {
1136         /* reads return bpr0 + 1 sat to 7, writes ignored */
1137         return;
1138     }
1139 
1140     cs->icc_bpr[grp] = value & 7;
1141     gicv3_cpuif_update(cs);
1142 }
1143 
1144 static uint64_t icc_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
1145 {
1146     GICv3CPUState *cs = icc_cs_from_env(env);
1147     uint64_t value;
1148 
1149     int regno = ri->opc2 & 3;
1150     int grp = (ri->crm & 1) ? GICV3_G1 : GICV3_G0;
1151 
1152     if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
1153         return icv_ap_read(env, ri);
1154     }
1155 
1156     if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
1157         grp = GICV3_G1NS;
1158     }
1159 
1160     value = cs->icc_apr[grp][regno];
1161 
1162     trace_gicv3_icc_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
1163     return value;
1164 }
1165 
1166 static void icc_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
1167                          uint64_t value)
1168 {
1169     GICv3CPUState *cs = icc_cs_from_env(env);
1170 
1171     int regno = ri->opc2 & 3;
1172     int grp = (ri->crm & 1) ? GICV3_G1 : GICV3_G0;
1173 
1174     if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
1175         icv_ap_write(env, ri, value);
1176         return;
1177     }
1178 
1179     trace_gicv3_icc_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
1180 
1181     if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
1182         grp = GICV3_G1NS;
1183     }
1184 
1185     /* It's not possible to claim that a Non-secure interrupt is active
1186      * at a priority outside the Non-secure range (128..255), since this
1187      * would otherwise allow malicious NS code to block delivery of S interrupts
1188      * by writing a bad value to these registers.
1189      */
1190     if (grp == GICV3_G1NS && regno < 2 && arm_feature(env, ARM_FEATURE_EL3)) {
1191         return;
1192     }
1193 
1194     cs->icc_apr[grp][regno] = value & 0xFFFFFFFFU;
1195     gicv3_cpuif_update(cs);
1196 }
1197 
1198 static void icc_dir_write(CPUARMState *env, const ARMCPRegInfo *ri,
1199                           uint64_t value)
1200 {
1201     /* Deactivate interrupt */
1202     GICv3CPUState *cs = icc_cs_from_env(env);
1203     int irq = value & 0xffffff;
1204     bool irq_is_secure, single_sec_state, irq_is_grp0;
1205     bool route_fiq_to_el3, route_irq_to_el3, route_fiq_to_el2, route_irq_to_el2;
1206 
1207     if (icv_access(env, HCR_FMO | HCR_IMO)) {
1208         icv_dir_write(env, ri, value);
1209         return;
1210     }
1211 
1212     trace_gicv3_icc_dir_write(gicv3_redist_affid(cs), value);
1213 
1214     if (irq >= cs->gic->num_irq) {
1215         /* Also catches special interrupt numbers and LPIs */
1216         return;
1217     }
1218 
1219     if (!icc_eoi_split(env, cs)) {
1220         return;
1221     }
1222 
1223     int grp = gicv3_irq_group(cs->gic, cs, irq);
1224 
1225     single_sec_state = cs->gic->gicd_ctlr & GICD_CTLR_DS;
1226     irq_is_secure = !single_sec_state && (grp != GICV3_G1NS);
1227     irq_is_grp0 = grp == GICV3_G0;
1228 
1229     /* Check whether we're allowed to deactivate this interrupt based
1230      * on its group and the current CPU state.
1231      * These checks are laid out to correspond to the spec's pseudocode.
1232      */
1233     route_fiq_to_el3 = env->cp15.scr_el3 & SCR_FIQ;
1234     route_irq_to_el3 = env->cp15.scr_el3 & SCR_IRQ;
1235     /* No need to include !IsSecure in route_*_to_el2 as it's only
1236      * tested in cases where we know !IsSecure is true.
1237      */
1238     route_fiq_to_el2 = env->cp15.hcr_el2 & HCR_FMO;
1239     route_irq_to_el2 = env->cp15.hcr_el2 & HCR_IMO;
1240 
1241     switch (arm_current_el(env)) {
1242     case 3:
1243         break;
1244     case 2:
1245         if (single_sec_state && irq_is_grp0 && !route_fiq_to_el3) {
1246             break;
1247         }
1248         if (!irq_is_secure && !irq_is_grp0 && !route_irq_to_el3) {
1249             break;
1250         }
1251         return;
1252     case 1:
1253         if (!arm_is_secure_below_el3(env)) {
1254             if (single_sec_state && irq_is_grp0 &&
1255                 !route_fiq_to_el3 && !route_fiq_to_el2) {
1256                 break;
1257             }
1258             if (!irq_is_secure && !irq_is_grp0 &&
1259                 !route_irq_to_el3 && !route_irq_to_el2) {
1260                 break;
1261             }
1262         } else {
1263             if (irq_is_grp0 && !route_fiq_to_el3) {
1264                 break;
1265             }
1266             if (!irq_is_grp0 &&
1267                 (!irq_is_secure || !single_sec_state) &&
1268                 !route_irq_to_el3) {
1269                 break;
1270             }
1271         }
1272         return;
1273     default:
1274         g_assert_not_reached();
1275     }
1276 
1277     icc_deactivate_irq(cs, irq);
1278 }
1279 
1280 static uint64_t icc_rpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1281 {
1282     GICv3CPUState *cs = icc_cs_from_env(env);
1283     int prio;
1284 
1285     if (icv_access(env, HCR_FMO | HCR_IMO)) {
1286         return icv_rpr_read(env, ri);
1287     }
1288 
1289     prio = icc_highest_active_prio(cs);
1290 
1291     if (arm_feature(env, ARM_FEATURE_EL3) &&
1292         !arm_is_secure(env) && (env->cp15.scr_el3 & SCR_FIQ)) {
1293         /* NS GIC access and Group 0 is inaccessible to NS */
1294         if (prio & 0x80) {
1295             /* NS mustn't see priorities in the Secure half of the range */
1296             prio = 0;
1297         } else if (prio != 0xff) {
1298             /* Non-idle priority: show the Non-secure view of it */
1299             prio = (prio << 1) & 0xff;
1300         }
1301     }
1302 
1303     trace_gicv3_icc_rpr_read(gicv3_redist_affid(cs), prio);
1304     return prio;
1305 }
1306 
1307 static void icc_generate_sgi(CPUARMState *env, GICv3CPUState *cs,
1308                              uint64_t value, int grp, bool ns)
1309 {
1310     GICv3State *s = cs->gic;
1311 
1312     /* Extract Aff3/Aff2/Aff1 and shift into the bottom 24 bits */
1313     uint64_t aff = extract64(value, 48, 8) << 16 |
1314         extract64(value, 32, 8) << 8 |
1315         extract64(value, 16, 8);
1316     uint32_t targetlist = extract64(value, 0, 16);
1317     uint32_t irq = extract64(value, 24, 4);
1318     bool irm = extract64(value, 40, 1);
1319     int i;
1320 
1321     if (grp == GICV3_G1 && s->gicd_ctlr & GICD_CTLR_DS) {
1322         /* If GICD_CTLR.DS == 1, the Distributor treats Secure Group 1
1323          * interrupts as Group 0 interrupts and must send Secure Group 0
1324          * interrupts to the target CPUs.
1325          */
1326         grp = GICV3_G0;
1327     }
1328 
1329     trace_gicv3_icc_generate_sgi(gicv3_redist_affid(cs), irq, irm,
1330                                  aff, targetlist);
1331 
1332     for (i = 0; i < s->num_cpu; i++) {
1333         GICv3CPUState *ocs = &s->cpu[i];
1334 
1335         if (irm) {
1336             /* IRM == 1 : route to all CPUs except self */
1337             if (cs == ocs) {
1338                 continue;
1339             }
1340         } else {
1341             /* IRM == 0 : route to Aff3.Aff2.Aff1.n for all n in [0..15]
1342              * where the corresponding bit is set in targetlist
1343              */
1344             int aff0;
1345 
1346             if (ocs->gicr_typer >> 40 != aff) {
1347                 continue;
1348             }
1349             aff0 = extract64(ocs->gicr_typer, 32, 8);
1350             if (aff0 > 15 || extract32(targetlist, aff0, 1) == 0) {
1351                 continue;
1352             }
1353         }
1354 
1355         /* The redistributor will check against its own GICR_NSACR as needed */
1356         gicv3_redist_send_sgi(ocs, grp, irq, ns);
1357     }
1358 }
1359 
1360 static void icc_sgi0r_write(CPUARMState *env, const ARMCPRegInfo *ri,
1361                            uint64_t value)
1362 {
1363     /* Generate Secure Group 0 SGI. */
1364     GICv3CPUState *cs = icc_cs_from_env(env);
1365     bool ns = !arm_is_secure(env);
1366 
1367     icc_generate_sgi(env, cs, value, GICV3_G0, ns);
1368 }
1369 
1370 static void icc_sgi1r_write(CPUARMState *env, const ARMCPRegInfo *ri,
1371                            uint64_t value)
1372 {
1373     /* Generate Group 1 SGI for the current Security state */
1374     GICv3CPUState *cs = icc_cs_from_env(env);
1375     int grp;
1376     bool ns = !arm_is_secure(env);
1377 
1378     grp = ns ? GICV3_G1NS : GICV3_G1;
1379     icc_generate_sgi(env, cs, value, grp, ns);
1380 }
1381 
1382 static void icc_asgi1r_write(CPUARMState *env, const ARMCPRegInfo *ri,
1383                              uint64_t value)
1384 {
1385     /* Generate Group 1 SGI for the Security state that is not
1386      * the current state
1387      */
1388     GICv3CPUState *cs = icc_cs_from_env(env);
1389     int grp;
1390     bool ns = !arm_is_secure(env);
1391 
1392     grp = ns ? GICV3_G1 : GICV3_G1NS;
1393     icc_generate_sgi(env, cs, value, grp, ns);
1394 }
1395 
1396 static uint64_t icc_igrpen_read(CPUARMState *env, const ARMCPRegInfo *ri)
1397 {
1398     GICv3CPUState *cs = icc_cs_from_env(env);
1399     int grp = ri->opc2 & 1 ? GICV3_G1 : GICV3_G0;
1400     uint64_t value;
1401 
1402     if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
1403         return icv_igrpen_read(env, ri);
1404     }
1405 
1406     if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
1407         grp = GICV3_G1NS;
1408     }
1409 
1410     value = cs->icc_igrpen[grp];
1411     trace_gicv3_icc_igrpen_read(ri->opc2 & 1 ? 1 : 0,
1412                                 gicv3_redist_affid(cs), value);
1413     return value;
1414 }
1415 
1416 static void icc_igrpen_write(CPUARMState *env, const ARMCPRegInfo *ri,
1417                              uint64_t value)
1418 {
1419     GICv3CPUState *cs = icc_cs_from_env(env);
1420     int grp = ri->opc2 & 1 ? GICV3_G1 : GICV3_G0;
1421 
1422     if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
1423         icv_igrpen_write(env, ri, value);
1424         return;
1425     }
1426 
1427     trace_gicv3_icc_igrpen_write(ri->opc2 & 1 ? 1 : 0,
1428                                  gicv3_redist_affid(cs), value);
1429 
1430     if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
1431         grp = GICV3_G1NS;
1432     }
1433 
1434     cs->icc_igrpen[grp] = value & ICC_IGRPEN_ENABLE;
1435     gicv3_cpuif_update(cs);
1436 }
1437 
1438 static uint64_t icc_igrpen1_el3_read(CPUARMState *env, const ARMCPRegInfo *ri)
1439 {
1440     GICv3CPUState *cs = icc_cs_from_env(env);
1441     uint64_t value;
1442 
1443     /* IGRPEN1_EL3 bits 0 and 1 are r/w aliases into IGRPEN1_EL1 NS and S */
1444     value = cs->icc_igrpen[GICV3_G1NS] | (cs->icc_igrpen[GICV3_G1] << 1);
1445     trace_gicv3_icc_igrpen1_el3_read(gicv3_redist_affid(cs), value);
1446     return value;
1447 }
1448 
1449 static void icc_igrpen1_el3_write(CPUARMState *env, const ARMCPRegInfo *ri,
1450                                   uint64_t value)
1451 {
1452     GICv3CPUState *cs = icc_cs_from_env(env);
1453 
1454     trace_gicv3_icc_igrpen1_el3_write(gicv3_redist_affid(cs), value);
1455 
1456     /* IGRPEN1_EL3 bits 0 and 1 are r/w aliases into IGRPEN1_EL1 NS and S */
1457     cs->icc_igrpen[GICV3_G1NS] = extract32(value, 0, 1);
1458     cs->icc_igrpen[GICV3_G1] = extract32(value, 1, 1);
1459     gicv3_cpuif_update(cs);
1460 }
1461 
1462 static uint64_t icc_ctlr_el1_read(CPUARMState *env, const ARMCPRegInfo *ri)
1463 {
1464     GICv3CPUState *cs = icc_cs_from_env(env);
1465     int bank = gicv3_use_ns_bank(env) ? GICV3_NS : GICV3_S;
1466     uint64_t value;
1467 
1468     if (icv_access(env, HCR_FMO | HCR_IMO)) {
1469         return icv_ctlr_read(env, ri);
1470     }
1471 
1472     value = cs->icc_ctlr_el1[bank];
1473     trace_gicv3_icc_ctlr_read(gicv3_redist_affid(cs), value);
1474     return value;
1475 }
1476 
1477 static void icc_ctlr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
1478                                uint64_t value)
1479 {
1480     GICv3CPUState *cs = icc_cs_from_env(env);
1481     int bank = gicv3_use_ns_bank(env) ? GICV3_NS : GICV3_S;
1482     uint64_t mask;
1483 
1484     if (icv_access(env, HCR_FMO | HCR_IMO)) {
1485         icv_ctlr_write(env, ri, value);
1486         return;
1487     }
1488 
1489     trace_gicv3_icc_ctlr_write(gicv3_redist_affid(cs), value);
1490 
1491     /* Only CBPR and EOIMODE can be RW;
1492      * for us PMHE is RAZ/WI (we don't implement 1-of-N interrupts or
1493      * the associated priority-based routing of them);
1494      * if EL3 is implemented and GICD_CTLR.DS == 0, then PMHE and CBPR are RO.
1495      */
1496     if (arm_feature(env, ARM_FEATURE_EL3) &&
1497         ((cs->gic->gicd_ctlr & GICD_CTLR_DS) == 0)) {
1498         mask = ICC_CTLR_EL1_EOIMODE;
1499     } else {
1500         mask = ICC_CTLR_EL1_CBPR | ICC_CTLR_EL1_EOIMODE;
1501     }
1502 
1503     cs->icc_ctlr_el1[bank] &= ~mask;
1504     cs->icc_ctlr_el1[bank] |= (value & mask);
1505     gicv3_cpuif_update(cs);
1506 }
1507 
1508 
1509 static uint64_t icc_ctlr_el3_read(CPUARMState *env, const ARMCPRegInfo *ri)
1510 {
1511     GICv3CPUState *cs = icc_cs_from_env(env);
1512     uint64_t value;
1513 
1514     value = cs->icc_ctlr_el3;
1515     if (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_EOIMODE) {
1516         value |= ICC_CTLR_EL3_EOIMODE_EL1NS;
1517     }
1518     if (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR) {
1519         value |= ICC_CTLR_EL3_CBPR_EL1NS;
1520     }
1521     if (cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_EOIMODE) {
1522         value |= ICC_CTLR_EL3_EOIMODE_EL1S;
1523     }
1524     if (cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR) {
1525         value |= ICC_CTLR_EL3_CBPR_EL1S;
1526     }
1527 
1528     trace_gicv3_icc_ctlr_el3_read(gicv3_redist_affid(cs), value);
1529     return value;
1530 }
1531 
1532 static void icc_ctlr_el3_write(CPUARMState *env, const ARMCPRegInfo *ri,
1533                                uint64_t value)
1534 {
1535     GICv3CPUState *cs = icc_cs_from_env(env);
1536     uint64_t mask;
1537 
1538     trace_gicv3_icc_ctlr_el3_write(gicv3_redist_affid(cs), value);
1539 
1540     /* *_EL1NS and *_EL1S bits are aliases into the ICC_CTLR_EL1 bits. */
1541     cs->icc_ctlr_el1[GICV3_NS] &= ~(ICC_CTLR_EL1_CBPR | ICC_CTLR_EL1_EOIMODE);
1542     if (value & ICC_CTLR_EL3_EOIMODE_EL1NS) {
1543         cs->icc_ctlr_el1[GICV3_NS] |= ICC_CTLR_EL1_EOIMODE;
1544     }
1545     if (value & ICC_CTLR_EL3_CBPR_EL1NS) {
1546         cs->icc_ctlr_el1[GICV3_NS] |= ICC_CTLR_EL1_CBPR;
1547     }
1548 
1549     cs->icc_ctlr_el1[GICV3_S] &= ~(ICC_CTLR_EL1_CBPR | ICC_CTLR_EL1_EOIMODE);
1550     if (value & ICC_CTLR_EL3_EOIMODE_EL1S) {
1551         cs->icc_ctlr_el1[GICV3_S] |= ICC_CTLR_EL1_EOIMODE;
1552     }
1553     if (value & ICC_CTLR_EL3_CBPR_EL1S) {
1554         cs->icc_ctlr_el1[GICV3_S] |= ICC_CTLR_EL1_CBPR;
1555     }
1556 
1557     /* The only bit stored in icc_ctlr_el3 which is writeable is EOIMODE_EL3: */
1558     mask = ICC_CTLR_EL3_EOIMODE_EL3;
1559 
1560     cs->icc_ctlr_el3 &= ~mask;
1561     cs->icc_ctlr_el3 |= (value & mask);
1562     gicv3_cpuif_update(cs);
1563 }
1564 
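/* Access check used for registers common to Group 0 and Group 1
 * (e.g. ICC_PMR_EL1): when both SCR_EL3.FIQ and SCR_EL3.IRQ are set,
 * trap Secure EL1, EL2, and NS EL1 accesses (unless HCR_EL2.IMO/FMO
 * would redirect the NS EL1 access to the virtual interface) to EL3.
 */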
1565 static CPAccessResult gicv3_irqfiq_access(CPUARMState *env,
1566                                           const ARMCPRegInfo *ri, bool isread)
1567 {
1568     CPAccessResult r = CP_ACCESS_OK;
1569 
1570     if ((env->cp15.scr_el3 & (SCR_FIQ | SCR_IRQ)) == (SCR_FIQ | SCR_IRQ)) {
1571         switch (arm_current_el(env)) {
1572         case 1:
1573             if (arm_is_secure_below_el3(env) ||
1574                 ((env->cp15.hcr_el2 & (HCR_IMO | HCR_FMO)) == 0)) {
1575                 r = CP_ACCESS_TRAP_EL3;
1576             }
1577             break;
1578         case 2:
1579             r = CP_ACCESS_TRAP_EL3;
1580             break;
1581         case 3:
1582             if (!is_a64(env) && !arm_is_el3_or_mon(env)) {
1583                 r = CP_ACCESS_TRAP_EL3;
1584             }
1585             break;
1586         default:
1587             g_assert_not_reached();
1588         }
1589     }
1590 
1591     if (r == CP_ACCESS_TRAP_EL3 && !arm_el_is_aa64(env, 3)) {
1592         r = CP_ACCESS_TRAP;
1593     }
1594     return r;
1595 }
1596 
1597 static CPAccessResult gicv3_fiq_access(CPUARMState *env,
1598                                        const ARMCPRegInfo *ri, bool isread)
1599 {
1600     CPAccessResult r = CP_ACCESS_OK;
1601 
1602     if (env->cp15.scr_el3 & SCR_FIQ) {
1603         switch (arm_current_el(env)) {
1604         case 1:
1605             if (arm_is_secure_below_el3(env) ||
1606                 ((env->cp15.hcr_el2 & HCR_FMO) == 0)) {
1607                 r = CP_ACCESS_TRAP_EL3;
1608             }
1609             break;
1610         case 2:
1611             r = CP_ACCESS_TRAP_EL3;
1612             break;
1613         case 3:
1614             if (!is_a64(env) && !arm_is_el3_or_mon(env)) {
1615                 r = CP_ACCESS_TRAP_EL3;
1616             }
1617             break;
1618         default:
1619             g_assert_not_reached();
1620         }
1621     }
1622 
1623     if (r == CP_ACCESS_TRAP_EL3 && !arm_el_is_aa64(env, 3)) {
1624         r = CP_ACCESS_TRAP;
1625     }
1626     return r;
1627 }
1628 
1629 static CPAccessResult gicv3_irq_access(CPUARMState *env,
1630                                        const ARMCPRegInfo *ri, bool isread)
1631 {
1632     CPAccessResult r = CP_ACCESS_OK;
1633 
1634     if (env->cp15.scr_el3 & SCR_IRQ) {
1635         switch (arm_current_el(env)) {
1636         case 1:
1637             if (arm_is_secure_below_el3(env) ||
1638                 ((env->cp15.hcr_el2 & HCR_IMO) == 0)) {
1639                 r = CP_ACCESS_TRAP_EL3;
1640             }
1641             break;
1642         case 2:
1643             r = CP_ACCESS_TRAP_EL3;
1644             break;
1645         case 3:
1646             if (!is_a64(env) && !arm_is_el3_or_mon(env)) {
1647                 r = CP_ACCESS_TRAP_EL3;
1648             }
1649             break;
1650         default:
1651             g_assert_not_reached();
1652         }
1653     }
1654 
1655     if (r == CP_ACCESS_TRAP_EL3 && !arm_el_is_aa64(env, 3)) {
1656         r = CP_ACCESS_TRAP;
1657     }
1658     return r;
1659 }
1660 
1661 static void icc_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1662 {
1663     GICv3CPUState *cs = icc_cs_from_env(env);
1664 
1665     cs->icc_ctlr_el1[GICV3_S] = ICC_CTLR_EL1_A3V |
1666         (1 << ICC_CTLR_EL1_IDBITS_SHIFT) |
1667         (7 << ICC_CTLR_EL1_PRIBITS_SHIFT);
1668     cs->icc_ctlr_el1[GICV3_NS] = ICC_CTLR_EL1_A3V |
1669         (1 << ICC_CTLR_EL1_IDBITS_SHIFT) |
1670         (7 << ICC_CTLR_EL1_PRIBITS_SHIFT);
1671     cs->icc_pmr_el1 = 0;
1672     cs->icc_bpr[GICV3_G0] = GIC_MIN_BPR;
1673     cs->icc_bpr[GICV3_G1] = GIC_MIN_BPR;
1674     if (arm_feature(env, ARM_FEATURE_EL3)) {
1675         cs->icc_bpr[GICV3_G1NS] = GIC_MIN_BPR_NS;
1676     } else {
1677         cs->icc_bpr[GICV3_G1NS] = GIC_MIN_BPR;
1678     }
1679     memset(cs->icc_apr, 0, sizeof(cs->icc_apr));
1680     memset(cs->icc_igrpen, 0, sizeof(cs->icc_igrpen));
1681     cs->icc_ctlr_el3 = ICC_CTLR_EL3_NDS | ICC_CTLR_EL3_A3V |
1682         (1 << ICC_CTLR_EL3_IDBITS_SHIFT) |
1683         (7 << ICC_CTLR_EL3_PRIBITS_SHIFT);
1684 
1685     memset(cs->ich_apr, 0, sizeof(cs->ich_apr));
1686     cs->ich_hcr_el2 = 0;
1687     memset(cs->ich_lr_el2, 0, sizeof(cs->ich_lr_el2));
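    /* Reset the virtual interface control: VFIQEn stays set (it behaves
     * as RES1 when, as here, the guest only has the system register
     * interface and no GICv2 legacy operation), and the virtual BPR
     * fields reset to the minimum value for the configured vprebits.
     */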
1688     cs->ich_vmcr_el2 = ICH_VMCR_EL2_VFIQEN |
1689         (icv_min_vbpr(cs) << ICH_VMCR_EL2_VBPR1_SHIFT) |
1690         (icv_min_vbpr(cs) << ICH_VMCR_EL2_VBPR0_SHIFT);
1691 }
1692 
1693 static const ARMCPRegInfo gicv3_cpuif_reginfo[] = {
1694     { .name = "ICC_PMR_EL1", .state = ARM_CP_STATE_BOTH,
1695       .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 6, .opc2 = 0,
1696       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1697       .access = PL1_RW, .accessfn = gicv3_irqfiq_access,
1698       .readfn = icc_pmr_read,
1699       .writefn = icc_pmr_write,
1700       /* We hang the whole cpu interface reset routine off here
1701        * rather than parcelling it out into one little function
1702        * per register
1703        */
1704       .resetfn = icc_reset,
1705     },
1706     { .name = "ICC_IAR0_EL1", .state = ARM_CP_STATE_BOTH,
1707       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 0,
1708       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1709       .access = PL1_R, .accessfn = gicv3_fiq_access,
1710       .readfn = icc_iar0_read,
1711     },
1712     { .name = "ICC_EOIR0_EL1", .state = ARM_CP_STATE_BOTH,
1713       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 1,
1714       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1715       .access = PL1_W, .accessfn = gicv3_fiq_access,
1716       .writefn = icc_eoir_write,
1717     },
1718     { .name = "ICC_HPPIR0_EL1", .state = ARM_CP_STATE_BOTH,
1719       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 2,
1720       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1721       .access = PL1_R, .accessfn = gicv3_fiq_access,
1722       .readfn = icc_hppir0_read,
1723     },
1724     { .name = "ICC_BPR0_EL1", .state = ARM_CP_STATE_BOTH,
1725       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 3,
1726       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1727       .access = PL1_RW, .accessfn = gicv3_fiq_access,
1728       .readfn = icc_bpr_read,
1729       .writefn = icc_bpr_write,
1730     },
1731     { .name = "ICC_AP0R0_EL1", .state = ARM_CP_STATE_BOTH,
1732       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 4,
1733       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1734       .access = PL1_RW, .accessfn = gicv3_fiq_access,
1735       .readfn = icc_ap_read,
1736       .writefn = icc_ap_write,
1737     },
1738     { .name = "ICC_AP0R1_EL1", .state = ARM_CP_STATE_BOTH,
1739       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 5,
1740       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1741       .access = PL1_RW, .accessfn = gicv3_fiq_access,
1742       .readfn = icc_ap_read,
1743       .writefn = icc_ap_write,
1744     },
1745     { .name = "ICC_AP0R2_EL1", .state = ARM_CP_STATE_BOTH,
1746       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 6,
1747       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1748       .access = PL1_RW, .accessfn = gicv3_fiq_access,
1749       .readfn = icc_ap_read,
1750       .writefn = icc_ap_write,
1751     },
1752     { .name = "ICC_AP0R3_EL1", .state = ARM_CP_STATE_BOTH,
1753       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 7,
1754       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1755       .access = PL1_RW, .accessfn = gicv3_fiq_access,
1756       .readfn = icc_ap_read,
1757       .writefn = icc_ap_write,
1758     },
1759     /* All the ICC_AP1R*_EL1 registers are banked */
1760     { .name = "ICC_AP1R0_EL1", .state = ARM_CP_STATE_BOTH,
1761       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 0,
1762       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1763       .access = PL1_RW, .accessfn = gicv3_irq_access,
1764       .readfn = icc_ap_read,
1765       .writefn = icc_ap_write,
1766     },
1767     { .name = "ICC_AP1R1_EL1", .state = ARM_CP_STATE_BOTH,
1768       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 1,
1769       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1770       .access = PL1_RW, .accessfn = gicv3_irq_access,
1771       .readfn = icc_ap_read,
1772       .writefn = icc_ap_write,
1773     },
1774     { .name = "ICC_AP1R2_EL1", .state = ARM_CP_STATE_BOTH,
1775       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 2,
1776       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1777       .access = PL1_RW, .accessfn = gicv3_irq_access,
1778       .readfn = icc_ap_read,
1779       .writefn = icc_ap_write,
1780     },
1781     { .name = "ICC_AP1R3_EL1", .state = ARM_CP_STATE_BOTH,
1782       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 3,
1783       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1784       .access = PL1_RW, .accessfn = gicv3_irq_access,
1785       .readfn = icc_ap_read,
1786       .writefn = icc_ap_write,
1787     },
1788     { .name = "ICC_DIR_EL1", .state = ARM_CP_STATE_BOTH,
1789       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 1,
1790       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1791       .access = PL1_W, .accessfn = gicv3_irqfiq_access,
1792       .writefn = icc_dir_write,
1793     },
1794     { .name = "ICC_RPR_EL1", .state = ARM_CP_STATE_BOTH,
1795       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 3,
1796       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1797       .access = PL1_R, .accessfn = gicv3_irqfiq_access,
1798       .readfn = icc_rpr_read,
1799     },
1800     { .name = "ICC_SGI1R_EL1", .state = ARM_CP_STATE_AA64,
1801       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 5,
1802       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1803       .access = PL1_W, .accessfn = gicv3_irqfiq_access,
1804       .writefn = icc_sgi1r_write,
1805     },
1806     { .name = "ICC_SGI1R",
1807       .cp = 15, .opc1 = 0, .crm = 12,
1808       .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_RAW,
1809       .access = PL1_W, .accessfn = gicv3_irqfiq_access,
1810       .writefn = icc_sgi1r_write,
1811     },
1812     { .name = "ICC_ASGI1R_EL1", .state = ARM_CP_STATE_AA64,
1813       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 6,
1814       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1815       .access = PL1_W, .accessfn = gicv3_irqfiq_access,
1816       .writefn = icc_asgi1r_write,
1817     },
1818     { .name = "ICC_ASGI1R",
1819       .cp = 15, .opc1 = 1, .crm = 12,
1820       .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_RAW,
1821       .access = PL1_W, .accessfn = gicv3_irqfiq_access,
1822       .writefn = icc_asgi1r_write,
1823     },
1824     { .name = "ICC_SGI0R_EL1", .state = ARM_CP_STATE_AA64,
1825       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 7,
1826       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1827       .access = PL1_W, .accessfn = gicv3_irqfiq_access,
1828       .writefn = icc_sgi0r_write,
1829     },
1830     { .name = "ICC_SGI0R",
1831       .cp = 15, .opc1 = 2, .crm = 12,
1832       .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_RAW,
1833       .access = PL1_W, .accessfn = gicv3_irqfiq_access,
1834       .writefn = icc_sgi0r_write,
1835     },
1836     { .name = "ICC_IAR1_EL1", .state = ARM_CP_STATE_BOTH,
1837       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 0,
1838       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1839       .access = PL1_R, .accessfn = gicv3_irq_access,
1840       .readfn = icc_iar1_read,
1841     },
1842     { .name = "ICC_EOIR1_EL1", .state = ARM_CP_STATE_BOTH,
1843       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 1,
1844       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1845       .access = PL1_W, .accessfn = gicv3_irq_access,
1846       .writefn = icc_eoir_write,
1847     },
1848     { .name = "ICC_HPPIR1_EL1", .state = ARM_CP_STATE_BOTH,
1849       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 2,
1850       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1851       .access = PL1_R, .accessfn = gicv3_irq_access,
1852       .readfn = icc_hppir1_read,
1853     },
1854     /* This register is banked */
1855     { .name = "ICC_BPR1_EL1", .state = ARM_CP_STATE_BOTH,
1856       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 3,
1857       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1858       .access = PL1_RW, .accessfn = gicv3_irq_access,
1859       .readfn = icc_bpr_read,
1860       .writefn = icc_bpr_write,
1861     },
1862     /* This register is banked */
1863     { .name = "ICC_CTLR_EL1", .state = ARM_CP_STATE_BOTH,
1864       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 4,
1865       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1866       .access = PL1_RW, .accessfn = gicv3_irqfiq_access,
1867       .readfn = icc_ctlr_el1_read,
1868       .writefn = icc_ctlr_el1_write,
1869     },
1870     { .name = "ICC_SRE_EL1", .state = ARM_CP_STATE_BOTH,
1871       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 5,
1872       .type = ARM_CP_NO_RAW | ARM_CP_CONST,
1873       .access = PL1_RW,
1874       /* We don't support IRQ/FIQ bypass and system registers are
1875        * always enabled, so all our bits are RAZ/WI or RAO/WI.
1876        * This register is banked but since it's constant we don't
1877        * need to do anything special.
1878        */
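      /* 0x7 == DIB | DFB | SRE: both bypasses disabled, sysregs enabled */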
1879       .resetvalue = 0x7,
1880     },
1881     { .name = "ICC_IGRPEN0_EL1", .state = ARM_CP_STATE_BOTH,
1882       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 6,
1883       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1884       .access = PL1_RW, .accessfn = gicv3_fiq_access,
1885       .readfn = icc_igrpen_read,
1886       .writefn = icc_igrpen_write,
1887     },
1888     /* This register is banked */
1889     { .name = "ICC_IGRPEN1_EL1", .state = ARM_CP_STATE_BOTH,
1890       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 7,
1891       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1892       .access = PL1_RW, .accessfn = gicv3_irq_access,
1893       .readfn = icc_igrpen_read,
1894       .writefn = icc_igrpen_write,
1895     },
1896     { .name = "ICC_SRE_EL2", .state = ARM_CP_STATE_BOTH,
1897       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 5,
1898       .type = ARM_CP_NO_RAW | ARM_CP_CONST,
1899       .access = PL2_RW,
1900       /* We don't support IRQ/FIQ bypass and system registers are
1901        * always enabled, so all our bits are RAZ/WI or RAO/WI.
1902        */
1903       .resetvalue = 0xf,
1904     },
1905     { .name = "ICC_CTLR_EL3", .state = ARM_CP_STATE_BOTH,
1906       .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 12, .opc2 = 4,
1907       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1908       .access = PL3_RW,
1909       .readfn = icc_ctlr_el3_read,
1910       .writefn = icc_ctlr_el3_write,
1911     },
1912     { .name = "ICC_SRE_EL3", .state = ARM_CP_STATE_BOTH,
1913       .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 12, .opc2 = 5,
1914       .type = ARM_CP_NO_RAW | ARM_CP_CONST,
1915       .access = PL3_RW,
1916       /* We don't support IRQ/FIQ bypass and system registers are
1917        * always enabled, so all our bits are RAZ/WI or RAO/WI.
1918        */
1919       .resetvalue = 0xf,
1920     },
1921     { .name = "ICC_IGRPEN1_EL3", .state = ARM_CP_STATE_BOTH,
1922       .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 12, .opc2 = 7,
1923       .type = ARM_CP_IO | ARM_CP_NO_RAW,
1924       .access = PL3_RW,
1925       .readfn = icc_igrpen1_el3_read,
1926       .writefn = icc_igrpen1_el3_write,
1927     },
1928     REGINFO_SENTINEL
1929 };
1930 
1931 static uint64_t ich_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
1932 {
1933     GICv3CPUState *cs = icc_cs_from_env(env);
1934     int regno = ri->opc2 & 3;
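    /* crm == 8 encodes ICH_AP0Rn_EL2 (Group 0); crm == 9 encodes
     * ICH_AP1Rn_EL2 (Group 1 NS).
     */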
1935     int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
1936     uint64_t value;
1937 
1938     value = cs->ich_apr[grp][regno];
1939     trace_gicv3_ich_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
1940     return value;
1941 }
1942 
1943 static void ich_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
1944                          uint64_t value)
1945 {
1946     GICv3CPUState *cs = icc_cs_from_env(env);
1947     int regno = ri->opc2 & 3;
1948     int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
1949 
1950     trace_gicv3_ich_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
1951 
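    /* The active priority registers are 32 bits wide; the upper half
     * of a 64-bit write is ignored.
     */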
1952     cs->ich_apr[grp][regno] = value & 0xFFFFFFFFU;
1953     gicv3_cpuif_virt_update(cs);
1954 }
1955 
1956 static uint64_t ich_hcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1957 {
1958     GICv3CPUState *cs = icc_cs_from_env(env);
1959     uint64_t value = cs->ich_hcr_el2;
1960 
1961     trace_gicv3_ich_hcr_read(gicv3_redist_affid(cs), value);
1962     return value;
1963 }
1964 
1965 static void ich_hcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1966                           uint64_t value)
1967 {
1968     GICv3CPUState *cs = icc_cs_from_env(env);
1969 
1970     trace_gicv3_ich_hcr_write(gicv3_redist_affid(cs), value);
1971 
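    /* Only the defined control bits and the EOIcount field are
     * writable; everything else is treated as RES0.
     */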
1972     value &= ICH_HCR_EL2_EN | ICH_HCR_EL2_UIE | ICH_HCR_EL2_LRENPIE |
1973         ICH_HCR_EL2_NPIE | ICH_HCR_EL2_VGRP0EIE | ICH_HCR_EL2_VGRP0DIE |
1974         ICH_HCR_EL2_VGRP1EIE | ICH_HCR_EL2_VGRP1DIE | ICH_HCR_EL2_TC |
1975         ICH_HCR_EL2_TALL0 | ICH_HCR_EL2_TALL1 | ICH_HCR_EL2_TSEI |
1976         ICH_HCR_EL2_TDIR | ICH_HCR_EL2_EOICOUNT_MASK;
1977 
1978     cs->ich_hcr_el2 = value;
1979     gicv3_cpuif_virt_update(cs);
1980 }
1981 
1982 static uint64_t ich_vmcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1983 {
1984     GICv3CPUState *cs = icc_cs_from_env(env);
1985     uint64_t value = cs->ich_vmcr_el2;
1986 
1987     trace_gicv3_ich_vmcr_read(gicv3_redist_affid(cs), value);
1988     return value;
1989 }
1990 
1991 static void ich_vmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1992                          uint64_t value)
1993 {
1994     GICv3CPUState *cs = icc_cs_from_env(env);
1995 
1996     trace_gicv3_ich_vmcr_write(gicv3_redist_affid(cs), value);
1997 
1998     value &= ICH_VMCR_EL2_VENG0 | ICH_VMCR_EL2_VENG1 | ICH_VMCR_EL2_VCBPR |
1999         ICH_VMCR_EL2_VEOIM | ICH_VMCR_EL2_VBPR1_MASK |
2000         ICH_VMCR_EL2_VBPR0_MASK | ICH_VMCR_EL2_VPMR_MASK;
2001     value |= ICH_VMCR_EL2_VFIQEN;
2002 
2003     cs->ich_vmcr_el2 = value;
2004     /* Enforce "writing BPRs to less than minimum sets them to the minimum"
2005      * by reading and writing back the fields.
2006      */
2007     write_vbpr(cs, GICV3_G0, read_vbpr(cs, GICV3_G0));
2008     write_vbpr(cs, GICV3_G1, read_vbpr(cs, GICV3_G1));
2009 
2010     gicv3_cpuif_virt_update(cs);
2011 }
2012 
2013 static uint64_t ich_lr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2014 {
2015     GICv3CPUState *cs = icc_cs_from_env(env);
2016     int regno = ri->opc2 | ((ri->crm & 1) << 3);
2017     uint64_t value;
2018 
2019     /* This read function handles all of:
2020      * 64-bit reads of the whole LR
2021      * 32-bit reads of the low half of the LR
2022      * 32-bit reads of the high half of the LR
2023      */
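    /* The LR index comes from the encoding: opc2 gives the low three
     * bits and bit 0 of crm selects LRs 8..15. For example, with the
     * encodings registered in gicv3_init_cpuif(), AArch64 ICH_LR5_EL2
     * (crm 12, opc2 5) gives regno 5 and AArch32 ICH_LRC13
     * (crm 15, opc2 5) gives regno 13.
     */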
2024     if (ri->state == ARM_CP_STATE_AA32) {
2025         if (ri->crm >= 14) {
2026             value = extract64(cs->ich_lr_el2[regno], 32, 32);
2027             trace_gicv3_ich_lrc_read(regno, gicv3_redist_affid(cs), value);
2028         } else {
2029             value = extract64(cs->ich_lr_el2[regno], 0, 32);
2030             trace_gicv3_ich_lr32_read(regno, gicv3_redist_affid(cs), value);
2031         }
2032     } else {
2033         value = cs->ich_lr_el2[regno];
2034         trace_gicv3_ich_lr_read(regno, gicv3_redist_affid(cs), value);
2035     }
2036 
2037     return value;
2038 }
2039 
2040 static void ich_lr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2041                          uint64_t value)
2042 {
2043     GICv3CPUState *cs = icc_cs_from_env(env);
2044     int regno = ri->opc2 | ((ri->crm & 1) << 3);
2045 
2046     /* This write function handles all of:
2047      * 64-bit writes to the whole LR
2048      * 32-bit writes to the low half of the LR
2049      * 32-bit writes to the high half of the LR
2050      */
2051     if (ri->state == ARM_CP_STATE_AA32) {
2052         if (ri->crm >= 14) {
2053             trace_gicv3_ich_lrc_write(regno, gicv3_redist_affid(cs), value);
2054             value = deposit64(cs->ich_lr_el2[regno], 32, 32, value);
2055         } else {
2056             trace_gicv3_ich_lr32_write(regno, gicv3_redist_affid(cs), value);
2057             value = deposit64(cs->ich_lr_el2[regno], 0, 32, value);
2058         }
2059     } else {
2060         trace_gicv3_ich_lr_write(regno, gicv3_redist_affid(cs), value);
2061     }
2062 
2063     /* Enforce RES0 bits in priority field */
2064     if (cs->vpribits < 8) {
2065         value = deposit64(value, ICH_LR_EL2_PRIORITY_SHIFT,
2066                           8 - cs->vpribits, 0);
2067     }
2068 
2069     cs->ich_lr_el2[regno] = value;
2070     gicv3_cpuif_virt_update(cs);
2071 }
2072 
2073 static uint64_t ich_vtr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2074 {
2075     GICv3CPUState *cs = icc_cs_from_env(env);
2076     uint64_t value;
2077 
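    /* The count fields are encoded as one less than the implemented
     * number: for example, a (hypothetical) configuration with 4 list
     * registers and 5 preemption and priority bits reads back as
     * ListRegs == 3, PREbits == 4 and PRIbits == 4.
     */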
2078     value = ((cs->num_list_regs - 1) << ICH_VTR_EL2_LISTREGS_SHIFT)
2079         | ICH_VTR_EL2_TDS | ICH_VTR_EL2_NV4 | ICH_VTR_EL2_A3V
2080         | (1 << ICH_VTR_EL2_IDBITS_SHIFT)
2081         | ((cs->vprebits - 1) << ICH_VTR_EL2_PREBITS_SHIFT)
2082         | ((cs->vpribits - 1) << ICH_VTR_EL2_PRIBITS_SHIFT);
2083 
2084     trace_gicv3_ich_vtr_read(gicv3_redist_affid(cs), value);
2085     return value;
2086 }
2087 
2088 static uint64_t ich_misr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2089 {
2090     GICv3CPUState *cs = icc_cs_from_env(env);
2091     uint64_t value = maintenance_interrupt_state(cs);
2092 
2093     trace_gicv3_ich_misr_read(gicv3_redist_affid(cs), value);
2094     return value;
2095 }
2096 
2097 static uint64_t ich_eisr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2098 {
2099     GICv3CPUState *cs = icc_cs_from_env(env);
2100     uint64_t value = eoi_maintenance_interrupt_state(cs, NULL);
2101 
2102     trace_gicv3_ich_eisr_read(gicv3_redist_affid(cs), value);
2103     return value;
2104 }
2105 
2106 static uint64_t ich_elrsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2107 {
2108     GICv3CPUState *cs = icc_cs_from_env(env);
2109     uint64_t value = 0;
2110     int i;
2111 
2112     for (i = 0; i < cs->num_list_regs; i++) {
2113         uint64_t lr = cs->ich_lr_el2[i];
2114 
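        /* An LR is reported as free (bit set) when its State field is
         * invalid and no EOI maintenance is outstanding, i.e. HW is set
         * or EOI is clear.
         */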
2115         if ((lr & ICH_LR_EL2_STATE_MASK) == 0 &&
2116             ((lr & ICH_LR_EL2_HW) != 0 || (lr & ICH_LR_EL2_EOI) == 0)) {
2117             value |= (1 << i);
2118         }
2119     }
2120 
2121     trace_gicv3_ich_elrsr_read(gicv3_redist_affid(cs), value);
2122     return value;
2123 }
2124 
2125 static const ARMCPRegInfo gicv3_cpuif_hcr_reginfo[] = {
2126     { .name = "ICH_AP0R0_EL2", .state = ARM_CP_STATE_BOTH,
2127       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 0,
2128       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2129       .access = PL2_RW,
2130       .readfn = ich_ap_read,
2131       .writefn = ich_ap_write,
2132     },
2133     { .name = "ICH_AP1R0_EL2", .state = ARM_CP_STATE_BOTH,
2134       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 0,
2135       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2136       .access = PL2_RW,
2137       .readfn = ich_ap_read,
2138       .writefn = ich_ap_write,
2139     },
2140     { .name = "ICH_HCR_EL2", .state = ARM_CP_STATE_BOTH,
2141       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 0,
2142       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2143       .access = PL2_RW,
2144       .readfn = ich_hcr_read,
2145       .writefn = ich_hcr_write,
2146     },
2147     { .name = "ICH_VTR_EL2", .state = ARM_CP_STATE_BOTH,
2148       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 1,
2149       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2150       .access = PL2_R,
2151       .readfn = ich_vtr_read,
2152     },
2153     { .name = "ICH_MISR_EL2", .state = ARM_CP_STATE_BOTH,
2154       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 2,
2155       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2156       .access = PL2_R,
2157       .readfn = ich_misr_read,
2158     },
2159     { .name = "ICH_EISR_EL2", .state = ARM_CP_STATE_BOTH,
2160       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 3,
2161       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2162       .access = PL2_R,
2163       .readfn = ich_eisr_read,
2164     },
2165     { .name = "ICH_ELRSR_EL2", .state = ARM_CP_STATE_BOTH,
2166       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 5,
2167       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2168       .access = PL2_R,
2169       .readfn = ich_elrsr_read,
2170     },
2171     { .name = "ICH_VMCR_EL2", .state = ARM_CP_STATE_BOTH,
2172       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 7,
2173       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2174       .access = PL2_RW,
2175       .readfn = ich_vmcr_read,
2176       .writefn = ich_vmcr_write,
2177     },
2178     REGINFO_SENTINEL
2179 };
2180 
2181 static const ARMCPRegInfo gicv3_cpuif_ich_apxr1_reginfo[] = {
2182     { .name = "ICH_AP0R1_EL2", .state = ARM_CP_STATE_BOTH,
2183       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 1,
2184       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2185       .access = PL2_RW,
2186       .readfn = ich_ap_read,
2187       .writefn = ich_ap_write,
2188     },
2189     { .name = "ICH_AP1R1_EL2", .state = ARM_CP_STATE_BOTH,
2190       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 1,
2191       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2192       .access = PL2_RW,
2193       .readfn = ich_ap_read,
2194       .writefn = ich_ap_write,
2195     },
2196     REGINFO_SENTINEL
2197 };
2198 
2199 static const ARMCPRegInfo gicv3_cpuif_ich_apxr23_reginfo[] = {
2200     { .name = "ICH_AP0R2_EL2", .state = ARM_CP_STATE_BOTH,
2201       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 2,
2202       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2203       .access = PL2_RW,
2204       .readfn = ich_ap_read,
2205       .writefn = ich_ap_write,
2206     },
2207     { .name = "ICH_AP0R3_EL2", .state = ARM_CP_STATE_BOTH,
2208       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 3,
2209       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2210       .access = PL2_RW,
2211       .readfn = ich_ap_read,
2212       .writefn = ich_ap_write,
2213     },
2214     { .name = "ICH_AP1R2_EL2", .state = ARM_CP_STATE_BOTH,
2215       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 2,
2216       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2217       .access = PL2_RW,
2218       .readfn = ich_ap_read,
2219       .writefn = ich_ap_write,
2220     },
2221     { .name = "ICH_AP1R3_EL2", .state = ARM_CP_STATE_BOTH,
2222       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 3,
2223       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2224       .access = PL2_RW,
2225       .readfn = ich_ap_read,
2226       .writefn = ich_ap_write,
2227     },
2228     REGINFO_SENTINEL
2229 };
2230 
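/* Hooked on every EL change: which IRQ/FIQ (if any) should be signalled
 * to the CPU depends on the current exception level and security state,
 * so recompute the CPU interface state whenever they change.
 */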
2231 static void gicv3_cpuif_el_change_hook(ARMCPU *cpu, void *opaque)
2232 {
2233     GICv3CPUState *cs = opaque;
2234 
2235     gicv3_cpuif_update(cs);
2236 }
2237 
2238 void gicv3_init_cpuif(GICv3State *s)
2239 {
2240     /* Called from the GICv3 realize function; register our system
2241      * registers with the CPU
2242      */
2243     int i;
2244 
2245     for (i = 0; i < s->num_cpu; i++) {
2246         ARMCPU *cpu = ARM_CPU(qemu_get_cpu(i));
2247         GICv3CPUState *cs = &s->cpu[i];
2248 
2249         /* Note that we can't just use the GICv3CPUState as an opaque pointer
2250          * in define_arm_cp_regs_with_opaque(), because when we're called back
2251          * it might be with code translated by CPU 0 but run by CPU 1, in
2252          * which case we'd get the wrong value.
2253          * So instead we define the regs with no ri->opaque info, and
2254          * get back to the GICv3CPUState from the ARMCPU by reading back
2255          * the opaque pointer from the el_change_hook, which we're going
2256          * to need to register anyway.
2257          */
2258         define_arm_cp_regs(cpu, gicv3_cpuif_reginfo);
2259         if (arm_feature(&cpu->env, ARM_FEATURE_EL2)
2260             && cpu->gic_num_lrs) {
2261             int j;
2262 
2263             cs->num_list_regs = cpu->gic_num_lrs;
2264             cs->vpribits = cpu->gic_vpribits;
2265             cs->vprebits = cpu->gic_vprebits;
2266 
2267             /* Check against architectural constraints: getting these
2268              * wrong would be a bug in the CPU code defining these,
2269              * and the implementation relies on them holding.
2270              */
2271             g_assert(cs->vprebits <= cs->vpribits);
2272             g_assert(cs->vprebits >= 5 && cs->vprebits <= 7);
2273             g_assert(cs->vpribits >= 5 && cs->vpribits <= 8);
2274 
2275             define_arm_cp_regs(cpu, gicv3_cpuif_hcr_reginfo);
2276 
2277             for (j = 0; j < cs->num_list_regs; j++) {
2278                 /* Note that the AArch64 LRs are 64-bit; the AArch32 LRs
2279                  * are split into two cp15 regs, LR (the low part, with the
2280                  * same encoding as the AArch64 LR) and LRC (the high part).
2281                  */
2282                 ARMCPRegInfo lr_regset[] = {
2283                     { .name = "ICH_LRn_EL2", .state = ARM_CP_STATE_BOTH,
2284                       .opc0 = 3, .opc1 = 4, .crn = 12,
2285                       .crm = 12 + (j >> 3), .opc2 = j & 7,
2286                       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2287                       .access = PL2_RW,
2288                       .readfn = ich_lr_read,
2289                       .writefn = ich_lr_write,
2290                     },
2291                     { .name = "ICH_LRCn_EL2", .state = ARM_CP_STATE_AA32,
2292                       .cp = 15, .opc1 = 4, .crn = 12,
2293                       .crm = 14 + (j >> 3), .opc2 = j & 7,
2294                       .type = ARM_CP_IO | ARM_CP_NO_RAW,
2295                       .access = PL2_RW,
2296                       .readfn = ich_lr_read,
2297                       .writefn = ich_lr_write,
2298                     },
2299                     REGINFO_SENTINEL
2300                 };
2301                 define_arm_cp_regs(cpu, lr_regset);
2302             }
2303             if (cs->vprebits >= 6) {
2304                 define_arm_cp_regs(cpu, gicv3_cpuif_ich_apxr1_reginfo);
2305             }
2306             if (cs->vprebits == 7) {
2307                 define_arm_cp_regs(cpu, gicv3_cpuif_ich_apxr23_reginfo);
2308             }
2309         }
2310         arm_register_el_change_hook(cpu, gicv3_cpuif_el_change_hook, cs);
2311     }
2312 }
2313