xref: /openbmc/qemu/hw/intc/arm_gicv3_cpuif.c (revision d2c0c6aab6c6748726149c37159a75751ec6ac92)
1 /*
2  * ARM Generic Interrupt Controller v3 (emulation)
3  *
4  * Copyright (c) 2016 Linaro Limited
5  * Written by Peter Maydell
6  *
7  * This code is licensed under the GPL, version 2 or (at your option)
8  * any later version.
9  */
10 
11 /* This file contains the code for the system register interface
12  * portions of the GICv3.
13  */
14 
15 #include "qemu/osdep.h"
16 #include "qemu/bitops.h"
17 #include "qemu/log.h"
18 #include "qemu/main-loop.h"
19 #include "trace.h"
20 #include "gicv3_internal.h"
21 #include "hw/irq.h"
22 #include "cpu.h"
23 #include "target/arm/cpregs.h"
24 #include "target/arm/cpu-features.h"
25 #include "sysemu/tcg.h"
26 #include "sysemu/qtest.h"
27 
28 /*
29  * Special case return value from hppvi_index(); must be larger than
30  * the architecturally maximum possible list register index (which is 15)
31  */
32 #define HPPVI_INDEX_VLPI 16
33 
34 static GICv3CPUState *icc_cs_from_env(CPUARMState *env)
35 {
36     return env->gicv3state;
37 }
38 
39 static bool gicv3_use_ns_bank(CPUARMState *env)
40 {
41     /* Return true if we should use the NonSecure bank for a banked GIC
42      * CPU interface register. Note that this differs from the
43      * access_secure_reg() function because GICv3 banked registers are
44      * banked even for AArch64, unlike the other CPU system registers.
45      */
46     return !arm_is_secure_below_el3(env);
47 }
48 
49 /* The minimum BPR for the virtual interface is a configurable property */
50 static inline int icv_min_vbpr(GICv3CPUState *cs)
51 {
52     return 7 - cs->vprebits;
53 }
54 
55 static inline int ich_num_aprs(GICv3CPUState *cs)
56 {
57     /* Return the number of virtual APR registers (1, 2, or 4) */
58     int aprmax = 1 << (cs->vprebits - 5);
59     assert(aprmax <= ARRAY_SIZE(cs->ich_apr[0]));
60     return aprmax;
61 }
62 
63 /* Simple accessor functions for LR fields */
64 static uint32_t ich_lr_vintid(uint64_t lr)
65 {
66     return extract64(lr, ICH_LR_EL2_VINTID_SHIFT, ICH_LR_EL2_VINTID_LENGTH);
67 }
68 
69 static uint32_t ich_lr_pintid(uint64_t lr)
70 {
71     return extract64(lr, ICH_LR_EL2_PINTID_SHIFT, ICH_LR_EL2_PINTID_LENGTH);
72 }
73 
74 static uint32_t ich_lr_prio(uint64_t lr)
75 {
76     return extract64(lr, ICH_LR_EL2_PRIORITY_SHIFT, ICH_LR_EL2_PRIORITY_LENGTH);
77 }
78 
79 static int ich_lr_state(uint64_t lr)
80 {
81     return extract64(lr, ICH_LR_EL2_STATE_SHIFT, ICH_LR_EL2_STATE_LENGTH);
82 }
83 
84 static bool icv_access(CPUARMState *env, int hcr_flags)
85 {
86     /* Return true if this ICC_ register access should really be
87      * directed to an ICV_ access. hcr_flags is a mask of
88      * HCR_EL2 bits to check: we treat this as an ICV_ access
89      * if we are in NS EL1 and at least one of the specified
90      * HCR_EL2 bits is set.
91      *
92      * ICV registers fall into four categories:
93      *  * access if NS EL1 and HCR_EL2.FMO == 1:
94      *    all ICV regs with '0' in their name
95      *  * access if NS EL1 and HCR_EL2.IMO == 1:
96      *    all ICV regs with '1' in their name
97      *  * access if NS EL1 and either IMO or FMO == 1:
98      *    CTLR, DIR, PMR, RPR
99      */
100     uint64_t hcr_el2 = arm_hcr_el2_eff(env);
101     bool flagmatch = hcr_el2 & hcr_flags & (HCR_IMO | HCR_FMO);
102 
103     return flagmatch && arm_current_el(env) == 1
104         && !arm_is_secure_below_el3(env);
105 }
106 
107 static int read_vbpr(GICv3CPUState *cs, int grp)
108 {
109     /* Read VBPR value out of the VMCR field (caller must handle
110      * VCBPR effects if required)
111      */
112     if (grp == GICV3_G0) {
113         return extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR0_SHIFT,
114                      ICH_VMCR_EL2_VBPR0_LENGTH);
115     } else {
116         return extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR1_SHIFT,
117                          ICH_VMCR_EL2_VBPR1_LENGTH);
118     }
119 }
120 
121 static void write_vbpr(GICv3CPUState *cs, int grp, int value)
122 {
123     /* Write new VBPR1 value, handling the "writing a value less than
124      * the minimum sets it to the minimum" semantics.
125      */
126     int min = icv_min_vbpr(cs);
127 
128     if (grp != GICV3_G0) {
129         min++;
130     }
131 
132     value = MAX(value, min);
133 
134     if (grp == GICV3_G0) {
135         cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR0_SHIFT,
136                                      ICH_VMCR_EL2_VBPR0_LENGTH, value);
137     } else {
138         cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR1_SHIFT,
139                                      ICH_VMCR_EL2_VBPR1_LENGTH, value);
140     }
141 }
142 
143 static uint32_t icv_fullprio_mask(GICv3CPUState *cs)
144 {
145     /* Return a mask word which clears the unimplemented priority bits
146      * from a priority value for a virtual interrupt. (Not to be confused
147      * with the group priority, whose mask depends on the value of VBPR
148      * for the interrupt group.)
149      */
150     return (~0U << (8 - cs->vpribits)) & 0xff;
151 }
152 
static int ich_highest_active_virt_prio(GICv3CPUState *cs)
{
    /* Calculate the current running priority based on the set bits
     * in the ICH Active Priority Registers.
     */
    int i;
    int aprmax = ich_num_aprs(cs);

    /* An active virtual NMI takes the highest priority, 0x0; its flag
     * lives only in AP1R0, not in the normal priority bits.
     */
    if (cs->ich_apr[GICV3_G1NS][0] & ICV_AP1R_EL1_NMI) {
        return 0x0;
    }

    for (i = 0; i < aprmax; i++) {
        uint32_t apr = cs->ich_apr[GICV3_G0][i] |
            cs->ich_apr[GICV3_G1NS][i];

        if (!apr) {
            continue;
        }
        /* The lowest set bit across both groups identifies the highest
         * priority active interrupt; scale the bit index back up to a
         * priority value using the minimum (group-priority) shift.
         */
        return (i * 32 + ctz32(apr)) << (icv_min_vbpr(cs) + 1);
    }
    /* No current active interrupts: return idle priority */
    return 0xff;
}
177 
static int hppvi_index(GICv3CPUState *cs)
{
    /*
     * Return the list register index of the highest priority pending
     * virtual interrupt, as per the HighestPriorityVirtualInterrupt
     * pseudocode. If no pending virtual interrupts, return -1.
     * If the highest priority pending virtual interrupt is a vLPI,
     * return HPPVI_INDEX_VLPI.
     * (The pseudocode handles checking whether the vLPI is higher
     * priority than the highest priority list register at every
     * callsite of HighestPriorityVirtualInterrupt; we check it here.)
     */
    ARMCPU *cpu = ARM_CPU(cs->cpu);
    CPUARMState *env = &cpu->env;
    int idx = -1;
    int i;
    /* Note that a list register entry with a priority of 0xff will
     * never be reported by this function; this is the architecturally
     * correct behaviour.
     */
    int prio = 0xff;
    bool nmi = false;

    if (!(cs->ich_vmcr_el2 & (ICH_VMCR_EL2_VENG0 | ICH_VMCR_EL2_VENG1))) {
        /* Both groups disabled, definitely nothing to do */
        return idx;
    }

    for (i = 0; i < cs->num_list_regs; i++) {
        uint64_t lr = cs->ich_lr_el2[i];
        bool thisnmi;
        int thisprio;

        if (ich_lr_state(lr) != ICH_LR_EL2_STATE_PENDING) {
            /* Not Pending */
            continue;
        }

        /* Ignore interrupts if relevant group enable not set */
        if (lr & ICH_LR_EL2_GROUP) {
            if (!(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) {
                continue;
            }
        } else {
            if (!(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0)) {
                continue;
            }
        }

        thisnmi = lr & ICH_LR_EL2_NMI;
        thisprio = ich_lr_prio(lr);

        /* A pending NMI beats a non-NMI of equal priority value */
        if ((thisprio < prio) || ((thisprio == prio) && (thisnmi & (!nmi)))) {
            prio = thisprio;
            nmi = thisnmi;
            idx = i;
        }
    }

    /*
     * "no pending vLPI" is indicated with prio = 0xff, which always
     * fails the priority check here. vLPIs are only considered
     * when we are in Non-Secure state.
     */
    if (cs->hppvlpi.prio < prio && !arm_is_secure(env)) {
        if (cs->hppvlpi.grp == GICV3_G0) {
            if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0) {
                return HPPVI_INDEX_VLPI;
            }
        } else {
            if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1) {
                return HPPVI_INDEX_VLPI;
            }
        }
    }

    return idx;
}
256 
static uint32_t icv_gprio_mask(GICv3CPUState *cs, int group)
{
    /* Return a mask word which clears the subpriority bits from
     * a priority value for a virtual interrupt in the specified group.
     * This depends on the VBPR value.
     * If using VBPR0 then:
     *  a BPR of 0 means the group priority bits are [7:1];
     *  a BPR of 1 means they are [7:2], and so on down to
     *  a BPR of 7 meaning no group priority bits at all.
     * If using VBPR1 then:
     *  a BPR of 0 is impossible (the minimum value is 1)
     *  a BPR of 1 means the group priority bits are [7:1];
     *  a BPR of 2 means they are [7:2], and so on down to
     *  a BPR of 7 meaning the group priority is [7].
     *
     * Which BPR to use depends on the group of the interrupt and
     * the current ICH_VMCR_EL2.VCBPR settings.
     *
     * This corresponds to the VGroupBits() pseudocode.
     */
    int bpr;

    /* When VCBPR is set, group 1 interrupts use the VBPR0 value */
    if (group == GICV3_G1NS && cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR) {
        group = GICV3_G0;
    }

    bpr = read_vbpr(cs, group);
    if (group == GICV3_G1NS) {
        /* write_vbpr() enforces the minimum, so a G1NS BPR of 0 is
         * impossible here
         */
        assert(bpr > 0);
        bpr--;
    }

    return ~0U << (bpr + 1);
}
291 
static bool icv_hppi_can_preempt(GICv3CPUState *cs, uint64_t lr)
{
    /* Return true if we can signal this virtual interrupt defined by
     * the given list register value; see the pseudocode functions
     * CanSignalVirtualInterrupt and CanSignalVirtualInt.
     * Compare also icc_hppi_can_preempt() which is the non-virtual
     * equivalent of these checks.
     */
    int grp;
    bool is_nmi;
    uint32_t mask, prio, rprio, vpmr;

    if (!(cs->ich_hcr_el2 & ICH_HCR_EL2_EN)) {
        /* Virtual interface disabled */
        return false;
    }

    /* We don't need to check that this LR is in Pending state because
     * that has already been done in hppvi_index().
     */

    prio = ich_lr_prio(lr);
    is_nmi = lr & ICH_LR_EL2_NMI;
    vpmr = extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
                     ICH_VMCR_EL2_VPMR_LENGTH);

    /* NMIs are exempt from the priority mask check */
    if (!is_nmi && prio >= vpmr) {
        /* Priority mask masks this interrupt */
        return false;
    }

    rprio = ich_highest_active_virt_prio(cs);
    if (rprio == 0xff) {
        /* No running interrupt so we can preempt */
        return true;
    }

    grp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;

    mask = icv_gprio_mask(cs, grp);

    /* We only preempt a running interrupt if the pending interrupt's
     * group priority is sufficient (the subpriorities are not considered).
     */
    if ((prio & mask) < (rprio & mask)) {
        return true;
    }

    /* An NMI may preempt at equal group priority, provided no NMI is
     * already active (AP1R0 NMI flag clear).
     */
    if ((prio & mask) == (rprio & mask) && is_nmi &&
        !(cs->ich_apr[GICV3_G1NS][0] & ICV_AP1R_EL1_NMI)) {
        return true;
    }

    return false;
}
347 
static bool icv_hppvlpi_can_preempt(GICv3CPUState *cs)
{
    /*
     * Return true if we can signal the highest priority pending vLPI.
     * We can assume we're Non-secure because hppvi_index() already
     * tested for that.
     */
    uint32_t mask, rprio, vpmr;

    if (!(cs->ich_hcr_el2 & ICH_HCR_EL2_EN)) {
        /* Virtual interface disabled */
        return false;
    }

    vpmr = extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
                     ICH_VMCR_EL2_VPMR_LENGTH);

    /* Unlike the LR case there is no NMI exemption here: the vLPI is
     * always subject to the virtual priority mask.
     */
    if (cs->hppvlpi.prio >= vpmr) {
        /* Priority mask masks this interrupt */
        return false;
    }

    rprio = ich_highest_active_virt_prio(cs);
    if (rprio == 0xff) {
        /* No running interrupt so we can preempt */
        return true;
    }

    mask = icv_gprio_mask(cs, cs->hppvlpi.grp);

    /*
     * We only preempt a running interrupt if the pending interrupt's
     * group priority is sufficient (the subpriorities are not considered).
     */
    if ((cs->hppvlpi.prio & mask) < (rprio & mask)) {
        return true;
    }

    return false;
}
388 
static uint32_t eoi_maintenance_interrupt_state(GICv3CPUState *cs,
                                                uint32_t *misr)
{
    /* Return a set of bits indicating the EOI maintenance interrupt status
     * for each list register. The EOI maintenance interrupt status is
     * 1 if LR.State == 0 && LR.HW == 0 && LR.EOI == 1
     * (see the GICv3 spec for the ICH_EISR_EL2 register).
     * If misr is not NULL then we should also collect the information
     * about the MISR.EOI, MISR.NP and MISR.U bits.
     */
    uint32_t value = 0;
    int validcount = 0;       /* LRs holding a valid (non-Invalid) interrupt */
    bool seenpending = false; /* any LR in Pending state? */
    int i;

    for (i = 0; i < cs->num_list_regs; i++) {
        uint64_t lr = cs->ich_lr_el2[i];

        /* EISR bit: state Invalid (0), not hardware, EOI requested */
        if ((lr & (ICH_LR_EL2_STATE_MASK | ICH_LR_EL2_HW | ICH_LR_EL2_EOI))
            == ICH_LR_EL2_EOI) {
            value |= (1 << i);
        }
        if ((lr & ICH_LR_EL2_STATE_MASK)) {
            validcount++;
        }
        if (ich_lr_state(lr) == ICH_LR_EL2_STATE_PENDING) {
            seenpending = true;
        }
    }

    if (misr) {
        /* U: underflow -- at most one valid LR and UIE is enabled */
        if (validcount < 2 && (cs->ich_hcr_el2 & ICH_HCR_EL2_UIE)) {
            *misr |= ICH_MISR_EL2_U;
        }
        /* NP: no Pending LR and NPIE is enabled */
        if (!seenpending && (cs->ich_hcr_el2 & ICH_HCR_EL2_NPIE)) {
            *misr |= ICH_MISR_EL2_NP;
        }
        if (value) {
            *misr |= ICH_MISR_EL2_EOI;
        }
    }
    return value;
}
432 
433 static uint32_t maintenance_interrupt_state(GICv3CPUState *cs)
434 {
435     /* Return a set of bits indicating the maintenance interrupt status
436      * (as seen in the ICH_MISR_EL2 register).
437      */
438     uint32_t value = 0;
439 
440     /* Scan list registers and fill in the U, NP and EOI bits */
441     eoi_maintenance_interrupt_state(cs, &value);
442 
443     if ((cs->ich_hcr_el2 & ICH_HCR_EL2_LRENPIE) &&
444         (cs->ich_hcr_el2 & ICH_HCR_EL2_EOICOUNT_MASK)) {
445         value |= ICH_MISR_EL2_LRENP;
446     }
447 
448     if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP0EIE) &&
449         (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0)) {
450         value |= ICH_MISR_EL2_VGRP0E;
451     }
452 
453     if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP0DIE) &&
454         !(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) {
455         value |= ICH_MISR_EL2_VGRP0D;
456     }
457     if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP1EIE) &&
458         (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) {
459         value |= ICH_MISR_EL2_VGRP1E;
460     }
461 
462     if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP1DIE) &&
463         !(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) {
464         value |= ICH_MISR_EL2_VGRP1D;
465     }
466 
467     return value;
468 }
469 
void gicv3_cpuif_virt_irq_fiq_update(GICv3CPUState *cs)
{
    /*
     * Tell the CPU about any pending virtual interrupts.
     * This should only be called for changes that affect the
     * vIRQ and vFIQ status and do not change the maintenance
     * interrupt status. This means that unlike gicv3_cpuif_virt_update()
     * this function won't recursively call back into the GIC code.
     * The main use of this is when the redistributor has changed the
     * highest priority pending virtual LPI.
     */
    int idx;
    int irqlevel = 0;
    int fiqlevel = 0;

    idx = hppvi_index(cs);
    trace_gicv3_cpuif_virt_update(gicv3_redist_affid(cs), idx,
                                  cs->hppvlpi.irq, cs->hppvlpi.grp,
                                  cs->hppvlpi.prio);
    if (idx == HPPVI_INDEX_VLPI) {
        /* Highest priority pending is the vLPI tracked by the redist */
        if (icv_hppvlpi_can_preempt(cs)) {
            if (cs->hppvlpi.grp == GICV3_G0) {
                fiqlevel = 1;
            } else {
                irqlevel = 1;
            }
        }
    } else if (idx >= 0) {
        uint64_t lr = cs->ich_lr_el2[idx];

        if (icv_hppi_can_preempt(cs, lr)) {
            /* Virtual interrupts are simple: G0 are always FIQ, and G1 IRQ */
            if (lr & ICH_LR_EL2_GROUP) {
                irqlevel = 1;
            } else {
                fiqlevel = 1;
            }
        }
    }

    /* Drive the new vFIQ/vIRQ line levels into the CPU */
    trace_gicv3_cpuif_virt_set_irqs(gicv3_redist_affid(cs), fiqlevel, irqlevel);
    qemu_set_irq(cs->parent_vfiq, fiqlevel);
    qemu_set_irq(cs->parent_virq, irqlevel);
}
514 
static void gicv3_cpuif_virt_update(GICv3CPUState *cs)
{
    /*
     * Tell the CPU about any pending virtual interrupts or
     * maintenance interrupts, following a change to the state
     * of the CPU interface relevant to virtual interrupts.
     *
     * CAUTION: this function will call qemu_set_irq() on the
     * CPU maintenance IRQ line, which is typically wired up
     * to the GIC as a per-CPU interrupt. This means that it
     * will recursively call back into the GIC code via
     * gicv3_redist_set_irq() and thus into the CPU interface code's
     * gicv3_cpuif_update(). It is therefore important that this
     * function is only called as the final action of a CPU interface
     * register write implementation, after all the GIC state
     * fields have been updated. gicv3_cpuif_update() also must
     * not cause this function to be called, but that happens
     * naturally as a result of there being no architectural
     * linkage between the physical and virtual GIC logic.
     */
    ARMCPU *cpu = ARM_CPU(cs->cpu);
    int maintlevel = 0;

    gicv3_cpuif_virt_irq_fiq_update(cs);

    /* The maintenance interrupt is only signalled while the virtual
     * interface is enabled via ICH_HCR_EL2.En.
     */
    if ((cs->ich_hcr_el2 & ICH_HCR_EL2_EN) &&
        maintenance_interrupt_state(cs) != 0) {
        maintlevel = 1;
    }

    trace_gicv3_cpuif_virt_set_maint_irq(gicv3_redist_affid(cs), maintlevel);
    qemu_set_irq(cpu->gicv3_maintenance_interrupt, maintlevel);
}
548 
549 static uint64_t icv_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
550 {
551     GICv3CPUState *cs = icc_cs_from_env(env);
552     int regno = ri->opc2 & 3;
553     int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
554     uint64_t value = cs->ich_apr[grp][regno];
555 
556     trace_gicv3_icv_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
557     return value;
558 }
559 
560 static void icv_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
561                          uint64_t value)
562 {
563     GICv3CPUState *cs = icc_cs_from_env(env);
564     int regno = ri->opc2 & 3;
565     int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
566 
567     trace_gicv3_icv_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
568 
569     if (cs->nmi_support) {
570         cs->ich_apr[grp][regno] = value & (0xFFFFFFFFU | ICV_AP1R_EL1_NMI);
571     } else {
572         cs->ich_apr[grp][regno] = value & 0xFFFFFFFFU;
573     }
574 
575     gicv3_cpuif_virt_irq_fiq_update(cs);
576     return;
577 }
578 
579 static uint64_t icv_bpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
580 {
581     GICv3CPUState *cs = icc_cs_from_env(env);
582     int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1NS;
583     uint64_t bpr;
584     bool satinc = false;
585 
586     if (grp == GICV3_G1NS && (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR)) {
587         /* reads return bpr0 + 1 saturated to 7, writes ignored */
588         grp = GICV3_G0;
589         satinc = true;
590     }
591 
592     bpr = read_vbpr(cs, grp);
593 
594     if (satinc) {
595         bpr++;
596         bpr = MIN(bpr, 7);
597     }
598 
599     trace_gicv3_icv_bpr_read(ri->crm == 8 ? 0 : 1, gicv3_redist_affid(cs), bpr);
600 
601     return bpr;
602 }
603 
604 static void icv_bpr_write(CPUARMState *env, const ARMCPRegInfo *ri,
605                           uint64_t value)
606 {
607     GICv3CPUState *cs = icc_cs_from_env(env);
608     int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1NS;
609 
610     trace_gicv3_icv_bpr_write(ri->crm == 8 ? 0 : 1,
611                               gicv3_redist_affid(cs), value);
612 
613     if (grp == GICV3_G1NS && (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR)) {
614         /* reads return bpr0 + 1 saturated to 7, writes ignored */
615         return;
616     }
617 
618     write_vbpr(cs, grp, value);
619 
620     gicv3_cpuif_virt_irq_fiq_update(cs);
621 }
622 
623 static uint64_t icv_pmr_read(CPUARMState *env, const ARMCPRegInfo *ri)
624 {
625     GICv3CPUState *cs = icc_cs_from_env(env);
626     uint64_t value;
627 
628     value = extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
629                       ICH_VMCR_EL2_VPMR_LENGTH);
630 
631     trace_gicv3_icv_pmr_read(gicv3_redist_affid(cs), value);
632     return value;
633 }
634 
635 static void icv_pmr_write(CPUARMState *env, const ARMCPRegInfo *ri,
636                           uint64_t value)
637 {
638     GICv3CPUState *cs = icc_cs_from_env(env);
639 
640     trace_gicv3_icv_pmr_write(gicv3_redist_affid(cs), value);
641 
642     value &= icv_fullprio_mask(cs);
643 
644     cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
645                                  ICH_VMCR_EL2_VPMR_LENGTH, value);
646 
647     gicv3_cpuif_virt_irq_fiq_update(cs);
648 }
649 
650 static uint64_t icv_igrpen_read(CPUARMState *env, const ARMCPRegInfo *ri)
651 {
652     GICv3CPUState *cs = icc_cs_from_env(env);
653     int enbit;
654     uint64_t value;
655 
656     enbit = ri->opc2 & 1 ? ICH_VMCR_EL2_VENG1_SHIFT : ICH_VMCR_EL2_VENG0_SHIFT;
657     value = extract64(cs->ich_vmcr_el2, enbit, 1);
658 
659     trace_gicv3_icv_igrpen_read(ri->opc2 & 1 ? 1 : 0,
660                                 gicv3_redist_affid(cs), value);
661     return value;
662 }
663 
664 static void icv_igrpen_write(CPUARMState *env, const ARMCPRegInfo *ri,
665                              uint64_t value)
666 {
667     GICv3CPUState *cs = icc_cs_from_env(env);
668     int enbit;
669 
670     trace_gicv3_icv_igrpen_write(ri->opc2 & 1 ? 1 : 0,
671                                  gicv3_redist_affid(cs), value);
672 
673     enbit = ri->opc2 & 1 ? ICH_VMCR_EL2_VENG1_SHIFT : ICH_VMCR_EL2_VENG0_SHIFT;
674 
675     cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, enbit, 1, value);
676     gicv3_cpuif_virt_update(cs);
677 }
678 
679 static uint64_t icv_ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri)
680 {
681     GICv3CPUState *cs = icc_cs_from_env(env);
682     uint64_t value;
683 
684     /* Note that the fixed fields here (A3V, SEIS, IDbits, PRIbits)
685      * should match the ones reported in ich_vtr_read().
686      */
687     value = ICC_CTLR_EL1_A3V | (1 << ICC_CTLR_EL1_IDBITS_SHIFT) |
688         ((cs->vpribits - 1) << ICC_CTLR_EL1_PRIBITS_SHIFT);
689 
690     if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VEOIM) {
691         value |= ICC_CTLR_EL1_EOIMODE;
692     }
693 
694     if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR) {
695         value |= ICC_CTLR_EL1_CBPR;
696     }
697 
698     trace_gicv3_icv_ctlr_read(gicv3_redist_affid(cs), value);
699     return value;
700 }
701 
702 static void icv_ctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
703                                uint64_t value)
704 {
705     GICv3CPUState *cs = icc_cs_from_env(env);
706 
707     trace_gicv3_icv_ctlr_write(gicv3_redist_affid(cs), value);
708 
709     cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VCBPR_SHIFT,
710                                  1, value & ICC_CTLR_EL1_CBPR ? 1 : 0);
711     cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VEOIM_SHIFT,
712                                  1, value & ICC_CTLR_EL1_EOIMODE ? 1 : 0);
713 
714     gicv3_cpuif_virt_irq_fiq_update(cs);
715 }
716 
717 static uint64_t icv_rpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
718 {
719     GICv3CPUState *cs = icc_cs_from_env(env);
720     uint64_t prio = ich_highest_active_virt_prio(cs);
721 
722     if (cs->ich_apr[GICV3_G1NS][0] & ICV_AP1R_EL1_NMI) {
723         prio |= ICV_RPR_EL1_NMI;
724     }
725 
726     trace_gicv3_icv_rpr_read(gicv3_redist_affid(cs), prio);
727     return prio;
728 }
729 
static uint64_t icv_hppir_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Read ICV_HPPIR0/1: the INTID of the highest priority pending
     * virtual interrupt, if it belongs to the group this register
     * reports on; otherwise the spurious INTID.
     */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int grp = ri->crm == 8 ? GICV3_G0 : GICV3_G1NS;
    int idx = hppvi_index(cs);
    uint64_t value = INTID_SPURIOUS;

    if (idx == HPPVI_INDEX_VLPI) {
        /* Highest priority pending is the redistributor's vLPI */
        if (cs->hppvlpi.grp == grp) {
            value = cs->hppvlpi.irq;
        }
    } else if (idx >= 0) {
        uint64_t lr = cs->ich_lr_el2[idx];
        int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;

        if (grp == thisgrp) {
            value = ich_lr_vintid(lr);
        }
    }

    trace_gicv3_icv_hppir_read(ri->crm == 8 ? 0 : 1,
                               gicv3_redist_affid(cs), value);
    return value;
}
754 
static void icv_activate_irq(GICv3CPUState *cs, int idx, int grp)
{
    /* Activate the interrupt in the specified list register
     * by moving it from Pending to Active state, and update the
     * Active Priority Registers.
     */
    uint32_t mask = icv_gprio_mask(cs, grp);
    int prio = ich_lr_prio(cs->ich_lr_el2[idx]) & mask;
    bool nmi = cs->ich_lr_el2[idx] & ICH_LR_EL2_NMI;
    /* Map the group priority to its bit position in the APR array */
    int aprbit = prio >> (8 - cs->vprebits);
    int regno = aprbit / 32;
    int regbit = aprbit % 32;

    cs->ich_lr_el2[idx] &= ~ICH_LR_EL2_STATE_PENDING_BIT;
    cs->ich_lr_el2[idx] |= ICH_LR_EL2_STATE_ACTIVE_BIT;

    /* An active NMI is recorded via the dedicated AP1R NMI flag rather
     * than via the normal priority bit.
     */
    if (nmi) {
        cs->ich_apr[grp][regno] |= ICV_AP1R_EL1_NMI;
    } else {
        cs->ich_apr[grp][regno] |= (1 << regbit);
    }
}
777 
778 static void icv_activate_vlpi(GICv3CPUState *cs)
779 {
780     uint32_t mask = icv_gprio_mask(cs, cs->hppvlpi.grp);
781     int prio = cs->hppvlpi.prio & mask;
782     int aprbit = prio >> (8 - cs->vprebits);
783     int regno = aprbit / 32;
784     int regbit = aprbit % 32;
785 
786     cs->ich_apr[cs->hppvlpi.grp][regno] |= (1 << regbit);
787     gicv3_redist_vlpi_pending(cs, cs->hppvlpi.irq, 0);
788 }
789 
static uint64_t icv_iar_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Read ICV_IAR0/1: acknowledge the highest priority pending
     * virtual interrupt for this group, activating it if appropriate.
     */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int grp = ri->crm == 8 ? GICV3_G0 : GICV3_G1NS;
    int idx = hppvi_index(cs);
    uint64_t intid = INTID_SPURIOUS;
    int el = arm_current_el(env);

    if (idx == HPPVI_INDEX_VLPI) {
        if (cs->hppvlpi.grp == grp && icv_hppvlpi_can_preempt(cs)) {
            intid = cs->hppvlpi.irq;
            icv_activate_vlpi(cs);
        }
    } else if (idx >= 0) {
        uint64_t lr = cs->ich_lr_el2[idx];
        int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;
        /* NMI handling applies only when SCTLR_ELx.NMI is set */
        bool nmi = env->cp15.sctlr_el[el] & SCTLR_NMI && lr & ICH_LR_EL2_NMI;

        if (thisgrp == grp && icv_hppi_can_preempt(cs, lr)) {
            intid = ich_lr_vintid(lr);
            if (!gicv3_intid_is_special(intid)) {
                if (!nmi) {
                    icv_activate_irq(cs, idx, grp);
                } else {
                    /* NMIs are acknowledged via ICV_NMIAR1 instead */
                    intid = INTID_NMI;
                }
            } else {
                /* Interrupt goes from Pending to Invalid */
                cs->ich_lr_el2[idx] &= ~ICH_LR_EL2_STATE_PENDING_BIT;
                /* We will now return the (bogus) ID from the list register,
                 * as per the pseudocode.
                 */
            }
        }
    }

    trace_gicv3_icv_iar_read(ri->crm == 8 ? 0 : 1,
                             gicv3_redist_affid(cs), intid);

    gicv3_cpuif_virt_update(cs);

    return intid;
}
833 
static uint64_t icv_nmiar1_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Read ICV_NMIAR1: acknowledge the highest priority pending
     * virtual Group 1 interrupt only if it is an NMI, activating it.
     */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int idx = hppvi_index(cs);
    uint64_t intid = INTID_SPURIOUS;

    /* The vLPI special index is not acknowledged via this register */
    if (idx >= 0 && idx != HPPVI_INDEX_VLPI) {
        uint64_t lr = cs->ich_lr_el2[idx];
        int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;

        if ((thisgrp == GICV3_G1NS) && icv_hppi_can_preempt(cs, lr)) {
            intid = ich_lr_vintid(lr);
            if (!gicv3_intid_is_special(intid)) {
                if (lr & ICH_LR_EL2_NMI) {
                    icv_activate_irq(cs, idx, GICV3_G1NS);
                } else {
                    /* Not an NMI: reads as spurious here */
                    intid = INTID_SPURIOUS;
                }
            } else {
                /* Interrupt goes from Pending to Invalid */
                cs->ich_lr_el2[idx] &= ~ICH_LR_EL2_STATE_PENDING_BIT;
                /*
                 * We will now return the (bogus) ID from the list register,
                 * as per the pseudocode.
                 */
            }
        }
    }

    trace_gicv3_icv_nmiar1_read(gicv3_redist_affid(cs), intid);

    gicv3_cpuif_virt_update(cs);

    return intid;
}
869 
870 static uint32_t icc_fullprio_mask(GICv3CPUState *cs)
871 {
872     /*
873      * Return a mask word which clears the unimplemented priority bits
874      * from a priority value for a physical interrupt. (Not to be confused
875      * with the group priority, whose mask depends on the value of BPR
876      * for the interrupt group.)
877      */
878     return (~0U << (8 - cs->pribits)) & 0xff;
879 }
880 
881 static inline int icc_min_bpr(GICv3CPUState *cs)
882 {
883     /* The minimum BPR for the physical interface. */
884     return 7 - cs->prebits;
885 }
886 
887 static inline int icc_min_bpr_ns(GICv3CPUState *cs)
888 {
889     return icc_min_bpr(cs) + 1;
890 }
891 
892 static inline int icc_num_aprs(GICv3CPUState *cs)
893 {
894     /* Return the number of APR registers (1, 2, or 4) */
895     int aprmax = 1 << MAX(cs->prebits - 5, 0);
896     assert(aprmax <= ARRAY_SIZE(cs->icc_apr[0]));
897     return aprmax;
898 }
899 
static int icc_highest_active_prio(GICv3CPUState *cs)
{
    /* Calculate the current running priority based on the set bits
     * in the Active Priority Registers.
     */
    int i;

    if (cs->nmi_support) {
        /*
         * If an NMI is active this takes precedence over anything else
         * for priority purposes; the NMI bit is only in the AP1R0 bit.
         * We return here the effective priority of the NMI, which is
         * either 0x0 or 0x80. Callers will need to check NMI again for
         * purposes of either setting the RPR register bits or for
         * prioritization of NMI vs non-NMI.
         */
        if (cs->icc_apr[GICV3_G1][0] & ICC_AP1R_EL1_NMI) {
            return 0;
        }
        if (cs->icc_apr[GICV3_G1NS][0] & ICC_AP1R_EL1_NMI) {
            /* NS NMI priority depends on whether security is enabled */
            return (cs->gic->gicd_ctlr & GICD_CTLR_DS) ? 0 : 0x80;
        }
    }

    for (i = 0; i < icc_num_aprs(cs); i++) {
        uint32_t apr = cs->icc_apr[GICV3_G0][i] |
            cs->icc_apr[GICV3_G1][i] | cs->icc_apr[GICV3_G1NS][i];

        if (!apr) {
            continue;
        }
        /* Lowest set bit across all groups is the highest priority
         * active interrupt; scale the bit index back to a priority.
         */
        return (i * 32 + ctz32(apr)) << (icc_min_bpr(cs) + 1);
    }
    /* No current active interrupts: return idle priority */
    return 0xff;
}
936 
static uint32_t icc_gprio_mask(GICv3CPUState *cs, int group)
{
    /* Return a mask word which clears the subpriority bits from
     * a priority value for an interrupt in the specified group.
     * This depends on the BPR value. For CBPR0 (S or NS):
     *  a BPR of 0 means the group priority bits are [7:1];
     *  a BPR of 1 means they are [7:2], and so on down to
     *  a BPR of 7 meaning no group priority bits at all.
     * For CBPR1 NS:
     *  a BPR of 0 is impossible (the minimum value is 1)
     *  a BPR of 1 means the group priority bits are [7:1];
     *  a BPR of 2 means they are [7:2], and so on down to
     *  a BPR of 7 meaning the group priority is [7].
     *
     * Which BPR to use depends on the group of the interrupt and
     * the current ICC_CTLR.CBPR settings.
     *
     * This corresponds to the GroupBits() pseudocode.
     */
    int bpr;

    if ((group == GICV3_G1 && cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR) ||
        (group == GICV3_G1NS &&
         cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR)) {
        /* CBPR set: Group 1 interrupts use the Group 0 BPR */
        group = GICV3_G0;
    }

    bpr = cs->icc_bpr[group] & 7;

    if (group == GICV3_G1NS) {
        /* NS BPR1 behaves as "BPR0 + 1": adjust so the shift below
         * computes the same field width as the CBPR0 cases.
         */
        assert(bpr > 0);
        bpr--;
    }

    return ~0U << (bpr + 1);
}
973 
974 static bool icc_no_enabled_hppi(GICv3CPUState *cs)
975 {
976     /* Return true if there is no pending interrupt, or the
977      * highest priority pending interrupt is in a group which has been
978      * disabled at the CPU interface by the ICC_IGRPEN* register enable bits.
979      */
980     return cs->hppi.prio == 0xff || (cs->icc_igrpen[cs->hppi.grp] == 0);
981 }
982 
static bool icc_hppi_can_preempt(GICv3CPUState *cs)
{
    /* Return true if we have a pending interrupt of sufficient
     * priority to preempt.
     */
    int rprio;
    uint32_t mask;
    ARMCPU *cpu = ARM_CPU(cs->cpu);
    CPUARMState *env = &cpu->env;

    if (icc_no_enabled_hppi(cs)) {
        return false;
    }

    if (cs->hppi.nmi) {
        /* PMR masking of a pending NMI only applies in the
         * two-Security-state Group 1NS case, where the NMI's effective
         * priority is 0x80 (see icc_highest_active_prio).
         */
        if (!(cs->gic->gicd_ctlr & GICD_CTLR_DS) &&
            cs->hppi.grp == GICV3_G1NS) {
            if (cs->icc_pmr_el1 < 0x80) {
                return false;
            }
            if (arm_is_secure(env) && cs->icc_pmr_el1 == 0x80) {
                return false;
            }
        }
    } else if (cs->hppi.prio >= cs->icc_pmr_el1) {
        /* Priority mask masks this interrupt */
        return false;
    }

    rprio = icc_highest_active_prio(cs);
    if (rprio == 0xff) {
        /* No currently running interrupt so we can preempt */
        return true;
    }

    mask = icc_gprio_mask(cs, cs->hppi.grp);

    /* We only preempt a running interrupt if the pending interrupt's
     * group priority is sufficient (the subpriorities are not considered).
     */
    if ((cs->hppi.prio & mask) < (rprio & mask)) {
        return true;
    }

    /* A pending NMI also preempts an equal group priority non-NMI
     * active interrupt, but not an already-active NMI.
     */
    if (cs->hppi.nmi && (cs->hppi.prio & mask) == (rprio & mask)) {
        if (!(cs->icc_apr[cs->hppi.grp][0] & ICC_AP1R_EL1_NMI)) {
            return true;
        }
    }

    return false;
}
1035 
void gicv3_cpuif_update(GICv3CPUState *cs)
{
    /* Tell the CPU about its highest priority pending interrupt */
    int irqlevel = 0;
    int fiqlevel = 0;
    ARMCPU *cpu = ARM_CPU(cs->cpu);
    CPUARMState *env = &cpu->env;

    /* Caller must hold the BQL: we read CPU state and drive IRQ lines */
    g_assert(bql_locked());

    trace_gicv3_cpuif_update(gicv3_redist_affid(cs), cs->hppi.irq,
                             cs->hppi.grp, cs->hppi.prio);

    if (cs->hppi.grp == GICV3_G1 && !arm_feature(env, ARM_FEATURE_EL3)) {
        /* If a Security-enabled GIC sends a G1S interrupt to a
         * Security-disabled CPU, we must treat it as if it were G0.
         */
        cs->hppi.grp = GICV3_G0;
    }

    if (icc_hppi_can_preempt(cs)) {
        /* We have an interrupt: should we signal it as IRQ or FIQ?
         * This is described in the GICv3 spec section 4.6.2.
         */
        bool isfiq;

        switch (cs->hppi.grp) {
        case GICV3_G0:
            /* Group 0 is always signalled as FIQ */
            isfiq = true;
            break;
        case GICV3_G1:
            /* G1S: FIQ unless executing Secure below AArch64 EL3 */
            isfiq = (!arm_is_secure(env) ||
                     (arm_current_el(env) == 3 && arm_el_is_aa64(env, 3)));
            break;
        case GICV3_G1NS:
            /* G1NS: FIQ only if the CPU is currently Secure */
            isfiq = arm_is_secure(env);
            break;
        default:
            g_assert_not_reached();
        }

        if (isfiq) {
            fiqlevel = 1;
        } else {
            irqlevel = 1;
        }
    }

    trace_gicv3_cpuif_set_irqs(gicv3_redist_affid(cs), fiqlevel, irqlevel);

    qemu_set_irq(cs->parent_fiq, fiqlevel);
    qemu_set_irq(cs->parent_irq, irqlevel);
}
1089 
static uint64_t icc_pmr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Read ICC_PMR_EL1 (interrupt priority mask register) */
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint32_t value = cs->icc_pmr_el1;

    if (icv_access(env, HCR_FMO | HCR_IMO)) {
        /* Access is redirected to the virtual CPU interface */
        return icv_pmr_read(env, ri);
    }

    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env) &&
        (env->cp15.scr_el3 & SCR_FIQ)) {
        /* NS access and Group 0 is inaccessible to NS: return the
         * NS view of the current priority
         */
        if ((value & 0x80) == 0) {
            /* Secure priorities not visible to NS */
            value = 0;
        } else if (value != 0xff) {
            /* Expand the Secure-view [0x80,0xff) range into the full
             * NS priority range (0xff reads back unchanged).
             */
            value = (value << 1) & 0xff;
        }
    }

    trace_gicv3_icc_pmr_read(gicv3_redist_affid(cs), value);

    return value;
}
1116 
static void icc_pmr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Write ICC_PMR_EL1 (interrupt priority mask register) */
    GICv3CPUState *cs = icc_cs_from_env(env);

    if (icv_access(env, HCR_FMO | HCR_IMO)) {
        /* Access is redirected to the virtual CPU interface */
        return icv_pmr_write(env, ri, value);
    }

    trace_gicv3_icc_pmr_write(gicv3_redist_affid(cs), value);

    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env) &&
        (env->cp15.scr_el3 & SCR_FIQ)) {
        /* NS access and Group 0 is inaccessible to NS: the value
         * written is the NS view, so convert it into the Secure view
         * before storing it.
         */
        if (!(cs->icc_pmr_el1 & 0x80)) {
            /* Current PMR in the secure range, don't allow NS to change it */
            return;
        }
        /* Compress the NS range into the Secure [0x80,0xff] range */
        value = (value >> 1) | 0x80;
    }
    value &= icc_fullprio_mask(cs);
    cs->icc_pmr_el1 = value;
    /* PMR change can unmask/mask interrupts: recompute IRQ/FIQ lines */
    gicv3_cpuif_update(cs);
}
1143 
static void icc_activate_irq(GICv3CPUState *cs, int irq)
{
    /* Move the interrupt from the Pending state to Active, and update
     * the Active Priority Registers
     */
    uint32_t mask = icc_gprio_mask(cs, cs->hppi.grp);
    int prio = cs->hppi.prio & mask;
    /* Convert the group priority into its APR array bit position */
    int aprbit = prio >> (8 - cs->prebits);
    int regno = aprbit / 32;
    int regbit = aprbit % 32;
    bool nmi = cs->hppi.nmi;

    if (nmi) {
        /* NMIs are tracked with the dedicated NMI bit, not a priority bit */
        cs->icc_apr[cs->hppi.grp][regno] |= ICC_AP1R_EL1_NMI;
    } else {
        cs->icc_apr[cs->hppi.grp][regno] |= (1 << regbit);
    }

    if (irq < GIC_INTERNAL) {
        /* SGI/PPI: active/pending state lives in the redistributor */
        cs->gicr_iactiver0 = deposit32(cs->gicr_iactiver0, irq, 1, 1);
        cs->gicr_ipendr0 = deposit32(cs->gicr_ipendr0, irq, 1, 0);
        gicv3_redist_update(cs);
    } else if (irq < GICV3_LPI_INTID_START) {
        /* SPI: active/pending state lives in the distributor */
        gicv3_gicd_active_set(cs->gic, irq);
        gicv3_gicd_pending_clear(cs->gic, irq);
        gicv3_update(cs->gic, irq, 1);
    } else {
        /* LPIs have no active state; just clear the pending bit */
        gicv3_redist_lpi_pending(cs, irq, 0);
    }
}
1174 
static uint64_t icc_hppir0_value(GICv3CPUState *cs, CPUARMState *env)
{
    /* Return the highest priority pending interrupt register value
     * for group 0.
     */
    bool irq_is_secure;

    if (icc_no_enabled_hppi(cs)) {
        return INTID_SPURIOUS;
    }

    /* Check whether we can return the interrupt or if we should return
     * a special identifier, as per the CheckGroup0ForSpecialIdentifiers
     * pseudocode. (We can simplify a little because for us ICC_SRE_EL1.RM
     * is always zero.)
     */
    irq_is_secure = (!(cs->gic->gicd_ctlr & GICD_CTLR_DS) &&
                     (cs->hppi.grp != GICV3_G1NS));

    if (cs->hppi.grp != GICV3_G0 && !arm_is_el3_or_mon(env)) {
        /* Group 1 interrupts are only reported here to EL3/Monitor */
        return INTID_SPURIOUS;
    }
    if (irq_is_secure && !arm_is_secure(env)) {
        /* Secure interrupts not visible to Nonsecure */
        return INTID_SPURIOUS;
    }

    if (cs->hppi.grp != GICV3_G0) {
        /* Indicate to EL3 that there's a Group 1 interrupt for the other
         * state pending.
         */
        return irq_is_secure ? INTID_SECURE : INTID_NONSECURE;
    }

    return cs->hppi.irq;
}
1211 
static uint64_t icc_hppir1_value(GICv3CPUState *cs, CPUARMState *env)
{
    /* Return the highest priority pending interrupt register value
     * for group 1.
     */
    bool irq_is_secure;

    if (icc_no_enabled_hppi(cs)) {
        return INTID_SPURIOUS;
    }

    /* Check whether we can return the interrupt or if we should return
     * a special identifier, as per the CheckGroup1ForSpecialIdentifiers
     * pseudocode. (We can simplify a little because for us ICC_SRE_EL1.RM
     * is always zero.)
     */
    irq_is_secure = (!(cs->gic->gicd_ctlr & GICD_CTLR_DS) &&
                     (cs->hppi.grp != GICV3_G1NS));

    if (cs->hppi.grp == GICV3_G0) {
        /* Group 0 interrupts not visible via HPPIR1 */
        return INTID_SPURIOUS;
    }
    if (irq_is_secure) {
        if (!arm_is_secure(env)) {
            /* Secure interrupts not visible in Non-secure */
            return INTID_SPURIOUS;
        }
    } else if (!arm_is_el3_or_mon(env) && arm_is_secure(env)) {
        /* Group 1 non-secure interrupts not visible in Secure EL1 */
        return INTID_SPURIOUS;
    }

    return cs->hppi.irq;
}
1247 
static uint64_t icc_iar0_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Read ICC_IAR0_EL1: acknowledge the highest priority pending
     * Group 0 interrupt (activating it), or return a special INTID.
     */
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t intid;

    if (icv_access(env, HCR_FMO)) {
        /* Access is redirected to the virtual CPU interface */
        return icv_iar_read(env, ri);
    }

    if (!icc_hppi_can_preempt(cs)) {
        /* Nothing of sufficient priority: the read is spurious */
        intid = INTID_SPURIOUS;
    } else {
        intid = icc_hppir0_value(cs, env);
    }

    if (!gicv3_intid_is_special(intid)) {
        /* Acknowledging a real interrupt moves it Pending -> Active */
        icc_activate_irq(cs, intid);
    }

    trace_gicv3_icc_iar0_read(gicv3_redist_affid(cs), intid);
    return intid;
}
1270 
static uint64_t icc_iar1_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Read ICC_IAR1_EL1: acknowledge the highest priority pending
     * Group 1 interrupt (activating it), or return a special INTID.
     */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int el = arm_current_el(env);
    uint64_t intid;

    if (icv_access(env, HCR_IMO)) {
        /* Access is redirected to the virtual CPU interface */
        return icv_iar_read(env, ri);
    }

    if (!icc_hppi_can_preempt(cs)) {
        /* Nothing of sufficient priority: the read is spurious */
        intid = INTID_SPURIOUS;
    } else {
        intid = icc_hppir1_value(cs, env);
    }

    if (!gicv3_intid_is_special(intid)) {
        if (cs->hppi.nmi && env->cp15.sctlr_el[el] & SCTLR_NMI) {
            /* With SCTLR_ELx.NMI set, an NMI must be acknowledged via
             * ICC_NMIAR1_EL1 instead; report INTID_NMI and do not
             * activate the interrupt here.
             */
            intid = INTID_NMI;
        } else {
            icc_activate_irq(cs, intid);
        }
    }

    trace_gicv3_icc_iar1_read(gicv3_redist_affid(cs), intid);
    return intid;
}
1298 
static uint64_t icc_nmiar1_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Read ICC_NMIAR1_EL1: acknowledge the highest priority pending
     * Group 1 interrupt only if it is an NMI; otherwise spurious.
     */
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t intid;

    if (icv_access(env, HCR_IMO)) {
        /* Access is redirected to the virtual CPU interface */
        return icv_nmiar1_read(env, ri);
    }

    if (!icc_hppi_can_preempt(cs)) {
        intid = INTID_SPURIOUS;
    } else {
        intid = icc_hppir1_value(cs, env);
    }

    if (!gicv3_intid_is_special(intid)) {
        if (!cs->hppi.nmi) {
            /* Only NMIs may be acknowledged through this register */
            intid = INTID_SPURIOUS;
        } else {
            icc_activate_irq(cs, intid);
        }
    }

    trace_gicv3_icc_nmiar1_read(gicv3_redist_affid(cs), intid);
    return intid;
}
1325 
static void icc_drop_prio(GICv3CPUState *cs, int grp)
{
    /* Drop the priority of the currently active interrupt in
     * the specified group.
     *
     * Note that we can guarantee (because of the requirement to nest
     * ICC_IAR reads [which activate an interrupt and raise priority]
     * with ICC_EOIR writes [which drop the priority for the interrupt])
     * that the interrupt we're being called for is the highest priority
     * active interrupt, meaning that it has the lowest set bit in the
     * APR registers.
     *
     * If the guest does not honour the ordering constraints then the
     * behaviour of the GIC is UNPREDICTABLE, which for us means that
     * the values of the APR registers might become incorrect and the
     * running priority will be wrong, so interrupts that should preempt
     * might not do so, and interrupts that should not preempt might do so.
     */
    int i;

    for (i = 0; i < icc_num_aprs(cs); i++) {
        uint64_t *papr = &cs->icc_apr[grp][i];

        if (!*papr) {
            continue;
        }

        if (i == 0 && cs->nmi_support && (*papr & ICC_AP1R_EL1_NMI)) {
            /* An active NMI takes precedence: drop its NMI bit and stop */
            *papr &= (~ICC_AP1R_EL1_NMI);
            break;
        }

        /* Clear the lowest set bit */
        *papr &= *papr - 1;
        break;
    }

    /* running priority change means we need an update for this cpu i/f */
    gicv3_cpuif_update(cs);
}
1366 
1367 static bool icc_eoi_split(CPUARMState *env, GICv3CPUState *cs)
1368 {
1369     /* Return true if we should split priority drop and interrupt
1370      * deactivation, ie whether the relevant EOIMode bit is set.
1371      */
1372     if (arm_is_el3_or_mon(env)) {
1373         return cs->icc_ctlr_el3 & ICC_CTLR_EL3_EOIMODE_EL3;
1374     }
1375     if (arm_is_secure_below_el3(env)) {
1376         return cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_EOIMODE;
1377     } else {
1378         return cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_EOIMODE;
1379     }
1380 }
1381 
static int icc_highest_active_group(GICv3CPUState *cs)
{
    /* Return the group with the highest priority active interrupt.
     * We can do this by just comparing the APRs to see which one
     * has the lowest set bit.
     * (If more than one group is active at the same priority then
     * we're in UNPREDICTABLE territory.)
     */
    int i;

    if (cs->nmi_support) {
        /* An active NMI outranks everything else; the NMI bit can only
         * be set in AP1R0 of the Group 1 registers.
         */
        if (cs->icc_apr[GICV3_G1][0] & ICC_AP1R_EL1_NMI) {
            return GICV3_G1;
        }
        if (cs->icc_apr[GICV3_G1NS][0] & ICC_AP1R_EL1_NMI) {
            return GICV3_G1NS;
        }
    }

    for (i = 0; i < ARRAY_SIZE(cs->icc_apr[0]); i++) {
        /* ctz32() returns 32 for a zero word, so an empty group
         * naturally loses all the comparisons below.
         */
        int g0ctz = ctz32(cs->icc_apr[GICV3_G0][i]);
        int g1ctz = ctz32(cs->icc_apr[GICV3_G1][i]);
        int g1nsctz = ctz32(cs->icc_apr[GICV3_G1NS][i]);

        if (g1nsctz < g0ctz && g1nsctz < g1ctz) {
            return GICV3_G1NS;
        }
        if (g1ctz < g0ctz) {
            return GICV3_G1;
        }
        if (g0ctz < 32) {
            return GICV3_G0;
        }
    }
    /* No set active bits? UNPREDICTABLE; return -1 so the caller
     * ignores the spurious EOI attempt.
     */
    return -1;
}
1421 
1422 static void icc_deactivate_irq(GICv3CPUState *cs, int irq)
1423 {
1424     if (irq < GIC_INTERNAL) {
1425         cs->gicr_iactiver0 = deposit32(cs->gicr_iactiver0, irq, 1, 0);
1426         gicv3_redist_update(cs);
1427     } else {
1428         gicv3_gicd_active_clear(cs->gic, irq);
1429         gicv3_update(cs->gic, irq, 1);
1430     }
1431 }
1432 
1433 static bool icv_eoi_split(CPUARMState *env, GICv3CPUState *cs)
1434 {
1435     /* Return true if we should split priority drop and interrupt
1436      * deactivation, ie whether the virtual EOIMode bit is set.
1437      */
1438     return cs->ich_vmcr_el2 & ICH_VMCR_EL2_VEOIM;
1439 }
1440 
1441 static int icv_find_active(GICv3CPUState *cs, int irq)
1442 {
1443     /* Given an interrupt number for an active interrupt, return the index
1444      * of the corresponding list register, or -1 if there is no match.
1445      * Corresponds to FindActiveVirtualInterrupt pseudocode.
1446      */
1447     int i;
1448 
1449     for (i = 0; i < cs->num_list_regs; i++) {
1450         uint64_t lr = cs->ich_lr_el2[i];
1451 
1452         if ((lr & ICH_LR_EL2_STATE_ACTIVE_BIT) && ich_lr_vintid(lr) == irq) {
1453             return i;
1454         }
1455     }
1456 
1457     return -1;
1458 }
1459 
static void icv_deactivate_irq(GICv3CPUState *cs, int idx)
{
    /* Deactivate the interrupt in the specified list register index */
    uint64_t lr = cs->ich_lr_el2[idx];

    if (lr & ICH_LR_EL2_HW) {
        /* Deactivate the associated physical interrupt */
        int pirq = ich_lr_pintid(lr);

        if (pirq < INTID_SECURE) {
            /* Only forward the deactivate for a valid physical INTID */
            icc_deactivate_irq(cs, pirq);
        }
    }

    /* Clear the 'active' part of the state, so ActivePending->Pending
     * and Active->Invalid.
     */
    lr &= ~ICH_LR_EL2_STATE_ACTIVE_BIT;
    cs->ich_lr_el2[idx] = lr;
}
1480 
1481 static void icv_increment_eoicount(GICv3CPUState *cs)
1482 {
1483     /* Increment the EOICOUNT field in ICH_HCR_EL2 */
1484     int eoicount = extract64(cs->ich_hcr_el2, ICH_HCR_EL2_EOICOUNT_SHIFT,
1485                              ICH_HCR_EL2_EOICOUNT_LENGTH);
1486 
1487     cs->ich_hcr_el2 = deposit64(cs->ich_hcr_el2, ICH_HCR_EL2_EOICOUNT_SHIFT,
1488                                 ICH_HCR_EL2_EOICOUNT_LENGTH, eoicount + 1);
1489 }
1490 
static int icv_drop_prio(GICv3CPUState *cs, bool *nmi)
{
    /* Drop the priority of the currently active virtual interrupt
     * (favouring group 0 if there is a set active bit at
     * the same priority for both group 0 and group 1).
     * Return the priority value for the bit we just cleared,
     * or 0xff if no bits were set in the AP registers at all.
     * Note that though the ich_apr[] are uint64_t only the low
     * 32 bits are actually relevant.
     */
    int i;
    int aprmax = ich_num_aprs(cs);

    for (i = 0; i < aprmax; i++) {
        uint64_t *papr0 = &cs->ich_apr[GICV3_G0][i];
        uint64_t *papr1 = &cs->ich_apr[GICV3_G1NS][i];
        int apr0count, apr1count;

        if (!*papr0 && !*papr1) {
            continue;
        }

        if (i == 0 && cs->nmi_support && (*papr1 & ICV_AP1R_EL1_NMI)) {
            /* An active virtual NMI takes precedence: clear its bit and
             * report via *nmi; the returned priority is not meaningful
             * in this case.
             */
            *papr1 &= (~ICV_AP1R_EL1_NMI);
            *nmi = true;
            return 0xff;
        }

        /* We can't just use the bit-twiddling hack icc_drop_prio() does
         * because we need to return the bit number we cleared so
         * it can be compared against the list register's priority field.
         */
        apr0count = ctz32(*papr0);
        apr1count = ctz32(*papr1);

        if (apr0count <= apr1count) {
            *papr0 &= *papr0 - 1;
            return (apr0count + i * 32) << (icv_min_vbpr(cs) + 1);
        } else {
            *papr1 &= *papr1 - 1;
            return (apr1count + i * 32) << (icv_min_vbpr(cs) + 1);
        }
    }
    /* No active virtual interrupt at all */
    return 0xff;
}
1536 
static void icv_dir_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Deactivate interrupt */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int idx;
    int irq = value & 0xffffff;

    trace_gicv3_icv_dir_write(gicv3_redist_affid(cs), value);

    if (irq >= GICV3_MAXIRQ) {
        /* Also catches special interrupt numbers and LPIs */
        return;
    }

    if (!icv_eoi_split(env, cs)) {
        /* DIR writes only take effect when priority drop and
         * deactivate are split (VEOIM set); otherwise ignore.
         */
        return;
    }

    idx = icv_find_active(cs, irq);

    if (idx < 0) {
        /* No list register matching this, so increment the EOI count
         * (might trigger a maintenance interrupt)
         */
        icv_increment_eoicount(cs);
    } else {
        icv_deactivate_irq(cs, idx);
    }

    gicv3_cpuif_virt_update(cs);
}
1569 
1570 static void icv_eoir_write(CPUARMState *env, const ARMCPRegInfo *ri,
1571                            uint64_t value)
1572 {
1573     /* End of Interrupt */
1574     GICv3CPUState *cs = icc_cs_from_env(env);
1575     int irq = value & 0xffffff;
1576     int grp = ri->crm == 8 ? GICV3_G0 : GICV3_G1NS;
1577     int idx, dropprio;
1578     bool nmi = false;
1579 
1580     trace_gicv3_icv_eoir_write(ri->crm == 8 ? 0 : 1,
1581                                gicv3_redist_affid(cs), value);
1582 
1583     if (gicv3_intid_is_special(irq)) {
1584         return;
1585     }
1586 
1587     /* We implement the IMPDEF choice of "drop priority before doing
1588      * error checks" (because that lets us avoid scanning the AP
1589      * registers twice).
1590      */
1591     dropprio = icv_drop_prio(cs, &nmi);
1592     if (dropprio == 0xff && !nmi) {
1593         /* No active interrupt. It is CONSTRAINED UNPREDICTABLE
1594          * whether the list registers are checked in this
1595          * situation; we choose not to.
1596          */
1597         return;
1598     }
1599 
1600     idx = icv_find_active(cs, irq);
1601 
1602     if (idx < 0) {
1603         /*
1604          * No valid list register corresponding to EOI ID; if this is a vLPI
1605          * not in the list regs then do nothing; otherwise increment EOI count
1606          */
1607         if (irq < GICV3_LPI_INTID_START) {
1608             icv_increment_eoicount(cs);
1609         }
1610     } else {
1611         uint64_t lr = cs->ich_lr_el2[idx];
1612         int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;
1613         int lr_gprio = ich_lr_prio(lr) & icv_gprio_mask(cs, grp);
1614         bool thisnmi = lr & ICH_LR_EL2_NMI;
1615 
1616         if (thisgrp == grp && (lr_gprio == dropprio || (thisnmi & nmi))) {
1617             if (!icv_eoi_split(env, cs) || irq >= GICV3_LPI_INTID_START) {
1618                 /*
1619                  * Priority drop and deactivate not split: deactivate irq now.
1620                  * LPIs always get their active state cleared immediately
1621                  * because no separate deactivate is expected.
1622                  */
1623                 icv_deactivate_irq(cs, idx);
1624             }
1625         }
1626     }
1627 
1628     gicv3_cpuif_virt_update(cs);
1629 }
1630 
static void icc_eoir_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* End of Interrupt */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int irq = value & 0xffffff;
    int grp;
    bool is_eoir0 = ri->crm == 8;

    if (icv_access(env, is_eoir0 ? HCR_FMO : HCR_IMO)) {
        /* Access is redirected to the virtual CPU interface */
        icv_eoir_write(env, ri, value);
        return;
    }

    trace_gicv3_icc_eoir_write(is_eoir0 ? 0 : 1,
                               gicv3_redist_affid(cs), value);

    if ((irq >= cs->gic->num_irq) &&
        !(cs->gic->lpi_enable && (irq >= GICV3_LPI_INTID_START))) {
        /* This handles two cases:
         * 1. If software writes the ID of a spurious interrupt [ie 1020-1023]
         * to the GICC_EOIR, the GIC ignores that write.
         * 2. If software writes the number of a non-existent interrupt
         * this must be a subcase of "value written does not match the last
         * valid interrupt value read from the Interrupt Acknowledge
         * register" and so this is UNPREDICTABLE. We choose to ignore it.
         */
        return;
    }

    /* Check that the EOI is permitted: the highest active group must
     * match the register written (EOIR0 vs EOIR1) and be visible from
     * the current security state.
     */
    grp = icc_highest_active_group(cs);
    switch (grp) {
    case GICV3_G0:
        if (!is_eoir0) {
            return;
        }
        /* NS may not EOI a G0 interrupt on a two-Security-state GIC */
        if (!(cs->gic->gicd_ctlr & GICD_CTLR_DS)
            && arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env)) {
            return;
        }
        break;
    case GICV3_G1:
        if (is_eoir0) {
            return;
        }
        /* G1S interrupts may only be EOId from Secure state */
        if (!arm_is_secure(env)) {
            return;
        }
        break;
    case GICV3_G1NS:
        if (is_eoir0) {
            return;
        }
        /* G1NS interrupts are not EOIable from Secure EL1 */
        if (!arm_is_el3_or_mon(env) && arm_is_secure(env)) {
            return;
        }
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: IRQ %d isn't active\n", __func__, irq);
        return;
    }

    icc_drop_prio(cs, grp);

    if (!icc_eoi_split(env, cs)) {
        /* Priority drop and deactivate not split: deactivate irq now */
        icc_deactivate_irq(cs, irq);
    }
}
1701 
1702 static uint64_t icc_hppir0_read(CPUARMState *env, const ARMCPRegInfo *ri)
1703 {
1704     GICv3CPUState *cs = icc_cs_from_env(env);
1705     uint64_t value;
1706 
1707     if (icv_access(env, HCR_FMO)) {
1708         return icv_hppir_read(env, ri);
1709     }
1710 
1711     value = icc_hppir0_value(cs, env);
1712     trace_gicv3_icc_hppir0_read(gicv3_redist_affid(cs), value);
1713     return value;
1714 }
1715 
1716 static uint64_t icc_hppir1_read(CPUARMState *env, const ARMCPRegInfo *ri)
1717 {
1718     GICv3CPUState *cs = icc_cs_from_env(env);
1719     uint64_t value;
1720 
1721     if (icv_access(env, HCR_IMO)) {
1722         return icv_hppir_read(env, ri);
1723     }
1724 
1725     value = icc_hppir1_value(cs, env);
1726     trace_gicv3_icc_hppir1_read(gicv3_redist_affid(cs), value);
1727     return value;
1728 }
1729 
static uint64_t icc_bpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Read ICC_BPR0_EL1 or ICC_BPR1_EL1 (binary point registers) */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1;
    bool satinc = false;
    uint64_t bpr;

    if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
        /* Access is redirected to the virtual CPU interface */
        return icv_bpr_read(env, ri);
    }

    if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
        grp = GICV3_G1NS;
    }

    if (grp == GICV3_G1 && !arm_is_el3_or_mon(env) &&
        (cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR)) {
        /* CBPR_EL1S means secure EL1 or AArch32 EL3 !Mon BPR1 accesses
         * modify BPR0
         */
        grp = GICV3_G0;
    }

    if (grp == GICV3_G1NS && arm_current_el(env) < 3 &&
        (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR)) {
        /* reads return bpr0 + 1 sat to 7, writes ignored */
        grp = GICV3_G0;
        satinc = true;
    }

    bpr = cs->icc_bpr[grp];
    if (satinc) {
        /* Saturating increment for the CBPR_EL1NS case above */
        bpr++;
        bpr = MIN(bpr, 7);
    }

    trace_gicv3_icc_bpr_read(ri->crm == 8 ? 0 : 1, gicv3_redist_affid(cs), bpr);

    return bpr;
}
1770 
static void icc_bpr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Write ICC_BPR0_EL1 or ICC_BPR1_EL1 (binary point registers) */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1;
    uint64_t minval;

    if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
        /* Access is redirected to the virtual CPU interface */
        icv_bpr_write(env, ri, value);
        return;
    }

    trace_gicv3_icc_bpr_write(ri->crm == 8 ? 0 : 1,
                              gicv3_redist_affid(cs), value);

    if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
        grp = GICV3_G1NS;
    }

    if (grp == GICV3_G1 && !arm_is_el3_or_mon(env) &&
        (cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR)) {
        /* CBPR_EL1S means secure EL1 or AArch32 EL3 !Mon BPR1 accesses
         * modify BPR0
         */
        grp = GICV3_G0;
    }

    if (grp == GICV3_G1NS && arm_current_el(env) < 3 &&
        (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR)) {
        /* reads return bpr0 + 1 sat to 7, writes ignored */
        return;
    }

    /* Clamp the written value to the minimum BPR the interface supports */
    minval = (grp == GICV3_G1NS) ? icc_min_bpr_ns(cs) : icc_min_bpr(cs);
    if (value < minval) {
        value = minval;
    }

    cs->icc_bpr[grp] = value & 7;
    /* A BPR change alters group priorities, so recompute IRQ/FIQ lines */
    gicv3_cpuif_update(cs);
}
1812 
static uint64_t icc_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Read ICC_AP0R<n>_EL1 or ICC_AP1R<n>_EL1 (active priority registers) */
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t value;

    /* Register number n is encoded in opc2; group in crm bit 0 */
    int regno = ri->opc2 & 3;
    int grp = (ri->crm & 1) ? GICV3_G1 : GICV3_G0;

    if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
        /* Access is redirected to the virtual CPU interface */
        return icv_ap_read(env, ri);
    }

    if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
        grp = GICV3_G1NS;
    }

    value = cs->icc_apr[grp][regno];

    trace_gicv3_icc_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
    return value;
}
1834 
static void icc_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Write ICC_AP0R<n>_EL1 or ICC_AP1R<n>_EL1 (active priority registers) */
    GICv3CPUState *cs = icc_cs_from_env(env);

    /* Register number n is encoded in opc2; group in crm bit 0 */
    int regno = ri->opc2 & 3;
    int grp = (ri->crm & 1) ? GICV3_G1 : GICV3_G0;

    if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
        /* Access is redirected to the virtual CPU interface */
        icv_ap_write(env, ri, value);
        return;
    }

    trace_gicv3_icc_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);

    if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
        grp = GICV3_G1NS;
    }

    /* It's not possible to claim that a Non-secure interrupt is active
     * at a priority outside the Non-secure range (128..255), since this
     * would otherwise allow malicious NS code to block delivery of S interrupts
     * by writing a bad value to these registers.
     */
    if (grp == GICV3_G1NS && regno < 2 && arm_feature(env, ARM_FEATURE_EL3)) {
        return;
    }

    if (cs->nmi_support) {
        /* With NMI support the NMI bit (bit 63) is writable too */
        cs->icc_apr[grp][regno] = value & (0xFFFFFFFFU | ICC_AP1R_EL1_NMI);
    } else {
        cs->icc_apr[grp][regno] = value & 0xFFFFFFFFU;
    }
    /* Active priority change may alter the running priority */
    gicv3_cpuif_update(cs);
}
1870 
static void icc_dir_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /*
     * Write to ICC_DIR_EL1: deactivate the interrupt whose INTID is in
     * value[23:0]. Only effective when EOI/deactivate are split
     * (icc_eoi_split() true); otherwise the write is ignored.
     */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int irq = value & 0xffffff;
    bool irq_is_secure, single_sec_state, irq_is_grp0;
    bool route_fiq_to_el3, route_irq_to_el3, route_fiq_to_el2, route_irq_to_el2;

    if (icv_access(env, HCR_FMO | HCR_IMO)) {
        /* Access is redirected to the virtual CPU interface */
        icv_dir_write(env, ri, value);
        return;
    }

    trace_gicv3_icc_dir_write(gicv3_redist_affid(cs), value);

    if (irq >= cs->gic->num_irq) {
        /* Also catches special interrupt numbers and LPIs */
        return;
    }

    if (!icc_eoi_split(env, cs)) {
        /* EOImode == 0: ICC_EOIR* already deactivates; DIR writes ignored */
        return;
    }

    int grp = gicv3_irq_group(cs->gic, cs, irq);

    single_sec_state = cs->gic->gicd_ctlr & GICD_CTLR_DS;
    irq_is_secure = !single_sec_state && (grp != GICV3_G1NS);
    irq_is_grp0 = grp == GICV3_G0;

    /* Check whether we're allowed to deactivate this interrupt based
     * on its group and the current CPU state.
     * These checks are laid out to correspond to the spec's pseudocode.
     */
    route_fiq_to_el3 = env->cp15.scr_el3 & SCR_FIQ;
    route_irq_to_el3 = env->cp15.scr_el3 & SCR_IRQ;
    /* No need to include !IsSecure in route_*_to_el2 as it's only
     * tested in cases where we know !IsSecure is true.
     */
    uint64_t hcr_el2 = arm_hcr_el2_eff(env);
    route_fiq_to_el2 = hcr_el2 & HCR_FMO;
    route_irq_to_el2 = hcr_el2 & HCR_IMO;

    /* Fall through the switch only if deactivation is permitted at this EL;
     * a 'return' means the write is silently ignored.
     */
    switch (arm_current_el(env)) {
    case 3:
        /* EL3 may deactivate anything */
        break;
    case 2:
        if (single_sec_state && irq_is_grp0 && !route_fiq_to_el3) {
            break;
        }
        if (!irq_is_secure && !irq_is_grp0 && !route_irq_to_el3) {
            break;
        }
        return;
    case 1:
        if (!arm_is_secure_below_el3(env)) {
            if (single_sec_state && irq_is_grp0 &&
                !route_fiq_to_el3 && !route_fiq_to_el2) {
                break;
            }
            if (!irq_is_secure && !irq_is_grp0 &&
                !route_irq_to_el3 && !route_irq_to_el2) {
                break;
            }
        } else {
            if (irq_is_grp0 && !route_fiq_to_el3) {
                break;
            }
            if (!irq_is_grp0 &&
                (!irq_is_secure || !single_sec_state) &&
                !route_irq_to_el3) {
                break;
            }
        }
        return;
    default:
        g_assert_not_reached();
    }

    icc_deactivate_irq(cs, irq);
}
1953 
static uint64_t icc_rpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * Read ICC_RPR_EL1: the running priority, i.e. the highest active
     * priority, adjusted for the Non-secure view where necessary and
     * with NMI status flags merged in when NMIs are supported.
     */
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t prio;

    if (icv_access(env, HCR_FMO | HCR_IMO)) {
        /* Access is redirected to the virtual CPU interface */
        return icv_rpr_read(env, ri);
    }

    prio = icc_highest_active_prio(cs);

    if (arm_feature(env, ARM_FEATURE_EL3) &&
        !arm_is_secure(env) && (env->cp15.scr_el3 & SCR_FIQ)) {
        /* NS GIC access and Group 0 is inaccessible to NS */
        if ((prio & 0x80) == 0) {
            /* NS mustn't see priorities in the Secure half of the range */
            prio = 0;
        } else if (prio != 0xff) {
            /* Non-idle priority: show the Non-secure view of it */
            prio = (prio << 1) & 0xff;
        }
    }

    if (cs->nmi_support) {
        /* NMI info is reported in the high bits of RPR */
        if (arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env)) {
            /* NS view: only the NS Group 1 NMI is visible, as RPR.NMI */
            if (cs->icc_apr[GICV3_G1NS][0] & ICC_AP1R_EL1_NMI) {
                prio |= ICC_RPR_EL1_NMI;
            }
        } else {
            /* Secure (or no EL3) view: report both banks separately */
            if (cs->icc_apr[GICV3_G1NS][0] & ICC_AP1R_EL1_NMI) {
                prio |= ICC_RPR_EL1_NSNMI;
            }
            if (cs->icc_apr[GICV3_G1][0] & ICC_AP1R_EL1_NMI) {
                prio |= ICC_RPR_EL1_NMI;
            }
        }
    }

    trace_gicv3_icc_rpr_read(gicv3_redist_affid(cs), prio);
    return prio;
}
1996 
/*
 * Generate an SGI from a write to one of the ICC_{A}SGI{0,1}R registers.
 * @value is the raw register value: INTID in [27:24], IRM in [40],
 * target list in [15:0] and Aff3/Aff2/Aff1 in [55:48]/[39:32]/[23:16].
 * @grp is the interrupt group to send; @ns is the security state of
 * the originating access (the redistributor applies GICR_NSACR checks).
 */
static void icc_generate_sgi(CPUARMState *env, GICv3CPUState *cs,
                             uint64_t value, int grp, bool ns)
{
    GICv3State *s = cs->gic;

    /* Extract Aff3/Aff2/Aff1 and shift into the bottom 24 bits */
    uint64_t aff = extract64(value, 48, 8) << 16 |
        extract64(value, 32, 8) << 8 |
        extract64(value, 16, 8);
    uint32_t targetlist = extract64(value, 0, 16);
    uint32_t irq = extract64(value, 24, 4);
    bool irm = extract64(value, 40, 1);
    int i;

    if (grp == GICV3_G1 && s->gicd_ctlr & GICD_CTLR_DS) {
        /* If GICD_CTLR.DS == 1, the Distributor treats Secure Group 1
         * interrupts as Group 0 interrupts and must send Secure Group 0
         * interrupts to the target CPUs.
         */
        grp = GICV3_G0;
    }

    trace_gicv3_icc_generate_sgi(gicv3_redist_affid(cs), irq, irm,
                                 aff, targetlist);

    for (i = 0; i < s->num_cpu; i++) {
        GICv3CPUState *ocs = &s->cpu[i];

        if (irm) {
            /* IRM == 1 : route to all CPUs except self */
            if (cs == ocs) {
                continue;
            }
        } else {
            /* IRM == 0 : route to Aff3.Aff2.Aff1.n for all n in [0..15]
             * where the corresponding bit is set in targetlist
             */
            int aff0;

            /* gicr_typer[63:40] holds the CPU's Aff3.Aff2.Aff1 */
            if (ocs->gicr_typer >> 40 != aff) {
                continue;
            }
            aff0 = extract64(ocs->gicr_typer, 32, 8);
            if (aff0 > 15 || extract32(targetlist, aff0, 1) == 0) {
                continue;
            }
        }

        /* The redistributor will check against its own GICR_NSACR as needed */
        gicv3_redist_send_sgi(ocs, grp, irq, ns);
    }
}
2049 
2050 static void icc_sgi0r_write(CPUARMState *env, const ARMCPRegInfo *ri,
2051                            uint64_t value)
2052 {
2053     /* Generate Secure Group 0 SGI. */
2054     GICv3CPUState *cs = icc_cs_from_env(env);
2055     bool ns = !arm_is_secure(env);
2056 
2057     icc_generate_sgi(env, cs, value, GICV3_G0, ns);
2058 }
2059 
2060 static void icc_sgi1r_write(CPUARMState *env, const ARMCPRegInfo *ri,
2061                            uint64_t value)
2062 {
2063     /* Generate Group 1 SGI for the current Security state */
2064     GICv3CPUState *cs = icc_cs_from_env(env);
2065     int grp;
2066     bool ns = !arm_is_secure(env);
2067 
2068     grp = ns ? GICV3_G1NS : GICV3_G1;
2069     icc_generate_sgi(env, cs, value, grp, ns);
2070 }
2071 
2072 static void icc_asgi1r_write(CPUARMState *env, const ARMCPRegInfo *ri,
2073                              uint64_t value)
2074 {
2075     /* Generate Group 1 SGI for the Security state that is not
2076      * the current state
2077      */
2078     GICv3CPUState *cs = icc_cs_from_env(env);
2079     int grp;
2080     bool ns = !arm_is_secure(env);
2081 
2082     grp = ns ? GICV3_G1 : GICV3_G1NS;
2083     icc_generate_sgi(env, cs, value, grp, ns);
2084 }
2085 
2086 static uint64_t icc_igrpen_read(CPUARMState *env, const ARMCPRegInfo *ri)
2087 {
2088     GICv3CPUState *cs = icc_cs_from_env(env);
2089     int grp = ri->opc2 & 1 ? GICV3_G1 : GICV3_G0;
2090     uint64_t value;
2091 
2092     if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
2093         return icv_igrpen_read(env, ri);
2094     }
2095 
2096     if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
2097         grp = GICV3_G1NS;
2098     }
2099 
2100     value = cs->icc_igrpen[grp];
2101     trace_gicv3_icc_igrpen_read(ri->opc2 & 1 ? 1 : 0,
2102                                 gicv3_redist_affid(cs), value);
2103     return value;
2104 }
2105 
2106 static void icc_igrpen_write(CPUARMState *env, const ARMCPRegInfo *ri,
2107                              uint64_t value)
2108 {
2109     GICv3CPUState *cs = icc_cs_from_env(env);
2110     int grp = ri->opc2 & 1 ? GICV3_G1 : GICV3_G0;
2111 
2112     if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
2113         icv_igrpen_write(env, ri, value);
2114         return;
2115     }
2116 
2117     trace_gicv3_icc_igrpen_write(ri->opc2 & 1 ? 1 : 0,
2118                                  gicv3_redist_affid(cs), value);
2119 
2120     if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
2121         grp = GICV3_G1NS;
2122     }
2123 
2124     cs->icc_igrpen[grp] = value & ICC_IGRPEN_ENABLE;
2125     gicv3_cpuif_update(cs);
2126 }
2127 
2128 static uint64_t icc_igrpen1_el3_read(CPUARMState *env, const ARMCPRegInfo *ri)
2129 {
2130     GICv3CPUState *cs = icc_cs_from_env(env);
2131     uint64_t value;
2132 
2133     /* IGRPEN1_EL3 bits 0 and 1 are r/w aliases into IGRPEN1_EL1 NS and S */
2134     value = cs->icc_igrpen[GICV3_G1NS] | (cs->icc_igrpen[GICV3_G1] << 1);
2135     trace_gicv3_icc_igrpen1_el3_read(gicv3_redist_affid(cs), value);
2136     return value;
2137 }
2138 
2139 static void icc_igrpen1_el3_write(CPUARMState *env, const ARMCPRegInfo *ri,
2140                                   uint64_t value)
2141 {
2142     GICv3CPUState *cs = icc_cs_from_env(env);
2143 
2144     trace_gicv3_icc_igrpen1_el3_write(gicv3_redist_affid(cs), value);
2145 
2146     /* IGRPEN1_EL3 bits 0 and 1 are r/w aliases into IGRPEN1_EL1 NS and S */
2147     cs->icc_igrpen[GICV3_G1NS] = extract32(value, 0, 1);
2148     cs->icc_igrpen[GICV3_G1] = extract32(value, 1, 1);
2149     gicv3_cpuif_update(cs);
2150 }
2151 
2152 static uint64_t icc_ctlr_el1_read(CPUARMState *env, const ARMCPRegInfo *ri)
2153 {
2154     GICv3CPUState *cs = icc_cs_from_env(env);
2155     int bank = gicv3_use_ns_bank(env) ? GICV3_NS : GICV3_S;
2156     uint64_t value;
2157 
2158     if (icv_access(env, HCR_FMO | HCR_IMO)) {
2159         return icv_ctlr_read(env, ri);
2160     }
2161 
2162     value = cs->icc_ctlr_el1[bank];
2163     trace_gicv3_icc_ctlr_read(gicv3_redist_affid(cs), value);
2164     return value;
2165 }
2166 
static void icc_ctlr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    /* Write the ICC_CTLR_EL1 bank for the current security state */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int bank = gicv3_use_ns_bank(env) ? GICV3_NS : GICV3_S;
    uint64_t mask;

    if (icv_access(env, HCR_FMO | HCR_IMO)) {
        /* Access is redirected to the virtual CPU interface */
        icv_ctlr_write(env, ri, value);
        return;
    }

    trace_gicv3_icc_ctlr_write(gicv3_redist_affid(cs), value);

    /* Only CBPR and EOIMODE can be RW;
     * for us PMHE is RAZ/WI (we don't implement 1-of-N interrupts or
     * the associated priority-based routing of them);
     * if EL3 is implemented and GICD_CTLR.DS == 0, then PMHE and CBPR are RO.
     */
    if (arm_feature(env, ARM_FEATURE_EL3) &&
        ((cs->gic->gicd_ctlr & GICD_CTLR_DS) == 0)) {
        mask = ICC_CTLR_EL1_EOIMODE;
    } else {
        mask = ICC_CTLR_EL1_CBPR | ICC_CTLR_EL1_EOIMODE;
    }

    cs->icc_ctlr_el1[bank] &= ~mask;
    cs->icc_ctlr_el1[bank] |= (value & mask);
    gicv3_cpuif_update(cs);
}
2197 
2198 
2199 static uint64_t icc_ctlr_el3_read(CPUARMState *env, const ARMCPRegInfo *ri)
2200 {
2201     GICv3CPUState *cs = icc_cs_from_env(env);
2202     uint64_t value;
2203 
2204     value = cs->icc_ctlr_el3;
2205     if (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_EOIMODE) {
2206         value |= ICC_CTLR_EL3_EOIMODE_EL1NS;
2207     }
2208     if (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR) {
2209         value |= ICC_CTLR_EL3_CBPR_EL1NS;
2210     }
2211     if (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_EOIMODE) {
2212         value |= ICC_CTLR_EL3_EOIMODE_EL1S;
2213     }
2214     if (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR) {
2215         value |= ICC_CTLR_EL3_CBPR_EL1S;
2216     }
2217 
2218     trace_gicv3_icc_ctlr_el3_read(gicv3_redist_affid(cs), value);
2219     return value;
2220 }
2221 
2222 static void icc_ctlr_el3_write(CPUARMState *env, const ARMCPRegInfo *ri,
2223                                uint64_t value)
2224 {
2225     GICv3CPUState *cs = icc_cs_from_env(env);
2226     uint64_t mask;
2227 
2228     trace_gicv3_icc_ctlr_el3_write(gicv3_redist_affid(cs), value);
2229 
2230     /* *_EL1NS and *_EL1S bits are aliases into the ICC_CTLR_EL1 bits. */
2231     cs->icc_ctlr_el1[GICV3_NS] &= ~(ICC_CTLR_EL1_CBPR | ICC_CTLR_EL1_EOIMODE);
2232     if (value & ICC_CTLR_EL3_EOIMODE_EL1NS) {
2233         cs->icc_ctlr_el1[GICV3_NS] |= ICC_CTLR_EL1_EOIMODE;
2234     }
2235     if (value & ICC_CTLR_EL3_CBPR_EL1NS) {
2236         cs->icc_ctlr_el1[GICV3_NS] |= ICC_CTLR_EL1_CBPR;
2237     }
2238 
2239     cs->icc_ctlr_el1[GICV3_S] &= ~(ICC_CTLR_EL1_CBPR | ICC_CTLR_EL1_EOIMODE);
2240     if (value & ICC_CTLR_EL3_EOIMODE_EL1S) {
2241         cs->icc_ctlr_el1[GICV3_S] |= ICC_CTLR_EL1_EOIMODE;
2242     }
2243     if (value & ICC_CTLR_EL3_CBPR_EL1S) {
2244         cs->icc_ctlr_el1[GICV3_S] |= ICC_CTLR_EL1_CBPR;
2245     }
2246 
2247     /* The only bit stored in icc_ctlr_el3 which is writable is EOIMODE_EL3: */
2248     mask = ICC_CTLR_EL3_EOIMODE_EL3;
2249 
2250     cs->icc_ctlr_el3 &= ~mask;
2251     cs->icc_ctlr_el3 |= (value & mask);
2252     gicv3_cpuif_update(cs);
2253 }
2254 
/*
 * Access check for registers trapped by both SCR_EL3.FIQ and SCR_EL3.IRQ
 * (e.g. ICC_PMR_EL1, ICC_CTLR_EL1, ICC_RPR_EL1). Returns the trap
 * disposition for the access; EL2 traps take priority over EL3 traps.
 */
static CPAccessResult gicv3_irqfiq_access(CPUARMState *env,
                                          const ARMCPRegInfo *ri, bool isread)
{
    CPAccessResult r = CP_ACCESS_OK;
    GICv3CPUState *cs = icc_cs_from_env(env);
    int el = arm_current_el(env);

    if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TC) &&
        el == 1 && !arm_is_secure_below_el3(env)) {
        /* Takes priority over a possible EL3 trap */
        return CP_ACCESS_TRAP_EL2;
    }

    if ((env->cp15.scr_el3 & (SCR_FIQ | SCR_IRQ)) == (SCR_FIQ | SCR_IRQ)) {
        switch (el) {
        case 1:
            /* Note that arm_hcr_el2_eff takes secure state into account.  */
            if ((arm_hcr_el2_eff(env) & (HCR_IMO | HCR_FMO)) == 0) {
                r = CP_ACCESS_TRAP_EL3;
            }
            break;
        case 2:
            r = CP_ACCESS_TRAP_EL3;
            break;
        case 3:
            if (!is_a64(env) && !arm_is_el3_or_mon(env)) {
                r = CP_ACCESS_TRAP_EL3;
            }
            break;
        default:
            g_assert_not_reached();
        }
    }

    if (r == CP_ACCESS_TRAP_EL3 && !arm_el_is_aa64(env, 3)) {
        /* No AArch64 EL3 to trap to: report as a generic trap instead */
        r = CP_ACCESS_TRAP;
    }
    return r;
}
2294 
2295 static CPAccessResult gicv3_dir_access(CPUARMState *env,
2296                                        const ARMCPRegInfo *ri, bool isread)
2297 {
2298     GICv3CPUState *cs = icc_cs_from_env(env);
2299 
2300     if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TDIR) &&
2301         arm_current_el(env) == 1 && !arm_is_secure_below_el3(env)) {
2302         /* Takes priority over a possible EL3 trap */
2303         return CP_ACCESS_TRAP_EL2;
2304     }
2305 
2306     return gicv3_irqfiq_access(env, ri, isread);
2307 }
2308 
2309 static CPAccessResult gicv3_sgi_access(CPUARMState *env,
2310                                        const ARMCPRegInfo *ri, bool isread)
2311 {
2312     if (arm_current_el(env) == 1 &&
2313         (arm_hcr_el2_eff(env) & (HCR_IMO | HCR_FMO)) != 0) {
2314         /* Takes priority over a possible EL3 trap */
2315         return CP_ACCESS_TRAP_EL2;
2316     }
2317 
2318     return gicv3_irqfiq_access(env, ri, isread);
2319 }
2320 
/*
 * Access check for the Group 0 registers (IAR0, EOIR0, BPR0, AP0Rn,
 * IGRPEN0), trapped by ICH_HCR_EL2.TALL0 and SCR_EL3.FIQ.
 * EL2 traps take priority over EL3 traps.
 */
static CPAccessResult gicv3_fiq_access(CPUARMState *env,
                                       const ARMCPRegInfo *ri, bool isread)
{
    CPAccessResult r = CP_ACCESS_OK;
    GICv3CPUState *cs = icc_cs_from_env(env);
    int el = arm_current_el(env);

    if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TALL0) &&
        el == 1 && !arm_is_secure_below_el3(env)) {
        /* Takes priority over a possible EL3 trap */
        return CP_ACCESS_TRAP_EL2;
    }

    if (env->cp15.scr_el3 & SCR_FIQ) {
        switch (el) {
        case 1:
            /* EL1 traps to EL3 only when FIQs are not virtually routed */
            if ((arm_hcr_el2_eff(env) & HCR_FMO) == 0) {
                r = CP_ACCESS_TRAP_EL3;
            }
            break;
        case 2:
            r = CP_ACCESS_TRAP_EL3;
            break;
        case 3:
            if (!is_a64(env) && !arm_is_el3_or_mon(env)) {
                r = CP_ACCESS_TRAP_EL3;
            }
            break;
        default:
            g_assert_not_reached();
        }
    }

    if (r == CP_ACCESS_TRAP_EL3 && !arm_el_is_aa64(env, 3)) {
        /* No AArch64 EL3 to trap to: report as a generic trap instead */
        r = CP_ACCESS_TRAP;
    }
    return r;
}
2359 
/*
 * Access check for the Group 1 registers (IAR1, EOIR1, BPR1, AP1Rn,
 * IGRPEN1), trapped by ICH_HCR_EL2.TALL1 and SCR_EL3.IRQ.
 * EL2 traps take priority over EL3 traps.
 */
static CPAccessResult gicv3_irq_access(CPUARMState *env,
                                       const ARMCPRegInfo *ri, bool isread)
{
    CPAccessResult r = CP_ACCESS_OK;
    GICv3CPUState *cs = icc_cs_from_env(env);
    int el = arm_current_el(env);

    if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TALL1) &&
        el == 1 && !arm_is_secure_below_el3(env)) {
        /* Takes priority over a possible EL3 trap */
        return CP_ACCESS_TRAP_EL2;
    }

    if (env->cp15.scr_el3 & SCR_IRQ) {
        switch (el) {
        case 1:
            /* EL1 traps to EL3 only when IRQs are not virtually routed */
            if ((arm_hcr_el2_eff(env) & HCR_IMO) == 0) {
                r = CP_ACCESS_TRAP_EL3;
            }
            break;
        case 2:
            r = CP_ACCESS_TRAP_EL3;
            break;
        case 3:
            if (!is_a64(env) && !arm_is_el3_or_mon(env)) {
                r = CP_ACCESS_TRAP_EL3;
            }
            break;
        default:
            g_assert_not_reached();
        }
    }

    if (r == CP_ACCESS_TRAP_EL3 && !arm_el_is_aa64(env, 3)) {
        /* No AArch64 EL3 to trap to: report as a generic trap instead */
        r = CP_ACCESS_TRAP;
    }
    return r;
}
2398 
/*
 * Reset the whole CPU interface state (both the physical ICC_* and the
 * virtual ICH_* registers). Hung off the ICC_PMR_EL1 reginfo entry so
 * it runs once per CPU interface at system reset.
 */
static void icc_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);

    /* Both ICC_CTLR_EL1 banks reset with the implemented ID and
     * priority bit counts advertised in their RO fields.
     */
    cs->icc_ctlr_el1[GICV3_S] = ICC_CTLR_EL1_A3V |
        (1 << ICC_CTLR_EL1_IDBITS_SHIFT) |
        ((cs->pribits - 1) << ICC_CTLR_EL1_PRIBITS_SHIFT);
    cs->icc_ctlr_el1[GICV3_NS] = ICC_CTLR_EL1_A3V |
        (1 << ICC_CTLR_EL1_IDBITS_SHIFT) |
        ((cs->pribits - 1) << ICC_CTLR_EL1_PRIBITS_SHIFT);
    cs->icc_pmr_el1 = 0;
    cs->icc_bpr[GICV3_G0] = icc_min_bpr(cs);
    cs->icc_bpr[GICV3_G1] = icc_min_bpr(cs);
    cs->icc_bpr[GICV3_G1NS] = icc_min_bpr_ns(cs);
    memset(cs->icc_apr, 0, sizeof(cs->icc_apr));
    memset(cs->icc_igrpen, 0, sizeof(cs->icc_igrpen));
    cs->icc_ctlr_el3 = ICC_CTLR_EL3_NDS | ICC_CTLR_EL3_A3V |
        (1 << ICC_CTLR_EL3_IDBITS_SHIFT) |
        ((cs->pribits - 1) << ICC_CTLR_EL3_PRIBITS_SHIFT);

    /* Virtual interface state: clear everything, then set VFIQEN and
     * the minimum supported VBPR0/VBPR1 values in ICH_VMCR_EL2.
     */
    memset(cs->ich_apr, 0, sizeof(cs->ich_apr));
    cs->ich_hcr_el2 = 0;
    memset(cs->ich_lr_el2, 0, sizeof(cs->ich_lr_el2));
    cs->ich_vmcr_el2 = ICH_VMCR_EL2_VFIQEN |
        ((icv_min_vbpr(cs) + 1) << ICH_VMCR_EL2_VBPR1_SHIFT) |
        (icv_min_vbpr(cs) << ICH_VMCR_EL2_VBPR0_SHIFT);
}
2426 
/*
 * System register descriptions for the always-present GICv3 CPU
 * interface registers. Registers whose ICC_AP* count depends on the
 * number of priority bits, and the NMI register, live in the separate
 * tables below so they can be registered conditionally.
 */
static const ARMCPRegInfo gicv3_cpuif_reginfo[] = {
    { .name = "ICC_PMR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 6, .opc2 = 0,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_irqfiq_access,
      .readfn = icc_pmr_read,
      .writefn = icc_pmr_write,
      /* We hang the whole cpu interface reset routine off here
       * rather than parcelling it out into one little function
       * per register
       */
      .resetfn = icc_reset,
    },
    { .name = "ICC_IAR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 0,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_R, .accessfn = gicv3_fiq_access,
      .readfn = icc_iar0_read,
    },
    { .name = "ICC_EOIR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_W, .accessfn = gicv3_fiq_access,
      .writefn = icc_eoir_write,
    },
    { .name = "ICC_HPPIR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 2,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_R, .accessfn = gicv3_fiq_access,
      .readfn = icc_hppir0_read,
    },
    { .name = "ICC_BPR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 3,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_fiq_access,
      .readfn = icc_bpr_read,
      .writefn = icc_bpr_write,
    },
    { .name = "ICC_AP0R0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 4,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_fiq_access,
      .readfn = icc_ap_read,
      .writefn = icc_ap_write,
    },
    /* All the ICC_AP1R*_EL1 registers are banked */
    { .name = "ICC_AP1R0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 0,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_irq_access,
      .readfn = icc_ap_read,
      .writefn = icc_ap_write,
    },
    { .name = "ICC_DIR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_W, .accessfn = gicv3_dir_access,
      .writefn = icc_dir_write,
    },
    { .name = "ICC_RPR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 3,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_R, .accessfn = gicv3_irqfiq_access,
      .readfn = icc_rpr_read,
    },
    /* The SGI registers each have an AArch64 encoding and (for AArch32)
     * a 64-bit cp15 encoding.
     */
    { .name = "ICC_SGI1R_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 5,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_W, .accessfn = gicv3_sgi_access,
      .writefn = icc_sgi1r_write,
    },
    { .name = "ICC_SGI1R",
      .cp = 15, .opc1 = 0, .crm = 12,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_W, .accessfn = gicv3_sgi_access,
      .writefn = icc_sgi1r_write,
    },
    { .name = "ICC_ASGI1R_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 6,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_W, .accessfn = gicv3_sgi_access,
      .writefn = icc_asgi1r_write,
    },
    { .name = "ICC_ASGI1R",
      .cp = 15, .opc1 = 1, .crm = 12,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_W, .accessfn = gicv3_sgi_access,
      .writefn = icc_asgi1r_write,
    },
    { .name = "ICC_SGI0R_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 7,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_W, .accessfn = gicv3_sgi_access,
      .writefn = icc_sgi0r_write,
    },
    { .name = "ICC_SGI0R",
      .cp = 15, .opc1 = 2, .crm = 12,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_W, .accessfn = gicv3_sgi_access,
      .writefn = icc_sgi0r_write,
    },
    { .name = "ICC_IAR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 0,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_R, .accessfn = gicv3_irq_access,
      .readfn = icc_iar1_read,
    },
    { .name = "ICC_EOIR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_W, .accessfn = gicv3_irq_access,
      .writefn = icc_eoir_write,
    },
    { .name = "ICC_HPPIR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 2,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_R, .accessfn = gicv3_irq_access,
      .readfn = icc_hppir1_read,
    },
    /* This register is banked */
    { .name = "ICC_BPR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 3,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_irq_access,
      .readfn = icc_bpr_read,
      .writefn = icc_bpr_write,
    },
    /* This register is banked */
    { .name = "ICC_CTLR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 4,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_irqfiq_access,
      .readfn = icc_ctlr_el1_read,
      .writefn = icc_ctlr_el1_write,
    },
    { .name = "ICC_SRE_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 5,
      .type = ARM_CP_NO_RAW | ARM_CP_CONST,
      .access = PL1_RW,
      /* We don't support IRQ/FIQ bypass and system registers are
       * always enabled, so all our bits are RAZ/WI or RAO/WI.
       * This register is banked but since it's constant we don't
       * need to do anything special.
       */
      .resetvalue = 0x7,
    },
    { .name = "ICC_IGRPEN0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 6,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_fiq_access,
      .fgt = FGT_ICC_IGRPENN_EL1,
      .readfn = icc_igrpen_read,
      .writefn = icc_igrpen_write,
    },
    /* This register is banked */
    { .name = "ICC_IGRPEN1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 7,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_irq_access,
      .fgt = FGT_ICC_IGRPENN_EL1,
      .readfn = icc_igrpen_read,
      .writefn = icc_igrpen_write,
    },
    { .name = "ICC_SRE_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 5,
      .type = ARM_CP_NO_RAW | ARM_CP_CONST,
      .access = PL2_RW,
      /* We don't support IRQ/FIQ bypass and system registers are
       * always enabled, so all our bits are RAZ/WI or RAO/WI.
       */
      .resetvalue = 0xf,
    },
    { .name = "ICC_CTLR_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 12, .opc2 = 4,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL3_RW,
      .readfn = icc_ctlr_el3_read,
      .writefn = icc_ctlr_el3_write,
    },
    { .name = "ICC_SRE_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 12, .opc2 = 5,
      .type = ARM_CP_NO_RAW | ARM_CP_CONST,
      .access = PL3_RW,
      /* We don't support IRQ/FIQ bypass and system registers are
       * always enabled, so all our bits are RAZ/WI or RAO/WI.
       */
      .resetvalue = 0xf,
    },
    { .name = "ICC_IGRPEN1_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 12, .opc2 = 7,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL3_RW,
      .readfn = icc_igrpen1_el3_read,
      .writefn = icc_igrpen1_el3_write,
    },
};
2623 
/* ICC_AP*R1_EL1: only present when more than 5 priority bits exist */
static const ARMCPRegInfo gicv3_cpuif_icc_apxr1_reginfo[] = {
    { .name = "ICC_AP0R1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 5,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_fiq_access,
      .readfn = icc_ap_read,
      .writefn = icc_ap_write,
    },
    { .name = "ICC_AP1R1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_irq_access,
      .readfn = icc_ap_read,
      .writefn = icc_ap_write,
    },
};
2640 
/* ICC_AP*R{2,3}_EL1: only present when more than 6 priority bits exist */
static const ARMCPRegInfo gicv3_cpuif_icc_apxr23_reginfo[] = {
    { .name = "ICC_AP0R2_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 6,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_fiq_access,
      .readfn = icc_ap_read,
      .writefn = icc_ap_write,
    },
    { .name = "ICC_AP0R3_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 7,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_fiq_access,
      .readfn = icc_ap_read,
      .writefn = icc_ap_write,
    },
    { .name = "ICC_AP1R2_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 2,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_irq_access,
      .readfn = icc_ap_read,
      .writefn = icc_ap_write,
    },
    { .name = "ICC_AP1R3_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 3,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_irq_access,
      .readfn = icc_ap_read,
      .writefn = icc_ap_write,
    },
};
2671 
/* ICC_NMIAR1_EL1: only registered when NMI support is enabled */
static const ARMCPRegInfo gicv3_cpuif_gicv3_nmi_reginfo[] = {
    { .name = "ICC_NMIAR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 5,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_R, .accessfn = gicv3_irq_access,
      .readfn = icc_nmiar1_read,
    },
};
2680 
2681 static uint64_t ich_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
2682 {
2683     GICv3CPUState *cs = icc_cs_from_env(env);
2684     int regno = ri->opc2 & 3;
2685     int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
2686     uint64_t value;
2687 
2688     value = cs->ich_apr[grp][regno];
2689     trace_gicv3_ich_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
2690     return value;
2691 }
2692 
2693 static void ich_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
2694                          uint64_t value)
2695 {
2696     GICv3CPUState *cs = icc_cs_from_env(env);
2697     int regno = ri->opc2 & 3;
2698     int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
2699 
2700     trace_gicv3_ich_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
2701 
2702     if (cs->nmi_support) {
2703         cs->ich_apr[grp][regno] = value & (0xFFFFFFFFU | ICV_AP1R_EL1_NMI);
2704     } else {
2705         cs->ich_apr[grp][regno] = value & 0xFFFFFFFFU;
2706     }
2707     gicv3_cpuif_virt_irq_fiq_update(cs);
2708 }
2709 
2710 static uint64_t ich_hcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2711 {
2712     GICv3CPUState *cs = icc_cs_from_env(env);
2713     uint64_t value = cs->ich_hcr_el2;
2714 
2715     trace_gicv3_ich_hcr_read(gicv3_redist_affid(cs), value);
2716     return value;
2717 }
2718 
2719 static void ich_hcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2720                           uint64_t value)
2721 {
2722     GICv3CPUState *cs = icc_cs_from_env(env);
2723 
2724     trace_gicv3_ich_hcr_write(gicv3_redist_affid(cs), value);
2725 
2726     value &= ICH_HCR_EL2_EN | ICH_HCR_EL2_UIE | ICH_HCR_EL2_LRENPIE |
2727         ICH_HCR_EL2_NPIE | ICH_HCR_EL2_VGRP0EIE | ICH_HCR_EL2_VGRP0DIE |
2728         ICH_HCR_EL2_VGRP1EIE | ICH_HCR_EL2_VGRP1DIE | ICH_HCR_EL2_TC |
2729         ICH_HCR_EL2_TALL0 | ICH_HCR_EL2_TALL1 | ICH_HCR_EL2_TSEI |
2730         ICH_HCR_EL2_TDIR | ICH_HCR_EL2_EOICOUNT_MASK;
2731 
2732     cs->ich_hcr_el2 = value;
2733     gicv3_cpuif_virt_update(cs);
2734 }
2735 
2736 static uint64_t ich_vmcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2737 {
2738     GICv3CPUState *cs = icc_cs_from_env(env);
2739     uint64_t value = cs->ich_vmcr_el2;
2740 
2741     trace_gicv3_ich_vmcr_read(gicv3_redist_affid(cs), value);
2742     return value;
2743 }
2744 
2745 static void ich_vmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2746                          uint64_t value)
2747 {
2748     GICv3CPUState *cs = icc_cs_from_env(env);
2749 
2750     trace_gicv3_ich_vmcr_write(gicv3_redist_affid(cs), value);
2751 
2752     value &= ICH_VMCR_EL2_VENG0 | ICH_VMCR_EL2_VENG1 | ICH_VMCR_EL2_VCBPR |
2753         ICH_VMCR_EL2_VEOIM | ICH_VMCR_EL2_VBPR1_MASK |
2754         ICH_VMCR_EL2_VBPR0_MASK | ICH_VMCR_EL2_VPMR_MASK;
2755     value |= ICH_VMCR_EL2_VFIQEN;
2756 
2757     cs->ich_vmcr_el2 = value;
2758     /* Enforce "writing BPRs to less than minimum sets them to the minimum"
2759      * by reading and writing back the fields.
2760      */
2761     write_vbpr(cs, GICV3_G0, read_vbpr(cs, GICV3_G0));
2762     write_vbpr(cs, GICV3_G1, read_vbpr(cs, GICV3_G1));
2763 
2764     gicv3_cpuif_virt_update(cs);
2765 }
2766 
2767 static uint64_t ich_lr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2768 {
2769     GICv3CPUState *cs = icc_cs_from_env(env);
2770     int regno = ri->opc2 | ((ri->crm & 1) << 3);
2771     uint64_t value;
2772 
2773     /* This read function handles all of:
2774      * 64-bit reads of the whole LR
2775      * 32-bit reads of the low half of the LR
2776      * 32-bit reads of the high half of the LR
2777      */
2778     if (ri->state == ARM_CP_STATE_AA32) {
2779         if (ri->crm >= 14) {
2780             value = extract64(cs->ich_lr_el2[regno], 32, 32);
2781             trace_gicv3_ich_lrc_read(regno, gicv3_redist_affid(cs), value);
2782         } else {
2783             value = extract64(cs->ich_lr_el2[regno], 0, 32);
2784             trace_gicv3_ich_lr32_read(regno, gicv3_redist_affid(cs), value);
2785         }
2786     } else {
2787         value = cs->ich_lr_el2[regno];
2788         trace_gicv3_ich_lr_read(regno, gicv3_redist_affid(cs), value);
2789     }
2790 
2791     return value;
2792 }
2793 
2794 static void ich_lr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2795                          uint64_t value)
2796 {
2797     GICv3CPUState *cs = icc_cs_from_env(env);
2798     int regno = ri->opc2 | ((ri->crm & 1) << 3);
2799 
2800     /* This write function handles all of:
2801      * 64-bit writes to the whole LR
2802      * 32-bit writes to the low half of the LR
2803      * 32-bit writes to the high half of the LR
2804      */
2805     if (ri->state == ARM_CP_STATE_AA32) {
2806         if (ri->crm >= 14) {
2807             trace_gicv3_ich_lrc_write(regno, gicv3_redist_affid(cs), value);
2808             value = deposit64(cs->ich_lr_el2[regno], 32, 32, value);
2809         } else {
2810             trace_gicv3_ich_lr32_write(regno, gicv3_redist_affid(cs), value);
2811             value = deposit64(cs->ich_lr_el2[regno], 0, 32, value);
2812         }
2813     } else {
2814         trace_gicv3_ich_lr_write(regno, gicv3_redist_affid(cs), value);
2815     }
2816 
2817     /* Enforce RES0 bits in priority field */
2818     if (cs->vpribits < 8) {
2819         value = deposit64(value, ICH_LR_EL2_PRIORITY_SHIFT,
2820                           8 - cs->vpribits, 0);
2821     }
2822 
2823     /* Enforce RES0 bit in NMI field when FEAT_GICv3_NMI is not implemented */
2824     if (!cs->nmi_support) {
2825         value &= ~ICH_LR_EL2_NMI;
2826     }
2827 
2828     cs->ich_lr_el2[regno] = value;
2829     gicv3_cpuif_virt_update(cs);
2830 }
2831 
2832 static uint64_t ich_vtr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2833 {
2834     GICv3CPUState *cs = icc_cs_from_env(env);
2835     uint64_t value;
2836 
2837     value = ((cs->num_list_regs - 1) << ICH_VTR_EL2_LISTREGS_SHIFT)
2838         | ICH_VTR_EL2_TDS | ICH_VTR_EL2_A3V
2839         | (1 << ICH_VTR_EL2_IDBITS_SHIFT)
2840         | ((cs->vprebits - 1) << ICH_VTR_EL2_PREBITS_SHIFT)
2841         | ((cs->vpribits - 1) << ICH_VTR_EL2_PRIBITS_SHIFT);
2842 
2843     if (cs->gic->revision < 4) {
2844         value |= ICH_VTR_EL2_NV4;
2845     }
2846 
2847     trace_gicv3_ich_vtr_read(gicv3_redist_affid(cs), value);
2848     return value;
2849 }
2850 
2851 static uint64_t ich_misr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2852 {
2853     GICv3CPUState *cs = icc_cs_from_env(env);
2854     uint64_t value = maintenance_interrupt_state(cs);
2855 
2856     trace_gicv3_ich_misr_read(gicv3_redist_affid(cs), value);
2857     return value;
2858 }
2859 
2860 static uint64_t ich_eisr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2861 {
2862     GICv3CPUState *cs = icc_cs_from_env(env);
2863     uint64_t value = eoi_maintenance_interrupt_state(cs, NULL);
2864 
2865     trace_gicv3_ich_eisr_read(gicv3_redist_affid(cs), value);
2866     return value;
2867 }
2868 
2869 static uint64_t ich_elrsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2870 {
2871     GICv3CPUState *cs = icc_cs_from_env(env);
2872     uint64_t value = 0;
2873     int i;
2874 
2875     for (i = 0; i < cs->num_list_regs; i++) {
2876         uint64_t lr = cs->ich_lr_el2[i];
2877 
2878         if ((lr & ICH_LR_EL2_STATE_MASK) == 0 &&
2879             ((lr & ICH_LR_EL2_HW) != 0 || (lr & ICH_LR_EL2_EOI) == 0)) {
2880             value |= (1 << i);
2881         }
2882     }
2883 
2884     trace_gicv3_ich_elrsr_read(gicv3_redist_affid(cs), value);
2885     return value;
2886 }
2887 
/*
 * ICH_* registers for the virtual CPU interface which are always
 * present when EL2 exists; registered from gicv3_init_cpuif(). Only
 * the AP0R0/AP1R0 active-priority registers are defined here: the
 * higher-numbered ICH_AP*Rn are added separately according to
 * cs->vprebits. The nv2_redirect_offset values give each register's
 * offset in the FEAT_NV2 memory-backed register page.
 */
static const ARMCPRegInfo gicv3_cpuif_hcr_reginfo[] = {
    { .name = "ICH_AP0R0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 0,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .nv2_redirect_offset = 0x480,
      .access = PL2_RW,
      .readfn = ich_ap_read,
      .writefn = ich_ap_write,
    },
    { .name = "ICH_AP1R0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 0,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .nv2_redirect_offset = 0x4a0,
      .access = PL2_RW,
      .readfn = ich_ap_read,
      .writefn = ich_ap_write,
    },
    { .name = "ICH_HCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 0,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .nv2_redirect_offset = 0x4c0,
      .access = PL2_RW,
      .readfn = ich_hcr_read,
      .writefn = ich_hcr_write,
    },
    { .name = "ICH_VTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL2_R,
      .readfn = ich_vtr_read,
    },
    { .name = "ICH_MISR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 2,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL2_R,
      .readfn = ich_misr_read,
    },
    { .name = "ICH_EISR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 3,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL2_R,
      .readfn = ich_eisr_read,
    },
    { .name = "ICH_ELRSR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 5,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL2_R,
      .readfn = ich_elrsr_read,
    },
    { .name = "ICH_VMCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 7,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .nv2_redirect_offset = 0x4c8,
      .access = PL2_RW,
      .readfn = ich_vmcr_read,
      .writefn = ich_vmcr_write,
    },
};
2946 
/*
 * ICH_AP{0,1}R1_EL2: virtual active-priority register 1, registered
 * only when cs->vprebits >= 6 (see gicv3_init_cpuif()).
 */
static const ARMCPRegInfo gicv3_cpuif_ich_apxr1_reginfo[] = {
    { .name = "ICH_AP0R1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .nv2_redirect_offset = 0x488,
      .access = PL2_RW,
      .readfn = ich_ap_read,
      .writefn = ich_ap_write,
    },
    { .name = "ICH_AP1R1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .nv2_redirect_offset = 0x4a8,
      .access = PL2_RW,
      .readfn = ich_ap_read,
      .writefn = ich_ap_write,
    },
};
2965 
/*
 * ICH_AP{0,1}R{2,3}_EL2: virtual active-priority registers 2 and 3,
 * registered only when cs->vprebits == 7 (see gicv3_init_cpuif()).
 */
static const ARMCPRegInfo gicv3_cpuif_ich_apxr23_reginfo[] = {
    { .name = "ICH_AP0R2_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 2,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .nv2_redirect_offset = 0x490,
      .access = PL2_RW,
      .readfn = ich_ap_read,
      .writefn = ich_ap_write,
    },
    { .name = "ICH_AP0R3_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 3,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .nv2_redirect_offset = 0x498,
      .access = PL2_RW,
      .readfn = ich_ap_read,
      .writefn = ich_ap_write,
    },
    { .name = "ICH_AP1R2_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 2,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .nv2_redirect_offset = 0x4b0,
      .access = PL2_RW,
      .readfn = ich_ap_read,
      .writefn = ich_ap_write,
    },
    { .name = "ICH_AP1R3_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 3,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .nv2_redirect_offset = 0x4b8,
      .access = PL2_RW,
      .readfn = ich_ap_read,
      .writefn = ich_ap_write,
    },
};
3000 
3001 static void gicv3_cpuif_el_change_hook(ARMCPU *cpu, void *opaque)
3002 {
3003     GICv3CPUState *cs = opaque;
3004 
3005     gicv3_cpuif_update(cs);
3006     /*
3007      * Because vLPIs are only pending in NonSecure state,
3008      * an EL change can change the VIRQ/VFIQ status (but
3009      * cannot affect the maintenance interrupt state)
3010      */
3011     gicv3_cpuif_virt_irq_fiq_update(cs);
3012 }
3013 
/*
 * Register the GICv3 CPU interface system registers with every CPU
 * attached to the GIC, choosing the optional register groups based on
 * each CPU's priority/preemption-bit configuration and EL2 support.
 */
void gicv3_init_cpuif(GICv3State *s)
{
    /* Called from the GICv3 realize function; register our system
     * registers with the CPU
     */
    int i;

    for (i = 0; i < s->num_cpu; i++) {
        ARMCPU *cpu = ARM_CPU(qemu_get_cpu(i));
        GICv3CPUState *cs = &s->cpu[i];

        /*
         * If the CPU doesn't define a GICv3 configuration, probably because
         * in real hardware it doesn't have one, then we use default values
         * matching the one used by most Arm CPUs. This applies to:
         *  cpu->gic_num_lrs
         *  cpu->gic_vpribits
         *  cpu->gic_vprebits
         *  cpu->gic_pribits
         */

        /* Note that we can't just use the GICv3CPUState as an opaque pointer
         * in define_arm_cp_regs_with_opaque(), because when we're called back
         * it might be with code translated by CPU 0 but run by CPU 1, in
         * which case we'd get the wrong value.
         * So instead we define the regs with no ri->opaque info, and
         * get back to the GICv3CPUState from the CPUARMState.
         *
         * These CP regs callbacks can be called from either TCG or HVF code.
         */
        define_arm_cp_regs(cpu, gicv3_cpuif_reginfo);

        /*
         * If the CPU implements FEAT_NMI and FEAT_GICv3 it must also
         * implement FEAT_GICv3_NMI, which is the CPU interface part
         * of NMI support. This is distinct from whether the GIC proper
         * (redistributors and distributor) have NMI support. In QEMU
         * that is a property of the GIC device in s->nmi_support;
         * cs->nmi_support indicates the CPU interface's support.
         */
        if (cpu_isar_feature(aa64_nmi, cpu)) {
            cs->nmi_support = true;
            define_arm_cp_regs(cpu, gicv3_cpuif_gicv3_nmi_reginfo);
        }

        /*
         * The CPU implementation specifies the number of supported
         * bits of physical priority. For backwards compatibility
         * of migration, we have a compat property that forces use
         * of 8 priority bits regardless of what the CPU really has.
         */
        if (s->force_8bit_prio) {
            cs->pribits = 8;
        } else {
            /* A gic_pribits of 0 means "not specified": default to 5 */
            cs->pribits = cpu->gic_pribits ?: 5;
        }

        /*
         * The GICv3 has separate ID register fields for virtual priority
         * and preemption bit values, but only a single ID register field
         * for the physical priority bits. The preemption bit count is
         * always the same as the priority bit count, except that 8 bits
         * of priority means 7 preemption bits. We precalculate the
         * preemption bits because it simplifies the code and makes the
         * parallels between the virtual and physical bits of the GIC
         * a bit clearer.
         */
        cs->prebits = cs->pribits;
        if (cs->prebits == 8) {
            cs->prebits--;
        }
        /*
         * Check that CPU code defining pribits didn't violate
         * architectural constraints our implementation relies on.
         */
        g_assert(cs->pribits >= 4 && cs->pribits <= 8);

        /*
         * gicv3_cpuif_reginfo[] defines ICC_AP*R0_EL1; add definitions
         * for ICC_AP*R{1,2,3}_EL1 if the prebits value requires them.
         */
        if (cs->prebits >= 6) {
            define_arm_cp_regs(cpu, gicv3_cpuif_icc_apxr1_reginfo);
        }
        if (cs->prebits == 7) {
            define_arm_cp_regs(cpu, gicv3_cpuif_icc_apxr23_reginfo);
        }

        /* The virtual interface (ICH_* registers) exists only with EL2 */
        if (arm_feature(&cpu->env, ARM_FEATURE_EL2)) {
            int j;

            /* Zero values mean "not specified": apply the usual defaults */
            cs->num_list_regs = cpu->gic_num_lrs ?: 4;
            cs->vpribits = cpu->gic_vpribits ?: 5;
            cs->vprebits = cpu->gic_vprebits ?: 5;

            /* Check against architectural constraints: getting these
             * wrong would be a bug in the CPU code defining these,
             * and the implementation relies on them holding.
             */
            g_assert(cs->vprebits <= cs->vpribits);
            g_assert(cs->vprebits >= 5 && cs->vprebits <= 7);
            g_assert(cs->vpribits >= 5 && cs->vpribits <= 8);

            define_arm_cp_regs(cpu, gicv3_cpuif_hcr_reginfo);

            for (j = 0; j < cs->num_list_regs; j++) {
                /* Note that the AArch64 LRs are 64-bit; the AArch32 LRs
                 * are split into two cp15 regs, LR (the low part, with the
                 * same encoding as the AArch64 LR) and LRC (the high part).
                 */
                ARMCPRegInfo lr_regset[] = {
                    { .name = "ICH_LRn_EL2", .state = ARM_CP_STATE_BOTH,
                      .opc0 = 3, .opc1 = 4, .crn = 12,
                      .crm = 12 + (j >> 3), .opc2 = j & 7,
                      .type = ARM_CP_IO | ARM_CP_NO_RAW,
                      .nv2_redirect_offset = 0x400 + 8 * j,
                      .access = PL2_RW,
                      .readfn = ich_lr_read,
                      .writefn = ich_lr_write,
                    },
                    { .name = "ICH_LRCn_EL2", .state = ARM_CP_STATE_AA32,
                      .cp = 15, .opc1 = 4, .crn = 12,
                      .crm = 14 + (j >> 3), .opc2 = j & 7,
                      .type = ARM_CP_IO | ARM_CP_NO_RAW,
                      .access = PL2_RW,
                      .readfn = ich_lr_read,
                      .writefn = ich_lr_write,
                    },
                };
                define_arm_cp_regs(cpu, lr_regset);
            }
            /* Higher ICH_AP*Rn_EL2 exist only for larger vprebits values */
            if (cs->vprebits >= 6) {
                define_arm_cp_regs(cpu, gicv3_cpuif_ich_apxr1_reginfo);
            }
            if (cs->vprebits == 7) {
                define_arm_cp_regs(cpu, gicv3_cpuif_ich_apxr23_reginfo);
            }
        }
        if (tcg_enabled() || qtest_enabled()) {
            /*
             * We can only trap EL changes with TCG. However the GIC interrupt
             * state only changes on EL changes involving EL2 or EL3, so for
             * the non-TCG case this is OK, as EL2 and EL3 can't exist.
             */
            arm_register_el_change_hook(cpu, gicv3_cpuif_el_change_hook, cs);
        } else {
            assert(!arm_feature(&cpu->env, ARM_FEATURE_EL2));
            assert(!arm_feature(&cpu->env, ARM_FEATURE_EL3));
        }
    }
}
3165