xref: /openbmc/qemu/hw/intc/arm_gicv3_cpuif.c (revision 7a9fa398560eee55ccb47bade3b83f367d9a44b2)
1  /*
2   * ARM Generic Interrupt Controller v3 (emulation)
3   *
4   * Copyright (c) 2016 Linaro Limited
5   * Written by Peter Maydell
6   *
7   * This code is licensed under the GPL, version 2 or (at your option)
8   * any later version.
9   */
10  
11  /* This file contains the code for the system register interface
12   * portions of the GICv3.
13   */
14  
15  #include "qemu/osdep.h"
16  #include "qemu/bitops.h"
17  #include "qemu/log.h"
18  #include "qemu/main-loop.h"
19  #include "trace.h"
20  #include "gicv3_internal.h"
21  #include "hw/irq.h"
22  #include "cpu.h"
23  #include "target/arm/cpregs.h"
24  #include "target/arm/cpu-features.h"
25  #include "sysemu/tcg.h"
26  #include "sysemu/qtest.h"
27  
28  /*
29   * Special case return value from hppvi_index(); must be larger than
30   * the architecturally maximum possible list register index (which is 15)
31   */
32  #define HPPVI_INDEX_VLPI 16
33  
/*
 * Return the GICv3 CPU interface state for the CPU owning this
 * CPUARMState. The pointer is stored into env->gicv3state when the
 * GIC is realized; callers rely on it being non-NULL by then.
 */
static GICv3CPUState *icc_cs_from_env(CPUARMState *env)
{
    return env->gicv3state;
}
38  
gicv3_use_ns_bank(CPUARMState * env)39  static bool gicv3_use_ns_bank(CPUARMState *env)
40  {
41      /* Return true if we should use the NonSecure bank for a banked GIC
42       * CPU interface register. Note that this differs from the
43       * access_secure_reg() function because GICv3 banked registers are
44       * banked even for AArch64, unlike the other CPU system registers.
45       */
46      return !arm_is_secure_below_el3(env);
47  }
48  
49  /* The minimum BPR for the virtual interface is a configurable property */
static inline int icv_min_vbpr(GICv3CPUState *cs)
{
    /*
     * Minimum BPR for the virtual interface, derived from the
     * configurable number of virtual preemption bits.
     */
    int vprebits = cs->vprebits;

    return 7 - vprebits;
}
54  
static inline int ich_num_aprs(GICv3CPUState *cs)
{
    /*
     * Number of virtual Active Priority Registers in use (1, 2 or 4),
     * determined by the virtual preemption bits.
     */
    int count = 1 << (cs->vprebits - 5);

    assert(count <= ARRAY_SIZE(cs->ich_apr[0]));
    return count;
}
62  
63  /* Simple accessor functions for LR fields */
ich_lr_vintid(uint64_t lr)64  static uint32_t ich_lr_vintid(uint64_t lr)
65  {
66      return extract64(lr, ICH_LR_EL2_VINTID_SHIFT, ICH_LR_EL2_VINTID_LENGTH);
67  }
68  
ich_lr_pintid(uint64_t lr)69  static uint32_t ich_lr_pintid(uint64_t lr)
70  {
71      return extract64(lr, ICH_LR_EL2_PINTID_SHIFT, ICH_LR_EL2_PINTID_LENGTH);
72  }
73  
ich_lr_prio(uint64_t lr)74  static uint32_t ich_lr_prio(uint64_t lr)
75  {
76      return extract64(lr, ICH_LR_EL2_PRIORITY_SHIFT, ICH_LR_EL2_PRIORITY_LENGTH);
77  }
78  
static int ich_lr_state(uint64_t lr)
{
    /* Extract the two-bit State field from an ICH_LR<n>_EL2 value */
    int state = extract64(lr, ICH_LR_EL2_STATE_SHIFT,
                          ICH_LR_EL2_STATE_LENGTH);

    return state;
}
83  
icv_access(CPUARMState * env,int hcr_flags)84  static bool icv_access(CPUARMState *env, int hcr_flags)
85  {
86      /* Return true if this ICC_ register access should really be
87       * directed to an ICV_ access. hcr_flags is a mask of
88       * HCR_EL2 bits to check: we treat this as an ICV_ access
89       * if we are in NS EL1 and at least one of the specified
90       * HCR_EL2 bits is set.
91       *
92       * ICV registers fall into four categories:
93       *  * access if NS EL1 and HCR_EL2.FMO == 1:
94       *    all ICV regs with '0' in their name
95       *  * access if NS EL1 and HCR_EL2.IMO == 1:
96       *    all ICV regs with '1' in their name
97       *  * access if NS EL1 and either IMO or FMO == 1:
98       *    CTLR, DIR, PMR, RPR
99       */
100      uint64_t hcr_el2 = arm_hcr_el2_eff(env);
101      bool flagmatch = hcr_el2 & hcr_flags & (HCR_IMO | HCR_FMO);
102  
103      return flagmatch && arm_current_el(env) == 1
104          && !arm_is_secure_below_el3(env);
105  }
106  
static int read_vbpr(GICv3CPUState *cs, int grp)
{
    /*
     * Read the raw VBPR0 or VBPR1 value out of ICH_VMCR_EL2; the
     * caller is responsible for applying any VCBPR effects.
     */
    int shift, len;

    if (grp == GICV3_G0) {
        shift = ICH_VMCR_EL2_VBPR0_SHIFT;
        len = ICH_VMCR_EL2_VBPR0_LENGTH;
    } else {
        shift = ICH_VMCR_EL2_VBPR1_SHIFT;
        len = ICH_VMCR_EL2_VBPR1_LENGTH;
    }

    return extract64(cs->ich_vmcr_el2, shift, len);
}
120  
static void write_vbpr(GICv3CPUState *cs, int grp, int value)
{
    /*
     * Store a new VBPR0 or VBPR1 value into ICH_VMCR_EL2, applying
     * the "writing a value below the minimum sets the minimum"
     * semantics.
     */
    int shift, len;
    int minval = icv_min_vbpr(cs);

    if (grp == GICV3_G0) {
        shift = ICH_VMCR_EL2_VBPR0_SHIFT;
        len = ICH_VMCR_EL2_VBPR0_LENGTH;
    } else {
        /* The BPR1 minimum is one greater than the BPR0 minimum */
        minval++;
        shift = ICH_VMCR_EL2_VBPR1_SHIFT;
        len = ICH_VMCR_EL2_VBPR1_LENGTH;
    }

    if (value < minval) {
        value = minval;
    }

    cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, shift, len, value);
}
142  
icv_fullprio_mask(GICv3CPUState * cs)143  static uint32_t icv_fullprio_mask(GICv3CPUState *cs)
144  {
145      /* Return a mask word which clears the unimplemented priority bits
146       * from a priority value for a virtual interrupt. (Not to be confused
147       * with the group priority, whose mask depends on the value of VBPR
148       * for the interrupt group.)
149       */
150      return (~0U << (8 - cs->vpribits)) & 0xff;
151  }
152  
static int ich_highest_active_virt_prio(GICv3CPUState *cs)
{
    /*
     * Compute the current virtual running priority from the bits set
     * in the ICH Active Priority Registers.
     */
    int nregs = ich_num_aprs(cs);
    int i;

    /* An active NMI takes the highest (numerically lowest) priority */
    if (cs->ich_apr[GICV3_G1NS][0] & ICV_AP1R_EL1_NMI) {
        return 0x0;
    }

    for (i = 0; i < nregs; i++) {
        uint32_t bits = cs->ich_apr[GICV3_G0][i] | cs->ich_apr[GICV3_G1NS][i];

        if (bits) {
            /* Lowest set bit index gives the group priority */
            return (i * 32 + ctz32(bits)) << (icv_min_vbpr(cs) + 1);
        }
    }

    /* No active interrupts: report the idle priority */
    return 0xff;
}
177  
static int hppvi_index(GICv3CPUState *cs)
{
    /*
     * Return the list register index of the highest priority pending
     * virtual interrupt, as per the HighestPriorityVirtualInterrupt
     * pseudocode. If no pending virtual interrupts, return -1.
     * If the highest priority pending virtual interrupt is a vLPI,
     * return HPPVI_INDEX_VLPI.
     * (The pseudocode handles checking whether the vLPI is higher
     * priority than the highest priority list register at every
     * callsite of HighestPriorityVirtualInterrupt; we check it here.)
     */
    ARMCPU *cpu = ARM_CPU(cs->cpu);
    CPUARMState *env = &cpu->env;
    int idx = -1;
    int i;
    /* Note that a list register entry with a priority of 0xff will
     * never be reported by this function; this is the architecturally
     * correct behaviour.
     */
    int prio = 0xff;
    bool nmi = false;

    if (!(cs->ich_vmcr_el2 & (ICH_VMCR_EL2_VENG0 | ICH_VMCR_EL2_VENG1))) {
        /* Both groups disabled, definitely nothing to do */
        return idx;
    }

    for (i = 0; i < cs->num_list_regs; i++) {
        uint64_t lr = cs->ich_lr_el2[i];
        bool thisnmi;
        int thisprio;

        if (ich_lr_state(lr) != ICH_LR_EL2_STATE_PENDING) {
            /* Not Pending */
            continue;
        }

        /* Ignore interrupts if relevant group enable not set */
        if (lr & ICH_LR_EL2_GROUP) {
            if (!(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) {
                continue;
            }
        } else {
            if (!(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0)) {
                continue;
            }
        }

        thisnmi = lr & ICH_LR_EL2_NMI;
        thisprio = ich_lr_prio(lr);

        /*
         * Prefer a strictly better (numerically lower) priority; on a
         * tie, an NMI wins over a previously-found non-NMI entry.
         * (The '&' here is a bitwise AND of two bools, which is
         * equivalent to '&&' for these operands.)
         */
        if ((thisprio < prio) || ((thisprio == prio) && (thisnmi & (!nmi)))) {
            prio = thisprio;
            nmi = thisnmi;
            idx = i;
        }
    }

    /*
     * "no pending vLPI" is indicated with prio = 0xff, which always
     * fails the priority check here. vLPIs are only considered
     * when we are in Non-Secure state.
     */
    if (cs->hppvlpi.prio < prio && !arm_is_secure(env)) {
        /* The vLPI only wins if its own group is enabled */
        if (cs->hppvlpi.grp == GICV3_G0) {
            if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0) {
                return HPPVI_INDEX_VLPI;
            }
        } else {
            if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1) {
                return HPPVI_INDEX_VLPI;
            }
        }
    }

    return idx;
}
256  
icv_gprio_mask(GICv3CPUState * cs,int group)257  static uint32_t icv_gprio_mask(GICv3CPUState *cs, int group)
258  {
259      /* Return a mask word which clears the subpriority bits from
260       * a priority value for a virtual interrupt in the specified group.
261       * This depends on the VBPR value.
262       * If using VBPR0 then:
263       *  a BPR of 0 means the group priority bits are [7:1];
264       *  a BPR of 1 means they are [7:2], and so on down to
265       *  a BPR of 7 meaning no group priority bits at all.
266       * If using VBPR1 then:
267       *  a BPR of 0 is impossible (the minimum value is 1)
268       *  a BPR of 1 means the group priority bits are [7:1];
269       *  a BPR of 2 means they are [7:2], and so on down to
270       *  a BPR of 7 meaning the group priority is [7].
271       *
272       * Which BPR to use depends on the group of the interrupt and
273       * the current ICH_VMCR_EL2.VCBPR settings.
274       *
275       * This corresponds to the VGroupBits() pseudocode.
276       */
277      int bpr;
278  
279      if (group == GICV3_G1NS && cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR) {
280          group = GICV3_G0;
281      }
282  
283      bpr = read_vbpr(cs, group);
284      if (group == GICV3_G1NS) {
285          assert(bpr > 0);
286          bpr--;
287      }
288  
289      return ~0U << (bpr + 1);
290  }
291  
static bool icv_hppi_can_preempt(GICv3CPUState *cs, uint64_t lr)
{
    /* Return true if we can signal this virtual interrupt defined by
     * the given list register value; see the pseudocode functions
     * CanSignalVirtualInterrupt and CanSignalVirtualInt.
     * Compare also icc_hppi_can_preempt() which is the non-virtual
     * equivalent of these checks.
     */
    int grp;
    bool is_nmi;
    uint32_t mask, prio, rprio, vpmr;

    if (!(cs->ich_hcr_el2 & ICH_HCR_EL2_EN)) {
        /* Virtual interface disabled */
        return false;
    }

    /* We don't need to check that this LR is in Pending state because
     * that has already been done in hppvi_index().
     */

    prio = ich_lr_prio(lr);
    is_nmi = lr & ICH_LR_EL2_NMI;
    vpmr = extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
                     ICH_VMCR_EL2_VPMR_LENGTH);

    /* An NMI is not subject to the virtual priority mask */
    if (!is_nmi && prio >= vpmr) {
        /* Priority mask masks this interrupt */
        return false;
    }

    rprio = ich_highest_active_virt_prio(cs);
    if (rprio == 0xff) {
        /* No running interrupt so we can preempt */
        return true;
    }

    grp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;

    mask = icv_gprio_mask(cs, grp);

    /* We only preempt a running interrupt if the pending interrupt's
     * group priority is sufficient (the subpriorities are not considered).
     */
    if ((prio & mask) < (rprio & mask)) {
        return true;
    }

    /*
     * A pending NMI at the same group priority as the running interrupt
     * may still preempt, provided no NMI is already active (tracked by
     * the NMI flag in ICH_AP1R<0>).
     */
    if ((prio & mask) == (rprio & mask) && is_nmi &&
        !(cs->ich_apr[GICV3_G1NS][0] & ICV_AP1R_EL1_NMI)) {
        return true;
    }

    return false;
}
347  
icv_hppvlpi_can_preempt(GICv3CPUState * cs)348  static bool icv_hppvlpi_can_preempt(GICv3CPUState *cs)
349  {
350      /*
351       * Return true if we can signal the highest priority pending vLPI.
352       * We can assume we're Non-secure because hppvi_index() already
353       * tested for that.
354       */
355      uint32_t mask, rprio, vpmr;
356  
357      if (!(cs->ich_hcr_el2 & ICH_HCR_EL2_EN)) {
358          /* Virtual interface disabled */
359          return false;
360      }
361  
362      vpmr = extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
363                       ICH_VMCR_EL2_VPMR_LENGTH);
364  
365      if (cs->hppvlpi.prio >= vpmr) {
366          /* Priority mask masks this interrupt */
367          return false;
368      }
369  
370      rprio = ich_highest_active_virt_prio(cs);
371      if (rprio == 0xff) {
372          /* No running interrupt so we can preempt */
373          return true;
374      }
375  
376      mask = icv_gprio_mask(cs, cs->hppvlpi.grp);
377  
378      /*
379       * We only preempt a running interrupt if the pending interrupt's
380       * group priority is sufficient (the subpriorities are not considered).
381       */
382      if ((cs->hppvlpi.prio & mask) < (rprio & mask)) {
383          return true;
384      }
385  
386      return false;
387  }
388  
static uint32_t eoi_maintenance_interrupt_state(GICv3CPUState *cs,
                                                uint32_t *misr)
{
    /*
     * Return the EOI maintenance interrupt status for each list
     * register (as seen in ICH_EISR_EL2): bit i is set when
     * LR.State == 0 && LR.HW == 0 && LR.EOI == 1.
     * If misr is non-NULL, additionally accumulate the MISR.EOI,
     * MISR.NP and MISR.U status into *misr.
     */
    uint32_t eisr = 0;
    int nvalid = 0;
    bool anypending = false;
    int i;

    for (i = 0; i < cs->num_list_regs; i++) {
        uint64_t lr = cs->ich_lr_el2[i];
        uint64_t relevant = lr & (ICH_LR_EL2_STATE_MASK | ICH_LR_EL2_HW |
                                  ICH_LR_EL2_EOI);

        if (relevant == ICH_LR_EL2_EOI) {
            eisr |= (1 << i);
        }
        if (lr & ICH_LR_EL2_STATE_MASK) {
            nvalid++;
        }
        if (ich_lr_state(lr) == ICH_LR_EL2_STATE_PENDING) {
            anypending = true;
        }
    }

    if (misr) {
        /* Underflow: fewer than two valid list register entries */
        if ((cs->ich_hcr_el2 & ICH_HCR_EL2_UIE) && nvalid < 2) {
            *misr |= ICH_MISR_EL2_U;
        }
        /* No Pending: no list register entry is in the Pending state */
        if ((cs->ich_hcr_el2 & ICH_HCR_EL2_NPIE) && !anypending) {
            *misr |= ICH_MISR_EL2_NP;
        }
        if (eisr) {
            *misr |= ICH_MISR_EL2_EOI;
        }
    }

    return eisr;
}
432  
maintenance_interrupt_state(GICv3CPUState * cs)433  static uint32_t maintenance_interrupt_state(GICv3CPUState *cs)
434  {
435      /* Return a set of bits indicating the maintenance interrupt status
436       * (as seen in the ICH_MISR_EL2 register).
437       */
438      uint32_t value = 0;
439  
440      /* Scan list registers and fill in the U, NP and EOI bits */
441      eoi_maintenance_interrupt_state(cs, &value);
442  
443      if ((cs->ich_hcr_el2 & ICH_HCR_EL2_LRENPIE) &&
444          (cs->ich_hcr_el2 & ICH_HCR_EL2_EOICOUNT_MASK)) {
445          value |= ICH_MISR_EL2_LRENP;
446      }
447  
448      if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP0EIE) &&
449          (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0)) {
450          value |= ICH_MISR_EL2_VGRP0E;
451      }
452  
453      if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP0DIE) &&
454          !(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) {
455          value |= ICH_MISR_EL2_VGRP0D;
456      }
457      if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP1EIE) &&
458          (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) {
459          value |= ICH_MISR_EL2_VGRP1E;
460      }
461  
462      if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP1DIE) &&
463          !(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) {
464          value |= ICH_MISR_EL2_VGRP1D;
465      }
466  
467      return value;
468  }
469  
void gicv3_cpuif_virt_irq_fiq_update(GICv3CPUState *cs)
{
    /*
     * Tell the CPU about any pending virtual interrupts.
     * This should only be called for changes that affect the
     * vIRQ and vFIQ status and do not change the maintenance
     * interrupt status. This means that unlike gicv3_cpuif_virt_update()
     * this function won't recursively call back into the GIC code.
     * The main use of this is when the redistributor has changed the
     * highest priority pending virtual LPI.
     */
    int idx;
    int irqlevel = 0;
    int fiqlevel = 0;
    int nmilevel = 0;

    idx = hppvi_index(cs);
    trace_gicv3_cpuif_virt_update(gicv3_redist_affid(cs), idx,
                                  cs->hppvlpi.irq, cs->hppvlpi.grp,
                                  cs->hppvlpi.prio);
    if (idx == HPPVI_INDEX_VLPI) {
        /* The winner is a vLPI: G0 signals vFIQ, G1 signals vIRQ */
        if (icv_hppvlpi_can_preempt(cs)) {
            if (cs->hppvlpi.grp == GICV3_G0) {
                fiqlevel = 1;
            } else {
                irqlevel = 1;
            }
        }
    } else if (idx >= 0) {
        /* The winner is a list register entry */
        uint64_t lr = cs->ich_lr_el2[idx];

        if (icv_hppi_can_preempt(cs, lr)) {
            /*
             * Virtual interrupts are simple: G0 are always FIQ, and G1 are
             * IRQ or NMI which depends on the ICH_LR<n>_EL2.NMI to have
             * non-maskable property.
             */
            if (lr & ICH_LR_EL2_GROUP) {
                if (lr & ICH_LR_EL2_NMI) {
                    nmilevel = 1;
                } else {
                    irqlevel = 1;
                }
            } else {
                fiqlevel = 1;
            }
        }
    }

    /* At most one of the three levels is 1; drive all three lines */
    trace_gicv3_cpuif_virt_set_irqs(gicv3_redist_affid(cs), fiqlevel, irqlevel);
    qemu_set_irq(cs->parent_vfiq, fiqlevel);
    qemu_set_irq(cs->parent_virq, irqlevel);
    qemu_set_irq(cs->parent_vnmi, nmilevel);
}
524  
static void gicv3_cpuif_virt_update(GICv3CPUState *cs)
{
    /*
     * Tell the CPU about any pending virtual interrupts or
     * maintenance interrupts, following a change to the state
     * of the CPU interface relevant to virtual interrupts.
     *
     * CAUTION: this function will call qemu_set_irq() on the
     * CPU maintenance IRQ line, which is typically wired up
     * to the GIC as a per-CPU interrupt. This means that it
     * will recursively call back into the GIC code via
     * gicv3_redist_set_irq() and thus into the CPU interface code's
     * gicv3_cpuif_update(). It is therefore important that this
     * function is only called as the final action of a CPU interface
     * register write implementation, after all the GIC state
     * fields have been updated. gicv3_cpuif_update() also must
     * not cause this function to be called, but that happens
     * naturally as a result of there being no architectural
     * linkage between the physical and virtual GIC logic.
     */
    ARMCPU *cpu = ARM_CPU(cs->cpu);
    int maintlevel;

    gicv3_cpuif_virt_irq_fiq_update(cs);

    /* The maintenance interrupt is only delivered while EN is set */
    maintlevel = (cs->ich_hcr_el2 & ICH_HCR_EL2_EN) &&
        maintenance_interrupt_state(cs) != 0;

    trace_gicv3_cpuif_virt_set_maint_irq(gicv3_redist_affid(cs), maintlevel);
    qemu_set_irq(cpu->gicv3_maintenance_interrupt, maintlevel);
}
558  
icv_ap_read(CPUARMState * env,const ARMCPRegInfo * ri)559  static uint64_t icv_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
560  {
561      GICv3CPUState *cs = icc_cs_from_env(env);
562      int regno = ri->opc2 & 3;
563      int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
564      uint64_t value = cs->ich_apr[grp][regno];
565  
566      trace_gicv3_icv_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
567      return value;
568  }
569  
static void icv_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /*
     * Write one of the ICV_AP0R<n>/ICV_AP1R<n> registers. Only the
     * low 32 architected bits are writable; when the GIC has NMI
     * support the NMI flag bit is additionally preserved.
     * (Also drops the redundant trailing 'return;' the old code had
     * at the end of this void function.)
     */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int regno = ri->opc2 & 3;
    int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
    uint64_t mask = 0xFFFFFFFFU;

    trace_gicv3_icv_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);

    if (cs->nmi_support) {
        mask |= ICV_AP1R_EL1_NMI;
    }
    cs->ich_apr[grp][regno] = value & mask;

    /* Active priority changes can alter which vIRQ/vFIQ is signalled */
    gicv3_cpuif_virt_irq_fiq_update(cs);
}
588  
icv_bpr_read(CPUARMState * env,const ARMCPRegInfo * ri)589  static uint64_t icv_bpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
590  {
591      GICv3CPUState *cs = icc_cs_from_env(env);
592      int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1NS;
593      uint64_t bpr;
594      bool satinc = false;
595  
596      if (grp == GICV3_G1NS && (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR)) {
597          /* reads return bpr0 + 1 saturated to 7, writes ignored */
598          grp = GICV3_G0;
599          satinc = true;
600      }
601  
602      bpr = read_vbpr(cs, grp);
603  
604      if (satinc) {
605          bpr++;
606          bpr = MIN(bpr, 7);
607      }
608  
609      trace_gicv3_icv_bpr_read(ri->crm == 8 ? 0 : 1, gicv3_redist_affid(cs), bpr);
610  
611      return bpr;
612  }
613  
static void icv_bpr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Write ICV_BPR0 or ICV_BPR1 (selected by crm) */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1NS;

    trace_gicv3_icv_bpr_write(ri->crm == 8 ? 0 : 1,
                              gicv3_redist_affid(cs), value);

    if (grp == GICV3_G0 || !(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR)) {
        write_vbpr(cs, grp, value);
        gicv3_cpuif_virt_irq_fiq_update(cs);
    }
    /* With VCBPR set, writes to BPR1 are ignored entirely */
}
632  
icv_pmr_read(CPUARMState * env,const ARMCPRegInfo * ri)633  static uint64_t icv_pmr_read(CPUARMState *env, const ARMCPRegInfo *ri)
634  {
635      GICv3CPUState *cs = icc_cs_from_env(env);
636      uint64_t value;
637  
638      value = extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
639                        ICH_VMCR_EL2_VPMR_LENGTH);
640  
641      trace_gicv3_icv_pmr_read(gicv3_redist_affid(cs), value);
642      return value;
643  }
644  
static void icv_pmr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /*
     * Write ICV_PMR: store into ICH_VMCR_EL2.VPMR after dropping the
     * unimplemented priority bits, then reassess virtual interrupts.
     */
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t newpmr = value & icv_fullprio_mask(cs);

    trace_gicv3_icv_pmr_write(gicv3_redist_affid(cs), value);

    cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
                                 ICH_VMCR_EL2_VPMR_LENGTH, newpmr);

    gicv3_cpuif_virt_irq_fiq_update(cs);
}
659  
icv_igrpen_read(CPUARMState * env,const ARMCPRegInfo * ri)660  static uint64_t icv_igrpen_read(CPUARMState *env, const ARMCPRegInfo *ri)
661  {
662      GICv3CPUState *cs = icc_cs_from_env(env);
663      int enbit;
664      uint64_t value;
665  
666      enbit = ri->opc2 & 1 ? ICH_VMCR_EL2_VENG1_SHIFT : ICH_VMCR_EL2_VENG0_SHIFT;
667      value = extract64(cs->ich_vmcr_el2, enbit, 1);
668  
669      trace_gicv3_icv_igrpen_read(ri->opc2 & 1 ? 1 : 0,
670                                  gicv3_redist_affid(cs), value);
671      return value;
672  }
673  
static void icv_igrpen_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /*
     * Write ICV_IGRPEN0 or ICV_IGRPEN1 (selected by opc2 bit 0) into
     * the corresponding ICH_VMCR_EL2.VENG0/VENG1 bit.
     */
    GICv3CPUState *cs = icc_cs_from_env(env);
    bool is_grp1 = ri->opc2 & 1;
    int shift = is_grp1 ? ICH_VMCR_EL2_VENG1_SHIFT : ICH_VMCR_EL2_VENG0_SHIFT;

    trace_gicv3_icv_igrpen_write(is_grp1 ? 1 : 0,
                                 gicv3_redist_affid(cs), value);

    cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, shift, 1, value);

    /* A group enable change can affect the maintenance interrupt too */
    gicv3_cpuif_virt_update(cs);
}
688  
icv_ctlr_read(CPUARMState * env,const ARMCPRegInfo * ri)689  static uint64_t icv_ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri)
690  {
691      GICv3CPUState *cs = icc_cs_from_env(env);
692      uint64_t value;
693  
694      /* Note that the fixed fields here (A3V, SEIS, IDbits, PRIbits)
695       * should match the ones reported in ich_vtr_read().
696       */
697      value = ICC_CTLR_EL1_A3V | (1 << ICC_CTLR_EL1_IDBITS_SHIFT) |
698          ((cs->vpribits - 1) << ICC_CTLR_EL1_PRIBITS_SHIFT);
699  
700      if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VEOIM) {
701          value |= ICC_CTLR_EL1_EOIMODE;
702      }
703  
704      if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR) {
705          value |= ICC_CTLR_EL1_CBPR;
706      }
707  
708      trace_gicv3_icv_ctlr_read(gicv3_redist_affid(cs), value);
709      return value;
710  }
711  
static void icv_ctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    /*
     * Write ICV_CTLR_EL1: only CBPR and EOIMODE are writable, and both
     * are stored as bits of ICH_VMCR_EL2.
     */
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t vmcr = cs->ich_vmcr_el2;

    trace_gicv3_icv_ctlr_write(gicv3_redist_affid(cs), value);

    vmcr = deposit64(vmcr, ICH_VMCR_EL2_VCBPR_SHIFT, 1,
                     (value & ICC_CTLR_EL1_CBPR) ? 1 : 0);
    vmcr = deposit64(vmcr, ICH_VMCR_EL2_VEOIM_SHIFT, 1,
                     (value & ICC_CTLR_EL1_EOIMODE) ? 1 : 0);
    cs->ich_vmcr_el2 = vmcr;

    gicv3_cpuif_virt_irq_fiq_update(cs);
}
726  
icv_rpr_read(CPUARMState * env,const ARMCPRegInfo * ri)727  static uint64_t icv_rpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
728  {
729      GICv3CPUState *cs = icc_cs_from_env(env);
730      uint64_t prio = ich_highest_active_virt_prio(cs);
731  
732      if (cs->ich_apr[GICV3_G1NS][0] & ICV_AP1R_EL1_NMI) {
733          prio |= ICV_RPR_EL1_NMI;
734      }
735  
736      trace_gicv3_icv_rpr_read(gicv3_redist_affid(cs), prio);
737      return prio;
738  }
739  
icv_hppir_read(CPUARMState * env,const ARMCPRegInfo * ri)740  static uint64_t icv_hppir_read(CPUARMState *env, const ARMCPRegInfo *ri)
741  {
742      GICv3CPUState *cs = icc_cs_from_env(env);
743      int grp = ri->crm == 8 ? GICV3_G0 : GICV3_G1NS;
744      int idx = hppvi_index(cs);
745      uint64_t value = INTID_SPURIOUS;
746  
747      if (idx == HPPVI_INDEX_VLPI) {
748          if (cs->hppvlpi.grp == grp) {
749              value = cs->hppvlpi.irq;
750          }
751      } else if (idx >= 0) {
752          uint64_t lr = cs->ich_lr_el2[idx];
753          int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;
754  
755          if (grp == thisgrp) {
756              value = ich_lr_vintid(lr);
757          }
758      }
759  
760      trace_gicv3_icv_hppir_read(ri->crm == 8 ? 0 : 1,
761                                 gicv3_redist_affid(cs), value);
762      return value;
763  }
764  
static void icv_activate_irq(GICv3CPUState *cs, int idx, int grp)
{
    /*
     * Activate the interrupt in list register idx: move it from
     * Pending to Active state and record its group priority in the
     * Active Priority Registers.
     */
    uint64_t lr = cs->ich_lr_el2[idx];
    bool is_nmi = lr & ICH_LR_EL2_NMI;
    uint32_t gprio = ich_lr_prio(lr) & icv_gprio_mask(cs, grp);
    int aprbit = gprio >> (8 - cs->vprebits);
    int regno = aprbit / 32;

    lr &= ~ICH_LR_EL2_STATE_PENDING_BIT;
    lr |= ICH_LR_EL2_STATE_ACTIVE_BIT;
    cs->ich_lr_el2[idx] = lr;

    if (is_nmi) {
        /* An active NMI is tracked via the dedicated APR flag bit */
        cs->ich_apr[grp][regno] |= ICV_AP1R_EL1_NMI;
    } else {
        cs->ich_apr[grp][regno] |= (1U << (aprbit % 32));
    }
}
787  
static void icv_activate_vlpi(GICv3CPUState *cs)
{
    /*
     * Activate the highest priority pending vLPI: record its group
     * priority in the Active Priority Registers and tell the
     * redistributor it is no longer pending.
     */
    int gprio = cs->hppvlpi.prio & icv_gprio_mask(cs, cs->hppvlpi.grp);
    int aprbit = gprio >> (8 - cs->vprebits);

    cs->ich_apr[cs->hppvlpi.grp][aprbit / 32] |= (1U << (aprbit % 32));
    gicv3_redist_vlpi_pending(cs, cs->hppvlpi.irq, 0);
}
799  
static uint64_t icv_iar_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * Read ICV_IAR0 or ICV_IAR1 (selected by crm): acknowledge the
     * highest priority pending virtual interrupt in the register's
     * group, activating it, and return its INTID (or INTID_SPURIOUS
     * if nothing can be acknowledged).
     */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int grp = ri->crm == 8 ? GICV3_G0 : GICV3_G1NS;
    int idx = hppvi_index(cs);
    uint64_t intid = INTID_SPURIOUS;
    int el = arm_current_el(env);

    if (idx == HPPVI_INDEX_VLPI) {
        /* Highest priority pending is a vLPI */
        if (cs->hppvlpi.grp == grp && icv_hppvlpi_can_preempt(cs)) {
            intid = cs->hppvlpi.irq;
            icv_activate_vlpi(cs);
        }
    } else if (idx >= 0) {
        uint64_t lr = cs->ich_lr_el2[idx];
        int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;
        /* NMI handling applies only when SCTLR_ELx.NMI is enabled */
        bool nmi = env->cp15.sctlr_el[el] & SCTLR_NMI && lr & ICH_LR_EL2_NMI;

        if (thisgrp == grp && icv_hppi_can_preempt(cs, lr)) {
            intid = ich_lr_vintid(lr);
            if (!gicv3_intid_is_special(intid)) {
                if (!nmi) {
                    icv_activate_irq(cs, idx, grp);
                } else {
                    /* An NMI must be acknowledged via ICV_NMIAR1_EL1;
                     * reading IAR returns the special INTID_NMI value
                     * without activating anything.
                     */
                    intid = INTID_NMI;
                }
            } else {
                /* Interrupt goes from Pending to Invalid */
                cs->ich_lr_el2[idx] &= ~ICH_LR_EL2_STATE_PENDING_BIT;
                /* We will now return the (bogus) ID from the list register,
                 * as per the pseudocode.
                 */
            }
        }
    }

    trace_gicv3_icv_iar_read(ri->crm == 8 ? 0 : 1,
                             gicv3_redist_affid(cs), intid);

    /* Acknowledging may change vIRQ/vFIQ and maintenance state */
    gicv3_cpuif_virt_update(cs);

    return intid;
}
843  
static uint64_t icv_nmiar1_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * Read ICV_NMIAR1_EL1: acknowledge the highest priority pending
     * virtual Group 1 interrupt only if it is an NMI, activating it
     * and returning its INTID; otherwise return INTID_SPURIOUS.
     * (vLPIs are never NMIs, so the HPPVI_INDEX_VLPI case falls
     * through to the spurious return.)
     */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int idx = hppvi_index(cs);
    uint64_t intid = INTID_SPURIOUS;

    if (idx >= 0 && idx != HPPVI_INDEX_VLPI) {
        uint64_t lr = cs->ich_lr_el2[idx];
        int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;

        if ((thisgrp == GICV3_G1NS) && icv_hppi_can_preempt(cs, lr)) {
            intid = ich_lr_vintid(lr);
            if (!gicv3_intid_is_special(intid)) {
                if (lr & ICH_LR_EL2_NMI) {
                    icv_activate_irq(cs, idx, GICV3_G1NS);
                } else {
                    /* Not an NMI: NMIAR reads as spurious */
                    intid = INTID_SPURIOUS;
                }
            } else {
                /* Interrupt goes from Pending to Invalid */
                cs->ich_lr_el2[idx] &= ~ICH_LR_EL2_STATE_PENDING_BIT;
                /*
                 * We will now return the (bogus) ID from the list register,
                 * as per the pseudocode.
                 */
            }
        }
    }

    trace_gicv3_icv_nmiar1_read(gicv3_redist_affid(cs), intid);

    /* Acknowledging may change vIRQ/vFIQ and maintenance state */
    gicv3_cpuif_virt_update(cs);

    return intid;
}
879  
icc_fullprio_mask(GICv3CPUState * cs)880  static uint32_t icc_fullprio_mask(GICv3CPUState *cs)
881  {
882      /*
883       * Return a mask word which clears the unimplemented priority bits
884       * from a priority value for a physical interrupt. (Not to be confused
885       * with the group priority, whose mask depends on the value of BPR
886       * for the interrupt group.)
887       */
888      return (~0U << (8 - cs->pribits)) & 0xff;
889  }
890  
static inline int icc_min_bpr(GICv3CPUState *cs)
{
    /*
     * Minimum value of the Binary Point Register for the physical
     * interface, determined by the implemented preemption bits.
     */
    int minbpr = 7 - cs->prebits;

    return minbpr;
}
896  
static inline int icc_min_bpr_ns(GICv3CPUState *cs)
{
    /* The NonSecure minimum BPR is one greater than the Secure minimum */
    return 1 + icc_min_bpr(cs);
}
901  
static inline int icc_num_aprs(GICv3CPUState *cs)
{
    /* Return the number of Active Priority Registers (1, 2, or 4) */
    int nregs = 1;

    if (cs->prebits > 5) {
        nregs <<= cs->prebits - 5;
    }
    assert(nregs <= ARRAY_SIZE(cs->icc_apr[0]));
    return nregs;
}
909  
static int icc_highest_active_prio(GICv3CPUState *cs)
{
    /* Calculate the current running priority based on the set bits
     * in the Active Priority Registers.
     */
    int i;

    if (cs->nmi_support) {
        /*
         * If an NMI is active this takes precedence over anything else
         * for priority purposes; the NMI bit is only in the AP1R0 bit.
         * We return here the effective priority of the NMI, which is
         * either 0x0 or 0x80. Callers will need to check NMI again for
         * purposes of either setting the RPR register bits or for
         * prioritization of NMI vs non-NMI.
         */
        if (cs->icc_apr[GICV3_G1][0] & ICC_AP1R_EL1_NMI) {
            return 0;
        }
        if (cs->icc_apr[GICV3_G1NS][0] & ICC_AP1R_EL1_NMI) {
            /* NS NMI: 0x80 unless the GIC has Security disabled (DS set) */
            return (cs->gic->gicd_ctlr & GICD_CTLR_DS) ? 0 : 0x80;
        }
    }

    for (i = 0; i < icc_num_aprs(cs); i++) {
        /* Consider active bits of all three groups together */
        uint32_t apr = cs->icc_apr[GICV3_G0][i] |
            cs->icc_apr[GICV3_G1][i] | cs->icc_apr[GICV3_G1NS][i];

        if (!apr) {
            continue;
        }
        /* Convert the lowest set APR bit index back into a priority value */
        return (i * 32 + ctz32(apr)) << (icc_min_bpr(cs) + 1);
    }
    /* No current active interrupts: return idle priority */
    return 0xff;
}
946  
icc_gprio_mask(GICv3CPUState * cs,int group)947  static uint32_t icc_gprio_mask(GICv3CPUState *cs, int group)
948  {
949      /* Return a mask word which clears the subpriority bits from
950       * a priority value for an interrupt in the specified group.
951       * This depends on the BPR value. For CBPR0 (S or NS):
952       *  a BPR of 0 means the group priority bits are [7:1];
953       *  a BPR of 1 means they are [7:2], and so on down to
954       *  a BPR of 7 meaning no group priority bits at all.
955       * For CBPR1 NS:
956       *  a BPR of 0 is impossible (the minimum value is 1)
957       *  a BPR of 1 means the group priority bits are [7:1];
958       *  a BPR of 2 means they are [7:2], and so on down to
959       *  a BPR of 7 meaning the group priority is [7].
960       *
961       * Which BPR to use depends on the group of the interrupt and
962       * the current ICC_CTLR.CBPR settings.
963       *
964       * This corresponds to the GroupBits() pseudocode.
965       */
966      int bpr;
967  
968      if ((group == GICV3_G1 && cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR) ||
969          (group == GICV3_G1NS &&
970           cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR)) {
971          group = GICV3_G0;
972      }
973  
974      bpr = cs->icc_bpr[group] & 7;
975  
976      if (group == GICV3_G1NS) {
977          assert(bpr > 0);
978          bpr--;
979      }
980  
981      return ~0U << (bpr + 1);
982  }
983  
icc_no_enabled_hppi(GICv3CPUState * cs)984  static bool icc_no_enabled_hppi(GICv3CPUState *cs)
985  {
986      /* Return true if there is no pending interrupt, or the
987       * highest priority pending interrupt is in a group which has been
988       * disabled at the CPU interface by the ICC_IGRPEN* register enable bits.
989       */
990      return cs->hppi.prio == 0xff || (cs->icc_igrpen[cs->hppi.grp] == 0);
991  }
992  
static bool icc_hppi_can_preempt(GICv3CPUState *cs)
{
    /* Return true if we have a pending interrupt of sufficient
     * priority to preempt.
     */
    int rprio;
    uint32_t mask;
    ARMCPU *cpu = ARM_CPU(cs->cpu);
    CPUARMState *env = &cpu->env;

    if (icc_no_enabled_hppi(cs)) {
        /* Nothing pending, or its group is disabled at the CPU interface */
        return false;
    }

    if (cs->hppi.nmi) {
        /*
         * NMIs are not subject to the ordinary PMR comparison; but for a
         * NS Group 1 NMI on a Security-enabled GIC the PMR boundary
         * values 0x0-0x80 still mask it as below.
         */
        if (!(cs->gic->gicd_ctlr & GICD_CTLR_DS) &&
            cs->hppi.grp == GICV3_G1NS) {
            if (cs->icc_pmr_el1 < 0x80) {
                return false;
            }
            if (arm_is_secure(env) && cs->icc_pmr_el1 == 0x80) {
                return false;
            }
        }
    } else if (cs->hppi.prio >= cs->icc_pmr_el1) {
        /* Priority mask masks this interrupt */
        return false;
    }

    rprio = icc_highest_active_prio(cs);
    if (rprio == 0xff) {
        /* No currently running interrupt so we can preempt */
        return true;
    }

    mask = icc_gprio_mask(cs, cs->hppi.grp);

    /* We only preempt a running interrupt if the pending interrupt's
     * group priority is sufficient (the subpriorities are not considered).
     */
    if ((cs->hppi.prio & mask) < (rprio & mask)) {
        return true;
    }

    /*
     * A pending NMI may preempt at equal group priority, but only when
     * no NMI is already active in the same group.
     */
    if (cs->hppi.nmi && (cs->hppi.prio & mask) == (rprio & mask)) {
        if (!(cs->icc_apr[cs->hppi.grp][0] & ICC_AP1R_EL1_NMI)) {
            return true;
        }
    }

    return false;
}
1045  
void gicv3_cpuif_update(GICv3CPUState *cs)
{
    /* Tell the CPU about its highest priority pending interrupt */
    int irqlevel = 0;
    int fiqlevel = 0;
    int nmilevel = 0;
    ARMCPU *cpu = ARM_CPU(cs->cpu);
    CPUARMState *env = &cpu->env;

    /* We raise/lower CPU interrupt lines, so the BQL must be held */
    g_assert(bql_locked());

    trace_gicv3_cpuif_update(gicv3_redist_affid(cs), cs->hppi.irq,
                             cs->hppi.grp, cs->hppi.prio);

    if (cs->hppi.grp == GICV3_G1 && !arm_feature(env, ARM_FEATURE_EL3)) {
        /* If a Security-enabled GIC sends a G1S interrupt to a
         * Security-disabled CPU, we must treat it as if it were G0.
         */
        cs->hppi.grp = GICV3_G0;
    }

    if (icc_hppi_can_preempt(cs)) {
        /* We have an interrupt: should we signal it as IRQ or FIQ?
         * This is described in the GICv3 spec section 4.6.2.
         */
        bool isfiq;

        switch (cs->hppi.grp) {
        case GICV3_G0:
            isfiq = true;
            break;
        case GICV3_G1:
            isfiq = (!arm_is_secure(env) ||
                     (arm_current_el(env) == 3 && arm_el_is_aa64(env, 3)));
            break;
        case GICV3_G1NS:
            isfiq = arm_is_secure(env);
            break;
        default:
            g_assert_not_reached();
        }

        /* NMIs that are not signalled as FIQ go out on the NMI line */
        if (isfiq) {
            fiqlevel = 1;
        } else if (cs->hppi.nmi) {
            nmilevel = 1;
        } else {
            irqlevel = 1;
        }
    }

    trace_gicv3_cpuif_set_irqs(gicv3_redist_affid(cs), fiqlevel, irqlevel);

    /* Drive all three lines; at most one of them is asserted */
    qemu_set_irq(cs->parent_fiq, fiqlevel);
    qemu_set_irq(cs->parent_irq, irqlevel);
    qemu_set_irq(cs->parent_nmi, nmilevel);
}
1103  
static uint64_t icc_pmr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * Read of ICC_PMR_EL1 (priority mask register). When Group 0 is
     * inaccessible to NonSecure, a NS reader gets the NS view of the
     * Secure priority value rather than the raw register contents.
     */
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint32_t value = cs->icc_pmr_el1;

    if (icv_access(env, HCR_FMO | HCR_IMO)) {
        /* Access is redirected to the virtual CPU interface */
        return icv_pmr_read(env, ri);
    }

    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env) &&
        (env->cp15.scr_el3 & SCR_FIQ)) {
        /* NS access and Group 0 is inaccessible to NS: return the
         * NS view of the current priority
         */
        if ((value & 0x80) == 0) {
            /* Secure priorities not visible to NS */
            value = 0;
        } else if (value != 0xff) {
            /* Expand the NS half of the range into the full 0..0xff view */
            value = (value << 1) & 0xff;
        }
    }

    trace_gicv3_icc_pmr_read(gicv3_redist_affid(cs), value);

    return value;
}
1130  
static void icc_pmr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /*
     * Write of ICC_PMR_EL1. NS writes are folded into the Secure view
     * of the priority space when Group 0 is inaccessible to NS.
     */
    GICv3CPUState *cs = icc_cs_from_env(env);

    if (icv_access(env, HCR_FMO | HCR_IMO)) {
        return icv_pmr_write(env, ri, value);
    }

    trace_gicv3_icc_pmr_write(gicv3_redist_affid(cs), value);

    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env) &&
        (env->cp15.scr_el3 & SCR_FIQ)) {
        /* NS access and Group 0 is inaccessible to NS: map the written
         * value into the Secure view of the priority range
         */
        if (!(cs->icc_pmr_el1 & 0x80)) {
            /* Current PMR in the secure range, don't allow NS to change it */
            return;
        }
        value = (value >> 1) | 0x80;
    }
    /* Drop unimplemented priority bits before storing */
    value &= icc_fullprio_mask(cs);
    cs->icc_pmr_el1 = value;
    gicv3_cpuif_update(cs);
}
1157  
static void icc_activate_irq(GICv3CPUState *cs, int irq)
{
    /* Move the interrupt from the Pending state to Active, and update
     * the Active Priority Registers
     */
    uint32_t mask = icc_gprio_mask(cs, cs->hppi.grp);
    int prio = cs->hppi.prio & mask;
    /* Locate the APR bit corresponding to this group priority */
    int aprbit = prio >> (8 - cs->prebits);
    int regno = aprbit / 32;
    int regbit = aprbit % 32;
    bool nmi = cs->hppi.nmi;

    if (nmi) {
        /* NMIs are recorded via the dedicated NMI bit, not a priority bit */
        cs->icc_apr[cs->hppi.grp][regno] |= ICC_AP1R_EL1_NMI;
    } else {
        cs->icc_apr[cs->hppi.grp][regno] |= (1U << regbit);
    }

    if (irq < GIC_INTERNAL) {
        /* SGI/PPI: pending and active state live in this redistributor */
        cs->gicr_iactiver0 = deposit32(cs->gicr_iactiver0, irq, 1, 1);
        cs->gicr_ipendr0 = deposit32(cs->gicr_ipendr0, irq, 1, 0);
        gicv3_redist_update(cs);
    } else if (irq < GICV3_LPI_INTID_START) {
        /* SPI: state lives in the distributor */
        gicv3_gicd_active_set(cs->gic, irq);
        gicv3_gicd_pending_clear(cs->gic, irq);
        gicv3_update(cs->gic, irq, 1);
    } else {
        /* LPI: no active state to set; just clear pending */
        gicv3_redist_lpi_pending(cs, irq, 0);
    }
}
1188  
static uint64_t icc_hppir0_value(GICv3CPUState *cs, CPUARMState *env)
{
    /* Return the highest priority pending interrupt register value
     * for group 0.
     */
    bool irq_is_secure;

    if (icc_no_enabled_hppi(cs)) {
        return INTID_SPURIOUS;
    }

    /* Check whether we can return the interrupt or if we should return
     * a special identifier, as per the CheckGroup0ForSpecialIdentifiers
     * pseudocode. (We can simplify a little because for us ICC_SRE_EL1.RM
     * is always zero.)
     */
    irq_is_secure = (!(cs->gic->gicd_ctlr & GICD_CTLR_DS) &&
                     (cs->hppi.grp != GICV3_G1NS));

    /* Group 1 interrupts are only visible here to EL3/Monitor */
    if (cs->hppi.grp != GICV3_G0 && !arm_is_el3_or_mon(env)) {
        return INTID_SPURIOUS;
    }
    if (irq_is_secure && !arm_is_secure(env)) {
        /* Secure interrupts not visible to Nonsecure */
        return INTID_SPURIOUS;
    }

    if (cs->hppi.grp != GICV3_G0) {
        /* Indicate to EL3 that there's a Group 1 interrupt for the other
         * state pending.
         */
        return irq_is_secure ? INTID_SECURE : INTID_NONSECURE;
    }

    return cs->hppi.irq;
}
1225  
static uint64_t icc_hppir1_value(GICv3CPUState *cs, CPUARMState *env)
{
    /* Return the highest priority pending interrupt register value
     * for group 1.
     */
    bool irq_is_secure;

    if (icc_no_enabled_hppi(cs)) {
        return INTID_SPURIOUS;
    }

    /* Check whether we can return the interrupt or if we should return
     * a special identifier, as per the CheckGroup1ForSpecialIdentifiers
     * pseudocode. (We can simplify a little because for us ICC_SRE_EL1.RM
     * is always zero.)
     */
    irq_is_secure = (!(cs->gic->gicd_ctlr & GICD_CTLR_DS) &&
                     (cs->hppi.grp != GICV3_G1NS));

    if (cs->hppi.grp == GICV3_G0) {
        /* Group 0 interrupts not visible via HPPIR1 */
        return INTID_SPURIOUS;
    }
    if (irq_is_secure) {
        if (!arm_is_secure(env)) {
            /* Secure interrupts not visible in Non-secure */
            return INTID_SPURIOUS;
        }
    } else if (!arm_is_el3_or_mon(env) && arm_is_secure(env)) {
        /* Group 1 non-secure interrupts not visible in Secure EL1 */
        return INTID_SPURIOUS;
    }

    return cs->hppi.irq;
}
1261  
icc_iar0_read(CPUARMState * env,const ARMCPRegInfo * ri)1262  static uint64_t icc_iar0_read(CPUARMState *env, const ARMCPRegInfo *ri)
1263  {
1264      GICv3CPUState *cs = icc_cs_from_env(env);
1265      uint64_t intid;
1266  
1267      if (icv_access(env, HCR_FMO)) {
1268          return icv_iar_read(env, ri);
1269      }
1270  
1271      if (!icc_hppi_can_preempt(cs)) {
1272          intid = INTID_SPURIOUS;
1273      } else {
1274          intid = icc_hppir0_value(cs, env);
1275      }
1276  
1277      if (!gicv3_intid_is_special(intid)) {
1278          icc_activate_irq(cs, intid);
1279      }
1280  
1281      trace_gicv3_icc_iar0_read(gicv3_redist_affid(cs), intid);
1282      return intid;
1283  }
1284  
static uint64_t icc_iar1_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * Read of ICC_IAR1_EL1: acknowledge the highest priority pending
     * Group 1 interrupt, activating it unless it is special or an NMI.
     */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int el = arm_current_el(env);
    uint64_t intid;

    if (icv_access(env, HCR_IMO)) {
        /* Access is redirected to the virtual CPU interface */
        return icv_iar_read(env, ri);
    }

    if (!icc_hppi_can_preempt(cs)) {
        intid = INTID_SPURIOUS;
    } else {
        intid = icc_hppir1_value(cs, env);
    }

    if (!gicv3_intid_is_special(intid)) {
        if (cs->hppi.nmi && env->cp15.sctlr_el[el] & SCTLR_NMI) {
            /*
             * With SCTLR_ELx.NMI set an NMI must be acknowledged via
             * ICC_NMIAR1_EL1 instead; do not activate it here.
             */
            intid = INTID_NMI;
        } else {
            icc_activate_irq(cs, intid);
        }
    }

    trace_gicv3_icc_iar1_read(gicv3_redist_affid(cs), intid);
    return intid;
}
1312  
static uint64_t icc_nmiar1_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * Read of ICC_NMIAR1_EL1: acknowledge the highest priority pending
     * Group 1 interrupt only if it is an NMI; otherwise return spurious.
     */
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t intid;

    if (icv_access(env, HCR_IMO)) {
        /* Access is redirected to the virtual CPU interface */
        return icv_nmiar1_read(env, ri);
    }

    if (!icc_hppi_can_preempt(cs)) {
        intid = INTID_SPURIOUS;
    } else {
        intid = icc_hppir1_value(cs, env);
    }

    if (!gicv3_intid_is_special(intid)) {
        if (!cs->hppi.nmi) {
            /* Non-NMI interrupts cannot be acknowledged via NMIAR */
            intid = INTID_SPURIOUS;
        } else {
            icc_activate_irq(cs, intid);
        }
    }

    trace_gicv3_icc_nmiar1_read(gicv3_redist_affid(cs), intid);
    return intid;
}
1339  
static void icc_drop_prio(GICv3CPUState *cs, int grp)
{
    /* Drop the priority of the currently active interrupt in
     * the specified group.
     *
     * Note that we can guarantee (because of the requirement to nest
     * ICC_IAR reads [which activate an interrupt and raise priority]
     * with ICC_EOIR writes [which drop the priority for the interrupt])
     * that the interrupt we're being called for is the highest priority
     * active interrupt, meaning that it has the lowest set bit in the
     * APR registers.
     *
     * If the guest does not honour the ordering constraints then the
     * behaviour of the GIC is UNPREDICTABLE, which for us means that
     * the values of the APR registers might become incorrect and the
     * running priority will be wrong, so interrupts that should preempt
     * might not do so, and interrupts that should not preempt might do so.
     */
    int i;

    for (i = 0; i < icc_num_aprs(cs); i++) {
        uint64_t *papr = &cs->icc_apr[grp][i];

        if (!*papr) {
            continue;
        }

        if (i == 0 && cs->nmi_support && (*papr & ICC_AP1R_EL1_NMI)) {
            /* An active NMI: clear its dedicated AP1R0 bit instead */
            *papr &= (~ICC_AP1R_EL1_NMI);
            break;
        }

        /* Clear the lowest set bit */
        *papr &= *papr - 1;
        break;
    }

    /* running priority change means we need an update for this cpu i/f */
    gicv3_cpuif_update(cs);
}
1380  
icc_eoi_split(CPUARMState * env,GICv3CPUState * cs)1381  static bool icc_eoi_split(CPUARMState *env, GICv3CPUState *cs)
1382  {
1383      /* Return true if we should split priority drop and interrupt
1384       * deactivation, ie whether the relevant EOIMode bit is set.
1385       */
1386      if (arm_is_el3_or_mon(env)) {
1387          return cs->icc_ctlr_el3 & ICC_CTLR_EL3_EOIMODE_EL3;
1388      }
1389      if (arm_is_secure_below_el3(env)) {
1390          return cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_EOIMODE;
1391      } else {
1392          return cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_EOIMODE;
1393      }
1394  }
1395  
static int icc_highest_active_group(GICv3CPUState *cs)
{
    /* Return the group with the highest priority active interrupt.
     * We can do this by just comparing the APRs to see which one
     * has the lowest set bit.
     * (If more than one group is active at the same priority then
     * we're in UNPREDICTABLE territory.)
     */
    int i;

    if (cs->nmi_support) {
        /*
         * Active NMIs outrank everything else; Secure Group 1 NMIs
         * take precedence over NonSecure Group 1 NMIs.
         */
        if (cs->icc_apr[GICV3_G1][0] & ICC_AP1R_EL1_NMI) {
            return GICV3_G1;
        }
        if (cs->icc_apr[GICV3_G1NS][0] & ICC_AP1R_EL1_NMI) {
            return GICV3_G1NS;
        }
    }

    for (i = 0; i < ARRAY_SIZE(cs->icc_apr[0]); i++) {
        /* ctz32() returns 32 for a zero word, so empty APRs lose */
        int g0ctz = ctz32(cs->icc_apr[GICV3_G0][i]);
        int g1ctz = ctz32(cs->icc_apr[GICV3_G1][i]);
        int g1nsctz = ctz32(cs->icc_apr[GICV3_G1NS][i]);

        if (g1nsctz < g0ctz && g1nsctz < g1ctz) {
            return GICV3_G1NS;
        }
        if (g1ctz < g0ctz) {
            return GICV3_G1;
        }
        if (g0ctz < 32) {
            return GICV3_G0;
        }
    }
    /* No set active bits? UNPREDICTABLE; return -1 so the caller
     * ignores the spurious EOI attempt.
     */
    return -1;
}
1435  
static void icc_deactivate_irq(GICv3CPUState *cs, int irq)
{
    /* Clear the Active state of a physical interrupt */
    if (irq >= GIC_INTERNAL) {
        /* SPI: active state lives in the distributor */
        gicv3_gicd_active_clear(cs->gic, irq);
        gicv3_update(cs->gic, irq, 1);
    } else {
        /* SGI/PPI: active state lives in this CPU's redistributor */
        cs->gicr_iactiver0 = deposit32(cs->gicr_iactiver0, irq, 1, 0);
        gicv3_redist_update(cs);
    }
}
1446  
icv_eoi_split(CPUARMState * env,GICv3CPUState * cs)1447  static bool icv_eoi_split(CPUARMState *env, GICv3CPUState *cs)
1448  {
1449      /* Return true if we should split priority drop and interrupt
1450       * deactivation, ie whether the virtual EOIMode bit is set.
1451       */
1452      return cs->ich_vmcr_el2 & ICH_VMCR_EL2_VEOIM;
1453  }
1454  
static int icv_find_active(GICv3CPUState *cs, int irq)
{
    /*
     * Find the list register holding the given interrupt in the Active
     * state and return its index, or -1 if there is no match.
     * Corresponds to the FindActiveVirtualInterrupt pseudocode.
     */
    int idx;

    for (idx = 0; idx < cs->num_list_regs; idx++) {
        uint64_t lr = cs->ich_lr_el2[idx];

        if (ich_lr_vintid(lr) == irq && (lr & ICH_LR_EL2_STATE_ACTIVE_BIT)) {
            return idx;
        }
    }

    return -1;
}
1473  
static void icv_deactivate_irq(GICv3CPUState *cs, int idx)
{
    /* Deactivate the virtual interrupt held in list register idx */
    uint64_t lr = cs->ich_lr_el2[idx];

    if (lr & ICH_LR_EL2_HW) {
        /* A hardware interrupt: also deactivate its physical source */
        int pirq = ich_lr_pintid(lr);

        if (pirq < INTID_SECURE) {
            icc_deactivate_irq(cs, pirq);
        }
    }

    /*
     * Drop the 'active' part of the LR state, so ActivePending becomes
     * Pending and Active becomes Invalid.
     */
    cs->ich_lr_el2[idx] = lr & ~ICH_LR_EL2_STATE_ACTIVE_BIT;
}
1494  
static void icv_increment_eoicount(GICv3CPUState *cs)
{
    /* Bump the EOICOUNT field in ICH_HCR_EL2 by one */
    uint64_t hcr = cs->ich_hcr_el2;
    int count = extract64(hcr, ICH_HCR_EL2_EOICOUNT_SHIFT,
                          ICH_HCR_EL2_EOICOUNT_LENGTH);

    cs->ich_hcr_el2 = deposit64(hcr, ICH_HCR_EL2_EOICOUNT_SHIFT,
                                ICH_HCR_EL2_EOICOUNT_LENGTH, count + 1);
}
1504  
static int icv_drop_prio(GICv3CPUState *cs, bool *nmi)
{
    /* Drop the priority of the currently active virtual interrupt
     * (favouring group 0 if there is a set active bit at
     * the same priority for both group 0 and group 1).
     * Return the priority value for the bit we just cleared,
     * or 0xff if no bits were set in the AP registers at all.
     * If the dropped priority was an NMI, *nmi is set to true
     * (and the returned 0xff priority is not meaningful).
     * Note that though the ich_apr[] are uint64_t only the low
     * 32 bits are actually relevant.
     */
    int i;
    int aprmax = ich_num_aprs(cs);

    for (i = 0; i < aprmax; i++) {
        uint64_t *papr0 = &cs->ich_apr[GICV3_G0][i];
        uint64_t *papr1 = &cs->ich_apr[GICV3_G1NS][i];
        int apr0count, apr1count;

        if (!*papr0 && !*papr1) {
            continue;
        }

        if (i == 0 && cs->nmi_support && (*papr1 & ICV_AP1R_EL1_NMI)) {
            /* An active virtual NMI: clear its dedicated AP1R0 bit */
            *papr1 &= (~ICV_AP1R_EL1_NMI);
            *nmi = true;
            return 0xff;
        }

        /* We can't just use the bit-twiddling hack icc_drop_prio() does
         * because we need to return the bit number we cleared so
         * it can be compared against the list register's priority field.
         */
        apr0count = ctz32(*papr0);
        apr1count = ctz32(*papr1);

        if (apr0count <= apr1count) {
            *papr0 &= *papr0 - 1;
            return (apr0count + i * 32) << (icv_min_vbpr(cs) + 1);
        } else {
            *papr1 &= *papr1 - 1;
            return (apr1count + i * 32) << (icv_min_vbpr(cs) + 1);
        }
    }
    return 0xff;
}
1550  
static void icv_dir_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Deactivate interrupt */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int idx;
    int irq = value & 0xffffff;

    trace_gicv3_icv_dir_write(gicv3_redist_affid(cs), value);

    if (irq >= GICV3_MAXIRQ) {
        /* Also catches special interrupt numbers and LPIs */
        return;
    }

    if (!icv_eoi_split(env, cs)) {
        /*
         * DIR writes only take effect when priority drop and
         * deactivation are split (VEOIM set); otherwise we choose
         * to ignore the write.
         */
        return;
    }

    idx = icv_find_active(cs, irq);

    if (idx < 0) {
        /* No list register matching this, so increment the EOI count
         * (might trigger a maintenance interrupt)
         */
        icv_increment_eoicount(cs);
    } else {
        icv_deactivate_irq(cs, idx);
    }

    gicv3_cpuif_virt_update(cs);
}
1583  
static void icv_eoir_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /*
     * Write to ICV_EOIR0_EL1 or ICV_EOIR1_EL1 (End Of Interrupt for a
     * virtual interrupt): drop the priority of the current active
     * virtual interrupt and, unless priority drop and deactivation are
     * split (ICH_VMCR_EL2.VEOIM), also deactivate it.
     * ri->crm == 8 distinguishes EOIR0 (Group 0) from EOIR1 (Group 1 NS).
     */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int irq = value & 0xffffff;
    int grp = ri->crm == 8 ? GICV3_G0 : GICV3_G1NS;
    int idx, dropprio;
    bool nmi = false;

    trace_gicv3_icv_eoir_write(ri->crm == 8 ? 0 : 1,
                               gicv3_redist_affid(cs), value);

    if (gicv3_intid_is_special(irq)) {
        /* Writes of the special INTIDs are ignored */
        return;
    }

    /* We implement the IMPDEF choice of "drop priority before doing
     * error checks" (because that lets us avoid scanning the AP
     * registers twice).
     */
    dropprio = icv_drop_prio(cs, &nmi);
    if (dropprio == 0xff && !nmi) {
        /* No active interrupt. It is CONSTRAINED UNPREDICTABLE
         * whether the list registers are checked in this
         * situation; we choose not to.
         */
        return;
    }

    idx = icv_find_active(cs, irq);

    if (idx < 0) {
        /*
         * No valid list register corresponding to EOI ID; if this is a vLPI
         * not in the list regs then do nothing; otherwise increment EOI count
         */
        if (irq < GICV3_LPI_INTID_START) {
            icv_increment_eoicount(cs);
        }
    } else {
        uint64_t lr = cs->ich_lr_el2[idx];
        int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;
        int lr_gprio = ich_lr_prio(lr) & icv_gprio_mask(cs, grp);
        bool thisnmi = lr & ICH_LR_EL2_NMI;

        /*
         * Deactivate only if the LR's group matches the group being
         * EOI'd, and either its group priority equals the priority we
         * just dropped or both the dropped priority and the LR are NMIs.
         * (Combine the two booleans with logical &&, not bitwise &.)
         */
        if (thisgrp == grp && (lr_gprio == dropprio || (thisnmi && nmi))) {
            if (!icv_eoi_split(env, cs) || irq >= GICV3_LPI_INTID_START) {
                /*
                 * Priority drop and deactivate not split: deactivate irq now.
                 * LPIs always get their active state cleared immediately
                 * because no separate deactivate is expected.
                 */
                icv_deactivate_irq(cs, idx);
            }
        }
    }

    gicv3_cpuif_virt_update(cs);
}
1644  
static void icc_eoir_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* End of Interrupt */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int irq = value & 0xffffff;
    int grp;
    bool is_eoir0 = ri->crm == 8;

    if (icv_access(env, is_eoir0 ? HCR_FMO : HCR_IMO)) {
        /* Access is redirected to the virtual CPU interface */
        icv_eoir_write(env, ri, value);
        return;
    }

    trace_gicv3_icc_eoir_write(is_eoir0 ? 0 : 1,
                               gicv3_redist_affid(cs), value);

    if ((irq >= cs->gic->num_irq) &&
        !(cs->gic->lpi_enable && (irq >= GICV3_LPI_INTID_START))) {
        /* This handles two cases:
         * 1. If software writes the ID of a spurious interrupt [ie 1020-1023]
         * to the GICC_EOIR, the GIC ignores that write.
         * 2. If software writes the number of a non-existent interrupt
         * this must be a subcase of "value written does not match the last
         * valid interrupt value read from the Interrupt Acknowledge
         * register" and so this is UNPREDICTABLE. We choose to ignore it.
         */
        return;
    }

    /* Check the EOI is permitted for the group with the highest
     * priority active interrupt, given the register written and the
     * current security state; bail out silently if not.
     */
    grp = icc_highest_active_group(cs);
    switch (grp) {
    case GICV3_G0:
        /* Group 0 EOIs must come via EOIR0, and not from NS when
         * the GIC has Security enabled.
         */
        if (!is_eoir0) {
            return;
        }
        if (!(cs->gic->gicd_ctlr & GICD_CTLR_DS)
            && arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env)) {
            return;
        }
        break;
    case GICV3_G1:
        /* Secure Group 1 EOIs only via EOIR1 from Secure state */
        if (is_eoir0) {
            return;
        }
        if (!arm_is_secure(env)) {
            return;
        }
        break;
    case GICV3_G1NS:
        /* NS Group 1 EOIs only via EOIR1, not from Secure EL1 */
        if (is_eoir0) {
            return;
        }
        if (!arm_is_el3_or_mon(env) && arm_is_secure(env)) {
            return;
        }
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: IRQ %d isn't active\n", __func__, irq);
        return;
    }

    icc_drop_prio(cs, grp);

    if (!icc_eoi_split(env, cs)) {
        /* Priority drop and deactivate not split: deactivate irq now */
        icc_deactivate_irq(cs, irq);
    }
}
1715  
icc_hppir0_read(CPUARMState * env,const ARMCPRegInfo * ri)1716  static uint64_t icc_hppir0_read(CPUARMState *env, const ARMCPRegInfo *ri)
1717  {
1718      GICv3CPUState *cs = icc_cs_from_env(env);
1719      uint64_t value;
1720  
1721      if (icv_access(env, HCR_FMO)) {
1722          return icv_hppir_read(env, ri);
1723      }
1724  
1725      value = icc_hppir0_value(cs, env);
1726      trace_gicv3_icc_hppir0_read(gicv3_redist_affid(cs), value);
1727      return value;
1728  }
1729  
icc_hppir1_read(CPUARMState * env,const ARMCPRegInfo * ri)1730  static uint64_t icc_hppir1_read(CPUARMState *env, const ARMCPRegInfo *ri)
1731  {
1732      GICv3CPUState *cs = icc_cs_from_env(env);
1733      uint64_t value;
1734  
1735      if (icv_access(env, HCR_IMO)) {
1736          return icv_hppir_read(env, ri);
1737      }
1738  
1739      value = icc_hppir1_value(cs, env);
1740      trace_gicv3_icc_hppir1_read(gicv3_redist_affid(cs), value);
1741      return value;
1742  }
1743  
/*
 * Read ICC_BPR0_EL1 or ICC_BPR1_EL1 (Binary Point Registers).
 * crm == 8 selects BPR0 (Group 0); otherwise BPR1 (Group 1), which is
 * banked by Security state. ICC_CTLR_EL1.CBPR aliasing may redirect a
 * BPR1 access to BPR0, for NS accesses with a saturating increment.
 */
static uint64_t icc_bpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1;
    bool satinc = false;
    uint64_t bpr;

    /* Virtualized access (FIQ/IRQ routed to EL2): use the virtual BPR */
    if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
        return icv_bpr_read(env, ri);
    }

    if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
        grp = GICV3_G1NS;
    }

    if (grp == GICV3_G1 && !arm_is_el3_or_mon(env) &&
        (cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR)) {
        /* CBPR_EL1S means secure EL1 or AArch32 EL3 !Mon BPR1 accesses
         * modify BPR0
         */
        grp = GICV3_G0;
    }

    if (grp == GICV3_G1NS && arm_current_el(env) < 3 &&
        (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR)) {
        /* reads return bpr0 + 1 sat to 7, writes ignored */
        grp = GICV3_G0;
        satinc = true;
    }

    bpr = cs->icc_bpr[grp];
    if (satinc) {
        /* Non-secure view of BPR0: one greater, saturated at 7 */
        bpr++;
        bpr = MIN(bpr, 7);
    }

    trace_gicv3_icc_bpr_read(ri->crm == 8 ? 0 : 1, gicv3_redist_affid(cs), bpr);

    return bpr;
}
1784  
/*
 * Write ICC_BPR0_EL1 or ICC_BPR1_EL1 (Binary Point Registers).
 * crm == 8 selects BPR0 (Group 0); otherwise BPR1 (Group 1), banked by
 * Security state. CBPR aliasing may redirect the write to BPR0 or make
 * it write-ignored; the value is clamped to the group's minimum BPR.
 */
static void icc_bpr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1;
    uint64_t minval;

    /* Virtualized access (FIQ/IRQ routed to EL2): write the virtual BPR */
    if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
        icv_bpr_write(env, ri, value);
        return;
    }

    trace_gicv3_icc_bpr_write(ri->crm == 8 ? 0 : 1,
                              gicv3_redist_affid(cs), value);

    if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
        grp = GICV3_G1NS;
    }

    if (grp == GICV3_G1 && !arm_is_el3_or_mon(env) &&
        (cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR)) {
        /* CBPR_EL1S means secure EL1 or AArch32 EL3 !Mon BPR1 accesses
         * modify BPR0
         */
        grp = GICV3_G0;
    }

    if (grp == GICV3_G1NS && arm_current_el(env) < 3 &&
        (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR)) {
        /* reads return bpr0 + 1 sat to 7, writes ignored */
        return;
    }

    /* Writes below the minimum BPR for this group are clamped up to it */
    minval = (grp == GICV3_G1NS) ? icc_min_bpr_ns(cs) : icc_min_bpr(cs);
    if (value < minval) {
        value = minval;
    }

    cs->icc_bpr[grp] = value & 7;
    gicv3_cpuif_update(cs);
}
1826  
icc_ap_read(CPUARMState * env,const ARMCPRegInfo * ri)1827  static uint64_t icc_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
1828  {
1829      GICv3CPUState *cs = icc_cs_from_env(env);
1830      uint64_t value;
1831  
1832      int regno = ri->opc2 & 3;
1833      int grp = (ri->crm & 1) ? GICV3_G1 : GICV3_G0;
1834  
1835      if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
1836          return icv_ap_read(env, ri);
1837      }
1838  
1839      if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
1840          grp = GICV3_G1NS;
1841      }
1842  
1843      value = cs->icc_apr[grp][regno];
1844  
1845      trace_gicv3_icc_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
1846      return value;
1847  }
1848  
/*
 * Write one of the ICC_AP0Rn_EL1 / ICC_AP1Rn_EL1 active priority
 * registers. crm bit 0 distinguishes AP1R (Group 1) from AP0R (Group 0);
 * opc2 bits [1:0] give the register index. AP1R is banked by Security
 * state, and NS writes to the Secure half of the priority range are
 * ignored (see below).
 */
static void icc_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    GICv3CPUState *cs = icc_cs_from_env(env);

    int regno = ri->opc2 & 3;
    int grp = (ri->crm & 1) ? GICV3_G1 : GICV3_G0;

    /* Virtualized access: forward to the virtual AP registers */
    if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
        icv_ap_write(env, ri, value);
        return;
    }

    trace_gicv3_icc_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);

    if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
        grp = GICV3_G1NS;
    }

    /* It's not possible to claim that a Non-secure interrupt is active
     * at a priority outside the Non-secure range (128..255), since this
     * would otherwise allow malicious NS code to block delivery of S interrupts
     * by writing a bad value to these registers.
     */
    if (grp == GICV3_G1NS && regno < 2 && arm_feature(env, ARM_FEATURE_EL3)) {
        return;
    }

    /* When NMIs are supported the NMI flag bit is also writable */
    if (cs->nmi_support) {
        cs->icc_apr[grp][regno] = value & (0xFFFFFFFFU | ICC_AP1R_EL1_NMI);
    } else {
        cs->icc_apr[grp][regno] = value & 0xFFFFFFFFU;
    }
    gicv3_cpuif_update(cs);
}
1884  
/*
 * Write ICC_DIR_EL1: explicitly deactivate an interrupt.
 * This only has an effect when EOI mode is "split" (priority drop and
 * deactivation separated); whether the deactivation is permitted depends
 * on the interrupt's group, the current EL/Security state and the
 * SCR_EL3/HCR_EL2 interrupt routing bits, mirroring the spec pseudocode.
 */
static void icc_dir_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Deactivate interrupt */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int irq = value & 0xffffff;
    bool irq_is_secure, single_sec_state, irq_is_grp0;
    bool route_fiq_to_el3, route_irq_to_el3, route_fiq_to_el2, route_irq_to_el2;

    /* Virtualized access: forward to the virtual DIR */
    if (icv_access(env, HCR_FMO | HCR_IMO)) {
        icv_dir_write(env, ri, value);
        return;
    }

    trace_gicv3_icc_dir_write(gicv3_redist_affid(cs), value);

    if (irq >= cs->gic->num_irq) {
        /* Also catches special interrupt numbers and LPIs */
        return;
    }

    /* In non-split EOI mode the EOIR write deactivates; DIR is ignored */
    if (!icc_eoi_split(env, cs)) {
        return;
    }

    int grp = gicv3_irq_group(cs->gic, cs, irq);

    single_sec_state = cs->gic->gicd_ctlr & GICD_CTLR_DS;
    irq_is_secure = !single_sec_state && (grp != GICV3_G1NS);
    irq_is_grp0 = grp == GICV3_G0;

    /* Check whether we're allowed to deactivate this interrupt based
     * on its group and the current CPU state.
     * These checks are laid out to correspond to the spec's pseudocode.
     */
    route_fiq_to_el3 = env->cp15.scr_el3 & SCR_FIQ;
    route_irq_to_el3 = env->cp15.scr_el3 & SCR_IRQ;
    /* No need to include !IsSecure in route_*_to_el2 as it's only
     * tested in cases where we know !IsSecure is true.
     */
    uint64_t hcr_el2 = arm_hcr_el2_eff(env);
    route_fiq_to_el2 = hcr_el2 & HCR_FMO;
    route_irq_to_el2 = hcr_el2 & HCR_IMO;

    switch (arm_current_el(env)) {
    case 3:
        /* EL3 may always deactivate */
        break;
    case 2:
        if (single_sec_state && irq_is_grp0 && !route_fiq_to_el3) {
            break;
        }
        if (!irq_is_secure && !irq_is_grp0 && !route_irq_to_el3) {
            break;
        }
        return;
    case 1:
        if (!arm_is_secure_below_el3(env)) {
            if (single_sec_state && irq_is_grp0 &&
                !route_fiq_to_el3 && !route_fiq_to_el2) {
                break;
            }
            if (!irq_is_secure && !irq_is_grp0 &&
                !route_irq_to_el3 && !route_irq_to_el2) {
                break;
            }
        } else {
            if (irq_is_grp0 && !route_fiq_to_el3) {
                break;
            }
            if (!irq_is_grp0 &&
                (!irq_is_secure || !single_sec_state) &&
                !route_irq_to_el3) {
                break;
            }
        }
        return;
    default:
        g_assert_not_reached();
    }

    icc_deactivate_irq(cs, irq);
}
1967  
/*
 * Read ICC_RPR_EL1 (Running Priority Register): the priority of the
 * highest active priority group. For Non-secure reads when EL3 exists
 * and FIQs route to EL3, the Secure half of the priority range is
 * hidden and Non-secure priorities are shown in the NS view. When NMIs
 * are supported, the NMI status is reported in the high bits.
 */
static uint64_t icc_rpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t prio;

    /* Virtualized access: report the virtual running priority */
    if (icv_access(env, HCR_FMO | HCR_IMO)) {
        return icv_rpr_read(env, ri);
    }

    prio = icc_highest_active_prio(cs);

    if (arm_feature(env, ARM_FEATURE_EL3) &&
        !arm_is_secure(env) && (env->cp15.scr_el3 & SCR_FIQ)) {
        /* NS GIC access and Group 0 is inaccessible to NS */
        if ((prio & 0x80) == 0) {
            /* NS mustn't see priorities in the Secure half of the range */
            prio = 0;
        } else if (prio != 0xff) {
            /* Non-idle priority: show the Non-secure view of it */
            prio = (prio << 1) & 0xff;
        }
    }

    if (cs->nmi_support) {
        /* NMI info is reported in the high bits of RPR */
        if (arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env)) {
            if (cs->icc_apr[GICV3_G1NS][0] & ICC_AP1R_EL1_NMI) {
                prio |= ICC_RPR_EL1_NMI;
            }
        } else {
            if (cs->icc_apr[GICV3_G1NS][0] & ICC_AP1R_EL1_NMI) {
                prio |= ICC_RPR_EL1_NSNMI;
            }
            if (cs->icc_apr[GICV3_G1][0] & ICC_AP1R_EL1_NMI) {
                prio |= ICC_RPR_EL1_NMI;
            }
        }
    }

    trace_gicv3_icc_rpr_read(gicv3_redist_affid(cs), prio);
    return prio;
}
2010  
/*
 * Common worker for the ICC_SGI*R write functions: decode an SGI request
 * value and deliver the SGI to every targeted redistributor.
 *
 * Register layout decoded here: Aff3/Aff2/Aff1 at bits [55:48]/[39:32]/
 * [23:16], IRM (routing mode) at bit 40, the SGI INTID at bits [27:24]
 * and a 16-bit Aff0 target list at bits [15:0].
 *
 * @grp: interrupt group to generate (may be downgraded to G0, see below)
 * @ns:  true if the requesting access was Non-secure
 */
static void icc_generate_sgi(CPUARMState *env, GICv3CPUState *cs,
                             uint64_t value, int grp, bool ns)
{
    GICv3State *s = cs->gic;

    /* Extract Aff3/Aff2/Aff1 and shift into the bottom 24 bits */
    uint64_t aff = extract64(value, 48, 8) << 16 |
        extract64(value, 32, 8) << 8 |
        extract64(value, 16, 8);
    uint32_t targetlist = extract64(value, 0, 16);
    uint32_t irq = extract64(value, 24, 4);
    bool irm = extract64(value, 40, 1);
    int i;

    if (grp == GICV3_G1 && s->gicd_ctlr & GICD_CTLR_DS) {
        /* If GICD_CTLR.DS == 1, the Distributor treats Secure Group 1
         * interrupts as Group 0 interrupts and must send Secure Group 0
         * interrupts to the target CPUs.
         */
        grp = GICV3_G0;
    }

    trace_gicv3_icc_generate_sgi(gicv3_redist_affid(cs), irq, irm,
                                 aff, targetlist);

    for (i = 0; i < s->num_cpu; i++) {
        GICv3CPUState *ocs = &s->cpu[i];

        if (irm) {
            /* IRM == 1 : route to all CPUs except self */
            if (cs == ocs) {
                continue;
            }
        } else {
            /* IRM == 0 : route to Aff3.Aff2.Aff1.n for all n in [0..15]
             * where the corresponding bit is set in targetlist
             */
            int aff0;

            /* GICR_TYPER bits [63:40] hold this CPU's Aff3.Aff2.Aff1 */
            if (ocs->gicr_typer >> 40 != aff) {
                continue;
            }
            aff0 = extract64(ocs->gicr_typer, 32, 8);
            if (aff0 > 15 || extract32(targetlist, aff0, 1) == 0) {
                continue;
            }
        }

        /* The redistributor will check against its own GICR_NSACR as needed */
        gicv3_redist_send_sgi(ocs, grp, irq, ns);
    }
}
2063  
static void icc_sgi0r_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Request generation of a Secure Group 0 SGI. */
    GICv3CPUState *cs = icc_cs_from_env(env);

    icc_generate_sgi(env, cs, value, GICV3_G0, !arm_is_secure(env));
}
2073  
static void icc_sgi1r_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Generate a Group 1 SGI for the current Security state. */
    GICv3CPUState *cs = icc_cs_from_env(env);
    bool nonsecure = !arm_is_secure(env);
    int grp = nonsecure ? GICV3_G1NS : GICV3_G1;

    icc_generate_sgi(env, cs, value, grp, nonsecure);
}
2085  
static void icc_asgi1r_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /*
     * Generate a Group 1 SGI for the Security state opposite to the
     * current one ("alias" SGI register).
     */
    GICv3CPUState *cs = icc_cs_from_env(env);
    bool nonsecure = !arm_is_secure(env);
    int grp = nonsecure ? GICV3_G1 : GICV3_G1NS;

    icc_generate_sgi(env, cs, value, grp, nonsecure);
}
2099  
icc_igrpen_read(CPUARMState * env,const ARMCPRegInfo * ri)2100  static uint64_t icc_igrpen_read(CPUARMState *env, const ARMCPRegInfo *ri)
2101  {
2102      GICv3CPUState *cs = icc_cs_from_env(env);
2103      int grp = ri->opc2 & 1 ? GICV3_G1 : GICV3_G0;
2104      uint64_t value;
2105  
2106      if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
2107          return icv_igrpen_read(env, ri);
2108      }
2109  
2110      if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
2111          grp = GICV3_G1NS;
2112      }
2113  
2114      value = cs->icc_igrpen[grp];
2115      trace_gicv3_icc_igrpen_read(ri->opc2 & 1 ? 1 : 0,
2116                                  gicv3_redist_affid(cs), value);
2117      return value;
2118  }
2119  
static void icc_igrpen_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /*
     * Write ICC_IGRPEN0_EL1 or ICC_IGRPEN1_EL1 (interrupt group enables).
     * opc2 bit 0 selects Group 1; IGRPEN1 is banked by Security state.
     * Only the ENABLE bit is writable.
     */
    GICv3CPUState *cs = icc_cs_from_env(env);
    bool is_grp1 = ri->opc2 & 1;
    int grp = is_grp1 ? GICV3_G1 : GICV3_G0;

    if (icv_access(env, is_grp1 ? HCR_IMO : HCR_FMO)) {
        icv_igrpen_write(env, ri, value);
        return;
    }

    trace_gicv3_icc_igrpen_write(is_grp1 ? 1 : 0,
                                 gicv3_redist_affid(cs), value);

    if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
        grp = GICV3_G1NS;
    }

    cs->icc_igrpen[grp] = value & ICC_IGRPEN_ENABLE;
    gicv3_cpuif_update(cs);
}
2141  
icc_igrpen1_el3_read(CPUARMState * env,const ARMCPRegInfo * ri)2142  static uint64_t icc_igrpen1_el3_read(CPUARMState *env, const ARMCPRegInfo *ri)
2143  {
2144      GICv3CPUState *cs = icc_cs_from_env(env);
2145      uint64_t value;
2146  
2147      /* IGRPEN1_EL3 bits 0 and 1 are r/w aliases into IGRPEN1_EL1 NS and S */
2148      value = cs->icc_igrpen[GICV3_G1NS] | (cs->icc_igrpen[GICV3_G1] << 1);
2149      trace_gicv3_icc_igrpen1_el3_read(gicv3_redist_affid(cs), value);
2150      return value;
2151  }
2152  
static void icc_igrpen1_el3_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    GICv3CPUState *cs = icc_cs_from_env(env);

    trace_gicv3_icc_igrpen1_el3_write(gicv3_redist_affid(cs), value);

    /* IGRPEN1_EL3 bits 0 and 1 are r/w aliases into IGRPEN1_EL1 NS and S */
    cs->icc_igrpen[GICV3_G1NS] = value & 1;
    cs->icc_igrpen[GICV3_G1] = (value >> 1) & 1;
    gicv3_cpuif_update(cs);
}
2165  
icc_ctlr_el1_read(CPUARMState * env,const ARMCPRegInfo * ri)2166  static uint64_t icc_ctlr_el1_read(CPUARMState *env, const ARMCPRegInfo *ri)
2167  {
2168      GICv3CPUState *cs = icc_cs_from_env(env);
2169      int bank = gicv3_use_ns_bank(env) ? GICV3_NS : GICV3_S;
2170      uint64_t value;
2171  
2172      if (icv_access(env, HCR_FMO | HCR_IMO)) {
2173          return icv_ctlr_read(env, ri);
2174      }
2175  
2176      value = cs->icc_ctlr_el1[bank];
2177      trace_gicv3_icc_ctlr_read(gicv3_redist_affid(cs), value);
2178      return value;
2179  }
2180  
static void icc_ctlr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    /* Write the banked ICC_CTLR_EL1 for the current Security state. */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int bank = gicv3_use_ns_bank(env) ? GICV3_NS : GICV3_S;
    uint64_t writable = ICC_CTLR_EL1_EOIMODE;

    if (icv_access(env, HCR_FMO | HCR_IMO)) {
        icv_ctlr_write(env, ri, value);
        return;
    }

    trace_gicv3_icc_ctlr_write(gicv3_redist_affid(cs), value);

    /*
     * At most CBPR and EOIMODE are writable; for us PMHE is RAZ/WI
     * (we don't implement 1-of-N interrupts or the associated
     * priority-based routing of them). If EL3 is implemented and
     * GICD_CTLR.DS == 0, CBPR (like PMHE) is read-only too, leaving
     * only EOIMODE writable.
     */
    if (!arm_feature(env, ARM_FEATURE_EL3) ||
        (cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
        writable |= ICC_CTLR_EL1_CBPR;
    }

    cs->icc_ctlr_el1[bank] = (cs->icc_ctlr_el1[bank] & ~writable) |
        (value & writable);
    gicv3_cpuif_update(cs);
}
2211  
2212  
icc_ctlr_el3_read(CPUARMState * env,const ARMCPRegInfo * ri)2213  static uint64_t icc_ctlr_el3_read(CPUARMState *env, const ARMCPRegInfo *ri)
2214  {
2215      GICv3CPUState *cs = icc_cs_from_env(env);
2216      uint64_t value;
2217  
2218      value = cs->icc_ctlr_el3;
2219      if (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_EOIMODE) {
2220          value |= ICC_CTLR_EL3_EOIMODE_EL1NS;
2221      }
2222      if (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR) {
2223          value |= ICC_CTLR_EL3_CBPR_EL1NS;
2224      }
2225      if (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_EOIMODE) {
2226          value |= ICC_CTLR_EL3_EOIMODE_EL1S;
2227      }
2228      if (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR) {
2229          value |= ICC_CTLR_EL3_CBPR_EL1S;
2230      }
2231  
2232      trace_gicv3_icc_ctlr_el3_read(gicv3_redist_affid(cs), value);
2233      return value;
2234  }
2235  
static void icc_ctlr_el3_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    /*
     * Write ICC_CTLR_EL3. The *_EL1NS and *_EL1S fields are r/w aliases
     * into the CBPR/EOIMODE bits of the two ICC_CTLR_EL1 banks; the only
     * bit stored directly in icc_ctlr_el3 is EOIMODE_EL3.
     */
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t ns_bits = 0, s_bits = 0;

    trace_gicv3_icc_ctlr_el3_write(gicv3_redist_affid(cs), value);

    if (value & ICC_CTLR_EL3_EOIMODE_EL1NS) {
        ns_bits |= ICC_CTLR_EL1_EOIMODE;
    }
    if (value & ICC_CTLR_EL3_CBPR_EL1NS) {
        ns_bits |= ICC_CTLR_EL1_CBPR;
    }
    if (value & ICC_CTLR_EL3_EOIMODE_EL1S) {
        s_bits |= ICC_CTLR_EL1_EOIMODE;
    }
    if (value & ICC_CTLR_EL3_CBPR_EL1S) {
        s_bits |= ICC_CTLR_EL1_CBPR;
    }

    cs->icc_ctlr_el1[GICV3_NS] &= ~(ICC_CTLR_EL1_CBPR | ICC_CTLR_EL1_EOIMODE);
    cs->icc_ctlr_el1[GICV3_NS] |= ns_bits;
    cs->icc_ctlr_el1[GICV3_S] &= ~(ICC_CTLR_EL1_CBPR | ICC_CTLR_EL1_EOIMODE);
    cs->icc_ctlr_el1[GICV3_S] |= s_bits;

    cs->icc_ctlr_el3 &= ~ICC_CTLR_EL3_EOIMODE_EL3;
    cs->icc_ctlr_el3 |= value & ICC_CTLR_EL3_EOIMODE_EL3;
    gicv3_cpuif_update(cs);
}
2268  
/*
 * Access check used for CPU interface registers shared between the IRQ
 * and FIQ paths (e.g. ICC_PMR_EL1, ICC_RPR_EL1). An ICH_HCR_EL2.TC trap
 * to EL2 takes priority; otherwise, when both SCR_EL3.FIQ and SCR_EL3.IRQ
 * route interrupts to EL3, lower-EL accesses trap to EL3.
 */
static CPAccessResult gicv3_irqfiq_access(CPUARMState *env,
                                          const ARMCPRegInfo *ri, bool isread)
{
    CPAccessResult r = CP_ACCESS_OK;
    GICv3CPUState *cs = icc_cs_from_env(env);
    int el = arm_current_el(env);

    if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TC) &&
        el == 1 && !arm_is_secure_below_el3(env)) {
        /* Takes priority over a possible EL3 trap */
        return CP_ACCESS_TRAP_EL2;
    }

    if ((env->cp15.scr_el3 & (SCR_FIQ | SCR_IRQ)) == (SCR_FIQ | SCR_IRQ)) {
        switch (el) {
        case 1:
            /* Note that arm_hcr_el2_eff takes secure state into account.  */
            if ((arm_hcr_el2_eff(env) & (HCR_IMO | HCR_FMO)) == 0) {
                /* Not trapped to EL2 by IMO/FMO, so EL3 gets the trap */
                r = CP_ACCESS_TRAP_EL3;
            }
            break;
        case 2:
            r = CP_ACCESS_TRAP_EL3;
            break;
        case 3:
            if (!is_a64(env) && !arm_is_el3_or_mon(env)) {
                /* AArch32 EL3 outside Monitor mode still traps */
                r = CP_ACCESS_TRAP_EL3;
            }
            break;
        default:
            g_assert_not_reached();
        }
    }

    return r;
}
2305  
gicv3_dir_access(CPUARMState * env,const ARMCPRegInfo * ri,bool isread)2306  static CPAccessResult gicv3_dir_access(CPUARMState *env,
2307                                         const ARMCPRegInfo *ri, bool isread)
2308  {
2309      GICv3CPUState *cs = icc_cs_from_env(env);
2310  
2311      if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TDIR) &&
2312          arm_current_el(env) == 1 && !arm_is_secure_below_el3(env)) {
2313          /* Takes priority over a possible EL3 trap */
2314          return CP_ACCESS_TRAP_EL2;
2315      }
2316  
2317      return gicv3_irqfiq_access(env, ri, isread);
2318  }
2319  
gicv3_sgi_access(CPUARMState * env,const ARMCPRegInfo * ri,bool isread)2320  static CPAccessResult gicv3_sgi_access(CPUARMState *env,
2321                                         const ARMCPRegInfo *ri, bool isread)
2322  {
2323      if (arm_current_el(env) == 1 &&
2324          (arm_hcr_el2_eff(env) & (HCR_IMO | HCR_FMO)) != 0) {
2325          /* Takes priority over a possible EL3 trap */
2326          return CP_ACCESS_TRAP_EL2;
2327      }
2328  
2329      return gicv3_irqfiq_access(env, ri, isread);
2330  }
2331  
/*
 * Access check for CPU interface registers on the Group 0 (FIQ) path,
 * e.g. ICC_IAR0_EL1, ICC_EOIR0_EL1, ICC_BPR0_EL1, ICC_AP0Rn_EL1.
 * An ICH_HCR_EL2.TALL0 trap to EL2 takes priority; otherwise, when
 * SCR_EL3.FIQ routes FIQs to EL3, lower-EL accesses trap to EL3.
 */
static CPAccessResult gicv3_fiq_access(CPUARMState *env,
                                       const ARMCPRegInfo *ri, bool isread)
{
    CPAccessResult r = CP_ACCESS_OK;
    GICv3CPUState *cs = icc_cs_from_env(env);
    int el = arm_current_el(env);

    if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TALL0) &&
        el == 1 && !arm_is_secure_below_el3(env)) {
        /* Takes priority over a possible EL3 trap */
        return CP_ACCESS_TRAP_EL2;
    }

    if (env->cp15.scr_el3 & SCR_FIQ) {
        switch (el) {
        case 1:
            if ((arm_hcr_el2_eff(env) & HCR_FMO) == 0) {
                /* Not trapped to EL2 by FMO, so EL3 gets the trap */
                r = CP_ACCESS_TRAP_EL3;
            }
            break;
        case 2:
            r = CP_ACCESS_TRAP_EL3;
            break;
        case 3:
            if (!is_a64(env) && !arm_is_el3_or_mon(env)) {
                /* AArch32 EL3 outside Monitor mode still traps */
                r = CP_ACCESS_TRAP_EL3;
            }
            break;
        default:
            g_assert_not_reached();
        }
    }

    return r;
}
2367  
/*
 * Access check for CPU interface registers on the Group 1 (IRQ) path,
 * e.g. ICC_IAR1_EL1, ICC_EOIR1_EL1, ICC_BPR1_EL1, ICC_AP1Rn_EL1.
 * An ICH_HCR_EL2.TALL1 trap to EL2 takes priority; otherwise, when
 * SCR_EL3.IRQ routes IRQs to EL3, lower-EL accesses trap to EL3.
 */
static CPAccessResult gicv3_irq_access(CPUARMState *env,
                                       const ARMCPRegInfo *ri, bool isread)
{
    CPAccessResult r = CP_ACCESS_OK;
    GICv3CPUState *cs = icc_cs_from_env(env);
    int el = arm_current_el(env);

    if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TALL1) &&
        el == 1 && !arm_is_secure_below_el3(env)) {
        /* Takes priority over a possible EL3 trap */
        return CP_ACCESS_TRAP_EL2;
    }

    if (env->cp15.scr_el3 & SCR_IRQ) {
        switch (el) {
        case 1:
            if ((arm_hcr_el2_eff(env) & HCR_IMO) == 0) {
                /* Not trapped to EL2 by IMO, so EL3 gets the trap */
                r = CP_ACCESS_TRAP_EL3;
            }
            break;
        case 2:
            r = CP_ACCESS_TRAP_EL3;
            break;
        case 3:
            if (!is_a64(env) && !arm_is_el3_or_mon(env)) {
                /* AArch32 EL3 outside Monitor mode still traps */
                r = CP_ACCESS_TRAP_EL3;
            }
            break;
        default:
            g_assert_not_reached();
        }
    }

    return r;
}
2403  
/*
 * Reset the whole CPU interface state (both the physical ICC_* and the
 * virtual ICH_*/ICV_* registers). Hooked as the resetfn of ICC_PMR_EL1
 * so it runs once per CPU interface rather than once per register.
 */
static void icc_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);

    /* ICC_CTLR_EL1 reset value: A3V set, IDbits = 1 (24-bit INTIDs),
     * PRIbits field encodes (implemented priority bits - 1).
     */
    cs->icc_ctlr_el1[GICV3_S] = ICC_CTLR_EL1_A3V |
        (1 << ICC_CTLR_EL1_IDBITS_SHIFT) |
        ((cs->pribits - 1) << ICC_CTLR_EL1_PRIBITS_SHIFT);
    cs->icc_ctlr_el1[GICV3_NS] = ICC_CTLR_EL1_A3V |
        (1 << ICC_CTLR_EL1_IDBITS_SHIFT) |
        ((cs->pribits - 1) << ICC_CTLR_EL1_PRIBITS_SHIFT);
    cs->icc_pmr_el1 = 0;
    /* BPRs reset to their minimum (most finely grained) values */
    cs->icc_bpr[GICV3_G0] = icc_min_bpr(cs);
    cs->icc_bpr[GICV3_G1] = icc_min_bpr(cs);
    cs->icc_bpr[GICV3_G1NS] = icc_min_bpr_ns(cs);
    memset(cs->icc_apr, 0, sizeof(cs->icc_apr));
    memset(cs->icc_igrpen, 0, sizeof(cs->icc_igrpen));
    cs->icc_ctlr_el3 = ICC_CTLR_EL3_NDS | ICC_CTLR_EL3_A3V |
        (1 << ICC_CTLR_EL3_IDBITS_SHIFT) |
        ((cs->pribits - 1) << ICC_CTLR_EL3_PRIBITS_SHIFT);

    /* Virtual interface (ICH_*) state */
    memset(cs->ich_apr, 0, sizeof(cs->ich_apr));
    cs->ich_hcr_el2 = 0;
    memset(cs->ich_lr_el2, 0, sizeof(cs->ich_lr_el2));
    /* VFIQEN set; virtual BPR1 resets one above the minimum virtual BPR */
    cs->ich_vmcr_el2 = ICH_VMCR_EL2_VFIQEN |
        ((icv_min_vbpr(cs) + 1) << ICH_VMCR_EL2_VBPR1_SHIFT) |
        (icv_min_vbpr(cs) << ICH_VMCR_EL2_VBPR0_SHIFT);
}
2431  
2432  static const ARMCPRegInfo gicv3_cpuif_reginfo[] = {
2433      { .name = "ICC_PMR_EL1", .state = ARM_CP_STATE_BOTH,
2434        .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 6, .opc2 = 0,
2435        .type = ARM_CP_IO | ARM_CP_NO_RAW,
2436        .access = PL1_RW, .accessfn = gicv3_irqfiq_access,
2437        .readfn = icc_pmr_read,
2438        .writefn = icc_pmr_write,
2439        /* We hang the whole cpu interface reset routine off here
2440         * rather than parcelling it out into one little function
2441         * per register
2442         */
2443        .resetfn = icc_reset,
2444      },
2445      { .name = "ICC_IAR0_EL1", .state = ARM_CP_STATE_BOTH,
2446        .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 0,
2447        .type = ARM_CP_IO | ARM_CP_NO_RAW,
2448        .access = PL1_R, .accessfn = gicv3_fiq_access,
2449        .readfn = icc_iar0_read,
2450      },
2451      { .name = "ICC_EOIR0_EL1", .state = ARM_CP_STATE_BOTH,
2452        .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 1,
2453        .type = ARM_CP_IO | ARM_CP_NO_RAW,
2454        .access = PL1_W, .accessfn = gicv3_fiq_access,
2455        .writefn = icc_eoir_write,
2456      },
2457      { .name = "ICC_HPPIR0_EL1", .state = ARM_CP_STATE_BOTH,
2458        .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 2,
2459        .type = ARM_CP_IO | ARM_CP_NO_RAW,
2460        .access = PL1_R, .accessfn = gicv3_fiq_access,
2461        .readfn = icc_hppir0_read,
2462      },
2463      { .name = "ICC_BPR0_EL1", .state = ARM_CP_STATE_BOTH,
2464        .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 3,
2465        .type = ARM_CP_IO | ARM_CP_NO_RAW,
2466        .access = PL1_RW, .accessfn = gicv3_fiq_access,
2467        .readfn = icc_bpr_read,
2468        .writefn = icc_bpr_write,
2469      },
2470      { .name = "ICC_AP0R0_EL1", .state = ARM_CP_STATE_BOTH,
2471        .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 4,
2472        .type = ARM_CP_IO | ARM_CP_NO_RAW,
2473        .access = PL1_RW, .accessfn = gicv3_fiq_access,
2474        .readfn = icc_ap_read,
2475        .writefn = icc_ap_write,
2476      },
2477      /* All the ICC_AP1R*_EL1 registers are banked */
2478      { .name = "ICC_AP1R0_EL1", .state = ARM_CP_STATE_BOTH,
2479        .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 0,
2480        .type = ARM_CP_IO | ARM_CP_NO_RAW,
2481        .access = PL1_RW, .accessfn = gicv3_irq_access,
2482        .readfn = icc_ap_read,
2483        .writefn = icc_ap_write,
2484      },
2485      { .name = "ICC_DIR_EL1", .state = ARM_CP_STATE_BOTH,
2486        .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 1,
2487        .type = ARM_CP_IO | ARM_CP_NO_RAW,
2488        .access = PL1_W, .accessfn = gicv3_dir_access,
2489        .writefn = icc_dir_write,
2490      },
2491      { .name = "ICC_RPR_EL1", .state = ARM_CP_STATE_BOTH,
2492        .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 3,
2493        .type = ARM_CP_IO | ARM_CP_NO_RAW,
2494        .access = PL1_R, .accessfn = gicv3_irqfiq_access,
2495        .readfn = icc_rpr_read,
2496      },
2497      { .name = "ICC_SGI1R_EL1", .state = ARM_CP_STATE_AA64,
2498        .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 5,
2499        .type = ARM_CP_IO | ARM_CP_NO_RAW,
2500        .access = PL1_W, .accessfn = gicv3_sgi_access,
2501        .writefn = icc_sgi1r_write,
2502      },
2503      { .name = "ICC_SGI1R",
2504        .cp = 15, .opc1 = 0, .crm = 12,
2505        .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_RAW,
2506        .access = PL1_W, .accessfn = gicv3_sgi_access,
2507        .writefn = icc_sgi1r_write,
2508      },
2509      { .name = "ICC_ASGI1R_EL1", .state = ARM_CP_STATE_AA64,
2510        .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 6,
2511        .type = ARM_CP_IO | ARM_CP_NO_RAW,
2512        .access = PL1_W, .accessfn = gicv3_sgi_access,
2513        .writefn = icc_asgi1r_write,
2514      },
2515      { .name = "ICC_ASGI1R",
2516        .cp = 15, .opc1 = 1, .crm = 12,
2517        .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_RAW,
2518        .access = PL1_W, .accessfn = gicv3_sgi_access,
2519        .writefn = icc_asgi1r_write,
2520      },
2521      { .name = "ICC_SGI0R_EL1", .state = ARM_CP_STATE_AA64,
2522        .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 7,
2523        .type = ARM_CP_IO | ARM_CP_NO_RAW,
2524        .access = PL1_W, .accessfn = gicv3_sgi_access,
2525        .writefn = icc_sgi0r_write,
2526      },
2527      { .name = "ICC_SGI0R",
2528        .cp = 15, .opc1 = 2, .crm = 12,
2529        .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_RAW,
2530        .access = PL1_W, .accessfn = gicv3_sgi_access,
2531        .writefn = icc_sgi0r_write,
2532      },
2533      { .name = "ICC_IAR1_EL1", .state = ARM_CP_STATE_BOTH,
2534        .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 0,
2535        .type = ARM_CP_IO | ARM_CP_NO_RAW,
2536        .access = PL1_R, .accessfn = gicv3_irq_access,
2537        .readfn = icc_iar1_read,
2538      },
2539      { .name = "ICC_EOIR1_EL1", .state = ARM_CP_STATE_BOTH,
2540        .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 1,
2541        .type = ARM_CP_IO | ARM_CP_NO_RAW,
2542        .access = PL1_W, .accessfn = gicv3_irq_access,
2543        .writefn = icc_eoir_write,
2544      },
2545      { .name = "ICC_HPPIR1_EL1", .state = ARM_CP_STATE_BOTH,
2546        .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 2,
2547        .type = ARM_CP_IO | ARM_CP_NO_RAW,
2548        .access = PL1_R, .accessfn = gicv3_irq_access,
2549        .readfn = icc_hppir1_read,
2550      },
2551      /* This register is banked */
2552      { .name = "ICC_BPR1_EL1", .state = ARM_CP_STATE_BOTH,
2553        .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 3,
2554        .type = ARM_CP_IO | ARM_CP_NO_RAW,
2555        .access = PL1_RW, .accessfn = gicv3_irq_access,
2556        .readfn = icc_bpr_read,
2557        .writefn = icc_bpr_write,
2558      },
2559      /* This register is banked */
2560      { .name = "ICC_CTLR_EL1", .state = ARM_CP_STATE_BOTH,
2561        .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 4,
2562        .type = ARM_CP_IO | ARM_CP_NO_RAW,
2563        .access = PL1_RW, .accessfn = gicv3_irqfiq_access,
2564        .readfn = icc_ctlr_el1_read,
2565        .writefn = icc_ctlr_el1_write,
2566      },
2567      { .name = "ICC_SRE_EL1", .state = ARM_CP_STATE_BOTH,
2568        .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 5,
2569        .type = ARM_CP_NO_RAW | ARM_CP_CONST,
2570        .access = PL1_RW,
2571        /* We don't support IRQ/FIQ bypass and system registers are
2572         * always enabled, so all our bits are RAZ/WI or RAO/WI.
2573         * This register is banked but since it's constant we don't
2574         * need to do anything special.
2575         */
2576        .resetvalue = 0x7,
2577      },
2578      { .name = "ICC_IGRPEN0_EL1", .state = ARM_CP_STATE_BOTH,
2579        .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 6,
2580        .type = ARM_CP_IO | ARM_CP_NO_RAW,
2581        .access = PL1_RW, .accessfn = gicv3_fiq_access,
2582        .fgt = FGT_ICC_IGRPENN_EL1,
2583        .readfn = icc_igrpen_read,
2584        .writefn = icc_igrpen_write,
2585      },
2586      /* This register is banked */
2587      { .name = "ICC_IGRPEN1_EL1", .state = ARM_CP_STATE_BOTH,
2588        .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 7,
2589        .type = ARM_CP_IO | ARM_CP_NO_RAW,
2590        .access = PL1_RW, .accessfn = gicv3_irq_access,
2591        .fgt = FGT_ICC_IGRPENN_EL1,
2592        .readfn = icc_igrpen_read,
2593        .writefn = icc_igrpen_write,
2594      },
2595      { .name = "ICC_SRE_EL2", .state = ARM_CP_STATE_BOTH,
2596        .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 5,
2597        .type = ARM_CP_NO_RAW | ARM_CP_CONST,
2598        .access = PL2_RW,
2599        /* We don't support IRQ/FIQ bypass and system registers are
2600         * always enabled, so all our bits are RAZ/WI or RAO/WI.
2601         */
2602        .resetvalue = 0xf,
2603      },
2604      { .name = "ICC_CTLR_EL3", .state = ARM_CP_STATE_BOTH,
2605        .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 12, .opc2 = 4,
2606        .type = ARM_CP_IO | ARM_CP_NO_RAW,
2607        .access = PL3_RW,
2608        .readfn = icc_ctlr_el3_read,
2609        .writefn = icc_ctlr_el3_write,
2610      },
2611      { .name = "ICC_SRE_EL3", .state = ARM_CP_STATE_BOTH,
2612        .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 12, .opc2 = 5,
2613        .type = ARM_CP_NO_RAW | ARM_CP_CONST,
2614        .access = PL3_RW,
2615        /* We don't support IRQ/FIQ bypass and system registers are
2616         * always enabled, so all our bits are RAZ/WI or RAO/WI.
2617         */
2618        .resetvalue = 0xf,
2619      },
2620      { .name = "ICC_IGRPEN1_EL3", .state = ARM_CP_STATE_BOTH,
2621        .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 12, .opc2 = 7,
2622        .type = ARM_CP_IO | ARM_CP_NO_RAW,
2623        .access = PL3_RW,
2624        .readfn = icc_igrpen1_el3_read,
2625        .writefn = icc_igrpen1_el3_write,
2626      },
2627  };
2628  
/*
 * ICC_AP0R1_EL1 / ICC_AP1R1_EL1: second active-priority registers for
 * Group 0 and Group 1. These are only registered (by gicv3_init_cpuif())
 * when the CPU interface implements at least 6 preemption bits.
 */
static const ARMCPRegInfo gicv3_cpuif_icc_apxr1_reginfo[] = {
    { .name = "ICC_AP0R1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 5,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_fiq_access,
      .readfn = icc_ap_read,
      .writefn = icc_ap_write,
    },
    { .name = "ICC_AP1R1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_irq_access,
      .readfn = icc_ap_read,
      .writefn = icc_ap_write,
    },
};
2645  
/*
 * ICC_AP0R{2,3}_EL1 / ICC_AP1R{2,3}_EL1: third and fourth active-priority
 * registers. Only registered (by gicv3_init_cpuif()) when the CPU
 * interface implements 7 preemption bits.
 */
static const ARMCPRegInfo gicv3_cpuif_icc_apxr23_reginfo[] = {
    { .name = "ICC_AP0R2_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 6,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_fiq_access,
      .readfn = icc_ap_read,
      .writefn = icc_ap_write,
    },
    { .name = "ICC_AP0R3_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 7,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_fiq_access,
      .readfn = icc_ap_read,
      .writefn = icc_ap_write,
    },
    { .name = "ICC_AP1R2_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 2,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_irq_access,
      .readfn = icc_ap_read,
      .writefn = icc_ap_write,
    },
    { .name = "ICC_AP1R3_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 3,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_irq_access,
      .readfn = icc_ap_read,
      .writefn = icc_ap_write,
    },
};
2676  
/*
 * ICC_NMIAR1_EL1: NMI interrupt acknowledge. Only registered (by
 * gicv3_init_cpuif()) when the CPU implements FEAT_NMI and hence the
 * CPU interface part of FEAT_GICv3_NMI.
 */
static const ARMCPRegInfo gicv3_cpuif_gicv3_nmi_reginfo[] = {
    { .name = "ICC_NMIAR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 5,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_R, .accessfn = gicv3_irq_access,
      .readfn = icc_nmiar1_read,
    },
};
2685  
static uint64_t ich_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * Read an ICH_AP0R<n>_EL2/ICH_AP1R<n>_EL2 active-priority register.
     * An odd crm encodes the Group 1 (NS) bank, an even crm Group 0;
     * opc2 selects the register number within the bank.
     */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
    int regno = ri->opc2 & 3;
    uint64_t apr = cs->ich_apr[grp][regno];

    trace_gicv3_ich_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), apr);
    return apr;
}
2697  
static void ich_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /*
     * Write an ICH_AP0R<n>_EL2/ICH_AP1R<n>_EL2 active-priority register;
     * the upper 32 bits are RES0, except that with FEAT_GICv3_NMI the
     * NMI bit is also writable. Recompute VIRQ/VFIQ afterwards.
     */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
    int regno = ri->opc2 & 3;
    uint64_t mask = 0xFFFFFFFFU;

    trace_gicv3_ich_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);

    if (cs->nmi_support) {
        mask |= ICV_AP1R_EL1_NMI;
    }
    cs->ich_apr[grp][regno] = value & mask;

    gicv3_cpuif_virt_irq_fiq_update(cs);
}
2714  
static uint64_t ich_hcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Read of ICH_HCR_EL2, the hypervisor interface control register */
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t hcr = cs->ich_hcr_el2;

    trace_gicv3_ich_hcr_read(gicv3_redist_affid(cs), hcr);
    return hcr;
}
2723  
static void ich_hcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /*
     * Write to ICH_HCR_EL2. Only the bits we implement are writable;
     * everything else reads back as zero. The write may change whether
     * a virtual IRQ/FIQ or maintenance interrupt is pending, so
     * recompute the full virtual interface state afterwards.
     */
    GICv3CPUState *cs = icc_cs_from_env(env);
    const uint64_t writable_mask =
        ICH_HCR_EL2_EN | ICH_HCR_EL2_UIE | ICH_HCR_EL2_LRENPIE |
        ICH_HCR_EL2_NPIE | ICH_HCR_EL2_VGRP0EIE | ICH_HCR_EL2_VGRP0DIE |
        ICH_HCR_EL2_VGRP1EIE | ICH_HCR_EL2_VGRP1DIE | ICH_HCR_EL2_TC |
        ICH_HCR_EL2_TALL0 | ICH_HCR_EL2_TALL1 | ICH_HCR_EL2_TSEI |
        ICH_HCR_EL2_TDIR | ICH_HCR_EL2_EOICOUNT_MASK;

    trace_gicv3_ich_hcr_write(gicv3_redist_affid(cs), value);

    cs->ich_hcr_el2 = value & writable_mask;
    gicv3_cpuif_virt_update(cs);
}
2740  
static uint64_t ich_vmcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Read of ICH_VMCR_EL2, the virtual machine control register */
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t vmcr = cs->ich_vmcr_el2;

    trace_gicv3_ich_vmcr_read(gicv3_redist_affid(cs), vmcr);
    return vmcr;
}
2749  
static void ich_vmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /*
     * Write to ICH_VMCR_EL2. Only the implemented fields are writable;
     * VFIQEN is treated as RES1.
     */
    GICv3CPUState *cs = icc_cs_from_env(env);
    const uint64_t writable_mask =
        ICH_VMCR_EL2_VENG0 | ICH_VMCR_EL2_VENG1 | ICH_VMCR_EL2_VCBPR |
        ICH_VMCR_EL2_VEOIM | ICH_VMCR_EL2_VBPR1_MASK |
        ICH_VMCR_EL2_VBPR0_MASK | ICH_VMCR_EL2_VPMR_MASK;

    trace_gicv3_ich_vmcr_write(gicv3_redist_affid(cs), value);

    cs->ich_vmcr_el2 = (value & writable_mask) | ICH_VMCR_EL2_VFIQEN;

    /*
     * Writing a BPR value below the minimum sets it to the minimum;
     * enforce that by round-tripping both fields through the accessors.
     */
    write_vbpr(cs, GICV3_G0, read_vbpr(cs, GICV3_G0));
    write_vbpr(cs, GICV3_G1, read_vbpr(cs, GICV3_G1));

    gicv3_cpuif_virt_update(cs);
}
2771  
static uint64_t ich_lr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * Read of a list register. This single readfn serves:
     *  - AArch64: a 64-bit read of the whole LR
     *  - AArch32 ICH_LR<n>: the low 32 bits
     *  - AArch32 ICH_LRC<n> (crm >= 14): the high 32 bits
     * The LR index is opc2 for the low 3 bits plus crm bit 0 for bit 3.
     */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int regno = ri->opc2 | ((ri->crm & 1) << 3);
    uint64_t lr = cs->ich_lr_el2[regno];
    uint64_t value;

    if (ri->state != ARM_CP_STATE_AA32) {
        trace_gicv3_ich_lr_read(regno, gicv3_redist_affid(cs), lr);
        return lr;
    }

    if (ri->crm >= 14) {
        value = extract64(lr, 32, 32);
        trace_gicv3_ich_lrc_read(regno, gicv3_redist_affid(cs), value);
    } else {
        value = extract64(lr, 0, 32);
        trace_gicv3_ich_lr32_read(regno, gicv3_redist_affid(cs), value);
    }
    return value;
}
2798  
static void ich_lr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    /* LR index: opc2 gives the low 3 bits, crm bit 0 selects LRs 8..15 */
    int regno = ri->opc2 | ((ri->crm & 1) << 3);

    /* This write function handles all of:
     * 64-bit writes to the whole LR
     * 32-bit writes to the low half of the LR
     * 32-bit writes to the high half of the LR
     */
    if (ri->state == ARM_CP_STATE_AA32) {
        if (ri->crm >= 14) {
            /* AArch32 ICH_LRC<n>: merge into the high half of the 64-bit LR */
            trace_gicv3_ich_lrc_write(regno, gicv3_redist_affid(cs), value);
            value = deposit64(cs->ich_lr_el2[regno], 32, 32, value);
        } else {
            /* AArch32 ICH_LR<n>: merge into the low half of the 64-bit LR */
            trace_gicv3_ich_lr32_write(regno, gicv3_redist_affid(cs), value);
            value = deposit64(cs->ich_lr_el2[regno], 0, 32, value);
        }
    } else {
        trace_gicv3_ich_lr_write(regno, gicv3_redist_affid(cs), value);
    }

    /* Enforce RES0 bits in priority field */
    if (cs->vpribits < 8) {
        value = deposit64(value, ICH_LR_EL2_PRIORITY_SHIFT,
                          8 - cs->vpribits, 0);
    }

    /* Enforce RES0 bit in NMI field when FEAT_GICv3_NMI is not implemented */
    if (!cs->nmi_support) {
        value &= ~ICH_LR_EL2_NMI;
    }

    /* Storing the LR may change the pending VIRQ/VFIQ/maintenance state */
    cs->ich_lr_el2[regno] = value;
    gicv3_cpuif_virt_update(cs);
}
2836  
static uint64_t ich_vtr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * Read of ICH_VTR_EL2: report the virtual interface's fixed
     * configuration (list register count, priority/preemption bits).
     */
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t value = ICH_VTR_EL2_TDS | ICH_VTR_EL2_A3V;

    value |= (cs->num_list_regs - 1) << ICH_VTR_EL2_LISTREGS_SHIFT;
    value |= 1 << ICH_VTR_EL2_IDBITS_SHIFT;
    value |= (cs->vprebits - 1) << ICH_VTR_EL2_PREBITS_SHIFT;
    value |= (cs->vpribits - 1) << ICH_VTR_EL2_PRIBITS_SHIFT;

    if (cs->gic->revision < 4) {
        /* A GIC that is not v4 reports the nV4 bit as set */
        value |= ICH_VTR_EL2_NV4;
    }

    trace_gicv3_ich_vtr_read(gicv3_redist_affid(cs), value);
    return value;
}
2855  
static uint64_t ich_misr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* ICH_MISR_EL2: maintenance interrupt status, computed on demand */
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t misr = maintenance_interrupt_state(cs);

    trace_gicv3_ich_misr_read(gicv3_redist_affid(cs), misr);
    return misr;
}
2864  
static uint64_t ich_eisr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* ICH_EISR_EL2: EOI maintenance interrupt status, computed on demand */
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t eisr = eoi_maintenance_interrupt_state(cs, NULL);

    trace_gicv3_ich_eisr_read(gicv3_redist_affid(cs), eisr);
    return eisr;
}
2873  
static uint64_t ich_elrsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * ICH_ELRSR_EL2: one bit per implemented list register, set when
     * that LR contains no valid interrupt (state field is zero and
     * either it is a hardware interrupt or no EOI maintenance is
     * requested).
     */
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t value = 0;
    int i;

    for (i = 0; i < cs->num_list_regs; i++) {
        uint64_t lr = cs->ich_lr_el2[i];
        bool inactive = (lr & ICH_LR_EL2_STATE_MASK) == 0;
        bool no_eoi_needed =
            (lr & ICH_LR_EL2_HW) != 0 || (lr & ICH_LR_EL2_EOI) == 0;

        if (inactive && no_eoi_needed) {
            value |= (1 << i);
        }
    }

    trace_gicv3_ich_elrsr_read(gicv3_redist_affid(cs), value);
    return value;
}
2892  
/*
 * EL2 (hypervisor) view of the CPU interface: the ICH_* registers.
 * Registered by gicv3_init_cpuif() only when the CPU has EL2.
 * ICH_VTR/MISR/EISR/ELRSR are read-only and computed by their readfns.
 */
static const ARMCPRegInfo gicv3_cpuif_hcr_reginfo[] = {
    { .name = "ICH_AP0R0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 0,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .nv2_redirect_offset = 0x480,
      .access = PL2_RW,
      .readfn = ich_ap_read,
      .writefn = ich_ap_write,
    },
    { .name = "ICH_AP1R0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 0,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .nv2_redirect_offset = 0x4a0,
      .access = PL2_RW,
      .readfn = ich_ap_read,
      .writefn = ich_ap_write,
    },
    { .name = "ICH_HCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 0,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .nv2_redirect_offset = 0x4c0,
      .access = PL2_RW,
      .readfn = ich_hcr_read,
      .writefn = ich_hcr_write,
    },
    { .name = "ICH_VTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL2_R,
      .readfn = ich_vtr_read,
    },
    { .name = "ICH_MISR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 2,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL2_R,
      .readfn = ich_misr_read,
    },
    { .name = "ICH_EISR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 3,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL2_R,
      .readfn = ich_eisr_read,
    },
    { .name = "ICH_ELRSR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 5,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL2_R,
      .readfn = ich_elrsr_read,
    },
    { .name = "ICH_VMCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 7,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .nv2_redirect_offset = 0x4c8,
      .access = PL2_RW,
      .readfn = ich_vmcr_read,
      .writefn = ich_vmcr_write,
    },
};
2951  
/*
 * ICH_AP0R1_EL2 / ICH_AP1R1_EL2: second virtual active-priority
 * registers. Only registered (by gicv3_init_cpuif()) when the virtual
 * interface has at least 6 preemption bits.
 */
static const ARMCPRegInfo gicv3_cpuif_ich_apxr1_reginfo[] = {
    { .name = "ICH_AP0R1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .nv2_redirect_offset = 0x488,
      .access = PL2_RW,
      .readfn = ich_ap_read,
      .writefn = ich_ap_write,
    },
    { .name = "ICH_AP1R1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .nv2_redirect_offset = 0x4a8,
      .access = PL2_RW,
      .readfn = ich_ap_read,
      .writefn = ich_ap_write,
    },
};
2970  
/*
 * ICH_AP0R{2,3}_EL2 / ICH_AP1R{2,3}_EL2: third and fourth virtual
 * active-priority registers. Only registered (by gicv3_init_cpuif())
 * when the virtual interface has 7 preemption bits.
 */
static const ARMCPRegInfo gicv3_cpuif_ich_apxr23_reginfo[] = {
    { .name = "ICH_AP0R2_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 2,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .nv2_redirect_offset = 0x490,
      .access = PL2_RW,
      .readfn = ich_ap_read,
      .writefn = ich_ap_write,
    },
    { .name = "ICH_AP0R3_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 3,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .nv2_redirect_offset = 0x498,
      .access = PL2_RW,
      .readfn = ich_ap_read,
      .writefn = ich_ap_write,
    },
    { .name = "ICH_AP1R2_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 2,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .nv2_redirect_offset = 0x4b0,
      .access = PL2_RW,
      .readfn = ich_ap_read,
      .writefn = ich_ap_write,
    },
    { .name = "ICH_AP1R3_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 3,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .nv2_redirect_offset = 0x4b8,
      .access = PL2_RW,
      .readfn = ich_ap_read,
      .writefn = ich_ap_write,
    },
};
3005  
/*
 * Exception-level change hook, registered per CPU by gicv3_init_cpuif():
 * recompute the physical and virtual interrupt lines after an EL change.
 */
static void gicv3_cpuif_el_change_hook(ARMCPU *cpu, void *opaque)
{
    GICv3CPUState *cs = opaque;

    gicv3_cpuif_update(cs);
    /*
     * Because vLPIs are only pending in NonSecure state,
     * an EL change can change the VIRQ/VFIQ status (but
     * cannot affect the maintenance interrupt state)
     */
    gicv3_cpuif_virt_irq_fiq_update(cs);
}
3018  
void gicv3_init_cpuif(GICv3State *s)
{
    /* Called from the GICv3 realize function; register our system
     * registers with the CPU
     */
    int i;

    for (i = 0; i < s->num_cpu; i++) {
        ARMCPU *cpu = ARM_CPU(qemu_get_cpu(i));
        GICv3CPUState *cs = &s->cpu[i];

        /*
         * If the CPU doesn't define a GICv3 configuration, probably because
         * in real hardware it doesn't have one, then we use default values
         * matching the one used by most Arm CPUs. This applies to:
         *  cpu->gic_num_lrs
         *  cpu->gic_vpribits
         *  cpu->gic_vprebits
         *  cpu->gic_pribits
         */

        /* Note that we can't just use the GICv3CPUState as an opaque pointer
         * in define_arm_cp_regs_with_opaque(), because when we're called back
         * it might be with code translated by CPU 0 but run by CPU 1, in
         * which case we'd get the wrong value.
         * So instead we define the regs with no ri->opaque info, and
         * get back to the GICv3CPUState from the CPUARMState.
         *
         * These CP regs callbacks can be called from either TCG or HVF code.
         */
        define_arm_cp_regs(cpu, gicv3_cpuif_reginfo);

        /*
         * If the CPU implements FEAT_NMI and FEAT_GICv3 it must also
         * implement FEAT_GICv3_NMI, which is the CPU interface part
         * of NMI support. This is distinct from whether the GIC proper
         * (redistributors and distributor) have NMI support. In QEMU
         * that is a property of the GIC device in s->nmi_support;
         * cs->nmi_support indicates the CPU interface's support.
         */
        if (cpu_isar_feature(aa64_nmi, cpu)) {
            cs->nmi_support = true;
            define_arm_cp_regs(cpu, gicv3_cpuif_gicv3_nmi_reginfo);
        }

        /*
         * The CPU implementation specifies the number of supported
         * bits of physical priority. For backwards compatibility
         * of migration, we have a compat property that forces use
         * of 8 priority bits regardless of what the CPU really has.
         */
        if (s->force_8bit_prio) {
            cs->pribits = 8;
        } else {
            /* Default to 5 priority bits if the CPU doesn't specify */
            cs->pribits = cpu->gic_pribits ?: 5;
        }

        /*
         * The GICv3 has separate ID register fields for virtual priority
         * and preemption bit values, but only a single ID register field
         * for the physical priority bits. The preemption bit count is
         * always the same as the priority bit count, except that 8 bits
         * of priority means 7 preemption bits. We precalculate the
         * preemption bits because it simplifies the code and makes the
         * parallels between the virtual and physical bits of the GIC
         * a bit clearer.
         */
        cs->prebits = cs->pribits;
        if (cs->prebits == 8) {
            cs->prebits--;
        }
        /*
         * Check that CPU code defining pribits didn't violate
         * architectural constraints our implementation relies on.
         */
        g_assert(cs->pribits >= 4 && cs->pribits <= 8);

        /*
         * gicv3_cpuif_reginfo[] defines ICC_AP*R0_EL1; add definitions
         * for ICC_AP*R{1,2,3}_EL1 if the prebits value requires them.
         */
        if (cs->prebits >= 6) {
            define_arm_cp_regs(cpu, gicv3_cpuif_icc_apxr1_reginfo);
        }
        if (cs->prebits == 7) {
            define_arm_cp_regs(cpu, gicv3_cpuif_icc_apxr23_reginfo);
        }

        /* The virtual (EL2) interface registers only exist with EL2 */
        if (arm_feature(&cpu->env, ARM_FEATURE_EL2)) {
            int j;

            /* Defaults (see comment above): 4 LRs, 5 virtual pri/pre bits */
            cs->num_list_regs = cpu->gic_num_lrs ?: 4;
            cs->vpribits = cpu->gic_vpribits ?: 5;
            cs->vprebits = cpu->gic_vprebits ?: 5;

            /* Check against architectural constraints: getting these
             * wrong would be a bug in the CPU code defining these,
             * and the implementation relies on them holding.
             */
            g_assert(cs->vprebits <= cs->vpribits);
            g_assert(cs->vprebits >= 5 && cs->vprebits <= 7);
            g_assert(cs->vpribits >= 5 && cs->vpribits <= 8);

            define_arm_cp_regs(cpu, gicv3_cpuif_hcr_reginfo);

            for (j = 0; j < cs->num_list_regs; j++) {
                /* Note that the AArch64 LRs are 64-bit; the AArch32 LRs
                 * are split into two cp15 regs, LR (the low part, with the
                 * same encoding as the AArch64 LR) and LRC (the high part).
                 */
                ARMCPRegInfo lr_regset[] = {
                    { .name = "ICH_LRn_EL2", .state = ARM_CP_STATE_BOTH,
                      .opc0 = 3, .opc1 = 4, .crn = 12,
                      .crm = 12 + (j >> 3), .opc2 = j & 7,
                      .type = ARM_CP_IO | ARM_CP_NO_RAW,
                      .nv2_redirect_offset = 0x400 + 8 * j,
                      .access = PL2_RW,
                      .readfn = ich_lr_read,
                      .writefn = ich_lr_write,
                    },
                    { .name = "ICH_LRCn_EL2", .state = ARM_CP_STATE_AA32,
                      .cp = 15, .opc1 = 4, .crn = 12,
                      .crm = 14 + (j >> 3), .opc2 = j & 7,
                      .type = ARM_CP_IO | ARM_CP_NO_RAW,
                      .access = PL2_RW,
                      .readfn = ich_lr_read,
                      .writefn = ich_lr_write,
                    },
                };
                define_arm_cp_regs(cpu, lr_regset);
            }
            /* As for ICC above: extra ICH_AP*Rn_EL2 depend on vprebits */
            if (cs->vprebits >= 6) {
                define_arm_cp_regs(cpu, gicv3_cpuif_ich_apxr1_reginfo);
            }
            if (cs->vprebits == 7) {
                define_arm_cp_regs(cpu, gicv3_cpuif_ich_apxr23_reginfo);
            }
        }
        if (tcg_enabled() || qtest_enabled()) {
            /*
             * We can only trap EL changes with TCG. However the GIC interrupt
             * state only changes on EL changes involving EL2 or EL3, so for
             * the non-TCG case this is OK, as EL2 and EL3 can't exist.
             */
            arm_register_el_change_hook(cpu, gicv3_cpuif_el_change_hook, cs);
        } else {
            assert(!arm_feature(&cpu->env, ARM_FEATURE_EL2));
            assert(!arm_feature(&cpu->env, ARM_FEATURE_EL3));
        }
    }
}
3170