/*
 * ARM GICv3 emulation: Redistributor
 *
 * Copyright (c) 2015 Huawei.
 * Copyright (c) 2016 Linaro Limited.
 * Written by Shlomo Pongratz, Peter Maydell
 *
 * This code is licensed under the GPL, version 2 or (at your option)
 * any later version.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "trace.h"
#include "gicv3_internal.h"

static uint32_t mask_group(GICv3CPUState *cs, MemTxAttrs attrs)
{
    /* Return a 32-bit mask which should be applied for this set of 32
     * interrupts; each bit is 1 if access is permitted by the
     * combination of attrs.secure and GICR_GROUPR. (GICR_NSACR does
     * not affect config register accesses, unlike GICD_NSACR.)
     */
    if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
        /* bits for Group 0 or Secure Group 1 interrupts are RAZ/WI */
        return cs->gicr_igroupr0;
    }
    return 0xFFFFFFFFU;
}

static int gicr_ns_access(GICv3CPUState *cs, int irq)
{
    /* Return the 2 bit NSACR.NS_access field for this SGI */
    assert(irq < 16);
    return extract32(cs->gicr_nsacr, irq * 2, 2);
}

static void gicr_write_bitmap_reg(GICv3CPUState *cs, MemTxAttrs attrs,
                                  uint32_t *reg, uint32_t val)
{
    /* Helper routine to implement writing to a plain bitmap register */
    val &= mask_group(cs, attrs);
    *reg = val;
    gicv3_redist_update(cs);
}

static void gicr_write_set_bitmap_reg(GICv3CPUState *cs, MemTxAttrs attrs,
                                      uint32_t *reg, uint32_t val)
{
    /* Helper routine to implement writing to a "set-bitmap" register */
    val &= mask_group(cs, attrs);
    *reg |= val;
    gicv3_redist_update(cs);
}

static void gicr_write_clear_bitmap_reg(GICv3CPUState *cs, MemTxAttrs attrs,
                                        uint32_t *reg, uint32_t val)
{
    /* Helper routine to implement writing to a "clear-bitmap" register */
    val &= mask_group(cs, attrs);
    *reg &= ~val;
    gicv3_redist_update(cs);
}

static uint32_t gicr_read_bitmap_reg(GICv3CPUState *cs, MemTxAttrs attrs,
                                     uint32_t reg)
{
    reg &= mask_group(cs, attrs);
    return reg;
}

static bool vcpu_resident(GICv3CPUState *cs, uint64_t vptaddr)
{
    /*
     * Return true if a vCPU is resident, which is defined by
     * whether the GICR_VPENDBASER register is marked VALID and
     * has the right virtual pending table address.
     */
    if (!FIELD_EX64(cs->gicr_vpendbaser, GICR_VPENDBASER, VALID)) {
        return false;
    }
    return vptaddr == (cs->gicr_vpendbaser & R_GICR_VPENDBASER_PHYADDR_MASK);
}

/**
 * update_for_one_lpi: Update pending information if this LPI is better
 *
 * @cs: GICv3CPUState
 * @irq: interrupt to look up in the LPI Configuration table
 * @ctbase: physical address of the LPI Configuration table to use
 * @ds: true if priority value should not be shifted
 * @hpp: points to pending information to update
 *
 * Look up @irq in the Configuration table specified by @ctbase
 * to see if it is enabled and what its priority is. If it is an
 * enabled interrupt with a higher priority than that currently
 * recorded in @hpp, update @hpp.
 */
static void update_for_one_lpi(GICv3CPUState *cs, int irq,
                               uint64_t ctbase, bool ds, PendingIrq *hpp)
{
    uint8_t lpite;
    uint8_t prio;

    address_space_read(&cs->gic->dma_as,
                       ctbase + ((irq - GICV3_LPI_INTID_START) * sizeof(lpite)),
                       MEMTXATTRS_UNSPECIFIED, &lpite, sizeof(lpite));

    if (!(lpite & LPI_CTE_ENABLED)) {
        return;
    }

    if (ds) {
        prio = lpite & LPI_PRIORITY_MASK;
    } else {
        prio = ((lpite & LPI_PRIORITY_MASK) >> 1) | 0x80;
    }

    if ((prio < hpp->prio) ||
        ((prio == hpp->prio) && (irq <= hpp->irq))) {
        hpp->irq = irq;
        hpp->prio = prio;
        hpp->nmi = false;
        /* LPIs and vLPIs are always non-secure Grp1 interrupts */
        hpp->grp = GICV3_G1NS;
    }
}
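
/*
 * Worked example of the priority decode above: with GICD_CTLR.DS clear,
 * a Configuration table priority of 0x40 is recorded in @hpp as
 * (0x40 >> 1) | 0x80 = 0xa0, i.e. the Non-secure view of the priority;
 * with DS set the raw value is used unchanged.
 */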

/**
 * update_for_all_lpis: Fully scan LPI tables and find best pending LPI
 *
 * @cs: GICv3CPUState
 * @ptbase: physical address of LPI Pending table
 * @ctbase: physical address of LPI Configuration table
 * @ptsizebits: size of tables, specified as number of interrupt ID bits minus 1
 * @ds: true if priority value should not be shifted
 * @hpp: points to pending information to set
 *
 * Recalculate the highest priority pending enabled LPI from scratch,
 * and set @hpp accordingly.
 *
 * We scan the LPI pending table @ptbase; for each pending LPI, we read the
 * corresponding entry in the LPI configuration table @ctbase to extract
 * the priority and enabled information.
 *
 * We take @ptsizebits in the form idbits-1 because this is the way that
 * LPI table sizes are architecturally specified in GICR_PROPBASER.IDBits
 * and in the VMAPP command's VPT_size field.
 */
static void update_for_all_lpis(GICv3CPUState *cs, uint64_t ptbase,
                                uint64_t ctbase, unsigned ptsizebits,
                                bool ds, PendingIrq *hpp)
{
    AddressSpace *as = &cs->gic->dma_as;
    uint8_t pend;
    uint32_t pendt_size = (1ULL << (ptsizebits + 1));
    int i, bit;

    hpp->prio = 0xff;
    hpp->nmi = false;

    for (i = GICV3_LPI_INTID_START / 8; i < pendt_size / 8; i++) {
        address_space_read(as, ptbase + i, MEMTXATTRS_UNSPECIFIED, &pend, 1);
        while (pend) {
            bit = ctz32(pend);
            update_for_one_lpi(cs, i * 8 + bit, ctbase, ds, hpp);
            pend &= ~(1 << bit);
        }
    }
}
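
/*
 * Sizing note for the scan above: since @ptsizebits is idbits - 1, the
 * pending table covers 2^idbits interrupt IDs, i.e. pendt_size / 8 bytes.
 * The scan starts at byte GICV3_LPI_INTID_START / 8 because the first
 * 1KB of the table corresponds to INTIDs below 8192, which are not LPIs.
 */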

/**
 * set_pending_table_bit: Set or clear pending bit for an LPI
 *
 * @cs: GICv3CPUState
 * @ptbase: physical address of LPI Pending table
 * @irq: LPI to change pending state for
 * @level: false to clear pending state, true to set
 *
 * Returns true if we needed to do something, false if the pending bit
 * was already at @level.
 */
static bool set_pending_table_bit(GICv3CPUState *cs, uint64_t ptbase,
                                  int irq, bool level)
{
    AddressSpace *as = &cs->gic->dma_as;
    uint64_t addr = ptbase + irq / 8;
    uint8_t pend;

    address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED, &pend, 1);
    if (extract32(pend, irq % 8, 1) == level) {
        /* Bit already at requested state, no action required */
        return false;
    }
    pend = deposit32(pend, irq % 8, 1, level ? 1 : 0);
    address_space_write(as, addr, MEMTXATTRS_UNSPECIFIED, &pend, 1);
    return true;
}
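
/*
 * For example, LPI 8195 lives at byte (8195 / 8) = 1024 of the pending
 * table, bit (8195 % 8) = 3: the helper above reads that single byte,
 * flips the bit only if it differs from @level, and writes it back.
 */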

static uint8_t gicr_read_ipriorityr(GICv3CPUState *cs, MemTxAttrs attrs,
                                    int irq)
{
    /* Read the value of GICR_IPRIORITYR<n> for the specified interrupt,
     * honouring security state (these are RAZ/WI for Group 0 or Secure
     * Group 1 interrupts).
     */
    uint32_t prio;

    prio = cs->gicr_ipriorityr[irq];

    if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
        if (!(cs->gicr_igroupr0 & (1U << irq))) {
            /* Fields for Group 0 or Secure Group 1 interrupts are RAZ/WI */
            return 0;
        }
        /* NS view of the interrupt priority */
        prio = (prio << 1) & 0xff;
    }
    return prio;
}

static void gicr_write_ipriorityr(GICv3CPUState *cs, MemTxAttrs attrs, int irq,
                                  uint8_t value)
{
    /* Write the value of GICR_IPRIORITYR<n> for the specified interrupt,
     * honouring security state (these are RAZ/WI for Group 0 or Secure
     * Group 1 interrupts).
     */
    if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
        if (!(cs->gicr_igroupr0 & (1U << irq))) {
            /* Fields for Group 0 or Secure Group 1 interrupts are RAZ/WI */
            return;
        }
        /* NS view of the interrupt priority */
        value = 0x80 | (value >> 1);
    }
    cs->gicr_ipriorityr[irq] = value;
}
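
/*
 * Round-trip example of the Non-secure priority view above: an NS write
 * of 0x40 is stored as 0x80 | (0x40 >> 1) = 0xa0, and an NS read of that
 * stored value returns (0xa0 << 1) & 0xff = 0x40, while a Secure access
 * sees the full 8-bit value 0xa0.
 */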

static void gicv3_redist_update_vlpi_only(GICv3CPUState *cs)
{
    uint64_t ptbase, ctbase, idbits;

    if (!FIELD_EX64(cs->gicr_vpendbaser, GICR_VPENDBASER, VALID)) {
        cs->hppvlpi.prio = 0xff;
        cs->hppvlpi.nmi = false;
        return;
    }

    ptbase = cs->gicr_vpendbaser & R_GICR_VPENDBASER_PHYADDR_MASK;
    ctbase = cs->gicr_vpropbaser & R_GICR_VPROPBASER_PHYADDR_MASK;
    idbits = FIELD_EX64(cs->gicr_vpropbaser, GICR_VPROPBASER, IDBITS);

    update_for_all_lpis(cs, ptbase, ctbase, idbits, true, &cs->hppvlpi);
}

static void gicv3_redist_update_vlpi(GICv3CPUState *cs)
{
    gicv3_redist_update_vlpi_only(cs);
    gicv3_cpuif_virt_irq_fiq_update(cs);
}

static void gicr_write_vpendbaser(GICv3CPUState *cs, uint64_t newval)
{
    /* Write @newval to GICR_VPENDBASER, handling its effects */
    bool oldvalid = FIELD_EX64(cs->gicr_vpendbaser, GICR_VPENDBASER, VALID);
    bool newvalid = FIELD_EX64(newval, GICR_VPENDBASER, VALID);
    bool pendinglast;

    /*
     * The DIRTY bit is read-only and for us is always zero;
     * other fields are writable.
     */
    newval &= R_GICR_VPENDBASER_INNERCACHE_MASK |
        R_GICR_VPENDBASER_SHAREABILITY_MASK |
        R_GICR_VPENDBASER_PHYADDR_MASK |
        R_GICR_VPENDBASER_OUTERCACHE_MASK |
        R_GICR_VPENDBASER_PENDINGLAST_MASK |
        R_GICR_VPENDBASER_IDAI_MASK |
        R_GICR_VPENDBASER_VALID_MASK;

    if (oldvalid && newvalid) {
        /*
         * Changing other fields while VALID is 1 is UNPREDICTABLE;
         * we choose to log and ignore the write.
         */
        if (cs->gicr_vpendbaser ^ newval) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: Changing GICR_VPENDBASER when VALID=1 "
                          "is UNPREDICTABLE\n", __func__);
        }
        return;
    }
    if (!oldvalid && !newvalid) {
        cs->gicr_vpendbaser = newval;
        return;
    }

    if (newvalid) {
        /*
         * Valid going from 0 to 1: update hppvlpi from tables.
         * If IDAI is 0 we are allowed to use the info we cached in
         * the IMPDEF area of the table.
         * PendingLast is RES1 when we make this transition.
         */
        pendinglast = true;
    } else {
        /*
         * Valid going from 1 to 0:
         * Set PendingLast if there was a pending enabled interrupt
         * for the vPE that was just descheduled.
         * If we cache info in the IMPDEF area, write it out here.
         */
        pendinglast = cs->hppvlpi.prio != 0xff;
    }

    newval = FIELD_DP64(newval, GICR_VPENDBASER, PENDINGLAST, pendinglast);
    cs->gicr_vpendbaser = newval;
    gicv3_redist_update_vlpi(cs);
}

static MemTxResult gicr_readb(GICv3CPUState *cs, hwaddr offset,
                              uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
        *data = gicr_read_ipriorityr(cs, attrs, offset - GICR_IPRIORITYR);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

static MemTxResult gicr_writeb(GICv3CPUState *cs, hwaddr offset,
                               uint64_t value, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
        gicr_write_ipriorityr(cs, attrs, offset - GICR_IPRIORITYR, value);
        gicv3_redist_update(cs);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

static MemTxResult gicr_readl(GICv3CPUState *cs, hwaddr offset,
                              uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_CTLR:
        *data = cs->gicr_ctlr;
        return MEMTX_OK;
    case GICR_IIDR:
        *data = gicv3_iidr();
        return MEMTX_OK;
    case GICR_TYPER:
        *data = extract64(cs->gicr_typer, 0, 32);
        return MEMTX_OK;
    case GICR_TYPER + 4:
        *data = extract64(cs->gicr_typer, 32, 32);
        return MEMTX_OK;
    case GICR_STATUSR:
        /* RAZ/WI for us (this is an optional register and our implementation
         * does not track RO/WO/reserved violations to report them to the guest)
         */
        *data = 0;
        return MEMTX_OK;
    case GICR_WAKER:
        *data = cs->gicr_waker;
        return MEMTX_OK;
    case GICR_PROPBASER:
        *data = extract64(cs->gicr_propbaser, 0, 32);
        return MEMTX_OK;
    case GICR_PROPBASER + 4:
        *data = extract64(cs->gicr_propbaser, 32, 32);
        return MEMTX_OK;
    case GICR_PENDBASER:
        *data = extract64(cs->gicr_pendbaser, 0, 32);
        return MEMTX_OK;
    case GICR_PENDBASER + 4:
        *data = extract64(cs->gicr_pendbaser, 32, 32);
        return MEMTX_OK;
    case GICR_IGROUPR0:
        if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
            *data = 0;
            return MEMTX_OK;
        }
        *data = cs->gicr_igroupr0;
        return MEMTX_OK;
    case GICR_ISENABLER0:
    case GICR_ICENABLER0:
        *data = gicr_read_bitmap_reg(cs, attrs, cs->gicr_ienabler0);
        return MEMTX_OK;
    case GICR_ISPENDR0:
    case GICR_ICPENDR0:
    {
        /* The pending register reads as the logical OR of the pending
         * latch and the input line level for level-triggered interrupts.
         */
        uint32_t val = cs->gicr_ipendr0 | (~cs->edge_trigger & cs->level);
        *data = gicr_read_bitmap_reg(cs, attrs, val);
        return MEMTX_OK;
    }
    case GICR_ISACTIVER0:
    case GICR_ICACTIVER0:
        *data = gicr_read_bitmap_reg(cs, attrs, cs->gicr_iactiver0);
        return MEMTX_OK;
    case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
    {
        int i, irq = offset - GICR_IPRIORITYR;
        uint32_t value = 0;

        for (i = irq + 3; i >= irq; i--) {
            value <<= 8;
            value |= gicr_read_ipriorityr(cs, attrs, i);
        }
        *data = value;
        return MEMTX_OK;
    }
    case GICR_INMIR0:
        *data = cs->gic->nmi_support ?
                gicr_read_bitmap_reg(cs, attrs, cs->gicr_inmir0) : 0;
        return MEMTX_OK;
    case GICR_ICFGR0:
    case GICR_ICFGR1:
    {
        /* Our edge_trigger bitmap is one bit per irq; take the correct
         * half of it, and spread it out into the odd bits.
         */
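        /*
         * For example, if edge_trigger bit 17 is set then GICR_ICFGR1
         * reads back with bit 3 set: bits [2n+1:2n] describe interrupt
         * 16 + n, and the odd bit being set means edge-triggered.
         */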
        uint32_t value;

        value = cs->edge_trigger & mask_group(cs, attrs);
        value = extract32(value, (offset == GICR_ICFGR1) ? 16 : 0, 16);
        value = half_shuffle32(value) << 1;
        *data = value;
        return MEMTX_OK;
    }
    case GICR_IGRPMODR0:
        if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
            /* RAZ/WI if security disabled, or if
             * security enabled and this is an NS access
             */
            *data = 0;
            return MEMTX_OK;
        }
        *data = cs->gicr_igrpmodr0;
        return MEMTX_OK;
    case GICR_NSACR:
        if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
            /* RAZ/WI if security disabled, or if
             * security enabled and this is an NS access
             */
            *data = 0;
            return MEMTX_OK;
        }
        *data = cs->gicr_nsacr;
        return MEMTX_OK;
    case GICR_IDREGS ... GICR_IDREGS + 0x2f:
        *data = gicv3_idreg(cs->gic, offset - GICR_IDREGS, GICV3_PIDR0_REDIST);
        return MEMTX_OK;
        /*
         * VLPI frame registers. We don't need a version check for
         * VPROPBASER and VPENDBASER because gicv3_redist_size() will
         * prevent pre-v4 GIC from passing us offsets this high.
         */
    case GICR_VPROPBASER:
        *data = extract64(cs->gicr_vpropbaser, 0, 32);
        return MEMTX_OK;
    case GICR_VPROPBASER + 4:
        *data = extract64(cs->gicr_vpropbaser, 32, 32);
        return MEMTX_OK;
    case GICR_VPENDBASER:
        *data = extract64(cs->gicr_vpendbaser, 0, 32);
        return MEMTX_OK;
    case GICR_VPENDBASER + 4:
        *data = extract64(cs->gicr_vpendbaser, 32, 32);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

static MemTxResult gicr_writel(GICv3CPUState *cs, hwaddr offset,
                               uint64_t value, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_CTLR:
        /* For our implementation, GICR_TYPER.DPGS is 0 and so all
         * the DPG bits are RAZ/WI. We don't do anything asynchronously,
         * so UWP and RWP are RAZ/WI. GICR_TYPER.LPIS is 1 (we
         * implement LPIs) so Enable_LPIs is programmable.
         */
        if (cs->gicr_typer & GICR_TYPER_PLPIS) {
            if (value & GICR_CTLR_ENABLE_LPIS) {
                cs->gicr_ctlr |= GICR_CTLR_ENABLE_LPIS;
                /* Check for any pending interrupts in the pending table */
                gicv3_redist_update_lpi(cs);
            } else {
                cs->gicr_ctlr &= ~GICR_CTLR_ENABLE_LPIS;
                /* cs->hppi might have been an LPI; recalculate */
                gicv3_redist_update(cs);
            }
        }
        return MEMTX_OK;
    case GICR_STATUSR:
        /* RAZ/WI for our implementation */
        return MEMTX_OK;
    case GICR_WAKER:
        /* Only the ProcessorSleep bit is writable. When the guest sets
         * it, it requests that we transition the channel between the
         * redistributor and the cpu interface to quiescent, and that
         * we set the ChildrenAsleep bit once the interface has reached the
         * quiescent state.
         * Setting the ProcessorSleep to 0 reverses the quiescing, and
         * ChildrenAsleep is cleared once the transition is complete.
         * Since our interface is not asynchronous, we complete these
         * transitions instantaneously, so we set ChildrenAsleep to the
         * same value as ProcessorSleep here.
         */
        value &= GICR_WAKER_ProcessorSleep;
        if (value & GICR_WAKER_ProcessorSleep) {
            value |= GICR_WAKER_ChildrenAsleep;
        }
        cs->gicr_waker = value;
        return MEMTX_OK;
    case GICR_PROPBASER:
        cs->gicr_propbaser = deposit64(cs->gicr_propbaser, 0, 32, value);
        return MEMTX_OK;
    case GICR_PROPBASER + 4:
        cs->gicr_propbaser = deposit64(cs->gicr_propbaser, 32, 32, value);
        return MEMTX_OK;
    case GICR_PENDBASER:
        cs->gicr_pendbaser = deposit64(cs->gicr_pendbaser, 0, 32, value);
        return MEMTX_OK;
    case GICR_PENDBASER + 4:
        cs->gicr_pendbaser = deposit64(cs->gicr_pendbaser, 32, 32, value);
        return MEMTX_OK;
    case GICR_IGROUPR0:
        if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
            return MEMTX_OK;
        }
        cs->gicr_igroupr0 = value;
        gicv3_redist_update(cs);
        return MEMTX_OK;
    case GICR_ISENABLER0:
        gicr_write_set_bitmap_reg(cs, attrs, &cs->gicr_ienabler0, value);
        return MEMTX_OK;
    case GICR_ICENABLER0:
        gicr_write_clear_bitmap_reg(cs, attrs, &cs->gicr_ienabler0, value);
        return MEMTX_OK;
    case GICR_ISPENDR0:
        gicr_write_set_bitmap_reg(cs, attrs, &cs->gicr_ipendr0, value);
        return MEMTX_OK;
    case GICR_ICPENDR0:
        gicr_write_clear_bitmap_reg(cs, attrs, &cs->gicr_ipendr0, value);
        return MEMTX_OK;
    case GICR_ISACTIVER0:
        gicr_write_set_bitmap_reg(cs, attrs, &cs->gicr_iactiver0, value);
        return MEMTX_OK;
    case GICR_ICACTIVER0:
        gicr_write_clear_bitmap_reg(cs, attrs, &cs->gicr_iactiver0, value);
        return MEMTX_OK;
    case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
    {
        int i, irq = offset - GICR_IPRIORITYR;

        for (i = irq; i < irq + 4; i++, value >>= 8) {
            gicr_write_ipriorityr(cs, attrs, i, value);
        }
        gicv3_redist_update(cs);
        return MEMTX_OK;
    }
    case GICR_INMIR0:
        if (cs->gic->nmi_support) {
            gicr_write_bitmap_reg(cs, attrs, &cs->gicr_inmir0, value);
        }
        return MEMTX_OK;

    case GICR_ICFGR0:
        /* Register is all RAZ/WI or RAO/WI bits */
        return MEMTX_OK;
    case GICR_ICFGR1:
    {
        uint32_t mask;

        /* Since our edge_trigger bitmap is one bit per irq, our input
         * 32-bits will compress down into 16 bits which we need
         * to write into the bitmap.
         */
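        /*
         * For example, a write with bit 3 set (interrupt 17 marked
         * edge-triggered) unshuffles to bit 17 of edge_trigger; the even
         * bit of each 2-bit field in the written value is ignored.
         */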
        value = half_unshuffle32(value >> 1) << 16;
        mask = mask_group(cs, attrs) & 0xffff0000U;

        cs->edge_trigger &= ~mask;
        cs->edge_trigger |= (value & mask);

        gicv3_redist_update(cs);
        return MEMTX_OK;
    }
    case GICR_IGRPMODR0:
        if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
            /* RAZ/WI if security disabled, or if
             * security enabled and this is an NS access
             */
            return MEMTX_OK;
        }
        cs->gicr_igrpmodr0 = value;
        gicv3_redist_update(cs);
        return MEMTX_OK;
    case GICR_NSACR:
        if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
            /* RAZ/WI if security disabled, or if
             * security enabled and this is an NS access
             */
            return MEMTX_OK;
        }
        cs->gicr_nsacr = value;
        /* no update required as this only affects access permission checks */
        return MEMTX_OK;
    case GICR_IIDR:
    case GICR_TYPER:
    case GICR_IDREGS ... GICR_IDREGS + 0x2f:
        /* RO registers, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      HWADDR_FMT_plx "\n", __func__, offset);
        return MEMTX_OK;
        /*
         * VLPI frame registers. We don't need a version check for
         * VPROPBASER and VPENDBASER because gicv3_redist_size() will
         * prevent pre-v4 GIC from passing us offsets this high.
         */
    case GICR_VPROPBASER:
        cs->gicr_vpropbaser = deposit64(cs->gicr_vpropbaser, 0, 32, value);
        return MEMTX_OK;
    case GICR_VPROPBASER + 4:
        cs->gicr_vpropbaser = deposit64(cs->gicr_vpropbaser, 32, 32, value);
        return MEMTX_OK;
    case GICR_VPENDBASER:
        gicr_write_vpendbaser(cs, deposit64(cs->gicr_vpendbaser, 0, 32, value));
        return MEMTX_OK;
    case GICR_VPENDBASER + 4:
        gicr_write_vpendbaser(cs, deposit64(cs->gicr_vpendbaser, 32, 32, value));
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

static MemTxResult gicr_readll(GICv3CPUState *cs, hwaddr offset,
                               uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_TYPER:
        *data = cs->gicr_typer;
        return MEMTX_OK;
    case GICR_PROPBASER:
        *data = cs->gicr_propbaser;
        return MEMTX_OK;
    case GICR_PENDBASER:
        *data = cs->gicr_pendbaser;
        return MEMTX_OK;
        /*
         * VLPI frame registers. We don't need a version check for
         * VPROPBASER and VPENDBASER because gicv3_redist_size() will
         * prevent pre-v4 GIC from passing us offsets this high.
         */
    case GICR_VPROPBASER:
        *data = cs->gicr_vpropbaser;
        return MEMTX_OK;
    case GICR_VPENDBASER:
        *data = cs->gicr_vpendbaser;
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

static MemTxResult gicr_writell(GICv3CPUState *cs, hwaddr offset,
                                uint64_t value, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_PROPBASER:
        cs->gicr_propbaser = value;
        return MEMTX_OK;
    case GICR_PENDBASER:
        cs->gicr_pendbaser = value;
        return MEMTX_OK;
    case GICR_TYPER:
        /* RO register, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      HWADDR_FMT_plx "\n", __func__, offset);
        return MEMTX_OK;
        /*
         * VLPI frame registers. We don't need a version check for
         * VPROPBASER and VPENDBASER because gicv3_redist_size() will
         * prevent pre-v4 GIC from passing us offsets this high.
         */
    case GICR_VPROPBASER:
        cs->gicr_vpropbaser = value;
        return MEMTX_OK;
    case GICR_VPENDBASER:
        gicr_write_vpendbaser(cs, value);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

MemTxResult gicv3_redist_read(void *opaque, hwaddr offset, uint64_t *data,
                              unsigned size, MemTxAttrs attrs)
{
    GICv3RedistRegion *region = opaque;
    GICv3State *s = region->gic;
    GICv3CPUState *cs;
    MemTxResult r;
    int cpuidx;

    assert((offset & (size - 1)) == 0);

    /*
     * There are (for GICv3) two 64K redistributor pages per CPU.
     * In some cases the redistributor pages for all CPUs are not
     * contiguous (e.g. on the virt board they are split into two
     * parts if there are too many CPUs to all fit in the same place
     * in the memory map); if so then the GIC has multiple MemoryRegions
     * for the redistributors.
     */
    cpuidx = region->cpuidx + offset / gicv3_redist_size(s);
    offset %= gicv3_redist_size(s);
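
    /*
     * For example, assuming the GICv3 stride of two 64K pages (0x20000
     * bytes) per CPU, an access at region offset 0x20000 + GICR_CTLR
     * targets the CPU at region->cpuidx + 1 with offset GICR_CTLR.
     */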

    cs = &s->cpu[cpuidx];

    switch (size) {
    case 1:
        r = gicr_readb(cs, offset, data, attrs);
        break;
    case 4:
        r = gicr_readl(cs, offset, data, attrs);
        break;
    case 8:
        r = gicr_readll(cs, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    if (r != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest read at offset " HWADDR_FMT_plx
                      " size %u\n", __func__, offset, size);
        trace_gicv3_redist_badread(gicv3_redist_affid(cs), offset,
                                   size, attrs.secure);
        /* The spec requires that reserved registers are RAZ/WI;
         * so use MEMTX_ERROR returns from leaf functions as a way to
         * trigger the guest-error logging but don't return it to
         * the caller, or we'll cause a spurious guest data abort.
         */
        r = MEMTX_OK;
        *data = 0;
    } else {
        trace_gicv3_redist_read(gicv3_redist_affid(cs), offset, *data,
                                size, attrs.secure);
    }
    return r;
}

MemTxResult gicv3_redist_write(void *opaque, hwaddr offset, uint64_t data,
                               unsigned size, MemTxAttrs attrs)
{
    GICv3RedistRegion *region = opaque;
    GICv3State *s = region->gic;
    GICv3CPUState *cs;
    MemTxResult r;
    int cpuidx;

    assert((offset & (size - 1)) == 0);

    /*
     * There are (for GICv3) two 64K redistributor pages per CPU.
     * In some cases the redistributor pages for all CPUs are not
     * contiguous (e.g. on the virt board they are split into two
     * parts if there are too many CPUs to all fit in the same place
     * in the memory map); if so then the GIC has multiple MemoryRegions
     * for the redistributors.
     */
    cpuidx = region->cpuidx + offset / gicv3_redist_size(s);
    offset %= gicv3_redist_size(s);

    cs = &s->cpu[cpuidx];

    switch (size) {
    case 1:
        r = gicr_writeb(cs, offset, data, attrs);
        break;
    case 4:
        r = gicr_writel(cs, offset, data, attrs);
        break;
    case 8:
        r = gicr_writell(cs, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    if (r != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write at offset " HWADDR_FMT_plx
                      " size %u\n", __func__, offset, size);
        trace_gicv3_redist_badwrite(gicv3_redist_affid(cs), offset, data,
                                    size, attrs.secure);
        /* The spec requires that reserved registers are RAZ/WI;
         * so use MEMTX_ERROR returns from leaf functions as a way to
         * trigger the guest-error logging but don't return it to
         * the caller, or we'll cause a spurious guest data abort.
         */
        r = MEMTX_OK;
    } else {
        trace_gicv3_redist_write(gicv3_redist_affid(cs), offset, data,
                                 size, attrs.secure);
    }
    return r;
}

static void gicv3_redist_check_lpi_priority(GICv3CPUState *cs, int irq)
{
    uint64_t lpict_baddr = cs->gicr_propbaser & R_GICR_PROPBASER_PHYADDR_MASK;

    update_for_one_lpi(cs, irq, lpict_baddr,
                       cs->gic->gicd_ctlr & GICD_CTLR_DS,
                       &cs->hpplpi);
}

void gicv3_redist_update_lpi_only(GICv3CPUState *cs)
{
    /*
     * This function scans the LPI pending table and, for each pending LPI,
     * reads the corresponding entry from the LPI configuration table to
     * extract the priority and enable information, and checks whether it
     * is a better (numerically lower) priority than the currently recorded
     * highest-priority pending LPI. If so, it becomes the new
     * highest-priority pending LPI.
     */
    uint64_t lpipt_baddr, lpict_baddr;
    uint64_t idbits;

    idbits = MIN(FIELD_EX64(cs->gicr_propbaser, GICR_PROPBASER, IDBITS),
                 GICD_TYPER_IDBITS);

    if (!(cs->gicr_ctlr & GICR_CTLR_ENABLE_LPIS)) {
        return;
    }

    lpipt_baddr = cs->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;
    lpict_baddr = cs->gicr_propbaser & R_GICR_PROPBASER_PHYADDR_MASK;

    update_for_all_lpis(cs, lpipt_baddr, lpict_baddr, idbits,
                        cs->gic->gicd_ctlr & GICD_CTLR_DS, &cs->hpplpi);
}

void gicv3_redist_update_lpi(GICv3CPUState *cs)
{
    gicv3_redist_update_lpi_only(cs);
    gicv3_redist_update(cs);
}

void gicv3_redist_lpi_pending(GICv3CPUState *cs, int irq, int level)
{
    /*
     * This function updates the pending bit in the LPI pending table for
     * the irq being activated or deactivated.
     */
    uint64_t lpipt_baddr;

    lpipt_baddr = cs->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;
    if (!set_pending_table_bit(cs, lpipt_baddr, irq, level)) {
        /* no change in the value of the pending bit, return */
        return;
    }

    /*
     * Check if this LPI is better than the current hpplpi; if so, just
     * set hpplpi.prio and .irq without doing a full rescan.
     */
    if (level) {
        gicv3_redist_check_lpi_priority(cs, irq);
        gicv3_redist_update(cs);
    } else {
        if (irq == cs->hpplpi.irq) {
            gicv3_redist_update_lpi(cs);
        }
    }
}

void gicv3_redist_process_lpi(GICv3CPUState *cs, int irq, int level)
{
    uint64_t idbits;

    idbits = MIN(FIELD_EX64(cs->gicr_propbaser, GICR_PROPBASER, IDBITS),
                 GICD_TYPER_IDBITS);

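    /*
     * Only INTIDs that are LPIs and fit in the configured tables are
     * handled: e.g. with an effective idbits of 15, valid LPIs are
     * INTIDs 8192 (GICV3_LPI_INTID_START) up to 65535; anything else
     * is ignored below.
     */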
904d7d19c0aSPeter Maydell if (!(cs->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) ||
905d7d19c0aSPeter Maydell (irq > (1ULL << (idbits + 1)) - 1) || irq < GICV3_LPI_INTID_START) {
90617fb5e36SShashi Mallela return;
90717fb5e36SShashi Mallela }
90817fb5e36SShashi Mallela
90917fb5e36SShashi Mallela /* set/clear the pending bit for this irq */
91017fb5e36SShashi Mallela gicv3_redist_lpi_pending(cs, irq, level);
91117fb5e36SShashi Mallela }
91217fb5e36SShashi Mallela
gicv3_redist_inv_lpi(GICv3CPUState * cs,int irq)913a686e85dSPeter Maydell void gicv3_redist_inv_lpi(GICv3CPUState *cs, int irq)
914a686e85dSPeter Maydell {
915a686e85dSPeter Maydell /*
916a686e85dSPeter Maydell * The only cached information for LPIs we have is the HPPLPI.
917a686e85dSPeter Maydell * We could be cleverer about identifying when we don't need
918a686e85dSPeter Maydell * to do a full rescan of the pending table, but until we find
919a686e85dSPeter Maydell * this is a performance issue, just always recalculate.
920a686e85dSPeter Maydell */
921a686e85dSPeter Maydell gicv3_redist_update_lpi(cs);
922a686e85dSPeter Maydell }
923a686e85dSPeter Maydell
gicv3_redist_mov_lpi(GICv3CPUState * src,GICv3CPUState * dest,int irq)924961b4912SPeter Maydell void gicv3_redist_mov_lpi(GICv3CPUState *src, GICv3CPUState *dest, int irq)
925961b4912SPeter Maydell {
926961b4912SPeter Maydell /*
927961b4912SPeter Maydell * Move the specified LPI's pending state from the source redistributor
928961b4912SPeter Maydell * to the destination.
929961b4912SPeter Maydell *
930961b4912SPeter Maydell * If LPIs are disabled on dest this is CONSTRAINED UNPREDICTABLE:
931961b4912SPeter Maydell * we choose to NOP. If LPIs are disabled on source there's nothing
932961b4912SPeter Maydell * to be transferred anyway.
933961b4912SPeter Maydell */
934961b4912SPeter Maydell uint64_t idbits;
935961b4912SPeter Maydell uint32_t pendt_size;
936961b4912SPeter Maydell uint64_t src_baddr;
937961b4912SPeter Maydell
938961b4912SPeter Maydell if (!(src->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) ||
939961b4912SPeter Maydell !(dest->gicr_ctlr & GICR_CTLR_ENABLE_LPIS)) {
940961b4912SPeter Maydell return;
941961b4912SPeter Maydell }
942961b4912SPeter Maydell
943961b4912SPeter Maydell idbits = MIN(FIELD_EX64(src->gicr_propbaser, GICR_PROPBASER, IDBITS),
944961b4912SPeter Maydell GICD_TYPER_IDBITS);
945961b4912SPeter Maydell idbits = MIN(FIELD_EX64(dest->gicr_propbaser, GICR_PROPBASER, IDBITS),
946961b4912SPeter Maydell idbits);
947961b4912SPeter Maydell
948961b4912SPeter Maydell pendt_size = 1ULL << (idbits + 1);
949961b4912SPeter Maydell if ((irq / 8) >= pendt_size) {
950961b4912SPeter Maydell return;
951961b4912SPeter Maydell }
952961b4912SPeter Maydell
953961b4912SPeter Maydell src_baddr = src->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;
954961b4912SPeter Maydell
955ab6ef251SPeter Maydell if (!set_pending_table_bit(src, src_baddr, irq, 0)) {
956961b4912SPeter Maydell /* Not pending on source, nothing to do */
957961b4912SPeter Maydell return;
958961b4912SPeter Maydell }
959961b4912SPeter Maydell if (irq == src->hpplpi.irq) {
960961b4912SPeter Maydell /*
961961b4912SPeter Maydell * We just made this LPI not-pending so only need to update
962961b4912SPeter Maydell * if it was previously the highest priority pending LPI
963961b4912SPeter Maydell */
964961b4912SPeter Maydell gicv3_redist_update_lpi(src);
965961b4912SPeter Maydell }
966961b4912SPeter Maydell /* Mark it pending on the destination */
967961b4912SPeter Maydell gicv3_redist_lpi_pending(dest, irq, 1);
968961b4912SPeter Maydell }
969961b4912SPeter Maydell
gicv3_redist_movall_lpis(GICv3CPUState * src,GICv3CPUState * dest)970f6d1d9b4SPeter Maydell void gicv3_redist_movall_lpis(GICv3CPUState *src, GICv3CPUState *dest)
971f6d1d9b4SPeter Maydell {
972f6d1d9b4SPeter Maydell /*
973f6d1d9b4SPeter Maydell * We must move all pending LPIs from the source redistributor
974f6d1d9b4SPeter Maydell * to the destination. That is, for every pending LPI X on
975f6d1d9b4SPeter Maydell * src, we must set it not-pending on src and pending on dest.
976f6d1d9b4SPeter Maydell * LPIs that are already pending on dest are not cleared.
977f6d1d9b4SPeter Maydell *
978f6d1d9b4SPeter Maydell * If LPIs are disabled on dest this is CONSTRAINED UNPREDICTABLE:
979f6d1d9b4SPeter Maydell * we choose to NOP. If LPIs are disabled on source there's nothing
980f6d1d9b4SPeter Maydell * to be transferred anyway.
981f6d1d9b4SPeter Maydell */
982f6d1d9b4SPeter Maydell AddressSpace *as = &src->gic->dma_as;
983f6d1d9b4SPeter Maydell uint64_t idbits;
984f6d1d9b4SPeter Maydell uint32_t pendt_size;
985f6d1d9b4SPeter Maydell uint64_t src_baddr, dest_baddr;
986f6d1d9b4SPeter Maydell int i;
987f6d1d9b4SPeter Maydell
988f6d1d9b4SPeter Maydell if (!(src->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) ||
989f6d1d9b4SPeter Maydell !(dest->gicr_ctlr & GICR_CTLR_ENABLE_LPIS)) {
990f6d1d9b4SPeter Maydell return;
991f6d1d9b4SPeter Maydell }
992f6d1d9b4SPeter Maydell
993f6d1d9b4SPeter Maydell idbits = MIN(FIELD_EX64(src->gicr_propbaser, GICR_PROPBASER, IDBITS),
994f6d1d9b4SPeter Maydell GICD_TYPER_IDBITS);
995f6d1d9b4SPeter Maydell idbits = MIN(FIELD_EX64(dest->gicr_propbaser, GICR_PROPBASER, IDBITS),
996f6d1d9b4SPeter Maydell idbits);
997f6d1d9b4SPeter Maydell
998f6d1d9b4SPeter Maydell pendt_size = 1ULL << (idbits + 1);
999f6d1d9b4SPeter Maydell src_baddr = src->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;
1000f6d1d9b4SPeter Maydell dest_baddr = dest->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;
1001f6d1d9b4SPeter Maydell
1002f6d1d9b4SPeter Maydell for (i = GICV3_LPI_INTID_START / 8; i < pendt_size / 8; i++) {
1003f6d1d9b4SPeter Maydell uint8_t src_pend, dest_pend;
1004f6d1d9b4SPeter Maydell
1005f6d1d9b4SPeter Maydell address_space_read(as, src_baddr + i, MEMTXATTRS_UNSPECIFIED,
1006f6d1d9b4SPeter Maydell &src_pend, sizeof(src_pend));
1007f6d1d9b4SPeter Maydell if (!src_pend) {
1008f6d1d9b4SPeter Maydell continue;
1009f6d1d9b4SPeter Maydell }
1010f6d1d9b4SPeter Maydell address_space_read(as, dest_baddr + i, MEMTXATTRS_UNSPECIFIED,
1011f6d1d9b4SPeter Maydell &dest_pend, sizeof(dest_pend));
1012f6d1d9b4SPeter Maydell dest_pend |= src_pend;
1013f6d1d9b4SPeter Maydell src_pend = 0;
1014f6d1d9b4SPeter Maydell address_space_write(as, src_baddr + i, MEMTXATTRS_UNSPECIFIED,
1015f6d1d9b4SPeter Maydell &src_pend, sizeof(src_pend));
1016f6d1d9b4SPeter Maydell address_space_write(as, dest_baddr + i, MEMTXATTRS_UNSPECIFIED,
1017f6d1d9b4SPeter Maydell &dest_pend, sizeof(dest_pend));
1018f6d1d9b4SPeter Maydell }
1019f6d1d9b4SPeter Maydell
1020f6d1d9b4SPeter Maydell gicv3_redist_update_lpi(src);
1021f6d1d9b4SPeter Maydell gicv3_redist_update_lpi(dest);
1022f6d1d9b4SPeter Maydell }
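/*
 * As with gicv3_redist_mov_lpi(), the expected user of this helper is the
 * ITS emulation, presumably when it handles a MOVALL command redirecting
 * every LPI from one redistributor to another.
 */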
1023f6d1d9b4SPeter Maydell
1024c3f21b06SPeter Maydell void gicv3_redist_vlpi_pending(GICv3CPUState *cs, int irq, int level)
1025c3f21b06SPeter Maydell {
1026c3f21b06SPeter Maydell /*
1027932f0480SPeter Maydell * Change the pending state of the specified vLPI.
1028932f0480SPeter Maydell * Unlike gicv3_redist_process_vlpi(), we know here that the
1029932f0480SPeter Maydell * vCPU is definitely resident on this redistributor, and that
1030932f0480SPeter Maydell * the irq is in range.
1031c3f21b06SPeter Maydell */
1032932f0480SPeter Maydell uint64_t vptbase, ctbase;
1033932f0480SPeter Maydell
1034932f0480SPeter Maydell vptbase = FIELD_EX64(cs->gicr_vpendbaser, GICR_VPENDBASER, PHYADDR) << 16;
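    /*
     * GICR_VPENDBASER.PHYADDR holds the upper bits (architecturally
     * bits [51:16]) of the virtual pending table's physical address,
     * so shifting the field left by 16 reconstructs the 64KB-aligned
     * base address of that table.
     */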
1035932f0480SPeter Maydell
1036932f0480SPeter Maydell if (set_pending_table_bit(cs, vptbase, irq, level)) {
1037932f0480SPeter Maydell if (level) {
1038932f0480SPeter Maydell /* Check whether this vLPI is now the best */
1039932f0480SPeter Maydell ctbase = cs->gicr_vpropbaser & R_GICR_VPROPBASER_PHYADDR_MASK;
1040932f0480SPeter Maydell update_for_one_lpi(cs, irq, ctbase, true, &cs->hppvlpi);
1041932f0480SPeter Maydell gicv3_cpuif_virt_irq_fiq_update(cs);
1042932f0480SPeter Maydell } else {
1043932f0480SPeter Maydell /* Only need to recalculate if this was previously the best vLPI */
1044932f0480SPeter Maydell if (irq == cs->hppvlpi.irq) {
1045932f0480SPeter Maydell gicv3_redist_update_vlpi(cs);
1046932f0480SPeter Maydell }
1047932f0480SPeter Maydell }
1048932f0480SPeter Maydell }
1049c3f21b06SPeter Maydell }
1050c3f21b06SPeter Maydell
1051469cf23bSPeter Maydell void gicv3_redist_process_vlpi(GICv3CPUState *cs, int irq, uint64_t vptaddr,
1052469cf23bSPeter Maydell int doorbell, int level)
1053469cf23bSPeter Maydell {
1054d7d39749SPeter Maydell bool bit_changed;
1055d7d39749SPeter Maydell bool resident = vcpu_resident(cs, vptaddr);
1056d7d39749SPeter Maydell uint64_t ctbase;
1057d7d39749SPeter Maydell
1058d7d39749SPeter Maydell if (resident) {
1059d7d39749SPeter Maydell uint32_t idbits = FIELD_EX64(cs->gicr_vpropbaser, GICR_VPROPBASER, IDBITS);
1060d7d39749SPeter Maydell if (irq >= (1ULL << (idbits + 1))) {
1061d7d39749SPeter Maydell return;
1062d7d39749SPeter Maydell }
1063d7d39749SPeter Maydell }
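    /*
     * The pending bit in the virtual pending table is updated whether or
     * not the vCPU is resident; the cached highest-priority-vLPI state
     * only needs recomputing when the vCPU is resident here and the bit
     * actually changed.
     */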
1064d7d39749SPeter Maydell
1065d7d39749SPeter Maydell bit_changed = set_pending_table_bit(cs, vptaddr, irq, level);
1066d7d39749SPeter Maydell if (resident && bit_changed) {
1067d7d39749SPeter Maydell if (level) {
1068d7d39749SPeter Maydell /* Check whether this vLPI is now the best */
1069d7d39749SPeter Maydell ctbase = cs->gicr_vpropbaser & R_GICR_VPROPBASER_PHYADDR_MASK;
1070d7d39749SPeter Maydell update_for_one_lpi(cs, irq, ctbase, true, &cs->hppvlpi);
1071d7d39749SPeter Maydell gicv3_cpuif_virt_irq_fiq_update(cs);
1072d7d39749SPeter Maydell } else {
1073d7d39749SPeter Maydell /* Only need to recalculate if this was previously the best vLPI */
1074d7d39749SPeter Maydell if (irq == cs->hppvlpi.irq) {
1075d7d39749SPeter Maydell gicv3_redist_update_vlpi(cs);
1076d7d39749SPeter Maydell }
1077d7d39749SPeter Maydell }
1078d7d39749SPeter Maydell }
1079d7d39749SPeter Maydell
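    /*
     * A doorbell value of INTID_SPURIOUS (1023) indicates that no
     * doorbell interrupt was configured for this vLPI, in which case
     * nothing is signalled while the vCPU is away.
     */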
1080d7d39749SPeter Maydell if (!resident && level && doorbell != INTID_SPURIOUS &&
1081d7d39749SPeter Maydell (cs->gicr_ctlr & GICR_CTLR_ENABLE_LPIS)) {
1082d7d39749SPeter Maydell /* vCPU is not currently resident: ring the doorbell */
1083d7d39749SPeter Maydell gicv3_redist_process_lpi(cs, doorbell, 1);
1084d7d39749SPeter Maydell }
1085469cf23bSPeter Maydell }
1086469cf23bSPeter Maydell
10873c64a42cSPeter Maydell void gicv3_redist_mov_vlpi(GICv3CPUState *src, uint64_t src_vptaddr,
10883c64a42cSPeter Maydell GICv3CPUState *dest, uint64_t dest_vptaddr,
10893c64a42cSPeter Maydell int irq, int doorbell)
10903c64a42cSPeter Maydell {
10913c64a42cSPeter Maydell /*
1092c6f797d5SPeter Maydell * Move the specified vLPI's pending state from the source redistributor
1093c6f797d5SPeter Maydell * to the destination.
10943c64a42cSPeter Maydell */
1095c6f797d5SPeter Maydell if (!set_pending_table_bit(src, src_vptaddr, irq, 0)) {
1096c6f797d5SPeter Maydell /* Not pending on source, nothing to do */
1097c6f797d5SPeter Maydell return;
1098c6f797d5SPeter Maydell }
1099c6f797d5SPeter Maydell if (vcpu_resident(src, src_vptaddr) && irq == src->hppvlpi.irq) {
1100c6f797d5SPeter Maydell /*
1101c6f797d5SPeter Maydell * Update src's cached highest-priority pending vLPI if we just made
1102c6f797d5SPeter Maydell * it not-pending
1103c6f797d5SPeter Maydell */
1104c6f797d5SPeter Maydell gicv3_redist_update_vlpi(src);
1105c6f797d5SPeter Maydell }
1106c6f797d5SPeter Maydell /*
1107c6f797d5SPeter Maydell * Mark the vLPI pending on the destination (ringing the doorbell
1108c6f797d5SPeter Maydell * if the vCPU isn't resident)
1109c6f797d5SPeter Maydell */
1110c6f797d5SPeter Maydell gicv3_redist_process_vlpi(dest, irq, dest_vptaddr, doorbell, 1);
11113c64a42cSPeter Maydell }
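/*
 * The ITS emulation is again the expected caller here, presumably when it
 * processes a VMOVI command retargeting a vLPI to a different vPE.
 */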
11123c64a42cSPeter Maydell
1113c6dd2f99SPeter Maydell void gicv3_redist_vinvall(GICv3CPUState *cs, uint64_t vptaddr)
1114c6dd2f99SPeter Maydell {
1115e031346dSPeter Maydell if (!vcpu_resident(cs, vptaddr)) {
1116e031346dSPeter Maydell /* We don't have anything cached if the vCPU isn't resident */
1117e031346dSPeter Maydell return;
1118e031346dSPeter Maydell }
1119e031346dSPeter Maydell
1120e031346dSPeter Maydell /* Otherwise, our only cached information is the HPPVLPI info */
1121e031346dSPeter Maydell gicv3_redist_update_vlpi(cs);
1122c6dd2f99SPeter Maydell }
1123c6dd2f99SPeter Maydell
1124d4014320SPeter Maydell void gicv3_redist_inv_vlpi(GICv3CPUState *cs, int irq, uint64_t vptaddr)
1125d4014320SPeter Maydell {
1126d4014320SPeter Maydell /*
11271b19ccfaSPeter Maydell * The only cached information for vLPIs we have is the HPPVLPI.
11281b19ccfaSPeter Maydell * We could be cleverer about identifying when we don't need
11291b19ccfaSPeter Maydell * to do a full rescan of the pending table, but until we find
11301b19ccfaSPeter Maydell * this is a performance issue, just always recalculate.
1131d4014320SPeter Maydell */
11321b19ccfaSPeter Maydell gicv3_redist_vinvall(cs, vptaddr);
1133d4014320SPeter Maydell }
1134d4014320SPeter Maydell
1135c84428b3SPeter Maydell void gicv3_redist_set_irq(GICv3CPUState *cs, int irq, int level)
1136c84428b3SPeter Maydell {
1137c84428b3SPeter Maydell /* Update redistributor state for a change in an external PPI input line */
1138c84428b3SPeter Maydell if (level == extract32(cs->level, irq, 1)) {
1139c84428b3SPeter Maydell return;
1140c84428b3SPeter Maydell }
1141c84428b3SPeter Maydell
1142c84428b3SPeter Maydell trace_gicv3_redist_set_irq(gicv3_redist_affid(cs), irq, level);
1143c84428b3SPeter Maydell
1144c84428b3SPeter Maydell cs->level = deposit32(cs->level, irq, 1, level);
1145c84428b3SPeter Maydell
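    /*
     * Only edge-triggered interrupts need their pending bit latched here:
     * level-sensitive interrupts are treated as pending while the input
     * line is high when the redistributor recalculates in
     * gicv3_redist_update(), so updating cs->level above is sufficient
     * for them.
     */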
1146c84428b3SPeter Maydell if (level) {
1147c84428b3SPeter Maydell /* 0->1 edges latch the pending bit for edge-triggered interrupts */
1148c84428b3SPeter Maydell if (extract32(cs->edge_trigger, irq, 1)) {
1149c84428b3SPeter Maydell cs->gicr_ipendr0 = deposit32(cs->gicr_ipendr0, irq, 1, 1);
1150c84428b3SPeter Maydell }
1151c84428b3SPeter Maydell }
1152c84428b3SPeter Maydell
1153c84428b3SPeter Maydell gicv3_redist_update(cs);
1154c84428b3SPeter Maydell }
1155b1a0eb77SPeter Maydell
1156b1a0eb77SPeter Maydell void gicv3_redist_send_sgi(GICv3CPUState *cs, int grp, int irq, bool ns)
1157b1a0eb77SPeter Maydell {
1158b1a0eb77SPeter Maydell /* Update redistributor state for a generated SGI */
1159b1a0eb77SPeter Maydell int irqgrp = gicv3_irq_group(cs->gic, cs, irq);
1160b1a0eb77SPeter Maydell
1161b1a0eb77SPeter Maydell /* If we are asked for a Secure Group 1 SGI and it's actually
1162b1a0eb77SPeter Maydell * configured as Secure Group 0 this is OK (subject to the usual
1163b1a0eb77SPeter Maydell * NSACR checks).
1164b1a0eb77SPeter Maydell */
1165b1a0eb77SPeter Maydell if (grp == GICV3_G1 && irqgrp == GICV3_G0) {
1166b1a0eb77SPeter Maydell grp = GICV3_G0;
1167b1a0eb77SPeter Maydell }
1168b1a0eb77SPeter Maydell
1169b1a0eb77SPeter Maydell if (grp != irqgrp) {
1170b1a0eb77SPeter Maydell return;
1171b1a0eb77SPeter Maydell }
1172b1a0eb77SPeter Maydell
1173b1a0eb77SPeter Maydell if (ns && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
1174b1a0eb77SPeter Maydell /* If security is enabled we must test the NSACR bits */
1175b1a0eb77SPeter Maydell int nsaccess = gicr_ns_access(cs, irq);
1176b1a0eb77SPeter Maydell
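        /*
         * Each SGI has a 2-bit GICR_NSACR field: 0 forbids Non-secure
         * generation entirely, 1 permits it when the SGI is configured
         * as Secure Group 0, and 2 or 3 also permit it when it is
         * Secure Group 1; the test below rejects anything weaker.
         */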
1177b1a0eb77SPeter Maydell if ((irqgrp == GICV3_G0 && nsaccess < 1) ||
1178b1a0eb77SPeter Maydell (irqgrp == GICV3_G1 && nsaccess < 2)) {
1179b1a0eb77SPeter Maydell return;
1180b1a0eb77SPeter Maydell }
1181b1a0eb77SPeter Maydell }
1182b1a0eb77SPeter Maydell
1183b1a0eb77SPeter Maydell /* OK, we can accept the SGI */
1184b1a0eb77SPeter Maydell trace_gicv3_redist_send_sgi(gicv3_redist_affid(cs), irq);
1185b1a0eb77SPeter Maydell cs->gicr_ipendr0 = deposit32(cs->gicr_ipendr0, irq, 1, 1);
1186b1a0eb77SPeter Maydell gicv3_redist_update(cs);
1187b1a0eb77SPeter Maydell }
1188