/*
 * ARM GICv3 emulation: Redistributor
 *
 * Copyright (c) 2015 Huawei.
 * Copyright (c) 2016 Linaro Limited.
 * Written by Shlomo Pongratz, Peter Maydell
 *
 * This code is licensed under the GPL, version 2 or (at your option)
 * any later version.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "trace.h"
#include "gicv3_internal.h"

static uint32_t mask_group(GICv3CPUState *cs, MemTxAttrs attrs)
{
    /* Return a 32-bit mask which should be applied for this set of 32
     * interrupts; each bit is 1 if access is permitted by the
     * combination of attrs.secure and GICR_IGROUPR0. (GICR_NSACR does
     * not affect config register accesses, unlike GICD_NSACR.)
     */
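    /*
     * Illustrative values only: with GICD_CTLR.DS == 0 and a non-secure
     * access, gicr_igroupr0 == 0x0000ffff means only interrupts 0..15 are
     * accessible; bits 16..31 (Group 0 or Secure Group 1 interrupts) read
     * as zero and writes to them are ignored.
     */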
    if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
        /* bits for Group 0 or Secure Group 1 interrupts are RAZ/WI */
        return cs->gicr_igroupr0;
    }
    return 0xFFFFFFFFU;
}

static int gicr_ns_access(GICv3CPUState *cs, int irq)
{
    /* Return the 2 bit NSACR.NS_access field for this SGI */
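    /* e.g. for SGI 3 this is bits [7:6] of GICR_NSACR (two bits per SGI) */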
    assert(irq < 16);
    return extract32(cs->gicr_nsacr, irq * 2, 2);
}

static void gicr_write_bitmap_reg(GICv3CPUState *cs, MemTxAttrs attrs,
                                  uint32_t *reg, uint32_t val)
{
    /* Helper routine to implement a plain (whole-register) write to a
     * bitmap register
     */
    val &= mask_group(cs, attrs);
    *reg = val;
    gicv3_redist_update(cs);
}

static void gicr_write_set_bitmap_reg(GICv3CPUState *cs, MemTxAttrs attrs,
                                      uint32_t *reg, uint32_t val)
{
    /* Helper routine to implement writing to a "set-bitmap" register */
    val &= mask_group(cs, attrs);
    *reg |= val;
    gicv3_redist_update(cs);
}

static void gicr_write_clear_bitmap_reg(GICv3CPUState *cs, MemTxAttrs attrs,
                                        uint32_t *reg, uint32_t val)
{
    /* Helper routine to implement writing to a "clear-bitmap" register */
    val &= mask_group(cs, attrs);
    *reg &= ~val;
    gicv3_redist_update(cs);
}

static uint32_t gicr_read_bitmap_reg(GICv3CPUState *cs, MemTxAttrs attrs,
                                     uint32_t reg)
{
    reg &= mask_group(cs, attrs);
    return reg;
}

static bool vcpu_resident(GICv3CPUState *cs, uint64_t vptaddr)
{
    /*
     * Return true if a vCPU is resident, which is defined by
     * whether the GICR_VPENDBASER register is marked VALID and
     * has the right virtual pending table address.
     */
    if (!FIELD_EX64(cs->gicr_vpendbaser, GICR_VPENDBASER, VALID)) {
        return false;
    }
    return vptaddr == (cs->gicr_vpendbaser & R_GICR_VPENDBASER_PHYADDR_MASK);
}

/**
 * update_for_one_lpi: Update pending information if this LPI is better
 *
 * @cs: GICv3CPUState
 * @irq: interrupt to look up in the LPI Configuration table
 * @ctbase: physical address of the LPI Configuration table to use
 * @ds: true if priority value should not be shifted
 * @hpp: points to pending information to update
 *
 * Look up @irq in the Configuration table specified by @ctbase
 * to see if it is enabled and what its priority is. If it is an
 * enabled interrupt with a higher priority than that currently
 * recorded in @hpp, update @hpp.
 */
static void update_for_one_lpi(GICv3CPUState *cs, int irq,
                               uint64_t ctbase, bool ds, PendingIrq *hpp)
{
    uint8_t lpite;
    uint8_t prio;

    address_space_read(&cs->gic->dma_as,
                       ctbase + ((irq - GICV3_LPI_INTID_START) * sizeof(lpite)),
                       MEMTXATTRS_UNSPECIFIED, &lpite, sizeof(lpite));

    if (!(lpite & LPI_CTE_ENABLED)) {
        return;
    }

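    /*
     * The config table byte keeps the priority in its top 6 bits. When
     * security is enabled (DS == 0) the non-secure view applies: e.g. a
     * table priority of 0x40 becomes (0x40 >> 1) | 0x80 == 0xa0.
     * (Illustrative arithmetic, derived from the code below.)
     */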
    if (ds) {
        prio = lpite & LPI_PRIORITY_MASK;
    } else {
        prio = ((lpite & LPI_PRIORITY_MASK) >> 1) | 0x80;
    }

    if ((prio < hpp->prio) ||
        ((prio == hpp->prio) && (irq <= hpp->irq))) {
        hpp->irq = irq;
        hpp->prio = prio;
        /* LPIs and vLPIs are always non-secure Grp1 interrupts */
        hpp->grp = GICV3_G1NS;
    }
}

/**
 * update_for_all_lpis: Fully scan LPI tables and find best pending LPI
 *
 * @cs: GICv3CPUState
 * @ptbase: physical address of LPI Pending table
 * @ctbase: physical address of LPI Configuration table
 * @ptsizebits: size of tables, specified as number of interrupt ID bits minus 1
 * @ds: true if priority value should not be shifted
 * @hpp: points to pending information to set
 *
 * Recalculate the highest priority pending enabled LPI from scratch,
 * and set @hpp accordingly.
 *
 * We scan the LPI pending table @ptbase; for each pending LPI, we read the
 * corresponding entry in the LPI configuration table @ctbase to extract
 * the priority and enabled information.
 *
 * We take @ptsizebits in the form idbits-1 because this is the way that
 * LPI table sizes are architecturally specified in GICR_PROPBASER.IDBits
 * and in the VMAPP command's VPT_size field.
 */
static void update_for_all_lpis(GICv3CPUState *cs, uint64_t ptbase,
                                uint64_t ctbase, unsigned ptsizebits,
                                bool ds, PendingIrq *hpp)
{
    AddressSpace *as = &cs->gic->dma_as;
    uint8_t pend;
    uint32_t pendt_size = (1ULL << (ptsizebits + 1));
    int i, bit;

    hpp->prio = 0xff;

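    /*
     * Worked example: GICR_PROPBASER.IDBits == 15 gives ptsizebits == 15,
     * so pendt_size == 1 << 16 == 65536 pending bits == 8192 bytes; the
     * scan starts at byte 1024 (8192 / 8) because INTIDs below
     * GICV3_LPI_INTID_START are not LPIs.
     */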
    for (i = GICV3_LPI_INTID_START / 8; i < pendt_size / 8; i++) {
        address_space_read(as, ptbase + i, MEMTXATTRS_UNSPECIFIED, &pend, 1);
        while (pend) {
            bit = ctz32(pend);
            update_for_one_lpi(cs, i * 8 + bit, ctbase, ds, hpp);
            pend &= ~(1 << bit);
        }
    }
}

/**
 * set_pending_table_bit: Set or clear pending bit for an LPI
 *
 * @cs: GICv3CPUState
 * @ptbase: physical address of LPI Pending table
 * @irq: LPI to change pending state for
 * @level: false to clear pending state, true to set
 *
 * Returns true if we needed to do something, false if the pending bit
 * was already at @level.
 */
static bool set_pending_table_bit(GICv3CPUState *cs, uint64_t ptbase,
                                  int irq, bool level)
{
    AddressSpace *as = &cs->gic->dma_as;
    uint64_t addr = ptbase + irq / 8;
    uint8_t pend;

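    /*
     * One bit per INTID: e.g. irq 8195 lives in the byte at
     * ptbase + 1024 (8195 / 8), bit position 3 (8195 % 8).
     * (Worked example only.)
     */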
    address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED, &pend, 1);
    if (extract32(pend, irq % 8, 1) == level) {
        /* Bit already at requested state, no action required */
        return false;
    }
    pend = deposit32(pend, irq % 8, 1, level ? 1 : 0);
    address_space_write(as, addr, MEMTXATTRS_UNSPECIFIED, &pend, 1);
    return true;
}

static uint8_t gicr_read_ipriorityr(GICv3CPUState *cs, MemTxAttrs attrs,
                                    int irq)
{
    /* Read the value of GICR_IPRIORITYR<n> for the specified interrupt,
     * honouring security state (these are RAZ/WI for Group 0 or Secure
     * Group 1 interrupts).
     */
    uint32_t prio;

    prio = cs->gicr_ipriorityr[irq];

    if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
        if (!(cs->gicr_igroupr0 & (1U << irq))) {
            /* Fields for Group 0 or Secure Group 1 interrupts are RAZ/WI */
            return 0;
        }
        /* NS view of the interrupt priority */
        prio = (prio << 1) & 0xff;
    }
    return prio;
}

static void gicr_write_ipriorityr(GICv3CPUState *cs, MemTxAttrs attrs, int irq,
                                  uint8_t value)
{
    /* Write the value of GICR_IPRIORITYR<n> for the specified interrupt,
     * honouring security state (these are RAZ/WI for Group 0 or Secure
     * Group 1 interrupts).
     */
    if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
        if (!(cs->gicr_igroupr0 & (1U << irq))) {
            /* Fields for Group 0 or Secure Group 1 interrupts are RAZ/WI */
            return;
        }
        /* NS view of the interrupt priority */
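        /*
         * e.g. an NS write of 0xa0 is stored as 0x80 | (0xa0 >> 1) == 0xd0,
         * and reads back via gicr_read_ipriorityr() as
         * (0xd0 << 1) & 0xff == 0xa0: an illustrative round trip.
         */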
        value = 0x80 | (value >> 1);
    }
    cs->gicr_ipriorityr[irq] = value;
}

static void gicv3_redist_update_vlpi_only(GICv3CPUState *cs)
{
    uint64_t ptbase, ctbase, idbits;

    if (!FIELD_EX64(cs->gicr_vpendbaser, GICR_VPENDBASER, VALID)) {
        cs->hppvlpi.prio = 0xff;
        return;
    }

    ptbase = cs->gicr_vpendbaser & R_GICR_VPENDBASER_PHYADDR_MASK;
    ctbase = cs->gicr_vpropbaser & R_GICR_VPROPBASER_PHYADDR_MASK;
    idbits = FIELD_EX64(cs->gicr_vpropbaser, GICR_VPROPBASER, IDBITS);

    update_for_all_lpis(cs, ptbase, ctbase, idbits, true, &cs->hppvlpi);
}

static void gicv3_redist_update_vlpi(GICv3CPUState *cs)
{
    gicv3_redist_update_vlpi_only(cs);
    gicv3_cpuif_virt_irq_fiq_update(cs);
}

static void gicr_write_vpendbaser(GICv3CPUState *cs, uint64_t newval)
{
    /* Write @newval to GICR_VPENDBASER, handling its effects */
    bool oldvalid = FIELD_EX64(cs->gicr_vpendbaser, GICR_VPENDBASER, VALID);
    bool newvalid = FIELD_EX64(newval, GICR_VPENDBASER, VALID);
    bool pendinglast;

    /*
     * The DIRTY bit is read-only and for us is always zero;
     * other fields are writable.
     */
    newval &= R_GICR_VPENDBASER_INNERCACHE_MASK |
        R_GICR_VPENDBASER_SHAREABILITY_MASK |
        R_GICR_VPENDBASER_PHYADDR_MASK |
        R_GICR_VPENDBASER_OUTERCACHE_MASK |
        R_GICR_VPENDBASER_PENDINGLAST_MASK |
        R_GICR_VPENDBASER_IDAI_MASK |
        R_GICR_VPENDBASER_VALID_MASK;

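    /*
     * Summary of the VALID transitions handled below (a sketch of the
     * existing logic, not new behaviour):
     *   0 -> 0 : plain register update
     *   0 -> 1 : vPE scheduled; recompute hppvlpi, PendingLast is RES1
     *   1 -> 0 : vPE descheduled; PendingLast records whether anything
     *            was still pending
     *   1 -> 1 : changing fields is UNPREDICTABLE; we log and ignore
     */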
    if (oldvalid && newvalid) {
        /*
         * Changing other fields while VALID is 1 is UNPREDICTABLE;
         * we choose to log and ignore the write.
         */
        if (cs->gicr_vpendbaser ^ newval) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: Changing GICR_VPENDBASER when VALID=1 "
                          "is UNPREDICTABLE\n", __func__);
        }
        return;
    }
    if (!oldvalid && !newvalid) {
        cs->gicr_vpendbaser = newval;
        return;
    }

    if (newvalid) {
        /*
         * Valid going from 0 to 1: update hppvlpi from tables.
         * If IDAI is 0 we are allowed to use the info we cached in
         * the IMPDEF area of the table.
         * PendingLast is RES1 when we make this transition.
         */
        pendinglast = true;
    } else {
        /*
         * Valid going from 1 to 0:
         * Set PendingLast if there was a pending enabled interrupt
         * for the vPE that was just descheduled.
         * If we cache info in the IMPDEF area, write it out here.
         */
        pendinglast = cs->hppvlpi.prio != 0xff;
    }

    newval = FIELD_DP64(newval, GICR_VPENDBASER, PENDINGLAST, pendinglast);
    cs->gicr_vpendbaser = newval;
    gicv3_redist_update_vlpi(cs);
}

static MemTxResult gicr_readb(GICv3CPUState *cs, hwaddr offset,
                              uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
        *data = gicr_read_ipriorityr(cs, attrs, offset - GICR_IPRIORITYR);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

static MemTxResult gicr_writeb(GICv3CPUState *cs, hwaddr offset,
                               uint64_t value, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
        gicr_write_ipriorityr(cs, attrs, offset - GICR_IPRIORITYR, value);
        gicv3_redist_update(cs);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

static MemTxResult gicr_readl(GICv3CPUState *cs, hwaddr offset,
                              uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_CTLR:
        *data = cs->gicr_ctlr;
        return MEMTX_OK;
    case GICR_IIDR:
        *data = gicv3_iidr();
        return MEMTX_OK;
    case GICR_TYPER:
        *data = extract64(cs->gicr_typer, 0, 32);
        return MEMTX_OK;
    case GICR_TYPER + 4:
        *data = extract64(cs->gicr_typer, 32, 32);
        return MEMTX_OK;
    case GICR_STATUSR:
        /* RAZ/WI for us (this is an optional register and our implementation
         * does not track RO/WO/reserved violations to report them to the guest)
         */
        *data = 0;
        return MEMTX_OK;
    case GICR_WAKER:
        *data = cs->gicr_waker;
        return MEMTX_OK;
    case GICR_PROPBASER:
        *data = extract64(cs->gicr_propbaser, 0, 32);
        return MEMTX_OK;
    case GICR_PROPBASER + 4:
        *data = extract64(cs->gicr_propbaser, 32, 32);
        return MEMTX_OK;
    case GICR_PENDBASER:
        *data = extract64(cs->gicr_pendbaser, 0, 32);
        return MEMTX_OK;
    case GICR_PENDBASER + 4:
        *data = extract64(cs->gicr_pendbaser, 32, 32);
        return MEMTX_OK;
    case GICR_IGROUPR0:
        if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
            *data = 0;
            return MEMTX_OK;
        }
        *data = cs->gicr_igroupr0;
        return MEMTX_OK;
    case GICR_ISENABLER0:
    case GICR_ICENABLER0:
        *data = gicr_read_bitmap_reg(cs, attrs, cs->gicr_ienabler0);
        return MEMTX_OK;
    case GICR_ISPENDR0:
    case GICR_ICPENDR0:
    {
        /* The pending register reads as the logical OR of the pending
         * latch and the input line level for level-triggered interrupts.
         */
        uint32_t val = cs->gicr_ipendr0 | (~cs->edge_trigger & cs->level);
        *data = gicr_read_bitmap_reg(cs, attrs, val);
        return MEMTX_OK;
    }
    case GICR_ISACTIVER0:
    case GICR_ICACTIVER0:
        *data = gicr_read_bitmap_reg(cs, attrs, cs->gicr_iactiver0);
        return MEMTX_OK;
    case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
    {
        int i, irq = offset - GICR_IPRIORITYR;
        uint32_t value = 0;

        for (i = irq + 3; i >= irq; i--) {
            value <<= 8;
            value |= gicr_read_ipriorityr(cs, attrs, i);
        }
        *data = value;
        return MEMTX_OK;
    }
    case GICR_INMIR0:
        *data = cs->gic->nmi_support ?
                gicr_read_bitmap_reg(cs, attrs, cs->gicr_inmir0) : 0;
        return MEMTX_OK;
    case GICR_ICFGR0:
    case GICR_ICFGR1:
    {
        /* Our edge_trigger bitmap is one bit per irq; take the correct
         * half of it, and spread it out into the odd bits.
         */
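        /*
         * e.g. half_shuffle32(0x0003) == 0x00000005; after the << 1 below,
         * edge-triggered irqs 0 and 1 show up as ICFGR bits 1 and 3
         * (two config bits per irq). Illustrative values only.
         */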
        uint32_t value;

        value = cs->edge_trigger & mask_group(cs, attrs);
        value = extract32(value, (offset == GICR_ICFGR1) ? 16 : 0, 16);
        value = half_shuffle32(value) << 1;
        *data = value;
        return MEMTX_OK;
    }
    case GICR_IGRPMODR0:
        if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
            /* RAZ/WI if security disabled, or if
             * security enabled and this is an NS access
             */
            *data = 0;
            return MEMTX_OK;
        }
        *data = cs->gicr_igrpmodr0;
        return MEMTX_OK;
    case GICR_NSACR:
        if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
            /* RAZ/WI if security disabled, or if
             * security enabled and this is an NS access
             */
            *data = 0;
            return MEMTX_OK;
        }
        *data = cs->gicr_nsacr;
        return MEMTX_OK;
    case GICR_IDREGS ... GICR_IDREGS + 0x2f:
        *data = gicv3_idreg(cs->gic, offset - GICR_IDREGS, GICV3_PIDR0_REDIST);
        return MEMTX_OK;
        /*
         * VLPI frame registers. We don't need a version check for
         * VPROPBASER and VPENDBASER because gicv3_redist_size() will
         * prevent pre-v4 GIC from passing us offsets this high.
         */
    case GICR_VPROPBASER:
        *data = extract64(cs->gicr_vpropbaser, 0, 32);
        return MEMTX_OK;
    case GICR_VPROPBASER + 4:
        *data = extract64(cs->gicr_vpropbaser, 32, 32);
        return MEMTX_OK;
    case GICR_VPENDBASER:
        *data = extract64(cs->gicr_vpendbaser, 0, 32);
        return MEMTX_OK;
    case GICR_VPENDBASER + 4:
        *data = extract64(cs->gicr_vpendbaser, 32, 32);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

static MemTxResult gicr_writel(GICv3CPUState *cs, hwaddr offset,
                               uint64_t value, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_CTLR:
        /* For our implementation, GICR_TYPER.DPGS is 0 and so all
         * the DPG bits are RAZ/WI. We don't do anything asynchronously,
         * so UWP and RWP are RAZ/WI. GICR_TYPER.LPIS is 1 (we
         * implement LPIs) so Enable_LPIs is programmable.
         */
        if (cs->gicr_typer & GICR_TYPER_PLPIS) {
            if (value & GICR_CTLR_ENABLE_LPIS) {
                cs->gicr_ctlr |= GICR_CTLR_ENABLE_LPIS;
                /* Check for any pending interrupt in the LPI pending table */
                gicv3_redist_update_lpi(cs);
            } else {
                cs->gicr_ctlr &= ~GICR_CTLR_ENABLE_LPIS;
                /* cs->hppi might have been an LPI; recalculate */
                gicv3_redist_update(cs);
            }
        }
        return MEMTX_OK;
    case GICR_STATUSR:
        /* RAZ/WI for our implementation */
        return MEMTX_OK;
    case GICR_WAKER:
        /* Only the ProcessorSleep bit is writable. When the guest sets
         * it, it requests that we transition the channel between the
         * redistributor and the cpu interface to quiescent, and that
         * we set the ChildrenAsleep bit once the interface has reached the
         * quiescent state.
         * Setting the ProcessorSleep to 0 reverses the quiescing, and
         * ChildrenAsleep is cleared once the transition is complete.
         * Since our interface is not asynchronous, we complete these
         * transitions instantaneously, so we set ChildrenAsleep to the
         * same value as ProcessorSleep here.
         */
        value &= GICR_WAKER_ProcessorSleep;
        if (value & GICR_WAKER_ProcessorSleep) {
            value |= GICR_WAKER_ChildrenAsleep;
        }
        cs->gicr_waker = value;
        return MEMTX_OK;
    case GICR_PROPBASER:
        cs->gicr_propbaser = deposit64(cs->gicr_propbaser, 0, 32, value);
        return MEMTX_OK;
    case GICR_PROPBASER + 4:
        cs->gicr_propbaser = deposit64(cs->gicr_propbaser, 32, 32, value);
        return MEMTX_OK;
    case GICR_PENDBASER:
        cs->gicr_pendbaser = deposit64(cs->gicr_pendbaser, 0, 32, value);
        return MEMTX_OK;
    case GICR_PENDBASER + 4:
        cs->gicr_pendbaser = deposit64(cs->gicr_pendbaser, 32, 32, value);
        return MEMTX_OK;
    case GICR_IGROUPR0:
        if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
            return MEMTX_OK;
        }
        cs->gicr_igroupr0 = value;
        gicv3_redist_update(cs);
        return MEMTX_OK;
    case GICR_ISENABLER0:
        gicr_write_set_bitmap_reg(cs, attrs, &cs->gicr_ienabler0, value);
        return MEMTX_OK;
    case GICR_ICENABLER0:
        gicr_write_clear_bitmap_reg(cs, attrs, &cs->gicr_ienabler0, value);
        return MEMTX_OK;
    case GICR_ISPENDR0:
        gicr_write_set_bitmap_reg(cs, attrs, &cs->gicr_ipendr0, value);
        return MEMTX_OK;
    case GICR_ICPENDR0:
        gicr_write_clear_bitmap_reg(cs, attrs, &cs->gicr_ipendr0, value);
        return MEMTX_OK;
    case GICR_ISACTIVER0:
        gicr_write_set_bitmap_reg(cs, attrs, &cs->gicr_iactiver0, value);
        return MEMTX_OK;
    case GICR_ICACTIVER0:
        gicr_write_clear_bitmap_reg(cs, attrs, &cs->gicr_iactiver0, value);
        return MEMTX_OK;
    case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
    {
        int i, irq = offset - GICR_IPRIORITYR;

        for (i = irq; i < irq + 4; i++, value >>= 8) {
            gicr_write_ipriorityr(cs, attrs, i, value);
        }
        gicv3_redist_update(cs);
        return MEMTX_OK;
    }
    case GICR_INMIR0:
        if (cs->gic->nmi_support) {
            gicr_write_bitmap_reg(cs, attrs, &cs->gicr_inmir0, value);
        }
        return MEMTX_OK;

    case GICR_ICFGR0:
        /* Register is all RAZ/WI or RAO/WI bits */
        return MEMTX_OK;
    case GICR_ICFGR1:
    {
        uint32_t mask;

        /* Since our edge_trigger bitmap is one bit per irq, our input
         * 32-bits will compress down into 16 bits which we need
         * to write into the bitmap.
         */
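        /*
         * This is the inverse of the ICFGR read path: half_unshuffle32()
         * gathers the odd (trigger-type) bits back into a contiguous
         * 16-bit field, and the << 16 places it in the PPI half of
         * edge_trigger.
         */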
        value = half_unshuffle32(value >> 1) << 16;
        mask = mask_group(cs, attrs) & 0xffff0000U;

        cs->edge_trigger &= ~mask;
        cs->edge_trigger |= (value & mask);

        gicv3_redist_update(cs);
        return MEMTX_OK;
    }
    case GICR_IGRPMODR0:
        if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
            /* RAZ/WI if security disabled, or if
             * security enabled and this is an NS access
             */
            return MEMTX_OK;
        }
        cs->gicr_igrpmodr0 = value;
        gicv3_redist_update(cs);
        return MEMTX_OK;
    case GICR_NSACR:
        if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
            /* RAZ/WI if security disabled, or if
             * security enabled and this is an NS access
             */
            return MEMTX_OK;
        }
        cs->gicr_nsacr = value;
        /* no update required as this only affects access permission checks */
        return MEMTX_OK;
    case GICR_IIDR:
    case GICR_TYPER:
    case GICR_IDREGS ... GICR_IDREGS + 0x2f:
        /* RO registers, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      HWADDR_FMT_plx "\n", __func__, offset);
        return MEMTX_OK;
        /*
         * VLPI frame registers. We don't need a version check for
         * VPROPBASER and VPENDBASER because gicv3_redist_size() will
         * prevent pre-v4 GIC from passing us offsets this high.
         */
    case GICR_VPROPBASER:
        cs->gicr_vpropbaser = deposit64(cs->gicr_vpropbaser, 0, 32, value);
        return MEMTX_OK;
    case GICR_VPROPBASER + 4:
        cs->gicr_vpropbaser = deposit64(cs->gicr_vpropbaser, 32, 32, value);
        return MEMTX_OK;
    case GICR_VPENDBASER:
        gicr_write_vpendbaser(cs, deposit64(cs->gicr_vpendbaser, 0, 32, value));
        return MEMTX_OK;
    case GICR_VPENDBASER + 4:
        gicr_write_vpendbaser(cs, deposit64(cs->gicr_vpendbaser, 32, 32, value));
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

static MemTxResult gicr_readll(GICv3CPUState *cs, hwaddr offset,
                               uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_TYPER:
        *data = cs->gicr_typer;
        return MEMTX_OK;
    case GICR_PROPBASER:
        *data = cs->gicr_propbaser;
        return MEMTX_OK;
    case GICR_PENDBASER:
        *data = cs->gicr_pendbaser;
        return MEMTX_OK;
        /*
         * VLPI frame registers. We don't need a version check for
         * VPROPBASER and VPENDBASER because gicv3_redist_size() will
         * prevent pre-v4 GIC from passing us offsets this high.
         */
    case GICR_VPROPBASER:
        *data = cs->gicr_vpropbaser;
        return MEMTX_OK;
    case GICR_VPENDBASER:
        *data = cs->gicr_vpendbaser;
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

static MemTxResult gicr_writell(GICv3CPUState *cs, hwaddr offset,
                                uint64_t value, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_PROPBASER:
        cs->gicr_propbaser = value;
        return MEMTX_OK;
    case GICR_PENDBASER:
        cs->gicr_pendbaser = value;
        return MEMTX_OK;
    case GICR_TYPER:
        /* RO register, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      HWADDR_FMT_plx "\n", __func__, offset);
        return MEMTX_OK;
        /*
         * VLPI frame registers. We don't need a version check for
         * VPROPBASER and VPENDBASER because gicv3_redist_size() will
         * prevent pre-v4 GIC from passing us offsets this high.
         */
    case GICR_VPROPBASER:
        cs->gicr_vpropbaser = value;
        return MEMTX_OK;
    case GICR_VPENDBASER:
        gicr_write_vpendbaser(cs, value);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

MemTxResult gicv3_redist_read(void *opaque, hwaddr offset, uint64_t *data,
                              unsigned size, MemTxAttrs attrs)
{
    GICv3RedistRegion *region = opaque;
    GICv3State *s = region->gic;
    GICv3CPUState *cs;
    MemTxResult r;
    int cpuidx;

    assert((offset & (size - 1)) == 0);

    /*
     * There are (for GICv3) two 64K redistributor pages per CPU.
     * In some cases the redistributor pages for all CPUs are not
     * contiguous (eg on the virt board they are split into two
     * parts if there are too many CPUs to all fit in the same place
     * in the memory map); if so then the GIC has multiple MemoryRegions
     * for the redistributors.
     */
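    /*
     * Worked example (GICv3, where gicv3_redist_size() is 2 * 64KB ==
     * 0x20000): an access at offset 0x30004 into this region hits
     * cpuidx == region->cpuidx + 1, at offset 0x10004 within that
     * CPU's pair of pages.
     */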
    cpuidx = region->cpuidx + offset / gicv3_redist_size(s);
    offset %= gicv3_redist_size(s);

    cs = &s->cpu[cpuidx];

    switch (size) {
    case 1:
        r = gicr_readb(cs, offset, data, attrs);
        break;
    case 4:
        r = gicr_readl(cs, offset, data, attrs);
        break;
    case 8:
        r = gicr_readll(cs, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    if (r != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest read at offset " HWADDR_FMT_plx
                      " size %u\n", __func__, offset, size);
        trace_gicv3_redist_badread(gicv3_redist_affid(cs), offset,
                                   size, attrs.secure);
        /* The spec requires that reserved registers are RAZ/WI;
         * so use MEMTX_ERROR returns from leaf functions as a way to
         * trigger the guest-error logging but don't return it to
         * the caller, or we'll cause a spurious guest data abort.
         */
        r = MEMTX_OK;
        *data = 0;
    } else {
        trace_gicv3_redist_read(gicv3_redist_affid(cs), offset, *data,
                                size, attrs.secure);
    }
    return r;
}

MemTxResult gicv3_redist_write(void *opaque, hwaddr offset, uint64_t data,
                               unsigned size, MemTxAttrs attrs)
{
    GICv3RedistRegion *region = opaque;
    GICv3State *s = region->gic;
    GICv3CPUState *cs;
    MemTxResult r;
    int cpuidx;

    assert((offset & (size - 1)) == 0);

    /*
     * There are (for GICv3) two 64K redistributor pages per CPU.
     * In some cases the redistributor pages for all CPUs are not
     * contiguous (eg on the virt board they are split into two
     * parts if there are too many CPUs to all fit in the same place
     * in the memory map); if so then the GIC has multiple MemoryRegions
     * for the redistributors.
     */
    cpuidx = region->cpuidx + offset / gicv3_redist_size(s);
    offset %= gicv3_redist_size(s);

    cs = &s->cpu[cpuidx];

    switch (size) {
    case 1:
        r = gicr_writeb(cs, offset, data, attrs);
        break;
    case 4:
        r = gicr_writel(cs, offset, data, attrs);
        break;
    case 8:
        r = gicr_writell(cs, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    if (r != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write at offset " HWADDR_FMT_plx
                      " size %u\n", __func__, offset, size);
        trace_gicv3_redist_badwrite(gicv3_redist_affid(cs), offset, data,
                                    size, attrs.secure);
        /* The spec requires that reserved registers are RAZ/WI;
         * so use MEMTX_ERROR returns from leaf functions as a way to
         * trigger the guest-error logging but don't return it to
         * the caller, or we'll cause a spurious guest data abort.
         */
        r = MEMTX_OK;
    } else {
        trace_gicv3_redist_write(gicv3_redist_affid(cs), offset, data,
                                 size, attrs.secure);
    }
    return r;
}

static void gicv3_redist_check_lpi_priority(GICv3CPUState *cs, int irq)
{
    uint64_t lpict_baddr = cs->gicr_propbaser & R_GICR_PROPBASER_PHYADDR_MASK;

    update_for_one_lpi(cs, irq, lpict_baddr,
                       cs->gic->gicd_ctlr & GICD_CTLR_DS,
                       &cs->hpplpi);
}

void gicv3_redist_update_lpi_only(GICv3CPUState *cs)
{
    /*
     * This function scans the LPI pending table and, for each pending
     * LPI, reads the corresponding entry from the LPI configuration
     * table to extract the enable and priority info, tracking the
     * highest priority pending enabled LPI seen so far; that LPI is
     * recorded in cs->hpplpi.
     */
    uint64_t lpipt_baddr, lpict_baddr;
    uint64_t idbits;

    idbits = MIN(FIELD_EX64(cs->gicr_propbaser, GICR_PROPBASER, IDBITS),
                 GICD_TYPER_IDBITS);

    if (!(cs->gicr_ctlr & GICR_CTLR_ENABLE_LPIS)) {
        return;
    }

    lpipt_baddr = cs->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;
    lpict_baddr = cs->gicr_propbaser & R_GICR_PROPBASER_PHYADDR_MASK;

    update_for_all_lpis(cs, lpipt_baddr, lpict_baddr, idbits,
                        cs->gic->gicd_ctlr & GICD_CTLR_DS, &cs->hpplpi);
}

void gicv3_redist_update_lpi(GICv3CPUState *cs)
{
    gicv3_redist_update_lpi_only(cs);
    gicv3_redist_update(cs);
}

void gicv3_redist_lpi_pending(GICv3CPUState *cs, int irq, int level)
{
    /*
     * This function updates the pending bit in the LPI pending table for
     * the irq being activated or deactivated.
     */
    uint64_t lpipt_baddr;

    lpipt_baddr = cs->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;
    if (!set_pending_table_bit(cs, lpipt_baddr, irq, level)) {
        /* no change in the value of the pending bit; nothing to do */
        return;
    }

    /*
     * Check whether this LPI is better than the current hpplpi; if so,
     * we can just set hpplpi.prio and .irq without doing a full rescan.
     */
    if (level) {
        gicv3_redist_check_lpi_priority(cs, irq);
        gicv3_redist_update(cs);
    } else {
        if (irq == cs->hpplpi.irq) {
            gicv3_redist_update_lpi(cs);
        }
    }
}

void gicv3_redist_process_lpi(GICv3CPUState *cs, int irq, int level)
{
    uint64_t idbits;

    idbits = MIN(FIELD_EX64(cs->gicr_propbaser, GICR_PROPBASER, IDBITS),
                 GICD_TYPER_IDBITS);

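    /*
     * Illustrative range check: with GICR_PROPBASER.IDBits == 15 the
     * valid LPI INTIDs are GICV3_LPI_INTID_START (8192) up to
     * (1 << 16) - 1 == 65535; anything outside that is silently ignored.
     */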
    if (!(cs->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) ||
        (irq > (1ULL << (idbits + 1)) - 1) || irq < GICV3_LPI_INTID_START) {
        return;
    }

    /* set/clear the pending bit for this irq */
    gicv3_redist_lpi_pending(cs, irq, level);
}

void gicv3_redist_inv_lpi(GICv3CPUState *cs, int irq)
{
    /*
     * The only cached information for LPIs we have is the HPPLPI.
     * We could be cleverer about identifying when we don't need
     * to do a full rescan of the pending table, but until we find
     * this is a performance issue, just always recalculate.
     */
    gicv3_redist_update_lpi(cs);
}

void gicv3_redist_mov_lpi(GICv3CPUState *src, GICv3CPUState *dest, int irq)
{
    /*
     * Move the specified LPI's pending state from the source redistributor
     * to the destination.
     *
     * If LPIs are disabled on dest this is CONSTRAINED UNPREDICTABLE:
     * we choose to NOP. If LPIs are disabled on source there's nothing
     * to be transferred anyway.
     */
    uint64_t idbits;
    uint32_t pendt_size;
    uint64_t src_baddr;

    if (!(src->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) ||
        !(dest->gicr_ctlr & GICR_CTLR_ENABLE_LPIS)) {
        return;
    }

    idbits = MIN(FIELD_EX64(src->gicr_propbaser, GICR_PROPBASER, IDBITS),
                 GICD_TYPER_IDBITS);
    idbits = MIN(FIELD_EX64(dest->gicr_propbaser, GICR_PROPBASER, IDBITS),
                 idbits);

    pendt_size = 1ULL << (idbits + 1);
    if ((irq / 8) >= pendt_size) {
        return;
    }

    src_baddr = src->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;

    if (!set_pending_table_bit(src, src_baddr, irq, 0)) {
        /* Not pending on source, nothing to do */
        return;
    }
    if (irq == src->hpplpi.irq) {
        /*
         * We just made this LPI not-pending so only need to update
         * if it was previously the highest priority pending LPI
         */
        gicv3_redist_update_lpi(src);
    }
    /* Mark it pending on the destination */
    gicv3_redist_lpi_pending(dest, irq, 1);
}

void gicv3_redist_movall_lpis(GICv3CPUState *src, GICv3CPUState *dest)
{
    /*
     * We must move all pending LPIs from the source redistributor
     * to the destination. That is, for every pending LPI X on
     * src, we must set it not-pending on src and pending on dest.
     * LPIs that are already pending on dest are not cleared.
     *
     * If LPIs are disabled on dest this is CONSTRAINED UNPREDICTABLE:
     * we choose to NOP. If LPIs are disabled on source there's nothing
     * to be transferred anyway.
     */
    AddressSpace *as = &src->gic->dma_as;
    uint64_t idbits;
    uint32_t pendt_size;
    uint64_t src_baddr, dest_baddr;
    int i;

    if (!(src->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) ||
        !(dest->gicr_ctlr & GICR_CTLR_ENABLE_LPIS)) {
        return;
    }

    idbits = MIN(FIELD_EX64(src->gicr_propbaser, GICR_PROPBASER, IDBITS),
                 GICD_TYPER_IDBITS);
    idbits = MIN(FIELD_EX64(dest->gicr_propbaser, GICR_PROPBASER, IDBITS),
                 idbits);

    pendt_size = 1ULL << (idbits + 1);
    src_baddr = src->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;
    dest_baddr = dest->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;

    for (i = GICV3_LPI_INTID_START / 8; i < pendt_size / 8; i++) {
        uint8_t src_pend, dest_pend;

        address_space_read(as, src_baddr + i, MEMTXATTRS_UNSPECIFIED,
                           &src_pend, sizeof(src_pend));
        if (!src_pend) {
            continue;
        }
        address_space_read(as, dest_baddr + i, MEMTXATTRS_UNSPECIFIED,
                           &dest_pend, sizeof(dest_pend));
        dest_pend |= src_pend;
        src_pend = 0;
        address_space_write(as, src_baddr + i, MEMTXATTRS_UNSPECIFIED,
                            &src_pend, sizeof(src_pend));
        address_space_write(as, dest_baddr + i, MEMTXATTRS_UNSPECIFIED,
                            &dest_pend, sizeof(dest_pend));
    }

    gicv3_redist_update_lpi(src);
    gicv3_redist_update_lpi(dest);
}

void gicv3_redist_vlpi_pending(GICv3CPUState *cs, int irq, int level)
{
    /*
     * Change the pending state of the specified vLPI.
     * Unlike gicv3_redist_process_vlpi(), we know here that the
     * vCPU is definitely resident on this redistributor, and that
     * the irq is in range.
     */
    uint64_t vptbase, ctbase;

    vptbase = FIELD_EX64(cs->gicr_vpendbaser, GICR_VPENDBASER, PHYADDR) << 16;

    if (set_pending_table_bit(cs, vptbase, irq, level)) {
        if (level) {
            /* Check whether this vLPI is now the best */
            ctbase = cs->gicr_vpropbaser & R_GICR_VPROPBASER_PHYADDR_MASK;
            update_for_one_lpi(cs, irq, ctbase, true, &cs->hppvlpi);
            gicv3_cpuif_virt_irq_fiq_update(cs);
        } else {
            /* Only need to recalculate if this was previously the best vLPI */
            if (irq == cs->hppvlpi.irq) {
                gicv3_redist_update_vlpi(cs);
            }
        }
    }
}

void gicv3_redist_process_vlpi(GICv3CPUState *cs, int irq, uint64_t vptaddr,
                               int doorbell, int level)
{
    bool bit_changed;
    bool resident = vcpu_resident(cs, vptaddr);
    uint64_t ctbase;

    if (resident) {
        uint32_t idbits = FIELD_EX64(cs->gicr_vpropbaser, GICR_VPROPBASER, IDBITS);
        if (irq >= (1ULL << (idbits + 1))) {
            return;
        }
    }

    bit_changed = set_pending_table_bit(cs, vptaddr, irq, level);
    if (resident && bit_changed) {
        if (level) {
            /* Check whether this vLPI is now the best */
            ctbase = cs->gicr_vpropbaser & R_GICR_VPROPBASER_PHYADDR_MASK;
            update_for_one_lpi(cs, irq, ctbase, true, &cs->hppvlpi);
            gicv3_cpuif_virt_irq_fiq_update(cs);
        } else {
            /* Only need to recalculate if this was previously the best vLPI */
            if (irq == cs->hppvlpi.irq) {
                gicv3_redist_update_vlpi(cs);
            }
        }
    }

    if (!resident && level && doorbell != INTID_SPURIOUS &&
        (cs->gicr_ctlr & GICR_CTLR_ENABLE_LPIS)) {
        /* vCPU is not currently resident: ring the doorbell */
        gicv3_redist_process_lpi(cs, doorbell, 1);
    }
}

void gicv3_redist_mov_vlpi(GICv3CPUState *src, uint64_t src_vptaddr,
                           GICv3CPUState *dest, uint64_t dest_vptaddr,
                           int irq, int doorbell)
{
    /*
     * Move the specified vLPI's pending state from the source redistributor
     * to the destination.
     */
    if (!set_pending_table_bit(src, src_vptaddr, irq, 0)) {
        /* Not pending on source, nothing to do */
        return;
    }
    if (vcpu_resident(src, src_vptaddr) && irq == src->hppvlpi.irq) {
        /*
         * Update src's cached highest-priority pending vLPI if we just made
         * it not-pending
         */
        gicv3_redist_update_vlpi(src);
    }
    /*
     * Mark the vLPI pending on the destination (ringing the doorbell
     * if the vCPU isn't resident)
     */
    gicv3_redist_process_vlpi(dest, irq, dest_vptaddr, doorbell, 1);
}

void gicv3_redist_vinvall(GICv3CPUState *cs, uint64_t vptaddr)
{
    if (!vcpu_resident(cs, vptaddr)) {
        /* We don't have anything cached if the vCPU isn't resident */
        return;
    }

    /* Otherwise, our only cached information is the HPPVLPI info */
    gicv3_redist_update_vlpi(cs);
}

void gicv3_redist_inv_vlpi(GICv3CPUState *cs, int irq, uint64_t vptaddr)
{
    /*
     * The only cached information for vLPIs we have is the HPPVLPI.
     * We could be cleverer about identifying when we don't need
     * to do a full rescan of the pending table, but until we find
     * this is a performance issue, just always recalculate.
     */
    gicv3_redist_vinvall(cs, vptaddr);
}

void gicv3_redist_set_irq(GICv3CPUState *cs, int irq, int level)
{
    /* Update redistributor state for a change in an external PPI input line */
    if (level == extract32(cs->level, irq, 1)) {
        return;
    }

    trace_gicv3_redist_set_irq(gicv3_redist_affid(cs), irq, level);

    cs->level = deposit32(cs->level, irq, 1, level);

    if (level) {
        /* 0->1 edges latch the pending bit for edge-triggered interrupts */
        if (extract32(cs->edge_trigger, irq, 1)) {
            cs->gicr_ipendr0 = deposit32(cs->gicr_ipendr0, irq, 1, 1);
        }
    }

    gicv3_redist_update(cs);
}

void gicv3_redist_send_sgi(GICv3CPUState *cs, int grp, int irq, bool ns)
{
    /* Update redistributor state for a generated SGI */
    int irqgrp = gicv3_irq_group(cs->gic, cs, irq);

    /* If we are asked for a Secure Group 1 SGI and it's actually
     * configured as Secure Group 0 this is OK (subject to the usual
     * NSACR checks).
     */
    if (grp == GICV3_G1 && irqgrp == GICV3_G0) {
        grp = GICV3_G0;
    }

    if (grp != irqgrp) {
        return;
    }

    if (ns && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
        /* If security is enabled we must test the NSACR bits */
        int nsaccess = gicr_ns_access(cs, irq);

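        /*
         * Reading of the check below: each 2-bit GICR_NSACR field means
         * 0 == NS generation of this SGI not permitted, 1 == permitted
         * if the SGI is Group 0, 2 or 3 == also permitted if it is
         * Secure Group 1.
         */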
        if ((irqgrp == GICV3_G0 && nsaccess < 1) ||
            (irqgrp == GICV3_G1 && nsaccess < 2)) {
            return;
        }
    }

    /* OK, we can accept the SGI */
    trace_gicv3_redist_send_sgi(gicv3_redist_affid(cs), irq);
    cs->gicr_ipendr0 = deposit32(cs->gicr_ipendr0, irq, 1, 1);
    gicv3_redist_update(cs);
}