/*
 * ARM GICv3 emulation: Redistributor
 *
 * Copyright (c) 2015 Huawei.
 * Copyright (c) 2016 Linaro Limited.
 * Written by Shlomo Pongratz, Peter Maydell
 *
 * This code is licensed under the GPL, version 2 or (at your option)
 * any later version.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "trace.h"
#include "gicv3_internal.h"

static uint32_t mask_group(GICv3CPUState *cs, MemTxAttrs attrs)
{
    /* Return a 32-bit mask which should be applied for this set of 32
     * interrupts; each bit is 1 if access is permitted by the
     * combination of attrs.secure and GICR_GROUPR. (GICR_NSACR does
     * not affect config register accesses, unlike GICD_NSACR.)
     */
    if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
        /* bits for Group 0 or Secure Group 1 interrupts are RAZ/WI */
        return cs->gicr_igroupr0;
    }
    return 0xFFFFFFFFU;
}
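
/*
 * Illustrative example of the mask: with GICD_CTLR.DS == 0, a Non-secure
 * access when gicr_igroupr0 == 0xffff0000 may only observe and modify
 * bits 16..31 of the bitmap and config registers below; bits 0..15
 * belong to Group 0 or Secure Group 1 interrupts and are RAZ/WI for it.
 */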

static int gicr_ns_access(GICv3CPUState *cs, int irq)
{
    /* Return the 2 bit NSACR.NS_access field for this SGI */
    assert(irq < 16);
    return extract32(cs->gicr_nsacr, irq * 2, 2);
}

static void gicr_write_set_bitmap_reg(GICv3CPUState *cs, MemTxAttrs attrs,
                                      uint32_t *reg, uint32_t val)
{
    /* Helper routine to implement writing to a "set-bitmap" register */
    val &= mask_group(cs, attrs);
    *reg |= val;
    gicv3_redist_update(cs);
}

static void gicr_write_clear_bitmap_reg(GICv3CPUState *cs, MemTxAttrs attrs,
                                        uint32_t *reg, uint32_t val)
{
    /* Helper routine to implement writing to a "clear-bitmap" register */
    val &= mask_group(cs, attrs);
    *reg &= ~val;
    gicv3_redist_update(cs);
}

static uint32_t gicr_read_bitmap_reg(GICv3CPUState *cs, MemTxAttrs attrs,
                                     uint32_t reg)
{
    reg &= mask_group(cs, attrs);
    return reg;
}

/**
 * update_for_one_lpi: Update pending information if this LPI is better
 *
 * @cs: GICv3CPUState
 * @irq: interrupt to look up in the LPI Configuration table
 * @ctbase: physical address of the LPI Configuration table to use
 * @ds: true if priority value should not be shifted
 * @hpp: points to pending information to update
 *
 * Look up @irq in the Configuration table specified by @ctbase
 * to see if it is enabled and what its priority is. If it is an
 * enabled interrupt with a higher priority than that currently
 * recorded in @hpp, update @hpp.
 */
static void update_for_one_lpi(GICv3CPUState *cs, int irq,
                               uint64_t ctbase, bool ds, PendingIrq *hpp)
{
    uint8_t lpite;
    uint8_t prio;

    address_space_read(&cs->gic->dma_as,
                       ctbase + ((irq - GICV3_LPI_INTID_START) * sizeof(lpite)),
                       MEMTXATTRS_UNSPECIFIED, &lpite, sizeof(lpite));

    if (!(lpite & LPI_CTE_ENABLED)) {
        return;
    }

    if (ds) {
        prio = lpite & LPI_PRIORITY_MASK;
    } else {
        prio = ((lpite & LPI_PRIORITY_MASK) >> 1) | 0x80;
    }

    if ((prio < hpp->prio) ||
        ((prio == hpp->prio) && (irq <= hpp->irq))) {
        hpp->irq = irq;
        hpp->prio = prio;
        /* LPIs and vLPIs are always non-secure Grp1 interrupts */
        hpp->grp = GICV3_G1NS;
    }
}
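
/*
 * Worked example, assuming the architected one-byte config table entry
 * layout (priority in bits [7:2], enable in bit [0]) that the LPI_*
 * masks are expected to encode: an entry of 0xa1 is an enabled LPI with
 * priority 0xa0; with GICD_CTLR.DS == 0 it is presented in its
 * Non-secure view as (0xa0 >> 1) | 0x80 == 0xd0, per the code above.
 */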

/**
 * update_for_all_lpis: Fully scan LPI tables and find best pending LPI
 *
 * @cs: GICv3CPUState
 * @ptbase: physical address of LPI Pending table
 * @ctbase: physical address of LPI Configuration table
 * @ptsizebits: size of tables, specified as number of interrupt ID bits minus 1
 * @ds: true if priority value should not be shifted
 * @hpp: points to pending information to set
 *
 * Recalculate the highest priority pending enabled LPI from scratch,
 * and set @hpp accordingly.
 *
 * We scan the LPI pending table @ptbase; for each pending LPI, we read the
 * corresponding entry in the LPI configuration table @ctbase to extract
 * the priority and enabled information.
 *
 * We take @ptsizebits in the form idbits-1 because this is the way that
 * LPI table sizes are architecturally specified in GICR_PROPBASER.IDBits
 * and in the VMAPP command's VPT_size field.
 */
static void update_for_all_lpis(GICv3CPUState *cs, uint64_t ptbase,
                                uint64_t ctbase, unsigned ptsizebits,
                                bool ds, PendingIrq *hpp)
{
    AddressSpace *as = &cs->gic->dma_as;
    uint8_t pend;
    uint32_t pendt_size = (1ULL << (ptsizebits + 1));
    int i, bit;

    hpp->prio = 0xff;

    for (i = GICV3_LPI_INTID_START / 8; i < pendt_size / 8; i++) {
        address_space_read(as, ptbase + i, MEMTXATTRS_UNSPECIFIED, &pend, 1);
        while (pend) {
            bit = ctz32(pend);
            update_for_one_lpi(cs, i * 8 + bit, ctbase, ds, hpp);
            pend &= ~(1 << bit);
        }
    }
}
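
/*
 * For example, if GICR_PROPBASER.IDBits is 15 (i.e. 16 interrupt ID
 * bits) then @ptsizebits is 15, pendt_size is 1 << 16 == 65536 IDs and
 * the pending table spans 8192 bytes; the scan starts at byte
 * GICV3_LPI_INTID_START / 8 == 1024 because IDs below 8192 are not LPIs.
 */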

/**
 * set_pending_table_bit: Set or clear pending bit for an LPI
 *
 * @cs: GICv3CPUState
 * @ptbase: physical address of LPI Pending table
 * @irq: LPI to change pending state for
 * @level: false to clear pending state, true to set
 *
 * Returns true if we needed to do something, false if the pending bit
 * was already at @level.
 */
static bool set_pending_table_bit(GICv3CPUState *cs, uint64_t ptbase,
                                  int irq, bool level)
{
    AddressSpace *as = &cs->gic->dma_as;
    uint64_t addr = ptbase + irq / 8;
    uint8_t pend;

    address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED, &pend, 1);
    if (extract32(pend, irq % 8, 1) == level) {
        /* Bit already at requested state, no action required */
        return false;
    }
    pend = deposit32(pend, irq % 8, 1, level ? 1 : 0);
    address_space_write(as, addr, MEMTXATTRS_UNSPECIFIED, &pend, 1);
    return true;
}
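
/*
 * For example, LPI 8197 lives at byte ptbase + 8197 / 8 == ptbase + 1024,
 * bit 8197 % 8 == 5: the helper reads that byte, flips the bit if needed,
 * writes the byte back, and returns true only if the bit actually changed.
 */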

static uint8_t gicr_read_ipriorityr(GICv3CPUState *cs, MemTxAttrs attrs,
                                    int irq)
{
    /* Read the value of GICR_IPRIORITYR<n> for the specified interrupt,
     * honouring security state (these are RAZ/WI for Group 0 or Secure
     * Group 1 interrupts).
     */
    uint32_t prio;

    prio = cs->gicr_ipriorityr[irq];

    if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
        if (!(cs->gicr_igroupr0 & (1U << irq))) {
            /* Fields for Group 0 or Secure Group 1 interrupts are RAZ/WI */
            return 0;
        }
        /* NS view of the interrupt priority */
        prio = (prio << 1) & 0xff;
    }
    return prio;
}

static void gicr_write_ipriorityr(GICv3CPUState *cs, MemTxAttrs attrs, int irq,
                                  uint8_t value)
{
    /* Write the value of GICR_IPRIORITYR<n> for the specified interrupt,
     * honouring security state (these are RAZ/WI for Group 0 or Secure
     * Group 1 interrupts).
     */
    if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
        if (!(cs->gicr_igroupr0 & (1U << irq))) {
            /* Fields for Group 0 or Secure Group 1 interrupts are RAZ/WI */
            return;
        }
        /* NS view of the interrupt priority */
        value = 0x80 | (value >> 1);
    }
    cs->gicr_ipriorityr[irq] = value;
}
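
/*
 * Illustrative round trip of the Non-secure priority view (DS == 0,
 * Group 1NS interrupt): a Non-secure write of 0x80 is stored as
 * 0x80 | (0x80 >> 1) == 0xc0; a Non-secure read then returns
 * (0xc0 << 1) & 0xff == 0x80, while a Secure read sees the full 0xc0.
 */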

static void gicv3_redist_update_vlpi_only(GICv3CPUState *cs)
{
    uint64_t ptbase, ctbase, idbits;

    if (!FIELD_EX64(cs->gicr_vpendbaser, GICR_VPENDBASER, VALID)) {
        cs->hppvlpi.prio = 0xff;
        return;
    }

    ptbase = cs->gicr_vpendbaser & R_GICR_VPENDBASER_PHYADDR_MASK;
    ctbase = cs->gicr_vpropbaser & R_GICR_VPROPBASER_PHYADDR_MASK;
    idbits = FIELD_EX64(cs->gicr_vpropbaser, GICR_VPROPBASER, IDBITS);

    update_for_all_lpis(cs, ptbase, ctbase, idbits, true, &cs->hppvlpi);
}

static void gicv3_redist_update_vlpi(GICv3CPUState *cs)
{
    gicv3_redist_update_vlpi_only(cs);
    gicv3_cpuif_virt_irq_fiq_update(cs);
}

static void gicr_write_vpendbaser(GICv3CPUState *cs, uint64_t newval)
{
    /* Write @newval to GICR_VPENDBASER, handling its effects */
    bool oldvalid = FIELD_EX64(cs->gicr_vpendbaser, GICR_VPENDBASER, VALID);
    bool newvalid = FIELD_EX64(newval, GICR_VPENDBASER, VALID);
    bool pendinglast;

    /*
     * The DIRTY bit is read-only and for us is always zero;
     * other fields are writeable.
     */
    newval &= R_GICR_VPENDBASER_INNERCACHE_MASK |
        R_GICR_VPENDBASER_SHAREABILITY_MASK |
        R_GICR_VPENDBASER_PHYADDR_MASK |
        R_GICR_VPENDBASER_OUTERCACHE_MASK |
        R_GICR_VPENDBASER_PENDINGLAST_MASK |
        R_GICR_VPENDBASER_IDAI_MASK |
        R_GICR_VPENDBASER_VALID_MASK;

    if (oldvalid && newvalid) {
        /*
         * Changing other fields while VALID is 1 is UNPREDICTABLE;
         * we choose to log and ignore the write.
         */
        if (cs->gicr_vpendbaser ^ newval) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: Changing GICR_VPENDBASER when VALID=1 "
                          "is UNPREDICTABLE\n", __func__);
        }
        return;
    }
    if (!oldvalid && !newvalid) {
        cs->gicr_vpendbaser = newval;
        return;
    }

    if (newvalid) {
        /*
         * Valid going from 0 to 1: update hppvlpi from tables.
         * If IDAI is 0 we are allowed to use the info we cached in
         * the IMPDEF area of the table.
         * PendingLast is RES1 when we make this transition.
         */
        pendinglast = true;
    } else {
        /*
         * Valid going from 1 to 0:
         * Set PendingLast if there was a pending enabled interrupt
         * for the vPE that was just descheduled.
         * If we cache info in the IMPDEF area, write it out here.
         */
        pendinglast = cs->hppvlpi.prio != 0xff;
    }

    newval = FIELD_DP64(newval, GICR_VPENDBASER, PENDINGLAST, pendinglast);
    cs->gicr_vpendbaser = newval;
    gicv3_redist_update_vlpi(cs);
}
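
/*
 * Summary of the VALID transitions handled above (reflecting the
 * implementation choices in this function, not spec text): 1->1 with
 * any other field changed is logged and ignored; 0->0 simply latches
 * the new value; 0->1 makes the vPE resident, recalculates hppvlpi from
 * the tables and forces PendingLast to 1; 1->0 deschedules the vPE and
 * sets PendingLast only if an enabled vLPI is still pending for it.
 */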

static MemTxResult gicr_readb(GICv3CPUState *cs, hwaddr offset,
                              uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
        *data = gicr_read_ipriorityr(cs, attrs, offset - GICR_IPRIORITYR);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

static MemTxResult gicr_writeb(GICv3CPUState *cs, hwaddr offset,
                               uint64_t value, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
        gicr_write_ipriorityr(cs, attrs, offset - GICR_IPRIORITYR, value);
        gicv3_redist_update(cs);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

static MemTxResult gicr_readl(GICv3CPUState *cs, hwaddr offset,
                              uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_CTLR:
        *data = cs->gicr_ctlr;
        return MEMTX_OK;
    case GICR_IIDR:
        *data = gicv3_iidr();
        return MEMTX_OK;
    case GICR_TYPER:
        *data = extract64(cs->gicr_typer, 0, 32);
        return MEMTX_OK;
    case GICR_TYPER + 4:
        *data = extract64(cs->gicr_typer, 32, 32);
        return MEMTX_OK;
    case GICR_STATUSR:
        /* RAZ/WI for us (this is an optional register and our implementation
         * does not track RO/WO/reserved violations to report them to the guest)
         */
        *data = 0;
        return MEMTX_OK;
    case GICR_WAKER:
        *data = cs->gicr_waker;
        return MEMTX_OK;
    case GICR_PROPBASER:
        *data = extract64(cs->gicr_propbaser, 0, 32);
        return MEMTX_OK;
    case GICR_PROPBASER + 4:
        *data = extract64(cs->gicr_propbaser, 32, 32);
        return MEMTX_OK;
    case GICR_PENDBASER:
        *data = extract64(cs->gicr_pendbaser, 0, 32);
        return MEMTX_OK;
    case GICR_PENDBASER + 4:
        *data = extract64(cs->gicr_pendbaser, 32, 32);
        return MEMTX_OK;
    case GICR_IGROUPR0:
        if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
            *data = 0;
            return MEMTX_OK;
        }
        *data = cs->gicr_igroupr0;
        return MEMTX_OK;
    case GICR_ISENABLER0:
    case GICR_ICENABLER0:
        *data = gicr_read_bitmap_reg(cs, attrs, cs->gicr_ienabler0);
        return MEMTX_OK;
    case GICR_ISPENDR0:
    case GICR_ICPENDR0:
    {
        /* The pending register reads as the logical OR of the pending
         * latch and the input line level for level-triggered interrupts.
         */
        uint32_t val = cs->gicr_ipendr0 | (~cs->edge_trigger & cs->level);
        *data = gicr_read_bitmap_reg(cs, attrs, val);
        return MEMTX_OK;
    }
    case GICR_ISACTIVER0:
    case GICR_ICACTIVER0:
        *data = gicr_read_bitmap_reg(cs, attrs, cs->gicr_iactiver0);
        return MEMTX_OK;
    case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
    {
        int i, irq = offset - GICR_IPRIORITYR;
        uint32_t value = 0;

        for (i = irq + 3; i >= irq; i--) {
            value <<= 8;
            value |= gicr_read_ipriorityr(cs, attrs, i);
        }
        *data = value;
        return MEMTX_OK;
    }
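    /*
     * The loop above packs one priority byte per interrupt, lowest
     * interrupt in the least significant byte: e.g. a 32-bit read at
     * GICR_IPRIORITYR + 4 returns
     * prio[7] << 24 | prio[6] << 16 | prio[5] << 8 | prio[4].
     */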
    case GICR_ICFGR0:
    case GICR_ICFGR1:
    {
        /* Our edge_trigger bitmap is one bit per irq; take the correct
         * half of it, and spread it out into the odd bits.
         */
        uint32_t value;

        value = cs->edge_trigger & mask_group(cs, attrs);
        value = extract32(value, (offset == GICR_ICFGR1) ? 16 : 0, 16);
        value = half_shuffle32(value) << 1;
        *data = value;
        return MEMTX_OK;
    }
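    /*
     * Example of the expansion above: if the selected half of
     * edge_trigger is 0x0005 (interrupts 0 and 2 edge-triggered),
     * half_shuffle32(0x0005) == 0x00000011 and the shift gives
     * 0x00000022, i.e. bit 2n+1 of each interrupt's 2-bit field is set
     * for edge-triggered interrupts, as the register format requires.
     */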
    case GICR_IGRPMODR0:
        if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
            /* RAZ/WI if security disabled, or if
             * security enabled and this is an NS access
             */
            *data = 0;
            return MEMTX_OK;
        }
        *data = cs->gicr_igrpmodr0;
        return MEMTX_OK;
    case GICR_NSACR:
        if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
            /* RAZ/WI if security disabled, or if
             * security enabled and this is an NS access
             */
            *data = 0;
            return MEMTX_OK;
        }
        *data = cs->gicr_nsacr;
        return MEMTX_OK;
    case GICR_IDREGS ... GICR_IDREGS + 0x2f:
        *data = gicv3_idreg(offset - GICR_IDREGS, GICV3_PIDR0_REDIST);
        return MEMTX_OK;
        /*
         * VLPI frame registers. We don't need a version check for
         * VPROPBASER and VPENDBASER because gicv3_redist_size() will
         * prevent pre-v4 GIC from passing us offsets this high.
         */
    case GICR_VPROPBASER:
        *data = extract64(cs->gicr_vpropbaser, 0, 32);
        return MEMTX_OK;
    case GICR_VPROPBASER + 4:
        *data = extract64(cs->gicr_vpropbaser, 32, 32);
        return MEMTX_OK;
    case GICR_VPENDBASER:
        *data = extract64(cs->gicr_vpendbaser, 0, 32);
        return MEMTX_OK;
    case GICR_VPENDBASER + 4:
        *data = extract64(cs->gicr_vpendbaser, 32, 32);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

static MemTxResult gicr_writel(GICv3CPUState *cs, hwaddr offset,
                               uint64_t value, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_CTLR:
        /* For our implementation, GICR_TYPER.DPGS is 0 and so all
         * the DPG bits are RAZ/WI. We don't do anything asynchronously,
         * so UWP and RWP are RAZ/WI. GICR_TYPER.PLPIS is 1 (we
         * implement LPIs) so Enable_LPIs is programmable.
         */
        if (cs->gicr_typer & GICR_TYPER_PLPIS) {
            if (value & GICR_CTLR_ENABLE_LPIS) {
                cs->gicr_ctlr |= GICR_CTLR_ENABLE_LPIS;
                /* Check for any pending interrupts in the pending table */
                gicv3_redist_update_lpi(cs);
            } else {
                cs->gicr_ctlr &= ~GICR_CTLR_ENABLE_LPIS;
                /* cs->hppi might have been an LPI; recalculate */
                gicv3_redist_update(cs);
            }
        }
        return MEMTX_OK;
    case GICR_STATUSR:
        /* RAZ/WI for our implementation */
        return MEMTX_OK;
    case GICR_WAKER:
        /* Only the ProcessorSleep bit is writeable. When the guest sets
         * it, it requests that we transition the channel between the
         * redistributor and the cpu interface to quiescent, and that
         * we set the ChildrenAsleep bit once the interface has reached the
         * quiescent state.
         * Setting ProcessorSleep to 0 reverses the quiescing, and
         * ChildrenAsleep is cleared once the transition is complete.
         * Since our interface is not asynchronous, we complete these
         * transitions instantaneously, so we set ChildrenAsleep to the
         * same value as ProcessorSleep here.
         */
        value &= GICR_WAKER_ProcessorSleep;
        if (value & GICR_WAKER_ProcessorSleep) {
            value |= GICR_WAKER_ChildrenAsleep;
        }
        cs->gicr_waker = value;
        return MEMTX_OK;
    case GICR_PROPBASER:
        cs->gicr_propbaser = deposit64(cs->gicr_propbaser, 0, 32, value);
        return MEMTX_OK;
    case GICR_PROPBASER + 4:
        cs->gicr_propbaser = deposit64(cs->gicr_propbaser, 32, 32, value);
        return MEMTX_OK;
    case GICR_PENDBASER:
        cs->gicr_pendbaser = deposit64(cs->gicr_pendbaser, 0, 32, value);
        return MEMTX_OK;
    case GICR_PENDBASER + 4:
        cs->gicr_pendbaser = deposit64(cs->gicr_pendbaser, 32, 32, value);
        return MEMTX_OK;
    case GICR_IGROUPR0:
        if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
            return MEMTX_OK;
        }
        cs->gicr_igroupr0 = value;
        gicv3_redist_update(cs);
        return MEMTX_OK;
    case GICR_ISENABLER0:
        gicr_write_set_bitmap_reg(cs, attrs, &cs->gicr_ienabler0, value);
        return MEMTX_OK;
    case GICR_ICENABLER0:
        gicr_write_clear_bitmap_reg(cs, attrs, &cs->gicr_ienabler0, value);
        return MEMTX_OK;
    case GICR_ISPENDR0:
        gicr_write_set_bitmap_reg(cs, attrs, &cs->gicr_ipendr0, value);
        return MEMTX_OK;
    case GICR_ICPENDR0:
        gicr_write_clear_bitmap_reg(cs, attrs, &cs->gicr_ipendr0, value);
        return MEMTX_OK;
    case GICR_ISACTIVER0:
        gicr_write_set_bitmap_reg(cs, attrs, &cs->gicr_iactiver0, value);
        return MEMTX_OK;
    case GICR_ICACTIVER0:
        gicr_write_clear_bitmap_reg(cs, attrs, &cs->gicr_iactiver0, value);
        return MEMTX_OK;
    case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
    {
        int i, irq = offset - GICR_IPRIORITYR;

        for (i = irq; i < irq + 4; i++, value >>= 8) {
            gicr_write_ipriorityr(cs, attrs, i, value);
        }
        gicv3_redist_update(cs);
        return MEMTX_OK;
    }
    case GICR_ICFGR0:
        /* Register is all RAZ/WI or RAO/WI bits */
        return MEMTX_OK;
    case GICR_ICFGR1:
    {
        uint32_t mask;

        /* Since our edge_trigger bitmap is one bit per irq, the input
         * 32 bits compress down into 16 bits, which we then write
         * into the bitmap.
         */
        value = half_unshuffle32(value >> 1) << 16;
        mask = mask_group(cs, attrs) & 0xffff0000U;

        cs->edge_trigger &= ~mask;
        cs->edge_trigger |= (value & mask);

        gicv3_redist_update(cs);
        return MEMTX_OK;
    }
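    /*
     * Example of the compression above: a guest write of 0x00000008 to
     * GICR_ICFGR1 asks for interrupt 17 (a PPI) to be edge-triggered:
     * (0x8 >> 1) == 0x4, half_unshuffle32(0x4) == 0x2, and the shift by
     * 16 sets bit 17 of edge_trigger, subject to the group access mask.
     */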
    case GICR_IGRPMODR0:
        if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
            /* RAZ/WI if security disabled, or if
             * security enabled and this is an NS access
             */
            return MEMTX_OK;
        }
        cs->gicr_igrpmodr0 = value;
        gicv3_redist_update(cs);
        return MEMTX_OK;
    case GICR_NSACR:
        if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
            /* RAZ/WI if security disabled, or if
             * security enabled and this is an NS access
             */
            return MEMTX_OK;
        }
        cs->gicr_nsacr = value;
        /* no update required as this only affects access permission checks */
        return MEMTX_OK;
    case GICR_IIDR:
    case GICR_TYPER:
    case GICR_IDREGS ... GICR_IDREGS + 0x2f:
        /* RO registers, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      TARGET_FMT_plx "\n", __func__, offset);
        return MEMTX_OK;
        /*
         * VLPI frame registers. We don't need a version check for
         * VPROPBASER and VPENDBASER because gicv3_redist_size() will
         * prevent pre-v4 GIC from passing us offsets this high.
         */
    case GICR_VPROPBASER:
        cs->gicr_vpropbaser = deposit64(cs->gicr_vpropbaser, 0, 32, value);
        return MEMTX_OK;
    case GICR_VPROPBASER + 4:
        cs->gicr_vpropbaser = deposit64(cs->gicr_vpropbaser, 32, 32, value);
        return MEMTX_OK;
    case GICR_VPENDBASER:
        gicr_write_vpendbaser(cs, deposit64(cs->gicr_vpendbaser, 0, 32, value));
        return MEMTX_OK;
    case GICR_VPENDBASER + 4:
        gicr_write_vpendbaser(cs, deposit64(cs->gicr_vpendbaser, 32, 32, value));
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

static MemTxResult gicr_readll(GICv3CPUState *cs, hwaddr offset,
                               uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_TYPER:
        *data = cs->gicr_typer;
        return MEMTX_OK;
    case GICR_PROPBASER:
        *data = cs->gicr_propbaser;
        return MEMTX_OK;
    case GICR_PENDBASER:
        *data = cs->gicr_pendbaser;
        return MEMTX_OK;
        /*
         * VLPI frame registers. We don't need a version check for
         * VPROPBASER and VPENDBASER because gicv3_redist_size() will
         * prevent pre-v4 GIC from passing us offsets this high.
         */
    case GICR_VPROPBASER:
        *data = cs->gicr_vpropbaser;
        return MEMTX_OK;
    case GICR_VPENDBASER:
        *data = cs->gicr_vpendbaser;
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

static MemTxResult gicr_writell(GICv3CPUState *cs, hwaddr offset,
                                uint64_t value, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_PROPBASER:
        cs->gicr_propbaser = value;
        return MEMTX_OK;
    case GICR_PENDBASER:
        cs->gicr_pendbaser = value;
        return MEMTX_OK;
    case GICR_TYPER:
        /* RO register, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      TARGET_FMT_plx "\n", __func__, offset);
        return MEMTX_OK;
        /*
         * VLPI frame registers. We don't need a version check for
         * VPROPBASER and VPENDBASER because gicv3_redist_size() will
         * prevent pre-v4 GIC from passing us offsets this high.
         */
    case GICR_VPROPBASER:
        cs->gicr_vpropbaser = value;
        return MEMTX_OK;
    case GICR_VPENDBASER:
        gicr_write_vpendbaser(cs, value);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

MemTxResult gicv3_redist_read(void *opaque, hwaddr offset, uint64_t *data,
                              unsigned size, MemTxAttrs attrs)
{
    GICv3RedistRegion *region = opaque;
    GICv3State *s = region->gic;
    GICv3CPUState *cs;
    MemTxResult r;
    int cpuidx;

    assert((offset & (size - 1)) == 0);

    /*
     * There are (for GICv3) two 64K redistributor pages per CPU.
     * In some cases the redistributor pages for all CPUs are not
     * contiguous (eg on the virt board they are split into two
     * parts if there are too many CPUs to all fit in the same place
     * in the memory map); if so then the GIC has multiple MemoryRegions
     * for the redistributors.
     */
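    /*
     * For example, with a GICv3 (two 64K frames per CPU, so
     * gicv3_redist_size() should be 0x20000) an access at region offset
     * 0x20000 is handled by the redistributor of CPU region->cpuidx + 1
     * at offset 0 within that redistributor.
     */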
    cpuidx = region->cpuidx + offset / gicv3_redist_size(s);
    offset %= gicv3_redist_size(s);

    cs = &s->cpu[cpuidx];

    switch (size) {
    case 1:
        r = gicr_readb(cs, offset, data, attrs);
        break;
    case 4:
        r = gicr_readl(cs, offset, data, attrs);
        break;
    case 8:
        r = gicr_readll(cs, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    if (r != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest read at offset " TARGET_FMT_plx
                      " size %u\n", __func__, offset, size);
        trace_gicv3_redist_badread(gicv3_redist_affid(cs), offset,
                                   size, attrs.secure);
        /* The spec requires that reserved registers are RAZ/WI;
         * so use MEMTX_ERROR returns from leaf functions as a way to
         * trigger the guest-error logging but don't return it to
         * the caller, or we'll cause a spurious guest data abort.
         */
        r = MEMTX_OK;
        *data = 0;
    } else {
        trace_gicv3_redist_read(gicv3_redist_affid(cs), offset, *data,
                                size, attrs.secure);
    }
    return r;
}

MemTxResult gicv3_redist_write(void *opaque, hwaddr offset, uint64_t data,
                               unsigned size, MemTxAttrs attrs)
{
    GICv3RedistRegion *region = opaque;
    GICv3State *s = region->gic;
    GICv3CPUState *cs;
    MemTxResult r;
    int cpuidx;

    assert((offset & (size - 1)) == 0);

    /*
     * There are (for GICv3) two 64K redistributor pages per CPU.
     * In some cases the redistributor pages for all CPUs are not
     * contiguous (eg on the virt board they are split into two
     * parts if there are too many CPUs to all fit in the same place
     * in the memory map); if so then the GIC has multiple MemoryRegions
     * for the redistributors.
     */
    cpuidx = region->cpuidx + offset / gicv3_redist_size(s);
    offset %= gicv3_redist_size(s);

    cs = &s->cpu[cpuidx];

    switch (size) {
    case 1:
        r = gicr_writeb(cs, offset, data, attrs);
        break;
    case 4:
        r = gicr_writel(cs, offset, data, attrs);
        break;
    case 8:
        r = gicr_writell(cs, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    if (r != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write at offset " TARGET_FMT_plx
                      " size %u\n", __func__, offset, size);
        trace_gicv3_redist_badwrite(gicv3_redist_affid(cs), offset, data,
                                    size, attrs.secure);
        /* The spec requires that reserved registers are RAZ/WI;
         * so use MEMTX_ERROR returns from leaf functions as a way to
         * trigger the guest-error logging but don't return it to
         * the caller, or we'll cause a spurious guest data abort.
         */
        r = MEMTX_OK;
    } else {
        trace_gicv3_redist_write(gicv3_redist_affid(cs), offset, data,
                                 size, attrs.secure);
    }
    return r;
}

static void gicv3_redist_check_lpi_priority(GICv3CPUState *cs, int irq)
{
    uint64_t lpict_baddr = cs->gicr_propbaser & R_GICR_PROPBASER_PHYADDR_MASK;

    update_for_one_lpi(cs, irq, lpict_baddr,
                       cs->gic->gicd_ctlr & GICD_CTLR_DS,
                       &cs->hpplpi);
}

void gicv3_redist_update_lpi_only(GICv3CPUState *cs)
{
    /*
     * This function scans the LPI pending table and, for each pending
     * LPI, reads the corresponding entry from the LPI configuration
     * table to extract the priority and enable information. If an
     * enabled pending LPI has a higher priority (lower value) than the
     * best one found so far, it is recorded as the new highest priority
     * pending LPI.
     */
    uint64_t lpipt_baddr, lpict_baddr;
    uint64_t idbits;

    idbits = MIN(FIELD_EX64(cs->gicr_propbaser, GICR_PROPBASER, IDBITS),
                 GICD_TYPER_IDBITS);

    if (!(cs->gicr_ctlr & GICR_CTLR_ENABLE_LPIS)) {
        return;
    }

    lpipt_baddr = cs->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;
    lpict_baddr = cs->gicr_propbaser & R_GICR_PROPBASER_PHYADDR_MASK;

    update_for_all_lpis(cs, lpipt_baddr, lpict_baddr, idbits,
                        cs->gic->gicd_ctlr & GICD_CTLR_DS, &cs->hpplpi);
}

void gicv3_redist_update_lpi(GICv3CPUState *cs)
{
    gicv3_redist_update_lpi_only(cs);
    gicv3_redist_update(cs);
}

void gicv3_redist_lpi_pending(GICv3CPUState *cs, int irq, int level)
{
    /*
     * This function updates the pending bit in the LPI pending table for
     * the irq being activated or deactivated.
     */
    uint64_t lpipt_baddr;

    lpipt_baddr = cs->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;
    if (!set_pending_table_bit(cs, lpipt_baddr, irq, level)) {
        /* No change in the pending bit value, nothing to do */
        return;
    }

    /*
     * Check whether this LPI is better than the current hpplpi; if so,
     * just update hpplpi.prio and .irq without doing a full rescan.
     */
    if (level) {
        gicv3_redist_check_lpi_priority(cs, irq);
        gicv3_redist_update(cs);
    } else {
        if (irq == cs->hpplpi.irq) {
            gicv3_redist_update_lpi(cs);
        }
    }
}

void gicv3_redist_process_lpi(GICv3CPUState *cs, int irq, int level)
{
    uint64_t idbits;

    idbits = MIN(FIELD_EX64(cs->gicr_propbaser, GICR_PROPBASER, IDBITS),
                 GICD_TYPER_IDBITS);

    if (!(cs->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) ||
        (irq > (1ULL << (idbits + 1)) - 1) || irq < GICV3_LPI_INTID_START) {
        return;
    }

    /* set/clear the pending bit for this irq */
    gicv3_redist_lpi_pending(cs, irq, level);
}
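
/*
 * Example of the range check above, assuming GICD_TYPER_IDBITS is 0xf
 * (16 interrupt ID bits) as defined in gicv3_internal.h: valid LPI
 * numbers run from GICV3_LPI_INTID_START (8192) to (1 << 16) - 1 ==
 * 65535, and anything outside that range, or any LPI while EnableLPIs
 * is 0, is silently ignored.
 */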

void gicv3_redist_inv_lpi(GICv3CPUState *cs, int irq)
{
    /*
     * The only cached information for LPIs we have is the HPPLPI.
     * We could be cleverer about identifying when we don't need
     * to do a full rescan of the pending table, but until we find
     * this is a performance issue, just always recalculate.
     */
    gicv3_redist_update_lpi(cs);
}

void gicv3_redist_mov_lpi(GICv3CPUState *src, GICv3CPUState *dest, int irq)
{
    /*
     * Move the specified LPI's pending state from the source redistributor
     * to the destination.
     *
     * If LPIs are disabled on dest this is CONSTRAINED UNPREDICTABLE:
     * we choose to NOP. If LPIs are disabled on source there's nothing
     * to be transferred anyway.
     */
    AddressSpace *as = &src->gic->dma_as;
    uint64_t idbits;
    uint32_t pendt_size;
    uint64_t src_baddr;
    uint8_t src_pend;

    if (!(src->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) ||
        !(dest->gicr_ctlr & GICR_CTLR_ENABLE_LPIS)) {
        return;
    }

    idbits = MIN(FIELD_EX64(src->gicr_propbaser, GICR_PROPBASER, IDBITS),
                 GICD_TYPER_IDBITS);
    idbits = MIN(FIELD_EX64(dest->gicr_propbaser, GICR_PROPBASER, IDBITS),
                 idbits);

    pendt_size = 1ULL << (idbits + 1);
    if ((irq / 8) >= pendt_size) {
        return;
    }

    src_baddr = src->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;

    address_space_read(as, src_baddr + (irq / 8),
                       MEMTXATTRS_UNSPECIFIED, &src_pend, sizeof(src_pend));
    if (!extract32(src_pend, irq % 8, 1)) {
        /* Not pending on source, nothing to do */
        return;
    }
    src_pend &= ~(1 << (irq % 8));
    address_space_write(as, src_baddr + (irq / 8),
                        MEMTXATTRS_UNSPECIFIED, &src_pend, sizeof(src_pend));
    if (irq == src->hpplpi.irq) {
        /*
         * We just made this LPI not-pending so only need to update
         * if it was previously the highest priority pending LPI
         */
        gicv3_redist_update_lpi(src);
    }
    /* Mark it pending on the destination */
    gicv3_redist_lpi_pending(dest, irq, 1);
}

void gicv3_redist_movall_lpis(GICv3CPUState *src, GICv3CPUState *dest)
{
    /*
     * We must move all pending LPIs from the source redistributor
     * to the destination. That is, for every pending LPI X on
     * src, we must set it not-pending on src and pending on dest.
     * LPIs that are already pending on dest are not cleared.
     *
     * If LPIs are disabled on dest this is CONSTRAINED UNPREDICTABLE:
     * we choose to NOP. If LPIs are disabled on source there's nothing
     * to be transferred anyway.
     */
    AddressSpace *as = &src->gic->dma_as;
    uint64_t idbits;
    uint32_t pendt_size;
    uint64_t src_baddr, dest_baddr;
    int i;

    if (!(src->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) ||
        !(dest->gicr_ctlr & GICR_CTLR_ENABLE_LPIS)) {
        return;
    }

    idbits = MIN(FIELD_EX64(src->gicr_propbaser, GICR_PROPBASER, IDBITS),
                 GICD_TYPER_IDBITS);
    idbits = MIN(FIELD_EX64(dest->gicr_propbaser, GICR_PROPBASER, IDBITS),
                 idbits);

    pendt_size = 1ULL << (idbits + 1);
    src_baddr = src->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;
    dest_baddr = dest->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;

    for (i = GICV3_LPI_INTID_START / 8; i < pendt_size / 8; i++) {
        uint8_t src_pend, dest_pend;

        address_space_read(as, src_baddr + i, MEMTXATTRS_UNSPECIFIED,
                           &src_pend, sizeof(src_pend));
        if (!src_pend) {
            continue;
        }
        address_space_read(as, dest_baddr + i, MEMTXATTRS_UNSPECIFIED,
                           &dest_pend, sizeof(dest_pend));
        dest_pend |= src_pend;
        src_pend = 0;
        address_space_write(as, src_baddr + i, MEMTXATTRS_UNSPECIFIED,
                            &src_pend, sizeof(src_pend));
        address_space_write(as, dest_baddr + i, MEMTXATTRS_UNSPECIFIED,
                            &dest_pend, sizeof(dest_pend));
    }

    gicv3_redist_update_lpi(src);
    gicv3_redist_update_lpi(dest);
}

void gicv3_redist_vlpi_pending(GICv3CPUState *cs, int irq, int level)
{
    /*
     * The redistributor handling for changing the pending state
     * of a vLPI will be added in a subsequent commit.
     */
}

void gicv3_redist_process_vlpi(GICv3CPUState *cs, int irq, uint64_t vptaddr,
                               int doorbell, int level)
{
    /*
     * The redistributor handling for being handed a VLPI by the ITS
     * will be added in a subsequent commit.
     */
}

void gicv3_redist_mov_vlpi(GICv3CPUState *src, uint64_t src_vptaddr,
                           GICv3CPUState *dest, uint64_t dest_vptaddr,
                           int irq, int doorbell)
{
    /*
     * The redistributor handling for moving a VLPI will be added
     * in a subsequent commit.
     */
}

void gicv3_redist_vinvall(GICv3CPUState *cs, uint64_t vptaddr)
{
    /* The redistributor handling will be added in a subsequent commit */
}

void gicv3_redist_inv_vlpi(GICv3CPUState *cs, int irq, uint64_t vptaddr)
{
    /*
     * The redistributor handling for invalidating cached information
     * about a VLPI will be added in a subsequent commit.
     */
}

void gicv3_redist_set_irq(GICv3CPUState *cs, int irq, int level)
{
    /* Update redistributor state for a change in an external PPI input line */
    if (level == extract32(cs->level, irq, 1)) {
        return;
    }

    trace_gicv3_redist_set_irq(gicv3_redist_affid(cs), irq, level);

    cs->level = deposit32(cs->level, irq, 1, level);

    if (level) {
        /* 0->1 edges latch the pending bit for edge-triggered interrupts */
        if (extract32(cs->edge_trigger, irq, 1)) {
            cs->gicr_ipendr0 = deposit32(cs->gicr_ipendr0, irq, 1, 1);
        }
    }

    gicv3_redist_update(cs);
}

void gicv3_redist_send_sgi(GICv3CPUState *cs, int grp, int irq, bool ns)
{
    /* Update redistributor state for a generated SGI */
    int irqgrp = gicv3_irq_group(cs->gic, cs, irq);

    /* If we are asked for a Secure Group 1 SGI and it's actually
     * configured as Secure Group 0 this is OK (subject to the usual
     * NSACR checks).
     */
    if (grp == GICV3_G1 && irqgrp == GICV3_G0) {
        grp = GICV3_G0;
    }

    if (grp != irqgrp) {
        return;
    }

    if (ns && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
        /* If security is enabled we must test the NSACR bits */
        int nsaccess = gicr_ns_access(cs, irq);

        if ((irqgrp == GICV3_G0 && nsaccess < 1) ||
            (irqgrp == GICV3_G1 && nsaccess < 2)) {
            return;
        }
    }
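    /*
     * Example: with DS == 0, a Non-secure-originated SGI targeting an
     * interrupt configured as Secure Group 0 is accepted only if
     * GICR_NSACR.NS_access for that SGI is at least 1, and one targeting
     * Secure Group 1 only if it is at least 2 (a value of 3 permits both).
     */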

    /* OK, we can accept the SGI */
    trace_gicv3_redist_send_sgi(gicv3_redist_affid(cs), irq);
    cs->gicr_ipendr0 = deposit32(cs->gicr_ipendr0, irq, 1, 1);
    gicv3_redist_update(cs);
}
1089