xref: /openbmc/qemu/hw/intc/arm_gicv3_redist.c (revision 641be69745c49d3c35efb62ee41d21d701b210ba)
/*
 * ARM GICv3 emulation: Redistributor
 *
 * Copyright (c) 2015 Huawei.
 * Copyright (c) 2016 Linaro Limited.
 * Written by Shlomo Pongratz, Peter Maydell
 *
 * This code is licensed under the GPL, version 2 or (at your option)
 * any later version.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "trace.h"
#include "gicv3_internal.h"

static uint32_t mask_group(GICv3CPUState *cs, MemTxAttrs attrs)
{
    /* Return a 32-bit mask which should be applied for this set of 32
     * interrupts; each bit is 1 if access is permitted by the
     * combination of attrs.secure and GICR_IGROUPR0. (GICR_NSACR does
     * not affect config register accesses, unlike GICD_NSACR.)
     */
    if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
        /* bits for Group 0 or Secure Group 1 interrupts are RAZ/WI */
        return cs->gicr_igroupr0;
    }
    return 0xFFFFFFFFU;
}
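
/*
 * For example, with security enabled (GICD_CTLR.DS == 0) and
 * cs->gicr_igroupr0 == 0x0000ffff, a non-secure access can only see or
 * modify bits 0..15 of these registers; the other bits are RAZ/WI
 * because those interrupts are Group 0 or Secure Group 1.
 */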

static int gicr_ns_access(GICv3CPUState *cs, int irq)
{
    /* Return the 2-bit GICR_NSACR.NS_access field for this SGI */
    assert(irq < 16);
    return extract32(cs->gicr_nsacr, irq * 2, 2);
}

static void gicr_write_set_bitmap_reg(GICv3CPUState *cs, MemTxAttrs attrs,
                                      uint32_t *reg, uint32_t val)
{
    /* Helper routine to implement writing to a "set-bitmap" register */
    val &= mask_group(cs, attrs);
    *reg |= val;
    gicv3_redist_update(cs);
}

static void gicr_write_clear_bitmap_reg(GICv3CPUState *cs, MemTxAttrs attrs,
                                        uint32_t *reg, uint32_t val)
{
    /* Helper routine to implement writing to a "clear-bitmap" register */
    val &= mask_group(cs, attrs);
    *reg &= ~val;
    gicv3_redist_update(cs);
}

static uint32_t gicr_read_bitmap_reg(GICv3CPUState *cs, MemTxAttrs attrs,
                                     uint32_t reg)
{
    reg &= mask_group(cs, attrs);
    return reg;
}

static uint8_t gicr_read_ipriorityr(GICv3CPUState *cs, MemTxAttrs attrs,
                                    int irq)
{
    /* Read the value of GICR_IPRIORITYR<n> for the specified interrupt,
     * honouring security state (these are RAZ/WI for Group 0 or Secure
     * Group 1 interrupts).
     */
    uint32_t prio;

    prio = cs->gicr_ipriorityr[irq];

    if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
        if (!(cs->gicr_igroupr0 & (1U << irq))) {
            /* Fields for Group 0 or Secure Group 1 interrupts are RAZ/WI */
            return 0;
        }
        /* NS view of the interrupt priority */
        prio = (prio << 1) & 0xff;
    }
    return prio;
}

static void gicr_write_ipriorityr(GICv3CPUState *cs, MemTxAttrs attrs, int irq,
                                  uint8_t value)
{
    /* Write the value of GICR_IPRIORITYR<n> for the specified interrupt,
     * honouring security state (these are RAZ/WI for Group 0 or Secure
     * Group 1 interrupts).
     */
    if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
        if (!(cs->gicr_igroupr0 & (1U << irq))) {
            /* Fields for Group 0 or Secure Group 1 interrupts are RAZ/WI */
            return;
        }
        /* NS view of the interrupt priority */
        value = 0x80 | (value >> 1);
    }
    cs->gicr_ipriorityr[irq] = value;
}
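
/*
 * Worked example of the non-secure priority view implemented by the
 * two functions above: an NS write of 0x20 is stored as
 * 0x80 | (0x20 >> 1) == 0x90; a subsequent NS read returns
 * (0x90 << 1) & 0xff == 0x20, while a Secure read sees the stored
 * 0x90. NS software is thus confined to the numerically upper (lower
 * priority) half of the range and cannot produce priorities below 0x80.
 */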

static MemTxResult gicr_readb(GICv3CPUState *cs, hwaddr offset,
                              uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
        *data = gicr_read_ipriorityr(cs, attrs, offset - GICR_IPRIORITYR);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

static MemTxResult gicr_writeb(GICv3CPUState *cs, hwaddr offset,
                               uint64_t value, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
        gicr_write_ipriorityr(cs, attrs, offset - GICR_IPRIORITYR, value);
        gicv3_redist_update(cs);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

static MemTxResult gicr_readl(GICv3CPUState *cs, hwaddr offset,
                              uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_CTLR:
        *data = cs->gicr_ctlr;
        return MEMTX_OK;
    case GICR_IIDR:
        *data = gicv3_iidr();
        return MEMTX_OK;
    case GICR_TYPER:
        *data = extract64(cs->gicr_typer, 0, 32);
        return MEMTX_OK;
    case GICR_TYPER + 4:
        *data = extract64(cs->gicr_typer, 32, 32);
        return MEMTX_OK;
    case GICR_STATUSR:
        /* RAZ/WI for us (this is an optional register and our implementation
         * does not track RO/WO/reserved violations to report them to the guest)
         */
        *data = 0;
        return MEMTX_OK;
    case GICR_WAKER:
        *data = cs->gicr_waker;
        return MEMTX_OK;
    case GICR_PROPBASER:
        *data = extract64(cs->gicr_propbaser, 0, 32);
        return MEMTX_OK;
    case GICR_PROPBASER + 4:
        *data = extract64(cs->gicr_propbaser, 32, 32);
        return MEMTX_OK;
    case GICR_PENDBASER:
        *data = extract64(cs->gicr_pendbaser, 0, 32);
        return MEMTX_OK;
    case GICR_PENDBASER + 4:
        *data = extract64(cs->gicr_pendbaser, 32, 32);
        return MEMTX_OK;
    case GICR_IGROUPR0:
        if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
            *data = 0;
            return MEMTX_OK;
        }
        *data = cs->gicr_igroupr0;
        return MEMTX_OK;
    case GICR_ISENABLER0:
    case GICR_ICENABLER0:
        *data = gicr_read_bitmap_reg(cs, attrs, cs->gicr_ienabler0);
        return MEMTX_OK;
    case GICR_ISPENDR0:
    case GICR_ICPENDR0:
    {
        /* The pending register reads as the logical OR of the pending
         * latch and the input line level for level-triggered interrupts.
         */
        uint32_t val = cs->gicr_ipendr0 | (~cs->edge_trigger & cs->level);
        *data = gicr_read_bitmap_reg(cs, attrs, val);
        return MEMTX_OK;
    }
    case GICR_ISACTIVER0:
    case GICR_ICACTIVER0:
        *data = gicr_read_bitmap_reg(cs, attrs, cs->gicr_iactiver0);
        return MEMTX_OK;
    case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
    {
        int i, irq = offset - GICR_IPRIORITYR;
        uint32_t value = 0;

        for (i = irq + 3; i >= irq; i--) {
            value <<= 8;
            value |= gicr_read_ipriorityr(cs, attrs, i);
        }
        *data = value;
        return MEMTX_OK;
    }
    case GICR_ICFGR0:
    case GICR_ICFGR1:
    {
        /* Our edge_trigger bitmap is one bit per irq; take the correct
         * half of it, and spread it out into the odd bits.
         */
        uint32_t value;

        value = cs->edge_trigger & mask_group(cs, attrs);
        value = extract32(value, (offset == GICR_ICFGR1) ? 16 : 0, 16);
        value = half_shuffle32(value) << 1;
        *data = value;
        return MEMTX_OK;
    }
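    /*
     * For example, if PPIs 16 and 18 are edge-triggered (edge_trigger
     * bits 16 and 18 set), the GICR_ICFGR1 read above extracts 0x0005,
     * half_shuffle32() spreads that into the even bits to give
     * 0x00000011, and the << 1 moves it to the odd bits, so the guest
     * reads 0x00000022 (bit 1 of each 2-bit field means edge-triggered).
     */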
    case GICR_IGRPMODR0:
        if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
            /* RAZ/WI if security disabled, or if
             * security enabled and this is an NS access
             */
            *data = 0;
            return MEMTX_OK;
        }
        *data = cs->gicr_igrpmodr0;
        return MEMTX_OK;
    case GICR_NSACR:
        if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
            /* RAZ/WI if security disabled, or if
             * security enabled and this is an NS access
             */
            *data = 0;
            return MEMTX_OK;
        }
        *data = cs->gicr_nsacr;
        return MEMTX_OK;
    case GICR_IDREGS ... GICR_IDREGS + 0x2f:
        *data = gicv3_idreg(offset - GICR_IDREGS, GICV3_PIDR0_REDIST);
        return MEMTX_OK;
        /*
         * VLPI frame registers. We don't need a version check for
         * VPROPBASER and VPENDBASER because gicv3_redist_size() will
         * prevent a pre-v4 GIC from passing us offsets this high.
         */
    case GICR_VPROPBASER:
        *data = extract64(cs->gicr_vpropbaser, 0, 32);
        return MEMTX_OK;
    case GICR_VPROPBASER + 4:
        *data = extract64(cs->gicr_vpropbaser, 32, 32);
        return MEMTX_OK;
    case GICR_VPENDBASER:
        *data = extract64(cs->gicr_vpendbaser, 0, 32);
        return MEMTX_OK;
    case GICR_VPENDBASER + 4:
        *data = extract64(cs->gicr_vpendbaser, 32, 32);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

static MemTxResult gicr_writel(GICv3CPUState *cs, hwaddr offset,
                               uint64_t value, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_CTLR:
        /* For our implementation, GICR_TYPER.DPGS is 0 and so all
         * the DPG bits are RAZ/WI. We don't do anything asynchronously,
         * so UWP and RWP are RAZ/WI. GICR_TYPER.LPIS is 1 (we
         * implement LPIs) so Enable_LPIs is programmable.
         */
        if (cs->gicr_typer & GICR_TYPER_PLPIS) {
            if (value & GICR_CTLR_ENABLE_LPIS) {
                cs->gicr_ctlr |= GICR_CTLR_ENABLE_LPIS;
                /* Check for any pending interrupts in the pending table */
                gicv3_redist_update_lpi(cs);
            } else {
                cs->gicr_ctlr &= ~GICR_CTLR_ENABLE_LPIS;
                /* cs->hppi might have been an LPI; recalculate */
                gicv3_redist_update(cs);
            }
        }
        return MEMTX_OK;
    case GICR_STATUSR:
        /* RAZ/WI for our implementation */
        return MEMTX_OK;
    case GICR_WAKER:
        /* Only the ProcessorSleep bit is writeable. When the guest sets
         * it, it requests that we transition the channel between the
         * redistributor and the cpu interface to quiescent, and that
         * we set the ChildrenAsleep bit once the interface has reached
         * the quiescent state.
         * Setting the ProcessorSleep bit to 0 reverses the quiescing,
         * and ChildrenAsleep is cleared once the transition is complete.
         * Since our interface is not asynchronous, we complete these
         * transitions instantaneously, so we set ChildrenAsleep to the
         * same value as ProcessorSleep here.
         */
        value &= GICR_WAKER_ProcessorSleep;
        if (value & GICR_WAKER_ProcessorSleep) {
            value |= GICR_WAKER_ChildrenAsleep;
        }
        cs->gicr_waker = value;
        return MEMTX_OK;
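        /*
         * For example, a guest write of GICR_WAKER_ProcessorSleep (bit 1)
         * to this register reads back as 0x6, because we report
         * GICR_WAKER_ChildrenAsleep (bit 2) as set immediately.
         */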
    case GICR_PROPBASER:
        cs->gicr_propbaser = deposit64(cs->gicr_propbaser, 0, 32, value);
        return MEMTX_OK;
    case GICR_PROPBASER + 4:
        cs->gicr_propbaser = deposit64(cs->gicr_propbaser, 32, 32, value);
        return MEMTX_OK;
    case GICR_PENDBASER:
        cs->gicr_pendbaser = deposit64(cs->gicr_pendbaser, 0, 32, value);
        return MEMTX_OK;
    case GICR_PENDBASER + 4:
        cs->gicr_pendbaser = deposit64(cs->gicr_pendbaser, 32, 32, value);
        return MEMTX_OK;
    case GICR_IGROUPR0:
        if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
            return MEMTX_OK;
        }
        cs->gicr_igroupr0 = value;
        gicv3_redist_update(cs);
        return MEMTX_OK;
    case GICR_ISENABLER0:
        gicr_write_set_bitmap_reg(cs, attrs, &cs->gicr_ienabler0, value);
        return MEMTX_OK;
    case GICR_ICENABLER0:
        gicr_write_clear_bitmap_reg(cs, attrs, &cs->gicr_ienabler0, value);
        return MEMTX_OK;
    case GICR_ISPENDR0:
        gicr_write_set_bitmap_reg(cs, attrs, &cs->gicr_ipendr0, value);
        return MEMTX_OK;
    case GICR_ICPENDR0:
        gicr_write_clear_bitmap_reg(cs, attrs, &cs->gicr_ipendr0, value);
        return MEMTX_OK;
    case GICR_ISACTIVER0:
        gicr_write_set_bitmap_reg(cs, attrs, &cs->gicr_iactiver0, value);
        return MEMTX_OK;
    case GICR_ICACTIVER0:
        gicr_write_clear_bitmap_reg(cs, attrs, &cs->gicr_iactiver0, value);
        return MEMTX_OK;
    case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
    {
        int i, irq = offset - GICR_IPRIORITYR;

        for (i = irq; i < irq + 4; i++, value >>= 8) {
            gicr_write_ipriorityr(cs, attrs, i, value);
        }
        gicv3_redist_update(cs);
        return MEMTX_OK;
    }
    case GICR_ICFGR0:
        /* Register is all RAZ/WI or RAO/WI bits */
        return MEMTX_OK;
    case GICR_ICFGR1:
    {
        uint32_t mask;

        /* Since our edge_trigger bitmap is one bit per irq, the input
         * 32 bits will compress down into 16 bits which we need
         * to write into the bitmap.
         */
        value = half_unshuffle32(value >> 1) << 16;
        mask = mask_group(cs, attrs) & 0xffff0000U;

        cs->edge_trigger &= ~mask;
        cs->edge_trigger |= (value & mask);

        gicv3_redist_update(cs);
        return MEMTX_OK;
    }
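    /*
     * This is the inverse of the GICR_ICFGR1 read transform above: e.g.
     * a guest write of 0x00000022 becomes 0x00000011 after the >> 1,
     * half_unshuffle32() packs that back down to 0x0005, and the << 16
     * sets edge_trigger bits 16 and 18 (subject to the group mask).
     */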
    case GICR_IGRPMODR0:
        if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
            /* RAZ/WI if security disabled, or if
             * security enabled and this is an NS access
             */
            return MEMTX_OK;
        }
        cs->gicr_igrpmodr0 = value;
        gicv3_redist_update(cs);
        return MEMTX_OK;
    case GICR_NSACR:
        if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
            /* RAZ/WI if security disabled, or if
             * security enabled and this is an NS access
             */
            return MEMTX_OK;
        }
        cs->gicr_nsacr = value;
        /* no update required as this only affects access permission checks */
        return MEMTX_OK;
    case GICR_IIDR:
    case GICR_TYPER:
    case GICR_IDREGS ... GICR_IDREGS + 0x2f:
        /* RO registers, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      TARGET_FMT_plx "\n", __func__, offset);
        return MEMTX_OK;
        /*
         * VLPI frame registers. We don't need a version check for
         * VPROPBASER and VPENDBASER because gicv3_redist_size() will
         * prevent a pre-v4 GIC from passing us offsets this high.
         */
    case GICR_VPROPBASER:
        cs->gicr_vpropbaser = deposit64(cs->gicr_vpropbaser, 0, 32, value);
        return MEMTX_OK;
    case GICR_VPROPBASER + 4:
        cs->gicr_vpropbaser = deposit64(cs->gicr_vpropbaser, 32, 32, value);
        return MEMTX_OK;
    case GICR_VPENDBASER:
        cs->gicr_vpendbaser = deposit64(cs->gicr_vpendbaser, 0, 32, value);
        return MEMTX_OK;
    case GICR_VPENDBASER + 4:
        cs->gicr_vpendbaser = deposit64(cs->gicr_vpendbaser, 32, 32, value);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

static MemTxResult gicr_readll(GICv3CPUState *cs, hwaddr offset,
                               uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_TYPER:
        *data = cs->gicr_typer;
        return MEMTX_OK;
    case GICR_PROPBASER:
        *data = cs->gicr_propbaser;
        return MEMTX_OK;
    case GICR_PENDBASER:
        *data = cs->gicr_pendbaser;
        return MEMTX_OK;
        /*
         * VLPI frame registers. We don't need a version check for
         * VPROPBASER and VPENDBASER because gicv3_redist_size() will
         * prevent a pre-v4 GIC from passing us offsets this high.
         */
    case GICR_VPROPBASER:
        *data = cs->gicr_vpropbaser;
        return MEMTX_OK;
    case GICR_VPENDBASER:
        *data = cs->gicr_vpendbaser;
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

static MemTxResult gicr_writell(GICv3CPUState *cs, hwaddr offset,
                                uint64_t value, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_PROPBASER:
        cs->gicr_propbaser = value;
        return MEMTX_OK;
    case GICR_PENDBASER:
        cs->gicr_pendbaser = value;
        return MEMTX_OK;
    case GICR_TYPER:
        /* RO register, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      TARGET_FMT_plx "\n", __func__, offset);
        return MEMTX_OK;
        /*
         * VLPI frame registers. We don't need a version check for
         * VPROPBASER and VPENDBASER because gicv3_redist_size() will
         * prevent a pre-v4 GIC from passing us offsets this high.
         */
    case GICR_VPROPBASER:
        cs->gicr_vpropbaser = value;
        return MEMTX_OK;
    case GICR_VPENDBASER:
        cs->gicr_vpendbaser = value;
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

MemTxResult gicv3_redist_read(void *opaque, hwaddr offset, uint64_t *data,
                              unsigned size, MemTxAttrs attrs)
{
    GICv3RedistRegion *region = opaque;
    GICv3State *s = region->gic;
    GICv3CPUState *cs;
    MemTxResult r;
    int cpuidx;

    assert((offset & (size - 1)) == 0);

    /*
     * There are (for GICv3) two 64K redistributor pages per CPU.
     * In some cases the redistributor pages for all CPUs are not
     * contiguous (e.g. on the virt board they are split into two
     * parts if there are too many CPUs to all fit in the same place
     * in the memory map); if so then the GIC has multiple MemoryRegions
     * for the redistributors.
     */
    cpuidx = region->cpuidx + offset / gicv3_redist_size(s);
    offset %= gicv3_redist_size(s);
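    /*
     * For example, on a GICv3, where each CPU has two 64KB frames,
     * an access at offset 0x20008 into this region decodes to the CPU
     * one above region->cpuidx, at offset 0x8 (GICR_TYPER) within that
     * CPU's redistributor.
     */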

    cs = &s->cpu[cpuidx];

    switch (size) {
    case 1:
        r = gicr_readb(cs, offset, data, attrs);
        break;
    case 4:
        r = gicr_readl(cs, offset, data, attrs);
        break;
    case 8:
        r = gicr_readll(cs, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    if (r != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest read at offset " TARGET_FMT_plx
                      " size %u\n", __func__, offset, size);
        trace_gicv3_redist_badread(gicv3_redist_affid(cs), offset,
                                   size, attrs.secure);
        /* The spec requires that reserved registers are RAZ/WI;
         * so use MEMTX_ERROR returns from leaf functions as a way to
         * trigger the guest-error logging but don't return it to
         * the caller, or we'll cause a spurious guest data abort.
         */
        r = MEMTX_OK;
        *data = 0;
    } else {
        trace_gicv3_redist_read(gicv3_redist_affid(cs), offset, *data,
                                size, attrs.secure);
    }
    return r;
}
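
/*
 * Sketch of how these two entry points are typically wired up (the
 * actual registration lives in the companion GIC device code, not in
 * this file; the field names follow QEMU's MemoryRegionOps):
 *
 *     static const MemoryRegionOps redist_ops = {
 *         .read_with_attrs = gicv3_redist_read,
 *         .write_with_attrs = gicv3_redist_write,
 *         .endianness = DEVICE_NATIVE_ENDIAN,
 *     };
 *
 * with one MemoryRegion per contiguous block of redistributor pages,
 * each using its GICv3RedistRegion as the opaque pointer.
 */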

MemTxResult gicv3_redist_write(void *opaque, hwaddr offset, uint64_t data,
                               unsigned size, MemTxAttrs attrs)
{
    GICv3RedistRegion *region = opaque;
    GICv3State *s = region->gic;
    GICv3CPUState *cs;
    MemTxResult r;
    int cpuidx;

    assert((offset & (size - 1)) == 0);

    /*
     * There are (for GICv3) two 64K redistributor pages per CPU.
     * In some cases the redistributor pages for all CPUs are not
     * contiguous (e.g. on the virt board they are split into two
     * parts if there are too many CPUs to all fit in the same place
     * in the memory map); if so then the GIC has multiple MemoryRegions
     * for the redistributors.
     */
    cpuidx = region->cpuidx + offset / gicv3_redist_size(s);
    offset %= gicv3_redist_size(s);

    cs = &s->cpu[cpuidx];

    switch (size) {
    case 1:
        r = gicr_writeb(cs, offset, data, attrs);
        break;
    case 4:
        r = gicr_writel(cs, offset, data, attrs);
        break;
    case 8:
        r = gicr_writell(cs, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    if (r != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write at offset " TARGET_FMT_plx
                      " size %u\n", __func__, offset, size);
        trace_gicv3_redist_badwrite(gicv3_redist_affid(cs), offset, data,
                                    size, attrs.secure);
        /* The spec requires that reserved registers are RAZ/WI;
         * so use MEMTX_ERROR returns from leaf functions as a way to
         * trigger the guest-error logging but don't return it to
         * the caller, or we'll cause a spurious guest data abort.
         */
        r = MEMTX_OK;
    } else {
        trace_gicv3_redist_write(gicv3_redist_affid(cs), offset, data,
                                 size, attrs.secure);
    }
    return r;
}

static void gicv3_redist_check_lpi_priority(GICv3CPUState *cs, int irq)
{
    AddressSpace *as = &cs->gic->dma_as;
    uint64_t lpict_baddr;
    uint8_t lpite;
    uint8_t prio;

    lpict_baddr = cs->gicr_propbaser & R_GICR_PROPBASER_PHYADDR_MASK;

    address_space_read(as, lpict_baddr + ((irq - GICV3_LPI_INTID_START) *
                       sizeof(lpite)), MEMTXATTRS_UNSPECIFIED, &lpite,
                       sizeof(lpite));

    if (!(lpite & LPI_CTE_ENABLED)) {
        return;
    }

    if (cs->gic->gicd_ctlr & GICD_CTLR_DS) {
        prio = lpite & LPI_PRIORITY_MASK;
    } else {
        prio = ((lpite & LPI_PRIORITY_MASK) >> 1) | 0x80;
    }

    if ((prio < cs->hpplpi.prio) ||
        ((prio == cs->hpplpi.prio) && (irq <= cs->hpplpi.irq))) {
        cs->hpplpi.irq = irq;
        cs->hpplpi.prio = prio;
        /* LPIs are always non-secure Grp1 interrupts */
        cs->hpplpi.grp = GICV3_G1NS;
    }
}
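
/*
 * Note the indexing above: LPI INTIDs start at GICV3_LPI_INTID_START
 * (8192), so e.g. the one-byte config table entry for INTID 8200 lives
 * at byte offset 8 from the GICR_PROPBASER physical address; it holds
 * the LPI's enable bit and priority.
 */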

void gicv3_redist_update_lpi_only(GICv3CPUState *cs)
{
    /*
     * This function scans the LPI pending table and, for each pending
     * LPI, reads the corresponding entry from the LPI configuration
     * table to extract the priority info, then checks whether that LPI
     * has a higher priority (a numerically lower value) than the
     * highest-priority pending LPI found so far; if so, it is recorded
     * as the new highest-priority pending LPI.
     */
    AddressSpace *as = &cs->gic->dma_as;
    uint64_t lpipt_baddr;
    uint32_t pendt_size = 0;
    uint8_t pend;
    int i, bit;
    uint64_t idbits;

    idbits = MIN(FIELD_EX64(cs->gicr_propbaser, GICR_PROPBASER, IDBITS),
                 GICD_TYPER_IDBITS);

    if (!(cs->gicr_ctlr & GICR_CTLR_ENABLE_LPIS)) {
        return;
    }

    cs->hpplpi.prio = 0xff;

    lpipt_baddr = cs->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;

    /* Determine the highest priority pending interrupt among LPIs */
    pendt_size = (1ULL << (idbits + 1));

    for (i = GICV3_LPI_INTID_START / 8; i < pendt_size / 8; i++) {
        address_space_read(as, lpipt_baddr + i, MEMTXATTRS_UNSPECIFIED, &pend,
                           sizeof(pend));

        while (pend) {
            bit = ctz32(pend);
            gicv3_redist_check_lpi_priority(cs, i * 8 + bit);
            pend &= ~(1 << bit);
        }
    }
}
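
/*
 * For example, with GICR_PROPBASER.IDBITS == 15 the table covers
 * 1 << 16 == 65536 INTIDs, so the loop above scans pending table bytes
 * 1024..8191 (INTIDs 8192..65535); the first 1KB corresponds to the
 * non-LPI INTID space and is never consulted.
 */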

void gicv3_redist_update_lpi(GICv3CPUState *cs)
{
    gicv3_redist_update_lpi_only(cs);
    gicv3_redist_update(cs);
}

void gicv3_redist_lpi_pending(GICv3CPUState *cs, int irq, int level)
{
    /*
     * This function updates the pending bit in the LPI pending table
     * for the irq being activated or deactivated.
     */
    AddressSpace *as = &cs->gic->dma_as;
    uint64_t lpipt_baddr;
    bool ispend = false;
    uint8_t pend;

    /*
     * Get the bit value corresponding to this irq in the
     * LPI pending table
     */
    lpipt_baddr = cs->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;

    address_space_read(as, lpipt_baddr + ((irq / 8) * sizeof(pend)),
                       MEMTXATTRS_UNSPECIFIED, &pend, sizeof(pend));

    ispend = extract32(pend, irq % 8, 1);

    /* No change in the value of the pending bit: nothing to do */
    if (ispend == level) {
        return;
    }
    pend = deposit32(pend, irq % 8, 1, level ? 1 : 0);

    address_space_write(as, lpipt_baddr + ((irq / 8) * sizeof(pend)),
                        MEMTXATTRS_UNSPECIFIED, &pend, sizeof(pend));

    /*
     * Check whether this LPI is better than the current hpplpi; if so,
     * just update hpplpi.prio and .irq without doing a full rescan
     */
    if (level) {
        gicv3_redist_check_lpi_priority(cs, irq);
        gicv3_redist_update(cs);
    } else {
        if (irq == cs->hpplpi.irq) {
            gicv3_redist_update_lpi(cs);
        }
    }
}
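
/*
 * For example, setting LPI 8195 pending reads the byte at
 * lpipt_baddr + 1024, sets bit 3 (8195 % 8) and writes it back;
 * only that single byte of guest memory is touched.
 */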

void gicv3_redist_process_lpi(GICv3CPUState *cs, int irq, int level)
{
    uint64_t idbits;

    idbits = MIN(FIELD_EX64(cs->gicr_propbaser, GICR_PROPBASER, IDBITS),
                 GICD_TYPER_IDBITS);

    if (!(cs->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) ||
        (irq > (1ULL << (idbits + 1)) - 1) || irq < GICV3_LPI_INTID_START) {
        return;
    }

    /* set/clear the pending bit for this irq */
    gicv3_redist_lpi_pending(cs, irq, level);
}
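
/*
 * With GICR_PROPBASER.IDBITS == 15, for instance, the range check above
 * accepts INTIDs 8192..65535 and silently ignores anything outside
 * that range.
 */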

void gicv3_redist_inv_lpi(GICv3CPUState *cs, int irq)
{
    /*
     * The only cached information for LPIs we have is the HPPLPI.
     * We could be cleverer about identifying when we don't need
     * to do a full rescan of the pending table, but until we find
     * this is a performance issue, just always recalculate.
     */
    gicv3_redist_update_lpi(cs);
}

void gicv3_redist_mov_lpi(GICv3CPUState *src, GICv3CPUState *dest, int irq)
{
    /*
     * Move the specified LPI's pending state from the source redistributor
     * to the destination.
     *
     * If LPIs are disabled on dest this is CONSTRAINED UNPREDICTABLE:
     * we choose to NOP. If LPIs are disabled on source there's nothing
     * to be transferred anyway.
     */
    AddressSpace *as = &src->gic->dma_as;
    uint64_t idbits;
    uint32_t pendt_size;
    uint64_t src_baddr;
    uint8_t src_pend;

    if (!(src->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) ||
        !(dest->gicr_ctlr & GICR_CTLR_ENABLE_LPIS)) {
        return;
    }

    idbits = MIN(FIELD_EX64(src->gicr_propbaser, GICR_PROPBASER, IDBITS),
                 GICD_TYPER_IDBITS);
    idbits = MIN(FIELD_EX64(dest->gicr_propbaser, GICR_PROPBASER, IDBITS),
                 idbits);

    pendt_size = 1ULL << (idbits + 1);
    if ((irq / 8) >= pendt_size) {
        return;
    }

    src_baddr = src->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;

    address_space_read(as, src_baddr + (irq / 8),
                       MEMTXATTRS_UNSPECIFIED, &src_pend, sizeof(src_pend));
    if (!extract32(src_pend, irq % 8, 1)) {
        /* Not pending on source, nothing to do */
        return;
    }
    src_pend &= ~(1 << (irq % 8));
    address_space_write(as, src_baddr + (irq / 8),
                        MEMTXATTRS_UNSPECIFIED, &src_pend, sizeof(src_pend));
    if (irq == src->hpplpi.irq) {
        /*
         * We just made this LPI not-pending so only need to update
         * if it was previously the highest priority pending LPI
         */
        gicv3_redist_update_lpi(src);
    }
    /* Mark it pending on the destination */
    gicv3_redist_lpi_pending(dest, irq, 1);
}

void gicv3_redist_movall_lpis(GICv3CPUState *src, GICv3CPUState *dest)
{
    /*
     * We must move all pending LPIs from the source redistributor
     * to the destination. That is, for every pending LPI X on
     * src, we must set it not-pending on src and pending on dest.
     * LPIs that are already pending on dest are not cleared.
     *
     * If LPIs are disabled on dest this is CONSTRAINED UNPREDICTABLE:
     * we choose to NOP. If LPIs are disabled on source there's nothing
     * to be transferred anyway.
     */
    AddressSpace *as = &src->gic->dma_as;
    uint64_t idbits;
    uint32_t pendt_size;
    uint64_t src_baddr, dest_baddr;
    int i;

    if (!(src->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) ||
        !(dest->gicr_ctlr & GICR_CTLR_ENABLE_LPIS)) {
        return;
    }

    idbits = MIN(FIELD_EX64(src->gicr_propbaser, GICR_PROPBASER, IDBITS),
                 GICD_TYPER_IDBITS);
    idbits = MIN(FIELD_EX64(dest->gicr_propbaser, GICR_PROPBASER, IDBITS),
                 idbits);

    pendt_size = 1ULL << (idbits + 1);
    src_baddr = src->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;
    dest_baddr = dest->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;

    for (i = GICV3_LPI_INTID_START / 8; i < pendt_size / 8; i++) {
        uint8_t src_pend, dest_pend;

        address_space_read(as, src_baddr + i, MEMTXATTRS_UNSPECIFIED,
                           &src_pend, sizeof(src_pend));
        if (!src_pend) {
            continue;
        }
        address_space_read(as, dest_baddr + i, MEMTXATTRS_UNSPECIFIED,
                           &dest_pend, sizeof(dest_pend));
        dest_pend |= src_pend;
        src_pend = 0;
        address_space_write(as, src_baddr + i, MEMTXATTRS_UNSPECIFIED,
                            &src_pend, sizeof(src_pend));
        address_space_write(as, dest_baddr + i, MEMTXATTRS_UNSPECIFIED,
                            &dest_pend, sizeof(dest_pend));
    }

    gicv3_redist_update_lpi(src);
    gicv3_redist_update_lpi(dest);
}

void gicv3_redist_process_vlpi(GICv3CPUState *cs, int irq, uint64_t vptaddr,
                               int doorbell, int level)
{
    /*
     * The redistributor handling for being handed a VLPI by the ITS
     * will be added in a subsequent commit.
     */
}

void gicv3_redist_mov_vlpi(GICv3CPUState *src, uint64_t src_vptaddr,
                           GICv3CPUState *dest, uint64_t dest_vptaddr,
                           int irq, int doorbell)
{
    /*
     * The redistributor handling for moving a VLPI will be added
     * in a subsequent commit.
     */
}

void gicv3_redist_vinvall(GICv3CPUState *cs, uint64_t vptaddr)
{
    /* The redistributor handling will be added in a subsequent commit */
}

void gicv3_redist_inv_vlpi(GICv3CPUState *cs, int irq, uint64_t vptaddr)
{
    /*
     * The redistributor handling for invalidating cached information
     * about a VLPI will be added in a subsequent commit.
     */
}

void gicv3_redist_set_irq(GICv3CPUState *cs, int irq, int level)
{
    /* Update redistributor state for a change in an external PPI input line */
    if (level == extract32(cs->level, irq, 1)) {
        return;
    }

    trace_gicv3_redist_set_irq(gicv3_redist_affid(cs), irq, level);

    cs->level = deposit32(cs->level, irq, 1, level);

    if (level) {
        /* 0->1 edges latch the pending bit for edge-triggered interrupts */
        if (extract32(cs->edge_trigger, irq, 1)) {
            cs->gicr_ipendr0 = deposit32(cs->gicr_ipendr0, irq, 1, 1);
        }
    }

    gicv3_redist_update(cs);
}

void gicv3_redist_send_sgi(GICv3CPUState *cs, int grp, int irq, bool ns)
{
    /* Update redistributor state for a generated SGI */
    int irqgrp = gicv3_irq_group(cs->gic, cs, irq);

    /* If we are asked for a Secure Group 1 SGI and it's actually
     * configured as Secure Group 0 this is OK (subject to the usual
     * NSACR checks).
     */
    if (grp == GICV3_G1 && irqgrp == GICV3_G0) {
        grp = GICV3_G0;
    }

    if (grp != irqgrp) {
        return;
    }

    if (ns && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
        /* If security is enabled we must test the NSACR bits */
        int nsaccess = gicr_ns_access(cs, irq);

        if ((irqgrp == GICV3_G0 && nsaccess < 1) ||
            (irqgrp == GICV3_G1 && nsaccess < 2)) {
            return;
        }
    }

    /* OK, we can accept the SGI */
    trace_gicv3_redist_send_sgi(gicv3_redist_affid(cs), irq);
    cs->gicr_ipendr0 = deposit32(cs->gicr_ipendr0, irq, 1, 1);
    gicv3_redist_update(cs);
}
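
/*
 * The NSACR test above encodes the GICR_NSACR semantics: for an SGI
 * generated from the non-secure state, NS_access >= 1 permits an SGI
 * configured as Group 0, and NS_access >= 2 additionally permits one
 * configured as Secure Group 1.
 */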