/*
 * ARM Generic Interrupt Controller v3
 *
 * Copyright (c) 2015 Huawei.
 * Copyright (c) 2016 Linaro Limited
 * Written by Shlomo Pongratz, Peter Maydell
 *
 * This code is licensed under the GPL, version 2 or (at your option)
 * any later version.
 */

/* This file contains implementation code for an interrupt controller
 * which implements the GICv3 architecture. Specifically this is where
 * the device class itself and the functions for handling interrupts
 * coming in and going out live.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/sysbus.h"
#include "hw/intc/arm_gicv3.h"
#include "gicv3_internal.h"

static bool irqbetter(GICv3CPUState *cs, int irq, uint8_t prio)
{
    /* Return true if this IRQ at this priority should take
     * precedence over the current recorded highest priority
     * pending interrupt for this CPU. We also return true if
     * the current recorded highest priority pending interrupt
     * is the same as this one (a property which the calling code
     * relies on).
     */
    if (prio < cs->hppi.prio) {
        return true;
    }
    /* If multiple pending interrupts have the same priority then it is an
     * IMPDEF choice which of them to signal to the CPU. We choose to
     * signal the one with the lowest interrupt number.
     */
    if (prio == cs->hppi.prio && irq <= cs->hppi.irq) {
        return true;
    }
    return false;
}
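
/* Illustrative example of irqbetter(): if cs->hppi currently records IRQ 40
 * at priority 0x80, then IRQ 35 at priority 0x80 wins (equal priority, lower
 * interrupt number), as does any IRQ at priority 0x40 (numerically lower
 * value means higher priority); IRQ 50 at priority 0x80 or any IRQ at
 * priority 0xa0 does not.
 */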

static uint32_t gicd_int_pending(GICv3State *s, int irq)
{
    /* Recalculate which distributor interrupts are actually pending
     * in the group of 32 interrupts starting at irq (which should be a multiple
     * of 32), and return a 32-bit integer which has a bit set for each
     * interrupt that is eligible to be signaled to the CPU interface.
     *
     * An interrupt is pending if:
     *  + the PENDING latch is set OR it is level triggered and the input is 1
     *  + its ENABLE bit is set
     *  + the GICD enable bit for its group is set
     * Conveniently we can bulk-calculate this with bitwise operations.
     */
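    /* Worked example with arbitrary illustrative values: if
     * pending == 0x0000000c, edge_trigger == 0x00000004, level == 0x00000003
     * and enable == 0x0000000f, then
     *   pending | (~edge_trigger & level) == 0x0000000f
     * and ANDing with enable leaves 0x0000000f; the group mask computed
     * below then clears any bits whose group is not enabled in GICD_CTLR.
     */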
    uint32_t pend, grpmask;
    uint32_t pending = *gic_bmp_ptr32(s->pending, irq);
    uint32_t edge_trigger = *gic_bmp_ptr32(s->edge_trigger, irq);
    uint32_t level = *gic_bmp_ptr32(s->level, irq);
    uint32_t group = *gic_bmp_ptr32(s->group, irq);
    uint32_t grpmod = *gic_bmp_ptr32(s->grpmod, irq);
    uint32_t enable = *gic_bmp_ptr32(s->enabled, irq);

    pend = pending | (~edge_trigger & level);
    pend &= enable;

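    /* When GICD_CTLR.DS is set there is only one Security state, so the
     * Secure Group 1 category selected via grpmod goes away and an interrupt
     * is simply Group 0 or Group 1 according to its group bit.
     */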
    if (s->gicd_ctlr & GICD_CTLR_DS) {
        grpmod = 0;
    }

    grpmask = 0;
    if (s->gicd_ctlr & GICD_CTLR_EN_GRP1NS) {
        grpmask |= group;
    }
    if (s->gicd_ctlr & GICD_CTLR_EN_GRP1S) {
        grpmask |= (~group & grpmod);
    }
    if (s->gicd_ctlr & GICD_CTLR_EN_GRP0) {
        grpmask |= (~group & ~grpmod);
    }
    pend &= grpmask;

    return pend;
}

static uint32_t gicr_int_pending(GICv3CPUState *cs)
{
    /* Recalculate which redistributor interrupts are actually pending,
     * and return a 32-bit integer which has a bit set for each interrupt
     * that is eligible to be signaled to the CPU interface.
     *
     * An interrupt is pending if:
     *  + the PENDING latch is set OR it is level triggered and the input is 1
     *  + its ENABLE bit is set
     *  + the GICD enable bit for its group is set
     * Conveniently we can bulk-calculate this with bitwise operations.
     */
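    /* Note that the redistributor only deals with the 32 SGIs and PPIs,
     * so a single 32-bit word (gicr_ipendr0 and friends) is enough here.
     */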
    uint32_t pend, grpmask, grpmod;

    pend = cs->gicr_ipendr0 | (~cs->edge_trigger & cs->level);
    pend &= cs->gicr_ienabler0;

    if (cs->gic->gicd_ctlr & GICD_CTLR_DS) {
        grpmod = 0;
    } else {
        grpmod = cs->gicr_igrpmodr0;
    }

    grpmask = 0;
    if (cs->gic->gicd_ctlr & GICD_CTLR_EN_GRP1NS) {
        grpmask |= cs->gicr_igroupr0;
    }
    if (cs->gic->gicd_ctlr & GICD_CTLR_EN_GRP1S) {
        grpmask |= (~cs->gicr_igroupr0 & grpmod);
    }
    if (cs->gic->gicd_ctlr & GICD_CTLR_EN_GRP0) {
        grpmask |= (~cs->gicr_igroupr0 & ~grpmod);
    }
    pend &= grpmask;

    return pend;
}

/* Update the interrupt status after state in a redistributor
 * or CPU interface has changed, but don't tell the CPU i/f.
 */
static void gicv3_redist_update_noirqset(GICv3CPUState *cs)
{
    /* Find the highest priority pending interrupt among the
     * redistributor interrupts (SGIs and PPIs).
     */
    bool seenbetter = false;
    uint8_t prio;
    int i;
    uint32_t pend;

    /* Find out which redistributor interrupts are eligible to be
     * signaled to the CPU interface.
     */
    pend = gicr_int_pending(cs);

    if (pend) {
        for (i = 0; i < GIC_INTERNAL; i++) {
            if (!(pend & (1 << i))) {
                continue;
            }
            prio = cs->gicr_ipriorityr[i];
            if (irqbetter(cs, i, prio)) {
                cs->hppi.irq = i;
                cs->hppi.prio = prio;
                seenbetter = true;
            }
        }
    }

    if (seenbetter) {
        cs->hppi.grp = gicv3_irq_group(cs->gic, cs, cs->hppi.irq);
    }

    /* If the best interrupt we just found would preempt whatever
     * was the previous best interrupt before this update, then
     * we know it's definitely the best one now.
     * If we didn't find an interrupt that would preempt the previous
     * best, and the previous best is outside our range (or there was no
     * previous pending interrupt at all), then that is still valid, and
     * we leave it as the best.
     * Otherwise, we need to do a full update (because the previous best
     * interrupt has reduced in priority and any other interrupt could
     * now be the new best one).
     */
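    /* For example (illustrative): if the previous best interrupt was PPI 27
     * and its pending latch has just been cleared, nothing in the scan above
     * necessarily beats the now-stale hppi record, so we fall through to the
     * full update to find the genuine new best interrupt.
     */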
    if (!seenbetter && cs->hppi.prio != 0xff && cs->hppi.irq < GIC_INTERNAL) {
        gicv3_full_update_noirqset(cs->gic);
    }
}

/* Update the GIC status after state in a redistributor or
 * CPU interface has changed, and inform the CPU i/f of
 * its new highest priority pending interrupt.
 */
void gicv3_redist_update(GICv3CPUState *cs)
{
    gicv3_redist_update_noirqset(cs);
    gicv3_cpuif_update(cs);
}

/* Update the GIC status after state in the distributor has
 * changed affecting @len interrupts starting at @start,
 * but don't tell the CPU i/f.
 */
static void gicv3_update_noirqset(GICv3State *s, int start, int len)
{
    int i;
    uint8_t prio;
    uint32_t pend = 0;

    assert(start >= GIC_INTERNAL);
    assert(len > 0);

    for (i = 0; i < s->num_cpu; i++) {
        s->cpu[i].seenbetter = false;
    }

    /* Find the highest priority pending interrupt in this range. */
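    /* Note that gicd_int_pending() returns pending state 32 interrupts at a
     * time, so we only recompute it on the first iteration and whenever the
     * loop crosses a 32-interrupt boundary.
     */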
    for (i = start; i < start + len; i++) {
        GICv3CPUState *cs;

        if (i == start || (i & 0x1f) == 0) {
            /* Calculate the next 32 bits worth of pending status */
            pend = gicd_int_pending(s, i & ~0x1f);
        }

        if (!(pend & (1 << (i & 0x1f)))) {
            continue;
        }
        cs = s->gicd_irouter_target[i];
        if (!cs) {
            /* Interrupts targeting no implemented CPU should remain pending
             * and not be forwarded to any CPU.
             */
            continue;
        }
        prio = s->gicd_ipriority[i];
        if (irqbetter(cs, i, prio)) {
            cs->hppi.irq = i;
            cs->hppi.prio = prio;
            cs->seenbetter = true;
        }
    }

    /* If the best interrupt we just found would preempt whatever
     * was the previous best interrupt before this update, then
     * we know it's definitely the best one now.
     * If we didn't find an interrupt that would preempt the previous
     * best, and the previous best is outside our range (or there was
     * no previous pending interrupt at all), then that
     * is still valid, and we leave it as the best.
     * Otherwise, we need to do a full update (because the previous best
     * interrupt has reduced in priority and any other interrupt could
     * now be the new best one).
     */
    for (i = 0; i < s->num_cpu; i++) {
        GICv3CPUState *cs = &s->cpu[i];

        if (cs->seenbetter) {
            cs->hppi.grp = gicv3_irq_group(cs->gic, cs, cs->hppi.irq);
        }

        if (!cs->seenbetter && cs->hppi.prio != 0xff &&
            cs->hppi.irq >= start && cs->hppi.irq < start + len) {
            gicv3_full_update_noirqset(s);
            break;
        }
    }
}

void gicv3_update(GICv3State *s, int start, int len)
{
    int i;

    gicv3_update_noirqset(s, start, len);
    for (i = 0; i < s->num_cpu; i++) {
        gicv3_cpuif_update(&s->cpu[i]);
    }
}

void gicv3_full_update_noirqset(GICv3State *s)
{
    /* Completely recalculate the GIC status from scratch, but
     * don't update any outbound IRQ lines.
     */
    int i;

    for (i = 0; i < s->num_cpu; i++) {
        s->cpu[i].hppi.prio = 0xff;
    }

    /* Note that we can guarantee that these functions will not
     * recursively call back into gicv3_full_update(), because
     * at each point the "previous best" is always outside the
     * range we ask them to update.
     */
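    /* (Concretely: when gicv3_update_noirqset() runs below, every hppi.prio
     * is still 0xff from the reset above; when gicv3_redist_update_noirqset()
     * runs, any hppi set by the SPI pass refers to an interrupt number
     * >= GIC_INTERNAL, which is outside the SGI/PPI range.)
     */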
    gicv3_update_noirqset(s, GIC_INTERNAL, s->num_irq - GIC_INTERNAL);

    for (i = 0; i < s->num_cpu; i++) {
        gicv3_redist_update_noirqset(&s->cpu[i]);
    }
}

void gicv3_full_update(GICv3State *s)
{
    /* Completely recalculate the GIC status from scratch, including
     * updating outbound IRQ lines.
     */
    int i;

    gicv3_full_update_noirqset(s);
    for (i = 0; i < s->num_cpu; i++) {
        gicv3_cpuif_update(&s->cpu[i]);
    }
}

/* Process a change in an external IRQ input. */
static void gicv3_set_irq(void *opaque, int irq, int level)
{
    /* Meaning of the 'irq' parameter:
     *  [0..N-1] : external interrupts
     *  [N..N+31] : PPI (internal) interrupts for CPU 0
     *  [N+32..N+63] : PPI (internal) interrupts for CPU 1
     *  ...
     */
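    /* Illustrative example: with num_irq == 256 there are 224 external
     * inputs (N == 224), so input 0 raises SPI 32 (the first SPI), input
     * 223 raises SPI 255, input 224 + 25 == 249 raises PPI 25 on CPU 0 and
     * input 224 + 32 + 25 == 281 raises PPI 25 on CPU 1.
     */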
    GICv3State *s = opaque;

    if (irq < (s->num_irq - GIC_INTERNAL)) {
        /* external interrupt (SPI) */
        gicv3_dist_set_irq(s, irq + GIC_INTERNAL, level);
    } else {
        /* per-cpu interrupt (PPI) */
        int cpu;

        irq -= (s->num_irq - GIC_INTERNAL);
        cpu = irq / GIC_INTERNAL;
        irq %= GIC_INTERNAL;
        assert(cpu < s->num_cpu);
        /* Raising SGIs via this function would be a bug in how the board
         * model wires up interrupts.
         */
        assert(irq >= GIC_NR_SGIS);
        gicv3_redist_set_irq(&s->cpu[cpu], irq, level);
    }
}

static void arm_gicv3_post_load(GICv3State *s)
{
    /* Recalculate our cached idea of the current highest priority
     * pending interrupt, but don't set IRQ or FIQ lines.
     */
    gicv3_full_update_noirqset(s);
    /* Repopulate the cache of GICv3CPUState pointers for target CPUs */
    gicv3_cache_all_target_cpustates(s);
}

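/* MMIO ops for the two register frames: the distributor (GICD) entry comes
 * first and the redistributor (GICR) entry second, matching the order in
 * which gicv3_init_irqs_and_mmio() registers the regions.
 */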
static const MemoryRegionOps gic_ops[] = {
    {
        .read_with_attrs = gicv3_dist_read,
        .write_with_attrs = gicv3_dist_write,
        .endianness = DEVICE_NATIVE_ENDIAN,
    },
    {
        .read_with_attrs = gicv3_redist_read,
        .write_with_attrs = gicv3_redist_write,
        .endianness = DEVICE_NATIVE_ENDIAN,
    }
};

static void arm_gic_realize(DeviceState *dev, Error **errp)
{
    /* Device instance realize function for the GIC sysbus device */
    GICv3State *s = ARM_GICV3(dev);
    ARMGICv3Class *agc = ARM_GICV3_GET_CLASS(s);
    Error *local_err = NULL;

    agc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    gicv3_init_irqs_and_mmio(s, gicv3_set_irq, gic_ops);

    gicv3_init_cpuif(s);
}

static void arm_gicv3_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    ARMGICv3CommonClass *agcc = ARM_GICV3_COMMON_CLASS(klass);
    ARMGICv3Class *agc = ARM_GICV3_CLASS(klass);

    agcc->post_load = arm_gicv3_post_load;
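    /* Standard QEMU subclassing pattern: remember the parent class's realize
     * method so that arm_gic_realize() can chain up to it before doing the
     * setup specific to this GICv3 model.
     */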
    agc->parent_realize = dc->realize;
    dc->realize = arm_gic_realize;
}

static const TypeInfo arm_gicv3_info = {
    .name = TYPE_ARM_GICV3,
    .parent = TYPE_ARM_GICV3_COMMON,
    .instance_size = sizeof(GICv3State),
    .class_init = arm_gicv3_class_init,
    .class_size = sizeof(ARMGICv3Class),
};

static void arm_gicv3_register_types(void)
{
    type_register_static(&arm_gicv3_info);
}

type_init(arm_gicv3_register_types)