/*
 * ARM Generic Interrupt Controller v3 (emulation)
 *
 * Copyright (c) 2015 Huawei.
 * Copyright (c) 2016 Linaro Limited
 * Written by Shlomo Pongratz, Peter Maydell
 *
 * This code is licensed under the GPL, version 2 or (at your option)
 * any later version.
 */

/* This file contains implementation code for an interrupt controller
 * which implements the GICv3 architecture. Specifically this is where
 * the device class itself and the functions for handling interrupts
 * coming in and going out live.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/module.h"
#include "hw/intc/arm_gicv3.h"
#include "gicv3_internal.h"

static bool irqbetter(GICv3CPUState *cs, int irq, uint8_t prio)
{
    /* Return true if this IRQ at this priority should take
     * precedence over the current recorded highest priority
     * pending interrupt for this CPU. We also return true if
     * the current recorded highest priority pending interrupt
     * is the same as this one (a property which the calling code
     * relies on).
     */
    if (prio < cs->hppi.prio) {
        return true;
    }
    /* If multiple pending interrupts have the same priority then it is an
     * IMPDEF choice which of them to signal to the CPU. We choose to
     * signal the one with the lowest interrupt number.
     */
    if (prio == cs->hppi.prio && irq <= cs->hppi.irq) {
        return true;
    }
    return false;
}
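
/* Worked example (hypothetical values): if cs->hppi is currently
 * {irq = 40, prio = 0x80}, then irq 32 at prio 0x40 wins (a numerically
 * lower priority value means a higher priority), irq 32 at prio 0x80
 * also wins (equal priority, lower INTID), but irq 48 at prio 0x80
 * does not (equal priority, higher INTID).
 */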

static uint32_t gicd_int_pending(GICv3State *s, int irq)
{
    /* Recalculate which distributor interrupts are actually pending
     * in the group of 32 interrupts starting at irq (which should be a multiple
     * of 32), and return a 32-bit integer which has a bit set for each
     * interrupt that is eligible to be signaled to the CPU interface.
     *
     * An interrupt is pending if:
     *  + the PENDING latch is set OR it is level triggered and the input is 1
     *  + its ENABLE bit is set
     *  + the GICD enable bit for its group is set
     *  + its ACTIVE bit is not set (otherwise it would be Active+Pending)
     * Conveniently we can bulk-calculate this with bitwise operations.
     */
    uint32_t pend, grpmask;
    uint32_t pending = *gic_bmp_ptr32(s->pending, irq);
    uint32_t edge_trigger = *gic_bmp_ptr32(s->edge_trigger, irq);
    uint32_t level = *gic_bmp_ptr32(s->level, irq);
    uint32_t group = *gic_bmp_ptr32(s->group, irq);
    uint32_t grpmod = *gic_bmp_ptr32(s->grpmod, irq);
    uint32_t enable = *gic_bmp_ptr32(s->enabled, irq);
    uint32_t active = *gic_bmp_ptr32(s->active, irq);

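    /* Worked example of the bulk calculation below, for a hypothetical
     * bit 0 of each bitmap: pending = 0, edge_trigger = 0, level = 1
     * gives pend bit 0 = 1 (level-triggered input asserted); it is then
     * cleared again unless enable bit 0 is 1 and active bit 0 is 0.
     */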
    pend = pending | (~edge_trigger & level);
    pend &= enable;
    pend &= ~active;

    if (s->gicd_ctlr & GICD_CTLR_DS) {
        grpmod = 0;
    }

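    /* The (group, grpmod) bit pair encodes which interrupt group each
     * interrupt is in: group == 1 means Non-secure Group 1, and
     * group == 0 means Secure Group 1 or Group 0 depending on grpmod
     * (with grpmod forced to 0 above when GICD_CTLR.DS says there is
     * no security distinction). Build a mask of the interrupts whose
     * group is currently enabled in GICD_CTLR.
     */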
    grpmask = 0;
    if (s->gicd_ctlr & GICD_CTLR_EN_GRP1NS) {
        grpmask |= group;
    }
    if (s->gicd_ctlr & GICD_CTLR_EN_GRP1S) {
        grpmask |= (~group & grpmod);
    }
    if (s->gicd_ctlr & GICD_CTLR_EN_GRP0) {
        grpmask |= (~group & ~grpmod);
    }
    pend &= grpmask;

    return pend;
}

static uint32_t gicr_int_pending(GICv3CPUState *cs)
{
    /* Recalculate which redistributor interrupts are actually pending,
     * and return a 32-bit integer which has a bit set for each interrupt
     * that is eligible to be signaled to the CPU interface.
     *
     * An interrupt is pending if:
     *  + the PENDING latch is set OR it is level triggered and the input is 1
     *  + its ENABLE bit is set
     *  + the GICD enable bit for its group is set
     *  + its ACTIVE bit is not set (otherwise it would be Active+Pending)
     * Conveniently we can bulk-calculate this with bitwise operations.
     */
    uint32_t pend, grpmask, grpmod;

    pend = cs->gicr_ipendr0 | (~cs->edge_trigger & cs->level);
    pend &= cs->gicr_ienabler0;
    pend &= ~cs->gicr_iactiver0;

    if (cs->gic->gicd_ctlr & GICD_CTLR_DS) {
        grpmod = 0;
    } else {
        grpmod = cs->gicr_igrpmodr0;
    }

    grpmask = 0;
    if (cs->gic->gicd_ctlr & GICD_CTLR_EN_GRP1NS) {
        grpmask |= cs->gicr_igroupr0;
    }
    if (cs->gic->gicd_ctlr & GICD_CTLR_EN_GRP1S) {
        grpmask |= (~cs->gicr_igroupr0 & grpmod);
    }
    if (cs->gic->gicd_ctlr & GICD_CTLR_EN_GRP0) {
        grpmask |= (~cs->gicr_igroupr0 & ~grpmod);
    }
    pend &= grpmask;

    return pend;
}

/* Update the interrupt status after state in a redistributor
 * or CPU interface has changed, but don't tell the CPU i/f.
 */
static void gicv3_redist_update_noirqset(GICv3CPUState *cs)
{
    /* Find the highest priority pending interrupt among the
     * redistributor interrupts (SGIs and PPIs).
     */
    bool seenbetter = false;
    uint8_t prio;
    int i;
    uint32_t pend;

    /* Find out which redistributor interrupts are eligible to be
     * signaled to the CPU interface.
     */
    pend = gicr_int_pending(cs);

    if (pend) {
        for (i = 0; i < GIC_INTERNAL; i++) {
            if (!(pend & (1 << i))) {
                continue;
            }
            prio = cs->gicr_ipriorityr[i];
            if (irqbetter(cs, i, prio)) {
                cs->hppi.irq = i;
                cs->hppi.prio = prio;
                seenbetter = true;
            }
        }
    }

    if (seenbetter) {
        cs->hppi.grp = gicv3_irq_group(cs->gic, cs, cs->hppi.irq);
    }

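    /* Also consider the best pending LPI, which is tracked separately
     * in cs->hpplpi (LPI state lives in guest memory tables rather
     * than in the bitmaps scanned above). LPIs are always Non-secure
     * Group 1, so they are only eligible when that group is enabled
     * and the redistributor has LPIs enabled.
     */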
    if ((cs->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) && cs->gic->lpi_enable &&
        (cs->gic->gicd_ctlr & GICD_CTLR_EN_GRP1NS) &&
        (cs->hpplpi.prio != 0xff)) {
        if (irqbetter(cs, cs->hpplpi.irq, cs->hpplpi.prio)) {
            cs->hppi.irq = cs->hpplpi.irq;
            cs->hppi.prio = cs->hpplpi.prio;
            cs->hppi.grp = cs->hpplpi.grp;
            seenbetter = true;
        }
    }

    /* If the best interrupt we just found would preempt whatever
     * was the previous best interrupt before this update, then
     * we know it's definitely the best one now.
     * If we didn't find an interrupt that would preempt the previous
     * best, and the previous best is outside our range (or there was no
     * previous pending interrupt at all), then that is still valid, and
     * we leave it as the best.
     * Otherwise, we need to do a full update (because the previous best
     * interrupt has reduced in priority and any other interrupt could
     * now be the new best one).
     */
    if (!seenbetter && cs->hppi.prio != 0xff &&
        (cs->hppi.irq < GIC_INTERNAL ||
         cs->hppi.irq >= GICV3_LPI_INTID_START)) {
        gicv3_full_update_noirqset(cs->gic);
    }
}

/* Update the GIC status after state in a redistributor or
 * CPU interface has changed, and inform the CPU i/f of
 * its new highest priority pending interrupt.
 */
void gicv3_redist_update(GICv3CPUState *cs)
{
    gicv3_redist_update_noirqset(cs);
    gicv3_cpuif_update(cs);
}

/* Update the GIC status after state in the distributor has
 * changed affecting @len interrupts starting at @start,
 * but don't tell the CPU i/f.
 */
static void gicv3_update_noirqset(GICv3State *s, int start, int len)
{
    int i;
    uint8_t prio;
    uint32_t pend = 0;

    assert(start >= GIC_INTERNAL);
    assert(len > 0);

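    /* The per-CPU seenbetter flags track, for each CPU, whether this
     * scan found an interrupt that beats its previously recorded best;
     * start by clearing them all.
     */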
    for (i = 0; i < s->num_cpu; i++) {
        s->cpu[i].seenbetter = false;
    }

    /* Find the highest priority pending interrupt in this range. */
    for (i = start; i < start + len; i++) {
        GICv3CPUState *cs;

        if (i == start || (i & 0x1f) == 0) {
            /* Calculate the next 32 bits worth of pending status */
            pend = gicd_int_pending(s, i & ~0x1f);
        }

        if (!(pend & (1 << (i & 0x1f)))) {
            continue;
        }
        cs = s->gicd_irouter_target[i];
        if (!cs) {
            /* Interrupts targeting no implemented CPU should remain pending
             * and not be forwarded to any CPU.
             */
            continue;
        }
        prio = s->gicd_ipriority[i];
        if (irqbetter(cs, i, prio)) {
            cs->hppi.irq = i;
            cs->hppi.prio = prio;
            cs->seenbetter = true;
        }
    }

    /* If the best interrupt we just found would preempt whatever
     * was the previous best interrupt before this update, then
     * we know it's definitely the best one now.
     * If we didn't find an interrupt that would preempt the previous
     * best, and the previous best is outside our range (or there was
     * no previous pending interrupt at all), then that
     * is still valid, and we leave it as the best.
     * Otherwise, we need to do a full update (because the previous best
     * interrupt has reduced in priority and any other interrupt could
     * now be the new best one).
     */
    for (i = 0; i < s->num_cpu; i++) {
        GICv3CPUState *cs = &s->cpu[i];

        if (cs->seenbetter) {
            cs->hppi.grp = gicv3_irq_group(cs->gic, cs, cs->hppi.irq);
        }

        if (!cs->seenbetter && cs->hppi.prio != 0xff &&
            cs->hppi.irq >= start && cs->hppi.irq < start + len) {
            gicv3_full_update_noirqset(s);
            break;
        }
    }
}

void gicv3_update(GICv3State *s, int start, int len)
{
    int i;

    gicv3_update_noirqset(s, start, len);
    for (i = 0; i < s->num_cpu; i++) {
        gicv3_cpuif_update(&s->cpu[i]);
    }
}

void gicv3_full_update_noirqset(GICv3State *s)
{
    /* Completely recalculate the GIC status from scratch, but
     * don't update any outbound IRQ lines.
     */
    int i;

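    /* 0xff is the lowest possible priority value and serves as the
     * "no pending interrupt" sentinel; reset every CPU to that idle
     * state before rescanning everything.
     */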
    for (i = 0; i < s->num_cpu; i++) {
        s->cpu[i].hppi.prio = 0xff;
    }

    /* Note that we can guarantee that these functions will not
     * recursively call back into gicv3_full_update(), because
     * at each point the "previous best" is always outside the
     * range we ask them to update.
     */
    gicv3_update_noirqset(s, GIC_INTERNAL, s->num_irq - GIC_INTERNAL);

    for (i = 0; i < s->num_cpu; i++) {
        gicv3_redist_update_noirqset(&s->cpu[i]);
    }
}

void gicv3_full_update(GICv3State *s)
{
    /* Completely recalculate the GIC status from scratch, including
     * updating outbound IRQ lines.
     */
    int i;

    gicv3_full_update_noirqset(s);
    for (i = 0; i < s->num_cpu; i++) {
        gicv3_cpuif_update(&s->cpu[i]);
    }
}

/* Process a change in an external IRQ input. */
static void gicv3_set_irq(void *opaque, int irq, int level)
{
    /* Meaning of the 'irq' parameter:
     *  [0..N-1] : external interrupts
     *  [N..N+31] : PPI (internal) interrupts for CPU 0
     *  [N+32..N+63] : PPI (internal) interrupts for CPU 1
     *  ...
     */
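    /* For example, with a hypothetical configuration of num_irq == 256
     * (so N == 224): input 200 maps to SPI 232 in the distributor,
     * while input 244 maps to PPI 20 on CPU 0.
     */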
    GICv3State *s = opaque;

    if (irq < (s->num_irq - GIC_INTERNAL)) {
        /* external interrupt (SPI) */
        gicv3_dist_set_irq(s, irq + GIC_INTERNAL, level);
    } else {
        /* per-cpu interrupt (PPI) */
        int cpu;

        irq -= (s->num_irq - GIC_INTERNAL);
        cpu = irq / GIC_INTERNAL;
        irq %= GIC_INTERNAL;
        assert(cpu < s->num_cpu);
        /* Raising SGIs via this function would be a bug in how the board
         * model wires up interrupts.
         */
        assert(irq >= GIC_NR_SGIS);
        gicv3_redist_set_irq(&s->cpu[cpu], irq, level);
    }
}

static void arm_gicv3_post_load(GICv3State *s)
{
    int i;
    /* Recalculate our cached idea of the current highest priority
     * pending interrupt, but don't set IRQ or FIQ lines.
     */
    for (i = 0; i < s->num_cpu; i++) {
        gicv3_redist_update_lpi_only(&s->cpu[i]);
    }
    gicv3_full_update_noirqset(s);
    /* Repopulate the cache of GICv3CPUState pointers for target CPUs */
    gicv3_cache_all_target_cpustates(s);
}

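/* MMIO register access handlers: the first entry services the
 * distributor frame (gicv3_dist_read/write) and the second the
 * redistributor frames (gicv3_redist_read/write).
 */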
static const MemoryRegionOps gic_ops[] = {
    {
        .read_with_attrs = gicv3_dist_read,
        .write_with_attrs = gicv3_dist_write,
        .endianness = DEVICE_NATIVE_ENDIAN,
        .valid.min_access_size = 1,
        .valid.max_access_size = 8,
        .impl.min_access_size = 1,
        .impl.max_access_size = 8,
    },
    {
        .read_with_attrs = gicv3_redist_read,
        .write_with_attrs = gicv3_redist_write,
        .endianness = DEVICE_NATIVE_ENDIAN,
        .valid.min_access_size = 1,
        .valid.max_access_size = 8,
        .impl.min_access_size = 1,
        .impl.max_access_size = 8,
    }
};

static void arm_gic_realize(DeviceState *dev, Error **errp)
{
    /* Device instance realize function for the GIC sysbus device */
    GICv3State *s = ARM_GICV3(dev);
    ARMGICv3Class *agc = ARM_GICV3_GET_CLASS(s);
    Error *local_err = NULL;

    agc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    gicv3_init_irqs_and_mmio(s, gicv3_set_irq, gic_ops);

    gicv3_init_cpuif(s);
}

static void arm_gicv3_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    ARMGICv3CommonClass *agcc = ARM_GICV3_COMMON_CLASS(klass);
    ARMGICv3Class *agc = ARM_GICV3_CLASS(klass);

    agcc->post_load = arm_gicv3_post_load;
    device_class_set_parent_realize(dc, arm_gic_realize, &agc->parent_realize);
}

static const TypeInfo arm_gicv3_info = {
    .name = TYPE_ARM_GICV3,
    .parent = TYPE_ARM_GICV3_COMMON,
    .instance_size = sizeof(GICv3State),
    .class_init = arm_gicv3_class_init,
    .class_size = sizeof(ARMGICv3Class),
};

static void arm_gicv3_register_types(void)
{
    type_register_static(&arm_gicv3_info);
}

type_init(arm_gicv3_register_types)