1 /*
2 * ARM Generic Interrupt Controller v3 (emulation)
3 *
4 * Copyright (c) 2015 Huawei.
5 * Copyright (c) 2016 Linaro Limited
6 * Written by Shlomo Pongratz, Peter Maydell
7 *
8 * This code is licensed under the GPL, version 2 or (at your option)
9 * any later version.
10 */
11
12 /* This file contains implementation code for an interrupt controller
13 * which implements the GICv3 architecture. Specifically this is where
14 * the device class itself and the functions for handling interrupts
15 * coming in and going out live.
16 */
17
18 #include "qemu/osdep.h"
19 #include "qapi/error.h"
20 #include "qemu/module.h"
21 #include "hw/intc/arm_gicv3.h"
22 #include "gicv3_internal.h"
23
static bool irqbetter(GICv3CPUState *cs, int irq, uint8_t prio, bool nmi)
{
    /* Return true if this IRQ at this priority should take
     * precedence over the current recorded highest priority
     * pending interrupt for this CPU. We also return true if
     * the current recorded highest priority pending interrupt
     * is the same as this one (a property which the calling code
     * relies on).
     */

    /* Distinct priorities: lower value wins (0 is highest priority). */
    if (prio != cs->hppi.prio) {
        return prio < cs->hppi.prio;
    }

    /*
     * At equal priority, an interrupt with the non-maskable property
     * beats one without it, as it has a priority effectively higher
     * than the labelled 0x80 or 0x00.
     */
    if (nmi != cs->hppi.nmi) {
        return nmi;
    }

    /* If multiple pending interrupts have the same priority then it is an
     * IMPDEF choice which of them to signal to the CPU. We choose to
     * signal the one with the lowest interrupt number.
     */
    return irq <= cs->hppi.irq;
}
54
static uint32_t gicd_int_pending(GICv3State *s, int irq)
{
    /* Recalculate which distributor interrupts are actually pending
     * in the group of 32 interrupts starting at irq (which should be a
     * multiple of 32), and return a 32-bit integer which has a bit set for
     * each interrupt that is eligible to be signaled to the CPU interface.
     *
     * An interrupt is eligible if:
     * + the PENDING latch is set OR it is level triggered and the input is 1
     * + its ENABLE bit is set
     * + the GICD enable bit for its group is set
     * + its ACTIVE bit is not set (otherwise it would be Active+Pending)
     * All 32 interrupts are computed at once with bitwise operations.
     */
    uint32_t pending = *gic_bmp_ptr32(s->pending, irq);
    uint32_t edge_trigger = *gic_bmp_ptr32(s->edge_trigger, irq);
    uint32_t level = *gic_bmp_ptr32(s->level, irq);
    uint32_t group = *gic_bmp_ptr32(s->group, irq);
    uint32_t grpmod = *gic_bmp_ptr32(s->grpmod, irq);
    uint32_t enable = *gic_bmp_ptr32(s->enabled, irq);
    uint32_t active = *gic_bmp_ptr32(s->active, irq);
    uint32_t eligible, grpmask;

    eligible = (pending | (level & ~edge_trigger)) & enable & ~active;

    /* With Disable Security, behave as if GICD_IGRPMODR is all zeroes */
    if (s->gicd_ctlr & GICD_CTLR_DS) {
        grpmod = 0;
    }

    /* Build the mask of interrupts whose group is currently enabled.
     * group/grpmod decode: 1/x = NS Group 1, 0/1 = Secure Group 1,
     * 0/0 = Group 0.
     */
    grpmask = 0;
    if (s->gicd_ctlr & GICD_CTLR_EN_GRP1NS) {
        grpmask |= group;
    }
    if (s->gicd_ctlr & GICD_CTLR_EN_GRP1S) {
        grpmask |= (~group & grpmod);
    }
    if (s->gicd_ctlr & GICD_CTLR_EN_GRP0) {
        grpmask |= (~group & ~grpmod);
    }

    return eligible & grpmask;
}
100
static uint32_t gicr_int_pending(GICv3CPUState *cs)
{
    /* Recalculate which redistributor interrupts (SGIs and PPIs) are
     * actually pending, and return a 32-bit integer which has a bit set
     * for each interrupt that is eligible to be signaled to the CPU
     * interface.
     *
     * An interrupt is eligible if:
     * + the PENDING latch is set OR it is level triggered and the input is 1
     * + its ENABLE bit is set
     * + the GICD enable bit for its group is set
     * + its ACTIVE bit is not set (otherwise it would be Active+Pending)
     * All 32 interrupts are computed at once with bitwise operations.
     */
    uint32_t eligible, grpmask, grpmod;

    eligible = (cs->gicr_ipendr0 | (cs->level & ~cs->edge_trigger)) &
        cs->gicr_ienabler0 & ~cs->gicr_iactiver0;

    /* With Disable Security, behave as if GICR_IGRPMODR0 is zero */
    grpmod = (cs->gic->gicd_ctlr & GICD_CTLR_DS) ? 0 : cs->gicr_igrpmodr0;

    /* Build the mask of interrupts whose group is currently enabled.
     * group/grpmod decode: 1/x = NS Group 1, 0/1 = Secure Group 1,
     * 0/0 = Group 0.
     */
    grpmask = 0;
    if (cs->gic->gicd_ctlr & GICD_CTLR_EN_GRP1NS) {
        grpmask |= cs->gicr_igroupr0;
    }
    if (cs->gic->gicd_ctlr & GICD_CTLR_EN_GRP1S) {
        grpmask |= (~cs->gicr_igroupr0 & grpmod);
    }
    if (cs->gic->gicd_ctlr & GICD_CTLR_EN_GRP0) {
        grpmask |= (~cs->gicr_igroupr0 & ~grpmod);
    }

    return eligible & grpmask;
}
140
static bool gicv3_get_priority(GICv3CPUState *cs, bool is_redist, int irq,
                               uint8_t *prio)
{
    /* Look up the effective priority of @irq (a redistributor interrupt
     * if @is_redist, otherwise a distributor interrupt) and store it in
     * *@prio. Returns true if the interrupt has the non-maskable (NMI)
     * property, false otherwise.
     */
    bool nmi;

    if (is_redist) {
        nmi = extract32(cs->gicr_inmir0, irq, 1);
    } else {
        nmi = *gic_bmp_ptr32(cs->gic->nmi, irq) & (1 << (irq & 0x1f));
    }

    if (!nmi) {
        /* Ordinary interrupt: use the programmed priority register */
        *prio = is_redist ? cs->gicr_ipriorityr[irq]
                          : cs->gic->gicd_ipriority[irq];
        return false;
    }

    /* NMI: the priority is fixed. When DS = 0 a Non-secure (Group 1 NS)
     * NMI takes priority 0x80; otherwise the NMI takes priority 0x0.
     */
    if (!(cs->gic->gicd_ctlr & GICD_CTLR_DS) &&
        ((is_redist && extract32(cs->gicr_igroupr0, irq, 1)) ||
         (!is_redist && gicv3_gicd_group_test(cs->gic, irq)))) {
        *prio = 0x80;
    } else {
        *prio = 0x0;
    }

    return true;
}
174
175 /* Update the interrupt status after state in a redistributor
176 * or CPU interface has changed, but don't tell the CPU i/f.
177 */
static void gicv3_redist_update_noirqset(GICv3CPUState *cs)
{
    /* Find the highest priority pending interrupt among the
     * redistributor interrupts (SGIs and PPIs).
     */
    bool seenbetter = false;
    uint8_t prio;
    int i;
    uint32_t pend;
    bool nmi = false;

    /* Find out which redistributor interrupts are eligible to be
     * signaled to the CPU interface.
     */
    pend = gicr_int_pending(cs);

    if (pend) {
        for (i = 0; i < GIC_INTERNAL; i++) {
            if (!(pend & (1 << i))) {
                continue;
            }
            /* gicv3_get_priority() returns true iff this IRQ is an NMI */
            nmi = gicv3_get_priority(cs, true, i, &prio);
            if (irqbetter(cs, i, prio, nmi)) {
                cs->hppi.irq = i;
                cs->hppi.prio = prio;
                cs->hppi.nmi = nmi;
                seenbetter = true;
            }
        }
    }

    if (seenbetter) {
        cs->hppi.grp = gicv3_irq_group(cs->gic, cs, cs->hppi.irq);
    }

    /* The cached highest priority pending LPI may also be the new best;
     * it only competes if LPIs are enabled at both the redistributor
     * (GICR_CTLR.EnableLPIs) and the device, the NS Group 1 distributor
     * enable is set, and there actually is a pending LPI (prio != 0xff).
     */
    if ((cs->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) && cs->gic->lpi_enable &&
        (cs->gic->gicd_ctlr & GICD_CTLR_EN_GRP1NS) &&
        (cs->hpplpi.prio != 0xff)) {
        if (irqbetter(cs, cs->hpplpi.irq, cs->hpplpi.prio, cs->hpplpi.nmi)) {
            cs->hppi.irq = cs->hpplpi.irq;
            cs->hppi.prio = cs->hpplpi.prio;
            cs->hppi.nmi = cs->hpplpi.nmi;
            cs->hppi.grp = cs->hpplpi.grp;
            seenbetter = true;
        }
    }

    /* If the best interrupt we just found would preempt whatever
     * was the previous best interrupt before this update, then
     * we know it's definitely the best one now.
     * If we didn't find an interrupt that would preempt the previous
     * best, and the previous best is outside our range (or there was no
     * previous pending interrupt at all), then that is still valid, and
     * we leave it as the best.
     * Otherwise, we need to do a full update (because the previous best
     * interrupt has reduced in priority and any other interrupt could
     * now be the new best one).
     */
    if (!seenbetter && cs->hppi.prio != 0xff &&
        (cs->hppi.irq < GIC_INTERNAL ||
         cs->hppi.irq >= GICV3_LPI_INTID_START)) {
        gicv3_full_update_noirqset(cs->gic);
    }
}
242
243 /* Update the GIC status after state in a redistributor or
244 * CPU interface has changed, and inform the CPU i/f of
245 * its new highest priority pending interrupt.
246 */
void gicv3_redist_update(GICv3CPUState *cs)
{
    /* Recompute the highest priority pending interrupt, then tell the
     * CPU interface about it (the _noirqset variant does not do that).
     */
    gicv3_redist_update_noirqset(cs);
    gicv3_cpuif_update(cs);
}
252
253 /* Update the GIC status after state in the distributor has
254 * changed affecting @len interrupts starting at @start,
255 * but don't tell the CPU i/f.
256 */
static void gicv3_update_noirqset(GICv3State *s, int start, int len)
{
    /* Recompute the per-CPU highest priority pending interrupt after a
     * change to distributor state affecting @len interrupts starting at
     * @start, without informing the CPU interfaces.
     */
    int i;
    uint8_t prio;
    uint32_t pend = 0;
    bool nmi = false;

    /* Only SPIs live in the distributor; SGIs/PPIs belong to the
     * redistributors, so the range must not dip below GIC_INTERNAL.
     */
    assert(start >= GIC_INTERNAL);
    assert(len > 0);

    for (i = 0; i < s->num_cpu; i++) {
        s->cpu[i].seenbetter = false;
    }

    /* Find the highest priority pending interrupt in this range. */
    for (i = start; i < start + len; i++) {
        GICv3CPUState *cs;

        if (i == start || (i & 0x1f) == 0) {
            /* Calculate the next 32 bits worth of pending status */
            pend = gicd_int_pending(s, i & ~0x1f);
        }

        if (!(pend & (1 << (i & 0x1f)))) {
            continue;
        }
        cs = s->gicd_irouter_target[i];
        if (!cs) {
            /* Interrupts targeting no implemented CPU should remain pending
             * and not be forwarded to any CPU.
             */
            continue;
        }
        /* gicv3_get_priority() returns true iff this IRQ is an NMI */
        nmi = gicv3_get_priority(cs, false, i, &prio);
        if (irqbetter(cs, i, prio, nmi)) {
            cs->hppi.irq = i;
            cs->hppi.prio = prio;
            cs->hppi.nmi = nmi;
            cs->seenbetter = true;
        }
    }

    /* If the best interrupt we just found would preempt whatever
     * was the previous best interrupt before this update, then
     * we know it's definitely the best one now.
     * If we didn't find an interrupt that would preempt the previous
     * best, and the previous best is outside our range (or there was
     * no previous pending interrupt at all), then that
     * is still valid, and we leave it as the best.
     * Otherwise, we need to do a full update (because the previous best
     * interrupt has reduced in priority and any other interrupt could
     * now be the new best one).
     */
    for (i = 0; i < s->num_cpu; i++) {
        GICv3CPUState *cs = &s->cpu[i];

        if (cs->seenbetter) {
            cs->hppi.grp = gicv3_irq_group(cs->gic, cs, cs->hppi.irq);
        }

        if (!cs->seenbetter && cs->hppi.prio != 0xff &&
            cs->hppi.irq >= start && cs->hppi.irq < start + len) {
            /* Previous best fell inside the changed range and nothing
             * replaced it, so recompute from scratch for every CPU.
             */
            gicv3_full_update_noirqset(s);
            break;
        }
    }
}
324
void gicv3_update(GICv3State *s, int start, int len)
{
    /* Update the GIC status for a change to @len distributor interrupts
     * starting at @start, and inform every CPU interface of its new
     * highest priority pending interrupt.
     */
    int cpuidx;

    gicv3_update_noirqset(s, start, len);
    for (cpuidx = 0; cpuidx < s->num_cpu; cpuidx++) {
        gicv3_cpuif_update(&s->cpu[cpuidx]);
    }
}
334
void gicv3_full_update_noirqset(GICv3State *s)
{
    /* Completely recalculate the GIC status from scratch, but
     * don't update any outbound IRQ lines.
     */
    int cpuidx;

    /* Reset every CPU to "no pending interrupt" (priority 0xff is the
     * idle value) before rescanning all interrupt state.
     */
    for (cpuidx = 0; cpuidx < s->num_cpu; cpuidx++) {
        s->cpu[cpuidx].hppi.prio = 0xff;
        s->cpu[cpuidx].hppi.nmi = false;
    }

    /* Note that we can guarantee that these functions will not
     * recursively call back into gicv3_full_update(), because
     * at each point the "previous best" is always outside the
     * range we ask them to update.
     */
    gicv3_update_noirqset(s, GIC_INTERNAL, s->num_irq - GIC_INTERNAL);

    for (cpuidx = 0; cpuidx < s->num_cpu; cpuidx++) {
        gicv3_redist_update_noirqset(&s->cpu[cpuidx]);
    }
}
358
void gicv3_full_update(GICv3State *s)
{
    /* Completely recalculate the GIC status from scratch, then update
     * the outbound IRQ lines of every CPU interface.
     */
    int cpuidx;

    gicv3_full_update_noirqset(s);
    for (cpuidx = 0; cpuidx < s->num_cpu; cpuidx++) {
        gicv3_cpuif_update(&s->cpu[cpuidx]);
    }
}
371
372 /* Process a change in an external IRQ input. */
gicv3_set_irq(void * opaque,int irq,int level)373 static void gicv3_set_irq(void *opaque, int irq, int level)
374 {
375 /* Meaning of the 'irq' parameter:
376 * [0..N-1] : external interrupts
377 * [N..N+31] : PPI (internal) interrupts for CPU 0
378 * [N+32..N+63] : PPI (internal interrupts for CPU 1
379 * ...
380 */
381 GICv3State *s = opaque;
382
383 if (irq < (s->num_irq - GIC_INTERNAL)) {
384 /* external interrupt (SPI) */
385 gicv3_dist_set_irq(s, irq + GIC_INTERNAL, level);
386 } else {
387 /* per-cpu interrupt (PPI) */
388 int cpu;
389
390 irq -= (s->num_irq - GIC_INTERNAL);
391 cpu = irq / GIC_INTERNAL;
392 irq %= GIC_INTERNAL;
393 assert(cpu < s->num_cpu);
394 /* Raising SGIs via this function would be a bug in how the board
395 * model wires up interrupts.
396 */
397 assert(irq >= GIC_NR_SGIS);
398 gicv3_redist_set_irq(&s->cpu[cpu], irq, level);
399 }
400 }
401
static void arm_gicv3_post_load(GICv3State *s)
{
    /* Called after migration: rebuild all cached derived state.
     * Recalculate our cached idea of the current highest priority
     * pending interrupt, but don't set IRQ or FIQ lines.
     */
    int cpuidx;

    for (cpuidx = 0; cpuidx < s->num_cpu; cpuidx++) {
        gicv3_redist_update_lpi_only(&s->cpu[cpuidx]);
    }
    gicv3_full_update_noirqset(s);
    /* Repopulate the cache of GICv3CPUState pointers for target CPUs */
    gicv3_cache_all_target_cpustates(s);
}
415
/* MMIO ops for the two register frames: index 0 is the distributor
 * (GICD), index 1 the redistributor (GICR) regions. Guest accesses of
 * 1 to 8 bytes are accepted and passed through at their original size.
 */
static const MemoryRegionOps gic_ops[] = {
    {
        /* Distributor register frame */
        .read_with_attrs = gicv3_dist_read,
        .write_with_attrs = gicv3_dist_write,
        .endianness = DEVICE_NATIVE_ENDIAN,
        .valid.min_access_size = 1,
        .valid.max_access_size = 8,
        .impl.min_access_size = 1,
        .impl.max_access_size = 8,
    },
    {
        /* Redistributor register frame */
        .read_with_attrs = gicv3_redist_read,
        .write_with_attrs = gicv3_redist_write,
        .endianness = DEVICE_NATIVE_ENDIAN,
        .valid.min_access_size = 1,
        .valid.max_access_size = 8,
        .impl.min_access_size = 1,
        .impl.max_access_size = 8,
    }
};
436
arm_gic_realize(DeviceState * dev,Error ** errp)437 static void arm_gic_realize(DeviceState *dev, Error **errp)
438 {
439 /* Device instance realize function for the GIC sysbus device */
440 GICv3State *s = ARM_GICV3(dev);
441 ARMGICv3Class *agc = ARM_GICV3_GET_CLASS(s);
442 Error *local_err = NULL;
443
444 agc->parent_realize(dev, &local_err);
445 if (local_err) {
446 error_propagate(errp, local_err);
447 return;
448 }
449
450 gicv3_init_irqs_and_mmio(s, gicv3_set_irq, gic_ops);
451
452 gicv3_init_cpuif(s);
453 }
454
static void arm_gicv3_class_init(ObjectClass *klass, void *data)
{
    /* Class init: chain our realize after the common GICv3 base class
     * realize, and install the post-migration-load hook.
     */
    ARMGICv3Class *agc = ARM_GICV3_CLASS(klass);
    ARMGICv3CommonClass *agcc = ARM_GICV3_COMMON_CLASS(klass);
    DeviceClass *dc = DEVICE_CLASS(klass);

    device_class_set_parent_realize(dc, arm_gic_realize, &agc->parent_realize);
    agcc->post_load = arm_gicv3_post_load;
}
464
/* QOM type registration data for the emulated (non-KVM) GICv3 device */
static const TypeInfo arm_gicv3_info = {
    .name = TYPE_ARM_GICV3,
    .parent = TYPE_ARM_GICV3_COMMON,
    .instance_size = sizeof(GICv3State),
    .class_init = arm_gicv3_class_init,
    .class_size = sizeof(ARMGICv3Class),
};
472
/* Register the GICv3 device type with the QOM type system */
static void arm_gicv3_register_types(void)
{
    type_register_static(&arm_gicv3_info);
}

type_init(arm_gicv3_register_types)
479