xref: /openbmc/qemu/hw/intc/arm_gic.c (revision a1d7b8d896f98139dff177508895e55caf483f95)
1 /*
2  * ARM Generic/Distributed Interrupt Controller
3  *
4  * Copyright (c) 2006-2007 CodeSourcery.
5  * Written by Paul Brook
6  *
7  * This code is licensed under the GPL.
8  */
9 
10 /* This file contains implementation code for the RealView EB interrupt
11  * controller, MPCore distributed interrupt controller and ARMv7-M
12  * Nested Vectored Interrupt Controller.
13  * It is compiled in two ways:
14  *  (1) as a standalone file to produce a sysbus device which is a GIC
15  *  that can be used on the realview board and as one of the builtin
16  *  private peripherals for the ARM MP CPUs (11MPCore, A9, etc)
17  *  (2) by being directly #included into armv7m_nvic.c to produce the
18  *  armv7m_nvic device.
19  */
20 
21 #include "qemu/osdep.h"
22 #include "hw/sysbus.h"
23 #include "gic_internal.h"
24 #include "qapi/error.h"
25 #include "qom/cpu.h"
26 #include "qemu/log.h"
27 #include "trace.h"
28 #include "sysemu/kvm.h"
29 
30 /* #define DEBUG_GIC */
31 
32 #ifdef DEBUG_GIC
33 #define DEBUG_GIC_GATE 1
34 #else
35 #define DEBUG_GIC_GATE 0
36 #endif
37 
38 #define DPRINTF(fmt, ...) do {                                          \
39         if (DEBUG_GIC_GATE) {                                           \
40             fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__);      \
41         }                                                               \
42     } while (0)
43 
44 static const uint8_t gic_id_11mpcore[] = {
45     0x00, 0x00, 0x00, 0x00, 0x90, 0x13, 0x04, 0x00, 0x0d, 0xf0, 0x05, 0xb1
46 };
47 
48 static const uint8_t gic_id_gicv1[] = {
49     0x04, 0x00, 0x00, 0x00, 0x90, 0xb3, 0x1b, 0x00, 0x0d, 0xf0, 0x05, 0xb1
50 };
51 
52 static const uint8_t gic_id_gicv2[] = {
53     0x04, 0x00, 0x00, 0x00, 0x90, 0xb4, 0x2b, 0x00, 0x0d, 0xf0, 0x05, 0xb1
54 };
55 
56 static inline int gic_get_current_cpu(GICState *s)
57 {
58     if (s->num_cpu > 1) {
59         return current_cpu->cpu_index;
60     }
61     return 0;
62 }
63 
64 static inline int gic_get_current_vcpu(GICState *s)
65 {
66     return gic_get_current_cpu(s) + GIC_NCPU;
67 }
68 
69 /* Return true if this GIC config has interrupt groups, which is
70  * true if we're a GICv2, or a GICv1 with the security extensions.
71  */
72 static inline bool gic_has_groups(GICState *s)
73 {
74     return s->revision == 2 || s->security_extn;
75 }
76 
77 static inline bool gic_cpu_ns_access(GICState *s, int cpu, MemTxAttrs attrs)
78 {
79     return !gic_is_vcpu(cpu) && s->security_extn && !attrs.secure;
80 }
81 
82 /* TODO: Many places that call this routine could be optimized.  */
83 /* Update interrupt status after enabled or pending bits have been changed.  */
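/* Descriptive note: for each CPU we scan every interrupt and pick the lowest
 * priority value (i.e. the highest priority) among those that are enabled,
 * pending, not already active and targeted at that CPU.  If that priority
 * beats both the priority mask and the running priority, and the interrupt's
 * group is enabled at the distributor and the CPU interface, the IRQ or FIQ
 * output is driven according to the group and GICC_CTLR.FIQEn.
 */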
84 static void gic_update(GICState *s)
85 {
86     int best_irq;
87     int best_prio;
88     int irq;
89     int irq_level, fiq_level;
90     int cpu;
91     int cm;
92 
93     for (cpu = 0; cpu < s->num_cpu; cpu++) {
94         cm = 1 << cpu;
95         s->current_pending[cpu] = 1023;
96         if (!(s->ctlr & (GICD_CTLR_EN_GRP0 | GICD_CTLR_EN_GRP1))
97             || !(s->cpu_ctlr[cpu] & (GICC_CTLR_EN_GRP0 | GICC_CTLR_EN_GRP1))) {
98             qemu_irq_lower(s->parent_irq[cpu]);
99             qemu_irq_lower(s->parent_fiq[cpu]);
100             continue;
101         }
102         best_prio = 0x100;
103         best_irq = 1023;
104         for (irq = 0; irq < s->num_irq; irq++) {
105             if (GIC_DIST_TEST_ENABLED(irq, cm) &&
106                 gic_test_pending(s, irq, cm) &&
107                 (!GIC_DIST_TEST_ACTIVE(irq, cm)) &&
108                 (irq < GIC_INTERNAL || GIC_DIST_TARGET(irq) & cm)) {
109                 if (GIC_DIST_GET_PRIORITY(irq, cpu) < best_prio) {
110                     best_prio = GIC_DIST_GET_PRIORITY(irq, cpu);
111                     best_irq = irq;
112                 }
113             }
114         }
115 
116         if (best_irq != 1023) {
117             trace_gic_update_bestirq(cpu, best_irq, best_prio,
118                 s->priority_mask[cpu], s->running_priority[cpu]);
119         }
120 
121         irq_level = fiq_level = 0;
122 
123         if (best_prio < s->priority_mask[cpu]) {
124             s->current_pending[cpu] = best_irq;
125             if (best_prio < s->running_priority[cpu]) {
126                 int group = GIC_DIST_TEST_GROUP(best_irq, cm);
127 
128                 if (extract32(s->ctlr, group, 1) &&
129                     extract32(s->cpu_ctlr[cpu], group, 1)) {
130                     if (group == 0 && s->cpu_ctlr[cpu] & GICC_CTLR_FIQ_EN) {
131                         DPRINTF("Raised pending FIQ %d (cpu %d)\n",
132                                 best_irq, cpu);
133                         fiq_level = 1;
134                         trace_gic_update_set_irq(cpu, "fiq", fiq_level);
135                     } else {
136                         DPRINTF("Raised pending IRQ %d (cpu %d)\n",
137                                 best_irq, cpu);
138                         irq_level = 1;
139                         trace_gic_update_set_irq(cpu, "irq", irq_level);
140                     }
141                 }
142             }
143         }
144 
145         qemu_set_irq(s->parent_irq[cpu], irq_level);
146         qemu_set_irq(s->parent_fiq[cpu], fiq_level);
147     }
148 }
149 
150 static void gic_set_irq_11mpcore(GICState *s, int irq, int level,
151                                  int cm, int target)
152 {
153     if (level) {
154         GIC_DIST_SET_LEVEL(irq, cm);
155         if (GIC_DIST_TEST_EDGE_TRIGGER(irq) || GIC_DIST_TEST_ENABLED(irq, cm)) {
156             DPRINTF("Set %d pending mask %x\n", irq, target);
157             GIC_DIST_SET_PENDING(irq, target);
158         }
159     } else {
160         GIC_DIST_CLEAR_LEVEL(irq, cm);
161     }
162 }
163 
164 static void gic_set_irq_generic(GICState *s, int irq, int level,
165                                 int cm, int target)
166 {
167     if (level) {
168         GIC_DIST_SET_LEVEL(irq, cm);
169         DPRINTF("Set %d pending mask %x\n", irq, target);
170         if (GIC_DIST_TEST_EDGE_TRIGGER(irq)) {
171             GIC_DIST_SET_PENDING(irq, target);
172         }
173     } else {
174         GIC_DIST_CLEAR_LEVEL(irq, cm);
175     }
176 }
177 
178 /* Process a change in an external IRQ input.  */
179 static void gic_set_irq(void *opaque, int irq, int level)
180 {
181     /* Meaning of the 'irq' parameter:
182      *  [0..N-1] : external interrupts
183      *  [N..N+31] : PPI (internal) interrupts for CPU 0
184  *  [N+32..N+63] : PPI (internal) interrupts for CPU 1
185      *  ...
186      */
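    /* Illustrative example (assumed values, not from any particular board):
     * with s->num_irq == 96 there are 64 external inputs, so an 'irq'
     * argument of 64 + 29 decodes below to PPI 29 of CPU 0 (cm == 1 << 0).
     */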
187     GICState *s = (GICState *)opaque;
188     int cm, target;
189     if (irq < (s->num_irq - GIC_INTERNAL)) {
190         /* The first external input line is internal interrupt 32.  */
191         cm = ALL_CPU_MASK;
192         irq += GIC_INTERNAL;
193         target = GIC_DIST_TARGET(irq);
194     } else {
195         int cpu;
196         irq -= (s->num_irq - GIC_INTERNAL);
197         cpu = irq / GIC_INTERNAL;
198         irq %= GIC_INTERNAL;
199         cm = 1 << cpu;
200         target = cm;
201     }
202 
203     assert(irq >= GIC_NR_SGIS);
204 
205     if (level == GIC_DIST_TEST_LEVEL(irq, cm)) {
206         return;
207     }
208 
209     if (s->revision == REV_11MPCORE) {
210         gic_set_irq_11mpcore(s, irq, level, cm, target);
211     } else {
212         gic_set_irq_generic(s, irq, level, cm, target);
213     }
214     trace_gic_set_irq(irq, level, cm, target);
215 
216     gic_update(s);
217 }
218 
219 static uint16_t gic_get_current_pending_irq(GICState *s, int cpu,
220                                             MemTxAttrs attrs)
221 {
222     uint16_t pending_irq = s->current_pending[cpu];
223 
224     if (pending_irq < GIC_MAXIRQ && gic_has_groups(s)) {
225         int group = gic_test_group(s, pending_irq, cpu);
226 
227         /* On a GIC without the security extensions, reading this register
228          * behaves in the same way as a secure access to a GIC with them.
229          */
230         bool secure = !gic_cpu_ns_access(s, cpu, attrs);
231 
232         if (group == 0 && !secure) {
233             /* Group0 interrupts hidden from Non-secure access */
234             return 1023;
235         }
236         if (group == 1 && secure && !(s->cpu_ctlr[cpu] & GICC_CTLR_ACK_CTL)) {
237             /* Group1 interrupts only seen by Secure access if
238              * AckCtl bit set.
239              */
240             return 1022;
241         }
242     }
243     return pending_irq;
244 }
245 
246 static int gic_get_group_priority(GICState *s, int cpu, int irq)
247 {
248     /* Return the group priority of the specified interrupt
249      * (which is the top bits of its priority, with the number
250      * of bits masked determined by the applicable binary point register).
251      */
252     int bpr;
253     uint32_t mask;
254 
255     if (gic_has_groups(s) &&
256         !(s->cpu_ctlr[cpu] & GICC_CTLR_CBPR) &&
257         gic_test_group(s, irq, cpu)) {
258         bpr = s->abpr[cpu] - 1;
259         assert(bpr >= 0);
260     } else {
261         bpr = s->bpr[cpu];
262     }
263 
264     /* a BPR of 0 means the group priority bits are [7:1];
265      * a BPR of 1 means they are [7:2], and so on down to
266      * a BPR of 7 meaning no group priority bits at all.
267      */
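    /* For illustration: a BPR of 2 gives mask == 0xf8, so a priority value
     * of 0x65 has group priority 0x60 and subpriority 0x05.
     */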
268     mask = ~0U << ((bpr & 7) + 1);
269 
270     return gic_get_priority(s, irq, cpu) & mask;
271 }
272 
273 static void gic_activate_irq(GICState *s, int cpu, int irq)
274 {
275     /* Set the appropriate Active Priority Register bit for this IRQ,
276      * and update the running priority.
277      */
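    /* Illustrative example, assuming a min_bpr of 0 (the physical CPU
     * interface): a group priority of 0x40 gives preemption_level 0x20,
     * i.e. regno 1, bitno 0 (bit 0 of the second APR/NSAPR word).
     */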
278     int prio = gic_get_group_priority(s, cpu, irq);
279     int min_bpr = gic_is_vcpu(cpu) ? GIC_VIRT_MIN_BPR : GIC_MIN_BPR;
280     int preemption_level = prio >> (min_bpr + 1);
281     int regno = preemption_level / 32;
282     int bitno = preemption_level % 32;
283     uint32_t *papr = NULL;
284 
285     if (gic_is_vcpu(cpu)) {
286         assert(regno == 0);
287         papr = &s->h_apr[gic_get_vcpu_real_id(cpu)];
288     } else if (gic_has_groups(s) && gic_test_group(s, irq, cpu)) {
289         papr = &s->nsapr[regno][cpu];
290     } else {
291         papr = &s->apr[regno][cpu];
292     }
293 
294     *papr |= (1 << bitno);
295 
296     s->running_priority[cpu] = prio;
297     gic_set_active(s, irq, cpu);
298 }
299 
300 static int gic_get_prio_from_apr_bits(GICState *s, int cpu)
301 {
302     /* Recalculate the current running priority for this CPU based
303      * on the set bits in the Active Priority Registers.
304      */
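    /* e.g. with GIC_MIN_BPR == 0, if APR0/NSAPR0 are clear and apr[1] has
     * only bit 0 set, the value recomputed below is (32 + 0) << 1 == 0x40,
     * matching the activation example above.
     */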
305     int i;
306 
307     if (gic_is_vcpu(cpu)) {
308         uint32_t apr = s->h_apr[gic_get_vcpu_real_id(cpu)];
309         if (apr) {
310             return ctz32(apr) << (GIC_VIRT_MIN_BPR + 1);
311         } else {
312             return 0x100;
313         }
314     }
315 
316     for (i = 0; i < GIC_NR_APRS; i++) {
317         uint32_t apr = s->apr[i][cpu] | s->nsapr[i][cpu];
318         if (!apr) {
319             continue;
320         }
321         return (i * 32 + ctz32(apr)) << (GIC_MIN_BPR + 1);
322     }
323     return 0x100;
324 }
325 
326 static void gic_drop_prio(GICState *s, int cpu, int group)
327 {
328     /* Drop the priority of the currently active interrupt in the
329      * specified group.
330      *
331      * Note that we can guarantee (because of the requirement to nest
332      * GICC_IAR reads [which activate an interrupt and raise priority]
333      * with GICC_EOIR writes [which drop the priority for the interrupt])
334      * that the interrupt we're being called for is the highest priority
335      * active interrupt, meaning that it has the lowest set bit in the
336      * APR registers.
337      *
338      * If the guest does not honour the ordering constraints then the
339      * behaviour of the GIC is UNPREDICTABLE, which for us means that
340      * the values of the APR registers might become incorrect and the
341      * running priority will be wrong, so interrupts that should preempt
342      * might not do so, and interrupts that should not preempt might do so.
343      */
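    /* The "x &= x - 1" idiom below clears the lowest set bit, e.g. an APR
     * value of 0b0110 becomes 0b0100, dropping the bit for the highest
     * priority active interrupt.
     */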
344     if (gic_is_vcpu(cpu)) {
345         int rcpu = gic_get_vcpu_real_id(cpu);
346 
347         if (s->h_apr[rcpu]) {
348             /* Clear lowest set bit */
349             s->h_apr[rcpu] &= s->h_apr[rcpu] - 1;
350         }
351     } else {
352         int i;
353 
354         for (i = 0; i < GIC_NR_APRS; i++) {
355             uint32_t *papr = group ? &s->nsapr[i][cpu] : &s->apr[i][cpu];
356             if (!*papr) {
357                 continue;
358             }
359             /* Clear lowest set bit */
360             *papr &= *papr - 1;
361             break;
362         }
363     }
364 
365     s->running_priority[cpu] = gic_get_prio_from_apr_bits(s, cpu);
366 }
367 
368 uint32_t gic_acknowledge_irq(GICState *s, int cpu, MemTxAttrs attrs)
369 {
370     int ret, irq, src;
371     int cm = 1 << cpu;
372 
373     /* gic_get_current_pending_irq() will return 1022 or 1023 appropriately
374      * for the case where this GIC supports grouping and the pending interrupt
375      * is in the wrong group.
376      */
377     irq = gic_get_current_pending_irq(s, cpu, attrs);
378     trace_gic_acknowledge_irq(cpu, irq);
379 
380     if (irq >= GIC_MAXIRQ) {
381         DPRINTF("ACK, no pending interrupt or it is hidden: %d\n", irq);
382         return irq;
383     }
384 
385     if (gic_get_priority(s, irq, cpu) >= s->running_priority[cpu]) {
386         DPRINTF("ACK, pending interrupt (%d) has insufficient priority\n", irq);
387         return 1023;
388     }
389 
390     if (s->revision == REV_11MPCORE) {
391         /* Clear pending flags for both level and edge triggered interrupts.
392          * Level triggered IRQs will be reasserted once they become inactive.
393          */
394         gic_clear_pending(s, irq, cpu);
395         ret = irq;
396     } else {
397         if (irq < GIC_NR_SGIS) {
398             /* Lookup the source CPU for the SGI and clear this in the
399              * sgi_pending map.  Return the src and clear the overall pending
400              * state on this CPU if the SGI is not pending from any CPUs.
401              */
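            /* For example, if SGI 3 is pending only from CPU 2, src is 2
             * and the value returned below is 0x803 (3 | (2 << 10)).
             */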
402             assert(s->sgi_pending[irq][cpu] != 0);
403             src = ctz32(s->sgi_pending[irq][cpu]);
404             s->sgi_pending[irq][cpu] &= ~(1 << src);
405             if (s->sgi_pending[irq][cpu] == 0) {
406                 gic_clear_pending(s, irq, cpu);
407             }
408             ret = irq | ((src & 0x7) << 10);
409         } else {
410             /* Clear pending state for both level and edge triggered
411              * interrupts. (level triggered interrupts with an active line
412              * remain pending, see gic_test_pending)
413              */
414             gic_clear_pending(s, irq, cpu);
415             ret = irq;
416         }
417     }
418 
419     gic_activate_irq(s, cpu, irq);
420     gic_update(s);
421     DPRINTF("ACK %d\n", irq);
422     return ret;
423 }
424 
425 void gic_dist_set_priority(GICState *s, int cpu, int irq, uint8_t val,
426                       MemTxAttrs attrs)
427 {
428     if (s->security_extn && !attrs.secure) {
429         if (!GIC_DIST_TEST_GROUP(irq, (1 << cpu))) {
430             return; /* Ignore Non-secure access of Group0 IRQ */
431         }
432         val = 0x80 | (val >> 1); /* Non-secure view */
433     }
434 
435     if (irq < GIC_INTERNAL) {
436         s->priority1[irq][cpu] = val;
437     } else {
438         s->priority2[(irq) - GIC_INTERNAL] = val;
439     }
440 }
441 
442 static uint32_t gic_dist_get_priority(GICState *s, int cpu, int irq,
443                                  MemTxAttrs attrs)
444 {
445     uint32_t prio = GIC_DIST_GET_PRIORITY(irq, cpu);
446 
447     if (s->security_extn && !attrs.secure) {
448         if (!GIC_DIST_TEST_GROUP(irq, (1 << cpu))) {
449             return 0; /* Non-secure access cannot read priority of Group0 IRQ */
450         }
451         prio = (prio << 1) & 0xff; /* Non-secure view */
452     }
453     return prio;
454 }
455 
456 static void gic_set_priority_mask(GICState *s, int cpu, uint8_t pmask,
457                                   MemTxAttrs attrs)
458 {
459     if (gic_cpu_ns_access(s, cpu, attrs)) {
460         if (s->priority_mask[cpu] & 0x80) {
461             /* Priority Mask in upper half */
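            /* e.g. a Non-secure write of 0xa0 is stored as the Secure
             * value 0xd0 (0x80 | (0xa0 >> 1)).
             */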
462             pmask = 0x80 | (pmask >> 1);
463         } else {
464             /* Non-secure write ignored if priority mask is in lower half */
465             return;
466         }
467     }
468     s->priority_mask[cpu] = pmask;
469 }
470 
471 static uint32_t gic_get_priority_mask(GICState *s, int cpu, MemTxAttrs attrs)
472 {
473     uint32_t pmask = s->priority_mask[cpu];
474 
475     if (gic_cpu_ns_access(s, cpu, attrs)) {
476         if (pmask & 0x80) {
477             /* Priority Mask in upper half, return Non-secure view */
478             pmask = (pmask << 1) & 0xff;
479         } else {
480             /* Priority Mask in lower half, RAZ */
481             pmask = 0;
482         }
483     }
484     return pmask;
485 }
486 
487 static uint32_t gic_get_cpu_control(GICState *s, int cpu, MemTxAttrs attrs)
488 {
489     uint32_t ret = s->cpu_ctlr[cpu];
490 
491     if (gic_cpu_ns_access(s, cpu, attrs)) {
492         /* Construct the NS banked view of GICC_CTLR from the correct
493          * bits of the S banked view. We don't need to move the bypass
494          * control bits because we don't implement that (IMPDEF) part
495          * of the GIC architecture.
496          */
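        /* e.g. EnableGrp1 (bit 1 of the Secure view) shifts down to bit 0,
         * which is the Enable bit of the Non-secure view.
         */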
497         ret = (ret & (GICC_CTLR_EN_GRP1 | GICC_CTLR_EOIMODE_NS)) >> 1;
498     }
499     return ret;
500 }
501 
502 static void gic_set_cpu_control(GICState *s, int cpu, uint32_t value,
503                                 MemTxAttrs attrs)
504 {
505     uint32_t mask;
506 
507     if (gic_cpu_ns_access(s, cpu, attrs)) {
508         /* The NS view can only write certain bits in the register;
509          * the rest are unchanged
510          */
511         mask = GICC_CTLR_EN_GRP1;
512         if (s->revision == 2) {
513             mask |= GICC_CTLR_EOIMODE_NS;
514         }
515         s->cpu_ctlr[cpu] &= ~mask;
516         s->cpu_ctlr[cpu] |= (value << 1) & mask;
517     } else {
518         if (s->revision == 2) {
519             mask = s->security_extn ? GICC_CTLR_V2_S_MASK : GICC_CTLR_V2_MASK;
520         } else {
521             mask = s->security_extn ? GICC_CTLR_V1_S_MASK : GICC_CTLR_V1_MASK;
522         }
523         s->cpu_ctlr[cpu] = value & mask;
524     }
525     DPRINTF("CPU Interface %d: Group0 Interrupts %sabled, "
526             "Group1 Interrupts %sabled\n", cpu,
527             (s->cpu_ctlr[cpu] & GICC_CTLR_EN_GRP0) ? "En" : "Dis",
528             (s->cpu_ctlr[cpu] & GICC_CTLR_EN_GRP1) ? "En" : "Dis");
529 }
530 
531 static uint8_t gic_get_running_priority(GICState *s, int cpu, MemTxAttrs attrs)
532 {
533     if ((s->revision != REV_11MPCORE) && (s->running_priority[cpu] > 0xff)) {
534         /* Idle priority */
535         return 0xff;
536     }
537 
538     if (gic_cpu_ns_access(s, cpu, attrs)) {
539         if (s->running_priority[cpu] & 0x80) {
540             /* Running priority in upper half of range: return the Non-secure
541              * view of the priority.
542              */
543             return s->running_priority[cpu] << 1;
544         } else {
545             /* Running priority in lower half of range: RAZ */
546             return 0;
547         }
548     } else {
549         return s->running_priority[cpu];
550     }
551 }
552 
553 /* Return true if we should split priority drop and interrupt deactivation,
554  * ie whether the relevant EOIMode bit is set.
555  */
556 static bool gic_eoi_split(GICState *s, int cpu, MemTxAttrs attrs)
557 {
558     if (s->revision != 2) {
559         /* Before GICv2 prio-drop and deactivate are not separable */
560         return false;
561     }
562     if (gic_cpu_ns_access(s, cpu, attrs)) {
563         return s->cpu_ctlr[cpu] & GICC_CTLR_EOIMODE_NS;
564     }
565     return s->cpu_ctlr[cpu] & GICC_CTLR_EOIMODE;
566 }
567 
568 static void gic_deactivate_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs)
569 {
570     int group;
571 
572     if (irq >= s->num_irq) {
573         /*
574          * This handles two cases:
575          * 1. If software writes the ID of a spurious interrupt [ie 1023]
576          * to the GICC_DIR, the GIC ignores that write.
577          * 2. If software writes the number of a non-existent interrupt
578          * this must be a subcase of "value written is not an active interrupt"
579          * and so this is UNPREDICTABLE. We choose to ignore it.
580          */
581         return;
582     }
583 
584     group = gic_has_groups(s) && gic_test_group(s, irq, cpu);
585 
586     if (!gic_eoi_split(s, cpu, attrs)) {
587         /* This is UNPREDICTABLE; we choose to ignore it */
588         qemu_log_mask(LOG_GUEST_ERROR,
589                       "gic_deactivate_irq: GICC_DIR write when EOIMode clear");
590         return;
591     }
592 
593     if (gic_cpu_ns_access(s, cpu, attrs) && !group) {
594         DPRINTF("Non-secure DI for Group0 interrupt %d ignored\n", irq);
595         return;
596     }
597 
598     gic_clear_active(s, irq, cpu);
599 }
600 
601 static void gic_complete_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs)
602 {
603     int cm = 1 << cpu;
604     int group;
605 
606     DPRINTF("EOI %d\n", irq);
607     if (irq >= s->num_irq) {
608         /* This handles two cases:
609          * 1. If software writes the ID of a spurious interrupt [ie 1023]
610          * to the GICC_EOIR, the GIC ignores that write.
611          * 2. If software writes the number of a non-existent interrupt
612          * this must be a subcase of "value written does not match the last
613          * valid interrupt value read from the Interrupt Acknowledge
614          * register" and so this is UNPREDICTABLE. We choose to ignore it.
615          */
616         return;
617     }
618     if (s->running_priority[cpu] == 0x100) {
619         return; /* No active IRQ.  */
620     }
621 
622     if (s->revision == REV_11MPCORE) {
623         /* Mark level triggered interrupts as pending if they are still
624            raised.  */
625         if (!GIC_DIST_TEST_EDGE_TRIGGER(irq) && GIC_DIST_TEST_ENABLED(irq, cm)
626             && GIC_DIST_TEST_LEVEL(irq, cm)
627             && (GIC_DIST_TARGET(irq) & cm) != 0) {
628             DPRINTF("Set %d pending mask %x\n", irq, cm);
629             GIC_DIST_SET_PENDING(irq, cm);
630         }
631     }
632 
633     group = gic_has_groups(s) && gic_test_group(s, irq, cpu);
634 
635     if (gic_cpu_ns_access(s, cpu, attrs) && !group) {
636         DPRINTF("Non-secure EOI for Group0 interrupt %d ignored\n", irq);
637         return;
638     }
639 
640     /* A Secure EOI with GICC_CTLR.AckCtl == 0 for a Group 1 interrupt
641      * is UNPREDICTABLE. We choose to handle it as if AckCtl == 1,
642      * i.e. go ahead and complete the irq anyway.
643      */
644 
645     gic_drop_prio(s, cpu, group);
646 
647     /* In GICv2 the guest can choose to split priority-drop and deactivate */
648     if (!gic_eoi_split(s, cpu, attrs)) {
649         gic_clear_active(s, irq, cpu);
650     }
651     gic_update(s);
652 }
653 
654 static uint32_t gic_dist_readb(void *opaque, hwaddr offset, MemTxAttrs attrs)
655 {
656     GICState *s = (GICState *)opaque;
657     uint32_t res;
658     int irq;
659     int i;
660     int cpu;
661     int cm;
662     int mask;
663 
664     cpu = gic_get_current_cpu(s);
665     cm = 1 << cpu;
666     if (offset < 0x100) {
667         if (offset == 0) {      /* GICD_CTLR */
668             if (s->security_extn && !attrs.secure) {
669                 /* The NS bank of this register is just an alias of the
670                  * EnableGrp1 bit in the S bank version.
671                  */
672                 return extract32(s->ctlr, 1, 1);
673             } else {
674                 return s->ctlr;
675             }
676         }
677         if (offset == 4)
678             /* Interrupt Controller Type Register */
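            /* e.g. 96 interrupts, 2 CPUs and the security extensions
             * present read back as 0x422: (96/32 - 1) | (1 << 5) | (1 << 10).
             */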
679             return ((s->num_irq / 32) - 1)
680                     | ((s->num_cpu - 1) << 5)
681                     | (s->security_extn << 10);
682         if (offset < 0x08)
683             return 0;
684         if (offset >= 0x80) {
685             /* Interrupt Group Registers: these RAZ/WI if this is an NS
686              * access to a GIC with the security extensions, or if the GIC
687              * doesn't have groups at all.
688              */
689             res = 0;
690             if (!(s->security_extn && !attrs.secure) && gic_has_groups(s)) {
691                 /* Every byte offset holds 8 group status bits */
692                 irq = (offset - 0x080) * 8 + GIC_BASE_IRQ;
693                 if (irq >= s->num_irq) {
694                     goto bad_reg;
695                 }
696                 for (i = 0; i < 8; i++) {
697                     if (GIC_DIST_TEST_GROUP(irq + i, cm)) {
698                         res |= (1 << i);
699                     }
700                 }
701             }
702             return res;
703         }
704         goto bad_reg;
705     } else if (offset < 0x200) {
706         /* Interrupt Set/Clear Enable.  */
707         if (offset < 0x180)
708             irq = (offset - 0x100) * 8;
709         else
710             irq = (offset - 0x180) * 8;
711         irq += GIC_BASE_IRQ;
712         if (irq >= s->num_irq)
713             goto bad_reg;
714         res = 0;
715         for (i = 0; i < 8; i++) {
716             if (s->security_extn && !attrs.secure &&
717                 !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) {
718                 continue; /* Ignore Non-secure access of Group0 IRQ */
719             }
720 
721             if (GIC_DIST_TEST_ENABLED(irq + i, cm)) {
722                 res |= (1 << i);
723             }
724         }
725     } else if (offset < 0x300) {
726         /* Interrupt Set/Clear Pending.  */
727         if (offset < 0x280)
728             irq = (offset - 0x200) * 8;
729         else
730             irq = (offset - 0x280) * 8;
731         irq += GIC_BASE_IRQ;
732         if (irq >= s->num_irq)
733             goto bad_reg;
734         res = 0;
735         mask = (irq < GIC_INTERNAL) ?  cm : ALL_CPU_MASK;
736         for (i = 0; i < 8; i++) {
737             if (s->security_extn && !attrs.secure &&
738                 !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) {
739                 continue; /* Ignore Non-secure access of Group0 IRQ */
740             }
741 
742             if (gic_test_pending(s, irq + i, mask)) {
743                 res |= (1 << i);
744             }
745         }
746     } else if (offset < 0x400) {
747         /* Interrupt Set/Clear Active.  */
748         if (offset < 0x380) {
749             irq = (offset - 0x300) * 8;
750         } else if (s->revision == 2) {
751             irq = (offset - 0x380) * 8;
752         } else {
753             goto bad_reg;
754         }
755 
756         irq += GIC_BASE_IRQ;
757         if (irq >= s->num_irq)
758             goto bad_reg;
759         res = 0;
760         mask = (irq < GIC_INTERNAL) ?  cm : ALL_CPU_MASK;
761         for (i = 0; i < 8; i++) {
762             if (s->security_extn && !attrs.secure &&
763                 !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) {
764                 continue; /* Ignore Non-secure access of Group0 IRQ */
765             }
766 
767             if (GIC_DIST_TEST_ACTIVE(irq + i, mask)) {
768                 res |= (1 << i);
769             }
770         }
771     } else if (offset < 0x800) {
772         /* Interrupt Priority.  */
773         irq = (offset - 0x400) + GIC_BASE_IRQ;
774         if (irq >= s->num_irq)
775             goto bad_reg;
776         res = gic_dist_get_priority(s, cpu, irq, attrs);
777     } else if (offset < 0xc00) {
778         /* Interrupt CPU Target.  */
779         if (s->num_cpu == 1 && s->revision != REV_11MPCORE) {
780             /* For uniprocessor GICs these RAZ/WI */
781             res = 0;
782         } else {
783             irq = (offset - 0x800) + GIC_BASE_IRQ;
784             if (irq >= s->num_irq) {
785                 goto bad_reg;
786             }
787             if (irq < 29 && s->revision == REV_11MPCORE) {
788                 res = 0;
789             } else if (irq < GIC_INTERNAL) {
790                 res = cm;
791             } else {
792                 res = GIC_DIST_TARGET(irq);
793             }
794         }
795     } else if (offset < 0xf00) {
796         /* Interrupt Configuration.  */
797         irq = (offset - 0xc00) * 4 + GIC_BASE_IRQ;
798         if (irq >= s->num_irq)
799             goto bad_reg;
800         res = 0;
801         for (i = 0; i < 4; i++) {
802             if (s->security_extn && !attrs.secure &&
803                 !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) {
804                 continue; /* Ignore Non-secure access of Group0 IRQ */
805             }
806 
807             if (GIC_DIST_TEST_MODEL(irq + i)) {
808                 res |= (1 << (i * 2));
809             }
810             if (GIC_DIST_TEST_EDGE_TRIGGER(irq + i)) {
811                 res |= (2 << (i * 2));
812             }
813         }
814     } else if (offset < 0xf10) {
815         goto bad_reg;
816     } else if (offset < 0xf30) {
817         if (s->revision == REV_11MPCORE) {
818             goto bad_reg;
819         }
820 
821         if (offset < 0xf20) {
822             /* GICD_CPENDSGIRn */
823             irq = (offset - 0xf10);
824         } else {
825             /* GICD_SPENDSGIRn */
826             irq = (offset - 0xf20);
827         }
828 
829         if (s->security_extn && !attrs.secure &&
830             !GIC_DIST_TEST_GROUP(irq, 1 << cpu)) {
831             res = 0; /* Ignore Non-secure access of Group0 IRQ */
832         } else {
833             res = s->sgi_pending[irq][cpu];
834         }
835     } else if (offset < 0xfd0) {
836         goto bad_reg;
837     } else if (offset < 0x1000) {
838         if (offset & 3) {
839             res = 0;
840         } else {
841             switch (s->revision) {
842             case REV_11MPCORE:
843                 res = gic_id_11mpcore[(offset - 0xfd0) >> 2];
844                 break;
845             case 1:
846                 res = gic_id_gicv1[(offset - 0xfd0) >> 2];
847                 break;
848             case 2:
849                 res = gic_id_gicv2[(offset - 0xfd0) >> 2];
850                 break;
851             default:
852                 res = 0;
853             }
854         }
855     } else {
856         g_assert_not_reached();
857     }
858     return res;
859 bad_reg:
860     qemu_log_mask(LOG_GUEST_ERROR,
861                   "gic_dist_readb: Bad offset %x\n", (int)offset);
862     return 0;
863 }
864 
865 static MemTxResult gic_dist_read(void *opaque, hwaddr offset, uint64_t *data,
866                                  unsigned size, MemTxAttrs attrs)
867 {
868     switch (size) {
869     case 1:
870         *data = gic_dist_readb(opaque, offset, attrs);
871         return MEMTX_OK;
872     case 2:
873         *data = gic_dist_readb(opaque, offset, attrs);
874         *data |= gic_dist_readb(opaque, offset + 1, attrs) << 8;
875         return MEMTX_OK;
876     case 4:
877         *data = gic_dist_readb(opaque, offset, attrs);
878         *data |= gic_dist_readb(opaque, offset + 1, attrs) << 8;
879         *data |= gic_dist_readb(opaque, offset + 2, attrs) << 16;
880         *data |= gic_dist_readb(opaque, offset + 3, attrs) << 24;
881         return MEMTX_OK;
882     default:
883         return MEMTX_ERROR;
884     }
885 }
886 
887 static void gic_dist_writeb(void *opaque, hwaddr offset,
888                             uint32_t value, MemTxAttrs attrs)
889 {
890     GICState *s = (GICState *)opaque;
891     int irq;
892     int i;
893     int cpu;
894 
895     cpu = gic_get_current_cpu(s);
896     if (offset < 0x100) {
897         if (offset == 0) {
898             if (s->security_extn && !attrs.secure) {
899                 /* NS version is just an alias of the S version's bit 1 */
900                 s->ctlr = deposit32(s->ctlr, 1, 1, value);
901             } else if (gic_has_groups(s)) {
902                 s->ctlr = value & (GICD_CTLR_EN_GRP0 | GICD_CTLR_EN_GRP1);
903             } else {
904                 s->ctlr = value & GICD_CTLR_EN_GRP0;
905             }
906             DPRINTF("Distributor: Group0 %sabled; Group1 %sabled\n",
907                     s->ctlr & GICD_CTLR_EN_GRP0 ? "En" : "Dis",
908                     s->ctlr & GICD_CTLR_EN_GRP1 ? "En" : "Dis");
909         } else if (offset < 4) {
910             /* ignored.  */
911         } else if (offset >= 0x80) {
912             /* Interrupt Group Registers: RAZ/WI for NS access to secure
913              * GIC, or for GICs without groups.
914              */
915             if (!(s->security_extn && !attrs.secure) && gic_has_groups(s)) {
916                 /* Every byte offset holds 8 group status bits */
917                 irq = (offset - 0x80) * 8 + GIC_BASE_IRQ;
918                 if (irq >= s->num_irq) {
919                     goto bad_reg;
920                 }
921                 for (i = 0; i < 8; i++) {
922                     /* Group bits are banked for private interrupts */
923                     int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;
924                     if (value & (1 << i)) {
925                         /* Group1 (Non-secure) */
926                         GIC_DIST_SET_GROUP(irq + i, cm);
927                     } else {
928                         /* Group0 (Secure) */
929                         GIC_DIST_CLEAR_GROUP(irq + i, cm);
930                     }
931                 }
932             }
933         } else {
934             goto bad_reg;
935         }
936     } else if (offset < 0x180) {
937         /* Interrupt Set Enable.  */
938         irq = (offset - 0x100) * 8 + GIC_BASE_IRQ;
939         if (irq >= s->num_irq)
940             goto bad_reg;
941         if (irq < GIC_NR_SGIS) {
942             value = 0xff;
943         }
944 
945         for (i = 0; i < 8; i++) {
946             if (value & (1 << i)) {
947                 int mask =
948                     (irq < GIC_INTERNAL) ? (1 << cpu)
949                                          : GIC_DIST_TARGET(irq + i);
950                 int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;
951 
952                 if (s->security_extn && !attrs.secure &&
953                     !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) {
954                     continue; /* Ignore Non-secure access of Group0 IRQ */
955                 }
956 
957                 if (!GIC_DIST_TEST_ENABLED(irq + i, cm)) {
958                     DPRINTF("Enabled IRQ %d\n", irq + i);
959                     trace_gic_enable_irq(irq + i);
960                 }
961                 GIC_DIST_SET_ENABLED(irq + i, cm);
962                 /* If a raised level triggered IRQ is enabled then mark
963                    it as pending.  */
964                 if (GIC_DIST_TEST_LEVEL(irq + i, mask)
965                         && !GIC_DIST_TEST_EDGE_TRIGGER(irq + i)) {
966                     DPRINTF("Set %d pending mask %x\n", irq + i, mask);
967                     GIC_DIST_SET_PENDING(irq + i, mask);
968                 }
969             }
970         }
971     } else if (offset < 0x200) {
972         /* Interrupt Clear Enable.  */
973         irq = (offset - 0x180) * 8 + GIC_BASE_IRQ;
974         if (irq >= s->num_irq)
975             goto bad_reg;
976         if (irq < GIC_NR_SGIS) {
977             value = 0;
978         }
979 
980         for (i = 0; i < 8; i++) {
981             if (value & (1 << i)) {
982                 int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;
983 
984                 if (s->security_extn && !attrs.secure &&
985                     !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) {
986                     continue; /* Ignore Non-secure access of Group0 IRQ */
987                 }
988 
989                 if (GIC_DIST_TEST_ENABLED(irq + i, cm)) {
990                     DPRINTF("Disabled IRQ %d\n", irq + i);
991                     trace_gic_disable_irq(irq + i);
992                 }
993                 GIC_DIST_CLEAR_ENABLED(irq + i, cm);
994             }
995         }
996     } else if (offset < 0x280) {
997         /* Interrupt Set Pending.  */
998         irq = (offset - 0x200) * 8 + GIC_BASE_IRQ;
999         if (irq >= s->num_irq)
1000             goto bad_reg;
1001         if (irq < GIC_NR_SGIS) {
1002             value = 0;
1003         }
1004 
1005         for (i = 0; i < 8; i++) {
1006             if (value & (1 << i)) {
1007                 if (s->security_extn && !attrs.secure &&
1008                     !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) {
1009                     continue; /* Ignore Non-secure access of Group0 IRQ */
1010                 }
1011 
1012                 GIC_DIST_SET_PENDING(irq + i, GIC_DIST_TARGET(irq + i));
1013             }
1014         }
1015     } else if (offset < 0x300) {
1016         /* Interrupt Clear Pending.  */
1017         irq = (offset - 0x280) * 8 + GIC_BASE_IRQ;
1018         if (irq >= s->num_irq)
1019             goto bad_reg;
1020         if (irq < GIC_NR_SGIS) {
1021             value = 0;
1022         }
1023 
1024         for (i = 0; i < 8; i++) {
1025             if (s->security_extn && !attrs.secure &&
1026                 !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) {
1027                 continue; /* Ignore Non-secure access of Group0 IRQ */
1028             }
1029 
1030             /* ??? This currently clears the pending bit for all CPUs, even
1031                for per-CPU interrupts.  It's unclear whether this is the
1032                correct behavior.  */
1033             if (value & (1 << i)) {
1034                 GIC_DIST_CLEAR_PENDING(irq + i, ALL_CPU_MASK);
1035             }
1036         }
1037     } else if (offset < 0x380) {
1038         /* Interrupt Set Active.  */
1039         if (s->revision != 2) {
1040             goto bad_reg;
1041         }
1042 
1043         irq = (offset - 0x300) * 8 + GIC_BASE_IRQ;
1044         if (irq >= s->num_irq) {
1045             goto bad_reg;
1046         }
1047 
1048         /* This register is banked per-cpu for PPIs */
1049         int cm = irq < GIC_INTERNAL ? (1 << cpu) : ALL_CPU_MASK;
1050 
1051         for (i = 0; i < 8; i++) {
1052             if (s->security_extn && !attrs.secure &&
1053                 !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) {
1054                 continue; /* Ignore Non-secure access of Group0 IRQ */
1055             }
1056 
1057             if (value & (1 << i)) {
1058                 GIC_DIST_SET_ACTIVE(irq + i, cm);
1059             }
1060         }
1061     } else if (offset < 0x400) {
1062         /* Interrupt Clear Active.  */
1063         if (s->revision != 2) {
1064             goto bad_reg;
1065         }
1066 
1067         irq = (offset - 0x380) * 8 + GIC_BASE_IRQ;
1068         if (irq >= s->num_irq) {
1069             goto bad_reg;
1070         }
1071 
1072         /* This register is banked per-cpu for PPIs */
1073         int cm = irq < GIC_INTERNAL ? (1 << cpu) : ALL_CPU_MASK;
1074 
1075         for (i = 0; i < 8; i++) {
1076             if (s->security_extn && !attrs.secure &&
1077                 !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) {
1078                 continue; /* Ignore Non-secure access of Group0 IRQ */
1079             }
1080 
1081             if (value & (1 << i)) {
1082                 GIC_DIST_CLEAR_ACTIVE(irq + i, cm);
1083             }
1084         }
1085     } else if (offset < 0x800) {
1086         /* Interrupt Priority.  */
1087         irq = (offset - 0x400) + GIC_BASE_IRQ;
1088         if (irq >= s->num_irq)
1089             goto bad_reg;
1090         gic_dist_set_priority(s, cpu, irq, value, attrs);
1091     } else if (offset < 0xc00) {
1092         /* Interrupt CPU Target. RAZ/WI on uniprocessor GICs, with the
1093          * annoying exception of the 11MPCore's GIC.
1094          */
1095         if (s->num_cpu != 1 || s->revision == REV_11MPCORE) {
1096             irq = (offset - 0x800) + GIC_BASE_IRQ;
1097             if (irq >= s->num_irq) {
1098                 goto bad_reg;
1099             }
1100             if (irq < 29 && s->revision == REV_11MPCORE) {
1101                 value = 0;
1102             } else if (irq < GIC_INTERNAL) {
1103                 value = ALL_CPU_MASK;
1104             }
1105             s->irq_target[irq] = value & ALL_CPU_MASK;
1106         }
1107     } else if (offset < 0xf00) {
1108         /* Interrupt Configuration.  */
1109         irq = (offset - 0xc00) * 4 + GIC_BASE_IRQ;
1110         if (irq >= s->num_irq)
1111             goto bad_reg;
1112         if (irq < GIC_NR_SGIS)
1113             value |= 0xaa;
1114         for (i = 0; i < 4; i++) {
1115             if (s->security_extn && !attrs.secure &&
1116                 !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) {
1117                 continue; /* Ignore Non-secure access of Group0 IRQ */
1118             }
1119 
1120             if (s->revision == REV_11MPCORE) {
1121                 if (value & (1 << (i * 2))) {
1122                     GIC_DIST_SET_MODEL(irq + i);
1123                 } else {
1124                     GIC_DIST_CLEAR_MODEL(irq + i);
1125                 }
1126             }
1127             if (value & (2 << (i * 2))) {
1128                 GIC_DIST_SET_EDGE_TRIGGER(irq + i);
1129             } else {
1130                 GIC_DIST_CLEAR_EDGE_TRIGGER(irq + i);
1131             }
1132         }
1133     } else if (offset < 0xf10) {
1134         /* 0xf00 is only handled for 32-bit writes.  */
1135         goto bad_reg;
1136     } else if (offset < 0xf20) {
1137         /* GICD_CPENDSGIRn */
1138         if (s->revision == REV_11MPCORE) {
1139             goto bad_reg;
1140         }
1141         irq = (offset - 0xf10);
1142 
1143         if (!s->security_extn || attrs.secure ||
1144             GIC_DIST_TEST_GROUP(irq, 1 << cpu)) {
1145             s->sgi_pending[irq][cpu] &= ~value;
1146             if (s->sgi_pending[irq][cpu] == 0) {
1147                 GIC_DIST_CLEAR_PENDING(irq, 1 << cpu);
1148             }
1149         }
1150     } else if (offset < 0xf30) {
1151         /* GICD_SPENDSGIRn */
1152         if (s->revision == REV_11MPCORE) {
1153             goto bad_reg;
1154         }
1155         irq = (offset - 0xf20);
1156 
1157         if (!s->security_extn || attrs.secure ||
1158             GIC_DIST_TEST_GROUP(irq, 1 << cpu)) {
1159             GIC_DIST_SET_PENDING(irq, 1 << cpu);
1160             s->sgi_pending[irq][cpu] |= value;
1161         }
1162     } else {
1163         goto bad_reg;
1164     }
1165     gic_update(s);
1166     return;
1167 bad_reg:
1168     qemu_log_mask(LOG_GUEST_ERROR,
1169                   "gic_dist_writeb: Bad offset %x\n", (int)offset);
1170 }
1171 
1172 static void gic_dist_writew(void *opaque, hwaddr offset,
1173                             uint32_t value, MemTxAttrs attrs)
1174 {
1175     gic_dist_writeb(opaque, offset, value & 0xff, attrs);
1176     gic_dist_writeb(opaque, offset + 1, value >> 8, attrs);
1177 }
1178 
1179 static void gic_dist_writel(void *opaque, hwaddr offset,
1180                             uint32_t value, MemTxAttrs attrs)
1181 {
1182     GICState *s = (GICState *)opaque;
1183     if (offset == 0xf00) {
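        /* GICD_SGIR: per the GICv2 layout, bits [25:24] are the target list
         * filter, [23:16] the CPU target list and [3:0] the SGI number.
         * e.g. a write of 0x00010003 sends SGI 3 to CPU 0 only.
         */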
1184         int cpu;
1185         int irq;
1186         int mask;
1187         int target_cpu;
1188 
1189         cpu = gic_get_current_cpu(s);
1190         irq = value & 0x3ff;
1191         switch ((value >> 24) & 3) {
1192         case 0:
1193             mask = (value >> 16) & ALL_CPU_MASK;
1194             break;
1195         case 1:
1196             mask = ALL_CPU_MASK ^ (1 << cpu);
1197             break;
1198         case 2:
1199             mask = 1 << cpu;
1200             break;
1201         default:
1202             DPRINTF("Bad Soft Int target filter\n");
1203             mask = ALL_CPU_MASK;
1204             break;
1205         }
1206         GIC_DIST_SET_PENDING(irq, mask);
1207         target_cpu = ctz32(mask);
1208         while (target_cpu < GIC_NCPU) {
1209             s->sgi_pending[irq][target_cpu] |= (1 << cpu);
1210             mask &= ~(1 << target_cpu);
1211             target_cpu = ctz32(mask);
1212         }
1213         gic_update(s);
1214         return;
1215     }
1216     gic_dist_writew(opaque, offset, value & 0xffff, attrs);
1217     gic_dist_writew(opaque, offset + 2, value >> 16, attrs);
1218 }
1219 
1220 static MemTxResult gic_dist_write(void *opaque, hwaddr offset, uint64_t data,
1221                                   unsigned size, MemTxAttrs attrs)
1222 {
1223     switch (size) {
1224     case 1:
1225         gic_dist_writeb(opaque, offset, data, attrs);
1226         return MEMTX_OK;
1227     case 2:
1228         gic_dist_writew(opaque, offset, data, attrs);
1229         return MEMTX_OK;
1230     case 4:
1231         gic_dist_writel(opaque, offset, data, attrs);
1232         return MEMTX_OK;
1233     default:
1234         return MEMTX_ERROR;
1235     }
1236 }
1237 
1238 static inline uint32_t gic_apr_ns_view(GICState *s, int cpu, int regno)
1239 {
1240     /* Return the Nonsecure view of GICC_APR<regno>. This is the
1241      * second half of GICC_NSAPR.
1242      */
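    /* e.g. when GIC_MIN_BPR is 0 there are four banked APR/NSAPR words per
     * CPU and the Non-secure GICC_APR0/GICC_APR1 read the second half,
     * nsapr[2] and nsapr[3].
     */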
1243     switch (GIC_MIN_BPR) {
1244     case 0:
1245         if (regno < 2) {
1246             return s->nsapr[regno + 2][cpu];
1247         }
1248         break;
1249     case 1:
1250         if (regno == 0) {
1251             return s->nsapr[regno + 1][cpu];
1252         }
1253         break;
1254     case 2:
1255         if (regno == 0) {
1256             return extract32(s->nsapr[0][cpu], 16, 16);
1257         }
1258         break;
1259     case 3:
1260         if (regno == 0) {
1261             return extract32(s->nsapr[0][cpu], 8, 8);
1262         }
1263         break;
1264     default:
1265         g_assert_not_reached();
1266     }
1267     return 0;
1268 }
1269 
1270 static inline void gic_apr_write_ns_view(GICState *s, int cpu, int regno,
1271                                          uint32_t value)
1272 {
1273     /* Write the Nonsecure view of GICC_APR<regno>. */
1274     switch (GIC_MIN_BPR) {
1275     case 0:
1276         if (regno < 2) {
1277             s->nsapr[regno + 2][cpu] = value;
1278         }
1279         break;
1280     case 1:
1281         if (regno == 0) {
1282             s->nsapr[regno + 1][cpu] = value;
1283         }
1284         break;
1285     case 2:
1286         if (regno == 0) {
1287             s->nsapr[0][cpu] = deposit32(s->nsapr[0][cpu], 16, 16, value);
1288         }
1289         break;
1290     case 3:
1291         if (regno == 0) {
1292             s->nsapr[0][cpu] = deposit32(s->nsapr[0][cpu], 8, 8, value);
1293         }
1294         break;
1295     default:
1296         g_assert_not_reached();
1297     }
1298 }
1299 
1300 static MemTxResult gic_cpu_read(GICState *s, int cpu, int offset,
1301                                 uint64_t *data, MemTxAttrs attrs)
1302 {
1303     switch (offset) {
1304     case 0x00: /* Control */
1305         *data = gic_get_cpu_control(s, cpu, attrs);
1306         break;
1307     case 0x04: /* Priority mask */
1308         *data = gic_get_priority_mask(s, cpu, attrs);
1309         break;
1310     case 0x08: /* Binary Point */
1311         if (gic_cpu_ns_access(s, cpu, attrs)) {
1312             if (s->cpu_ctlr[cpu] & GICC_CTLR_CBPR) {
1313                 /* NS view of BPR when CBPR is 1 */
1314                 *data = MIN(s->bpr[cpu] + 1, 7);
1315             } else {
1316                 /* BPR is banked. Non-secure copy stored in ABPR. */
1317                 *data = s->abpr[cpu];
1318             }
1319         } else {
1320             *data = s->bpr[cpu];
1321         }
1322         break;
1323     case 0x0c: /* Acknowledge */
1324         *data = gic_acknowledge_irq(s, cpu, attrs);
1325         break;
1326     case 0x14: /* Running Priority */
1327         *data = gic_get_running_priority(s, cpu, attrs);
1328         break;
1329     case 0x18: /* Highest Pending Interrupt */
1330         *data = gic_get_current_pending_irq(s, cpu, attrs);
1331         break;
1332     case 0x1c: /* Aliased Binary Point */
1333         /* GIC v2, no security: ABPR
1334          * GIC v1, no security: not implemented (RAZ/WI)
1335          * With security extensions, secure access: ABPR (alias of NS BPR)
1336          * With security extensions, nonsecure access: RAZ/WI
1337          */
1338         if (!gic_has_groups(s) || (gic_cpu_ns_access(s, cpu, attrs))) {
1339             *data = 0;
1340         } else {
1341             *data = s->abpr[cpu];
1342         }
1343         break;
1344     case 0xd0: case 0xd4: case 0xd8: case 0xdc:
1345     {
1346         int regno = (offset - 0xd0) / 4;
1347 
1348         if (regno >= GIC_NR_APRS || s->revision != 2) {
1349             *data = 0;
1350         } else if (gic_cpu_ns_access(s, cpu, attrs)) {
1351             /* NS view of GICC_APR<n> is the top half of GIC_NSAPR<n> */
1352             *data = gic_apr_ns_view(s, cpu, regno);
1353         } else {
1354             *data = s->apr[regno][cpu];
1355         }
1356         break;
1357     }
1358     case 0xe0: case 0xe4: case 0xe8: case 0xec:
1359     {
1360         int regno = (offset - 0xe0) / 4;
1361 
1362         if (regno >= GIC_NR_APRS || s->revision != 2 || !gic_has_groups(s) ||
1363             gic_cpu_ns_access(s, cpu, attrs)) {
1364             *data = 0;
1365         } else {
1366             *data = s->nsapr[regno][cpu];
1367         }
1368         break;
1369     }
1370     default:
1371         qemu_log_mask(LOG_GUEST_ERROR,
1372                       "gic_cpu_read: Bad offset %x\n", (int)offset);
1373         *data = 0;
1374         break;
1375     }
1376     return MEMTX_OK;
1377 }
1378 
1379 static MemTxResult gic_cpu_write(GICState *s, int cpu, int offset,
1380                                  uint32_t value, MemTxAttrs attrs)
1381 {
1382     switch (offset) {
1383     case 0x00: /* Control */
1384         gic_set_cpu_control(s, cpu, value, attrs);
1385         break;
1386     case 0x04: /* Priority mask */
1387         gic_set_priority_mask(s, cpu, value, attrs);
1388         break;
1389     case 0x08: /* Binary Point */
1390         if (gic_cpu_ns_access(s, cpu, attrs)) {
1391             if (s->cpu_ctlr[cpu] & GICC_CTLR_CBPR) {
1392                 /* WI when CBPR is 1 */
1393                 return MEMTX_OK;
1394             } else {
1395                 s->abpr[cpu] = MAX(value & 0x7, GIC_MIN_ABPR);
1396             }
1397         } else {
1398             s->bpr[cpu] = MAX(value & 0x7, GIC_MIN_BPR);
1399         }
1400         break;
1401     case 0x10: /* End Of Interrupt */
1402         gic_complete_irq(s, cpu, value & 0x3ff, attrs);
1403         return MEMTX_OK;
1404     case 0x1c: /* Aliased Binary Point */
1405         if (!gic_has_groups(s) || (gic_cpu_ns_access(s, cpu, attrs))) {
1406             /* unimplemented, or NS access: RAZ/WI */
1407             return MEMTX_OK;
1408         } else {
1409             s->abpr[cpu] = MAX(value & 0x7, GIC_MIN_ABPR);
1410         }
1411         break;
1412     case 0xd0: case 0xd4: case 0xd8: case 0xdc:
1413     {
1414         int regno = (offset - 0xd0) / 4;
1415 
1416         if (regno >= GIC_NR_APRS || s->revision != 2) {
1417             return MEMTX_OK;
1418         }
1419         if (gic_cpu_ns_access(s, cpu, attrs)) {
1420             /* NS view of GICC_APR<n> is the top half of GIC_NSAPR<n> */
1421             gic_apr_write_ns_view(s, cpu, regno, value);
1422         } else {
1423             s->apr[regno][cpu] = value;
1424         }
1425         break;
1426     }
1427     case 0xe0: case 0xe4: case 0xe8: case 0xec:
1428     {
1429         int regno = (offset - 0xe0) / 4;
1430 
1431         if (regno >= GIC_NR_APRS || s->revision != 2) {
1432             return MEMTX_OK;
1433         }
1434         if (!gic_has_groups(s) || (gic_cpu_ns_access(s, cpu, attrs))) {
1435             return MEMTX_OK;
1436         }
1437         s->nsapr[regno][cpu] = value;
1438         break;
1439     }
1440     case 0x1000:
1441         /* GICC_DIR */
1442         gic_deactivate_irq(s, cpu, value & 0x3ff, attrs);
1443         break;
1444     default:
1445         qemu_log_mask(LOG_GUEST_ERROR,
1446                       "gic_cpu_write: Bad offset %x\n", (int)offset);
1447         return MEMTX_OK;
1448     }
1449     gic_update(s);
1450     return MEMTX_OK;
1451 }
1452 
1453 /* Wrappers to read/write the GIC CPU interface for the current CPU */
1454 static MemTxResult gic_thiscpu_read(void *opaque, hwaddr addr, uint64_t *data,
1455                                     unsigned size, MemTxAttrs attrs)
1456 {
1457     GICState *s = (GICState *)opaque;
1458     return gic_cpu_read(s, gic_get_current_cpu(s), addr, data, attrs);
1459 }
1460 
1461 static MemTxResult gic_thiscpu_write(void *opaque, hwaddr addr,
1462                                      uint64_t value, unsigned size,
1463                                      MemTxAttrs attrs)
1464 {
1465     GICState *s = (GICState *)opaque;
1466     return gic_cpu_write(s, gic_get_current_cpu(s), addr, value, attrs);
1467 }
1468 
1469 /* Wrappers to read/write the GIC CPU interface for a specific CPU.
1470  * These just decode the opaque pointer into GICState* + cpu id.
1471  */
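/* Descriptive note: arm_gic_realize() points each s->backref[i] back at s,
 * so subtracting the array base from the element pointer we were passed
 * recovers the CPU index for this MMIO region.
 */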
1472 static MemTxResult gic_do_cpu_read(void *opaque, hwaddr addr, uint64_t *data,
1473                                    unsigned size, MemTxAttrs attrs)
1474 {
1475     GICState **backref = (GICState **)opaque;
1476     GICState *s = *backref;
1477     int id = (backref - s->backref);
1478     return gic_cpu_read(s, id, addr, data, attrs);
1479 }
1480 
1481 static MemTxResult gic_do_cpu_write(void *opaque, hwaddr addr,
1482                                     uint64_t value, unsigned size,
1483                                     MemTxAttrs attrs)
1484 {
1485     GICState **backref = (GICState **)opaque;
1486     GICState *s = *backref;
1487     int id = (backref - s->backref);
1488     return gic_cpu_write(s, id, addr, value, attrs);
1489 }
1490 
1491 static const MemoryRegionOps gic_ops[2] = {
1492     {
1493         .read_with_attrs = gic_dist_read,
1494         .write_with_attrs = gic_dist_write,
1495         .endianness = DEVICE_NATIVE_ENDIAN,
1496     },
1497     {
1498         .read_with_attrs = gic_thiscpu_read,
1499         .write_with_attrs = gic_thiscpu_write,
1500         .endianness = DEVICE_NATIVE_ENDIAN,
1501     }
1502 };
1503 
1504 static const MemoryRegionOps gic_cpu_ops = {
1505     .read_with_attrs = gic_do_cpu_read,
1506     .write_with_attrs = gic_do_cpu_write,
1507     .endianness = DEVICE_NATIVE_ENDIAN,
1508 };
1509 
1510 static void arm_gic_realize(DeviceState *dev, Error **errp)
1511 {
1512     /* Device instance realize function for the GIC sysbus device */
1513     int i;
1514     GICState *s = ARM_GIC(dev);
1515     SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
1516     ARMGICClass *agc = ARM_GIC_GET_CLASS(s);
1517     Error *local_err = NULL;
1518 
1519     agc->parent_realize(dev, &local_err);
1520     if (local_err) {
1521         error_propagate(errp, local_err);
1522         return;
1523     }
1524 
1525     if (kvm_enabled() && !kvm_arm_supports_user_irq()) {
1526         error_setg(errp, "KVM with user space irqchip only works when the "
1527                          "host kernel supports KVM_CAP_ARM_USER_IRQ");
1528         return;
1529     }
1530 
1531     /* This creates distributor and main CPU interface (s->cpuiomem[0]) */
1532     gic_init_irqs_and_mmio(s, gic_set_irq, gic_ops, NULL);
1533 
1534     /* Extra core-specific regions for the CPU interfaces. This is
1535      * necessary for "franken-GIC" implementations, for example on
1536      * Exynos 4.
1537      * NB that the memory region size of 0x100 applies for the 11MPCore
1538      * and also cores following the GIC v1 spec (ie A9).
1539      * GIC v2 defines a larger memory region (0x1000) so this will need
1540      * to be extended when we implement A15.
1541      */
1542     for (i = 0; i < s->num_cpu; i++) {
1543         s->backref[i] = s;
1544         memory_region_init_io(&s->cpuiomem[i+1], OBJECT(s), &gic_cpu_ops,
1545                               &s->backref[i], "gic_cpu", 0x100);
1546         sysbus_init_mmio(sbd, &s->cpuiomem[i+1]);
1547     }
1548 }
1549 
1550 static void arm_gic_class_init(ObjectClass *klass, void *data)
1551 {
1552     DeviceClass *dc = DEVICE_CLASS(klass);
1553     ARMGICClass *agc = ARM_GIC_CLASS(klass);
1554 
1555     device_class_set_parent_realize(dc, arm_gic_realize, &agc->parent_realize);
1556 }
1557 
1558 static const TypeInfo arm_gic_info = {
1559     .name = TYPE_ARM_GIC,
1560     .parent = TYPE_ARM_GIC_COMMON,
1561     .instance_size = sizeof(GICState),
1562     .class_init = arm_gic_class_init,
1563     .class_size = sizeof(ARMGICClass),
1564 };
1565 
1566 static void arm_gic_register_types(void)
1567 {
1568     type_register_static(&arm_gic_info);
1569 }
1570 
1571 type_init(arm_gic_register_types)
1572