/*
 * ARM GIC support - internal interfaces
 *
 * Copyright (c) 2012 Linaro Limited
 * Written by Peter Maydell
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef QEMU_ARM_GIC_INTERNAL_H
#define QEMU_ARM_GIC_INTERNAL_H

#include "hw/registerfields.h"
#include "hw/intc/arm_gic.h"

#define ALL_CPU_MASK ((unsigned)(((1 << GIC_NCPU) - 1)))

#define GIC_BASE_IRQ 0

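/*
 * Per-IRQ distributor state accessors. These macros assume a local
 * "GICState *s" is in scope at the point of use; "irq" is the interrupt
 * number and "cm" is a bitmask of CPU interfaces, e.g. "1 << cpu" or
 * ALL_CPU_MASK (see gic_set_active() below for a typical use).
 */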
#define GIC_DIST_SET_ENABLED(irq, cm) (s->irq_state[irq].enabled |= (cm))
#define GIC_DIST_CLEAR_ENABLED(irq, cm) (s->irq_state[irq].enabled &= ~(cm))
#define GIC_DIST_TEST_ENABLED(irq, cm) ((s->irq_state[irq].enabled & (cm)) != 0)
#define GIC_DIST_SET_PENDING(irq, cm) (s->irq_state[irq].pending |= (cm))
#define GIC_DIST_CLEAR_PENDING(irq, cm) (s->irq_state[irq].pending &= ~(cm))
#define GIC_DIST_SET_ACTIVE(irq, cm) (s->irq_state[irq].active |= (cm))
#define GIC_DIST_CLEAR_ACTIVE(irq, cm) (s->irq_state[irq].active &= ~(cm))
#define GIC_DIST_TEST_ACTIVE(irq, cm) ((s->irq_state[irq].active & (cm)) != 0)
#define GIC_DIST_SET_MODEL(irq) (s->irq_state[irq].model = true)
#define GIC_DIST_CLEAR_MODEL(irq) (s->irq_state[irq].model = false)
#define GIC_DIST_TEST_MODEL(irq) (s->irq_state[irq].model)
#define GIC_DIST_SET_LEVEL(irq, cm) (s->irq_state[irq].level |= (cm))
#define GIC_DIST_CLEAR_LEVEL(irq, cm) (s->irq_state[irq].level &= ~(cm))
#define GIC_DIST_TEST_LEVEL(irq, cm) ((s->irq_state[irq].level & (cm)) != 0)
#define GIC_DIST_SET_EDGE_TRIGGER(irq) (s->irq_state[irq].edge_trigger = true)
#define GIC_DIST_CLEAR_EDGE_TRIGGER(irq) \
    (s->irq_state[irq].edge_trigger = false)
#define GIC_DIST_TEST_EDGE_TRIGGER(irq) (s->irq_state[irq].edge_trigger)
#define GIC_DIST_GET_PRIORITY(irq, cpu) (((irq) < GIC_INTERNAL) ?            \
                                    s->priority1[irq][cpu] :            \
                                    s->priority2[(irq) - GIC_INTERNAL])
#define GIC_DIST_TARGET(irq) (s->irq_target[irq])
#define GIC_DIST_CLEAR_GROUP(irq, cm) (s->irq_state[irq].group &= ~(cm))
#define GIC_DIST_SET_GROUP(irq, cm) (s->irq_state[irq].group |= (cm))
#define GIC_DIST_TEST_GROUP(irq, cm) ((s->irq_state[irq].group & (cm)) != 0)

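/* Distributor control register (GICD_CTLR) bits */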
#define GICD_CTLR_EN_GRP0 (1U << 0)
#define GICD_CTLR_EN_GRP1 (1U << 1)

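/* CPU interface control register (GICC_CTLR) bits */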
#define GICC_CTLR_EN_GRP0    (1U << 0)
#define GICC_CTLR_EN_GRP1    (1U << 1)
#define GICC_CTLR_ACK_CTL    (1U << 2)
#define GICC_CTLR_FIQ_EN     (1U << 3)
#define GICC_CTLR_CBPR       (1U << 4) /* GICv1: SBPR */
#define GICC_CTLR_EOIMODE    (1U << 9)
#define GICC_CTLR_EOIMODE_NS (1U << 10)

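/*
 * GICv2 virtual interface control registers (GICH_*), described with the
 * REG32/FIELD helpers from "hw/registerfields.h". Offsets are relative to
 * the start of the virtual interface control register frame.
 */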
REG32(GICH_HCR, 0x0)
    FIELD(GICH_HCR, EN, 0, 1)
    FIELD(GICH_HCR, UIE, 1, 1)
    FIELD(GICH_HCR, LRENPIE, 2, 1)
    FIELD(GICH_HCR, NPIE, 3, 1)
    FIELD(GICH_HCR, VGRP0EIE, 4, 1)
    FIELD(GICH_HCR, VGRP0DIE, 5, 1)
    FIELD(GICH_HCR, VGRP1EIE, 6, 1)
    FIELD(GICH_HCR, VGRP1DIE, 7, 1)
    FIELD(GICH_HCR, EOICount, 27, 5)

#define GICH_HCR_MASK \
    (R_GICH_HCR_EN_MASK | R_GICH_HCR_UIE_MASK | \
     R_GICH_HCR_LRENPIE_MASK | R_GICH_HCR_NPIE_MASK | \
     R_GICH_HCR_VGRP0EIE_MASK | R_GICH_HCR_VGRP0DIE_MASK | \
     R_GICH_HCR_VGRP1EIE_MASK | R_GICH_HCR_VGRP1DIE_MASK | \
     R_GICH_HCR_EOICount_MASK)

REG32(GICH_VTR, 0x4)
    FIELD(GICH_VTR, ListRegs, 0, 6)
    FIELD(GICH_VTR, PREbits, 26, 3)
    FIELD(GICH_VTR, PRIbits, 29, 3)

REG32(GICH_VMCR, 0x8)
    FIELD(GICH_VMCR, VMCCtlr, 0, 10)
    FIELD(GICH_VMCR, VMABP, 18, 3)
    FIELD(GICH_VMCR, VMBP, 21, 3)
    FIELD(GICH_VMCR, VMPriMask, 27, 5)

REG32(GICH_MISR, 0x10)
    FIELD(GICH_MISR, EOI, 0, 1)
    FIELD(GICH_MISR, U, 1, 1)
    FIELD(GICH_MISR, LRENP, 2, 1)
    FIELD(GICH_MISR, NP, 3, 1)
    FIELD(GICH_MISR, VGrp0E, 4, 1)
    FIELD(GICH_MISR, VGrp0D, 5, 1)
    FIELD(GICH_MISR, VGrp1E, 6, 1)
    FIELD(GICH_MISR, VGrp1D, 7, 1)

REG32(GICH_EISR0, 0x20)
REG32(GICH_EISR1, 0x24)
REG32(GICH_ELRSR0, 0x30)
REG32(GICH_ELRSR1, 0x34)
REG32(GICH_APR, 0xf0)

REG32(GICH_LR0, 0x100)
    FIELD(GICH_LR0, VirtualID, 0, 10)
    FIELD(GICH_LR0, PhysicalID, 10, 10)
    FIELD(GICH_LR0, CPUID, 10, 3)
    FIELD(GICH_LR0, EOI, 19, 1)
    FIELD(GICH_LR0, Priority, 23, 5)
    FIELD(GICH_LR0, State, 28, 2)
    FIELD(GICH_LR0, Grp1, 30, 1)
    FIELD(GICH_LR0, HW, 31, 1)

/* Last LR register */
REG32(GICH_LR63, 0x1fc)

#define GICH_LR_MASK \
    (R_GICH_LR0_VirtualID_MASK | R_GICH_LR0_PhysicalID_MASK | \
     R_GICH_LR0_CPUID_MASK | R_GICH_LR0_EOI_MASK | \
     R_GICH_LR0_Priority_MASK | R_GICH_LR0_State_MASK | \
     R_GICH_LR0_Grp1_MASK | R_GICH_LR0_HW_MASK)

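/* Encodings of the two-bit GICH_LR.State field */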
#define GICH_LR_STATE_INVALID         0
#define GICH_LR_STATE_PENDING         1
#define GICH_LR_STATE_ACTIVE          2
#define GICH_LR_STATE_ACTIVE_PENDING  3

#define GICH_LR_VIRT_ID(entry) (FIELD_EX32(entry, GICH_LR0, VirtualID))
#define GICH_LR_PHYS_ID(entry) (FIELD_EX32(entry, GICH_LR0, PhysicalID))
#define GICH_LR_CPUID(entry) (FIELD_EX32(entry, GICH_LR0, CPUID))
#define GICH_LR_EOI(entry) (FIELD_EX32(entry, GICH_LR0, EOI))
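/* The LR Priority field holds bits [7:3] of the priority, so shift back up */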
#define GICH_LR_PRIORITY(entry) (FIELD_EX32(entry, GICH_LR0, Priority) << 3)
#define GICH_LR_STATE(entry) (FIELD_EX32(entry, GICH_LR0, State))
#define GICH_LR_GROUP(entry) (FIELD_EX32(entry, GICH_LR0, Grp1))
#define GICH_LR_HW(entry) (FIELD_EX32(entry, GICH_LR0, HW))

#define GICH_LR_CLEAR_PENDING(entry) \
        ((entry) &= ~(GICH_LR_STATE_PENDING << R_GICH_LR0_State_SHIFT))
#define GICH_LR_SET_ACTIVE(entry) \
        ((entry) |= (GICH_LR_STATE_ACTIVE << R_GICH_LR0_State_SHIFT))
#define GICH_LR_CLEAR_ACTIVE(entry) \
        ((entry) &= ~(GICH_LR_STATE_ACTIVE << R_GICH_LR0_State_SHIFT))

/* Valid bits for GICC_CTLR for GICv1, v1 with security extensions,
 * GICv2 and GICv2 with security extensions:
 */
#define GICC_CTLR_V1_MASK    0x1
#define GICC_CTLR_V1_S_MASK  0x1f
#define GICC_CTLR_V2_MASK    0x21f
#define GICC_CTLR_V2_S_MASK  0x61f
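/*
 * For reference: 0x1 is just EN_GRP0; 0x1f covers EN_GRP0, EN_GRP1,
 * ACK_CTL, FIQ_EN and CBPR; 0x21f adds EOIMODE, and 0x61f adds
 * EOIMODE_NS as well.
 */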

/* The special cases for the revision property: */
#define REV_11MPCORE 0

uint32_t gic_acknowledge_irq(GICState *s, int cpu, MemTxAttrs attrs);
void gic_dist_set_priority(GICState *s, int cpu, int irq, uint8_t val,
                           MemTxAttrs attrs);

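/*
 * Return true if the interrupt is pending for any CPU interface in the
 * mask @cm, taking the 11MPCore special case and the edge/level pending
 * semantics into account.
 */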
static inline bool gic_test_pending(GICState *s, int irq, int cm)
{
    if (s->revision == REV_11MPCORE) {
        return s->irq_state[irq].pending & cm;
    } else {
        /* Edge-triggered interrupts are marked pending on a rising edge, but
         * level-triggered interrupts are considered pending either while the
         * level is asserted or when software has explicitly written to
         * GICD_ISPENDR to set the state pending.
         */
        return (s->irq_state[irq].pending & cm) ||
            (!GIC_DIST_TEST_EDGE_TRIGGER(irq) && GIC_DIST_TEST_LEVEL(irq, cm));
    }
}

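/*
 * Virtual CPU interfaces are numbered after the physical ones: a "cpu"
 * value of GIC_NCPU or above refers to the virtual interface of CPU
 * (cpu - GIC_NCPU).
 */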
static inline bool gic_is_vcpu(int cpu)
{
    return cpu >= GIC_NCPU;
}

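/* Map a (possibly virtual) CPU interface index back to a physical CPU id */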
static inline int gic_get_vcpu_real_id(int cpu)
{
    return (cpu >= GIC_NCPU) ? (cpu - GIC_NCPU) : cpu;
}

/* Return true if the given vIRQ state exists in an LR and is either active or
 * pending and active.
 *
 * This function is used to check that a guest's `end of interrupt' or
 * `interrupt deactivation' request is valid, and matches an LR of an
 * already acknowledged vIRQ (i.e. has the active bit set in its state).
 */
static inline bool gic_virq_is_valid(GICState *s, int irq, int vcpu)
{
    int cpu = gic_get_vcpu_real_id(vcpu);
    int lr_idx;

    for (lr_idx = 0; lr_idx < s->num_lrs; lr_idx++) {
        uint32_t *entry = &s->h_lr[lr_idx][cpu];

        if ((GICH_LR_VIRT_ID(*entry) == irq) &&
            (GICH_LR_STATE(*entry) & GICH_LR_STATE_ACTIVE)) {
            return true;
        }
    }

    return false;
}

/* Return a pointer to the LR entry matching the given vIRQ.
 *
 * This function is used to retrieve an LR for which we know for sure that the
 * corresponding vIRQ exists in the current context (i.e. its current state is
 * not `invalid'):
 *   - Either the corresponding vIRQ has been validated with gic_virq_is_valid()
 *     so it is `active' or `active and pending',
 *   - Or it was pending and has been selected by gic_get_best_virq(). It is now
 *     `pending', `active' or `active and pending', depending on what the guest
 *     already did with this vIRQ.
 *
 * Having multiple LRs with the same VirtualID leads to UNPREDICTABLE
 * behaviour in the GIC. We choose to return the first one that matches.
 */
static inline uint32_t *gic_get_lr_entry(GICState *s, int irq, int vcpu)
{
    int cpu = gic_get_vcpu_real_id(vcpu);
    int lr_idx;

    for (lr_idx = 0; lr_idx < s->num_lrs; lr_idx++) {
        uint32_t *entry = &s->h_lr[lr_idx][cpu];

        if ((GICH_LR_VIRT_ID(*entry) == irq) &&
            (GICH_LR_STATE(*entry) != GICH_LR_STATE_INVALID)) {
            return entry;
        }
    }

    g_assert_not_reached();
}

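/*
 * The helpers below act either on the distributor state or on the list
 * register holding the vIRQ, depending on whether @cpu refers to a
 * physical or a virtual CPU interface.
 */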
static inline bool gic_test_group(GICState *s, int irq, int cpu)
{
    if (gic_is_vcpu(cpu)) {
        uint32_t *entry = gic_get_lr_entry(s, irq, cpu);
        return GICH_LR_GROUP(*entry);
    } else {
        return GIC_DIST_TEST_GROUP(irq, 1 << cpu);
    }
}

static inline void gic_clear_pending(GICState *s, int irq, int cpu)
{
    if (gic_is_vcpu(cpu)) {
        uint32_t *entry = gic_get_lr_entry(s, irq, cpu);
        GICH_LR_CLEAR_PENDING(*entry);
    } else {
        /* Clear pending state for both level- and edge-triggered
         * interrupts. (Level-triggered interrupts with an active line
         * remain pending; see gic_test_pending.)
         */
        GIC_DIST_CLEAR_PENDING(irq, GIC_DIST_TEST_MODEL(irq) ? ALL_CPU_MASK
                                                             : (1 << cpu));
    }
}

static inline void gic_set_active(GICState *s, int irq, int cpu)
{
    if (gic_is_vcpu(cpu)) {
        uint32_t *entry = gic_get_lr_entry(s, irq, cpu);
        GICH_LR_SET_ACTIVE(*entry);
    } else {
        GIC_DIST_SET_ACTIVE(irq, 1 << cpu);
    }
}

static inline void gic_clear_active(GICState *s, int irq, int cpu)
{
    if (gic_is_vcpu(cpu)) {
        uint32_t *entry = gic_get_lr_entry(s, irq, cpu);
        GICH_LR_CLEAR_ACTIVE(*entry);

        if (GICH_LR_HW(*entry)) {
            /* Hardware interrupt. We must forward the deactivation request to
             * the distributor.
             */
            int phys_irq = GICH_LR_PHYS_ID(*entry);
            int rcpu = gic_get_vcpu_real_id(cpu);

            if (phys_irq < GIC_NR_SGIS || phys_irq >= GIC_MAXIRQ) {
                /* UNPREDICTABLE behaviour, we choose to ignore the request */
                return;
            }

            /* This is equivalent to an NS write to DIR on the physical CPU
             * interface. Hence Group 0 interrupt deactivation is ignored if
             * the GIC is secure.
             */
            if (!s->security_extn || GIC_DIST_TEST_GROUP(phys_irq, 1 << rcpu)) {
                GIC_DIST_CLEAR_ACTIVE(phys_irq, 1 << rcpu);
            }
        }
    } else {
        GIC_DIST_CLEAR_ACTIVE(irq, 1 << cpu);
    }
}

static inline int gic_get_priority(GICState *s, int irq, int cpu)
{
    if (gic_is_vcpu(cpu)) {
        uint32_t *entry = gic_get_lr_entry(s, irq, cpu);
        return GICH_LR_PRIORITY(*entry);
    } else {
        return GIC_DIST_GET_PRIORITY(irq, cpu);
    }
}

#endif /* QEMU_ARM_GIC_INTERNAL_H */