/*
 * ARM GICv3 support - internal interfaces
 *
 * Copyright (c) 2012 Linaro Limited
 * Copyright (c) 2015 Huawei.
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Written by Peter Maydell
 * Reworked for GICv3 by Shlomo Pongratz and Pavel Fedin
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef QEMU_ARM_GICV3_INTERNAL_H
#define QEMU_ARM_GICV3_INTERNAL_H

#include "hw/registerfields.h"
#include "hw/intc/arm_gicv3_common.h"

/* Distributor registers, as offsets from the distributor base address */
#define GICD_CTLR            0x0000
#define GICD_TYPER           0x0004
#define GICD_IIDR            0x0008
#define GICD_STATUSR         0x0010
#define GICD_SETSPI_NSR      0x0040
#define GICD_CLRSPI_NSR      0x0048
#define GICD_SETSPI_SR       0x0050
#define GICD_CLRSPI_SR       0x0058
#define GICD_SEIR            0x0068
#define GICD_IGROUPR         0x0080
#define GICD_ISENABLER       0x0100
#define GICD_ICENABLER       0x0180
#define GICD_ISPENDR         0x0200
#define GICD_ICPENDR         0x0280
#define GICD_ISACTIVER       0x0300
#define GICD_ICACTIVER       0x0380
#define GICD_IPRIORITYR      0x0400
#define GICD_ITARGETSR       0x0800
#define GICD_ICFGR           0x0C00
#define GICD_IGRPMODR        0x0D00
#define GICD_NSACR           0x0E00
#define GICD_SGIR            0x0F00
#define GICD_CPENDSGIR       0x0F10
#define GICD_SPENDSGIR       0x0F20
#define GICD_INMIR           0x0F80
#define GICD_INMIRnE         0x3B00
#define GICD_IROUTER         0x6000
#define GICD_IDREGS          0xFFD0

/* GICD_CTLR fields  */
#define GICD_CTLR_EN_GRP0           (1U << 0)
#define GICD_CTLR_EN_GRP1NS         (1U << 1) /* GICv3 5.3.20 */
#define GICD_CTLR_EN_GRP1S          (1U << 2)
#define GICD_CTLR_EN_GRP1_ALL       (GICD_CTLR_EN_GRP1NS | GICD_CTLR_EN_GRP1S)
/* Bit 4 is ARE if the system doesn't support TrustZone, ARE_S otherwise */
#define GICD_CTLR_ARE               (1U << 4)
#define GICD_CTLR_ARE_S             (1U << 4)
#define GICD_CTLR_ARE_NS            (1U << 5)
#define GICD_CTLR_DS                (1U << 6)
#define GICD_CTLR_E1NWF             (1U << 7)
#define GICD_CTLR_RWP               (1U << 31)
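
/*
 * Illustrative sketch (not code from the implementation): the group
 * enable bits above can be tested with a simple mask, e.g.
 *
 *     bool grp1_enabled = s->gicd_ctlr & GICD_CTLR_EN_GRP1_ALL;
 *
 * where s->gicd_ctlr is the cached GICD_CTLR value held in GICv3State.
 */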

#define GICD_TYPER_NMI_SHIFT           9
#define GICD_TYPER_LPIS_SHIFT          17

/* 16 bits EventId */
#define GICD_TYPER_IDBITS            0xf

/*
 * Redistributor frame offsets from RD_base
 */
#define GICR_SGI_OFFSET 0x10000
#define GICR_VLPI_OFFSET 0x20000

/*
 * Redistributor registers, offsets from RD_base
 */
#define GICR_CTLR             0x0000
#define GICR_IIDR             0x0004
#define GICR_TYPER            0x0008
#define GICR_STATUSR          0x0010
#define GICR_WAKER            0x0014
#define GICR_SETLPIR          0x0040
#define GICR_CLRLPIR          0x0048
#define GICR_PROPBASER        0x0070
#define GICR_PENDBASER        0x0078
#define GICR_INVLPIR          0x00A0
#define GICR_INVALLR          0x00B0
#define GICR_SYNCR            0x00C0
#define GICR_IDREGS           0xFFD0

/* SGI and PPI Redistributor registers, offsets from RD_base */
#define GICR_IGROUPR0         (GICR_SGI_OFFSET + 0x0080)
#define GICR_ISENABLER0       (GICR_SGI_OFFSET + 0x0100)
#define GICR_ICENABLER0       (GICR_SGI_OFFSET + 0x0180)
#define GICR_ISPENDR0         (GICR_SGI_OFFSET + 0x0200)
#define GICR_ICPENDR0         (GICR_SGI_OFFSET + 0x0280)
#define GICR_ISACTIVER0       (GICR_SGI_OFFSET + 0x0300)
#define GICR_ICACTIVER0       (GICR_SGI_OFFSET + 0x0380)
#define GICR_IPRIORITYR       (GICR_SGI_OFFSET + 0x0400)
#define GICR_ICFGR0           (GICR_SGI_OFFSET + 0x0C00)
#define GICR_ICFGR1           (GICR_SGI_OFFSET + 0x0C04)
#define GICR_IGRPMODR0        (GICR_SGI_OFFSET + 0x0D00)
#define GICR_NSACR            (GICR_SGI_OFFSET + 0x0E00)
#define GICR_INMIR0           (GICR_SGI_OFFSET + 0x0F80)
/* VLPI redistributor registers (in the VLPI_base frame), as offsets from RD_base */
#define GICR_VPROPBASER       (GICR_VLPI_OFFSET + 0x70)
#define GICR_VPENDBASER       (GICR_VLPI_OFFSET + 0x78)

#define GICR_CTLR_ENABLE_LPIS        (1U << 0)
#define GICR_CTLR_CES                (1U << 1)
#define GICR_CTLR_RWP                (1U << 3)
#define GICR_CTLR_DPG0               (1U << 24)
#define GICR_CTLR_DPG1NS             (1U << 25)
#define GICR_CTLR_DPG1S              (1U << 26)
#define GICR_CTLR_UWP                (1U << 31)

#define GICR_TYPER_PLPIS             (1U << 0)
#define GICR_TYPER_VLPIS             (1U << 1)
#define GICR_TYPER_DIRECTLPI         (1U << 3)
#define GICR_TYPER_LAST              (1U << 4)
#define GICR_TYPER_DPGS              (1U << 5)
#define GICR_TYPER_PROCNUM           (0xFFFFU << 8)
#define GICR_TYPER_COMMONLPIAFF      (0x3 << 24)
#define GICR_TYPER_AFFINITYVALUE     (0xFFFFFFFFULL << 32)
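
/*
 * Illustrative sketch (an assumption about usage, not implementation
 * code): the processor number field (GICR_TYPER_PROCNUM, bits [23:8])
 * can be pulled out of a cached GICR_TYPER value with extract64()
 * from "qemu/bitops.h":
 *
 *     uint32_t procnum = extract64(cs->gicr_typer, 8, 16);
 */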

#define GICR_WAKER_ProcessorSleep    (1U << 1)
#define GICR_WAKER_ChildrenAsleep    (1U << 2)

FIELD(GICR_PROPBASER, IDBITS, 0, 5)
FIELD(GICR_PROPBASER, INNERCACHE, 7, 3)
FIELD(GICR_PROPBASER, SHAREABILITY, 10, 2)
FIELD(GICR_PROPBASER, PHYADDR, 12, 40)
FIELD(GICR_PROPBASER, OUTERCACHE, 56, 3)

FIELD(GICR_PENDBASER, INNERCACHE, 7, 3)
FIELD(GICR_PENDBASER, SHAREABILITY, 10, 2)
FIELD(GICR_PENDBASER, PHYADDR, 16, 36)
FIELD(GICR_PENDBASER, OUTERCACHE, 56, 3)
FIELD(GICR_PENDBASER, PTZ, 62, 1)

#define GICR_PROPBASER_IDBITS_THRESHOLD          0xd
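
/*
 * The FIELD() macros above come from "hw/registerfields.h": each one
 * defines R_<REG>_<FIELD>_SHIFT, _LENGTH and _MASK constants for use
 * with the FIELD_EX64()/FIELD_DP64() accessors.  As an illustrative
 * sketch (not code taken from the implementation, assuming the cached
 * per-CPU gicr_propbaser copy), decoding a guest-written GICR_PROPBASER
 * might look like:
 *
 *     uint64_t propbaser = cs->gicr_propbaser;
 *     unsigned idbits = FIELD_EX64(propbaser, GICR_PROPBASER, IDBITS);
 *     uint64_t table = FIELD_EX64(propbaser, GICR_PROPBASER, PHYADDR) << 12;
 *
 * where PHYADDR holds bits [51:12] of the LPI configuration table
 * address, so it must be shifted back up by 12 to form the address.
 */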

/* These are the GICv4 VPROPBASER and VPENDBASER layouts; v4.1 is different */
FIELD(GICR_VPROPBASER, IDBITS, 0, 5)
FIELD(GICR_VPROPBASER, INNERCACHE, 7, 3)
FIELD(GICR_VPROPBASER, SHAREABILITY, 10, 2)
FIELD(GICR_VPROPBASER, PHYADDR, 12, 40)
FIELD(GICR_VPROPBASER, OUTERCACHE, 56, 3)

FIELD(GICR_VPENDBASER, INNERCACHE, 7, 3)
FIELD(GICR_VPENDBASER, SHAREABILITY, 10, 2)
FIELD(GICR_VPENDBASER, PHYADDR, 16, 36)
FIELD(GICR_VPENDBASER, OUTERCACHE, 56, 3)
FIELD(GICR_VPENDBASER, DIRTY, 60, 1)
FIELD(GICR_VPENDBASER, PENDINGLAST, 61, 1)
FIELD(GICR_VPENDBASER, IDAI, 62, 1)
FIELD(GICR_VPENDBASER, VALID, 63, 1)

#define ICC_CTLR_EL1_CBPR           (1U << 0)
#define ICC_CTLR_EL1_EOIMODE        (1U << 1)
#define ICC_CTLR_EL1_PMHE           (1U << 6)
#define ICC_CTLR_EL1_PRIBITS_SHIFT 8
#define ICC_CTLR_EL1_PRIBITS_MASK   (7U << ICC_CTLR_EL1_PRIBITS_SHIFT)
#define ICC_CTLR_EL1_IDBITS_SHIFT 11
#define ICC_CTLR_EL1_SEIS           (1U << 14)
#define ICC_CTLR_EL1_A3V            (1U << 15)
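
/*
 * Illustrative sketch (an assumption about usage, not implementation
 * code): ICC_CTLR_EL1.PRIBITS encodes the number of implemented
 * priority bits minus one, so a reader would recover it as
 *
 *     unsigned pribits = ((ctlr & ICC_CTLR_EL1_PRIBITS_MASK)
 *                         >> ICC_CTLR_EL1_PRIBITS_SHIFT) + 1;
 *
 * (ctlr being a hypothetical local copy of the register value).
 */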

#define ICC_PMR_PRIORITY_MASK    0xff
#define ICC_BPR_BINARYPOINT_MASK 0x07
#define ICC_IGRPEN_ENABLE        0x01

#define ICC_CTLR_EL3_CBPR_EL1S (1U << 0)
#define ICC_CTLR_EL3_CBPR_EL1NS (1U << 1)
#define ICC_CTLR_EL3_EOIMODE_EL3 (1U << 2)
#define ICC_CTLR_EL3_EOIMODE_EL1S (1U << 3)
#define ICC_CTLR_EL3_EOIMODE_EL1NS (1U << 4)
#define ICC_CTLR_EL3_RM (1U << 5)
#define ICC_CTLR_EL3_PMHE (1U << 6)
#define ICC_CTLR_EL3_PRIBITS_SHIFT 8
#define ICC_CTLR_EL3_IDBITS_SHIFT 11
#define ICC_CTLR_EL3_SEIS (1U << 14)
#define ICC_CTLR_EL3_A3V (1U << 15)
#define ICC_CTLR_EL3_NDS (1U << 17)

#define ICC_AP1R_EL1_NMI (1ULL << 63)
#define ICC_RPR_EL1_NSNMI (1ULL << 62)
#define ICC_RPR_EL1_NMI (1ULL << 63)

#define ICH_VMCR_EL2_VENG0_SHIFT 0
#define ICH_VMCR_EL2_VENG0 (1U << ICH_VMCR_EL2_VENG0_SHIFT)
#define ICH_VMCR_EL2_VENG1_SHIFT 1
#define ICH_VMCR_EL2_VENG1 (1U << ICH_VMCR_EL2_VENG1_SHIFT)
#define ICH_VMCR_EL2_VACKCTL (1U << 2)
#define ICH_VMCR_EL2_VFIQEN (1U << 3)
#define ICH_VMCR_EL2_VCBPR_SHIFT 4
#define ICH_VMCR_EL2_VCBPR (1U << ICH_VMCR_EL2_VCBPR_SHIFT)
#define ICH_VMCR_EL2_VEOIM_SHIFT 9
#define ICH_VMCR_EL2_VEOIM (1U << ICH_VMCR_EL2_VEOIM_SHIFT)
#define ICH_VMCR_EL2_VBPR1_SHIFT 18
#define ICH_VMCR_EL2_VBPR1_LENGTH 3
#define ICH_VMCR_EL2_VBPR1_MASK (0x7U << ICH_VMCR_EL2_VBPR1_SHIFT)
#define ICH_VMCR_EL2_VBPR0_SHIFT 21
#define ICH_VMCR_EL2_VBPR0_LENGTH 3
#define ICH_VMCR_EL2_VBPR0_MASK (0x7U << ICH_VMCR_EL2_VBPR0_SHIFT)
#define ICH_VMCR_EL2_VPMR_SHIFT 24
#define ICH_VMCR_EL2_VPMR_LENGTH 8
#define ICH_VMCR_EL2_VPMR_MASK (0xffU << ICH_VMCR_EL2_VPMR_SHIFT)

#define ICH_HCR_EL2_EN (1U << 0)
#define ICH_HCR_EL2_UIE (1U << 1)
#define ICH_HCR_EL2_LRENPIE (1U << 2)
#define ICH_HCR_EL2_NPIE (1U << 3)
#define ICH_HCR_EL2_VGRP0EIE (1U << 4)
#define ICH_HCR_EL2_VGRP0DIE (1U << 5)
#define ICH_HCR_EL2_VGRP1EIE (1U << 6)
#define ICH_HCR_EL2_VGRP1DIE (1U << 7)
#define ICH_HCR_EL2_TC (1U << 10)
#define ICH_HCR_EL2_TALL0 (1U << 11)
#define ICH_HCR_EL2_TALL1 (1U << 12)
#define ICH_HCR_EL2_TSEI (1U << 13)
#define ICH_HCR_EL2_TDIR (1U << 14)
#define ICH_HCR_EL2_EOICOUNT_SHIFT 27
#define ICH_HCR_EL2_EOICOUNT_LENGTH 5
#define ICH_HCR_EL2_EOICOUNT_MASK (0x1fU << ICH_HCR_EL2_EOICOUNT_SHIFT)

#define ICH_LR_EL2_VINTID_SHIFT 0
#define ICH_LR_EL2_VINTID_LENGTH 32
#define ICH_LR_EL2_VINTID_MASK (0xffffffffULL << ICH_LR_EL2_VINTID_SHIFT)
#define ICH_LR_EL2_PINTID_SHIFT 32
#define ICH_LR_EL2_PINTID_LENGTH 10
#define ICH_LR_EL2_PINTID_MASK (0x3ffULL << ICH_LR_EL2_PINTID_SHIFT)
/* Note that the EOI bit overlaps the top bit of the pINTID field */
#define ICH_LR_EL2_EOI (1ULL << 41)
#define ICH_LR_EL2_PRIORITY_SHIFT 48
#define ICH_LR_EL2_PRIORITY_LENGTH 8
#define ICH_LR_EL2_PRIORITY_MASK (0xffULL << ICH_LR_EL2_PRIORITY_SHIFT)
#define ICH_LR_EL2_NMI (1ULL << 59)
#define ICH_LR_EL2_GROUP (1ULL << 60)
#define ICH_LR_EL2_HW (1ULL << 61)
#define ICH_LR_EL2_STATE_SHIFT 62
#define ICH_LR_EL2_STATE_LENGTH 2
#define ICH_LR_EL2_STATE_MASK (3ULL << ICH_LR_EL2_STATE_SHIFT)
/* values for the state field: */
#define ICH_LR_EL2_STATE_INVALID 0
#define ICH_LR_EL2_STATE_PENDING 1
#define ICH_LR_EL2_STATE_ACTIVE 2
#define ICH_LR_EL2_STATE_ACTIVE_PENDING 3
#define ICH_LR_EL2_STATE_PENDING_BIT (1ULL << ICH_LR_EL2_STATE_SHIFT)
#define ICH_LR_EL2_STATE_ACTIVE_BIT (2ULL << ICH_LR_EL2_STATE_SHIFT)
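
/*
 * Illustrative sketch (assumptions noted, not the cpuif's actual code)
 * of how a list register value decomposes using the definitions above:
 *
 *     uint64_t lr = cs->ich_lr_el2[idx];
 *     int state = (lr & ICH_LR_EL2_STATE_MASK) >> ICH_LR_EL2_STATE_SHIFT;
 *     bool pending = lr & ICH_LR_EL2_STATE_PENDING_BIT;
 *     uint32_t vintid = extract64(lr, ICH_LR_EL2_VINTID_SHIFT,
 *                                 ICH_LR_EL2_VINTID_LENGTH);
 *
 * (cs->ich_lr_el2[] is the per-CPU array of list registers; idx is a
 * hypothetical loop index.)
 */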

#define ICH_MISR_EL2_EOI (1U << 0)
#define ICH_MISR_EL2_U (1U << 1)
#define ICH_MISR_EL2_LRENP (1U << 2)
#define ICH_MISR_EL2_NP (1U << 3)
#define ICH_MISR_EL2_VGRP0E (1U << 4)
#define ICH_MISR_EL2_VGRP0D (1U << 5)
#define ICH_MISR_EL2_VGRP1E (1U << 6)
#define ICH_MISR_EL2_VGRP1D (1U << 7)

#define ICH_VTR_EL2_LISTREGS_SHIFT 0
#define ICH_VTR_EL2_TDS (1U << 19)
#define ICH_VTR_EL2_NV4 (1U << 20)
#define ICH_VTR_EL2_A3V (1U << 21)
#define ICH_VTR_EL2_SEIS (1U << 22)
#define ICH_VTR_EL2_IDBITS_SHIFT 23
#define ICH_VTR_EL2_PREBITS_SHIFT 26
#define ICH_VTR_EL2_PRIBITS_SHIFT 29

#define ICV_AP1R_EL1_NMI (1ULL << 63)
#define ICV_RPR_EL1_NMI (1ULL << 63)

/* ITS Registers */

FIELD(GITS_BASER, SIZE, 0, 8)
FIELD(GITS_BASER, PAGESIZE, 8, 2)
FIELD(GITS_BASER, SHAREABILITY, 10, 2)
FIELD(GITS_BASER, PHYADDR, 12, 36)
FIELD(GITS_BASER, PHYADDRL_64K, 16, 32)
FIELD(GITS_BASER, PHYADDRH_64K, 12, 4)
FIELD(GITS_BASER, ENTRYSIZE, 48, 5)
FIELD(GITS_BASER, OUTERCACHE, 53, 3)
FIELD(GITS_BASER, TYPE, 56, 3)
FIELD(GITS_BASER, INNERCACHE, 59, 3)
FIELD(GITS_BASER, INDIRECT, 62, 1)
FIELD(GITS_BASER, VALID, 63, 1)

FIELD(GITS_CBASER, SIZE, 0, 8)
FIELD(GITS_CBASER, SHAREABILITY, 10, 2)
FIELD(GITS_CBASER, PHYADDR, 12, 40)
FIELD(GITS_CBASER, OUTERCACHE, 53, 3)
FIELD(GITS_CBASER, INNERCACHE, 59, 3)
FIELD(GITS_CBASER, VALID, 63, 1)

FIELD(GITS_CREADR, STALLED, 0, 1)
FIELD(GITS_CREADR, OFFSET, 5, 15)

FIELD(GITS_CWRITER, RETRY, 0, 1)
FIELD(GITS_CWRITER, OFFSET, 5, 15)

FIELD(GITS_CTLR, ENABLED, 0, 1)
FIELD(GITS_CTLR, QUIESCENT, 31, 1)

FIELD(GITS_TYPER, PHYSICAL, 0, 1)
FIELD(GITS_TYPER, VIRTUAL, 1, 1)
FIELD(GITS_TYPER, ITT_ENTRY_SIZE, 4, 4)
FIELD(GITS_TYPER, IDBITS, 8, 5)
FIELD(GITS_TYPER, DEVBITS, 13, 5)
FIELD(GITS_TYPER, SEIS, 18, 1)
FIELD(GITS_TYPER, PTA, 19, 1)
FIELD(GITS_TYPER, CIDBITS, 32, 4)
FIELD(GITS_TYPER, CIL, 36, 1)
FIELD(GITS_TYPER, VMOVP, 37, 1)

#define GITS_IDREGS           0xFFD0

#define GITS_BASER_RO_MASK                  (R_GITS_BASER_ENTRYSIZE_MASK | \
                                             R_GITS_BASER_TYPE_MASK)

#define GITS_BASER_PAGESIZE_4K                0
#define GITS_BASER_PAGESIZE_16K               1
#define GITS_BASER_PAGESIZE_64K               2

#define GITS_BASER_TYPE_DEVICE               1ULL
#define GITS_BASER_TYPE_VPE                  2ULL
#define GITS_BASER_TYPE_COLLECTION           4ULL

#define GITS_PAGE_SIZE_4K       0x1000
#define GITS_PAGE_SIZE_16K      0x4000
#define GITS_PAGE_SIZE_64K      0x10000
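
/*
 * Illustrative sketch (assumptions noted, not code lifted from the ITS
 * model): GITS_BASER<n>.SIZE holds the number of allocated pages minus
 * one and PAGESIZE selects the page size, so the bytes of guest memory
 * backing a table could be computed as:
 *
 *     static const uint32_t page_size[] = {
 *         GITS_PAGE_SIZE_4K, GITS_PAGE_SIZE_16K, GITS_PAGE_SIZE_64K
 *     };
 *     uint64_t baser = s->baser[i];
 *     uint32_t psz = page_size[FIELD_EX64(baser, GITS_BASER, PAGESIZE)];
 *     uint64_t bytes = (FIELD_EX64(baser, GITS_BASER, SIZE) + 1) * psz;
 *
 * (s->baser[] is assumed to be the ITS's cached GITS_BASER array; the
 * reserved PAGESIZE encoding 3 is ignored in this sketch.)
 */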

#define L1TABLE_ENTRY_SIZE         8

#define LPI_CTE_ENABLED          TABLE_ENTRY_VALID_MASK
#define LPI_PRIORITY_MASK         0xfc

#define GITS_CMDQ_ENTRY_WORDS 4
#define GITS_CMDQ_ENTRY_SIZE  (GITS_CMDQ_ENTRY_WORDS * sizeof(uint64_t))

#define CMD_MASK                  0xff

/* ITS Commands */
#define GITS_CMD_MOVI             0x01
#define GITS_CMD_INT              0x03
#define GITS_CMD_CLEAR            0x04
#define GITS_CMD_SYNC             0x05
#define GITS_CMD_MAPD             0x08
#define GITS_CMD_MAPC             0x09
#define GITS_CMD_MAPTI            0x0A
#define GITS_CMD_MAPI             0x0B
#define GITS_CMD_INV              0x0C
#define GITS_CMD_INVALL           0x0D
#define GITS_CMD_MOVALL           0x0E
#define GITS_CMD_DISCARD          0x0F
#define GITS_CMD_VMOVI            0x21
#define GITS_CMD_VMOVP            0x22
#define GITS_CMD_VSYNC            0x25
#define GITS_CMD_VMAPP            0x29
#define GITS_CMD_VMAPTI           0x2A
#define GITS_CMD_VMAPI            0x2B
#define GITS_CMD_VINVALL          0x2D
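
/*
 * Each ITS command occupies GITS_CMDQ_ENTRY_WORDS 64-bit words in the
 * command queue, with the opcode in the low byte of the first word.
 * A hedged sketch of reading and dispatching one command (illustrative
 * only; 'as' and 'addr' are hypothetical, and endianness conversion is
 * omitted for brevity):
 *
 *     uint64_t cmdpkt[GITS_CMDQ_ENTRY_WORDS];
 *     address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
 *                        cmdpkt, GITS_CMDQ_ENTRY_SIZE);
 *     switch (cmdpkt[0] & CMD_MASK) {
 *     case GITS_CMD_MAPD:
 *     case GITS_CMD_MAPC:
 *     default:
 *         break;
 *     }
 */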

/* MAPC command fields */
#define ICID_LENGTH                  16
#define ICID_MASK                 ((1U << ICID_LENGTH) - 1)
FIELD(MAPC, RDBASE, 16, 32)

#define RDBASE_PROCNUM_LENGTH        16
#define RDBASE_PROCNUM_MASK       ((1ULL << RDBASE_PROCNUM_LENGTH) - 1)

/* MAPD command fields */
#define ITTADDR_LENGTH               44
#define ITTADDR_SHIFT                 8
#define ITTADDR_MASK             MAKE_64BIT_MASK(ITTADDR_SHIFT, ITTADDR_LENGTH)
#define SIZE_MASK                 0x1f

/* MAPI command fields */
#define EVENTID_MASK              ((1ULL << 32) - 1)

/* MAPTI command fields */
#define pINTID_SHIFT                 32
#define pINTID_MASK               MAKE_64BIT_MASK(32, 32)

#define DEVID_SHIFT                  32
#define DEVID_MASK                MAKE_64BIT_MASK(32, 32)

#define VALID_SHIFT               63
#define CMD_FIELD_VALID_MASK      (1ULL << VALID_SHIFT)
#define L2_TABLE_VALID_MASK       CMD_FIELD_VALID_MASK
#define TABLE_ENTRY_VALID_MASK    (1ULL << 0)

/* MOVALL command fields */
FIELD(MOVALL_2, RDBASE1, 16, 36)
FIELD(MOVALL_3, RDBASE2, 16, 36)

/* MOVI command fields */
FIELD(MOVI_0, DEVICEID, 32, 32)
FIELD(MOVI_1, EVENTID, 0, 32)
FIELD(MOVI_2, ICID, 0, 16)

/* INV command fields */
FIELD(INV_0, DEVICEID, 32, 32)
FIELD(INV_1, EVENTID, 0, 32)

/* VMAPI, VMAPTI command fields */
FIELD(VMAPTI_0, DEVICEID, 32, 32)
FIELD(VMAPTI_1, EVENTID, 0, 32)
FIELD(VMAPTI_1, VPEID, 32, 16)
FIELD(VMAPTI_2, VINTID, 0, 32) /* VMAPTI only */
FIELD(VMAPTI_2, DOORBELL, 32, 32)

/* VMAPP command fields */
FIELD(VMAPP_0, ALLOC, 8, 1) /* GICv4.1 only */
FIELD(VMAPP_0, PTZ, 9, 1) /* GICv4.1 only */
FIELD(VMAPP_0, VCONFADDR, 16, 36) /* GICv4.1 only */
FIELD(VMAPP_1, DEFAULT_DOORBELL, 0, 32) /* GICv4.1 only */
FIELD(VMAPP_1, VPEID, 32, 16)
FIELD(VMAPP_2, RDBASE, 16, 36)
FIELD(VMAPP_2, V, 63, 1)
FIELD(VMAPP_3, VPTSIZE, 0, 8) /* For GICv4.0, bits [7:6] are RES0 */
FIELD(VMAPP_3, VPTADDR, 16, 36)

/* VMOVP command fields */
FIELD(VMOVP_0, SEQNUM, 32, 16) /* not used for GITS_TYPER.VMOVP == 1 */
FIELD(VMOVP_1, ITSLIST, 0, 16) /* not used for GITS_TYPER.VMOVP == 1 */
FIELD(VMOVP_1, VPEID, 32, 16)
FIELD(VMOVP_2, RDBASE, 16, 36)
FIELD(VMOVP_2, DB, 63, 1) /* GICv4.1 only */
FIELD(VMOVP_3, DEFAULT_DOORBELL, 0, 32) /* GICv4.1 only */

/* VMOVI command fields */
FIELD(VMOVI_0, DEVICEID, 32, 32)
FIELD(VMOVI_1, EVENTID, 0, 32)
FIELD(VMOVI_1, VPEID, 32, 16)
FIELD(VMOVI_2, D, 0, 1)
FIELD(VMOVI_2, DOORBELL, 32, 32)

/* VINVALL command fields */
FIELD(VINVALL_1, VPEID, 32, 16)

/*
 * 12-byte Interrupt Translation Table Entry (ITE) size,
 * as per Table 5.3 in the GICv3 spec
 * ITE Lower 8 Bytes
 *   Bits:    | 63 ... 48 | 47 ... 32 | 31 ... 26 | 25 ... 2 |   1     |  0    |
 *   Values:  | vPEID     | ICID      | unused    |  IntNum  | IntType | Valid |
 * ITE Higher 4 Bytes
 *   Bits:    | 31 ... 24 | 23 ... 0 |
 *   Values:  | unused    | Doorbell |
 * (When Doorbell is unused, as it always is for ITE_INTTYPE_PHYSICAL,
 * the value of that field in memory cannot be relied upon -- older
 * versions of QEMU did not correctly write to that memory.)
 */
#define ITS_ITT_ENTRY_SIZE            0xC

FIELD(ITE_L, VALID, 0, 1)
FIELD(ITE_L, INTTYPE, 1, 1)
FIELD(ITE_L, INTID, 2, 24)
FIELD(ITE_L, ICID, 32, 16)
FIELD(ITE_L, VPEID, 48, 16)
FIELD(ITE_H, DOORBELL, 0, 24)

/* Possible values for ITE_L INTTYPE */
#define ITE_INTTYPE_VIRTUAL 0
#define ITE_INTTYPE_PHYSICAL 1
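
/*
 * Illustrative sketch (not the implementation's code) of how the lower
 * 8 bytes of an ITE for a physical interrupt might be assembled with
 * FIELD_DP64(); intid and icid are hypothetical local variables:
 *
 *     uint64_t itel = 0;
 *     itel = FIELD_DP64(itel, ITE_L, VALID, 1);
 *     itel = FIELD_DP64(itel, ITE_L, INTTYPE, ITE_INTTYPE_PHYSICAL);
 *     itel = FIELD_DP64(itel, ITE_L, INTID, intid);
 *     itel = FIELD_DP64(itel, ITE_L, ICID, icid);
 */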

/* 16 bits EventId */
#define ITS_IDBITS                   GICD_TYPER_IDBITS

/* 16 bits DeviceId */
#define ITS_DEVBITS                   0xF

/* 16 bits CollectionId */
#define ITS_CIDBITS                  0xF

/*
 * 8 bytes Device Table Entry size
 * Valid = 1 bit, ITTAddr = 44 bits, Size = 5 bits
 */
#define GITS_DTE_SIZE                 (0x8ULL)

FIELD(DTE, VALID, 0, 1)
FIELD(DTE, SIZE, 1, 5)
FIELD(DTE, ITTADDR, 6, 44)
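
/*
 * Illustrative sketch of unpacking a DTE read back from the Device
 * table (an assumption based on the field layout above: the ITT
 * address is taken to be stored right-shifted by ITTADDR_SHIFT):
 *
 *     bool valid = FIELD_EX64(dteval, DTE, VALID);
 *     unsigned size = FIELD_EX64(dteval, DTE, SIZE);
 *     uint64_t ittaddr = FIELD_EX64(dteval, DTE, ITTADDR) << ITTADDR_SHIFT;
 *
 * (dteval is a hypothetical 64-bit value loaded from guest memory.)
 */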

/*
 * 8 bytes Collection Table Entry size
 * Valid = 1 bit, RDBase = 16 bits
 */
#define GITS_CTE_SIZE                 (0x8ULL)
FIELD(CTE, VALID, 0, 1)
FIELD(CTE, RDBASE, 1, RDBASE_PROCNUM_LENGTH)
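
/*
 * Illustrative sketch (assumptions noted): since GITS_TYPER.PTA == 0
 * means RDBASE values are processor numbers rather than addresses,
 * resolving the target redistributor for a collection might look like:
 *
 *     uint32_t procnum = FIELD_EX64(cteval, CTE, RDBASE);
 *     GICv3CPUState *cs = &g->cpu[procnum];
 *
 * (cteval is a hypothetical CTE value read from guest memory and g is
 * the GICv3State the ITS is attached to; a bounds check against
 * g->num_cpu would be needed in real code.)
 */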

/*
 * 8 bytes VPE table entry size:
 * Valid = 1 bit, VPTsize = 5 bits, VPTaddr = 36 bits, RDbase = 16 bits
 *
 * Field sizes for Valid and size are mandated; field sizes for RDbase
 * and VPT_addr are IMPDEF.
 */
#define GITS_VPE_SIZE 0x8ULL

FIELD(VTE, VALID, 0, 1)
FIELD(VTE, VPTSIZE, 1, 5)
FIELD(VTE, VPTADDR, 6, 36)
FIELD(VTE, RDBASE, 42, RDBASE_PROCNUM_LENGTH)

/* Special interrupt IDs */
#define INTID_SECURE 1020
#define INTID_NONSECURE 1021
#define INTID_NMI 1022
#define INTID_SPURIOUS 1023

/* Functions internal to the emulated GICv3 */

/**
 * gicv3_redist_size:
 * @s: GICv3State
 *
 * Return the size of the redistributor register frame in bytes
 * (which depends on what GIC version this is)
 */
static inline int gicv3_redist_size(GICv3State *s)
{
    /*
     * Redistributor size is controlled by the redistributor GICR_TYPER.VLPIS.
     * It's the same for every redistributor in the GIC, so arbitrarily
     * use the register field in the first one.
     */
    if (s->cpu[0].gicr_typer & GICR_TYPER_VLPIS) {
        return GICV4_REDIST_SIZE;
    } else {
        return GICV3_REDIST_SIZE;
    }
}
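
/*
 * Illustrative use (a sketch, not necessarily how callers size their
 * regions): since each CPU gets one redistributor frame, a single
 * contiguous redistributor region for the whole GIC would need
 *
 *     uint64_t region_size = (uint64_t)s->num_cpu * gicv3_redist_size(s);
 *
 * bytes of address space.
 */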

/**
 * gicv3_intid_is_special:
 * @intid: interrupt ID
 *
 * Return true if @intid is a special interrupt ID (1020 to
 * 1023 inclusive). This corresponds to the GIC spec pseudocode
 * IsSpecial() function.
 */
static inline bool gicv3_intid_is_special(int intid)
{
    return intid >= INTID_SECURE && intid <= INTID_SPURIOUS;
}

/**
 * gicv3_redist_update:
 * @cs: GICv3CPUState for this redistributor
 *
 * Recalculate the highest priority pending interrupt after a
 * change to redistributor state, and inform the CPU accordingly.
 */
void gicv3_redist_update(GICv3CPUState *cs);

/**
 * gicv3_update:
 * @s: GICv3State
 * @start: first interrupt whose state changed
 * @len: length of the range of interrupts whose state changed
 *
 * Recalculate the highest priority pending interrupts after a
 * change to the distributor state affecting @len interrupts
 * starting at @start, and inform the CPUs accordingly.
 */
void gicv3_update(GICv3State *s, int start, int len);

/**
 * gicv3_full_update_noirqset:
 * @s: GICv3State
 *
 * Recalculate the cached information about highest priority
 * pending interrupts, but don't inform the CPUs. This should be
 * called after an incoming migration has loaded new state.
 */
void gicv3_full_update_noirqset(GICv3State *s);

/**
 * gicv3_full_update:
 * @s: GICv3State
 *
 * Recalculate the highest priority pending interrupts after
 * a change that could affect the status of all interrupts,
 * and inform the CPUs accordingly.
 */
void gicv3_full_update(GICv3State *s);
MemTxResult gicv3_dist_read(void *opaque, hwaddr offset, uint64_t *data,
                            unsigned size, MemTxAttrs attrs);
MemTxResult gicv3_dist_write(void *opaque, hwaddr addr, uint64_t data,
                             unsigned size, MemTxAttrs attrs);
MemTxResult gicv3_redist_read(void *opaque, hwaddr offset, uint64_t *data,
                              unsigned size, MemTxAttrs attrs);
MemTxResult gicv3_redist_write(void *opaque, hwaddr offset, uint64_t data,
                               unsigned size, MemTxAttrs attrs);
void gicv3_dist_set_irq(GICv3State *s, int irq, int level);
void gicv3_redist_set_irq(GICv3CPUState *cs, int irq, int level);
void gicv3_redist_process_lpi(GICv3CPUState *cs, int irq, int level);
/**
 * gicv3_redist_process_vlpi:
 * @cs: GICv3CPUState
 * @irq: (virtual) interrupt number
 * @vptaddr: (guest) address of VLPI table
 * @doorbell: doorbell (physical) interrupt number (1023 for "no doorbell")
 * @level: level to set @irq to
 *
 * Process a virtual LPI being directly injected by the ITS. This function
 * will update the VLPI table specified by @vptaddr. If the
 * vCPU corresponding to that VLPI table is currently running on
 * the CPU associated with this redistributor, directly inject the VLPI
 * @irq. If the vCPU is not running on this CPU, raise the doorbell
 * interrupt instead.
 */
void gicv3_redist_process_vlpi(GICv3CPUState *cs, int irq, uint64_t vptaddr,
                               int doorbell, int level);
/**
 * gicv3_redist_vlpi_pending:
 * @cs: GICv3CPUState
 * @irq: (virtual) interrupt number
 * @level: level to set @irq to
 *
 * Set/clear the pending status of a virtual LPI in the vLPI table
 * that this redistributor is currently using. (The difference between
 * this and gicv3_redist_process_vlpi() is that this is called from
 * the cpuif and does not need to do the not-running-on-this-vcpu checks.)
 */
void gicv3_redist_vlpi_pending(GICv3CPUState *cs, int irq, int level);

void gicv3_redist_lpi_pending(GICv3CPUState *cs, int irq, int level);
/**
 * gicv3_redist_update_lpi:
 * @cs: GICv3CPUState
 *
 * Scan the LPI pending table and recalculate the highest priority
 * pending LPI and also the overall highest priority pending interrupt.
 */
void gicv3_redist_update_lpi(GICv3CPUState *cs);
/**
 * gicv3_redist_update_lpi_only:
 * @cs: GICv3CPUState
 *
 * Scan the LPI pending table and recalculate cs->hpplpi only,
 * without calling gicv3_redist_update() to recalculate the overall
 * highest priority pending interrupt. This should be called after
 * an incoming migration has loaded new state.
 */
void gicv3_redist_update_lpi_only(GICv3CPUState *cs);
/**
 * gicv3_redist_inv_lpi:
 * @cs: GICv3CPUState
 * @irq: LPI to invalidate cached information for
 *
 * Forget or update any cached information associated with this LPI.
 */
void gicv3_redist_inv_lpi(GICv3CPUState *cs, int irq);
/**
 * gicv3_redist_inv_vlpi:
 * @cs: GICv3CPUState
 * @irq: vLPI to invalidate cached information for
 * @vptaddr: (guest) address of vLPI table
 *
 * Forget or update any cached information associated with this vLPI.
 */
void gicv3_redist_inv_vlpi(GICv3CPUState *cs, int irq, uint64_t vptaddr);
/**
 * gicv3_redist_mov_lpi:
 * @src: source redistributor
 * @dest: destination redistributor
 * @irq: LPI to update
 *
 * Move the pending state of the specified LPI from @src to @dest,
 * as required by the ITS MOVI command.
 */
void gicv3_redist_mov_lpi(GICv3CPUState *src, GICv3CPUState *dest, int irq);
/**
 * gicv3_redist_movall_lpis:
 * @src: source redistributor
 * @dest: destination redistributor
 *
 * Scan the LPI pending table for @src, and for each pending LPI there
 * mark it as not-pending for @src and pending for @dest, as required
 * by the ITS MOVALL command.
 */
void gicv3_redist_movall_lpis(GICv3CPUState *src, GICv3CPUState *dest);
/**
 * gicv3_redist_mov_vlpi:
 * @src: source redistributor
 * @src_vptaddr: (guest) address of source VLPI table
 * @dest: destination redistributor
 * @dest_vptaddr: (guest) address of destination VLPI table
 * @irq: VLPI to update
 * @doorbell: doorbell for destination (1023 for "no doorbell")
 *
 * Move the pending state of the specified VLPI from @src to @dest,
 * as required by the ITS VMOVI command.
 */
void gicv3_redist_mov_vlpi(GICv3CPUState *src, uint64_t src_vptaddr,
                           GICv3CPUState *dest, uint64_t dest_vptaddr,
                           int irq, int doorbell);
/**
 * gicv3_redist_vinvall:
 * @cs: GICv3CPUState
 * @vptaddr: address of VLPI pending table
 *
 * On redistributor @cs, invalidate all cached information associated
 * with the vCPU defined by @vptaddr.
 */
void gicv3_redist_vinvall(GICv3CPUState *cs, uint64_t vptaddr);

void gicv3_redist_send_sgi(GICv3CPUState *cs, int grp, int irq, bool ns);
void gicv3_init_cpuif(GICv3State *s);

/**
 * gicv3_cpuif_update:
 * @cs: GICv3CPUState for the CPU to update
 *
 * Recalculate whether to assert the IRQ or FIQ lines after a change
 * to the current highest priority pending interrupt, the CPU's
 * current running priority or the CPU's current exception level or
 * security state.
 */
void gicv3_cpuif_update(GICv3CPUState *cs);

/*
 * gicv3_cpuif_virt_irq_fiq_update:
 * @cs: GICv3CPUState for the CPU to update
 *
 * Recalculate whether to assert the virtual IRQ or FIQ lines after
 * a change to the current highest priority pending virtual interrupt.
 * Note that this does not recalculate and change the maintenance
 * interrupt status (for that, see gicv3_cpuif_virt_update()).
 */
void gicv3_cpuif_virt_irq_fiq_update(GICv3CPUState *cs);

static inline uint32_t gicv3_iidr(void)
{
    /* Return the Implementer Identification Register value
     * for the emulated GICv3, as reported in GICD_IIDR and GICR_IIDR.
     *
     * We claim to be an ARM r0p0 with a zero ProductID.
     * This is the same as an r0p0 GIC-500.
     */
    return 0x43b;
}

/* CoreSight PIDR0 values for ARM GICv3 implementations */
#define GICV3_PIDR0_DIST 0x92
#define GICV3_PIDR0_REDIST 0x93
#define GICV3_PIDR0_ITS 0x94

static inline uint32_t gicv3_idreg(GICv3State *s, int regoffset, uint8_t pidr0)
{
    /* Return the value of the CoreSight ID register at the specified
     * offset from the first ID register (as found in the distributor
     * and redistributor register banks).
     * These values indicate an ARM implementation of a GICv3 or v4.
     */
    static const uint8_t gicd_ids[] = {
        0x44, 0x00, 0x00, 0x00, 0x92, 0xB4, 0x0B, 0x00, 0x0D, 0xF0, 0x05, 0xB1
    };
    uint32_t id;

    regoffset /= 4;

    if (regoffset == 4) {
        return pidr0;
    }
    id = gicd_ids[regoffset];
    if (regoffset == 6) {
        /* PIDR2 bits [7:4] are the GIC architecture revision */
        id |= s->revision << 4;
    }
    return id;
}

/**
 * gicv3_irq_group:
 *
 * Return the group which this interrupt is configured as (GICV3_G0,
 * GICV3_G1 or GICV3_G1NS).
 */
static inline int gicv3_irq_group(GICv3State *s, GICv3CPUState *cs, int irq)
{
    bool grpbit, grpmodbit;

    if (irq < GIC_INTERNAL) {
        grpbit = extract32(cs->gicr_igroupr0, irq, 1);
        grpmodbit = extract32(cs->gicr_igrpmodr0, irq, 1);
    } else {
        grpbit = gicv3_gicd_group_test(s, irq);
        grpmodbit = gicv3_gicd_grpmod_test(s, irq);
    }
    if (grpbit) {
        return GICV3_G1NS;
    }
    if (s->gicd_ctlr & GICD_CTLR_DS) {
        return GICV3_G0;
    }
    return grpmodbit ? GICV3_G1 : GICV3_G0;
}

/**
 * gicv3_redist_affid:
 *
 * Return the 32-bit affinity ID of the CPU connected to this redistributor
 */
static inline uint32_t gicv3_redist_affid(GICv3CPUState *cs)
{
    return cs->gicr_typer >> 32;
}

/**
 * gicv3_cache_target_cpustate:
 *
 * Update the cached CPU state corresponding to the target for this interrupt
 * (which is kept in s->gicd_irouter_target[]).
 */
static inline void gicv3_cache_target_cpustate(GICv3State *s, int irq)
{
    GICv3CPUState *cs = NULL;
    int i;
    uint32_t tgtaff = extract64(s->gicd_irouter[irq], 0, 24) |
        extract64(s->gicd_irouter[irq], 32, 8) << 24;

    for (i = 0; i < s->num_cpu; i++) {
        if (s->cpu[i].gicr_typer >> 32 == tgtaff) {
            cs = &s->cpu[i];
            break;
        }
    }

    s->gicd_irouter_target[irq] = cs;
}

/**
 * gicv3_cache_all_target_cpustates:
 *
 * Populate the entire cache of CPU state pointers for interrupt targets
 * (eg after inbound migration or CPU reset)
 */
static inline void gicv3_cache_all_target_cpustates(GICv3State *s)
{
    int irq;

    for (irq = GIC_INTERNAL; irq < GICV3_MAXIRQ; irq++) {
        gicv3_cache_target_cpustate(s, irq);
    }
}

void gicv3_set_gicv3state(CPUState *cpu, GICv3CPUState *s);

#endif /* QEMU_ARM_GICV3_INTERNAL_H */