/*
 * ARM GICv3 support - internal interfaces
 *
 * Copyright (c) 2012 Linaro Limited
 * Copyright (c) 2015 Huawei.
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Written by Peter Maydell
 * Reworked for GICv3 by Shlomo Pongratz and Pavel Fedin
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef QEMU_ARM_GICV3_INTERNAL_H
#define QEMU_ARM_GICV3_INTERNAL_H

#include "hw/registerfields.h"
#include "hw/intc/arm_gicv3_common.h"

/* Distributor registers, as offsets from the distributor base address */
#define GICD_CTLR            0x0000
#define GICD_TYPER           0x0004
#define GICD_IIDR            0x0008
#define GICD_TYPER2          0x000C
#define GICD_STATUSR         0x0010
#define GICD_SETSPI_NSR      0x0040
#define GICD_CLRSPI_NSR      0x0048
#define GICD_SETSPI_SR       0x0050
#define GICD_CLRSPI_SR       0x0058
#define GICD_SEIR            0x0068
#define GICD_IGROUPR         0x0080
#define GICD_ISENABLER       0x0100
#define GICD_ICENABLER       0x0180
#define GICD_ISPENDR         0x0200
#define GICD_ICPENDR         0x0280
#define GICD_ISACTIVER       0x0300
#define GICD_ICACTIVER       0x0380
#define GICD_IPRIORITYR      0x0400
#define GICD_ITARGETSR       0x0800
#define GICD_ICFGR           0x0C00
#define GICD_IGRPMODR        0x0D00
#define GICD_NSACR           0x0E00
#define GICD_SGIR            0x0F00
#define GICD_CPENDSGIR       0x0F10
#define GICD_SPENDSGIR       0x0F20
#define GICD_INMIR           0x0F80
#define GICD_INMIRnE         0x3B00
#define GICD_IROUTER         0x6000
#define GICD_IDREGS          0xFFD0

/* GICD_CTLR fields  */
#define GICD_CTLR_EN_GRP0           (1U << 0)
#define GICD_CTLR_EN_GRP1NS         (1U << 1) /* GICv3 5.3.20 */
#define GICD_CTLR_EN_GRP1S          (1U << 2)
#define GICD_CTLR_EN_GRP1_ALL       (GICD_CTLR_EN_GRP1NS | GICD_CTLR_EN_GRP1S)
/* Bit 4 is ARE if the system doesn't support TrustZone, ARE_S otherwise */
#define GICD_CTLR_ARE               (1U << 4)
#define GICD_CTLR_ARE_S             (1U << 4)
#define GICD_CTLR_ARE_NS            (1U << 5)
#define GICD_CTLR_DS                (1U << 6)
#define GICD_CTLR_E1NWF             (1U << 7)
#define GICD_CTLR_RWP               (1U << 31)

#define GICD_TYPER_NMI_SHIFT           9
#define GICD_TYPER_LPIS_SHIFT          17

/* 16 bits EventId */
#define GICD_TYPER_IDBITS            0xf

/*
 * Redistributor frame offsets from RD_base
 */
#define GICR_SGI_OFFSET 0x10000
#define GICR_VLPI_OFFSET 0x20000

/*
 * Redistributor registers, offsets from RD_base
 */
#define GICR_CTLR             0x0000
#define GICR_IIDR             0x0004
#define GICR_TYPER            0x0008
#define GICR_STATUSR          0x0010
#define GICR_WAKER            0x0014
#define GICR_SETLPIR          0x0040
#define GICR_CLRLPIR          0x0048
#define GICR_PROPBASER        0x0070
#define GICR_PENDBASER        0x0078
#define GICR_INVLPIR          0x00A0
#define GICR_INVALLR          0x00B0
#define GICR_SYNCR            0x00C0
#define GICR_IDREGS           0xFFD0

/* SGI and PPI Redistributor registers, offsets from RD_base */
#define GICR_IGROUPR0         (GICR_SGI_OFFSET + 0x0080)
#define GICR_ISENABLER0       (GICR_SGI_OFFSET + 0x0100)
#define GICR_ICENABLER0       (GICR_SGI_OFFSET + 0x0180)
#define GICR_ISPENDR0         (GICR_SGI_OFFSET + 0x0200)
#define GICR_ICPENDR0         (GICR_SGI_OFFSET + 0x0280)
#define GICR_ISACTIVER0       (GICR_SGI_OFFSET + 0x0300)
#define GICR_ICACTIVER0       (GICR_SGI_OFFSET + 0x0380)
#define GICR_IPRIORITYR       (GICR_SGI_OFFSET + 0x0400)
#define GICR_ICFGR0           (GICR_SGI_OFFSET + 0x0C00)
#define GICR_ICFGR1           (GICR_SGI_OFFSET + 0x0C04)
#define GICR_IGRPMODR0        (GICR_SGI_OFFSET + 0x0D00)
#define GICR_NSACR            (GICR_SGI_OFFSET + 0x0E00)
#define GICR_INMIR0           (GICR_SGI_OFFSET + 0x0F80)

/* VLPI redistributor registers, offsets from RD_base (VLPI_base == RD_base + GICR_VLPI_OFFSET) */
#define GICR_VPROPBASER       (GICR_VLPI_OFFSET + 0x70)
#define GICR_VPENDBASER       (GICR_VLPI_OFFSET + 0x78)

#define GICR_CTLR_ENABLE_LPIS        (1U << 0)
#define GICR_CTLR_CES                (1U << 1)
#define GICR_CTLR_RWP                (1U << 3)
#define GICR_CTLR_DPG0               (1U << 24)
#define GICR_CTLR_DPG1NS             (1U << 25)
#define GICR_CTLR_DPG1S              (1U << 26)
#define GICR_CTLR_UWP                (1U << 31)

#define GICR_TYPER_PLPIS             (1U << 0)
#define GICR_TYPER_VLPIS             (1U << 1)
#define GICR_TYPER_DIRECTLPI         (1U << 3)
#define GICR_TYPER_LAST              (1U << 4)
#define GICR_TYPER_DPGS              (1U << 5)
#define GICR_TYPER_PROCNUM           (0xFFFFU << 8)
#define GICR_TYPER_COMMONLPIAFF      (0x3 << 24)
#define GICR_TYPER_AFFINITYVALUE     (0xFFFFFFFFULL << 32)

#define GICR_WAKER_ProcessorSleep    (1U << 1)
#define GICR_WAKER_ChildrenAsleep    (1U << 2)

FIELD(GICR_PROPBASER, IDBITS, 0, 5)
FIELD(GICR_PROPBASER, INNERCACHE, 7, 3)
FIELD(GICR_PROPBASER, SHAREABILITY, 10, 2)
FIELD(GICR_PROPBASER, PHYADDR, 12, 40)
FIELD(GICR_PROPBASER, OUTERCACHE, 56, 3)

FIELD(GICR_PENDBASER, INNERCACHE, 7, 3)
FIELD(GICR_PENDBASER, SHAREABILITY, 10, 2)
FIELD(GICR_PENDBASER, PHYADDR, 16, 36)
FIELD(GICR_PENDBASER, OUTERCACHE, 56, 3)
FIELD(GICR_PENDBASER, PTZ, 62, 1)
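
/*
 * Illustrative only (not used by the emulation itself; "propbaser" is a
 * hypothetical local variable): each FIELD() above also defines
 * R_<REG>_<FIELD>_SHIFT/_LENGTH/_MASK constants, so callers typically
 * access these registers via the FIELD_EX64()/FIELD_DP64() helpers from
 * hw/registerfields.h, e.g.
 *
 *   uint64_t idbits = FIELD_EX64(propbaser, GICR_PROPBASER, IDBITS);
 *   propbaser = FIELD_DP64(propbaser, GICR_PROPBASER, SHAREABILITY, 1);
 */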

#define GICR_PROPBASER_IDBITS_THRESHOLD          0xd

/* These are the GICv4 VPROPBASER and VPENDBASER layouts; v4.1 is different */
FIELD(GICR_VPROPBASER, IDBITS, 0, 5)
FIELD(GICR_VPROPBASER, INNERCACHE, 7, 3)
FIELD(GICR_VPROPBASER, SHAREABILITY, 10, 2)
FIELD(GICR_VPROPBASER, PHYADDR, 12, 40)
FIELD(GICR_VPROPBASER, OUTERCACHE, 56, 3)

FIELD(GICR_VPENDBASER, INNERCACHE, 7, 3)
FIELD(GICR_VPENDBASER, SHAREABILITY, 10, 2)
FIELD(GICR_VPENDBASER, PHYADDR, 16, 36)
FIELD(GICR_VPENDBASER, OUTERCACHE, 56, 3)
FIELD(GICR_VPENDBASER, DIRTY, 60, 1)
FIELD(GICR_VPENDBASER, PENDINGLAST, 61, 1)
FIELD(GICR_VPENDBASER, IDAI, 62, 1)
FIELD(GICR_VPENDBASER, VALID, 63, 1)

#define ICC_CTLR_EL1_CBPR           (1U << 0)
#define ICC_CTLR_EL1_EOIMODE        (1U << 1)
#define ICC_CTLR_EL1_PMHE           (1U << 6)
#define ICC_CTLR_EL1_PRIBITS_SHIFT 8
#define ICC_CTLR_EL1_PRIBITS_MASK   (7U << ICC_CTLR_EL1_PRIBITS_SHIFT)
#define ICC_CTLR_EL1_IDBITS_SHIFT 11
#define ICC_CTLR_EL1_SEIS           (1U << 14)
#define ICC_CTLR_EL1_A3V            (1U << 15)

#define ICC_PMR_PRIORITY_MASK    0xff
#define ICC_BPR_BINARYPOINT_MASK 0x07
#define ICC_IGRPEN_ENABLE        0x01

#define ICC_CTLR_EL3_CBPR_EL1S (1U << 0)
#define ICC_CTLR_EL3_CBPR_EL1NS (1U << 1)
#define ICC_CTLR_EL3_EOIMODE_EL3 (1U << 2)
#define ICC_CTLR_EL3_EOIMODE_EL1S (1U << 3)
#define ICC_CTLR_EL3_EOIMODE_EL1NS (1U << 4)
#define ICC_CTLR_EL3_RM (1U << 5)
#define ICC_CTLR_EL3_PMHE (1U << 6)
#define ICC_CTLR_EL3_PRIBITS_SHIFT 8
#define ICC_CTLR_EL3_IDBITS_SHIFT 11
#define ICC_CTLR_EL3_SEIS (1U << 14)
#define ICC_CTLR_EL3_A3V (1U << 15)
#define ICC_CTLR_EL3_NDS (1U << 17)

#define ICC_AP1R_EL1_NMI (1ULL << 63)
#define ICC_RPR_EL1_NSNMI (1ULL << 62)
#define ICC_RPR_EL1_NMI (1ULL << 63)

#define ICH_VMCR_EL2_VENG0_SHIFT 0
#define ICH_VMCR_EL2_VENG0 (1U << ICH_VMCR_EL2_VENG0_SHIFT)
#define ICH_VMCR_EL2_VENG1_SHIFT 1
#define ICH_VMCR_EL2_VENG1 (1U << ICH_VMCR_EL2_VENG1_SHIFT)
#define ICH_VMCR_EL2_VACKCTL (1U << 2)
#define ICH_VMCR_EL2_VFIQEN (1U << 3)
#define ICH_VMCR_EL2_VCBPR_SHIFT 4
#define ICH_VMCR_EL2_VCBPR (1U << ICH_VMCR_EL2_VCBPR_SHIFT)
#define ICH_VMCR_EL2_VEOIM_SHIFT 9
#define ICH_VMCR_EL2_VEOIM (1U << ICH_VMCR_EL2_VEOIM_SHIFT)
#define ICH_VMCR_EL2_VBPR1_SHIFT 18
#define ICH_VMCR_EL2_VBPR1_LENGTH 3
#define ICH_VMCR_EL2_VBPR1_MASK (0x7U << ICH_VMCR_EL2_VBPR1_SHIFT)
#define ICH_VMCR_EL2_VBPR0_SHIFT 21
#define ICH_VMCR_EL2_VBPR0_LENGTH 3
#define ICH_VMCR_EL2_VBPR0_MASK (0x7U << ICH_VMCR_EL2_VBPR0_SHIFT)
#define ICH_VMCR_EL2_VPMR_SHIFT 24
#define ICH_VMCR_EL2_VPMR_LENGTH 8
#define ICH_VMCR_EL2_VPMR_MASK (0xffU << ICH_VMCR_EL2_VPMR_SHIFT)

#define ICH_HCR_EL2_EN (1U << 0)
#define ICH_HCR_EL2_UIE (1U << 1)
#define ICH_HCR_EL2_LRENPIE (1U << 2)
#define ICH_HCR_EL2_NPIE (1U << 3)
#define ICH_HCR_EL2_VGRP0EIE (1U << 4)
#define ICH_HCR_EL2_VGRP0DIE (1U << 5)
#define ICH_HCR_EL2_VGRP1EIE (1U << 6)
#define ICH_HCR_EL2_VGRP1DIE (1U << 7)
#define ICH_HCR_EL2_TC (1U << 10)
#define ICH_HCR_EL2_TALL0 (1U << 11)
#define ICH_HCR_EL2_TALL1 (1U << 12)
#define ICH_HCR_EL2_TSEI (1U << 13)
#define ICH_HCR_EL2_TDIR (1U << 14)
#define ICH_HCR_EL2_EOICOUNT_SHIFT 27
#define ICH_HCR_EL2_EOICOUNT_LENGTH 5
#define ICH_HCR_EL2_EOICOUNT_MASK (0x1fU << ICH_HCR_EL2_EOICOUNT_SHIFT)

#define ICH_LR_EL2_VINTID_SHIFT 0
#define ICH_LR_EL2_VINTID_LENGTH 32
#define ICH_LR_EL2_VINTID_MASK (0xffffffffULL << ICH_LR_EL2_VINTID_SHIFT)
#define ICH_LR_EL2_PINTID_SHIFT 32
#define ICH_LR_EL2_PINTID_LENGTH 10
#define ICH_LR_EL2_PINTID_MASK (0x3ffULL << ICH_LR_EL2_PINTID_SHIFT)
/* Note that the EOI bit overlaps the top bit of the pINTID field */
#define ICH_LR_EL2_EOI (1ULL << 41)
#define ICH_LR_EL2_PRIORITY_SHIFT 48
#define ICH_LR_EL2_PRIORITY_LENGTH 8
#define ICH_LR_EL2_PRIORITY_MASK (0xffULL << ICH_LR_EL2_PRIORITY_SHIFT)
#define ICH_LR_EL2_NMI (1ULL << 59)
#define ICH_LR_EL2_GROUP (1ULL << 60)
#define ICH_LR_EL2_HW (1ULL << 61)
#define ICH_LR_EL2_STATE_SHIFT 62
#define ICH_LR_EL2_STATE_LENGTH 2
#define ICH_LR_EL2_STATE_MASK (3ULL << ICH_LR_EL2_STATE_SHIFT)
/* values for the state field: */
#define ICH_LR_EL2_STATE_INVALID 0
#define ICH_LR_EL2_STATE_PENDING 1
#define ICH_LR_EL2_STATE_ACTIVE 2
#define ICH_LR_EL2_STATE_ACTIVE_PENDING 3
#define ICH_LR_EL2_STATE_PENDING_BIT (1ULL << ICH_LR_EL2_STATE_SHIFT)
#define ICH_LR_EL2_STATE_ACTIVE_BIT (2ULL << ICH_LR_EL2_STATE_SHIFT)
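
/*
 * For illustration only ("lr" is a hypothetical list register value):
 * because the state field encodes invalid/pending/active/active+pending,
 * the two _BIT values above can be tested directly, e.g.
 *
 *   bool pending = lr & ICH_LR_EL2_STATE_PENDING_BIT;
 *   bool active = lr & ICH_LR_EL2_STATE_ACTIVE_BIT;
 *
 * where "pending" is true for both the pending and active+pending states.
 */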

#define ICH_MISR_EL2_EOI (1U << 0)
#define ICH_MISR_EL2_U (1U << 1)
#define ICH_MISR_EL2_LRENP (1U << 2)
#define ICH_MISR_EL2_NP (1U << 3)
#define ICH_MISR_EL2_VGRP0E (1U << 4)
#define ICH_MISR_EL2_VGRP0D (1U << 5)
#define ICH_MISR_EL2_VGRP1E (1U << 6)
#define ICH_MISR_EL2_VGRP1D (1U << 7)

#define ICH_VTR_EL2_LISTREGS_SHIFT 0
#define ICH_VTR_EL2_TDS (1U << 19)
#define ICH_VTR_EL2_NV4 (1U << 20)
#define ICH_VTR_EL2_A3V (1U << 21)
#define ICH_VTR_EL2_SEIS (1U << 22)
#define ICH_VTR_EL2_IDBITS_SHIFT 23
#define ICH_VTR_EL2_PREBITS_SHIFT 26
#define ICH_VTR_EL2_PRIBITS_SHIFT 29

#define ICV_AP1R_EL1_NMI (1ULL << 63)
#define ICV_RPR_EL1_NMI (1ULL << 63)

/* ITS Registers */

FIELD(GITS_BASER, SIZE, 0, 8)
FIELD(GITS_BASER, PAGESIZE, 8, 2)
FIELD(GITS_BASER, SHAREABILITY, 10, 2)
FIELD(GITS_BASER, PHYADDR, 12, 36)
FIELD(GITS_BASER, PHYADDRL_64K, 16, 32)
FIELD(GITS_BASER, PHYADDRH_64K, 12, 4)
FIELD(GITS_BASER, ENTRYSIZE, 48, 5)
FIELD(GITS_BASER, OUTERCACHE, 53, 3)
FIELD(GITS_BASER, TYPE, 56, 3)
FIELD(GITS_BASER, INNERCACHE, 59, 3)
FIELD(GITS_BASER, INDIRECT, 62, 1)
FIELD(GITS_BASER, VALID, 63, 1)

FIELD(GITS_CBASER, SIZE, 0, 8)
FIELD(GITS_CBASER, SHAREABILITY, 10, 2)
FIELD(GITS_CBASER, PHYADDR, 12, 40)
FIELD(GITS_CBASER, OUTERCACHE, 53, 3)
FIELD(GITS_CBASER, INNERCACHE, 59, 3)
FIELD(GITS_CBASER, VALID, 63, 1)

FIELD(GITS_CREADR, STALLED, 0, 1)
FIELD(GITS_CREADR, OFFSET, 5, 15)

FIELD(GITS_CWRITER, RETRY, 0, 1)
FIELD(GITS_CWRITER, OFFSET, 5, 15)

FIELD(GITS_CTLR, ENABLED, 0, 1)
FIELD(GITS_CTLR, QUIESCENT, 31, 1)

FIELD(GITS_TYPER, PHYSICAL, 0, 1)
FIELD(GITS_TYPER, VIRTUAL, 1, 1)
FIELD(GITS_TYPER, ITT_ENTRY_SIZE, 4, 4)
FIELD(GITS_TYPER, IDBITS, 8, 5)
FIELD(GITS_TYPER, DEVBITS, 13, 5)
FIELD(GITS_TYPER, SEIS, 18, 1)
FIELD(GITS_TYPER, PTA, 19, 1)
FIELD(GITS_TYPER, CIDBITS, 32, 4)
FIELD(GITS_TYPER, CIL, 36, 1)
FIELD(GITS_TYPER, VMOVP, 37, 1)

#define GITS_IDREGS           0xFFD0

#define GITS_BASER_RO_MASK                  (R_GITS_BASER_ENTRYSIZE_MASK | \
                                              R_GITS_BASER_TYPE_MASK)

#define GITS_BASER_PAGESIZE_4K                0
#define GITS_BASER_PAGESIZE_16K               1
#define GITS_BASER_PAGESIZE_64K               2

#define GITS_BASER_TYPE_DEVICE               1ULL
#define GITS_BASER_TYPE_VPE                  2ULL
#define GITS_BASER_TYPE_COLLECTION           4ULL

#define GITS_PAGE_SIZE_4K       0x1000
#define GITS_PAGE_SIZE_16K      0x4000
#define GITS_PAGE_SIZE_64K      0x10000

#define L1TABLE_ENTRY_SIZE         8

#define LPI_CTE_ENABLED          TABLE_ENTRY_VALID_MASK
#define LPI_PRIORITY_MASK         0xfc

#define GITS_CMDQ_ENTRY_WORDS 4
#define GITS_CMDQ_ENTRY_SIZE  (GITS_CMDQ_ENTRY_WORDS * sizeof(uint64_t))

#define CMD_MASK                  0xff
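
/*
 * A minimal sketch of command decode ("cmdpkt" is a hypothetical buffer;
 * reading the GITS_CMDQ_ENTRY_SIZE bytes from the guest command queue is
 * omitted): the command opcode is the low byte of the first 64-bit word,
 *
 *   uint64_t cmdpkt[GITS_CMDQ_ENTRY_WORDS];
 *   ...
 *   uint8_t cmd = cmdpkt[0] & CMD_MASK;
 */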

/* ITS Commands */
#define GITS_CMD_MOVI             0x01
#define GITS_CMD_INT              0x03
#define GITS_CMD_CLEAR            0x04
#define GITS_CMD_SYNC             0x05
#define GITS_CMD_MAPD             0x08
#define GITS_CMD_MAPC             0x09
#define GITS_CMD_MAPTI            0x0A
#define GITS_CMD_MAPI             0x0B
#define GITS_CMD_INV              0x0C
#define GITS_CMD_INVALL           0x0D
#define GITS_CMD_MOVALL           0x0E
#define GITS_CMD_DISCARD          0x0F
#define GITS_CMD_VMOVI            0x21
#define GITS_CMD_VMOVP            0x22
#define GITS_CMD_VSYNC            0x25
#define GITS_CMD_VMAPP            0x29
#define GITS_CMD_VMAPTI           0x2A
#define GITS_CMD_VMAPI            0x2B
#define GITS_CMD_VINVALL          0x2D

/* MAPC command fields */
#define ICID_LENGTH                  16
#define ICID_MASK                 ((1U << ICID_LENGTH) - 1)
FIELD(MAPC, RDBASE, 16, 32)

#define RDBASE_PROCNUM_LENGTH        16
#define RDBASE_PROCNUM_MASK       ((1ULL << RDBASE_PROCNUM_LENGTH) - 1)

/* MAPD command fields */
#define ITTADDR_LENGTH               44
#define ITTADDR_SHIFT                 8
#define ITTADDR_MASK             MAKE_64BIT_MASK(ITTADDR_SHIFT, ITTADDR_LENGTH)
#define SIZE_MASK                 0x1f
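
/*
 * For illustration ("cmdpkt" is again a hypothetical buffer holding the
 * command words): the MAPD ITT address occupies bits [51:8] of command
 * word 2, so masking it in place yields the 256-byte-aligned ITT base:
 *
 *   uint64_t itt_base = cmdpkt[2] & ITTADDR_MASK;
 */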

/* MAPI command fields */
#define EVENTID_MASK              ((1ULL << 32) - 1)

/* MAPTI command fields */
#define pINTID_SHIFT                 32
#define pINTID_MASK               MAKE_64BIT_MASK(32, 32)

#define DEVID_SHIFT                  32
#define DEVID_MASK                MAKE_64BIT_MASK(32, 32)

#define VALID_SHIFT               63
#define CMD_FIELD_VALID_MASK      (1ULL << VALID_SHIFT)
#define L2_TABLE_VALID_MASK       CMD_FIELD_VALID_MASK
#define TABLE_ENTRY_VALID_MASK    (1ULL << 0)
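
/*
 * Sketch, for illustration ("l1_entry" is a hypothetical variable): when a
 * GITS_BASER table is indirect (two-level), each L1TABLE_ENTRY_SIZE-byte
 * level-1 entry points to a level-2 page and is usable only if its top
 * bit is set:
 *
 *   bool l2_valid = l1_entry & L2_TABLE_VALID_MASK;
 */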

/* MOVALL command fields */
FIELD(MOVALL_2, RDBASE1, 16, 36)
FIELD(MOVALL_3, RDBASE2, 16, 36)

/* MOVI command fields */
FIELD(MOVI_0, DEVICEID, 32, 32)
FIELD(MOVI_1, EVENTID, 0, 32)
FIELD(MOVI_2, ICID, 0, 16)

/* INV command fields */
FIELD(INV_0, DEVICEID, 32, 32)
FIELD(INV_1, EVENTID, 0, 32)

/* VMAPI, VMAPTI command fields */
FIELD(VMAPTI_0, DEVICEID, 32, 32)
FIELD(VMAPTI_1, EVENTID, 0, 32)
FIELD(VMAPTI_1, VPEID, 32, 16)
FIELD(VMAPTI_2, VINTID, 0, 32) /* VMAPTI only */
FIELD(VMAPTI_2, DOORBELL, 32, 32)

/* VMAPP command fields */
FIELD(VMAPP_0, ALLOC, 8, 1) /* GICv4.1 only */
FIELD(VMAPP_0, PTZ, 9, 1) /* GICv4.1 only */
FIELD(VMAPP_0, VCONFADDR, 16, 36) /* GICv4.1 only */
FIELD(VMAPP_1, DEFAULT_DOORBELL, 0, 32) /* GICv4.1 only */
FIELD(VMAPP_1, VPEID, 32, 16)
FIELD(VMAPP_2, RDBASE, 16, 36)
FIELD(VMAPP_2, V, 63, 1)
FIELD(VMAPP_3, VPTSIZE, 0, 8) /* For GICv4.0, bits [7:6] are RES0 */
FIELD(VMAPP_3, VPTADDR, 16, 36)

/* VMOVP command fields */
FIELD(VMOVP_0, SEQNUM, 32, 16) /* not used for GITS_TYPER.VMOVP == 1 */
FIELD(VMOVP_1, ITSLIST, 0, 16) /* not used for GITS_TYPER.VMOVP == 1 */
FIELD(VMOVP_1, VPEID, 32, 16)
FIELD(VMOVP_2, RDBASE, 16, 36)
FIELD(VMOVP_2, DB, 63, 1) /* GICv4.1 only */
FIELD(VMOVP_3, DEFAULT_DOORBELL, 0, 32) /* GICv4.1 only */

/* VMOVI command fields */
FIELD(VMOVI_0, DEVICEID, 32, 32)
FIELD(VMOVI_1, EVENTID, 0, 32)
FIELD(VMOVI_1, VPEID, 32, 16)
FIELD(VMOVI_2, D, 0, 1)
FIELD(VMOVI_2, DOORBELL, 32, 32)

/* VINVALL command fields */
FIELD(VINVALL_1, VPEID, 32, 16)

/*
 * 12-byte Interrupt Translation Table Entry size,
 * as per Table 5.3 in the GICv3 spec
 * ITE Lower 8 Bytes
 *   Bits:    | 63 ... 48 | 47 ... 32 | 31 ... 26 | 25 ... 2 |   1     |  0    |
 *   Values:  | vPEID     | ICID      | unused    |  IntNum  | IntType | Valid |
 * ITE Higher 4 Bytes
 *   Bits:    | 31 ... 24 | 23 ... 0 |
 *   Values:  | unused    | Doorbell |
 * (When Doorbell is unused, as it always is for INTTYPE_PHYSICAL,
 * the value of that field in memory cannot be relied upon -- older
 * versions of QEMU did not correctly write to that memory.)
 */
#define ITS_ITT_ENTRY_SIZE            0xC

FIELD(ITE_L, VALID, 0, 1)
FIELD(ITE_L, INTTYPE, 1, 1)
FIELD(ITE_L, INTID, 2, 24)
FIELD(ITE_L, ICID, 32, 16)
FIELD(ITE_L, VPEID, 48, 16)
FIELD(ITE_H, DOORBELL, 0, 24)

/* Possible values for ITE_L INTTYPE */
#define ITE_INTTYPE_VIRTUAL 0
#define ITE_INTTYPE_PHYSICAL 1
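
/*
 * Illustrative sketch of assembling an ITE with the fields above
 * (variable names are hypothetical; this is not code used by the model):
 *
 *   uint64_t itel = FIELD_DP64(0, ITE_L, VALID, 1);
 *   itel = FIELD_DP64(itel, ITE_L, INTTYPE, ITE_INTTYPE_PHYSICAL);
 *   itel = FIELD_DP64(itel, ITE_L, INTID, intid);
 *   itel = FIELD_DP64(itel, ITE_L, ICID, icid);
 *   uint32_t iteh = FIELD_DP32(0, ITE_H, DOORBELL, doorbell);
 */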

/* 16 bits EventId */
#define ITS_IDBITS                   GICD_TYPER_IDBITS

/* 16 bits DeviceId */
#define ITS_DEVBITS                   0xF

/* 16 bits CollectionId */
#define ITS_CIDBITS                  0xF

/*
 * 8 bytes Device Table Entry size
 * Valid = 1 bit, ITTAddr = 44 bits, Size = 5 bits
 */
#define GITS_DTE_SIZE                 (0x8ULL)

FIELD(DTE, VALID, 0, 1)
FIELD(DTE, SIZE, 1, 5)
FIELD(DTE, ITTADDR, 6, 44)

/*
 * 8 bytes Collection Table Entry size
 * Valid = 1 bit, RDBase = 16 bits
 */
#define GITS_CTE_SIZE                 (0x8ULL)
FIELD(CTE, VALID, 0, 1)
FIELD(CTE, RDBASE, 1, RDBASE_PROCNUM_LENGTH)

/*
 * 8 bytes VPE table entry size:
 * Valid = 1 bit, VPTsize = 5 bits, VPTaddr = 36 bits, RDbase = 16 bits
 *
 * Field sizes for Valid and size are mandated; field sizes for RDbase
 * and VPT_addr are IMPDEF.
 */
#define GITS_VPE_SIZE 0x8ULL

FIELD(VTE, VALID, 0, 1)
FIELD(VTE, VPTSIZE, 1, 5)
FIELD(VTE, VPTADDR, 6, 36)
FIELD(VTE, RDBASE, 42, RDBASE_PROCNUM_LENGTH)

/* Special interrupt IDs */
#define INTID_SECURE 1020
#define INTID_NONSECURE 1021
#define INTID_NMI 1022
#define INTID_SPURIOUS 1023

/* Functions internal to the emulated GICv3 */

/**
 * gicv3_redist_size:
 * @s: GICv3State
 *
 * Return the size of the redistributor register frame in bytes
 * (which depends on what GIC version this is)
 */
static inline int gicv3_redist_size(GICv3State *s)
{
    /*
     * Redistributor size is controlled by the redistributor GICR_TYPER.VLPIS.
     * It's the same for every redistributor in the GIC, so arbitrarily
     * use the register field in the first one.
     */
    if (s->cpu[0].gicr_typer & GICR_TYPER_VLPIS) {
        return GICV4_REDIST_SIZE;
    } else {
        return GICV3_REDIST_SIZE;
    }
}
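
/*
 * For example (a sketch; "nr_cpus_in_region" is a hypothetical count of
 * the CPUs covered by one redistributor region), the guest address space
 * that a region occupies is:
 *
 *   uint64_t region_size = nr_cpus_in_region * gicv3_redist_size(s);
 */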

/**
 * gicv3_intid_is_special:
 * @intid: interrupt ID
 *
 * Return true if @intid is a special interrupt ID (1020 to
 * 1023 inclusive). This corresponds to the GIC spec pseudocode
 * IsSpecial() function.
 */
static inline bool gicv3_intid_is_special(int intid)
{
    return intid >= INTID_SECURE && intid <= INTID_SPURIOUS;
}
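
/*
 * e.g. (sketch only) an interrupt-acknowledge path can return a special
 * INTID unchanged rather than treating it as a real interrupt:
 *
 *   if (gicv3_intid_is_special(intid)) {
 *       return intid;
 *   }
 */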

/**
 * gicv3_redist_update:
 * @cs: GICv3CPUState for this redistributor
 *
 * Recalculate the highest priority pending interrupt after a
 * change to redistributor state, and inform the CPU accordingly.
 */
void gicv3_redist_update(GICv3CPUState *cs);

/**
 * gicv3_update:
 * @s: GICv3State
 * @start: first interrupt whose state changed
 * @len: length of the range of interrupts whose state changed
 *
 * Recalculate the highest priority pending interrupts after a
 * change to the distributor state affecting @len interrupts
 * starting at @start, and inform the CPUs accordingly.
 */
void gicv3_update(GICv3State *s, int start, int len);

/**
 * gicv3_full_update_noirqset:
 * @s: GICv3State
 *
 * Recalculate the cached information about highest priority
 * pending interrupts, but don't inform the CPUs. This should be
 * called after an incoming migration has loaded new state.
 */
void gicv3_full_update_noirqset(GICv3State *s);

/**
 * gicv3_full_update:
 * @s: GICv3State
 *
 * Recalculate the highest priority pending interrupts after
 * a change that could affect the status of all interrupts,
 * and inform the CPUs accordingly.
 */
void gicv3_full_update(GICv3State *s);
MemTxResult gicv3_dist_read(void *opaque, hwaddr offset, uint64_t *data,
                            unsigned size, MemTxAttrs attrs);
MemTxResult gicv3_dist_write(void *opaque, hwaddr addr, uint64_t data,
                             unsigned size, MemTxAttrs attrs);
MemTxResult gicv3_redist_read(void *opaque, hwaddr offset, uint64_t *data,
                              unsigned size, MemTxAttrs attrs);
MemTxResult gicv3_redist_write(void *opaque, hwaddr offset, uint64_t data,
                               unsigned size, MemTxAttrs attrs);
void gicv3_dist_set_irq(GICv3State *s, int irq, int level);
void gicv3_redist_set_irq(GICv3CPUState *cs, int irq, int level);
void gicv3_redist_process_lpi(GICv3CPUState *cs, int irq, int level);
/**
 * gicv3_redist_process_vlpi:
 * @cs: GICv3CPUState
 * @irq: (virtual) interrupt number
 * @vptaddr: (guest) address of VLPI table
 * @doorbell: doorbell (physical) interrupt number (1023 for "no doorbell")
 * @level: level to set @irq to
 *
 * Process a virtual LPI being directly injected by the ITS. This function
 * will update the VLPI table specified by @vptaddr. If the
 * vCPU corresponding to that VLPI table is currently running on
 * the CPU associated with this redistributor, directly inject the VLPI
 * @irq. If the vCPU is not running on this CPU, raise the doorbell
 * interrupt instead.
 */
void gicv3_redist_process_vlpi(GICv3CPUState *cs, int irq, uint64_t vptaddr,
                               int doorbell, int level);
/**
 * gicv3_redist_vlpi_pending:
 * @cs: GICv3CPUState
 * @irq: (virtual) interrupt number
 * @level: level to set @irq to
 *
 * Set/clear the pending status of a virtual LPI in the vLPI table
 * that this redistributor is currently using. (The difference between
 * this and gicv3_redist_process_vlpi() is that this is called from
 * the cpuif and does not need to do the not-running-on-this-vcpu checks.)
 */
void gicv3_redist_vlpi_pending(GICv3CPUState *cs, int irq, int level);

void gicv3_redist_lpi_pending(GICv3CPUState *cs, int irq, int level);
/**
 * gicv3_redist_update_lpi:
 * @cs: GICv3CPUState
 *
 * Scan the LPI pending table and recalculate the highest priority
 * pending LPI and also the overall highest priority pending interrupt.
 */
void gicv3_redist_update_lpi(GICv3CPUState *cs);
/**
 * gicv3_redist_update_lpi_only:
 * @cs: GICv3CPUState
 *
 * Scan the LPI pending table and recalculate cs->hpplpi only,
 * without calling gicv3_redist_update() to recalculate the overall
 * highest priority pending interrupt. This should be called after
 * an incoming migration has loaded new state.
 */
void gicv3_redist_update_lpi_only(GICv3CPUState *cs);
/**
 * gicv3_redist_inv_lpi:
 * @cs: GICv3CPUState
 * @irq: LPI to invalidate cached information for
 *
 * Forget or update any cached information associated with this LPI.
 */
void gicv3_redist_inv_lpi(GICv3CPUState *cs, int irq);
/**
 * gicv3_redist_inv_vlpi:
 * @cs: GICv3CPUState
 * @irq: vLPI to invalidate cached information for
 * @vptaddr: (guest) address of vLPI table
 *
 * Forget or update any cached information associated with this vLPI.
 */
void gicv3_redist_inv_vlpi(GICv3CPUState *cs, int irq, uint64_t vptaddr);
/**
 * gicv3_redist_mov_lpi:
 * @src: source redistributor
 * @dest: destination redistributor
 * @irq: LPI to update
 *
 * Move the pending state of the specified LPI from @src to @dest,
 * as required by the ITS MOVI command.
 */
void gicv3_redist_mov_lpi(GICv3CPUState *src, GICv3CPUState *dest, int irq);
/**
 * gicv3_redist_movall_lpis:
 * @src: source redistributor
 * @dest: destination redistributor
 *
 * Scan the LPI pending table for @src, and for each pending LPI there
 * mark it as not-pending for @src and pending for @dest, as required
 * by the ITS MOVALL command.
 */
void gicv3_redist_movall_lpis(GICv3CPUState *src, GICv3CPUState *dest);
/**
 * gicv3_redist_mov_vlpi:
 * @src: source redistributor
 * @src_vptaddr: (guest) address of source VLPI table
 * @dest: destination redistributor
 * @dest_vptaddr: (guest) address of destination VLPI table
 * @irq: VLPI to update
 * @doorbell: doorbell for destination (1023 for "no doorbell")
 *
 * Move the pending state of the specified VLPI from @src to @dest,
 * as required by the ITS VMOVI command.
 */
void gicv3_redist_mov_vlpi(GICv3CPUState *src, uint64_t src_vptaddr,
                           GICv3CPUState *dest, uint64_t dest_vptaddr,
                           int irq, int doorbell);
/**
 * gicv3_redist_vinvall:
 * @cs: GICv3CPUState
 * @vptaddr: address of VLPI pending table
 *
 * On redistributor @cs, invalidate all cached information associated
 * with the vCPU defined by @vptaddr.
 */
void gicv3_redist_vinvall(GICv3CPUState *cs, uint64_t vptaddr);

void gicv3_redist_send_sgi(GICv3CPUState *cs, int grp, int irq, bool ns);
void gicv3_init_cpuif(GICv3State *s);

/**
 * gicv3_cpuif_update:
 * @cs: GICv3CPUState for the CPU to update
 *
 * Recalculate whether to assert the IRQ or FIQ lines after a change
 * to the current highest priority pending interrupt, the CPU's
 * current running priority or the CPU's current exception level or
 * security state.
 */
void gicv3_cpuif_update(GICv3CPUState *cs);

/*
 * gicv3_cpuif_virt_irq_fiq_update:
 * @cs: GICv3CPUState for the CPU to update
 *
 * Recalculate whether to assert the virtual IRQ or FIQ lines after
 * a change to the current highest priority pending virtual interrupt.
 * Note that this does not recalculate and change the maintenance
 * interrupt status (for that, see gicv3_cpuif_virt_update()).
 */
void gicv3_cpuif_virt_irq_fiq_update(GICv3CPUState *cs);

static inline uint32_t gicv3_iidr(void)
{
    /* Return the Implementer Identification Register value
     * for the emulated GICv3, as reported in GICD_IIDR and GICR_IIDR.
     *
     * We claim to be an ARM r0p0 with a zero ProductID.
     * This is the same as an r0p0 GIC-500.
     */
    return 0x43b;
}

/* CoreSight PIDR0 values for ARM GICv3 implementations */
#define GICV3_PIDR0_DIST 0x92
#define GICV3_PIDR0_REDIST 0x93
#define GICV3_PIDR0_ITS 0x94

static inline uint32_t gicv3_idreg(GICv3State *s, int regoffset, uint8_t pidr0)
{
    /* Return the value of the CoreSight ID register at the specified
     * offset from the first ID register (as found in the distributor
     * and redistributor register banks).
     * These values indicate an ARM implementation of a GICv3 or v4.
     */
    static const uint8_t gicd_ids[] = {
        0x44, 0x00, 0x00, 0x00, 0x92, 0xB4, 0x0B, 0x00, 0x0D, 0xF0, 0x05, 0xB1
    };
    uint32_t id;

    regoffset /= 4;

    if (regoffset == 4) {
        return pidr0;
    }
    id = gicd_ids[regoffset];
    if (regoffset == 6) {
        /* PIDR2 bits [7:4] are the GIC architecture revision */
        id |= s->revision << 4;
    }
    return id;
}
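
/*
 * Sketch of a typical call (the variable names are those a hypothetical
 * MMIO read handler might use): a read inside the distributor's ID
 * register window can be turned into
 *
 *   *data = gicv3_idreg(s, offset - GICD_IDREGS, GICV3_PIDR0_DIST);
 */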

/**
 * gicv3_irq_group:
 *
 * Return the group which this interrupt is configured as (GICV3_G0,
 * GICV3_G1 or GICV3_G1NS).
 */
static inline int gicv3_irq_group(GICv3State *s, GICv3CPUState *cs, int irq)
{
    bool grpbit, grpmodbit;

    if (irq < GIC_INTERNAL) {
        grpbit = extract32(cs->gicr_igroupr0, irq, 1);
        grpmodbit = extract32(cs->gicr_igrpmodr0, irq, 1);
    } else {
        grpbit = gicv3_gicd_group_test(s, irq);
        grpmodbit = gicv3_gicd_grpmod_test(s, irq);
    }
    if (grpbit) {
        return GICV3_G1NS;
    }
    if (s->gicd_ctlr & GICD_CTLR_DS) {
        return GICV3_G0;
    }
    return grpmodbit ? GICV3_G1 : GICV3_G0;
}

/**
 * gicv3_redist_affid:
 *
 * Return the 32-bit affinity ID of the CPU connected to this redistributor
 */
static inline uint32_t gicv3_redist_affid(GICv3CPUState *cs)
{
    return cs->gicr_typer >> 32;
}

/**
 * gicv3_cache_target_cpustate:
 *
 * Update the cached CPU state corresponding to the target for this interrupt
 * (which is kept in s->gicd_irouter_target[]).
 */
static inline void gicv3_cache_target_cpustate(GICv3State *s, int irq)
{
    GICv3CPUState *cs = NULL;
    int i;
    uint32_t tgtaff = extract64(s->gicd_irouter[irq], 0, 24) |
        extract64(s->gicd_irouter[irq], 32, 8) << 24;
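
    /*
     * tgtaff repacks GICD_IROUTER's Aff2.Aff1.Aff0 (bits [23:0]) and
     * Aff3 (bits [39:32]) into the single 32-bit Aff3.Aff2.Aff1.Aff0
     * value held in GICR_TYPER bits [63:32], so it can be compared
     * directly against gicr_typer >> 32 below.
     */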

    for (i = 0; i < s->num_cpu; i++) {
        if (s->cpu[i].gicr_typer >> 32 == tgtaff) {
            cs = &s->cpu[i];
            break;
        }
    }

    s->gicd_irouter_target[irq] = cs;
}

/**
 * gicv3_cache_all_target_cpustates:
 *
 * Populate the entire cache of CPU state pointers for interrupt targets
 * (eg after inbound migration or CPU reset)
 */
static inline void gicv3_cache_all_target_cpustates(GICv3State *s)
{
    int irq;

    for (irq = GIC_INTERNAL; irq < GICV3_MAXIRQ; irq++) {
        gicv3_cache_target_cpustate(s, irq);
    }
}

void gicv3_set_gicv3state(CPUState *cpu, GICv3CPUState *s);

#endif /* QEMU_ARM_GICV3_INTERNAL_H */