/*
 * ARM GICv3 support - internal interfaces
 *
 * Copyright (c) 2012 Linaro Limited
 * Copyright (c) 2015 Huawei.
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Written by Peter Maydell
 * Reworked for GICv3 by Shlomo Pongratz and Pavel Fedin
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef QEMU_ARM_GICV3_INTERNAL_H
#define QEMU_ARM_GICV3_INTERNAL_H

#include "hw/registerfields.h"
#include "hw/intc/arm_gicv3_common.h"

/* Distributor registers, as offsets from the distributor base address */
#define GICD_CTLR            0x0000
#define GICD_TYPER           0x0004
#define GICD_IIDR            0x0008
#define GICD_STATUSR         0x0010
#define GICD_SETSPI_NSR      0x0040
#define GICD_CLRSPI_NSR      0x0048
#define GICD_SETSPI_SR       0x0050
#define GICD_CLRSPI_SR       0x0058
#define GICD_SEIR            0x0068
#define GICD_IGROUPR         0x0080
#define GICD_ISENABLER       0x0100
#define GICD_ICENABLER       0x0180
#define GICD_ISPENDR         0x0200
#define GICD_ICPENDR         0x0280
#define GICD_ISACTIVER       0x0300
#define GICD_ICACTIVER       0x0380
#define GICD_IPRIORITYR      0x0400
#define GICD_ITARGETSR       0x0800
#define GICD_ICFGR           0x0C00
#define GICD_IGRPMODR        0x0D00
#define GICD_NSACR           0x0E00
#define GICD_SGIR            0x0F00
#define GICD_CPENDSGIR       0x0F10
#define GICD_SPENDSGIR       0x0F20
#define GICD_IROUTER         0x6000
#define GICD_IDREGS          0xFFD0

/* GICD_CTLR fields  */
#define GICD_CTLR_EN_GRP0           (1U << 0)
#define GICD_CTLR_EN_GRP1NS         (1U << 1) /* GICv3 5.3.20 */
#define GICD_CTLR_EN_GRP1S          (1U << 2)
#define GICD_CTLR_EN_GRP1_ALL       (GICD_CTLR_EN_GRP1NS | GICD_CTLR_EN_GRP1S)
/* Bit 4 is ARE if the system doesn't support TrustZone, ARE_S otherwise */
#define GICD_CTLR_ARE               (1U << 4)
#define GICD_CTLR_ARE_S             (1U << 4)
#define GICD_CTLR_ARE_NS            (1U << 5)
#define GICD_CTLR_DS                (1U << 6)
#define GICD_CTLR_E1NWF             (1U << 7)
#define GICD_CTLR_RWP               (1U << 31)

#define GICD_TYPER_LPIS_SHIFT          17

/* 16 bits of interrupt ID: the IDbits field encodes (number of ID bits - 1) */
#define GICD_TYPER_IDBITS            0xf

/*
 * Redistributor frame offsets from RD_base
 */
#define GICR_SGI_OFFSET 0x10000

/*
 * Redistributor registers, offsets from RD_base
 */
#define GICR_CTLR             0x0000
#define GICR_IIDR             0x0004
#define GICR_TYPER            0x0008
#define GICR_STATUSR          0x0010
#define GICR_WAKER            0x0014
#define GICR_SETLPIR          0x0040
#define GICR_CLRLPIR          0x0048
#define GICR_PROPBASER        0x0070
#define GICR_PENDBASER        0x0078
#define GICR_INVLPIR          0x00A0
#define GICR_INVALLR          0x00B0
#define GICR_SYNCR            0x00C0
#define GICR_IDREGS           0xFFD0

/* SGI and PPI Redistributor registers, offsets from RD_base */
#define GICR_IGROUPR0         (GICR_SGI_OFFSET + 0x0080)
#define GICR_ISENABLER0       (GICR_SGI_OFFSET + 0x0100)
#define GICR_ICENABLER0       (GICR_SGI_OFFSET + 0x0180)
#define GICR_ISPENDR0         (GICR_SGI_OFFSET + 0x0200)
#define GICR_ICPENDR0         (GICR_SGI_OFFSET + 0x0280)
#define GICR_ISACTIVER0       (GICR_SGI_OFFSET + 0x0300)
#define GICR_ICACTIVER0       (GICR_SGI_OFFSET + 0x0380)
#define GICR_IPRIORITYR       (GICR_SGI_OFFSET + 0x0400)
#define GICR_ICFGR0           (GICR_SGI_OFFSET + 0x0C00)
#define GICR_ICFGR1           (GICR_SGI_OFFSET + 0x0C04)
#define GICR_IGRPMODR0        (GICR_SGI_OFFSET + 0x0D00)
#define GICR_NSACR            (GICR_SGI_OFFSET + 0x0E00)

#define GICR_CTLR_ENABLE_LPIS        (1U << 0)
#define GICR_CTLR_CES                (1U << 1)
#define GICR_CTLR_RWP                (1U << 3)
#define GICR_CTLR_DPG0               (1U << 24)
#define GICR_CTLR_DPG1NS             (1U << 25)
#define GICR_CTLR_DPG1S              (1U << 26)
#define GICR_CTLR_UWP                (1U << 31)

#define GICR_TYPER_PLPIS             (1U << 0)
#define GICR_TYPER_VLPIS             (1U << 1)
#define GICR_TYPER_DIRECTLPI         (1U << 3)
#define GICR_TYPER_LAST              (1U << 4)
#define GICR_TYPER_DPGS              (1U << 5)
#define GICR_TYPER_PROCNUM           (0xFFFFU << 8)
#define GICR_TYPER_COMMONLPIAFF      (0x3 << 24)
#define GICR_TYPER_AFFINITYVALUE     (0xFFFFFFFFULL << 32)

#define GICR_WAKER_ProcessorSleep    (1U << 1)
#define GICR_WAKER_ChildrenAsleep    (1U << 2)

FIELD(GICR_PROPBASER, IDBITS, 0, 5)
FIELD(GICR_PROPBASER, INNERCACHE, 7, 3)
FIELD(GICR_PROPBASER, SHAREABILITY, 10, 2)
FIELD(GICR_PROPBASER, PHYADDR, 12, 40)
FIELD(GICR_PROPBASER, OUTERCACHE, 56, 3)

FIELD(GICR_PENDBASER, INNERCACHE, 7, 3)
FIELD(GICR_PENDBASER, SHAREABILITY, 10, 2)
FIELD(GICR_PENDBASER, PHYADDR, 16, 36)
FIELD(GICR_PENDBASER, OUTERCACHE, 56, 3)
FIELD(GICR_PENDBASER, PTZ, 62, 1)

#define GICR_PROPBASER_IDBITS_THRESHOLD          0xd
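
/*
 * Illustrative sketch only (not used by the GIC model itself): the FIELD()
 * declarations above generate R_<reg>_<field>_{SHIFT,LENGTH,MASK} constants
 * consumed via the FIELD_EX64()/FIELD_DP64() helpers from
 * "hw/registerfields.h".  The two helper names below are assumptions made
 * for this example, not functions defined elsewhere in QEMU.
 */
static inline uint64_t gicv3_example_propbaser_addr(uint64_t propbaser)
{
    /* PHYADDR holds bits [51:12] of the LPI configuration table address */
    return FIELD_EX64(propbaser, GICR_PROPBASER, PHYADDR) << 12;
}

static inline unsigned gicv3_example_propbaser_idbits(uint64_t propbaser)
{
    /* IDBITS encodes (number of LPI INTID bits - 1) */
    return FIELD_EX64(propbaser, GICR_PROPBASER, IDBITS) + 1;
}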

#define ICC_CTLR_EL1_CBPR           (1U << 0)
#define ICC_CTLR_EL1_EOIMODE        (1U << 1)
#define ICC_CTLR_EL1_PMHE           (1U << 6)
#define ICC_CTLR_EL1_PRIBITS_SHIFT 8
#define ICC_CTLR_EL1_PRIBITS_MASK   (7U << ICC_CTLR_EL1_PRIBITS_SHIFT)
#define ICC_CTLR_EL1_IDBITS_SHIFT 11
#define ICC_CTLR_EL1_SEIS           (1U << 14)
#define ICC_CTLR_EL1_A3V            (1U << 15)

#define ICC_PMR_PRIORITY_MASK    0xff
#define ICC_BPR_BINARYPOINT_MASK 0x07
#define ICC_IGRPEN_ENABLE        0x01

#define ICC_CTLR_EL3_CBPR_EL1S (1U << 0)
#define ICC_CTLR_EL3_CBPR_EL1NS (1U << 1)
#define ICC_CTLR_EL3_EOIMODE_EL3 (1U << 2)
#define ICC_CTLR_EL3_EOIMODE_EL1S (1U << 3)
#define ICC_CTLR_EL3_EOIMODE_EL1NS (1U << 4)
#define ICC_CTLR_EL3_RM (1U << 5)
#define ICC_CTLR_EL3_PMHE (1U << 6)
#define ICC_CTLR_EL3_PRIBITS_SHIFT 8
#define ICC_CTLR_EL3_IDBITS_SHIFT 11
#define ICC_CTLR_EL3_SEIS (1U << 14)
#define ICC_CTLR_EL3_A3V (1U << 15)
#define ICC_CTLR_EL3_NDS (1U << 17)

#define ICH_VMCR_EL2_VENG0_SHIFT 0
#define ICH_VMCR_EL2_VENG0 (1U << ICH_VMCR_EL2_VENG0_SHIFT)
#define ICH_VMCR_EL2_VENG1_SHIFT 1
#define ICH_VMCR_EL2_VENG1 (1U << ICH_VMCR_EL2_VENG1_SHIFT)
#define ICH_VMCR_EL2_VACKCTL (1U << 2)
#define ICH_VMCR_EL2_VFIQEN (1U << 3)
#define ICH_VMCR_EL2_VCBPR_SHIFT 4
#define ICH_VMCR_EL2_VCBPR (1U << ICH_VMCR_EL2_VCBPR_SHIFT)
#define ICH_VMCR_EL2_VEOIM_SHIFT 9
#define ICH_VMCR_EL2_VEOIM (1U << ICH_VMCR_EL2_VEOIM_SHIFT)
#define ICH_VMCR_EL2_VBPR1_SHIFT 18
#define ICH_VMCR_EL2_VBPR1_LENGTH 3
#define ICH_VMCR_EL2_VBPR1_MASK (0x7U << ICH_VMCR_EL2_VBPR1_SHIFT)
#define ICH_VMCR_EL2_VBPR0_SHIFT 21
#define ICH_VMCR_EL2_VBPR0_LENGTH 3
#define ICH_VMCR_EL2_VBPR0_MASK (0x7U << ICH_VMCR_EL2_VBPR0_SHIFT)
#define ICH_VMCR_EL2_VPMR_SHIFT 24
#define ICH_VMCR_EL2_VPMR_LENGTH 8
#define ICH_VMCR_EL2_VPMR_MASK (0xffU << ICH_VMCR_EL2_VPMR_SHIFT)

#define ICH_HCR_EL2_EN (1U << 0)
#define ICH_HCR_EL2_UIE (1U << 1)
#define ICH_HCR_EL2_LRENPIE (1U << 2)
#define ICH_HCR_EL2_NPIE (1U << 3)
#define ICH_HCR_EL2_VGRP0EIE (1U << 4)
#define ICH_HCR_EL2_VGRP0DIE (1U << 5)
#define ICH_HCR_EL2_VGRP1EIE (1U << 6)
#define ICH_HCR_EL2_VGRP1DIE (1U << 7)
#define ICH_HCR_EL2_TC (1U << 10)
#define ICH_HCR_EL2_TALL0 (1U << 11)
#define ICH_HCR_EL2_TALL1 (1U << 12)
#define ICH_HCR_EL2_TSEI (1U << 13)
#define ICH_HCR_EL2_TDIR (1U << 14)
#define ICH_HCR_EL2_EOICOUNT_SHIFT 27
#define ICH_HCR_EL2_EOICOUNT_LENGTH 5
#define ICH_HCR_EL2_EOICOUNT_MASK (0x1fU << ICH_HCR_EL2_EOICOUNT_SHIFT)

#define ICH_LR_EL2_VINTID_SHIFT 0
#define ICH_LR_EL2_VINTID_LENGTH 32
#define ICH_LR_EL2_VINTID_MASK (0xffffffffULL << ICH_LR_EL2_VINTID_SHIFT)
#define ICH_LR_EL2_PINTID_SHIFT 32
#define ICH_LR_EL2_PINTID_LENGTH 10
#define ICH_LR_EL2_PINTID_MASK (0x3ffULL << ICH_LR_EL2_PINTID_SHIFT)
/* Note that EOI shares its bit position with the top bit of the pINTID field */
#define ICH_LR_EL2_EOI (1ULL << 41)
#define ICH_LR_EL2_PRIORITY_SHIFT 48
#define ICH_LR_EL2_PRIORITY_LENGTH 8
#define ICH_LR_EL2_PRIORITY_MASK (0xffULL << ICH_LR_EL2_PRIORITY_SHIFT)
#define ICH_LR_EL2_GROUP (1ULL << 60)
#define ICH_LR_EL2_HW (1ULL << 61)
#define ICH_LR_EL2_STATE_SHIFT 62
#define ICH_LR_EL2_STATE_LENGTH 2
#define ICH_LR_EL2_STATE_MASK (3ULL << ICH_LR_EL2_STATE_SHIFT)
/* values for the state field: */
#define ICH_LR_EL2_STATE_INVALID 0
#define ICH_LR_EL2_STATE_PENDING 1
#define ICH_LR_EL2_STATE_ACTIVE 2
#define ICH_LR_EL2_STATE_ACTIVE_PENDING 3
#define ICH_LR_EL2_STATE_PENDING_BIT (1ULL << ICH_LR_EL2_STATE_SHIFT)
#define ICH_LR_EL2_STATE_ACTIVE_BIT (2ULL << ICH_LR_EL2_STATE_SHIFT)
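
/*
 * Illustrative sketch (not part of the emulation logic): decoding a list
 * register value with the masks above.  The helper names are assumptions
 * made for this example only.
 */
static inline bool gicv3_example_lr_is_pending(uint64_t lr)
{
    /*
     * A virtual interrupt is pending if the 2-bit state field has its
     * pending bit set (state is "pending" or "active and pending").
     */
    return (lr & ICH_LR_EL2_STATE_PENDING_BIT) != 0;
}

static inline uint32_t gicv3_example_lr_vintid(uint64_t lr)
{
    /* vINTID occupies the low 32 bits of the list register */
    return (lr & ICH_LR_EL2_VINTID_MASK) >> ICH_LR_EL2_VINTID_SHIFT;
}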

#define ICH_MISR_EL2_EOI (1U << 0)
#define ICH_MISR_EL2_U (1U << 1)
#define ICH_MISR_EL2_LRENP (1U << 2)
#define ICH_MISR_EL2_NP (1U << 3)
#define ICH_MISR_EL2_VGRP0E (1U << 4)
#define ICH_MISR_EL2_VGRP0D (1U << 5)
#define ICH_MISR_EL2_VGRP1E (1U << 6)
#define ICH_MISR_EL2_VGRP1D (1U << 7)

#define ICH_VTR_EL2_LISTREGS_SHIFT 0
#define ICH_VTR_EL2_TDS (1U << 19)
#define ICH_VTR_EL2_NV4 (1U << 20)
#define ICH_VTR_EL2_A3V (1U << 21)
#define ICH_VTR_EL2_SEIS (1U << 22)
#define ICH_VTR_EL2_IDBITS_SHIFT 23
#define ICH_VTR_EL2_PREBITS_SHIFT 26
#define ICH_VTR_EL2_PRIBITS_SHIFT 29

/* ITS Registers */

FIELD(GITS_BASER, SIZE, 0, 8)
FIELD(GITS_BASER, PAGESIZE, 8, 2)
FIELD(GITS_BASER, SHAREABILITY, 10, 2)
FIELD(GITS_BASER, PHYADDR, 12, 36)
FIELD(GITS_BASER, PHYADDRL_64K, 16, 32)
FIELD(GITS_BASER, PHYADDRH_64K, 12, 4)
FIELD(GITS_BASER, ENTRYSIZE, 48, 5)
FIELD(GITS_BASER, OUTERCACHE, 53, 3)
FIELD(GITS_BASER, TYPE, 56, 3)
FIELD(GITS_BASER, INNERCACHE, 59, 3)
FIELD(GITS_BASER, INDIRECT, 62, 1)
FIELD(GITS_BASER, VALID, 63, 1)

FIELD(GITS_CBASER, SIZE, 0, 8)
FIELD(GITS_CBASER, SHAREABILITY, 10, 2)
FIELD(GITS_CBASER, PHYADDR, 12, 40)
FIELD(GITS_CBASER, OUTERCACHE, 53, 3)
FIELD(GITS_CBASER, INNERCACHE, 59, 3)
FIELD(GITS_CBASER, VALID, 63, 1)

FIELD(GITS_CREADR, STALLED, 0, 1)
FIELD(GITS_CREADR, OFFSET, 5, 15)

FIELD(GITS_CWRITER, RETRY, 0, 1)
FIELD(GITS_CWRITER, OFFSET, 5, 15)

FIELD(GITS_CTLR, ENABLED, 0, 1)
FIELD(GITS_CTLR, QUIESCENT, 31, 1)

FIELD(GITS_TYPER, PHYSICAL, 0, 1)
FIELD(GITS_TYPER, VIRTUAL, 1, 1)
FIELD(GITS_TYPER, ITT_ENTRY_SIZE, 4, 4)
FIELD(GITS_TYPER, IDBITS, 8, 5)
FIELD(GITS_TYPER, DEVBITS, 13, 5)
FIELD(GITS_TYPER, SEIS, 18, 1)
FIELD(GITS_TYPER, PTA, 19, 1)
FIELD(GITS_TYPER, CIDBITS, 32, 4)
FIELD(GITS_TYPER, CIL, 36, 1)

#define GITS_IDREGS           0xFFD0

#define GITS_BASER_RO_MASK                  (R_GITS_BASER_ENTRYSIZE_MASK | \
                                              R_GITS_BASER_TYPE_MASK)

#define GITS_BASER_PAGESIZE_4K                0
#define GITS_BASER_PAGESIZE_16K               1
#define GITS_BASER_PAGESIZE_64K               2

#define GITS_BASER_TYPE_DEVICE               1ULL
#define GITS_BASER_TYPE_VPE                  2ULL
#define GITS_BASER_TYPE_COLLECTION           4ULL

#define GITS_PAGE_SIZE_4K       0x1000
#define GITS_PAGE_SIZE_16K      0x4000
#define GITS_PAGE_SIZE_64K      0x10000

#define L1TABLE_ENTRY_SIZE         8

#define LPI_CTE_ENABLED          TABLE_ENTRY_VALID_MASK
#define LPI_PRIORITY_MASK         0xfc

#define GITS_CMDQ_ENTRY_WORDS 4
#define GITS_CMDQ_ENTRY_SIZE  (GITS_CMDQ_ENTRY_WORDS * sizeof(uint64_t))

#define CMD_MASK                  0xff

/* ITS Commands */
#define GITS_CMD_MOVI             0x01
#define GITS_CMD_INT              0x03
#define GITS_CMD_CLEAR            0x04
#define GITS_CMD_SYNC             0x05
#define GITS_CMD_MAPD             0x08
#define GITS_CMD_MAPC             0x09
#define GITS_CMD_MAPTI            0x0A
#define GITS_CMD_MAPI             0x0B
#define GITS_CMD_INV              0x0C
#define GITS_CMD_INVALL           0x0D
#define GITS_CMD_MOVALL           0x0E
#define GITS_CMD_DISCARD          0x0F
#define GITS_CMD_VMOVI            0x21
#define GITS_CMD_VMOVP            0x22
#define GITS_CMD_VSYNC            0x25
#define GITS_CMD_VMAPP            0x29
#define GITS_CMD_VMAPTI           0x2A
#define GITS_CMD_VMAPI            0x2B
#define GITS_CMD_VINVALL          0x2D
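
/*
 * Illustrative sketch (assumed helper, not used elsewhere in this file):
 * each ITS command occupies GITS_CMDQ_ENTRY_WORDS 64-bit words in the
 * command queue, and the opcode is the low byte of the first word.
 */
static inline uint8_t gicv3_example_its_cmd_opcode(const uint64_t *cmdpkt)
{
    /*
     * cmdpkt points at GITS_CMDQ_ENTRY_WORDS doublewords read from guest
     * memory; the GITS_CMD_* values above are matched against this byte.
     */
    return cmdpkt[0] & CMD_MASK;
}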

/* MAPC command fields */
#define ICID_LENGTH                  16
#define ICID_MASK                 ((1U << ICID_LENGTH) - 1)
FIELD(MAPC, RDBASE, 16, 32)

#define RDBASE_PROCNUM_LENGTH        16
#define RDBASE_PROCNUM_MASK       ((1ULL << RDBASE_PROCNUM_LENGTH) - 1)

/* MAPD command fields */
#define ITTADDR_LENGTH               44
#define ITTADDR_SHIFT                 8
#define ITTADDR_MASK             MAKE_64BIT_MASK(ITTADDR_SHIFT, ITTADDR_LENGTH)
#define SIZE_MASK                 0x1f

/* MAPI command fields */
#define EVENTID_MASK              ((1ULL << 32) - 1)

/* MAPTI command fields */
#define pINTID_SHIFT                 32
#define pINTID_MASK               MAKE_64BIT_MASK(32, 32)

#define DEVID_SHIFT                  32
#define DEVID_MASK                MAKE_64BIT_MASK(32, 32)

#define VALID_SHIFT               63
#define CMD_FIELD_VALID_MASK      (1ULL << VALID_SHIFT)
#define L2_TABLE_VALID_MASK       CMD_FIELD_VALID_MASK
#define TABLE_ENTRY_VALID_MASK    (1ULL << 0)

/* MOVALL command fields */
FIELD(MOVALL_2, RDBASE1, 16, 36)
FIELD(MOVALL_3, RDBASE2, 16, 36)

/* MOVI command fields */
FIELD(MOVI_0, DEVICEID, 32, 32)
FIELD(MOVI_1, EVENTID, 0, 32)
FIELD(MOVI_2, ICID, 0, 16)

/* INV command fields */
FIELD(INV_0, DEVICEID, 32, 32)
FIELD(INV_1, EVENTID, 0, 32)

/* VMAPI, VMAPTI command fields */
FIELD(VMAPTI_0, DEVICEID, 32, 32)
FIELD(VMAPTI_1, EVENTID, 0, 32)
FIELD(VMAPTI_1, VPEID, 32, 16)
FIELD(VMAPTI_2, VINTID, 0, 32) /* VMAPTI only */
FIELD(VMAPTI_2, DOORBELL, 32, 32)

/* VMAPP command fields */
FIELD(VMAPP_0, ALLOC, 8, 1) /* GICv4.1 only */
FIELD(VMAPP_0, PTZ, 9, 1) /* GICv4.1 only */
FIELD(VMAPP_0, VCONFADDR, 16, 36) /* GICv4.1 only */
FIELD(VMAPP_1, DEFAULT_DOORBELL, 0, 32) /* GICv4.1 only */
FIELD(VMAPP_1, VPEID, 32, 16)
FIELD(VMAPP_2, RDBASE, 16, 36)
FIELD(VMAPP_2, V, 63, 1)
FIELD(VMAPP_3, VPTSIZE, 0, 8) /* For GICv4.0, bits [7:6] are RES0 */
FIELD(VMAPP_3, VPTADDR, 16, 36)

/* VMOVP command fields */
FIELD(VMOVP_0, SEQNUM, 32, 16) /* not used for GITS_TYPER.VMOVP == 1 */
FIELD(VMOVP_1, ITSLIST, 0, 16) /* not used for GITS_TYPER.VMOVP == 1 */
FIELD(VMOVP_1, VPEID, 32, 16)
FIELD(VMOVP_2, RDBASE, 16, 36)
FIELD(VMOVP_2, DB, 63, 1) /* GICv4.1 only */
FIELD(VMOVP_3, DEFAULT_DOORBELL, 0, 32) /* GICv4.1 only */

/* VMOVI command fields */
FIELD(VMOVI_0, DEVICEID, 32, 32)
FIELD(VMOVI_1, EVENTID, 0, 32)
FIELD(VMOVI_1, VPEID, 32, 16)
FIELD(VMOVI_2, D, 0, 1)
FIELD(VMOVI_2, DOORBELL, 32, 32)

/* VINVALL command fields */
FIELD(VINVALL_1, VPEID, 32, 16)

/*
 * 12-byte Interrupt Translation Table Entry, as per Table 5.3 in the
 * GICv3 spec.
 * ITE Lower 8 Bytes
 *   Bits:    | 63 ... 48 | 47 ... 32 | 31 ... 26 | 25 ... 2 |   1     |  0    |
 *   Values:  | vPEID     | ICID      | unused    |  IntNum  | IntType | Valid |
 * ITE Higher 4 Bytes
 *   Bits:    | 31 ... 25 | 24 ... 0 |
 *   Values:  | unused    | Doorbell |
 * (When Doorbell is unused, as it always is for ITE_INTTYPE_PHYSICAL,
 * the value of that field in memory cannot be relied upon -- older
 * versions of QEMU did not correctly write to that memory.)
 */
#define ITS_ITT_ENTRY_SIZE            0xC

FIELD(ITE_L, VALID, 0, 1)
FIELD(ITE_L, INTTYPE, 1, 1)
FIELD(ITE_L, INTID, 2, 24)
FIELD(ITE_L, ICID, 32, 16)
FIELD(ITE_L, VPEID, 48, 16)
FIELD(ITE_H, DOORBELL, 0, 24)

/* Possible values for ITE_L INTTYPE */
#define ITE_INTTYPE_VIRTUAL 0
#define ITE_INTTYPE_PHYSICAL 1
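
/*
 * Illustrative sketch (hypothetical helper): composing the lower 8 bytes
 * of an ITE for a physical LPI with the FIELD_DP64() helper from
 * "hw/registerfields.h" and the ITE_L field definitions above.
 */
static inline uint64_t gicv3_example_make_ite_lo(uint32_t intid, uint16_t icid)
{
    uint64_t itel = 0;

    itel = FIELD_DP64(itel, ITE_L, VALID, 1);
    itel = FIELD_DP64(itel, ITE_L, INTTYPE, ITE_INTTYPE_PHYSICAL);
    itel = FIELD_DP64(itel, ITE_L, INTID, intid);
    itel = FIELD_DP64(itel, ITE_L, ICID, icid);
    return itel;
}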

/* 16 bits EventId */
#define ITS_IDBITS                   GICD_TYPER_IDBITS

/* 16 bits DeviceId */
#define ITS_DEVBITS                   0xF

/* 16 bits CollectionId */
#define ITS_CIDBITS                  0xF

/*
 * 8 bytes Device Table Entry size
 * Valid = 1 bit, ITTAddr = 44 bits, Size = 5 bits
 */
#define GITS_DTE_SIZE                 (0x8ULL)

FIELD(DTE, VALID, 0, 1)
FIELD(DTE, SIZE, 1, 5)
FIELD(DTE, ITTADDR, 6, 44)
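
/*
 * Illustrative sketch (hypothetical helper): unpacking a Device Table
 * Entry with FIELD_EX64().  The ITTADDR field holds the ITT address
 * shifted down by ITTADDR_SHIFT, so shift it back up to recover the
 * guest physical address.
 */
static inline uint64_t gicv3_example_dte_ittaddr(uint64_t dte)
{
    return FIELD_EX64(dte, DTE, ITTADDR) << ITTADDR_SHIFT;
}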

/*
 * 8 bytes Collection Table Entry size
 * Valid = 1 bit, RDBase = 16 bits
 */
#define GITS_CTE_SIZE                 (0x8ULL)
FIELD(CTE, VALID, 0, 1)
FIELD(CTE, RDBASE, 1, RDBASE_PROCNUM_LENGTH)

/*
 * 8 bytes VPE table entry size:
 * Valid = 1 bit, VPTsize = 5 bits, VPTaddr = 36 bits, RDbase = 16 bits
 *
 * Field sizes for Valid and size are mandated; field sizes for RDbase
 * and VPT_addr are IMPDEF.
 */
#define GITS_VPE_SIZE 0x8ULL

FIELD(VTE, VALID, 0, 1)
FIELD(VTE, VPTSIZE, 1, 5)
FIELD(VTE, VPTADDR, 6, 36)
FIELD(VTE, RDBASE, 42, RDBASE_PROCNUM_LENGTH)

/* Special interrupt IDs */
#define INTID_SECURE 1020
#define INTID_NONSECURE 1021
#define INTID_SPURIOUS 1023

/* Functions internal to the emulated GICv3 */

/**
 * gicv3_redist_size:
 * @s: GICv3State
 *
 * Return the size of the redistributor register frame in bytes
 * (which depends on what GIC version this is)
 */
static inline int gicv3_redist_size(GICv3State *s)
{
    /*
     * Redistributor size is controlled by the redistributor GICR_TYPER.VLPIS.
     * It's the same for every redistributor in the GIC, so arbitrarily
     * use the register field in the first one.
     */
    if (s->cpu[0].gicr_typer & GICR_TYPER_VLPIS) {
        return GICV4_REDIST_SIZE;
    } else {
        return GICV3_REDIST_SIZE;
    }
}

/**
 * gicv3_intid_is_special:
 * @intid: interrupt ID
 *
 * Return true if @intid is a special interrupt ID (1020 to
 * 1023 inclusive). This corresponds to the GIC spec pseudocode
 * IsSpecial() function.
 */
static inline bool gicv3_intid_is_special(int intid)
{
    return intid >= INTID_SECURE && intid <= INTID_SPURIOUS;
}

/**
 * gicv3_redist_update:
 * @cs: GICv3CPUState for this redistributor
 *
 * Recalculate the highest priority pending interrupt after a
 * change to redistributor state, and inform the CPU accordingly.
 */
void gicv3_redist_update(GICv3CPUState *cs);

/**
 * gicv3_update:
 * @s: GICv3State
 * @start: first interrupt whose state changed
 * @len: length of the range of interrupts whose state changed
 *
 * Recalculate the highest priority pending interrupts after a
 * change to the distributor state affecting @len interrupts
 * starting at @start, and inform the CPUs accordingly.
 */
void gicv3_update(GICv3State *s, int start, int len);

/**
 * gicv3_full_update_noirqset:
 * @s: GICv3State
 *
 * Recalculate the cached information about highest priority
 * pending interrupts, but don't inform the CPUs. This should be
 * called after an incoming migration has loaded new state.
 */
void gicv3_full_update_noirqset(GICv3State *s);

/**
 * gicv3_full_update:
 * @s: GICv3State
 *
 * Recalculate the highest priority pending interrupts after
 * a change that could affect the status of all interrupts,
 * and inform the CPUs accordingly.
 */
void gicv3_full_update(GICv3State *s);
MemTxResult gicv3_dist_read(void *opaque, hwaddr offset, uint64_t *data,
                            unsigned size, MemTxAttrs attrs);
MemTxResult gicv3_dist_write(void *opaque, hwaddr addr, uint64_t data,
                             unsigned size, MemTxAttrs attrs);
MemTxResult gicv3_redist_read(void *opaque, hwaddr offset, uint64_t *data,
                              unsigned size, MemTxAttrs attrs);
MemTxResult gicv3_redist_write(void *opaque, hwaddr offset, uint64_t data,
                               unsigned size, MemTxAttrs attrs);
void gicv3_dist_set_irq(GICv3State *s, int irq, int level);
void gicv3_redist_set_irq(GICv3CPUState *cs, int irq, int level);
void gicv3_redist_process_lpi(GICv3CPUState *cs, int irq, int level);
/**
 * gicv3_redist_process_vlpi:
 * @cs: GICv3CPUState
 * @irq: (virtual) interrupt number
 * @vptaddr: (guest) address of VLPI table
 * @doorbell: doorbell (physical) interrupt number (1023 for "no doorbell")
 * @level: level to set @irq to
 *
 * Process a virtual LPI being directly injected by the ITS. This function
 * will update the VLPI table specified by @vptaddr. If the
 * vCPU corresponding to that VLPI table is currently running on
 * the CPU associated with this redistributor, directly inject the VLPI
 * @irq. If the vCPU is not running on this CPU, raise the doorbell
 * interrupt instead.
 */
void gicv3_redist_process_vlpi(GICv3CPUState *cs, int irq, uint64_t vptaddr,
                               int doorbell, int level);
void gicv3_redist_lpi_pending(GICv3CPUState *cs, int irq, int level);
/**
 * gicv3_redist_update_lpi:
 * @cs: GICv3CPUState
 *
 * Scan the LPI pending table and recalculate the highest priority
 * pending LPI and also the overall highest priority pending interrupt.
 */
void gicv3_redist_update_lpi(GICv3CPUState *cs);
/**
 * gicv3_redist_update_lpi_only:
 * @cs: GICv3CPUState
 *
 * Scan the LPI pending table and recalculate cs->hpplpi only,
 * without calling gicv3_redist_update() to recalculate the overall
 * highest priority pending interrupt. This should be called after
 * an incoming migration has loaded new state.
 */
void gicv3_redist_update_lpi_only(GICv3CPUState *cs);
/**
 * gicv3_redist_inv_lpi:
 * @cs: GICv3CPUState
 * @irq: LPI to invalidate cached information for
 *
 * Forget or update any cached information associated with this LPI.
 */
void gicv3_redist_inv_lpi(GICv3CPUState *cs, int irq);
/**
 * gicv3_redist_inv_vlpi:
 * @cs: GICv3CPUState
 * @irq: vLPI to invalidate cached information for
 * @vptaddr: (guest) address of vLPI table
 *
 * Forget or update any cached information associated with this vLPI.
 */
void gicv3_redist_inv_vlpi(GICv3CPUState *cs, int irq, uint64_t vptaddr);
/**
 * gicv3_redist_mov_lpi:
 * @src: source redistributor
 * @dest: destination redistributor
 * @irq: LPI to update
 *
 * Move the pending state of the specified LPI from @src to @dest,
 * as required by the ITS MOVI command.
 */
void gicv3_redist_mov_lpi(GICv3CPUState *src, GICv3CPUState *dest, int irq);
/**
 * gicv3_redist_movall_lpis:
 * @src: source redistributor
 * @dest: destination redistributor
 *
 * Scan the LPI pending table for @src, and for each pending LPI there
 * mark it as not-pending for @src and pending for @dest, as required
 * by the ITS MOVALL command.
 */
void gicv3_redist_movall_lpis(GICv3CPUState *src, GICv3CPUState *dest);
/**
 * gicv3_redist_mov_vlpi:
 * @src: source redistributor
 * @src_vptaddr: (guest) address of source VLPI table
 * @dest: destination redistributor
 * @dest_vptaddr: (guest) address of destination VLPI table
 * @irq: VLPI to update
 * @doorbell: doorbell for destination (1023 for "no doorbell")
 *
 * Move the pending state of the specified VLPI from @src to @dest,
 * as required by the ITS VMOVI command.
 */
void gicv3_redist_mov_vlpi(GICv3CPUState *src, uint64_t src_vptaddr,
                           GICv3CPUState *dest, uint64_t dest_vptaddr,
                           int irq, int doorbell);
/**
 * gicv3_redist_vinvall:
 * @cs: GICv3CPUState
 * @vptaddr: address of VLPI pending table
 *
 * On redistributor @cs, invalidate all cached information associated
 * with the vCPU defined by @vptaddr.
 */
void gicv3_redist_vinvall(GICv3CPUState *cs, uint64_t vptaddr);

void gicv3_redist_send_sgi(GICv3CPUState *cs, int grp, int irq, bool ns);
void gicv3_init_cpuif(GICv3State *s);

/**
 * gicv3_cpuif_update:
 * @cs: GICv3CPUState for the CPU to update
 *
 * Recalculate whether to assert the IRQ or FIQ lines after a change
 * to the current highest priority pending interrupt, the CPU's
 * current running priority or the CPU's current exception level or
 * security state.
 */
void gicv3_cpuif_update(GICv3CPUState *cs);

static inline uint32_t gicv3_iidr(void)
{
    /* Return the Implementer Identification Register value
     * for the emulated GICv3, as reported in GICD_IIDR and GICR_IIDR.
     *
     * We claim to be an ARM r0p0 with a zero ProductID.
     * This is the same as an r0p0 GIC-500.
     */
    return 0x43b;
}

/* CoreSight PIDR0 values for ARM GICv3 implementations */
#define GICV3_PIDR0_DIST 0x92
#define GICV3_PIDR0_REDIST 0x93
#define GICV3_PIDR0_ITS 0x94

static inline uint32_t gicv3_idreg(int regoffset, uint8_t pidr0)
{
    /* Return the value of the CoreSight ID register at the specified
     * offset from the first ID register (as found in the distributor
     * and redistributor register banks).
     * These values indicate an ARM implementation of a GICv3.
     */
    static const uint8_t gicd_ids[] = {
        0x44, 0x00, 0x00, 0x00, 0x92, 0xB4, 0x3B, 0x00, 0x0D, 0xF0, 0x05, 0xB1
    };

    regoffset /= 4;

    if (regoffset == 4) {
        return pidr0;
    }
    return gicd_ids[regoffset];
}

/**
 * gicv3_irq_group:
 *
 * Return the group which this interrupt is configured as (GICV3_G0,
 * GICV3_G1 or GICV3_G1NS).
 */
static inline int gicv3_irq_group(GICv3State *s, GICv3CPUState *cs, int irq)
{
    bool grpbit, grpmodbit;

    if (irq < GIC_INTERNAL) {
        grpbit = extract32(cs->gicr_igroupr0, irq, 1);
        grpmodbit = extract32(cs->gicr_igrpmodr0, irq, 1);
    } else {
        grpbit = gicv3_gicd_group_test(s, irq);
        grpmodbit = gicv3_gicd_grpmod_test(s, irq);
    }
    if (grpbit) {
        return GICV3_G1NS;
    }
    if (s->gicd_ctlr & GICD_CTLR_DS) {
        return GICV3_G0;
    }
    return grpmodbit ? GICV3_G1 : GICV3_G0;
}

/**
 * gicv3_redist_affid:
 *
 * Return the 32-bit affinity ID of the CPU connected to this redistributor
 */
static inline uint32_t gicv3_redist_affid(GICv3CPUState *cs)
{
    return cs->gicr_typer >> 32;
}

/**
 * gicv3_cache_target_cpustate:
 *
 * Update the cached CPU state corresponding to the target for this interrupt
 * (which is kept in s->gicd_irouter_target[]).
 */
static inline void gicv3_cache_target_cpustate(GICv3State *s, int irq)
{
    GICv3CPUState *cs = NULL;
    int i;
    uint32_t tgtaff = extract64(s->gicd_irouter[irq], 0, 24) |
        extract64(s->gicd_irouter[irq], 32, 8) << 24;

    for (i = 0; i < s->num_cpu; i++) {
        if (s->cpu[i].gicr_typer >> 32 == tgtaff) {
            cs = &s->cpu[i];
            break;
        }
    }

    s->gicd_irouter_target[irq] = cs;
}

/**
 * gicv3_cache_all_target_cpustates:
 *
 * Populate the entire cache of CPU state pointers for interrupt targets
 * (eg after inbound migration or CPU reset)
 */
static inline void gicv3_cache_all_target_cpustates(GICv3State *s)
{
    int irq;

    for (irq = GIC_INTERNAL; irq < GICV3_MAXIRQ; irq++) {
        gicv3_cache_target_cpustate(s, irq);
    }
}

void gicv3_set_gicv3state(CPUState *cpu, GICv3CPUState *s);

#endif /* QEMU_ARM_GICV3_INTERNAL_H */