xref: /openbmc/qemu/hw/intc/arm_gicv3_its.c (revision 9f54dc1c)
1 /*
2  * ITS emulation for a GICv3-based system
3  *
4  * Copyright Linaro.org 2021
5  *
6  * Authors:
7  *  Shashi Mallela <shashi.mallela@linaro.org>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2 or (at your
10  * option) any later version.  See the COPYING file in the top-level directory.
11  *
12  */
13 
14 #include "qemu/osdep.h"
15 #include "qemu/log.h"
16 #include "hw/qdev-properties.h"
17 #include "hw/intc/arm_gicv3_its_common.h"
18 #include "gicv3_internal.h"
19 #include "qom/object.h"
20 #include "qapi/error.h"
21 
typedef struct GICv3ITSClass GICv3ITSClass;
/* This is reusing the GICv3ITSState typedef from ARM_GICV3_ITS_COMMON */
DECLARE_OBJ_CHECKERS(GICv3ITSState, GICv3ITSClass,
                     ARM_GICV3_ITS, TYPE_ARM_GICV3_ITS)

/*
 * Class state: only the parent's reset handler is saved so our own
 * reset implementation can chain to it.
 */
struct GICv3ITSClass {
    GICv3ITSCommonClass parent_class;
    void (*parent_reset)(DeviceState *dev); /* chained parent reset */
};

/*
 * This is an internal enum used to distinguish between LPI triggered
 * via command queue and LPI triggered via gits_translater write.
 */
typedef enum ItsCmdType {
    NONE = 0, /* internal indication for GITS_TRANSLATER write */
    CLEAR = 1,     /* ITS CLEAR command */
    DISCARD = 2,   /* ITS DISCARD command */
    INTERRUPT = 3, /* ITS INT command */
} ItsCmdType;

/*
 * In-guest-memory interrupt translation entry, split into the 64-bit
 * word (valid bit, interrupt type, INTID) and the 32-bit word (ICID).
 * The exact layout is IMPDEF; see update_ite()/get_ite().
 */
typedef struct {
    uint32_t iteh;
    uint64_t itel;
} IteEntry;
47 
48 static uint64_t baser_base_addr(uint64_t value, uint32_t page_sz)
49 {
50     uint64_t result = 0;
51 
52     switch (page_sz) {
53     case GITS_PAGE_SIZE_4K:
54     case GITS_PAGE_SIZE_16K:
55         result = FIELD_EX64(value, GITS_BASER, PHYADDR) << 12;
56         break;
57 
58     case GITS_PAGE_SIZE_64K:
59         result = FIELD_EX64(value, GITS_BASER, PHYADDRL_64K) << 16;
60         result |= FIELD_EX64(value, GITS_BASER, PHYADDRH_64K) << 48;
61         break;
62 
63     default:
64         break;
65     }
66     return result;
67 }
68 
69 static bool get_cte(GICv3ITSState *s, uint16_t icid, uint64_t *cte,
70                     MemTxResult *res)
71 {
72     AddressSpace *as = &s->gicv3->dma_as;
73     uint64_t l2t_addr;
74     uint64_t value;
75     bool valid_l2t;
76     uint32_t l2t_id;
77     uint32_t max_l2_entries;
78 
79     if (s->ct.indirect) {
80         l2t_id = icid / (s->ct.page_sz / L1TABLE_ENTRY_SIZE);
81 
82         value = address_space_ldq_le(as,
83                                      s->ct.base_addr +
84                                      (l2t_id * L1TABLE_ENTRY_SIZE),
85                                      MEMTXATTRS_UNSPECIFIED, res);
86 
87         if (*res == MEMTX_OK) {
88             valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;
89 
90             if (valid_l2t) {
91                 max_l2_entries = s->ct.page_sz / s->ct.entry_sz;
92 
93                 l2t_addr = value & ((1ULL << 51) - 1);
94 
95                 *cte =  address_space_ldq_le(as, l2t_addr +
96                                     ((icid % max_l2_entries) * GITS_CTE_SIZE),
97                                     MEMTXATTRS_UNSPECIFIED, res);
98            }
99        }
100     } else {
101         /* Flat level table */
102         *cte =  address_space_ldq_le(as, s->ct.base_addr +
103                                      (icid * GITS_CTE_SIZE),
104                                       MEMTXATTRS_UNSPECIFIED, res);
105     }
106 
107     return (*cte & TABLE_ENTRY_VALID_MASK) != 0;
108 }
109 
/*
 * Write the Interrupt Translation Entry @ite for @eventid into the
 * interrupt translation table of the device described by @dte.
 *
 * Returns true iff both guest-memory stores succeeded.
 *
 * NOTE(review): each ITE occupies 12 bytes (8-byte itel + 4-byte
 * iteh), but the second store lands at offset +sizeof(uint32_t) (+4),
 * overlapping the upper half of itel rather than following it at +8.
 * get_ite() reads back with the same offsets, so the two functions are
 * mutually consistent, but this looks like it was meant to be
 * +sizeof(uint64_t) — confirm the intended IMPDEF ITE layout before
 * changing either side.
 */
static bool update_ite(GICv3ITSState *s, uint32_t eventid, uint64_t dte,
                       IteEntry ite)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t itt_addr;
    MemTxResult res = MEMTX_OK;

    itt_addr = (dte & GITS_DTE_ITTADDR_MASK) >> GITS_DTE_ITTADDR_SHIFT;
    itt_addr <<= ITTADDR_SHIFT; /* 256 byte aligned */

    /* 64-bit half of the entry */
    address_space_stq_le(as, itt_addr + (eventid * (sizeof(uint64_t) +
                         sizeof(uint32_t))), ite.itel, MEMTXATTRS_UNSPECIFIED,
                         &res);

    if (res == MEMTX_OK) {
        /* 32-bit half; only attempted when the first store succeeded */
        address_space_stl_le(as, itt_addr + (eventid * (sizeof(uint64_t) +
                             sizeof(uint32_t))) + sizeof(uint32_t), ite.iteh,
                             MEMTXATTRS_UNSPECIFIED, &res);
    }
    if (res != MEMTX_OK) {
        return false;
    } else {
        return true;
    }
}
135 
/*
 * Read the Interrupt Translation Entry for @eventid from the ITT of
 * the device described by @dte.
 *
 * Returns true — and fills in *@icid and *@pIntid — only when both
 * DMA reads succeed, the entry's valid bit is set, and the entry is of
 * physical interrupt type.  *@res reports the last DMA access result.
 *
 * NOTE(review): the 32-bit half is read from offset +sizeof(uint32_t)
 * (+4), overlapping the upper half of the 64-bit word. This mirrors
 * the layout written by update_ite() — see the note there.
 */
static bool get_ite(GICv3ITSState *s, uint32_t eventid, uint64_t dte,
                    uint16_t *icid, uint32_t *pIntid, MemTxResult *res)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t itt_addr;
    bool status = false;
    IteEntry ite = {};

    itt_addr = (dte & GITS_DTE_ITTADDR_MASK) >> GITS_DTE_ITTADDR_SHIFT;
    itt_addr <<= ITTADDR_SHIFT; /* 256 byte aligned */

    /* 64-bit half: valid bit, interrupt type and INTID */
    ite.itel = address_space_ldq_le(as, itt_addr +
                                    (eventid * (sizeof(uint64_t) +
                                    sizeof(uint32_t))), MEMTXATTRS_UNSPECIFIED,
                                    res);

    if (*res == MEMTX_OK) {
        /* 32-bit half: collection ID */
        ite.iteh = address_space_ldl_le(as, itt_addr +
                                        (eventid * (sizeof(uint64_t) +
                                        sizeof(uint32_t))) + sizeof(uint32_t),
                                        MEMTXATTRS_UNSPECIFIED, res);

        if (*res == MEMTX_OK) {
            if (ite.itel & TABLE_ENTRY_VALID_MASK) {
                /* Only physical interrupts are supported here */
                if ((ite.itel >> ITE_ENTRY_INTTYPE_SHIFT) &
                    GITS_TYPE_PHYSICAL) {
                    *pIntid = (ite.itel & ITE_ENTRY_INTID_MASK) >>
                               ITE_ENTRY_INTID_SHIFT;
                    *icid = ite.iteh & ITE_ENTRY_ICID_MASK;
                    status = true;
                }
            }
        }
    }
    return status;
}
172 
/*
 * Read the Device Table Entry for @devid from guest memory, using
 * either the flat table or the two-level (indirect) layout configured
 * via GITS_BASER.  *@res reports the result of the last DMA access.
 *
 * NOTE(review): in the indirect case, when the level-1 entry is
 * invalid the raw L1 word itself is returned rather than an explicit
 * invalid-DTE value. Callers only test TABLE_ENTRY_VALID_MASK, which
 * presumably is clear in an invalid L1 entry — confirm the two
 * valid-bit positions coincide before relying on this.
 */
static uint64_t get_dte(GICv3ITSState *s, uint32_t devid, MemTxResult *res)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t l2t_addr;
    uint64_t value;
    bool valid_l2t;
    uint32_t l2t_id;
    uint32_t max_l2_entries;

    if (s->dt.indirect) {
        /* Two-level table: fetch the L1 entry covering this device ID */
        l2t_id = devid / (s->dt.page_sz / L1TABLE_ENTRY_SIZE);

        value = address_space_ldq_le(as,
                                     s->dt.base_addr +
                                     (l2t_id * L1TABLE_ENTRY_SIZE),
                                     MEMTXATTRS_UNSPECIFIED, res);

        if (*res == MEMTX_OK) {
            valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;

            if (valid_l2t) {
                max_l2_entries = s->dt.page_sz / s->dt.entry_sz;

                /* L1 entry carries the physical address of the L2 page */
                l2t_addr = value & ((1ULL << 51) - 1);

                value =  address_space_ldq_le(as, l2t_addr +
                                   ((devid % max_l2_entries) * GITS_DTE_SIZE),
                                   MEMTXATTRS_UNSPECIFIED, res);
            }
        }
    } else {
        /* Flat level table */
        value = address_space_ldq_le(as, s->dt.base_addr +
                                     (devid * GITS_DTE_SIZE),
                                     MEMTXATTRS_UNSPECIFIED, res);
    }

    return value;
}
212 
213 /*
214  * This function handles the processing of following commands based on
215  * the ItsCmdType parameter passed:-
216  * 1. triggering of lpi interrupt translation via ITS INT command
217  * 2. triggering of lpi interrupt translation via gits_translater register
218  * 3. handling of ITS CLEAR command
219  * 4. handling of ITS DISCARD command
220  */
221 static bool process_its_cmd(GICv3ITSState *s, uint64_t value, uint32_t offset,
222                             ItsCmdType cmd)
223 {
224     AddressSpace *as = &s->gicv3->dma_as;
225     uint32_t devid, eventid;
226     MemTxResult res = MEMTX_OK;
227     bool dte_valid;
228     uint64_t dte = 0;
229     uint32_t max_eventid;
230     uint16_t icid = 0;
231     uint32_t pIntid = 0;
232     bool ite_valid = false;
233     uint64_t cte = 0;
234     bool cte_valid = false;
235     bool result = false;
236     uint64_t rdbase;
237 
238     if (cmd == NONE) {
239         devid = offset;
240     } else {
241         devid = ((value & DEVID_MASK) >> DEVID_SHIFT);
242 
243         offset += NUM_BYTES_IN_DW;
244         value = address_space_ldq_le(as, s->cq.base_addr + offset,
245                                      MEMTXATTRS_UNSPECIFIED, &res);
246     }
247 
248     if (res != MEMTX_OK) {
249         return result;
250     }
251 
252     eventid = (value & EVENTID_MASK);
253 
254     dte = get_dte(s, devid, &res);
255 
256     if (res != MEMTX_OK) {
257         return result;
258     }
259     dte_valid = dte & TABLE_ENTRY_VALID_MASK;
260 
261     if (dte_valid) {
262         max_eventid = (1UL << (((dte >> 1U) & SIZE_MASK) + 1));
263 
264         ite_valid = get_ite(s, eventid, dte, &icid, &pIntid, &res);
265 
266         if (res != MEMTX_OK) {
267             return result;
268         }
269 
270         if (ite_valid) {
271             cte_valid = get_cte(s, icid, &cte, &res);
272         }
273 
274         if (res != MEMTX_OK) {
275             return result;
276         }
277     } else {
278         qemu_log_mask(LOG_GUEST_ERROR,
279                       "%s: invalid command attributes: "
280                       "invalid dte: %"PRIx64" for %d (MEM_TX: %d)\n",
281                       __func__, dte, devid, res);
282         return result;
283     }
284 
285 
286     /*
287      * In this implementation, in case of guest errors we ignore the
288      * command and move onto the next command in the queue.
289      */
290     if (devid > s->dt.maxids.max_devids) {
291         qemu_log_mask(LOG_GUEST_ERROR,
292                       "%s: invalid command attributes: devid %d>%d",
293                       __func__, devid, s->dt.maxids.max_devids);
294 
295     } else if (!dte_valid || !ite_valid || !cte_valid) {
296         qemu_log_mask(LOG_GUEST_ERROR,
297                       "%s: invalid command attributes: "
298                       "dte: %s, ite: %s, cte: %s\n",
299                       __func__,
300                       dte_valid ? "valid" : "invalid",
301                       ite_valid ? "valid" : "invalid",
302                       cte_valid ? "valid" : "invalid");
303     } else if (eventid > max_eventid) {
304         qemu_log_mask(LOG_GUEST_ERROR,
305                       "%s: invalid command attributes: eventid %d > %d\n",
306                       __func__, eventid, max_eventid);
307     } else {
308         /*
309          * Current implementation only supports rdbase == procnum
310          * Hence rdbase physical address is ignored
311          */
312         rdbase = (cte & GITS_CTE_RDBASE_PROCNUM_MASK) >> 1U;
313 
314         if (rdbase > s->gicv3->num_cpu) {
315             return result;
316         }
317 
318         if ((cmd == CLEAR) || (cmd == DISCARD)) {
319             gicv3_redist_process_lpi(&s->gicv3->cpu[rdbase], pIntid, 0);
320         } else {
321             gicv3_redist_process_lpi(&s->gicv3->cpu[rdbase], pIntid, 1);
322         }
323 
324         if (cmd == DISCARD) {
325             IteEntry ite = {};
326             /* remove mapping from interrupt translation table */
327             result = update_ite(s, eventid, dte, ite);
328         }
329     }
330 
331     return result;
332 }
333 
334 static bool process_mapti(GICv3ITSState *s, uint64_t value, uint32_t offset,
335                           bool ignore_pInt)
336 {
337     AddressSpace *as = &s->gicv3->dma_as;
338     uint32_t devid, eventid;
339     uint32_t pIntid = 0;
340     uint32_t max_eventid, max_Intid;
341     bool dte_valid;
342     MemTxResult res = MEMTX_OK;
343     uint16_t icid = 0;
344     uint64_t dte = 0;
345     IteEntry ite;
346     uint32_t int_spurious = INTID_SPURIOUS;
347     bool result = false;
348 
349     devid = ((value & DEVID_MASK) >> DEVID_SHIFT);
350     offset += NUM_BYTES_IN_DW;
351     value = address_space_ldq_le(as, s->cq.base_addr + offset,
352                                  MEMTXATTRS_UNSPECIFIED, &res);
353 
354     if (res != MEMTX_OK) {
355         return result;
356     }
357 
358     eventid = (value & EVENTID_MASK);
359 
360     if (!ignore_pInt) {
361         pIntid = ((value & pINTID_MASK) >> pINTID_SHIFT);
362     }
363 
364     offset += NUM_BYTES_IN_DW;
365     value = address_space_ldq_le(as, s->cq.base_addr + offset,
366                                  MEMTXATTRS_UNSPECIFIED, &res);
367 
368     if (res != MEMTX_OK) {
369         return result;
370     }
371 
372     icid = value & ICID_MASK;
373 
374     dte = get_dte(s, devid, &res);
375 
376     if (res != MEMTX_OK) {
377         return result;
378     }
379     dte_valid = dte & TABLE_ENTRY_VALID_MASK;
380 
381     max_eventid = (1UL << (((dte >> 1U) & SIZE_MASK) + 1));
382 
383     if (!ignore_pInt) {
384         max_Intid = (1ULL << (GICD_TYPER_IDBITS + 1)) - 1;
385     }
386 
387     if ((devid > s->dt.maxids.max_devids) || (icid > s->ct.maxids.max_collids)
388             || !dte_valid || (eventid > max_eventid) ||
389             (!ignore_pInt && (((pIntid < GICV3_LPI_INTID_START) ||
390             (pIntid > max_Intid)) && (pIntid != INTID_SPURIOUS)))) {
391         qemu_log_mask(LOG_GUEST_ERROR,
392                       "%s: invalid command attributes "
393                       "devid %d or icid %d or eventid %d or pIntid %d or"
394                       "unmapped dte %d\n", __func__, devid, icid, eventid,
395                       pIntid, dte_valid);
396         /*
397          * in this implementation, in case of error
398          * we ignore this command and move onto the next
399          * command in the queue
400          */
401     } else {
402         /* add ite entry to interrupt translation table */
403         ite.itel = (dte_valid & TABLE_ENTRY_VALID_MASK) |
404                     (GITS_TYPE_PHYSICAL << ITE_ENTRY_INTTYPE_SHIFT);
405 
406         if (ignore_pInt) {
407             ite.itel |= (eventid << ITE_ENTRY_INTID_SHIFT);
408         } else {
409             ite.itel |= (pIntid << ITE_ENTRY_INTID_SHIFT);
410         }
411         ite.itel |= (int_spurious << ITE_ENTRY_INTSP_SHIFT);
412         ite.iteh = icid;
413 
414         result = update_ite(s, eventid, dte, ite);
415     }
416 
417     return result;
418 }
419 
420 static bool update_cte(GICv3ITSState *s, uint16_t icid, bool valid,
421                        uint64_t rdbase)
422 {
423     AddressSpace *as = &s->gicv3->dma_as;
424     uint64_t value;
425     uint64_t l2t_addr;
426     bool valid_l2t;
427     uint32_t l2t_id;
428     uint32_t max_l2_entries;
429     uint64_t cte = 0;
430     MemTxResult res = MEMTX_OK;
431 
432     if (!s->ct.valid) {
433         return true;
434     }
435 
436     if (valid) {
437         /* add mapping entry to collection table */
438         cte = (valid & TABLE_ENTRY_VALID_MASK) | (rdbase << 1ULL);
439     }
440 
441     /*
442      * The specification defines the format of level 1 entries of a
443      * 2-level table, but the format of level 2 entries and the format
444      * of flat-mapped tables is IMPDEF.
445      */
446     if (s->ct.indirect) {
447         l2t_id = icid / (s->ct.page_sz / L1TABLE_ENTRY_SIZE);
448 
449         value = address_space_ldq_le(as,
450                                      s->ct.base_addr +
451                                      (l2t_id * L1TABLE_ENTRY_SIZE),
452                                      MEMTXATTRS_UNSPECIFIED, &res);
453 
454         if (res != MEMTX_OK) {
455             return false;
456         }
457 
458         valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;
459 
460         if (valid_l2t) {
461             max_l2_entries = s->ct.page_sz / s->ct.entry_sz;
462 
463             l2t_addr = value & ((1ULL << 51) - 1);
464 
465             address_space_stq_le(as, l2t_addr +
466                                  ((icid % max_l2_entries) * GITS_CTE_SIZE),
467                                  cte, MEMTXATTRS_UNSPECIFIED, &res);
468         }
469     } else {
470         /* Flat level table */
471         address_space_stq_le(as, s->ct.base_addr + (icid * GITS_CTE_SIZE),
472                              cte, MEMTXATTRS_UNSPECIFIED, &res);
473     }
474     if (res != MEMTX_OK) {
475         return false;
476     } else {
477         return true;
478     }
479 }
480 
481 static bool process_mapc(GICv3ITSState *s, uint32_t offset)
482 {
483     AddressSpace *as = &s->gicv3->dma_as;
484     uint16_t icid;
485     uint64_t rdbase;
486     bool valid;
487     MemTxResult res = MEMTX_OK;
488     bool result = false;
489     uint64_t value;
490 
491     offset += NUM_BYTES_IN_DW;
492     offset += NUM_BYTES_IN_DW;
493 
494     value = address_space_ldq_le(as, s->cq.base_addr + offset,
495                                  MEMTXATTRS_UNSPECIFIED, &res);
496 
497     if (res != MEMTX_OK) {
498         return result;
499     }
500 
501     icid = value & ICID_MASK;
502 
503     rdbase = (value & R_MAPC_RDBASE_MASK) >> R_MAPC_RDBASE_SHIFT;
504     rdbase &= RDBASE_PROCNUM_MASK;
505 
506     valid = (value & CMD_FIELD_VALID_MASK);
507 
508     if ((icid > s->ct.maxids.max_collids) || (rdbase > s->gicv3->num_cpu)) {
509         qemu_log_mask(LOG_GUEST_ERROR,
510                       "ITS MAPC: invalid collection table attributes "
511                       "icid %d rdbase %" PRIu64 "\n",  icid, rdbase);
512         /*
513          * in this implementation, in case of error
514          * we ignore this command and move onto the next
515          * command in the queue
516          */
517     } else {
518         result = update_cte(s, icid, valid, rdbase);
519     }
520 
521     return result;
522 }
523 
524 static bool update_dte(GICv3ITSState *s, uint32_t devid, bool valid,
525                        uint8_t size, uint64_t itt_addr)
526 {
527     AddressSpace *as = &s->gicv3->dma_as;
528     uint64_t value;
529     uint64_t l2t_addr;
530     bool valid_l2t;
531     uint32_t l2t_id;
532     uint32_t max_l2_entries;
533     uint64_t dte = 0;
534     MemTxResult res = MEMTX_OK;
535 
536     if (s->dt.valid) {
537         if (valid) {
538             /* add mapping entry to device table */
539             dte = (valid & TABLE_ENTRY_VALID_MASK) |
540                   ((size & SIZE_MASK) << 1U) |
541                   (itt_addr << GITS_DTE_ITTADDR_SHIFT);
542         }
543     } else {
544         return true;
545     }
546 
547     /*
548      * The specification defines the format of level 1 entries of a
549      * 2-level table, but the format of level 2 entries and the format
550      * of flat-mapped tables is IMPDEF.
551      */
552     if (s->dt.indirect) {
553         l2t_id = devid / (s->dt.page_sz / L1TABLE_ENTRY_SIZE);
554 
555         value = address_space_ldq_le(as,
556                                      s->dt.base_addr +
557                                      (l2t_id * L1TABLE_ENTRY_SIZE),
558                                      MEMTXATTRS_UNSPECIFIED, &res);
559 
560         if (res != MEMTX_OK) {
561             return false;
562         }
563 
564         valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;
565 
566         if (valid_l2t) {
567             max_l2_entries = s->dt.page_sz / s->dt.entry_sz;
568 
569             l2t_addr = value & ((1ULL << 51) - 1);
570 
571             address_space_stq_le(as, l2t_addr +
572                                  ((devid % max_l2_entries) * GITS_DTE_SIZE),
573                                  dte, MEMTXATTRS_UNSPECIFIED, &res);
574         }
575     } else {
576         /* Flat level table */
577         address_space_stq_le(as, s->dt.base_addr + (devid * GITS_DTE_SIZE),
578                              dte, MEMTXATTRS_UNSPECIFIED, &res);
579     }
580     if (res != MEMTX_OK) {
581         return false;
582     } else {
583         return true;
584     }
585 }
586 
587 static bool process_mapd(GICv3ITSState *s, uint64_t value, uint32_t offset)
588 {
589     AddressSpace *as = &s->gicv3->dma_as;
590     uint32_t devid;
591     uint8_t size;
592     uint64_t itt_addr;
593     bool valid;
594     MemTxResult res = MEMTX_OK;
595     bool result = false;
596 
597     devid = ((value & DEVID_MASK) >> DEVID_SHIFT);
598 
599     offset += NUM_BYTES_IN_DW;
600     value = address_space_ldq_le(as, s->cq.base_addr + offset,
601                                  MEMTXATTRS_UNSPECIFIED, &res);
602 
603     if (res != MEMTX_OK) {
604         return result;
605     }
606 
607     size = (value & SIZE_MASK);
608 
609     offset += NUM_BYTES_IN_DW;
610     value = address_space_ldq_le(as, s->cq.base_addr + offset,
611                                  MEMTXATTRS_UNSPECIFIED, &res);
612 
613     if (res != MEMTX_OK) {
614         return result;
615     }
616 
617     itt_addr = (value & ITTADDR_MASK) >> ITTADDR_SHIFT;
618 
619     valid = (value & CMD_FIELD_VALID_MASK);
620 
621     if ((devid > s->dt.maxids.max_devids) ||
622         (size > FIELD_EX64(s->typer, GITS_TYPER, IDBITS))) {
623         qemu_log_mask(LOG_GUEST_ERROR,
624                       "ITS MAPD: invalid device table attributes "
625                       "devid %d or size %d\n", devid, size);
626         /*
627          * in this implementation, in case of error
628          * we ignore this command and move onto the next
629          * command in the queue
630          */
631     } else {
632         result = update_dte(s, devid, valid, size, itt_addr);
633     }
634 
635     return result;
636 }
637 
638 /*
639  * Current implementation blocks until all
640  * commands are processed
641  */
642 static void process_cmdq(GICv3ITSState *s)
643 {
644     uint32_t wr_offset = 0;
645     uint32_t rd_offset = 0;
646     uint32_t cq_offset = 0;
647     uint64_t data;
648     AddressSpace *as = &s->gicv3->dma_as;
649     MemTxResult res = MEMTX_OK;
650     bool result = true;
651     uint8_t cmd;
652     int i;
653 
654     if (!(s->ctlr & ITS_CTLR_ENABLED)) {
655         return;
656     }
657 
658     wr_offset = FIELD_EX64(s->cwriter, GITS_CWRITER, OFFSET);
659 
660     if (wr_offset > s->cq.max_entries) {
661         qemu_log_mask(LOG_GUEST_ERROR,
662                       "%s: invalid write offset "
663                       "%d\n", __func__, wr_offset);
664         return;
665     }
666 
667     rd_offset = FIELD_EX64(s->creadr, GITS_CREADR, OFFSET);
668 
669     if (rd_offset > s->cq.max_entries) {
670         qemu_log_mask(LOG_GUEST_ERROR,
671                       "%s: invalid read offset "
672                       "%d\n", __func__, rd_offset);
673         return;
674     }
675 
676     while (wr_offset != rd_offset) {
677         cq_offset = (rd_offset * GITS_CMDQ_ENTRY_SIZE);
678         data = address_space_ldq_le(as, s->cq.base_addr + cq_offset,
679                                     MEMTXATTRS_UNSPECIFIED, &res);
680         if (res != MEMTX_OK) {
681             result = false;
682         }
683         cmd = (data & CMD_MASK);
684 
685         switch (cmd) {
686         case GITS_CMD_INT:
687             res = process_its_cmd(s, data, cq_offset, INTERRUPT);
688             break;
689         case GITS_CMD_CLEAR:
690             res = process_its_cmd(s, data, cq_offset, CLEAR);
691             break;
692         case GITS_CMD_SYNC:
693             /*
694              * Current implementation makes a blocking synchronous call
695              * for every command issued earlier, hence the internal state
696              * is already consistent by the time SYNC command is executed.
697              * Hence no further processing is required for SYNC command.
698              */
699             break;
700         case GITS_CMD_MAPD:
701             result = process_mapd(s, data, cq_offset);
702             break;
703         case GITS_CMD_MAPC:
704             result = process_mapc(s, cq_offset);
705             break;
706         case GITS_CMD_MAPTI:
707             result = process_mapti(s, data, cq_offset, false);
708             break;
709         case GITS_CMD_MAPI:
710             result = process_mapti(s, data, cq_offset, true);
711             break;
712         case GITS_CMD_DISCARD:
713             result = process_its_cmd(s, data, cq_offset, DISCARD);
714             break;
715         case GITS_CMD_INV:
716         case GITS_CMD_INVALL:
717             /*
718              * Current implementation doesn't cache any ITS tables,
719              * but the calculated lpi priority information. We only
720              * need to trigger lpi priority re-calculation to be in
721              * sync with LPI config table or pending table changes.
722              */
723             for (i = 0; i < s->gicv3->num_cpu; i++) {
724                 gicv3_redist_update_lpi(&s->gicv3->cpu[i]);
725             }
726             break;
727         default:
728             break;
729         }
730         if (result) {
731             rd_offset++;
732             rd_offset %= s->cq.max_entries;
733             s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, OFFSET, rd_offset);
734         } else {
735             /*
736              * in this implementation, in case of dma read/write error
737              * we stall the command processing
738              */
739             s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
740             qemu_log_mask(LOG_GUEST_ERROR,
741                           "%s: %x cmd processing failed\n", __func__, cmd);
742             break;
743         }
744     }
745 }
746 
747 /*
748  * This function extracts the ITS Device and Collection table specific
749  * parameters (like base_addr, size etc) from GITS_BASER register.
750  * It is called during ITS enable and also during post_load migration
751  */
752 static void extract_table_params(GICv3ITSState *s)
753 {
754     uint16_t num_pages = 0;
755     uint8_t  page_sz_type;
756     uint8_t type;
757     uint32_t page_sz = 0;
758     uint64_t value;
759 
760     for (int i = 0; i < 8; i++) {
761         value = s->baser[i];
762 
763         if (!value) {
764             continue;
765         }
766 
767         page_sz_type = FIELD_EX64(value, GITS_BASER, PAGESIZE);
768 
769         switch (page_sz_type) {
770         case 0:
771             page_sz = GITS_PAGE_SIZE_4K;
772             break;
773 
774         case 1:
775             page_sz = GITS_PAGE_SIZE_16K;
776             break;
777 
778         case 2:
779         case 3:
780             page_sz = GITS_PAGE_SIZE_64K;
781             break;
782 
783         default:
784             g_assert_not_reached();
785         }
786 
787         num_pages = FIELD_EX64(value, GITS_BASER, SIZE) + 1;
788 
789         type = FIELD_EX64(value, GITS_BASER, TYPE);
790 
791         switch (type) {
792 
793         case GITS_BASER_TYPE_DEVICE:
794             memset(&s->dt, 0 , sizeof(s->dt));
795             s->dt.valid = FIELD_EX64(value, GITS_BASER, VALID);
796 
797             if (!s->dt.valid) {
798                 return;
799             }
800 
801             s->dt.page_sz = page_sz;
802             s->dt.indirect = FIELD_EX64(value, GITS_BASER, INDIRECT);
803             s->dt.entry_sz = FIELD_EX64(value, GITS_BASER, ENTRYSIZE);
804 
805             if (!s->dt.indirect) {
806                 s->dt.max_entries = (num_pages * page_sz) / s->dt.entry_sz;
807             } else {
808                 s->dt.max_entries = (((num_pages * page_sz) /
809                                      L1TABLE_ENTRY_SIZE) *
810                                      (page_sz / s->dt.entry_sz));
811             }
812 
813             s->dt.maxids.max_devids = (1UL << (FIELD_EX64(s->typer, GITS_TYPER,
814                                        DEVBITS) + 1));
815 
816             s->dt.base_addr = baser_base_addr(value, page_sz);
817 
818             break;
819 
820         case GITS_BASER_TYPE_COLLECTION:
821             memset(&s->ct, 0 , sizeof(s->ct));
822             s->ct.valid = FIELD_EX64(value, GITS_BASER, VALID);
823 
824             /*
825              * GITS_TYPER.HCC is 0 for this implementation
826              * hence writes are discarded if ct.valid is 0
827              */
828             if (!s->ct.valid) {
829                 return;
830             }
831 
832             s->ct.page_sz = page_sz;
833             s->ct.indirect = FIELD_EX64(value, GITS_BASER, INDIRECT);
834             s->ct.entry_sz = FIELD_EX64(value, GITS_BASER, ENTRYSIZE);
835 
836             if (!s->ct.indirect) {
837                 s->ct.max_entries = (num_pages * page_sz) / s->ct.entry_sz;
838             } else {
839                 s->ct.max_entries = (((num_pages * page_sz) /
840                                      L1TABLE_ENTRY_SIZE) *
841                                      (page_sz / s->ct.entry_sz));
842             }
843 
844             if (FIELD_EX64(s->typer, GITS_TYPER, CIL)) {
845                 s->ct.maxids.max_collids = (1UL << (FIELD_EX64(s->typer,
846                                             GITS_TYPER, CIDBITS) + 1));
847             } else {
848                 /* 16-bit CollectionId supported when CIL == 0 */
849                 s->ct.maxids.max_collids = (1UL << 16);
850             }
851 
852             s->ct.base_addr = baser_base_addr(value, page_sz);
853 
854             break;
855 
856         default:
857             break;
858         }
859     }
860 }
861 
862 static void extract_cmdq_params(GICv3ITSState *s)
863 {
864     uint16_t num_pages = 0;
865     uint64_t value = s->cbaser;
866 
867     num_pages = FIELD_EX64(value, GITS_CBASER, SIZE) + 1;
868 
869     memset(&s->cq, 0 , sizeof(s->cq));
870     s->cq.valid = FIELD_EX64(value, GITS_CBASER, VALID);
871 
872     if (s->cq.valid) {
873         s->cq.max_entries = (num_pages * GITS_PAGE_SIZE_4K) /
874                              GITS_CMDQ_ENTRY_SIZE;
875         s->cq.base_addr = FIELD_EX64(value, GITS_CBASER, PHYADDR);
876         s->cq.base_addr <<= R_GITS_CBASER_PHYADDR_SHIFT;
877     }
878 }
879 
880 static MemTxResult gicv3_its_translation_write(void *opaque, hwaddr offset,
881                                                uint64_t data, unsigned size,
882                                                MemTxAttrs attrs)
883 {
884     GICv3ITSState *s = (GICv3ITSState *)opaque;
885     bool result = true;
886     uint32_t devid = 0;
887 
888     switch (offset) {
889     case GITS_TRANSLATER:
890         if (s->ctlr & ITS_CTLR_ENABLED) {
891             devid = attrs.requester_id;
892             result = process_its_cmd(s, data, devid, NONE);
893         }
894         break;
895     default:
896         break;
897     }
898 
899     if (result) {
900         return MEMTX_OK;
901     } else {
902         return MEMTX_ERROR;
903     }
904 }
905 
/*
 * Handle a 32-bit guest write to the ITS control register frame.
 * The 64-bit registers are also writable as two 32-bit halves
 * (offset & 7 selects the high half). Returns false if the offset
 * does not map to an implemented register; the caller logs the
 * access and treats it as write-ignored.
 */
static bool its_writel(GICv3ITSState *s, hwaddr offset,
                              uint64_t value, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_CTLR:
        if (value & R_GITS_CTLR_ENABLED_MASK) {
            /*
             * On enable: latch the table and command queue parameters
             * from the BASER/CBASER registers (which become RO below),
             * reset the read pointer and process any commands the guest
             * queued before enabling.
             */
            s->ctlr |= ITS_CTLR_ENABLED;
            extract_table_params(s);
            extract_cmdq_params(s);
            s->creadr = 0;
            process_cmdq(s);
        } else {
            s->ctlr &= ~ITS_CTLR_ENABLED;
        }
        break;
    case GITS_CBASER:
        /*
         * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
         *                 already enabled
         */
        if (!(s->ctlr & ITS_CTLR_ENABLED)) {
            /* A CBASER write resets both command queue pointers */
            s->cbaser = deposit64(s->cbaser, 0, 32, value);
            s->creadr = 0;
            s->cwriter = s->creadr;
        }
        break;
    case GITS_CBASER + 4:
        /*
         * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
         *                 already enabled
         */
        if (!(s->ctlr & ITS_CTLR_ENABLED)) {
            s->cbaser = deposit64(s->cbaser, 32, 32, value);
            s->creadr = 0;
            s->cwriter = s->creadr;
        }
        break;
    case GITS_CWRITER:
        /* The RETRY bit is write-ignored */
        s->cwriter = deposit64(s->cwriter, 0, 32,
                               (value & ~R_GITS_CWRITER_RETRY_MASK));
        /* Advancing CWRITER past CREADR makes new commands available */
        if (s->cwriter != s->creadr) {
            process_cmdq(s);
        }
        break;
    case GITS_CWRITER + 4:
        s->cwriter = deposit64(s->cwriter, 32, 32, value);
        break;
    case GITS_CREADR:
        /* CREADR is writable only when GICD_CTLR.DS is set */
        if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
            /* The STALLED bit is write-ignored */
            s->creadr = deposit64(s->creadr, 0, 32,
                                  (value & ~R_GITS_CREADR_STALLED_MASK));
        } else {
            /* RO register, ignore the write */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid guest write to RO register at offset "
                          TARGET_FMT_plx "\n", __func__, offset);
        }
        break;
    case GITS_CREADR + 4:
        if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
            s->creadr = deposit64(s->creadr, 32, 32, value);
        } else {
            /* RO register, ignore the write */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid guest write to RO register at offset "
                          TARGET_FMT_plx "\n", __func__, offset);
        }
        break;
    case GITS_BASER ... GITS_BASER + 0x3f:
        /*
         * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
         *                 already enabled
         */
        if (!(s->ctlr & ITS_CTLR_ENABLED)) {
            index = (offset - GITS_BASER) / 8;

            if (offset & 7) {
                /* Odd word: update the high 32 bits, preserving RO fields */
                value <<= 32;
                value &= ~GITS_BASER_RO_MASK;
                s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(0, 32);
                s->baser[index] |= value;
            } else {
                /* Even word: update the low 32 bits, preserving RO fields */
                value &= ~GITS_BASER_RO_MASK;
                s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(32, 32);
                s->baser[index] |= value;
            }
        }
        break;
    case GITS_IIDR:
    case GITS_IDREGS ... GITS_IDREGS + 0x2f:
        /* RO registers, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      TARGET_FMT_plx "\n", __func__, offset);
        break;
    default:
        result = false;
        break;
    }
    return result;
}
1010 
1011 static bool its_readl(GICv3ITSState *s, hwaddr offset,
1012                              uint64_t *data, MemTxAttrs attrs)
1013 {
1014     bool result = true;
1015     int index;
1016 
1017     switch (offset) {
1018     case GITS_CTLR:
1019         *data = s->ctlr;
1020         break;
1021     case GITS_IIDR:
1022         *data = gicv3_iidr();
1023         break;
1024     case GITS_IDREGS ... GITS_IDREGS + 0x2f:
1025         /* ID registers */
1026         *data = gicv3_idreg(offset - GITS_IDREGS);
1027         break;
1028     case GITS_TYPER:
1029         *data = extract64(s->typer, 0, 32);
1030         break;
1031     case GITS_TYPER + 4:
1032         *data = extract64(s->typer, 32, 32);
1033         break;
1034     case GITS_CBASER:
1035         *data = extract64(s->cbaser, 0, 32);
1036         break;
1037     case GITS_CBASER + 4:
1038         *data = extract64(s->cbaser, 32, 32);
1039         break;
1040     case GITS_CREADR:
1041         *data = extract64(s->creadr, 0, 32);
1042         break;
1043     case GITS_CREADR + 4:
1044         *data = extract64(s->creadr, 32, 32);
1045         break;
1046     case GITS_CWRITER:
1047         *data = extract64(s->cwriter, 0, 32);
1048         break;
1049     case GITS_CWRITER + 4:
1050         *data = extract64(s->cwriter, 32, 32);
1051         break;
1052     case GITS_BASER ... GITS_BASER + 0x3f:
1053         index = (offset - GITS_BASER) / 8;
1054         if (offset & 7) {
1055             *data = extract64(s->baser[index], 32, 32);
1056         } else {
1057             *data = extract64(s->baser[index], 0, 32);
1058         }
1059         break;
1060     default:
1061         result = false;
1062         break;
1063     }
1064     return result;
1065 }
1066 
/*
 * Handle a 64-bit guest write to the ITS control register frame.
 * Returns false if the offset does not map to an implemented 64-bit
 * register; the caller logs the access and treats it as write-ignored.
 */
static bool its_writell(GICv3ITSState *s, hwaddr offset,
                               uint64_t value, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_BASER ... GITS_BASER + 0x3f:
        /*
         * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
         *                 already enabled
         */
        if (!(s->ctlr & ITS_CTLR_ENABLED)) {
            /* Preserve the RO fields, update everything else */
            index = (offset - GITS_BASER) / 8;
            s->baser[index] &= GITS_BASER_RO_MASK;
            s->baser[index] |= (value & ~GITS_BASER_RO_MASK);
        }
        break;
    case GITS_CBASER:
        /*
         * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
         *                 already enabled
         */
        if (!(s->ctlr & ITS_CTLR_ENABLED)) {
            /* A CBASER write resets both command queue pointers */
            s->cbaser = value;
            s->creadr = 0;
            s->cwriter = s->creadr;
        }
        break;
    case GITS_CWRITER:
        /* The RETRY bit is write-ignored; advancing CWRITER past
         * CREADR makes new commands available for processing */
        s->cwriter = value & ~R_GITS_CWRITER_RETRY_MASK;
        if (s->cwriter != s->creadr) {
            process_cmdq(s);
        }
        break;
    case GITS_CREADR:
        /* CREADR is writable only when GICD_CTLR.DS is set */
        if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
            /* The STALLED bit is write-ignored */
            s->creadr = value & ~R_GITS_CREADR_STALLED_MASK;
        } else {
            /* RO register, ignore the write */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid guest write to RO register at offset "
                          TARGET_FMT_plx "\n", __func__, offset);
        }
        break;
    case GITS_TYPER:
        /* RO registers, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      TARGET_FMT_plx "\n", __func__, offset);
        break;
    default:
        result = false;
        break;
    }
    return result;
}
1124 
1125 static bool its_readll(GICv3ITSState *s, hwaddr offset,
1126                               uint64_t *data, MemTxAttrs attrs)
1127 {
1128     bool result = true;
1129     int index;
1130 
1131     switch (offset) {
1132     case GITS_TYPER:
1133         *data = s->typer;
1134         break;
1135     case GITS_BASER ... GITS_BASER + 0x3f:
1136         index = (offset - GITS_BASER) / 8;
1137         *data = s->baser[index];
1138         break;
1139     case GITS_CBASER:
1140         *data = s->cbaser;
1141         break;
1142     case GITS_CREADR:
1143         *data = s->creadr;
1144         break;
1145     case GITS_CWRITER:
1146         *data = s->cwriter;
1147         break;
1148     default:
1149         result = false;
1150         break;
1151     }
1152     return result;
1153 }
1154 
1155 static MemTxResult gicv3_its_read(void *opaque, hwaddr offset, uint64_t *data,
1156                                   unsigned size, MemTxAttrs attrs)
1157 {
1158     GICv3ITSState *s = (GICv3ITSState *)opaque;
1159     bool result;
1160 
1161     switch (size) {
1162     case 4:
1163         result = its_readl(s, offset, data, attrs);
1164         break;
1165     case 8:
1166         result = its_readll(s, offset, data, attrs);
1167         break;
1168     default:
1169         result = false;
1170         break;
1171     }
1172 
1173     if (!result) {
1174         qemu_log_mask(LOG_GUEST_ERROR,
1175                       "%s: invalid guest read at offset " TARGET_FMT_plx
1176                       "size %u\n", __func__, offset, size);
1177         /*
1178          * The spec requires that reserved registers are RAZ/WI;
1179          * so use false returns from leaf functions as a way to
1180          * trigger the guest-error logging but don't return it to
1181          * the caller, or we'll cause a spurious guest data abort.
1182          */
1183         *data = 0;
1184     }
1185     return MEMTX_OK;
1186 }
1187 
1188 static MemTxResult gicv3_its_write(void *opaque, hwaddr offset, uint64_t data,
1189                                    unsigned size, MemTxAttrs attrs)
1190 {
1191     GICv3ITSState *s = (GICv3ITSState *)opaque;
1192     bool result;
1193 
1194     switch (size) {
1195     case 4:
1196         result = its_writel(s, offset, data, attrs);
1197         break;
1198     case 8:
1199         result = its_writell(s, offset, data, attrs);
1200         break;
1201     default:
1202         result = false;
1203         break;
1204     }
1205 
1206     if (!result) {
1207         qemu_log_mask(LOG_GUEST_ERROR,
1208                       "%s: invalid guest write at offset " TARGET_FMT_plx
1209                       "size %u\n", __func__, offset, size);
1210         /*
1211          * The spec requires that reserved registers are RAZ/WI;
1212          * so use false returns from leaf functions as a way to
1213          * trigger the guest-error logging but don't return it to
1214          * the caller, or we'll cause a spurious guest data abort.
1215          */
1216     }
1217     return MEMTX_OK;
1218 }
1219 
/*
 * MMIO ops for the ITS control register frame: registers are accessed
 * with 4- or 8-byte transactions, routed through the read/write
 * dispatchers above (which carry the transaction attributes).
 */
static const MemoryRegionOps gicv3_its_control_ops = {
    .read_with_attrs = gicv3_its_read,
    .write_with_attrs = gicv3_its_write,
    .valid.min_access_size = 4,
    .valid.max_access_size = 8,
    .impl.min_access_size = 4,
    .impl.max_access_size = 8,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
1229 
/*
 * MMIO ops for the ITS translation register frame. Only writes are
 * implemented (no .read_with_attrs); 2- and 4-byte accesses are
 * accepted for the GITS_TRANSLATER register.
 */
static const MemoryRegionOps gicv3_its_translation_ops = {
    .write_with_attrs = gicv3_its_translation_write,
    .valid.min_access_size = 2,
    .valid.max_access_size = 4,
    .impl.min_access_size = 2,
    .impl.max_access_size = 4,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
1238 
/*
 * Realize the ITS device: verify that every redistributor supports
 * physical LPIs, map the control and translation register frames,
 * set up the DMA address space used for the in-guest-memory tables,
 * and program the fixed feature fields of GITS_TYPER.
 */
static void gicv3_arm_its_realize(DeviceState *dev, Error **errp)
{
    GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
    int i;

    /* Fail realize unless every CPU's redistributor advertises PLPIS */
    for (i = 0; i < s->gicv3->num_cpu; i++) {
        if (!(s->gicv3->cpu[i].gicr_typer & GICR_TYPER_PLPIS)) {
            error_setg(errp, "Physical LPI not supported by CPU %d", i);
            return;
        }
    }

    gicv3_its_init_mmio(s, &gicv3_its_control_ops, &gicv3_its_translation_ops);

    address_space_init(&s->gicv3->dma_as, s->gicv3->dma,
                       "gicv3-its-sysmem");

    /* set the ITS default features supported */
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, PHYSICAL,
                          GITS_TYPE_PHYSICAL);
    /* ITT_ENTRY_SIZE, IDBITS and CIDBITS fields are encoded as N - 1 */
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, ITT_ENTRY_SIZE,
                          ITS_ITT_ENTRY_SIZE - 1);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, IDBITS, ITS_IDBITS);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, DEVBITS, ITS_DEVBITS);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIL, 1);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIDBITS, ITS_CIDBITS);
}
1266 
/*
 * Reset the ITS: chain to the parent class reset handler, then apply
 * this implementation's reset values for GITS_CTLR and the GITS_BASERn
 * type/page-size/entry-size fields.
 */
static void gicv3_its_reset(DeviceState *dev)
{
    GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
    GICv3ITSClass *c = ARM_GICV3_ITS_GET_CLASS(s);

    c->parent_reset(dev);

    /* Quiescent bit reset to 1 */
    s->ctlr = FIELD_DP32(s->ctlr, GITS_CTLR, QUIESCENT, 1);

    /*
     * setting GITS_BASER0.Type = 0b001 (Device)
     *         GITS_BASER1.Type = 0b100 (Collection Table)
     *         GITS_BASER<n>.Type,where n = 3 to 7 are 0b00 (Unimplemented)
     *         GITS_BASER<0,1>.Page_Size = 64KB
     * and default translation table entry size to 16 bytes
     */
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, TYPE,
                             GITS_BASER_TYPE_DEVICE);
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, PAGESIZE,
                             GITS_BASER_PAGESIZE_64K);
    /* ENTRYSIZE fields are encoded as size-in-bytes minus one */
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, ENTRYSIZE,
                             GITS_DTE_SIZE - 1);

    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, TYPE,
                             GITS_BASER_TYPE_COLLECTION);
    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, PAGESIZE,
                             GITS_BASER_PAGESIZE_64K);
    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, ENTRYSIZE,
                             GITS_CTE_SIZE - 1);
}
1298 
1299 static void gicv3_its_post_load(GICv3ITSState *s)
1300 {
1301     if (s->ctlr & ITS_CTLR_ENABLED) {
1302         extract_table_params(s);
1303         extract_cmdq_params(s);
1304     }
1305 }
1306 
/* QOM properties: the mandatory link to the parent GICv3 device */
static Property gicv3_its_props[] = {
    DEFINE_PROP_LINK("parent-gicv3", GICv3ITSState, gicv3, "arm-gicv3",
                     GICv3State *),
    DEFINE_PROP_END_OF_LIST(),
};
1312 
/*
 * Class init: wire up realize, properties, the reset handler (saving
 * the parent's reset so gicv3_its_reset can chain to it), and the
 * migration post-load hook.
 */
static void gicv3_its_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    GICv3ITSClass *ic = ARM_GICV3_ITS_CLASS(klass);
    GICv3ITSCommonClass *icc = ARM_GICV3_ITS_COMMON_CLASS(klass);

    dc->realize = gicv3_arm_its_realize;
    device_class_set_props(dc, gicv3_its_props);
    device_class_set_parent_reset(dc, gicv3_its_reset, &ic->parent_reset);
    icc->post_load = gicv3_its_post_load;
}
1324 
/* QOM type registration record for the emulated ITS */
static const TypeInfo gicv3_its_info = {
    .name = TYPE_ARM_GICV3_ITS,
    .parent = TYPE_ARM_GICV3_ITS_COMMON,
    .instance_size = sizeof(GICv3ITSState),
    .class_init = gicv3_its_class_init,
    .class_size = sizeof(GICv3ITSClass),
};
1332 
/* Register the ITS type with QOM at module init time */
static void gicv3_its_register_types(void)
{
    type_register_static(&gicv3_its_info);
}

type_init(gicv3_its_register_types)
1339