xref: /openbmc/qemu/hw/intc/arm_gicv3_its.c (revision 0241f7316073f5e66b560195c36e719e369947d0)
1 /*
2  * ITS emulation for a GICv3-based system
3  *
4  * Copyright Linaro.org 2021
5  *
6  * Authors:
7  *  Shashi Mallela <shashi.mallela@linaro.org>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2 or (at your
10  * option) any later version.  See the COPYING file in the top-level directory.
11  *
12  */
13 
14 #include "qemu/osdep.h"
15 #include "qemu/log.h"
16 #include "hw/qdev-properties.h"
17 #include "hw/intc/arm_gicv3_its_common.h"
18 #include "gicv3_internal.h"
19 #include "qom/object.h"
20 #include "qapi/error.h"
21 
22 typedef struct GICv3ITSClass GICv3ITSClass;
23 /* This is reusing the GICv3ITSState typedef from ARM_GICV3_ITS_COMMON */
24 DECLARE_OBJ_CHECKERS(GICv3ITSState, GICv3ITSClass,
25                      ARM_GICV3_ITS, TYPE_ARM_GICV3_ITS)
26 
27 struct GICv3ITSClass {
28     GICv3ITSCommonClass parent_class;
29     void (*parent_reset)(DeviceState *dev);
30 };
31 
32 /*
33  * This is an internal enum used to distinguish between an LPI triggered
34  * via the command queue and an LPI triggered via a GITS_TRANSLATER write.
35  */
36 typedef enum ItsCmdType {
37     NONE = 0, /* internal indication for GITS_TRANSLATER write */
38     CLEAR = 1,
39     DISCARD = 2,
40     INTERRUPT = 3,
41 } ItsCmdType;
42 
43 typedef struct {
44     uint32_t iteh;
45     uint64_t itel;
46 } IteEntry;
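/*
 * Split view of an Interrupt Translation Entry as used by this
 * implementation: 'itel' carries the valid bit, interrupt type, INTID and
 * doorbell fields, and 'iteh' carries the ICID. Each event is allotted
 * 12 bytes in the guest's ITT by get_ite() and update_ite() below.
 */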
47 
48 /*
49  * The ITS spec permits a range of CONSTRAINED UNPREDICTABLE options
50  * if a command parameter is not correct. These include both "stall
51  * processing of the command queue" and "ignore this command, and
52  * keep processing the queue". In our implementation we choose that
53  * memory transaction errors reading the command packet provoke a
54  * stall, but errors in parameters cause us to ignore the command
55  * and continue processing.
56  * The process_* functions which handle individual ITS commands all
57  * return an ItsCmdResult which tells process_cmdq() whether it should
58  * stall or keep going.
59  */
60 typedef enum ItsCmdResult {
61     CMD_STALL = 0,
62     CMD_CONTINUE = 1,
63 } ItsCmdResult;
64 
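/*
 * Extract the physical base address from a GITS_BASER value. For 4K and
 * 16K pages the address lives in the PHYADDR field (bits [47:12]); for
 * 64K pages the low field supplies bits [47:16] and, per the GITS_BASER
 * register layout, bits [15:12] of the register hold address bits [51:48].
 */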
65 static uint64_t baser_base_addr(uint64_t value, uint32_t page_sz)
66 {
67     uint64_t result = 0;
68 
69     switch (page_sz) {
70     case GITS_PAGE_SIZE_4K:
71     case GITS_PAGE_SIZE_16K:
72         result = FIELD_EX64(value, GITS_BASER, PHYADDR) << 12;
73         break;
74 
75     case GITS_PAGE_SIZE_64K:
76         result = FIELD_EX64(value, GITS_BASER, PHYADDRL_64K) << 16;
77         result |= FIELD_EX64(value, GITS_BASER, PHYADDRH_64K) << 48;
78         break;
79 
80     default:
81         break;
82     }
83     return result;
84 }
85 
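/*
 * Look up the Collection Table Entry for an ICID. For a two-level table
 * the ICID first indexes the level-1 table (one 8-byte entry per level-2
 * page); a valid L1 entry then points at the level-2 page holding the CTE
 * itself. Returns the CTE's VALID bit; 'res' reports any memory
 * transaction error encountered during the walk.
 */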
86 static bool get_cte(GICv3ITSState *s, uint16_t icid, uint64_t *cte,
87                     MemTxResult *res)
88 {
89     AddressSpace *as = &s->gicv3->dma_as;
90     uint64_t l2t_addr;
91     uint64_t value;
92     bool valid_l2t;
93     uint32_t l2t_id;
94     uint32_t num_l2_entries;
95 
96     if (s->ct.indirect) {
97         l2t_id = icid / (s->ct.page_sz / L1TABLE_ENTRY_SIZE);
98 
99         value = address_space_ldq_le(as,
100                                      s->ct.base_addr +
101                                      (l2t_id * L1TABLE_ENTRY_SIZE),
102                                      MEMTXATTRS_UNSPECIFIED, res);
103 
104         if (*res == MEMTX_OK) {
105             valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;
106 
107             if (valid_l2t) {
108                 num_l2_entries = s->ct.page_sz / s->ct.entry_sz;
109 
110                 l2t_addr = value & ((1ULL << 51) - 1);
111 
112                 *cte = address_space_ldq_le(as, l2t_addr +
113                                     ((icid % num_l2_entries) * GITS_CTE_SIZE),
114                                     MEMTXATTRS_UNSPECIFIED, res);
115             }
116         }
117     } else {
118         /* Flat level table */
119         *cte = address_space_ldq_le(as, s->ct.base_addr +
120                                     (icid * GITS_CTE_SIZE),
121                                     MEMTXATTRS_UNSPECIFIED, res);
122     }
123 
124     return FIELD_EX64(*cte, CTE, VALID);
125 }
126 
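/*
 * Write an Interrupt Translation Entry into the ITT of the device
 * described by 'dte'. The ITT base is the DTE's ITTADDR field shifted up
 * into a 256-byte-aligned address; returns false if the store hits a
 * memory transaction error.
 */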
127 static bool update_ite(GICv3ITSState *s, uint32_t eventid, uint64_t dte,
128                        IteEntry ite)
129 {
130     AddressSpace *as = &s->gicv3->dma_as;
131     uint64_t itt_addr;
132     MemTxResult res = MEMTX_OK;
133 
134     itt_addr = FIELD_EX64(dte, DTE, ITTADDR);
135     itt_addr <<= ITTADDR_SHIFT; /* 256 byte aligned */
136 
137     address_space_stq_le(as, itt_addr + (eventid * (sizeof(uint64_t) +
138                          sizeof(uint32_t))), ite.itel, MEMTXATTRS_UNSPECIFIED,
139                          &res);
140 
141     if (res == MEMTX_OK) {
142         address_space_stl_le(as, itt_addr + (eventid * (sizeof(uint64_t) +
143                              sizeof(uint32_t))) + sizeof(uint32_t), ite.iteh,
144                              MEMTXATTRS_UNSPECIFIED, &res);
145     }
146     if (res != MEMTX_OK) {
147         return false;
148     } else {
149         return true;
150     }
151 }
152 
153 static bool get_ite(GICv3ITSState *s, uint32_t eventid, uint64_t dte,
154                     uint16_t *icid, uint32_t *pIntid, MemTxResult *res)
155 {
156     AddressSpace *as = &s->gicv3->dma_as;
157     uint64_t itt_addr;
158     bool status = false;
159     IteEntry ite = {};
160 
161     itt_addr = FIELD_EX64(dte, DTE, ITTADDR);
162     itt_addr <<= ITTADDR_SHIFT; /* 256 byte aligned */
163 
164     ite.itel = address_space_ldq_le(as, itt_addr +
165                                     (eventid * (sizeof(uint64_t) +
166                                     sizeof(uint32_t))), MEMTXATTRS_UNSPECIFIED,
167                                     res);
168 
169     if (*res == MEMTX_OK) {
170         ite.iteh = address_space_ldl_le(as, itt_addr +
171                                         (eventid * (sizeof(uint64_t) +
172                                         sizeof(uint32_t))) + sizeof(uint32_t),
173                                         MEMTXATTRS_UNSPECIFIED, res);
174 
175         if (*res == MEMTX_OK) {
176             if (FIELD_EX64(ite.itel, ITE_L, VALID)) {
177                 int inttype = FIELD_EX64(ite.itel, ITE_L, INTTYPE);
178                 if (inttype == ITE_INTTYPE_PHYSICAL) {
179                     *pIntid = FIELD_EX64(ite.itel, ITE_L, INTID);
180                     *icid = FIELD_EX32(ite.iteh, ITE_H, ICID);
181                     status = true;
182                 }
183             }
184         }
185     }
186     return status;
187 }
188 
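/*
 * Look up the Device Table Entry for a DeviceID. The walk mirrors
 * get_cte() above, using a level-1/level-2 lookup when the device table
 * is configured as indirect. The raw 64-bit DTE is returned and the
 * caller checks its VALID bit.
 */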
189 static uint64_t get_dte(GICv3ITSState *s, uint32_t devid, MemTxResult *res)
190 {
191     AddressSpace *as = &s->gicv3->dma_as;
192     uint64_t l2t_addr;
193     uint64_t value;
194     bool valid_l2t;
195     uint32_t l2t_id;
196     uint32_t num_l2_entries;
197 
198     if (s->dt.indirect) {
199         l2t_id = devid / (s->dt.page_sz / L1TABLE_ENTRY_SIZE);
200 
201         value = address_space_ldq_le(as,
202                                      s->dt.base_addr +
203                                      (l2t_id * L1TABLE_ENTRY_SIZE),
204                                      MEMTXATTRS_UNSPECIFIED, res);
205 
206         if (*res == MEMTX_OK) {
207             valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;
208 
209             if (valid_l2t) {
210                 num_l2_entries = s->dt.page_sz / s->dt.entry_sz;
211 
212                 l2t_addr = value & ((1ULL << 51) - 1);
213 
214                 value = address_space_ldq_le(as, l2t_addr +
215                                    ((devid % num_l2_entries) * GITS_DTE_SIZE),
216                                    MEMTXATTRS_UNSPECIFIED, res);
217             }
218         }
219     } else {
220         /* Flat level table */
221         value = address_space_ldq_le(as, s->dt.base_addr +
222                                      (devid * GITS_DTE_SIZE),
223                                      MEMTXATTRS_UNSPECIFIED, res);
224     }
225 
226     return value;
227 }
228 
229 /*
230  * This function handles the processing of the following commands, based
231  * on the ItsCmdType parameter passed:
232  * 1. triggering of LPI interrupt translation via the ITS INT command
233  * 2. triggering of LPI interrupt translation via a GITS_TRANSLATER write
234  * 3. handling of the ITS CLEAR command
235  * 4. handling of the ITS DISCARD command
236  */
237 static ItsCmdResult process_its_cmd(GICv3ITSState *s, uint64_t value,
238                                     uint32_t offset, ItsCmdType cmd)
239 {
240     AddressSpace *as = &s->gicv3->dma_as;
241     uint32_t devid, eventid;
242     MemTxResult res = MEMTX_OK;
243     bool dte_valid;
244     uint64_t dte = 0;
245     uint64_t num_eventids;
246     uint16_t icid = 0;
247     uint32_t pIntid = 0;
248     bool ite_valid = false;
249     uint64_t cte = 0;
250     bool cte_valid = false;
251     uint64_t rdbase;
252 
253     if (cmd == NONE) {
254         devid = offset;
255     } else {
256         devid = ((value & DEVID_MASK) >> DEVID_SHIFT);
257 
258         offset += NUM_BYTES_IN_DW;
259         value = address_space_ldq_le(as, s->cq.base_addr + offset,
260                                      MEMTXATTRS_UNSPECIFIED, &res);
261     }
262 
263     if (res != MEMTX_OK) {
264         return CMD_STALL;
265     }
266 
267     eventid = (value & EVENTID_MASK);
268 
269     dte = get_dte(s, devid, &res);
270 
271     if (res != MEMTX_OK) {
272         return CMD_STALL;
273     }
274     dte_valid = FIELD_EX64(dte, DTE, VALID);
275 
276     if (!dte_valid) {
277         qemu_log_mask(LOG_GUEST_ERROR,
278                       "%s: invalid command attributes: "
279                       "invalid dte: %"PRIx64" for %d\n",
280                       __func__, dte, devid);
281         return CMD_CONTINUE;
282     }
283 
284     num_eventids = 1ULL << (FIELD_EX64(dte, DTE, SIZE) + 1);
285 
286     ite_valid = get_ite(s, eventid, dte, &icid, &pIntid, &res);
287     if (res != MEMTX_OK) {
288         return CMD_STALL;
289     }
290 
291     if (!ite_valid) {
292         qemu_log_mask(LOG_GUEST_ERROR,
293                       "%s: invalid command attributes: invalid ITE\n",
294                       __func__);
295         return CMD_CONTINUE;
296     }
297 
298     cte_valid = get_cte(s, icid, &cte, &res);
299     if (res != MEMTX_OK) {
300         return CMD_STALL;
301     }
302     if (!cte_valid) {
303         qemu_log_mask(LOG_GUEST_ERROR,
304                       "%s: invalid command attributes: "
305                       "invalid cte: %"PRIx64"\n",
306                       __func__, cte);
307         return CMD_CONTINUE;
308     }
309 
310     if (devid >= s->dt.num_ids) {
311         qemu_log_mask(LOG_GUEST_ERROR,
312                       "%s: invalid command attributes: devid %d >= %d\n",
313                       __func__, devid, s->dt.num_ids);
314         return CMD_CONTINUE;
315     }
316     if (eventid >= num_eventids) {
317         qemu_log_mask(LOG_GUEST_ERROR,
318                       "%s: invalid command attributes: eventid %d >= %"
319                       PRIu64 "\n",
320                       __func__, eventid, num_eventids);
321         return CMD_CONTINUE;
322     }
323 
324     /*
325      * The current implementation only supports rdbase == procnum;
326      * the rdbase physical address form is therefore ignored.
327      */
328     rdbase = FIELD_EX64(cte, CTE, RDBASE);
329 
330     if (rdbase >= s->gicv3->num_cpu) {
331         return CMD_CONTINUE;
332     }
333 
334     if ((cmd == CLEAR) || (cmd == DISCARD)) {
335         gicv3_redist_process_lpi(&s->gicv3->cpu[rdbase], pIntid, 0);
336     } else {
337         gicv3_redist_process_lpi(&s->gicv3->cpu[rdbase], pIntid, 1);
338     }
339 
340     if (cmd == DISCARD) {
341         IteEntry ite = {};
342         /* remove mapping from interrupt translation table */
343         return update_ite(s, eventid, dte, ite) ? CMD_CONTINUE : CMD_STALL;
344     }
345     return CMD_CONTINUE;
346 }
347 
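/*
 * Handle the MAPTI and MAPI commands. Both map a (DeviceID, EventID) pair
 * to a (physical INTID, ICID) pair; for MAPI (ignore_pInt == true) the
 * spec defines the EventID itself as the physical INTID, so no pINTID
 * field is read from the command.
 */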
348 static ItsCmdResult process_mapti(GICv3ITSState *s, uint64_t value,
349                                   uint32_t offset, bool ignore_pInt)
350 {
351     AddressSpace *as = &s->gicv3->dma_as;
352     uint32_t devid, eventid;
353     uint32_t pIntid = 0;
354     uint64_t num_eventids;
355     uint32_t num_intids;
356     bool dte_valid;
357     MemTxResult res = MEMTX_OK;
358     uint16_t icid = 0;
359     uint64_t dte = 0;
360     IteEntry ite = {};
361 
362     devid = ((value & DEVID_MASK) >> DEVID_SHIFT);
363     offset += NUM_BYTES_IN_DW;
364     value = address_space_ldq_le(as, s->cq.base_addr + offset,
365                                  MEMTXATTRS_UNSPECIFIED, &res);
366 
367     if (res != MEMTX_OK) {
368         return CMD_STALL;
369     }
370 
371     eventid = (value & EVENTID_MASK);
372 
373     if (ignore_pInt) {
374         pIntid = eventid;
375     } else {
376         pIntid = ((value & pINTID_MASK) >> pINTID_SHIFT);
377     }
378 
379     offset += NUM_BYTES_IN_DW;
380     value = address_space_ldq_le(as, s->cq.base_addr + offset,
381                                  MEMTXATTRS_UNSPECIFIED, &res);
382 
383     if (res != MEMTX_OK) {
384         return CMD_STALL;
385     }
386 
387     icid = value & ICID_MASK;
388 
389     dte = get_dte(s, devid, &res);
390 
391     if (res != MEMTX_OK) {
392         return CMD_STALL;
393     }
394     dte_valid = FIELD_EX64(dte, DTE, VALID);
395     num_eventids = 1ULL << (FIELD_EX64(dte, DTE, SIZE) + 1);
396     num_intids = 1ULL << (GICD_TYPER_IDBITS + 1);
397 
398     if ((devid >= s->dt.num_ids) || (icid >= s->ct.num_ids)
399             || !dte_valid || (eventid >= num_eventids) ||
400             (((pIntid < GICV3_LPI_INTID_START) || (pIntid >= num_intids)) &&
401              (pIntid != INTID_SPURIOUS))) {
402         qemu_log_mask(LOG_GUEST_ERROR,
403                       "%s: invalid command attributes "
404                       "devid %d or icid %d or eventid %d or pIntid %d or "
405                       "unmapped dte %d\n", __func__, devid, icid, eventid,
406                       pIntid, dte_valid);
407         /*
408          * in this implementation, in case of error
409          * we ignore this command and move on to the next
410          * command in the queue
411          */
412         return CMD_CONTINUE;
413     }
414 
415     /* add ite entry to interrupt translation table */
416     ite.itel = FIELD_DP64(ite.itel, ITE_L, VALID, dte_valid);
417     ite.itel = FIELD_DP64(ite.itel, ITE_L, INTTYPE, ITE_INTTYPE_PHYSICAL);
418     ite.itel = FIELD_DP64(ite.itel, ITE_L, INTID, pIntid);
419     ite.itel = FIELD_DP64(ite.itel, ITE_L, DOORBELL, INTID_SPURIOUS);
420     ite.iteh = FIELD_DP32(ite.iteh, ITE_H, ICID, icid);
421 
422     return update_ite(s, eventid, dte, ite) ? CMD_CONTINUE : CMD_STALL;
423 }
424 
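/*
 * Write (or clear, if !valid) a Collection Table Entry. In this
 * implementation a CTE is simply a VALID bit plus an RDBASE field holding
 * the target redistributor's processor number.
 */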
425 static bool update_cte(GICv3ITSState *s, uint16_t icid, bool valid,
426                        uint64_t rdbase)
427 {
428     AddressSpace *as = &s->gicv3->dma_as;
429     uint64_t value;
430     uint64_t l2t_addr;
431     bool valid_l2t;
432     uint32_t l2t_id;
433     uint32_t num_l2_entries;
434     uint64_t cte = 0;
435     MemTxResult res = MEMTX_OK;
436 
437     if (!s->ct.valid) {
438         return true;
439     }
440 
441     if (valid) {
442         /* add mapping entry to collection table */
443         cte = FIELD_DP64(cte, CTE, VALID, 1);
444         cte = FIELD_DP64(cte, CTE, RDBASE, rdbase);
445     }
446 
447     /*
448      * The specification defines the format of level 1 entries of a
449      * 2-level table, but the format of level 2 entries and the format
450      * of flat-mapped tables is IMPDEF.
451      */
452     if (s->ct.indirect) {
453         l2t_id = icid / (s->ct.page_sz / L1TABLE_ENTRY_SIZE);
454 
455         value = address_space_ldq_le(as,
456                                      s->ct.base_addr +
457                                      (l2t_id * L1TABLE_ENTRY_SIZE),
458                                      MEMTXATTRS_UNSPECIFIED, &res);
459 
460         if (res != MEMTX_OK) {
461             return false;
462         }
463 
464         valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;
465 
466         if (valid_l2t) {
467             num_l2_entries = s->ct.page_sz / s->ct.entry_sz;
468 
469             l2t_addr = value & ((1ULL << 51) - 1);
470 
471             address_space_stq_le(as, l2t_addr +
472                                  ((icid % num_l2_entries) * GITS_CTE_SIZE),
473                                  cte, MEMTXATTRS_UNSPECIFIED, &res);
474         }
475     } else {
476         /* Flat level table */
477         address_space_stq_le(as, s->ct.base_addr + (icid * GITS_CTE_SIZE),
478                              cte, MEMTXATTRS_UNSPECIFIED, &res);
479     }
480     if (res != MEMTX_OK) {
481         return false;
482     } else {
483         return true;
484     }
485 }
486 
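/*
 * Handle the MAPC command, which maps a collection (ICID) to a target
 * redistributor (RDbase). The ICID, RDbase and Valid bit all live in the
 * third doubleword of the command, which is why two doublewords are
 * skipped before reading.
 */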
487 static ItsCmdResult process_mapc(GICv3ITSState *s, uint32_t offset)
488 {
489     AddressSpace *as = &s->gicv3->dma_as;
490     uint16_t icid;
491     uint64_t rdbase;
492     bool valid;
493     MemTxResult res = MEMTX_OK;
494     ItsCmdResult result = CMD_STALL;
495     uint64_t value;
496 
497     offset += NUM_BYTES_IN_DW;
498     offset += NUM_BYTES_IN_DW;
499 
500     value = address_space_ldq_le(as, s->cq.base_addr + offset,
501                                  MEMTXATTRS_UNSPECIFIED, &res);
502 
503     if (res != MEMTX_OK) {
504         return result;
505     }
506 
507     icid = value & ICID_MASK;
508 
509     rdbase = (value & R_MAPC_RDBASE_MASK) >> R_MAPC_RDBASE_SHIFT;
510     rdbase &= RDBASE_PROCNUM_MASK;
511 
512     valid = (value & CMD_FIELD_VALID_MASK);
513 
514     if ((icid >= s->ct.num_ids) || (rdbase >= s->gicv3->num_cpu)) {
515         qemu_log_mask(LOG_GUEST_ERROR,
516                       "ITS MAPC: invalid collection table attributes "
517                       "icid %d rdbase %" PRIu64 "\n", icid, rdbase);
518         /*
519          * in this implementation, in case of error
520          * we ignore this command and move on to the next
521          * command in the queue
522          */
523     } else {
524         result = update_cte(s, icid, valid, rdbase) ? CMD_CONTINUE : CMD_STALL;
525     }
526 
527     return result;
528 }
529 
530 static bool update_dte(GICv3ITSState *s, uint32_t devid, bool valid,
531                        uint8_t size, uint64_t itt_addr)
532 {
533     AddressSpace *as = &s->gicv3->dma_as;
534     uint64_t value;
535     uint64_t l2t_addr;
536     bool valid_l2t;
537     uint32_t l2t_id;
538     uint32_t num_l2_entries;
539     uint64_t dte = 0;
540     MemTxResult res = MEMTX_OK;
541 
542     if (s->dt.valid) {
543         if (valid) {
544             /* add mapping entry to device table */
545             dte = FIELD_DP64(dte, DTE, VALID, 1);
546             dte = FIELD_DP64(dte, DTE, SIZE, size);
547             dte = FIELD_DP64(dte, DTE, ITTADDR, itt_addr);
548         }
549     } else {
550         return true;
551     }
552 
553     /*
554      * The specification defines the format of level 1 entries of a
555      * 2-level table, but the format of level 2 entries and the format
556      * of flat-mapped tables is IMPDEF.
557      */
558     if (s->dt.indirect) {
559         l2t_id = devid / (s->dt.page_sz / L1TABLE_ENTRY_SIZE);
560 
561         value = address_space_ldq_le(as,
562                                      s->dt.base_addr +
563                                      (l2t_id * L1TABLE_ENTRY_SIZE),
564                                      MEMTXATTRS_UNSPECIFIED, &res);
565 
566         if (res != MEMTX_OK) {
567             return false;
568         }
569 
570         valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;
571 
572         if (valid_l2t) {
573             num_l2_entries = s->dt.page_sz / s->dt.entry_sz;
574 
575             l2t_addr = value & ((1ULL << 51) - 1);
576 
577             address_space_stq_le(as, l2t_addr +
578                                  ((devid % num_l2_entries) * GITS_DTE_SIZE),
579                                  dte, MEMTXATTRS_UNSPECIFIED, &res);
580         }
581     } else {
582         /* Flat level table */
583         address_space_stq_le(as, s->dt.base_addr + (devid * GITS_DTE_SIZE),
584                              dte, MEMTXATTRS_UNSPECIFIED, &res);
585     }
586     if (res != MEMTX_OK) {
587         return false;
588     } else {
589         return true;
590     }
591 }
592 
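/*
 * Handle the MAPD command, which maps a DeviceID to an ITT. The command
 * supplies the number of EventID bits (Size, in the second doubleword)
 * and the ITT base address plus Valid bit (in the third doubleword).
 */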
593 static ItsCmdResult process_mapd(GICv3ITSState *s, uint64_t value,
594                                  uint32_t offset)
595 {
596     AddressSpace *as = &s->gicv3->dma_as;
597     uint32_t devid;
598     uint8_t size;
599     uint64_t itt_addr;
600     bool valid;
601     MemTxResult res = MEMTX_OK;
602     ItsCmdResult result = CMD_STALL;
603 
604     devid = ((value & DEVID_MASK) >> DEVID_SHIFT);
605 
606     offset += NUM_BYTES_IN_DW;
607     value = address_space_ldq_le(as, s->cq.base_addr + offset,
608                                  MEMTXATTRS_UNSPECIFIED, &res);
609 
610     if (res != MEMTX_OK) {
611         return result;
612     }
613 
614     size = (value & SIZE_MASK);
615 
616     offset += NUM_BYTES_IN_DW;
617     value = address_space_ldq_le(as, s->cq.base_addr + offset,
618                                  MEMTXATTRS_UNSPECIFIED, &res);
619 
620     if (res != MEMTX_OK) {
621         return result;
622     }
623 
624     itt_addr = (value & ITTADDR_MASK) >> ITTADDR_SHIFT;
625 
626     valid = (value & CMD_FIELD_VALID_MASK);
627 
628     if ((devid >= s->dt.num_ids) ||
629         (size > FIELD_EX64(s->typer, GITS_TYPER, IDBITS))) {
630         qemu_log_mask(LOG_GUEST_ERROR,
631                       "ITS MAPD: invalid device table attributes "
632                       "devid %d or size %d\n", devid, size);
633         /*
634          * in this implementation, in case of error
635          * we ignore this command and move on to the next
636          * command in the queue
637          */
638     } else {
639         result = update_dte(s, devid, valid, size, itt_addr) ? CMD_CONTINUE : CMD_STALL;
640     }
641 
642     return result;
643 }
644 
645 /*
646  * The current implementation processes commands synchronously, blocking
647  * until the command queue has been drained (or a command stalls).
648  */
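/*
 * Each command occupies one GITS_CMDQ_ENTRY_SIZE (32-byte) slot in the
 * guest's command queue; GITS_CREADR and GITS_CWRITER hold the read and
 * write positions in units of queue entries.
 */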
649 static void process_cmdq(GICv3ITSState *s)
650 {
651     uint32_t wr_offset = 0;
652     uint32_t rd_offset = 0;
653     uint32_t cq_offset = 0;
654     uint64_t data;
655     AddressSpace *as = &s->gicv3->dma_as;
656     MemTxResult res = MEMTX_OK;
657     uint8_t cmd;
658     int i;
659 
660     if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
661         return;
662     }
663 
664     wr_offset = FIELD_EX64(s->cwriter, GITS_CWRITER, OFFSET);
665 
666     if (wr_offset >= s->cq.num_entries) {
667         qemu_log_mask(LOG_GUEST_ERROR,
668                       "%s: invalid write offset "
669                       "%d\n", __func__, wr_offset);
670         return;
671     }
672 
673     rd_offset = FIELD_EX64(s->creadr, GITS_CREADR, OFFSET);
674 
675     if (rd_offset >= s->cq.num_entries) {
676         qemu_log_mask(LOG_GUEST_ERROR,
677                       "%s: invalid read offset "
678                       "%d\n", __func__, rd_offset);
679         return;
680     }
681 
682     while (wr_offset != rd_offset) {
683         ItsCmdResult result = CMD_CONTINUE;
684 
685         cq_offset = (rd_offset * GITS_CMDQ_ENTRY_SIZE);
686         data = address_space_ldq_le(as, s->cq.base_addr + cq_offset,
687                                     MEMTXATTRS_UNSPECIFIED, &res);
688         if (res != MEMTX_OK) {
689             s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
690             qemu_log_mask(LOG_GUEST_ERROR,
691                           "%s: could not read command at 0x%" PRIx64 "\n",
692                           __func__, s->cq.base_addr + cq_offset);
693             break;
694         }
695 
696         cmd = (data & CMD_MASK);
697 
698         switch (cmd) {
699         case GITS_CMD_INT:
700             result = process_its_cmd(s, data, cq_offset, INTERRUPT);
701             break;
702         case GITS_CMD_CLEAR:
703             result = process_its_cmd(s, data, cq_offset, CLEAR);
704             break;
705         case GITS_CMD_SYNC:
706             /*
707              * The current implementation makes a blocking synchronous call
708              * for every command issued earlier, so the internal state is
709              * already consistent by the time a SYNC command is executed;
710              * no further processing is required for it.
711              */
712             break;
713         case GITS_CMD_MAPD:
714             result = process_mapd(s, data, cq_offset);
715             break;
716         case GITS_CMD_MAPC:
717             result = process_mapc(s, cq_offset);
718             break;
719         case GITS_CMD_MAPTI:
720             result = process_mapti(s, data, cq_offset, false);
721             break;
722         case GITS_CMD_MAPI:
723             result = process_mapti(s, data, cq_offset, true);
724             break;
725         case GITS_CMD_DISCARD:
726             result = process_its_cmd(s, data, cq_offset, DISCARD);
727             break;
728         case GITS_CMD_INV:
729         case GITS_CMD_INVALL:
730             /*
731              * The current implementation doesn't cache any ITS tables,
732              * only the calculated LPI priority information. We simply
733              * trigger an LPI priority re-calculation to stay in sync
734              * with LPI configuration table or pending table changes.
735              */
736             for (i = 0; i < s->gicv3->num_cpu; i++) {
737                 gicv3_redist_update_lpi(&s->gicv3->cpu[i]);
738             }
739             break;
740         default:
741             break;
742         }
743         if (result == CMD_CONTINUE) {
744             rd_offset++;
745             rd_offset %= s->cq.num_entries;
746             s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, OFFSET, rd_offset);
747         } else {
748             /* CMD_STALL */
749             s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
750             qemu_log_mask(LOG_GUEST_ERROR,
751                           "%s: 0x%x cmd processing failed, stalling\n",
752                           __func__, cmd);
753             break;
754         }
755     }
756 }
757 
758 /*
759  * This function extracts the ITS Device and Collection table specific
760  * parameters (like base_addr, size etc) from GITS_BASER register.
761  * It is called during ITS enable and also during post_load migration
762  */
763 static void extract_table_params(GICv3ITSState *s)
764 {
765     uint16_t num_pages = 0;
766     uint8_t  page_sz_type;
767     uint8_t type;
768     uint32_t page_sz = 0;
769     uint64_t value;
770 
771     for (int i = 0; i < 8; i++) {
772         TableDesc *td;
773         int idbits;
774 
775         value = s->baser[i];
776 
777         if (!value) {
778             continue;
779         }
780 
781         page_sz_type = FIELD_EX64(value, GITS_BASER, PAGESIZE);
782 
783         switch (page_sz_type) {
784         case 0:
785             page_sz = GITS_PAGE_SIZE_4K;
786             break;
787 
788         case 1:
789             page_sz = GITS_PAGE_SIZE_16K;
790             break;
791 
792         case 2:
793         case 3:
794             page_sz = GITS_PAGE_SIZE_64K;
795             break;
796 
797         default:
798             g_assert_not_reached();
799         }
800 
801         num_pages = FIELD_EX64(value, GITS_BASER, SIZE) + 1;
802 
803         type = FIELD_EX64(value, GITS_BASER, TYPE);
804 
805         switch (type) {
806         case GITS_BASER_TYPE_DEVICE:
807             td = &s->dt;
808             idbits = FIELD_EX64(s->typer, GITS_TYPER, DEVBITS) + 1;
809             break;
810         case GITS_BASER_TYPE_COLLECTION:
811             td = &s->ct;
812             if (FIELD_EX64(s->typer, GITS_TYPER, CIL)) {
813                 idbits = FIELD_EX64(s->typer, GITS_TYPER, CIDBITS) + 1;
814             } else {
815                 /* 16-bit CollectionId supported when CIL == 0 */
816                 idbits = 16;
817             }
818             break;
819         default:
820             /*
821              * GITS_BASER<n>.TYPE is read-only, so GITS_BASER_RO_MASK
822              * ensures we will only see type values corresponding to
823              * the values set up in gicv3_its_reset().
824              */
825             g_assert_not_reached();
826         }
827 
828         memset(td, 0, sizeof(*td));
829         td->valid = FIELD_EX64(value, GITS_BASER, VALID);
830         /*
831          * If GITS_BASER<n>.Valid is 0 for any <n> then we will not process
832          * interrupts. (GITS_TYPER.HCC is 0 for this implementation, so we
833          * do not have a special case where the GITS_BASER<n>.Valid bit is 0
834          * for the register corresponding to the Collection table but we
835          * still have to process interrupts using non-memory-backed
836          * Collection table entries.)
837          */
838         if (!td->valid) {
839             continue;
840         }
841         td->page_sz = page_sz;
842         td->indirect = FIELD_EX64(value, GITS_BASER, INDIRECT);
843         td->entry_sz = FIELD_EX64(value, GITS_BASER, ENTRYSIZE) + 1;
844         td->base_addr = baser_base_addr(value, page_sz);
845         if (!td->indirect) {
846             td->num_entries = (num_pages * page_sz) / td->entry_sz;
847         } else {
848             td->num_entries = (((num_pages * page_sz) /
849                                   L1TABLE_ENTRY_SIZE) *
850                                  (page_sz / td->entry_sz));
851         }
852         td->num_ids = 1ULL << idbits;
853     }
854 }
855 
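/*
 * Extract the command queue parameters from GITS_CBASER: the queue is
 * (SIZE + 1) 4KB pages starting at the physical address in the PHYADDR
 * field, giving 128 command entries per page.
 */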
856 static void extract_cmdq_params(GICv3ITSState *s)
857 {
858     uint16_t num_pages = 0;
859     uint64_t value = s->cbaser;
860 
861     num_pages = FIELD_EX64(value, GITS_CBASER, SIZE) + 1;
862 
863     memset(&s->cq, 0, sizeof(s->cq));
864     s->cq.valid = FIELD_EX64(value, GITS_CBASER, VALID);
865 
866     if (s->cq.valid) {
867         s->cq.num_entries = (num_pages * GITS_PAGE_SIZE_4K) /
868                              GITS_CMDQ_ENTRY_SIZE;
869         s->cq.base_addr = FIELD_EX64(value, GITS_CBASER, PHYADDR);
870         s->cq.base_addr <<= R_GITS_CBASER_PHYADDR_SHIFT;
871     }
872 }
873 
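/*
 * MMIO write handler for the GITS_TRANSLATER frame: an MSI write supplies
 * the EventID in the data payload, while the DeviceID is taken from the
 * bus master's requester ID in the transaction attributes.
 */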
874 static MemTxResult gicv3_its_translation_write(void *opaque, hwaddr offset,
875                                                uint64_t data, unsigned size,
876                                                MemTxAttrs attrs)
877 {
878     GICv3ITSState *s = (GICv3ITSState *)opaque;
879     bool result = true;
880     uint32_t devid = 0;
881 
882     switch (offset) {
883     case GITS_TRANSLATER:
884         if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
885             devid = attrs.requester_id;
886             result = process_its_cmd(s, data, devid, NONE);
887         }
888         break;
889     default:
890         break;
891     }
892 
893     if (result) {
894         return MEMTX_OK;
895     } else {
896         return MEMTX_ERROR;
897     }
898 }
899 
900 static bool its_writel(GICv3ITSState *s, hwaddr offset,
901                               uint64_t value, MemTxAttrs attrs)
902 {
903     bool result = true;
904     int index;
905 
906     switch (offset) {
907     case GITS_CTLR:
908         if (value & R_GITS_CTLR_ENABLED_MASK) {
909             s->ctlr |= R_GITS_CTLR_ENABLED_MASK;
910             extract_table_params(s);
911             extract_cmdq_params(s);
912             s->creadr = 0;
913             process_cmdq(s);
914         } else {
915             s->ctlr &= ~R_GITS_CTLR_ENABLED_MASK;
916         }
917         break;
918     case GITS_CBASER:
919         /*
920          * IMPDEF choice: GITS_CBASER register becomes RO if ITS is
921          *                 already enabled
922          */
923         if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
924             s->cbaser = deposit64(s->cbaser, 0, 32, value);
925             s->creadr = 0;
926             s->cwriter = s->creadr;
927         }
928         break;
929     case GITS_CBASER + 4:
930         /*
931          * IMPDEF choice: GITS_CBASER register becomes RO if ITS is
932          *                 already enabled
933          */
934         if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
935             s->cbaser = deposit64(s->cbaser, 32, 32, value);
936             s->creadr = 0;
937             s->cwriter = s->creadr;
938         }
939         break;
940     case GITS_CWRITER:
941         s->cwriter = deposit64(s->cwriter, 0, 32,
942                                (value & ~R_GITS_CWRITER_RETRY_MASK));
943         if (s->cwriter != s->creadr) {
944             process_cmdq(s);
945         }
946         break;
947     case GITS_CWRITER + 4:
948         s->cwriter = deposit64(s->cwriter, 32, 32, value);
949         break;
950     case GITS_CREADR:
951         if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
952             s->creadr = deposit64(s->creadr, 0, 32,
953                                   (value & ~R_GITS_CREADR_STALLED_MASK));
954         } else {
955             /* RO register, ignore the write */
956             qemu_log_mask(LOG_GUEST_ERROR,
957                           "%s: invalid guest write to RO register at offset "
958                           TARGET_FMT_plx "\n", __func__, offset);
959         }
960         break;
961     case GITS_CREADR + 4:
962         if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
963             s->creadr = deposit64(s->creadr, 32, 32, value);
964         } else {
965             /* RO register, ignore the write */
966             qemu_log_mask(LOG_GUEST_ERROR,
967                           "%s: invalid guest write to RO register at offset "
968                           TARGET_FMT_plx "\n", __func__, offset);
969         }
970         break;
971     case GITS_BASER ... GITS_BASER + 0x3f:
972         /*
973          * IMPDEF choice: GITS_BASERn register becomes RO if ITS is
974          *                 already enabled
975          */
976         if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
977             index = (offset - GITS_BASER) / 8;
978 
979             if (offset & 7) {
980                 value <<= 32;
981                 value &= ~GITS_BASER_RO_MASK;
982                 s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(0, 32);
983                 s->baser[index] |= value;
984             } else {
985                 value &= ~GITS_BASER_RO_MASK;
986                 s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(32, 32);
987                 s->baser[index] |= value;
988             }
989         }
990         break;
991     case GITS_IIDR:
992     case GITS_IDREGS ... GITS_IDREGS + 0x2f:
993         /* RO registers, ignore the write */
994         qemu_log_mask(LOG_GUEST_ERROR,
995                       "%s: invalid guest write to RO register at offset "
996                       TARGET_FMT_plx "\n", __func__, offset);
997         break;
998     default:
999         result = false;
1000         break;
1001     }
1002     return result;
1003 }
1004 
1005 static bool its_readl(GICv3ITSState *s, hwaddr offset,
1006                              uint64_t *data, MemTxAttrs attrs)
1007 {
1008     bool result = true;
1009     int index;
1010 
1011     switch (offset) {
1012     case GITS_CTLR:
1013         *data = s->ctlr;
1014         break;
1015     case GITS_IIDR:
1016         *data = gicv3_iidr();
1017         break;
1018     case GITS_IDREGS ... GITS_IDREGS + 0x2f:
1019         /* ID registers */
1020         *data = gicv3_idreg(offset - GITS_IDREGS);
1021         break;
1022     case GITS_TYPER:
1023         *data = extract64(s->typer, 0, 32);
1024         break;
1025     case GITS_TYPER + 4:
1026         *data = extract64(s->typer, 32, 32);
1027         break;
1028     case GITS_CBASER:
1029         *data = extract64(s->cbaser, 0, 32);
1030         break;
1031     case GITS_CBASER + 4:
1032         *data = extract64(s->cbaser, 32, 32);
1033         break;
1034     case GITS_CREADR:
1035         *data = extract64(s->creadr, 0, 32);
1036         break;
1037     case GITS_CREADR + 4:
1038         *data = extract64(s->creadr, 32, 32);
1039         break;
1040     case GITS_CWRITER:
1041         *data = extract64(s->cwriter, 0, 32);
1042         break;
1043     case GITS_CWRITER + 4:
1044         *data = extract64(s->cwriter, 32, 32);
1045         break;
1046     case GITS_BASER ... GITS_BASER + 0x3f:
1047         index = (offset - GITS_BASER) / 8;
1048         if (offset & 7) {
1049             *data = extract64(s->baser[index], 32, 32);
1050         } else {
1051             *data = extract64(s->baser[index], 0, 32);
1052         }
1053         break;
1054     default:
1055         result = false;
1056         break;
1057     }
1058     return result;
1059 }
1060 
1061 static bool its_writell(GICv3ITSState *s, hwaddr offset,
1062                                uint64_t value, MemTxAttrs attrs)
1063 {
1064     bool result = true;
1065     int index;
1066 
1067     switch (offset) {
1068     case GITS_BASER ... GITS_BASER + 0x3f:
1069         /*
1070          * IMPDEF choice: GITS_BASERn register becomes RO if ITS is
1071          *                 already enabled
1072          */
1073         if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
1074             index = (offset - GITS_BASER) / 8;
1075             s->baser[index] &= GITS_BASER_RO_MASK;
1076             s->baser[index] |= (value & ~GITS_BASER_RO_MASK);
1077         }
1078         break;
1079     case GITS_CBASER:
1080         /*
1081          * IMPDEF choice: GITS_CBASER register becomes RO if ITS is
1082          *                 already enabled
1083          */
1084         if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
1085             s->cbaser = value;
1086             s->creadr = 0;
1087             s->cwriter = s->creadr;
1088         }
1089         break;
1090     case GITS_CWRITER:
1091         s->cwriter = value & ~R_GITS_CWRITER_RETRY_MASK;
1092         if (s->cwriter != s->creadr) {
1093             process_cmdq(s);
1094         }
1095         break;
1096     case GITS_CREADR:
1097         if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
1098             s->creadr = value & ~R_GITS_CREADR_STALLED_MASK;
1099         } else {
1100             /* RO register, ignore the write */
1101             qemu_log_mask(LOG_GUEST_ERROR,
1102                           "%s: invalid guest write to RO register at offset "
1103                           TARGET_FMT_plx "\n", __func__, offset);
1104         }
1105         break;
1106     case GITS_TYPER:
1107         /* RO registers, ignore the write */
1108         qemu_log_mask(LOG_GUEST_ERROR,
1109                       "%s: invalid guest write to RO register at offset "
1110                       TARGET_FMT_plx "\n", __func__, offset);
1111         break;
1112     default:
1113         result = false;
1114         break;
1115     }
1116     return result;
1117 }
1118 
1119 static bool its_readll(GICv3ITSState *s, hwaddr offset,
1120                               uint64_t *data, MemTxAttrs attrs)
1121 {
1122     bool result = true;
1123     int index;
1124 
1125     switch (offset) {
1126     case GITS_TYPER:
1127         *data = s->typer;
1128         break;
1129     case GITS_BASER ... GITS_BASER + 0x3f:
1130         index = (offset - GITS_BASER) / 8;
1131         *data = s->baser[index];
1132         break;
1133     case GITS_CBASER:
1134         *data = s->cbaser;
1135         break;
1136     case GITS_CREADR:
1137         *data = s->creadr;
1138         break;
1139     case GITS_CWRITER:
1140         *data = s->cwriter;
1141         break;
1142     default:
1143         result = false;
1144         break;
1145     }
1146     return result;
1147 }
1148 
1149 static MemTxResult gicv3_its_read(void *opaque, hwaddr offset, uint64_t *data,
1150                                   unsigned size, MemTxAttrs attrs)
1151 {
1152     GICv3ITSState *s = (GICv3ITSState *)opaque;
1153     bool result;
1154 
1155     switch (size) {
1156     case 4:
1157         result = its_readl(s, offset, data, attrs);
1158         break;
1159     case 8:
1160         result = its_readll(s, offset, data, attrs);
1161         break;
1162     default:
1163         result = false;
1164         break;
1165     }
1166 
1167     if (!result) {
1168         qemu_log_mask(LOG_GUEST_ERROR,
1169                       "%s: invalid guest read at offset " TARGET_FMT_plx
1170                       " size %u\n", __func__, offset, size);
1171         /*
1172          * The spec requires that reserved registers are RAZ/WI;
1173          * so use false returns from leaf functions as a way to
1174          * trigger the guest-error logging but don't return it to
1175          * the caller, or we'll cause a spurious guest data abort.
1176          */
1177         *data = 0;
1178     }
1179     return MEMTX_OK;
1180 }
1181 
1182 static MemTxResult gicv3_its_write(void *opaque, hwaddr offset, uint64_t data,
1183                                    unsigned size, MemTxAttrs attrs)
1184 {
1185     GICv3ITSState *s = (GICv3ITSState *)opaque;
1186     bool result;
1187 
1188     switch (size) {
1189     case 4:
1190         result = its_writel(s, offset, data, attrs);
1191         break;
1192     case 8:
1193         result = its_writell(s, offset, data, attrs);
1194         break;
1195     default:
1196         result = false;
1197         break;
1198     }
1199 
1200     if (!result) {
1201         qemu_log_mask(LOG_GUEST_ERROR,
1202                       "%s: invalid guest write at offset " TARGET_FMT_plx
1203                       " size %u\n", __func__, offset, size);
1204         /*
1205          * The spec requires that reserved registers are RAZ/WI;
1206          * so use false returns from leaf functions as a way to
1207          * trigger the guest-error logging but don't return it to
1208          * the caller, or we'll cause a spurious guest data abort.
1209          */
1210     }
1211     return MEMTX_OK;
1212 }
1213 
1214 static const MemoryRegionOps gicv3_its_control_ops = {
1215     .read_with_attrs = gicv3_its_read,
1216     .write_with_attrs = gicv3_its_write,
1217     .valid.min_access_size = 4,
1218     .valid.max_access_size = 8,
1219     .impl.min_access_size = 4,
1220     .impl.max_access_size = 8,
1221     .endianness = DEVICE_NATIVE_ENDIAN,
1222 };
1223 
1224 static const MemoryRegionOps gicv3_its_translation_ops = {
1225     .write_with_attrs = gicv3_its_translation_write,
1226     .valid.min_access_size = 2,
1227     .valid.max_access_size = 4,
1228     .impl.min_access_size = 2,
1229     .impl.max_access_size = 4,
1230     .endianness = DEVICE_NATIVE_ENDIAN,
1231 };
1232 
1233 static void gicv3_arm_its_realize(DeviceState *dev, Error **errp)
1234 {
1235     GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
1236     int i;
1237 
1238     for (i = 0; i < s->gicv3->num_cpu; i++) {
1239         if (!(s->gicv3->cpu[i].gicr_typer & GICR_TYPER_PLPIS)) {
1240             error_setg(errp, "Physical LPI not supported by CPU %d", i);
1241             return;
1242         }
1243     }
1244 
1245     gicv3_its_init_mmio(s, &gicv3_its_control_ops, &gicv3_its_translation_ops);
1246 
1247     address_space_init(&s->gicv3->dma_as, s->gicv3->dma,
1248                        "gicv3-its-sysmem");
1249 
1250     /* set the ITS default features supported */
1251     s->typer = FIELD_DP64(s->typer, GITS_TYPER, PHYSICAL, 1);
1252     s->typer = FIELD_DP64(s->typer, GITS_TYPER, ITT_ENTRY_SIZE,
1253                           ITS_ITT_ENTRY_SIZE - 1);
1254     s->typer = FIELD_DP64(s->typer, GITS_TYPER, IDBITS, ITS_IDBITS);
1255     s->typer = FIELD_DP64(s->typer, GITS_TYPER, DEVBITS, ITS_DEVBITS);
1256     s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIL, 1);
1257     s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIDBITS, ITS_CIDBITS);
1258 }
1259 
1260 static void gicv3_its_reset(DeviceState *dev)
1261 {
1262     GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
1263     GICv3ITSClass *c = ARM_GICV3_ITS_GET_CLASS(s);
1264 
1265     c->parent_reset(dev);
1266 
1267     /* Quiescent bit reset to 1 */
1268     s->ctlr = FIELD_DP32(s->ctlr, GITS_CTLR, QUIESCENT, 1);
1269 
1270     /*
1271      * setting GITS_BASER0.Type = 0b001 (Device)
1272      *         GITS_BASER1.Type = 0b100 (Collection Table)
1273      *         GITS_BASER<n>.Type = 0b000 (Unimplemented) for n = 2 to 7
1274      *         GITS_BASER<0,1>.Page_Size = 64KB
1275      * and default translation table entry size to 16 bytes
1276      */
1277     s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, TYPE,
1278                              GITS_BASER_TYPE_DEVICE);
1279     s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, PAGESIZE,
1280                              GITS_BASER_PAGESIZE_64K);
1281     s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, ENTRYSIZE,
1282                              GITS_DTE_SIZE - 1);
1283 
1284     s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, TYPE,
1285                              GITS_BASER_TYPE_COLLECTION);
1286     s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, PAGESIZE,
1287                              GITS_BASER_PAGESIZE_64K);
1288     s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, ENTRYSIZE,
1289                              GITS_CTE_SIZE - 1);
1290 }
1291 
1292 static void gicv3_its_post_load(GICv3ITSState *s)
1293 {
1294     if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
1295         extract_table_params(s);
1296         extract_cmdq_params(s);
1297     }
1298 }
1299 
1300 static Property gicv3_its_props[] = {
1301     DEFINE_PROP_LINK("parent-gicv3", GICv3ITSState, gicv3, "arm-gicv3",
1302                      GICv3State *),
1303     DEFINE_PROP_END_OF_LIST(),
1304 };
1305 
1306 static void gicv3_its_class_init(ObjectClass *klass, void *data)
1307 {
1308     DeviceClass *dc = DEVICE_CLASS(klass);
1309     GICv3ITSClass *ic = ARM_GICV3_ITS_CLASS(klass);
1310     GICv3ITSCommonClass *icc = ARM_GICV3_ITS_COMMON_CLASS(klass);
1311 
1312     dc->realize = gicv3_arm_its_realize;
1313     device_class_set_props(dc, gicv3_its_props);
1314     device_class_set_parent_reset(dc, gicv3_its_reset, &ic->parent_reset);
1315     icc->post_load = gicv3_its_post_load;
1316 }
1317 
1318 static const TypeInfo gicv3_its_info = {
1319     .name = TYPE_ARM_GICV3_ITS,
1320     .parent = TYPE_ARM_GICV3_ITS_COMMON,
1321     .instance_size = sizeof(GICv3ITSState),
1322     .class_init = gicv3_its_class_init,
1323     .class_size = sizeof(GICv3ITSClass),
1324 };
1325 
1326 static void gicv3_its_register_types(void)
1327 {
1328     type_register_static(&gicv3_its_info);
1329 }
1330 
1331 type_init(gicv3_its_register_types)
1332