xref: /openbmc/qemu/hw/intc/arm_gicv3_its.c (revision f0b4b2a28c4ab26505f13f07da07190387f848a4)
1 /*
2  * ITS emulation for a GICv3-based system
3  *
4  * Copyright Linaro.org 2021
5  *
6  * Authors:
7  *  Shashi Mallela <shashi.mallela@linaro.org>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2 or (at your
10  * option) any later version.  See the COPYING file in the top-level directory.
11  *
12  */
13 
14 #include "qemu/osdep.h"
15 #include "qemu/log.h"
16 #include "hw/qdev-properties.h"
17 #include "hw/intc/arm_gicv3_its_common.h"
18 #include "gicv3_internal.h"
19 #include "qom/object.h"
20 #include "qapi/error.h"
21 
22 typedef struct GICv3ITSClass GICv3ITSClass;
23 /* This is reusing the GICv3ITSState typedef from ARM_GICV3_ITS_COMMON */
24 DECLARE_OBJ_CHECKERS(GICv3ITSState, GICv3ITSClass,
25                      ARM_GICV3_ITS, TYPE_ARM_GICV3_ITS)
26 
struct GICv3ITSClass {
    GICv3ITSCommonClass parent_class;
    /*
     * Saved copy of the common class's reset handler; presumably chained
     * from this class's own reset — confirm in class_init (not visible
     * in this chunk).
     */
    void (*parent_reset)(DeviceState *dev);
};
31 
32 /*
33  * This is an internal enum used to distinguish between LPI triggered
34  * via command queue and LPI triggered via gits_translater write.
35  */
typedef enum ItsCmdType {
    NONE = 0, /* internal indication for GITS_TRANSLATER write */
    CLEAR = 1,     /* ITS CLEAR command: clear pending state of an LPI */
    DISCARD = 2,   /* ITS DISCARD command: clear and unmap the event */
    INTERRUPT = 3, /* ITS INT command: set pending state of an LPI */
} ItsCmdType;
42 
/*
 * In-memory Interrupt Translation Entry, as read/written by
 * get_ite()/update_ite(): a 64-bit low word and a 32-bit high word.
 */
typedef struct {
    uint32_t iteh; /* high word: holds the ICID (see ITE_H fields) */
    uint64_t itel; /* low word: VALID, INTTYPE, INTID, DOORBELL (ITE_L fields) */
} IteEntry;
47 
48 static uint64_t baser_base_addr(uint64_t value, uint32_t page_sz)
49 {
50     uint64_t result = 0;
51 
52     switch (page_sz) {
53     case GITS_PAGE_SIZE_4K:
54     case GITS_PAGE_SIZE_16K:
55         result = FIELD_EX64(value, GITS_BASER, PHYADDR) << 12;
56         break;
57 
58     case GITS_PAGE_SIZE_64K:
59         result = FIELD_EX64(value, GITS_BASER, PHYADDRL_64K) << 16;
60         result |= FIELD_EX64(value, GITS_BASER, PHYADDRH_64K) << 48;
61         break;
62 
63     default:
64         break;
65     }
66     return result;
67 }
68 
69 static bool get_cte(GICv3ITSState *s, uint16_t icid, uint64_t *cte,
70                     MemTxResult *res)
71 {
72     AddressSpace *as = &s->gicv3->dma_as;
73     uint64_t l2t_addr;
74     uint64_t value;
75     bool valid_l2t;
76     uint32_t l2t_id;
77     uint32_t num_l2_entries;
78 
79     if (s->ct.indirect) {
80         l2t_id = icid / (s->ct.page_sz / L1TABLE_ENTRY_SIZE);
81 
82         value = address_space_ldq_le(as,
83                                      s->ct.base_addr +
84                                      (l2t_id * L1TABLE_ENTRY_SIZE),
85                                      MEMTXATTRS_UNSPECIFIED, res);
86 
87         if (*res == MEMTX_OK) {
88             valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;
89 
90             if (valid_l2t) {
91                 num_l2_entries = s->ct.page_sz / s->ct.entry_sz;
92 
93                 l2t_addr = value & ((1ULL << 51) - 1);
94 
95                 *cte =  address_space_ldq_le(as, l2t_addr +
96                                     ((icid % num_l2_entries) * GITS_CTE_SIZE),
97                                     MEMTXATTRS_UNSPECIFIED, res);
98            }
99        }
100     } else {
101         /* Flat level table */
102         *cte =  address_space_ldq_le(as, s->ct.base_addr +
103                                      (icid * GITS_CTE_SIZE),
104                                       MEMTXATTRS_UNSPECIFIED, res);
105     }
106 
107     return FIELD_EX64(*cte, CTE, VALID);
108 }
109 
110 static bool update_ite(GICv3ITSState *s, uint32_t eventid, uint64_t dte,
111                        IteEntry ite)
112 {
113     AddressSpace *as = &s->gicv3->dma_as;
114     uint64_t itt_addr;
115     MemTxResult res = MEMTX_OK;
116 
117     itt_addr = FIELD_EX64(dte, DTE, ITTADDR);
118     itt_addr <<= ITTADDR_SHIFT; /* 256 byte aligned */
119 
120     address_space_stq_le(as, itt_addr + (eventid * (sizeof(uint64_t) +
121                          sizeof(uint32_t))), ite.itel, MEMTXATTRS_UNSPECIFIED,
122                          &res);
123 
124     if (res == MEMTX_OK) {
125         address_space_stl_le(as, itt_addr + (eventid * (sizeof(uint64_t) +
126                              sizeof(uint32_t))) + sizeof(uint32_t), ite.iteh,
127                              MEMTXATTRS_UNSPECIFIED, &res);
128     }
129     if (res != MEMTX_OK) {
130         return false;
131     } else {
132         return true;
133     }
134 }
135 
136 static bool get_ite(GICv3ITSState *s, uint32_t eventid, uint64_t dte,
137                     uint16_t *icid, uint32_t *pIntid, MemTxResult *res)
138 {
139     AddressSpace *as = &s->gicv3->dma_as;
140     uint64_t itt_addr;
141     bool status = false;
142     IteEntry ite = {};
143 
144     itt_addr = FIELD_EX64(dte, DTE, ITTADDR);
145     itt_addr <<= ITTADDR_SHIFT; /* 256 byte aligned */
146 
147     ite.itel = address_space_ldq_le(as, itt_addr +
148                                     (eventid * (sizeof(uint64_t) +
149                                     sizeof(uint32_t))), MEMTXATTRS_UNSPECIFIED,
150                                     res);
151 
152     if (*res == MEMTX_OK) {
153         ite.iteh = address_space_ldl_le(as, itt_addr +
154                                         (eventid * (sizeof(uint64_t) +
155                                         sizeof(uint32_t))) + sizeof(uint32_t),
156                                         MEMTXATTRS_UNSPECIFIED, res);
157 
158         if (*res == MEMTX_OK) {
159             if (FIELD_EX64(ite.itel, ITE_L, VALID)) {
160                 int inttype = FIELD_EX64(ite.itel, ITE_L, INTTYPE);
161                 if (inttype == ITE_INTTYPE_PHYSICAL) {
162                     *pIntid = FIELD_EX64(ite.itel, ITE_L, INTID);
163                     *icid = FIELD_EX32(ite.iteh, ITE_H, ICID);
164                     status = true;
165                 }
166             }
167         }
168     }
169     return status;
170 }
171 
172 static uint64_t get_dte(GICv3ITSState *s, uint32_t devid, MemTxResult *res)
173 {
174     AddressSpace *as = &s->gicv3->dma_as;
175     uint64_t l2t_addr;
176     uint64_t value;
177     bool valid_l2t;
178     uint32_t l2t_id;
179     uint32_t num_l2_entries;
180 
181     if (s->dt.indirect) {
182         l2t_id = devid / (s->dt.page_sz / L1TABLE_ENTRY_SIZE);
183 
184         value = address_space_ldq_le(as,
185                                      s->dt.base_addr +
186                                      (l2t_id * L1TABLE_ENTRY_SIZE),
187                                      MEMTXATTRS_UNSPECIFIED, res);
188 
189         if (*res == MEMTX_OK) {
190             valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;
191 
192             if (valid_l2t) {
193                 num_l2_entries = s->dt.page_sz / s->dt.entry_sz;
194 
195                 l2t_addr = value & ((1ULL << 51) - 1);
196 
197                 value =  address_space_ldq_le(as, l2t_addr +
198                                    ((devid % num_l2_entries) * GITS_DTE_SIZE),
199                                    MEMTXATTRS_UNSPECIFIED, res);
200             }
201         }
202     } else {
203         /* Flat level table */
204         value = address_space_ldq_le(as, s->dt.base_addr +
205                                      (devid * GITS_DTE_SIZE),
206                                      MEMTXATTRS_UNSPECIFIED, res);
207     }
208 
209     return value;
210 }
211 
212 /*
213  * This function handles the processing of following commands based on
214  * the ItsCmdType parameter passed:-
215  * 1. triggering of lpi interrupt translation via ITS INT command
216  * 2. triggering of lpi interrupt translation via gits_translater register
217  * 3. handling of ITS CLEAR command
218  * 4. handling of ITS DISCARD command
219  */
220 static bool process_its_cmd(GICv3ITSState *s, uint64_t value, uint32_t offset,
221                             ItsCmdType cmd)
222 {
223     AddressSpace *as = &s->gicv3->dma_as;
224     uint32_t devid, eventid;
225     MemTxResult res = MEMTX_OK;
226     bool dte_valid;
227     uint64_t dte = 0;
228     uint64_t num_eventids;
229     uint16_t icid = 0;
230     uint32_t pIntid = 0;
231     bool ite_valid = false;
232     uint64_t cte = 0;
233     bool cte_valid = false;
234     bool result = false;
235     uint64_t rdbase;
236 
237     if (cmd == NONE) {
238         devid = offset;
239     } else {
240         devid = ((value & DEVID_MASK) >> DEVID_SHIFT);
241 
242         offset += NUM_BYTES_IN_DW;
243         value = address_space_ldq_le(as, s->cq.base_addr + offset,
244                                      MEMTXATTRS_UNSPECIFIED, &res);
245     }
246 
247     if (res != MEMTX_OK) {
248         return result;
249     }
250 
251     eventid = (value & EVENTID_MASK);
252 
253     dte = get_dte(s, devid, &res);
254 
255     if (res != MEMTX_OK) {
256         return result;
257     }
258     dte_valid = FIELD_EX64(dte, DTE, VALID);
259 
260     if (dte_valid) {
261         num_eventids = 1ULL << (FIELD_EX64(dte, DTE, SIZE) + 1);
262 
263         ite_valid = get_ite(s, eventid, dte, &icid, &pIntid, &res);
264 
265         if (res != MEMTX_OK) {
266             return result;
267         }
268 
269         if (ite_valid) {
270             cte_valid = get_cte(s, icid, &cte, &res);
271         }
272 
273         if (res != MEMTX_OK) {
274             return result;
275         }
276     } else {
277         qemu_log_mask(LOG_GUEST_ERROR,
278                       "%s: invalid command attributes: "
279                       "invalid dte: %"PRIx64" for %d (MEM_TX: %d)\n",
280                       __func__, dte, devid, res);
281         return result;
282     }
283 
284 
285     /*
286      * In this implementation, in case of guest errors we ignore the
287      * command and move onto the next command in the queue.
288      */
289     if (devid >= s->dt.num_ids) {
290         qemu_log_mask(LOG_GUEST_ERROR,
291                       "%s: invalid command attributes: devid %d>=%d",
292                       __func__, devid, s->dt.num_ids);
293 
294     } else if (!dte_valid || !ite_valid || !cte_valid) {
295         qemu_log_mask(LOG_GUEST_ERROR,
296                       "%s: invalid command attributes: "
297                       "dte: %s, ite: %s, cte: %s\n",
298                       __func__,
299                       dte_valid ? "valid" : "invalid",
300                       ite_valid ? "valid" : "invalid",
301                       cte_valid ? "valid" : "invalid");
302     } else if (eventid >= num_eventids) {
303         qemu_log_mask(LOG_GUEST_ERROR,
304                       "%s: invalid command attributes: eventid %d >= %"
305                       PRId64 "\n",
306                       __func__, eventid, num_eventids);
307     } else {
308         /*
309          * Current implementation only supports rdbase == procnum
310          * Hence rdbase physical address is ignored
311          */
312         rdbase = FIELD_EX64(cte, CTE, RDBASE);
313 
314         if (rdbase >= s->gicv3->num_cpu) {
315             return result;
316         }
317 
318         if ((cmd == CLEAR) || (cmd == DISCARD)) {
319             gicv3_redist_process_lpi(&s->gicv3->cpu[rdbase], pIntid, 0);
320         } else {
321             gicv3_redist_process_lpi(&s->gicv3->cpu[rdbase], pIntid, 1);
322         }
323 
324         if (cmd == DISCARD) {
325             IteEntry ite = {};
326             /* remove mapping from interrupt translation table */
327             result = update_ite(s, eventid, dte, ite);
328         }
329     }
330 
331     return result;
332 }
333 
334 static bool process_mapti(GICv3ITSState *s, uint64_t value, uint32_t offset,
335                           bool ignore_pInt)
336 {
337     AddressSpace *as = &s->gicv3->dma_as;
338     uint32_t devid, eventid;
339     uint32_t pIntid = 0;
340     uint64_t num_eventids;
341     uint32_t num_intids;
342     bool dte_valid;
343     MemTxResult res = MEMTX_OK;
344     uint16_t icid = 0;
345     uint64_t dte = 0;
346     bool result = false;
347 
348     devid = ((value & DEVID_MASK) >> DEVID_SHIFT);
349     offset += NUM_BYTES_IN_DW;
350     value = address_space_ldq_le(as, s->cq.base_addr + offset,
351                                  MEMTXATTRS_UNSPECIFIED, &res);
352 
353     if (res != MEMTX_OK) {
354         return result;
355     }
356 
357     eventid = (value & EVENTID_MASK);
358 
359     if (ignore_pInt) {
360         pIntid = eventid;
361     } else {
362         pIntid = ((value & pINTID_MASK) >> pINTID_SHIFT);
363     }
364 
365     offset += NUM_BYTES_IN_DW;
366     value = address_space_ldq_le(as, s->cq.base_addr + offset,
367                                  MEMTXATTRS_UNSPECIFIED, &res);
368 
369     if (res != MEMTX_OK) {
370         return result;
371     }
372 
373     icid = value & ICID_MASK;
374 
375     dte = get_dte(s, devid, &res);
376 
377     if (res != MEMTX_OK) {
378         return result;
379     }
380     dte_valid = FIELD_EX64(dte, DTE, VALID);
381     num_eventids = 1ULL << (FIELD_EX64(dte, DTE, SIZE) + 1);
382     num_intids = 1ULL << (GICD_TYPER_IDBITS + 1);
383 
384     if ((devid >= s->dt.num_ids) || (icid >= s->ct.num_ids)
385             || !dte_valid || (eventid >= num_eventids) ||
386             (((pIntid < GICV3_LPI_INTID_START) || (pIntid >= num_intids)) &&
387              (pIntid != INTID_SPURIOUS))) {
388         qemu_log_mask(LOG_GUEST_ERROR,
389                       "%s: invalid command attributes "
390                       "devid %d or icid %d or eventid %d or pIntid %d or"
391                       "unmapped dte %d\n", __func__, devid, icid, eventid,
392                       pIntid, dte_valid);
393         /*
394          * in this implementation, in case of error
395          * we ignore this command and move onto the next
396          * command in the queue
397          */
398     } else {
399         /* add ite entry to interrupt translation table */
400         IteEntry ite = {};
401         ite.itel = FIELD_DP64(ite.itel, ITE_L, VALID, dte_valid);
402         ite.itel = FIELD_DP64(ite.itel, ITE_L, INTTYPE, ITE_INTTYPE_PHYSICAL);
403         ite.itel = FIELD_DP64(ite.itel, ITE_L, INTID, pIntid);
404         ite.itel = FIELD_DP64(ite.itel, ITE_L, DOORBELL, INTID_SPURIOUS);
405         ite.iteh = FIELD_DP32(ite.iteh, ITE_H, ICID, icid);
406 
407         result = update_ite(s, eventid, dte, ite);
408     }
409 
410     return result;
411 }
412 
413 static bool update_cte(GICv3ITSState *s, uint16_t icid, bool valid,
414                        uint64_t rdbase)
415 {
416     AddressSpace *as = &s->gicv3->dma_as;
417     uint64_t value;
418     uint64_t l2t_addr;
419     bool valid_l2t;
420     uint32_t l2t_id;
421     uint32_t num_l2_entries;
422     uint64_t cte = 0;
423     MemTxResult res = MEMTX_OK;
424 
425     if (!s->ct.valid) {
426         return true;
427     }
428 
429     if (valid) {
430         /* add mapping entry to collection table */
431         cte = FIELD_DP64(cte, CTE, VALID, 1);
432         cte = FIELD_DP64(cte, CTE, RDBASE, rdbase);
433     }
434 
435     /*
436      * The specification defines the format of level 1 entries of a
437      * 2-level table, but the format of level 2 entries and the format
438      * of flat-mapped tables is IMPDEF.
439      */
440     if (s->ct.indirect) {
441         l2t_id = icid / (s->ct.page_sz / L1TABLE_ENTRY_SIZE);
442 
443         value = address_space_ldq_le(as,
444                                      s->ct.base_addr +
445                                      (l2t_id * L1TABLE_ENTRY_SIZE),
446                                      MEMTXATTRS_UNSPECIFIED, &res);
447 
448         if (res != MEMTX_OK) {
449             return false;
450         }
451 
452         valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;
453 
454         if (valid_l2t) {
455             num_l2_entries = s->ct.page_sz / s->ct.entry_sz;
456 
457             l2t_addr = value & ((1ULL << 51) - 1);
458 
459             address_space_stq_le(as, l2t_addr +
460                                  ((icid % num_l2_entries) * GITS_CTE_SIZE),
461                                  cte, MEMTXATTRS_UNSPECIFIED, &res);
462         }
463     } else {
464         /* Flat level table */
465         address_space_stq_le(as, s->ct.base_addr + (icid * GITS_CTE_SIZE),
466                              cte, MEMTXATTRS_UNSPECIFIED, &res);
467     }
468     if (res != MEMTX_OK) {
469         return false;
470     } else {
471         return true;
472     }
473 }
474 
475 static bool process_mapc(GICv3ITSState *s, uint32_t offset)
476 {
477     AddressSpace *as = &s->gicv3->dma_as;
478     uint16_t icid;
479     uint64_t rdbase;
480     bool valid;
481     MemTxResult res = MEMTX_OK;
482     bool result = false;
483     uint64_t value;
484 
485     offset += NUM_BYTES_IN_DW;
486     offset += NUM_BYTES_IN_DW;
487 
488     value = address_space_ldq_le(as, s->cq.base_addr + offset,
489                                  MEMTXATTRS_UNSPECIFIED, &res);
490 
491     if (res != MEMTX_OK) {
492         return result;
493     }
494 
495     icid = value & ICID_MASK;
496 
497     rdbase = (value & R_MAPC_RDBASE_MASK) >> R_MAPC_RDBASE_SHIFT;
498     rdbase &= RDBASE_PROCNUM_MASK;
499 
500     valid = (value & CMD_FIELD_VALID_MASK);
501 
502     if ((icid >= s->ct.num_ids) || (rdbase >= s->gicv3->num_cpu)) {
503         qemu_log_mask(LOG_GUEST_ERROR,
504                       "ITS MAPC: invalid collection table attributes "
505                       "icid %d rdbase %" PRIu64 "\n",  icid, rdbase);
506         /*
507          * in this implementation, in case of error
508          * we ignore this command and move onto the next
509          * command in the queue
510          */
511     } else {
512         result = update_cte(s, icid, valid, rdbase);
513     }
514 
515     return result;
516 }
517 
518 static bool update_dte(GICv3ITSState *s, uint32_t devid, bool valid,
519                        uint8_t size, uint64_t itt_addr)
520 {
521     AddressSpace *as = &s->gicv3->dma_as;
522     uint64_t value;
523     uint64_t l2t_addr;
524     bool valid_l2t;
525     uint32_t l2t_id;
526     uint32_t num_l2_entries;
527     uint64_t dte = 0;
528     MemTxResult res = MEMTX_OK;
529 
530     if (s->dt.valid) {
531         if (valid) {
532             /* add mapping entry to device table */
533             dte = FIELD_DP64(dte, DTE, VALID, 1);
534             dte = FIELD_DP64(dte, DTE, SIZE, size);
535             dte = FIELD_DP64(dte, DTE, ITTADDR, itt_addr);
536         }
537     } else {
538         return true;
539     }
540 
541     /*
542      * The specification defines the format of level 1 entries of a
543      * 2-level table, but the format of level 2 entries and the format
544      * of flat-mapped tables is IMPDEF.
545      */
546     if (s->dt.indirect) {
547         l2t_id = devid / (s->dt.page_sz / L1TABLE_ENTRY_SIZE);
548 
549         value = address_space_ldq_le(as,
550                                      s->dt.base_addr +
551                                      (l2t_id * L1TABLE_ENTRY_SIZE),
552                                      MEMTXATTRS_UNSPECIFIED, &res);
553 
554         if (res != MEMTX_OK) {
555             return false;
556         }
557 
558         valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;
559 
560         if (valid_l2t) {
561             num_l2_entries = s->dt.page_sz / s->dt.entry_sz;
562 
563             l2t_addr = value & ((1ULL << 51) - 1);
564 
565             address_space_stq_le(as, l2t_addr +
566                                  ((devid % num_l2_entries) * GITS_DTE_SIZE),
567                                  dte, MEMTXATTRS_UNSPECIFIED, &res);
568         }
569     } else {
570         /* Flat level table */
571         address_space_stq_le(as, s->dt.base_addr + (devid * GITS_DTE_SIZE),
572                              dte, MEMTXATTRS_UNSPECIFIED, &res);
573     }
574     if (res != MEMTX_OK) {
575         return false;
576     } else {
577         return true;
578     }
579 }
580 
581 static bool process_mapd(GICv3ITSState *s, uint64_t value, uint32_t offset)
582 {
583     AddressSpace *as = &s->gicv3->dma_as;
584     uint32_t devid;
585     uint8_t size;
586     uint64_t itt_addr;
587     bool valid;
588     MemTxResult res = MEMTX_OK;
589     bool result = false;
590 
591     devid = ((value & DEVID_MASK) >> DEVID_SHIFT);
592 
593     offset += NUM_BYTES_IN_DW;
594     value = address_space_ldq_le(as, s->cq.base_addr + offset,
595                                  MEMTXATTRS_UNSPECIFIED, &res);
596 
597     if (res != MEMTX_OK) {
598         return result;
599     }
600 
601     size = (value & SIZE_MASK);
602 
603     offset += NUM_BYTES_IN_DW;
604     value = address_space_ldq_le(as, s->cq.base_addr + offset,
605                                  MEMTXATTRS_UNSPECIFIED, &res);
606 
607     if (res != MEMTX_OK) {
608         return result;
609     }
610 
611     itt_addr = (value & ITTADDR_MASK) >> ITTADDR_SHIFT;
612 
613     valid = (value & CMD_FIELD_VALID_MASK);
614 
615     if ((devid >= s->dt.num_ids) ||
616         (size > FIELD_EX64(s->typer, GITS_TYPER, IDBITS))) {
617         qemu_log_mask(LOG_GUEST_ERROR,
618                       "ITS MAPD: invalid device table attributes "
619                       "devid %d or size %d\n", devid, size);
620         /*
621          * in this implementation, in case of error
622          * we ignore this command and move onto the next
623          * command in the queue
624          */
625     } else {
626         result = update_dte(s, devid, valid, size, itt_addr);
627     }
628 
629     return result;
630 }
631 
632 /*
633  * Current implementation blocks until all
634  * commands are processed
635  */
static void process_cmdq(GICv3ITSState *s)
{
    uint32_t wr_offset = 0;
    uint32_t rd_offset = 0;
    uint32_t cq_offset = 0;
    uint64_t data;
    AddressSpace *as = &s->gicv3->dma_as;
    MemTxResult res = MEMTX_OK;
    bool result = true;
    uint8_t cmd;
    int i;

    /* Nothing to process unless the ITS is enabled */
    if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
        return;
    }

    wr_offset = FIELD_EX64(s->cwriter, GITS_CWRITER, OFFSET);

    /* Guest-programmed offsets beyond the queue size are ignored */
    if (wr_offset >= s->cq.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid write offset "
                      "%d\n", __func__, wr_offset);
        return;
    }

    rd_offset = FIELD_EX64(s->creadr, GITS_CREADR, OFFSET);

    if (rd_offset >= s->cq.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid read offset "
                      "%d\n", __func__, rd_offset);
        return;
    }

    /* Consume commands until the read pointer catches up with the writer */
    while (wr_offset != rd_offset) {
        cq_offset = (rd_offset * GITS_CMDQ_ENTRY_SIZE);
        data = address_space_ldq_le(as, s->cq.base_addr + cq_offset,
                                    MEMTXATTRS_UNSPECIFIED, &res);
        if (res != MEMTX_OK) {
            /* DMA error fetching the command itself: stall the queue */
            s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: could not read command at 0x%" PRIx64 "\n",
                          __func__, s->cq.base_addr + cq_offset);
            break;
        }

        /* Command opcode lives in the low byte of the first doubleword */
        cmd = (data & CMD_MASK);

        switch (cmd) {
        case GITS_CMD_INT:
            result = process_its_cmd(s, data, cq_offset, INTERRUPT);
            break;
        case GITS_CMD_CLEAR:
            result = process_its_cmd(s, data, cq_offset, CLEAR);
            break;
        case GITS_CMD_SYNC:
            /*
             * Current implementation makes a blocking synchronous call
             * for every command issued earlier, hence the internal state
             * is already consistent by the time SYNC command is executed.
             * Hence no further processing is required for SYNC command.
             */
            break;
        case GITS_CMD_MAPD:
            result = process_mapd(s, data, cq_offset);
            break;
        case GITS_CMD_MAPC:
            result = process_mapc(s, cq_offset);
            break;
        case GITS_CMD_MAPTI:
            result = process_mapti(s, data, cq_offset, false);
            break;
        case GITS_CMD_MAPI:
            result = process_mapti(s, data, cq_offset, true);
            break;
        case GITS_CMD_DISCARD:
            result = process_its_cmd(s, data, cq_offset, DISCARD);
            break;
        case GITS_CMD_INV:
        case GITS_CMD_INVALL:
            /*
             * Current implementation doesn't cache any ITS tables,
             * but the calculated lpi priority information. We only
             * need to trigger lpi priority re-calculation to be in
             * sync with LPI config table or pending table changes.
             */
            for (i = 0; i < s->gicv3->num_cpu; i++) {
                gicv3_redist_update_lpi(&s->gicv3->cpu[i]);
            }
            break;
        default:
            /* Unknown commands are silently skipped */
            break;
        }
        if (result) {
            /* Command done: advance the read pointer (queue wraps) */
            rd_offset++;
            rd_offset %= s->cq.num_entries;
            s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, OFFSET, rd_offset);
        } else {
            /*
             * in this implementation, in case of dma read/write error
             * we stall the command processing
             */
            s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: %x cmd processing failed\n", __func__, cmd);
            break;
        }
    }
}
745 
746 /*
747  * This function extracts the ITS Device and Collection table specific
748  * parameters (like base_addr, size etc) from GITS_BASER register.
749  * It is called during ITS enable and also during post_load migration
750  */
static void extract_table_params(GICv3ITSState *s)
{
    uint16_t num_pages = 0;
    uint8_t  page_sz_type;
    uint8_t type;
    uint32_t page_sz = 0;
    uint64_t value;

    /* Walk all eight GITS_BASER<n> registers */
    for (int i = 0; i < 8; i++) {
        TableDesc *td;
        int idbits;

        value = s->baser[i];

        /* An all-zero register describes no table at all */
        if (!value) {
            continue;
        }

        page_sz_type = FIELD_EX64(value, GITS_BASER, PAGESIZE);

        switch (page_sz_type) {
        case 0:
            page_sz = GITS_PAGE_SIZE_4K;
            break;

        case 1:
            page_sz = GITS_PAGE_SIZE_16K;
            break;

        case 2:
        case 3:
            /* Page-size encodings 2 and 3 both mean 64K */
            page_sz = GITS_PAGE_SIZE_64K;
            break;

        default:
            /* PAGESIZE is a 2-bit field, so this cannot happen */
            g_assert_not_reached();
        }

        /* SIZE field encodes (number of pages - 1) */
        num_pages = FIELD_EX64(value, GITS_BASER, SIZE) + 1;

        type = FIELD_EX64(value, GITS_BASER, TYPE);

        switch (type) {
        case GITS_BASER_TYPE_DEVICE:
            td = &s->dt;
            idbits = FIELD_EX64(s->typer, GITS_TYPER, DEVBITS) + 1;
            break;
        case GITS_BASER_TYPE_COLLECTION:
            td = &s->ct;
            if (FIELD_EX64(s->typer, GITS_TYPER, CIL)) {
                idbits = FIELD_EX64(s->typer, GITS_TYPER, CIDBITS) + 1;
            } else {
                /* 16-bit CollectionId supported when CIL == 0 */
                idbits = 16;
            }
            break;
        default:
            /*
             * GITS_BASER<n>.TYPE is read-only, so GITS_BASER_RO_MASK
             * ensures we will only see type values corresponding to
             * the values set up in gicv3_its_reset().
             */
            g_assert_not_reached();
        }

        memset(td, 0, sizeof(*td));
        td->valid = FIELD_EX64(value, GITS_BASER, VALID);
        /*
         * If GITS_BASER<n>.Valid is 0 for any <n> then we will not process
         * interrupts. (GITS_TYPER.HCC is 0 for this implementation, so we
         * do not have a special case where the GITS_BASER<n>.Valid bit is 0
         * for the register corresponding to the Collection table but we
         * still have to process interrupts using non-memory-backed
         * Collection table entries.)
         */
        if (!td->valid) {
            continue;
        }
        td->page_sz = page_sz;
        td->indirect = FIELD_EX64(value, GITS_BASER, INDIRECT);
        td->entry_sz = FIELD_EX64(value, GITS_BASER, ENTRYSIZE) + 1;
        td->base_addr = baser_base_addr(value, page_sz);
        if (!td->indirect) {
            /* Flat table: every entry lives directly in the pages */
            td->num_entries = (num_pages * page_sz) / td->entry_sz;
        } else {
            /* 2-level: L1 pointers per page times entries per L2 page */
            td->num_entries = (((num_pages * page_sz) /
                                  L1TABLE_ENTRY_SIZE) *
                                 (page_sz / td->entry_sz));
        }
        td->num_ids = 1ULL << idbits;
    }
}
843 
844 static void extract_cmdq_params(GICv3ITSState *s)
845 {
846     uint16_t num_pages = 0;
847     uint64_t value = s->cbaser;
848 
849     num_pages = FIELD_EX64(value, GITS_CBASER, SIZE) + 1;
850 
851     memset(&s->cq, 0 , sizeof(s->cq));
852     s->cq.valid = FIELD_EX64(value, GITS_CBASER, VALID);
853 
854     if (s->cq.valid) {
855         s->cq.num_entries = (num_pages * GITS_PAGE_SIZE_4K) /
856                              GITS_CMDQ_ENTRY_SIZE;
857         s->cq.base_addr = FIELD_EX64(value, GITS_CBASER, PHYADDR);
858         s->cq.base_addr <<= R_GITS_CBASER_PHYADDR_SHIFT;
859     }
860 }
861 
862 static MemTxResult gicv3_its_translation_write(void *opaque, hwaddr offset,
863                                                uint64_t data, unsigned size,
864                                                MemTxAttrs attrs)
865 {
866     GICv3ITSState *s = (GICv3ITSState *)opaque;
867     bool result = true;
868     uint32_t devid = 0;
869 
870     switch (offset) {
871     case GITS_TRANSLATER:
872         if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
873             devid = attrs.requester_id;
874             result = process_its_cmd(s, data, devid, NONE);
875         }
876         break;
877     default:
878         break;
879     }
880 
881     if (result) {
882         return MEMTX_OK;
883     } else {
884         return MEMTX_ERROR;
885     }
886 }
887 
888 static bool its_writel(GICv3ITSState *s, hwaddr offset,
889                               uint64_t value, MemTxAttrs attrs)
890 {
891     bool result = true;
892     int index;
893 
894     switch (offset) {
895     case GITS_CTLR:
896         if (value & R_GITS_CTLR_ENABLED_MASK) {
897             s->ctlr |= R_GITS_CTLR_ENABLED_MASK;
898             extract_table_params(s);
899             extract_cmdq_params(s);
900             s->creadr = 0;
901             process_cmdq(s);
902         } else {
903             s->ctlr &= ~R_GITS_CTLR_ENABLED_MASK;
904         }
905         break;
906     case GITS_CBASER:
907         /*
908          * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
909          *                 already enabled
910          */
911         if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
912             s->cbaser = deposit64(s->cbaser, 0, 32, value);
913             s->creadr = 0;
914             s->cwriter = s->creadr;
915         }
916         break;
917     case GITS_CBASER + 4:
918         /*
919          * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
920          *                 already enabled
921          */
922         if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
923             s->cbaser = deposit64(s->cbaser, 32, 32, value);
924             s->creadr = 0;
925             s->cwriter = s->creadr;
926         }
927         break;
928     case GITS_CWRITER:
929         s->cwriter = deposit64(s->cwriter, 0, 32,
930                                (value & ~R_GITS_CWRITER_RETRY_MASK));
931         if (s->cwriter != s->creadr) {
932             process_cmdq(s);
933         }
934         break;
935     case GITS_CWRITER + 4:
936         s->cwriter = deposit64(s->cwriter, 32, 32, value);
937         break;
938     case GITS_CREADR:
939         if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
940             s->creadr = deposit64(s->creadr, 0, 32,
941                                   (value & ~R_GITS_CREADR_STALLED_MASK));
942         } else {
943             /* RO register, ignore the write */
944             qemu_log_mask(LOG_GUEST_ERROR,
945                           "%s: invalid guest write to RO register at offset "
946                           TARGET_FMT_plx "\n", __func__, offset);
947         }
948         break;
949     case GITS_CREADR + 4:
950         if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
951             s->creadr = deposit64(s->creadr, 32, 32, value);
952         } else {
953             /* RO register, ignore the write */
954             qemu_log_mask(LOG_GUEST_ERROR,
955                           "%s: invalid guest write to RO register at offset "
956                           TARGET_FMT_plx "\n", __func__, offset);
957         }
958         break;
959     case GITS_BASER ... GITS_BASER + 0x3f:
960         /*
961          * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
962          *                 already enabled
963          */
964         if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
965             index = (offset - GITS_BASER) / 8;
966 
967             if (offset & 7) {
968                 value <<= 32;
969                 value &= ~GITS_BASER_RO_MASK;
970                 s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(0, 32);
971                 s->baser[index] |= value;
972             } else {
973                 value &= ~GITS_BASER_RO_MASK;
974                 s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(32, 32);
975                 s->baser[index] |= value;
976             }
977         }
978         break;
979     case GITS_IIDR:
980     case GITS_IDREGS ... GITS_IDREGS + 0x2f:
981         /* RO registers, ignore the write */
982         qemu_log_mask(LOG_GUEST_ERROR,
983                       "%s: invalid guest write to RO register at offset "
984                       TARGET_FMT_plx "\n", __func__, offset);
985         break;
986     default:
987         result = false;
988         break;
989     }
990     return result;
991 }
992 
993 static bool its_readl(GICv3ITSState *s, hwaddr offset,
994                              uint64_t *data, MemTxAttrs attrs)
995 {
996     bool result = true;
997     int index;
998 
999     switch (offset) {
1000     case GITS_CTLR:
1001         *data = s->ctlr;
1002         break;
1003     case GITS_IIDR:
1004         *data = gicv3_iidr();
1005         break;
1006     case GITS_IDREGS ... GITS_IDREGS + 0x2f:
1007         /* ID registers */
1008         *data = gicv3_idreg(offset - GITS_IDREGS);
1009         break;
1010     case GITS_TYPER:
1011         *data = extract64(s->typer, 0, 32);
1012         break;
1013     case GITS_TYPER + 4:
1014         *data = extract64(s->typer, 32, 32);
1015         break;
1016     case GITS_CBASER:
1017         *data = extract64(s->cbaser, 0, 32);
1018         break;
1019     case GITS_CBASER + 4:
1020         *data = extract64(s->cbaser, 32, 32);
1021         break;
1022     case GITS_CREADR:
1023         *data = extract64(s->creadr, 0, 32);
1024         break;
1025     case GITS_CREADR + 4:
1026         *data = extract64(s->creadr, 32, 32);
1027         break;
1028     case GITS_CWRITER:
1029         *data = extract64(s->cwriter, 0, 32);
1030         break;
1031     case GITS_CWRITER + 4:
1032         *data = extract64(s->cwriter, 32, 32);
1033         break;
1034     case GITS_BASER ... GITS_BASER + 0x3f:
1035         index = (offset - GITS_BASER) / 8;
1036         if (offset & 7) {
1037             *data = extract64(s->baser[index], 32, 32);
1038         } else {
1039             *data = extract64(s->baser[index], 0, 32);
1040         }
1041         break;
1042     default:
1043         result = false;
1044         break;
1045     }
1046     return result;
1047 }
1048 
1049 static bool its_writell(GICv3ITSState *s, hwaddr offset,
1050                                uint64_t value, MemTxAttrs attrs)
1051 {
1052     bool result = true;
1053     int index;
1054 
1055     switch (offset) {
1056     case GITS_BASER ... GITS_BASER + 0x3f:
1057         /*
1058          * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
1059          *                 already enabled
1060          */
1061         if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
1062             index = (offset - GITS_BASER) / 8;
1063             s->baser[index] &= GITS_BASER_RO_MASK;
1064             s->baser[index] |= (value & ~GITS_BASER_RO_MASK);
1065         }
1066         break;
1067     case GITS_CBASER:
1068         /*
1069          * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
1070          *                 already enabled
1071          */
1072         if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
1073             s->cbaser = value;
1074             s->creadr = 0;
1075             s->cwriter = s->creadr;
1076         }
1077         break;
1078     case GITS_CWRITER:
1079         s->cwriter = value & ~R_GITS_CWRITER_RETRY_MASK;
1080         if (s->cwriter != s->creadr) {
1081             process_cmdq(s);
1082         }
1083         break;
1084     case GITS_CREADR:
1085         if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
1086             s->creadr = value & ~R_GITS_CREADR_STALLED_MASK;
1087         } else {
1088             /* RO register, ignore the write */
1089             qemu_log_mask(LOG_GUEST_ERROR,
1090                           "%s: invalid guest write to RO register at offset "
1091                           TARGET_FMT_plx "\n", __func__, offset);
1092         }
1093         break;
1094     case GITS_TYPER:
1095         /* RO registers, ignore the write */
1096         qemu_log_mask(LOG_GUEST_ERROR,
1097                       "%s: invalid guest write to RO register at offset "
1098                       TARGET_FMT_plx "\n", __func__, offset);
1099         break;
1100     default:
1101         result = false;
1102         break;
1103     }
1104     return result;
1105 }
1106 
1107 static bool its_readll(GICv3ITSState *s, hwaddr offset,
1108                               uint64_t *data, MemTxAttrs attrs)
1109 {
1110     bool result = true;
1111     int index;
1112 
1113     switch (offset) {
1114     case GITS_TYPER:
1115         *data = s->typer;
1116         break;
1117     case GITS_BASER ... GITS_BASER + 0x3f:
1118         index = (offset - GITS_BASER) / 8;
1119         *data = s->baser[index];
1120         break;
1121     case GITS_CBASER:
1122         *data = s->cbaser;
1123         break;
1124     case GITS_CREADR:
1125         *data = s->creadr;
1126         break;
1127     case GITS_CWRITER:
1128         *data = s->cwriter;
1129         break;
1130     default:
1131         result = false;
1132         break;
1133     }
1134     return result;
1135 }
1136 
1137 static MemTxResult gicv3_its_read(void *opaque, hwaddr offset, uint64_t *data,
1138                                   unsigned size, MemTxAttrs attrs)
1139 {
1140     GICv3ITSState *s = (GICv3ITSState *)opaque;
1141     bool result;
1142 
1143     switch (size) {
1144     case 4:
1145         result = its_readl(s, offset, data, attrs);
1146         break;
1147     case 8:
1148         result = its_readll(s, offset, data, attrs);
1149         break;
1150     default:
1151         result = false;
1152         break;
1153     }
1154 
1155     if (!result) {
1156         qemu_log_mask(LOG_GUEST_ERROR,
1157                       "%s: invalid guest read at offset " TARGET_FMT_plx
1158                       "size %u\n", __func__, offset, size);
1159         /*
1160          * The spec requires that reserved registers are RAZ/WI;
1161          * so use false returns from leaf functions as a way to
1162          * trigger the guest-error logging but don't return it to
1163          * the caller, or we'll cause a spurious guest data abort.
1164          */
1165         *data = 0;
1166     }
1167     return MEMTX_OK;
1168 }
1169 
1170 static MemTxResult gicv3_its_write(void *opaque, hwaddr offset, uint64_t data,
1171                                    unsigned size, MemTxAttrs attrs)
1172 {
1173     GICv3ITSState *s = (GICv3ITSState *)opaque;
1174     bool result;
1175 
1176     switch (size) {
1177     case 4:
1178         result = its_writel(s, offset, data, attrs);
1179         break;
1180     case 8:
1181         result = its_writell(s, offset, data, attrs);
1182         break;
1183     default:
1184         result = false;
1185         break;
1186     }
1187 
1188     if (!result) {
1189         qemu_log_mask(LOG_GUEST_ERROR,
1190                       "%s: invalid guest write at offset " TARGET_FMT_plx
1191                       "size %u\n", __func__, offset, size);
1192         /*
1193          * The spec requires that reserved registers are RAZ/WI;
1194          * so use false returns from leaf functions as a way to
1195          * trigger the guest-error logging but don't return it to
1196          * the caller, or we'll cause a spurious guest data abort.
1197          */
1198     }
1199     return MEMTX_OK;
1200 }
1201 
/*
 * MemoryRegionOps for the ITS control register frame (GITS_CTLR,
 * GITS_BASERn, command queue registers, ...): 32- and 64-bit
 * accesses only.
 */
static const MemoryRegionOps gicv3_its_control_ops = {
    .read_with_attrs = gicv3_its_read,
    .write_with_attrs = gicv3_its_write,
    .valid.min_access_size = 4,
    .valid.max_access_size = 8,
    .impl.min_access_size = 4,
    .impl.max_access_size = 8,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
1211 
/*
 * MemoryRegionOps for the ITS translation register frame (the
 * GITS_TRANSLATER register written by devices to raise an LPI).
 * Only writes are implemented, in 16- or 32-bit accesses; no read
 * handler is provided here.
 */
static const MemoryRegionOps gicv3_its_translation_ops = {
    .write_with_attrs = gicv3_its_translation_write,
    .valid.min_access_size = 2,
    .valid.max_access_size = 4,
    .impl.min_access_size = 2,
    .impl.max_access_size = 4,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
1220 
1221 static void gicv3_arm_its_realize(DeviceState *dev, Error **errp)
1222 {
1223     GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
1224     int i;
1225 
1226     for (i = 0; i < s->gicv3->num_cpu; i++) {
1227         if (!(s->gicv3->cpu[i].gicr_typer & GICR_TYPER_PLPIS)) {
1228             error_setg(errp, "Physical LPI not supported by CPU %d", i);
1229             return;
1230         }
1231     }
1232 
1233     gicv3_its_init_mmio(s, &gicv3_its_control_ops, &gicv3_its_translation_ops);
1234 
1235     address_space_init(&s->gicv3->dma_as, s->gicv3->dma,
1236                        "gicv3-its-sysmem");
1237 
1238     /* set the ITS default features supported */
1239     s->typer = FIELD_DP64(s->typer, GITS_TYPER, PHYSICAL, 1);
1240     s->typer = FIELD_DP64(s->typer, GITS_TYPER, ITT_ENTRY_SIZE,
1241                           ITS_ITT_ENTRY_SIZE - 1);
1242     s->typer = FIELD_DP64(s->typer, GITS_TYPER, IDBITS, ITS_IDBITS);
1243     s->typer = FIELD_DP64(s->typer, GITS_TYPER, DEVBITS, ITS_DEVBITS);
1244     s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIL, 1);
1245     s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIDBITS, ITS_CIDBITS);
1246 }
1247 
1248 static void gicv3_its_reset(DeviceState *dev)
1249 {
1250     GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
1251     GICv3ITSClass *c = ARM_GICV3_ITS_GET_CLASS(s);
1252 
1253     c->parent_reset(dev);
1254 
1255     /* Quiescent bit reset to 1 */
1256     s->ctlr = FIELD_DP32(s->ctlr, GITS_CTLR, QUIESCENT, 1);
1257 
1258     /*
1259      * setting GITS_BASER0.Type = 0b001 (Device)
1260      *         GITS_BASER1.Type = 0b100 (Collection Table)
1261      *         GITS_BASER<n>.Type,where n = 3 to 7 are 0b00 (Unimplemented)
1262      *         GITS_BASER<0,1>.Page_Size = 64KB
1263      * and default translation table entry size to 16 bytes
1264      */
1265     s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, TYPE,
1266                              GITS_BASER_TYPE_DEVICE);
1267     s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, PAGESIZE,
1268                              GITS_BASER_PAGESIZE_64K);
1269     s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, ENTRYSIZE,
1270                              GITS_DTE_SIZE - 1);
1271 
1272     s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, TYPE,
1273                              GITS_BASER_TYPE_COLLECTION);
1274     s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, PAGESIZE,
1275                              GITS_BASER_PAGESIZE_64K);
1276     s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, ENTRYSIZE,
1277                              GITS_CTE_SIZE - 1);
1278 }
1279 
1280 static void gicv3_its_post_load(GICv3ITSState *s)
1281 {
1282     if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
1283         extract_table_params(s);
1284         extract_cmdq_params(s);
1285     }
1286 }
1287 
/*
 * QOM properties: "parent-gicv3" is a link that the board code must
 * set to the GICv3 device this ITS belongs to.
 */
static Property gicv3_its_props[] = {
    DEFINE_PROP_LINK("parent-gicv3", GICv3ITSState, gicv3, "arm-gicv3",
                     GICv3State *),
    DEFINE_PROP_END_OF_LIST(),
};
1293 
1294 static void gicv3_its_class_init(ObjectClass *klass, void *data)
1295 {
1296     DeviceClass *dc = DEVICE_CLASS(klass);
1297     GICv3ITSClass *ic = ARM_GICV3_ITS_CLASS(klass);
1298     GICv3ITSCommonClass *icc = ARM_GICV3_ITS_COMMON_CLASS(klass);
1299 
1300     dc->realize = gicv3_arm_its_realize;
1301     device_class_set_props(dc, gicv3_its_props);
1302     device_class_set_parent_reset(dc, gicv3_its_reset, &ic->parent_reset);
1303     icc->post_load = gicv3_its_post_load;
1304 }
1305 
/* QOM type description for the emulated GICv3 ITS device */
static const TypeInfo gicv3_its_info = {
    .name = TYPE_ARM_GICV3_ITS,
    .parent = TYPE_ARM_GICV3_ITS_COMMON,
    .instance_size = sizeof(GICv3ITSState),
    .class_init = gicv3_its_class_init,
    .class_size = sizeof(GICv3ITSClass),
};
1313 
/* Register the ITS type with QOM at module-init time */
static void gicv3_its_register_types(void)
{
    type_register_static(&gicv3_its_info);
}

type_init(gicv3_its_register_types)
1320