xref: /openbmc/qemu/hw/intc/arm_gicv3_its.c (revision 764d6ba10cce25d20ef9f3e11a83a9783dadf65f)
1 /*
2  * ITS emulation for a GICv3-based system
3  *
4  * Copyright Linaro.org 2021
5  *
6  * Authors:
7  *  Shashi Mallela <shashi.mallela@linaro.org>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2 or (at your
10  * option) any later version.  See the COPYING file in the top-level directory.
11  *
12  */
13 
14 #include "qemu/osdep.h"
15 #include "qemu/log.h"
16 #include "hw/qdev-properties.h"
17 #include "hw/intc/arm_gicv3_its_common.h"
18 #include "gicv3_internal.h"
19 #include "qom/object.h"
20 #include "qapi/error.h"
21 
typedef struct GICv3ITSClass GICv3ITSClass;
/* This is reusing the GICv3ITSState typedef from ARM_GICV3_ITS_COMMON */
DECLARE_OBJ_CHECKERS(GICv3ITSState, GICv3ITSClass,
                     ARM_GICV3_ITS, TYPE_ARM_GICV3_ITS)

/* Class data: only used to chain to the parent class's reset handler */
struct GICv3ITSClass {
    GICv3ITSCommonClass parent_class;
    /* Parent class's reset, saved so this device's reset can call it */
    void (*parent_reset)(DeviceState *dev);
};
31 
/*
 * This is an internal enum used to distinguish between LPI triggered
 * via command queue and LPI triggered via gits_translater write.
 */
typedef enum ItsCmdType {
    NONE = 0, /* internal indication for GITS_TRANSLATER write */
    CLEAR = 1,
    DISCARD = 2,
    INTERRUPT = 3,
} ItsCmdType;

/*
 * In-memory Interrupt Translation Entry, split into the 64-bit low word
 * (valid bit, interrupt type, INTID, doorbell) and the 32-bit high word
 * (ICID). See update_ite()/get_ite() for the exact in-guest-memory layout.
 */
typedef struct {
    uint32_t iteh; /* high word: holds the ICID */
    uint64_t itel; /* low word: VALID, INTTYPE, INTID, DOORBELL fields */
} IteEntry;
47 
48 static uint64_t baser_base_addr(uint64_t value, uint32_t page_sz)
49 {
50     uint64_t result = 0;
51 
52     switch (page_sz) {
53     case GITS_PAGE_SIZE_4K:
54     case GITS_PAGE_SIZE_16K:
55         result = FIELD_EX64(value, GITS_BASER, PHYADDR) << 12;
56         break;
57 
58     case GITS_PAGE_SIZE_64K:
59         result = FIELD_EX64(value, GITS_BASER, PHYADDRL_64K) << 16;
60         result |= FIELD_EX64(value, GITS_BASER, PHYADDRH_64K) << 48;
61         break;
62 
63     default:
64         break;
65     }
66     return result;
67 }
68 
69 static bool get_cte(GICv3ITSState *s, uint16_t icid, uint64_t *cte,
70                     MemTxResult *res)
71 {
72     AddressSpace *as = &s->gicv3->dma_as;
73     uint64_t l2t_addr;
74     uint64_t value;
75     bool valid_l2t;
76     uint32_t l2t_id;
77     uint32_t max_l2_entries;
78 
79     if (s->ct.indirect) {
80         l2t_id = icid / (s->ct.page_sz / L1TABLE_ENTRY_SIZE);
81 
82         value = address_space_ldq_le(as,
83                                      s->ct.base_addr +
84                                      (l2t_id * L1TABLE_ENTRY_SIZE),
85                                      MEMTXATTRS_UNSPECIFIED, res);
86 
87         if (*res == MEMTX_OK) {
88             valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;
89 
90             if (valid_l2t) {
91                 max_l2_entries = s->ct.page_sz / s->ct.entry_sz;
92 
93                 l2t_addr = value & ((1ULL << 51) - 1);
94 
95                 *cte =  address_space_ldq_le(as, l2t_addr +
96                                     ((icid % max_l2_entries) * GITS_CTE_SIZE),
97                                     MEMTXATTRS_UNSPECIFIED, res);
98            }
99        }
100     } else {
101         /* Flat level table */
102         *cte =  address_space_ldq_le(as, s->ct.base_addr +
103                                      (icid * GITS_CTE_SIZE),
104                                       MEMTXATTRS_UNSPECIFIED, res);
105     }
106 
107     return (*cte & TABLE_ENTRY_VALID_MASK) != 0;
108 }
109 
110 static bool update_ite(GICv3ITSState *s, uint32_t eventid, uint64_t dte,
111                        IteEntry ite)
112 {
113     AddressSpace *as = &s->gicv3->dma_as;
114     uint64_t itt_addr;
115     MemTxResult res = MEMTX_OK;
116 
117     itt_addr = (dte & GITS_DTE_ITTADDR_MASK) >> GITS_DTE_ITTADDR_SHIFT;
118     itt_addr <<= ITTADDR_SHIFT; /* 256 byte aligned */
119 
120     address_space_stq_le(as, itt_addr + (eventid * (sizeof(uint64_t) +
121                          sizeof(uint32_t))), ite.itel, MEMTXATTRS_UNSPECIFIED,
122                          &res);
123 
124     if (res == MEMTX_OK) {
125         address_space_stl_le(as, itt_addr + (eventid * (sizeof(uint64_t) +
126                              sizeof(uint32_t))) + sizeof(uint32_t), ite.iteh,
127                              MEMTXATTRS_UNSPECIFIED, &res);
128     }
129     if (res != MEMTX_OK) {
130         return false;
131     } else {
132         return true;
133     }
134 }
135 
136 static bool get_ite(GICv3ITSState *s, uint32_t eventid, uint64_t dte,
137                     uint16_t *icid, uint32_t *pIntid, MemTxResult *res)
138 {
139     AddressSpace *as = &s->gicv3->dma_as;
140     uint64_t itt_addr;
141     bool status = false;
142     IteEntry ite = {};
143 
144     itt_addr = (dte & GITS_DTE_ITTADDR_MASK) >> GITS_DTE_ITTADDR_SHIFT;
145     itt_addr <<= ITTADDR_SHIFT; /* 256 byte aligned */
146 
147     ite.itel = address_space_ldq_le(as, itt_addr +
148                                     (eventid * (sizeof(uint64_t) +
149                                     sizeof(uint32_t))), MEMTXATTRS_UNSPECIFIED,
150                                     res);
151 
152     if (*res == MEMTX_OK) {
153         ite.iteh = address_space_ldl_le(as, itt_addr +
154                                         (eventid * (sizeof(uint64_t) +
155                                         sizeof(uint32_t))) + sizeof(uint32_t),
156                                         MEMTXATTRS_UNSPECIFIED, res);
157 
158         if (*res == MEMTX_OK) {
159             if (FIELD_EX64(ite.itel, ITE_L, VALID)) {
160                 int inttype = FIELD_EX64(ite.itel, ITE_L, INTTYPE);
161                 if (inttype == ITE_INTTYPE_PHYSICAL) {
162                     *pIntid = FIELD_EX64(ite.itel, ITE_L, INTID);
163                     *icid = FIELD_EX32(ite.iteh, ITE_H, ICID);
164                     status = true;
165                 }
166             }
167         }
168     }
169     return status;
170 }
171 
172 static uint64_t get_dte(GICv3ITSState *s, uint32_t devid, MemTxResult *res)
173 {
174     AddressSpace *as = &s->gicv3->dma_as;
175     uint64_t l2t_addr;
176     uint64_t value;
177     bool valid_l2t;
178     uint32_t l2t_id;
179     uint32_t max_l2_entries;
180 
181     if (s->dt.indirect) {
182         l2t_id = devid / (s->dt.page_sz / L1TABLE_ENTRY_SIZE);
183 
184         value = address_space_ldq_le(as,
185                                      s->dt.base_addr +
186                                      (l2t_id * L1TABLE_ENTRY_SIZE),
187                                      MEMTXATTRS_UNSPECIFIED, res);
188 
189         if (*res == MEMTX_OK) {
190             valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;
191 
192             if (valid_l2t) {
193                 max_l2_entries = s->dt.page_sz / s->dt.entry_sz;
194 
195                 l2t_addr = value & ((1ULL << 51) - 1);
196 
197                 value =  address_space_ldq_le(as, l2t_addr +
198                                    ((devid % max_l2_entries) * GITS_DTE_SIZE),
199                                    MEMTXATTRS_UNSPECIFIED, res);
200             }
201         }
202     } else {
203         /* Flat level table */
204         value = address_space_ldq_le(as, s->dt.base_addr +
205                                      (devid * GITS_DTE_SIZE),
206                                      MEMTXATTRS_UNSPECIFIED, res);
207     }
208 
209     return value;
210 }
211 
212 /*
213  * This function handles the processing of following commands based on
214  * the ItsCmdType parameter passed:-
215  * 1. triggering of lpi interrupt translation via ITS INT command
216  * 2. triggering of lpi interrupt translation via gits_translater register
217  * 3. handling of ITS CLEAR command
218  * 4. handling of ITS DISCARD command
219  */
220 static bool process_its_cmd(GICv3ITSState *s, uint64_t value, uint32_t offset,
221                             ItsCmdType cmd)
222 {
223     AddressSpace *as = &s->gicv3->dma_as;
224     uint32_t devid, eventid;
225     MemTxResult res = MEMTX_OK;
226     bool dte_valid;
227     uint64_t dte = 0;
228     uint32_t max_eventid;
229     uint16_t icid = 0;
230     uint32_t pIntid = 0;
231     bool ite_valid = false;
232     uint64_t cte = 0;
233     bool cte_valid = false;
234     bool result = false;
235     uint64_t rdbase;
236 
237     if (cmd == NONE) {
238         devid = offset;
239     } else {
240         devid = ((value & DEVID_MASK) >> DEVID_SHIFT);
241 
242         offset += NUM_BYTES_IN_DW;
243         value = address_space_ldq_le(as, s->cq.base_addr + offset,
244                                      MEMTXATTRS_UNSPECIFIED, &res);
245     }
246 
247     if (res != MEMTX_OK) {
248         return result;
249     }
250 
251     eventid = (value & EVENTID_MASK);
252 
253     dte = get_dte(s, devid, &res);
254 
255     if (res != MEMTX_OK) {
256         return result;
257     }
258     dte_valid = dte & TABLE_ENTRY_VALID_MASK;
259 
260     if (dte_valid) {
261         max_eventid = (1UL << (((dte >> 1U) & SIZE_MASK) + 1));
262 
263         ite_valid = get_ite(s, eventid, dte, &icid, &pIntid, &res);
264 
265         if (res != MEMTX_OK) {
266             return result;
267         }
268 
269         if (ite_valid) {
270             cte_valid = get_cte(s, icid, &cte, &res);
271         }
272 
273         if (res != MEMTX_OK) {
274             return result;
275         }
276     } else {
277         qemu_log_mask(LOG_GUEST_ERROR,
278                       "%s: invalid command attributes: "
279                       "invalid dte: %"PRIx64" for %d (MEM_TX: %d)\n",
280                       __func__, dte, devid, res);
281         return result;
282     }
283 
284 
285     /*
286      * In this implementation, in case of guest errors we ignore the
287      * command and move onto the next command in the queue.
288      */
289     if (devid > s->dt.max_ids) {
290         qemu_log_mask(LOG_GUEST_ERROR,
291                       "%s: invalid command attributes: devid %d>%d",
292                       __func__, devid, s->dt.max_ids);
293 
294     } else if (!dte_valid || !ite_valid || !cte_valid) {
295         qemu_log_mask(LOG_GUEST_ERROR,
296                       "%s: invalid command attributes: "
297                       "dte: %s, ite: %s, cte: %s\n",
298                       __func__,
299                       dte_valid ? "valid" : "invalid",
300                       ite_valid ? "valid" : "invalid",
301                       cte_valid ? "valid" : "invalid");
302     } else if (eventid > max_eventid) {
303         qemu_log_mask(LOG_GUEST_ERROR,
304                       "%s: invalid command attributes: eventid %d > %d\n",
305                       __func__, eventid, max_eventid);
306     } else {
307         /*
308          * Current implementation only supports rdbase == procnum
309          * Hence rdbase physical address is ignored
310          */
311         rdbase = (cte & GITS_CTE_RDBASE_PROCNUM_MASK) >> 1U;
312 
313         if (rdbase >= s->gicv3->num_cpu) {
314             return result;
315         }
316 
317         if ((cmd == CLEAR) || (cmd == DISCARD)) {
318             gicv3_redist_process_lpi(&s->gicv3->cpu[rdbase], pIntid, 0);
319         } else {
320             gicv3_redist_process_lpi(&s->gicv3->cpu[rdbase], pIntid, 1);
321         }
322 
323         if (cmd == DISCARD) {
324             IteEntry ite = {};
325             /* remove mapping from interrupt translation table */
326             result = update_ite(s, eventid, dte, ite);
327         }
328     }
329 
330     return result;
331 }
332 
333 static bool process_mapti(GICv3ITSState *s, uint64_t value, uint32_t offset,
334                           bool ignore_pInt)
335 {
336     AddressSpace *as = &s->gicv3->dma_as;
337     uint32_t devid, eventid;
338     uint32_t pIntid = 0;
339     uint32_t max_eventid, max_Intid;
340     bool dte_valid;
341     MemTxResult res = MEMTX_OK;
342     uint16_t icid = 0;
343     uint64_t dte = 0;
344     bool result = false;
345 
346     devid = ((value & DEVID_MASK) >> DEVID_SHIFT);
347     offset += NUM_BYTES_IN_DW;
348     value = address_space_ldq_le(as, s->cq.base_addr + offset,
349                                  MEMTXATTRS_UNSPECIFIED, &res);
350 
351     if (res != MEMTX_OK) {
352         return result;
353     }
354 
355     eventid = (value & EVENTID_MASK);
356 
357     if (!ignore_pInt) {
358         pIntid = ((value & pINTID_MASK) >> pINTID_SHIFT);
359     }
360 
361     offset += NUM_BYTES_IN_DW;
362     value = address_space_ldq_le(as, s->cq.base_addr + offset,
363                                  MEMTXATTRS_UNSPECIFIED, &res);
364 
365     if (res != MEMTX_OK) {
366         return result;
367     }
368 
369     icid = value & ICID_MASK;
370 
371     dte = get_dte(s, devid, &res);
372 
373     if (res != MEMTX_OK) {
374         return result;
375     }
376     dte_valid = dte & TABLE_ENTRY_VALID_MASK;
377 
378     max_eventid = (1UL << (((dte >> 1U) & SIZE_MASK) + 1));
379 
380     if (!ignore_pInt) {
381         max_Intid = (1ULL << (GICD_TYPER_IDBITS + 1)) - 1;
382     }
383 
384     if ((devid > s->dt.max_ids) || (icid > s->ct.max_ids)
385             || !dte_valid || (eventid > max_eventid) ||
386             (!ignore_pInt && (((pIntid < GICV3_LPI_INTID_START) ||
387             (pIntid > max_Intid)) && (pIntid != INTID_SPURIOUS)))) {
388         qemu_log_mask(LOG_GUEST_ERROR,
389                       "%s: invalid command attributes "
390                       "devid %d or icid %d or eventid %d or pIntid %d or"
391                       "unmapped dte %d\n", __func__, devid, icid, eventid,
392                       pIntid, dte_valid);
393         /*
394          * in this implementation, in case of error
395          * we ignore this command and move onto the next
396          * command in the queue
397          */
398     } else {
399         /* add ite entry to interrupt translation table */
400         IteEntry ite = {};
401         ite.itel = FIELD_DP64(ite.itel, ITE_L, VALID, dte_valid);
402         ite.itel = FIELD_DP64(ite.itel, ITE_L, INTTYPE, ITE_INTTYPE_PHYSICAL);
403         if (ignore_pInt) {
404             ite.itel = FIELD_DP64(ite.itel, ITE_L, INTID, eventid);
405         } else {
406             ite.itel = FIELD_DP64(ite.itel, ITE_L, INTID, pIntid);
407         }
408         ite.itel = FIELD_DP64(ite.itel, ITE_L, DOORBELL, INTID_SPURIOUS);
409         ite.iteh = FIELD_DP32(ite.iteh, ITE_H, ICID, icid);
410 
411         result = update_ite(s, eventid, dte, ite);
412     }
413 
414     return result;
415 }
416 
417 static bool update_cte(GICv3ITSState *s, uint16_t icid, bool valid,
418                        uint64_t rdbase)
419 {
420     AddressSpace *as = &s->gicv3->dma_as;
421     uint64_t value;
422     uint64_t l2t_addr;
423     bool valid_l2t;
424     uint32_t l2t_id;
425     uint32_t max_l2_entries;
426     uint64_t cte = 0;
427     MemTxResult res = MEMTX_OK;
428 
429     if (!s->ct.valid) {
430         return true;
431     }
432 
433     if (valid) {
434         /* add mapping entry to collection table */
435         cte = (valid & TABLE_ENTRY_VALID_MASK) | (rdbase << 1ULL);
436     }
437 
438     /*
439      * The specification defines the format of level 1 entries of a
440      * 2-level table, but the format of level 2 entries and the format
441      * of flat-mapped tables is IMPDEF.
442      */
443     if (s->ct.indirect) {
444         l2t_id = icid / (s->ct.page_sz / L1TABLE_ENTRY_SIZE);
445 
446         value = address_space_ldq_le(as,
447                                      s->ct.base_addr +
448                                      (l2t_id * L1TABLE_ENTRY_SIZE),
449                                      MEMTXATTRS_UNSPECIFIED, &res);
450 
451         if (res != MEMTX_OK) {
452             return false;
453         }
454 
455         valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;
456 
457         if (valid_l2t) {
458             max_l2_entries = s->ct.page_sz / s->ct.entry_sz;
459 
460             l2t_addr = value & ((1ULL << 51) - 1);
461 
462             address_space_stq_le(as, l2t_addr +
463                                  ((icid % max_l2_entries) * GITS_CTE_SIZE),
464                                  cte, MEMTXATTRS_UNSPECIFIED, &res);
465         }
466     } else {
467         /* Flat level table */
468         address_space_stq_le(as, s->ct.base_addr + (icid * GITS_CTE_SIZE),
469                              cte, MEMTXATTRS_UNSPECIFIED, &res);
470     }
471     if (res != MEMTX_OK) {
472         return false;
473     } else {
474         return true;
475     }
476 }
477 
478 static bool process_mapc(GICv3ITSState *s, uint32_t offset)
479 {
480     AddressSpace *as = &s->gicv3->dma_as;
481     uint16_t icid;
482     uint64_t rdbase;
483     bool valid;
484     MemTxResult res = MEMTX_OK;
485     bool result = false;
486     uint64_t value;
487 
488     offset += NUM_BYTES_IN_DW;
489     offset += NUM_BYTES_IN_DW;
490 
491     value = address_space_ldq_le(as, s->cq.base_addr + offset,
492                                  MEMTXATTRS_UNSPECIFIED, &res);
493 
494     if (res != MEMTX_OK) {
495         return result;
496     }
497 
498     icid = value & ICID_MASK;
499 
500     rdbase = (value & R_MAPC_RDBASE_MASK) >> R_MAPC_RDBASE_SHIFT;
501     rdbase &= RDBASE_PROCNUM_MASK;
502 
503     valid = (value & CMD_FIELD_VALID_MASK);
504 
505     if ((icid > s->ct.max_ids) || (rdbase >= s->gicv3->num_cpu)) {
506         qemu_log_mask(LOG_GUEST_ERROR,
507                       "ITS MAPC: invalid collection table attributes "
508                       "icid %d rdbase %" PRIu64 "\n",  icid, rdbase);
509         /*
510          * in this implementation, in case of error
511          * we ignore this command and move onto the next
512          * command in the queue
513          */
514     } else {
515         result = update_cte(s, icid, valid, rdbase);
516     }
517 
518     return result;
519 }
520 
521 static bool update_dte(GICv3ITSState *s, uint32_t devid, bool valid,
522                        uint8_t size, uint64_t itt_addr)
523 {
524     AddressSpace *as = &s->gicv3->dma_as;
525     uint64_t value;
526     uint64_t l2t_addr;
527     bool valid_l2t;
528     uint32_t l2t_id;
529     uint32_t max_l2_entries;
530     uint64_t dte = 0;
531     MemTxResult res = MEMTX_OK;
532 
533     if (s->dt.valid) {
534         if (valid) {
535             /* add mapping entry to device table */
536             dte = (valid & TABLE_ENTRY_VALID_MASK) |
537                   ((size & SIZE_MASK) << 1U) |
538                   (itt_addr << GITS_DTE_ITTADDR_SHIFT);
539         }
540     } else {
541         return true;
542     }
543 
544     /*
545      * The specification defines the format of level 1 entries of a
546      * 2-level table, but the format of level 2 entries and the format
547      * of flat-mapped tables is IMPDEF.
548      */
549     if (s->dt.indirect) {
550         l2t_id = devid / (s->dt.page_sz / L1TABLE_ENTRY_SIZE);
551 
552         value = address_space_ldq_le(as,
553                                      s->dt.base_addr +
554                                      (l2t_id * L1TABLE_ENTRY_SIZE),
555                                      MEMTXATTRS_UNSPECIFIED, &res);
556 
557         if (res != MEMTX_OK) {
558             return false;
559         }
560 
561         valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;
562 
563         if (valid_l2t) {
564             max_l2_entries = s->dt.page_sz / s->dt.entry_sz;
565 
566             l2t_addr = value & ((1ULL << 51) - 1);
567 
568             address_space_stq_le(as, l2t_addr +
569                                  ((devid % max_l2_entries) * GITS_DTE_SIZE),
570                                  dte, MEMTXATTRS_UNSPECIFIED, &res);
571         }
572     } else {
573         /* Flat level table */
574         address_space_stq_le(as, s->dt.base_addr + (devid * GITS_DTE_SIZE),
575                              dte, MEMTXATTRS_UNSPECIFIED, &res);
576     }
577     if (res != MEMTX_OK) {
578         return false;
579     } else {
580         return true;
581     }
582 }
583 
584 static bool process_mapd(GICv3ITSState *s, uint64_t value, uint32_t offset)
585 {
586     AddressSpace *as = &s->gicv3->dma_as;
587     uint32_t devid;
588     uint8_t size;
589     uint64_t itt_addr;
590     bool valid;
591     MemTxResult res = MEMTX_OK;
592     bool result = false;
593 
594     devid = ((value & DEVID_MASK) >> DEVID_SHIFT);
595 
596     offset += NUM_BYTES_IN_DW;
597     value = address_space_ldq_le(as, s->cq.base_addr + offset,
598                                  MEMTXATTRS_UNSPECIFIED, &res);
599 
600     if (res != MEMTX_OK) {
601         return result;
602     }
603 
604     size = (value & SIZE_MASK);
605 
606     offset += NUM_BYTES_IN_DW;
607     value = address_space_ldq_le(as, s->cq.base_addr + offset,
608                                  MEMTXATTRS_UNSPECIFIED, &res);
609 
610     if (res != MEMTX_OK) {
611         return result;
612     }
613 
614     itt_addr = (value & ITTADDR_MASK) >> ITTADDR_SHIFT;
615 
616     valid = (value & CMD_FIELD_VALID_MASK);
617 
618     if ((devid > s->dt.max_ids) ||
619         (size > FIELD_EX64(s->typer, GITS_TYPER, IDBITS))) {
620         qemu_log_mask(LOG_GUEST_ERROR,
621                       "ITS MAPD: invalid device table attributes "
622                       "devid %d or size %d\n", devid, size);
623         /*
624          * in this implementation, in case of error
625          * we ignore this command and move onto the next
626          * command in the queue
627          */
628     } else {
629         result = update_dte(s, devid, valid, size, itt_addr);
630     }
631 
632     return result;
633 }
634 
635 /*
636  * Current implementation blocks until all
637  * commands are processed
638  */
639 static void process_cmdq(GICv3ITSState *s)
640 {
641     uint32_t wr_offset = 0;
642     uint32_t rd_offset = 0;
643     uint32_t cq_offset = 0;
644     uint64_t data;
645     AddressSpace *as = &s->gicv3->dma_as;
646     MemTxResult res = MEMTX_OK;
647     bool result = true;
648     uint8_t cmd;
649     int i;
650 
651     if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
652         return;
653     }
654 
655     wr_offset = FIELD_EX64(s->cwriter, GITS_CWRITER, OFFSET);
656 
657     if (wr_offset > s->cq.max_entries) {
658         qemu_log_mask(LOG_GUEST_ERROR,
659                       "%s: invalid write offset "
660                       "%d\n", __func__, wr_offset);
661         return;
662     }
663 
664     rd_offset = FIELD_EX64(s->creadr, GITS_CREADR, OFFSET);
665 
666     if (rd_offset > s->cq.max_entries) {
667         qemu_log_mask(LOG_GUEST_ERROR,
668                       "%s: invalid read offset "
669                       "%d\n", __func__, rd_offset);
670         return;
671     }
672 
673     while (wr_offset != rd_offset) {
674         cq_offset = (rd_offset * GITS_CMDQ_ENTRY_SIZE);
675         data = address_space_ldq_le(as, s->cq.base_addr + cq_offset,
676                                     MEMTXATTRS_UNSPECIFIED, &res);
677         if (res != MEMTX_OK) {
678             result = false;
679         }
680         cmd = (data & CMD_MASK);
681 
682         switch (cmd) {
683         case GITS_CMD_INT:
684             res = process_its_cmd(s, data, cq_offset, INTERRUPT);
685             break;
686         case GITS_CMD_CLEAR:
687             res = process_its_cmd(s, data, cq_offset, CLEAR);
688             break;
689         case GITS_CMD_SYNC:
690             /*
691              * Current implementation makes a blocking synchronous call
692              * for every command issued earlier, hence the internal state
693              * is already consistent by the time SYNC command is executed.
694              * Hence no further processing is required for SYNC command.
695              */
696             break;
697         case GITS_CMD_MAPD:
698             result = process_mapd(s, data, cq_offset);
699             break;
700         case GITS_CMD_MAPC:
701             result = process_mapc(s, cq_offset);
702             break;
703         case GITS_CMD_MAPTI:
704             result = process_mapti(s, data, cq_offset, false);
705             break;
706         case GITS_CMD_MAPI:
707             result = process_mapti(s, data, cq_offset, true);
708             break;
709         case GITS_CMD_DISCARD:
710             result = process_its_cmd(s, data, cq_offset, DISCARD);
711             break;
712         case GITS_CMD_INV:
713         case GITS_CMD_INVALL:
714             /*
715              * Current implementation doesn't cache any ITS tables,
716              * but the calculated lpi priority information. We only
717              * need to trigger lpi priority re-calculation to be in
718              * sync with LPI config table or pending table changes.
719              */
720             for (i = 0; i < s->gicv3->num_cpu; i++) {
721                 gicv3_redist_update_lpi(&s->gicv3->cpu[i]);
722             }
723             break;
724         default:
725             break;
726         }
727         if (result) {
728             rd_offset++;
729             rd_offset %= s->cq.max_entries;
730             s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, OFFSET, rd_offset);
731         } else {
732             /*
733              * in this implementation, in case of dma read/write error
734              * we stall the command processing
735              */
736             s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
737             qemu_log_mask(LOG_GUEST_ERROR,
738                           "%s: %x cmd processing failed\n", __func__, cmd);
739             break;
740         }
741     }
742 }
743 
/*
 * This function extracts the ITS Device and Collection table specific
 * parameters (like base_addr, size etc) from GITS_BASER register.
 * It is called during ITS enable and also during post_load migration
 */
static void extract_table_params(GICv3ITSState *s)
{
    uint16_t num_pages = 0;
    uint8_t  page_sz_type;
    uint8_t type;
    uint32_t page_sz = 0;
    uint64_t value;

    /* One GITS_BASER<n> register per potential table */
    for (int i = 0; i < 8; i++) {
        TableDesc *td;
        int idbits;

        value = s->baser[i];

        /* An all-zero register describes no table at all */
        if (!value) {
            continue;
        }

        page_sz_type = FIELD_EX64(value, GITS_BASER, PAGESIZE);

        switch (page_sz_type) {
        case 0:
            page_sz = GITS_PAGE_SIZE_4K;
            break;

        case 1:
            page_sz = GITS_PAGE_SIZE_16K;
            break;

        /* Encodings 2 and 3 both select 64K pages */
        case 2:
        case 3:
            page_sz = GITS_PAGE_SIZE_64K;
            break;

        default:
            g_assert_not_reached();
        }

        /* SIZE field holds (number of pages - 1) */
        num_pages = FIELD_EX64(value, GITS_BASER, SIZE) + 1;

        type = FIELD_EX64(value, GITS_BASER, TYPE);

        switch (type) {
        case GITS_BASER_TYPE_DEVICE:
            td = &s->dt;
            idbits = FIELD_EX64(s->typer, GITS_TYPER, DEVBITS) + 1;
            break;
        case GITS_BASER_TYPE_COLLECTION:
            td = &s->ct;
            if (FIELD_EX64(s->typer, GITS_TYPER, CIL)) {
                idbits = FIELD_EX64(s->typer, GITS_TYPER, CIDBITS) + 1;
            } else {
                /* 16-bit CollectionId supported when CIL == 0 */
                idbits = 16;
            }
            break;
        default:
            /*
             * GITS_BASER<n>.TYPE is read-only, so GITS_BASER_RO_MASK
             * ensures we will only see type values corresponding to
             * the values set up in gicv3_its_reset().
             */
            g_assert_not_reached();
        }

        memset(td, 0, sizeof(*td));
        td->valid = FIELD_EX64(value, GITS_BASER, VALID);
        /*
         * If GITS_BASER<n>.Valid is 0 for any <n> then we will not process
         * interrupts. (GITS_TYPER.HCC is 0 for this implementation, so we
         * do not have a special case where the GITS_BASER<n>.Valid bit is 0
         * for the register corresponding to the Collection table but we
         * still have to process interrupts using non-memory-backed
         * Collection table entries.)
         */
        if (!td->valid) {
            continue;
        }
        td->page_sz = page_sz;
        td->indirect = FIELD_EX64(value, GITS_BASER, INDIRECT);
        td->entry_sz = FIELD_EX64(value, GITS_BASER, ENTRYSIZE) + 1;
        td->base_addr = baser_base_addr(value, page_sz);
        if (!td->indirect) {
            /* Flat table: entries fill the whole allocation */
            td->max_entries = (num_pages * page_sz) / td->entry_sz;
        } else {
            /* 2-level: each L1 entry points at one L2 page of entries */
            td->max_entries = (((num_pages * page_sz) /
                                  L1TABLE_ENTRY_SIZE) *
                                 (page_sz / td->entry_sz));
        }
        /*
         * NOTE(review): 1ULL << idbits can exceed 32 bits for DEVBITS-sized
         * tables; confirm td->max_ids is wide enough to hold the result.
         */
        td->max_ids = 1ULL << idbits;
    }
}
841 
842 static void extract_cmdq_params(GICv3ITSState *s)
843 {
844     uint16_t num_pages = 0;
845     uint64_t value = s->cbaser;
846 
847     num_pages = FIELD_EX64(value, GITS_CBASER, SIZE) + 1;
848 
849     memset(&s->cq, 0 , sizeof(s->cq));
850     s->cq.valid = FIELD_EX64(value, GITS_CBASER, VALID);
851 
852     if (s->cq.valid) {
853         s->cq.max_entries = (num_pages * GITS_PAGE_SIZE_4K) /
854                              GITS_CMDQ_ENTRY_SIZE;
855         s->cq.base_addr = FIELD_EX64(value, GITS_CBASER, PHYADDR);
856         s->cq.base_addr <<= R_GITS_CBASER_PHYADDR_SHIFT;
857     }
858 }
859 
860 static MemTxResult gicv3_its_translation_write(void *opaque, hwaddr offset,
861                                                uint64_t data, unsigned size,
862                                                MemTxAttrs attrs)
863 {
864     GICv3ITSState *s = (GICv3ITSState *)opaque;
865     bool result = true;
866     uint32_t devid = 0;
867 
868     switch (offset) {
869     case GITS_TRANSLATER:
870         if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
871             devid = attrs.requester_id;
872             result = process_its_cmd(s, data, devid, NONE);
873         }
874         break;
875     default:
876         break;
877     }
878 
879     if (result) {
880         return MEMTX_OK;
881     } else {
882         return MEMTX_ERROR;
883     }
884 }
885 
886 static bool its_writel(GICv3ITSState *s, hwaddr offset,
887                               uint64_t value, MemTxAttrs attrs)
888 {
889     bool result = true;
890     int index;
891 
892     switch (offset) {
893     case GITS_CTLR:
894         if (value & R_GITS_CTLR_ENABLED_MASK) {
895             s->ctlr |= R_GITS_CTLR_ENABLED_MASK;
896             extract_table_params(s);
897             extract_cmdq_params(s);
898             s->creadr = 0;
899             process_cmdq(s);
900         } else {
901             s->ctlr &= ~R_GITS_CTLR_ENABLED_MASK;
902         }
903         break;
904     case GITS_CBASER:
905         /*
906          * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
907          *                 already enabled
908          */
909         if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
910             s->cbaser = deposit64(s->cbaser, 0, 32, value);
911             s->creadr = 0;
912             s->cwriter = s->creadr;
913         }
914         break;
915     case GITS_CBASER + 4:
916         /*
917          * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
918          *                 already enabled
919          */
920         if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
921             s->cbaser = deposit64(s->cbaser, 32, 32, value);
922             s->creadr = 0;
923             s->cwriter = s->creadr;
924         }
925         break;
926     case GITS_CWRITER:
927         s->cwriter = deposit64(s->cwriter, 0, 32,
928                                (value & ~R_GITS_CWRITER_RETRY_MASK));
929         if (s->cwriter != s->creadr) {
930             process_cmdq(s);
931         }
932         break;
933     case GITS_CWRITER + 4:
934         s->cwriter = deposit64(s->cwriter, 32, 32, value);
935         break;
936     case GITS_CREADR:
937         if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
938             s->creadr = deposit64(s->creadr, 0, 32,
939                                   (value & ~R_GITS_CREADR_STALLED_MASK));
940         } else {
941             /* RO register, ignore the write */
942             qemu_log_mask(LOG_GUEST_ERROR,
943                           "%s: invalid guest write to RO register at offset "
944                           TARGET_FMT_plx "\n", __func__, offset);
945         }
946         break;
947     case GITS_CREADR + 4:
948         if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
949             s->creadr = deposit64(s->creadr, 32, 32, value);
950         } else {
951             /* RO register, ignore the write */
952             qemu_log_mask(LOG_GUEST_ERROR,
953                           "%s: invalid guest write to RO register at offset "
954                           TARGET_FMT_plx "\n", __func__, offset);
955         }
956         break;
957     case GITS_BASER ... GITS_BASER + 0x3f:
958         /*
959          * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
960          *                 already enabled
961          */
962         if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
963             index = (offset - GITS_BASER) / 8;
964 
965             if (offset & 7) {
966                 value <<= 32;
967                 value &= ~GITS_BASER_RO_MASK;
968                 s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(0, 32);
969                 s->baser[index] |= value;
970             } else {
971                 value &= ~GITS_BASER_RO_MASK;
972                 s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(32, 32);
973                 s->baser[index] |= value;
974             }
975         }
976         break;
977     case GITS_IIDR:
978     case GITS_IDREGS ... GITS_IDREGS + 0x2f:
979         /* RO registers, ignore the write */
980         qemu_log_mask(LOG_GUEST_ERROR,
981                       "%s: invalid guest write to RO register at offset "
982                       TARGET_FMT_plx "\n", __func__, offset);
983         break;
984     default:
985         result = false;
986         break;
987     }
988     return result;
989 }
990 
991 static bool its_readl(GICv3ITSState *s, hwaddr offset,
992                              uint64_t *data, MemTxAttrs attrs)
993 {
994     bool result = true;
995     int index;
996 
997     switch (offset) {
998     case GITS_CTLR:
999         *data = s->ctlr;
1000         break;
1001     case GITS_IIDR:
1002         *data = gicv3_iidr();
1003         break;
1004     case GITS_IDREGS ... GITS_IDREGS + 0x2f:
1005         /* ID registers */
1006         *data = gicv3_idreg(offset - GITS_IDREGS);
1007         break;
1008     case GITS_TYPER:
1009         *data = extract64(s->typer, 0, 32);
1010         break;
1011     case GITS_TYPER + 4:
1012         *data = extract64(s->typer, 32, 32);
1013         break;
1014     case GITS_CBASER:
1015         *data = extract64(s->cbaser, 0, 32);
1016         break;
1017     case GITS_CBASER + 4:
1018         *data = extract64(s->cbaser, 32, 32);
1019         break;
1020     case GITS_CREADR:
1021         *data = extract64(s->creadr, 0, 32);
1022         break;
1023     case GITS_CREADR + 4:
1024         *data = extract64(s->creadr, 32, 32);
1025         break;
1026     case GITS_CWRITER:
1027         *data = extract64(s->cwriter, 0, 32);
1028         break;
1029     case GITS_CWRITER + 4:
1030         *data = extract64(s->cwriter, 32, 32);
1031         break;
1032     case GITS_BASER ... GITS_BASER + 0x3f:
1033         index = (offset - GITS_BASER) / 8;
1034         if (offset & 7) {
1035             *data = extract64(s->baser[index], 32, 32);
1036         } else {
1037             *data = extract64(s->baser[index], 0, 32);
1038         }
1039         break;
1040     default:
1041         result = false;
1042         break;
1043     }
1044     return result;
1045 }
1046 
1047 static bool its_writell(GICv3ITSState *s, hwaddr offset,
1048                                uint64_t value, MemTxAttrs attrs)
1049 {
1050     bool result = true;
1051     int index;
1052 
1053     switch (offset) {
1054     case GITS_BASER ... GITS_BASER + 0x3f:
1055         /*
1056          * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
1057          *                 already enabled
1058          */
1059         if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
1060             index = (offset - GITS_BASER) / 8;
1061             s->baser[index] &= GITS_BASER_RO_MASK;
1062             s->baser[index] |= (value & ~GITS_BASER_RO_MASK);
1063         }
1064         break;
1065     case GITS_CBASER:
1066         /*
1067          * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
1068          *                 already enabled
1069          */
1070         if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
1071             s->cbaser = value;
1072             s->creadr = 0;
1073             s->cwriter = s->creadr;
1074         }
1075         break;
1076     case GITS_CWRITER:
1077         s->cwriter = value & ~R_GITS_CWRITER_RETRY_MASK;
1078         if (s->cwriter != s->creadr) {
1079             process_cmdq(s);
1080         }
1081         break;
1082     case GITS_CREADR:
1083         if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
1084             s->creadr = value & ~R_GITS_CREADR_STALLED_MASK;
1085         } else {
1086             /* RO register, ignore the write */
1087             qemu_log_mask(LOG_GUEST_ERROR,
1088                           "%s: invalid guest write to RO register at offset "
1089                           TARGET_FMT_plx "\n", __func__, offset);
1090         }
1091         break;
1092     case GITS_TYPER:
1093         /* RO registers, ignore the write */
1094         qemu_log_mask(LOG_GUEST_ERROR,
1095                       "%s: invalid guest write to RO register at offset "
1096                       TARGET_FMT_plx "\n", __func__, offset);
1097         break;
1098     default:
1099         result = false;
1100         break;
1101     }
1102     return result;
1103 }
1104 
1105 static bool its_readll(GICv3ITSState *s, hwaddr offset,
1106                               uint64_t *data, MemTxAttrs attrs)
1107 {
1108     bool result = true;
1109     int index;
1110 
1111     switch (offset) {
1112     case GITS_TYPER:
1113         *data = s->typer;
1114         break;
1115     case GITS_BASER ... GITS_BASER + 0x3f:
1116         index = (offset - GITS_BASER) / 8;
1117         *data = s->baser[index];
1118         break;
1119     case GITS_CBASER:
1120         *data = s->cbaser;
1121         break;
1122     case GITS_CREADR:
1123         *data = s->creadr;
1124         break;
1125     case GITS_CWRITER:
1126         *data = s->cwriter;
1127         break;
1128     default:
1129         result = false;
1130         break;
1131     }
1132     return result;
1133 }
1134 
1135 static MemTxResult gicv3_its_read(void *opaque, hwaddr offset, uint64_t *data,
1136                                   unsigned size, MemTxAttrs attrs)
1137 {
1138     GICv3ITSState *s = (GICv3ITSState *)opaque;
1139     bool result;
1140 
1141     switch (size) {
1142     case 4:
1143         result = its_readl(s, offset, data, attrs);
1144         break;
1145     case 8:
1146         result = its_readll(s, offset, data, attrs);
1147         break;
1148     default:
1149         result = false;
1150         break;
1151     }
1152 
1153     if (!result) {
1154         qemu_log_mask(LOG_GUEST_ERROR,
1155                       "%s: invalid guest read at offset " TARGET_FMT_plx
1156                       "size %u\n", __func__, offset, size);
1157         /*
1158          * The spec requires that reserved registers are RAZ/WI;
1159          * so use false returns from leaf functions as a way to
1160          * trigger the guest-error logging but don't return it to
1161          * the caller, or we'll cause a spurious guest data abort.
1162          */
1163         *data = 0;
1164     }
1165     return MEMTX_OK;
1166 }
1167 
1168 static MemTxResult gicv3_its_write(void *opaque, hwaddr offset, uint64_t data,
1169                                    unsigned size, MemTxAttrs attrs)
1170 {
1171     GICv3ITSState *s = (GICv3ITSState *)opaque;
1172     bool result;
1173 
1174     switch (size) {
1175     case 4:
1176         result = its_writel(s, offset, data, attrs);
1177         break;
1178     case 8:
1179         result = its_writell(s, offset, data, attrs);
1180         break;
1181     default:
1182         result = false;
1183         break;
1184     }
1185 
1186     if (!result) {
1187         qemu_log_mask(LOG_GUEST_ERROR,
1188                       "%s: invalid guest write at offset " TARGET_FMT_plx
1189                       "size %u\n", __func__, offset, size);
1190         /*
1191          * The spec requires that reserved registers are RAZ/WI;
1192          * so use false returns from leaf functions as a way to
1193          * trigger the guest-error logging but don't return it to
1194          * the caller, or we'll cause a spurious guest data abort.
1195          */
1196     }
1197     return MEMTX_OK;
1198 }
1199 
/*
 * Ops for the GITS_ control register frame: 32-bit and 64-bit accesses
 * only, dispatched with transaction attributes so the handlers can see
 * the requester.  Reserved/invalid offsets are RAZ/WI (see the
 * dispatchers above).
 */
static const MemoryRegionOps gicv3_its_control_ops = {
    .read_with_attrs = gicv3_its_read,
    .write_with_attrs = gicv3_its_write,
    .valid.min_access_size = 4,
    .valid.max_access_size = 8,
    .impl.min_access_size = 4,
    .impl.max_access_size = 8,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
1209 
/*
 * Ops for the GITS_TRANSLATER frame: write-only (16-bit or 32-bit
 * writes trigger LPI translation); no read handler is provided, so
 * reads fall back to the memory core's default behaviour.
 */
static const MemoryRegionOps gicv3_its_translation_ops = {
    .write_with_attrs = gicv3_its_translation_write,
    .valid.min_access_size = 2,
    .valid.max_access_size = 4,
    .impl.min_access_size = 2,
    .impl.max_access_size = 4,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
1218 
1219 static void gicv3_arm_its_realize(DeviceState *dev, Error **errp)
1220 {
1221     GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
1222     int i;
1223 
1224     for (i = 0; i < s->gicv3->num_cpu; i++) {
1225         if (!(s->gicv3->cpu[i].gicr_typer & GICR_TYPER_PLPIS)) {
1226             error_setg(errp, "Physical LPI not supported by CPU %d", i);
1227             return;
1228         }
1229     }
1230 
1231     gicv3_its_init_mmio(s, &gicv3_its_control_ops, &gicv3_its_translation_ops);
1232 
1233     address_space_init(&s->gicv3->dma_as, s->gicv3->dma,
1234                        "gicv3-its-sysmem");
1235 
1236     /* set the ITS default features supported */
1237     s->typer = FIELD_DP64(s->typer, GITS_TYPER, PHYSICAL, 1);
1238     s->typer = FIELD_DP64(s->typer, GITS_TYPER, ITT_ENTRY_SIZE,
1239                           ITS_ITT_ENTRY_SIZE - 1);
1240     s->typer = FIELD_DP64(s->typer, GITS_TYPER, IDBITS, ITS_IDBITS);
1241     s->typer = FIELD_DP64(s->typer, GITS_TYPER, DEVBITS, ITS_DEVBITS);
1242     s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIL, 1);
1243     s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIDBITS, ITS_CIDBITS);
1244 }
1245 
1246 static void gicv3_its_reset(DeviceState *dev)
1247 {
1248     GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
1249     GICv3ITSClass *c = ARM_GICV3_ITS_GET_CLASS(s);
1250 
1251     c->parent_reset(dev);
1252 
1253     /* Quiescent bit reset to 1 */
1254     s->ctlr = FIELD_DP32(s->ctlr, GITS_CTLR, QUIESCENT, 1);
1255 
1256     /*
1257      * setting GITS_BASER0.Type = 0b001 (Device)
1258      *         GITS_BASER1.Type = 0b100 (Collection Table)
1259      *         GITS_BASER<n>.Type,where n = 3 to 7 are 0b00 (Unimplemented)
1260      *         GITS_BASER<0,1>.Page_Size = 64KB
1261      * and default translation table entry size to 16 bytes
1262      */
1263     s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, TYPE,
1264                              GITS_BASER_TYPE_DEVICE);
1265     s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, PAGESIZE,
1266                              GITS_BASER_PAGESIZE_64K);
1267     s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, ENTRYSIZE,
1268                              GITS_DTE_SIZE - 1);
1269 
1270     s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, TYPE,
1271                              GITS_BASER_TYPE_COLLECTION);
1272     s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, PAGESIZE,
1273                              GITS_BASER_PAGESIZE_64K);
1274     s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, ENTRYSIZE,
1275                              GITS_CTE_SIZE - 1);
1276 }
1277 
1278 static void gicv3_its_post_load(GICv3ITSState *s)
1279 {
1280     if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
1281         extract_table_params(s);
1282         extract_cmdq_params(s);
1283     }
1284 }
1285 
static Property gicv3_its_props[] = {
    /* Mandatory link to the GICv3 distributor this ITS belongs to */
    DEFINE_PROP_LINK("parent-gicv3", GICv3ITSState, gicv3, "arm-gicv3",
                     GICv3State *),
    DEFINE_PROP_END_OF_LIST(),
};
1291 
1292 static void gicv3_its_class_init(ObjectClass *klass, void *data)
1293 {
1294     DeviceClass *dc = DEVICE_CLASS(klass);
1295     GICv3ITSClass *ic = ARM_GICV3_ITS_CLASS(klass);
1296     GICv3ITSCommonClass *icc = ARM_GICV3_ITS_COMMON_CLASS(klass);
1297 
1298     dc->realize = gicv3_arm_its_realize;
1299     device_class_set_props(dc, gicv3_its_props);
1300     device_class_set_parent_reset(dc, gicv3_its_reset, &ic->parent_reset);
1301     icc->post_load = gicv3_its_post_load;
1302 }
1303 
/* QOM type registration data for the emulated (non-KVM) GICv3 ITS */
static const TypeInfo gicv3_its_info = {
    .name = TYPE_ARM_GICV3_ITS,
    .parent = TYPE_ARM_GICV3_ITS_COMMON,
    .instance_size = sizeof(GICv3ITSState),
    .class_init = gicv3_its_class_init,
    .class_size = sizeof(GICv3ITSClass),
};
1311 
/* Register the ITS type with QOM at module-init time */
static void gicv3_its_register_types(void)
{
    type_register_static(&gicv3_its_info);
}

type_init(gicv3_its_register_types)
1318