xref: /openbmc/qemu/hw/intc/arm_gicv3_its.c (revision e5487a413904973ca77999c904be8949da2e8f31)
1 /*
2  * ITS emulation for a GICv3-based system
3  *
4  * Copyright Linaro.org 2021
5  *
6  * Authors:
7  *  Shashi Mallela <shashi.mallela@linaro.org>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2 or (at your
10  * option) any later version.  See the COPYING file in the top-level directory.
11  *
12  */
13 
14 #include "qemu/osdep.h"
15 #include "qemu/log.h"
16 #include "hw/qdev-properties.h"
17 #include "hw/intc/arm_gicv3_its_common.h"
18 #include "gicv3_internal.h"
19 #include "qom/object.h"
20 #include "qapi/error.h"
21 
typedef struct GICv3ITSClass GICv3ITSClass;
/* This is reusing the GICv3ITSState typedef from ARM_GICV3_ITS_COMMON */
DECLARE_OBJ_CHECKERS(GICv3ITSState, GICv3ITSClass,
                     ARM_GICV3_ITS, TYPE_ARM_GICV3_ITS)

/*
 * Class struct for the emulated ITS device: keeps a pointer to the
 * parent class's reset handler so this device's reset can chain to it.
 */
struct GICv3ITSClass {
    GICv3ITSCommonClass parent_class;
    /* parent class reset handler, saved so our reset can call it */
    void (*parent_reset)(DeviceState *dev);
};
31 
32 /*
33  * This is an internal enum used to distinguish between LPI triggered
34  * via command queue and LPI triggered via gits_translater write.
35  */
36 typedef enum ItsCmdType {
37     NONE = 0, /* internal indication for GITS_TRANSLATER write */
38     CLEAR = 1,
39     DISCARD = 2,
40     INTERRUPT = 3,
41 } ItsCmdType;
42 
/*
 * In-memory representation of an Interrupt Translation Entry (ITE):
 * a 64-bit low word (itel) and a 32-bit high word (iteh), as written to
 * the guest's ITT by update_ite() and read back by get_ite().
 */
typedef struct {
    uint32_t iteh;
    uint64_t itel;
} IteEntry;
47 
48 static uint64_t baser_base_addr(uint64_t value, uint32_t page_sz)
49 {
50     uint64_t result = 0;
51 
52     switch (page_sz) {
53     case GITS_PAGE_SIZE_4K:
54     case GITS_PAGE_SIZE_16K:
55         result = FIELD_EX64(value, GITS_BASER, PHYADDR) << 12;
56         break;
57 
58     case GITS_PAGE_SIZE_64K:
59         result = FIELD_EX64(value, GITS_BASER, PHYADDRL_64K) << 16;
60         result |= FIELD_EX64(value, GITS_BASER, PHYADDRH_64K) << 48;
61         break;
62 
63     default:
64         break;
65     }
66     return result;
67 }
68 
/*
 * Read the Collection Table Entry (CTE) for @icid from guest memory into
 * *@cte, handling both flat and two-level (indirect) collection tables.
 * Returns true if the entry's valid bit is set.
 *
 * NOTE(review): if the DMA read fails (*res != MEMTX_OK) or the L2 table
 * is invalid, *cte is left unwritten but is still dereferenced for the
 * return value — callers appear to initialise it to 0 first; confirm.
 */
static bool get_cte(GICv3ITSState *s, uint16_t icid, uint64_t *cte,
                    MemTxResult *res)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t l2t_addr;
    uint64_t value;
    bool valid_l2t;
    uint32_t l2t_id;
    uint32_t max_l2_entries;

    if (s->ct.indirect) {
        /* Two-level table: locate and read the level-1 entry first */
        l2t_id = icid / (s->ct.page_sz / L1TABLE_ENTRY_SIZE);

        value = address_space_ldq_le(as,
                                     s->ct.base_addr +
                                     (l2t_id * L1TABLE_ENTRY_SIZE),
                                     MEMTXATTRS_UNSPECIFIED, res);

        if (*res == MEMTX_OK) {
            valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;

            if (valid_l2t) {
                max_l2_entries = s->ct.page_sz / s->ct.entry_sz;

                /* L1 entry holds the physical address of the L2 page */
                l2t_addr = value & ((1ULL << 51) - 1);

                *cte =  address_space_ldq_le(as, l2t_addr +
                                    ((icid % max_l2_entries) * GITS_CTE_SIZE),
                                    MEMTXATTRS_UNSPECIFIED, res);
           }
       }
    } else {
        /* Flat level table */
        *cte =  address_space_ldq_le(as, s->ct.base_addr +
                                     (icid * GITS_CTE_SIZE),
                                      MEMTXATTRS_UNSPECIFIED, res);
    }

    return (*cte & TABLE_ENTRY_VALID_MASK) != 0;
}
109 
/*
 * Write the Interrupt Translation Entry @ite for @eventid into the ITT
 * whose base address is encoded in the device table entry @dte.
 * Each ITE slot is sizeof(uint64_t) + sizeof(uint32_t) = 12 bytes.
 * Returns true on success, false on a DMA write failure.
 */
static bool update_ite(GICv3ITSState *s, uint32_t eventid, uint64_t dte,
                       IteEntry ite)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t itt_addr;
    MemTxResult res = MEMTX_OK;

    itt_addr = (dte & GITS_DTE_ITTADDR_MASK) >> GITS_DTE_ITTADDR_SHIFT;
    itt_addr <<= ITTADDR_SHIFT; /* 256 byte aligned */

    address_space_stq_le(as, itt_addr + (eventid * (sizeof(uint64_t) +
                         sizeof(uint32_t))), ite.itel, MEMTXATTRS_UNSPECIFIED,
                         &res);

    if (res == MEMTX_OK) {
        /*
         * NOTE(review): iteh is stored at offset +sizeof(uint32_t) (+4),
         * which overlaps the upper half of the 8-byte itel word written
         * just above.  get_ite() reads it back from the same offset, so
         * the on-memory format is self-consistent, but the top 32 bits
         * of itel are clobbered — this looks like it was intended to be
         * +sizeof(uint64_t); confirm before changing, as both sides must
         * move together.
         */
        address_space_stl_le(as, itt_addr + (eventid * (sizeof(uint64_t) +
                             sizeof(uint32_t))) + sizeof(uint32_t), ite.iteh,
                             MEMTXATTRS_UNSPECIFIED, &res);
    }
    if (res != MEMTX_OK) {
        return false;
    } else {
        return true;
    }
}
135 
/*
 * Read the Interrupt Translation Entry for @eventid from the ITT encoded
 * in @dte.  On success, if the entry is valid and of physical interrupt
 * type, fill in *@icid and *@pIntid and return true; otherwise return
 * false.  *res reports any DMA read failure.
 */
static bool get_ite(GICv3ITSState *s, uint32_t eventid, uint64_t dte,
                    uint16_t *icid, uint32_t *pIntid, MemTxResult *res)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t itt_addr;
    bool status = false;
    IteEntry ite = {};

    itt_addr = (dte & GITS_DTE_ITTADDR_MASK) >> GITS_DTE_ITTADDR_SHIFT;
    itt_addr <<= ITTADDR_SHIFT; /* 256 byte aligned */

    /* Each ITE slot is sizeof(uint64_t) + sizeof(uint32_t) = 12 bytes */
    ite.itel = address_space_ldq_le(as, itt_addr +
                                    (eventid * (sizeof(uint64_t) +
                                    sizeof(uint32_t))), MEMTXATTRS_UNSPECIFIED,
                                    res);

    if (*res == MEMTX_OK) {
        /*
         * NOTE(review): iteh is read from offset +sizeof(uint32_t) (+4),
         * overlapping the upper half of the itel word above; this matches
         * the offset update_ite() writes to, so it is self-consistent,
         * but looks like it was intended to be +sizeof(uint64_t) —
         * confirm before changing (both sides must move together).
         */
        ite.iteh = address_space_ldl_le(as, itt_addr +
                                        (eventid * (sizeof(uint64_t) +
                                        sizeof(uint32_t))) + sizeof(uint32_t),
                                        MEMTXATTRS_UNSPECIFIED, res);

        if (*res == MEMTX_OK) {
            if (ite.itel & TABLE_ENTRY_VALID_MASK) {
                if ((ite.itel >> ITE_ENTRY_INTTYPE_SHIFT) &
                    GITS_TYPE_PHYSICAL) {
                    *pIntid = (ite.itel & ITE_ENTRY_INTID_MASK) >>
                               ITE_ENTRY_INTID_SHIFT;
                    *icid = ite.iteh & ITE_ENTRY_ICID_MASK;
                    status = true;
                }
            }
        }
    }
    return status;
}
172 
/*
 * Read the Device Table Entry (DTE) for @devid from guest memory,
 * handling both flat and two-level (indirect) device tables.  Returns
 * the raw DTE value; *res reports any DMA read failure.
 *
 * NOTE(review): in the indirect case, if the L2 table is invalid the
 * function falls through and returns the level-1 entry value instead of
 * a DTE — callers only test the low valid bit, but verify this is the
 * intended behavior.
 */
static uint64_t get_dte(GICv3ITSState *s, uint32_t devid, MemTxResult *res)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t l2t_addr;
    uint64_t value;
    bool valid_l2t;
    uint32_t l2t_id;
    uint32_t max_l2_entries;

    if (s->dt.indirect) {
        /* Two-level table: locate and read the level-1 entry first */
        l2t_id = devid / (s->dt.page_sz / L1TABLE_ENTRY_SIZE);

        value = address_space_ldq_le(as,
                                     s->dt.base_addr +
                                     (l2t_id * L1TABLE_ENTRY_SIZE),
                                     MEMTXATTRS_UNSPECIFIED, res);

        if (*res == MEMTX_OK) {
            valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;

            if (valid_l2t) {
                max_l2_entries = s->dt.page_sz / s->dt.entry_sz;

                /* L1 entry holds the physical address of the L2 page */
                l2t_addr = value & ((1ULL << 51) - 1);

                value =  address_space_ldq_le(as, l2t_addr +
                                   ((devid % max_l2_entries) * GITS_DTE_SIZE),
                                   MEMTXATTRS_UNSPECIFIED, res);
            }
        }
    } else {
        /* Flat level table */
        value = address_space_ldq_le(as, s->dt.base_addr +
                                     (devid * GITS_DTE_SIZE),
                                     MEMTXATTRS_UNSPECIFIED, res);
    }

    return value;
}
212 
213 /*
214  * This function handles the processing of following commands based on
215  * the ItsCmdType parameter passed:-
216  * 1. triggering of lpi interrupt translation via ITS INT command
217  * 2. triggering of lpi interrupt translation via gits_translater register
218  * 3. handling of ITS CLEAR command
219  * 4. handling of ITS DISCARD command
220  */
221 static bool process_its_cmd(GICv3ITSState *s, uint64_t value, uint32_t offset,
222                             ItsCmdType cmd)
223 {
224     AddressSpace *as = &s->gicv3->dma_as;
225     uint32_t devid, eventid;
226     MemTxResult res = MEMTX_OK;
227     bool dte_valid;
228     uint64_t dte = 0;
229     uint32_t max_eventid;
230     uint16_t icid = 0;
231     uint32_t pIntid = 0;
232     bool ite_valid = false;
233     uint64_t cte = 0;
234     bool cte_valid = false;
235     bool result = false;
236     uint64_t rdbase;
237 
238     if (cmd == NONE) {
239         devid = offset;
240     } else {
241         devid = ((value & DEVID_MASK) >> DEVID_SHIFT);
242 
243         offset += NUM_BYTES_IN_DW;
244         value = address_space_ldq_le(as, s->cq.base_addr + offset,
245                                      MEMTXATTRS_UNSPECIFIED, &res);
246     }
247 
248     if (res != MEMTX_OK) {
249         return result;
250     }
251 
252     eventid = (value & EVENTID_MASK);
253 
254     dte = get_dte(s, devid, &res);
255 
256     if (res != MEMTX_OK) {
257         return result;
258     }
259     dte_valid = dte & TABLE_ENTRY_VALID_MASK;
260 
261     if (dte_valid) {
262         max_eventid = (1UL << (((dte >> 1U) & SIZE_MASK) + 1));
263 
264         ite_valid = get_ite(s, eventid, dte, &icid, &pIntid, &res);
265 
266         if (res != MEMTX_OK) {
267             return result;
268         }
269 
270         if (ite_valid) {
271             cte_valid = get_cte(s, icid, &cte, &res);
272         }
273 
274         if (res != MEMTX_OK) {
275             return result;
276         }
277     } else {
278         qemu_log_mask(LOG_GUEST_ERROR,
279                       "%s: invalid command attributes: "
280                       "invalid dte: %"PRIx64" for %d (MEM_TX: %d)\n",
281                       __func__, dte, devid, res);
282         return result;
283     }
284 
285 
286     /*
287      * In this implementation, in case of guest errors we ignore the
288      * command and move onto the next command in the queue.
289      */
290     if (devid > s->dt.max_ids) {
291         qemu_log_mask(LOG_GUEST_ERROR,
292                       "%s: invalid command attributes: devid %d>%d",
293                       __func__, devid, s->dt.max_ids);
294 
295     } else if (!dte_valid || !ite_valid || !cte_valid) {
296         qemu_log_mask(LOG_GUEST_ERROR,
297                       "%s: invalid command attributes: "
298                       "dte: %s, ite: %s, cte: %s\n",
299                       __func__,
300                       dte_valid ? "valid" : "invalid",
301                       ite_valid ? "valid" : "invalid",
302                       cte_valid ? "valid" : "invalid");
303     } else if (eventid > max_eventid) {
304         qemu_log_mask(LOG_GUEST_ERROR,
305                       "%s: invalid command attributes: eventid %d > %d\n",
306                       __func__, eventid, max_eventid);
307     } else {
308         /*
309          * Current implementation only supports rdbase == procnum
310          * Hence rdbase physical address is ignored
311          */
312         rdbase = (cte & GITS_CTE_RDBASE_PROCNUM_MASK) >> 1U;
313 
314         if (rdbase >= s->gicv3->num_cpu) {
315             return result;
316         }
317 
318         if ((cmd == CLEAR) || (cmd == DISCARD)) {
319             gicv3_redist_process_lpi(&s->gicv3->cpu[rdbase], pIntid, 0);
320         } else {
321             gicv3_redist_process_lpi(&s->gicv3->cpu[rdbase], pIntid, 1);
322         }
323 
324         if (cmd == DISCARD) {
325             IteEntry ite = {};
326             /* remove mapping from interrupt translation table */
327             result = update_ite(s, eventid, dte, ite);
328         }
329     }
330 
331     return result;
332 }
333 
334 static bool process_mapti(GICv3ITSState *s, uint64_t value, uint32_t offset,
335                           bool ignore_pInt)
336 {
337     AddressSpace *as = &s->gicv3->dma_as;
338     uint32_t devid, eventid;
339     uint32_t pIntid = 0;
340     uint32_t max_eventid, max_Intid;
341     bool dte_valid;
342     MemTxResult res = MEMTX_OK;
343     uint16_t icid = 0;
344     uint64_t dte = 0;
345     IteEntry ite;
346     uint32_t int_spurious = INTID_SPURIOUS;
347     bool result = false;
348 
349     devid = ((value & DEVID_MASK) >> DEVID_SHIFT);
350     offset += NUM_BYTES_IN_DW;
351     value = address_space_ldq_le(as, s->cq.base_addr + offset,
352                                  MEMTXATTRS_UNSPECIFIED, &res);
353 
354     if (res != MEMTX_OK) {
355         return result;
356     }
357 
358     eventid = (value & EVENTID_MASK);
359 
360     if (!ignore_pInt) {
361         pIntid = ((value & pINTID_MASK) >> pINTID_SHIFT);
362     }
363 
364     offset += NUM_BYTES_IN_DW;
365     value = address_space_ldq_le(as, s->cq.base_addr + offset,
366                                  MEMTXATTRS_UNSPECIFIED, &res);
367 
368     if (res != MEMTX_OK) {
369         return result;
370     }
371 
372     icid = value & ICID_MASK;
373 
374     dte = get_dte(s, devid, &res);
375 
376     if (res != MEMTX_OK) {
377         return result;
378     }
379     dte_valid = dte & TABLE_ENTRY_VALID_MASK;
380 
381     max_eventid = (1UL << (((dte >> 1U) & SIZE_MASK) + 1));
382 
383     if (!ignore_pInt) {
384         max_Intid = (1ULL << (GICD_TYPER_IDBITS + 1)) - 1;
385     }
386 
387     if ((devid > s->dt.max_ids) || (icid > s->ct.max_ids)
388             || !dte_valid || (eventid > max_eventid) ||
389             (!ignore_pInt && (((pIntid < GICV3_LPI_INTID_START) ||
390             (pIntid > max_Intid)) && (pIntid != INTID_SPURIOUS)))) {
391         qemu_log_mask(LOG_GUEST_ERROR,
392                       "%s: invalid command attributes "
393                       "devid %d or icid %d or eventid %d or pIntid %d or"
394                       "unmapped dte %d\n", __func__, devid, icid, eventid,
395                       pIntid, dte_valid);
396         /*
397          * in this implementation, in case of error
398          * we ignore this command and move onto the next
399          * command in the queue
400          */
401     } else {
402         /* add ite entry to interrupt translation table */
403         ite.itel = (dte_valid & TABLE_ENTRY_VALID_MASK) |
404                     (GITS_TYPE_PHYSICAL << ITE_ENTRY_INTTYPE_SHIFT);
405 
406         if (ignore_pInt) {
407             ite.itel |= (eventid << ITE_ENTRY_INTID_SHIFT);
408         } else {
409             ite.itel |= (pIntid << ITE_ENTRY_INTID_SHIFT);
410         }
411         ite.itel |= (int_spurious << ITE_ENTRY_INTSP_SHIFT);
412         ite.iteh = icid;
413 
414         result = update_ite(s, eventid, dte, ite);
415     }
416 
417     return result;
418 }
419 
/*
 * Write a Collection Table Entry for @icid: map it to redistributor
 * @rdbase if @valid is set, otherwise store an all-zeroes (invalid)
 * entry.  Handles both flat and two-level (indirect) tables.
 * Returns true on success or when no table is backed by memory;
 * false on a DMA failure.
 */
static bool update_cte(GICv3ITSState *s, uint16_t icid, bool valid,
                       uint64_t rdbase)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t value;
    uint64_t l2t_addr;
    bool valid_l2t;
    uint32_t l2t_id;
    uint32_t max_l2_entries;
    uint64_t cte = 0;
    MemTxResult res = MEMTX_OK;

    /* No memory-backed collection table: nothing to write */
    if (!s->ct.valid) {
        return true;
    }

    if (valid) {
        /* add mapping entry to collection table */
        cte = (valid & TABLE_ENTRY_VALID_MASK) | (rdbase << 1ULL);
    }

    /*
     * The specification defines the format of level 1 entries of a
     * 2-level table, but the format of level 2 entries and the format
     * of flat-mapped tables is IMPDEF.
     */
    if (s->ct.indirect) {
        l2t_id = icid / (s->ct.page_sz / L1TABLE_ENTRY_SIZE);

        value = address_space_ldq_le(as,
                                     s->ct.base_addr +
                                     (l2t_id * L1TABLE_ENTRY_SIZE),
                                     MEMTXATTRS_UNSPECIFIED, &res);

        if (res != MEMTX_OK) {
            return false;
        }

        valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;

        /*
         * NOTE(review): if the L2 table is not valid the write is
         * silently skipped and we return true — confirm this is the
         * intended behavior rather than an error.
         */
        if (valid_l2t) {
            max_l2_entries = s->ct.page_sz / s->ct.entry_sz;

            /* L1 entry holds the physical address of the L2 page */
            l2t_addr = value & ((1ULL << 51) - 1);

            address_space_stq_le(as, l2t_addr +
                                 ((icid % max_l2_entries) * GITS_CTE_SIZE),
                                 cte, MEMTXATTRS_UNSPECIFIED, &res);
        }
    } else {
        /* Flat level table */
        address_space_stq_le(as, s->ct.base_addr + (icid * GITS_CTE_SIZE),
                             cte, MEMTXATTRS_UNSPECIFIED, &res);
    }
    if (res != MEMTX_OK) {
        return false;
    } else {
        return true;
    }
}
480 
481 static bool process_mapc(GICv3ITSState *s, uint32_t offset)
482 {
483     AddressSpace *as = &s->gicv3->dma_as;
484     uint16_t icid;
485     uint64_t rdbase;
486     bool valid;
487     MemTxResult res = MEMTX_OK;
488     bool result = false;
489     uint64_t value;
490 
491     offset += NUM_BYTES_IN_DW;
492     offset += NUM_BYTES_IN_DW;
493 
494     value = address_space_ldq_le(as, s->cq.base_addr + offset,
495                                  MEMTXATTRS_UNSPECIFIED, &res);
496 
497     if (res != MEMTX_OK) {
498         return result;
499     }
500 
501     icid = value & ICID_MASK;
502 
503     rdbase = (value & R_MAPC_RDBASE_MASK) >> R_MAPC_RDBASE_SHIFT;
504     rdbase &= RDBASE_PROCNUM_MASK;
505 
506     valid = (value & CMD_FIELD_VALID_MASK);
507 
508     if ((icid > s->ct.max_ids) || (rdbase >= s->gicv3->num_cpu)) {
509         qemu_log_mask(LOG_GUEST_ERROR,
510                       "ITS MAPC: invalid collection table attributes "
511                       "icid %d rdbase %" PRIu64 "\n",  icid, rdbase);
512         /*
513          * in this implementation, in case of error
514          * we ignore this command and move onto the next
515          * command in the queue
516          */
517     } else {
518         result = update_cte(s, icid, valid, rdbase);
519     }
520 
521     return result;
522 }
523 
/*
 * Write a Device Table Entry for @devid: record the ITT base address and
 * EventID size if @valid is set, otherwise store an all-zeroes (invalid)
 * entry.  Handles both flat and two-level (indirect) tables.
 * Returns true on success or when no table is backed by memory;
 * false on a DMA failure.
 */
static bool update_dte(GICv3ITSState *s, uint32_t devid, bool valid,
                       uint8_t size, uint64_t itt_addr)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t value;
    uint64_t l2t_addr;
    bool valid_l2t;
    uint32_t l2t_id;
    uint32_t max_l2_entries;
    uint64_t dte = 0;
    MemTxResult res = MEMTX_OK;

    if (s->dt.valid) {
        if (valid) {
            /* add mapping entry to device table */
            dte = (valid & TABLE_ENTRY_VALID_MASK) |
                  ((size & SIZE_MASK) << 1U) |
                  (itt_addr << GITS_DTE_ITTADDR_SHIFT);
        }
    } else {
        /* No memory-backed device table: nothing to write */
        return true;
    }

    /*
     * The specification defines the format of level 1 entries of a
     * 2-level table, but the format of level 2 entries and the format
     * of flat-mapped tables is IMPDEF.
     */
    if (s->dt.indirect) {
        l2t_id = devid / (s->dt.page_sz / L1TABLE_ENTRY_SIZE);

        value = address_space_ldq_le(as,
                                     s->dt.base_addr +
                                     (l2t_id * L1TABLE_ENTRY_SIZE),
                                     MEMTXATTRS_UNSPECIFIED, &res);

        if (res != MEMTX_OK) {
            return false;
        }

        valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;

        /*
         * NOTE(review): if the L2 table is not valid the write is
         * silently skipped and we return true — confirm this is the
         * intended behavior rather than an error.
         */
        if (valid_l2t) {
            max_l2_entries = s->dt.page_sz / s->dt.entry_sz;

            /* L1 entry holds the physical address of the L2 page */
            l2t_addr = value & ((1ULL << 51) - 1);

            address_space_stq_le(as, l2t_addr +
                                 ((devid % max_l2_entries) * GITS_DTE_SIZE),
                                 dte, MEMTXATTRS_UNSPECIFIED, &res);
        }
    } else {
        /* Flat level table */
        address_space_stq_le(as, s->dt.base_addr + (devid * GITS_DTE_SIZE),
                             dte, MEMTXATTRS_UNSPECIFIED, &res);
    }
    if (res != MEMTX_OK) {
        return false;
    } else {
        return true;
    }
}
586 
587 static bool process_mapd(GICv3ITSState *s, uint64_t value, uint32_t offset)
588 {
589     AddressSpace *as = &s->gicv3->dma_as;
590     uint32_t devid;
591     uint8_t size;
592     uint64_t itt_addr;
593     bool valid;
594     MemTxResult res = MEMTX_OK;
595     bool result = false;
596 
597     devid = ((value & DEVID_MASK) >> DEVID_SHIFT);
598 
599     offset += NUM_BYTES_IN_DW;
600     value = address_space_ldq_le(as, s->cq.base_addr + offset,
601                                  MEMTXATTRS_UNSPECIFIED, &res);
602 
603     if (res != MEMTX_OK) {
604         return result;
605     }
606 
607     size = (value & SIZE_MASK);
608 
609     offset += NUM_BYTES_IN_DW;
610     value = address_space_ldq_le(as, s->cq.base_addr + offset,
611                                  MEMTXATTRS_UNSPECIFIED, &res);
612 
613     if (res != MEMTX_OK) {
614         return result;
615     }
616 
617     itt_addr = (value & ITTADDR_MASK) >> ITTADDR_SHIFT;
618 
619     valid = (value & CMD_FIELD_VALID_MASK);
620 
621     if ((devid > s->dt.max_ids) ||
622         (size > FIELD_EX64(s->typer, GITS_TYPER, IDBITS))) {
623         qemu_log_mask(LOG_GUEST_ERROR,
624                       "ITS MAPD: invalid device table attributes "
625                       "devid %d or size %d\n", devid, size);
626         /*
627          * in this implementation, in case of error
628          * we ignore this command and move onto the next
629          * command in the queue
630          */
631     } else {
632         result = update_dte(s, devid, valid, size, itt_addr);
633     }
634 
635     return result;
636 }
637 
638 /*
639  * Current implementation blocks until all
640  * commands are processed
641  */
642 static void process_cmdq(GICv3ITSState *s)
643 {
644     uint32_t wr_offset = 0;
645     uint32_t rd_offset = 0;
646     uint32_t cq_offset = 0;
647     uint64_t data;
648     AddressSpace *as = &s->gicv3->dma_as;
649     MemTxResult res = MEMTX_OK;
650     bool result = true;
651     uint8_t cmd;
652     int i;
653 
654     if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
655         return;
656     }
657 
658     wr_offset = FIELD_EX64(s->cwriter, GITS_CWRITER, OFFSET);
659 
660     if (wr_offset > s->cq.max_entries) {
661         qemu_log_mask(LOG_GUEST_ERROR,
662                       "%s: invalid write offset "
663                       "%d\n", __func__, wr_offset);
664         return;
665     }
666 
667     rd_offset = FIELD_EX64(s->creadr, GITS_CREADR, OFFSET);
668 
669     if (rd_offset > s->cq.max_entries) {
670         qemu_log_mask(LOG_GUEST_ERROR,
671                       "%s: invalid read offset "
672                       "%d\n", __func__, rd_offset);
673         return;
674     }
675 
676     while (wr_offset != rd_offset) {
677         cq_offset = (rd_offset * GITS_CMDQ_ENTRY_SIZE);
678         data = address_space_ldq_le(as, s->cq.base_addr + cq_offset,
679                                     MEMTXATTRS_UNSPECIFIED, &res);
680         if (res != MEMTX_OK) {
681             result = false;
682         }
683         cmd = (data & CMD_MASK);
684 
685         switch (cmd) {
686         case GITS_CMD_INT:
687             res = process_its_cmd(s, data, cq_offset, INTERRUPT);
688             break;
689         case GITS_CMD_CLEAR:
690             res = process_its_cmd(s, data, cq_offset, CLEAR);
691             break;
692         case GITS_CMD_SYNC:
693             /*
694              * Current implementation makes a blocking synchronous call
695              * for every command issued earlier, hence the internal state
696              * is already consistent by the time SYNC command is executed.
697              * Hence no further processing is required for SYNC command.
698              */
699             break;
700         case GITS_CMD_MAPD:
701             result = process_mapd(s, data, cq_offset);
702             break;
703         case GITS_CMD_MAPC:
704             result = process_mapc(s, cq_offset);
705             break;
706         case GITS_CMD_MAPTI:
707             result = process_mapti(s, data, cq_offset, false);
708             break;
709         case GITS_CMD_MAPI:
710             result = process_mapti(s, data, cq_offset, true);
711             break;
712         case GITS_CMD_DISCARD:
713             result = process_its_cmd(s, data, cq_offset, DISCARD);
714             break;
715         case GITS_CMD_INV:
716         case GITS_CMD_INVALL:
717             /*
718              * Current implementation doesn't cache any ITS tables,
719              * but the calculated lpi priority information. We only
720              * need to trigger lpi priority re-calculation to be in
721              * sync with LPI config table or pending table changes.
722              */
723             for (i = 0; i < s->gicv3->num_cpu; i++) {
724                 gicv3_redist_update_lpi(&s->gicv3->cpu[i]);
725             }
726             break;
727         default:
728             break;
729         }
730         if (result) {
731             rd_offset++;
732             rd_offset %= s->cq.max_entries;
733             s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, OFFSET, rd_offset);
734         } else {
735             /*
736              * in this implementation, in case of dma read/write error
737              * we stall the command processing
738              */
739             s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
740             qemu_log_mask(LOG_GUEST_ERROR,
741                           "%s: %x cmd processing failed\n", __func__, cmd);
742             break;
743         }
744     }
745 }
746 
747 /*
748  * This function extracts the ITS Device and Collection table specific
749  * parameters (like base_addr, size etc) from GITS_BASER register.
750  * It is called during ITS enable and also during post_load migration
751  */
752 static void extract_table_params(GICv3ITSState *s)
753 {
754     uint16_t num_pages = 0;
755     uint8_t  page_sz_type;
756     uint8_t type;
757     uint32_t page_sz = 0;
758     uint64_t value;
759 
760     for (int i = 0; i < 8; i++) {
761         TableDesc *td;
762         int idbits;
763 
764         value = s->baser[i];
765 
766         if (!value) {
767             continue;
768         }
769 
770         page_sz_type = FIELD_EX64(value, GITS_BASER, PAGESIZE);
771 
772         switch (page_sz_type) {
773         case 0:
774             page_sz = GITS_PAGE_SIZE_4K;
775             break;
776 
777         case 1:
778             page_sz = GITS_PAGE_SIZE_16K;
779             break;
780 
781         case 2:
782         case 3:
783             page_sz = GITS_PAGE_SIZE_64K;
784             break;
785 
786         default:
787             g_assert_not_reached();
788         }
789 
790         num_pages = FIELD_EX64(value, GITS_BASER, SIZE) + 1;
791 
792         type = FIELD_EX64(value, GITS_BASER, TYPE);
793 
794         switch (type) {
795         case GITS_BASER_TYPE_DEVICE:
796             td = &s->dt;
797             idbits = FIELD_EX64(s->typer, GITS_TYPER, DEVBITS) + 1;
798             break;
799         case GITS_BASER_TYPE_COLLECTION:
800             td = &s->ct;
801             if (FIELD_EX64(s->typer, GITS_TYPER, CIL)) {
802                 idbits = FIELD_EX64(s->typer, GITS_TYPER, CIDBITS) + 1;
803             } else {
804                 /* 16-bit CollectionId supported when CIL == 0 */
805                 idbits = 16;
806             }
807             break;
808         default:
809             /*
810              * GITS_BASER<n>.TYPE is read-only, so GITS_BASER_RO_MASK
811              * ensures we will only see type values corresponding to
812              * the values set up in gicv3_its_reset().
813              */
814             g_assert_not_reached();
815         }
816 
817         memset(td, 0, sizeof(*td));
818         td->valid = FIELD_EX64(value, GITS_BASER, VALID);
819         /*
820          * If GITS_BASER<n>.Valid is 0 for any <n> then we will not process
821          * interrupts. (GITS_TYPER.HCC is 0 for this implementation, so we
822          * do not have a special case where the GITS_BASER<n>.Valid bit is 0
823          * for the register corresponding to the Collection table but we
824          * still have to process interrupts using non-memory-backed
825          * Collection table entries.)
826          */
827         if (!td->valid) {
828             continue;
829         }
830         td->page_sz = page_sz;
831         td->indirect = FIELD_EX64(value, GITS_BASER, INDIRECT);
832         td->entry_sz = FIELD_EX64(value, GITS_BASER, ENTRYSIZE);
833         td->base_addr = baser_base_addr(value, page_sz);
834         if (!td->indirect) {
835             td->max_entries = (num_pages * page_sz) / td->entry_sz;
836         } else {
837             td->max_entries = (((num_pages * page_sz) /
838                                   L1TABLE_ENTRY_SIZE) *
839                                  (page_sz / td->entry_sz));
840         }
841         td->max_ids = 1ULL << idbits;
842     }
843 }
844 
845 static void extract_cmdq_params(GICv3ITSState *s)
846 {
847     uint16_t num_pages = 0;
848     uint64_t value = s->cbaser;
849 
850     num_pages = FIELD_EX64(value, GITS_CBASER, SIZE) + 1;
851 
852     memset(&s->cq, 0 , sizeof(s->cq));
853     s->cq.valid = FIELD_EX64(value, GITS_CBASER, VALID);
854 
855     if (s->cq.valid) {
856         s->cq.max_entries = (num_pages * GITS_PAGE_SIZE_4K) /
857                              GITS_CMDQ_ENTRY_SIZE;
858         s->cq.base_addr = FIELD_EX64(value, GITS_CBASER, PHYADDR);
859         s->cq.base_addr <<= R_GITS_CBASER_PHYADDR_SHIFT;
860     }
861 }
862 
863 static MemTxResult gicv3_its_translation_write(void *opaque, hwaddr offset,
864                                                uint64_t data, unsigned size,
865                                                MemTxAttrs attrs)
866 {
867     GICv3ITSState *s = (GICv3ITSState *)opaque;
868     bool result = true;
869     uint32_t devid = 0;
870 
871     switch (offset) {
872     case GITS_TRANSLATER:
873         if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
874             devid = attrs.requester_id;
875             result = process_its_cmd(s, data, devid, NONE);
876         }
877         break;
878     default:
879         break;
880     }
881 
882     if (result) {
883         return MEMTX_OK;
884     } else {
885         return MEMTX_ERROR;
886     }
887 }
888 
889 static bool its_writel(GICv3ITSState *s, hwaddr offset,
890                               uint64_t value, MemTxAttrs attrs)
891 {
892     bool result = true;
893     int index;
894 
895     switch (offset) {
896     case GITS_CTLR:
897         if (value & R_GITS_CTLR_ENABLED_MASK) {
898             s->ctlr |= R_GITS_CTLR_ENABLED_MASK;
899             extract_table_params(s);
900             extract_cmdq_params(s);
901             s->creadr = 0;
902             process_cmdq(s);
903         } else {
904             s->ctlr &= ~R_GITS_CTLR_ENABLED_MASK;
905         }
906         break;
907     case GITS_CBASER:
908         /*
909          * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
910          *                 already enabled
911          */
912         if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
913             s->cbaser = deposit64(s->cbaser, 0, 32, value);
914             s->creadr = 0;
915             s->cwriter = s->creadr;
916         }
917         break;
918     case GITS_CBASER + 4:
919         /*
920          * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
921          *                 already enabled
922          */
923         if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
924             s->cbaser = deposit64(s->cbaser, 32, 32, value);
925             s->creadr = 0;
926             s->cwriter = s->creadr;
927         }
928         break;
929     case GITS_CWRITER:
930         s->cwriter = deposit64(s->cwriter, 0, 32,
931                                (value & ~R_GITS_CWRITER_RETRY_MASK));
932         if (s->cwriter != s->creadr) {
933             process_cmdq(s);
934         }
935         break;
936     case GITS_CWRITER + 4:
937         s->cwriter = deposit64(s->cwriter, 32, 32, value);
938         break;
939     case GITS_CREADR:
940         if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
941             s->creadr = deposit64(s->creadr, 0, 32,
942                                   (value & ~R_GITS_CREADR_STALLED_MASK));
943         } else {
944             /* RO register, ignore the write */
945             qemu_log_mask(LOG_GUEST_ERROR,
946                           "%s: invalid guest write to RO register at offset "
947                           TARGET_FMT_plx "\n", __func__, offset);
948         }
949         break;
950     case GITS_CREADR + 4:
951         if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
952             s->creadr = deposit64(s->creadr, 32, 32, value);
953         } else {
954             /* RO register, ignore the write */
955             qemu_log_mask(LOG_GUEST_ERROR,
956                           "%s: invalid guest write to RO register at offset "
957                           TARGET_FMT_plx "\n", __func__, offset);
958         }
959         break;
960     case GITS_BASER ... GITS_BASER + 0x3f:
961         /*
962          * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
963          *                 already enabled
964          */
965         if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
966             index = (offset - GITS_BASER) / 8;
967 
968             if (offset & 7) {
969                 value <<= 32;
970                 value &= ~GITS_BASER_RO_MASK;
971                 s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(0, 32);
972                 s->baser[index] |= value;
973             } else {
974                 value &= ~GITS_BASER_RO_MASK;
975                 s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(32, 32);
976                 s->baser[index] |= value;
977             }
978         }
979         break;
980     case GITS_IIDR:
981     case GITS_IDREGS ... GITS_IDREGS + 0x2f:
982         /* RO registers, ignore the write */
983         qemu_log_mask(LOG_GUEST_ERROR,
984                       "%s: invalid guest write to RO register at offset "
985                       TARGET_FMT_plx "\n", __func__, offset);
986         break;
987     default:
988         result = false;
989         break;
990     }
991     return result;
992 }
993 
994 static bool its_readl(GICv3ITSState *s, hwaddr offset,
995                              uint64_t *data, MemTxAttrs attrs)
996 {
997     bool result = true;
998     int index;
999 
1000     switch (offset) {
1001     case GITS_CTLR:
1002         *data = s->ctlr;
1003         break;
1004     case GITS_IIDR:
1005         *data = gicv3_iidr();
1006         break;
1007     case GITS_IDREGS ... GITS_IDREGS + 0x2f:
1008         /* ID registers */
1009         *data = gicv3_idreg(offset - GITS_IDREGS);
1010         break;
1011     case GITS_TYPER:
1012         *data = extract64(s->typer, 0, 32);
1013         break;
1014     case GITS_TYPER + 4:
1015         *data = extract64(s->typer, 32, 32);
1016         break;
1017     case GITS_CBASER:
1018         *data = extract64(s->cbaser, 0, 32);
1019         break;
1020     case GITS_CBASER + 4:
1021         *data = extract64(s->cbaser, 32, 32);
1022         break;
1023     case GITS_CREADR:
1024         *data = extract64(s->creadr, 0, 32);
1025         break;
1026     case GITS_CREADR + 4:
1027         *data = extract64(s->creadr, 32, 32);
1028         break;
1029     case GITS_CWRITER:
1030         *data = extract64(s->cwriter, 0, 32);
1031         break;
1032     case GITS_CWRITER + 4:
1033         *data = extract64(s->cwriter, 32, 32);
1034         break;
1035     case GITS_BASER ... GITS_BASER + 0x3f:
1036         index = (offset - GITS_BASER) / 8;
1037         if (offset & 7) {
1038             *data = extract64(s->baser[index], 32, 32);
1039         } else {
1040             *data = extract64(s->baser[index], 0, 32);
1041         }
1042         break;
1043     default:
1044         result = false;
1045         break;
1046     }
1047     return result;
1048 }
1049 
1050 static bool its_writell(GICv3ITSState *s, hwaddr offset,
1051                                uint64_t value, MemTxAttrs attrs)
1052 {
1053     bool result = true;
1054     int index;
1055 
1056     switch (offset) {
1057     case GITS_BASER ... GITS_BASER + 0x3f:
1058         /*
1059          * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
1060          *                 already enabled
1061          */
1062         if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
1063             index = (offset - GITS_BASER) / 8;
1064             s->baser[index] &= GITS_BASER_RO_MASK;
1065             s->baser[index] |= (value & ~GITS_BASER_RO_MASK);
1066         }
1067         break;
1068     case GITS_CBASER:
1069         /*
1070          * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
1071          *                 already enabled
1072          */
1073         if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
1074             s->cbaser = value;
1075             s->creadr = 0;
1076             s->cwriter = s->creadr;
1077         }
1078         break;
1079     case GITS_CWRITER:
1080         s->cwriter = value & ~R_GITS_CWRITER_RETRY_MASK;
1081         if (s->cwriter != s->creadr) {
1082             process_cmdq(s);
1083         }
1084         break;
1085     case GITS_CREADR:
1086         if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
1087             s->creadr = value & ~R_GITS_CREADR_STALLED_MASK;
1088         } else {
1089             /* RO register, ignore the write */
1090             qemu_log_mask(LOG_GUEST_ERROR,
1091                           "%s: invalid guest write to RO register at offset "
1092                           TARGET_FMT_plx "\n", __func__, offset);
1093         }
1094         break;
1095     case GITS_TYPER:
1096         /* RO registers, ignore the write */
1097         qemu_log_mask(LOG_GUEST_ERROR,
1098                       "%s: invalid guest write to RO register at offset "
1099                       TARGET_FMT_plx "\n", __func__, offset);
1100         break;
1101     default:
1102         result = false;
1103         break;
1104     }
1105     return result;
1106 }
1107 
1108 static bool its_readll(GICv3ITSState *s, hwaddr offset,
1109                               uint64_t *data, MemTxAttrs attrs)
1110 {
1111     bool result = true;
1112     int index;
1113 
1114     switch (offset) {
1115     case GITS_TYPER:
1116         *data = s->typer;
1117         break;
1118     case GITS_BASER ... GITS_BASER + 0x3f:
1119         index = (offset - GITS_BASER) / 8;
1120         *data = s->baser[index];
1121         break;
1122     case GITS_CBASER:
1123         *data = s->cbaser;
1124         break;
1125     case GITS_CREADR:
1126         *data = s->creadr;
1127         break;
1128     case GITS_CWRITER:
1129         *data = s->cwriter;
1130         break;
1131     default:
1132         result = false;
1133         break;
1134     }
1135     return result;
1136 }
1137 
1138 static MemTxResult gicv3_its_read(void *opaque, hwaddr offset, uint64_t *data,
1139                                   unsigned size, MemTxAttrs attrs)
1140 {
1141     GICv3ITSState *s = (GICv3ITSState *)opaque;
1142     bool result;
1143 
1144     switch (size) {
1145     case 4:
1146         result = its_readl(s, offset, data, attrs);
1147         break;
1148     case 8:
1149         result = its_readll(s, offset, data, attrs);
1150         break;
1151     default:
1152         result = false;
1153         break;
1154     }
1155 
1156     if (!result) {
1157         qemu_log_mask(LOG_GUEST_ERROR,
1158                       "%s: invalid guest read at offset " TARGET_FMT_plx
1159                       "size %u\n", __func__, offset, size);
1160         /*
1161          * The spec requires that reserved registers are RAZ/WI;
1162          * so use false returns from leaf functions as a way to
1163          * trigger the guest-error logging but don't return it to
1164          * the caller, or we'll cause a spurious guest data abort.
1165          */
1166         *data = 0;
1167     }
1168     return MEMTX_OK;
1169 }
1170 
1171 static MemTxResult gicv3_its_write(void *opaque, hwaddr offset, uint64_t data,
1172                                    unsigned size, MemTxAttrs attrs)
1173 {
1174     GICv3ITSState *s = (GICv3ITSState *)opaque;
1175     bool result;
1176 
1177     switch (size) {
1178     case 4:
1179         result = its_writel(s, offset, data, attrs);
1180         break;
1181     case 8:
1182         result = its_writell(s, offset, data, attrs);
1183         break;
1184     default:
1185         result = false;
1186         break;
1187     }
1188 
1189     if (!result) {
1190         qemu_log_mask(LOG_GUEST_ERROR,
1191                       "%s: invalid guest write at offset " TARGET_FMT_plx
1192                       "size %u\n", __func__, offset, size);
1193         /*
1194          * The spec requires that reserved registers are RAZ/WI;
1195          * so use false returns from leaf functions as a way to
1196          * trigger the guest-error logging but don't return it to
1197          * the caller, or we'll cause a spurious guest data abort.
1198          */
1199     }
1200     return MEMTX_OK;
1201 }
1202 
/*
 * Register frame for the ITS control registers (GITS_CTLR, GITS_BASER<n>,
 * command queue registers, ID registers). Accepts 32- and 64-bit accesses.
 */
static const MemoryRegionOps gicv3_its_control_ops = {
    .read_with_attrs = gicv3_its_read,
    .write_with_attrs = gicv3_its_write,
    .valid.min_access_size = 4,
    .valid.max_access_size = 8,
    .impl.min_access_size = 4,
    .impl.max_access_size = 8,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
1212 
/*
 * Register frame for the ITS translation register (GITS_TRANSLATER),
 * which devices write to deliver LPIs. Write-only (no read handler);
 * accepts 16- and 32-bit accesses.
 */
static const MemoryRegionOps gicv3_its_translation_ops = {
    .write_with_attrs = gicv3_its_translation_write,
    .valid.min_access_size = 2,
    .valid.max_access_size = 4,
    .impl.min_access_size = 2,
    .impl.max_access_size = 4,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
1221 
/*
 * Realize the emulated ITS: verify that every redistributor supports
 * physical LPIs, map the control and translation register frames, set up
 * the DMA address space used for table and queue walks, and populate the
 * fixed feature fields of GITS_TYPER.
 */
static void gicv3_arm_its_realize(DeviceState *dev, Error **errp)
{
    GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
    int i;

    /* The ITS delivers physical LPIs, so every CPU's redistributor
     * must advertise GICR_TYPER.PLPIS. */
    for (i = 0; i < s->gicv3->num_cpu; i++) {
        if (!(s->gicv3->cpu[i].gicr_typer & GICR_TYPER_PLPIS)) {
            error_setg(errp, "Physical LPI not supported by CPU %d", i);
            return;
        }
    }

    gicv3_its_init_mmio(s, &gicv3_its_control_ops, &gicv3_its_translation_ops);

    /* Address space the ITS uses to access its in-guest-memory tables */
    address_space_init(&s->gicv3->dma_as, s->gicv3->dma,
                       "gicv3-its-sysmem");

    /* set the ITS default features supported */
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, PHYSICAL,
                          GITS_TYPE_PHYSICAL);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, ITT_ENTRY_SIZE,
                          ITS_ITT_ENTRY_SIZE - 1);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, IDBITS, ITS_IDBITS);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, DEVBITS, ITS_DEVBITS);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIL, 1);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIDBITS, ITS_CIDBITS);
}
1249 
/*
 * Device reset: run the parent class reset, then set the IMPDEF reset
 * values of GITS_CTLR and the GITS_BASER<n> table descriptors.
 */
static void gicv3_its_reset(DeviceState *dev)
{
    GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
    GICv3ITSClass *c = ARM_GICV3_ITS_GET_CLASS(s);

    c->parent_reset(dev);

    /* Quiescent bit reset to 1 */
    s->ctlr = FIELD_DP32(s->ctlr, GITS_CTLR, QUIESCENT, 1);

    /*
     * setting GITS_BASER0.Type = 0b001 (Device)
     *         GITS_BASER1.Type = 0b100 (Collection Table)
     *         GITS_BASER<n>.Type,where n = 3 to 7 are 0b00 (Unimplemented)
     *         GITS_BASER<0,1>.Page_Size = 64KB
     * and default translation table entry size to 16 bytes
     */
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, TYPE,
                             GITS_BASER_TYPE_DEVICE);
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, PAGESIZE,
                             GITS_BASER_PAGESIZE_64K);
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, ENTRYSIZE,
                             GITS_DTE_SIZE - 1);

    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, TYPE,
                             GITS_BASER_TYPE_COLLECTION);
    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, PAGESIZE,
                             GITS_BASER_PAGESIZE_64K);
    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, ENTRYSIZE,
                             GITS_CTE_SIZE - 1);
}
1281 
1282 static void gicv3_its_post_load(GICv3ITSState *s)
1283 {
1284     if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
1285         extract_table_params(s);
1286         extract_cmdq_params(s);
1287     }
1288 }
1289 
static Property gicv3_its_props[] = {
    /* Mandatory link to the GICv3 this ITS forwards LPIs to */
    DEFINE_PROP_LINK("parent-gicv3", GICv3ITSState, gicv3, "arm-gicv3",
                     GICv3State *),
    DEFINE_PROP_END_OF_LIST(),
};
1295 
/*
 * Class init: wire up realize, properties, the reset chain (saving the
 * parent reset handler so gicv3_its_reset can call it) and the
 * migration post-load hook.
 */
static void gicv3_its_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    GICv3ITSClass *ic = ARM_GICV3_ITS_CLASS(klass);
    GICv3ITSCommonClass *icc = ARM_GICV3_ITS_COMMON_CLASS(klass);

    dc->realize = gicv3_arm_its_realize;
    device_class_set_props(dc, gicv3_its_props);
    device_class_set_parent_reset(dc, gicv3_its_reset, &ic->parent_reset);
    icc->post_load = gicv3_its_post_load;
}
1307 
/* QOM type registration info for the emulated (non-KVM) GICv3 ITS */
static const TypeInfo gicv3_its_info = {
    .name = TYPE_ARM_GICV3_ITS,
    .parent = TYPE_ARM_GICV3_ITS_COMMON,
    .instance_size = sizeof(GICv3ITSState),
    .class_init = gicv3_its_class_init,
    .class_size = sizeof(GICv3ITSClass),
};
1315 
/* Register the ITS QOM type with the type system at startup */
static void gicv3_its_register_types(void)
{
    type_register_static(&gicv3_its_info);
}

type_init(gicv3_its_register_types)
1322